blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c2e7773d9265fed85bc1657e7b3f54bf72cd654d | 231f2dca688c2b1690ea23be467d035a9f38223a | /metadata/model.py | 39bd5af740f361f1b9da0f8963e2b350527bb8cc | [] | no_license | peternortonuk/projects | f525bc9717b1c7d81156cdfe7f4164dd826618a2 | 29c6ac829952f8fedb05935bebddc60d0bb7adb9 | refs/heads/master | 2021-08-22T13:03:31.503001 | 2021-07-21T15:33:12 | 2021-07-21T15:33:12 | 147,948,752 | 0 | 1 | null | 2021-06-14T13:28:25 | 2018-09-08T16:06:17 | Python | UTF-8 | Python | false | false | 503 | py | from __future__ import unicode_literals, print_function
from atom.api import Atom, List, Int, observe
import enaml
from enaml.qt.qt_application import QtApplication
class Metadata(Atom):
    """Observable model behind the metadata view.

    Atom members emit change notifications that the enaml UI binds to.
    """
    # Numeric identifier of the curve being edited.
    curve_id = Int()
    # Free-form list of attribute strings shown in the view.
    attributes = List()
def main():
    """Create the model, instantiate the enaml view and run the Qt loop."""
    # enaml.imports() makes .enaml view definitions importable; the view
    # module must be imported inside this context manager.
    with enaml.imports():
        from view import MetaDataView

    model = Metadata(curve_id=123, attributes=['gas', 'NBP'])
    qt_app = QtApplication()
    window = MetaDataView(curve=model)
    window.show()
    qt_app.start()
if __name__ == '__main__':
main() | [
"peter.norton@gazprom-mt.com"
] | peter.norton@gazprom-mt.com |
28e61427879ca0fe867f77ca8c925cb1d322851b | 8b0991e44dc5e10dc361bfa6afaad600fe04a212 | /modules/reactor/reactions.py | 11e8123378524674869ba99268ccb5e7ec603ecb | [] | no_license | Cubiss/discord_bot | f3fd52eed075434bdd22de8e3ce1cdc42e0ba592 | 08686e067c4f9e0d2dc15c2c710c4eaf68694704 | refs/heads/master | 2023-06-24T08:37:32.900290 | 2023-06-11T22:20:50 | 2023-06-11T22:20:50 | 187,587,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,384 | py | from classes.entity import Entity, EntityItem, Column
import sqlite3
import datetime
class Reactions(Entity):
    """Entity collection backed by the "Reactor" table."""

    def __init__(self, db: sqlite3.Connection):
        """Declare the Reactor schema; one row per configured auto-reaction."""
        schema = [
            Column('ID', int, nullable=False, primary_key=True, auto_increment=True),
            Column('USER_ID', int, nullable=False),
            Column('USER_NAME', str, nullable=False),
            Column('EMOTE', str, nullable=False),
            Column('CHANCE', float, nullable=False, default=0),
            Column('SERVER_ID', int),
            Column('COOLDOWN', int, nullable=False, default=0),
            Column('ENABLED', bool, nullable=False, default=True),
        ]
        super().__init__(db, "Reactor", schema, Reaction)
class Reaction(EntityItem):
    """A single row of the Reactor table.

    The uppercase attributes mirror the columns declared in ``Reactions``;
    they are populated by ``EntityItem.__init__``.
    """

    def __init__(self, *args, **kwargs):
        # Column-backed fields (filled in by the base class below).
        self.ID = None
        self.USER_ID = None
        self.USER_NAME = None
        self.EMOTE = None
        self.CHANCE = None
        self.SERVER_ID = None
        self.COOLDOWN = None
        self.ENABLED = None
        # When this reaction last fired; used for cooldown tracking.
        self.last_used = datetime.datetime.min
        super().__init__(*args, **kwargs)

    def get_emote(self):
        """Return EMOTE with the surrounding '<'/'>' markers stripped."""
        return self.EMOTE.replace('<', '').replace('>', '')
| [
"solin.jakub@gmail.com"
] | solin.jakub@gmail.com |
f5007ffa39258aef482e33fb6f05392ebada382d | d85fbf635ae8991bd582888d3ab95f436f81187f | /lib/ult/vcoco_diagnose.py | dd276e9215a1369fbbca14cf4aacab069eb7cbd5 | [
"MIT"
] | permissive | issac8huxleg/iCAN | 72cd1703b9d08ad9d4dced28b647617c9fb08d50 | dc95e334f341a610813f1bbd8deafd99f5c0253d | refs/heads/master | 2020-03-27T17:28:13.596579 | 2018-08-31T00:23:36 | 2018-08-31T00:23:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,797 | py | # AUTORIGHTS
# ---------------------------------------------------------
# Copyright (c) 2017, Saurabh Gupta
#
# This file is part of the VCOCO dataset hooks and is available
# under the terms of the Simplified BSD License provided in
# LICENSE. Please retain this notice and LICENSE if you use
# this file (or any portion of it) in your project.
# ---------------------------------------------------------
# vsrl_data is a dictionary for each action class:
# image_id - Nx1
# ann_id - Nx1
# label - Nx1
# action_name - string
# role_name - ['agent', 'obj', 'instr']
# role_object_id - N x K matrix, obviously [:,0] is same as ann_id
import numpy as np
from pycocotools.coco import COCO
import os, json
import copy
import pickle
import ipdb
class VCOCOdiagnose(object):
def __init__(self, vsrl_annot_file, coco_annot_file,
             split_file):
    """Input:
    vsrl_annot_file: path to the vcoco annotations
    coco_annot_file: path to the coco annotations
    split_file: image ids for split
    """
    self.COCO = COCO(coco_annot_file)
    self.VCOCO = _load_vcoco(vsrl_annot_file)
    # Pass the path directly so np.loadtxt opens *and closes* the file;
    # the previous open(split_file, 'r') leaked the file handle.
    self.image_ids = np.loadtxt(split_file)  # Test set, 4946 images

    # simple check: every annotated image id must belong to the split
    assert np.all(np.equal(np.sort(np.unique(self.VCOCO[0]['image_id'])), np.sort(self.image_ids)))

    self._init_coco()
    self._init_vcoco()
def _init_vcoco(self):
actions = [x['action_name'] for x in self.VCOCO]
roles = [x['role_name'] for x in self.VCOCO]
self.actions = actions
self.actions_to_id_map = {v: i for i, v in enumerate(self.actions)}
self.num_actions = len(self.actions)
self.roles = roles
def _init_coco(self):
category_ids = self.COCO.getCatIds()
categories = [c['name'] for c in self.COCO.loadCats(category_ids)]
self.category_to_id_map = dict(zip(categories, category_ids))
self.classes = ['__background__'] + categories
self.num_classes = len(self.classes)
self.json_category_id_to_contiguous_id = {
v: i + 1 for i, v in enumerate(self.COCO.getCatIds())}
self.contiguous_category_id_to_json_id = {
v: k for k, v in self.json_category_id_to_contiguous_id.items()}
def _get_vcocodb(self):
    """Build the ground-truth database: one entry per image of the split,
    with sanitized boxes, class labels, action labels and role ids attached."""
    vcocodb = copy.deepcopy(self.COCO.loadImgs(self.image_ids.tolist()))
    for entry in vcocodb:
        self._prep_vcocodb_entry(entry)
        self._add_gt_annotations(entry)

    # print
    # Debug toggle: flip `if 0` to `if 1` to print per-action gt counts
    # (restricted to person instances, gt_classes == 1).
    if 0:
        nums = np.zeros((self.num_actions), dtype=np.int32)
        for entry in vcocodb:
            for aid in range(self.num_actions):
                nums[aid] += np.sum(np.logical_and(entry['gt_actions'][:, aid]==1, entry['gt_classes']==1))
        for aid in range(self.num_actions):
            print('Action %s = %d'%(self.actions[aid], nums[aid]))

    return vcocodb
def _prep_vcocodb_entry(self, entry):
entry['boxes'] = np.empty((0, 4), dtype=np.float32)
entry['is_crowd'] = np.empty((0), dtype=np.bool)
entry['gt_classes'] = np.empty((0), dtype=np.int32)
entry['gt_actions'] = np.empty((0, self.num_actions), dtype=np.int32)
entry['gt_role_id'] = np.empty((0, self.num_actions, 2), dtype=np.int32)
def _add_gt_annotations(self, entry):
    """Fill one image entry with sanitized gt boxes, classes, action labels
    and role ids, appending to the empty arrays set by _prep_vcocodb_entry."""
    ann_ids = self.COCO.getAnnIds(imgIds=entry['id'], iscrowd=None)
    objs = self.COCO.loadAnns(ann_ids)
    # Sanitize bboxes -- some are invalid
    valid_objs = []
    valid_ann_ids = []
    width = entry['width']
    height = entry['height']
    for i, obj in enumerate(objs):
        if 'ignore' in obj and obj['ignore'] == 1:
            continue
        # Convert form x1, y1, w, h to x1, y1, x2, y2
        x1 = obj['bbox'][0]
        y1 = obj['bbox'][1]
        x2 = x1 + np.maximum(0., obj['bbox'][2] - 1.)
        y2 = y1 + np.maximum(0., obj['bbox'][3] - 1.)
        x1, y1, x2, y2 = clip_xyxy_to_image(
            x1, y1, x2, y2, height, width)
        # Require non-zero seg area and more than 1x1 box size
        if obj['area'] > 0 and x2 > x1 and y2 > y1:
            obj['clean_bbox'] = [x1, y1, x2, y2]
            valid_objs.append(obj)
            valid_ann_ids.append(ann_ids[i])
    num_valid_objs = len(valid_objs)
    assert num_valid_objs == len(valid_ann_ids)

    # Reuse the dtypes fixed by _prep_vcocodb_entry so np.append below
    # cannot silently up/down-cast.
    boxes = np.zeros((num_valid_objs, 4), dtype=entry['boxes'].dtype)
    is_crowd = np.zeros((num_valid_objs), dtype=entry['is_crowd'].dtype)
    gt_classes = np.zeros((num_valid_objs), dtype=entry['gt_classes'].dtype)
    # -1 marks "no annotation" for both actions and role ids.
    gt_actions = -np.ones((num_valid_objs, self.num_actions), dtype=entry['gt_actions'].dtype)
    gt_role_id = -np.ones((num_valid_objs, self.num_actions, 2), dtype=entry['gt_role_id'].dtype)

    for ix, obj in enumerate(valid_objs):
        cls = self.json_category_id_to_contiguous_id[obj['category_id']]
        boxes[ix, :] = obj['clean_bbox']
        gt_classes[ix] = cls
        is_crowd[ix] = obj['iscrowd']
        # Per-instance V-COCO action labels and role indices (indices are
        # positions within valid_ann_ids, i.e. rows of this entry).
        gt_actions[ix, :], gt_role_id[ix, :, :] = \
            self._get_vsrl_data(valid_ann_ids[ix],
                valid_ann_ids, valid_objs)

    entry['boxes'] = np.append(entry['boxes'], boxes, axis=0)
    entry['gt_classes'] = np.append(entry['gt_classes'], gt_classes)
    entry['is_crowd'] = np.append(entry['is_crowd'], is_crowd)
    entry['gt_actions'] = np.append(entry['gt_actions'], gt_actions, axis=0)
    entry['gt_role_id'] = np.append(entry['gt_role_id'], gt_role_id, axis=0)
def _get_vsrl_data(self, ann_id, ann_ids, objs):
    """ Get VSRL data for ann_id.

    Returns (action_id, role_id):
      action_id -- (num_actions,) with 1/0 per action, or all -1 when the
                   annotation is not part of V-COCO at all;
      role_id   -- (num_actions, 2) index into ann_ids of each role's
                   object, -1 where there is no role object.
    """
    action_id = -np.ones((self.num_actions), dtype=np.int32)
    role_id = -np.ones((self.num_actions, 2), dtype=np.int32)
    # check if ann_id in vcoco annotations
    in_vcoco = np.where(self.VCOCO[0]['ann_id'] == ann_id)[0]
    if in_vcoco.size > 0:
        action_id[:] = 0
        role_id[:] = -1
    else:
        # Not a V-COCO instance: leave everything at -1.
        return action_id, role_id
    for i, x in enumerate(self.VCOCO):
        assert x['action_name'] == self.actions[i]
        has_label = np.where(np.logical_and(x['ann_id'] == ann_id, x['label'] == 1))[0]
        if has_label.size > 0:
            action_id[i] = 1
            assert has_label.size == 1
            rids = x['role_object_id'][has_label]
            # Column 0 of role_object_id is the agent itself; the remaining
            # columns are the role objects (0 meaning "no object").
            assert rids[0, 0] == ann_id
            for j in range(1, rids.shape[1]):
                if rids[0, j] == 0:
                    # no role
                    continue
                aid = np.where(ann_ids == rids[0, j])[0]
                assert aid.size > 0
                role_id[i, j - 1] = aid
    return action_id, role_id
def _collect_detections_for_image(self, dets, image_id):
agents = np.empty((0, 4 + self.num_actions), dtype=np.float32) # 4 + 26 = 30
roles = np.empty((0, 5 * self.num_actions, 2), dtype=np.float32) # (5 * 26), 2
for det in dets: # loop all detection instance
if det['image_id'] == image_id:# might be several
this_agent = np.zeros((1, 4 + self.num_actions), dtype=np.float32)
this_role = np.zeros((1, 5 * self.num_actions, 2), dtype=np.float32)
this_agent[0, :4] = det['person_box']
for aid in range(self.num_actions): # loop 26 actions
for j, rid in enumerate(self.roles[aid]):
if rid == 'agent':
this_agent[0, 4 + aid] = det[self.actions[aid] + '_' + rid]
else:
this_role[0, 5 * aid: 5 * aid + 5, j-1] = det[self.actions[aid] + '_' + rid]
agents = np.concatenate((agents, this_agent), axis=0)
roles = np.concatenate((roles, this_role), axis=0)
return agents, roles
def _do_eval(self, detections_file, ovr_thresh=0.5):
    """Entry point: build the gt database and run the error diagnosis."""
    vcocodb = self._get_vcocodb()
    # NOTE(review): the standard AP evaluations are disabled; this variant
    # of the class only runs the error visualization (_do_agent_eval /
    # _do_role_eval are not defined in this file).
    #self._do_agent_eval(vcocodb, detections_file, ovr_thresh=ovr_thresh)
    #self._do_role_eval(vcocodb, detections_file, ovr_thresh=ovr_thresh, eval_type='scenario_1')
    #self._do_role_eval(vcocodb, detections_file, ovr_thresh=ovr_thresh, eval_type='scenario_2')
    self._visualize_error(vcocodb, detections_file, ovr_thresh=ovr_thresh, eval_type='scenario_1')
def _visualize_error(self, vcocodb, detections_file, ovr_thresh=0.5, eval_type='scenario_1'):
    """Break role detections down by failure mode and print a table.

    For every action/role pair, each scored detection (up to the number of
    gt positives) is assigned to exactly one bucket: true positive or one
    of seven false-positive causes (incorrect label, background, person
    mislocalization, object mislocalization, duplicate detection,
    mis-grouping, occlusion).
    """
    with open(detections_file, 'rb') as f:
        dets = pickle.load(f)

    # Per-action, per-role accumulators; parallel lists with one 0/1 entry
    # per detection considered.
    tp = [[[] for r in range(2)] for a in range(self.num_actions)]
    fp1 = [[[] for r in range(2)] for a in range(self.num_actions)]  # incorrect label
    fp2 = [[[] for r in range(2)] for a in range(self.num_actions)]  # bck
    fp3 = [[[] for r in range(2)] for a in range(self.num_actions)]  # person misloc
    fp4 = [[[] for r in range(2)] for a in range(self.num_actions)]  # obj misloc
    fp5 = [[[] for r in range(2)] for a in range(self.num_actions)]  # duplicate detection
    fp6 = [[[] for r in range(2)] for a in range(self.num_actions)]  # mis-grouping
    fp7 = [[[] for r in range(2)] for a in range(self.num_actions)]  # occlusion
    sc = [[[] for r in range(2)] for a in range(self.num_actions)]

    npos = np.zeros((self.num_actions), dtype=np.float32)  # A + B
    ndet = np.zeros((self.num_actions, 2), dtype=np.float32)  # B + C
    # NOTE(review): never written or read below; appears to be dead.
    Test_occlusion = {}
    for i in range(len(vcocodb)):
        image_id = vcocodb[i]['id']  # img ID, not the full name (e.g. id= 165, 'file_name' = COCO_train2014_000000000165.jpg )
        gt_inds = np.where(vcocodb[i]['gt_classes'] == 1)[0]  # index of the person's box among all object boxes
        # person boxes
        gt_boxes = vcocodb[i]['boxes'][gt_inds]  # Nx4 all person's boxes in this image
        gt_actions = vcocodb[i]['gt_actions'][gt_inds]  # Nx26 binary array indicating the actions performed by this person
        # some peorson instances don't have annotated actions
        # we ignore those instances
        ignore = np.any(gt_actions == -1, axis=1)
        assert np.all(gt_actions[np.where(ignore==True)[0]]==-1)

        for aid in range(self.num_actions):
            npos[aid] += np.sum(gt_actions[:, aid] == 1)  # how many actions are involved in this image(for all the human)

        pred_agents, pred_roles = self._collect_detections_for_image(dets, image_id)
        # pred_agents Mx30
        # pred_roles Mx(5*26)x2

        for aid in range(self.num_actions):
            if len(self.roles[aid])<2:
                # if action has no role, then no role AP computed
                continue

            for rid in range(len(self.roles[aid])-1):  # rid = 0, instr; rid = 1, obj
                # keep track of detected instances for each action for each role. Is this gt_human used or not.
                # NOTE(review): np.bool was removed in NumPy 1.24; on modern
                # NumPy this line needs dtype=bool.
                covered = np.zeros((gt_boxes.shape[0]), dtype=np.bool)

                # get gt roles for action and role
                gt_role_inds = vcocodb[i]['gt_role_id'][gt_inds, aid, rid]  # Nx1 index of the object among all detected objects related to this action. -1 means missing object.
                gt_roles = -np.ones_like(gt_boxes)  # Nx4 [-1, -1, -1, -1] means gt missing object
                for j in range(gt_boxes.shape[0]):  # loop all gt human instance
                    if gt_role_inds[j] > -1:
                        gt_roles[j] = vcocodb[i]['boxes'][gt_role_inds[j]]

                agent_boxes = pred_agents[:, :4]  # Mx4 all detected human box
                role_boxes = pred_roles[:, 5 * aid: 5 * aid + 4, rid]  # Mx4 detected object(role) box for this human and action
                agent_scores = pred_roles[:, 5 * aid + 4, rid]  # Mx1, action score for this human, object and action

                if role_boxes.shape[0] == 0:continue
                # NOTE(review): .any() without an axis reduces over the whole
                # array, so `valid` is either [0] or empty; a per-row
                # reduction (.any(axis=1)) was likely intended -- verify.
                valid = np.where(np.isnan(role_boxes).any() == False)[0]
                agent_scores = agent_scores[valid]
                agent_boxes = agent_boxes[valid, :]
                role_boxes = role_boxes[valid, :]
                #ndet[aid][rid] += agent_boxes.shape[0]

                # sort in descending order
                idx = agent_scores.argsort()[::-1]  # A action can be done by multiple human.

                for j in idx:  # in this image, this action with highest action score
                    pred_box = agent_boxes[j, :]
                    overlaps = get_overlap(gt_boxes, pred_box)  # gt_boxes: gt human box

                    jmax = overlaps.argmax()  # which gt_box best matches this detected box
                    ovmax = overlaps.max()

                    # if matched with an instance with no annotations
                    # continue
                    if ignore[jmax]:
                        continue

                    # overlap between predicted role and gt role
                    if np.all(gt_roles[jmax, :] == -1):  # if no gt role
                        if eval_type == 'scenario_1':
                            if np.all(role_boxes[j, :] == 0.0) or np.all(np.isnan(role_boxes[j, :])):
                                # if no role is predicted, mark it as correct role overlap
                                ov_role = 1.0
                            else:
                                # if a role is predicted, mark it as false
                                ov_role = -1.0
                        elif eval_type == 'scenario_2':
                            # if no gt role, role prediction is always correct, irrespective of the actual predition
                            ov_role = 1.0
                        else:
                            raise ValueError('Unknown eval type')
                    else:
                        ov_role = get_overlap(gt_roles[jmax, :].reshape((1, 4)), role_boxes[j, :])

                    is_true_action = (gt_actions[jmax, aid] == 1)  # Is this gt human actually doing this action?
                    sc[aid][rid].append(agent_scores[j])
                    ndet[aid][rid] += 1
                    # Exactly one of the eight buckets below gets a 1 for
                    # this detection; all others get a 0.
                    if np.all(gt_actions[:, aid] == 0):  # All gt are not this action class. All detections are incorrect labels.
                        fp1[aid][rid].append(1)
                        fp2[aid][rid].append(0)
                        fp3[aid][rid].append(0)
                        fp4[aid][rid].append(0)
                        fp5[aid][rid].append(0)
                        fp6[aid][rid].append(0)
                        fp7[aid][rid].append(0)
                        tp[aid][rid].append(0)
                        continue
                    elif is_true_action == False:  # This detection j is a incorrect label
                        fp1[aid][rid].append(1)
                        fp2[aid][rid].append(0)
                        fp3[aid][rid].append(0)
                        fp4[aid][rid].append(0)
                        fp5[aid][rid].append(0)
                        fp6[aid][rid].append(0)
                        fp7[aid][rid].append(0)
                        tp[aid][rid].append(0)
                    elif (ovmax < 0.1):  # bck
                        fp1[aid][rid].append(0)
                        fp2[aid][rid].append(1)
                        fp3[aid][rid].append(0)
                        fp4[aid][rid].append(0)
                        fp5[aid][rid].append(0)
                        fp6[aid][rid].append(0)
                        fp7[aid][rid].append(0)
                        tp[aid][rid].append(0)
                    elif (ovmax < 0.5) & (ovmax >= 0.1):  # person misloc
                        fp1[aid][rid].append(0)
                        fp2[aid][rid].append(0)
                        fp3[aid][rid].append(1)
                        fp4[aid][rid].append(0)
                        fp5[aid][rid].append(0)
                        fp6[aid][rid].append(0)
                        fp7[aid][rid].append(0)
                        tp[aid][rid].append(0)
                    elif (ovmax >= 0.5) & (ov_role == -1.0):  # occlusion
                        fp1[aid][rid].append(0)
                        fp2[aid][rid].append(0)
                        fp3[aid][rid].append(0)
                        fp4[aid][rid].append(0)
                        fp5[aid][rid].append(0)
                        fp6[aid][rid].append(0)
                        fp7[aid][rid].append(1)
                        tp[aid][rid].append(0)
                    elif (ovmax >= 0.5) & (0 <= ov_role <= 0.1):  # mis-grouping
                        fp1[aid][rid].append(0)
                        fp2[aid][rid].append(0)
                        fp3[aid][rid].append(0)
                        fp4[aid][rid].append(0)
                        fp5[aid][rid].append(0)
                        fp6[aid][rid].append(1)
                        fp7[aid][rid].append(0)
                        tp[aid][rid].append(0)
                    elif (ovmax >= 0.5) & (0.1 <= ov_role < 0.5):  # obj misloc
                        fp1[aid][rid].append(0)
                        fp2[aid][rid].append(0)
                        fp3[aid][rid].append(0)
                        fp4[aid][rid].append(1)
                        fp5[aid][rid].append(0)
                        fp6[aid][rid].append(0)
                        fp7[aid][rid].append(0)
                        tp[aid][rid].append(0)
                    elif (ovmax >= 0.5) & (ov_role >= 0.5):  # true positive
                        if not covered[jmax]:
                            fp1[aid][rid].append(0)
                            fp2[aid][rid].append(0)
                            fp3[aid][rid].append(0)
                            fp4[aid][rid].append(0)
                            fp5[aid][rid].append(0)
                            fp6[aid][rid].append(0)
                            fp7[aid][rid].append(0)
                            tp[aid][rid].append(1)
                            covered[jmax] = True
                        else:
                            # gt already matched by a higher-scored detection.
                            fp1[aid][rid].append(0)
                            fp2[aid][rid].append(0)
                            fp3[aid][rid].append(0)
                            fp4[aid][rid].append(0)
                            fp5[aid][rid].append(1)
                            fp6[aid][rid].append(0)
                            fp7[aid][rid].append(0)
                            tp[aid][rid].append(0)

    # Aggregate the per-detection buckets into per-action/role fractions.
    fp_inc = np.zeros((self.num_actions, 2), dtype=np.float32)
    fp_bck = np.zeros((self.num_actions, 2), dtype=np.float32)
    fp_Hmis = np.zeros((self.num_actions, 2), dtype=np.float32)
    fp_Omis = np.zeros((self.num_actions, 2), dtype=np.float32)
    fp_dupl = np.zeros((self.num_actions, 2), dtype=np.float32)
    fp_misg = np.zeros((self.num_actions, 2), dtype=np.float32)
    fp_occl = np.zeros((self.num_actions, 2), dtype=np.float32)
    rec = np.zeros((self.num_actions, 2), dtype=np.float32)
    prec = np.zeros((self.num_actions, 2), dtype=np.float32)
    tp_ = np.zeros((self.num_actions, 2), dtype=np.float32)
    for aid in range(self.num_actions):
        if len(self.roles[aid])<2:
            continue
        for rid in range(len(self.roles[aid])-1):
            a_fp1 = np.array(fp1[aid][rid], dtype=np.float32)
            a_fp2 = np.array(fp2[aid][rid], dtype=np.float32)
            a_fp3 = np.array(fp3[aid][rid], dtype=np.float32)
            a_fp4 = np.array(fp4[aid][rid], dtype=np.float32)
            a_fp5 = np.array(fp5[aid][rid], dtype=np.float32)
            a_fp6 = np.array(fp6[aid][rid], dtype=np.float32)
            a_fp7 = np.array(fp7[aid][rid], dtype=np.float32)
            a_sc = np.array(sc[aid][rid], dtype=np.float32)
            a_tp = np.array(tp[aid][rid], dtype=np.float32)

            # sort in descending score order
            idx = a_sc.argsort()[::-1]
            a_fp1 = a_fp1[idx]
            a_fp2 = a_fp2[idx]
            a_fp3 = a_fp3[idx]
            a_fp4 = a_fp4[idx]
            a_fp5 = a_fp5[idx]
            a_fp6 = a_fp6[idx]
            a_fp7 = a_fp7[idx]
            a_tp = a_tp[idx]
            a_sc = a_sc[idx]

            # min(# GT, # not zero): only consider the top-scored detections,
            # at most as many as there are gt positives.
            num_inst = int(min(npos[aid], len(a_sc)))
            a_fp1 = a_fp1[:num_inst]
            a_fp2 = a_fp2[:num_inst]
            a_fp3 = a_fp3[:num_inst]
            a_fp4 = a_fp4[:num_inst]
            a_fp5 = a_fp5[:num_inst]
            a_fp6 = a_fp6[:num_inst]
            a_fp7 = a_fp7[:num_inst]
            a_tp = a_tp[:num_inst]
            a_sc = a_sc[:num_inst]

            # Fraction of false positives attributable to each cause.
            frac_fp1 = np.sum(a_fp1) / (num_inst - np.sum(a_tp))
            frac_fp2 = np.sum(a_fp2) / (num_inst - np.sum(a_tp))
            frac_fp3 = np.sum(a_fp3) / (num_inst - np.sum(a_tp))
            frac_fp4 = np.sum(a_fp4) / (num_inst - np.sum(a_tp))
            frac_fp5 = np.sum(a_fp5) / (num_inst - np.sum(a_tp))
            frac_fp6 = np.sum(a_fp6) / (num_inst - np.sum(a_tp))
            frac_fp7 = np.sum(a_fp7) / (num_inst - np.sum(a_tp))

            tp_[aid, rid] = np.sum(a_tp)
            rec[aid, rid] = np.sum(a_tp) / float(npos[aid])
            prec[aid, rid] = np.sum(a_tp) / np.maximum(np.sum(a_fp1)+np.sum(a_fp2)+np.sum(a_fp3)+np.sum(a_fp4)+np.sum(a_fp5)+np.sum(a_fp6)+np.sum(a_fp7)+np.sum(a_tp), np.finfo(np.float64).eps)
            # NOTE(review): fp_dupl is computed but not included in the
            # printed table below.
            fp_inc[aid, rid] = frac_fp1
            fp_bck[aid, rid] = frac_fp2
            fp_Hmis[aid, rid] = frac_fp3
            fp_Omis[aid, rid] = frac_fp4
            fp_dupl[aid, rid] = frac_fp5
            fp_misg[aid, rid] = frac_fp6
            fp_occl[aid, rid] = frac_fp7

    print('--------------------------------------------Reporting Error Analysis (%)-----------------------------------------------')
    print('{: >27} {:} {:} {:} {:} {:} {:}'.format(' ', 'inc', 'bck', 'H_mis', 'O_mis', 'mis-gr', 'occl'))
    for aid in range(self.num_actions):
        if len(self.roles[aid])<2: continue
        for rid in range(len(self.roles[aid])-1):
            print('{: >23}: {:6.2f} {:4.2f} {:4.2f} {:5.2f} {:5.2f} {:5.2f} (rec:{:5.2f} = #tp:{:4d}/#pos:{:4d}) (prec:{:5.2f} = #tp:{:4d}/#det:{:4d})'.format(self.actions[aid]+'-'+self.roles[aid][rid+1], \
                fp_inc[aid, rid]*100.0, \
                fp_bck[aid, rid]*100.0, \
                fp_Hmis[aid, rid]*100.0, \
                fp_Omis[aid, rid]*100.0, \
                fp_misg[aid, rid]*100.0, \
                fp_occl[aid, rid]*100.0, \
                rec[aid, rid]*100.0, \
                int(tp_[aid, rid]), \
                int(npos[aid]), \
                prec[aid, rid]*100.0, \
                int(tp_[aid, rid]), \
                int(ndet[aid, rid])))
def _load_vcoco(vcoco_file):
    """Load V-COCO annotations and convert list fields to numpy arrays.

    role_object_id becomes an N x K matrix (column 0 is the agent's own
    ann_id); ann_id/label/image_id become N x 1 column vectors.
    """
    print('loading vcoco annotations...')
    with open(vcoco_file, 'r') as fp:
        vsrl_data = json.load(fp)
    for entry in vsrl_data:
        num_roles = len(entry['role_name'])
        entry['role_object_id'] = \
            np.array(entry['role_object_id']).reshape((num_roles, -1)).T
        for field in ('ann_id', 'label', 'image_id'):
            entry[field] = np.array(entry[field]).reshape((-1, 1))
    return vsrl_data
def clip_xyxy_to_image(x1, y1, x2, y2, height, width):
    """Clamp box corners into [0, width-1] x [0, height-1]."""
    max_x = width - 1.
    max_y = height - 1.
    x1 = np.clip(x1, 0., max_x)
    y1 = np.clip(y1, 0., max_y)
    x2 = np.clip(x2, 0., max_x)
    y2 = np.clip(y2, 0., max_y)
    return x1, y1, x2, y2
def get_overlap(boxes, ref_box):
    """IoU between each row of `boxes` and a single `ref_box`.

    Boxes are (x1, y1, x2, y2) with inclusive pixel coordinates, hence the
    +1 terms in the width/height/area computations.
    """
    ix1 = np.maximum(boxes[:, 0], ref_box[0])
    iy1 = np.maximum(boxes[:, 1], ref_box[1])
    ix2 = np.minimum(boxes[:, 2], ref_box[2])
    iy2 = np.minimum(boxes[:, 3], ref_box[3])
    inter_w = np.maximum(ix2 - ix1 + 1., 0.)
    inter_h = np.maximum(iy2 - iy1 + 1., 0.)
    inters = inter_w * inter_h
    # union = area(ref) + area(box) - intersection
    ref_area = (ref_box[2] - ref_box[0] + 1.) * (ref_box[3] - ref_box[1] + 1.)
    box_areas = (boxes[:, 2] - boxes[:, 0] + 1.) * \
                (boxes[:, 3] - boxes[:, 1] + 1.)
    return inters / (ref_area + box_areas - inters)
def voc_ap(rec, prec):
    """ ap = voc_ap(rec, prec)
    Compute VOC AP given precision and recall.
    [as defined in PASCAL VOC]
    """
    # Pad with sentinels so the envelope and area cover recall in [0, 1].
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    # Precision envelope: running maximum from right to left.
    mpre = np.maximum.accumulate(mpre[::-1])[::-1]
    # Area under the PR curve: sum (delta recall) * precision at the points
    # where recall changes value.
    change = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[change + 1] - mrec[change]) * mpre[change + 1])
| [
"gaochen315@gmail.com"
] | gaochen315@gmail.com |
bfb5f46ecdf6fdd6cb6130db410bc9e8959f3a84 | 34bf67017440fe47658559f91fe153c153a359f4 | /150.py | e06cd68c244078e41d68a49e8dfabaa9969b2e62 | [] | no_license | KevinWangTHU/LeetCode | 1be5f8f1ab587eea5365abb940785c9fe26f5214 | a7916e0818b0853ec75e24724bde94c49234c7dc | refs/heads/master | 2021-05-04T10:16:26.666260 | 2017-08-09T04:17:12 | 2017-08-09T04:18:49 | 53,427,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | class Solution(object):
def evalRPN(self, tokens):
"""
:type tokens: List[str]
:rtype: int
"""
stack = []
for c in tokens:
if c in "+-*/":
b = stack.pop()
a = stack.pop()
if c == "+":
stack.append(a+b)
elif c == "-":
stack.append(a-b)
elif c == "*":
stack.append(a*b)
elif c == "/":
stack.append(int(float(a)/b)) # stupid python
else:
stack.append(int(c))
return stack[-1]
if __name__ == '__main__':
    # Quick manual check; print() works under both Python 2 and 3,
    # unlike the bare Python 2 print statement used before.
    s = Solution()
    print(s.evalRPN(["1", "-11", "/"]))
| [
"KevinWangTHU@gmail.com"
] | KevinWangTHU@gmail.com |
3a38ab9548c6903b0635947c7a3563676b080554 | d8ec574895259179e644281782f96578068cde59 | /df_goods/views.py | 1c721d8f9df6d7be3f90d30bb3935d81eccb105d | [] | no_license | zhenglihui007/djangolianxi | bcdc878854ffa4524ace7f03d8fd9b6e17b0bc7a | 98b7bcf42cbe9d93ff5b4784c6a84707b4caa1f8 | refs/heads/master | 2020-03-29T16:10:29.565775 | 2018-09-24T12:48:45 | 2018-09-24T12:48:45 | 150,101,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,982 | py | from django.shortcuts import render
from .models import *
from django.core.paginator import Paginator
# Home page view
def index(request):
    """Render the front page: newest and most-clicked goods per category.

    Category ids 6..11 map to fruit, fish, meat, egg, vegetables and
    frozen goods respectively; each gets a "newest" and a "hottest" strip.
    """
    count = request.session.get('count')

    def newest(type_id, limit=4):
        # Latest additions within one category.
        return GoodsInfo.objects.filter(gtype__id=type_id).order_by("-id")[:limit]

    def hottest(type_id, limit=4):
        # Most-clicked goods within one category.
        return GoodsInfo.objects.filter(gtype__id=type_id).order_by("-gclick")[:limit]

    context = {
        'title': '首页',
        'fruit': newest(6), 'fruit2': hottest(6),
        # NOTE: the original page shows only 3 hottest fish items.
        'fish': newest(7), 'fish2': hottest(7, 3),
        'meat': newest(8), 'meat2': hottest(8),
        'egg': newest(9), 'egg2': hottest(9),
        'vegetables': newest(10), 'vegetables2': hottest(10),
        'frozen': newest(11), 'frozen2': hottest(11),
        'guest_cart': 1, 'page_name': 0, 'count': count,
    }
    return render(request, 'df_goods/index.html', context)
# Goods list view
def goodlist(request, typeid, pageid, sort):
    """Render one page of the goods list for a category.

    typeid: category id; pageid: 1-based page number;
    sort: '1' = newest, '2' = price ascending, '3' = most clicked.
    """
    count = request.session.get('count')
    # Two most recently published goods, shown alongside the list.
    newgood = GoodsInfo.objects.all().order_by('-id')[:2]
    # Map the sort flag onto an ORM ordering expression.
    ordering = {'1': '-id', '2': 'gprice', '3': '-gclick'}[sort]
    goods_qs = GoodsInfo.objects.filter(gtype_id=typeid).order_by(ordering)
    # 15 items per page.
    paginator = Paginator(goods_qs, 15)
    page = paginator.page(int(pageid))
    page_numbers = paginator.page_range
    # Category of the listed goods.
    goodtype = TypeInfo.objects.get(id=typeid)
    context = {'title': '商品详情', 'list': 1,
               'guest_cart': 1, 'goodtype': goodtype,
               'newgood': newgood, 'goodList': page,
               'typeid': typeid, 'sort': sort,
               'pindexlist': page_numbers, 'pageid': int(pageid),
               'count': count}
    return render(request, 'df_goods/list.html', context)
# Goods detail view
def detail(request,id):
    """Show one product's detail page and track recently viewed goods.

    Increments the product's click counter and maintains a 'goods_ids'
    cookie holding up to 5 most-recently viewed product ids.
    """
    goods = GoodsInfo.objects.get(pk=int(id))
    # Count this visit.
    goods.gclick=goods.gclick+1
    goods.save()
    # Category of the current product.
    # goodtype = TypeInfo.objects.get(goodsinfo__id=id)
    goodtype = goods.gtype
    # type = TypeInfo()
    count = request.session.get('count')
    # Two newest goods in the same category, via the reverse relation
    # goods.gtype.goodsinfo_set.
    news = goods.gtype.goodsinfo_set.order_by('-id')[0:2]
    # print '*' * 10
    # print news[0].gtitle
    # print goodtype
    # print goods.gtype
    context={'title':goods.gtype.ttitle,'guest_cart':1,
             'g':goods,'newgood':news,'id':id,
             'isDetail': True,'list':1,'goodtype': goodtype,'count':count}
    response=render(request,'df_goods/detail.html',context)
    # Use a cookie to remember the recently browsed product ids.
    # Read what previous visits stored ('' when absent).
    goods_ids = request.COOKIES.get('goods_ids', '')
    # Id of the product being viewed now, as a string.
    goods_id='%d'%goods.id
    # Anything stored from previous visits?
    if goods_ids!='':
        # Split into the individual product ids.
        goods_id_list=goods_ids.split(',')
        # If this product is already in the list...
        if goods_id_list.count(goods_id)>=1:
            # ...remove it so it can be re-added at the front.
            goods_id_list.remove(goods_id)
        # Most recently viewed comes first.
        goods_id_list.insert(0,goods_id)
        # Keep at most 5 entries.
        if len(goods_id_list)>=6:
            # Drop the oldest (6th) entry.
            del goods_id_list[5]
        # Re-serialize for the cookie.
        goods_ids=','.join(goods_id_list)
    else:
        # First product ever viewed: start the list with it.
        goods_ids=goods_id
    response.set_cookie('goods_ids',goods_ids)
    return response
| [
"695408566@qq.com"
] | 695408566@qq.com |
c0842d41794965ae90e16e12027140df0fc36516 | 4b2450b65f5802f524ddb8701baa0e71c929889b | /tuple.py | 823939b2e562231311ac67244932724f7b3b8ffa | [] | no_license | joedave1/python | 21e89dd0638156a3600bfb7fbf7422c73a79fc51 | ae51152a663aa2e512c5be7f6134c4b35d78e88d | refs/heads/master | 2020-06-29T11:22:05.627400 | 2019-08-16T08:51:14 | 2019-08-16T08:51:14 | 200,520,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | tuplex=input("enter the values ").split(",")
# Build a tuple from the comma-separated values entered at the prompt
# (the `tuplex` list is read from input on the preceding line).
tup=tuple(tuplex)
print(tup)
# NOTE(review): no bounds check -- an out-of-range index raises IndexError,
# and non-numeric input raises ValueError at int().
a=int(input("enter the location "))
print(tup[a])
| [
"noreply@github.com"
] | joedave1.noreply@github.com |
7bebdced4f0b553045fdc19a3922805cfcdb4f5d | 4b3dab00da26675eb3eeedd87707d4dafd440f9c | /recipe_fio_aging_comparator/image.py | 3e7aab0155b14098d95ad288fcc0e3a915b53372 | [] | no_license | spetrovi/thesis | d4b5234b1191124650b4112e9279f647407ecb2a | 062b356efe14d376976a00fdbf9f92862baf394e | refs/heads/master | 2020-12-20T23:47:31.476635 | 2017-05-29T07:45:51 | 2017-05-29T07:45:51 | 40,310,459 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,899 | py | import free_space_frag as fsf
import glob
from random import randint
def read_file(_file, op):
    """Read and return the entire contents of a file.

    _file: path to open; op: mode string passed to open() (e.g. 'r').
    The `with` block guarantees the handle is closed even if read() raises,
    unlike the previous open/read/close sequence.
    """
    with open(_file, op) as f:
        return f.read()
def tr(val):
    """Sort key: convert a bin label such as '8K-16K' into a byte count,
    using the lower bound of the range."""
    # NOTE(review): for labels containing 'i' the slice [:-3] strips three
    # characters from the lower bound (e.g. '4Ki' -> ''), which raises
    # ValueError for short labels -- confirm the exact label format
    # produced by free_space_frag.
    if val.find('i') >= 0:
        suf = val[-3:]
        val = int(val.split('-')[0][:-3])
    else:
        suf = val[-1:]
        val = int(val.split('-')[0][:-1])
    # NOTE(review): binary ('Ki') suffixes are multiplied by 1000 rather
    # than 1024, and the function falls through returning None when no
    # K/M/G suffix matches -- verify both are intended.
    if suf.find('K') >= 0: return val*1000
    if suf.find('M') >= 0: return val*1000000
    if suf.find('G') >= 0: return val*1000000000
def d_image(fsystem, destination):
    """Render a free-space-fragmentation chart from the ./out logs.

    Fills the templates/image.js placeholders (XXX_BINS_XXX, XXX_TIMES_XXX,
    XXX_DEPTH_XXX, XXX_DATA_XXX, XXX_NAME_XXX) and writes the result to
    <destination><ID>.js; returns the generated chart ID.
    """
    # Random suffix so repeated runs do not overwrite each other's output.
    ID = 'image_'+str(randint(0,1000))
    raw_files = glob.glob('./out/free_space_*.log')
    # Sort numerically by the trailing sample index in the log file name.
    raw_files.sort(key=lambda x: int(x.split('free_space_')[1][:-4]))
    #print 'free files: '+str(len(raw_files))
    histograms = []
    for i, _file in enumerate(raw_files):
        # NOTE(review): with >100 files only those after index 103 are kept,
        # and a run with exactly 100 files matches neither branch (empty
        # histograms) -- confirm this windowing is intentional.
        if len(raw_files) > 100 and i > 103:
            histograms.append(fsf.Free_space_fragmentation(read_file(_file,'r'), fsystem))
        if len(raw_files) < 100:
            histograms.append(fsf.Free_space_fragmentation(read_file(_file,'r'), fsystem))
    # Union of all bin labels across samples, ordered by byte size via tr().
    bins = []
    for hist in histograms:
        bins += hist.bins
    bins = sorted(list(set(bins)),key=tr)
    template = read_file('templates/image.js','r')
    if fsystem == 'xfs':
        # NOTE(review): str(map(...)) and str(range(...)) below only produce
        # list literals under Python 2; under Python 3 they would embed the
        # repr of iterator objects into the template.
        template = str(map(lambda x: x.split('-')[0][:-3]+'-'+x.split('-')[1], bins)).join(template.split('XXX_BINS_XXX'))
    else:
        template = str(bins).join(template.split('XXX_BINS_XXX'))
    template = str(range(0,len(histograms))).join(template.split('XXX_TIMES_XXX'))
    template = str(len(histograms)*7).join(template.split('XXX_DEPTH_XXX'))
    # Serialize one chart "stack" per histogram sample.
    data = ''
    for i, hist in enumerate(histograms):
        data += '{\nstack: '+str(i)+',\ndata:['
        for j, _bin in enumerate(hist.bins):
            # Column index within the global, sorted bin list.
            col = bins.index(_bin)
            val = hist.extents[j]
            data +='['+str(col)+','+str(val)+'],'
        data += ']},'
    template = data.join(template.split('XXX_DATA_XXX'))
    template = ID.join(template.split('XXX_NAME_XXX'))
    _file = open(destination+ID+'.js','w')
    _file.write(template)
    _file.close()
    return ID
| [
"spetrovi@redhat.com"
] | spetrovi@redhat.com |
38f44da197420d879fc5d5e663f1ba9656c8ff3a | 6f0af0259ebb33bde4c15ee50b28025f74bf8b08 | /lesson5/task3/if_statement.py | 9c4d1e40499e178efa5a038f79b8a61e6304c619 | [] | no_license | passlonis/SUMMER_BOOTCAMP_2018_Python | 05c9125b1b1645a62fad40321a6b06469433cda0 | fe48298efabb16d54e4869cfb7c9b93ee3217fd4 | refs/heads/master | 2021-09-06T14:43:45.273006 | 2018-02-07T17:38:24 | 2018-02-07T17:38:24 | 119,430,976 | 0 | 0 | null | 2018-01-29T19:32:48 | 2018-01-29T19:32:48 | null | UTF-8 | Python | false | false | 874 | py | """
La palabra reservada if se usa para formar una instruccion condicional
que ejecuta algun codigo
especificado despues de verificar si su expresion es True.
Python usa indentacion para definir bloques de codigo.
"""
print("::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::")
name = "John"
age = 17
if name == "John" or age == 17:
print("name is John")
print("John is 17 years old")
tasks = ['task1', 'task2']
if len(tasks) == 0:
print("empty")
print("------------------------------------------------------")
nombre = "ariel"
edad = 28
if(nombre == "ariel" and edad == 28):
print("hola soy ariel y tengo 28 anios")
lista = [1, 2, 3, 4, 5]
if(not len(lista) == 0):
print("La lista no esta vacia")
lista = []
if(len(lista)==0):
print("La lista esta vacia")
print("::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::")
| [
"passlonis@hotmail.com"
] | passlonis@hotmail.com |
b284b019485d89e8d8b321e7650a3c146d3e3b5c | a9ce90c1cfec804f7b1af02d3dfd1adc22dc48f0 | /mysite/mysite/settings.py | 74746d25770f0948bf48cb4e1dcb5d6918dfb882 | [] | no_license | tejksat/django-tutorial-app | 27f9bd14744e3b1c8f2b7c6493bcb0373ddd1f24 | 9504eaa48586b8e8df11909d9aa31a86799f547c | refs/heads/master | 2022-12-20T08:15:17.529004 | 2020-09-21T08:55:54 | 2020-09-21T08:55:54 | 297,277,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,204 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5i8)9jn3-1youxxxwdt!5uu(o))#ti2f1x52!ew)rwf#suu=(t'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'mydatabase',
'USER': 'mydatabaseuser',
'PASSWORD': 'mypassword',
'HOST': 'polls-db',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"Alexander.Koshevoy@jetbrains.com"
] | Alexander.Koshevoy@jetbrains.com |
eea3717e170b2c3269b11a3c59a00d878666df77 | 74d1b81f220f3320b83b252b446b2db0e4a7905a | /menu_manager/settings.py | 5ef25e944bb618fc3bba9018bb88e589b3ebb6cd | [] | no_license | codejasz/emenu | c2d85c707e442a1cc536e68c567d215a1502bd9c | 770e0559758140d10580b2617853265a8099503e | refs/heads/master | 2023-03-24T17:22:18.521256 | 2021-03-13T13:56:00 | 2021-03-13T13:56:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,411 | py | """
Django settings for menu_manager project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@*x)i2(zwwq6&ma&&eg)-nms*5!1ik7%62&--(q*yvsjvj^(+('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'menu_manager_app',
'rest_framework',
'django_filters',
'rest_framework_swagger',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'menu_manager.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'menu_manager.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'menu_db',
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = "menu_manager_app.User"
REST_FRAMEWORK = {'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema'}
| [
"adrian.kodjasz@gmail.com"
] | adrian.kodjasz@gmail.com |
a081bb8cb3ab13b0bfabc885921a8c8896e257f9 | a67c77720e270e70add2f8adaa2fb4b27ea43416 | /segmentation/model_305153142.py | f3aba997883b9efea799ed1884d9a4ebd709ce5d | [] | no_license | petrama/diplomski | 901a7b5f15a0e0f42746fcb727e4251666c1a8e2 | da6d77ad4a415a377ddf126311b640f61e38f297 | refs/heads/master | 2021-06-19T05:23:25.259712 | 2017-07-17T19:51:28 | 2017-07-17T19:51:28 | 97,515,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,636 | py | import tensorflow as tf
import tensorflow.contrib.layers as layers
from model_helper import read_vgg_init
import losses
FLAGS = tf.app.flags.FLAGS
def total_loss_sum(losses):
# Assemble all of the losses for the current tower only.
# Calculate the total loss for the current tower.
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = tf.add_n(losses + regularization_losses, name='total_loss')
return total_loss
def create_init_op(vgg_layers):
variables = tf.contrib.framework.get_variables()
init_map = {}
for var in variables:
name_split = var.name.split('/')
if len(name_split) != 3:
continue
name = name_split[1] + '/' + name_split[2][:-2]
if name in vgg_layers:
print(var.name, ' --> init from ', name)
init_map[var.name] = vgg_layers[name]
print(var.name,vgg_layers[name].shape)
else:
print(var.name, ' --> random init')
init_op, init_feed = tf.contrib.framework.assign_from_values(init_map)
return init_op, init_feed
def build(inputs, labels, weights,vector_centr,instance_mask, is_training=True):
vgg_layers, vgg_layer_names = read_vgg_init(FLAGS.vgg_init_dir)
weight_decay = 5e-4
bn_params = {
# Decay for the moving averages.
'decay': 0.999,
'center': True,
'scale': True,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# None to force the updates
'updates_collections': None,
'is_training': is_training,
}
with tf.contrib.framework.arg_scope([layers.convolution2d],
kernel_size=3, stride=1, padding='SAME', rate=1, activation_fn=tf.nn.relu,
# normalizer_fn=layers.batch_norm, normalizer_params=bn_params,
# weights_initializer=layers.variance_scaling_initializer(),
normalizer_fn=None, weights_initializer=None,
weights_regularizer=layers.l2_regularizer(weight_decay)):
net = layers.convolution2d(inputs, 64, scope='conv1_1')
net = layers.convolution2d(net, 64, scope='conv1_2')
net = layers.max_pool2d(net, 2, 2, scope='pool1')
net = layers.convolution2d(net, 128, scope='conv2_1')
net = layers.convolution2d(net, 128, scope='conv2_2')
net = layers.max_pool2d(net, 2, 2, scope='pool2')
net = layers.convolution2d(net, 256, scope='conv3_1')
net = layers.convolution2d(net, 256, scope='conv3_2')
net = layers.convolution2d(net, 256, scope='conv3_3')
net = layers.max_pool2d(net, 2, 2, scope='pool3')
net = layers.convolution2d(net, 512, scope='conv4_1')
net = layers.convolution2d(net, 512, scope='conv4_2')
net = layers.convolution2d(net, 512, scope='conv4_3')
paddings = [[0, 0], [0, 0]]
crops = [[0, 0], [0, 0]]
block_size=2
net=tf.space_to_batch(net,paddings=paddings,block_size=block_size)
net = layers.convolution2d(net, 512, scope='conv5_1')
net = layers.convolution2d(net, 512, scope='conv5_2')
net = layers.convolution2d(net, 512, scope='conv5_3')
net=tf.batch_to_space(net,crops=crops,block_size=block_size)
with tf.contrib.framework.arg_scope([layers.convolution2d],stride=1,padding='SAME',
weights_initializer=layers.variance_scaling_initializer(),
activation_fn=tf.nn.relu,normalizer_fn=layers.batch_norm,
normalizer_params=bn_params,
weights_regularizer=layers.l2_regularizer(FLAGS.weight_decay)):
net = layers.convolution2d(net, 512, kernel_size=7, scope='conv6_1',rate=4)
net = layers.convolution2d(net, 512, kernel_size=3, scope='conv6_2',rate=8)
logits = layers.convolution2d(net, FLAGS.num_classes, 1,padding='SAME', activation_fn=None,scope='unary_2',rate=2)
print('logits',logits.get_shape())
xss=layers.convolution2d(net, 2, 3 ,padding='SAME', activation_fn=None,scope='centroid_regression',rate=2)
xss = tf.image.resize_bilinear(xss,[FLAGS.img_height,FLAGS.img_width],name='vector_to_centroid')
logits=tf.image.resize_bilinear(logits,[FLAGS.img_height,FLAGS.img_width],name='resize_score')
loss=get_loss(logits,labels,weights,vector_centr,xss,instance_mask,is_training=is_training)
if is_training:
init_op, init_feed = create_init_op(vgg_layers)
return logits, loss, init_op, init_feed
return logits,loss
def loss_centroids(gt,predicted,instance_mask):
print(gt)
diff=tf.abs(tf.cast(gt,tf.float32)) - predicted
norm=tf.reduce_sum(diff, axis=3)
print(norm)
return tf.reduce_mean(norm*tf.reshape(tf.cast(instance_mask,tf.float32),(FLAGS.batch_size,FLAGS.img_height,FLAGS.img_width)))
def get_loss(logits, labels,weights,vector_centr,xss,instance_mask, is_training):
#xent_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels))
xent_loss=losses.weighted_cross_entropy_loss(logits,labels,weights)
cen_loss=loss_centroids(vector_centr,xss,instance_mask)
total_loss = total_loss_sum([xent_loss*0.8,cen_loss*0.2])
if is_training:
loss_averages_op = losses.add_loss_summaries(total_loss)
with tf.control_dependencies([loss_averages_op]):
total_loss = tf.identity(total_loss)
return total_loss
| [
"petra.marce@gmail.com"
] | petra.marce@gmail.com |
80338a31f9a035ae8ce994b2933a16956197f272 | c5cad31bfc771d3261d1eead8e31856c19eb5e74 | /publishTool/treeDrag_test3.py | 96c3522a302df8e4e789c41a03f4afe63e02c445 | [] | no_license | alpha0080/mayaTool | 61b649825d85b88de2d24e9260e23f5c5aa559cb | 153da6e987d13fb7311ee55a2a3e635bc8c7afde | refs/heads/master | 2021-01-19T11:52:03.715437 | 2018-05-07T10:30:23 | 2018-05-07T10:30:23 | 88,001,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,834 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:/alphaOnly/github/mayaTool/publishTool/treeDrag_test.ui'
#
# Created: Tue Sep 26 10:42:52 2017
# by: pyside2-uic running on PySide2 2.0.0~alpha0
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
MainWindow.setMouseTracking(True)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
# self.treeWidget = QtWidgets.QTreeWidget(self.centralwidget)
# self.treeWidget.setGeometry(QtCore.QRect(90, 10, 256, 471))
# self.treeWidget.setDragEnabled(False)
# self.treeWidget.setObjectName("treeWidget")
# item_0 = QtWidgets.QTreeWidgetItem(self.treeWidget)
# item_1 = QtWidgets.QTreeWidgetItem(item_0)
# item_1 = QtWidgets.QTreeWidgetItem(item_0)
# item_0 = QtWidgets.QTreeWidgetItem(self.treeWidget)
# item_1 = QtWidgets.QTreeWidgetItem(item_0)
# item_1 = QtWidgets.QTreeWidgetItem(item_0)
# item_0 = QtWidgets.QTreeWidgetItem(self.treeWidget)
#item_0 = QtWidgets.QTreeWidgetItem(self.treeWidget)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def mousePressEvent(self, event):
self.treeWidget.mousePressEvent(event)
print 'asd'
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtWidgets.QApplication.translate("MainWindow", "MainWindow", None, -1))
#self.treeWidget.headerItem().setText(0, QtWidgets.QApplication.translate("MainWindow", "1", None, -1))
# __sortingEnabled = self.treeWidget.isSortingEnabled()
# self.treeWidget.setSortingEnabled(False)
#self.treeWidget.topLevelItem(0).setText(0, QtWidgets.QApplication.translate("MainWindow", "a", None, -1))
#self.treeWidget.topLevelItem(0).child(0).setText(0, QtWidgets.QApplication.translate("MainWindow", "a1", None, -1))
#self.treeWidget.topLevelItem(0).child(1).setText(0, QtWidgets.QApplication.translate("MainWindow", "a2", None, -1))
#self.treeWidget.topLevelItem(1).setText(0, QtWidgets.QApplication.translate("MainWindow", "b", None, -1))
# self.treeWidget.topLevelItem(1).child(0).setText(0, QtWidgets.QApplication.translate("MainWindow", "b1", None, -1))
# self.treeWidget.topLevelItem(1).child(1).setText(0, QtWidgets.QApplication.translate("MainWindow", "b2", None, -1))
#self.treeWidget.topLevelItem(2).setText(0, QtWidgets.QApplication.translate("MainWindow", "c", None, -1))
#self.treeWidget.topLevelItem(3).setText(0, QtWidgets.QApplication.translate("MainWindow", "d", None, -1))
#self.treeWidget.setSortingEnabled(__sortingEnabled)
class MyTreeWidget(QtWidgets.QTreeWidget,mod_MainWindow):
def __init__(self,parent= QtWidgets.QApplication.activeWindow()):
super(MyTreeWidget, self).__init__(parent)
self.setGeometry(QtCore.QRect(90, 10, 256, 471))
self.setDragEnabled(False)
self.setObjectName("treeWidget")
self.setDragDropOverwriteMode(True)
self.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
self.setDefaultDropAction(QtCore.Qt.LinkAction)
self.setMouseTracking(True)
#print self
#print self.parent()
# print MyTreeWidget()
#self.centralwidget.setObjectName("centralwidget")
#self.treeWidget = QtWidgets.QTreeWidget(self.centralwidget)
#item_0 = QtWidgets.QTreeWidgetItem(self)
#self.treeWidget = QtWidgets.QTreeWidget(QtWidgets.QWidget(MainWindow))
# print parent.objectName()
print '11111'#,MyTreeWidget.objectName()
# self.itemDoubleClicked.connect(self.addHere)
def test(self):
print 'test'
def mousePressEvent(self, e):
#print self.treeWidget
#self.mousePressEvent(e)
#print self.dragEnterEvent(e)
# print aa
print 'asd'
'''
def delButtonPressed(self):
self.setDragEnabled(True)
print'ggaa',self.currentItem()
for item in self.selectedItems():
if item.deletable:
item.parent.removeChild(item)
MyTreeItemClass.child_list.remove(item)
def addHere(self, clickeditem, column):
global listbox
for x in listbox.selectedItems():
if clickeditem.can_add_here == True:
print('Adding item(s) here', clickeditem.treetext)
itemtext = x.text()
newtreeitem = MyTreeItemClass(itemtext, parent=clickeditem)
clickeditem.setExpanded(False) # need to use false to counter-act the double-clicking action?
else:
print('cannot add to this item')
'''
class mod_MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
def __init__(self, parent= QtWidgets.QApplication.activeWindow()):
super(mod_MainWindow, self).__init__(parent)
print 'self',self,type(self),self.objectName()
print 'Ui_MainWindow',Ui_MainWindow,type(Ui_MainWindow)
print 'parent',parent,type(parent)
print 'mod_MainWindow',mod_MainWindow,type(mod_MainWindow)#,mod_MainWindow.objectName()
self.setupUi(self)
#print self.centralwidget.objectName()
#self.centralwidget.setMouseTracking(True)
# tree = QtWidgets.treeWidget()
#print self.treeWidget
self.treeWidget = MyTreeWidget(self)
# print self.treeWidget
#self.treeWidget.setDragEnabled(True)
# print tree
# self.treeWidget.setDragDropOverwriteMode(True)
# self.treeWidget.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
# self.treeWidget.setDefaultDropAction(QtCore.Qt.LinkAction)
# self.treeWidget.setMouseTracking(True)
# self.tree = MyTreeWidget(self)
# self.treeWidget.pressed.connect(self.test)
#self.treeWidget.itemCollapsed.connect(self.testB)
def testB(self):
print 'change'
#selectItem = self.treeWidget.currentItem()
#print 'item',selectItem.text(0)
#print 'parent',selectItem.parent().text(0)
#print 'bbb'
def test(self):
item = self.treeWidget.currentItem()
print dir(self.treeWidget.DragSelectingState)
print self.treeWidget
self.dropEvent()
# print 'press'
#selectItem = self.treeWidget.currentItem()
# selectItem = self.treeWidget.currentItem()
# print self.treeWidget.selectedItems(0)
# print 'parent',selectItem.parent().text(0)
# selectItem.pressed.connect(self.testB)
# print self.treeWidget.itemCollapsed
#self.treeWidget.itemEntered.connect(self.testB)
def keyPressEvent(self, e):
print 'press'
if e.key() == QtCore.Qt.Key_Delete:
#print('pressed delete key')
self.delButtonPressed()
def delButtonPressed(self):
# self.treeWidget.setDragEnabled(True)
print'ggaa',self.treeWidget.currentItem().text(0)
# for item in self.selectedItems():
# if item.deletable:
# item.parent.removeChild(item)
# MyTreeItemClass.child_list.remove(item)
#
#print 'press'
#print self.treeWidget
# l2 = QtWidgets.QTreeWidgetItem(["c"])
#print QtWidgets.QTreeWidget.pos(QtWidgets.QWidget(l2))
# print l2.pos()
def dropEvent(self, e):
item=self.itemAt(e.pos())
if item: self.addHere(item)
e.accept()
#print MyTreeView
def dragEnterEvent(self, event):
print 'dfdffdfdfd'
if event.mimeData().hasFormat("application/x-ltreedata"):
event.accept()
else:
event.ignore()
print 'cd'
def main():
global ui
app = QtWidgets.QApplication.instance()
if app == None: app = QtWidgets.QApplication(sys.argv)
try:
ui.close()
ui.deleteLater()
except: pass
ui = mod_MainWindow()
ui.show()
if __name__ == '__main__':
main()
| [
"alpha@mail.chungyo.net"
] | alpha@mail.chungyo.net |
d7cd9767b1cab8dbbc155381145988de64bd8bda | 0fb0ae31119aeed77529a08f41390c46a2384093 | /datasets/data_configs.py | 8e856b310ad8900834eb17b1bff7bf45d58e1410 | [
"MIT"
] | permissive | templeblock/TF_Slim_Framework | 5a8710c717c16a4c4ef493cfe98641ffae5aa189 | 46c9cf8da258b9b9abfd780965548add70c2d268 | refs/heads/master | 2020-05-09T12:48:05.678347 | 2019-04-11T15:37:14 | 2019-04-11T15:37:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | class DatasetConfig():
def __init__(self, file_pattern, split_sizes):
self.file_pattern = file_pattern
self.split_sizes = split_sizes
icdar2013 = DatasetConfig(
file_pattern = '*_%s.tfrecord',
split_sizes = {
'train': 229,
'test': 233
}
)
icdar2015 = DatasetConfig(
file_pattern = 'icdar2015_%s.tfrecord',
split_sizes = {
'train': 1000,
'test': 500
}
)
td500 = DatasetConfig(
file_pattern = '*_%s.tfrecord',
split_sizes = {
'train': 300,
'test': 200
}
)
tr400 = DatasetConfig(
file_pattern = 'tr400_%s.tfrecord',
split_sizes = {
'train': 400
}
)
scut = DatasetConfig(
file_pattern = 'scut_%s.tfrecord',
split_sizes = {
'train': 1715
}
)
synthtext = DatasetConfig(
file_pattern = '*.tfrecord',
split_sizes = {
'train': 858750
}
)
isbi = DatasetConfig(
file_pattern='medicalimage*.tfrecord',
split_sizes={
'train': 5641
}
)
dsb2018 = DatasetConfig(
file_pattern='DSB2018*.tfrecord',
split_sizes={
'train': 670
}
) | [
"546043882@qq.com"
] | 546043882@qq.com |
244cefbda8f2204ebdf6393288d58ecfef864398 | 805db436ed08bc263839f926b2ff065bdbef5d31 | /1-Basics/4-Repetition/02-For-Loop/05-Reverse/bot.py | d825eb68ed2196618f060d6306c5ecd006457a25 | [] | no_license | 4angua71/Com404 | 7e152d16c7f3d0591d5e068d58b463f36cb24c53 | e467cc31c3e14a49ff183c69723d7c42bf6a0445 | refs/heads/master | 2020-07-31T09:14:05.386679 | 2019-12-03T12:17:50 | 2019-12-03T12:17:50 | 210,556,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py |
user_word = str(input("What phrase do you see?"))
print("Reversing...\n")
print("The Phrase is: ",end="")
for position in range(len(user_word)-1, -1, -1):
print(user_word[position],end="")
print() | [
"4angua71@solent.ac.uk"
] | 4angua71@solent.ac.uk |
9c4e33b32a95d44e7d7ca4298866cc4c147abce9 | 90b2dd2feba8c080378d3a8c40cbe4dd43f0e6ee | /bsp/bsp_moj.py | f53e8434995f6e359f20b3e74c033050fb7e631f | [] | no_license | Doireidh/DungeonGen | d05bfbb1f1b2df5aa3462a45c3081f22e9a99cdf | 86b92570a9e8e6545886131380dbb370ff1ef6e2 | refs/heads/master | 2021-01-21T19:51:49.721922 | 2017-06-07T10:39:20 | 2017-06-07T10:39:20 | 92,170,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,688 | py | # -*- coding: UTF-8 -*-
import random
import time
from itertools import chain
class leaf():
MIN_LEAF_SIZE = 80
MAX_DEPTH = 5
def __init__(self, origin, width, height, depth = 1):
self.x, self.y = origin
self.height = height
self.width = width
self.l = None
self.r = None
self.depth = depth
self.room = room((self.x, self.y), self.width, self.height)
self.split_orientation = None #0 for vertical, 1 for horizontal split, None for not split
def __iter__(self):
if self.l:
yield from self.l
yield self
if self.r:
yield from self.r
#yield from self.leaves()
def split(self):
#split the leaf in two, unless the size of the leaf is too small
h_less = self.height <= self.MIN_LEAF_SIZE
w_less = self.width <= self.MIN_LEAF_SIZE
if h_less and w_less:
return
elif w_less:
self.split_h()
elif h_less:
self.split_v()
elif random.choice((0,1)) == 0:
self.split_v()
else:
self.split_h()
return
def split_v(self):
#spliting vertically, on x-axis
if self.depth > self.MAX_DEPTH:
return
movx = int(random.uniform(0.45*self.width, 0.55*self.width))
if self.width-movx < self.MIN_LEAF_SIZE:
return
self.split_orientation = 0
self.l = leaf((self.x, self.y), movx, self.height, self.depth+1)
self.r = leaf((self.x+movx, self.y), self.width-movx, self.height, self.depth+1)
self.l.split()
self.r.split()
#print("uspesno deljenje vertikalno")
def split_h(self):
#splitting horizontally, on y-axis. L is above, R is below.
if self.depth > self.MAX_DEPTH:
return
movy = int(random.uniform(0.45*self.height, 0.55*self.height))
if self.height-movy < self.MIN_LEAF_SIZE:
return
self.split_orientation = 1
self.l = leaf((self.x, self.y), self.width, movy, self.depth+1)
self.r = leaf((self.x, self.y+movy), self.width, self.height-movy, self.depth+1)
self.l.split()
self.r.split()
#print("uspesno deljenje horizontalno")
def packit(self):
#packs the leaf data
return (self.x, self.y, self.width, self.height)
def leaves(self):
#returns the leaf rooms in order
if self.l is None and self.r is None:
return [self.packit()]
elif self.l is None:
return self.r.leaves()
elif self.r is None:
return self.l.leaves()
else:
return self.l.leaves() + self.r.leaves()
class room:
#generates a room inside a leaf
MIN_ROOM_SIZE = 40
def __init__(self, origin, width, height):
self.x, self.y = origin
self.width = width
self.height = height
#move the origin
movx = int(random.triangular(1, 0.4*self.width, 0.05*self.width))
movy = int(random.triangular(1, 0.4*self.height, 0.05*self.height))
self.x += movx
self.y += movy
#reduce size
#movwidth = int(random.uniform(movx, self.width-movx))
#movheight = int(random.uniform(movy, self.height-movy))
movwidth = int(random.triangular(movx, self.width-movx,
movx+0.05*(self.width-movx)))
movheight = int(random.triangular(movy, self.height-movy,
movy+0.05*(self.height-movy)))
self.width -= movwidth
self.height -= movheight
if self.width < self.MIN_ROOM_SIZE:
self.width = self.MIN_ROOM_SIZE
if self.height < self.MIN_ROOM_SIZE:
self.height = self.MIN_ROOM_SIZE
def packit(self):
#packs the room data
return (self.x, self.y, self.width, self.height)
class dungeon:
def __init__(self, width, height, seed=None):
if seed is None:
self.seed = int(round(time.time()))
else:
self.seed = None
random.seed(self.seed)
self.tree = leaf((0,0), width, height)
#print("Uspesna init")
#self.tree.MIN_LEAF_SIZE = width/
self.tree.split()
def __iter__(self):
i = self.tree.__iter__()
return i
| [
"djoletaien@gmail.com"
] | djoletaien@gmail.com |
683aa5e2ae120f865163cdfa0014eb79dcc3b0b3 | a66937154323988a6e05958ca1acc9c3f70af51c | /dia3/3numbers.py | 8e18178c0eab02ede843ebcf882fc767ad7bddcd | [] | no_license | mariadelmarr/Python | f783ec4232e3ca09e1598a2f419169df3890e8ef | 07b76ea80d24dbedc7b3bcb734b234ce88bce6f6 | refs/heads/master | 2022-12-05T14:49:49.598249 | 2020-08-01T03:13:33 | 2020-08-01T03:13:33 | 262,640,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | number1 = int(input('Ingrese el primer numero. '))
number2 = int(input('Ingrese el segundo numero. '))
number3 = int(input('Ingrese el tercer numero. '))
if number1 > number2:
if number1 > number3:
print('El primer numero es mayor')
else:
print('El tercer numero es mayor')
else:
if number2 > number3:
print('El segundo numero es mayor')
else:
print('El tercer numero es mayor')
| [
"maria1.22dm@gmail.com"
] | maria1.22dm@gmail.com |
87e8ff2c23fba05bd5e8ab680a87c994fce96645 | 06fa646a51a30e07203352c2fc60cea64e798d0a | /practice9.py | aab2de86fa956989be143fa3f7d615e08721bbcc | [] | no_license | maciejpajkowski/python-training | e48f60240357199ecc4a69b54a1a4c11d0e1e8a2 | ad95f31857e9035f1b53a5486381ff7b53544645 | refs/heads/master | 2020-03-28T20:05:47.450060 | 2018-10-12T13:32:42 | 2018-10-12T13:32:42 | 149,038,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | py | # https://www.practicepython.org/exercise/2014/04/02/09-guessing-game-one.html
import random
import os
def clear():
os.system('cls' if os.name=='nt' else 'clear')
clear()
print("Welcome to the number guessing game!")
print("------------------------------------")
tries = 0
while True:
if tries is 0:
num = random.randint(1,9)
print("Guess the number!")
userNum = input("Pick a number between 1 and 9 OR type 'exit' to quit: ")
tries += 1
clear()
if userNum == 'exit':
break
if (int(userNum) < num):
print("The number you are looking for is higher.")
elif (int(userNum) > num):
print("The number you are looking for is smaller.")
else:
print("Congratulations! The number is", num, end=". \n")
if (tries == 1):
print("You figured it out on the first try! Well done!")
tries = 0
print("Let's play again! \n")
else:
print("It took you", tries, "tries. \n")
tries = 0
print("Let's play again!")
print("-----------------")
| [
"32037952+maciejpajkowski@users.noreply.github.com"
] | 32037952+maciejpajkowski@users.noreply.github.com |
e011acb2a124eea64a9f2578db61d929b87c7b5f | 8f8498bb6f56b19d45a1989c8113a077348c0a02 | /SWEA/모의문제/벽돌 깨기.py | b7a3377810ef044255053ef97963de47f9b49836 | [] | no_license | gjtjdtn201/practice | a09b437c892b0b601e156c09cb1f053b52fab11b | ea45582b2773616b2b8f350b927559210009d89f | refs/heads/master | 2021-01-01T13:29:46.640740 | 2020-11-28T00:55:37 | 2020-11-28T00:55:37 | 239,299,485 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,819 | py | import sys
sys.stdin = open('벽돌 깨기.txt', 'r')
from copy import deepcopy
from collections import deque
def chk(matrix, n, cnt):
global ans
if cnt == 0:
ans = 0
return
if n == N:
cnt = 0
for i in matrix:
cnt += W - i.count(0)
ans = min(cnt, ans)
return
for x in range(W):
matrix2 = deepcopy(matrix)
for y in range(H):
if matrix[y][x] != 0:
queue.append((y, x, matrix2[y][x]))
matrix2[y][x] = 0
boom(matrix2)
cnt = fall(matrix2)
chk(matrix2, n+1, cnt)
break
def boom(c_matrix):
while queue:
a, b, pos = queue.popleft()
for j in range(pos):
for i in range(4):
ny = a + dy[i] * j
nx = b + dx[i] * j
if 0 <= ny < H and 0 <= nx < W and c_matrix[ny][nx]:
if c_matrix[ny][nx] > 1:
queue.append((ny, nx, c_matrix[ny][nx]))
c_matrix[ny][nx] = 0
def fall(c_matrix):
cnt = 0
for x in range(W):
stack = []
for y in range(H):
if c_matrix[y][x]:
stack.append(c_matrix[y][x])
cnt += 1
idx = H - 1
while stack:
c_matrix[idx][x] = stack.pop()
idx -= 1
while idx >= 0:
c_matrix[idx][x] = 0
idx -= 1
return cnt
for tc in range(1, int(input())+1):
N, W, H = map(int, input().split())
matrix = []
for i in range(H):
matrix.append(list(map(int, input().split())))
dy = [1, 0, -1, 0]
dx = [0, 1, 0, -1]
ans = 987654321
chk2 = 0
queue = deque()
chk(matrix, 0, ans)
print('#{} {}'.format(tc, ans)) | [
"gjtjdtn201@naver.com"
] | gjtjdtn201@naver.com |
7b40d06cd25c05ce2cb560e0219372a7fef2e0a7 | 945aa2cd771d1f7daf2218ba3acac2f34dd4114a | /util/pwncheck.py | 50dadbbf625d8eccf0a57ca58c70cd7ffe616cab | [
"MIT"
] | permissive | ghsinfosec/pwncheck-flask | 24c32750494e10c5dc03bf71dfa4bfa8b4ed4743 | 158c7128fee408bd97b132e0cf0ff1a50f25f583 | refs/heads/master | 2023-08-22T11:07:10.708896 | 2021-09-29T19:24:29 | 2021-09-29T19:24:29 | 411,006,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | import hashlib
import requests
def pwncheck(password):
# get the sha1 hash of submitted pw
sha1 = hashlib.sha1(password.encode('utf-8'))
pw_hash = sha1.hexdigest().upper() # convert to upper case
prefix = pw_hash[0:5] # necessary for the api, ref above
url = f'https://api.pwnedpasswords.com/range/{prefix}'
response = requests.get(url).content.decode('utf-8') # store the response in utf-8 format
# create a dictionary to store the key/value pairs of returned hashes
hash_data = dict(i.split(':') for i in response.split('\r\n'))
# include the prefix to store the full pw hash
hash_data = dict((prefix + key, value) for (key, value) in hash_data.items())
# check for the pw hash in the dictionary to find a match
for k, v in hash_data.items():
# match found, pw is compromised
if k == pw_hash:
status = f'[!!] The password you entered has been seen {v} \
times in data breaches! Do NOT use this password! [!!]'
message = 'compromised'
break
# no match found, pw is good for now
if pw_hash != k:
status = f'The password you entered has not been found \
in any data breaches!'
message = 'safe'
return status, message
| [
"heath.stewart1@gmail.com"
] | heath.stewart1@gmail.com |
9511317e7351349a784222017a0f118e2414330e | 153b9f3fd7994747978e5e03cd49930f5bb694ea | /recipeproject/urls.py | c73d3ffa5a529e4112b986cdc775cd24f1606c04 | [] | no_license | hartnetl/lord-of-the-recipes | 6153bc081bcf97a8e4017d3c81c31bfd28b2ab43 | 38628dcf21968bae6db2923e1d155e9ee4467e73 | refs/heads/main | 2023-09-05T08:31:03.829261 | 2021-11-02T11:53:53 | 2021-11-02T11:53:53 | 415,848,829 | 0 | 2 | null | 2021-10-15T10:45:37 | 2021-10-11T08:53:49 | Python | UTF-8 | Python | false | false | 989 | py | """recipeproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# URL routing table for the project: admin site, rich-text editor,
# authentication, and the recipe app (which owns all remaining routes).
urlpatterns = [
    path('admin/', admin.site.urls),
    # django-summernote WYSIWYG editor endpoints
    path('summernote/', include('django_summernote.urls')),
    # django-allauth authentication views (login/logout/signup)
    path('accounts/', include('allauth.urls')),
    # Everything else is delegated to the recipe app's URLconf
    path('', include('recipeapp.urls'), name='recipeapp_urls'),
]
# Dotted path to the view Django invokes on HTTP 404 ("page not found")
handler404 = 'recipeapp.views.handle_not_found'
"laura.hartnett@outlook.com"
] | laura.hartnett@outlook.com |
b02c518a46b33bf006790e9b15e644866c155642 | a7ccf8dd608646a6abd0e6b78d35ae23fafb920d | /robot.py | 776a5e6240efabfd5468c60ddce530e5c35c2ca2 | [
"BSD-2-Clause"
] | permissive | QinjieLin-NU/Visual-Push-Grasp | 1b9d0297f0ac67416bbc7a36ddc6b2fcefce80e8 | 93a959fa45b80b407baa72c5c5d68e1fb07a255f | refs/heads/master | 2022-12-21T20:37:24.518248 | 2020-09-21T00:06:59 | 2020-09-21T00:06:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60,434 | py | import socket
import select
import struct
import time
import os
import numpy as np
import utils
from simulation import vrep
class Robot(object):
    def __init__(self, is_sim, obj_mesh_dir, num_obj, workspace_limits,
                 tcp_host_ip, tcp_port, rtc_host_ip, rtc_port,
                 is_testing, test_preset_cases, test_preset_file):
        """Set up either a simulated (V-REP) or a real (UR5) robot.

        Args:
            is_sim: True -> connect to a V-REP remote API server; False -> real UR5.
            obj_mesh_dir: directory of object mesh files to drop into the sim scene.
            num_obj: number of objects to add to the scene.
            workspace_limits: 3x2 array of [min, max] per axis in robot coordinates.
            tcp_host_ip, tcp_port: real robot's secondary TCP interface address.
            rtc_host_ip, rtc_port: real robot's real-time client interface address.
            is_testing, test_preset_cases, test_preset_file: when both flags are set,
                object meshes/poses are loaded from the preset file instead of randomized.
        """
        self.is_sim = is_sim
        self.workspace_limits = workspace_limits

        # If in simulation...
        if self.is_sim:

            # Define colors for object meshes (Tableau palette)
            self.color_space = np.asarray([[78.0, 121.0, 167.0], # blue
                                           [89.0, 161.0, 79.0], # green
                                           [156, 117, 95], # brown
                                           [242, 142, 43], # orange
                                           [237.0, 201.0, 72.0], # yellow
                                           [186, 176, 172], # gray
                                           [255.0, 87.0, 89.0], # red
                                           [176, 122, 161], # purple
                                           [118, 183, 178], # cyan
                                           [255, 157, 167]])/255.0 #pink

            # Read files in object mesh directory
            self.obj_mesh_dir = obj_mesh_dir
            self.num_obj = num_obj
            self.mesh_list = os.listdir(self.obj_mesh_dir)

            # Randomly choose objects to add to scene
            self.obj_mesh_ind = np.random.randint(0, len(self.mesh_list), size=self.num_obj)
            # Colors cycle through the 10-entry palette when num_obj > 10
            self.obj_mesh_color = self.color_space[np.asarray(range(self.num_obj)) % 10, :]

            # Make sure to have the server side running in V-REP:
            # in a child script of a V-REP scene, add following command
            # to be executed just once, at simulation start:
            #
            # simExtRemoteApiStart(19999)
            #
            # then start simulation, and run this program.
            #
            # IMPORTANT: for each successful call to simxStart, there
            # should be a corresponding call to simxFinish at the end!
            # MODIFY remoteApiConnections.txt

            # Connect to simulator
            vrep.simxFinish(-1) # Just in case, close all opened connections
            # self.sim_client = vrep.simxStart('192.168.100.181', 31502, True, True, 5000, 5) # Connect to V-REP on port 19997
            # if self.sim_client == -1:
            #     print('Failed to connect to simulation (V-REP remote API server). Exiting.')
            #     exit()
            # else:
            #     print('Connected to simulation.')
            #     self.restart_sim()
            self.sim_client = None
            # Try each candidate host in order; first successful connection wins
            vrep_ips = ['192.168.100.161','192.168.100.162','192.168.100.163','192.168.100.164','192.168.100.165','127.0.0.1']
            for test_ip in vrep_ips:
                print("...connecting to ",test_ip,"...")
                self.sim_client = vrep.simxStart(test_ip, 31502, True, True, 5000, 5) # Connect to V-REP on port 19997
                if self.sim_client == -1:
                    continue
                else:
                    break
            if self.sim_client == -1:
                print('Failed to connect to simulation (V-REP remote API server). Exiting.')
                exit()
            else:
                print('Connected to simulation.')
                self.restart_sim()

            self.is_testing = is_testing
            self.test_preset_cases = test_preset_cases
            self.test_preset_file = test_preset_file

            # Setup virtual camera in simulation
            self.setup_sim_camera()

            # If testing, read object meshes and poses from test case file
            # (one object per line: mesh_file r g b x y z rx ry rz)
            if self.is_testing and self.test_preset_cases:
                file = open(self.test_preset_file, 'r')
                file_content = file.readlines()
                self.test_obj_mesh_files = []
                self.test_obj_mesh_colors = []
                self.test_obj_positions = []
                self.test_obj_orientations = []
                for object_idx in range(self.num_obj):
                    file_content_curr_object = file_content[object_idx].split()
                    self.test_obj_mesh_files.append(os.path.join(self.obj_mesh_dir,file_content_curr_object[0]))
                    self.test_obj_mesh_colors.append([float(file_content_curr_object[1]),float(file_content_curr_object[2]),float(file_content_curr_object[3])])
                    self.test_obj_positions.append([float(file_content_curr_object[4]),float(file_content_curr_object[5]),float(file_content_curr_object[6])])
                    self.test_obj_orientations.append([float(file_content_curr_object[7]),float(file_content_curr_object[8]),float(file_content_curr_object[9])])
                file.close()
                self.obj_mesh_color = np.asarray(self.test_obj_mesh_colors)

            # Add objects to simulation environment
            self.add_objects()

        # If in real-settings...
        else:

            # Connect to robot client
            self.tcp_host_ip = tcp_host_ip
            self.tcp_port = tcp_port
            # self.tcp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

            # Connect as real-time client to parse state data
            self.rtc_host_ip = rtc_host_ip
            self.rtc_port = rtc_port

            # Default home joint configuration
            # self.home_joint_config = [-np.pi, -np.pi/2, np.pi/2, -np.pi/2, -np.pi/2, 0]
            self.home_joint_config = [-(180.0/360.0)*2*np.pi, -(84.2/360.0)*2*np.pi, (112.8/360.0)*2*np.pi, -(119.7/360.0)*2*np.pi, -(90.0/360.0)*2*np.pi, 0.0]

            # Default joint speed configuration
            self.joint_acc = 8 # Safe: 1.4
            self.joint_vel = 3 # Safe: 1.05

            # Joint tolerance for blocking calls
            self.joint_tolerance = 0.01

            # Default tool speed configuration
            self.tool_acc = 1.2 # Safe: 0.5
            self.tool_vel = 0.25 # Safe: 0.2

            # Tool pose tolerance for blocking calls
            self.tool_pose_tolerance = [0.002,0.002,0.002,0.01,0.01,0.01]

            # Move robot to home pose
            self.close_gripper()
            self.go_home()

            # Fetch RGB-D data from RealSense camera
            from real.camera import Camera
            self.camera = Camera()
            self.cam_intrinsics = self.camera.intrinsics

            # Load camera pose (from running calibrate.py), intrinsics and depth scale
            self.cam_pose = np.loadtxt('real/camera_pose.txt', delimiter=' ')
            self.cam_depth_scale = np.loadtxt('real/camera_depth_scale.txt', delimiter=' ')
    def setup_sim_camera(self):
        """Cache the simulated camera's handle, world pose, intrinsics and a background frame.

        Builds a 4x4 camera-to-world transform from the 'Vision_sensor_persp'
        object's position/orientation and stores fixed pinhole intrinsics.
        """

        # Get handle to camera
        sim_ret, self.cam_handle = vrep.simxGetObjectHandle(self.sim_client, 'Vision_sensor_persp', vrep.simx_opmode_blocking)

        # Get camera pose and intrinsics in simulation
        sim_ret, cam_position = vrep.simxGetObjectPosition(self.sim_client, self.cam_handle, -1, vrep.simx_opmode_blocking)
        sim_ret, cam_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.cam_handle, -1, vrep.simx_opmode_blocking)
        cam_trans = np.eye(4,4)
        cam_trans[0:3,3] = np.asarray(cam_position)
        # Euler angles are negated before inverting the rotation matrix to get
        # the camera-to-world rotation from V-REP's convention
        cam_orientation = [-cam_orientation[0], -cam_orientation[1], -cam_orientation[2]]
        cam_rotm = np.eye(4,4)
        cam_rotm[0:3,0:3] = np.linalg.inv(utils.euler2rotm(cam_orientation))
        self.cam_pose = np.dot(cam_trans, cam_rotm) # Compute rigid transformation representating camera pose
        self.cam_intrinsics = np.asarray([[618.62, 0, 320], [0, 618.62, 240], [0, 0, 1]])
        self.cam_depth_scale = 1

        # Get background image
        self.bg_color_img, self.bg_depth_img = self.get_camera_data()
        self.bg_depth_img = self.bg_depth_img * self.cam_depth_scale
    def add_objects(self):
        """Spawn the chosen object meshes into the simulated workspace.

        Each object is imported via the scene's 'remoteApiCommandServer' child
        script at a random (or preset, when testing) pose, and its handle is
        recorded in self.object_handles.
        """

        # Add each object to robot workspace at x,y location and orientation (random or pre-loaded)
        self.object_handles = []
        sim_obj_handles = []
        for object_idx in range(len(self.obj_mesh_ind)):
            curr_mesh_file = os.path.join(self.obj_mesh_dir, self.mesh_list[self.obj_mesh_ind[object_idx]])
            if self.is_testing and self.test_preset_cases:
                curr_mesh_file = self.test_obj_mesh_files[object_idx]
            curr_shape_name = 'shape_%02d' % object_idx
            # Random drop point kept 0.1 m inside the workspace borders
            drop_x = (self.workspace_limits[0][1] - self.workspace_limits[0][0] - 0.2) * np.random.random_sample() + self.workspace_limits[0][0] + 0.1
            drop_y = (self.workspace_limits[1][1] - self.workspace_limits[1][0] - 0.2) * np.random.random_sample() + self.workspace_limits[1][0] + 0.1
            object_position = [drop_x, drop_y, 0.15]
            object_orientation = [2*np.pi*np.random.random_sample(), 2*np.pi*np.random.random_sample(), 2*np.pi*np.random.random_sample()]
            if self.is_testing and self.test_preset_cases:
                object_position = [self.test_obj_positions[object_idx][0], self.test_obj_positions[object_idx][1], self.test_obj_positions[object_idx][2]]
                object_orientation = [self.test_obj_orientations[object_idx][0], self.test_obj_orientations[object_idx][1], self.test_obj_orientations[object_idx][2]]
            object_color = [self.obj_mesh_color[object_idx][0], self.obj_mesh_color[object_idx][1], self.obj_mesh_color[object_idx][2]]
            ret_resp,ret_ints,ret_floats,ret_strings,ret_buffer = vrep.simxCallScriptFunction(self.sim_client, 'remoteApiCommandServer',vrep.sim_scripttype_childscript,'importShape',[0,0,255,0], object_position + object_orientation + object_color, [curr_mesh_file, curr_shape_name], bytearray(), vrep.simx_opmode_blocking)
            if ret_resp == 8:
                print('Failed to add new objects to simulation. Please restart.')
                exit()
            curr_shape_handle = ret_ints[0]
            self.object_handles.append(curr_shape_handle)
            if not (self.is_testing and self.test_preset_cases):
                # Let the dropped object settle before spawning the next one
                time.sleep(2)
        self.prev_obj_positions = []
        self.obj_positions = []
    def restart_sim(self):
        """Stop and restart the V-REP simulation until the gripper settles into place.

        Also caches the 'UR5_target' and 'UR5_tip' object handles on self.
        """

        sim_ret, self.UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target',vrep.simx_opmode_blocking)
        vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (-0.5,0,0.3), vrep.simx_opmode_blocking)
        vrep.simxStopSimulation(self.sim_client, vrep.simx_opmode_blocking)
        vrep.simxStartSimulation(self.sim_client, vrep.simx_opmode_blocking)
        time.sleep(1)
        sim_ret, self.RG2_tip_handle = vrep.simxGetObjectHandle(self.sim_client, 'UR5_tip', vrep.simx_opmode_blocking)
        sim_ret, gripper_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1, vrep.simx_opmode_blocking)
        while gripper_position[2] > 0.4: # V-REP bug requiring multiple starts and stops to restart
            vrep.simxStopSimulation(self.sim_client, vrep.simx_opmode_blocking)
            vrep.simxStartSimulation(self.sim_client, vrep.simx_opmode_blocking)
            time.sleep(1)
            sim_ret, gripper_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1, vrep.simx_opmode_blocking)
    def check_sim(self):
        """Restart the environment if the gripper has left the (slightly padded) workspace.

        A gripper outside the workspace bounds indicates the physics went
        unstable; the scene is then restarted and re-populated with objects.
        """

        # Check if simulation is stable by checking if gripper is within workspace
        sim_ret, gripper_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1, vrep.simx_opmode_blocking)
        # x/y limits are padded by 0.1 m; z uses the raw limits
        sim_ok = gripper_position[0] > self.workspace_limits[0][0] - 0.1 and gripper_position[0] < self.workspace_limits[0][1] + 0.1 and gripper_position[1] > self.workspace_limits[1][0] - 0.1 and gripper_position[1] < self.workspace_limits[1][1] + 0.1 and gripper_position[2] > self.workspace_limits[2][0] and gripper_position[2] < self.workspace_limits[2][1]
        if not sim_ok:
            print('Simulation unstable. Restarting environment.')
            self.restart_sim()
            self.add_objects()
def get_task_score(self):
key_positions = np.asarray([[-0.625, 0.125, 0.0], # red
[-0.625, -0.125, 0.0], # blue
[-0.375, 0.125, 0.0], # green
[-0.375, -0.125, 0.0]]) #yellow
obj_positions = np.asarray(self.get_obj_positions())
obj_positions.shape = (1, obj_positions.shape[0], obj_positions.shape[1])
obj_positions = np.tile(obj_positions, (key_positions.shape[0], 1, 1))
key_positions.shape = (key_positions.shape[0], 1, key_positions.shape[1])
key_positions = np.tile(key_positions, (1 ,obj_positions.shape[1] ,1))
key_dist = np.sqrt(np.sum(np.power(obj_positions - key_positions, 2), axis=2))
key_nn_idx = np.argmin(key_dist, axis=0)
return np.sum(key_nn_idx == np.asarray(range(self.num_obj)) % 4)
def check_goal_reached(self):
goal_reached = self.get_task_score() == self.num_obj
return goal_reached
# def stop_sim(self):
# if self.is_sim:
# # Now send some data to V-REP in a non-blocking fashion:
# # vrep.simxAddStatusbarMessage(sim_client,'Hello V-REP!',vrep.simx_opmode_oneshot)
# # # Start the simulation
# # vrep.simxStartSimulation(sim_client,vrep.simx_opmode_oneshot_wait)
# # # Stop simulation:
# # vrep.simxStopSimulation(sim_client,vrep.simx_opmode_oneshot_wait)
# # Before closing the connection to V-REP, make sure that the last command sent out had time to arrive. You can guarantee this with (for example):
# vrep.simxGetPingTime(self.sim_client)
# # Now close the connection to V-REP:
# vrep.simxFinish(self.sim_client)
def get_obj_positions(self):
obj_positions = []
for object_handle in self.object_handles:
sim_ret, object_position = vrep.simxGetObjectPosition(self.sim_client, object_handle, -1, vrep.simx_opmode_blocking)
obj_positions.append(object_position)
return obj_positions
def get_obj_positions_and_orientations(self):
obj_positions = []
obj_orientations = []
for object_handle in self.object_handles:
sim_ret, object_position = vrep.simxGetObjectPosition(self.sim_client, object_handle, -1, vrep.simx_opmode_blocking)
sim_ret, object_orientation = vrep.simxGetObjectOrientation(self.sim_client, object_handle, -1, vrep.simx_opmode_blocking)
obj_positions.append(object_position)
obj_orientations.append(object_orientation)
return obj_positions, obj_orientations
    def reposition_objects(self, workspace_limits):
        """Scatter all objects to fresh random poses inside the given workspace.

        Moves the gripper clear first, then drops each object from z=0.15 at a
        random x,y (kept 0.1 m inside the borders) with a random orientation.
        """

        # Move gripper out of the way
        self.move_to([-0.1, 0, 0.3], None)
        # sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target',vrep.simx_opmode_blocking)
        # vrep.simxSetObjectPosition(self.sim_client, UR5_target_handle, -1, (-0.5,0,0.3), vrep.simx_opmode_blocking)
        # time.sleep(1)

        for object_handle in self.object_handles:

            # Drop object at random x,y location and random orientation in robot workspace
            drop_x = (workspace_limits[0][1] - workspace_limits[0][0] - 0.2) * np.random.random_sample() + workspace_limits[0][0] + 0.1
            drop_y = (workspace_limits[1][1] - workspace_limits[1][0] - 0.2) * np.random.random_sample() + workspace_limits[1][0] + 0.1
            object_position = [drop_x, drop_y, 0.15]
            object_orientation = [2*np.pi*np.random.random_sample(), 2*np.pi*np.random.random_sample(), 2*np.pi*np.random.random_sample()]
            vrep.simxSetObjectPosition(self.sim_client, object_handle, -1, object_position, vrep.simx_opmode_blocking)
            vrep.simxSetObjectOrientation(self.sim_client, object_handle, -1, object_orientation, vrep.simx_opmode_blocking)
            # Let each object fall and settle before moving the next one
            time.sleep(2)
def get_camera_data(self):
if self.is_sim:
# Get color image from simulation
sim_ret, resolution, raw_image = vrep.simxGetVisionSensorImage(self.sim_client, self.cam_handle, 0, vrep.simx_opmode_blocking)
color_img = np.asarray(raw_image)
color_img.shape = (resolution[1], resolution[0], 3)
color_img = color_img.astype(np.float)/255
color_img[color_img < 0] += 1
color_img *= 255
color_img = np.fliplr(color_img)
color_img = color_img.astype(np.uint8)
# Get depth image from simulation
sim_ret, resolution, depth_buffer = vrep.simxGetVisionSensorDepthBuffer(self.sim_client, self.cam_handle, vrep.simx_opmode_blocking)
depth_img = np.asarray(depth_buffer)
depth_img.shape = (resolution[1], resolution[0])
depth_img = np.fliplr(depth_img)
zNear = 0.01
zFar = 10
depth_img = depth_img * (zFar - zNear) + zNear
else:
# Get color and depth image from ROS service
color_img, depth_img = self.camera.get_data()
# color_img = self.camera.color_data.copy()
# depth_img = self.camera.depth_data.copy()
return color_img, depth_img
def parse_tcp_state_data(self, state_data, subpackage):
# Read package header
data_bytes = bytearray()
data_bytes.extend(state_data)
data_length = struct.unpack("!i", data_bytes[0:4])[0];
robot_message_type = data_bytes[4]
assert(robot_message_type == 16)
byte_idx = 5
# Parse sub-packages
subpackage_types = {'joint_data' : 1, 'cartesian_info' : 4, 'force_mode_data' : 7, 'tool_data' : 2}
while byte_idx < data_length:
# package_length = int.from_bytes(data_bytes[byte_idx:(byte_idx+4)], byteorder='big', signed=False)
package_length = struct.unpack("!i", data_bytes[byte_idx:(byte_idx+4)])[0]
byte_idx += 4
package_idx = data_bytes[byte_idx]
if package_idx == subpackage_types[subpackage]:
byte_idx += 1
break
byte_idx += package_length - 4
def parse_joint_data(data_bytes, byte_idx):
actual_joint_positions = [0,0,0,0,0,0]
target_joint_positions = [0,0,0,0,0,0]
for joint_idx in range(6):
actual_joint_positions[joint_idx] = struct.unpack('!d', data_bytes[(byte_idx+0):(byte_idx+8)])[0]
target_joint_positions[joint_idx] = struct.unpack('!d', data_bytes[(byte_idx+8):(byte_idx+16)])[0]
byte_idx += 41
return actual_joint_positions
def parse_cartesian_info(data_bytes, byte_idx):
actual_tool_pose = [0,0,0,0,0,0]
for pose_value_idx in range(6):
actual_tool_pose[pose_value_idx] = struct.unpack('!d', data_bytes[(byte_idx+0):(byte_idx+8)])[0]
byte_idx += 8
return actual_tool_pose
def parse_tool_data(data_bytes, byte_idx):
byte_idx += 2
tool_analog_input2 = struct.unpack('!d', data_bytes[(byte_idx+0):(byte_idx+8)])[0]
return tool_analog_input2
parse_functions = {'joint_data' : parse_joint_data, 'cartesian_info' : parse_cartesian_info, 'tool_data' : parse_tool_data}
return parse_functions[subpackage](data_bytes, byte_idx)
def parse_rtc_state_data(self, state_data):
# Read package header
data_bytes = bytearray()
data_bytes.extend(state_data)
data_length = struct.unpack("!i", data_bytes[0:4])[0];
assert(data_length == 812)
byte_idx = 4 + 8 + 8*48 + 24 + 120
TCP_forces = [0,0,0,0,0,0]
for joint_idx in range(6):
TCP_forces[joint_idx] = struct.unpack('!d', data_bytes[(byte_idx+0):(byte_idx+8)])[0]
byte_idx += 8
return TCP_forces
def close_gripper(self, async=False):
if self.is_sim:
gripper_motor_velocity = -0.5
gripper_motor_force = 100
sim_ret, RG2_gripper_handle = vrep.simxGetObjectHandle(self.sim_client, 'RG2_openCloseJoint', vrep.simx_opmode_blocking)
sim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)
vrep.simxSetJointForce(self.sim_client, RG2_gripper_handle, gripper_motor_force, vrep.simx_opmode_blocking)
vrep.simxSetJointTargetVelocity(self.sim_client, RG2_gripper_handle, gripper_motor_velocity, vrep.simx_opmode_blocking)
gripper_fully_closed = False
while gripper_joint_position > -0.047: # Block until gripper is fully closed
sim_ret, new_gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)
# print(gripper_joint_position)
if new_gripper_joint_position >= gripper_joint_position:
return gripper_fully_closed
gripper_joint_position = new_gripper_joint_position
gripper_fully_closed = True
else:
self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcp_socket.connect((self.tcp_host_ip, self.tcp_port))
tcp_command = "set_digital_out(8,True)\n"
self.tcp_socket.send(str.encode(tcp_command))
self.tcp_socket.close()
if async:
gripper_fully_closed = True
else:
time.sleep(1.5)
gripper_fully_closed = self.check_grasp()
return gripper_fully_closed
def open_gripper(self, async=False):
if self.is_sim:
gripper_motor_velocity = 0.5
gripper_motor_force = 20
sim_ret, RG2_gripper_handle = vrep.simxGetObjectHandle(self.sim_client, 'RG2_openCloseJoint', vrep.simx_opmode_blocking)
sim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)
vrep.simxSetJointForce(self.sim_client, RG2_gripper_handle, gripper_motor_force, vrep.simx_opmode_blocking)
vrep.simxSetJointTargetVelocity(self.sim_client, RG2_gripper_handle, gripper_motor_velocity, vrep.simx_opmode_blocking)
while gripper_joint_position < 0.0536: # Block until gripper is fully open
sim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)
else:
self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcp_socket.connect((self.tcp_host_ip, self.tcp_port))
tcp_command = "set_digital_out(8,False)\n"
self.tcp_socket.send(str.encode(tcp_command))
self.tcp_socket.close()
if not async:
time.sleep(1.5)
def get_state(self):
self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcp_socket.connect((self.tcp_host_ip, self.tcp_port))
state_data = self.tcp_socket.recv(2048)
self.tcp_socket.close()
return state_data
    def move_to(self, tool_position, tool_orientation):
        """Move the tool to a Cartesian target and block until it arrives.

        Sim: steps the 'UR5_target' dummy toward the goal in 2 cm increments
        (tool_orientation is ignored). Real: sends a `movel` URScript command
        and polls the state stream until within tolerance.
        """

        if self.is_sim:

            # sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target',vrep.simx_opmode_blocking)
            sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle,-1,vrep.simx_opmode_blocking)

            move_direction = np.asarray([tool_position[0] - UR5_target_position[0], tool_position[1] - UR5_target_position[1], tool_position[2] - UR5_target_position[2]])
            move_magnitude = np.linalg.norm(move_direction)
            move_step = 0.02*move_direction/move_magnitude
            num_move_steps = int(np.floor(move_magnitude/0.02))

            for step_iter in range(num_move_steps):
                vrep.simxSetObjectPosition(self.sim_client,self.UR5_target_handle,-1,(UR5_target_position[0] + move_step[0], UR5_target_position[1] + move_step[1], UR5_target_position[2] + move_step[2]),vrep.simx_opmode_blocking)
                sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client,self.UR5_target_handle,-1,vrep.simx_opmode_blocking)
            # Snap exactly onto the target to absorb the sub-2cm remainder
            vrep.simxSetObjectPosition(self.sim_client,self.UR5_target_handle,-1,(tool_position[0],tool_position[1],tool_position[2]),vrep.simx_opmode_blocking)

        else:

            self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.tcp_socket.connect((self.tcp_host_ip, self.tcp_port))

            tcp_command = "movel(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=0)\n" % (tool_position[0],tool_position[1],tool_position[2],tool_orientation[0],tool_orientation[1],tool_orientation[2],self.tool_acc,self.tool_vel)
            self.tcp_socket.send(str.encode(tcp_command))

            # Block until robot reaches target tool position
            # (only x/y/z are checked against tolerance, not orientation)
            tcp_state_data = self.tcp_socket.recv(2048)
            actual_tool_pose = self.parse_tcp_state_data(tcp_state_data, 'cartesian_info')
            while not all([np.abs(actual_tool_pose[j] - tool_position[j]) < self.tool_pose_tolerance[j] for j in range(3)]):
                # [min(np.abs(actual_tool_pose[j] - tool_orientation[j-3]), np.abs(np.abs(actual_tool_pose[j] - tool_orientation[j-3]) - np.pi*2)) < self.tool_pose_tolerance[j] for j in range(3,6)]
                # print([np.abs(actual_tool_pose[j] - tool_position[j]) for j in range(3)] + [min(np.abs(actual_tool_pose[j] - tool_orientation[j-3]), np.abs(np.abs(actual_tool_pose[j] - tool_orientation[j-3]) - np.pi*2)) for j in range(3,6)])
                tcp_state_data = self.tcp_socket.recv(2048)
                # NOTE(review): prev_actual_tool_pose is computed but never read
                prev_actual_tool_pose = np.asarray(actual_tool_pose).copy()
                actual_tool_pose = self.parse_tcp_state_data(tcp_state_data, 'cartesian_info')
                time.sleep(0.01)
            self.tcp_socket.close()
def guarded_move_to(self, tool_position, tool_orientation):
self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.rtc_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcp_socket.connect((self.tcp_host_ip, self.tcp_port))
self.rtc_socket.connect((self.rtc_host_ip, self.rtc_port))
# Read actual tool position
tcp_state_data = self.tcp_socket.recv(2048)
actual_tool_pose = self.parse_tcp_state_data(tcp_state_data, 'cartesian_info')
execute_success = True
# Increment every cm, check force
self.tool_acc = 0.1 # 1.2 # 0.5
while not all([np.abs(actual_tool_pose[j] - tool_position[j]) < self.tool_pose_tolerance[j] for j in range(3)]):
# [min(np.abs(actual_tool_pose[j] - tool_orientation[j-3]), np.abs(np.abs(actual_tool_pose[j] - tool_orientation[j-3]) - np.pi*2)) < self.tool_pose_tolerance[j] for j in range(3,6)]
# Compute motion trajectory in 1cm increments
increment = np.asarray([(tool_position[j] - actual_tool_pose[j]) for j in range(3)])
if np.linalg.norm(increment) < 0.01:
increment_position = tool_position
else:
increment = 0.01*increment/np.linalg.norm(increment)
increment_position = np.asarray(actual_tool_pose[0:3]) + increment
# Move to next increment position (blocking call)
tcp_command = "movel(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=0)\n" % (increment_position[0],increment_position[1],increment_position[2],tool_orientation[0],tool_orientation[1],tool_orientation[2],self.tool_acc,self.tool_vel)
self.tcp_socket.send(str.encode(tcp_command))
time_start = time.time()
tcp_state_data = self.tcp_socket.recv(2048)
actual_tool_pose = self.parse_tcp_state_data(tcp_state_data, 'cartesian_info')
while not all([np.abs(actual_tool_pose[j] - increment_position[j]) < self.tool_pose_tolerance[j] for j in range(3)]):
# print([np.abs(actual_tool_pose[j] - increment_position[j]) for j in range(3)])
tcp_state_data = self.tcp_socket.recv(2048)
actual_tool_pose = self.parse_tcp_state_data(tcp_state_data, 'cartesian_info')
time_snapshot = time.time()
if time_snapshot - time_start > 1:
break
time.sleep(0.01)
# Reading TCP forces from real-time client connection
rtc_state_data = self.rtc_socket.recv(6496)
TCP_forces = self.parse_rtc_state_data(rtc_state_data)
# If TCP forces in x/y exceed 20 Newtons, stop moving
# print(TCP_forces[0:3])
if np.linalg.norm(np.asarray(TCP_forces[0:2])) > 20 or (time_snapshot - time_start) > 1:
print('Warning: contact detected! Movement halted. TCP forces: [%f, %f, %f]' % (TCP_forces[0], TCP_forces[1], TCP_forces[2]))
execute_success = False
break
time.sleep(0.01)
self.tool_acc = 1.2 # 1.2 # 0.5
self.tcp_socket.close()
self.rtc_socket.close()
return execute_success
def move_joints(self, joint_configuration):
self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcp_socket.connect((self.tcp_host_ip, self.tcp_port))
tcp_command = "movej([%f" % joint_configuration[0]
for joint_idx in range(1,6):
tcp_command = tcp_command + (",%f" % joint_configuration[joint_idx])
tcp_command = tcp_command + "],a=%f,v=%f)\n" % (self.joint_acc, self.joint_vel)
self.tcp_socket.send(str.encode(tcp_command))
# Block until robot reaches home state
state_data = self.tcp_socket.recv(2048)
actual_joint_positions = self.parse_tcp_state_data(state_data, 'joint_data')
while not all([np.abs(actual_joint_positions[j] - joint_configuration[j]) < self.joint_tolerance for j in range(6)]):
state_data = self.tcp_socket.recv(2048)
actual_joint_positions = self.parse_tcp_state_data(state_data, 'joint_data')
time.sleep(0.01)
self.tcp_socket.close()
    def go_home(self):
        """Move the robot to its predefined home joint configuration (blocking)."""
        self.move_joints(self.home_joint_config)
# Note: must be preceded by close_gripper()
def check_grasp(self):
state_data = self.get_state()
tool_analog_input2 = self.parse_tcp_state_data(state_data, 'tool_data')
return tool_analog_input2 > 0.26
# Primitives ----------------------------------------------------------
    def grasp(self, position, heightmap_rotation_angle, workspace_limits):
        """Execute a top-down grasp primitive at `position` with the given rotation.

        Sim: moves above the target, rotates the gripper, descends, closes, lifts,
        and (on success) teleports the grasped object out of the workspace.
        Real: runs a URScript grasp, and if the fingers stayed open, transports
        the object to a drop bin, judging success by whether the gripper width
        stayed constant en route.

        Args:
            position: [x, y, z] grasp point in robot coordinates.
            heightmap_rotation_angle: gripper rotation (radians) from the heightmap.
            workspace_limits: 3x2 axis limits used to clamp the grasp height.

        Returns:
            bool: True if the grasp (and, on the real robot, the transport) succeeded.
        """
        print('Executing: grasp at (%f, %f, %f)' % (position[0], position[1], position[2]))

        if self.is_sim:

            # Compute tool orientation from heightmap rotation angle
            tool_rotation_angle = (heightmap_rotation_angle % np.pi) - np.pi/2

            # Avoid collision with floor
            position = np.asarray(position).copy()
            position[2] = max(position[2] - 0.04, workspace_limits[2][0] + 0.02)

            # Move gripper to location above grasp target
            grasp_location_margin = 0.15
            # sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target',vrep.simx_opmode_blocking)
            location_above_grasp_target = (position[0], position[1], position[2] + grasp_location_margin)

            # Compute gripper position and linear movement increments
            tool_position = location_above_grasp_target
            sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle,-1,vrep.simx_opmode_blocking)
            move_direction = np.asarray([tool_position[0] - UR5_target_position[0], tool_position[1] - UR5_target_position[1], tool_position[2] - UR5_target_position[2]])
            move_magnitude = np.linalg.norm(move_direction)
            move_step = 0.05*move_direction/move_magnitude
            num_move_steps = int(np.floor(move_direction[0]/move_step[0]))

            # Compute gripper orientation and rotation increments
            sim_ret, gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)
            rotation_step = 0.3 if (tool_rotation_angle - gripper_orientation[1] > 0) else -0.3
            num_rotation_steps = int(np.floor((tool_rotation_angle - gripper_orientation[1])/rotation_step))

            # Simultaneously move and rotate gripper
            for step_iter in range(max(num_move_steps, num_rotation_steps)):
                vrep.simxSetObjectPosition(self.sim_client,self.UR5_target_handle,-1,(UR5_target_position[0] + move_step[0]*min(step_iter,num_move_steps), UR5_target_position[1] + move_step[1]*min(step_iter,num_move_steps), UR5_target_position[2] + move_step[2]*min(step_iter,num_move_steps)),vrep.simx_opmode_blocking)
                vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi/2, gripper_orientation[1] + rotation_step*min(step_iter,num_rotation_steps), np.pi/2), vrep.simx_opmode_blocking)
            vrep.simxSetObjectPosition(self.sim_client,self.UR5_target_handle,-1,(tool_position[0],tool_position[1],tool_position[2]),vrep.simx_opmode_blocking)
            vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi/2, tool_rotation_angle, np.pi/2), vrep.simx_opmode_blocking)

            # Ensure gripper is open
            self.open_gripper()

            # Approach grasp target
            self.move_to(position, None)

            # Close gripper to grasp target
            gripper_full_closed = self.close_gripper()

            # Move gripper to location above grasp target
            self.move_to(location_above_grasp_target, None)

            # Check if grasp is successful
            # (a gripper that closes fully after lifting was holding nothing)
            gripper_full_closed = self.close_gripper()
            grasp_success = not gripper_full_closed

            # Move the grasped object elsewhere
            if grasp_success:
                # The held object is assumed to be the highest one after lifting
                object_positions = np.asarray(self.get_obj_positions())
                object_positions = object_positions[:,2]
                grasped_object_ind = np.argmax(object_positions)
                grasped_object_handle = self.object_handles[grasped_object_ind]
                vrep.simxSetObjectPosition(self.sim_client,grasped_object_handle,-1,(-0.5, 0.5 + 0.05*float(grasped_object_ind), 0.1),vrep.simx_opmode_blocking)

        else:

            # Compute tool orientation from heightmap rotation angle
            grasp_orientation = [1.0,0.0]
            if heightmap_rotation_angle > np.pi:
                heightmap_rotation_angle = heightmap_rotation_angle - 2*np.pi
            tool_rotation_angle = heightmap_rotation_angle/2
            tool_orientation = np.asarray([grasp_orientation[0]*np.cos(tool_rotation_angle) - grasp_orientation[1]*np.sin(tool_rotation_angle), grasp_orientation[0]*np.sin(tool_rotation_angle) + grasp_orientation[1]*np.cos(tool_rotation_angle), 0.0])*np.pi
            tool_orientation_angle = np.linalg.norm(tool_orientation)
            tool_orientation_axis = tool_orientation/tool_orientation_angle
            tool_orientation_rotm = utils.angle2rotm(tool_orientation_angle, tool_orientation_axis, point=None)[:3,:3]

            # Compute tilted tool orientation during dropping into bin
            tilt_rotm = utils.euler2rotm(np.asarray([-np.pi/4,0,0]))
            tilted_tool_orientation_rotm = np.dot(tilt_rotm, tool_orientation_rotm)
            tilted_tool_orientation_axis_angle = utils.rotm2angle(tilted_tool_orientation_rotm)
            tilted_tool_orientation = tilted_tool_orientation_axis_angle[0]*np.asarray(tilted_tool_orientation_axis_angle[1:4])

            # Attempt grasp
            position = np.asarray(position).copy()
            position[2] = max(position[2] - 0.05, workspace_limits[2][0])
            self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.tcp_socket.connect((self.tcp_host_ip, self.tcp_port))
            tcp_command = "def process():\n"
            tcp_command += " set_digital_out(8,False)\n"
            tcp_command += " movej(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=0.09)\n" % (position[0],position[1],position[2]+0.1,tool_orientation[0],tool_orientation[1],0.0,self.joint_acc*0.5,self.joint_vel*0.5)
            tcp_command += " movej(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=0.00)\n" % (position[0],position[1],position[2],tool_orientation[0],tool_orientation[1],0.0,self.joint_acc*0.1,self.joint_vel*0.1)
            tcp_command += " set_digital_out(8,True)\n"
            tcp_command += "end\n"
            self.tcp_socket.send(str.encode(tcp_command))
            self.tcp_socket.close()

            # Block until robot reaches target tool position and gripper fingers have stopped moving
            state_data = self.get_state()
            tool_analog_input2 = self.parse_tcp_state_data(state_data, 'tool_data')
            timeout_t0 = time.time()
            while True:
                state_data = self.get_state()
                new_tool_analog_input2 = self.parse_tcp_state_data(state_data, 'tool_data')
                actual_tool_pose = self.parse_tcp_state_data(state_data, 'cartesian_info')
                timeout_t1 = time.time()
                # Finished when the finger reading has settled at the target pose,
                # or after a 5-second timeout
                if (tool_analog_input2 < 3.7 and (abs(new_tool_analog_input2 - tool_analog_input2) < 0.01) and all([np.abs(actual_tool_pose[j] - position[j]) < self.tool_pose_tolerance[j] for j in range(3)])) or (timeout_t1 - timeout_t0) > 5:
                    break
                tool_analog_input2 = new_tool_analog_input2

            # Check if gripper is open (grasp might be successful)
            gripper_open = tool_analog_input2 > 0.26

            # # Check if grasp is successful
            # grasp_success = tool_analog_input2 > 0.26

            home_position = [0.49,0.11,0.03]
            bin_position = [0.5,-0.45,0.1]

            # If gripper is open, drop object in bin and check if grasp is successful
            grasp_success = False
            if gripper_open:

                # Pre-compute blend radius
                blend_radius = min(abs(bin_position[1] - position[1])/2 - 0.01, 0.2)

                # Attempt placing
                self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self.tcp_socket.connect((self.tcp_host_ip, self.tcp_port))
                tcp_command = "def process():\n"
                tcp_command += " movej(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=%f)\n" % (position[0],position[1],bin_position[2],tool_orientation[0],tool_orientation[1],0.0,self.joint_acc,self.joint_vel,blend_radius)
                tcp_command += " movej(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=%f)\n" % (bin_position[0],bin_position[1],bin_position[2],tilted_tool_orientation[0],tilted_tool_orientation[1],tilted_tool_orientation[2],self.joint_acc,self.joint_vel,blend_radius)
                tcp_command += " set_digital_out(8,False)\n"
                tcp_command += " movej(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=0.0)\n" % (home_position[0],home_position[1],home_position[2],tool_orientation[0],tool_orientation[1],0.0,self.joint_acc*0.5,self.joint_vel*0.5)
                tcp_command += "end\n"
                self.tcp_socket.send(str.encode(tcp_command))
                self.tcp_socket.close()
                # print(tcp_command) # Debug

                # Measure gripper width until robot reaches near bin location
                state_data = self.get_state()
                measurements = []
                while True:
                    state_data = self.get_state()
                    tool_analog_input2 = self.parse_tcp_state_data(state_data, 'tool_data')
                    actual_tool_pose = self.parse_tcp_state_data(state_data, 'cartesian_info')
                    measurements.append(tool_analog_input2)
                    if abs(actual_tool_pose[1] - bin_position[1]) < 0.2 or all([np.abs(actual_tool_pose[j] - home_position[j]) < self.tool_pose_tolerance[j] for j in range(3)]):
                        break

                # If gripper width did not change before reaching bin location, then object is in grip and grasp is successful
                if len(measurements) >= 2:
                    if abs(measurements[0] - measurements[1]) < 0.1:
                        grasp_success = True

            else:
                # Gripper closed on nothing: retract and return home
                self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self.tcp_socket.connect((self.tcp_host_ip, self.tcp_port))
                tcp_command = "def process():\n"
                tcp_command += " movej(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=0.09)\n" % (position[0],position[1],position[2]+0.1,tool_orientation[0],tool_orientation[1],0.0,self.joint_acc*0.5,self.joint_vel*0.5)
                tcp_command += " movej(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=0.0)\n" % (home_position[0],home_position[1],home_position[2],tool_orientation[0],tool_orientation[1],0.0,self.joint_acc*0.5,self.joint_vel*0.5)
                tcp_command += "end\n"
                self.tcp_socket.send(str.encode(tcp_command))
                self.tcp_socket.close()

                # Block until robot reaches home location
                state_data = self.get_state()
                tool_analog_input2 = self.parse_tcp_state_data(state_data, 'tool_data')
                actual_tool_pose = self.parse_tcp_state_data(state_data, 'cartesian_info')
                while True:
                    state_data = self.get_state()
                    new_tool_analog_input2 = self.parse_tcp_state_data(state_data, 'tool_data')
                    actual_tool_pose = self.parse_tcp_state_data(state_data, 'cartesian_info')
                    if (abs(new_tool_analog_input2 - tool_analog_input2) < 0.01) and all([np.abs(actual_tool_pose[j] - home_position[j]) < self.tool_pose_tolerance[j] for j in range(3)]):
                        break
                    tool_analog_input2 = new_tool_analog_input2

        return grasp_success
def push(self, position, heightmap_rotation_angle, workspace_limits):
    """Execute a push primitive starting at `position` (x, y, z, meters).

    `heightmap_rotation_angle` determines the push direction in the
    heightmap frame; the push endpoint is clamped to `workspace_limits`
    ([[x_min,x_max],[y_min,y_max],[z_min,z_max]]).  Runs either in the
    V-REP simulation (self.is_sim) or on the real UR5 by streaming a
    URScript program over TCP.  Always returns True (push_success) —
    no failure detection is performed.
    """
    print('Executing: push at (%f, %f, %f)' % (position[0], position[1], position[2]))

    if self.is_sim:

        # Compute tool orientation from heightmap rotation angle
        tool_rotation_angle = (heightmap_rotation_angle % np.pi) - np.pi/2

        # Adjust pushing point to be on tip of finger
        position[2] = position[2] + 0.026

        # Compute pushing direction (unit vector rotated by the heightmap angle)
        push_orientation = [1.0,0.0]
        push_direction = np.asarray([push_orientation[0]*np.cos(heightmap_rotation_angle) - push_orientation[1]*np.sin(heightmap_rotation_angle), push_orientation[0]*np.sin(heightmap_rotation_angle) + push_orientation[1]*np.cos(heightmap_rotation_angle)])

        # Move gripper to location above pushing point
        pushing_point_margin = 0.1
        location_above_pushing_point = (position[0], position[1], position[2] + pushing_point_margin)

        # Compute gripper position and linear movement increments
        tool_position = location_above_pushing_point
        sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle,-1,vrep.simx_opmode_blocking)
        move_direction = np.asarray([tool_position[0] - UR5_target_position[0], tool_position[1] - UR5_target_position[1], tool_position[2] - UR5_target_position[2]])
        move_magnitude = np.linalg.norm(move_direction)
        # Step the target in 5 cm increments so the simulated arm tracks smoothly.
        move_step = 0.05*move_direction/move_magnitude
        num_move_steps = int(np.floor(move_direction[0]/move_step[0]))

        # Compute gripper orientation and rotation increments
        sim_ret, gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)
        rotation_step = 0.3 if (tool_rotation_angle - gripper_orientation[1] > 0) else -0.3
        num_rotation_steps = int(np.floor((tool_rotation_angle - gripper_orientation[1])/rotation_step))

        # Simultaneously move and rotate gripper
        for step_iter in range(max(num_move_steps, num_rotation_steps)):
            vrep.simxSetObjectPosition(self.sim_client,self.UR5_target_handle,-1,(UR5_target_position[0] + move_step[0]*min(step_iter,num_move_steps), UR5_target_position[1] + move_step[1]*min(step_iter,num_move_steps), UR5_target_position[2] + move_step[2]*min(step_iter,num_move_steps)),vrep.simx_opmode_blocking)
            vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi/2, gripper_orientation[1] + rotation_step*min(step_iter,num_rotation_steps), np.pi/2), vrep.simx_opmode_blocking)
        # Snap to the exact target pose after the incremental approach.
        vrep.simxSetObjectPosition(self.sim_client,self.UR5_target_handle,-1,(tool_position[0],tool_position[1],tool_position[2]),vrep.simx_opmode_blocking)
        vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi/2, tool_rotation_angle, np.pi/2), vrep.simx_opmode_blocking)

        # Ensure gripper is closed
        self.close_gripper()

        # Approach pushing point
        self.move_to(position, None)

        # Compute target location (push to the right)
        push_length = 0.1
        target_x = min(max(position[0] + push_direction[0]*push_length, workspace_limits[0][0]), workspace_limits[0][1])
        target_y = min(max(position[1] + push_direction[1]*push_length, workspace_limits[1][0]), workspace_limits[1][1])
        # Recompute the (possibly clamped) push length after workspace limiting.
        push_length = np.sqrt(np.power(target_x-position[0],2)+np.power(target_y-position[1],2))

        # Move in pushing direction towards target location
        self.move_to([target_x, target_y, position[2]], None)

        # Move gripper to location above grasp target
        self.move_to([target_x, target_y, location_above_pushing_point[2]], None)

        push_success = True

    else:

        # Compute tool orientation from heightmap rotation angle
        push_orientation = [1.0,0.0]
        tool_rotation_angle = heightmap_rotation_angle/2
        # Axis-angle tool orientation (scaled to a pi-magnitude rotation vector).
        tool_orientation = np.asarray([push_orientation[0]*np.cos(tool_rotation_angle) - push_orientation[1]*np.sin(tool_rotation_angle), push_orientation[0]*np.sin(tool_rotation_angle) + push_orientation[1]*np.cos(tool_rotation_angle), 0.0])*np.pi
        tool_orientation_angle = np.linalg.norm(tool_orientation)
        tool_orientation_axis = tool_orientation/tool_orientation_angle
        tool_orientation_rotm = utils.angle2rotm(tool_orientation_angle, tool_orientation_axis, point=None)[:3,:3]

        # Compute push direction and endpoint (push to right of rotated heightmap)
        push_direction = np.asarray([push_orientation[0]*np.cos(heightmap_rotation_angle) - push_orientation[1]*np.sin(heightmap_rotation_angle), push_orientation[0]*np.sin(heightmap_rotation_angle) + push_orientation[1]*np.cos(heightmap_rotation_angle), 0.0])
        target_x = min(max(position[0] + push_direction[0]*0.1, workspace_limits[0][0]), workspace_limits[0][1])
        target_y = min(max(position[1] + push_direction[1]*0.1, workspace_limits[1][0]), workspace_limits[1][1])
        push_endpoint = np.asarray([target_x, target_y, position[2]])
        push_direction.shape = (3,1)

        # Compute tilted tool orientation during push (tilt -pi/8 about the
        # axis perpendicular to the push direction).
        tilt_axis = np.dot(utils.euler2rotm(np.asarray([0,0,np.pi/2]))[:3,:3], push_direction)
        tilt_rotm = utils.angle2rotm(-np.pi/8, tilt_axis, point=None)[:3,:3]
        tilted_tool_orientation_rotm = np.dot(tilt_rotm, tool_orientation_rotm)
        tilted_tool_orientation_axis_angle = utils.rotm2angle(tilted_tool_orientation_rotm)
        tilted_tool_orientation = tilted_tool_orientation_axis_angle[0]*np.asarray(tilted_tool_orientation_axis_angle[1:4])

        # Push only within workspace limits
        position = np.asarray(position).copy()
        position[0] = min(max(position[0], workspace_limits[0][0]), workspace_limits[0][1])
        position[1] = min(max(position[1], workspace_limits[1][0]), workspace_limits[1][1])
        position[2] = max(position[2] + 0.005, workspace_limits[2][0] + 0.005) # Add buffer to surface

        home_position = [0.49,0.11,0.03]

        # Attempt push: stream a URScript program (close gripper, approach,
        # push, retract, return home) to the robot controller over TCP.
        self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.tcp_socket.connect((self.tcp_host_ip, self.tcp_port))
        tcp_command = "def process():\n"
        tcp_command += " set_digital_out(8,True)\n"
        tcp_command += " movej(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=0.09)\n" % (position[0],position[1],position[2]+0.1,tool_orientation[0],tool_orientation[1],tool_orientation[2],self.joint_acc*0.5,self.joint_vel*0.5)
        tcp_command += " movej(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=0.00)\n" % (position[0],position[1],position[2],tool_orientation[0],tool_orientation[1],tool_orientation[2],self.joint_acc*0.1,self.joint_vel*0.1)
        tcp_command += " movej(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=0.00)\n" % (push_endpoint[0],push_endpoint[1],push_endpoint[2],tilted_tool_orientation[0],tilted_tool_orientation[1],tilted_tool_orientation[2],self.joint_acc*0.1,self.joint_vel*0.1)
        tcp_command += " movej(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=0.03)\n" % (position[0],position[1],position[2]+0.1,tool_orientation[0],tool_orientation[1],tool_orientation[2],self.joint_acc*0.5,self.joint_vel*0.5)
        tcp_command += " movej(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=0.00)\n" % (home_position[0],home_position[1],home_position[2],tool_orientation[0],tool_orientation[1],tool_orientation[2],self.joint_acc*0.5,self.joint_vel*0.5)
        tcp_command += "end\n"
        self.tcp_socket.send(str.encode(tcp_command))
        self.tcp_socket.close()

        # Block until robot reaches target tool position and gripper fingers have stopped moving
        state_data = self.get_state()
        while True:
            state_data = self.get_state()
            actual_tool_pose = self.parse_tcp_state_data(state_data, 'cartesian_info')
            if all([np.abs(actual_tool_pose[j] - home_position[j]) < self.tool_pose_tolerance[j] for j in range(3)]):
                break
        push_success = True
        time.sleep(0.5)

    return push_success
def restart_real(self):
    """Reset the real-robot workspace by shaking/repositioning the object box.

    Grabs the box at a fixed pose, slides it to a release pose and back
    (spilling the objects into the workspace), then returns the arm home.
    All motion is done by streaming URScript programs over TCP; progress
    is tracked by polling the robot state.  No return value.
    """
    # Compute tool orientation from heightmap rotation angle
    grasp_orientation = [1.0,0.0]
    tool_rotation_angle = -np.pi/4
    # Axis-angle tool orientation (scaled to a pi-magnitude rotation vector).
    tool_orientation = np.asarray([grasp_orientation[0]*np.cos(tool_rotation_angle) - grasp_orientation[1]*np.sin(tool_rotation_angle), grasp_orientation[0]*np.sin(tool_rotation_angle) + grasp_orientation[1]*np.cos(tool_rotation_angle), 0.0])*np.pi
    tool_orientation_angle = np.linalg.norm(tool_orientation)
    tool_orientation_axis = tool_orientation/tool_orientation_angle
    tool_orientation_rotm = utils.angle2rotm(tool_orientation_angle, tool_orientation_axis, point=None)[:3,:3]

    # Tilted variant of the tool orientation used for the approach moves.
    tilt_rotm = utils.euler2rotm(np.asarray([-np.pi/4,0,0]))
    tilted_tool_orientation_rotm = np.dot(tilt_rotm, tool_orientation_rotm)
    tilted_tool_orientation_axis_angle = utils.rotm2angle(tilted_tool_orientation_rotm)
    tilted_tool_orientation = tilted_tool_orientation_axis_angle[0]*np.asarray(tilted_tool_orientation_axis_angle[1:4])

    # Move to box grabbing position (open gripper, descend, close on the box).
    box_grab_position = [0.5,-0.35,-0.12]
    self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.tcp_socket.connect((self.tcp_host_ip, self.tcp_port))
    tcp_command = "def process():\n"
    tcp_command += " set_digital_out(8,False)\n"
    tcp_command += " movej(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=0.09)\n" % (box_grab_position[0],box_grab_position[1],box_grab_position[2]+0.1,tilted_tool_orientation[0],tilted_tool_orientation[1],tilted_tool_orientation[2],self.joint_acc,self.joint_vel)
    tcp_command += " movej(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=0.00)\n" % (box_grab_position[0],box_grab_position[1],box_grab_position[2],tool_orientation[0],tool_orientation[1],tool_orientation[2],self.joint_acc,self.joint_vel)
    tcp_command += " set_digital_out(8,True)\n"
    tcp_command += "end\n"
    self.tcp_socket.send(str.encode(tcp_command))
    self.tcp_socket.close()

    # Block until robot reaches box grabbing position and gripper fingers have stopped moving
    # (tool_analog_input2 < 3.7 indicates the gripper has closed on the box —
    # NOTE(review): threshold semantics inferred from usage; confirm on hardware).
    state_data = self.get_state()
    tool_analog_input2 = self.parse_tcp_state_data(state_data, 'tool_data')
    while True:
        state_data = self.get_state()
        new_tool_analog_input2 = self.parse_tcp_state_data(state_data, 'tool_data')
        actual_tool_pose = self.parse_tcp_state_data(state_data, 'cartesian_info')
        if tool_analog_input2 < 3.7 and (abs(new_tool_analog_input2 - tool_analog_input2) < 0.01) and all([np.abs(actual_tool_pose[j] - box_grab_position[j]) < self.tool_pose_tolerance[j] for j in range(3)]):
            break
        tool_analog_input2 = new_tool_analog_input2

    # Move to box release position, shake the box back into place,
    # release it, and retreat to the home position.
    box_release_position = [0.5,0.08,-0.12]
    home_position = [0.49,0.11,0.03]
    self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.tcp_socket.connect((self.tcp_host_ip, self.tcp_port))
    tcp_command = "def process():\n"
    tcp_command += " movej(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=0.00)\n" % (box_release_position[0],box_release_position[1],box_release_position[2],tool_orientation[0],tool_orientation[1],tool_orientation[2],self.joint_acc*0.1,self.joint_vel*0.1)
    tcp_command += " movej(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=0.00)\n" % (box_release_position[0],box_release_position[1],box_release_position[2]+0.3,tool_orientation[0],tool_orientation[1],tool_orientation[2],self.joint_acc*0.02,self.joint_vel*0.02)
    tcp_command += " movej(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=0.29)\n" % (box_grab_position[0]-0.05,box_grab_position[1]+0.1,box_grab_position[2]+0.3,tilted_tool_orientation[0],tilted_tool_orientation[1],tilted_tool_orientation[2],self.joint_acc*0.5,self.joint_vel*0.5)
    tcp_command += " movej(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=0.00)\n" % (box_grab_position[0]-0.05,box_grab_position[1]+0.1,box_grab_position[2],tool_orientation[0],tool_orientation[1],tool_orientation[2],self.joint_acc*0.5,self.joint_vel*0.5)
    tcp_command += " movej(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=0.00)\n" % (box_grab_position[0],box_grab_position[1],box_grab_position[2],tool_orientation[0],tool_orientation[1],tool_orientation[2],self.joint_acc*0.1,self.joint_vel*0.1)
    tcp_command += " movej(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=0.00)\n" % (box_grab_position[0]+0.05,box_grab_position[1],box_grab_position[2],tool_orientation[0],tool_orientation[1],tool_orientation[2],self.joint_acc*0.1,self.joint_vel*0.1)
    tcp_command += " set_digital_out(8,False)\n"
    tcp_command += " movej(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=0.09)\n" % (box_grab_position[0],box_grab_position[1],box_grab_position[2]+0.1,tilted_tool_orientation[0],tilted_tool_orientation[1],tilted_tool_orientation[2],self.joint_acc,self.joint_vel)
    tcp_command += " movej(p[%f,%f,%f,%f,%f,%f],a=%f,v=%f,t=0,r=0.00)\n" % (home_position[0],home_position[1],home_position[2],tool_orientation[0],tool_orientation[1],tool_orientation[2],self.joint_acc,self.joint_vel)
    tcp_command += "end\n"
    self.tcp_socket.send(str.encode(tcp_command))
    self.tcp_socket.close()

    # Block until robot reaches home position
    # (tool_analog_input2 > 3.0 indicates the gripper has reopened).
    state_data = self.get_state()
    tool_analog_input2 = self.parse_tcp_state_data(state_data, 'tool_data')
    while True:
        state_data = self.get_state()
        new_tool_analog_input2 = self.parse_tcp_state_data(state_data, 'tool_data')
        actual_tool_pose = self.parse_tcp_state_data(state_data, 'cartesian_info')
        if tool_analog_input2 > 3.0 and (abs(new_tool_analog_input2 - tool_analog_input2) < 0.01) and all([np.abs(actual_tool_pose[j] - home_position[j]) < self.tool_pose_tolerance[j] for j in range(3)]):
            break
        tool_analog_input2 = new_tool_analog_input2
# def place(self, position, orientation, workspace_limits):
# print('Executing: place at (%f, %f, %f)' % (position[0], position[1], position[2]))
# # Attempt placing
# position[2] = max(position[2], workspace_limits[2][0])
# self.move_to([position[0], position[1], position[2] + 0.2], orientation)
# self.move_to([position[0], position[1], position[2] + 0.05], orientation)
# self.tool_acc = 1 # 0.05
# self.tool_vel = 0.02 # 0.02
# self.move_to([position[0], position[1], position[2]], orientation)
# self.open_gripper()
# self.tool_acc = 1 # 0.5
# self.tool_vel = 0.2 # 0.2
# self.move_to([position[0], position[1], position[2] + 0.2], orientation)
# self.close_gripper()
# self.go_home()
# def place(self, position, heightmap_rotation_angle, workspace_limits):
# print('Executing: place at (%f, %f, %f)' % (position[0], position[1], position[2]))
# if self.is_sim:
# # Compute tool orientation from heightmap rotation angle
# tool_rotation_angle = (heightmap_rotation_angle % np.pi) - np.pi/2
# # Avoid collision with floor
# position[2] = max(position[2] + 0.04 + 0.02, workspace_limits[2][0] + 0.02)
# # Move gripper to location above place target
# place_location_margin = 0.1
# sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target',vrep.simx_opmode_blocking)
# location_above_place_target = (position[0], position[1], position[2] + place_location_margin)
# self.move_to(location_above_place_target, None)
# sim_ret,gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, UR5_target_handle, -1, vrep.simx_opmode_blocking)
# if tool_rotation_angle - gripper_orientation[1] > 0:
# increment = 0.2
# else:
# increment = -0.2
# while abs(tool_rotation_angle - gripper_orientation[1]) >= 0.2:
# vrep.simxSetObjectOrientation(self.sim_client, UR5_target_handle, -1, (np.pi/2, gripper_orientation[1] + increment, np.pi/2), vrep.simx_opmode_blocking)
# time.sleep(0.01)
# sim_ret,gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, UR5_target_handle, -1, vrep.simx_opmode_blocking)
# vrep.simxSetObjectOrientation(self.sim_client, UR5_target_handle, -1, (np.pi/2, tool_rotation_angle, np.pi/2), vrep.simx_opmode_blocking)
# # Approach place target
# self.move_to(position, None)
# # Ensure gripper is open
# self.open_gripper()
# # Move gripper to location above place target
# self.move_to(location_above_place_target, None)
# place_success = True
# return place_success
# JUNK
# command = "movel(p[%f,%f,%f,%f,%f,%f],0.5,0.2,0,0,a=1.2,v=0.25)\n" % (-0.5,-0.2,0.1,2.0171,2.4084,0)
# import socket
# HOST = "192.168.1.100"
# PORT = 30002
# s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# s.connect((HOST,PORT))
# j0 = 0
# j1 = -3.1415/2
# j2 = 3.1415/2
# j3 = -3.1415/2
# j4 = -3.1415/2
# j5 = 0;
# joint_acc = 1.2
# joint_vel = 0.25
# # command = "movej([%f,%f,%f,%f,%f,%f],a=%f,v=%f)\n" % (j0,j1,j2,j3,j4,j5,joint_acc,joint_vel)
# #
# # True closes
# command = "set_digital_out(8,True)\n"
# s.send(str.encode(command))
# data = s.recv(1024)
# s.close()
# print("Received",repr(data))
# print()
# String.Format ("movej([%f,%f,%f,%f,%f, %f], a={6}, v={7})\n", j0, j1, j2, j3, j4, j5, a, v); | [
"qinjielin2018@u.northwestern.edu"
] | qinjielin2018@u.northwestern.edu |
fb3a4f022d9c577cdc0450c21233f475b207d0f5 | 3d238bcd230dcf8ede2b6d93a6bae426d44fc438 | /model.py | 0653f8f7882f1d93b1f97b789b26b6633fa437bf | [] | no_license | hamzaelanssari/XOR-Problem | 6f79ef21fb9ed709dfff801014b0e2e987f7f19d | 08e3f01fbb07d218681c7b7099654161782c0eb1 | refs/heads/main | 2023-03-05T05:01:36.453581 | 2021-02-10T16:08:23 | 2021-02-10T16:08:23 | 337,770,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,760 | py | import numpy as np
from sklearn.metrics import confusion_matrix, f1_score, recall_score, precision_score, accuracy_score
from leaky_relu import leaky_relu
from relu import relu
from sigmoid import sigmoid
from sign import sign
from softmax import softmax
from step import step
from tanh import tanh
''' MultiLayerNN Class '''
class MultiLayerNN:
    """Fully-connected neural network with one hidden layer, trained by
    full-batch gradient descent for binary classification (originally the
    XOR problem).

    Activation functions are chosen by name through
    ``activation_function()`` (or the per-layer setters); each activation
    module must expose ``function`` and ``derivative`` callables.
    """

    # Name -> activation-module dispatch table.  Replaces two duplicated
    # 8-branch if/elif chains; unknown names fall back per-layer (see the
    # setters below), matching the original behavior.
    _ACTIVATIONS = {
        'sigmoid': sigmoid,
        'softmax': softmax,
        'relu': relu,
        'tanh': tanh,
        'leaky_relu': leaky_relu,
        'sign': sign,
        'step': step,
    }

    def __init__(self, epochs, lr, num_input_layers, num_hidden_layers, num_output_layers):
        """Initialize network shape, hyper-parameters, and random weights.

        epochs: number of full-batch training iterations.
        lr: learning rate.
        num_input_layers / num_hidden_layers / num_output_layers:
            neuron counts of the input, hidden, and output layers.
        """
        # Per-epoch training losses, populated by fit().
        self.losses = []
        # Epochs
        self.epochs = epochs
        # Learning rate
        self.lr = lr
        # Random weights and bias initialization (uniform in [0, 1)).
        self.hidden_weights = np.random.uniform(size=(num_input_layers, num_hidden_layers))
        self.hidden_bias = np.random.uniform(size=(1, num_hidden_layers))
        self.output_weights = np.random.uniform(size=(num_hidden_layers, num_output_layers))
        self.output_bias = np.random.uniform(size=(1, num_output_layers))
        # Activation functions; must be set before fit()/predict() via the
        # *_activation_function methods.
        self.hidden_function = None
        self.output_function = None

    def hidden_activation_function(self, hidden_activation_function):
        """Select the hidden-layer activation by name (default: relu)."""
        self.hidden_function = self._ACTIVATIONS.get(hidden_activation_function, relu)

    def output_activation_function(self, output_activation_function):
        """Select the output-layer activation by name (default: softmax)."""
        self.output_function = self._ACTIVATIONS.get(output_activation_function, softmax)

    def activation_function(self, hidden_activation_function, output_activation_function):
        """Select both layer activations at once."""
        self.hidden_activation_function(hidden_activation_function)
        self.output_activation_function(output_activation_function)

    def loss(self, yp, y):
        """Element-wise squared-error loss: 0.5 * (yp - y)**2."""
        return (1 / 2) * np.square(yp - y)

    def forward(self, inputs):
        """Run a forward pass.

        Returns (hidden_layer_output, predicted_output).
        """
        # Hidden Layer
        hidden_layer_activation = np.dot(inputs, self.hidden_weights) + self.hidden_bias
        hidden_layer_output = self.hidden_function.function(hidden_layer_activation)
        # Output Layer
        output_layer_activation = np.dot(hidden_layer_output, self.output_weights) + self.output_bias
        predicted_output = self.output_function.function(output_layer_activation)
        return hidden_layer_output, predicted_output

    def backward(self, hidden_layer_output, predicted_output):
        """Backpropagate the prediction error.

        Returns (d_hidden_layer, d_predicted_output), the error signals
        for the hidden and output layers respectively.
        """
        # Output layer: error carries the sign (target - prediction), so the
        # weight updates in fit() are simple "+=" steps.
        error = self.expected_output - predicted_output
        d_predicted_output = error * self.output_function.derivative(predicted_output)
        # Hidden layer: propagate the output error back through the weights.
        error_hidden_layer = d_predicted_output.dot(self.output_weights.T)
        d_hidden_layer = error_hidden_layer * self.hidden_function.derivative(hidden_layer_output)
        return d_hidden_layer, d_predicted_output

    def fit(self, X, y):
        """Train for self.epochs epochs of full-batch gradient descent.

        X: (n_samples, num_input_layers) input matrix.
        y: (n_samples,) target vector; reshaped to a column internally.
        """
        np.random.seed(0)
        # Keep the training data for later evaluation (info_of_classification).
        self.inputs = X
        self.expected_output = y.reshape(len(y), 1)
        for _ in range(self.epochs):
            # Forward propagation
            hidden_layer_output, predicted_output = self.forward(self.inputs)
            # Backpropagation
            d_hidden_layer, d_predicted_output = self.backward(hidden_layer_output, predicted_output)
            # Updating weights and biases
            self.output_weights += hidden_layer_output.T.dot(d_predicted_output) * self.lr
            self.output_bias += np.sum(d_predicted_output, axis=0, keepdims=True) * self.lr
            self.hidden_weights += self.inputs.T.dot(d_hidden_layer) * self.lr
            self.hidden_bias += np.sum(d_hidden_layer, axis=0, keepdims=True) * self.lr
            # Record the first sample's loss for plotting via draw_loss().
            loss_ = self.loss(self.expected_output, predicted_output)[0]
            self.losses.append(loss_)

    def predict(self, inputs):
        """Return hard 0/1 class predictions (threshold 0.5)."""
        predicted_output = self.forward(inputs)[1]
        predicted_output = np.squeeze(predicted_output)
        return np.where(predicted_output >= 0.5, 1, 0)

    def info_of_classification(self):
        """Print evaluation metrics on the training data and return
        (accuracy, f1, recall, precision)."""
        predicted_output = self.predict(self.inputs)
        print("Accuracy : ", self.accuracy(predicted_output, self.expected_output))
        print("F1 score : ", self.f1_score(predicted_output, self.expected_output))
        print("Recall score : ", self.recall_score(predicted_output, self.expected_output))
        print("Precision score: ", self.precision_score(predicted_output, self.expected_output))
        print("Confusion Matrix : ", self.confusion_matrix(predicted_output, self.expected_output))
        return self.accuracy(predicted_output, self.expected_output), self.f1_score(predicted_output,
                                                                                    self.expected_output), self.recall_score(
            predicted_output, self.expected_output), self.precision_score(predicted_output, self.expected_output)

    def accuracy(self, predicted_output, outputs):
        """Fraction of correct predictions.  (Argument order normalized to
        (y_true, y_pred) like the other metric wrappers; accuracy is
        symmetric, so the result is unchanged.)"""
        return accuracy_score(outputs, predicted_output)

    def confusion_matrix(self, predicted_output, outputs):
        """2x2 confusion matrix (rows: true class, columns: predicted)."""
        return confusion_matrix(outputs, predicted_output)

    def f1_score(self, predicted_output, outputs):
        """Harmonic mean of precision and recall."""
        return f1_score(outputs, predicted_output)

    def recall_score(self, predicted_output, outputs):
        """True-positive rate."""
        return recall_score(outputs, predicted_output)

    def precision_score(self, predicted_output, outputs):
        """Positive predictive value."""
        return precision_score(outputs, predicted_output)

    def draw_loss(self):
        """Return the per-epoch training losses.

        The matplotlib plotting code is intentionally disabled; callers
        plot the returned list themselves.
        """
        # plt.plot(self.losses)
        # plt.xlabel("Epochs")
        # plt.ylabel("Loss")
        # plt.show()
        return self.losses
''' End MultiLayerNN Class'''
| [
"elanssarihamza@gmail.com"
] | elanssarihamza@gmail.com |
77b3624d8526e4ef07358cac1da8fe1215ecf7f2 | d6d30580241c1ae081d5744785ee6e71eeb38e7e | /Varnit/urls.py | 99739e8bfb58a719a3f09a3faa410306569f4df0 | [] | no_license | Varun-Sam/Django_rest_basic | 71c3f0329496d9636cb6edb86ec5448dd64e7d50 | 72751fe9c235dde392b7b16a2192fc2c8d6f5716 | refs/heads/main | 2023-03-20T08:10:48.416173 | 2021-03-12T12:55:33 | 2021-03-12T12:55:33 | 347,065,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | from django.contrib import admin
from django.urls import path, include
from posts import views
# URL routing table: Django admin, the posts REST API, and DRF's
# browsable-API session login/logout views.
urlpatterns = [
    path('admin/', admin.site.urls),
    # List posts / create a new post.
    path('api/posts', views.PostList.as_view()),
    # Retrieve or delete a single post by primary key.
    path('api/posts/<int:pk>', views.PostRetrieveDestroy.as_view()),
    # Cast a vote on a single post.
    path('api/posts/<int:pk>/vote', views.VoteCreate.as_view()),
    path('api-auth/', include('rest_framework.urls')),
]
"varunsam9900@gmail.com"
] | varunsam9900@gmail.com |
d1a4b7c8435c09dd195a6f429c1a2e09a0089e14 | 0fde65201e7a853a9c8c599acc2201c50223bed7 | /file_read_write/read_xml.py | cb5441f44f492c5852804e4bab6974749a6169a0 | [] | no_license | coryeleven/learn_python | 4fb1818093427332835cb74f277041902091fa5c | d5d303c1d9dae54cca6a716f285484f7ec3e0c4d | refs/heads/master | 2023-07-12T06:48:24.447528 | 2021-08-24T08:36:57 | 2021-08-24T08:36:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,353 | py | import xml.sax
class MenuHandler(xml.sax.ContentHandler):
    """SAX content handler that extracts breakfast-menu items.

    Tracks the element currently being parsed in ``CurrentDate`` and stores
    the text of <name>, <price>, <description>, and <calories> elements.

    Bug fixes relative to the original:
    - startElement never recorded the current tag, so characters() stored
      nothing.
    - ``self.price == content`` / ``self.description == content`` were
      comparisons, not assignments.
    - ``endElement`` was misspelled ``endElment``, so SAX never invoked it.
    - The description branch printed ``self.price`` instead of the
      description, and <calories> text was never captured.
    """

    def __init__(self):
        # Name of the element currently being parsed ("" between elements).
        self.CurrentDate = ""
        self.name = ""
        self.price = ""
        self.description = ""
        self.calories = ""

    def startElement(self, tag, attributes):
        # Remember which element we are inside so characters() knows where
        # to store the text content.
        self.CurrentDate = tag
        if tag == "breakfast_menu":
            print("这是一个早餐的菜单")
            year = attributes["year"]
            print(f"年份 {year}\n")

    def characters(self, content):
        # Store text content according to the enclosing element.
        if self.CurrentDate == "name":
            self.name = content
        elif self.CurrentDate == "price":
            self.price = content
        elif self.CurrentDate == "description":
            self.description = content
        elif self.CurrentDate == "calories":
            self.calories = content
        else:
            pass

    def endElement(self, tag):
        # Report the field that just finished parsing.
        if self.CurrentDate == "name":
            print(f"name:{self.name}")
        elif self.CurrentDate == "price":
            print(f"price:{self.price}")
        elif self.CurrentDate == "description":
            print(f"description:{self.description}")
            self.description = ""
        elif self.CurrentDate == "calories":
            print(f"calories:{self.calories}")
        else:
            pass
        self.CurrentDate = ""
if __name__ == "__main__":
    # Wire a SAX parser to our handler and stream-parse the sample menu
    # file from the current working directory.
    parser = xml.sax.make_parser()
    Handler = MenuHandler()
    parser.setContentHandler(Handler)
    parser.parse("example.xml")
"coryeleven@gmail.com"
] | coryeleven@gmail.com |
0eda24f9c8ef5263c0629852ddd731e0c7123d59 | 4066aa884c79854d31f1b8acab56f2262ca9a936 | /mypython.py | a57a735db5fdafac4f079128bdd3764fc11024da | [] | no_license | atingupta2005/practice_1 | 1be5f0db848aa03d776a7f95f5894c40a32ed365 | 8b4c72665333c3231b1e731c55329150fcc919fd | refs/heads/main | 2022-12-20T11:04:02.437258 | 2020-10-06T04:48:36 | 2020-10-06T04:48:36 | 301,612,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | print("Hello World - v7")
# Throwaway practice script: just prints greeting lines (the "v6"/"v7"
# markers track successive edits in the repository's history).
print("It's Me")
print("Hello World - v6")
print("It's Me")
print("My another hello 1")
"atingupta2005@gmail.com"
] | atingupta2005@gmail.com |
b2e6ba7b932bbc26515ee5ba1b2b6011c99c011b | a76cd3e9e90cc835e5937d66e5e6ec9a2e64b9c9 | /src/match.py | c915d13b486564fd1c74b4ff869ce704ecc394f1 | [
"MIT"
] | permissive | cgmv123/CS-534 | 0b7e0b3a81ca370cb5169b7fad3470f241be62b0 | 32c13454eb8b7fc7b5c733eae5d4144805671544 | refs/heads/master | 2020-03-29T07:45:05.165180 | 2018-12-13T18:39:50 | 2018-12-13T18:39:50 | 149,677,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | import numpy as np
import cv2
REPROJ_THRESH = 4
RATIO = .75
def get_matches(lkp, rkp, Lfeatures, Rfeatures):
    """Match feature descriptors between two images and fit a homography.

    lkp / rkp are the keypoint lists and Lfeatures / Rfeatures the
    descriptor matrices of the left and right images.  Returns
    (matches, H, status) where matches is a list of
    (trainIdx, queryIdx) pairs, or None when too few matches survive
    the ratio test.
    """
    bf = cv2.DescriptorMatcher_create("BruteForce")
    candidate_pairs = bf.knnMatch(Lfeatures, Rfeatures, 2)

    # Lowe's ratio test: keep a match only when its best distance is
    # clearly smaller than the second-best distance.
    good = [
        (pair[0].trainIdx, pair[0].queryIdx)
        for pair in candidate_pairs
        if len(pair) == 2 and pair[0].distance < pair[1].distance * RATIO
    ]

    if len(good) <= 4:
        # Not enough correspondences to estimate a homography.
        return None

    src_pts = np.float32([lkp[q].pt for (_, q) in good])
    dst_pts = np.float32([rkp[t].pt for (t, _) in good])
    (H, status) = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, REPROJ_THRESH)
    return (good, H, status)
| [
"jasonmohoney@gmail.com"
] | jasonmohoney@gmail.com |
267b05aca90dd4a5b14ed14a893605455e9c1816 | dba783d28d233ff9885235663ddcbc97a3bcf209 | /preambles.py | a26352550aec5e7a9e806c67f8af834fd6e78986 | [] | no_license | micaleel/explanations | 7b2e49a4c5032b2d3cd71620267b4d5c9d356e13 | 0d4821e6af217b1b4ce300fb1a984789f522e7fe | refs/heads/master | 2021-06-19T20:10:25.018679 | 2017-07-12T16:27:35 | 2017-07-12T16:27:35 | 76,404,377 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,115 | py | import os
import logging
from glob import glob
from pprint import pprint
import pandas as pd
import numpy as np
from scipy import stats
import matplotlib as mpl
from IPython.display import display, set_matplotlib_formats
import matplotlib.pyplot as plt
# Log line layout: timestamp, level, message, then a module:function:line
# locator on a continuation line.
LOG_FORMAT = '[%(asctime)s] %(levelname)s: %(message)s \n\t%(module)s:%(funcName)s:%(lineno)s'
logging.basicConfig(format=LOG_FORMAT)
logging.getLogger().setLevel(logging.INFO)
# Quieten chatty third-party loggers.
logging.getLogger('sqlalchemy.engine').setLevel(logging.WARNING)
logging.getLogger('elasticsearch').setLevel(logging.WARNING)
# logging.getLogger('elasticsearch').setLevel(logging.DEBUG)

# Notebook figure output: render inline figures as both PDF and PNG.
set_matplotlib_formats('pdf', 'png')
plt.rcParams['savefig.dpi'] = 300
plt.rcParams['savefig.bbox'] = "tight"
plt.rcParams['image.cmap'] = 'Set3'
plt.rcParams['image.interpolation'] = "none"

# Limit printed float precision for numpy arrays and pandas frames.
np.set_printoptions(precision=3)
pd.set_option('precision', 3)

# Embed TrueType fonts (type 42) so exported PDF/PS text stays editable.
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
#plt.rcParams["font.family"] = 'Merriweather Sans, Ubuntu'
mpl.rcParams['xtick.labelsize'] = 'small'
mpl.rcParams['ytick.labelsize'] = 'small'

# Module-level root logger handle for convenience.
log = logging.getLogger()
"micaleel@gmail.com"
] | micaleel@gmail.com |
470b4a3e314832b2df1c3c664a216cdd98694203 | 32e85c664d2fd2eae8a21f5bb9f44e328f20ea74 | /ozonesondes_ndacc_ObsPack_plotoutput_v2.py | 124e5ab168a4ee39fddf383bec3916a8b7da993d | [] | no_license | rgryan92/UCL | 87e930dc8e716ea5f688c7baac986e3c8a79626d | 428e87e98827b5ee9c19d075621d0b22402081b4 | refs/heads/main | 2023-04-02T03:24:09.484969 | 2021-04-14T16:32:57 | 2021-04-14T16:32:57 | 323,225,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,437 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 8 11:33:24 2020
@author: rgryan
"""
import pandas as pd
from netCDF4 import Dataset
import matplotlib.pyplot as plt
import glob
## Path to ozonesondes
path = '/Volumes/GoogleDrive/My Drive/Documents/Postdoc/rockets/'
osp = path+'ozonesondes/ndacc_2019_ozonesondes/test/'
osp = path+'ozonesondes/ndacc_2019_ozonesondes/'
outpath = path+'ozonesondes/obsPack_input/'
plotAvgs = True
avgdfs = []
stations = ["dumont d'urville", "South Pole", "Belgrano", "Boulder", "Hilo",
"Santa Cruz", "Natal Brazil" , "Neumayer", "Ny-Aalesund",
"Samoa", "Ittoqqortoormiit", "Wallops Flight Facility"]
def calc_DT(seconds):
    # Convert a sonde "elapsed seconds" value into an absolute timestamp by
    # offsetting from the launch time `startDT` (a module-level variable set
    # per sonde file in the processing loop below).
    return startDT + pd.Timedelta(str(seconds)+' seconds')
def calc_improvement(s,wr,nr):
    """Signed change in model agreement with the sonde value.

    s: sonde observation; wr / nr: with-rockets / no-rockets model values.
    The magnitude is always |wr - nr|; the sign is negative when the
    with-rockets run sits further from the sonde than the no-rockets run
    (i.e. adding rockets made agreement worse).
    """
    model_gap = abs(wr - nr)
    worse_with_rockets = abs(wr - s) > abs(nr - s)
    return -model_gap if worse_with_rockets else model_gap
def improvement_cat(s,wr,nr):
    """Categorize the model-vs-sonde bias for plot coloring.

    s: sonde observation; wr / nr: with-rockets / no-rockets model values.
    Returns 'blue' when both simulations underestimate the sonde value,
    'red' when both overestimate it, and 'orange' for mixed signs.

    Bug fix: the original tested ``wr_s and nr_s < 0``, which Python parses
    as ``wr_s and (nr_s < 0)`` — it only checked the sign of one residual.
    Both residual signs are now checked explicitly.
    """
    wr_s, nr_s = wr-s, nr-s
    if wr_s < 0 and nr_s < 0: # both simulations were less than sonde
        return 'blue'
    elif wr_s > 0 and nr_s > 0: # both simulations were greater than sonde
        return 'red'
    else:
        return 'orange'
## Using glob, this reads all .csv sonde files in the folder
files = glob.glob(osp+'*.*')
for osf in files[:]:
print(osf[len(osp):])
## Use 'Readlines' to find the header of the file and location lat/lon
f = open(osf, "r")
c,flag,footer,l =0, 0, 0, [] # Counters for finding information in file
for line in f:
l.append(line)
c = c+1
try:
if list(filter(None, line.split(' ')))[0] == 's':
header=c-1
flag=1
elif list(filter(None, line.split(' ')))[0] == 'hPa':
header=c-1
flag=1
except IndexError:
continue
if line[:-1] in stations:
#print( ' Station is '+line[:-1] )
station, iGeo = line[:-1], c
if flag>0:
columns = list(filter(None, l[header-1].split(' ')))
if columns[-1] == '\n':
columns = columns[:-1]
else:
columns = columns
#print(columns)
top = list(filter(None, l[0].split(' ')))
if top[-1] == '\n':
top = top[:-1]
else:
top = top
startDT = pd.to_datetime( top[-3:-2][0] + ' '+ top[-2:-1][0][:8],dayfirst=True)
endDT = pd.to_datetime(top[-2:-1][0][8:]+' '+top[-1:][0][:8] ,dayfirst=True)
gf = pd.read_csv(osf, delim_whitespace=True, header=header)
gf.columns = columns
df = pd.DataFrame()
if 'ElapTime' in columns:
df['Duration'] = gf['ElapTime']
else:
df['Duration'] = gf['Time']
if 'Lat' and 'Lon' in columns:
df['lat'], df['lon'] = gf['Lat'], gf['Lon']
elif 'GPSLon' and 'GPSLat' in columns:
df['lat'], df['lon'] = gf['GPSLat'], gf['GPSLon']
else:
geo = list(filter(None, l[iGeo].split(' ')))
df['lat'], df['lon'] = float(geo[4]), float(geo[3])
if 'Press' in columns:
df['Pressure'] = gf['Press']
else:
print('different pressure label')
if 'PO3' in columns:
df['O3_sonde'] = 1e9*((gf['PO3']/1000)/(df['Pressure']*100))
else:
print('different o3 label')
df['Date_Time'] = list(map(calc_DT, df['Duration']))
df.set_index(pd.DatetimeIndex(df['Date_Time'], dayfirst=True), inplace=True)
mth = str(df.index.month[0])
#print('<><><><><> '+date+', '+station+', '+country+' <><><><><>')
try:
gcr1, gcr2, datestr = pd.DataFrame(), pd.DataFrame(), str(startDT)[:10].split('-')
dateplus1 = str(startDT + pd.Timedelta('1 day'))[:10]
nr = Dataset(path+'ozonesondes/obsPack_output/no_rockets/GEOSChem.ObsPack.'+datestr[0]+datestr[1]+datestr[2]+'_0000z.nc4')
wr = Dataset(path+'ozonesondes/obsPack_output/with_rockets/GEOSChem.ObsPack.'+datestr[0]+datestr[1]+datestr[2]+'_0000z.nc4')
gcr1['P_gchem'] = wr.variables['pressure'][:]
gcr1['O3_gcnr'] = nr.variables['O3'][:]*1e9
gcr1['O3_gcwr'] = wr.variables['O3'][:]*1e9
gcr1['lat'], gcr1['lon'] = wr.variables['lat'][:], wr.variables['lon'][:]
gcr1 = gcr1.astype('float')
gcr1['strLat'] = [ str(round(gcr1['lat'][i],1)) for i in range(len(gcr1)) ]
gcr1 = gcr1[gcr1['strLat'] == str(round(float(gcr1['lat'][0]),1)) ]
gcr1 = gcr1[gcr1['P_gchem']>0]
df.set_index(df['Pressure'], inplace=True)
df = df[~df.index.duplicated(keep='first')]
gcr1.set_index(gcr1['P_gchem'], inplace=True)
gcr1 = gcr1[~gcr1.index.duplicated(keep='first')]
ni = df['Pressure'].values.tolist() + gcr1['P_gchem'].values.tolist()
ni = list( dict.fromkeys(ni) )
df_ = df.reindex(ni).sort_index().interpolate()
#df_ = df_.reindex(gcr1['P_gchem'].values.tolist())
gcr1['sonde'] = df_['O3_sonde']
gcr1['wrDiff'] = gcr1['O3_gcwr'] - gcr1['sonde']
gcr1['nrDiff'] = gcr1['O3_gcnr'] - gcr1['sonde']
gcr1['imp'] = list(map(calc_improvement, gcr1['sonde'],gcr1['O3_gcwr'],gcr1['O3_gcnr']))
gcr1['cat'] = list(map(improvement_cat, gcr1['sonde'],gcr1['O3_gcwr'],gcr1['O3_gcnr']))
gcr1['zero'] = 0
imp, son = gcr1['imp'], gcr1['sonde']
gcw, gcn = gcr1['O3_gcwr'], gcr1['O3_gcnr']
if plotAvgs == False:
fig = plt.figure(figsize=(4,3))
h=fig.add_subplot(111)
#h.plot(gcr1['sonde'].values, gcr1['P_gchem'].values, color='orange', zorder=0)
#h.plot(gcr1['O3_gcnr'].values, gcr1['P_gchem'].values, '--', color='darkred', zorder=1)
#h.plot(gcr1['O3_gcwr'].values, gcr1['P_gchem'].values, color='dodgerblue', zorder=2)
#h.scatter(gcr1['wrDiff'].values, gcr1['P_gchem'].values, c='blue',marker='o')
#h.scatter(gcr1['nrDiff'].values, gcr1['P_gchem'].values, c='red', marker='x')
h.scatter(gcr1['imp'].values, gcr1['P_gchem'].values, color=gcr1['cat'].values, marker='x')
h.plot(gcr1['zero'].values, gcr1['P_gchem'].values, '--', color='black' )
try:
wrT =Dataset(path+'ozonesondes/obsPack_output/with_rockets/GEOSChem.ObsPack.'+datestrplus1+'_0000z.nc4')
nrT = Dataset(path+'ozonesondes/obsPack_output/no_rockets/GEOSChem.ObsPack.'+datestrplus1+'_0000z.nc4')
gcr2['P_gchem'] = wrT.variables['pressure'][:]
gcr2['O3_gcnr'] = nrT.variables['O3'][:]*1e9
gcr2['O3_gcwr'] = wrT.variables['O3'][:]*1e9
gcr2['lat'], gcr2['lon'] = wrT.variables['lat'][:], wrT.variables['lon'][:]
gcr2 = gcr2.astype('float')
gcr2['strLat'] = [ str(round(gcr2['lat'][i],1)) for i in range(len(gcr2)) ]
gcr2 = gcr2[gcr2['strLat'] == str(round(float(lat),1)) ]
gcr2 = gcr2[gcr2['P_gchem']>0]
gcr2.set_index(gcr2['P_gchem'], inplace=True)
gcr2 = gcr2[~gcr2.index.duplicated(keep='first')]
ni = df['Pressure'].values.tolist() + gcr2['P_gchem'].values.tolist()
ni = list( dict.fromkeys(ni) )
df_ = df.reindex(ni).sort_index().interpolate()
#df_ = df_.reindex(gcr2['P_gchem'].values.tolist())
gcr2['sonde'] = df_['O3_sonde']
gcr2['wrDiff'] = gcr2['O3_gcwr'] - gcr2['sonde']
gcr2['nrDiff'] = gcr2['O3_gcnr'] - gcr2['sonde']
gcr2['imp'] = list(map(calc_improvement, gcr2['sonde'],gcr2['O3_gcwr'],gcr2['O3_gcnr']))
gcr2['cat'] = list(map(improvement_cat, gcr2['sonde'],gcr2['O3_gcwr'],gcr2['O3_gcnr']))
gcr2['zero'] = 0
imp, son = pd.concat([gcr1['imp'], gcr2['imp']]), pd.concat([gcr1['sonde'], gcr2['sonde']])
gcw, gcn = pd.concat([gcr1['O3_gcwr'], gcr2['O3_gcwr']]), pd.concat([gcr1['O3_gcnr'], gcr2['O3_gcnr']])
if plotAvgs == False:
#h.plot(gcr2['sonde'].values, gcr2['P_gchem'].values, color='orange', zorder=0)
#h.plot(gcr2['O3_gcwr'].values, gcr2['P_gchem'].values, color='dodgerblue', zorder=2)
#h.plot(gcr2['O3_gcnr'].values, gcr2['P_gchem'].values, '--', color='darkred', zorder=2)
#h.scatter(gcr2['wrDiff'].values, gcr2['P_gchem'].values, c='blue', marker='o')
#h.scatter(gcr2['nrDiff'].values, gcr2['P_gchem'].values, c='red', marker='x')
h.scatter(gcr2['imp'].values, gcr2['P_gchem'].values, color=gcr2['cat'].values, marker='x')
h.plot(gcr2['zero'].values, gcr2['P_gchem'].values, '--', color='black')
except FileNotFoundError:
print(' No tomorrow data ')
if plotAvgs == False:
h.set_ylim(5,500)
h.set_yscale('log')
h.set_ylabel('Pressure (hPa)')
h.set_xlabel('O$_3$ diff (ppb)')
plt.gca().invert_yaxis()
#h.legend(['Rockets-Sonde', 'No Rockets-Sonde'], loc='upper right')
h.set_title(station+', '+country+' '+date)
fig.savefig(path+'/ozonesondes/plots/imp_'+station+'_'+country+'_'+date+'.png', bbox_inches='tight')
if plotAvgs==True:
# get space-free station code:
sc = station.replace(' ','')
sc = sc.replace( ")","" )
sc = sc.replace( "(","" )
try:
exec(sc+'_imp_'+mth+'['+datestr+'] = imp')
exec(sc+'_son_'+mth+'['+datestr+'] = son')
exec(sc+'_gcw_'+mth+'['+datestr+'] = gcw')
exec(sc+'_gcn_'+mth+'['+datestr+'] = gcn')
except NameError:
exec(sc+'_imp_'+mth+'=pd.DataFrame()')
exec(sc+'_son_'+mth+'=pd.DataFrame()')
exec(sc+'_gcw_'+mth+'=pd.DataFrame()')
exec(sc+'_gcn_'+mth+'=pd.DataFrame()')
avgdfs_imp.append(sc+'_imp_'+mth)
avgdfs_son.append(sc+'_son_'+mth)
avgdfs_gcn.append(sc+'_gcn_'+mth)
avgdfs_gcw.append(sc+'_gcw_'+mth)
exec(sc+'_imp_'+mth+'['+datestr+'] = imp')
exec(sc+'_son_'+mth+'['+datestr+'] = son')
exec(sc+'_gcn_'+mth+'['+datestr+'] = gcn')
exec(sc+'_gcw_'+mth+'['+datestr+'] = gcw')
except (KeyError, TypeError, ValueError) as error:
print(' -------X---> Data incomplete/cannot be processed at '+station+', '+country)
#%%
if plotAvgs == True:
if len(avgdfs_imp)>0:
for d in range(len(avgdfs_imp)):
exec('n=len('+avgdfs_imp[d]+'.columns)')
exec(avgdfs_imp[d]+'["mean"] = '+avgdfs_imp[d]+'.mean(axis=1)')
exec(avgdfs_imp[d]+'["std"] = '+avgdfs_imp[d]+'.std(axis=1)')
exec(avgdfs_son[d]+'["mean"] = '+avgdfs_son[d]+'.mean(axis=1)')
exec(avgdfs_son[d]+'["std"] = '+avgdfs_son[d]+'.std(axis=1)')
exec(avgdfs_gcn[d]+'["mean"] = '+avgdfs_gcn[d]+'.mean(axis=1)')
exec(avgdfs_gcn[d]+'["std"] = '+avgdfs_gcn[d]+'.std(axis=1)')
exec(avgdfs_gcw[d]+'["mean"] = '+avgdfs_gcw[d]+'.mean(axis=1)')
exec(avgdfs_gcw[d]+'["std"] = '+avgdfs_gcw[d]+'.std(axis=1)')
#fjg = plt.figure(figsize=(4,3))
fig, j = plt.subplots(1, 2, sharey=True, figsize=(6,4))
exec('j[0].errorbar('+avgdfs_son[d]+'["mean"].values, '+avgdfs_son[d]+'.index.values, xerr='+avgdfs_son[d]+'["std"].values ,\
fmt = "o--", color="orange", mfc="yellow")')
exec('j[0].errorbar('+avgdfs_gcn[d]+'["mean"].values, '+avgdfs_gcn[d]+'.index.values, xerr='+avgdfs_gcn[d]+'["std"].values ,\
fmt = "s-", color="red", mfc="white")')
exec('j[0].errorbar('+avgdfs_gcw[d]+'["mean"].values, '+avgdfs_gcw[d]+'.index.values, xerr='+avgdfs_gcw[d]+'["std"].values ,\
fmt = "d-", color="blue", mfc="cyan")')
j[0].set_ylim(5,500)
j[0].set_yscale('log')
j[0].set_xlabel('O$_3$ profile (ppb)')
#plt.gca().invert_yaxis()
j[0].set_title(avgdfs_son[d]+', n='+str(n))
j[0].legend(['Sonde', 'GC_NR', 'GC_WR'], loc='lower right')
exec('j[1].errorbar('+avgdfs_imp[d]+'["mean"].values, '+avgdfs_imp[d]+'.index.values, xerr='+avgdfs_imp[d]+'["std"].values ,\
fmt = "o", color="teal", mfc="white")')
j[1].set_ylim(5,500)
j[1].set_yscale('log')
j[1].set_ylabel('Pressure (hPa)')
j[1].set_xlabel('O$_3$ improvement (ppb)')
plt.gca().invert_yaxis()
j[1].set_title(avgdfs_imp[d]+', n='+str(n))
fig.savefig(path+'/ozonesondes/plots/imp_Avgs_andProfiles_'+avgdfs_imp[d]+'.png', bbox_inches='tight') | [
"noreply@github.com"
] | rgryan92.noreply@github.com |
5613ca4e8a5bcecfff6560fcc92ef64b58fdb49d | 5d97fcb1fffff6837e943c3b50353090531158b5 | /ipToSenseHATBinary.py | 9cc0e3079b37806de5b08b5f635bdfc6e4706df8 | [] | no_license | FrogletApps/Pi_IP_To_Sense_HAT | be818f5adec5f1fa92ee97f538b17de6088504a8 | 68ee9855345bf769359c9b639b98cd8bd9ffa1a0 | refs/heads/master | 2021-10-21T11:32:13.203520 | 2021-10-19T17:45:17 | 2021-10-19T17:45:17 | 121,801,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import socket
import time
from sense_hat import SenseHat
sense = SenseHat()
ipArray = []
ipBinary = []
# Create the binary digits, dot and blank
#Power of the LED (between 0 and 255)
P = 255
# G = Green, R = Red, B = Blue, N = off
G = [0,P,0]
R = [P,0,0]
B = [0,0,P]
N = [0,0,0]
#Digits are green
digits = [
[N,N,N,N], #0
[N,N,N,G], #1
[N,N,G,N], #2
[N,N,G,G], #3
[N,G,N,N], #4
[N,G,N,G], #5
[N,G,G,N], #6
[N,G,G,G], #7
[G,N,N,N], #8
[G,N,N,G] #9
]
#Dot is blue
dot = [B,B,B,B]
#Blank is red
blank = [R,R,R,R]
#Get the IP address (by making connection to google)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("www.google.co.uk",80))
#Put IP into array
ipArray = list(s.getsockname()[0])
#Close connection
s.close()
#find the right binary digit to match the decimal
for digit in ipArray:
if digit.isdigit():
ipBinary += digits[int(digit)]
elif digit == ".":
ipBinary = ipBinary + dot
#if the IP isn't long enough to fill the display then add red squares
while len(ipBinary)<64:
ipBinary = ipBinary + blank
sense.set_pixels(ipBinary)
#Display this on the sense hat display for 30 seconds
time.sleep(30)
sense.clear()
| [
"james_pearson@outlook.com"
] | james_pearson@outlook.com |
a4ce18bccfb3dd46c680d0f8683373cba42c5757 | 7f8b1ddd8894592f514ddb7b818880cb27bf5697 | /range.py | c7d364d6b4572833c2d18872de8bb8699836b437 | [] | no_license | code-with-Aravind/Python | f4642ae3ce6ab735562ee7cdcf5b19670e0d7085 | dbdbdb92e4c5083779d1fffd3ec6a61e9f945882 | refs/heads/main | 2023-07-31T23:17:39.234882 | 2021-09-07T04:06:57 | 2021-09-07T04:06:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | '#'"This program is used to check the no is there in range(0-200)"
x: int = int(input("Enter a number from 0-2000 : "))
if x > 0 or x >= 2000:
print(bool(x))
else:
print(bool())
| [
"noreply@github.com"
] | code-with-Aravind.noreply@github.com |
cb6868cba60e06c785107f454989117d7090dced | 3ee1ba0ffdd8f478c261233c1e38783aec1faf4e | /data.py | 292ac1940780d927584a6cccc0074c5f6c1983d4 | [] | no_license | Wispik/salesfinder_admin | a65b666010c32325319800fc343e1e8f0e5f2819 | 3250f204b16050bc9279520222f6e58d492677a7 | refs/heads/master | 2023-07-23T07:09:13.317680 | 2021-09-03T09:16:09 | 2021-09-03T09:16:09 | 402,712,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,495 | py | catalog_items = [
{
'c_name': 'Женщинам',
'c_id': 1,
'c_has_product':0
},
{
'c_name': 'Бытовая техника',
'c_id': 2,
'c_has_product':0
},
{
'c_name': 'Для ремонта',
'c_id': 3,
'c_has_product':0
},
{
'c_name': 'Мужчинам',
'c_id': 4,
'c_has_product':0
},
{
'c_name': 'Спорт',
'c_id': 5,
'c_has_product':0
},
{
'c_name': 'Дом',
'c_id': 6,
'c_has_product':0
},
{
'c_name': 'Детям',
'c_id': 7,
'c_has_product':0
},
{
'c_name': 'Красота',
'c_id': 8,
'c_has_product':0
},
{
'c_name': 'Автотовары',
'c_id': 9,
'c_has_product':0
},
{
'c_name': 'Детская электроника',
'c_id': 10,
'c_has_product':0
},
{
'c_name': 'Игрушки',
'c_id': 11,
'c_has_product':0
},
{
'c_name': 'Ювелирные изделия',
'c_id': 12,
'c_has_product':0
},
{
'c_name': 'Обувь',
'c_id': 13,
'c_has_product':0
},
{
'c_name': 'Продукты',
'c_id': 14,
'c_has_product':0
},
{
'c_name': 'Товары для взрослых',
'c_id': 15,
'c_has_product':0
},
{
'c_name': 'Аксессуары',
'c_id': 16,
'c_has_product':0
},
{
'c_name': 'Зоотовары',
'c_id': 17,
'c_has_product':0
},
{
'c_name': 'Канцтовары',
'c_id': 18,
'c_has_product':0
},
{
'c_name': 'Электроника',
'c_id': 19,
'c_has_product':0
},
{
'c_name': 'Здоровье',
'c_id': 20,
'c_has_product':0
},
{
'c_name': 'Premium',
'c_id': 21,
'c_has_product':0
}
]
catalog_items_1 = [
{
'c_name': 'Одежда',
'c_id': 100,
'c_has_product':10
},
{
'c_name': 'Большие размеры',
'c_id': 101,
'c_has_product':10
},
]
catalog_items_100 = [
{
'c_name': 'Блузки и рубашки',
'c_id': 1000,
'c_has_product':10
},
{
'c_name': 'Блузка',
'c_id': 1001,
'c_has_product':10
},
{
'c_name': 'Блузка-боди',
'c_id': 1002,
'c_has_product':10
},
{
'c_name': 'Рубашка',
'c_id': 1003,
'c_has_product':10
},
{
'c_name': 'Брюки',
'c_id': 1004,
'c_has_product':10
},
{
'c_name': 'Бриджи',
'c_id': 1005,
'c_has_product':10
},
] | [
"wispik71@yandex.ru"
] | wispik71@yandex.ru |
c0877e9a0ede9a94b1a0f1346969e05be4a567a3 | 20ec676e682da3155ba76d6c45b4109f2e7044bb | /Lab7/lab7assignmentEvenstad.py | e56bafdc35cd1097c88d460ddcd45bc7ce4fedd6 | [] | no_license | ryane22/CS160 | 159e50ff9751a2f74796cbb85d8a88bed07b1106 | 785a28f3c1c09aeaa09a00ef9df2f2fda9feae86 | refs/heads/master | 2016-09-08T01:55:57.694357 | 2014-02-04T03:04:33 | 2014-02-04T03:04:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,863 | py | #need things in (here)
def menuChoice():
choice = raw_input("Enter your choice: ")
if choice == "s" or choice == "o" or choice == "n" or choice == "a":
return choice
def confirm():
areYouSure = raw_input("Are you sure you want to choose", choice + "(yes or no):")
areYouSure = areYouSure.strip()
if areYouSure == "yes":
return True
elif areYouSure == "no":
return False
def isAFactor():
number1 = input("Enter a first number: ")
number2 = input("Enter a second number: ")
if number2%number1 == 0:
return True
else:
return False
#look at this more
'''
def endsWithPeriod():
.endsWith(".")
'''
def getPercentage():
number = input("Enter a number: ")
if number >= 0 and number <= 100:
return number
def getIntWithinRange():
minNum = input("Enter the minimum: ")
maxNum = input("Enter the maximum: ")
number = input("Enter a number: ")
if number >= minNum and number <= maxNum:
return number
#need values to return
def letterGrade():
value = input("Enter a number: ")
value = int(value)
if value >= 90:
return
elif value >= 80:
return
elif value >= 70:
return
elif value >= 60:
return
else:
return
#look through this carefully
def main():
print "the menu choice function will execute until the user selects a, \nthen the program will move on to the next function."
while True:
choice = menuChoice("Select an option (a, n, s, or o) ")
if choice == 's' or choice == 'n' or choice == 'o' or choice == 'a':
print "valid menu choice"
else:
print "invalid menu choice - check menuChoice function"
if choice == "a":
break
print
print
print "checking confirm function - the function shoud only accept yes or no, otherwise it asks again for confirmation"
choice = confirm("Continue with program (yes/no)? ")
print "You entered ", choice
if not (choice != "yes" or choice != "no"):
print "Invalid option allowed"
print
print
val1 = input ("enter the first value ")
val2 = input ("enter the second value ")
if isAFactor (val1, val2):
print "your function indicates that", val1, "is a factor of ", val2
else:
print "your function indicates that", val1, "is not a factor of ", val2
print
print
str1 = "This is a sentance without a period at the end"
str2 = "This is a sentance witha period at the end."
str1 = endWithPeriod (str1)
str2 = endWithPeriod (str2)
print "The next two lines should be identical"
print str1
print str2
print
print
print "the getPercetage function will execute until the user enters a value between 0 and 100.\nThe program will contineu to accept integers until the user enters 0,\nthen it will move on to the next function."
while True:
value = getPercentage("Enter an integer between 0 and 100 ")
if value < 0 or value > 100:
print "getPercentage isn't working - it accepted ", value
if value == 0:
break
print
print
minVal = 40
maxVal = 119
print "the getIntWithinRange function will execute until the user enters a value between the min and max value.\nThe program will contineu to accept integers until the user enters the min value,\nthen it will move on to the next function."
while True:
value = getIntWithinRange("Enter an integer between " + str(minVal) + " and " + str(maxVal), minVal, maxVal)
if value < minVal or value > maxVal:
print "getIntWithinRange isn't working - it accepted ", value
if value == minVal:
break
print
print
print "checking the letter grades for percentage between 50 and 100"
for percentage in xrange (50, 101):
print "%3d %s" % (percentage, letterGrade (percentage))
main()
| [
"ryan.evenstad22@gmail.com"
] | ryan.evenstad22@gmail.com |
c5d36dfa0ef5a51485fc9cdfad31cd0cf1297aa1 | fe2d6311f38922633eba5f0dfb7c3f23b5fa14fc | /blogproject/blog/migrations/0001_initial.py | 9602aa268363bf0302f0b2684e543b016bc64972 | [] | no_license | sakura1357/django-blog-learning | ec4a38a5bc69e184c8d954dcb5293a67e0f3c283 | 6490f021a8fd748165aeb0ee5b53a3369b1f4b97 | refs/heads/master | 2020-12-10T03:25:24.680467 | 2017-08-01T08:36:50 | 2017-08-01T08:36:50 | 95,524,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,881 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-21 02:07
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=70)),
('body', models.TextField()),
('created_time', models.DateTimeField()),
('modified_time', models.DateTimeField()),
('excerpt', models.CharField(blank=True, max_length=200)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category')),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.AddField(
model_name='post',
name='tags',
field=models.ManyToManyField(blank=True, to='blog.Tag'),
),
]
| [
"sakura1357@126.com"
] | sakura1357@126.com |
41dffe4b35b7d4e2173df0741091741647a618b5 | 4a30357a12371e20817968ee1e66ffb294fdd41d | /django_admin_rq/models.py | 40517f280240075151408475736849bb677a210b | [
"MIT"
] | permissive | Proper-Job/django-admin-rq | 42ddade8ce61ed7beeb2b100ffc4e8e63244f7a3 | ca7c62ada8f20de9e4635486d6ba5299d5514e8b | refs/heads/master | 2021-01-01T05:43:32.834250 | 2017-11-02T13:15:40 | 2017-11-02T13:15:40 | 56,675,158 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,583 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import uuid
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import six
from django.utils.six import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
STATUS_QUEUED = 'QUEUED'
STATUS_STARTED = 'STARTED'
STATUS_FINISHED = 'FINISHED'
STATUS_FAILED = 'FAILED'
STATUS_CHOICES = (
(STATUS_QUEUED, _('Queued')),
(STATUS_STARTED, _('Started')),
(STATUS_FINISHED, _('Finished')),
(STATUS_FAILED, _('Failed')),
)
def _get_uuid():
return uuid.uuid4().hex
@python_2_unicode_compatible
class JobStatus(models.Model):
"""
A model to save information about an asynchronous job
"""
created_on = models.DateTimeField(auto_now_add=True)
progress = models.PositiveIntegerField(default=0)
job_id = models.CharField(max_length=255, default='')
job_uuid = models.CharField(max_length=255, default=_get_uuid)
status = models.CharField(max_length=128, choices=STATUS_CHOICES, default=STATUS_QUEUED)
result = models.TextField(default='')
failure_reason = models.TextField(default='')
def __str__(self):
return self.job_uuid
def url(self):
return reverse('admin-rq-job-status', kwargs={'job_uuid': self.job_uuid})
def start(self, save=True):
self.status = STATUS_STARTED
if save:
self.save()
def finish(self, save=True):
self.status = STATUS_FINISHED
if save:
self.save()
def fail(self, save=True):
self.status = STATUS_FAILED
if save:
self.save()
def set_job_id(self, job_id, save=True):
self.job_id = job_id
if save:
self.save()
def set_result(self, result, save=True):
if isinstance(result, six.string_types):
self.result = result
if save:
self.save()
else:
raise ValueError('Result must be a string type.')
def set_progress(self, progress, save=True):
self.progress = int(progress)
if save:
self.save()
def is_queued(self):
return self.status == STATUS_QUEUED
def is_started(self):
return self.status == STATUS_STARTED
def is_finished(self):
return self.status == STATUS_FINISHED
def is_failed(self):
return self.status == STATUS_FAILED
class Meta:
ordering = ('-created_on', )
verbose_name = _('Job status')
verbose_name_plural = _('Job statuses')
| [
"moritz.pfeiffer@alp-phone.ch"
] | moritz.pfeiffer@alp-phone.ch |
fb9d79f2cad9fe16907f5b71389cc6c45915d5ba | 32fc5355d73a5098140ca404b139bde7434de947 | /LGTK.py | 745d43ac92b76bda63e4705bfa7149168ec4d7e1 | [] | no_license | Mreza-84/stiran | 1516b20df613e6d15793768190d4321d02e553e1 | b08c5609ce1bd38ef5f08951e50444e6f7db982d | refs/heads/master | 2020-09-25T21:56:41.924997 | 2019-12-05T12:54:28 | 2019-12-05T12:54:28 | 226,097,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,018 | py | '''
for change byte format to str we can use :
str.encode("str")
'''
from tkinter import *
import dbm
import pickle
import os
def Login ():
#Tk
Sc.destroy()
Ls = Tk()
Ls.resizable(0, 0)
Ls.geometry("500x500")
Ls.title("STIRAN")
Ls.configure(background='black')
Label(Ls,text="Hello!!!\nHere is Login Page!!\nLogin now ! ",fg = "white",bg = "black",
font = "tahoma 16 bold italic")
Label(Ls,text="Name : ",font = "tahoma 16 bold italic",fg = "white"
,bg = "black" ).grid(row=0,column=1)
N = Entry(Ls)
N.grid(row=0,column=2)
Ls.mainloop()
C1 = 0
C = 5
try:
N = input("Username : ")
F = open((str(N)+".txt"),"r+")
P = input("Password : ")
PS = F.readline()
F.close()
if str(pickle.dumps(P)) == PS:
print("Welcome")
else:
while C1 != C :
C1 = C1 + 1
print("Wrong Password")
print("Try again")
P = input("Enter Your Password Again : ")
if P == PS:
break
print("Try again")
main()
except:
print("There is some thing wrong *___*")
print("You don't have any account")
print("Create a new one")
register()
def register():
if os.path.exists(str(N)+'.txt') == False :
P = input("Password : ")
PS = input("Confirm Your Password : ")
if PS == P :
F = open((str(N)+".txt"),"w+")
F.write(str(pickle.dumps(P)))
F.close()
print("Register Successfull")
print("Now login")
Login()
else:
print("Try again")
register()
else:
print("File has alredy exists\nTry again")
main()
def main ():
global Sc
#TK
Sc = Tk()
Sc.resizable(0, 0)
Sc.geometry("500x500")
Sc.title("STIRAN")
Sc.configure(background='black')
Label(Sc,text="Welcome to STIRAN\nRegister or login ?",fg = "white",bg = "black",
font = "tahoma 16 bold italic").pack()
######################################
button = Button(Sc, text='Register',compound = CENTER, width=20,font = "tahoma 16 bold ", bg = 'red',
command= register)
button.bind('<Button-1>')
button.pack()
######################################
button1 = Button(Sc, text='Login',compound = CENTER, width=20,font = "tahoma 16 bold "
, bg = 'red',command= Login )
button1.bind('<Button-1>')
button1.pack()
######################################
Exit =Button(Sc, text='Exit',compound = CENTER, width=20,font = "tahoma 16 bold "
, bg = 'red',command= Sc.destroy )
button1.bind('<Button-1>')
Exit.pack(side = "bottom")
mainloop()
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | Mreza-84.noreply@github.com |
78e042f96edcae4e0a847c8e8e83cd9fc8227a79 | 9a5f3bf9bb9dcbc8d827a7cadb4be282f96fda66 | /tools/wafcheck/wafw00f/plugins/wzb360.py | 2c06013deda04f4061c938724877b157b7a227b2 | [] | no_license | M3g4Byt3/AUTO-EARN | 484a6d30204f8d7db235730ca0fcde6d0b608025 | 0ae4a5a670a3cfd9a06bf2ee2fa3675348789139 | refs/heads/master | 2022-08-29T14:54:09.849417 | 2020-05-25T14:20:15 | 2020-05-25T14:20:15 | 270,321,724 | 1 | 0 | null | 2020-06-07T13:54:12 | 2020-06-07T13:54:12 | null | UTF-8 | Python | false | false | 409 | py | #!/usr/bin/env python
NAME = '360WangZhanBao (360 Technologies)'
def is_waf(self):
if self.matchheader(('X-Powered-By-360WZB', '.+')):
return True
for attack in self.attacks:
r = attack(self)
if r is None:
return
response, responsepage = r
if response.status == 493 and b'/wzws-waf-cgi/' in responsepage:
return True
return False | [
"394103765@qq.com"
] | 394103765@qq.com |
eee5d28a6868969b487511275ba079d06d1b2115 | 1b0a583725122fdecb444b19c69193043e291a12 | /lib/tests/streamlit/text_input_test.py | c269928213b1a322cae8fffb8984517b9586bc4c | [] | no_license | monchier/streamlit | 016f269d1890dc548f2fc5a8606edc09d0ee4464 | 01c57e1d1cd87d7265b3fefcc9de47481189a8a4 | refs/heads/master | 2020-07-11T10:35:35.049030 | 2019-08-24T00:49:20 | 2019-08-24T00:49:20 | 204,515,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,087 | py | # Copyright 2019 Streamlit Inc. All rights reserved.
"""text_input unit test."""
import re
from tests import testutil
import streamlit as st
class TextInputTest(testutil.DeltaGeneratorTestCase):
"""Test ability to marshall text_input protos."""
def test_just_label(self):
"""Test that it can be called with no value."""
st.text_input('the label')
c = self.get_delta_from_queue().new_element.text_input
self.assertEqual(c.label, 'the label')
self.assertEqual(c.value, '')
def test_value_types(self):
"""Test that it supports different types of values."""
arg_values = ['some str', 123, None, {}, SomeObj()]
proto_values = ['some str', '123', 'None', '{}', '.*SomeObj.*']
for arg_value, proto_value in zip(arg_values, proto_values):
st.text_input('the label', arg_value)
c = self.get_delta_from_queue().new_element.text_input
self.assertEqual(c.label, 'the label')
self.assertTrue(re.match(proto_value, c.value))
class SomeObj(object):
pass
| [
"noreply@github.com"
] | monchier.noreply@github.com |
04ac76de2d84ceff4a6fb695071ff71b08cd5714 | 28129a9c44f3891eb5b3ce8c7fc530252b1c3840 | /codewars.com/openOrSenior.py | 2c97938ac775e6af76f13b0bd65400cce18da053 | [] | no_license | ngocyen3006/learn-python | 55eeb221f5a836ebee8c197fc3fddf6c585f02a6 | ec2f35a87f846385f7353e7ef4900e5f80cfdb0a | refs/heads/master | 2020-03-26T16:35:59.151230 | 2019-05-08T07:26:50 | 2019-05-08T07:26:50 | 145,112,258 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | # https://www.codewars.com/kata/5502c9e7b3216ec63c0001aa/train/python
def openOrSenior(data):
res = []
for element in data:
if element[0] >= 55 and element[1] > 7:
res.append("Senior")
else:
res.append("Open")
return res
| [
"ngocyen300693@gmail.com"
] | ngocyen300693@gmail.com |
82f0b626024a3995576eaa4543e9340d049bcf38 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /Kk5Ku4CtipaFtATPT_15.py | 165030897c712ef9ea7845e18fbdf381cb522de3 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | """
* "coconuts" has 8 letters.
* A byte in binary has 8 bits.
* A byte can represent a character.
We can use the word "coconuts" to communicate with each other in binary if we
use upper case letters as 1s and lower case letters as 0s.
**Create a function that translates a word in plain text, into Coconut.**
### Worked Example
The binary for "coconuts" is
01100011 01101111 01100011 01101111 01101110 01110101 01110100 01110011
c o c o n u t s
Since 0s are lowercase and 1s are uppercase, we can map the binary like this.
cOConuTS cOCoNUTS cOConuTS cOCoNUTS cOCoNUTs cOCOnUtS cOCOnUts cOCOnuTS
coconut_translator("coconuts") ➞ "cOConuTS cOCoNUTS cOConuTS cOCoNUTS cOCoNUTs cOCOnUtS cOCOnUts cOCOnuTS"
### Examples
coconut_translator("Hi") ➞ "cOcoNuts cOCoNutS"
coconut_translator("edabit") ➞ "cOConUtS cOConUts cOConutS cOConuTs cOCoNutS cOCOnUts"
coconut_translator("123") ➞ "coCOnutS coCOnuTs coCOnuTS"
### Notes
* All inputs will be strings.
* Make sure to separate the _coconuts_ with spaces.
"""
def coconut_translator(txt):
txt = [bin(i)[2:].zfill(8) for i in bytearray(txt, "utf8")]
output = ""
for i in txt:
for j, k in zip(i, "coconuts"):
if j == "1":
output += k.upper()
else:
output += k
output += " "
return output.strip()
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
50873a19625f685c0a0edd8193f0b37020193385 | b923fa34a18562cb565557111d4a14805121643a | /flo_core/src/tty_keyboard_teleop.py | b77ecf1266cbea47c08b40853505c87153ca88ee | [] | no_license | nattapas/LilFloSystem | ce9d7d4f69af613968c1dbc47cb3d968fcf3be85 | 014ac48543b2d26ebb1952a821b0613635adf828 | refs/heads/master | 2023-06-14T13:14:30.158653 | 2021-06-23T03:25:42 | 2021-06-23T03:25:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | #!/usr/bin/env python
# pylint: skip-file
import rospy
import actionlib
from tts.msg import SpeechGoal, SpeechAction
import json
if __name__ == '__main__':
rospy.init_node('tts_keyboard_teleop')
rospy.loginfo('Node Up')
client = actionlib.SimpleActionClient('tts', SpeechAction)
client.wait_for_server()
rospy.loginfo('TTS Available')
while not rospy.is_shutdown():
to_say = raw_input("enter something to say:")
goal = SpeechGoal()
goal.text = str(to_say)
goal.metadata = json.dumps({"voice_id": "Ivy"})
# could also include parameters here: https://github.com/aws-robotics/tts-ros1/blob/23e7aa554e7c4717de15fbf6c28dd090b3cb89df/tts/src/tts/synthesizer.py#L131
client.send_goal(goal)
rospy.loginfo('sent command to robot')
client.wait_for_result()
rospy.loginfo('done speaking')
| [
"mjsobrep@live.com"
] | mjsobrep@live.com |
c371e404237fccd6bcdc3502904805dcc0fb1ffa | 5ba7d312159aae12958c83322448230b400a31ce | /mysite/urls.py | ffd5f87a68f2460a02e6255886aa3703fcb183ae | [] | no_license | vetrys/IntelliDress | 645ae02228e4048a0f27db25175d3c656ab05261 | 6218a0794290a7f535e7fb1354811a630a1cb8ee | refs/heads/master | 2021-04-06T06:05:20.272851 | 2018-03-13T16:47:44 | 2018-03-13T16:47:44 | 124,891,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 844 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'', include('IntelliDress.urls')),
]
| [
"vetrys@allegheny.edu"
] | vetrys@allegheny.edu |
53edc9350df1a12d13aaf7e0da8c5ffd34c8aefb | 246b1cf80e464e68b1b1a2b80f90de65fff2fc4f | /transformer_pytorch/model/embedding/position.py | 87b511c08dc6c8788ec1580747c83d08b9ea536e | [
"Apache-2.0"
] | permissive | walkacross/transformer-pytorch | 42b9f74dcf79be80b42b7c323de7042c8518b182 | 6aa06889f6d10fceb5587b47aba15e65fa305074 | refs/heads/master | 2020-07-26T20:34:16.006426 | 2020-07-11T01:27:24 | 2020-07-11T01:27:24 | 208,758,644 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | import torch.nn as nn
import torch
import math
class PositionalEmbedding(nn.Module):
def __init__(self, d_model, max_len=512):
super().__init__()
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model).float()
pe.require_grad = False
position = torch.arange(0, max_len).float().unsqueeze(1)
div_term = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
return self.pe[:, :x.size(1)]
| [
"yujiangallen@126.com"
] | yujiangallen@126.com |
5d165bbd60983180c3a5a4a697818d2f46eba03a | 3a7d1b8898cde33dfa140f595a51868dc7734719 | /0x11-python-network_1/1-hbtn_header.py | ef51d9f31d496ad64c3b867756f4ca6d8c5f6581 | [] | no_license | nickuchida/holbertonschool-higher_level_programming | c27a5e259d90f4b1f43d69e1b3cefcdafca4cc83 | 92f8c5c3e178475b8e6c1098915ddbb88175253c | refs/heads/master | 2020-09-29T04:53:19.048770 | 2020-04-14T01:30:20 | 2020-04-14T01:30:20 | 226,956,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | #!/usr/bin/python3
''' takes in a URL, sends a request to the URL and displays the
value of the X-Request-Id variable found in the header of the response'''
import sys
from urllib.request import urlopen
if __name__ == '__main__':
    # Fetch the URL given as the first CLI argument and print the value of
    # the X-Request-Id header found in the response.
    target = sys.argv[1]
    with urlopen(target) as resp:
        print(resp.headers['X-Request-Id'])
| [
"nicholas.y.uchida@gmail.com"
] | nicholas.y.uchida@gmail.com |
24bc2c05a04779de581841f8c5d183ee95b83081 | dae2deea56610239e84213d4de8ff5b183b6b639 | /DeepLearningStudy/day_02/test01.py | 1d822a6d20a1d940ea0e19425c494fe278e8a9b9 | [] | no_license | 852251748/pytorchStudy | f76fbd0529d1197535b0ffe7a6a75d5a64d755eb | 3b865c814bf51a1946d628bb75c7cec10fe12590 | refs/heads/master | 2022-12-05T18:14:00.549552 | 2020-09-03T06:27:20 | 2020-09-03T06:27:20 | 274,569,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | import torch
from torch import nn
if __name__ == '__main__':
    # (Conv2d demo kept commented out: "2-D data" here means images in NCHW layout.)
    # conv = nn.Conv2d(3, 16, 3, 1, padding=1)
    # x = torch.randn(1, 3, 10, 10)
    # print(conv(x).shape, conv(x))
    criterion = nn.CrossEntropyLoss()
    logits = torch.randn(3, 5, requires_grad=True)
    labels = torch.empty(3, dtype=torch.long).random_(5)
    print(logits.shape, labels.shape)
    ce_loss = criterion(logits, labels)
    print(ce_loss)
    # ce_loss.backward()
"123456789@qq.com"
] | 123456789@qq.com |
54408b4b27ec1d2509fea9290864d04954e06d4c | 5354b0689486628abff90e60541226aafe6d2d0c | /behav/WanderIM_hddm_stimcoding_behavonly_probes_vig_nolocalseep.py | 41273e2e6ca7fa3842aaa025c0f87e1329a50c1b | [] | no_license | aung2phyowai/wanderIM | cf0380163d2bef2773c256143a112bf022bb1b44 | 176724bde9258d3a28d7ceea5aa0843928239130 | refs/heads/master | 2023-05-31T15:54:22.842039 | 2021-06-16T09:46:17 | 2021-06-16T09:46:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,175 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 23 16:36:16 2018
@author: tand0009
"""
## Import required packages
import hddm
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
from kabuki.analyze import gelman_rubin
# Load data from csv file into a NumPy structured array
# --- Load and clean probe-level behavioural data ------------------------------
data = hddm.load_csv('/Users/tand0009/Data/WanderIM/behav/WanderIM_ProbeResults2.txt')
data.columns = ['subj_idx','nblock','nprobe','task','ntrial','look','state','orig','awa','int','eng','perf','vig','corr','rt','stim','dprobe','response','stimulus','vigC','cond_v']
#data = hddm.load_csv('/Users/tand0009/Data/WanderIM/behav/WanderIM_TestResults2.txt')
#data.columns = ['subj_idx','nblock','task','ntrial','stimid','correctness','rt','stimulus','response','cond_v']
# Keep trials close enough to the probe; the window differs per stimulus type.
data = data[np.logical_or(np.logical_and(data.dprobe > -19,data.stimulus ==1),np.logical_and(data.dprobe > -3,data.stimulus ==0))]
data.fillna(999, inplace=True)
# Drop implausibly fast responses (< 200 ms).
data = data[data.rt > 0.2]
# Create histogram of RTs by subject, looks odd due to dummy coding of nogo response.
data = hddm.utils.flip_errors(data)
data = data[data.state != 4]
data["state"] = data["state"].astype('category')
#plt.savefig('hddm_RThists_bysubj.pdf')
# Split by task: 2 = digit task (analysed below), 1 = face task.
dataD = data[data.task == 2]
dataF = data[data.task == 1]
fig = plt.figure()
ax = fig.add_subplot(111, xlabel='RT', ylabel='count', title='RT distributions')
for i, subj_data in dataD.groupby(['vigC']):
    subj_data.rt.hist(bins=25, histtype='step', ax=ax)
# HDDMStimCoding Model Subclass enables you to specify your bounds as go/no-go, as opposed to correct/incorrect. This allows the estimation of both z bias and v bias.
# Code below fits the full model where session (condition) influences all parameters, tends to have the lowest DIC.
modelD = hddm.HDDMStimCoding(dataD, include={'z'}, stim_col= 'stimulus', split_param='z', depends_on={'v': 'cond_v', 'a': 'vigC', 't': 'vigC', 'z': 'vigC'}, p_outlier=.05)
modelD.find_starting_values()# Create model and start MCMC sampling
modelD.sample(10000, burn=1000, dbname='hddm_stim.db', db='pickle')
#modelD.save('hddm_stim')
modelD.print_stats()
modelD.plot_posteriors(save=False)
modelD.plot_posterior_predictive()
modelD.plot_posteriors_conditions()
# Plot posterior probabilities for parameters and determine overlap
z_1, z_2 = modelD.nodes_db.node[['z(0)', 'z(1)']]
hddm.analyze.plot_posterior_nodes([z_1, z_2])
plt.xlabel('Response Bias z')
plt.ylabel('Posterior probability')
plt.title('Posterior of response bias between timepoints')
print("P(z(2) > z(1)) = ", (z_1.trace() > z_2.trace()).mean())
a_1, a_2 = modelD.nodes_db.node[['a(0)', 'a(1)']]
hddm.analyze.plot_posterior_nodes([a_1, a_2])
plt.xlabel('Boundary setting a')
plt.ylabel('Posterior probability')
plt.title('Posterior of boundary setting between timepoints')
print("P(a(2) > a(1)) = ", (a_1.trace() > a_2.trace()).mean())
t_1, t_2 = modelD.nodes_db.node[['t(0)', 't(1)']]
hddm.analyze.plot_posterior_nodes([t_1, t_2])
plt.xlabel('NDT')
plt.ylabel('Posterior probability')
plt.title('Posterior of NDT between timepoints')
print("P(t(2) < t(1)) = ", (t_1.trace() < t_2.trace()).mean())
v_g1, v_g2, v_ng1, v_ng2 = modelD.nodes_db.node[['v(go_alert)', 'v(go_drowsy)','v(nogo_alert)', 'v(nogo_drowsy)']]
hddm.analyze.plot_posterior_nodes([v_g1, v_g2, v_ng1, v_ng2])
plt.xlabel('Drift')
plt.ylabel('Posterior probability')
plt.title('Posterior of Drift between timepoints')
print("P(v(go_alert) > v(go_drowsy)) = ", (v_g1.trace() > v_g2.trace()).mean())
print("P(v(nogo_alert) > v(nogo_drowsy)) = ", (v_ng1.trace() > v_ng2.trace()).mean())
plt.plot((v_g1.trace() + v_ng1.trace()))
plt.plot((v_g2.trace() + v_ng2.trace()))
fig = plt.figure()
# dc = summed go + nogo drift traces per condition (blue = alert, red = drowsy).
dc1=(v_g1.trace() + v_ng1.trace())
dc2=(v_g2.trace() + v_ng2.trace())
plt.hist(dc1,color='blue')
plt.hist(dc2,color='red')
# Posterior means/SDs. NOTE: the *_1 names hold the drowsy (*_2 trace) values
# and *_0 the alert (*_1 trace) values — the suffix indexes condition, not the
# variable number.
mean_v_g1=(v_g2.trace()).mean()
mean_v_g0=(v_g1.trace()).mean()
mean_v_ng1=(v_ng2.trace()).mean()
mean_v_ng0=(v_ng1.trace()).mean()
mean_z_1=(z_2.trace()).mean()
mean_z_0=(z_1.trace()).mean()
mean_a_1=(a_2.trace()).mean()
mean_a_0=(a_1.trace()).mean()
mean_t_1=(t_2.trace()).mean()
mean_t_0=(t_1.trace()).mean()
std_v_g1=(v_g2.trace()).std()
std_v_g0=(v_g1.trace()).std()
std_v_ng1=(v_ng2.trace()).std()
std_v_ng0=(v_ng1.trace()).std()
std_z_1=(z_2.trace()).std()
std_z_0=(z_1.trace()).std()
std_a_1=(a_2.trace()).std()
std_a_0=(a_1.trace()).std()
std_t_1=(t_2.trace()).std()
std_t_0=(t_1.trace()).std()
# Bar positions: pairs (alert, drowsy) around x = 1..5 for v_go, v_nogo, z, a, t.
x_pos=np.array([1,1,2,2,3,3,4,4,5,5])+np.array([-0.2,0.2,-0.2,0.2,-0.2,0.2,-0.2,0.2,-0.2,0.2])
xtick_pos=np.array([1,2,3,4,5])
xtick_labels = ['v_{go}', 'v_{nogo}', 'z', 'a', 't']
means = [mean_v_g0, mean_v_g1, mean_v_ng0, mean_v_ng1, mean_z_0, mean_z_1, mean_a_0, mean_a_1, mean_t_0, mean_t_1]
errors = [std_v_g0, std_v_g1, std_v_ng0, std_v_ng1, std_z_0, std_z_1, std_a_0, std_a_1, std_t_0, std_t_1]
fig, ax = plt.subplots()
ax.bar(x_pos, means, yerr=errors, align='center', alpha=0.5, ecolor='black', capsize=10, width=0.38, color=['blue','red','blue','red','blue','red','blue','red','blue','red'])
ax.set_ylabel('HDDM Parameters')
ax.set_xticks(xtick_pos)
ax.set_xticklabels(xtick_labels)
ax.axhline(y=0,color='black',linewidth=0.5)
ax.set_title('Subj Rating informed HDDM')
ax.yaxis.grid(True)
# Drift-rate parameters only.
fig, ax = plt.subplots()
ax.bar(x_pos[0:4], means[0:4], yerr=errors[0:4], align='center', alpha=0.5, ecolor='black', capsize=10, width=0.38, color=['blue','red','blue','red'])
ax.set_ylabel('HDDM Parameters')
ax.set_xticks(xtick_pos[0:2])
ax.set_xticklabels(xtick_labels[0:2])
ax.set_title('Subj Rating informed HDDM')
ax.axhline(y=0,color='black',linewidth=0.5)
fig.set_size_inches(4,4)
plt.tight_layout()
plt.savefig('figures/HDDM_subj_v.png')
# z, a, t parameters only.
fig, ax = plt.subplots()
ax.bar(x_pos[4:11], means[4:11], yerr=errors[4:11], align='center', alpha=0.5, ecolor='black', capsize=10, width=0.38, color=['blue','red','blue','red','blue','red'])
ax.set_ylabel('HDDM Parameters')
ax.set_xticks(xtick_pos[2:6])
ax.set_xticklabels(xtick_labels[2:6])
ax.set_title('Subj Rating informed HDDM')
ax.axhline(y=0,color='black',linewidth=0.5)
fig.set_size_inches(6,4)
plt.tight_layout()
plt.savefig('figures/HDDM_subj_zat.png')
| [
"thomas.andrillon@gmail.com"
] | thomas.andrillon@gmail.com |
e9192700af5f53c65fcd0c80bf92e8642eaf558c | 40ae40afbb4fc2032f9dfefa2bae3b4d095ca8db | /SummaryUpdate/src/fio.py | d6a9ae44249743bd43f07d3717f3d90f05274e7a | [] | no_license | wencanluo/Summarization | 96d554b3727aceda0ba0dd9bb42d632a1e418cb5 | 81e7ed0bbb99c07c4f567b86296777764f718538 | refs/heads/master | 2021-01-22T07:18:12.221313 | 2017-05-21T02:06:23 | 2017-05-21T02:06:23 | 21,334,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,781 | py | ## @package fio
# Package for I/O
# @file fio.py
# @author Wencan Luo (wencan@cs.pitt.edu)
# @date 2011-09-25
import types as Types
import sys
import os
import shutil
def NewPath(path):
    """Create the directory (and any missing parents) unless it already exists."""
    if os.path.exists(path):
        return
    os.makedirs(path)
def IsExist(file):
    # True if the path exists and is a regular file.
    return os.path.isfile(file)
def IsExistPath(path):
    # True if the path exists at all (file or directory).
    return os.path.exists(path)
def DeleteFolder(path):
    """Best-effort removal of a directory tree; all errors are swallowed."""
    try:
        shutil.rmtree(path)
    except Exception:
        pass
def ReadFile(file):
    """Input a file, and return a list of lines.

    @param file: string, the input file path
    @return: list of lines. Note: each line keeps its "\r\n" or "\n" ending.
    """
    with open(file, 'r') as handle:
        return handle.readlines()
def SaveList(List, file, linetag="\n"):
    """
    Save a list into a file. Each item is a line
    """
    # Python 2 hack: force utf-8 as the default codec so str() on unicode
    # items does not raise UnicodeEncodeError.
    reload(sys)
    sys.setdefaultencoding('utf8')
    f = open(file, "w")
    for item in List:
        f.write(str(item))
        f.write(linetag)
    f.close()
def LoadList(file):
    """Read a file and return its lines with surrounding whitespace stripped."""
    stripped = []
    for raw_line in ReadFile(file):
        stripped.append(raw_line.strip())
    return stripped
def SaveText(text, file):
    """
    Save a string (str() of any object) into a file.
    """
    with open(file, "w") as out:
        out.write(str(text))
def PrintListwithName(list, name = None):
    # Print an optional name followed by the list items, tab-separated, on one line.
    print name, "\t",
    for entry in list:
        print entry, "\t",
    print
def PrintList(list, sep = "\t", endflag=True, prefix=''):
    """
    @function: print out a list into a file.
    @param list: list
    @param sep: string, the separator between each item
    @param endflag: bool, whether each item is a line
    @param prefix: string, the prefix of each iterm
    """
    for i in range(len(list)):
        entry = list[i]
        entry = prefix+str(entry)+prefix
        if i==len(list)-1:
            # Last item: terminate the line only when endflag is set.
            if endflag:
                print entry
            else:
                print entry,
        else:
            print entry + sep,
def PrintDict(dict, SortbyValueflag = True):
    """
    @function: print out a dict in a reverse order of the values, the value of the dict should be numeric
    @param dict: dictionary
    """
    reload(sys)
    sys.setdefaultencoding('utf8')
    if SortbyValueflag:
        # Keys sorted by their (numeric) values, largest first.
        for key in sorted(dict, key = dict.get, reverse = True):
            print str(key) + "\t" + str(dict[key])
    else:
        for key in sorted(dict):
            print str(key) + "\t" + str(dict[key])
def SaveDict(dict, file, SortbyValueflag = False):
    """
    @function:save a dict
    @param dict: dictionary
    """
    # Redirect stdout so the print statements below write into the file.
    SavedStdOut = sys.stdout
    sys.stdout = open(file, 'w')
    if SortbyValueflag:
        for key in sorted(dict, key = dict.get, reverse = True):
            print str(key) + "\t" + str(dict[key])
    else:
        for key in sorted(dict.keys()):
            print str(key) + "\t" + str(dict[key])
    sys.stdout = SavedStdOut
def LoadDict(file, type='str'):
    """
    Load a two-column, tab-separated file into a dict.
    Values are kept as strings ('str') or converted ('float').
    @return dict: dictionary (None if the matrix could not be read)
    """
    rows = ReadMatrix(file, False)
    if rows == None:
        return None
    result = {}
    for row in rows:
        assert(len(row) == 2)
        key, value = row
        if type == 'str':
            result[key] = value
        if type == 'float':
            result[key] = float(value)
    return result
def LoadExcel(file, hasHead = True):
rows = ReadFile(file)
y = len(rows)
if y == 0:
return None
x = len(rows[0].split("\t"))
body = [[None]*x]*y
for i in range(y):
row = rows[i]
cols = row.split("\t")
if len(cols) != x:
print "Excel format is wrong"
print i, x, len(cols)
return None
for j in range(x):
col = cols[j]
body[i][j] = col
if hasHead:
head = body[0]
body = body[1:]
return head, body
else:
return body
def CRFWriter(file, data):
    # Write each row tab-separated, one instance per line (CRF input format),
    # by temporarily redirecting stdout to the target file.
    SavedStdOut = sys.stdout
    sys.stdout = open(file, 'w')
    for row in data:
        for i, col in enumerate(row):
            if i == len(row) - 1:
                print col,
            else:
                print str(col)+"\t",
        print
    sys.stdout = SavedStdOut
def CRFReader(file):
    """Read CRF output; the last two columns are the gold and predicted labels."""
    rows = ReadMatrix(file, False)
    labels = []
    for row in rows:
        labels.append(row[-2:])
    return ['True', 'Predict'], labels
#types = 'String', 'Category', 'Continuous'
def ArffWriter(file, head, types, name, data):
    """
    Function: write the data to a arff file for Weka
    @param file: string, the output file name
    @param head: list, the attribute name list, the class label is "@class@"
    @param types: the types of the attributes, an attribute can be 'String', 'Category' or 'Continuous'
    @param name: string, the name of the relationship
    @param data: matrix, the data, each row is an instance
    """
    # Redirect stdout so every print statement below writes into the file.
    SavedStdOut = sys.stdout
    reload(sys)
    sys.setdefaultencoding('utf8')
    sys.stdout = open(file, 'w')
    print "@relation " + name
    print
    #get the class categories
    n = len(head)
    cats = []
    for i in range(n):
        cats.append({})
    # Collect the distinct values of every 'Category' attribute.
    for row in data:
        for i in range(n):
            if types[i] != 'Category': continue
            if row[i] == None:
                row[i] = ""
            if not cats[i].has_key(row[i]):
                cats[i][row[i]] = 1
    for i in range(n):
        att = head[i]
        if types[i] == 'Category':
            print "@attribute " + att + " {",
            #print the class
            cat = sorted(cats[i].keys())
            PrintList(cat, ",", False, "'")
            print "}"
        elif types[i] == 'String':
            print "@attribute " + att + " string"
        elif types[i] == 'Continuous':
            print "@attribute " + att + " NUMERIC"
        else:
            print "Not Supported"
    print
    print "@data"
    #write data
    for row in data:
        for i in range(len(row)):
            atr = row[i]
            if atr != None:
                if types[i] == 'Continuous':
                    if i==len(row) - 1:
                        print str(atr),
                    else:
                        print str(atr)+",",
                else:
                    # Escape single quotes inside quoted string/category values.
                    if type(atr) is Types.StringType:
                        atr = atr.replace("'","\\'")
                    if type(atr) is Types.UnicodeType:
                        atr = atr.replace("'","\\'")
                    if i==len(row) - 1:
                        print '\''+str(atr)+'\'',
                    else:
                        print '\''+str(atr)+'\''+",",
            else:
                print "'',",
        print
    sys.stdout = SavedStdOut
def MulanWriter(file, labels, head, types, name, data):
    """
    Function: write the data to a arff file for Mulan [http://mulan.sourceforge.net/index.html]
    @param filename: string, the output file name without extension
    @param labels: list, the labels
    @param head: list, the attribute name list, the class label is "@class@"
    @param types: the types of the attributes, an attribute can be 'String', 'Category' or 'Continuous'
    @param name: string, the name of the relationship
    @param data: matrix, the data, each row is an instance
    """
    ArffWriter(file + '.arff', head, types, name, data)
    #write the XML
    # Mulan also needs a <labels> XML file next to the .arff.
    SavedStdOut = sys.stdout
    sys.stdout = open(file + '.xml', 'w')
    print '<?xml version="1.0" encoding="utf-8"?>'
    print '<labels xmlns="http://mulan.sourceforge.net/labels">'
    for label in labels:
        print '\t<label name="'+label+'"></label>'
    print '</labels>'
    sys.stdout = SavedStdOut
def MulanOutReader(file, labelnames = None):
    # Parse Mulan prediction output: each line carries a "[true, false, ...]"
    # list; emit 1 for 'true' and 0 otherwise, one column per label.
    lines = ReadFile(file)
    body = []
    for line in lines:
        begin = line.find('[')
        end = line.find(']')
        # NOTE(review): on a malformed line this prints "Error" but still
        # slices with begin/end == -1 — confirm whether it should skip instead.
        if begin == -1 or end == -1:
            print "Error"
        labels = line[begin+1:end].split(',')
        row = [1 if x.strip()=='true' else 0 for x in labels]
        body.append(row)
    return body
def ReadMatrix(file, hasHead=True):
    """
    Function: Load a tab-separated M*N matrix from a file.
    Blank lines are skipped and every cell is whitespace-stripped.
    @param file: string, filename
    @param hasHead: bool, whether the file has a header
    """
    matrix = []
    for raw_line in ReadFile(file):
        raw_line = raw_line.strip()
        if len(raw_line) == 0:
            continue
        cells = []
        for cell in raw_line.split("\t"):
            cells.append(cell.strip())
        matrix.append(cells)
    if hasHead:
        return matrix[0], matrix[1:]
    else:
        return matrix
def WriteMatrix(file, data, header=None):
    """
    Function: save a matrix to a file. The matrix is M*N
    @param file: string, filename
    @param data: M*N matrix,
    @param header: list, the header of the matrix
    """
    # Python 2 hack: make str() safe on unicode cells.
    reload(sys)
    sys.setdefaultencoding('utf8')
    # Redirect stdout so the print statements below write into the file.
    SavedStdOut = sys.stdout
    sys.stdout = open(file, 'w')
    if header != None:
        for j in range(len(header)):
            label = header[j]
            if j == len(header)-1:
                print label
            else:
                print label, "\t",
    for row in data:
        for j in range(len(row)):
            col = row[j]
            if j == len(row) - 1:
                print col
            else:
                print col, "\t",
    sys.stdout = SavedStdOut
def ExtractWekaScore(input, output):
    """Pull the 'Weighted Avg.' metric rows out of a Weka console dump and
    save them as a tab-separated matrix."""
    header = ['TP Rate', 'FP Rate', 'Precision', 'Recall', 'F-Measure', 'ROC Area']
    key = 'Weighted Avg.'
    scores = []
    for raw in ReadFile(input):
        raw = raw.strip()
        if not raw.startswith(key):
            continue
        scores.append(raw[len(key):].strip().split())
    WriteMatrix(output, scores, header)
if __name__ == '__main__':
    # Manual smoke test: parse the Mulan output for the DSTC2 "request" act.
    mulan_labels = ['area', 'food', 'name', 'pricerange', 'addr', 'phone', 'postcode', 'signature']
    MulanOutReader('res/dstc2_train_request_actngram_ngram.arff.label', mulan_labels)
| [
"wencanluo.cn@gmail.com"
] | wencanluo.cn@gmail.com |
4604728d9a1cc6dd473d690be807b9be988187c1 | a0c23ea4d487d692ff68b0c7c4ed665ecdbf4904 | /Cajero.py | c625412af352d94aed4ffbfa86856ecf24140c49 | [] | no_license | SJCP2004/Banco | 1704dd68e707cfd4f703b05915c4069b8aaf5541 | 78b374a20ca8f28dba82afa4e2fd8c3b15748be1 | refs/heads/master | 2022-12-25T04:52:24.543786 | 2020-09-28T13:39:22 | 2020-09-28T13:39:22 | 291,343,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,170 | py | import os
from datetime import datetime
from io import open
def feho():
    """Return the current timestamp formatted as "dd/mm/YYYY HH:MM:SS"."""
    return datetime.now().strftime("%d/%m/%Y %H:%M:%S")
def limpiar():
    # Clear the terminal screen (POSIX `clear`).
    os.system('clear')
def imp(texto, forma='p', tab=8):
    """Indent *texto* with *tab* tab characters.

    forma == 'p' prints the line (returns None); any other value returns the
    string instead (used to build input() prompts).
    """
    rendered = "\t" * tab + texto
    if forma != 'p':
        return rendered
    print(rendered)
def salto(salt=2):
    """Emit *salt* blank lines."""
    blanks = "\n" * salt
    print(blanks)
#===========================================================
# MENU
#===========================================================
def menu():
    """Display the main menu and return the user's chosen option (1-5)."""
    limpiar()
    salto()
    imp("Bienvenido al menu")
    salto(1)
    imp("1 Consulta")
    imp("2 Retiro")
    imp("3 Deposito")
    imp("4 Transferencias")
    imp("5 Salir\n")
    salto(1)
    # imp(..., 'i') returns the prompt string for input() instead of printing.
    opc1=int(input(imp("Elija una opcion: ", 'i')))
    return opc1
#===========================================================
# SALDO
#===========================================================
def saldo(sala, salc, tran):
    """Load balances from Saldo.dat and past transactions from Trans.dat.

    Appends parsed transactions to *tran* in place and returns
    [savings_balance, checking_balance].
    """
    limpiar()
    salto()
    # NOTE: this local file handle shadows the function's own name.
    # Saldo.dat: first line = savings (ahorro), second line = checking (corriente).
    saldo=open("Saldo.dat", "r")
    sald=saldo.readlines()
    saldo.close()
    sala=float(sald[0])
    salc=float(sald[1])
    arch=open("Trans.dat", "r")
    trans=arch.readlines()
    for i in range(0, len(trans)):
        # Each record is a "|"-separated list of fields.
        x=trans[i].replace('\n','').split("|")
        tran.append(x)
    arch.close()
    return [sala, salc]
#===========================================================
# CONSULTA
#===========================================================
def consulta(sala, salc, tran):
    """Show either the account balances (option 0) or the transaction log.

    Balance queries are themselves recorded as transactions in *tran*.
    """
    limpiar()
    salto()
    imp("Consulta")
    salto(1)
    imp("¿Que desea consultar?")
    imp("0 Saldo")
    imp("1 Transacciones")
    salto(1)
    opc2=int(input(imp("Inserte la opcion deseada: ", 'i')))
    if opc2 == 0:
        imp("Su saldo en la cuenta de ahorro es de: " + str(sala))
        tran.append([feho(), " consulta ", str(sala), " ahorro ", str(sala) ,"SI"])
        imp("Su saldo en la cuenta corriente es de: " + str(salc))
        tran.append([feho(), " consulta ", str(salc), " corriente ", str(salc), "SI"])
    else:
        salto()
        imp("Acontinuacion se mostraran sus transacciones")
        salto(1)
        # Fixed-width table: date, operation, amount, account, balance, status.
        imp("{0:<20} {1:<15} {2:>8} {3:^15} {4:>8} {5:^7}".format("Fecha", "Operacion", "Monto", "Tipo", "Saldo", "Estatus"), tab=6)
        for i in range(0, len(tran)):
            fech=tran[i][0]
            oper=tran[i][1]
            mont=tran[i][2]
            tipo=tran[i][3]
            sald=tran[i][4]
            esta=tran[i][5]
            imp("{0:<20} {1:<15} {2:>8} {3:^15} {4:>8} {5:^7}".format(fech, oper, mont, tipo, sald, esta), tab=6)
    # Wait for Enter before returning to the menu.
    input()
#===========================================================
# RETIRO
#===========================================================
def retiro(sala, salc, tran):
    """Withdraw money from the savings (0) or checking (1) account.

    Logs the attempt in *tran* and returns the updated [sala, salc].
    """
    limpiar()
    salto()
    imp("Retiro")
    salto(1)
    imp("0 ahorro")
    imp("1 corriente")
    salto(1)
    opc3=int(input(imp("De que cuenta desea retirar el dinero: ", 'i')))
    if opc3 == 0:
        imp("Su saldo en la cuenta de ahorro es: " + str(sala))
        cant1 = float(input(imp("Cual es el monto que desea retirar: ", 'i')))
        # NOTE(review): a negative amount also passes this check and would
        # increase the balance — confirm whether that is intended.
        if cant1 <= sala:
            sala -= cant1
            imp("Retiro exitoso")
            tran.append([feho(), " Retiro ", str(cant1), " ahorro ", str(sala), "SI"])
        else:
            imp("Saldo insuficiente")
            tran.append([feho(), " Retiro ", str(cant1), " ahorro ", str(sala), "NO"])
    else:
        imp("Su saldo en la cuenta corriente es: " + str(salc))
        cant2 = float(input(imp("Cual es el monto que desea retirar: ", 'i')))
        if cant2 <= salc:
            salc -= cant2
            imp("Retiro exitoso")
            tran.append([feho(), " Retiro ", str(cant2), " corriente ", str(salc), "SI"])
        else:
            imp("Saldo insuficiente")
            tran.append([feho(), " Retiro ", str(cant2), " corriente ", str(salc), "NO"])
    # Wait for Enter before returning to the menu.
    input()
    return [sala, salc]
#===========================================================
# DEPOSITO
#===========================================================
def deposito(sala, salc, tran):
    """Deposit money into the savings (0) or checking (1) account.

    Logs the attempt in *tran* and returns the updated [sala, salc].
    """
    limpiar()
    salto()
    imp("Deposito")
    salto(1)
    imp("0 ahorro")
    imp("1 corriente")
    salto(1)
    opc4 = int(input(imp("De que cuenta quiere hacer el deposito: ", 'i')))
    if opc4 == 0:
        imp("Su saldo en la cuenta de ahorro es: " + str(sala))
        cant3 = float(input(imp("Cual es el monto que desea depositar: ", 'i')))
        # NOTE(review): only 0 is rejected; negative deposits would pass.
        if cant3 != 0:
            sala += cant3
            imp("Deposito exitoso")
            tran.append([feho(), " deposito ", str(cant3), " ahorro ", str(sala), "SI"])
        else:
            imp("Deposito fallido")
            tran.append([feho(), " deposito ", str(cant3), " ahorro ", str(sala), "NO"])
    else:
        # BUGFIX: the original displayed the savings balance (sala) here even
        # though this is the checking (corriente) branch.
        imp("Su saldo en la cuenta corriente es: " + str(salc))
        cant4 = float(input(imp("Cual es el monto que desea depositar: ", 'i')))
        if cant4 != 0:
            salc += cant4
            imp("Deposito exitoso")
            tran.append([feho(), " deposito ", str(cant4), " corriente ", str(salc), "SI"])
        else:
            imp("Deposito fallido")
            tran.append([feho(), " deposito ", str(cant4), " corriente ", str(salc), "NO"])
    input()
    return [sala, salc]
#===========================================================
# TRANSFERENCIA
#===========================================================
def tranferencia(sala, salc, tran):
    """Transfer money between the two accounts (ahorro <-> corriente).

    Logs the attempt in *tran* and returns the updated [sala, salc].
    """
    limpiar()
    salto()
    imp("transferencia")
    salto(1)
    imp("0 ahorro")
    imp("1 corriente")
    salto(1)
    # CONSISTENCY FIX: the original passed '1' as the forma argument; every
    # other prompt uses 'i'. Behaviour is unchanged (any forma != 'p' makes
    # imp() return the prompt string).
    opc4 = int(input(imp("De que cuenta quiere hacer la transferencia: ", 'i')))
    if opc4 == 0:
        imp("Su saldo en la cuenta de ahorro es: " + str(sala))
        cant3 = float(input(imp("Cual es el monto que desea transferir: ", 'i')))
        if cant3 <= sala:
            salc += cant3
            sala -= cant3
            imp("Transferencia exitosa")
            tran.append([feho(), " Transferencia ", str(cant3), " ahorro ", str(salc), "SI"])
        else:
            imp("Saldo insuficiente")
            tran.append([feho(), " Transferencia ", str(cant3), " ahorro ", str(sala), "NO"])
    else:
        imp("Su saldo en la cuenta corriente es: " + str(salc))
        cant4 = float(input(imp("Cual es el monto que desea transferir: ", 'i')))
        if cant4 <= salc:
            sala += cant4
            salc -= cant4
            imp("Transferencia exitosa")
            tran.append([feho(), " Transferencia ", str(cant4), " corriente ", str(salc), "SI"])
        else:
            imp("Saldo insuficiente")
            tran.append([feho(), " Transferencia ", str(cant4), " corriente ", str(salc), "NO"])
    input()
    return [sala, salc]
#===========================================================
# SALIR
#===========================================================
def salir():
    """Persist balances to Saldo.dat and append new transactions to Trans.dat.

    Reads the module-level globals sala, salc, tran and cani (the number of
    transactions that were already on disk when the session started).
    """
    limpiar()
    salto()
    imp("Gracias por usar nuestro servicio")
    saldo=open("Saldo.dat","w")
    saldo.write(str(sala) + "\n")
    saldo.write(str(salc) + "\n")
    saldo.close()
    arc2=open("Trans.dat", "a")
    for i in range(cani, len(tran)):
        linea="|".join(tran[i])
        # BUGFIX: terminate each record with a newline; the reader in saldo()
        # parses Trans.dat line by line, so unterminated records would all
        # fuse into a single corrupted line.
        arc2.write(linea + "\n")
    arc2.close()
#===========================================================
# PROGRAMA PRINCIPAL
#===========================================================
salc=0
sala=0
cons=0
tran=[]
# Load persisted balances and transaction history.
sala, salc = saldo(sala, salc, tran)
# cani = number of transactions already persisted; salir() appends only new ones.
cani=len(tran)
opc1=1
# Main menu loop; any option > 4 saves state and exits.
while opc1 <= 4:
    opc1 = menu()
    if opc1 == 1:
        consulta(sala, salc, tran)
    elif opc1 == 2:
        sala, salc = retiro(sala, salc, tran)
    elif opc1 == 3:
        sala, salc = deposito(sala, salc, tran)
    elif opc1 == 4:
        sala, salc = tranferencia(sala, salc, tran)
    else:
        salir()
        break
| [
"sebastiancuevas490@gmail.com"
] | sebastiancuevas490@gmail.com |
d96d874e54be40673bda01e051fe010ee2f295f6 | bd033a3ad478ac7eb92ab1b2f56f5fc02c42583c | /spo/tools/spotify_write_playlist.py | 3ff404892b2687005d931c9bc7b98293f4ba4b81 | [
"MIT"
] | permissive | akx/spotify-tools | 85841853757dc950fa8d9e95622752e1357009ef | ee9980e79eb88506cf1e4df619e360037e6b05d8 | refs/heads/master | 2021-01-20T05:08:22.568509 | 2015-10-31T20:43:24 | 2015-10-31T20:43:24 | 15,848,156 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,554 | py | # -- encoding: UTF-8 --
from __future__ import print_function
import click
from spo.spotify import Spotify
from spo.util import batch
import re
TRACK_RE = re.compile("^(?:https://api.spotify.com/v1/tracks/|spotify:track:)(.+?)$")
@click.command(u"spotify-write-pl", short_help="Create a Spotify playlist based on a list of track URIs.")
@click.argument(u"input", type=click.File("rb"), required=True)
@click.option(u"--name", required=False)
@click.option(u"--playlist-id", required=False)
def spotify_write_playlist(input, name, playlist_id): # pragma: no cover
    # Either --name (create a new playlist) or --playlist-id (append to an
    # existing one) must be given.
    # (Deliberately using # comments: a docstring would become click's help text.)
    if not (name or playlist_id):
        raise click.UsageError(u"Need either name or id")
    track_ids = get_track_ids(input)
    sp = Spotify(auth=True)
    if name:
        playlist = sp.user_playlist_create(sp.auth_username, name, public=False)
        print("Created new playlist with ID: %s" % playlist["id"])
    else:
        playlist = sp.user_playlist(sp.auth_username, playlist_id)
        name = playlist["name"]
        playlist_id = playlist["id"]
    print(u"Writing %d tracks to playlist: %s" % (len(track_ids), name))
    n_written = 0
    # Batches of 90 — presumably to stay under the API's per-request track
    # limit; TODO confirm against the Spotify Web API docs.
    for track_batch in batch(track_ids, 90):
        sp.user_playlist_add_tracks(sp.auth_username, playlist_id, track_batch)
        n_written += len(track_batch)
        print(u"%d / %d tracks done" % (n_written, len(track_ids)))
def get_track_ids(input):
    """Extract the track ID from every line matching TRACK_RE (Spotify track
    URLs or spotify:track: URIs); non-matching lines are skipped."""
    matches = (TRACK_RE.match(raw.strip()) for raw in input)
    return [m.group(1) for m in matches if m]
| [
"akx@iki.fi"
] | akx@iki.fi |
e3c28ee319199963a142f4d19b4fb708793b1caa | f84d94b75b32afd64e8bd80fd6e0a308a4ee563e | /tarea1.py | 1f032ef719944ac427a5dc090364676ad071ad6f | [] | no_license | roykelvin/SpeechRecognition | d98378114558aab38605126ffb05f357b8b8de10 | ff022e0b87049b3e0191bd7123cec9a26c485d06 | refs/heads/master | 2023-03-11T11:05:52.663311 | 2021-02-24T00:01:13 | 2021-02-24T00:01:13 | 341,721,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 854 | py | from flask import Flask, render_template, request, redirect
import speech_recognition as sr
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def index():
transcript = ""
if request.method == "POST":
print("FORM DATA RECEIVED")
if "file" not in request.files:
return redirect(request.url)
file = request.files["file"]
if file.filename == "":
return redirect(request.url)
if file:
recognizer = sr.Recognizer()
audioFile = sr.AudioFile(file)
with audioFile as source:
data = recognizer.record(source)
transcript = recognizer.recognize_google(data, key=None)
return render_template('index.html', transcript=transcript)
if __name__ == "__main__":
    # threaded=True lets the Flask dev server handle concurrent requests.
    app.run(debug=True, threaded=True)
"roy.melendez@hotmail.com"
] | roy.melendez@hotmail.com |
24af3f24219cfe2112021f1d51f9f728e92de5cf | df38459227aa7ce400bb63e54eb3375a3fd4a6ba | /basic programs/evenodd.py | e1fc71ff192992344b45e91fb0fed8b5b9803b00 | [] | no_license | saurabh0307meher/Python-Programs | 028d9d74ed79fb11f0597d603a595644cf10c942 | d27bae33f33bdcfab33a122d6af99bd71f8a6fbe | refs/heads/master | 2020-05-02T04:20:58.803631 | 2019-05-26T15:52:07 | 2019-05-26T15:52:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | #even odd
a=int(input("Enter a no"))
if (a%2==0):
print(a,"is even")
else:
print(a,"is odd")
| [
"noreply@github.com"
] | saurabh0307meher.noreply@github.com |
6c76e9633b05624cf3a23b6dc29fa05a0e1fb1cc | 54c55dae302bc6cc761c8fd63e33a886d4c09e9d | /src/fr/tagc/rainet/core/templates/template_simple_script.py | f189710276c036e196da9210436452629967abbd | [] | no_license | diogomribeiro/RAINET | 66f41e21da73dc4ace8184b8785f144abc70799a | 4d2f919a4554c45f5d5b8ddc9d35bc83b4bc2925 | refs/heads/master | 2021-09-14T02:11:49.965465 | 2018-05-07T13:50:19 | 2018-05-07T13:50:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,892 | py | import sys
import os
import argparse
# import numpy as np
# import pandas as pd
from fr.tagc.rainet.core.util.exception.RainetException import RainetException
from fr.tagc.rainet.core.util.log.Logger import Logger
from fr.tagc.rainet.core.util.time.Timer import Timer
# from fr.tagc.rainet.core.util.subprocess.SubprocessUtil import SubprocessUtil
# from fr.tagc.rainet.core.util.sql.SQLManager import SQLManager
# from fr.tagc.rainet.core.util.data.DataManager import DataManager
# from fr.tagc.rainet.core.data.Protein import Protein
#===============================================================================
# Started 25-June-2016
# Diogo Ribeiro
DESC_COMMENT = "Script to ..."
SCRIPT_NAME = "name_of_script.py"
#===============================================================================
#===============================================================================
# General plan:
# 1)
# 2)
#===============================================================================
#===============================================================================
# Processing notes:
# 1)
# 2)
#===============================================================================
# #
# Write line about what the function does
def function_one( arg1):
    """Placeholder for the script's main processing step (not yet implemented)."""
    # SQLManager.get_instance().set_DBpath( DBPATH)
    # sql_session = SQLManager.get_instance().get_session()
    pass
if __name__ == "__main__":
try:
# Start chrono
Timer.get_instance().start_chrono()
print "STARTING " + SCRIPT_NAME
#===============================================================================
# Get input arguments
#===============================================================================
parser = argparse.ArgumentParser(description= DESC_COMMENT)
# positional args
parser.add_argument('sysArgFile', metavar='sysArgFile', type=str,
help='Description.')
parser.add_argument('--sysArgOptional', metavar='sysArgOptional', type=int, default = 1,
help='Description (Default = 1).')
#gets the arguments
args = parser.parse_args( )
#===============================================================================
# Run analysis / processing
#===============================================================================
# run function to..
Timer.get_instance().step( "Read bogus file..")
function_one( args.enrichmentPerRNAFile)
# Stop the chrono
Timer.get_instance().stop_chrono( "FINISHED " + SCRIPT_NAME )
# Use RainetException to catch errors
except RainetException as rainet:
Logger.get_instance().error( "Error during execution of %s. Aborting :\n" % SCRIPT_NAME + rainet.to_string())
| [
"diogo_ribeiro@bd239c00-765f-4708-9f37-acd49598262a"
] | diogo_ribeiro@bd239c00-765f-4708-9f37-acd49598262a |
5f0a944ab77f3193111f57d8b77359464e1b2fba | 68b78cbb47a5823340cd9ff8863831cd4882d29c | /extract_candidates.py | 363d23e453b88a9e11b144049f8e93bf6d1480c8 | [] | no_license | RPSeq/meistro | 91f58ba4798cd0c11b34fb1b5a31c3d895550450 | 6fa75e4f28a38c1922461feda4f25ebefab2d740 | refs/heads/master | 2021-01-10T16:10:30.168348 | 2016-05-03T08:28:49 | 2016-05-03T08:28:49 | 48,070,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,177 | py | #!/usr/bin/env python
import sys
from argparse import RawTextHelpFormatter, ArgumentParser
from itertools import izip
from string import maketrans
#installed modules
import pysam
from intervaltree import IntervalTree
from ssw_wrap import Aligner
__author__ = "Ryan Smith (ryanpsmith@wustl.edu)"
__version__ = "$Revision: 0.0.1 $"
__date__ = "$Date: 2016-04-11 11:43 $"
# ====================
# SAM Class
# ====================
class sam_al(object):
    '''Class representing a SAM file alignment entry.

    Accepts either a raw SAM text record (string or pre-split field list), or
    a pysam.AlignedSegment together with the pysam.Samfile it came from
    (needed to resolve reference-name ids).
    '''
    def __init__(self, sam, in_sam=False):
        # Manual overloading based on arg type. Text input is checked first
        # so plain strings/lists never touch the pysam module.
        if type(sam) == str or type(sam) == list:
            self.read(sam)
        elif type(sam) == pysam.AlignedSegment and in_sam:
            self.read_pysam(sam, in_sam)
        else:
            exit("Error creating sam_al.\nUsage:sam_al(samlist), \
            sam_al(samstr), sam_al(pysam.al, pysam.infile)")

    def read(self, sam):
        """Parse a SAM text record given as a tab-separated line or field list."""
        if type(sam)==str:
            sam = sam.strip("\n").split("\t")
        self.qname = sam[0]
        self.flag = int(sam[1])
        self.rname = sam[2]
        self.pos = int(sam[3])
        self.mapq = int(sam[4])
        # Column 6 is the CIGAR string; store it under both names so __str__
        # (which reads .cigarstring) also works for text-parsed records.
        self.cigar = sam[5]
        self.cigarstring = sam[5]
        self.rnext = sam[6]
        self.pnext = sam[7]
        self.tlen = int(sam[8])
        self.seq = sam[9]
        self.qual = sam[10]
        self.tags = {}
        # BUGFIX: optional TAG:TYPE:VALUE fields start at index 11 (index 10
        # is the qual string), and results must be stored on self.tags — the
        # original wrote to an undefined local `tags` and started at 10,
        # crashing on every real record. split(":", 2) keeps ':' characters
        # inside the value intact.
        for i in range(11, len(sam)):
            tag, ttype, val = sam[i].split(":", 2)
            self.tags[tag] = val
        return

    def read_pysam(self, al, in_sam):
        """Copy the fields of a pysam.AlignedSegment, resolving reference names."""
        self.qname = al.qname
        self.flag = al.flag
        self.rname = in_sam.getrname(al.tid)
        self.pos = al.pos
        self.mapq = al.mapq
        self.cigarstring = al.cigarstring
        self.cigar = al.cigar
        self.rnext = in_sam.getrname(al.mrnm)
        self.pnext = al.pnext
        self.tlen = al.tlen
        self.seq = al.seq
        self.qual = al.qual
        self.tags = dict(al.tags)
        self.is_secondary = al.is_secondary
        self.is_duplicate = al.is_duplicate
        self.is_proper_pair = al.is_proper_pair
        self.is_read1 = al.is_read1
        self.is_read2 = al.is_read2
        self.qstart = al.qstart
        self.qend = al.qend
        self.is_reverse = al.is_reverse
        self.is_paired = al.is_paired
        return

    def __str__(self, pair_tag=True):
        """Returns the sam record as a single tab-separated SAM line."""
        name = self.qname
        if pair_tag:
            # Text-parsed records have no mate flags; default them to False
            # so __str__ does not raise AttributeError.
            if getattr(self, 'is_read1', False):
                name = self.qname+"_1"
            elif getattr(self, 'is_read2', False):
                name = self.qname+"_2"
        outlist = [name, str(self.flag), self.rname,
                    str(self.pos), str(self.mapq), self.cigarstring,
                    self.rnext, str(self.pnext), str(self.tlen),
                    self.seq, self.qual]
        # Re-emit optional tags: ints as type 'i', everything else as 'Z'.
        # (items() replaces the Python-2-only viewitems() with identical
        # iteration behaviour.)
        for tag, val in self.tags.items():
            if type(val)==int:
                ttype = 'i'
            else:
                ttype = 'Z'
            outlist.append("{0}:{1}:{2}".format(tag, ttype, val))
        return "\t".join(outlist)+"\n"
#main loop function
def extract_candidates(bamfile,
                       is_sam,
                       anchors_out,
                       fastq_out,
                       clip_len,
                       single_only,
                       max_opp_clip):
    """Stream alignments from a BAM/SAM, writing anchor SAM records and a
    FASTQ of reads to realign for MEI candidate detection.

    bamfile      : input path, or None to read standard input.
    is_sam       : input is SAM text rather than BAM.
    anchors_out  : output SAM path for anchor alignments.
    fastq_out    : output FASTQ path ("-" for stdout).
    clip_len     : minimum soft-clip length; also sizes the poly-A probe.
    single_only  : currently unused in this function.
    max_opp_clip : maximum allowed clip length on the opposite read end.
    """
    # set input file
    if bamfile == None:
        if is_sam:
            in_bam = pysam.Samfile("-", "r")
        else:
            in_bam = pysam.Samfile('-', 'rb')
    else:
        if is_sam:
            in_bam = pysam.Samfile(bamfile, 'r')
        else:
            in_bam = pysam.Samfile(bamfile, "rb")
    header = "@HD\tVN:1.3\tSO:unsorted\n" #write header to SAM output
    header+="\n".join(in_bam.text.split("\n")[1:])
    anchors_out = open(anchors_out, 'w')
    anchors_out.write(header)
    if fastq_out == "-": #allow - output argument
        fastq_out = "/dev/stdout"
    fastq_out = open(fastq_out, 'w') #open fastq output
    # Output records are buffered and flushed once per `batchsize` entries.
    batchsize = 1000000
    anchor_batch = []
    fq_batch = []
    #create striped smith-waterman aligner object
    #calibrated so gaps are not allowed, only mismatches.
    polyA_ssw = Aligner("A"*clip_len,
                        match=4,
                        mismatch=8,
                        gap_open=900,
                        gap_extend=600,
                        report_secondary=False,
                        report_cigar=True)
    #iterate over the als
    for al in in_bam:
        anchor = False
        is_clip = False
        fastq_p, fastq_s = False, False  # NOTE(review): fastq_p is never used
        #check if the batches need to be printed
        if len(anchor_batch) >= batchsize:
            anchors_out.write("".join(anchor_batch))
            del anchor_batch[:]
        if len(fq_batch) >= batchsize:
            fastq_out.write("".join(fq_batch))
            del fq_batch[:]
        #skip secondary or duplicate alignments
        if al.is_secondary or al.is_duplicate:
            continue
        #check if part of discordant pair.
        #should add zscore test for reads mapped to close/far together
        #(and not use proper pair flag)
        conditions = [
            al.mapq == 0 and al.opt('MQ') > 0,
            al.is_unmapped != al.mate_is_unmapped,
            al.is_reverse == al.mate_is_reverse,
            not al.is_proper_pair,
            al.rname != al.mrnm
        ]
        if any(conditions):
            #use check pairs to determine which side to align (or both)
            remap, anchor = check_pairs(al, in_bam)
            if remap:
                fq_batch.append(remap)
            if anchor:
                al = anchor
        al, is_clip = check_clip(al, in_bam, clip_len, max_opp_clip, anchor)
        if is_clip:
            fastq_s = fastq_str(al, is_clip) # get fastq string
            fq_batch.append(fastq_s) # append to fq output batch
            #pass to polyA function (line 2 of the FASTQ record is the sequence)
            ssw_al = check_polyA(fastq_s.split("\n")[1], polyA_ssw)
            if ssw_al: #if we got a polyA hit,
                al_tags = al.opt("TY").split(",") #get the al's tags
                cigar, ori = ssw_al # get the polyA result
                a_ori = "+"
                if al.is_reverse:
                    a_ori = "-"
                if 'ASL' in al_tags: #add the SR or SL -polyA tag.
                    pAtag = "SL"
                elif 'ASR' in al_tags:
                    pAtag = "SR"
                #generate the new tag and update al.
                newtag = pAtag+","+"polyA,0,"+cigar+","+a_ori+ori
                al.setTag("RA",newtag)
        if anchor or is_clip:
            anchor_batch.append(str(sam_al(al, in_bam)))
    #after finishing, write the remaining items in the batches.
    anchors_out.write("".join(anchor_batch))
    fastq_out.write("".join(fq_batch))
    anchors_out.close()
    fastq_out.close()
# ============================================
# functions
# ============================================
def reverse_complement(sequence):
    """Return the reverse complement of the input DNA sequence.

    Upper- and lower-case bases are complemented; any other character is
    passed through unchanged (matching the previous str.translate
    behaviour).  Implemented without string.maketrans, which does not
    exist in Python 3, so the function is portable across versions.
    """
    complement = {'A': 'T', 'C': 'G', 'T': 'A', 'G': 'C',
                  'a': 't', 'c': 'g', 't': 'a', 'g': 'c'}
    return "".join(complement.get(base, base) for base in reversed(sequence))
def hamming(str1, str2):
    """Return the Hamming distance between two strings of equal length.

    Raises AssertionError when the lengths differ.
    """
    assert len(str1) == len(str2)
    # The builtin zip is sufficient here (the inputs are short read
    # fragments), and it keeps the function free of itertools.izip,
    # which no longer exists in Python 3.
    return sum(c1 != c2 for c1, c2 in zip(str1, str2))
def fastq_str(al, is_clip=False):
    """Return a FASTQ record string for the given BAM alignment.

    With is_clip=True the read's TY tag list is scanned for a clip-side
    marker: "ASL" keeps the left soft-clipped bases (before al.qstart),
    "ASR" the right ones (after al.qend).  The leading 'A' is stripped so
    the FASTQ name carries "SL"/"SR" while the SAM anchor keeps the full
    tag.  Reverse-strand reads are flipped back to original orientation,
    and "_1"/"_2" is appended to the name for read1/read2 of a pair.
    """
    seq = al.seq
    quals = al.qual
    name = al.qname
    tags = al.opt("TY")  # comma-delimited tags; at least one is present
    if is_clip:
        # Take the first clip-side tag and slice out the clipped portion.
        # (The original loop re-bound `tags` mid-iteration and could raise
        # IndexError when more than two tags were present; breaking after
        # the first match preserves the effective behaviour safely.)
        for tag in tags.split(","):
            if tag == "ASL":
                tags = tag[1:]  # "SL": the fastq is NOT the anchor side
                seq = seq[:al.qstart]
                quals = quals[:al.qstart]
                break
            elif tag == "ASR":
                tags = tag[1:]  # "SR"
                seq = seq[al.qend:]
                quals = quals[al.qend:]
                break
    # Restore the original read orientation before realignment.
    if al.is_reverse:
        seq = reverse_complement(seq)
        quals = quals[::-1]
    # Add read-number tags.
    if al.is_read1:
        name += "_1"
    elif al.is_read2:
        name += "_2"
    # Return the fastq string, appending tags to the read name (rname:tags).
    # (BWA can carry FASTQ comments (rname<TAB>comment) into the output
    # bam, but MOSAIK cannot, hence the ':' embedding.)
    return "@"+name+":"+tags+"\n"+seq+"\n+\n"+quals+"\n"
def check_pairs(al1, in_bam):
    """Classify one read of a discordant pair as UU, RU, or UR.

    UU: both mates map uniquely -> realign this read and anchor on it.
    RU: this read is repetitive, mate unique -> realign this read only.
    UR: this read unique, mate repetitive -> anchor only.
    Returns (fq, anc) where fq is a FASTQ string (or False) and anc is
    the alignment to keep as an anchor (or False).  `in_bam` is unused
    but kept for interface compatibility.
    """
    fq = False
    anc = False
    self_unique = al1.mapq > 0
    mate_unique = al1.opt('MQ') > 0  # mate MAPQ stored in the MQ tag
    if self_unique and mate_unique:
        al1.setTag("TY", "UU")
        fq = fastq_str(al1)
        anc = al1
    elif mate_unique:
        al1.setTag("TY", "RU")
        fq = fastq_str(al1)
    elif self_unique:
        al1.setTag("TY", "UR")
        anc = al1
    return fq, anc
def check_clip(al,
               in_bam,
               clip_len,
               max_opp_clip,
               disc_pair):
    """Check an alignment for a soft-clipped end of at least clip_len bases.

    On a hit, "ASL" (left clip) or "ASR" (right clip) is appended to the
    TY tag and (al, True) is returned; otherwise (al, False).  A clip is
    accepted only when the opposite end is clipped by at most
    max_opp_clip bases.  `in_bam` and `disc_pair` are currently unused
    but kept for interface compatibility.
    """
    cigar = al.cigar
    # Unmappable read or trivial cigar: nothing to do.  The original wrote
    # `(al.mapq == 0 or len(cigar)) < 2`, which only behaved correctly
    # because True compares as 1; the intended condition is spelled out.
    if al.mapq == 0 or len(cigar) < 2:
        return al, False
    try:
        tag = al.opt("TY")
    except (KeyError, ValueError):  # tag absent (bare except narrowed)
        tag = False
    # Clipped on the left side?  (cigar op 4 == soft clip)
    if cigar[0][0] == 4 and cigar[0][1] >= clip_len:
        # Accept only if the opposite end is not clipped beyond the limit.
        if cigar[-1][0] != 4 or (cigar[-1][0] == 4 and cigar[-1][1] <= max_opp_clip):
            if tag:
                al.setTag("TY", tag + ",ASL")
            else:
                al.setTag("TY", "ASL")
            return al, True
    # Clipped on the right side?
    elif cigar[-1][0] == 4 and cigar[-1][1] >= clip_len:
        if cigar[0][0] != 4 or (cigar[0][0] == 4 and cigar[0][1] <= max_opp_clip):
            if tag:
                al.setTag("TY", tag + ",ASR")
            else:
                al.setTag("TY", "ASR")
            return al, True
    return al, False
def check_polyA(seq, polyA_ssw):
    """Screen *seq* for a poly-A (or poly-T via reverse complement) tail.

    `polyA_ssw` is a striped Smith-Waterman aligner built from an all-'A'
    probe with match score 4, so score / (aligned_length * 4) is the
    fraction of matching bases.  Returns (cigar, orientation) for the
    better hit at >= 80% identity, or False when neither strand passes.
    Ties between strands go to the reverse hit, as in the original.
    """
    cutoff = 0.8
    fwd = polyA_ssw.align(seq, min_score=10, min_len=12)
    rev = polyA_ssw.align(reverse_complement(seq), min_score=10, min_len=12)
    fwd_identity = 0
    rev_identity = 0
    if fwd:
        span = (fwd.query_end - fwd.query_begin) + 1
        fwd_identity = fwd.score / float(span * 4)
    if rev:
        span = (rev.query_end - rev.query_begin) + 1
        rev_identity = rev.score / float(span * 4)
    if fwd_identity >= cutoff and rev_identity >= cutoff:
        if fwd_identity > rev_identity:
            return fwd.cigar, "+"
        return rev.cigar, "-"
    if fwd_identity >= cutoff:
        return fwd.cigar, "+"
    if rev_identity >= cutoff:
        return rev.cigar, "-"
    return False
def zscore(val, mean, stdev):
    """Return the absolute z-score of *val* given *mean* and *stdev*.

    All arguments are coerced to float, so numeric strings are accepted.
    """
    deviation = float(val) - float(mean)
    return abs(deviation / float(stdev))
def filter_excludes(variants, exclude_file):
    """Return variants that do not intersect features in a given BED file.

    variants     : iterable of objects exposing .chrom and .pos.
    exclude_file : open handle on a BED file (chrom, start, stop columns).

    Uses collections.defaultdict, which was previously referenced without
    ever being imported (a NameError at call time); the import is now in
    the module's import block.
    """
    # excludes is a dict of IntervalTrees (one for each chromosome)
    excludes = defaultdict(IntervalTree)
    filtered = []  # variants that fall inside no excluded interval
    for line in exclude_file:
        if line[0] != "#":  # skip headers
            fields = line.strip().split("\t")
            chrom = fields[0]
            start = int(fields[1])
            stop = int(fields[2])
            # add chrom:interval to excludes
            # intervaltree.addi(start, stop, data)
            excludes[chrom].addi(start, stop, 1)
    # could probably speed this up using a mapping function instead
    for variant in variants:
        # Query the chromosome's tree with the position; an empty result
        # set means the variant is clear of all excluded regions.
        if not excludes[variant.chrom][variant.pos]:
            filtered.append(variant)
    return filtered
def get_args():
    """Build the CLI parser and return the parsed arguments.

    Prints the help text and exits with status 1 when no input BAM was
    given and stdin is an interactive terminal (nothing to read).
    """
    parser = ArgumentParser(
        formatter_class=RawTextHelpFormatter, add_help=False)
    parser.add_argument('-a', metavar='SAM_OUT', required=True,
                        help='Output anchors SAM (required)')
    parser.add_argument('-f', metavar='FASTQ_OUT', required=True,
                        help='Output FASTQ for realignment (required)')
    parser.add_argument('-i', metavar='BAM',
                        help='Input BAM (stdin)')
    parser.add_argument('-c', metavar='MIN_CLIP', type=int, default=25,
                        help='Minimum clip length (25)')
    parser.add_argument('-oc', metavar='MAX_OPP_CLIP', type=int, default=7,
                        help='Maximum opposite clip length (7)')
    parser.add_argument('-s', action='store_true', help='Input single-ended')
    parser.add_argument('-S', action='store_true', help='Input is SAM format')
    parser.add_argument('-h', '--help', action='help')
    args = parser.parse_args()
    # Bail out when there is no BAM file and no piped stdin.
    if args.i is None and sys.stdin.isatty():
        parser.print_help()
        exit(1)
    return args
# ============================================
# driver
# ============================================
class Usage(Exception):
    """Exception carrying a human-readable usage-error message in .msg."""
    def __init__(self, msg):
        # Exception.__init__ is intentionally not chained, matching the
        # original behaviour; only the message attribute is stored.
        self.msg = msg
def main():
    """Parse CLI arguments and run candidate extraction."""
    args = get_args()
    extract_candidates(args.i, args.S, args.a,
                       args.f, args.c, args.s, args.oc)
if __name__ == "__main__":
    try:
        sys.exit(main())
    # Python 2 comma syntax.  EPIPE (errno 32) is swallowed so piping the
    # output into e.g. `head` does not print a traceback.
    except IOError, e:
        if e.errno != 32: # ignore SIGPIPE error for streaming
            raise
| [
"ryan.smith.p@gmail.com"
] | ryan.smith.p@gmail.com |
dcc63f8e35bbed90cb915611a35fc5d5aac0d679 | dbcb887c6c64c983d0775c23283976b2d49ab2ca | /Week_07/212. 单词搜索 II.py | dc0fadc357cd9e3de7fe823dee2036c12eb63e71 | [] | no_license | thewayonly/algorithm008-class02 | 4bf29dfe78e1ca71159777b2758310a2f341ce50 | 14f58fef8fd1e9dd9071c62d90cadbae562d9e81 | refs/heads/master | 2022-11-11T06:34:21.952715 | 2020-07-05T16:03:11 | 2020-07-05T16:03:11 | 256,760,532 | 0 | 0 | null | 2020-04-18T13:28:33 | 2020-04-18T13:28:33 | null | UTF-8 | Python | false | false | 1,001 | py | class Solution:
def findWords(self, board: List[List[str]], words: List[str]) -> List[str]:
trie = {}
for word in words:
t = trie
for w in word:
t = t.setdefault(w, {})
t["end"] = 1
res = []
row = len(board)
col = len(board[0])
def dfs(i, j, trie, s):
c = board[i][j]
if c not in trie: return
trie = trie[c]
if "end" in trie and trie["end"] == 1:
res.append(s + c)
trie["end"] = 0
board[i][j] = "#"
for x, y in [[-1, 0], [1, 0], [0, 1], [0, -1]]:
tmp_i = x + i
tmp_j = y + j
if 0 <= tmp_i < row and 0 <= tmp_j < col and board[tmp_i][tmp_j] != "#":
dfs(tmp_i, tmp_j, trie, s + c)
board[i][j] = c
for i in range(row):
for j in range(col):
dfs(i, j, trie, "")
return res
| [
"noreply@github.com"
] | thewayonly.noreply@github.com |
7443e4de700cdd0989980dae21ce4a3eab131be0 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_2512.py | 882530706da7975403f43d82d608dbb898f6dfdd | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | # Tkinter Spinbox Widget
# NOTE(review): reference snippet only — neither `Tkinter` nor `sb` is
# defined in this file; it presumably assumes `import Tkinter` and
# `sb = Tkinter.Spinbox(...)` in its original context (Python 2 Tkinter).
Tkinter.Spinbox(values=(1,2,3,4))
# Clear the spinbox text, then insert the value 2 at the front.
sb.delete(0,"end")
sb.insert(0,2)
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
25077ba81aed6aa5c1b4298eaadfe78e0772cad2 | 2d384897041940fb1863c9e64d0966f7fa9d6418 | /highs_lows.py | 13ec8d9c46d10afab39c1c01e3b41f548828445f | [] | no_license | violet-zilan/basic | acd401e0ad7648ea90735a109c534d489c8bc4a5 | 50e2d26d0d282188a55e6b705ce5a56986ecf463 | refs/heads/master | 2020-06-03T00:08:15.381503 | 2019-06-18T13:46:01 | 2019-06-18T13:46:01 | 191,355,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | import csv
from datetime import datetime
from matplotlib import pyplot as plt
# Plot the daily high and low temperatures from the Death Valley 2014 CSV.
filename = 'death_valley_2014.csv'
with open(filename) as f:
    reader = csv.reader(f)
    header_row = next(reader)  # skip the column-header row
    dates, highs, lows = [], [], []
    for row in reader:
        try:
            current_date = datetime.strptime(row[0], "%Y-%m-%d")
            high = int(row[1])
            low = int(row[3])
        except ValueError:
            # Report the raw date field: `current_date` would be unbound
            # if the very first row failed to parse (NameError), and stale
            # if only a later row's temperature columns were missing.
            print(row[0], 'missing data')
        else:
            dates.append(current_date)
            highs.append(high)
            lows.append(low)

# Highs in red, lows in blue, with a light shaded band between them.
fig = plt.figure(dpi=128, figsize=(10, 6))
plt.plot(dates, highs, c='red', alpha=0.5)
plt.plot(dates, lows, c='blue', alpha=0.5)
plt.fill_between(dates, highs, lows, facecolor='blue', alpha=0.1)
plt.title("Daily high temperatures,2014", fontsize=24)
plt.xlabel('', fontsize=16)
fig.autofmt_xdate()  # slant the date labels so they do not overlap
plt.ylabel("Temperature(F)", fontsize=16)
plt.tick_params(axis='both', which='major', labelsize=10)
plt.show()
| [
"172874168@qq.com"
] | 172874168@qq.com |
306dfa12b32bb329c2df429c8ee04b42881c7573 | 390d19c3159133d8c688396cb11b4ed3f8178d09 | /SWExpertAcademy/LEARN/02. Array(2)/4836_색칠하기.py | bde1b13cfa11471faa7938d0f4a9a7d77298f7b5 | [] | no_license | JJayeee/CodingPractice | adba64cbd1d030b13a877f0b2e5ccc1269cb2e11 | 60f8dce48c04850b9b265a9a31f49eb6d9fc13c8 | refs/heads/master | 2021-08-16T17:14:01.161390 | 2021-07-16T00:42:18 | 2021-07-16T00:42:18 | 226,757,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | for tc in range(1, int(input()) + 1):
zero = [[0 for i in range(10)] for j in range(10)]
for i in range(int(input())):
s_x, s_y, e_x, e_y, num = map(int, input().split())
for x in range(s_x, e_x + 1):
for y in range(s_y, e_y + 1):
if num == 1:
if zero[x][y] != 1:
zero[x][y] += num
if num == 2:
zero[x][y] += num
count = 0
for x in range(10):
for y in range(10):
if zero[x][y] == 3:
count += 1
print('#%d %d' % (tc, count))
| [
"jay.hyundong@gmail.com"
] | jay.hyundong@gmail.com |
c412389603f3d984cffe1558f7a28b8d99a6f763 | 018fff4376ba900d419618c991a6e1c749da02f3 | /helpmeunpack.py | d9f871216df6f627b7fea91ce588fce25793f779 | [] | no_license | bragon9/hack_attic | 114db19b655e713c3652aae0849dab16870ba6fa | b184cda1cd8a2c3e51ad1b623ce14e186cd2c202 | refs/heads/master | 2020-06-26T16:47:20.245799 | 2019-08-02T05:12:32 | 2019-08-02T05:12:32 | 199,689,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | import requests
from base64 import b64decode
import struct
# Fetch the challenge payload (a base64-encoded blob of packed bytes).
# NOTE(review): the access token is hard-coded in the URL.
response = requests.get('https://hackattic.com/challenges/help_me_unpack/problem?access_token=96d50621d8c2e08d')
response_dict = response.json()
# Get the base64 encoded string and decode.
encoded_string = response_dict['bytes']
decoded_string = b64decode(encoded_string)
# Unpack int, uint, short, float, double, double using native ('@')
# sizes and alignment.  NOTE(review): assumes the server's byte layout
# matches this platform's padding — confirm if values ever look wrong.
unpacked_data = struct.unpack('iIhfdd', decoded_string)
# print(unpacked_data)
# Send the answer.
post_object = requests.post('https://hackattic.com/challenges/help_me_unpack/solve?access_token=96d50621d8c2e08d',
    json = {
        'int':unpacked_data[0],
        'uint':unpacked_data[1],
        'short':unpacked_data[2],
        'float':unpacked_data[3],
        'double':unpacked_data[4],
        # Reuses the little-endian double; presumably the big-endian field
        # encodes the same numeric value — verify against the challenge.
        'big_endian_double':unpacked_data[4]
    })
# Check the answer.
print(post_object.json()) | [
"bspagon@iofficecorp.com"
] | bspagon@iofficecorp.com |
609451b9560cf108d967f113bcf2b50c0fc344c1 | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/Lumber-Mill/Vert_Collections.py | 137d401450d899a55d7dcb7f686913f3674f335d | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,607 | py | """----------------------------------------------------
Create mesh
----------------------------------------------------"""
import bpy
from bpy.types import Operator
from bpy.props import FloatVectorProperty
from bpy_extras.object_utils import AddObjectHelper
from mathutils import Vector
from Lumber_Mill_add_on import Lumber_Mill # working
from Lumber_Mill_add_on import Estimator
# add mesh code---------------------------------------------------
def add_object(self, context):
    """Create a piece of dimensional lumber as a new mesh object.

    Board size comes from the Lumber_Mill module (length in feet,
    width/height in inches) scaled by the operator's ``scale`` vector;
    the values below are half-extents of a box.  ``context`` is unused;
    the Blender context is accessed through ``bpy.context``.
    """
    # scale of 1 in blender Imperial units is a foot
    inch = 1 / 12
    # unitScale = 3.28125 = 26.25 / 8
    # where 26' 3" is what blender will use as an 8' length imperial
    if bpy.context.scene.unit_settings.system == "IMPERIAL":
        unitScale = 3.2810409  # adjusted
        scale_x = self.scale.x / unitScale
        scale_y = self.scale.y / unitScale
        scale_z = self.scale.z / unitScale
    else:
        # METRIC or NONE.  The original `elif system == "METRIC" or "NONE"`
        # was always true (the literal "NONE" is truthy), so every
        # non-imperial system already took this branch; a plain else
        # states that intent without the misleading test.
        scale_x = self.scale.x
        scale_y = self.scale.y
        scale_z = self.scale.z
    length = Lumber_Mill.length
    width = Lumber_Mill.width
    height = Lumber_Mill.height
    # x,y,z format # Vector(( * scale_x, * scale_y, ))
    verts = [Vector((-length * scale_x, (inch * width) * scale_y, -(inch * height) * scale_z)),   #0
             Vector(( length * scale_x, (inch * width) * scale_y, -(inch * height) * scale_z)),   #1
             Vector(( length * scale_x, -(inch * width) * scale_y, -(inch * height) * scale_z)),  #2
             Vector((-length * scale_x, -(inch * width) * scale_y, -(inch * height) * scale_z)),  #3
             Vector((-length * scale_x, (inch * width) * scale_y, (inch * height) * scale_z)),    #4
             Vector(( length * scale_x, (inch * width) * scale_y, (inch * height) * scale_z)),    #5
             Vector(( length * scale_x, -(inch * width) * scale_y, (inch * height) * scale_z)),   #6
             Vector((-length * scale_x, -(inch * width) * scale_y, (inch * height) * scale_z)),   #7
             ]
    edges = [(0, 1), (1, 2), (2, 3), (3, 0),
             (0, 4), (1, 5), (2, 6), (3, 7),
             (4, 5), (5, 6), (6, 7), (7, 4)
             ]
    faces = [[0, 1, 2, 3],
             [0, 1, 5, 4],
             [1, 2, 6, 5],
             [2, 3, 7, 6],
             [0, 3, 7, 4],
             [4, 5, 6, 7]
             ]
    """
    Vertex Layout
         4 ------------5
      7---|----------6 |
      |   0----------|--1
      3--------------2

    Edges:
        - edges labeled from 0 clockwise to 3 and back to 0
        - next is vertical edges from bottom to top starting at (0,4)
          clockwise
        - top edges like the bottom plane start at 4 and go clockwise
          to 7 and back to 4
    Faces:
        - first face is bottom plane same 0 to 0 clockwise fashion as edges
          as viewed from inside of polygon facing out and down
        - the vertical planes start in the back from 0 to 4 clockwise facing
          the plane from the outside
        - this same pattern follows clockwise around the polygon as viewed
          from the top
        - the last face is 4 to 4 clockwise facing down on the polygon from
          the outside
    """
    mesh_data = bpy.data.meshes.new(name="Lumber")
    mesh_data.from_pydata(verts, edges, faces)
    mesh_data.update()
    # The object name is built from the Lumber_Mill dimension strings so
    # each board type gets a recognisable name in the outliner.
    obj_name = Lumber_Mill.obj_string1 + Lumber_Mill.obj_string2
    obj = bpy.data.objects.new(obj_name, mesh_data)
    scene = bpy.context.scene
    scene.objects.link(obj)
    bpy.context.scene.objects.active = obj
    obj.select = True
# makes add mesh code accessible as a button------------------------------
class OBJECT_OT_add_object(Operator, AddObjectHelper):
    """Create Dimensional Lumber"""
    bl_idname = "mesh.add_object"
    bl_label = "Add Lumber"
    bl_options = {'REGISTER', 'UNDO'}
    # Per-axis scaling applied to the generated board geometry.
    scale = FloatVectorProperty(
        name="scale",
        default=(1.0, 1.0, 1.0),
        subtype='TRANSLATION',
        description="scaling",
        )
    def execute(self, context):
        """Build the board mesh and record its name in the estimator list."""
        add_object(self, context)
        string = Lumber_Mill.obj_string1 + Lumber_Mill.obj_string2
        if string not in Estimator.boardNames:
            Estimator.boardNames.append(string)
        #testing code (debug output only; doubles are full extents since
        # the stored dimensions are half-extents)
        dimensions = [Lumber_Mill.width * 2, Lumber_Mill.height * 2,
                      Lumber_Mill.length * 2]
        for value in dimensions:
            print(value)
        # end testing
        return {'FINISHED'}
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
9c6aab143e4a2a732bae263e9f1dba5b5a3201b0 | 1581f1d66d6835b2c271295e3251c2dde239fec8 | /product/migrations/0019_productartist.py | af350b245e6c9a2697a285bbe93a709662ffdeff | [] | no_license | abinash-kumar/pythod | 527659e3bdd161f9abcaaa9182dfe58044b3ff66 | 1469dc0cd9d6d72b2fe2e69f99542e470bea807b | refs/heads/master | 2023-01-30T02:54:10.729606 | 2020-02-24T07:18:51 | 2020-02-24T07:18:51 | 242,670,715 | 0 | 0 | null | 2023-01-25T13:57:52 | 2020-02-24T07:16:02 | Python | UTF-8 | Python | false | false | 1,003 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2017-08-06 18:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the ProductArtist join table: a Product linked to its artist
    (a Customer) with a commission amount and a short artist statement."""
    dependencies = [
        ('customer', '0008_remove_customer_is_designer'),
        ('product', '0018_cart_product_varient'),
    ]
    operations = [
        migrations.CreateModel(
            name='ProductArtist',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('commission', models.DecimalField(decimal_places=2, max_digits=15)),
                ('artist_words', models.CharField(max_length=200)),
                # CASCADE: deleting the customer or product removes the link row.
                ('artist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='customer.Customer')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.Product')),
            ],
        ),
    ]
| [
"abinashlv@AbinashSymboMac.local"
] | abinashlv@AbinashSymboMac.local |
f8fb6a8ce2182c21de8ba794aab5c567f4e9de86 | 6f7b37dd5876dad69fd259cd91c8e00db23b0912 | /examples/catalog_delete.py | a653bc6d7c8514ac70711a3574b659f7b977453b | [
"BSD-3-Clause"
] | permissive | zackster/HipHopGoblin | 78f98e161124f487a784e505dcddd26cfdbfc170 | d994759906e581f365fd954837c3f29a5266dcd8 | refs/heads/master | 2021-01-16T20:55:24.877152 | 2011-08-23T01:42:11 | 2011-08-23T01:42:11 | 2,250,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | # Uncomment to set the API key explicitly. Otherwise Pyechonest will
# look in the ECHO_NEST_API_KEY environment variable for the key.
#from pyechonest import config
#config.ECHO_NEST_API_KEY='YOUR API KEY'
from pyechonest import catalog, song
import time
# Build a song catalog, queue a delete of item "1", then poll the update
# ticket (12 x 5 s = 60 s max) before listing what remains.  Python 2 code.
c = catalog.Catalog('example_songs', type='song')
items = [{ 'action': 'delete',
           'item': { 'item_id': '1' }}]
ticket = c.update(items)
# Poll the ticket; stop as soon as the update has been applied.
for i in range(0,12):
    time.sleep(5)
    if c.status(ticket)['ticket_status'] == 'complete':
        break
if c.status(ticket)['ticket_status'] == 'complete':
    print 'Updated. read_items() titles:'
    for item in c.read_items():
        # Resolved entries come back as song.Song objects; unresolved ones
        # are plain dicts still carrying the original request.
        if type(item) is song.Song:
            print '  "%s"' % (item.title, )
        else:
            print '  "%s" not resolved' % (item['request']['song_name'])
            print '  %s' % (item,)
    cp = c.get_profile()
    print '\nCatalog profile:'
    for key, val in cp.iteritems():
        print '  %-15s %s' % (key, val)
else:
    print 'Update did not complete within 60 seconds.'
| [
"hiphopgoblin@web146.webfaction.com"
] | hiphopgoblin@web146.webfaction.com |
36a72381739f42955ca9c2e0d66f68e96de81a78 | 8bd7d66e7bfb28496a63fcaddaa5ce0d109dc854 | /DataFrame.py | 283e738d752f943ee60fa0ca2ae5ee196c341de4 | [] | no_license | nandda/PySparkExamples | f2904d61454a91409cab5537f7ff62a8ab0c6a33 | f888285af34f8080455f0e3bc32bd2551e3a6881 | refs/heads/main | 2023-01-02T09:48:37.992494 | 2020-10-23T07:34:18 | 2020-10-23T07:34:18 | 306,562,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | from pyspark.sql import SparkSession
# Start a single-core local Spark session and build a small department RDD.
spark = SparkSession.builder.master("local[1]").appName("DataFrame").getOrCreate()
data = [("Finance", 10), ("Marketing", 20), ("Sales", 30), ("IT", 40)]
rdd1 = spark.sparkContext.parallelize(data)
# Earlier experiments kept for reference: schema-less and column-name-only
# conversions of the RDD to a DataFrame.
# DF1 = rdd1.toDF()
# DF1.printSchema()
# DF1.show()
columns = ["DeptName", "DeptID"]
# DF1 = rdd1.toDF(columns)
# DF1.printSchema()
# DF1.show()
# deptRDD = spark.createDataFrame(data=rdd1, schema=columns)
# deptRDD.show()
from pyspark.sql.types import StringType, StructType, StructField
# Explicit-schema variant.  NOTE(review): DeptID values are ints but the
# field is declared StringType — verify this matches the intended schema.
deptSchema = StructType([
    StructField('deptName', StringType(), True),
    StructField('deptId', StringType(), True)])
deptRDD1 = spark.createDataFrame(data=rdd1, schema=deptSchema)
deptRDD1.show()
| [
"ext-nanda.govind@handaten.com"
] | ext-nanda.govind@handaten.com |
7f24cdd401f508a41686afc457fd0714637e52af | bc505ccef7512334301500557138b20fc7a7bf03 | /script/get_image.py | 547ae63237007393fdc41bb81cd3c0b970eaa835 | [] | no_license | nanigasi-san/Image_manipulation | 4b2d6b38ac292fe71a6a895394e10375878d50a1 | c7a8781c0aaea8498783c808d2d120b50126f5ac | refs/heads/master | 2020-04-29T06:34:42.158899 | 2019-03-24T12:56:13 | 2019-03-24T12:56:13 | 175,920,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | import requests
# Download one image and save it under image/.
IMAGE_URL = "http://img.moeimg.net/wp-content/uploads/archives10/10658/46_e4ejovby8g.jpg"
response = requests.get(IMAGE_URL)  # renamed from the misspelled "responce"
with open("image/megane.jpg", "wb") as image_file:
    image_file.write(response.content)
| [
"nanigasi.py@gmail.com"
] | nanigasi.py@gmail.com |
5c32be7d766d6cfaed4dbea6c2ceecbc77ca44eb | 0d7f112c0f6337cd543bfe6a720e7f50629faa57 | /Evidencia52_lista_mayor.py | 801a3d7790622fd26744d92c70087bf1c7be42c5 | [] | no_license | DanielJmz12/Evidenicas-Programacion-avanzada | 40023b1367fd883136e7c0bf089f931a506b8cc5 | 117e31acb5c05a142434319087b325f705030499 | refs/heads/main | 2023-01-12T20:44:23.403714 | 2020-11-19T23:54:07 | 2020-11-19T23:54:07 | 314,397,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | lista = []
# Read five integers from the user into `lista` (created just above).
for _ in range(5):
    lista.append(int(input("ingresa valor: ")))
# Largest value; max() replaces the manual scan with an identical result.
mayor = max(lista)
print("lista: ")
print(lista)
print("numero mayor de la lista: ")
print(mayor)
| [
"noreply@github.com"
] | DanielJmz12.noreply@github.com |
5079d0edd8954b896b9d0c197a55d776cc64356a | c0e3f6ed152434fdce6d7e6336a556f1a2f1a255 | /Seq2SeqBot/bot.pyw | 2b68e271727349841d93fda9e0555c6e9906ad4e | [] | no_license | BasantHussein/MovieChatbot | 2511104cb746c9ab5f5625434726b7ef6df13e31 | 424ca3b36853e99651bd958a86f28bff089b36db | refs/heads/main | 2023-02-01T08:57:06.046230 | 2020-12-14T21:38:07 | 2020-12-14T21:38:07 | 303,817,378 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,668 | pyw | import _thread
from bot_util import *
import tensorflow as tf
import execute
# Create the TF session and load the trained seq2seq model once at startup.
sess = tf.Session()
sess, model, enc_vocab, rev_dec_vocab = execute.init_session(sess, conf='seq2seq_serve.ini')
# NOTE(review): Tk, Text, END, FilteredMessage, LoadMyEntry etc. presumably
# come from bot_util's star import — confirm against bot_util.
def ClickMe():
    """Send the Entry box text through the model and show the reply."""
    # Write message to chat window
    EntryText = FilteredMessage(EntryBox.get("0.0", END))
    LoadMyEntry(ChatLog, EntryText, sess, model, enc_vocab, rev_dec_vocab)
    # Scroll to the bottom of chat windows
    ChatLog.yview(END)
    # Erase previous message in Entry Box
    EntryBox.delete("0.0", END)
def PressAction(event):
    """<KeyRelease-Return> handler: re-enable the entry box, then send."""
    EntryBox.config(state=NORMAL)
    ClickMe()
def Disable(event):
    """<Return> handler: disable the box so Return does not insert a newline."""
    EntryBox.config(state=DISABLED)
#Create a window
root = Tk()
root.title('NLP_Bot')
root.geometry("400x550")
#root.resizable(width=FALSE, height=FALSE)
#Chat window
ChatLog = Text(root, bd=0, bg="#FAFAFA", height="8", width="50", font="Arial",)
ChatLog.insert(END, "Hey there!\n")
ChatLog.config(state=DISABLED)
#Bind a scrollbar to the Chat window
sbar = Scrollbar(root, command=ChatLog.yview, cursor="heart")
ChatLog['yscrollcommand'] = sbar.set
#Button to send message
SendButton = Button(root, font=30, text="Send", width="5", height="5", bd=0, bg="#2ecc71", activebackground="#27ae60",
                    fg="#FFFFFF", command=ClickMe)
#Box to enter message
EntryBox = Text(root, bd=0, bg="white",width="29", height="5", font="Arial", padx=5,pady=5)
EntryBox.bind("<Return>", Disable)
EntryBox.bind("<KeyRelease-Return>", PressAction)
#Place on screen (absolute geometry for the fixed 400x550 window)
sbar.place(x=376,y=6, height=486)
ChatLog.place(x=6,y=6, height=486, width=370)
EntryBox.place(x=110, y=501, height=40, width=280)
SendButton.place(x=6, y=501, height=40, width=100)
root.mainloop()
| [
"basant_husseinn@yahoo.com"
] | basant_husseinn@yahoo.com |
a429e2412429883abf369f2da30c67d52fb74a76 | dd77440f5b07774106c7713d229f121335808972 | /venv/bin/pip | 6787b8b74e62633e8c3ebadc76d340c0e907dde1 | [] | no_license | SOUFIANE-AIT-TALEB/Project-SONDAGE | fb09922fc8e85d4bbe1d740e609b72ed65e8097a | 0c86492c394f5e64cb7dcb65d5d14011c4d6664f | refs/heads/master | 2022-04-14T03:24:45.014064 | 2020-03-18T15:26:46 | 2020-03-18T15:26:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | #!/home/abdelaziz/PycharmProjects/Django1/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Auto-generated setuptools console-script wrapper: strip any
    # "-script.py"/".exe" suffix so pip reports a clean program name,
    # then dispatch to pip's registered entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
    )
| [
"Abdelaziz.59140@gmail.com"
] | Abdelaziz.59140@gmail.com | |
55fd036ec13741608e24a0b72ebb2d381c4b2e36 | e85f9c4d8cf8150416942d7b47a727aaab252e01 | /s2/src/download_s2.py | 37af00a6f539b9ad9a1254a87e7bc6d47a962437 | [] | no_license | iluvcoding/maslulcz | 27a7ab93e9a94e569b38689432050419b3aa8db4 | 34db15cfe63b19856885fe66b1568de148bdbc57 | refs/heads/master | 2020-06-10T15:06:56.467101 | 2019-06-25T07:51:54 | 2019-06-25T07:51:54 | 193,660,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,476 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 14 18:42:32 2018
@author: tomer
"""
from sentinelsat.sentinel import SentinelAPI
#import datetime as dt
import os
def download_s2(user, password, dir_raw, dir_nc, start_date, end_date, footprint, pr_status):
    """Query Copernicus SciHub for Sentinel-2 L1C products over *footprint*
    between *start_date* and *end_date*; save each product's footprint WKT
    next to dir_nc and download archives not yet marked done in
    *pr_status* (dict: "<tile>_<time>" -> completed flag).  The mutated
    pr_status dict is returned by the statement following this body.
    """
    api = SentinelAPI(user, password, 'https://scihub.copernicus.eu/dhus/')
    #footprint = "POLYGON((73 11, 74 11, 74 14, 73 14, 73 11))"
    products = api.query(footprint, date=(start_date, end_date), producttype='S2MSI1C')
    for product in products:
        productInfo = api.get_product_odata(product)
        title = productInfo['title']
        # Key built from the product title fields: tile number + sensing time.
        tileNo_time = '%s_%s' % (title.split('_')[5], title.split('_')[2])
        try:
            downloadFlag = not pr_status[tileNo_time]
        except KeyError:
            # First sighting of this tile/time: record it as not yet done.
            pr_status[tileNo_time] = False
            downloadFlag = True
        wkt_path = os.path.join(os.path.dirname(dir_nc), "wkt/%s.wkt" % tileNo_time)
        if not os.path.exists(wkt_path):
            # `with` replaces the open/write/close triple, which leaked the
            # handle on a write error and shadowed the `file` builtin.
            with open(wkt_path, "a") as wkt_handle:
                wkt_handle.write(productInfo['footprint'])
        if downloadFlag:
            api.download(product, dir_raw, checksum=True)
return pr_status | [
"rishabh@satyukt.com"
] | rishabh@satyukt.com |
5909c96e2b27a998bfca09db38f626d1454c4745 | 41586d36dd07c06860b9808c760e2b0212ed846b | /network/analyzer/nmap/actions.py | e2261e6377d3b22612de116e41001eef1ab405d0 | [] | no_license | SulinOS/SulinRepository | 4d5551861f57bc1f4bec6879dfe28ce68c7c125d | 9686811a1e06080f63199233561a922fe1f78d67 | refs/heads/master | 2021-06-15T21:34:25.039979 | 2021-06-05T13:43:34 | 2021-06-05T13:43:34 | 207,672,864 | 6 | 3 | null | 2019-12-06T08:11:22 | 2019-09-10T22:16:17 | Python | UTF-8 | Python | false | false | 711 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from inary.actionsapi import autotools
from inary.actionsapi import inarytools
from inary.actionsapi import shelltools
from inary.actionsapi import get
# Export PYTHON=python2 for the build environment.
# NOTE(review): presumably nmap's bundled build tooling expects Python 2 —
# confirm whether this is still required.
shelltools.export("PYTHON","python2")
def setup():
    """Run autoconf, then configure nmap (zenmap GUI disabled, bundled libpcap)."""
    autotools.autoconf()
    autotools.configure("--prefix=/usr \
                         --without-zenmap\
                         --libexecdir=/usr/libexec \
                         --mandir=/usr/share/man \
                         --with-libpcap=included ")
def build():
    """Compile nmap, wrapped in `set +u`/`set -u` shell toggles."""
    shelltools.system("set +u")
    autotools.make()
    shelltools.system("set -u")
def install():
    """Install into the package staging directory (DESTDIR)."""
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
| [
"you@example.com"
] | you@example.com |
666fd1780ab7db7c0255218adbf27ecaf72478b6 | 8d6390acd2b4758df5613b2257c990b3c37aad0b | /tareas/migrations/0002_auto_20210316_1205.py | 2f034cd1ff5f33ee15727dda9099a9926b7cc0e9 | [] | no_license | BossDuck/escuela | 2f5833da253db5eedee624eef10f82718532e9eb | 9716df271d0d35d3cdd94293279e910af6f4c05c | refs/heads/master | 2023-04-04T23:16:05.326835 | 2021-04-19T01:21:31 | 2021-04-19T01:21:31 | 359,282,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | # Generated by Django 3.1.6 on 2021-03-16 18:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by `makemigrations`; keep in its canonical generated form.
    dependencies = [
        ('materias', '0002_auto_20210314_1529'),
        ('tareas', '0001_initial'),
    ]
    operations = [
        # Drop the old per-user link on Tarea...
        migrations.RemoveField(
            model_name='tarea',
            name='user',
        ),
        # ...and attach each Tarea to a Materia instead (cascade on delete).
        migrations.AddField(
            model_name='tarea',
            name='materia',
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='materias.materia', verbose_name='Materia'),
        ),
    ]
| [
"fauchis741@gmail.com"
] | fauchis741@gmail.com |
1126d4c7b56e50b4bff490faffef1d41697fae11 | 91a1a60570d5533e82f421d0548852170d4dc047 | /nexus/actions/scitech.py | f4ac23fed434242377114f728f00e9584222597d | [
"Unlicense"
] | permissive | MrHamdu/hyperboria | 2b666618799f1664b1223b05a197c1fc826a1046 | 7db858386f1a20e8d49bc16f53bfd7f1e4d03f7e | refs/heads/master | 2023-04-27T16:57:47.593150 | 2021-05-03T12:01:34 | 2021-05-03T12:01:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,329 | py | from html import unescape
from bs4 import BeautifulSoup
from nexus.actions.common import canonize_doi
from nexus.models.proto.operation_pb2 import \
DocumentOperation as DocumentOperationPb
from nexus.models.proto.operation_pb2 import UpdateDocument as UpdateDocumentPb
from nexus.models.proto.scitech_pb2 import Scitech as ScitechPb
from nexus.models.proto.typed_document_pb2 import \
TypedDocument as TypedDocumentPb
from nexus.nlptools.language_detect import detect_language
from nexus.nlptools.utils import (
despace,
despace_full,
)
from .base import BaseAction
class CleanScitechAction(BaseAction):
    """Normalizes a Scitech record in place: whitespace, markup, language
    detection and identifier canonicalization."""

    async def do(self, scitech_pb: ScitechPb) -> ScitechPb:
        # Collapse whitespace inside every author name.
        for idx, raw_author in enumerate(scitech_pb.authors):
            scitech_pb.authors[idx] = despace_full(raw_author)
        # Strip HTML/JATS markup from the description while keeping
        # paragraph boundaries as newlines.
        if scitech_pb.description:
            soup = BeautifulSoup(unescape(scitech_pb.description), 'lxml')
            for node in soup.select(r'p, title, jats\:title, jats\:p'):
                node.replace_with(f'\n{node.text.strip()}\n')
            scitech_pb.description = despace(soup.text.strip())
        scitech_pb.series = despace_full(scitech_pb.series)
        scitech_pb.title = despace_full(scitech_pb.title)
        # Infer the metadata language from title+description when missing.
        if not scitech_pb.meta_language and (scitech_pb.title or scitech_pb.description):
            guessed = detect_language(scitech_pb.title + ' ' + scitech_pb.description)
            if guessed:
                scitech_pb.meta_language = guessed
        # Content language falls back to the metadata language.
        if not scitech_pb.language:
            scitech_pb.language = scitech_pb.meta_language
        # Canonical lowercase forms for identifiers.
        scitech_pb.md5 = scitech_pb.md5.lower()
        scitech_pb.extension = scitech_pb.extension.lower()
        scitech_pb.doi = canonize_doi(scitech_pb.doi)
        # Upstream sometimes stores the literal string 'None' for edition.
        if scitech_pb.edition == 'None':
            scitech_pb.edition = ''
        return scitech_pb
return scitech_pb
class ScitechPbToDocumentOperationBytesAction(BaseAction):
    """Wraps a Scitech record into a serialized update-document operation."""

    async def do(self, item: ScitechPb) -> bytes:
        typed_document = TypedDocumentPb(scitech=item)
        update = UpdateDocumentPb(reindex=True, typed_document=typed_document)
        operation = DocumentOperationPb(update_document=update)
        return operation.SerializeToString()
| [
"fist.of.the.first.pirates@gmail.com"
] | fist.of.the.first.pirates@gmail.com |
5cee1d96d0e7c3221e47c53745f82ceff4c14a14 | 78099e90f5224623f58db79e6e1f91fe8d44a0b3 | /sumn.py | a77ba09bf07ef1c117aed4753c64931ce1c34c35 | [] | no_license | savitadevi/function | 8151d6e79657d086f9c095f94cd0706e12cbea8f | 4d521dc546fd8c9b037cb78185e69a2bfbf6aa06 | refs/heads/main | 2023-06-14T14:13:15.493256 | 2021-07-13T06:55:23 | 2021-07-13T06:55:23 | 382,122,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | # sum of number
# sum of two numbers
def add_number(num1, num2):
    """Print the sum of two numbers and return it.

    The original only printed the sum (returning None); returning the
    value as well lets callers use the result programmatically while
    keeping the print side effect intact.
    """
    total = num1 + num2
    print(total)
    return total


add_number(56, 12)
# element-wise sum of two lists of numbers
def add_number_list(a, b):
    """Print the element-wise sums of two lists and return them.

    The original indexed both lists with ``while i < len(a)``, which
    raised IndexError whenever ``b`` was shorter than ``a``.  ``zip``
    pairs elements safely (extra elements in the longer list are
    ignored), and the result is also returned for programmatic use.
    """
    sums = [x + y for x, y in zip(a, b)]
    print(sums)
    return sums


add_number_list([50, 60, 10], [10, 20, 13])
"savitadevi20@navgurukul.org"
] | savitadevi20@navgurukul.org |
e3cb32c489d6ffd15cae53410f28b69ca1abe807 | 2736148bac05093d94a560577cec5ec02ca7482d | /GlyphDataTest.py | b970be09f7b9a6117b05547c3f5322118fdae6c7 | [
"MIT"
] | permissive | NewcastleRSE/ADV-PowerBI | e0a069e7ce0d654b226e82616b2a671bc12b7921 | 151349759c32eb546bbcb31cec1162c4e519dadf | refs/heads/master | 2023-04-09T11:19:00.651385 | 2023-03-15T11:52:04 | 2023-03-15T11:52:04 | 219,758,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,677 | py | import bpy
import bmesh
from mathutils import Vector
import csv
import os
import glob
import math
import sys
import json
from random import *
# code to help Blender find local python modules
filepath = bpy.data.filepath
dir = os.path.dirname(filepath)
if not dir in sys.path:
sys.path.append(dir)
dir = dir+'\Glyphs' # point to glyph code directory
if not dir in sys.path:
sys.path.append(dir)
#----------------------------------------
from Material import makeFlatColor
from Material import makeEmissive
from Material import makeEmissiveAlpha
from Glyph import Glyph
from Glyph import createGlyph
from Glyph import initGlyph
from KeyTemperature import drawKeyTemperature
from latlonTOukng import WGS84toOSGB36
E_adj = -424500
N_adj = -564500
# Detect current working directory -------
currentDir = os.path.dirname(__file__)
strs = currentDir.split("\\")
if ".blend" in strs[len(strs)-1]:
currentDir = ""
x = 0
for strng in strs:
currentDir = currentDir + strng
currentDir = currentDir + "\\"
x = x + 1
if x == (len(strs) - 1):
break
os.chdir( currentDir )
print( os.getcwd() )
#--------------------------------------------------------------
text_material = bpy.data.materials.get("Text");
bg_colour = bpy.data.materials.get("BG-Material");
axis_colour = text_material
axis_value_colour = text_material
axis_label_colour = text_material
gridlines_colour = text_material
axes_z = -0.5
x_axis_length = 12.5
start_x = -5.5
end_x = start_x + x_axis_length
y_axis_length = 9.5
start_y = -4.25
end_y = start_y + y_axis_length
x_axis_values = []
y_axis_values = []
properties = None
def drawYAxis(min, max, inc, y_axis_label):
    """Draw the Y axis: tick labels, gridlines, the axis line and a label.

    Appends the tick values (padded by one increment at each end) to the
    module-level list ``y_axis_values``, which the glyph-placement code
    reads later.  ``inc == 0.0`` (flat data) falls back to min-1/min/min+1.
    NOTE(review): the parameters shadow the builtins min/max.
    """
    idx = min;
    if (inc==0.0):
        y_axis_values.append(min-1)
        y_axis_values.append(min)
        y_axis_values.append(min+1)
    else:
        y_axis_values.append(min - inc)
        while idx <= max:
            y_axis_values.append(idx);
            idx += inc;
        #y_axis_values.append(max)
        y_axis_values.append(max + inc)
    num_items = len(y_axis_values);
    index = 0;
    # Spacing between consecutive ticks along the fixed-length axis.
    increment = y_axis_length/(num_items-1);
    for y in y_axis_values:
        # Tick value text, right-aligned just left of the axis.
        bpy.ops.object.text_add(location=(-6.0, start_y+(index*increment)-0.125, axes_z));
        txt = bpy.context.object;
        txt.data.body = str(round(y,2));
        txt.data.extrude = 0.02;
        txt.data.size = 0.4;
        txt.data.align_x = 'RIGHT';
        txt.data.materials.append(axis_value_colour);
        if(index!=0):
            # Thin horizontal gridline for every tick after the first.
            bpy.ops.mesh.primitive_cube_add(location=(0.7, start_y+(index*increment), axes_z));
            cube = bpy.context.object;
            cube.dimensions = (x_axis_length+0.1,0.01,0.01);
            cube.data.materials.append(gridlines_colour);
        else:
            # Thicker bar for the axis line itself (first tick).
            bpy.ops.mesh.primitive_cube_add(location=(0.75, start_y+(index*increment), axes_z));
            cube = bpy.context.object;
            cube.dimensions = (x_axis_length+0.25,0.05,0.01);
            cube.data.materials.append(axis_colour);
        index = index + 1;
    #axis label, rotated to run vertically alongside the axis
    bpy.ops.object.text_add(location=(-7.45, 0.0, axes_z));
    txt = bpy.context.object;
    txt.data.body = y_axis_label;
    txt.data.extrude = 0.02;
    txt.data.size = 0.5;
    txt.rotation_euler = (0.0, 0.0, 1.5708); #radians!
    txt.data.materials.append(axis_label_colour);
    txt.data.align_x = 'CENTER';
def drawXAxis(min, max, inc, x_axis_label):
    """Draw the X axis: tick labels, gridlines, the axis line and a label.

    Mirrors drawYAxis: appends padded tick values to the module-level
    list ``x_axis_values`` used later for glyph placement.
    NOTE(review): the parameters shadow the builtins min/max.
    """
    idx = min;
    if (inc==0.0):
        x_axis_values.append(min-1)
        x_axis_values.append(min)
        x_axis_values.append(min+1)
    else:
        x_axis_values.append(min - inc)
        while idx <= max:
            x_axis_values.append(idx);
            idx += inc;
        #x_axis_values.append(max)
        x_axis_values.append(max + inc)
    num_items = len(x_axis_values);
    index = 0;
    # Spacing between consecutive ticks along the fixed-length axis.
    increment = x_axis_length/(num_items-1);
    for x in x_axis_values:
        # Tick value text, centred below the axis.
        bpy.ops.object.text_add(location=(start_x+(index*increment), -5.0, axes_z));
        txt = bpy.context.object;
        txt.data.body = str(round(x,2));
        txt.data.extrude = 0.02;
        txt.data.size = 0.4;
        txt.data.align_x = 'CENTER';
        txt.data.materials.append(axis_value_colour);
        if(index!=0):
            # Thin vertical gridline for every tick after the first.
            bpy.ops.mesh.primitive_cube_add(location=(start_x+(index*increment), 0.45, axes_z));
            cube = bpy.context.object;
            cube.dimensions = (0.01,y_axis_length+0.1,0.01);
            cube.data.materials.append(gridlines_colour);
        else:
            # Thicker bar for the axis line itself (first tick).
            bpy.ops.mesh.primitive_cube_add(location=(start_x+(index*increment), 0.5, axes_z));
            cube = bpy.context.object;
            cube.dimensions = (0.05,y_axis_length+0.25,0.01);
            cube.data.materials.append(axis_colour);
        index = index + 1;
    #axis label
    bpy.ops.object.text_add(location=(0, -5.5, axes_z));
    txt = bpy.context.object;
    txt.data.body = x_axis_label;
    txt.data.extrude = 0.02;
    txt.data.size = 0.5;
    txt.data.materials.append(axis_label_colour);
    txt.data.align_x = 'CENTER';
    txt.data.align_y = 'CENTER';
# Glyph rendering parameters.
minVariance = 0.0
maxVariance = 1.0
ortho = True
numGlyphs = 7
# Data bounding box; filled by the first pass over the points below.
min_x = None
max_x = None
min_y = None
max_y = None
##----------------------------------------- Code from here
# LOAD JSON DATA (passed on the Blender command line after "--")
argv = sys.argv
argv = argv[argv.index("--") + 1:] # get all args after "--"
j_data = json.loads(argv[0])
values = j_data["data"]
background = j_data["background"]  # "map" or "graph"
# LOAD PROPERTIES JSON (colour configuration)
with open("properties.json", 'r') as props:
    properties = json.load(props)
# Build emissive materials for axes/labels/gridlines from the configured
# colours, then re-fetch them from bpy.data so later lookups share the
# same material datablocks.
ax_col = properties["properties"]["axis_colour"]
axis_colour = makeEmissive(ax_col, 'AxisMaterial')
axis_colour = bpy.data.materials['AxisMaterial']
tx_col = properties["properties"]["axis_value_colour"]
axis_value_colour = makeEmissive(tx_col, 'AxisValueMaterial')
axis_value_colour = bpy.data.materials['AxisValueMaterial']
ax_col = properties["properties"]["axis_label_colour"]
axis_label_colour = makeEmissive(ax_col, 'AxisLabelMaterial')
axis_label_colour = bpy.data.materials['AxisLabelMaterial']
gx_col = properties["properties"]["gridlines_colour"]
gridlines_colour = makeEmissive(gx_col, 'GridlinesMaterial')
gridlines_colour = bpy.data.materials['GridlinesMaterial']
bg_col = properties["properties"]["background_colour"] # Background Colour
bg_colour = makeEmissiveAlpha(bg_col, 'New-BG-Material')
bg_colour = bpy.data.materials['New-BG-Material']
bg_colour.blend_method = 'BLEND'
# Apply the background material to the backdrop planes.
bpy.data.objects["Plane"].data.materials[0] = bg_colour
bpy.data.objects["Plane.001"].data.materials[0] = bg_colour
bpy.data.objects["Plane.002"].data.materials[0] = bg_colour
# Toggle between the flat backdrop and the map background collection.
if background == "map":
    bpy.data.objects["Plane"].hide_render = True
    bpy.data.collections['Background'].hide_render = False
else:
    bpy.data.objects["Plane"].hide_render = False
    bpy.data.collections['Background'].hide_render = True
# Colour the legend/key text objects.
bpy.data.objects["Least.U.TXT"].data.materials[0] = axis_label_colour
bpy.data.objects["Most.U.TXT"].data.materials[0] = axis_label_colour
bpy.data.objects["No.Data.TXT"].data.materials[0] = axis_label_colour
bpy.data.objects["Uncertainty.TXT"].data.materials[0] = axis_label_colour
bpy.data.objects["KeyTitle.TXT"].data.materials[0] = axis_label_colour
# Fill the key/legend text bodies (spaces become line breaks).
key_label = j_data["key_name"]
key_low = j_data["key_values"]["low_value"]
key_high = j_data["key_values"]["high_value"]
value_key_label = j_data["value_key_label"]
key_low = key_low.replace(' ', '\n')
key_high = key_high.replace(' ', '\n')
bpy.data.objects["Uncertainty.TXT"].data.body = key_label
bpy.data.objects["Least.U.TXT"].data.body = key_low
bpy.data.objects["Most.U.TXT"].data.body = key_high
bpy.data.objects["KeyTitle.TXT"].data.body = value_key_label
# ---------------------------------------------------------------
# First pass over the points: compute the x/y bounding box and the
# min/max of the optional risk value "r".
min_risk = None
max_risk = None
for idx in range(0, len(values)) :
    datavalues = values[idx]
    lon = float(datavalues["x"])
    lat = float(datavalues["y"])
    if background == "map":
        # Convert WGS84 lat/lon to OSGB36 and shift onto the tile.
        E, N = WGS84toOSGB36(lat, lon)
        d_x = (E + E_adj)
        d_y = (N + N_adj)
        if d_x > 500 or d_x < -500 or d_y > 500 or d_y < -500:
            #not on tile, ignore
            continue
    else:
        d_x = lon
        d_y = lat
    if (min_x == None or d_x < min_x):
        min_x = d_x
    if (max_x == None or d_x > max_x):
        max_x = d_x
    if (min_y == None or d_y < min_y):
        min_y = d_y
    if (max_y == None or d_y > max_y):
        max_y = d_y
    if 'r' in datavalues:
        d_r = float(datavalues["r"])
        if (min_risk == None or min_risk > d_r):
            min_risk = d_r
        if (max_risk == None or max_risk < d_r):
            max_risk = d_r
# ----------------------------------------------------------------- RENDER
# NOTE(review): if every point was off-tile, min_x etc. stay None and the
# camera/axis maths below would raise TypeError — confirm inputs always
# contain at least one on-tile point.
bpy.context.window.scene = bpy.data.scenes['Overlay']
if background == "graph":
    # Axes with ~10x8 divisions over the data's bounding box.
    x_inc = (max_x - min_x)/10.0
    y_inc = (max_y - min_y)/8.0
    drawXAxis(min_x, max_x, x_inc, j_data["x_axis_label"])
    drawYAxis(min_y, max_y, y_inc, j_data["y_axis_label"])
    drawKeyTemperature(0.0, ortho, axis_value_colour)
if background == "map":
    # Frame the orthographic camera over the data's bounding box.
    bpy.context.window.scene = bpy.data.scenes['Scene']
    cam_offset = ((max_x - min_x) / 3.0)
    cam_offset = max(0.2, cam_offset)
    cam_x = ((min_x + max_x) / 2.0) + cam_offset
    cam_y = (min_y + max_y) / 2.0
    cam_orth_scale_x = (max_x - min_x) * 1.1
    cam_orth_scale_y = (max_y - min_y) * 1.1
    cam_orth_scale = max(cam_orth_scale_x, cam_orth_scale_y) * 2.0
    cam_orth_scale = max(cam_orth_scale, 1.5)
    camera = bpy.context.scene.objects["Camera"]
    camera.location = (cam_x, cam_y, 500)
    camera.data.ortho_scale = cam_orth_scale
# Second pass: create one glyph per data point.
for idx in range(0, len(values)) :
    datavalues = values[idx]
    lon = float(datavalues["x"])
    lat = float(datavalues["y"])
    if background == "map":
        E, N = WGS84toOSGB36(lat, lon)
        d_x = (E + E_adj)
        d_y = (N + N_adj)
        if d_x > 500 or d_x < -500 or d_y > 500 or d_y < -500:
            #not on tile, ignore
            #print("glyph not on tile")
            continue
    else:
        # Map data coordinates onto the drawn axes using the tick ranges
        # recorded by drawXAxis/drawYAxis.
        d_x = start_x + ((lon - x_axis_values[0]) * (x_axis_length / (x_axis_values[len(x_axis_values)-1] - x_axis_values[0])))
        d_y = start_y + ((lat - y_axis_values[0]) * (y_axis_length / (y_axis_values[len(y_axis_values)-1] - y_axis_values[0])))
    d_uncertainty = float(datavalues["u"])
    d_temperature = float(datavalues["v"])
    if 'r' in datavalues:
        # Normalise risk to [0,1] over the observed range.
        d_risk = float(datavalues["r"])
        risk_range = max_risk - min_risk
        if (risk_range <= 0):
            risk_val = d_risk
        else:
            risk_val = (d_risk - min_risk) / risk_range
    else:
        #print ("> No risk data")
        risk_val = 0.5
    if background == "map":
        #glyph_scale = (cam_orth_scale / 1.5) * 0.25
        glyph_size = 10.0 #* glyph_scale
        scale = glyph_size + (risk_val * glyph_size)
        force = False
        height = 100.0
    else:
        glyph_size = 0.5
        scale = glyph_size + (risk_val * glyph_size)
        force = True
        height = 1.0
    glyph = initGlyph(d_uncertainty, 1, d_x, d_y, height, scale, d_temperature, "data-glyph-"+str(idx), force)
    createGlyph( glyph, minVariance, maxVariance, ortho, numGlyphs )
#bpy.ops.wm.save_as_mainfile(filepath=bpy.data.filepath)
"mike.simpson@ncl.ac.uk"
] | mike.simpson@ncl.ac.uk |
6032bb2815e2ccb9627f20da9565907b213c4353 | 9bf1b1298f1e67d62005b40824317107c43a7f3f | /node3python/python/app.py | b7ee7493b95723ef53635d4fb050f38e201609e3 | [] | no_license | mashuq/bluewhale | e4a1b5871b0c9f9b4fb125c83de4e0a367028b92 | 6f2b2576c818f5cf900b3efc81976f8eab37f798 | refs/heads/master | 2020-05-03T01:36:36.784367 | 2019-04-28T14:37:34 | 2019-04-28T14:37:34 | 178,342,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | from flask import Flask
from flask import jsonify
import urllib.request
import ssl
import socket
app = Flask(__name__)
@app.route('/')
def index():
    """Aggregate this service's identity with the php and node services.

    NOTE(security): disabling certificate verification accepts any TLS
    certificate; tolerable only for this local demo cluster, never in
    production.
    """
    ssl._create_default_https_context = ssl._create_unverified_context
    data = {
        'app': 'python flask',
        'host': socket.gethostname(),
        # .read() returns bytes, which jsonify cannot serialize
        # (TypeError: Object of type bytes is not JSON serializable);
        # decode to text before embedding in the JSON response.
        'php': urllib.request.urlopen('https://192.168.99.100/php').read().decode('utf-8'),
        'node': urllib.request.urlopen('https://192.168.99.100/node/data').read().decode('utf-8')
    }
    return jsonify(data)
@app.route('/data')
def data():
    """Return this service's identity (app name and hostname) as JSON."""
    payload = {'app': 'python flask', 'host': socket.gethostname()}
    return jsonify(payload)
"mashuq@gmail.com"
] | mashuq@gmail.com |
8e4886ce4ec3a9fc22685d74ae9ed159945f095e | 1f3435e0698c062bebec79af4c2ed7bf5f865000 | /demo/oop/employees.py | f43c14f006685fdb848f59e57e47c2291b7b1e29 | [] | no_license | srikanthpragada/PYTHON_02_FEB_2021 | d879f5aadc3717d342e21e69d56f74b1a3967ca6 | c900b5745cf8343d5b00c345c0c13c9c7656cb5a | refs/heads/master | 2023-03-07T11:57:09.199601 | 2021-03-01T03:18:07 | 2021-03-01T03:18:07 | 335,815,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | from abc import ABC, abstractmethod
# Abstract class
class Employee(ABC):
def __init__(self, id, name, desg):
self.id = id
self.name = name
self.desg = desg
def __str__(self):
return f"{self.id} - {self.name} - {self.desg}"
@abstractmethod
def get_salary(self):
pass
class SalariedEmployee(Employee):
    """Employee paid a fixed salary."""

    def __init__(self, id, name, desg, salary):
        super().__init__(id, name, desg)
        self.salary = salary

    def __str__(self):  # Overriding: append salary to the base string.
        return "{} - {}".format(super().__str__(), self.salary)

    def get_salary(self):
        return self.salary
class Consultant(Employee):
    # Placeholder subclass: still abstract because get_salary() is not
    # overridden, so Consultant cannot be instantiated yet.
    pass
# Demo: runs at import time and prints "1 - Jack - Programmer - 500000".
s = SalariedEmployee(1, "Jack", "Programmer", 500000)
print(s)
| [
"srikanthpragada@gmail.com"
] | srikanthpragada@gmail.com |
95a860805752e65a0e7fa8e1ef22c28833231a4a | c8aa81def7b496eeeaa1d81e01602704576247bf | /Python-Practice-With-Nana/projects/github-api-requests/main.py | 86e611b8cf46eaeb7e11e934d8c59a49d2412f32 | [] | no_license | chaitanyatekane/python-practised | e6169c4e7be996e290a710234d534ced0954c0f0 | cad6308fc94189fff819487f43fd8d03ee9add82 | refs/heads/main | 2023-06-09T12:49:24.779144 | 2021-06-22T14:59:28 | 2021-06-22T14:59:28 | 377,890,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | # Step 01: in terminal run command - pip install requests
import requests
response = requests.get("https://api.github.com/users/chaitanyatekane/repos")
my_repos = response.json()
# print the whole objects list
# print(my_projects)
# print(type(my_projects)) # will return list
# print just the names and urls
for project in my_repos:
print(
f"Project Name: {project['name']}\nProject Url: {project['html_url']}\n")
# Sample Output :-
# Project Name: 100DaysOfCodeChallenge-Feb-04-2021
# Project Url: https://github.com/chaitanyatekane/100DaysOfCodeChallenge-Feb-04-2021
# Project Name: 30DaysOfCodeChallenge-Jan-04-2021
# Project Url: https://github.com/chaitanyatekane/30DaysOfCodeChallenge-Jan-04-2021
# Project Name: AreYouCricketExpert
# Project Url: https://github.com/chaitanyatekane/AreYouCricketExpert
# Project Name: Auto-Write-Text
# Project Url: https://github.com/chaitanyatekane/Auto-Write-Text
# Project Name: Automotive-Car-Design-Template
# Project Url: https://github.com/chaitanyatekane/Automotive-Car-Design-Template
# Project Name: Background-Changer
# Project Url: https://github.com/chaitanyatekane/Background-Changer
# and so on.........................................................
| [
"noreply@github.com"
] | chaitanyatekane.noreply@github.com |
64bc2e9d03fe38dd247eaddfab53f3e1310a2ad8 | 27a0d1de27012e2c0de5714ca17534feb1d13187 | /report/revenueReport.py | b082459f3f9902001065cce8de282a10c75e56ea | [] | no_license | purva15/parking-lot | 8aed4d5ba1e9431865ccda88913f8286b26cd4f7 | c3b5222b0b6bb368d4b3805dd68d9e71389432fc | refs/heads/main | 2023-08-24T17:19:49.882137 | 2021-10-17T18:24:05 | 2021-10-17T18:24:05 | 416,379,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | import json
from db.db import Sqlite
class RevenueReport:
    """Builds revenue/occupancy summaries from the parking-lot database."""

    def __init__(self):
        # Each report instance owns its own database handle.
        self._db = Sqlite()

    def generateReport(self):
        """Return a dict summarising cars served, revenue and occupancy."""
        served, earned = self._db.getAllCarsRevenue()
        occupancy = {
            "count": self._db.getNoOfParkedCars(),
            "monsterTrucks": self._db.getNoOfParkedCarsByType("MONSTER_TRUCK"),
            "regularCars": self._db.getNoOfParkedCarsByType("REGULAR"),
        }
        return {
            "totalCarsServed": served,
            "totalRevenue": earned,
            "parkedCars": occupancy,
        }
if __name__ == "__main__":
    # Smoke test: build a report against the real database.
    report = RevenueReport()
    report.generateReport()
| [
"agrawalpurva15@gmail.com"
] | agrawalpurva15@gmail.com |
e4150cba0135d971e6853074f976f87d7b92d219 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5738606668808192_0/Python/t93/c.py | 19a67e6d30da8a0a722d5585f4d89b53e9b3141e | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,248 | py | import math
import random
# Exhaustive odd trial division runs up to min(sqrt(n), this bound).
full_proof_lim = 100
# Number of random divisor probes attempted after trial division fails.
rand_proof_count = 100
def main():
    """Read T cases of (N, J); for each, print J coins with divisor proofs.

    Python 2 script (raw_input/xrange/print statements).  Candidates are
    generated randomly and kept only when proveAll finds a nontrivial
    divisor in every base 2..10.
    """
    random.seed(1)  # fixed seed -> deterministic output
    t = int(raw_input())
    for i in xrange(t):
        n, j = map(int, raw_input().split(" "))
        found = set([])  # coins already emitted for this case
        print "Case #%d:" % (i + 1)
        while len(found) < j:
            s = gen(n)
            if s not in found:
                ps = proveAll(s)
                if ps is not None:  # composite in every base 2..10
                    found.add(s)
                    print s, ' '.join(map(str, ps))
def proveAll(s):
    """Return a divisor of int(s, b) for every base b in 2..10, or None
    if any base lacks a (found) nontrivial divisor."""
    proofs = []
    for base in xrange(2, 11):
        divisor = prove(int(s, base))
        if divisor is None:
            return None
        proofs.append(divisor)
    return proofs
def prove(n):
    """Best-effort search for a nontrivial divisor of n; None if not found.

    Probabilistic: a composite n can still return None if neither bounded
    trial division nor the random probes hit a divisor.
    """
    if n % 2 == 0:
        # Even numbers: 2 divides them, except 2 itself (prime).
        return None if n == 2 else 2
    root = int(math.sqrt(n))
    # Exhaustive odd trial division up to a small bound.
    for candidate in xrange(3, min(root, full_proof_lim) + 1, 2):
        if n % candidate == 0:
            return candidate
    # Random probing over the remaining range.
    for _ in xrange(rand_proof_count):
        candidate = random.randrange(3, root + 1)
        if n % candidate == 0:
            return candidate
    return None
def gen(n):
    """Random n-digit binary candidate whose first and last digits are '1'."""
    middle = [random.choice('01') for _ in xrange(n - 2)]
    return '1' + ''.join(middle) + '1'
if __name__ == "__main__":
    # Entry point: reads the problem input from stdin.
    main()
| [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
085edaf9b9ec6ceac6dfd453698944d792c7fffb | cd1f1949a886b7fcd0d5b286ae735c212ad14cde | /pro/settings.py | eb4a4d0f44b4e6267569ec1149ff7e7b2271e7ff | [] | no_license | KavyaHB/pro234 | 0403ef2cb4c4d84cd7affc896bfa8798a7fff77e | dc98208b7e0df2c7126b6151a560321cc920a337 | refs/heads/main | 2023-04-07T04:51:59.338479 | 2021-04-14T16:34:36 | 2021-04-14T16:34:36 | 357,973,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,310 | py | """
Django settings for pro project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
TEMPLATE_DIR=os.path.join(BASE_DIR,"templates")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-1vgxi%kc36@e%1p-(4l49@d)o7t4zjoxtt1o5pkxm*1@=d8g3u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pro.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pro.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"kavyahbgowda1998@gmail.com"
] | kavyahbgowda1998@gmail.com |
2b6868700d215277a89edaf88b8ae7d53c31616d | a0db3713e9e9914fab3ddf80186ce71f530133c2 | /restProject/bookstore4/books/urls.py | 95eb99137d22a4d7e266d21ee8349fb63ed6ca0e | [] | no_license | liddyv/RestfulAPI | ba2821eb8e408b4e7ed527bb9a59abba155f03a7 | 9b1bb441197e77ccaf19ba12c6b2252b668eb1d9 | refs/heads/master | 2020-09-06T04:26:44.532378 | 2019-11-20T20:04:37 | 2019-11-20T20:04:37 | 220,318,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | from django.conf.urls import url
from books.views import BookList, BookDetail
urlpatterns = [
url(
r'^books$',
BookList.as_view()
),
url(
r'^books/(?P<pk>[0-9]+)$',
BookDetail.as_view()
)
] | [
"liddyv@gmail.com"
] | liddyv@gmail.com |
be1d0bee1cce30d31d2a0411e9818233fb4d9d1d | bcc57de6f93224cd643108ee9ebe9a6faa726482 | /spark/test.py | 15a77de0c77c04220f2a2f6be55926d077b03fa6 | [] | no_license | doubaoa/taxi | 6f42ade6914995a584bf4d48e4d4346084f6ab44 | 6f313ce7f04bb746f8a20de10142ff04b89b7500 | refs/heads/master | 2020-06-15T05:24:27.192403 | 2019-07-24T09:38:11 | 2019-07-24T09:38:11 | 195,213,638 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,412 | py | import os
from pyspark.sql import SparkSession
from pyspark.sql import Window
from pyspark.sql import Row,functions as F
from pyspark.sql.functions import desc, from_unixtime
import datetime
import time
start_time =time.time()
os.environ['JAVA_HOME'] = '/root/jdk1.8.0_141'
spark = SparkSession \
.builder \
.appName('my_first_app_name') \
.getOrCreate()
file = r'/root/20170928.txt'
df = spark.read.csv(file,header=True,inferSchema=True)
df = df.drop('direction','HDOP')
speed = df.groupBy('num').avg('speed')
df = df.join(speed, df.num == speed.num, "left").drop(speed.num)
df = df.filter(df['avg(speed)']>0).filter(df['avg(speed)']<16.7)
df = df.drop(df['avg(speed)'])
start_result = df.withColumn("rn", F.row_number().over(Window.partitionBy("num").orderBy("time")))
start_result=start_result.where(start_result.rn<=1)
start_result = start_result.withColumnRenamed('longitude', 'start_long').withColumnRenamed('latitude', 'start_lati')
end_result = df.withColumn("rn", F.row_number().over(Window.partitionBy("num").orderBy(desc("time"))))
end_result = end_result.where(end_result.rn<=1)
end_result = end_result.withColumnRenamed('longitude', 'end_long').withColumnRenamed('latitude', 'end_lati')
result = start_result.join(end_result, start_result.num == end_result.num).drop(start_result.num)
result.createOrReplaceTempView("result")
a = spark.sql("select * from result where result.start_long!=result.end_long or result.start_lati!=result.end_lati")
a.createOrReplaceTempView("aa")
aa = spark.sql("select num from aa")
df = df.join(aa, df.num == aa.num).drop(aa.num).drop('rn').drop('speed')
start_result1 = df.withColumn("rn", F.row_number().over(Window.partitionBy("num").orderBy("time")))
start_result1=start_result1.where(start_result1.rn<=1).drop('rn').orderBy("time")
start_result1.coalesce(1).write.format('com.databricks.spark.csv').options(header="true").mode('append').save('/root/start')
# head = start_result1.head(1)[0]['time']
# head = datetime.datetime.fromtimestamp(head)
# for i in range(24):
# tail = head + datetime.timedelta(hours=1)
# head = tail
# part_start = start_result1.filter(start_result1['time']>= int(time.mktime((head-datetime.timedelta(hours=1)).timetuple())))\
# .filter(start_result1['time']<=int(time.mktime(tail.timetuple()))).drop('time').drop('num')
# print(part_start.count())
# part_start.coalesce(1).write.format('com.databricks.spark.csv').options(header="true").mode('append').save('/root/start'+str(i))
end_result1 = df.withColumn("rn", F.row_number().over(Window.partitionBy("num").orderBy(desc("time"))))
end_result1 = end_result1.where(end_result1.rn<=1).drop('rn').orderBy("time")
end_result1.coalesce(1).write.format('com.databricks.spark.csv').options(header="true").mode('append').save('/root/end')
# head1 = end_result1.head(1)[0]['time']
# head1 = datetime.datetime.fromtimestamp(head1)
# for i in range(24):
# tail1 = head1 + datetime.timedelta(hours=1)
# head1 = tail1
# part_end = end_result1.filter(end_result1['time']>= int(time.mktime((head1-datetime.timedelta(hours=1)).timetuple())))\
# .filter(end_result1['time']<=int(time.mktime(tail1.timetuple()))).drop('time').drop('num')
# part_end.show()
# part_end.coalesce(1).write.format('com.databricks.spark.csv').options(header="true").mode('append').save('/root/end'+str(i))
end_time = time.time()
print(end_time-start_time)
| [
"nianax_2587@163.com"
] | nianax_2587@163.com |
106b0d7e068ea42ebaa6e75cc7067b6ec54dd898 | c2afceefd7eb9066f83b1cedc29e23d28c2a6243 | /10-OOP/01-Activities/08-Stu_TDD-Dissect/node_modules/fsevents/build/config.gypi | b607e9be49e08d3cdae9552e09e14685b9694a5e | [
"MIT"
] | permissive | Simon-Xu-Lan/classcode | c390960cc640e73e79c3b645d2476d7d80b6e0c7 | d226d443fec46cb2032c5854ac863ccb4ca2aaf0 | refs/heads/master | 2022-12-05T16:08:28.580180 | 2020-08-25T18:55:49 | 2020-08-25T18:55:49 | 255,445,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,656 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"debug_nghttp2": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt64l.dat",
"icu_default_data": "",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "64",
"is_debug": 0,
"llvm_version": "0.0",
"napi_build_version": "5",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 72,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_report": "true",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_large_pages": "false",
"node_use_large_pages_script_lld": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"shlib_suffix": "72.dylib",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"v8_use_snapshot": 1,
"want_separate_host_toolset": 0,
"xcode_version": "8.0",
"nodedir": "/Users/simonxu/Library/Caches/node-gyp/12.16.1",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/bash",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"noproxy": "",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/simonxu/.npm-init.js",
"userconfig": "/Users/simonxu/.npmrc",
"cidr": "",
"node_version": "12.16.1",
"user": "",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/simonxu/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"fund": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.13.4 node/v12.16.1 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/4v/gw16xx4s0bg1hchktxd95hvw0000gn/T",
"unsafe_perm": "true",
"format_package_lock": "true",
"link": "",
"prefix": "/usr/local"
}
}
| [
"xulan20907@gmail.com"
] | xulan20907@gmail.com |
6683bad167caeb8a0b9f59825d8eea3848f215f8 | 4db5cc1d080d7a61d51bd2b80451df2c92e15345 | /Maximum Depth of Binary Tree-Recursive.py | 62aad59578efaac4df688395a34714c79421903c | [] | no_license | thumarrushik/Leetcode | fe6eb76885fa0ff50afc21a496186a6537cbd897 | f11747b5a02e5787dd250451e639519d1170f4a3 | refs/heads/master | 2021-09-03T20:35:41.709620 | 2018-01-11T20:41:47 | 2018-01-11T20:41:47 | 110,296,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def maxDepth(self, root, height = 0):
"""
:type root: TreeNode
:rtype: int
"""
if root is None or root == []:
return 0
return 1+max(self.maxDepth(root.right), self.maxDepth(root.left))
| [
"noreply@github.com"
] | thumarrushik.noreply@github.com |
703ed9032d2a1f150c17020ddbae111e7b19897a | 4b072f5a123d3052ce27e7651602fb9a3bbde784 | /legacy_router.py | 3976f0450b84a77b4befc87f89c589798381c01b | [] | no_license | juhipark/PA3 | 8334a45cc7c0cef1b258ffc65890b422f4f02f2c | 588362e73bc78015067c1f9c6fcc117c83282bcb | refs/heads/master | 2022-04-19T11:29:16.622353 | 2020-04-20T04:47:17 | 2020-04-20T04:47:17 | 257,127,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,466 | py | #!/usr/bin/python
from mininet.net import Mininet
from mininet.node import Controller, RemoteController, OVSController
from mininet.node import CPULimitedHost, Host, Node
from mininet.node import OVSKernelSwitch, UserSwitch
from mininet.node import IVSSwitch
from mininet.cli import CLI
from mininet.log import setLogLevel, info
from mininet.link import TCLink, Intf
from subprocess import call
def myNetwork():
net = Mininet( topo=None,
build=False,
ipBase='10.0.0.0/8')
# for example '-.-.-.-/24'is ip addfress format to set prefix length
info( '*** Add Router\n')
r1 = net.addHost('r1', cls=Node, ip='10.0.0.1')
r1.cmd('sysctl -w net.ipv4.ip_forward=1')
info('*** Add Switch\n')
switch = net.addSwitch('s1')
info( '*** Add hosts\n')
h1 = net.addHost('h1', cls=Host, ip='10.0.0.3/8', defaultRoute='via 10.0.0.1')
h2 = net.addHost('h2', cls=Host, ip='192.0.1.2/12', defaultRoute='via 192.0.1.1')
info( '*** Add links\n')
net.addLink(h1, r1, intfName2='r1-eth1', params2={'ip':'10.0.0.1/8'})
net.addLink(h2, r1, intfName2='r1-eth2', params2={'ip':'192.0.1.1/12'})
info( '*** Starting network\n')
net.build()
info('*** Starting controllers\n')
for controller in net.controllers:
controller.start()
info('*** Starting switches\n')
CLI(net)
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
myNetwork()
| [
"None"
] | None |
6f875664e9bb3a39bb5169595763940fb2368fe7 | 71666a3dc7654fbde3be7f05aa3784aa16826619 | /installers/migrations/0007_auto_20200929_1305.py | 214f939cb0befc3937e119274e3f3a79747b3b6a | [] | no_license | Elias-Cz/Capstone | 5e3e5d40e133eb9bcf6d833ae2fd43b469301588 | 33c1c24167b113a149ba31d4c54887c7379a9c94 | refs/heads/master | 2023-01-24T18:55:53.002851 | 2020-12-10T05:39:45 | 2020-12-10T05:39:45 | 291,141,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,126 | py | # Generated by Django 3.0.8 on 2020-09-29 20:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('installers', '0006_auto_20200929_1110'),
]
operations = [
migrations.CreateModel(
name='Day',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('day_data', models.CharField(max_length=140)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='customer_day', to=settings.AUTH_USER_MODEL)),
('installer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='installer_day', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterField(
model_name='schedule',
name='date_data',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='date_data', to='installers.Day'),
),
]
| [
"cazessus.elias@gmail.com"
] | cazessus.elias@gmail.com |
f6a5f5338ec97a20ca77320ba1f5344d605b32d6 | 4c93ff48983bb8475f6be4001811ba2b91d7a0b5 | /places/migrations/0003_auto_20151202_0600.py | c21336fb63aec7cd67d18565f1f8c3144fa38210 | [] | no_license | SoccerStatsUS/s3 | dfca61c7ac4e05987e77a992a482ffe9a69f3e16 | 44b4b8bc14cb580aa9d7683f72fbaa75d1e43d70 | refs/heads/master | 2021-01-10T15:42:58.832834 | 2017-03-01T22:29:25 | 2017-03-01T22:29:25 | 47,239,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-02 06:00
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('places', '0002_country_confederation'),
]
operations = [
migrations.AlterModelOptions(
name='city',
options={'ordering': ('name',)},
),
migrations.AlterModelOptions(
name='country',
options={'ordering': ('name',)},
),
]
| [
"chrisedgemon@gmail.com"
] | chrisedgemon@gmail.com |
7a6658a8b04485f5a471d94fdf45802965465468 | 63daf225819636397fda6ef7e52783331c27f295 | /taobao_apis/top/api/rest/SellercatsListUpdateRequest.py | 9e0e466ff8c7fdec1ac966165669f807faeb22af | [] | no_license | cash2one/language-Python | e332ecfb4e9321a11407b29987ee64d44e552b15 | 8adb4f2fd2f023f9cc89b4edce1da5f71a3332ab | refs/heads/master | 2021-06-16T15:15:08.346420 | 2017-04-20T02:44:16 | 2017-04-20T02:44:16 | 112,173,361 | 1 | 0 | null | 2017-11-27T09:08:57 | 2017-11-27T09:08:57 | null | UTF-8 | Python | false | false | 377 | py | '''
Created by auto_sdk on 2016.04.13
'''
from top.api.base import RestApi
class SellercatsListUpdateRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.cid = None
self.name = None
self.pict_url = None
self.sort_order = None
def getapiname(self):
return 'taobao.sellercats.list.update'
| [
"tangzhihui@Tom.local"
] | tangzhihui@Tom.local |
315ff3fd72a86295c5be5e98ad1185eb62793fe1 | 0f2223a9af08237145247879253d609f9a43d6a7 | /NQubitALU.py | 33e1e29df63a78f7d2a0e0ef2906d9fc484c0839 | [] | no_license | AlejandroGomezFrieiro/NQubitALU | 07898c1c0325ba9fcfc5057b72a333a680954a07 | cdcaa38f2b252a261977865739b0eb56e95461f0 | refs/heads/main | 2023-03-07T21:32:32.077402 | 2021-02-22T11:06:35 | 2021-02-22T11:06:35 | 340,795,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,271 | py | import numpy as np
import qiskit
from qiskit.visualization import plot_state_city
# import qiskit.circuits.library as lib
from qiskit import QuantumRegister
from typing import Optional, List
class HalfAdder(qiskit.circuit.Gate):
def __init__(self) -> None:
super().__init__('halfAdder', 4, [])
def _define(self):
"""
gate halfAdder(a, b, sum, carry)
{
cx a, sum;
cx b, sum;
ccx a, b, carry;
}
"""
from qiskit.circuit.quantumcircuit import QuantumCircuit
q = qiskit.QuantumRegister(4, name='q')
qc = qiskit.QuantumCircuit(q, name=self.name)
# XOR q0, q1 to q2
qc.cx(0, 2)
qc.cx(1, 2)
# AND q0, q1 to q3
qc.ccx(0, 1, 3)
self.definition = qc
# if self.num_variable_qubits < 4:
# raise ValueError
class FullAdder(qiskit.circuit.Gate):
def __init__(self) -> None:
super().__init__('fullAdder', 8, [])
def _define(self):
"""
gate halfAdder(a, b, cin, cout, sum, ancilla1, ancilla2, ancilla3)
{
cx a, ancilla1;
cx b, ancilla1;
cx cin, cout;
cx sum, cout;
ccx a, b, ancilla2;
ccx cin, ancilla1, ancilla3;
cx ancilla3, cout;
cx ancilla2, cout;
ccx ancilla2, ancilla3, cout;
}
"""
from qiskit.circuit.quantumcircuit import QuantumCircuit
q = qiskit.QuantumRegister(8, name='q')
qc = qiskit.QuantumCircuit(q, name=self.name)
# XOR q0, q1 to q5
qc.cx(0, 5)
qc.cx(1, 5)
# XOR q2, q5 to q4
qc.cx(2, 4)
qc.cx(5, 4)
# AND q0, q1 to q6
qc.ccx(0, 1, 6)
# AND q2, q5 to q7
qc.ccx(2, 5, 7)
# OR q7, q6, q3
qc.cx(7, 3)
qc.cx(6, 3)
qc.ccx(7, 6, 3)
self.definition = qc
class TwoQubitALU(qiskit.QuantumCircuit):
def __init__(self,
num_qubits: int = 12) -> None:
"""Return a circuit implementing a Two Qubit ALU, with input qubits in the form (a0, a1, b0, b1, s0, s1, c0, c1, sb, ancilla1, ancilla2, ancilla3)
Args:
num_qubits: the width of circuit.
Raises:
ValueError: If the number of qubits is not right.
"""
super().__init__(num_qubits, name="ALU(2)")
if num_qubits != 12:
raise ValueError("ALU(2) requires 13 bits")
q = self.qregs[0]
# B0 XOR SB to B0 (use ancilla 1, swap b0, ancilla1, then reset ancilla1)
self.cx(2, 9)
self.cx(8, 9)
self.swap(2, 9)
self.reset([9]*10)
# B1 XOR SB TO B1 (use ancilla 1, swap b1, ancilla2, then reset ancilla2)
self.cx(3, 10)
self.cx(8, 10)
self.swap(3, 10)
self.reset([10]*10)
# (a0, a1, b0, b1, s0, s1, c0, c1, sb, ancilla1, ancilla2, ancilla3)
# a, b, cin, cout, sum, ancilla1, ancilla2, ancilla3
# SUM A0, B0, reset ancillas
self.append(FullAdder(), [q[i] for i in [0, 2, 8, 6, 4, 9, 10, 11]]) #a0, b0, sb, c0, s0, ancillas
self.reset([9]*10)
self.reset([10]*10)
self.reset([11]*10)
# SUM A1, B1, reset ancillas
self.append(FullAdder(), [q[i] for i in [1, 3, 6, 7, 5, 9, 10, 11]])
self.reset([9]*10)
self.reset([10]*10)
self.reset([11]*10)
class NQubitALU(qiskit.circuit.library.BlueprintCircuit):
def __init__(self,
registerA: qiskit.QuantumRegister,
registerB: qiskit.QuantumRegister,
registerS: qiskit.QuantumRegister,
registerSB: qiskit.QuantumRegister,
registerC: qiskit.QuantumRegister,
registerAncilla: qiskit.QuantumRegister,
num_qubits: Optional[int] = None,
name: str = 'ALU(N)') -> None:
"""Return a circuit implementing a simple N Qubit ALU (named ALU(N)), with input qubits in the form (a0, ... an, b0, b ...bn, s0...sn, c0...cn, sb, ancilla1, ancilla2, ancilla3)
Args:
registerA: First Quantum register of size N, which will be the first string to operate on.
registerB: Second Quantum register of size N, which will be the second string to operate on.
registerS: Third Quantum register of size N, which will contain the output string.
registerSB: Single qubit quantum register which controls between operations ADD and SUB.
registerC: Fourth Quantum register of size N, which contains the carry bits.
registerAncilla: Quantum register of size 3 to operate Ancillas.
Raises:
CircuitError: if the xor bitstring exceeds available qubits.
Reference Circuit:
.. jupyter-execute::
:hide-code:
from qiskit.circuit.library import XOR
import qiskit.tools.jupyter
circuit = XOR(5, seed=42)
%circuit_library_info circuit
"""
self.registerA = registerA
self.registerB = registerB
self.registerS = registerS
self.registerSB = registerSB
self.registerC = registerC
self.registerAncilla = registerAncilla
self._num_qubits = None
self.num_qubits = registerA.size # Size of the Nbits to operate on
self._name = f'ALU({self.registerA.size})'
self.qregs = [self.registerA, self.registerB, self.registerS, self.registerSB, self.registerC, self.registerAncilla]
super().__init__(*self.qregs, name=self._name)
self.qregs = [self.registerA, self.registerB, self.registerS, self.registerSB, self.registerC, self.registerAncilla]
@property
def num_qubits(self) -> int:
"""The number of qubits to be summed.
Returns:
The number of qubits per main register.
"""
return self._num_qubits
@num_qubits.setter
def num_qubits(self, num_qubits: int) -> None:
"""Set the number of qubits in the registers of the ALU operation.
Args:
num_qubits: The new number of qubits.
"""
if self._num_qubits is None or num_qubits != self._num_qubits:
self._invalidate()
self._num_qubits = num_qubits
self._reset_registers()
def _reset_registers(self):
qr_A = self.registerA
qr_B = self.registerB
qr_S = self.registerS
qr_SB = self.registerSB
qr_C = self.registerC
qr_An = self.registerAncilla
self.qregs = [qr_A, qr_B, qr_S, qr_SB, qr_C, qr_An]
@property
def num_ancilla_qubits(self) -> int:
"""The number of ancilla qubits required to implement the ALU(operation).
Returns:
The number of ancilla qubits in the circuit.
"""
return 3
@property
def num_carry_qubits(self) -> int:
"""The number of carry qubits required to compute the ALU.
Note that this is not necessarily equal to the number of ancilla qubits, these can
be queried using ``num_ancilla_qubits``.
Returns:
The number of carry qubits required to compute the sum.
"""
return self.num_qubits
def _check_configuration(self, raise_on_failure=True):
valid = True
if self._num_qubits is None:
valid = False
if raise_on_failure:
raise AttributeError('The input register has not been set.')
if not (self.registerA.size and self.registerB.size and self.registerS.size and self.registerC.size):
valid = False
if raise_on_failure:
raise ValueError('Register sizes are not equal.')
if self.registerSB.size != 1:
valid = False
if raise_on_failure:
raise ValueError('Control qubit register must be of size 1.')
if self.registerAncilla.size < 3:
valid = False
if raise_on_failure:
raise ValueError('Ancilla register needs at least three qubits.')
return valid
def _build(self):
super()._build()
qr_A = self.registerA
qr_B = self.registerB
qr_S = self.registerS
qr_SB = self.registerSB
qr_carry = self.registerC
qr_ancilla = self.registerAncilla
for qubit in qr_ancilla:
self.reset([qubit]*1)
for B_index in range(qr_B.size):
self.cx(qr_B[B_index], qr_ancilla[0])
self.cx(qr_SB[0], qr_ancilla[0])
self.swap(qr_B[B_index], qr_ancilla[0])
self.reset([qr_ancilla[0]]*1)
for A_index in range(qr_A.size):
if A_index == 0:
self.append(FullAdder(), [qubit for qubit in [qr_A[A_index], qr_B[A_index], qr_SB, qr_carry[A_index],
qr_S[A_index], qr_ancilla[0], qr_ancilla[1], qr_ancilla[2]]]) #a0, b0, sb, c0, s0, ancillas
self.reset([qr_ancilla[0]]*1)
self.reset([qr_ancilla[1]]*1)
self.reset([qr_ancilla[2]]*1)
else:
self.append(FullAdder(), [qubit for qubit in [qr_A[A_index], qr_B[A_index], qr_carry[A_index - 1], qr_carry[A_index],
qr_S[A_index], qr_ancilla[0], qr_ancilla[1], qr_ancilla[2]]]) #a0, b0, sb, c0, s0, ancillas
self.reset([qr_ancilla[0]]*1)
self.reset([qr_ancilla[1]]*1)
self.reset([qr_ancilla[2]]*1)
def set_register_from_classical_register(quantumRegister: qiskit.QuantumRegister, classicalRegister: qiskit.ClassicalRegister):
'''
Sets the qubits in a quantum register to the classical values of a classic register.
Cannot be implemented as of current Qiskit version without measuring a circuit.
Input:
quantumRegister: A quantum register in its ground state.
classicalRegister: Target register to set.
'''
if quantumRegister.size != classicalRegister.size:
raise ValueError("Classic register and quantum register are not of same size")
pass
def set_quantum_register_from_string(circuit: qiskit.QuantumCircuit,
quantumRegister: qiskit.QuantumRegister,
input_string: str,
n_resets: int = 1):
"""
Resets quantumRegister and sets it to the values of the input string. Appends the necessary gates into circuit.
"""
N = len(input_string)
if quantumRegister.size != N:
raise ValueError("Classic register and quantum register are not of same size")
circuit.reset([qubit for qubit in quantumRegister]*n_resets)
for characters in range(N-1, -1, -1):
if input_string[characters] == '1':
circuit.x(quantumRegister[N - characters - 1])
if __name__ == "__main__":
from qiskit import Aer
import qiskit
import itertools
from qiskit.result.utils import count_keys
# Define length of strings, as well as the strings you would like to sum and the control bit
N = 2
stringA = '10'
stringB = '01'
stringSB = '0'
# Initialize quantum and classic registers
registerA = qiskit.QuantumRegister(N)
registerB = qiskit.QuantumRegister(N)
registerS = qiskit.QuantumRegister(N)
registerSB = qiskit.QuantumRegister(1)
registerC = qiskit.QuantumRegister(N)
registerAnc = qiskit.QuantumRegister(3)
measurementRegister = qiskit.ClassicalRegister(4+4*N)
regs = [registerA, registerB, registerS, registerSB, registerC, registerAnc]
flat_regs = list(itertools.chain(*[register[:] for register in regs]))
# Create a circuit
circuit = qiskit.QuantumCircuit(*regs, measurementRegister, name = 'ALU')
# Set input registers
set_quantum_register_from_string(circuit, registerA, stringA)
set_quantum_register_from_string(circuit, registerB, stringB)
# Set SB to choose between add or sub
# If dding a Hadamard gate, then calculate both ADD and SUB
set_quantum_register_from_string(circuit, registerSB, stringSB)
circuit.h(registerSB)
# Add NQubitALU to the circuit
circuit.append(NQubitALU(*regs).to_instruction(), flat_regs)
# Add measurement operations to the circuit
circuit.barrier()
circuit.measure(range(4+4*N), range(4+4*N))
# Display the circuit
print(circuit)
# Use Aer's qasm_simulator
simulator = Aer.get_backend('qasm_simulator')
n_shots = 1024
# Execute the circuit on the qasm simulator
job = qiskit.execute(circuit, simulator, shots=n_shots, meas_return = 'single')
# Grab results from the job
result = job.result()
# Return counts
counts = result.get_counts(circuit)
bit_results = list(counts.keys())
print(f"\nInputs are A = {stringA}, B = {stringB}, SB = {stringSB}")
print("\nTotal counts are:", counts)
print("\nSum of A + (SB XOR B) is: \n")
for result in bit_results:
print(f"{result[4+N:4+2*N]} with a number of counts {counts.get(result)}/{n_shots}") | [
"alejandrogomezfrieiro@gmail.com"
] | alejandrogomezfrieiro@gmail.com |
f3e5c94ed58ecc7af879b344bf90b7450799daaf | 47190e816dffcf5d20b3562f8d0d13e7ad08b3c4 | /jwt_auth/migrations/0007_auto_20200816_1542.py | 68780e6d865b5174239cba9f87f0c9ba85348053 | [] | no_license | RobSero/Home-Companion | d426098d9aa91862d8663db50d64f6e005e44ea0 | 6a6c2c06bf485aa2ea36a843aa7e15a0d01eb505 | refs/heads/master | 2022-12-18T18:24:42.121052 | 2020-09-04T19:59:36 | 2020-09-04T19:59:36 | 287,741,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | # Generated by Django 3.1 on 2020-08-16 15:42
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('jwt_auth', '0006_auto_20200612_1005'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='experience',
),
migrations.RemoveField(
model_name='user',
name='money',
),
migrations.RemoveField(
model_name='user',
name='total_money',
),
]
| [
"robert.serowka@hotmail.co.uk"
] | robert.serowka@hotmail.co.uk |
4ee4554388d49f1860538799a4bfc141e77e5456 | d9c2c3cd1831ac8cbacce15ac83d6939c8dc346d | /Array/Max and min in array.py | 3be80024a7b24f21e117d31ed62d184b3d5e12be | [] | no_license | pratyusa98/Compitative_Programme | ce36821b2bcff5e641bc4ff07941126ac21fd8db | 4fe2117947b832f7c66cfdf6a1df13fd02e0633f | refs/heads/master | 2023-01-20T11:49:07.857168 | 2020-11-21T17:46:42 | 2020-11-21T17:46:42 | 310,860,617 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py |
a = [1000, 11, 445, 1, 330, 3000]
print(max(a),min(a)) | [
"noreply@github.com"
] | pratyusa98.noreply@github.com |
ff75f85ee5895f6816dc235222eadff15eed35a1 | 9147d00c4dc03b6a64bc8c9278139e7d5936a74e | /mmm.py | d5a8ed375e0d6348fda2aaa07a69b17feae6bdc9 | [] | no_license | charuj/MultinomialMixtureModel | af0c58388fb04ace70dc09df5b42c9a03cf95045 | 1ae7bcc2ceccacc9d7e3b554e7c00ddb5087708c | refs/heads/master | 2020-12-24T19:28:17.881628 | 2016-11-18T04:06:00 | 2016-11-18T04:06:00 | 55,998,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,433 | py | import numpy as np
import scipy as sc
import matplotlib.pyplot as plt
from scipy.misc import logsumexp
from collections import Counter
# load the data
import csv
with open ('train_data.csv', 'rb') as csvfile:
datareader = csv.reader(csvfile, delimiter=',')
training=[]
for row in datareader:
training.append(row)
with open ('train_user_ids.csv', 'rb') as csvfile:
datareader = csv.reader(csvfile, delimiter=',')
user_id=[]
for row in datareader:
user_id.append(row)
with open ('demographics.csv', 'rb') as csvfile:
datareader = csv.reader(csvfile, delimiter=',')
demog=[]
for row in datareader:
demog.append(row)
#turning training data into array
training= np.array(training)
training=training.astype(np.float64)
#training=np.random.permutation(training)
#3-fold cross validation, dividing the data
N=training.shape[0]
set1= training[0:N/3]
set2= training[N/3:2*N/3]
set3=training[2*N/3:N]
N=set1.shape[0]
#E Step: calculate responsbility
def beta(K):
betas= np.random.random([89, K,5])
for i in range(0,89):#put variable for 89
for j in range(0,K):
beta_sum=np.sum(betas[i,j,:])
beta_normalized= betas[i,j,:]/beta_sum
betas[i,j,:] = beta_normalized
return betas, np.ones(K)*(1.0/K)
def estep(K, beta, data, N, priors):
a=np.zeros([K,N])
responsibility= np.zeros([K,N])
for users in range(0, N): # users
for clusters in range(0,K):
likelihood=1
for movies in range(0,89): #movies
x= data[users, movies] #rating for a particular user for a particular movie
x=int(x)
if x == 0:
continue
likelihood= beta[movies, clusters,x-1]*likelihood #likelihood for particular user, for particular movie, for particular cluster
#prior=np.float64(1.0/K)
pre_a= priors[clusters] * likelihood # not summed yet
# sum over the 89 movies now
# if pre_a == 0:
# print "a is zero"
try:
a[clusters,users]+= pre_a
except:
print 'alsdfasd'
for users in range(0,N):
# calc scaler log sum exp for each user
#sum_user= sc.misc.logsumexp(a[:, users])
sum_user= np.sum(a[:,users])
# if np.min(a[:,users]) == 0:
# print "ASASf"
# if np.min(a[:,users])/(sum_user) == 0:
# print "respons 0"
#responsibility[:,users]= np.exp(np.log(a[:,users])-(sum_user+1*np.e**(-32)))
responsibility[:,users]= (a[:,users])/(sum_user)
return responsibility, a
def mstep(responsibility,data,K,priors):
betas= np.zeros([89, K,5])
for movie in range(0,89):#put variable for 89
for cluster in range(0,K):
rating_count= [0,0,0,0,0]
for users in range(0,N):
if data[users, movie]!= 0:
#TODO:the following might be for "soft"
rating_count[int(data[users,movie])-1]+=responsibility[cluster,users]
#hard version
#rating_count[int(data[users,movie])-1]+=1.0
# normalize
beta_ml= rating_count/(np.sum(rating_count)) # for one particular cluster movie combo
beta_ml=beta_ml +1*np.e**(-32)
beta_ml=beta_ml/(np.sum(beta_ml))
betas[movie, cluster,:]= beta_ml
#TODO: WRITE CODE TO UPDATE PRIORS
distribution_across_clusters = cluster_dist(K,responsibility)
distribution_across_clusters=np.array(distribution_across_clusters)
priors = distribution_across_clusters.astype(np.float64) / np.sum(distribution_across_clusters)
return betas,priors
def log_like(a, responsibility):
# max_prob= np.max(a, axis=0) # max a across clusters
# log_maxprob= np.log(max_prob)
# log_probx= np.sum(log_maxprob)
log_probx= np.multiply(responsibility, np.log(a + (1*np.e**(-32))))
log_probx= np.sum(log_probx)
return log_probx
def demographics(N,K,a, userid, demog):
userid = np.array(userid[0]).astype(int)
cluster_users_belong_to= np.argmax(a, axis=0) # max a across clusters
list_of_cluster_userids= []
list_of_cluster_ages = []
list_of_cluster_genders = []
for i in range(K):
list_of_cluster_userids.append([])
list_of_cluster_ages.append([])
list_of_cluster_genders.append([])
#for loop i to range of 241
for i in range (N):
cluster = cluster_users_belong_to[i]
list_of_cluster_userids[cluster].append(userid[i])
for cluster in range (K):
user_ids_in_this_cluster = list_of_cluster_userids[cluster]
for user_id_in_cluster in user_ids_in_this_cluster:
age = int(demog[user_id_in_cluster][1])
gender = demog[user_id_in_cluster][2]
list_of_cluster_ages[cluster].append(age)
list_of_cluster_genders[cluster].append(gender)
print "mean age for cluster ",cluster," is ", np.mean(np.array(list_of_cluster_ages[cluster]))
males = 0
females = 0
for gender in list_of_cluster_genders[cluster]:
if gender == 'M':
males +=1
else:
females+=1
print "males ", males, " females ", females, " ratio ", float(males)/females
'''def cluster_dist(responsibility):
best_cluster= np.argmax(responsibility, axis=0) # finding maximal cluster, for every user
dict= Counter(best_cluster).values()
#dict= dict.T
print dict
#plt.hist(dict)
#plt.show()
return dict
'''
def cluster_dist(K, responsibility):
dict= np.zeros(K)
best_cluster= np.argmax(responsibility, axis=0)
for i in range(0,N):
dict[best_cluster[i]]+=1
#plt.hist(count)
#plt.show(count)
return dict
list_logk= []
list_logk2=[]
for restart in range(0,1):
# main
# need to iterate over different values of k, and do cross-validation
#K=[1,2,3,4,5,6,7,8,9,10]
K=[1,2,3,4]
log_by_K = []
log_by_K2 = []
epochs = 10
t=np.arange(0,epochs)
for num in K:
sum_ll= 0
sum_ll2= 0
list_results=[]
list_results2=[]
for crossv in range(1):
all_sets= [set1, set2, set3]
valid_set=all_sets.pop(crossv)
train_set= np.concatenate(all_sets)
print "\n" +"for K value = " + str(num)
betas,priors= beta(num)
results = []
results2=[]
for i in range(0,epochs):
#set1= np.random.permutation(set1)
responsibility, a= estep(num,betas,train_set, N,priors)
responsibility2, a2= estep(num,betas,valid_set, N,priors)
log_probx= log_like(a, responsibility)
log_probx2= log_like(a2, responsibility2)
dist= cluster_dist(num, responsibility)
#print log_probx
results.append(log_probx)
results2.append(log_probx2)
betas,priors= mstep(responsibility,train_set,num,priors)
list_results.append(results)
list_results2.append(results2)
sum_ll= sum_ll+ log_probx
sum_ll2= sum_ll2+ log_probx2
# take means of log likelihood over training sets
log_probx= np.mean(sum_ll)
log_probx2= np.mean(sum_ll2)
list_results= np.array(list_results)
list_results2=np.array(list_results2)
results_mean= np.mean(list_results, axis=0)
results2_mean= np.mean(list_results2, axis=0)
log_by_K.append(log_probx)
log_by_K2.append(log_probx2)
demographics(N,num,a, user_id, demog)
# plt.plot(t,results_mean, 'r--', t, results2_mean, 'b')
# plt.show()
list_logk.append(log_by_K)
list_logk2.append(log_by_K2)
K=np.array(K)
#fig,ax= plt.subplots()
#rects1= ax.bar(K, log_by_K, 0.25, color='r')
#rects2= ax.bar(K +0.25, log_by_K2, 0.25, color='b')
#plt.bar(K,log_by_K, K,log_by_K2)
#plt.show()
# list_logk=np.array(list_logk)
# list_logk2=np.array(list_logk2)
# std_list_logk=np.std(list_logk,axis=0)
# std_list_logk2= np.std(list_logk2,axis=0)
#
# print std_list_logk
# print std_list_logk2
# print np.mean(list_logk,axis=0)
# print np.mean(list_logk2,axis=0)
| [
"clmjaiswal@gmail.com"
] | clmjaiswal@gmail.com |
78ff1c6556730e18b33126f71c863b99cb64a8b0 | dd8e3100f723cd5a14bcdeaff33cd46b7f4cb905 | /application/auth/views.py | ac717dd8b3d025992a08f80b29dc2583a452f90a | [] | no_license | ValheKouneli/SkrolliEditor | 3e329b7dd1965d0f8985caf163684691c3274d08 | e3e34813b27c70bc8c44bf5d0198667ced6303c8 | refs/heads/master | 2020-03-23T23:56:09.203709 | 2018-09-01T19:11:34 | 2018-09-01T19:11:34 | 142,264,394 | 0 | 0 | null | 2018-08-31T17:31:10 | 2018-07-25T07:33:29 | Python | UTF-8 | Python | false | false | 6,303 | py | from flask import render_template, request, redirect, url_for
from flask_login import login_user, logout_user, current_user
from application import app, db, login_required
from application.auth.models import User
from application.auth.forms import LoginForm, RegisterForm, UpdateAccountForm
from application.articles.views_helper import update_status, delete_article
from application.pictures.views_helper import update_picture_status, delete_picture
from application.articles.models import Article
from application.pictures.models import Picture
from application.react_to_post_request import react_to_post_request
from application.help import getArticlesWithCondition, getPicturesWithCondition, getPictureWithId
@app.route("/auth/login/", methods = ["GET", "POST"])
def auth_login():
if request.method == "GET":
return render_template("auth/loginform.html", form = LoginForm())
form = LoginForm(request.form)
user = User.query.filter_by(username=form.username.data).first()
if (not user) or (not user.is_correct_password(form.password.data)):
return render_template("auth/loginform.html", form = form,
error = "No such username or password")
login_user(user)
next = request.form.get("next_address")
if next and next != "None":
return redirect(next)
return redirect(url_for("index"))
@app.route("/auth/logout/")
def auth_logout():
logout_user()
return redirect(url_for("index"))
@app.route("/auth/register/", methods = ["GET", "POST"])
def auth_register():
if request.method == "GET":
return render_template("auth/registerform.html", form = RegisterForm())
form = RegisterForm(request.form)
if not form.validate():
return render_template("auth/registerform.html", form = form)
u = User(form.name.data, form.username.data, form.password.data)
db.session().add(u)
db.session().commit()
u.add_name(form.name.data)
login_user(u)
return redirect(url_for("index"))
@app.route("/auth/update/", methods = ["GET", "POST"])
@login_required()
def auth_update():
if request.method == "GET":
form = UpdateAccountForm()
form.name.data = current_user.name
return render_template("auth/updateaccountform.html", form = form)
form = UpdateAccountForm(request.form)
if not form.validate():
return render_template("auth/updateaccountform.html", form = form)
if (not current_user.is_authenticated) or (not current_user.is_correct_password(form.oldpassword.data)):
return render_template("auth/updateaccountform.html", form = form,
error = "Incorrect password")
if form.password.data:
current_user.set_password(form.password.data)
if current_user.name != form.name.data:
current_user.add_name(form.name.data)
current_user.set_name(form.name.data)
return redirect(url_for("index"))
@app.route("/auth/mypage/", methods = ["GET", "POST"])
@login_required()
def mypage():
alert = {}
open = 0
if request.method == "POST":
response = react_to_post_request(request, current_user)
if response["redirect"]:
return response["redirect"]
else:
alert = response["alert"]
open = response["open"]
# fall trough
return render_template("people/tasks.html",
person_is = "I am",
posessive_form = "My",
system_name = current_user.name,
articles_writing = current_user.get_articles_writing().fetchall(),
articles_editing = current_user.get_articles_editing().fetchall(),
pictures_responsible = current_user.get_pictures_responsible().fetchall(),
articles_language_checking = current_user.get_articles_language_checking().fetchall(),
open = open,
alert = alert,
user_id=current_user.id)
@app.route("/auth/languageconsultant/", methods = ["GET", "POST"])
def language_consultant_page():
alert = {}
if request.method == "POST":
response = react_to_post_request(request, current_user)
if response["redirect"]:
return response["redirect"]
else:
alert = response["alert"]
# fall trough
articles = getArticlesWithCondition(
"(Article.editing_status = 100" + \
" AND Article.writing_status = 100" + \
" AND NOT Article.language_checked" + \
" AND Article.language_consultant IS NULL)")
articles = articles.fetchall()
my_articles = None
if current_user.is_authenticated:
my_articles = getArticlesWithCondition(
"(Article.editing_status = 100" + \
" AND Article.writing_status = 100" + \
" AND NOT Article.language_checked" + \
" AND Article.language_consultant = %s)" % current_user.id)
my_articles = my_articles.fetchall()
return render_template("auth/language_consultant_page.html",
articles = articles,
my_articles = my_articles,
current_user = current_user,
alert = alert)
@app.route("/auth/pictureeditor/", methods=["GET", "POST"])
def picture_editor_page():
alert = {}
if request.method == "POST":
if not (current_user.is_authenticated and
current_user.has_role("PICTURE_EDITOR")):
return redirect(url_for("error403"))
try:
id = int(request.form["picture_id"])
except:
message = "Request to mark picture ready failed, because either" + \
" parameter picture_id was missing or it was not an integer."
return render_template("error500.html", message=message)
picture = Picture.query.get(id)
if not picture:
return redirect(url_for("error404"))
picture.ready = True
db.session.commit()
alert = {"type": "success",
"text": "Picture marked ready!" }
# fall trough
pictures = getPicturesWithCondition(
"(Picture.status = 100" + \
" AND NOT Picture.ready)")
pictures = pictures.fetchall()
return render_template("auth/picture_editor_page.html",
pictures = pictures,
current_user = current_user,
alert = alert)
| [
"valhe.kouneli@gmail.com"
] | valhe.kouneli@gmail.com |
6db1aba520cc16f1a2008f85bb487b78c64eb5e0 | fd97c7a1a8a732f77ff53d41c50abfcf48ae8647 | /test_partal_data/method.py | 2cf0b8c76ae72ca4142cf0c5029ce99c4d138599 | [] | no_license | Zhaokun-max/workspaces | eb922902fa4762051f2e8660a70d95ce08c1b70b | 87d713a5c8d3763b3dfa191cd7a00933899679b9 | refs/heads/master | 2023-03-21T00:42:06.451609 | 2021-03-20T15:20:26 | 2021-03-20T15:20:26 | 329,214,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | import requests
class Requests:
    """Thin convenience wrapper around the third-party `requests` library."""

    def request(self, url, method='get', **kwargs):
        """Dispatch an HTTP request for *url*.

        Only 'get' and 'post' are supported; any other method returns None
        (the original fell through implicitly).  The two original branches
        were byte-identical, so they are collapsed into one call.
        """
        if method in ('get', 'post'):
            return requests.request(url=url, method=method, **kwargs)
        return None

    def get(self, url, **kwargs):
        """Send a GET request to *url*."""
        return self.request(url=url, method='get', **kwargs)

    def post(self, url, **kwargs):
        """Send a POST request to *url*."""
        return self.request(url=url, method='post', **kwargs)
"18701079606@163.com"
] | 18701079606@163.com |
b118bf0d9a361e044b62795aff56710301e2645d | 55f4c952c1745e3dc7ffd32859034560dbfe6be5 | /greedy.py | bfec83335f8422be2976f04f467e4f1d5e8abd4d | [] | no_license | banacer/interview_prep | d8832e4f990e21fb3694c5f4701b31c6fb7a711e | 1379fbd54e1accbe8e281c2310d166b028069ee5 | refs/heads/master | 2021-05-10T10:39:27.443393 | 2018-02-16T17:24:42 | 2018-02-16T17:24:42 | 118,390,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,959 | py | import heapq
import collections
def compute_optimum_task_assignment(l, N):
    """Return the makespan of greedily assigning task durations `l` to `N` workers.

    Uses the Longest-Processing-Time heuristic: tasks are taken in
    decreasing order and each goes to the currently least-loaded worker
    (tracked with a min-heap).  Bug fix: no longer sorts the caller's
    list in place.
    """
    loads = [0] * N  # per-worker total load; all-zero list is a valid heap
    for duration in sorted(l, reverse=True):
        # Pop the least-loaded worker and push it back with the task added.
        heapq.heapreplace(loads, loads[0] + duration)
    return max(loads)
def schedule_minimize_time(l):
    """Return the minimum total waiting time for job durations `l`.

    Shortest-Job-First is optimal here: each job waits for the sum of all
    shorter jobs scheduled before it.  Fixes: no longer mutates the
    caller's list, and no longer shadows the builtin `sum`.
    """
    total_wait = 0
    elapsed = 0
    for duration in sorted(l):
        total_wait += elapsed
        elapsed += duration
    return total_wait
def interval_covering_problem(l):
    """Return a minimum set of points covering every closed interval in `l`.

    `l` is a list of [left, right] pairs.  Greedy: sort by right endpoint
    and take an interval's right endpoint whenever the last chosen point
    does not already cover it.  Fix: the original rebuilt a namedtuple
    *type* on every call; plain tuple unpacking does the same job.
    """
    points = []
    for left, right in sorted(l, key=lambda interval: interval[1]):
        if not points or points[-1] < left:
            points.append(right)
    return points
def find_majority(l):
    """Return the majority element of `l` (Boyer-Moore voting).

    Assumes a majority element (> len(l)/2 occurrences) exists.  Bug fix:
    the original tested `not candidate`, which wrongly reset the candidate
    for falsy values such as 0 or '', and double-counted the first vote.
    """
    candidate = None
    count = 0
    for item in l:
        if count == 0:
            candidate = item
            count = 1
        elif candidate == item:
            count += 1
        else:
            count -= 1
    return candidate
def find_gasup(gallons, cities):
    """Return the index of a city whose leg matches the fleet-average mpg, else -1.

    `gallons[i]` is the fuel available at city i and `cities[i]` the
    distance of the leg leaving city i.  A city qualifies when either its
    outgoing or incoming leg consumes fuel at exactly the average rate.

    Bug fix: `mpg` was computed as total_distance / total_distance (always
    1, leaving `total_gallons` unused); it is now distance per gallon for
    the whole circuit.  Assumes non-zero gallons entries.
    """
    total_distance = sum(cities)
    total_gallons = sum(gallons)
    mpg = total_distance / total_gallons
    for i in range(len(cities)):
        if cities[i] / gallons[i] == mpg or cities[i - 1] / gallons[i] == mpg:
            return i
    return -1
def max_trapped_water(l):
    """Container-with-most-water: max of min(l[i], l[j]) * (j - i) over i < j.

    Two-pointer scan from both ends; moving the shorter side inward never
    discards the optimum.  Returns 0 for fewer than two heights.

    Bug fix: the original computed max_area but never returned it.
    """
    i = 0
    j = len(l) - 1
    max_area = 0
    while i < j:
        area = min(l[i], l[j]) * (j - i)
        if area > max_area:
            max_area = area
        if l[i] > l[j]:
            j -= 1
        else:
            i += 1
    return max_area
if __name__ == '__main__':
    # Smoke tests for the greedy algorithms above.
    assert compute_optimum_task_assignment([5,2,1,6,4,4],3) == 8
    assert schedule_minimize_time([2,5,1,3]) == 10
    assert interval_covering_problem([[1,2], [2,3],[3,4], [2,3],[2,3],[3,4],[4,5]]) == [2,4]
    assert find_majority(list('bacaabaaca')) == 'a'
    # Bug fix: the expected value was 48, but the optimum container uses the
    # heights 4 (index 4) and 4 (index 14): min(4, 4) * (14 - 4) == 40.
    # (This assert also relied on max_trapped_water returning its result.)
    assert max_trapped_water([1,2,1,3,4,4,5,6,2,1,3,2,1,2,4]) == 40
"nacerkhalil@gmail.com"
] | nacerkhalil@gmail.com |
47a0c2b54d4735dc376b96849ebeaf4711f5542f | 94ebb695b0a3045fc628b59e8479654ce6b7a85d | /spanner/tests/system/utils/streaming_utils.py | 0e30827d951fce3937ecef862593fc41580c1148 | [
"Apache-2.0"
] | permissive | alexwelcing/google-cloud-python | 9b668fe3d9efdb6c29dbb7b0ce1a54a0c1eda829 | bdda0d94469ddf7daefaf714dcc1268a83fc0377 | refs/heads/master | 2023-02-04T21:50:54.649436 | 2019-11-11T14:33:22 | 2019-11-11T14:33:22 | 104,496,968 | 0 | 0 | Apache-2.0 | 2023-02-03T04:22:05 | 2017-09-22T16:42:22 | Python | UTF-8 | Python | false | false | 1,212 | py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import os
# Spanner instance/database names used by the streaming system tests.
INSTANCE_NAME = 'gcp-streaming-systests'
DATABASE_NAME = 'testing'
# Output is suppressed when the env var GOOGLE_CLOUD_NO_PRINT is 'true'.
_SHOULD_PRINT = os.getenv('GOOGLE_CLOUD_NO_PRINT') != 'true'
class _TableDesc(collections.namedtuple(
        'TableDesc', ('table', 'row_count', 'value_size', 'column_count'))):
    """Shape of a fixture table: name, row count, per-cell size, column count."""

    def value(self):
        """Return one cell's payload: ``value_size`` copies of 'X'."""
        return u'X' * self.value_size
# Fixture tables of increasing per-value size.
FOUR_KAY = _TableDesc('four_kay', 1000, 4096, 1)  # 1000 rows x 4 KiB values
FORTY_KAY = _TableDesc('forty_kay', 100, 4096 * 10, 1)  # 100 rows x 40 KiB values
FOUR_HUNDRED_KAY = _TableDesc('four_hundred_kay', 25, 4096 * 100, 1)  # 25 rows x 400 KiB values
FOUR_MEG = _TableDesc('four_meg', 10, 2048 * 1024, 2)  # 10 rows x 2 MiB values, 2 columns
def print_func(message):
    """Print *message* unless silenced via GOOGLE_CLOUD_NO_PRINT=true."""
    if not _SHOULD_PRINT:
        return
    print(message)
| [
"tseaver@palladion.com"
] | tseaver@palladion.com |
9874b62bd0a68ce2db4ef04ac9e7178f0c1f426c | 1c8a7407888c76248d52c0bb3ac478f755e60acb | /models/fully_connected_model.py | c3a75d4b517eab6e63e54b68e468151d1c468046 | [] | no_license | NumberChiffre/neurips2020-flatland-baselines | ce8e827ecbd0326afd625c9ef3aab5f67b8eac8c | 88e5a8f157d8b2a7b849ba8fa694b5f43e98aa19 | refs/heads/master | 2022-12-14T08:09:00.248906 | 2020-05-20T18:39:26 | 2020-05-20T18:39:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,069 | py | import sys
import gym
import tensorflow as tf
import numpy as np
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from models.common.models import FullyConnected
class FullyConnectedModel(TFModelV2):
    """RLlib TF model: a fully-connected torso feeding two heads — per-action
    logits and a scalar value baseline — with optional masking of
    unavailable actions taken from the observation dict."""

    def __init__(self, obs_space, action_space, num_outputs, model_config, name):
        """Build the Keras model from ``model_config['custom_options']``.

        Options read here: 'layers', 'activation', 'layer_norm' and the
        optional flag 'mask_unavailable_actions'.
        """
        super().__init__(obs_space, action_space, num_outputs, model_config, name)
        assert isinstance(action_space, gym.spaces.Discrete), \
            "Currently, only 'gym.spaces.Discrete' action spaces are supported."
        self._action_space = action_space
        self._options = model_config['custom_options']
        self._mask_unavailable_actions = self._options.get("mask_unavailable_actions", False)
        # With masking enabled the observation space is a dict; the network
        # input is its 'obs' component (the mask is applied in forward()).
        if self._mask_unavailable_actions:
            observations = tf.keras.layers.Input(shape=obs_space.original_space['obs'].shape)
        else:
            observations = tf.keras.layers.Input(shape=obs_space.shape)
        activation = tf.keras.activations.deserialize(self._options['activation'])
        fc_out = FullyConnected(layers=self._options['layers'], activation=activation,
                                layer_norm=self._options['layer_norm'], activation_out=True)(observations)
        # Two linear heads on the shared torso: one logit per discrete
        # action, and a single value-baseline output.
        logits = tf.keras.layers.Dense(units=action_space.n)(fc_out)
        baseline = tf.keras.layers.Dense(units=1)(fc_out)
        self._model = tf.keras.Model(inputs=[observations], outputs=[logits, baseline])
        self.register_variables(self._model.variables)
        self._model.summary()

    def forward(self, input_dict, state, seq_lens):
        """Run the network; return (logits, state) and cache the baseline."""
        if self._mask_unavailable_actions:
            obs = input_dict['obs']['obs']
        else:
            obs = input_dict['obs']
        logits, baseline = self._model(obs)
        # Flatten the (batch, 1) baseline to (batch,) for value_function().
        self.baseline = tf.reshape(baseline, [-1])
        if self._mask_unavailable_actions:
            # log(0) -> -inf for unavailable actions; clamp with float32.min
            # so the masked logits stay representable.
            inf_mask = tf.maximum(tf.math.log(input_dict['obs']['available_actions']), tf.float32.min)
            logits = logits + inf_mask
        return logits, state

    def value_function(self):
        """Return the baseline cached by the most recent forward() call."""
        return self.baseline
| [
"ch.scheller@hotmail.com"
] | ch.scheller@hotmail.com |
9bfaa585d624e4dac8b2ed1d45882b79d9b3415b | 6937225781f872faf06385987be44636fb1122a2 | /DSA_Assignment/Full Project/logic.py | 3735ed2ea81603e5017c07634406901c7bb38b81 | [] | no_license | Moss89/Data_Structures_Algorithms | e3cab0a69652c381727113761d8fb375bc3f06eb | a7d77baafe8d425abbb4561fde9c3fc79aae4b6e | refs/heads/master | 2020-12-22T07:20:02.838870 | 2020-01-28T10:36:38 | 2020-01-28T10:36:38 | 236,709,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,860 | py | import pandas as pd
import DataStructures
import os
import sys
csv = ["test.csv",
"airport.csv",
"countrycurrency.csv",
"currencyrates.csv",
"currencyrates.csv",
"aircraft.csv"]
# Verify every required input file exists before loading anything.
missingFiles = []
filesPresent = True
for file in csv:
    if not os.path.isfile(file):
        # Fix: the old `break` stopped at the first missing file, so the
        # plural "file(s) missing" message could only ever name one file.
        missingFiles.append(file)
        filesPresent = False

if filesPresent:
    test_df = pd.read_csv("test.csv")
    airport_df = pd.read_csv("airport.csv")
    country_currency_df = pd.read_csv("countrycurrency.csv")
    currency_rates_df = pd.read_csv("currencyrates.csv")
    aircraft_df = pd.read_csv("aircraft.csv")

    # Join airports to their country's currency, then to the exchange rate.
    airport_currencies_df = airport_df.merge(country_currency_df.rename(columns={"name":"Country"}), on="Country")
    merged_df = airport_currencies_df.merge(currency_rates_df.rename(columns={"CurrencyCode":"currency_alphabetic_code"}), on="currency_alphabetic_code")
else:
    print("Error, csv file(s) missing:", missingFiles)
    sys.exit()
# Build the shared graph plus the airport/aircraft lookup tables, then
# process each itinerary row from test.csv.
g = DataStructures.Graph()
airports = DataStructures.create_airport_list(merged_df)
aircrafts = DataStructures.createAircrafts(aircraft_df)
for i in range(len(test_df)):
    route = DataStructures.getRoute(test_df, i)
    aircraft = test_df["Airplane"][i]
    # checkRoute appears to return (ok, offending_airport) — TODO confirm
    # against DataStructures.checkRoute.
    checkRoute = DataStructures.checkRoute(route, airports)
    if not checkRoute[0]:
        print("Error: Airport,", checkRoute[1], ", doesn't exist in route:", route, "\n")
        continue
    if not DataStructures.checkAircraftExist(aircraft, aircrafts):
        print("Error: Aircraft doesn't exist:", aircraft, "\n")
        continue
    # Extend the graph with this route's airports and pairwise distances,
    # then price every permutation of the route within the aircraft's range.
    DataStructures.addToAirportAtlas(route, g, airports)
    DataStructures.addAirportDistances(route, g, airports)
    permutations = DataStructures.getPermutations(route)
    print(DataStructures.getCheapestPath(permutations, aircrafts[aircraft].get_maxfuel(), g), "\n")
"noreply@github.com"
] | Moss89.noreply@github.com |
c27c9cb6efded93a2eb21abacf8ec5774cd91f3c | f1230362b9ca480dfdcbabfa114d73f16e08ed39 | /graph.py | 06f5016b1852cc700d87e974c29904be6f60217a | [] | no_license | goatgoose/ranker | aac51db5ecfcc0770147676e054b0f631bfe5887 | fd3c68141bba95c4b49faca596925df520548183 | refs/heads/master | 2022-09-20T22:07:16.400103 | 2020-05-24T13:22:41 | 2020-05-24T13:22:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,437 | py | from typing import Hashable
class Node:
    """A graph vertex identified by a hashable id, holding its outgoing edges."""

    def __init__(self, id_: Hashable):
        """Create a node with the given hashable identifier."""
        self.id = id_
        # Maps head-node id -> Edge for every edge leaving this node.
        # Fix: the old annotation `dict[Hashable: Edge]` used a colon
        # (a slice) instead of a comma, which is meaningless to type checkers.
        self.edges = {}

    def __str__(self):
        return str(self.id)

    def __repr__(self):
        return str(self)
class Edge:
    """A directed connection running from ``tail`` to ``head``."""

    def __init__(self, tail: Node, head: Node):
        # Store the endpoints; direction is tail -> head.
        self.head: Node = head
        self.tail: Node = tail
class Graph:
    """A directed graph over Node objects, keyed by node id."""

    def __init__(self):
        # Maps node id -> Node.
        self.nodes = {}

    def add_node(self, node):
        """Register *node* under its id (replacing any same-id node)."""
        self.nodes[node.id] = node

    def add_edge(self, edge):
        """Attach *edge* to its tail node, keyed by the head node's id."""
        self.nodes[edge.tail.id].edges[edge.head.id] = edge

    def remove_node(self, node_to_remove):
        """Delete *node_to_remove* and every edge pointing at it.

        Bug fix: the original iterated ``self.nodes`` (the id keys) as if
        they were Node objects and then deleted Edge *objects* as dict
        keys, raising AttributeError/KeyError.  Edges live on their tail
        node keyed by head id, so dropping the removed node's id from
        every other node's edge dict is sufficient; its own outgoing
        edges disappear with the node itself.
        """
        for node in self.nodes.values():
            node.edges.pop(node_to_remove.id, None)
        del self.nodes[node_to_remove.id]

    def breadth_first_traverse(self, start_id):
        """Return BFS layers (sets of Node) reachable from *start_id*.

        Fix: the original could drop the final frontier when the last
        node popped from a layer was already discovered (the ``continue``
        skipped the layer-rotation step).
        """
        start = self.nodes[start_id]
        discovered = {start}
        layers = []
        current = {start}
        while current:
            layers.append(current)
            frontier = set()
            for node in current:
                for edge in node.edges.values():
                    if edge.head not in discovered:
                        discovered.add(edge.head)
                        frontier.add(edge.head)
            current = frontier
        return layers

    def breadth_first_search(self, s_id, t_id):
        """Return a list of Nodes on a shortest path s_id -> t_id, or None."""
        source = self.nodes[s_id]
        target = self.nodes[t_id]
        discovered = set()
        parents = {}
        # Plain list used as a FIFO queue; pop(0) is O(n) but the graph
        # sizes handled here are small.
        frontier = [source]
        while frontier:
            current = frontier.pop(0)
            if current in discovered:
                continue
            discovered.add(current)
            for edge in current.edges.values():
                neighbor = edge.head
                parents[neighbor] = current
                if neighbor == target:
                    # Walk the parent chain back to the source.
                    path = [neighbor]
                    while path[-1] != source:
                        path.append(parents[path[-1]])
                    path.reverse()
                    return path
                if neighbor not in discovered:
                    frontier.append(neighbor)
        return None

    def __str__(self):
        out = ""
        for node in self.nodes.values():
            out += str(node)
            out += "".join(f" -> {edge.head}" for edge in node.edges.values())
            out += "\n"
        return out[:-1]

    def __contains__(self, item):
        return item in self.nodes
if __name__ == '__main__':
    # Small demo: a 4-node graph with two BFS queries.
    demo = Graph()
    for node_id in (1, 2, 3, 4):
        demo.add_node(Node(node_id))
    for tail_id, head_id in ((1, 2), (1, 3), (2, 3), (2, 4)):
        demo.add_edge(Edge(demo.nodes[tail_id], demo.nodes[head_id]))
    print(demo)
    print(demo.breadth_first_traverse(1))
    print(demo.breadth_first_search(1, 3))
| [
"jhchilds@uvm.edu"
] | jhchilds@uvm.edu |
30e9e67b6cfcd5076857587426ea7ce6b7b1784e | b4fcfcd018934c02b4a3d0dd28175c16e79674ce | /books/store/migrations/0006_alter_userbookrelation_rate.py | 31b843cd9179b435fa8ba6411c18ed5605d06613 | [] | no_license | ChebuRashkaRF/DRF_book | aac141733cb135e4ea9ca469e73cd47c3b896866 | dc58812f167446a9f2b399fd2edfce0e28f9b6f0 | refs/heads/main | 2023-07-25T08:05:10.535255 | 2021-08-29T22:11:36 | 2021-08-29T22:11:36 | 401,151,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | # Generated by Django 3.2.6 on 2021-08-24 13:51
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (see the "Generated by" header):
    # redefines UserBookRelation.rate as a nullable small-integer rating
    # with choices 1..5.  Do not hand-edit the operations.

    dependencies = [
        ('store', '0005_auto_20210824_1307'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userbookrelation',
            name='rate',
            # null=True — NOTE(review): presumably a relation may exist
            # without a rating; confirm against the model definition.
            field=models.PositiveSmallIntegerField(choices=[(1, '1.0'), (2, '2.0'), (3, '3.0'), (4, '4.0'), (5, '5.0')], null=True),
        ),
    ]
| [
"50379603+ChebuRashkaRF@users.noreply.github.com"
] | 50379603+ChebuRashkaRF@users.noreply.github.com |
b244985f81429915c2950b9834cfdbbaff95b0e6 | 68b68ebccdfd9cf59c910f6d80264eeee9ac8b17 | /run_match.py | 60f5bf6e8ea51393a96033aed6e759b13487c2e6 | [] | no_license | alan-turing-institute/rl_tournament | 611eba63c7da2906790c351148c5c36727f7c8ae | 8fa247d3e448b3d3db24a4e075d6a6c8cef75085 | refs/heads/main | 2023-03-13T13:13:28.001193 | 2021-03-11T15:50:52 | 2021-03-11T15:50:52 | 330,745,876 | 6 | 3 | null | 2021-03-11T15:50:53 | 2021-01-18T17:55:56 | Python | UTF-8 | Python | false | false | 340 | py | import os
from battleground.battleground import Battleground
if __name__ == "__main__":
if "MATCH_ID" not in os.environ.keys():
raise RuntimeError("MATCH_ID not found in environment")
match_id = os.environ["MATCH_ID"]
bg = Battleground(match_id=match_id)
bg.setup_games()
bg.listen_for_ready()
bg.play()
| [
"nbarlow@turing.ac.uk"
] | nbarlow@turing.ac.uk |
7bc0d8cccf91009e4d128c0b2b933bf59d4eac50 | 8496aa090624d599e7790453784e38e46cfa51e5 | /modules/texify.py | f0aa8c8f67e3242088ef94296766f43b0cadb644 | [] | no_license | dafyddcrosby/phenny | e4e2f05f262ef608a48f73c412ee9a94eae7e7cc | e7abfd8a2c69d7998b3cb4cbbc24f1313e11fac5 | refs/heads/master | 2021-01-18T13:33:09.545060 | 2012-06-06T23:33:29 | 2012-06-06T23:33:29 | 1,837,916 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | #!/usr/bin/env python
"""
texify.py - Phenny Texify Module
"""
import urllib
TEXIFY = '{nick}: http://texify.com/${tex}$'


def tex(phenny, input):
    """Reply with a texify.com link that renders the given LaTeX."""
    query = input.group(2)
    if not query:
        return phenny.reply('.tex what?')
    # URL-encode the UTF-8 bytes of the LaTeX source (Python 2 urllib).
    encoded = urllib.quote(query.encode('utf-8'))
    phenny.say(TEXIFY.format(nick=input.nick, tex=encoded))


tex.commands = ['tex']
tex.priority = 'high'
tex.example = ".tex E=mc^2"
if __name__ == '__main__':
    # Show the module docstring when run directly (Python 2 print statement).
    print __doc__.strip()
| [
"alexalemi@gmail.com"
] | alexalemi@gmail.com |
4d141a71e15b22c552798c4df2ca3132387ac149 | eb4e2391664239873225cd94774c1244f172341b | /user/views.py | cca4f05d66169f7eb41aff1c5dc85441009c86a1 | [] | no_license | AKL-FIRE/mysite | f4dbedcfab6f35fa0e9966ddcc244907d7ae84b5 | 1369cc8fc6598097482f915e7271de40c94288e9 | refs/heads/master | 2021-07-23T20:44:56.310813 | 2020-05-11T00:42:06 | 2020-05-11T00:42:06 | 167,659,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,131 | py | import string
import time
from django.shortcuts import render, redirect
from django.contrib import auth
from django.contrib.auth.models import User
from django.urls import reverse
from django.http import JsonResponse
from django.core.mail import send_mail
from .forms import LoginForm, RegForm, ChangeNicknameForm, BindEmailForm, ChangePasswordForm, ForgotPasswordForm
from .models import Profile
from blog.models import Blog
import random
def login_for_modal(request):
    """AJAX login endpoint for the modal dialog; replies with a JSON status."""
    form = LoginForm(request.POST)
    payload = {}
    if form.is_valid():
        auth.login(request, form.cleaned_data['user'])
        payload['status'] = 'SUCCESS'
    else:
        payload['status'] = 'ERROR'
    return JsonResponse(payload)
def login(request):
    """Render the login page; authenticate on valid POST and redirect to
    the `from` query parameter (defaulting to home)."""
    if request.method == 'POST':
        form = LoginForm(request.POST)
        if form.is_valid():
            auth.login(request, form.cleaned_data['user'])
            return redirect(request.GET.get('from', reverse('home')))
        # Invalid POST: fall through and re-render with the bound form.
    else:
        form = LoginForm()
    return render(request, 'user/login.html', {'login_form': form})
def register(request):
    """Render the registration page; on a valid POST create the account,
    clear the emailed verification code and log the new user in."""
    if request.method == 'POST':
        reg_form = RegForm(request.POST, request=request)
        if reg_form.is_valid():
            username = reg_form.cleaned_data['username']
            email = reg_form.cleaned_data['email']
            password = reg_form.cleaned_data['password']
            # Create the user
            user = User.objects.create_user(username, email, password)
            user.save()
            user_profile = Profile.objects.create(user=user, nickname='')
            user_profile.save()
            # Clear the verification code from the session
            del request.session['register_code']
            # Log the user in
            user = auth.authenticate(username=username, password=password)
            auth.login(request, user)
            return redirect(request.GET.get('from', reverse('home')))
    else:
        reg_form = RegForm()
    context = {}
    context['reg_form'] = reg_form
    return render(request, 'user/register.html', context)
def logout(request):
    """Log the current user out, then bounce to `from` (or home)."""
    destination = request.GET.get('from', reverse('home'))
    auth.logout(request)
    return redirect(destination)
def user_info(request):
    """Render the profile page with up to three randomly chosen blog posts.

    Bug fix: the old implementation drew pks with random.randint(1, count),
    which raises Blog.DoesNotExist as soon as any pk is missing from the
    1..count range (e.g. after a delete) and could also pick the same post
    more than once.  random.sample over the actual rows avoids both.
    """
    blogs = list(Blog.objects.all())
    context = {}
    if len(blogs) >= 3:
        context['blogs'] = random.sample(blogs, 3)
    else:
        context['blogs'] = []
    return render(request, 'user/user_info.html', context)
def change_nickname(request):
    """Show/process the nickname-change form; redirect to `from` on success."""
    redirect_to = request.GET.get('from', reverse('home'))
    if request.method == 'POST':
        form = ChangeNicknameForm(request.POST, user=request.user)
        if form.is_valid():
            profile, _ = Profile.objects.get_or_create(user=request.user)
            profile.nickname = form.cleaned_data['nickname_new']
            profile.save()
            return redirect(redirect_to)
    else:
        form = ChangeNicknameForm()
    context = {
        'page_title': '修改昵称',
        'form_title': '修改昵称',
        'submit_text': '修改',
        'form': form,
        'return_back_url': redirect_to,
    }
    return render(request, 'form.html', context)
def bind_email(request):
    """Bind an email address to the logged-in account after verification."""
    redirect_to = request.GET.get('from', reverse('home'))
    if request.method == 'POST':
        form = BindEmailForm(request.POST, request=request)
        if form.is_valid():
            email = form.cleaned_data['email']
            request.user.email = email
            request.user.save()
            # Clear the verification code from the session
            del request.session['bind_email_code']
            return redirect(redirect_to)
    else:
        form = BindEmailForm()
    context = {}
    context['page_title'] = '绑定邮箱'
    context['form_title'] = '绑定邮箱'
    context['submit_text'] = '绑定'
    context['form'] = form
    context['return_back_url'] = redirect_to
    return render(request, 'user/bind_email.html', context)
def send_verification_code(request):
    """AJAX endpoint: email a 4-character code and stash it in the session.

    Query params: ``email`` (recipient) and ``send_for`` (the session key
    the code is stored under, e.g. 'bind_email_code').  Re-sending is
    throttled to once per 60 seconds via 'send_code_time' in the session.
    """
    email = request.GET.get('email', '')
    send_for= request.GET.get('send_for', '')
    data = {}
    if email != '':
        # Generate the verification code
        code = ''.join(random.sample(string.ascii_letters + string.digits, 4))
        now = int(time.time())
        send_code_time = request.session.get('send_code_time', 0)
        if now - send_code_time < 60:
            data['status'] = 'ERROR'
        else:
            # NOTE(review): `send_for` comes straight from the query string
            # and is used as a session key without validation — confirm
            # this is acceptable.
            request.session[send_for] = code
            request.session['send_code_time'] = now
            # Send the email
            send_mail(
                '发送验证码',
                '验证码:%s' % code,
                '598309689@qq.com',
                [email],
                fail_silently=False,
            )
            data['status'] = 'SUCCESS'
    else:
        data['status'] = 'ERROR'
    return JsonResponse(data)
def change_password(request):
    """Show/process the password-change form; on success the session is
    logged out and the user is sent to the home page."""
    redirect_to = reverse('home')
    if request.method == 'POST':
        form = ChangePasswordForm(request.POST, user=request.user)
        if form.is_valid():
            request.user.set_password(form.cleaned_data['new_password'])
            request.user.save()
            auth.logout(request)
            return redirect(redirect_to)
    else:
        form = ChangePasswordForm()
    context = {
        'page_title': '修改密码',
        'form_title': '修改密码',
        'submit_text': '修改',
        'form': form,
        'return_back_url': redirect_to,
    }
    return render(request, 'form.html', context)
def forgot_password(request):
    """Reset a user's password after email-code verification."""
    redirect_to = reverse('home')
    if request.method == 'POST':
        form = ForgotPasswordForm(request.POST, request=request)
        if form.is_valid():
            email = form.cleaned_data['email']
            new_password = form.cleaned_data['new_password']
            user = User.objects.get(email=email)
            user.set_password(new_password)
            user.save()
            # Clear the verification code from the session
            del request.session['forgot_password_code']
            return redirect(redirect_to)
    else:
        form = ForgotPasswordForm()
    context = {}
    context['page_title'] = '重置密码'
    context['form_title'] = '重置密码'
    context['submit_text'] = '重置'
    context['form'] = form
    context['return_back_url'] = redirect_to
    return render(request, 'user/forgot_password.html', context)
def get_update_head(request):
    """Render the avatar-upload page."""
    return render(request, 'user/change_head_icon.html', {})
def update_head(request):
    """Save an uploaded avatar image to the user's profile, then render
    the profile page."""
    if request.method == 'POST':
        # NOTE(review): a POST without an 'img' file raises KeyError, and
        # Profile.objects.filter(...)[0] raises IndexError if the user has
        # no profile — confirm whether these cases can occur.
        img = request.FILES['img']
        user = User.objects.get(username=request.user)
        profile = Profile.objects.filter(user=user)[0]
        profile.user_icon = img
        profile.save()
    # NOTE(review): renders user_info.html without the 'blogs' key that
    # user_info() normally supplies — verify the template tolerates this.
    context = {}
    return render(request, 'user/user_info.html', context)
| [
"lcy19961018@163.com"
] | lcy19961018@163.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.