blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0753dd9e69c7ef1f6fcaf2dd6e5751a5c5cb12b6 | 7465b8929289b1a435750fc825d558ae711e84a5 | /client.py | 2fee9c1297ef429e9a3a0e4fe8c2a24615b67df1 | [] | no_license | mifcom-iv/BarcodeScanner | 3030143b31bb3cc3927a23cd2d8434e8569e82eb | 619d2fdac6248267486c00b5086be8f2a84d5f6c | refs/heads/master | 2023-01-07T14:25:47.121139 | 2020-11-03T13:50:48 | 2020-11-03T13:50:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,123 | py | import cv2
import time
import os
from pyzbar import pyzbar
import simplejson as json
import requests
import threading
import random
cap = cv2.VideoCapture(0)  # default webcam
font = cv2.FONT_HERSHEY_PLAIN
cap.set(cv2.CAP_PROP_AUTOFOCUS, 0)  # disable autofocus so focus stays fixed
cap.set(cv2.CAP_PROP_FOCUS, 100)
cap.set(3, 1280)  # 3 == cv2.CAP_PROP_FRAME_WIDTH
cap.set(4, 1024)  # 4 == cv2.CAP_PROP_FRAME_HEIGHT
def update_detection():
    # Placeholder target for the `updater` thread below; currently a no-op.
    pass
lastprinted = time.time()  # NOTE(review): never read afterwards — confirm it is dead
current_frame = None  # most recent (annotated) frame, displayed by main()
response = None  # last JSON payload decoded from the barcode service
updater = threading.Thread(target=update_detection)  # NOTE(review): created but never started
def process_frame():
    """Grab one webcam frame, send it to the barcode web service, and
    annotate the frame with the detected barcodes plus timing overlays.

    Side effects: updates the module-level ``current_frame`` (annotated
    frame shown by ``main``) and ``response`` (decoded JSON payload).
    Raises whatever ``requests`` raises if the service is unreachable.
    """
    global current_frame, response
    start = time.time()
    _, frame = cap.read()
    current_frame = frame
    # Grayscale + 50% JPEG quality keeps the upload small.
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fileID = str(random.randint(0, 100000)) + ".jpg"
    cv2.imwrite(fileID, image, [cv2.IMWRITE_JPEG_QUALITY, 50])
    try:
        with open(fileID, 'rb') as f:
            r = requests.post('http://52.29.176.28:5000/barcode', files={'image': f})
        response = json.loads(r.content)
    finally:
        # Always remove the temp file, even if the upload or parse fails
        # (the original leaked the file on any request error).
        os.remove(fileID)
    for obj in response['barcodes']:
        x = obj['rect']['left']
        y = obj['rect']['top']
        w = obj['rect']['width']
        h = obj['rect']['height']
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 5)
        text = "{} ( {} )".format(obj['data'], obj['type'])
        cv2.putText(frame, text, (x, y - 10), cv2.FONT_HERSHEY_DUPLEX, 0.8, (0, 0, 255), 2)
    cv2.putText(frame,
                "Server-side: " + str(response['time_ms']) + " ms",
                (0, 50), cv2.FONT_HERSHEY_DUPLEX, 0.8, (0, 0, 0), 2)
    # Clamp to at least 1 ms: a sub-millisecond round trip made the
    # original divide by zero when computing FPS.
    timems = max(int((time.time() - start) * 1000), 1)
    cv2.putText(frame, "FPS: " + str(int(1000 / timems)), (0, 25), cv2.FONT_HERSHEY_DUPLEX, 0.8, (0, 0, 0),
                2)
    current_frame = frame
def main():
    """Capture/annotate/display loop; exits when the user presses Esc."""
    while True:
        process_frame()
        frame = current_frame
        if frame is not None:
            cv2.imshow("Webcam Preview", frame)
        pressed = cv2.waitKey(1)
        if pressed == 27:  # Esc
            break
main()  # blocks until the user presses Esc
cap.release()
cv2.destroyAllWindows()
"skipperro@gmail.com"
] | skipperro@gmail.com |
08ad55cb546abc0b9e3e0cca87900e8499006e3e | 40f93bb82fe3f911c768676f02380519f3a98fd9 | /bricks.py | 374bea8b66210b48ea67ea38bdf4efee9ef7d6a0 | [] | no_license | aadarshraghunathan/Terminal-Game | 143481e9c6f5eedd6f62dde6c00c0bc231e1a087 | 2c470a60ea24e5cb0b94bfa119f2f45815405d4a | refs/heads/master | 2023-03-27T05:32:56.026361 | 2021-03-19T17:27:30 | 2021-03-19T17:27:30 | 349,503,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,300 | py | import time
import powerups
import random
l1 = powerups.powerups
class bricks: # (x,y) is the bottom left corner of the block and score is the number of hits the block can tolerate
    """Base breakout brick: a 1x9 cell block with a hit-point score.

    Attributes:
        x, y: bottom-left corner (row, column) on the play field.
        height, width: fixed block size in cells.
        score: remaining hits (presumably 100000000 marks an indestructible
               brick — see hit(); confirm against the level builder).
        exist: False once the brick has been destroyed.
        yeet_colour: set after the first hit; freezes rainbow colour cycling.
    """
    def __init__(self, x, y, score):
        self.x = x
        self.y = y
        self.height = 1
        self.width = 9
        self.score = score
        self.exist = True
        self.yeet_colour = False
    # def push_to_array(self, x):
    #     for i in range(self.x - self.height + 1, self.x + 1):
    #         for j in range(self.y, self.y + self.width + 2):
    #             x[i][j] = 1
    def hit(self, arr, k, arr1, xvel, yvel, level):
        # Decrement hit points unless this is the "indestructible" score.
        if (self.score != 100000000):
            self.score = self.score - 1
            self.yeet_colour = True
    def destroy(self, arr, k, arr1, xvel, yvel, level):
        """Remove brick k from the screen buffer arr1 and possibly spawn a
        random powerup (levels other than 3)."""
        if (self.score > 0):
            self.score = 0
        x = arr[k].x
        y = arr[k].y
        # Blank out the brick's cells in the screen array.
        for i in range(x - arr[k].height, x):
            for j in range(y, y + arr[k].width):
                arr1[i][j] = ' '
        if (level != 3):
            # 50% chance to drop one of the four powerup types.
            id = random.randint(1, 4)
            choice = random.randint(1, 2)
            if (choice == 1):
                p = powerups.Powerups(x, y, id, xvel, yvel)
                powerups.powerup_list.append(p)
        self.exist = False
    def change_score(score):
        # NOTE(review): first parameter acts as `self` (named `score`);
        # subclasses override this as an instance method — confirm intent.
        pass
class Rainbow_bricks(bricks):
    """Brick whose score (colour) cycles through 1..3 until it is hit."""

    def __init__(self, x, y, score):
        super().__init__(x, y, score)

    def change_score(self):
        """Advance the score one step through the repeating 1..3 cycle,
        unless the brick has already been hit (yeet_colour set)."""
        if not self.yeet_colour:
            self.score = (self.score + 1) % 3 + 1
class Exploding_bricks(bricks):
    """Brick that destroys every neighbouring brick when hit (chain reaction
    via each neighbour's own destroy())."""
    def __init__(self, x, y, score):
        super().__init__(x, y, score)
    def destroy(self, arr, k, arr1, xvel, yvel, level):
        """Blank brick k from the screen buffer, then destroy all bricks
        within one brick-size of this one. Never drops a powerup."""
        x = arr[k].x
        y = arr[k].y
        for i in range(x - arr[k].height, x):
            for j in range(y, y + arr[k].width):
                arr1[i][j] = ' '
        self.exist = False
        self.score = 0
        l = len(arr)
        j = 0
        while (j < l):
            if (arr[j].exist == True):
                # Earlier side-by-side adjacency checks, kept for reference:
                # #
                # if ((arr[j].y + arr[j].width + 1) == self.y
                #         and (arr[j].x == self.x or (arr[j].x == self.x-1))):
                #     arr[j].destroy(arr, j)
                # elif (arr[j].y == (self.y + self.width)
                #         and ((arr[j].x <= self.x) and
                #         (arr[j].x >= self.x - self.height))):
                #     arr[j].destroy(arr, j)
                # elif (arr[j].x == self.x
                #         and ((arr[j].y >= self.y) and
                #         (arr[j].y <= self.y + self.width))):
                #     arr[j].destroy(arr, j)
                # elif (arr[j].x == (self.x - self.height)
                #         and ((arr[j].y >= self.y) and
                #         (arr[j].y <= self.y + self.width))):
                #     arr[j].destroy(arr, j)
                # Destroy any brick whose corner is within one brick-size of
                # this one (this triggers neighbouring exploders recursively).
                if ((abs(arr[j].x - self.x) <= self.height)
                        and (abs(arr[j].y - self.y) <= self.width + 1)):
                    arr[j].destroy(arr, j, arr1, xvel, yvel, level)
            j += 1
    def hit(self, arr, k, arr1, xvel, yvel, level):
        # An exploding brick detonates on the first hit.
        self.destroy(arr, k, arr1, xvel, yvel, level)
| [
"aadu.r01@gmail.com"
] | aadu.r01@gmail.com |
1dc9c0534e24d225ec4d1804cdae435dfa7c7bf5 | 557df02ebb9ead0722f9979cd3412d3e5933c4f2 | /labexercise_5.py | 1f5b0bef3ce711b994df617f470498bb076b6faa | [] | no_license | prabalbhandari04/python_ | 7409969a5324510d7b6f17992fc7899c437a1d14 | b84bdefb5214ad9b61c950b11986ef975cafdec0 | refs/heads/master | 2020-09-07T19:04:25.634560 | 2019-12-12T07:42:13 | 2019-12-12T07:42:13 | 220,886,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | #Write a Python program that accepts a word from the user and reverse it.
def reverse(text=None):
    """Print and return the reversed form of *text*.

    Backward compatible with the original zero-argument call: when *text*
    is None the module-level ``word`` (read from the user below) is used.
    The reversed string is now also returned so callers can use the value
    (the original only printed it).
    """
    source = word if text is None else text
    rev = source[::-1]
    print(rev)
    return rev
word = input("Enter a word to be reversed:")  # prompt runs at import time (script-style)
reverse()
"noreply@github.com"
] | noreply@github.com |
4055c0b8d5cfcebbc7f81b1354a620ded0422057 | bde76b010eebfa338b4556bbe6af85a9e093a773 | /ivpk/plots.py | 28af37151cb04e38e9698146717b98e62801a3c7 | [
"MIT"
] | permissive | yanshouyu/human-iv-pk | 81a98cae1983feef68d4c6692096e123771a5880 | d4517fce11dfa407b2b125afcdeb213fcb01bed4 | refs/heads/main | 2023-07-15T02:03:14.255631 | 2021-08-19T08:59:08 | 2021-08-19T08:59:08 | 393,326,063 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,432 | py | """Visualization module, including both data and molecule visualization.
"""
from rdkit import Chem
from rdkit.Chem.rdchem import Mol
from rdkit.Chem import Draw
from rdkit.Chem.Draw import rdMolDraw2D
import numpy as np
from typing import Union, Tuple
from matplotlib import pyplot as plt
from .utils import morgan_bit_atoms
from .evalutaion import Evaluation
#------------ chem plot -------------#
def draw_highlighted_bit(
        fname: str,
        mol: Union[Mol, str],
        bit: int,
        nBits: int) -> None:
    """Draw the molecule, highlighting a bit on its Morgan fingerprint if
    the bit is on.

    Args:
        fname: path of the image to be saved
        mol: molecule object or smiles string
        bit: the bit to highlight
        nBits: nBits in generating morgan fingerprint
    """
    if isinstance(mol, str):
        mol = Chem.MolFromSmiles(mol)
    hl_atoms = []
    hl_bonds = []
    atom_radius = morgan_bit_atoms(mol, nBits=nBits, bit=bit)
    if atom_radius:
        # Reuse the (atom id, radius) pairs computed above; the original
        # called morgan_bit_atoms a second time for the same arguments.
        for a_id, r in atom_radius:
            hl_atoms.append(a_id)
            if r > 0:
                # Radius > 0 means the bit's environment extends beyond the
                # centre atom, so highlight the atom's bonds as well.
                atom = mol.GetAtomWithIdx(a_id)
                for bond in atom.GetBonds():
                    hl_bonds.append(bond.GetIdx())
    d = rdMolDraw2D.MolDraw2DCairo(500, 500)
    rdMolDraw2D.PrepareAndDrawMolecule(d, mol, highlightAtoms=hl_atoms, highlightBonds=hl_bonds)
    d.FinishDrawing()
    d.WriteDrawingText(fname)
#------------ stat plot -------------#
def plot_model_eval(ev: Evaluation):
    """Scatter-plot predicted vs. real values for the validation set and the
    cross-validation predictions, side by side on a shared axis range.

    Args:
        ev: Evaluation holding y_val / y_val_pred, y_train_val /
            y_train_val_pred arrays and an `evaluation` metrics dict.
    """
    def plot_scatter(x, y, lim: Tuple):
        # One real-vs-predicted panel with identical x/y limits.
        plt.plot(x, y, ".")
        plt.xlim(*lim)
        plt.ylim(*lim)
        plt.xlabel("real")
        plt.ylabel("predicted")
    # Shared axis limits padded 5% beyond the validation data range.
    min_val = np.min([ev.y_val, ev.y_val_pred])
    max_val = np.max([ev.y_val, ev.y_val_pred])
    interval = max_val - min_val
    lim = (
        np.floor(min_val - 0.05*interval),
        np.ceil(max_val + 0.05*interval)
    )
    plt.figure(figsize=(10, 5))
    plt.subplot(121)
    plot_scatter(ev.y_val, ev.y_val_pred, lim)
    # The original passed a tuple to plt.title(), which matplotlib renders
    # as the tuple's repr; build a single string instead.
    plt.title(
        "validation "
        f"MAE: {ev.evaluation['MAE_val']:.3f}, "
        f"Pearson: {ev.evaluation['Pearsonr_val']:.3f}"
    )
    plt.subplot(122)
    plot_scatter(ev.y_train_val, ev.y_train_val_pred, lim)
    plt.title(
        "CV pred "
        f"MAE: {ev.evaluation['MAE_cv']:.3f} "
        f"Pearson: {ev.evaluation['Pearsonr_cv']:.3f}"
    )
"yan.shouyu@foxmail.com"
] | yan.shouyu@foxmail.com |
b9cd9b43fb64eb1805b8b9e3a30ddee088c9540c | 76f59c245744e468577a293a0b9b078f064acf07 | /3.longest-substring-without-repeating-characters.py | f5ddb791b9a978f2ed72a471cf53a960cb68a2a9 | [] | no_license | satoshun-algorithm-example/leetcode | c3774f07e653cf58640a6e7239705e58c5abde82 | 16b39e903755dea86f9a4f16df187bb8bbf835c5 | refs/heads/master | 2020-07-01T10:24:05.343283 | 2020-01-13T03:27:27 | 2020-01-13T03:27:27 | 201,144,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | #
# @lc app=leetcode id=3 lang=python3
#
# [3] Longest Substring Without Repeating Characters
#
class Solution:
    def lengthOfLongestSubstring(self, s: str) -> int:
        """Return the length of the longest substring of *s* that contains
        no repeated characters.

        Sliding-window scan: ``last_seen`` maps each character to the index
        of its most recent occurrence; ``start`` is the left edge of the
        current duplicate-free window.  O(n) time, O(min(n, alphabet))
        space — the original restarted a scan from every index, which is
        O(n^2).
        """
        last_seen = {}
        start = 0
        best = 0
        for i, ch in enumerate(s):
            if ch in last_seen and last_seen[ch] >= start:
                # Duplicate inside the window: slide the left edge past it.
                start = last_seen[ch] + 1
            last_seen[ch] = i
            best = max(best, i - start + 1)
        return best
| [
"shun.sato1@gmail.com"
] | shun.sato1@gmail.com |
4e7b737ef7c0dfbd4334a02c47e6e82ee662b5e9 | bec623f2fab5bafc95eb5bd95e7527e06f6eeafe | /django-shared/treemenus/migrations/0003_menuitem_caption_pt.py | 7f07c34686f12f29e3581c5062d3499f2d994595 | [] | no_license | riyanhax/a-demo | d714735a8b59eceeb9cd59f788a008bfb4861790 | 302324dccc135f55d92fb705c58314c55fed22aa | refs/heads/master | 2022-01-21T07:24:56.468973 | 2017-10-12T13:48:55 | 2017-10-12T13:48:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the Portuguese caption column
    # to MenuItem, nullable so existing rows need no value.
    dependencies = [
        ('treemenus', '0002_menuitem_caption_pl'),
    ]
    operations = [
        migrations.AddField(
            model_name='menuitem',
            name='caption_pt',
            field=models.CharField(max_length=150, null=True, verbose_name='Caption Portuguese', blank=True),
        ),
    ]
| [
"ibalyko@ubuntu-server-16-04"
] | ibalyko@ubuntu-server-16-04 |
7666a5cf1d0b282967767d4acfacfbe8ebb452cc | 9a871ca18c94f080f51fab53de90ecec6bc4ca65 | /django_dzenlog/signals.py | 6ed5ebfd3059816ef7ef979ad003a885901b234d | [] | no_license | IIKovalenko/django-dzenlog | dbfc302ba70d39be28176b029f91d844faa83847 | 45025d20e6d56322fece40f81e0ab370beed2b9c | refs/heads/master | 2020-06-02T15:24:35.009989 | 2010-12-04T18:52:06 | 2010-12-04T18:52:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | from django.dispatch import Signal
published = Signal(providing_args=['instance',])
| [
"svetlyak.40wt@gmail.com"
] | svetlyak.40wt@gmail.com |
fa8c94040d155a351e2b5c25b1d46aa6b8dd8ab6 | eefa9973b0cbafd3c05b795be8b85fc1ded9fff2 | /healthapp/migrations/0006_auto_20211012_1736.py | 73363350baff31c1c9912e5261841b199251de72 | [] | no_license | HwToLrn/opencvhealth | ccb7de8b51a963af4fa37f74d2e00922cc3dcfcd | d693d2f9040f371fdce63abcbdbc040a9cff7390 | refs/heads/main | 2023-09-01T01:23:56.444194 | 2021-10-12T19:12:10 | 2021-10-12T19:12:10 | 416,202,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | # Generated by Django 3.2.8 on 2021-10-12 08:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('healthapp', '0005_healthcustom'),
]
operations = [
migrations.RenameField(
model_name='healthcustom',
old_name='repeats',
new_name='repeats_1',
),
migrations.AddField(
model_name='healthcustom',
name='repeats_2',
field=models.PositiveIntegerField(default=1),
),
migrations.AddField(
model_name='healthcustom',
name='repeats_3',
field=models.PositiveIntegerField(default=1),
),
]
| [
"dayoorang@gmail.com"
] | dayoorang@gmail.com |
929b7bcac75f34355aa13b3f1e3a5faab8b98760 | f3b233e5053e28fa95c549017bd75a30456eb50c | /mcl1_input/L29/29-40_wat_20Abox/set_4.py | e1cd145f4856de5364ea6cdd85c38159e2eee008 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | import os
# Prepares AMBER TI production inputs and PBS job scripts for every lambda
# window of the 29->40 transformation, by copying the templates and
# substituting the placeholder XXX with the window's lambda value.
dir = '/mnt/scratch/songlin3/run/mcl1/L29/wat_20Abox/ti_one-step/29_40/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_4.in'
temp_pbs = filesdir + 'temp_4.pbs'
# Lambda values for the TI windows (presumably Gaussian-quadrature nodes —
# confirm against the protocol).
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    os.chdir("%6.5f" %(j))
    workdir = dir + "%6.5f" %(j) + '/'
    #prodin
    prodin = workdir + "%6.5f_prod_4.in" %(j)
    os.system("cp %s %s" %(temp_prodin, prodin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
    #PBS
    pbs = workdir + "%6.5f_4.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    #submit pbs
    #os.system("qsub %s" %(pbs))
    os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
7058046baa3c952775c38a273ce86611b6ff8399 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_117/516.py | 2f37f445e68945c2132f222f58ca3bd97747e8c4 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,241 | py | lines = open('data.txt').read()
# Python 2 script (print statements). Reads Code Jam "Lawnmower"-style input
# from data.txt and writes YES/NO per case to output.txt: a grid is mowable
# iff every cell is the maximum of its row or of its column.
output = open('output.txt', 'w')
lines = lines.splitlines()
cases_num = int(lines[0])
lines = lines[1:]
cur_index = 0
for i in range(cases_num):
    case_num = i + 1
    m, n = lines[cur_index].split()
    n = int(n)
    m = int(m)
    cur_index += 1
    matrix = []
    for row_ind in range(m):
        line = lines[row_ind + cur_index]
        matrix.append([int(x) for x in line.split()])
    rows = []
    columns = []
    # Sorted unique heights per row and per column; a cell is a row/column
    # maximum iff its index in the sorted-unique list is the last one.
    for row in matrix:
        rows.append(sorted(set(row)))
    for column in zip(*matrix):
        columns.append(sorted(set(column)))
    def is_lawnable():
        for i in range(m):
            for j in range(n):
                elem = matrix[i][j]
                i_row = rows[i].index(elem)
                j_column = columns[j].index(elem)
                # Some strictly larger value exists in BOTH the row and the
                # column: this cell can never be cut to its height.
                if len(rows[i]) > i_row + 1 and len(columns[j]) > j_column + 1:
                    return False
        return True
    is_good = is_lawnable()
    cur_index += m
    if is_good:
        output.write('Case #{0}:'.format(case_num) + ' YES\n')
        print 'Case #{0}:'.format(case_num), 'YES'
    else:
        output.write('Case #{0}:'.format(case_num) + ' NO\n')
        print 'Case #{0}:'.format(case_num), 'NO'
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
83db1bb88ed3a2db62f321db2e83dd80786b703b | 49e9838c3d4e3ff393342c651bbecf6c4b733945 | /chat/chatbox/permissions.py | f8bbce8adc467ee9873240f6236bfb806289b216 | [] | no_license | jideabdqudus/pychat_be | a904287e353d7385729beb309ad082c7e0c949c9 | 5113abb73b27aea82320decd237c905f85c25104 | refs/heads/main | 2023-06-10T20:25:56.565670 | 2021-06-29T21:47:24 | 2021-06-29T21:47:24 | 377,649,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | from rest_framework import permissions
class UpdateOwnPost(permissions.BasePermission):
    """Allow read access to anyone, write access only to the post's owner."""

    def has_object_permission(self, request, view, obj):
        """Safe (read-only) methods pass; otherwise the requester must own
        the object."""
        is_read_only = request.method in permissions.SAFE_METHODS
        return is_read_only or obj.owner.id == request.user.id
| [
"jideabdqudus@gmail.com"
] | jideabdqudus@gmail.com |
aa7848567de21cc1e2bbf18f411bcce6258b36e8 | a3aab8e80ec5d660aa70cec28a48bff19800dc30 | /TaskDo/tasktracker_team/apps.py | 1f21df54c85f7413c6cbe3df650663bf4ccaf741 | [] | no_license | zubarenik/TaskDo | be95ba5a9344264a0466318614b876fc12a246c0 | c4bb89e0c511abc26895bd0dfc3c24c2cab637d2 | refs/heads/master | 2022-12-22T11:02:27.743929 | 2020-09-22T14:33:48 | 2020-09-22T14:33:48 | 297,669,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | from django.apps import AppConfig
class TasktrackerTeamConfig(AppConfig):
    # Django application config for the tasktracker_team app.
    name = 'tasktracker_team'
| [
"zubarenik@gmail.com"
] | zubarenik@gmail.com |
a5349026ada15c40b557c4114bf8afbc7b1c65fa | dcb387712437c2f2542809ed2f9286a97af68485 | /code_deprecated/tango_with_django_project/tango_with_django_project/settings.py | 91fc6dce1d0b77f9eb121a25e0ee32e7f228859f | [] | no_license | perkalerk/TangoWithDjango | 61af2c7d3376140da9f31dc35214808b8098dd6b | 080953405cd26ab4f94e74ddbcc1ff42c9cc1eae | refs/heads/master | 2021-01-11T02:40:00.742829 | 2016-10-21T21:03:23 | 2016-10-21T21:03:23 | 70,913,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,720 | py | """
Django settings for tango_with_django_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# from the environment before any production deployment.
SECRET_KEY = '-qh-n=6ou&lk4d)c_4k^pk(bjiu+qpwuiy+!ke@gog0lx#+=iw'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rango',
    'registration',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'tango_with_django_project.urls'
WSGI_APPLICATION = 'tango_with_django_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_PATH = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    STATIC_PATH,
)
TEMPLATE_PATH = os.path.join(BASE_DIR, 'templates')
# NOTE(review): TEMPLATE_DIRS / TEMPLATE_DEBUG are the pre-1.8 settings and
# TEMPLATES the modern one; both are kept here — confirm the Django version.
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    TEMPLATE_PATH,
)
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATE_PATH, ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media') # Absolute path to the media directory
# LOGIN_URL = '/rango/login/'
# Django Registration Redux
REGISTRATION_OPEN = True # If True, users can register
ACCOUNT_ACTIVATION_DAYS = 7 # One-week activation window; you may, of course, use a different value.
REGISTRATION_AUTO_LOGIN = True # If True, the user will be automatically logged in.
LOGIN_REDIRECT_URL = '/rango/' # The page you want users to arrive at after they successful log in
LOGIN_URL = '/accounts/login/' # The page users are directed to if they are not logged in,
# and are trying to access pages requiring authentication
"Andrew@tegraanalytics.com"
] | Andrew@tegraanalytics.com |
2ec8cd1a2d3ff906aa3f726721c7fdcbdf438dfc | 1b4894c3660a1afb6021a95d9ce0b33e295c6dc3 | /src/101-150/125.py | b690b2bb33145bccaba8d41bee548686d37401b9 | [
"MIT"
] | permissive | adaggarwal/leetcode1992 | dbfba8ba38efbe34a84ae2b7272c3c077faeac5b | 4e0cc19ae89050770a7a6d4ba5b330dc76c8dfdc | refs/heads/master | 2022-05-26T20:20:04.833871 | 2022-05-09T02:19:33 | 2022-05-09T02:19:33 | 137,307,149 | 9 | 5 | MIT | 2020-07-10T01:50:14 | 2018-06-14T04:45:20 | Python | UTF-8 | Python | false | false | 742 | py | '''
125. Valid Palindrome
Share
Given a string, determine if it is a palindrome, considering only alphanumeric characters and ignoring cases.
Note: For the purpose of this problem, we define empty string as valid palindrome.
Example 1:
Input: "A man, a plan, a canal: Panama"
Output: true
Example 2:
Input: "race a car"
Output: false
'''
class Solution:
    def isPalindrome(self, s: str) -> bool:
        """Return True if *s* is a palindrome when only alphanumeric
        characters are considered, ignoring case.

        The empty string counts as a palindrome. Filters and lowercases
        once, then compares the cleaned string with its reverse.
        """
        cleaned = ''.join(ch.lower() for ch in s if ch.isalnum())
        return cleaned == cleaned[::-1]
"aggaraditya@gmail.com"
] | aggaraditya@gmail.com |
20dd3e7f584a02e5e3b09c1e309e86c08733305c | b6417cfd5fd01458f08d02b9a551faa330130463 | /tests/test_authforms.py | ccfaf92e9fab132858309fecbc438d9557e26908 | [] | no_license | TristanRice/Protest.me | d3cf4cd583eace3d2b9895c82d4c107e3c600d1d | 1d74a3e3ebbbca62c4c0f9bf3901e8b62f002c6c | refs/heads/main | 2023-03-31T13:16:23.471831 | 2021-04-11T16:44:19 | 2021-04-11T16:44:19 | 354,333,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,677 | py | import pytest
from app.models import Protest, User
from app import app, db
import re
import datetime
from string import ascii_lowercase
import random
import time
test_client = app.test_client()
def create_random_username():
    """Return a random username of 10 lowercase ASCII letters."""
    letters = [random.choice(ascii_lowercase) for _ in range(10)]
    return "".join(letters)

def create_random_email():
    """Return a random email address in the test.com domain."""
    local_part = create_random_username()
    return f"{local_part}@test.com"
def test_authforms1_method1():
    """
    Make sure that valid credentials are accepted
    """
    data = {
        "email": create_random_email(),
        "username": create_random_username(),
        "password": "Th1s-1s4-$3CuR3_P4$$W0RD",
    }
    res = test_client.post("/register", data=data, follow_redirects=False)
    # A successful registration issues a redirect; without following it the
    # body is Flask's "Redirecting..." page.
    assert b"Redirecting..." in res.data
def test_authforms1_method2():
    """
    Make sure that invalid emails are rejected
    """
    data = {
        "email": "aaa",
        "username": "valid_username",
        "password": "Th1s-1s4-$3CuR3_P4$$W0RD"
    }
    res = test_client.post("/register", data=data)
    # The re-rendered form should carry the email validation error.
    assert b"Email must be valid" in res.data
def test_authforms1_method3():
    """
    Make sure that insecure passwords are rejected
    """
    data = {
        "email": create_random_email(),
        "username": "valid_username",
        "password": "aaa"
    }
    res = test_client.post("/register", data=data)
    # The re-rendered form should carry the password-strength error.
    assert b"Password must contain" in res.data
def test_authforms1_method4():
    """
    Make sure that duplicate emails are rejected
    """
    # Two registrations sharing one email; only the first may succeed.
    email_val = create_random_email()
    data1 = {
        "email": email_val,
        "username": create_random_username(),
        "password": "Th1s-1s4-$3CuR3_P4$$W0RD"
    }
    data2 = {
        "email": email_val,
        "username": create_random_username(),
        "password": "Th1s-1s4-$3CuR3_P4$$W0RD"
    }
    res1 = test_client.post("/register", data=data1)
    res2 = test_client.post("/register", data=data2)
    # If the registration is successful, the user will be redirected to the index page
    assert b"<a href=\"/\">/</a>" in res1.data
    # If the registration is unsuccessful, the user will be redirected to the register page
    assert b"That email is already taken" in res2.data
def test_authforms1_method5():
    """
    Make sure that we can login users correctly
    """
    username = create_random_username()
    data_register = {
        "email": create_random_email(),
        "username": username,
        "password": "Th1s-1s4-$3CuR3_P4$$W0RD"
    }
    data_login = {
        "username_or_email": username,
        "password": "Th1s-1s4-$3CuR3_P4$$W0RD"
    }
    res = test_client.post("/register", data=data_register)
    assert b"<a href=\"/\">/</a>" in res.data
    # Drop the session created by registration so the login is exercised
    # from a clean, logged-out state.
    test_client.delete_cookie(key="session", server_name="localhost")
    res1 = test_client.post("/login", data=data_login)
    assert b"<a href=\"/\">/</a>" in res1.data
def test_authforms1_method6():
    """
    Make sure that invalid users cannot login
    """
    data = {
        "username_or_email": "aaa",
        "password": "aaa"
    }
    # Make sure that we don't keep the login from the previous test
    test_client.delete_cookie(key="session", server_name="localhost")
    res = test_client.post("/login", data=data)
    print(res.data)  # NOTE(review): debug leftover — consider removing
    assert b"Username or password incorrect" in res.data
def test_authforms1_method7():
    """
    Make sure that invalid usernames are rejected
    """
    data = {
        "username": "Thi$-I/s-an-invalid-username",
        "email": create_random_email(),
        "password": "Th1s-1s4-$3CuR3_P4$$W0RD"
    }
    res = test_client.post("/register", data=data)
    # The original ended without asserting anything. A successful
    # registration redirects (see method1); a rejected one re-renders the
    # form, so the redirect body must be absent here.
    assert b"Redirecting..." not in res.data
| [
"minor_2468@hotmail.com"
] | minor_2468@hotmail.com |
b085f75a1abf112fb5ab270b6c1ca6ab5ca9f87f | 09e8d7f378fdbda66a3b730a9be1d3c2e34d570b | /ZB.py | dd2bf9b0bc3d2096745571f7697ecf5239a524b4 | [] | no_license | laaciits/RTR105 | bd53da410772ca5f8e54d295b2cdf4f5e23fce7c | cb3d64015104574dbecad38a740c6c14c247474c | refs/heads/master | 2020-03-28T03:24:01.690515 | 2019-02-08T09:37:48 | 2019-02-08T09:37:48 | 147,642,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | # -*- coding: utf-8 -*-
from math import sin
# S0, S1, S2, S3 -> S
# Builds partial sums S of a four-term alternating series for comparison
# with sin(x/2).
# NOTE(review): the terms do not match the Maclaurin series of sin(x/2) —
# the denominators use (2n+1) instead of (2n+1)!, the power of two
# multiplies rather than divides, and a0's exponent (2*2*0+1) is
# inconsistent with a1..a3's (2n+1). Confirm the intended formula.
x = float(input("Lietotāj, lūdzu, ievadi argumentu (x): "))
y = sin(x/2)
# NOTE(review): label says sin(x) but the value printed is sin(x/2).
print("sin(%.2f) = %.2f"%(x,y))
a0 = (-1)**0*x**(2*0+1)/(2*0+1)*2**(2*2*0+1)
S = a0
print("a0 = %.2f S0 = %.2f"%(a0,S))
a1 = (-1)**1*x**(2*1+1)/(2*1+1)*2**(2*1+1)
#S1 = (a0) + a1
#S1 = S0 + a1
S = S + a1
print("a1 = %.2f S1 = %.2f"%(a1,S))
a2 = (-1)**2*x**(2*2+1)/(2*2+1)*2**(2*2+1)
#S2 = (a0 + a1) + a2
#S2 = S1 + a2
S = S + a2
print("a2 = %.2f S2 = %.2f"%(a2,S))
a3 = (-1)**3*x**(2*3+1)/(2*3+1)*2**(2*3+1)
#S3 = (a0 + a1 + a2) + a3
#S3 = S2 + a3
S = S + a3
print("a3 = %.2f S3 = %.2f"%(a3,S))
| [
"noreply@github.com"
] | noreply@github.com |
d3c628ada98b13f64b53fd43795a2a7054c81546 | 52c3ff9435a895893fd2b917cba3d6a36fbdf8a5 | /namesapi/namesapi/wsgi.py | ee2e2491803fc9abda25519a6b9ba118a4d24f6f | [] | no_license | rsgilbert/names-env | fb69466f7eb28fd34c4aee2d2489219422ba2eeb | bcb20150437d76baa93e07276ae051f052fe8d56 | refs/heads/master | 2022-05-03T02:47:25.137991 | 2019-09-06T12:48:24 | 2019-09-06T12:48:24 | 206,786,722 | 0 | 0 | null | 2022-04-22T22:16:19 | 2019-09-06T12:14:10 | Python | UTF-8 | Python | false | false | 393 | py | """
WSGI config for namesapi project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'namesapi.settings')  # default settings module if unset
application = get_wsgi_application()  # WSGI callable used by the application server
| [
"gssenyonjo@gmail.com"
] | gssenyonjo@gmail.com |
4b964397df7ef88fabea054402bb1db1ad59d9b4 | 7f43264f32a57599d87fe8be8e0d748d89abecab | /api_v0/ElasticsearchURL.py | 46da6ee9e31f2e2d8f574166965a86e3a980e86c | [] | no_license | chair300/rsss_api | e13215439be1bfaa536ea7be5bfe4cc657bb0663 | 03866b0f5052dc81b61cab3b1c2a451d8e2ec449 | refs/heads/master | 2023-03-19T02:38:09.963553 | 2018-01-17T00:41:18 | 2018-01-17T00:41:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,498 | py | from rest_framework.response import Response
from api_v0.serializers import ScoresRowSerializer
from django.conf import settings
import requests
import random
import json
#make all the stuff in views.py use this class.
#NOTE: if it turns out that making a query to check if a server is response is
#This is the way to do it.
#TOO overhead-intensive, use the following algorithm:
# Fully formulate the URL as it would be used (randomly shuffle the ES boxes)
# make the request as-is, and try/catch to detect timeout and/or connection errors.
# If there's a dropped request; then pop the next machine off of the shuffled list of
# available ES nodes; try that URL.
# Either end up returning the result set; or a 500 status Response with a descriptive
# message about Elasticsearch being down.
class ElasticsearchURL(object):
    """Assembles the URL for one Elasticsearch request — single-document
    fetch, (paged) search, or scroll — against a randomly chosen ES node
    from settings.ELASTICSEARCH_URLS."""
    #if operation is None, id_to_get had better be there.
    #if scroll duration is included, this is a scrolling download.
    def __init__(self, data_type, operation="_search",
                 from_result=None, page_size=None, id_to_get=None,
                 scroll_info=None):
        url_base = self.get_base_es_url()
        # Map the logical data type onto the configured index name.
        # NOTE(review): an unrecognised data_type leaves name_of_index as
        # None and the "/".join below raises TypeError — confirm callers
        # only ever pass the four known types.
        name_of_index = None
        if data_type == 'atsnp_output':
            name_of_index = settings.ES_INDEX_NAMES['ATSNP_DATA']
        elif data_type == 'gencode_gene_symbols':
            name_of_index = settings.ES_INDEX_NAMES['GENE_NAMES']
        elif data_type == 'sequence':
            name_of_index = settings.ES_INDEX_NAMES['SNP_INFO']
        elif data_type == 'motif_bits':
            name_of_index = settings.ES_INDEX_NAMES['MOTIF_BITS']
        #print "url_base : " + url_base
        #print "name_of_index: " + name_of_index
        #print "data_type: " + data_type
        #print "operation: " + operation
        url_parts = [url_base, name_of_index, data_type]
        get_args = []
        if id_to_get is not None:
            #throw a nice exception if this is invalid?
            url_parts.append(id_to_get)
        else:
            #this is a search.
            url_parts.append(operation)
            get_args.append(self.get_page_size(page_size))
            if scroll_info is not None:
                if 'duration' in scroll_info:
                    get_args.append('scroll=' + scroll_info['duration'])
                else:
                    #Use a bare URL to continue a scroll
                    get_args = []
                    url_parts = [url_base, operation]
                    url_parts.append('scroll')
            if from_result is not None:
                get_args.append("from=" + str(from_result))
        bare_url = "/".join(url_parts)
        # Append the accumulated query-string arguments, if any.
        if len(get_args) > 0:
            self.url = '?'.join([bare_url,'&'.join(get_args)])
        else:
            self.url = bare_url
        #print "url created: " + self.url
    def setup_scroll_args(self, scroll_info):
        # NOTE(review): not referenced inside this class — confirm whether
        # any caller still uses it before removing.
        scroll_args = []
        if 'duration' in scroll_info:
            scroll_args.append('scroll=' + scroll_info['duration'])
        return scroll_args
    #for searches
    def get_page_size(self, page_size):
        # Fall back to the configured default page size.
        if page_size is None:
            page_size = settings.ELASTICSEARCH_PAGE_SIZE
        return "size=" + str(page_size)
    def get_base_es_url(self):
        # Pick one configured ES node at random (cheap load spreading).
        machines_to_try = settings.ELASTICSEARCH_URLS[:]
        random.shuffle(machines_to_try)
        return machines_to_try.pop()
    def get_url(self):
        # Fully formed URL built by __init__.
        return self.url
| [
"rebeccakathrynhudson@gmail.com"
] | rebeccakathrynhudson@gmail.com |
a23254d0dc4694c1e4b422d7c01a592c9f3ab7ad | 03c4bff614702639d0ba2da99e23b55a96863066 | /main/src/independent/__init__.py | e110ea902d582774e9f789bff738ce793226fd7a | [] | no_license | wangwuli/XLauto | dcaa9320c62ea78cbddad46bd65dab04ee87ba61 | 88705807df697dc86a5ed8a648505b3fb895872b | refs/heads/master | 2023-01-10T04:40:32.976245 | 2021-02-23T16:50:34 | 2021-02-23T16:50:34 | 223,140,055 | 1 | 0 | null | 2022-12-12T03:55:39 | 2019-11-21T09:43:56 | Python | UTF-8 | Python | false | false | 186 | py | # from .logio import login
# def init_app(app):
#     login.init_app(app)
from flask import Blueprint
independent = Blueprint('independent', __name__)
# Imported after the blueprint exists so the views module can import
# `independent` without a circular import (presumably — confirm).
from src.independent import nmap
"liww@cenboomh.com"
] | liww@cenboomh.com |
8c752f8f75dae42db01ac5d7a7ce833f5757c8e3 | 59b4a26e405cdc256b370e9ad141bf930965c609 | /emailValidation_app/controllers/emailValidations.py | 4a0c2db44d76678aba90b212c186a25b12cd48f0 | [] | no_license | cbakcoleman/CD_FlSql_emailValidation | 13c5e69e1bd629c46b2d669bb0ba86cc1a8f585a | 6c62dca69685397fdd10eb0295c93af313bfd7ad | refs/heads/master | 2023-08-12T06:53:40.223422 | 2021-09-14T23:00:59 | 2021-09-14T23:00:59 | 406,139,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | from emailValidation_app import app
from flask import render_template, redirect, request
from emailValidation_app.model.emailValidation import Email
from flask import flash
@app.route("/")
def home():
return render_template("index.html")
@app.route("/register", methods=["POST"])
def register():
print(request.form)
if not Email.validate_email(request.form):
return redirect("/")
new_email = Email.add_email(request.form)
return redirect("/show_emails")
@app.route("/show_emails")
def show_emails():
all_emails = Email.all_emails()
return render_template("emails.html", emails = all_emails) | [
"88118758+cbakcoleman@users.noreply.github.com"
] | 88118758+cbakcoleman@users.noreply.github.com |
098c3ccd7b91207c10bdec75c4ec1098cdd7ee8c | 4572c7a30af256f47eb2742b6beb1c820eaf475b | /main.py | 34b7da4cbca6501ef46e81c6fe177cb66aed0029 | [] | no_license | stanislavstarkov/pi-pico | afa1a30655f2b7aff9ec3718f5b56f34a01cca7b | 18c74ac2f8bea81a852f140285bb874f95f9a9ae | refs/heads/master | 2023-04-01T19:53:59.832637 | 2021-03-31T18:10:11 | 2021-03-31T18:10:11 | 348,808,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | from machine import Pin,ADC
import utime
adc = ADC(26)
TEMPERATURE_SENSOR = ADC(4)
CONVERSION_FACTOR = 3.3 / (65535)
RED_LED = Pin(21, Pin.OUT)
GREEN_LED = Pin(20, Pin.OUT)
BLUE_LED = Pin(19, Pin.OUT)
LEDS = [RED_LED, GREEN_LED, BLUE_LED]
for led in LEDS:
led.off()
while True:
val = adc.read_u16() * CONVERSION_FACTOR
reading = TEMPERATURE_SENSOR.read_u16() * CONVERSION_FACTOR
temperature = 27 - (reading - 0.706)/0.001721
print(val)
RED_LED.toggle()
utime.sleep(2) | [
"stanislav.starkov@gmail.com"
] | stanislav.starkov@gmail.com |
96a81eea6f894c02e7acccdf364b4b8bbc19a7c3 | a010b36f7de8b1f056f8935db717639a145e75f9 | /cell_field.py | 23ca1840a2f22de1fe5dbd4846939a7447c35186 | [] | no_license | victormorozov1/pygame_field | 04a2a2329648a2d3b5985945b0f76e783b1ee7db | 3f35ecfd7e904d193802a586e553fe7b2ab3d4e3 | refs/heads/master | 2020-09-29T23:56:23.672975 | 2019-12-26T10:30:32 | 2019-12-26T10:30:32 | 227,151,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | import pygame
from field import Field
from random import randrange as rd
from functions import *
class CellField(Field):
def __init__(self, szx, szy, field_arr, field_dict, cell_sz=64, bg=(255, 255, 255)):
super().__init__(szx, szy, bg=bg)
self.field_arr = field_arr
self.field_dict = field_dict
self.cell_sz = cell_sz
self.n = szx // cell_sz
self.m = szy // cell_sz
def draw_field(self, win, start=(0, 0)):
for i in range(len(self.field_arr)):
for j in range(len(self.field_arr[i])):
for pos in camera_coords(i * self.cell_sz, j * self.cell_sz, self.szx, self.szy, start):
win.blit(self.field_dict[self.field_arr[i][j]], pos)
def draw_objects(self, win, start=(0, 0)):
for i in self.objects:
for pos in camera_coords(i.rect.x, i.rect.y, self.szx, self.szy, start):
win.blit(i.image, pos)
def show(self, win, start=(0, 0)):
win.fill(self.bg)
self.draw_field(win, start=start)
self.draw_objects(self.win, start=start)
pygame.display.update()
| [
"vismo@bk.ru"
] | vismo@bk.ru |
d35f6b4783bc805b8733a5dde7649cb3452f1da9 | f42f0dd339093895a971a5e64d5e01382c4cbeea | /arts/arts/settings.py | 07b3a98297844a55a0ad8c57abcf4d6a743476d6 | [] | no_license | rusbal/artuus | e379e3a0ef9c72fdcdeb83d2cf96c1744265a403 | 9d3d4736c0435dff572bb2e3f1aaab43d1a76db2 | refs/heads/master | 2020-05-09T20:40:58.532398 | 2015-04-16T17:13:00 | 2015-04-16T17:13:00 | 34,054,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,936 | py | """
Django settings for arts project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from .secret import *
# Project root: two directory levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# NOTE(review): empty is acceptable only while DEBUG is True; must be filled
# with real host names before a production deployment.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'imagestore',
    # Imagestore Requirements
    'sorl.thumbnail',
    'autocomplete_light',
    'tagging',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'imagestore.middleware.request.GlobalRequestMiddleware',
)
ROOT_URLCONF = 'arts.urls'
WSGI_APPLICATION = 'arts.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, 'arts', 'public', 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.join(BASE_DIR, 'arts', 'public', 'static')
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'arts', 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
| [
"raymond@philippinedev.com"
] | raymond@philippinedev.com |
63787f329717b913e1ca259593439e0fb42d7a1c | 1257fadd402cb1ea3a2b8262833a00ee969fbfbe | /conduit/app.py | 8f84ee89807ad26735c81d9dc19edc657a2d7bc8 | [
"MIT"
] | permissive | Willis0826/flask-realworld-example-app-ci-cd | d81742c27800292eb468dc09a4b684422cf827c0 | f8a9293a4f58c2312ad95520d8858b0b2a8be86b | refs/heads/master | 2023-03-08T02:03:14.995918 | 2023-02-20T16:09:36 | 2023-02-20T16:09:36 | 233,528,197 | 1 | 0 | MIT | 2023-02-20T16:10:31 | 2020-01-13T06:38:01 | Python | UTF-8 | Python | false | false | 2,379 | py | # -*- coding: utf-8 -*-
"""The app module, containing the app factory function."""
from flask import Flask
from conduit.extensions import bcrypt, cache, db, migrate, jwt, cors
from conduit import commands, user, profile, articles
from conduit.settings import ProdConfig
from conduit.exceptions import InvalidUsage
def create_app(config_object=ProdConfig):
    """An application factory, as explained here:
    http://flask.pocoo.org/docs/patterns/appfactories/.
    :param config_object: The configuration object to use.
    """
    # Use the top-level package name ("conduit") as the Flask app name.
    app = Flask(__name__.split('.')[0])
    # Treat /route and /route/ as the same endpoint.
    app.url_map.strict_slashes = False
    app.config.from_object(config_object)
    # Wire up extensions, blueprints, error handlers, shell context and CLI
    # commands, in that order.
    register_extensions(app)
    register_blueprints(app)
    register_errorhandlers(app)
    register_shellcontext(app)
    register_commands(app)
    return app
def register_extensions(app):
    """Register Flask extensions."""
    bcrypt.init_app(app)
    cache.init_app(app)
    db.init_app(app)
    # Flask-Migrate needs both the app and the SQLAlchemy instance.
    migrate.init_app(app, db)
    jwt.init_app(app)
def register_blueprints(app):
    """Register Flask blueprints."""
    # Apply the CORS whitelist ('*' when unset) to each API blueprint
    # before attaching it to the application.
    origins = app.config.get('CORS_ORIGIN_WHITELIST', '*')
    cors.init_app(user.views.blueprint, origins=origins)
    cors.init_app(profile.views.blueprint, origins=origins)
    cors.init_app(articles.views.blueprint, origins=origins)
    app.register_blueprint(user.views.blueprint)
    app.register_blueprint(profile.views.blueprint)
    app.register_blueprint(articles.views.blueprint)
def register_errorhandlers(app):
    """Install the JSON error handler for InvalidUsage exceptions."""
    def handle_invalid_usage(error):
        payload = error.to_json()
        payload.status_code = error.status_code
        return payload
    app.errorhandler(InvalidUsage)(handle_invalid_usage)
def register_shellcontext(app):
    """Register shell context objects."""
    def shell_context():
        """Shell context objects."""
        # Names made available automatically in `flask shell`.
        # Note the model class is named Tags but is exposed as 'Tag'.
        return {
            'db': db,
            'User': user.models.User,
            'UserProfile': profile.models.UserProfile,
            'Article': articles.models.Article,
            'Tag': articles.models.Tags,
            'Comment': articles.models.Comment,
        }
    app.shell_context_processor(shell_context)
def register_commands(app):
    """Register Click commands."""
    # Exposed as `flask test`, `flask lint`, `flask clean`, `flask urls`.
    app.cli.add_command(commands.test)
    app.cli.add_command(commands.lint)
    app.cli.add_command(commands.clean)
    app.cli.add_command(commands.urls)
| [
"willis_chou@trendmicro.com"
] | willis_chou@trendmicro.com |
d402c0437514136df6d09b410b5a1e2423715000 | 712022621249b210fd384158f08a05e02f567bb4 | /model/project.py | 23ff065f4e347215fc9bda91989b1b36739bb796 | [] | no_license | Ka3a4ook/python-Mantis_BT | b35a92b78322b419c256bc90ae58c22a0e8058a4 | 0f38f08fde12f7d817211611c3b0842a32235b58 | refs/heads/main | 2023-02-05T17:24:03.189228 | 2020-12-27T21:47:48 | 2020-12-27T21:47:48 | 322,549,131 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | from sys import maxsize
class Project:
    """Value object describing a Mantis BT project."""

    def __init__(self, name=None, status=None, inherit_global=True, view_state=None, description=None, id=None):
        self.name = name
        self.status = status
        self.inherit_global = inherit_global
        self.view_state = view_state
        self.description = description
        self.id = id

    def __repr__(self):
        return "%s:%s" % (self.id, self.name)

    def __eq__(self, other):
        # A missing (None) id or name on either side matches anything.
        ids_match = self.id is None or other.id is None or self.id == other.id
        names_match = self.name is None or other.name is None or self.name == other.name
        return ids_match and names_match

    def id_or_max(self):
        """Numeric id, or sys.maxsize when no id is set (sorts last)."""
        return int(self.id) if self.id else maxsize
| [
"89052585288@mail.ru"
] | 89052585288@mail.ru |
48f45b897c9bf17861962bc4ebf47143512923ef | 7b290aa7d2fe64c2ee7adb31ed0212dea6d22189 | /IO_multi/s2/s1.py | 3affac98cd11eab3259b0b96b943bf75cb1916df | [] | no_license | ginkgodia/PYcode | 97866c1a74d896368e6f6a9cc6450a6665c4d1b0 | cbac336cd7be46fea442b2b112d29184361f0db3 | refs/heads/master | 2021-09-05T13:29:05.201458 | 2018-01-28T06:55:34 | 2018-01-28T06:55:34 | 115,179,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | #!/usr/bin/env python
# -*- coding=utf8 -*-
# @Author:Ginkgo
# @File: s1
# @Time: 2018/1/25 22:00
import socket
import select
# Single-threaded echo-ish server multiplexed with select().
s1 = socket.socket()
s1.bind(("127.0.0.1", 8001))
s1.listen()
# inputs: sockets watched for readability (listener + accepted connections).
inputs = [s1, ]
# outputs: connections that have a pending reply to send.
outputs = []
# message_dict maps a connection to the last message received on it.
message_dict = {}
while True:
    # 1-second timeout so the loop keeps printing even when idle.
    r_list, w_list, x_list = select.select(inputs, outputs, [], 1)
    print("正在监听%d 个对象" % len(inputs))
    print(r_list)
    for item in w_list:
        # Send the stored message back with "hello" appended, then stop
        # watching this socket for writability.
        item.sendall(bytes(message_dict[item]+"hello", encoding="utf-8"))
        outputs.remove(item)
    for connect_or_obj in r_list:
        print("2")
        if connect_or_obj == s1:
            # Readable listener socket -> new incoming connection.
            conn, address = connect_or_obj.accept()
            inputs.append(conn)
        else:
            try:
                # NOTE: local name `re` here is just a received string,
                # not the regex module.
                re = str(connect_or_obj.recv(1024), encoding="utf-8")
            except Exception as E:
                # Peer reset/closed: stop watching this connection.
                inputs.remove(connect_or_obj)
            else:
                outputs.append(connect_or_obj)
                message_dict[connect_or_obj] = re
    # connect_or_obj.sendall(bytes(re + "ok", encoding="utf-8"))
"907632998@qq.com"
] | 907632998@qq.com |
c2863e4dc147572401933bd01d965ba2f78229d3 | b0067a719fcfe1e64bd6dcf20ccf5fae93410d7d | /algo.py | 1d205d71922ada986ec916a7791ac0c404066748 | [] | no_license | sravanthi-ch/Friend-Suggestion-Algo | 9c83cdeeb5da92afd0917860a6384dda355c5da9 | 2a73b0b50b61b6d709432a05404fc2a10341b83c | refs/heads/master | 2020-04-18T03:55:23.158488 | 2019-01-23T17:06:17 | 2019-01-23T17:06:17 | 167,218,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,719 | py |
# List of friends of friends...
ff_list = {
1 : [2, 4, 5],#[2,4,5,3]
2 : [1, 4, 6],
3 : [4, 5],#[4,5,1]
4 : [1, 2, 3, 6],
5 : [1, 3, 6],
6 : [2, 4, 5]
}
# schoolfriend=1, neighbour =2, college mate=3, collegue =4, relative =5, teacher/ student=6, employee/employer=7
hr_list = {
1 : [4,2,3],#[4,2,3,4]
2 : [4,4,5],
3 : [2,1],#[2,1,4]
4 : [2,4,2,7],
5 : [3,1,1],
6 : [5,7,1]
}
def get_friend_suggestions(user_id):
    '''
    @param user_id - currently logged in user
    Find the list of suggested friends for a user along with
    the friends that are mutual to the suggested friend.
    Python 2 code: uses print statements and interactive input().
    May mutate the module-level ff_list / hr_list when a suggestion
    is accepted.
    '''
    suggested_friends = {}
    sugg_fcount ={}
    tempp=''
    # NOTE(review): sugg_fcount is initialised but never read afterwards.
    for f in ff_list:
        sugg_fcount[f] = 0;
    # Get the friends of the current user.
    friends = ff_list[user_id]
    for friend in friends:
        # Friends friends list.
        ffriends = ff_list[friend]
        for ff in ffriends:
            # If the friendsFriend(ff) is not us, and not our friend, he can be suggested
            if ff != user_id and ff not in friends:
                # The key is the suggested friend
                suggested_friends[ff] = {'mutual_friends' : []}
                for f in ff_list[ff]:
                    # If he is a friend of the current user, he is a mutual friend
                    if f in friends:
                        suggested_friends[ff]['mutual_friends'].append(f)
    for ff in suggested_friends:
        print ff,":",suggested_friends[ff]
    for ff in suggested_friends:
        k=len(suggested_friends[ff]['mutual_friends'])
        for l in range(0, k):
            i=suggested_friends[ff]['mutual_friends'][l]
            # Suggest only when both sides relate to the mutual friend i with
            # the same relation code (hr_list is parallel to ff_list).
            if hr_list[user_id][ff_list[user_id].index(i)] == hr_list[i][ff_list[i].index(ff)]:
                print "NEW SUGGESTION",ff,": through",i
                tempp = input("accept?\n")
                if tempp == "y":
                    # Accepting a suggestion links both users and records the
                    # relation type entered interactively.
                    ff_list[user_id].append(ff)
                    ff_list[ff].append(user_id)
                    temppp = input("relation\n")
                    hr_list[user_id].append(temppp)
                    hr_list[ff].append(temppp)
#change user_id_input in the below code to run this code for different accounts....
# a working example has been depected.... use as input the following
# "y"\n 4\n "n"\n "n"\n "n"\n "n"\n
# to see all functionality of this code
user_id_input = 3
print "Suggested Friends and Mutual Friends for user {}".format(user_id_input)
#print
get_friend_suggestions(user_id_input)
print "\nSuggested Friends and Mutual Friends for user {}".format(user_id_input)
get_friend_suggestions(user_id_input)
user_id_input = 2
print "\nSuggested Friends and Mutual Friends for user {}".format(user_id_input)
get_friend_suggestions(user_id_input)
print "DONE"
| [
"noreply@github.com"
] | noreply@github.com |
9fee9927053a85fe0988554aa2c1cf7fc746326b | 8a7950440a4a8015523a1e1474a3bfc3aaa95782 | /email_smtplib/basic/email_send2.py | d3b046d99e4e22a27c9ecef541a328042c1dbfab | [] | no_license | SatishNitk/Python_Web_Scrapper | bddb320b86a8942b6b3c346eb09f09b933be5b37 | f257ad2e6d2053f0f86443905de87ccf81df0c62 | refs/heads/master | 2020-05-07T19:51:51.816353 | 2019-07-07T13:31:27 | 2019-07-07T13:31:27 | 180,826,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,037 | py | from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from smtplib import SMTP, SMTPException,SMTPAuthenticationError
host = "smtp.gmail.com"
port = 587
email = "hungrygupta@gmail.com"
password = ""
from1 = "hungrygupta@gmail.com"
to_list = ["hungrygupta@gmail.com"]
try:
email_obj = SMTP(host, port)
email_obj.ehlo()
email_obj.starttls()
email_obj.ehlo()
email_obj.login(email,password)
plain_text = "just a simple text message"
html_txt = """
<html>
<body>
<h1>
This paragraph
contains a lot of lines
in the source code,
but the browser
ignores it.
</h1>
</body>
</html>
"""
the_msg = MIMEMultipart("alternative")
the_msg['Subject'] = "Hello there"
the_msg['From'] = from1
part1 = MIMEText(plain_text, "plain")
part2 = MIMEText(html_txt, "html")
the_msg.attach(part1)
the_msg.attach(part2)
print(the_msg.as_string())
email_obj.sendmail(from1,to_list,the_msg.as_string())
except SMTPException:
print("exception occured in sending rmail check once whole code")
| [
"satishkrgu95@gmail.com"
] | satishkrgu95@gmail.com |
c0ccca0a0a3c420e12571083f2fb21357e046b62 | 8a6b1a90dd7ab92bb8e5880efcf0a587f59bc77f | /Modules/data_methods.py | 7807843659a4e5c9c6c1a30ac1368cb5067846e6 | [] | no_license | osuranboa/music-learning | b79756fa45859d1cf7c7d229d7f6d7259a35360a | 7e80230ed45055f6fbef16e857d0c01c0142e528 | refs/heads/master | 2020-12-22T07:31:50.984227 | 2019-11-12T02:09:53 | 2019-11-12T02:09:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,364 | py | from sklearn import decomposition
from sklearn import preprocessing
from sklearn import cluster
import pandas as pd
import numpy as np
import plotly.plotly as py
import sys
from plotly.graph_objs import *
import pdb
def transformPCA(X, n):
    """Project X onto its first n principal components."""
    return decomposition.PCA(n_components=n).fit_transform(X)
def centerScaleData(X):
    """Standardize each column of X to zero mean and unit variance."""
    return preprocessing.StandardScaler().fit_transform(X)
def minMaxScaleData(X):
    """Rescale each column of X to the [0, 1] range."""
    return preprocessing.MinMaxScaler().fit_transform(X)
def classifyUnsupervised(X, n_clusters = 6, method = "km", random_state = 42):
    """Cluster the rows of X; return each row's cluster label as a list.

    Only k-means ("km") is implemented. The original silently returned
    None for any other method; that is now an explicit error instead of
    a latent None propagating to callers.
    """
    if method != "km":
        raise ValueError("unsupported clustering method: %r" % (method,))
    clf = cluster.KMeans(init = "random", n_clusters = n_clusters, random_state = random_state)
    return clf.fit_predict(X).tolist()
def scatterplot(data, filename, title = "", xAxisLabel = "", yAxisLabel = ""):
    """Plot the first two columns of *data* as a marker scatter via Plotly.

    Uses plotly.plotly (py.plot), so this presumably publishes the figure
    to the Plotly service under *filename* — confirm credentials are set.
    """
    trace1 = Scatter(
        x = data[ : , 0],
        y = data[ : , 1],
        mode = "markers"
    )
    layout = Layout(
        title = title,
        xaxis = XAxis(
            title = xAxisLabel,
            showgrid = False,
            zeroline = False
        ),
        yaxis = YAxis(
            title = yAxisLabel,
            showline = False
        )
    )
    # NOTE: rebinding `data` here shadows the array parameter above.
    data = Data([trace1])
    fig = Figure(data = data, layout = layout)
    plot_url = py.plot(fig, filename = filename)
def closest(X, p):
    """Index of the row of X nearest to point p (squared Euclidean metric)."""
    sq_dists = ((X - p) ** 2).sum(1)
    return np.argmin(sq_dists)
def distances(X, p):
    """Squared Euclidean distance from each row of X to point p."""
    return ((X - p) ** 2).sum(1)
## take pandas dataframe and reorder by distance from point p in distances()
def sortByDistance(df, dist):
    """Return *df* (index reset) reordered by ascending values of *dist*."""
    order = np.argsort(dist)
    return df.reset_index().iloc[order, ]
def expandToPoints(X, df, artist, title):
    """Order the whole dataframe by distance from the song (artist, title).

    Exits the process with a message when the song cannot be located.
    """
    try:
        matches = (df.artist.str.lower() == artist.lower()) & \
                  (df.title.str.lower() == title.lower())
        start = np.where(matches)[0][0]
    except:
        print("Artist name and Song title not found as entered...")
        print("Please try again.")
        sys.exit()
    return sortByDistance(df, distances(X, X[start, : ]))
def walkPoints(X, df, artist, title):
    """Greedy nearest-neighbour walk through all songs, starting at (artist, title).

    Returns a list of Spotify track URIs in visit order. WARNING: mutates
    the caller's X in place (visited rows are overwritten with a huge
    sentinel). Python 2 code (`xrange`).
    """
    try:
        start = np.where((df.artist.str.lower() == artist.lower()) & \
            (df.title.str.lower() == title.lower()))[0][0]
    except:
        print("Artist name and Song title not found as entered...")
        print("Please try again.")
        sys.exit()
    out_list = ["spotify:track:%s" % df.iloc[start].spotify_id]
    curr_point = X[start, : ].copy()
    ## once the point has been touched, make the value impossibly far away
    # (10e9 == 1e10 acts as the "visited" sentinel in every coordinate)
    X[start, : ] = np.repeat(10e9, X.shape[1])
    for i in xrange(X.shape[0] - 1):
        nxt = closest(X, curr_point)
        next_point = X[nxt, : ].copy()
        # Local files already carry a full URI; others get the prefix.
        if 'local' in df.iloc[nxt].spotify_id:
            out_list.append(df.iloc[nxt].spotify_id)
        else:
            out_list.append("spotify:track:%s" % df.iloc[nxt].spotify_id)
        X[nxt, : ] = np.repeat(10e9, X.shape[1])
        curr_point = next_point
    return out_list
def getSimilarPoints(X, df, artist, title, n = 5, stdout = True):
    """Return the n songs nearest to (artist, title), excluding itself.

    With stdout=True only the artist/title columns are returned; otherwise
    the full rows are returned.
    """
    ordered = expandToPoints(X, df, artist, title)
    nearest = ordered[1:n + 1]
    if stdout:
        return nearest[['artist', 'title']]
    return nearest
| [
"jonathan.kroening@ipsos.com"
] | jonathan.kroening@ipsos.com |
d7309c81554e616dd3b96b608dcd9a41dbabebc1 | e5a26ae20d4d44c544f9fc6b052d074114da10b1 | /clean_seq_names.py | e13ad489c6c71c10f759c33e9fe1f6427bca55cd | [] | no_license | rdtarvin/sequence-manipulation | d8608f68a74419d39c3c1ea746362b879ca95c47 | 09630d61776195893d14dd53d9672e86e4bbd3cd | refs/heads/master | 2020-05-17T07:59:43.992857 | 2019-02-06T21:39:33 | 2019-02-06T21:39:33 | 18,824,595 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,295 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 14 20:27:07 2014
@author: RDT
"""
'''This script reorganizes the rec.id and rec.description of a fasta file.'''
from Bio import SeqIO
import sys
def clean_seqs(infile,genename,database):
    '''Parse *infile* (FASTA), prefix each record id with *genename* and
    rewrite its description, then write the cleaned records to
    "<genename>_clean.fasta". Returns None (writes a file).'''
    records = SeqIO.parse(infile, 'fasta')
    # Strip the ".fasta" extension (6 chars) to recover the blast-hit label.
    blasthit=str(infile[:-6])
    newrecords=[]
#    print filename
    for rec in records:
        items=(rec.description).split(' ') # turns description into a list
#        print items
        rec.id = genename+items[1] # adds gene name to sequence ID
        newitem=''
#        print length
        for i in range(3,len(items)):
            newitem='%s %s ' %(newitem,items[i]) # concatenates paths
        # Drop a single leading underscore from the accession field, if any.
        if items[1][0] == '_':
            items[1] = items[1][1:]
        rec.description="'%s' %s %s %s %s" %(blasthit, database, items[1], items[2], newitem) # rewrites description
#        print rec.id
#        print rec.description
        newrecords.append(rec)
    outfile=genename+'_clean.fasta'
    with open(outfile,'w') as f:
        SeqIO.write(newrecords, f, 'fasta')
if __name__ == '__main__':
    # CLI: clean_seq_names.py <fasta infile> <gene name> <database label>
    infile = sys.argv[1]
    gene = sys.argv[2]
    database= sys.argv[3]
    clean_seqs(infile,gene,database)
| [
"rdtarvin@gmail.com"
] | rdtarvin@gmail.com |
d11d940582b0e8fbde9f48ff33c8715cee9d1f05 | 4ea0cb75847744ed58b188d4277d69370f31a0c7 | /app.py | e1e3f7b4bf38751be3cba21d3d5089c2f469e4cd | [
"MIT"
] | permissive | justinhaef/audit_endpoints | d1c33ada7c8a2e790c3421435fb917a37c4ff083 | 74a77c26c3d7f2a12773961a39220139e3e36537 | refs/heads/master | 2023-04-24T00:37:48.177314 | 2021-05-10T19:21:07 | 2021-05-10T19:21:07 | 364,438,280 | 0 | 0 | MIT | 2021-05-10T16:52:03 | 2021-05-05T02:11:48 | Python | UTF-8 | Python | false | false | 3,891 | py | import json
import argparse
import logging
from datetime import datetime
from tqdm import tqdm
from pathlib import Path
from parser import Parser
from cleaner import Cleaner
# pip install deepdiff
from deepdiff import DeepDiff
# Log to app.log in the working directory; DEBUG lines below are dropped
# because the level is INFO.
logging.basicConfig(
    filename=Path('app.log'),
    level=logging.INFO,
    format="%(asctime)s:%(levelname)s:%(message)s"
)
#-----Helper Functions-----#
def get_folders(parent_folder):
    """Return the names of all endpoint-model sub-folders of *parent_folder*.

    Non-directory entries are ignored. Logged at DEBUG level.
    """
    endpoint_models = [entry.name for entry in parent_folder.iterdir() if entry.is_dir()]
    logging.debug(f'Found Folder Names: {endpoint_models}')
    return endpoint_models
def get_standard_config(endpoint):
    """Return the path of the gold-standard config file for *endpoint*.

    Scans ./Endpoint/<endpoint>/Gold/ and returns the last regular file
    found (same selection as before). Previously, if no file existed the
    function crashed with an obscure NameError; it now raises
    FileNotFoundError with a clear message.
    """
    standard_config = None
    for entry in Path(f'./Endpoint/{endpoint}/Gold/').iterdir():
        if entry.is_file():
            standard_config = entry
            logging.debug(f'Standard config for {endpoint} found {entry}')
        else:
            # Preserved behaviour: every non-file entry logs an error.
            logging.error(f'No standard config file found for {endpoint}')
    if standard_config is None:
        raise FileNotFoundError(f'No standard config file found for {endpoint}')
    return standard_config
def gather_endpoints(endpoint):
    """Return the config-file paths under ./Endpoint/<endpoint>/InstallBase/.

    Fix: the loop variable previously shadowed the *endpoint* parameter,
    which made the code confusing and the parameter unusable inside the
    loop; the loop variable is now `entry`.
    """
    endpoint_files = []
    for entry in Path(f'./Endpoint/{endpoint}/InstallBase/').iterdir():
        if entry.is_file():
            endpoint_files.append(entry)
        # Preserved behaviour: log every directory entry, not just files.
        logging.debug(f'Found Endpoints: {entry.name}')
    return endpoint_files
#-----main function------#
def main(endpoint_models: list):
    """ Loop over endpoint models, search file directory for endpoints
    and compare to a standard endpoint config.

    Returns a list of {model_name: cleaned_diff} dicts, one entry per
    endpoint config file audited.
    """
    audit_list = list()
    for model in tqdm(endpoint_models, desc="Looping over endpoint models..."):
        # first get the gold standard config file
        standard_config_file = get_standard_config(model)
        # now open that standard file
        with open(standard_config_file, 'r') as standard_config:
            standard_config_json = json.load(standard_config)
            audit = Parser(standard_config_json)
        # gather endpoint filenames
        endpoint_config_files = gather_endpoints(model)
        for endpoint in tqdm(endpoint_config_files, desc="Looping over endpoint config files..."):
            with open(endpoint, 'r') as endpoint_file:
                endpoint_json = json.load(endpoint_file)
                # Diff each endpoint config against the gold standard,
                # then normalise the diff before collecting it.
                config_diff = audit.compare(endpoint_json, endpoint.name)
                cleaner = Cleaner(config_diff)
                cleaned = cleaner.clean()
                audit_list.append({f"{model}": cleaned})
    return audit_list
if __name__ == "__main__":
argument_parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
description="""
Cisco Video Endpoint Configuration Audit Application
""",
)
argument_parser.add_argument(
"-l",
"--limit",
help="Run whole application or limit to only functions",
dest="limit",
choices=['DX80', 'all'],
default='all',
required=False,
)
args = argument_parser.parse_args()
endpoint_models = get_folders(Path('./Endpoint'))
if args.limit == 'all':
result = main(endpoint_models)
elif args.limit in endpoint_models:
model = list()
model.append(args.limit)
result = main(model)
else:
print(f'Sorry but {args.limit} not found in given inventory.')
print(f'Please try again using one of these options: {endpoint_models}')
if result:
today = datetime.today()
with open(Path(f'./output/{today.date()}.json'), 'w') as outfile:
json.dump(result, outfile, indent=4)
print(f'Complete')
else:
print(f'There was no result from the audit.') | [
"justin.haefner@gmail.com"
] | justin.haefner@gmail.com |
e2148f0f764a79bfe448ff604bbf47c7b66721f4 | ca50e6b61a5b168607beaf960a585a18b67776cb | /acolyte/testing/core/context.py | f96716410fd05d98f4d764ed72fb6fb0f2742f42 | [] | no_license | chihongze/acolyte | 07fdcba2afc89d5c8c4c2d6c924b678b10a5a54c | 57693989261d36511620b19baed801df7b4238e5 | refs/heads/master | 2020-12-30T20:58:27.580788 | 2016-09-18T03:58:47 | 2016-09-18T03:58:47 | 67,601,167 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | from acolyte.core.context import MySQLContext
from acolyte.testing import EasemobFlowTestCase
class MySQLContextTestCase(EasemobFlowTestCase):
    def setUp(self):
        # Context bound to the "FlowExecutorService" and "db" objects
        # (self._ is presumably a service locator — see EasemobFlowTestCase)
        # and flow instance id 100086.
        self._flow_ctx = MySQLContext(
            self._("FlowExecutorService"), self._("db"), 100086)
    def testCommonOperation(self):
        """Test the common dict-style operations of MySQLContext.
        """
        self._flow_ctx["id"] = 100
        self._flow_ctx["name"] = "Sam"
        # The int 100 comes back as the string '100' — values are stored
        # as text.
        self.assertEqual(self._flow_ctx["id"], '100')
        self.assertEqual(self._flow_ctx["name"], "Sam")
        self.assertEqual(len(self._flow_ctx), 2)
        del self._flow_ctx["id"]
        # Deleted/missing keys read as None rather than raising KeyError.
        self.assertIsNone(self._flow_ctx["id"])
    def tearDown(self):
        self._flow_ctx.destroy()
| [
"hongze.chi@shicaigj.com"
] | hongze.chi@shicaigj.com |
77d2829d54da095fddd7e59397ee677f5e65d3d6 | 7c26f08f666273f0717382382482d92a83d07fbb | /django/tutorial/crud0412/BT/urls.py | 9e58c1dbbc3f12ff2498d0795c4bc46b4fddac4c | [] | no_license | kimyounghoon93/TIL-c9 | f0d7cd574a63ab87545a197ff3d659979c80ed50 | 07301b51cb757e101d44257d71afa61639147378 | refs/heads/master | 2020-04-17T17:17:56.494719 | 2019-05-02T03:09:59 | 2019-05-02T03:09:59 | 166,777,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | from django.urls import path
from . import views
app_name = 'BT'
urlpatterns = [
path('create/', views.create, name='create'),
]
| [
"busanteam_business@naver.com"
] | busanteam_business@naver.com |
7d7e94d1be2aa238c20dd4c88421ec965f026d95 | 11025e06cbb0d4964adebe76e0a59b4f5d2fe892 | /eichhoernchen/views.py | 466797b9fb56e1736b25413eb21cb321e7390bd5 | [] | no_license | cod3monk/eichhoernchen | 02b3f940008691cd60c541b7cc0e2b88a256c3aa | 27f735f411500719acff554017ad1796d8a0379b | refs/heads/master | 2021-01-01T18:23:31.327480 | 2013-05-22T09:58:32 | 2013-05-22T09:58:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,220 | py | #!/usr/bin/env python
# encoding: utf-8
from eichhoernchen import app, api
import model
from flask import render_template
from flask import Flask, request
from flask.ext import restful
import flask
import json
import random
import querier
def match_ip_or_403(allowed_ips):
    """Decorator factory: abort with 403 unless the requester's IP is allowed.

    Fix: the wrapper now carries the wrapped function's metadata via
    functools.wraps — without it every decorated view was named "wrapper",
    which breaks Flask endpoint naming when the decorator is applied to
    more than one view.
    """
    def decorator(fnct):
        # Local import keeps the file-level import block untouched.
        from functools import wraps

        @wraps(fnct)
        def wrapper(*args, **kwargs):
            if request.remote_addr not in allowed_ips:
                # requester's IP is not in allowed_ips list, 403 - Access denied
                restful.abort(403)
            return fnct(*args, **kwargs)
        return wrapper
    return decorator
@app.route('/')
def index():
    # Main page: renders templates/list.html.
    return render_template('list.html')
class ObjectSearch(restful.Resource):
    """Search endpoint (GET /db/obj/search): returns matching objects as JSON.

    Without a ?q= argument the first five objects are returned.
    """
    def get(self):
        # Multiple objects aka. search:
        data = []
        if 'q' in request.args:
            # TODO: parse argument *q* and use as filter
            q = querier.UserQuery.parse(request.args['q'])
            # making use of the new (full)text index
            # See: http://emptysquare.net/blog/mongodb-full-text-search/
            if q['search']:
                # Full-text search via the MongoDB `text` command, further
                # restricted by the parsed filter/language/limit.
                data = model.db.command('text', model.Object.collection_name,
                    search=q['search'], filter=q['filter'], language=q['language'], limit=q['limit'])
                data = map(lambda x: model.Object.from_pymongo(x['obj']), data['results'])
            else:
                # No search terms: plain filtered find().
                data = list(model.Object.find(q['filter']).limit(q['limit']))
        else:
            # By default we return the first five objects
            data = list(model.Object.find().limit(5))
        # TODO this is a hack: use bson to directly get the dict!
        return map(lambda x: x.to_json(), data)
api.add_resource(ObjectSearch, '/db/obj/search')
class MongoResource(restful.Resource):
    '''Generic restful.Resource for MongoDB-backed documents.
    Subclasses set *document* and override allow_change to suit their needs.
    (Python 2 code — note `basestring`.)'''
    # This needs to be overwritten by extending classes
    document = None
    def load_or_404(self, id_):
        '''Loads Object from database or raises flask 404 exception'''
        # Converting string to ObjectId
        if isinstance(id_, basestring):
            try:
                id_ = model.ObjectId(id_)
            except model.InvalidId:
                # id_ is not a real ObjectId
                restful.abort(400)
        try:
            return self.document(id_)
        except ValueError:
            # Query returned nothing
            restful.abort(404)
    def get(self, id_):
        # Selection of single object by id_
        # TODO this is a hack: use bson to directly get the dict!
        return self.load_or_404(id_).to_json()
    def put(self, id_):
        '''Updates an existing object with *id_*. Or, if *id_* is "new", creates a new object.
        Returns a dictionary with field names as keys and error messages as values.
        If there were no errors, the JSON representation of the object is returned.'''
        # We only respond to valid_ JSON requests that are a dictionary
        if not request.json or type(request.json) is not dict:
            restful.abort(400)
        if id_ == "new":
            # Create new Object
            o = self.document()
        else:
            # Update existing Object
            o = self.load_or_404(id_)
        err_messages = o.from_json(request.json)
        if err_messages:
            return err_messages
        # Check validity
        err_messages = o.validate()
        if err_messages:
            return err_messages
        # Do we really want to save changes?
        # Only persisted when the client passes ?save — otherwise this acts
        # as a validate-only dry run.
        if 'save' in request.args:
            if not self.allow_change():
                # Change is not allowed -> 403 - Access denied
                restful.abort(403)
            # save object
            err_messages = o.save()
            if err_messages:
                return err_messages, 400
        return o.to_json()
    def delete(self, id_):
        '''Deletes an object with *id_*.'''
        if not self.allow_change():
            # Change is not allowed -> 403 - Access denied
            restful.abort(403)
        # Selecting object from database and delete
        self.load_or_404(id_).delete()
    def allow_change(self):
        '''Is being checked before any changes are made.
        The changes are only saved if this returns True.
        Default policy: writes only from localhost.'''
        return request.remote_addr == '127.0.0.1'
# Concrete resources: each binds one document type to a /db/<prefix>/<id>
# route via the generic MongoResource behaviour.
class ObjectResource(MongoResource):
    document = model.Object
api.add_resource(ObjectResource, '/db/obj/<string:id_>')
class LocationResource(MongoResource):
    document = model.Location
api.add_resource(LocationResource, '/db/loc/<string:id_>')
# class CategoryResource(MongoResource):
#     document = model.Category
# api.add_resource(CategoryResource, '/db/cat/<string:id_>')
#
# class CompanyResource(MongoResource):
#     document = model.Company
# api.add_resource(CompanyResource, '/db/com/<string:id_>')
#
# class AttributeResource(MongoResource):
#     document = model.Attribute
# api.add_resource(AttributeResource, '/db/atr/<string:id_>')
"codemonk@u-sys.org"
] | codemonk@u-sys.org |
9b23535eb76065e7d3f1fe2b65e825724333a94e | cd0ffaf0160799e014d20569e99542ab5b3cc6c0 | /testPanedWindow.py | 06cb857a919cf9aee773411c6428a7278aacc360 | [] | no_license | Anm-pinellia/PythonStudy | 0ea92bff9d25bff0726029891c2442e853c22629 | 0810442ae291d5fcb770b380425a7ccdbad9d447 | refs/heads/master | 2022-12-03T18:29:10.670170 | 2020-08-18T06:34:23 | 2020-08-18T06:34:23 | 279,058,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | from tkinter import *
root = Tk()
m = PanedWindow(showhandle = True, sashrelief=SUNKEN)
m.pack(fill=BOTH, expand=1)
left = Label(m, text='左窗格')
m.add(left)
m2 = PanedWindow(orient=VERTICAL, showhandle = True, sashrelief=SUNKEN)
m.add(m2)
right1 = Label(m2, text='右窗格1')
m2.add(right1)
right2 = Label(m2, text='右窗格2')
m2.add(right2)
mainloop()
| [
"1752066346@qq.com"
] | 1752066346@qq.com |
ba6005b0ace1cfc2e6631857d634ff0646876303 | 0d566dee40a0fa22b4d9afdc345e57a518c52b71 | /credentials.py | 12aebcbaf7ac04437441ecb26e6259c9f368541c | [] | no_license | AstroCB/waitlistBot | 4f16beca6f630c292ffe98d86d0ea3d22917f264 | e137ccb87c0f84c8969c979fc271e6f043087ffd | refs/heads/master | 2021-01-01T20:42:43.006343 | 2017-07-31T19:10:56 | 2017-07-31T19:10:56 | 98,914,726 | 0 | 0 | null | 2017-07-31T17:55:09 | 2017-07-31T17:55:09 | null | UTF-8 | Python | false | false | 150 | py | Account_SID = "Your SID here"
Auth_TOKEN = "Your Auth token here"
myTwilioNumber = "copy paste your number"
myNumber = "use this format +15556665555"
| [
"cambernhardt@me.com"
] | cambernhardt@me.com |
50d6287652848e060f1283ac01817eee828aae5e | 515107fc56b44742d2938f7f702b36f161214a75 | /python/design_patterns/strategy_pattern/duck.py | 6ff6a164e7dab2df39b6ac347c8effd522095d04 | [
"MIT"
] | permissive | lmregus/Portfolio | e621f50ee943b27a6a6b7c18d1090bd7c56da8c8 | 9a751443edbfe5ff2b47cdeacca86761ed03e81f | refs/heads/master | 2023-09-02T01:56:14.928021 | 2019-06-02T23:01:03 | 2019-06-02T23:01:03 | 34,394,101 | 0 | 0 | MIT | 2022-12-27T15:00:08 | 2015-04-22T14:09:12 | Java | UTF-8 | Python | false | false | 830 | py | from abc import ABC
from abc import abstractmethod
from behavior.fly_behavior import FlyBehavior
from behavior.quack_behavior import QuackBehavior
class Duck(ABC):
def __init__(self, fly_behavior, quack_behavior):
self._fly_behavior = fly_behavior
self._quack_behavior = quack_behavior
@abstractmethod
def display(self):
return NotImplemented()
def fly(self):
self._fly_behavior.fly()
def quack(self):
self._quack_behavior.quack()
def swim(self):
print('Swim')
def set_fly_behavior(self, fly_behavior: FlyBehavior):
self._fly_behavior = fly_behavior
def set_quack_behavior(self, quack_behavior: QuackBehavior):
self._quack_behavior = quack_behavior
__slots__ = (
'_fly_behavior',
'_quack_behavior'
)
| [
"lmregus27@gmail.com"
] | lmregus27@gmail.com |
27c47b4018832a5889811707121620980fba947d | 7b8c3ef7ad0e997df37ceaa09a661089280e418c | /libs/selenium/webdriver/common/devtools/v107/runtime.py | 90dc81fb0ce20ecbb547a2298f62dd0cff2f5bf1 | [
"MIT"
] | permissive | rocketbot-cl/webpro | 47053a7c63e9c3526b506f44d970a699bc65d378 | 5927575bf15fc6dd8d40a287ce32de52cd6ba5c8 | refs/heads/master | 2023-08-18T02:47:24.107652 | 2023-08-14T15:07:41 | 2023-08-14T15:07:41 | 218,623,703 | 5 | 4 | null | 2023-08-14T15:07:42 | 2019-10-30T20:51:02 | Python | UTF-8 | Python | false | false | 57,994 | py | # DO NOT EDIT THIS FILE!
#
# This file is generated from the CDP specification. If you need to make
# changes, edit the generator and regenerate all of the modules.
#
# CDP domain: Runtime
from __future__ import annotations
from .util import event_class, T_JSON_DICT
from dataclasses import dataclass
import enum
import typing
class ScriptId(str):
    '''
    Unique script identifier (a string newtype).
    '''

    def to_json(self) -> str:
        # Wire format is the bare string.
        return self

    @classmethod
    def from_json(cls, json: str) -> ScriptId:
        return cls(json)

    def __repr__(self):
        return f'ScriptId({super().__repr__()})'
@dataclass
class WebDriverValue:
    '''
    Value serialized according to the WebDriver BiDi specification
    (https://w3c.github.io/webdriver-bidi).
    '''
    #: BiDi type tag.
    type_: str

    #: Serialized value, when one exists for the type.
    value: typing.Optional[typing.Any] = None

    #: Remote object id, when the value is a handle.
    object_id: typing.Optional[str] = None

    def to_json(self):
        # Only emit optional keys that are actually set.
        json = {'type': self.type_}
        if self.value is not None:
            json['value'] = self.value
        if self.object_id is not None:
            json['objectId'] = self.object_id
        return json

    @classmethod
    def from_json(cls, json):
        # Absent keys fall back to the dataclass defaults.
        kwargs = {'type_': str(json['type'])}
        if 'value' in json:
            kwargs['value'] = json['value']
        if 'objectId' in json:
            kwargs['object_id'] = str(json['objectId'])
        return cls(**kwargs)
class RemoteObjectId(str):
    '''
    Unique object identifier (a string newtype).
    '''

    def to_json(self) -> str:
        # Wire format is the bare string.
        return self

    @classmethod
    def from_json(cls, json: str) -> RemoteObjectId:
        return cls(json)

    def __repr__(self):
        return f'RemoteObjectId({super().__repr__()})'
class UnserializableValue(str):
    '''
    Primitive value which cannot be JSON-stringified. Includes values ``-0``,
    ``NaN``, ``Infinity``, ``-Infinity``, and bigint literals.
    '''

    def to_json(self) -> str:
        # Wire format is the bare string.
        return self

    @classmethod
    def from_json(cls, json: str) -> UnserializableValue:
        return cls(json)

    def __repr__(self):
        return f'UnserializableValue({super().__repr__()})'
@dataclass
class RemoteObject:
    '''
    Mirror object referencing original JavaScript object.
    '''
    #: Object type.
    type_: str

    #: Object subtype hint. Specified for ``object`` type values only.
    #: NOTE: If you change anything here, make sure to also update
    #: ``subtype`` in ``ObjectPreview`` and ``PropertyPreview`` below.
    subtype: typing.Optional[str] = None

    #: Object class (constructor) name. Specified for ``object`` type values only.
    class_name: typing.Optional[str] = None

    #: Remote object value in case of primitive values or JSON values (if it was requested).
    value: typing.Optional[typing.Any] = None

    #: Primitive value which can not be JSON-stringified does not have ``value``, but gets this
    #: property.
    unserializable_value: typing.Optional[UnserializableValue] = None

    #: String representation of the object.
    description: typing.Optional[str] = None

    #: WebDriver BiDi representation of the value.
    web_driver_value: typing.Optional[WebDriverValue] = None

    #: Unique object identifier (for non-primitive values).
    object_id: typing.Optional[RemoteObjectId] = None

    #: Preview containing abbreviated property values. Specified for ``object`` type values only.
    preview: typing.Optional[ObjectPreview] = None

    custom_preview: typing.Optional[CustomPreview] = None

    def to_json(self):
        # ``type`` is the only mandatory key; optional attributes are
        # serialized solely when they are set.
        json = {'type': self.type_}
        if self.subtype is not None:
            json['subtype'] = self.subtype
        if self.class_name is not None:
            json['className'] = self.class_name
        if self.value is not None:
            json['value'] = self.value
        if self.unserializable_value is not None:
            json['unserializableValue'] = self.unserializable_value.to_json()
        if self.description is not None:
            json['description'] = self.description
        if self.web_driver_value is not None:
            json['webDriverValue'] = self.web_driver_value.to_json()
        if self.object_id is not None:
            json['objectId'] = self.object_id.to_json()
        if self.preview is not None:
            json['preview'] = self.preview.to_json()
        if self.custom_preview is not None:
            json['customPreview'] = self.custom_preview.to_json()
        return json

    @classmethod
    def from_json(cls, json):
        # Only decode the optional keys that are present; absent ones keep
        # their dataclass defaults.
        kwargs = {'type_': str(json['type'])}
        if 'subtype' in json:
            kwargs['subtype'] = str(json['subtype'])
        if 'className' in json:
            kwargs['class_name'] = str(json['className'])
        if 'value' in json:
            kwargs['value'] = json['value']
        if 'unserializableValue' in json:
            kwargs['unserializable_value'] = UnserializableValue.from_json(json['unserializableValue'])
        if 'description' in json:
            kwargs['description'] = str(json['description'])
        if 'webDriverValue' in json:
            kwargs['web_driver_value'] = WebDriverValue.from_json(json['webDriverValue'])
        if 'objectId' in json:
            kwargs['object_id'] = RemoteObjectId.from_json(json['objectId'])
        if 'preview' in json:
            kwargs['preview'] = ObjectPreview.from_json(json['preview'])
        if 'customPreview' in json:
            kwargs['custom_preview'] = CustomPreview.from_json(json['customPreview'])
        return cls(**kwargs)
@dataclass
class CustomPreview:
    #: The JSON-stringified result of formatter.header(object, config) call.
    #: It contains json ML array that represents RemoteObject.
    header: str

    #: If formatter returns true as a result of formatter.hasBody call then bodyGetterId will
    #: contain RemoteObjectId for the function that returns result of formatter.body(object, config) call.
    #: The result value is json ML array.
    body_getter_id: typing.Optional[RemoteObjectId] = None

    def to_json(self):
        # ``bodyGetterId`` is only emitted when present.
        json = {'header': self.header}
        if self.body_getter_id is not None:
            json['bodyGetterId'] = self.body_getter_id.to_json()
        return json

    @classmethod
    def from_json(cls, json):
        kwargs = {'header': str(json['header'])}
        if 'bodyGetterId' in json:
            kwargs['body_getter_id'] = RemoteObjectId.from_json(json['bodyGetterId'])
        return cls(**kwargs)
@dataclass
class ObjectPreview:
    '''
    Object containing abbreviated remote object value.
    '''
    #: Object type.
    type_: str

    #: True iff some of the properties or entries of the original object did not fit.
    overflow: bool

    #: List of the properties.
    properties: typing.List[PropertyPreview]

    #: Object subtype hint. Specified for ``object`` type values only.
    subtype: typing.Optional[str] = None

    #: String representation of the object.
    description: typing.Optional[str] = None

    #: List of the entries. Specified for ``map`` and ``set`` subtype values only.
    entries: typing.Optional[typing.List[EntryPreview]] = None

    def to_json(self):
        # Mandatory keys first, then any optional keys that are set.
        json = {
            'type': self.type_,
            'overflow': self.overflow,
            'properties': [p.to_json() for p in self.properties],
        }
        if self.subtype is not None:
            json['subtype'] = self.subtype
        if self.description is not None:
            json['description'] = self.description
        if self.entries is not None:
            json['entries'] = [e.to_json() for e in self.entries]
        return json

    @classmethod
    def from_json(cls, json):
        kwargs = {
            'type_': str(json['type']),
            'overflow': bool(json['overflow']),
            'properties': [PropertyPreview.from_json(p) for p in json['properties']],
        }
        if 'subtype' in json:
            kwargs['subtype'] = str(json['subtype'])
        if 'description' in json:
            kwargs['description'] = str(json['description'])
        if 'entries' in json:
            kwargs['entries'] = [EntryPreview.from_json(e) for e in json['entries']]
        return cls(**kwargs)
@dataclass
class PropertyPreview:
    #: Property name.
    name: str

    #: Object type. Accessor means that the property itself is an accessor property.
    type_: str

    #: User-friendly property value string.
    value: typing.Optional[str] = None

    #: Nested value preview.
    value_preview: typing.Optional[ObjectPreview] = None

    #: Object subtype hint. Specified for ``object`` type values only.
    subtype: typing.Optional[str] = None

    def to_json(self):
        # Optional keys are emitted only when set.
        json = {'name': self.name, 'type': self.type_}
        if self.value is not None:
            json['value'] = self.value
        if self.value_preview is not None:
            json['valuePreview'] = self.value_preview.to_json()
        if self.subtype is not None:
            json['subtype'] = self.subtype
        return json

    @classmethod
    def from_json(cls, json):
        kwargs = {'name': str(json['name']), 'type_': str(json['type'])}
        if 'value' in json:
            kwargs['value'] = str(json['value'])
        if 'valuePreview' in json:
            kwargs['value_preview'] = ObjectPreview.from_json(json['valuePreview'])
        if 'subtype' in json:
            kwargs['subtype'] = str(json['subtype'])
        return cls(**kwargs)
@dataclass
class EntryPreview:
    #: Preview of the value.
    value: ObjectPreview

    #: Preview of the key. Specified for map-like collection entries.
    key: typing.Optional[ObjectPreview] = None

    def to_json(self):
        # ``key`` only exists for map-like collections.
        json = {'value': self.value.to_json()}
        if self.key is not None:
            json['key'] = self.key.to_json()
        return json

    @classmethod
    def from_json(cls, json):
        kwargs = {'value': ObjectPreview.from_json(json['value'])}
        if 'key' in json:
            kwargs['key'] = ObjectPreview.from_json(json['key'])
        return cls(**kwargs)
@dataclass
class PropertyDescriptor:
    '''
    Object property descriptor.
    '''
    #: Property name or symbol description.
    name: str

    #: True if the type of this property descriptor may be changed and if the property may be
    #: deleted from the corresponding object.
    configurable: bool

    #: True if this property shows up during enumeration of the properties on the corresponding
    #: object.
    enumerable: bool

    #: The value associated with the property.
    value: typing.Optional[RemoteObject] = None

    #: True if the value associated with the property may be changed (data descriptors only).
    writable: typing.Optional[bool] = None

    #: A function which serves as a getter for the property, or ``undefined`` if there is no getter
    #: (accessor descriptors only).
    get: typing.Optional[RemoteObject] = None

    #: A function which serves as a setter for the property, or ``undefined`` if there is no setter
    #: (accessor descriptors only).
    set_: typing.Optional[RemoteObject] = None

    #: True if the result was thrown during the evaluation.
    was_thrown: typing.Optional[bool] = None

    #: True if the property is owned for the object.
    is_own: typing.Optional[bool] = None

    #: Property symbol object, if the property is of the ``symbol`` type.
    symbol: typing.Optional[RemoteObject] = None

    def to_json(self):
        # The three mandatory keys are always present; the rest are emitted
        # only when set.
        json = {
            'name': self.name,
            'configurable': self.configurable,
            'enumerable': self.enumerable,
        }
        if self.value is not None:
            json['value'] = self.value.to_json()
        if self.writable is not None:
            json['writable'] = self.writable
        if self.get is not None:
            json['get'] = self.get.to_json()
        if self.set_ is not None:
            json['set'] = self.set_.to_json()
        if self.was_thrown is not None:
            json['wasThrown'] = self.was_thrown
        if self.is_own is not None:
            json['isOwn'] = self.is_own
        if self.symbol is not None:
            json['symbol'] = self.symbol.to_json()
        return json

    @classmethod
    def from_json(cls, json):
        kwargs = {
            'name': str(json['name']),
            'configurable': bool(json['configurable']),
            'enumerable': bool(json['enumerable']),
        }
        if 'value' in json:
            kwargs['value'] = RemoteObject.from_json(json['value'])
        if 'writable' in json:
            kwargs['writable'] = bool(json['writable'])
        if 'get' in json:
            kwargs['get'] = RemoteObject.from_json(json['get'])
        if 'set' in json:
            kwargs['set_'] = RemoteObject.from_json(json['set'])
        if 'wasThrown' in json:
            kwargs['was_thrown'] = bool(json['wasThrown'])
        if 'isOwn' in json:
            kwargs['is_own'] = bool(json['isOwn'])
        if 'symbol' in json:
            kwargs['symbol'] = RemoteObject.from_json(json['symbol'])
        return cls(**kwargs)
@dataclass
class InternalPropertyDescriptor:
    '''
    Object internal property descriptor. This property isn't normally visible in JavaScript code.
    '''
    #: Conventional property name.
    name: str

    #: The value associated with the property.
    value: typing.Optional[RemoteObject] = None

    def to_json(self):
        # ``value`` is only emitted when present.
        json = {'name': self.name}
        if self.value is not None:
            json['value'] = self.value.to_json()
        return json

    @classmethod
    def from_json(cls, json):
        kwargs = {'name': str(json['name'])}
        if 'value' in json:
            kwargs['value'] = RemoteObject.from_json(json['value'])
        return cls(**kwargs)
@dataclass
class PrivatePropertyDescriptor:
    '''
    Object private field descriptor.
    '''
    #: Private property name.
    name: str

    #: The value associated with the private property.
    value: typing.Optional[RemoteObject] = None

    #: A function which serves as a getter for the private property,
    #: or ``undefined`` if there is no getter (accessor descriptors only).
    get: typing.Optional[RemoteObject] = None

    #: A function which serves as a setter for the private property,
    #: or ``undefined`` if there is no setter (accessor descriptors only).
    set_: typing.Optional[RemoteObject] = None

    def to_json(self):
        # Optional keys are emitted only when set.
        json = {'name': self.name}
        if self.value is not None:
            json['value'] = self.value.to_json()
        if self.get is not None:
            json['get'] = self.get.to_json()
        if self.set_ is not None:
            json['set'] = self.set_.to_json()
        return json

    @classmethod
    def from_json(cls, json):
        kwargs = {'name': str(json['name'])}
        if 'value' in json:
            kwargs['value'] = RemoteObject.from_json(json['value'])
        if 'get' in json:
            kwargs['get'] = RemoteObject.from_json(json['get'])
        if 'set' in json:
            kwargs['set_'] = RemoteObject.from_json(json['set'])
        return cls(**kwargs)
@dataclass
class CallArgument:
    '''
    Represents function call argument. Either remote object id ``objectId``, primitive ``value``,
    unserializable primitive value or neither of (for undefined) them should be specified.
    '''
    #: Primitive value or serializable javascript object.
    value: typing.Optional[typing.Any] = None

    #: Primitive value which can not be JSON-stringified.
    unserializable_value: typing.Optional[UnserializableValue] = None

    #: Remote object handle.
    object_id: typing.Optional[RemoteObjectId] = None

    def to_json(self):
        # Every key is optional: an empty dict encodes ``undefined``.
        json = {}
        if self.value is not None:
            json['value'] = self.value
        if self.unserializable_value is not None:
            json['unserializableValue'] = self.unserializable_value.to_json()
        if self.object_id is not None:
            json['objectId'] = self.object_id.to_json()
        return json

    @classmethod
    def from_json(cls, json):
        kwargs = {}
        if 'value' in json:
            kwargs['value'] = json['value']
        if 'unserializableValue' in json:
            kwargs['unserializable_value'] = UnserializableValue.from_json(json['unserializableValue'])
        if 'objectId' in json:
            kwargs['object_id'] = RemoteObjectId.from_json(json['objectId'])
        return cls(**kwargs)
class ExecutionContextId(int):
    '''
    Id of an execution context (an integer newtype).
    '''

    def to_json(self) -> int:
        # Wire format is the bare integer.
        return self

    @classmethod
    def from_json(cls, json: int) -> ExecutionContextId:
        return cls(json)

    def __repr__(self):
        return f'ExecutionContextId({super().__repr__()})'
@dataclass
class ExecutionContextDescription:
    '''
    Description of an isolated world.
    '''
    #: Unique id of the execution context. It can be used to specify in which execution context
    #: script evaluation should be performed.
    id_: ExecutionContextId

    #: Execution context origin.
    origin: str

    #: Human readable name describing given context.
    name: str

    #: A system-unique execution context identifier. Unlike the id, this is unique across
    #: multiple processes, so can be reliably used to identify specific context while backend
    #: performs a cross-process navigation.
    unique_id: str

    #: Embedder-specific auxiliary data.
    aux_data: typing.Optional[dict] = None

    def to_json(self):
        # ``auxData`` is only emitted when present.
        json = {
            'id': self.id_.to_json(),
            'origin': self.origin,
            'name': self.name,
            'uniqueId': self.unique_id,
        }
        if self.aux_data is not None:
            json['auxData'] = self.aux_data
        return json

    @classmethod
    def from_json(cls, json):
        kwargs = {
            'id_': ExecutionContextId.from_json(json['id']),
            'origin': str(json['origin']),
            'name': str(json['name']),
            'unique_id': str(json['uniqueId']),
        }
        if 'auxData' in json:
            kwargs['aux_data'] = dict(json['auxData'])
        return cls(**kwargs)
@dataclass
class ExceptionDetails:
    '''
    Detailed information about exception (or error) that was thrown during script compilation or
    execution.
    '''
    #: Exception id.
    exception_id: int

    #: Exception text, which should be used together with exception object when available.
    text: str

    #: Line number of the exception location (0-based).
    line_number: int

    #: Column number of the exception location (0-based).
    column_number: int

    #: Script ID of the exception location.
    script_id: typing.Optional[ScriptId] = None

    #: URL of the exception location, to be used when the script was not reported.
    url: typing.Optional[str] = None

    #: JavaScript stack trace if available.
    stack_trace: typing.Optional[StackTrace] = None

    #: Exception object if available.
    exception: typing.Optional[RemoteObject] = None

    #: Identifier of the context where exception happened.
    execution_context_id: typing.Optional[ExecutionContextId] = None

    #: Dictionary with entries of meta data that the client associated
    #: with this exception, such as information about associated network
    #: requests, etc.
    exception_meta_data: typing.Optional[dict] = None

    def to_json(self):
        # The four mandatory location keys always appear; the rest are
        # emitted only when set.
        json = {
            'exceptionId': self.exception_id,
            'text': self.text,
            'lineNumber': self.line_number,
            'columnNumber': self.column_number,
        }
        if self.script_id is not None:
            json['scriptId'] = self.script_id.to_json()
        if self.url is not None:
            json['url'] = self.url
        if self.stack_trace is not None:
            json['stackTrace'] = self.stack_trace.to_json()
        if self.exception is not None:
            json['exception'] = self.exception.to_json()
        if self.execution_context_id is not None:
            json['executionContextId'] = self.execution_context_id.to_json()
        if self.exception_meta_data is not None:
            json['exceptionMetaData'] = self.exception_meta_data
        return json

    @classmethod
    def from_json(cls, json):
        kwargs = {
            'exception_id': int(json['exceptionId']),
            'text': str(json['text']),
            'line_number': int(json['lineNumber']),
            'column_number': int(json['columnNumber']),
        }
        if 'scriptId' in json:
            kwargs['script_id'] = ScriptId.from_json(json['scriptId'])
        if 'url' in json:
            kwargs['url'] = str(json['url'])
        if 'stackTrace' in json:
            kwargs['stack_trace'] = StackTrace.from_json(json['stackTrace'])
        if 'exception' in json:
            kwargs['exception'] = RemoteObject.from_json(json['exception'])
        if 'executionContextId' in json:
            kwargs['execution_context_id'] = ExecutionContextId.from_json(json['executionContextId'])
        if 'exceptionMetaData' in json:
            kwargs['exception_meta_data'] = dict(json['exceptionMetaData'])
        return cls(**kwargs)
class Timestamp(float):
    '''
    Number of milliseconds since epoch (a float newtype).
    '''

    def to_json(self) -> float:
        # Wire format is the bare float.
        return self

    @classmethod
    def from_json(cls, json: float) -> Timestamp:
        return cls(json)

    def __repr__(self):
        return f'Timestamp({super().__repr__()})'
class TimeDelta(float):
    '''
    Number of milliseconds (a float newtype).
    '''

    def to_json(self) -> float:
        # Wire format is the bare float.
        return self

    @classmethod
    def from_json(cls, json: float) -> TimeDelta:
        return cls(json)

    def __repr__(self):
        return f'TimeDelta({super().__repr__()})'
@dataclass
class CallFrame:
    '''
    Stack entry for runtime errors and assertions.
    '''
    #: JavaScript function name.
    function_name: str

    #: JavaScript script id.
    script_id: ScriptId

    #: JavaScript script name or url.
    url: str

    #: JavaScript script line number (0-based).
    line_number: int

    #: JavaScript script column number (0-based).
    column_number: int

    def to_json(self):
        # All fields are mandatory, so the shape is a plain literal.
        return {
            'functionName': self.function_name,
            'scriptId': self.script_id.to_json(),
            'url': self.url,
            'lineNumber': self.line_number,
            'columnNumber': self.column_number,
        }

    @classmethod
    def from_json(cls, json):
        return cls(
            function_name=str(json['functionName']),
            script_id=ScriptId.from_json(json['scriptId']),
            url=str(json['url']),
            line_number=int(json['lineNumber']),
            column_number=int(json['columnNumber']),
        )
@dataclass
class StackTrace:
    '''
    Call frames for assertions or error messages.
    '''
    #: JavaScript function name.
    call_frames: typing.List[CallFrame]

    #: String label of this stack trace. For async traces this may be a name of the function that
    #: initiated the async call.
    description: typing.Optional[str] = None

    #: Asynchronous JavaScript stack trace that preceded this stack, if available.
    parent: typing.Optional[StackTrace] = None

    #: Asynchronous JavaScript stack trace that preceded this stack, if available.
    parent_id: typing.Optional[StackTraceId] = None

    def to_json(self):
        # ``callFrames`` is mandatory; the rest are emitted only when set.
        json = {'callFrames': [frame.to_json() for frame in self.call_frames]}
        if self.description is not None:
            json['description'] = self.description
        if self.parent is not None:
            json['parent'] = self.parent.to_json()
        if self.parent_id is not None:
            json['parentId'] = self.parent_id.to_json()
        return json

    @classmethod
    def from_json(cls, json):
        kwargs = {'call_frames': [CallFrame.from_json(f) for f in json['callFrames']]}
        if 'description' in json:
            kwargs['description'] = str(json['description'])
        if 'parent' in json:
            kwargs['parent'] = StackTrace.from_json(json['parent'])
        if 'parentId' in json:
            kwargs['parent_id'] = StackTraceId.from_json(json['parentId'])
        return cls(**kwargs)
class UniqueDebuggerId(str):
    '''
    Unique identifier of current debugger (a string newtype).
    '''

    def to_json(self) -> str:
        # Wire format is the bare string.
        return self

    @classmethod
    def from_json(cls, json: str) -> UniqueDebuggerId:
        return cls(json)

    def __repr__(self):
        return f'UniqueDebuggerId({super().__repr__()})'
@dataclass
class StackTraceId:
    '''
    If ``debuggerId`` is set stack trace comes from another debugger and can be resolved there. This
    allows to track cross-debugger calls. See ``Runtime.StackTrace`` and ``Debugger.paused`` for usages.
    '''
    id_: str

    debugger_id: typing.Optional[UniqueDebuggerId] = None

    def to_json(self):
        # ``debuggerId`` is only emitted when present.
        json = {'id': self.id_}
        if self.debugger_id is not None:
            json['debuggerId'] = self.debugger_id.to_json()
        return json

    @classmethod
    def from_json(cls, json):
        kwargs = {'id_': str(json['id'])}
        if 'debuggerId' in json:
            kwargs['debugger_id'] = UniqueDebuggerId.from_json(json['debuggerId'])
        return cls(**kwargs)
def await_promise(
        promise_object_id: RemoteObjectId,
        return_by_value: typing.Optional[bool] = None,
        generate_preview: typing.Optional[bool] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[RemoteObject, typing.Optional[ExceptionDetails]]]:
    '''
    Add handler to promise with given promise object id.

    :param promise_object_id: Identifier of the promise.
    :param return_by_value: *(Optional)* Whether the result is expected to be a JSON object that should be sent by value.
    :param generate_preview: *(Optional)* Whether preview should be generated for the result.
    :returns: A tuple with the following items:

        0. **result** - Promise result. Will contain rejected value if promise was rejected.
        1. **exceptionDetails** - *(Optional)* Exception details if stack strace is available.
    '''
    # Build the parameter payload, omitting unset optional arguments.
    params: T_JSON_DICT = {'promiseObjectId': promise_object_id.to_json()}
    if return_by_value is not None:
        params['returnByValue'] = return_by_value
    if generate_preview is not None:
        params['generatePreview'] = generate_preview
    # Yield the command, then decode the CDP response.
    response = yield {'method': 'Runtime.awaitPromise', 'params': params}
    return (
        RemoteObject.from_json(response['result']),
        ExceptionDetails.from_json(response['exceptionDetails']) if 'exceptionDetails' in response else None
    )
def call_function_on(
        function_declaration: str,
        object_id: typing.Optional[RemoteObjectId] = None,
        arguments: typing.Optional[typing.List[CallArgument]] = None,
        silent: typing.Optional[bool] = None,
        return_by_value: typing.Optional[bool] = None,
        generate_preview: typing.Optional[bool] = None,
        user_gesture: typing.Optional[bool] = None,
        await_promise: typing.Optional[bool] = None,
        execution_context_id: typing.Optional[ExecutionContextId] = None,
        object_group: typing.Optional[str] = None,
        throw_on_side_effect: typing.Optional[bool] = None,
        generate_web_driver_value: typing.Optional[bool] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[RemoteObject, typing.Optional[ExceptionDetails]]]:
    '''
    Calls function with given declaration on the given object. Object group of the result is
    inherited from the target object.

    :param function_declaration: Declaration of the function to call.
    :param object_id: *(Optional)* Identifier of the object to call function on. Either objectId or executionContextId should be specified.
    :param arguments: *(Optional)* Call arguments. All call arguments must belong to the same JavaScript world as the target object.
    :param silent: *(Optional)* In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides ```setPauseOnException```` state.
    :param return_by_value: *(Optional)* Whether the result is expected to be a JSON object which should be sent by value.
    :param generate_preview: **(EXPERIMENTAL)** *(Optional)* Whether preview should be generated for the result.
    :param user_gesture: *(Optional)* Whether execution should be treated as initiated by user in the UI.
    :param await_promise: *(Optional)* Whether execution should ````await```` for resulting value and return once awaited promise is resolved.
    :param execution_context_id: *(Optional)* Specifies execution context which global object will be used to call function on. Either executionContextId or objectId should be specified.
    :param object_group: *(Optional)* Symbolic group name that can be used to release multiple objects. If objectGroup is not specified and objectId is, objectGroup will be inherited from object.
    :param throw_on_side_effect: **(EXPERIMENTAL)** *(Optional)* Whether to throw an exception if side effect cannot be ruled out during evaluation.
    :param generate_web_driver_value: **(EXPERIMENTAL)** *(Optional)* Whether the result should contain ````webDriverValue````, serialized according to https://w3c.github.io/webdriver-bidi. This is mutually exclusive with ````returnByValue````, but resulting ````objectId``` is still provided.
    :returns: A tuple with the following items:

        0. **result** - Call result.
        1. **exceptionDetails** - *(Optional)* Exception details.
    '''
    # Build the parameter payload, omitting unset optional arguments.
    params: T_JSON_DICT = {'functionDeclaration': function_declaration}
    if object_id is not None:
        params['objectId'] = object_id.to_json()
    if arguments is not None:
        params['arguments'] = [arg.to_json() for arg in arguments]
    if silent is not None:
        params['silent'] = silent
    if return_by_value is not None:
        params['returnByValue'] = return_by_value
    if generate_preview is not None:
        params['generatePreview'] = generate_preview
    if user_gesture is not None:
        params['userGesture'] = user_gesture
    if await_promise is not None:
        params['awaitPromise'] = await_promise
    if execution_context_id is not None:
        params['executionContextId'] = execution_context_id.to_json()
    if object_group is not None:
        params['objectGroup'] = object_group
    if throw_on_side_effect is not None:
        params['throwOnSideEffect'] = throw_on_side_effect
    if generate_web_driver_value is not None:
        params['generateWebDriverValue'] = generate_web_driver_value
    # Yield the command, then decode the CDP response.
    response = yield {'method': 'Runtime.callFunctionOn', 'params': params}
    return (
        RemoteObject.from_json(response['result']),
        ExceptionDetails.from_json(response['exceptionDetails']) if 'exceptionDetails' in response else None
    )
def compile_script(
        expression: str,
        source_url: str,
        persist_script: bool,
        execution_context_id: typing.Optional[ExecutionContextId] = None
    ) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[typing.Optional[ScriptId], typing.Optional[ExceptionDetails]]]:
    '''
    Compiles expression.

    :param expression: Expression to compile.
    :param source_url: Source url to be set for the script.
    :param persist_script: Specifies whether the compiled script should be persisted.
    :param execution_context_id: *(Optional)* Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page.
    :returns: A tuple with the following items:

        0. **scriptId** - *(Optional)* Id of the script.
        1. **exceptionDetails** - *(Optional)* Exception details.
    '''
    # Build the parameter payload, omitting the unset optional argument.
    params: T_JSON_DICT = {
        'expression': expression,
        'sourceURL': source_url,
        'persistScript': persist_script,
    }
    if execution_context_id is not None:
        params['executionContextId'] = execution_context_id.to_json()
    # Yield the command, then decode the CDP response.
    response = yield {'method': 'Runtime.compileScript', 'params': params}
    return (
        ScriptId.from_json(response['scriptId']) if 'scriptId' in response else None,
        ExceptionDetails.from_json(response['exceptionDetails']) if 'exceptionDetails' in response else None
    )
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Disables reporting of execution contexts creation.
    '''
    # No parameters; the response payload is ignored.
    yield {'method': 'Runtime.disable'}
def discard_console_entries() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
    '''
    Discards collected exceptions and console API calls.
    '''
    # No parameters; the response payload is ignored.
    yield {'method': 'Runtime.discardConsoleEntries'}
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
Enables reporting of execution contexts creation by means of ``executionContextCreated`` event.
When the reporting gets enabled the event will be sent immediately for each existing execution
context.
'''
cmd_dict: T_JSON_DICT = {
'method': 'Runtime.enable',
}
json = yield cmd_dict
def evaluate(
expression: str,
object_group: typing.Optional[str] = None,
include_command_line_api: typing.Optional[bool] = None,
silent: typing.Optional[bool] = None,
context_id: typing.Optional[ExecutionContextId] = None,
return_by_value: typing.Optional[bool] = None,
generate_preview: typing.Optional[bool] = None,
user_gesture: typing.Optional[bool] = None,
await_promise: typing.Optional[bool] = None,
throw_on_side_effect: typing.Optional[bool] = None,
timeout: typing.Optional[TimeDelta] = None,
disable_breaks: typing.Optional[bool] = None,
repl_mode: typing.Optional[bool] = None,
allow_unsafe_eval_blocked_by_csp: typing.Optional[bool] = None,
unique_context_id: typing.Optional[str] = None,
generate_web_driver_value: typing.Optional[bool] = None
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[RemoteObject, typing.Optional[ExceptionDetails]]]:
'''
Evaluates expression on global object.
:param expression: Expression to evaluate.
:param object_group: *(Optional)* Symbolic group name that can be used to release multiple objects.
:param include_command_line_api: *(Optional)* Determines whether Command Line API should be available during the evaluation.
:param silent: *(Optional)* In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides ```setPauseOnException```` state.
:param context_id: *(Optional)* Specifies in which execution context to perform evaluation. If the parameter is omitted the evaluation will be performed in the context of the inspected page. This is mutually exclusive with ````uniqueContextId````, which offers an alternative way to identify the execution context that is more reliable in a multi-process environment.
:param return_by_value: *(Optional)* Whether the result is expected to be a JSON object that should be sent by value.
:param generate_preview: **(EXPERIMENTAL)** *(Optional)* Whether preview should be generated for the result.
:param user_gesture: *(Optional)* Whether execution should be treated as initiated by user in the UI.
:param await_promise: *(Optional)* Whether execution should ````await```` for resulting value and return once awaited promise is resolved.
:param throw_on_side_effect: **(EXPERIMENTAL)** *(Optional)* Whether to throw an exception if side effect cannot be ruled out during evaluation. This implies ````disableBreaks```` below.
:param timeout: **(EXPERIMENTAL)** *(Optional)* Terminate execution after timing out (number of milliseconds).
:param disable_breaks: **(EXPERIMENTAL)** *(Optional)* Disable breakpoints during execution.
:param repl_mode: **(EXPERIMENTAL)** *(Optional)* Setting this flag to true enables ````let```` re-declaration and top-level ````await````. Note that ````let```` variables can only be re-declared if they originate from ````replMode```` themselves.
:param allow_unsafe_eval_blocked_by_csp: **(EXPERIMENTAL)** *(Optional)* The Content Security Policy (CSP) for the target might block 'unsafe-eval' which includes eval(), Function(), setTimeout() and setInterval() when called with non-callable arguments. This flag bypasses CSP for this evaluation and allows unsafe-eval. Defaults to true.
:param unique_context_id: **(EXPERIMENTAL)** *(Optional)* An alternative way to specify the execution context to evaluate in. Compared to contextId that may be reused across processes, this is guaranteed to be system-unique, so it can be used to prevent accidental evaluation of the expression in context different than intended (e.g. as a result of navigation across process boundaries). This is mutually exclusive with ````contextId```.
:param generate_web_driver_value: **(EXPERIMENTAL)** *(Optional)* Whether the result should be serialized according to https://w3c.github.io/webdriver-bidi.
:returns: A tuple with the following items:
0. **result** - Evaluation result.
1. **exceptionDetails** - *(Optional)* Exception details.
'''
params: T_JSON_DICT = dict()
params['expression'] = expression
if object_group is not None:
params['objectGroup'] = object_group
if include_command_line_api is not None:
params['includeCommandLineAPI'] = include_command_line_api
if silent is not None:
params['silent'] = silent
if context_id is not None:
params['contextId'] = context_id.to_json()
if return_by_value is not None:
params['returnByValue'] = return_by_value
if generate_preview is not None:
params['generatePreview'] = generate_preview
if user_gesture is not None:
params['userGesture'] = user_gesture
if await_promise is not None:
params['awaitPromise'] = await_promise
if throw_on_side_effect is not None:
params['throwOnSideEffect'] = throw_on_side_effect
if timeout is not None:
params['timeout'] = timeout.to_json()
if disable_breaks is not None:
params['disableBreaks'] = disable_breaks
if repl_mode is not None:
params['replMode'] = repl_mode
if allow_unsafe_eval_blocked_by_csp is not None:
params['allowUnsafeEvalBlockedByCSP'] = allow_unsafe_eval_blocked_by_csp
if unique_context_id is not None:
params['uniqueContextId'] = unique_context_id
if generate_web_driver_value is not None:
params['generateWebDriverValue'] = generate_web_driver_value
cmd_dict: T_JSON_DICT = {
'method': 'Runtime.evaluate',
'params': params,
}
json = yield cmd_dict
return (
RemoteObject.from_json(json['result']),
ExceptionDetails.from_json(json['exceptionDetails']) if 'exceptionDetails' in json else None
)
def get_isolate_id() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,str]:
'''
Returns the isolate id.
**EXPERIMENTAL**
:returns: The isolate id.
'''
cmd_dict: T_JSON_DICT = {
'method': 'Runtime.getIsolateId',
}
json = yield cmd_dict
return str(json['id'])
def get_heap_usage() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[float, float]]:
'''
Returns the JavaScript heap usage.
It is the total usage of the corresponding isolate not scoped to a particular Runtime.
**EXPERIMENTAL**
:returns: A tuple with the following items:
0. **usedSize** - Used heap size in bytes.
1. **totalSize** - Allocated heap size in bytes.
'''
cmd_dict: T_JSON_DICT = {
'method': 'Runtime.getHeapUsage',
}
json = yield cmd_dict
return (
float(json['usedSize']),
float(json['totalSize'])
)
def get_properties(
object_id: RemoteObjectId,
own_properties: typing.Optional[bool] = None,
accessor_properties_only: typing.Optional[bool] = None,
generate_preview: typing.Optional[bool] = None,
non_indexed_properties_only: typing.Optional[bool] = None
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[typing.List[PropertyDescriptor], typing.Optional[typing.List[InternalPropertyDescriptor]], typing.Optional[typing.List[PrivatePropertyDescriptor]], typing.Optional[ExceptionDetails]]]:
'''
Returns properties of a given object. Object group of the result is inherited from the target
object.
:param object_id: Identifier of the object to return properties for.
:param own_properties: *(Optional)* If true, returns properties belonging only to the element itself, not to its prototype chain.
:param accessor_properties_only: **(EXPERIMENTAL)** *(Optional)* If true, returns accessor properties (with getter/setter) only; internal properties are not returned either.
:param generate_preview: **(EXPERIMENTAL)** *(Optional)* Whether preview should be generated for the results.
:param non_indexed_properties_only: **(EXPERIMENTAL)** *(Optional)* If true, returns non-indexed properties only.
:returns: A tuple with the following items:
0. **result** - Object properties.
1. **internalProperties** - *(Optional)* Internal object properties (only of the element itself).
2. **privateProperties** - *(Optional)* Object private properties.
3. **exceptionDetails** - *(Optional)* Exception details.
'''
params: T_JSON_DICT = dict()
params['objectId'] = object_id.to_json()
if own_properties is not None:
params['ownProperties'] = own_properties
if accessor_properties_only is not None:
params['accessorPropertiesOnly'] = accessor_properties_only
if generate_preview is not None:
params['generatePreview'] = generate_preview
if non_indexed_properties_only is not None:
params['nonIndexedPropertiesOnly'] = non_indexed_properties_only
cmd_dict: T_JSON_DICT = {
'method': 'Runtime.getProperties',
'params': params,
}
json = yield cmd_dict
return (
[PropertyDescriptor.from_json(i) for i in json['result']],
[InternalPropertyDescriptor.from_json(i) for i in json['internalProperties']] if 'internalProperties' in json else None,
[PrivatePropertyDescriptor.from_json(i) for i in json['privateProperties']] if 'privateProperties' in json else None,
ExceptionDetails.from_json(json['exceptionDetails']) if 'exceptionDetails' in json else None
)
def global_lexical_scope_names(
execution_context_id: typing.Optional[ExecutionContextId] = None
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[str]]:
'''
Returns all let, const and class variables from global scope.
:param execution_context_id: *(Optional)* Specifies in which execution context to lookup global scope variables.
:returns:
'''
params: T_JSON_DICT = dict()
if execution_context_id is not None:
params['executionContextId'] = execution_context_id.to_json()
cmd_dict: T_JSON_DICT = {
'method': 'Runtime.globalLexicalScopeNames',
'params': params,
}
json = yield cmd_dict
return [str(i) for i in json['names']]
def query_objects(
prototype_object_id: RemoteObjectId,
object_group: typing.Optional[str] = None
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,RemoteObject]:
'''
:param prototype_object_id: Identifier of the prototype to return objects for.
:param object_group: *(Optional)* Symbolic group name that can be used to release the results.
:returns: Array with objects.
'''
params: T_JSON_DICT = dict()
params['prototypeObjectId'] = prototype_object_id.to_json()
if object_group is not None:
params['objectGroup'] = object_group
cmd_dict: T_JSON_DICT = {
'method': 'Runtime.queryObjects',
'params': params,
}
json = yield cmd_dict
return RemoteObject.from_json(json['objects'])
def release_object(
object_id: RemoteObjectId
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
Releases remote object with given id.
:param object_id: Identifier of the object to release.
'''
params: T_JSON_DICT = dict()
params['objectId'] = object_id.to_json()
cmd_dict: T_JSON_DICT = {
'method': 'Runtime.releaseObject',
'params': params,
}
json = yield cmd_dict
def release_object_group(
object_group: str
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
Releases all remote objects that belong to a given group.
:param object_group: Symbolic object group name.
'''
params: T_JSON_DICT = dict()
params['objectGroup'] = object_group
cmd_dict: T_JSON_DICT = {
'method': 'Runtime.releaseObjectGroup',
'params': params,
}
json = yield cmd_dict
def run_if_waiting_for_debugger() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
Tells inspected instance to run if it was waiting for debugger to attach.
'''
cmd_dict: T_JSON_DICT = {
'method': 'Runtime.runIfWaitingForDebugger',
}
json = yield cmd_dict
def run_script(
script_id: ScriptId,
execution_context_id: typing.Optional[ExecutionContextId] = None,
object_group: typing.Optional[str] = None,
silent: typing.Optional[bool] = None,
include_command_line_api: typing.Optional[bool] = None,
return_by_value: typing.Optional[bool] = None,
generate_preview: typing.Optional[bool] = None,
await_promise: typing.Optional[bool] = None
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[RemoteObject, typing.Optional[ExceptionDetails]]]:
'''
Runs script with given id in a given context.
:param script_id: Id of the script to run.
:param execution_context_id: *(Optional)* Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page.
:param object_group: *(Optional)* Symbolic group name that can be used to release multiple objects.
:param silent: *(Optional)* In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides ```setPauseOnException```` state.
:param include_command_line_api: *(Optional)* Determines whether Command Line API should be available during the evaluation.
:param return_by_value: *(Optional)* Whether the result is expected to be a JSON object which should be sent by value.
:param generate_preview: *(Optional)* Whether preview should be generated for the result.
:param await_promise: *(Optional)* Whether execution should ````await``` for resulting value and return once awaited promise is resolved.
:returns: A tuple with the following items:
0. **result** - Run result.
1. **exceptionDetails** - *(Optional)* Exception details.
'''
params: T_JSON_DICT = dict()
params['scriptId'] = script_id.to_json()
if execution_context_id is not None:
params['executionContextId'] = execution_context_id.to_json()
if object_group is not None:
params['objectGroup'] = object_group
if silent is not None:
params['silent'] = silent
if include_command_line_api is not None:
params['includeCommandLineAPI'] = include_command_line_api
if return_by_value is not None:
params['returnByValue'] = return_by_value
if generate_preview is not None:
params['generatePreview'] = generate_preview
if await_promise is not None:
params['awaitPromise'] = await_promise
cmd_dict: T_JSON_DICT = {
'method': 'Runtime.runScript',
'params': params,
}
json = yield cmd_dict
return (
RemoteObject.from_json(json['result']),
ExceptionDetails.from_json(json['exceptionDetails']) if 'exceptionDetails' in json else None
)
def set_async_call_stack_depth(
max_depth: int
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
Enables or disables async call stacks tracking.
:param max_depth: Maximum depth of async call stacks. Setting to ```0``` will effectively disable collecting async call stacks (default).
'''
params: T_JSON_DICT = dict()
params['maxDepth'] = max_depth
cmd_dict: T_JSON_DICT = {
'method': 'Runtime.setAsyncCallStackDepth',
'params': params,
}
json = yield cmd_dict
def set_custom_object_formatter_enabled(
enabled: bool
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
**EXPERIMENTAL**
:param enabled:
'''
params: T_JSON_DICT = dict()
params['enabled'] = enabled
cmd_dict: T_JSON_DICT = {
'method': 'Runtime.setCustomObjectFormatterEnabled',
'params': params,
}
json = yield cmd_dict
def set_max_call_stack_size_to_capture(
size: int
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
**EXPERIMENTAL**
:param size:
'''
params: T_JSON_DICT = dict()
params['size'] = size
cmd_dict: T_JSON_DICT = {
'method': 'Runtime.setMaxCallStackSizeToCapture',
'params': params,
}
json = yield cmd_dict
def terminate_execution() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
Terminate current or next JavaScript execution.
Will cancel the termination when the outer-most script execution ends.
**EXPERIMENTAL**
'''
cmd_dict: T_JSON_DICT = {
'method': 'Runtime.terminateExecution',
}
json = yield cmd_dict
def add_binding(
name: str,
execution_context_id: typing.Optional[ExecutionContextId] = None,
execution_context_name: typing.Optional[str] = None
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
If executionContextId is empty, adds binding with the given name on the
global objects of all inspected contexts, including those created later,
bindings survive reloads.
Binding function takes exactly one argument, this argument should be string,
in case of any other input, function throws an exception.
Each binding function call produces Runtime.bindingCalled notification.
**EXPERIMENTAL**
:param name:
:param execution_context_id: *(Optional)* If specified, the binding would only be exposed to the specified execution context. If omitted and ```executionContextName```` is not set, the binding is exposed to all execution contexts of the target. This parameter is mutually exclusive with ````executionContextName````. Deprecated in favor of ````executionContextName```` due to an unclear use case and bugs in implementation (crbug.com/1169639). ````executionContextId```` will be removed in the future.
:param execution_context_name: **(EXPERIMENTAL)** *(Optional)* If specified, the binding is exposed to the executionContext with matching name, even for contexts created after the binding is added. See also ````ExecutionContext.name```` and ````worldName```` parameter to ````Page.addScriptToEvaluateOnNewDocument````. This parameter is mutually exclusive with ````executionContextId```.
'''
params: T_JSON_DICT = dict()
params['name'] = name
if execution_context_id is not None:
params['executionContextId'] = execution_context_id.to_json()
if execution_context_name is not None:
params['executionContextName'] = execution_context_name
cmd_dict: T_JSON_DICT = {
'method': 'Runtime.addBinding',
'params': params,
}
json = yield cmd_dict
def remove_binding(
name: str
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
This method does not remove binding function from global object but
unsubscribes current runtime agent from Runtime.bindingCalled notifications.
**EXPERIMENTAL**
:param name:
'''
params: T_JSON_DICT = dict()
params['name'] = name
cmd_dict: T_JSON_DICT = {
'method': 'Runtime.removeBinding',
'params': params,
}
json = yield cmd_dict
def get_exception_details(
error_object_id: RemoteObjectId
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Optional[ExceptionDetails]]:
'''
This method tries to lookup and populate exception details for a
JavaScript Error object.
Note that the stackTrace portion of the resulting exceptionDetails will
only be populated if the Runtime domain was enabled at the time when the
Error was thrown.
**EXPERIMENTAL**
:param error_object_id: The error object for which to resolve the exception details.
:returns:
'''
params: T_JSON_DICT = dict()
params['errorObjectId'] = error_object_id.to_json()
cmd_dict: T_JSON_DICT = {
'method': 'Runtime.getExceptionDetails',
'params': params,
}
json = yield cmd_dict
return ExceptionDetails.from_json(json['exceptionDetails']) if 'exceptionDetails' in json else None
@event_class('Runtime.bindingCalled')
@dataclass
class BindingCalled:
'''
**EXPERIMENTAL**
Notification is issued every time when binding is called.
'''
name: str
payload: str
#: Identifier of the context where the call was made.
execution_context_id: ExecutionContextId
@classmethod
def from_json(cls, json: T_JSON_DICT) -> BindingCalled:
return cls(
name=str(json['name']),
payload=str(json['payload']),
execution_context_id=ExecutionContextId.from_json(json['executionContextId'])
)
@event_class('Runtime.consoleAPICalled')
@dataclass
class ConsoleAPICalled:
'''
Issued when console API was called.
'''
#: Type of the call.
type_: str
#: Call arguments.
args: typing.List[RemoteObject]
#: Identifier of the context where the call was made.
execution_context_id: ExecutionContextId
#: Call timestamp.
timestamp: Timestamp
#: Stack trace captured when the call was made. The async stack chain is automatically reported for
#: the following call types: ``assert``, ``error``, ``trace``, ``warning``. For other types the async call
#: chain can be retrieved using ``Debugger.getStackTrace`` and ``stackTrace.parentId`` field.
stack_trace: typing.Optional[StackTrace]
#: Console context descriptor for calls on non-default console context (not console.*):
#: 'anonymous#unique-logger-id' for call on unnamed context, 'name#unique-logger-id' for call
#: on named context.
context: typing.Optional[str]
@classmethod
def from_json(cls, json: T_JSON_DICT) -> ConsoleAPICalled:
return cls(
type_=str(json['type']),
args=[RemoteObject.from_json(i) for i in json['args']],
execution_context_id=ExecutionContextId.from_json(json['executionContextId']),
timestamp=Timestamp.from_json(json['timestamp']),
stack_trace=StackTrace.from_json(json['stackTrace']) if 'stackTrace' in json else None,
context=str(json['context']) if 'context' in json else None
)
@event_class('Runtime.exceptionRevoked')
@dataclass
class ExceptionRevoked:
'''
Issued when unhandled exception was revoked.
'''
#: Reason describing why exception was revoked.
reason: str
#: The id of revoked exception, as reported in ``exceptionThrown``.
exception_id: int
@classmethod
def from_json(cls, json: T_JSON_DICT) -> ExceptionRevoked:
return cls(
reason=str(json['reason']),
exception_id=int(json['exceptionId'])
)
@event_class('Runtime.exceptionThrown')
@dataclass
class ExceptionThrown:
'''
Issued when exception was thrown and unhandled.
'''
#: Timestamp of the exception.
timestamp: Timestamp
exception_details: ExceptionDetails
@classmethod
def from_json(cls, json: T_JSON_DICT) -> ExceptionThrown:
return cls(
timestamp=Timestamp.from_json(json['timestamp']),
exception_details=ExceptionDetails.from_json(json['exceptionDetails'])
)
@event_class('Runtime.executionContextCreated')
@dataclass
class ExecutionContextCreated:
'''
Issued when new execution context is created.
'''
#: A newly created execution context.
context: ExecutionContextDescription
@classmethod
def from_json(cls, json: T_JSON_DICT) -> ExecutionContextCreated:
return cls(
context=ExecutionContextDescription.from_json(json['context'])
)
@event_class('Runtime.executionContextDestroyed')
@dataclass
class ExecutionContextDestroyed:
'''
Issued when execution context is destroyed.
'''
#: Id of the destroyed context
execution_context_id: ExecutionContextId
@classmethod
def from_json(cls, json: T_JSON_DICT) -> ExecutionContextDestroyed:
return cls(
execution_context_id=ExecutionContextId.from_json(json['executionContextId'])
)
@event_class('Runtime.executionContextsCleared')
@dataclass
class ExecutionContextsCleared:
'''
Issued when all executionContexts were cleared in browser
'''
@classmethod
def from_json(cls, json: T_JSON_DICT) -> ExecutionContextsCleared:
return cls(
)
@event_class('Runtime.inspectRequested')
@dataclass
class InspectRequested:
'''
Issued when object should be inspected (for example, as a result of inspect() command line API
call).
'''
object_: RemoteObject
hints: dict
#: Identifier of the context where the call was made.
execution_context_id: typing.Optional[ExecutionContextId]
@classmethod
def from_json(cls, json: T_JSON_DICT) -> InspectRequested:
return cls(
object_=RemoteObject.from_json(json['object']),
hints=dict(json['hints']),
execution_context_id=ExecutionContextId.from_json(json['executionContextId']) if 'executionContextId' in json else None
)
| [
"nicolas.garcia@rocketbot.com"
] | nicolas.garcia@rocketbot.com |
a9ac46049f6f7f1229187b3476228f9528f6a9e4 | 53e8762caede13acfdc2071a2b8def57128dd3e4 | /Arshia_phase_2/hyperstar_for_skipgram_cbow_w2v/evaluate1.py | 397428a9e2ac03a2934f0a6d629db6bbb2b12980 | [] | no_license | manikyaswathi/SemEval2018HypernymDiscovery | 8c17cf4d16fa48b2719381752b18386acde6c4ee | 03ee054bf0266fed5337b2a8bba14e8d7fec31aa | refs/heads/master | 2020-03-10T14:52:28.582128 | 2017-12-15T20:40:32 | 2017-12-15T20:40:32 | 129,437,112 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,108 | py | #!/usr/bin/env python
from batch_sim.nn_vec import nn_vec
import argparse
import csv
import glob
import os
import pickle
import re
import sys
import gensim
from collections import defaultdict
import numpy as np
from projlearn import MODELS
from multiprocessing import cpu_count
parser = argparse.ArgumentParser(description='Evaluation.')
parser.add_argument('--w2v', default='w2v.txt', nargs='?', help='Path to the word2vec model.')
parser.add_argument('--test', default='test.npz', nargs='?', help='Path to the test set.')
parser.add_argument('--subsumptions', default='subsumptions-test.txt', nargs='?', help='Path to the test subsumptions.')
parser.add_argument('--non_optimized', action='store_true', help='Disable most similar words calculation optimization.')
parser.add_argument('--threads', nargs='?', type=int, default=cpu_count(), help='Number of threads.')
parser.add_argument('path', nargs='*', help='List of the directories with results.')
args = vars(parser.parse_args())
if not len(sys.argv) > 1:
print('Usage: %s path...' % (sys.argv[0]))
sys.exit(1)
WD = os.path.dirname(os.path.realpath(__file__))
w2v = gensim.models.KeyedVectors.load(os.path.join(WD, args['w2v']))
w2v.init_sims(replace=True)
with np.load(args['test']) as npz:
X_index_test = npz['X_index']
Y_all_test = npz['Y_all']
Z_all_test = npz['Z_all']
X_all_test = Z_all_test[X_index_test[:, 0], :]
subsumptions_test = []
with open(args['subsumptions']) as f:
reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
subsumptions_test.append((row[0], row[1]))
assert len(subsumptions_test) == X_all_test.shape[0]
def extract(clusters, Y_hat_clusters):
cluster_indices = {cluster: 0 for cluster in Y_hat_clusters}
Y_all_hat = []
for cluster in clusters:
Y_hat = Y_hat_clusters[cluster][cluster_indices[cluster]]
cluster_indices[cluster] += 1
Y_all_hat.append(Y_hat)
assert sum(cluster_indices.values()) == len(clusters)
return np.array(Y_all_hat)
def compute_ats(measures):
return [sum(measures[j].values()) / len(subsumptions_test) for j in range(len(measures))]
def compute_auc(ats):
return sum([ats[j] + ats[j + 1] for j in range(0, len(ats) - 1)]) / 2
for path in args['path']:
print('Doing "%s" on "%s" and "%s".' % (path, args['test'], args['subsumptions']))
kmeans = pickle.load(open(os.path.join(path, 'kmeans.pickle'), 'rb'))
print('The number of clusters is %d.' % (kmeans.n_clusters))
clusters_test = kmeans.predict(Y_all_test - X_all_test)
for model in MODELS:
try:
with np.load(os.path.join(path, '%s.test.npz') % model) as npz:
Y_hat_clusters = {int(cluster): npz[cluster] for cluster in npz.files}
except FileNotFoundError:
Y_hat_clusters = {}
if kmeans.n_clusters != len(Y_hat_clusters):
print('Missing the output for the model "%s"!' % model)
continue
Y_all_hat = extract(clusters_test, Y_hat_clusters)
assert len(subsumptions_test) == Y_all_hat.shape[0]
measures = [{} for _ in range(10)]
if not args['non_optimized']:
# normalize Y_all_hat to make dot product equeal to cosine and monotonically decreasing function of euclidean distance
Y_all_hat_norm = Y_all_hat / np.linalg.norm(Y_all_hat,axis=1)[:,np.newaxis]
print('nn_vec...')
similar_indices = nn_vec(Y_all_hat_norm, w2v.syn0norm, topn=10, sort=True, return_sims=False, nthreads=args['threads'], verbose=False)
print('nn_vec results covert...')
similar_words = [[w2v.index2word[ind] for ind in row] for row in similar_indices]
print('done')
file_ptr_ms = open(str(model)+"_test_candidates1",'w')
file_ptr_hypo = open("test_hypo1",'w')
file_ptr_gold = open("test_gold1",'w')
prev_hypo = ''
gold_list = ''
out_ms = ''
count = 0
for i, (hyponym, hypernym) in enumerate(subsumptions_test):
if args['non_optimized']:
Y_hat = Y_all_hat[i].reshape(X_all_test.shape[1],)
actual = [w for w,_ in w2v.most_similar(positive=[Y_hat], topn=10)]
else:
actual = similar_words[i]
if count==0 or prev_hypo == hyponym :
gold_list = gold_list + hypernym + '\t'
for word in actual:
out_ms = out_ms + str(word) + "\t"
prev_hypo = hyponym
count=1
elif prev_hypo != hyponym :
out_ms = out_ms + '\n'
gold_list = gold_list + '\n'
file_ptr_ms.write(out_ms)
file_ptr_hypo.write(prev_hypo + '\n')
file_ptr_gold.write(gold_list)
gold_list = ''
out_ms = ''
prev_hypo = hyponym
gold_list = gold_list + hypernym + '\t'
for word in actual:
out_ms = out_ms + str(word) + "\t"
for j in range(0, len(measures)):
measures[j][(hyponym, hypernym)] = 1. if hypernym in actual[:j + 1] else 0.
if (i + 1) % 100 == 0:
ats = compute_ats(measures)
auc = compute_auc(ats)
ats_string = ', '.join(['A@%d=%.6f' % (j + 1, ats[j]) for j in range(len(ats))])
print('%d examples out of %d done for "%s/%s": %s. AUC=%.6f.' % (
i + 1,
len(subsumptions_test),
path,
model,
ats_string,
auc))
file_ptr_ms.close()
file_ptr_hypo.close()
file_ptr_gold.close()
ats = compute_ats(measures)
auc = compute_auc(ats)
ats_string = ', '.join(['A@%d=%.4f' % (j + 1, ats[j]) for j in range(len(ats))])
print('For "%s/%s": overall %s. AUC=%.6f.' % (
path,
model,
ats_string,
auc))
| [
"noreply-github@umn.edu"
] | noreply-github@umn.edu |
f0650ee2992ba411453b76ca97c75777d3496945 | d20c77dccaeaf9bda4527e15d8d800a5772852ac | /13-exec/04-selenium/v08-douyu.py | a466f0143d9eb36da799e8c1fd07640bb369e07f | [] | no_license | xrr314/python | e940d5694731584f71792cc3bcf57bf360ddda65 | 125afc91863e44868812ad16dbe03e2e4ede4038 | refs/heads/master | 2020-03-31T04:20:16.933090 | 2019-06-03T12:07:52 | 2019-06-03T12:07:52 | 151,901,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | '''
爬取斗鱼直播页内容
'''
from selenium import webdriver
import time
from lxml import etree
driver = webdriver.PhantomJS()
def get_page():
driver.get('https://www.douyu.com/directory/all')
time.sleep(5)
html = driver.page_source
return html
def parse(html):
html = etree.HTML(html)
lis = html.xpath('//ul[@id="live-list-contentbox"]/li')
print(len(lis))
for li in lis:
title = li.xpath('./a/div/div/h3/text()')[0].strip()
tag = li.xpath('./a/div/div/span/text()')[0].strip()
author = li.xpath('./a/div/p/span[@class="dy-name ellipsis fl"]/text()')[0].strip()
print(title,tag,author)
def main():
html = get_page()
parse(html)
if __name__ == '__main__':
main() | [
"xrr940314"
] | xrr940314 |
21a5c03abea2d99977cafd2ece99b4a6c52a04ad | e519c645017782661e00c97a6124d16dc4d7a905 | /inception/inception_rap_model.py | 3fbfbf5c50a7943f4dc39def11c273a0a16c6e09 | [] | no_license | tinhtn1508/GRLModel | b7e68b4d7d91faa715c4ce294717666caec251d9 | 21495dac0df20388def0f7deaebc17b74538524a | refs/heads/master | 2022-01-17T13:33:29.372278 | 2019-05-21T12:42:09 | 2019-05-21T12:42:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,440 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Build the Inception v3 network on ImageNet data set.
The Inception v3 architecture is described in http://arxiv.org/abs/1512.00567
Summary of available functions:
inference: Compute inference on the model inputs to make a prediction
loss: Compute the loss of the prediction with respect to the labels
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
from inception.slim import slim
from inception.slim import ops
# Command-line flags parsed elsewhere in the package (TF 1.x flags API).
FLAGS = tf.app.flags.FLAGS

# If a model is trained using multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'

# Batch normalization. Constant governing the exponential moving average of
# the 'global' mean and variance for all activations (passed to conv layers
# via the slim arg_scope in inference()).
BATCHNORM_MOVING_AVERAGE_DECAY = 0.9997

# The decay to use for the exponential moving average of trainable variables
# (used by the training loop when maintaining shadow copies of the weights).
MOVING_AVERAGE_DECAY = 0.9999
def inference(images, num_classes, for_training=False, restore_logits=True,
              scope=None):
    """Build the Inception v3 inference graph (baseline variant).

    Args:
      images: batch of input images from inputs()/distorted_inputs().
      num_classes: number of output classes.
      for_training: if True, configure train-time kernels (e.g. dropout).
      restore_logits: whether the logits layers should be restored; useful
        when fine-tuning with a different num_classes.
      scope: optional name prefix identifying the tower.

    Returns:
      Tuple of (logits, auxiliary_logits); the auxiliary side head is only
      meaningful during training.
    """
    # Batch-norm configuration shared by every conv layer in the tower.
    bn_params = {
        'decay': BATCHNORM_MOVING_AVERAGE_DECAY,  # moving-average decay
        'epsilon': 0.001,                         # guards against zero variance
    }
    # L2 weight decay on conv/FC weights; batch-norm + ReLU on conv layers.
    with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004):
        with slim.arg_scope([slim.ops.conv2d],
                            stddev=0.1,
                            activation=tf.nn.relu,
                            batch_norm_params=bn_params):
            logits, endpoints = slim.inception.inception_v3(
                images,
                dropout_keep_prob=0.8,
                num_classes=num_classes,
                is_training=for_training,
                restore_logits=restore_logits,
                scope=scope)
    # Export activation statistics to TensorBoard.
    _activation_summaries(endpoints)
    # The 'aux_logits' side head is employed as an auxiliary loss in training.
    return logits, endpoints['aux_logits']
def inference_bn(images, num_classes, for_training=False, restore_logits=True,
                 scope=None):
    """Build the Inception v3 inference graph (batch-norm head variant).

    Identical to inference() except that the network body is built by
    slim.inception.inception_v3_bn.

    Args:
      images: batch of input images from inputs()/distorted_inputs().
      num_classes: number of output classes.
      for_training: if True, configure train-time kernels (e.g. dropout).
      restore_logits: whether the logits layers should be restored.
      scope: optional name prefix identifying the tower.

    Returns:
      Tuple of (logits, auxiliary_logits); the side head is training-only.
    """
    # Batch-norm configuration shared by every conv layer in the tower.
    bn_params = {
        'decay': BATCHNORM_MOVING_AVERAGE_DECAY,  # moving-average decay
        'epsilon': 0.001,                         # guards against zero variance
    }
    # L2 weight decay on conv/FC weights; batch-norm + ReLU on conv layers.
    with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004):
        with slim.arg_scope([slim.ops.conv2d],
                            stddev=0.1,
                            activation=tf.nn.relu,
                            batch_norm_params=bn_params):
            logits, endpoints = slim.inception.inception_v3_bn(
                images,
                dropout_keep_prob=0.8,
                num_classes=num_classes,
                is_training=for_training,
                restore_logits=restore_logits,
                scope=scope)
    # Export activation statistics to TensorBoard.
    _activation_summaries(endpoints)
    # The 'aux_logits' side head is employed as an auxiliary loss in training.
    return logits, endpoints['aux_logits']
def inference_roi_head(images, rois, num_classes, for_training=False,
                       restore_logits=True, scope=None):
    """Build the Inception v3 inference graph with an ROI head.

    Same scaffolding as inference(), but the body is built by
    slim.inception.inception_v3_roi_head and additionally consumes
    region-of-interest boxes.

    Args:
      images: batch of input images from inputs()/distorted_inputs().
      rois: regions of interest fed to the ROI head.
      num_classes: number of output classes.
      for_training: if True, configure train-time kernels (e.g. dropout).
      restore_logits: whether the logits layers should be restored.
      scope: optional name prefix identifying the tower.

    Returns:
      Tuple of (logits, auxiliary_logits); the side head is training-only.
    """
    # Batch-norm configuration shared by every conv layer in the tower.
    bn_params = {
        'decay': BATCHNORM_MOVING_AVERAGE_DECAY,  # moving-average decay
        'epsilon': 0.001,                         # guards against zero variance
    }
    # L2 weight decay on conv/FC weights; batch-norm + ReLU on conv layers.
    with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004):
        with slim.arg_scope([slim.ops.conv2d],
                            stddev=0.1,
                            activation=tf.nn.relu,
                            batch_norm_params=bn_params):
            logits, endpoints = slim.inception.inception_v3_roi_head(
                images,
                rois,
                dropout_keep_prob=0.8,
                num_classes=num_classes,
                is_training=for_training,
                restore_logits=restore_logits,
                scope=scope)
    # Export activation statistics to TensorBoard.
    _activation_summaries(endpoints)
    # The 'aux_logits' side head is employed as an auxiliary loss in training.
    return logits, endpoints['aux_logits']
def inference_lstm(images, rois, num_classes, for_training=False,
                   restore_logits=True, scope=None):
    """Build the Inception v3 inference graph with an LSTM head.

    Same scaffolding as inference(), but the body is built by
    slim.inception.inception_v3_lstm and additionally consumes ROI boxes.

    Args:
      images: batch of input images from inputs()/distorted_inputs().
      rois: regions of interest fed to the LSTM head.
      num_classes: number of output classes.
      for_training: if True, configure train-time kernels (e.g. dropout).
      restore_logits: whether the logits layers should be restored.
      scope: optional name prefix identifying the tower.

    Returns:
      Tuple of (logits, auxiliary_logits); the side head is training-only.
    """
    # Batch-norm configuration shared by every conv layer in the tower.
    bn_params = {
        'decay': BATCHNORM_MOVING_AVERAGE_DECAY,  # moving-average decay
        'epsilon': 0.001,                         # guards against zero variance
    }
    # L2 weight decay on conv/FC weights; batch-norm + ReLU on conv layers.
    with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004):
        with slim.arg_scope([slim.ops.conv2d],
                            stddev=0.1,
                            activation=tf.nn.relu,
                            batch_norm_params=bn_params):
            logits, endpoints = slim.inception.inception_v3_lstm(
                images,
                rois,
                dropout_keep_prob=0.8,
                num_classes=num_classes,
                is_training=for_training,
                restore_logits=restore_logits,
                scope=scope)
    # Export activation statistics to TensorBoard.
    _activation_summaries(endpoints)
    # The 'aux_logits' side head is employed as an auxiliary loss in training.
    return logits, endpoints['aux_logits']
def inference_roi_lstm(images, rois, num_classes, for_training=False,
                       restore_logits=True, scope=None):
    """Build the Inception v3 inference graph with an ROI + LSTM head.

    Same scaffolding as inference(), but the body is built by
    slim.inception.inception_v3_roi_lstm and additionally consumes ROI boxes.

    Args:
      images: batch of input images from inputs()/distorted_inputs().
      rois: regions of interest fed to the ROI/LSTM head.
      num_classes: number of output classes.
      for_training: if True, configure train-time kernels (e.g. dropout).
      restore_logits: whether the logits layers should be restored.
      scope: optional name prefix identifying the tower.

    Returns:
      Tuple of (logits, auxiliary_logits); the side head is training-only.
    """
    # Batch-norm configuration shared by every conv layer in the tower.
    bn_params = {
        'decay': BATCHNORM_MOVING_AVERAGE_DECAY,  # moving-average decay
        'epsilon': 0.001,                         # guards against zero variance
    }
    # L2 weight decay on conv/FC weights; batch-norm + ReLU on conv layers.
    with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004):
        with slim.arg_scope([slim.ops.conv2d],
                            stddev=0.1,
                            activation=tf.nn.relu,
                            batch_norm_params=bn_params):
            logits, endpoints = slim.inception.inception_v3_roi_lstm(
                images,
                rois,
                dropout_keep_prob=0.8,
                num_classes=num_classes,
                is_training=for_training,
                restore_logits=restore_logits,
                scope=scope)
    # Export activation statistics to TensorBoard.
    _activation_summaries(endpoints)
    # The 'aux_logits' side head is employed as an auxiliary loss in training.
    return logits, endpoints['aux_logits']
def inference_roi_lstm_bn(images, rois, num_classes, for_training=False,
                          restore_logits=True, scope=None):
    """Build the Inception v3 inference graph with an ROI + LSTM + BN head.

    Same scaffolding as inference(), but the body is built by
    slim.inception.inception_v3_roi_lstm_bn and additionally consumes ROI
    boxes.

    Args:
      images: batch of input images from inputs()/distorted_inputs().
      rois: regions of interest fed to the ROI/LSTM head.
      num_classes: number of output classes.
      for_training: if True, configure train-time kernels (e.g. dropout).
      restore_logits: whether the logits layers should be restored.
      scope: optional name prefix identifying the tower.

    Returns:
      Tuple of (logits, auxiliary_logits); the side head is training-only.
    """
    # Batch-norm configuration shared by every conv layer in the tower.
    bn_params = {
        'decay': BATCHNORM_MOVING_AVERAGE_DECAY,  # moving-average decay
        'epsilon': 0.001,                         # guards against zero variance
    }
    # L2 weight decay on conv/FC weights; batch-norm + ReLU on conv layers.
    with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004):
        with slim.arg_scope([slim.ops.conv2d],
                            stddev=0.1,
                            activation=tf.nn.relu,
                            batch_norm_params=bn_params):
            logits, endpoints = slim.inception.inception_v3_roi_lstm_bn(
                images,
                rois,
                dropout_keep_prob=0.8,
                num_classes=num_classes,
                is_training=for_training,
                restore_logits=restore_logits,
                scope=scope)
    # Export activation statistics to TensorBoard.
    _activation_summaries(endpoints)
    # The 'aux_logits' side head is employed as an auxiliary loss in training.
    return logits, endpoints['aux_logits']
def inference_roi_lstm_loc2glo(images, rois, num_classes, for_training=False,
                               restore_logits=True, scope=None):
    """Build the Inception v3 inference graph (ROI + LSTM, local-to-global).

    Same scaffolding as inference(), but the body is built by
    slim.inception.inception_v3_roi_lstm_loc2glo and additionally consumes
    ROI boxes.

    Args:
      images: batch of input images from inputs()/distorted_inputs().
      rois: regions of interest fed to the ROI/LSTM head.
      num_classes: number of output classes.
      for_training: if True, configure train-time kernels (e.g. dropout).
      restore_logits: whether the logits layers should be restored.
      scope: optional name prefix identifying the tower.

    Returns:
      Tuple of (logits, auxiliary_logits); the side head is training-only.
    """
    # Batch-norm configuration shared by every conv layer in the tower.
    bn_params = {
        'decay': BATCHNORM_MOVING_AVERAGE_DECAY,  # moving-average decay
        'epsilon': 0.001,                         # guards against zero variance
    }
    # L2 weight decay on conv/FC weights; batch-norm + ReLU on conv layers.
    with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004):
        with slim.arg_scope([slim.ops.conv2d],
                            stddev=0.1,
                            activation=tf.nn.relu,
                            batch_norm_params=bn_params):
            logits, endpoints = slim.inception.inception_v3_roi_lstm_loc2glo(
                images,
                rois,
                dropout_keep_prob=0.8,
                num_classes=num_classes,
                is_training=for_training,
                restore_logits=restore_logits,
                scope=scope)
    # Export activation statistics to TensorBoard.
    _activation_summaries(endpoints)
    # The 'aux_logits' side head is employed as an auxiliary loss in training.
    return logits, endpoints['aux_logits']
def loss(logits, labels, batch_size=None):
    """Add the weighted sigmoid cross-entropy losses for the model.

    The final loss is not returned. Each loss is registered in the
    slim.losses collection; the losses are accumulated in tower_loss()
    and summed to compute the total loss.

    Args:
      logits: list of logits from inference(); element 0 is the main head,
        element 1 the auxiliary side head. Each is a 2-D float Tensor.
      labels: multi-label targets matching the logits' last dimension
        (51 attributes per the weight table below).
      batch_size: number of examples per batch; defaults to FLAGS.batch_size.
    """
    # Kept for interface compatibility with existing callers (and so a
    # missing FLAGS.batch_size still fails loudly as before).
    if not batch_size:
        batch_size = FLAGS.batch_size
    # Per-attribute weights for the sigmoid cross-entropy. The values are
    # presumably positive-example frequencies used to counter class
    # imbalance on the RAP attribute set — TODO confirm against the dataset.
    label_weight = [0.3049, 0.0102, 0.4294, 0.5604, 0.1453, 0.7437, 0.0963, 0.9466, 0.0478, 0.0043, 0.1884, 0.9451, 0.017, 0.073, 0.0105, 0.2213, 0.1238, 0.0582, 0.2289, 0.1715, 0.3151, 0.0226, 0.0309, 0.0176, 0.5624, 0.065, 0.065, 0.065, 0.267, 0.1326, 0.3024, 0.2718, 0.1236, 0.0124, 0.1572, 0.0179, 0.0638, 0.028, 0.0421, 0.0272, 0.0124, 0.0238, 0.293, 0.0338, 0.0329, 0.0916, 0.0246, 0.0101, 0.017, 0.0229, 0.1278]
    # Cross entropy loss for the main prediction head.
    slim.losses.weighted_sigmoid_cross_entropy_loss(logits[0],
                                                    labels,
                                                    label_weight,
                                                    label_smoothing=0.1,
                                                    weight=1.0)
    # Cross entropy loss for the auxiliary side head (down-weighted).
    slim.losses.weighted_sigmoid_cross_entropy_loss(logits[1],
                                                    labels,
                                                    label_weight,
                                                    label_smoothing=0.1,
                                                    weight=0.4,
                                                    scope='aux_loss')
def _activation_summary(x):
    """Attach a histogram and a sparsity summary to one activation tensor.

    Args:
      x: activation Tensor.
    """
    # Strip any 'tower_N/' prefix so multi-GPU runs share summary names,
    # keeping the TensorBoard presentation readable.
    name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.summary.histogram(name + '/activations', x)
    tf.summary.scalar(name + '/sparsity', tf.nn.zero_fraction(x))
def _activation_summaries(endpoints):
    """Attach activation summaries to every endpoint tensor."""
    with tf.name_scope('summaries'):
        for tensor in endpoints.values():
            _activation_summary(tensor)
| [
"slf12thuss@163.com"
] | slf12thuss@163.com |
a7e3300d975a841171e8c857c965142b30239106 | 23631af0987b3f1d30b0bf8bfcea1bd63159eeba | /gate_api/api/__init__.py | 56f948e75c0f565a6647dec42431e9458c3446f2 | [] | no_license | xuvw/gateapi-python | 08c3c72ff0e2c4713bf3a2ffe0b15d05e57491ca | 1a3f3551cba4a756f76f17b070c3e0c5ff2e88ea | refs/heads/master | 2020-05-25T14:33:35.592775 | 2019-04-02T08:50:25 | 2019-04-02T08:50:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from gate_api.api.futures_api import FuturesApi
from gate_api.api.margin_api import MarginApi
from gate_api.api.spot_api import SpotApi
| [
"revilwang@gmail.com"
] | revilwang@gmail.com |
50fb214882899ea973df69630262b57e20b57534 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D94B/CONQVAD94BUN.py | 8785753a062814ca0ea352440adb08d86535cd20 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,437 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD94BUN import recorddefs
# Segment tree for the UN/EDIFACT D.94B CONQVA message (bots grammar).
# Each dict maps a segment tag (ID) to its occurrence bounds (MIN/MAX);
# segment groups additionally carry their nested segments under LEVEL.
structure = [
    {ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [  # message header envelope
        {ID: 'BGM', MIN: 1, MAX: 1},
        {ID: 'DTM', MIN: 1, MAX: 9},
        {ID: 'AUT', MIN: 0, MAX: 2},
        {ID: 'FTX', MIN: 0, MAX: 5},
        {ID: 'RFF', MIN: 0, MAX: 9, LEVEL: [
            {ID: 'DTM', MIN: 0, MAX: 5},
        ]},
        {ID: 'NAD', MIN: 1, MAX: 9, LEVEL: [  # parties involved
            {ID: 'LOC', MIN: 0, MAX: 25},
            {ID: 'FII', MIN: 0, MAX: 5},
            {ID: 'RFF', MIN: 0, MAX: 10, LEVEL: [
                {ID: 'DTM', MIN: 0, MAX: 5},
            ]},
            {ID: 'DOC', MIN: 0, MAX: 5, LEVEL: [
                {ID: 'DTM', MIN: 0, MAX: 5},
            ]},
            {ID: 'CTA', MIN: 0, MAX: 5, LEVEL: [
                {ID: 'COM', MIN: 0, MAX: 5},
            ]},
        ]},
        {ID: 'BII', MIN: 0, MAX: 100000, LEVEL: [  # bill item detail lines
            {ID: 'RCS', MIN: 0, MAX: 1},
            {ID: 'QTY', MIN: 1, MAX: 6},
            {ID: 'PRI', MIN: 0, MAX: 1},
            {ID: 'LIN', MIN: 1, MAX: 100, LEVEL: [
                {ID: 'IMD', MIN: 0, MAX: 1000, LEVEL: [
                    {ID: 'RFF', MIN: 0, MAX: 5},
                    {ID: 'GIS', MIN: 0, MAX: 5},
                ]},
            ]},
            {ID: 'TAX', MIN: 0, MAX: 5, LEVEL: [
                {ID: 'MOA', MIN: 0, MAX: 1},
                {ID: 'LOC', MIN: 0, MAX: 5},
            ]},
        ]},
        {ID: 'CNT', MIN: 0, MAX: 5},
        {ID: 'UNT', MIN: 1, MAX: 1},  # message trailer
    ]},
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
d7cbffddf5802e40d531f1b6a26d5a2c7299f3be | 4c1bb75e7267311238fa40761db326bba2a24168 | /application2/testing/test_mock.py | 221ddfd0be3c8b7269ab1786ae7d2e63aa95bf6a | [] | no_license | mobamba1/DockerTask | b6fa45f61b90ddee78b41af055d83f1f90be873f | 9b975f5a93096d3569ef93c08d963f782c08b1aa | refs/heads/master | 2022-12-01T17:49:43.329507 | 2020-08-19T15:18:29 | 2020-08-19T15:18:29 | 288,765,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | from unittest.mock import patch
from flask import url_for
from flask_testing import TestCase
from application2.app import animals, animalsnoises
from application2 import app
class TestBase(TestCase):
    """Base test case that wires the Flask app into Flask-Testing."""

    def create_app(self):
        # Flask-Testing hook: return the application instance under test.
        return app
class TestResponse(TestBase):
    """Response tests for the animalsnoises route."""

    def test_football(self):
        # Stub out requests.get so the route's downstream call returns a
        # canned animal name instead of hitting the network.
        with patch('requests.get') as mocked_get:
            mocked_get.return_value.text = "Dog"
            resp = self.client.get(url_for('animalsnoises'))
            self.assertIn(b'Dog Woof', resp.data)
| [
"kenneth1521412@gmail.com"
] | kenneth1521412@gmail.com |
9f312a1302c6fccc481de9229fb5f60c5f35095e | 439a1a4a95ea2a915c20b12aa49d083d28be5e72 | /miscellaneous_scripts/change_fasta_headers.py | 055ffd86c7db8428591a0b8ae36efcd934725214 | [] | no_license | davidgllund/ARG_analysis_scripts | 611b4b1efa8976a4b5ef37b7b8d67a1c4dbacb63 | 7f1388ab8fa48951534813c850ae62222deebd5a | refs/heads/master | 2023-05-26T23:36:37.365107 | 2021-06-01T12:48:36 | 2021-06-01T12:48:36 | 337,497,994 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,059 | py | #!/usr/bin/env python
#--------------------------------------------------------------------------------------------------------------------
# Script that, given a list of fasta-file headers, a fasta-file, and the name of the output file produced,
# changes the headers of the original fasta to the new headers provided, and outputs the results as a new fasta-file.
#--------------------------------------------------------------------------------------------------------------------
# Import required packages
import sys
from Bio import SeqIO
# Read the replacement headers, one per line. Strip the line terminator from
# each entry: readlines() keeps the trailing '\n', and SeqIO writes record.id
# verbatim, so an embedded newline would split the FASTA header across two
# lines and corrupt the output file.
with open(sys.argv[1]) as header_file:
    new_ids = [line.strip() for line in header_file]

# Rewrite each record with the corresponding new header, preserving order.
# The description is cleared so only the new id appears on the header line.
with open(sys.argv[2]) as original_fasta, open(sys.argv[3], 'w') as corrected_fasta:
    for i, record in enumerate(SeqIO.parse(original_fasta, 'fasta')):
        record.id = new_ids[i]
        record.description = ""
        SeqIO.write(record, corrected_fasta, 'fasta')
"dlund@athena.math.chalmers.se"
] | dlund@athena.math.chalmers.se |
70bc0c7fd60448ce2c089fc9d82d3e94cd8b2e1a | b7d6b87961fc0b641f8d0239a0f56a3c8027f0d6 | /main.py | a716ab90fcf1daeff352423e92716a580e51b3dc | [] | no_license | dcolthar/napalm_tools | ab8e257911710ceacfaee90d1325091dad378ffc | a4b0eb493da173c6da3c9640c31ed723517b4183 | refs/heads/master | 2023-03-02T18:17:04.546009 | 2021-02-11T17:28:04 | 2021-02-11T17:28:04 | 285,040,446 | 1 | 0 | null | 2020-08-04T20:21:17 | 2020-08-04T16:42:07 | Python | UTF-8 | Python | false | false | 12,857 | py | import napalm_common_operations
from getpass import getpass
import threading
from queue import Queue
import pandas as pd
import argparse
import os
class Main():
    """Driver for multithreaded network-device reporting via NAPALM.

    Parses command-line options, loads a host list from an Excel workbook,
    fans the hosts out to worker threads that run the selected operations
    through napalm_common_operations, then writes the aggregated results
    (interface stats, port mappings, MAC tables) back to Excel files.
    """
    def __init__(self):
        """Parse CLI arguments, run the worker pool, and write all reports."""
        args = argparse.ArgumentParser()
        args.add_argument('--get_config', help='will copy the full config to the config_output folder',
                          action='store_true')
        ### legacy tacacs conversion works but want to add in something to prompt prior to changes for each unit and
        ### to show the changes being made ahead of the actual change
        args.add_argument('--tacacs_legacy_upgrade', help='will convert any legacy tacacs config to new format',
                          action='store_true')
        ### Work in progress as well to check ospf neighbor health on links
        args.add_argument('--check_ospf_link_health', help='any interface with ospf neighbor, check health',
                          action='store_true')
        args.add_argument('--interface_uptime_check',
                          help='create a report of interfaces and last time passed traffic on the links ',
                          action='store_true')
        args.add_argument('--port_mapping', help='build out logical info for a port mapping and write to excel',
                          action='store_true')
        args.add_argument('--mac_address_table', help='dump mac address table to a file for the switch',
                          action='store_true')
        args.add_argument('--validate_mac_address_table', help='validate mac addresses in the right vlan',
                          action='store_true')
        args.add_argument('--input_mac_address_table_file', help='name of the mac table file to read from',
                          default='mac_address_table.xlsx')
        args.add_argument('--file_name', help='name of the file to pull hosts from default is host_list.xlsx',
                          default='host_list.xlsx')
        args.add_argument('--port_mapping_output_file',
                          help='name of output file to write port mapping to, default is port_mapping_output.xlsx',
                          default='port_mapping_output.xlsx')
        args.add_argument('--image_filename', help='port mapping report image file, default is generic_company_logo.jpg',
                          default='images/generic_company_logo.jpg')
        # NOTE(review): stray trailing comma below creates a discarded tuple;
        # harmless but presumably unintended.
        args.add_argument('--thread_max', help='number of worker threads to concurrently run default is 5', default=5),
        args.add_argument('--username', help='the username to use to connect to devices', default='admin')
        # parse all the arguments
        arguments = args.parse_args()
        # convert to a dictionary
        self.args_dict = vars(arguments)
        # if we do interface uptime stuff we use this
        self.total_interfaces_info = []
        # this is used if doing a port mapping
        self.port_mapping_info = []
        # this is used if doing a mac address table
        self.mac_address_table_info = {}
        # if we're counting interface stats or doing a port mapping lets remove the old files first
        # NOTE(review): bare except silences any failure here, not just a
        # missing file; except OSError would be tighter.
        try:
            if self.args_dict['interface_uptime_check']:
                os.remove('input_output_interfaces.xlsx')
            elif self.args_dict['port_mapping']:
                os.remove('port_mapping.xlsx')
        except:
            pass
        # kick off the threading
        self.do_thread()
        # at this point when control has returned here we want to make sure all work in queues is complete
        self.work_queue.join()
        # if we have info in the total_interfaces_info we need to write the data thread safe
        if len(self.total_interfaces_info) > 0:
            for interface_summary in self.total_interfaces_info:
                napalm_common_operations.excel_workbook_creation(interface_summary)
        # if we did a port mapping run this
        if len(self.port_mapping_info) > 0:
            napalm_common_operations.port_mapping_excel_creation(
                ports_summary=self.port_mapping_info,
                output_file=self.args_dict['port_mapping_output_file'],
                image_file=self.args_dict['image_filename']
            )
        # if we did a mac address table dump do this
        if len(self.mac_address_table_info) > 0:
            napalm_common_operations.write_mac_addresses_to_excel(
                mac_address_table_info=self.mac_address_table_info
            )
    def do_thread(self):
        """Load the host list, enqueue included hosts, and start workers."""
        # we need the password to use to connect
        password = getpass('Enter the password to use to connect to hosts:')
        # we'll store all our hosts in a queue eventually
        self.work_queue = Queue(maxsize=0)
        # open file to read in host list
        hosts = pd.read_excel(self.args_dict['file_name'])
        # iterate through and add all hosts to the queue
        for index, value in hosts.iterrows():
            if value['Include'] == True:
                # NOTE(review): 'site_name' appears twice in this dict
                # literal; the second occurrence silently wins.
                self.work_queue.put(
                    {
                        'site_name': value['Site Name'],
                        'host': value['IP Address'],
                        'device_role': value['Device Role'],
                        'device_type': value['Device Type'],
                        'device_hostname': value['Device Hostname'],
                        'closet': value['Closet Name'],
                        'site_name': value['Site Name']
                    }
                )
        # now we kick off our threads
        for i in range(int(self.args_dict['thread_max'])):
            worker_thread = threading.Thread(
                target=self.do_work,
                name=f'worker_thread_number_{i}',
                kwargs={
                    'password': password
                }
            )
            # daemonize the thread
            worker_thread.setDaemon(True)
            # start the thread
            worker_thread.start()
    def do_work(self, password):
        """Worker loop: pull hosts off the queue and run the selected tasks.

        Results are appended to the shared aggregation structures on self;
        list.append is used, which is thread-safe under CPython.
        """
        # while our queue isn't empty
        while not self.work_queue.empty():
            try:
                # lets get our host info
                host_info = self.work_queue.get()
                print(f'beginning work on host at ip {host_info["host"]}')
                # lets try to connect
                network_connection = napalm_common_operations.connect_to_network_device(
                    host=host_info['host'],
                    username=self.args_dict['username'],
                    password=password,
                    device_type=host_info['device_type']
                )
                # if the connection failed...False was returned and we just skip this
                if network_connection:
                    facts = network_connection.get_facts()
                    host_info['device_hostname'] = facts['hostname']
                    # now do work depending on what arguments were passed
                    # should we write the full config to a file
                    if self.args_dict['get_config']:
                        napalm_common_operations.get_full_config(network_connection=network_connection,
                                                                 hostname=host_info['device_hostname'])
                    # should we check link health of any interface with ospf neighbors?
                    if self.args_dict['check_ospf_link_health']:
                        pass
                        # napalm_common_operations.check_ospf_link_health(network_connection=network_connection)
                    # should we check interface uptime info
                    if self.args_dict['interface_uptime_check']:
                        results = napalm_common_operations.interface_uptime_check(network_connection=network_connection,
                                                                                  device_type=host_info['device_type'])
                        # append the results to the list, we'll go through this later
                        self.total_interfaces_info.append(results)
                    # if doing a port mapping
                    if self.args_dict['port_mapping']:
                        results = napalm_common_operations.port_mapping(network_connection=network_connection)
                        # we want to add the switches to the proper closet info sub-list
                        # if the sub-list doesn't exist though we need to add it
                        try:
                            # we get the index number of the closet name in the outer list so we can append to it
                            closet_index = [
                                i for i, d in enumerate(self.port_mapping_info) if host_info['closet'] in d.keys()
                            ]
                            # now append the info to the sub-list
                            self.port_mapping_info[closet_index[0]][host_info['closet']].append(results)
                        except Exception as e:
                            # IndexError above means the closet isn't tracked yet; create it.
                            self.port_mapping_info.append({host_info['closet']: [results]})
                            #self.port_mapping_info.append(results)
                    # if a mac address table map dump is desired
                    if self.args_dict['mac_address_table']:
                        site = host_info['site_name']
                        closet = host_info['closet']
                        # mac table for the switch
                        results = napalm_common_operations.get_mac_address_table(network_connection=network_connection)
                        # if the site doesn't exist in the global output...add it
                        if not site in self.mac_address_table_info:
                            self.mac_address_table_info[site] = {}
                        # if the closet isn't in the list on the site...add it
                        if not closet in self.mac_address_table_info[site]:
                            self.mac_address_table_info[site][closet] = []
                        # add the device results to the site and closet
                        self.mac_address_table_info[site][closet].append(
                            results
                        )
                    # if we want to validate mac addresses in a file against a new switch
                    if self.args_dict['validate_mac_address_table']:
                        mac_input_file = self.args_dict['input_mac_address_table_file']
                        # pass the network_connection object and the input mac address table file
                        napalm_common_operations.valiate_mac_table(
                            mac_input_file=mac_input_file,
                            network_connection=network_connection
                        )
                    # should we upgrade legacy tacacs config if it exists?
                    if self.args_dict['tacacs_legacy_upgrade']:
                        pass
                    # # napalm relies on scp server to be enabled for config changes, if not enabled...we need to do so
                    # # use netmiko since config changes in napalm rely on scp server to upload the candidate config
                    # if not napalm_common_operations.check_scp_server(network_connection=network_connection):
                    #     print(f'enabling scp server on host {host_info["host"]}')
                    #     # pass info to netmiko to enable scp server
                    #     napalm_common_operations.enable_scp_server(
                    #         host=host_info['host'],
                    #         username=self.args_dict['username'],
                    #         password=password,
                    #         device_type=host_info['device_type']
                    #     )
                    # napalm_common_operations.convert_tacacs_legacy_to_new_format(
                    #     network_connection=network_connection, host=host_info["host"])
                else:
                    print(f'completing work on host {host_info["device_hostname"]} at ip {host_info["host"]} due to error')
                # disconnect from the network device
                napalm_common_operations.disconnect_from_network_device(network_connection=network_connection)
            except Exception as e:
                # NOTE(review): re-raising here terminates this worker thread
                # after its first error; remaining queue items are handled
                # only by the surviving threads — confirm this is intended.
                print(f'an Exception occurred while connecting\n{e}')
                raise(e)
            finally:
                # signal queue entry work is done
                self.work_queue.task_done()
if __name__ == '__main__':
main = Main() | [
"dcolthar090406@gmail.com"
] | dcolthar090406@gmail.com |
2acecc2df69505f7e1212e4a55334a54cc1983bf | 24ff9c836944e2d1592a36a7962b36978d2fb16d | /gkn/models.py | 1cab2c99373066bffd0b8f74203b3ad80e0d7a1d | [] | no_license | VitalySvyatyuk/gkn | d2d67d4ba02849a64a5d45b4affd2a8ab5004d57 | f184de1578e8d0f25bc0c27530d7e013376fa898 | refs/heads/master | 2021-04-15T13:48:52.317866 | 2016-08-19T09:23:26 | 2016-08-19T09:23:26 | 65,666,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | from django.db import models
class Order(models.Model):
    """A customer order record."""

    # Explicit integer primary key instead of Django's implicit AutoField.
    id = models.IntegerField(primary_key=True)
    name = models.CharField(max_length=30)
    # NOTE(review): FloatField for a monetary total is lossy; DecimalField is
    # the customary choice — flagged only, not changed here.
    total = models.FloatField()
email = models.CharField(max_length=30) | [
"goodgame1945@gmail.com"
] | goodgame1945@gmail.com |
76fc4694ede4b1df5bb7c95f6c8fc6bbfd897ea6 | 053c1ae06f1f5cbbdbc72cce94d4af2e1e1391e3 | /study_sklearn/groupKFold_test.py | 22fad60a299118b456099b6580e069665a64146c | [] | no_license | QilongPan/machine_learning | 2995ea34d759ad8e710b397ae1dead4962a8013e | 0b3566010ce8b5a229592231accc3921f55b0ed4 | refs/heads/master | 2020-04-07T04:26:13.982019 | 2019-08-05T09:21:40 | 2019-08-05T09:21:40 | 158,055,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | # -*- coding: utf-8 -*-
# @Date : 2019-05-17 14:23:30
# @Author : QilongPan
# @Email : 3102377627@qq.com
# The same group never appears in two different folds (the number of
# distinct groups must be at least equal to the number of folds).
# `groups` assigns a group id to each sample.
from sklearn.model_selection import GroupKFold
import numpy as np
# Toy dataset: four samples with two features each.
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 2, 3, 4])
# Two groups: samples 0-1 belong to group 0, samples 2-3 to group 2.
groups = np.array([0,0, 2, 2])
group_kfold = GroupKFold(n_splits=2)
# Each iteration yields train/test index arrays; all samples sharing a
# group id land on the same side of the split.
for train_index,test_index in group_kfold.split(X, y, groups):
    print(train_index)
    print(test_index)
| [
"3102377627@qq.com"
] | 3102377627@qq.com |
5bfee7606764826ff036404a7b07620623e24a96 | 88745dafec989d39726ca2e4d7f6cfb20bb60f5d | /tests/unit_tests/modules/s3/s3gis/BingLayer.py | c25ecc6c14edf56ab2f86ebbe914dc43f8fc5a3b | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | sungkomp/SAMBRO | f1ced7c9d198ccfe30aaa1bf883c2f8a7478fffb | 4618d785d03424d122206d88d9ebfb6971486e2c | refs/heads/master | 2020-05-30T08:41:26.855362 | 2019-10-15T02:48:47 | 2019-10-15T02:48:47 | 69,448,194 | 1 | 0 | NOASSERTION | 2019-10-15T04:25:13 | 2016-09-28T09:31:35 | Python | UTF-8 | Python | false | false | 784 | py |
s3gis_tests = load_module("tests.unit_tests.modules.s3.s3gis")
def test_BingLayer():
    """Round-trip a gis_layer_bing record through the s3gis layer test.

    Inserts the record described by the dict below and checks that the
    generated client-side "S3.gis.Bing" configuration matches the expected
    mapping.  `db`, `session` and `request` are globals supplied by the
    test harness (see the load_module call above this function).
    """
    s3gis_tests.layer_test(
        db,
        db.gis_layer_bing,
        dict(
            name = "Test Bing Layer",
            description = "Test Bing layer",
            enabled = True,
            created_on = datetime.datetime.now(),
            modified_on = datetime.datetime.now(),
            aerial_enabled = True,
            road_enabled = True,
            hybrid_enabled = True,
            apikey = "FAKEAPIKEY",
        ),
        "S3.gis.Bing",
        # Expected JS-side settings generated from the record above.
        {
            "Aerial": u"Bing Satellite",
            "ApiKey": u"FAKEAPIKEY",
            "Hybrid": u"Bing Hybrid",
            "Road": u"Bing Roads",
        },
        session = session,
        request = request,
    )
| [
"fran@aidiq.com"
] | fran@aidiq.com |
d5e5e491086979335728a5ce09637227e79fbd84 | 551b75f52d28c0b5c8944d808a361470e2602654 | /huaweicloud-sdk-rabbitmq/huaweicloudsdkrabbitmq/v2/model/show_background_task_request.py | 15f666a3a90534bc5325dbb4cc52bae5849ca114 | [
"Apache-2.0"
] | permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 3,478 | py | # coding: utf-8
import pprint
import re
import six
class ShowBackgroundTaskRequest:
    """Request model for fetching one background task of an instance.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    # Attribute names whose scalar values must be masked when serialised.
    sensitive_list = []

    openapi_types = {
        'instance_id': 'str',
        'task_id': 'str'
    }

    attribute_map = {
        'instance_id': 'instance_id',
        'task_id': 'task_id'
    }

    def __init__(self, instance_id=None, task_id=None):
        """ShowBackgroundTaskRequest - a model defined in huaweicloud sdk"""
        self._instance_id = None
        self._task_id = None
        self.discriminator = None
        # Assign through the property setters below.
        self.instance_id = instance_id
        self.task_id = task_id

    @property
    def instance_id(self):
        """str: ID of the instance the background task belongs to."""
        return self._instance_id

    @instance_id.setter
    def instance_id(self, instance_id):
        """Set the instance ID."""
        self._instance_id = instance_id

    @property
    def task_id(self):
        """str: ID of the background task to query."""
        return self._task_id

    @task_id.setter
    def task_id(self, task_id):
        """Set the task ID."""
        self._task_id = task_id

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        serialised = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                serialised[name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                serialised[name] = value.to_dict()
            elif isinstance(value, dict):
                serialised[name] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            elif name in self.sensitive_list:
                # Mask sensitive scalar values.
                serialised[name] = "****"
            else:
                serialised[name] = value
        return serialised

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two requests are equal when all their attributes match."""
        return isinstance(other, ShowBackgroundTaskRequest) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
665b261c26c914af9be8d0cc6ca2991861d06d4a | 1d164438ac1ba7c88aeabb7c9ea39b58680ba79c | /django_postgres_matviews/management/commands/drop_matviews.py | da51ebd3f07c9398a3e198e8445d967b5dc87d2b | [
"Unlicense"
] | permissive | andrewp-as-is/django-postgres-matviews.py | fac3288f199f013a0421ae23f634ea7082020181 | ff7d76f885318e208b81be7f5dcaa71ff7fc4fb3 | refs/heads/master | 2023-01-20T18:38:58.232754 | 2020-12-03T20:46:40 | 2020-12-03T20:46:40 | 285,872,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | from django.core.management.base import BaseCommand
from django.db import connection
from django_postgres_matviews.utils import drop_matviews
class Command(BaseCommand):
    """Management command: drop all registered Postgres materialized views."""
    def handle(self, *args, **options):
        # All of the work is delegated to the shared utility function.
        drop_matviews()
| [
"russianidiot.github@gmail.com"
] | russianidiot.github@gmail.com |
a1c37246801a248634486d255bc85009de21e17f | b42e2016972825d28b2abcfe001d879bfd56b453 | /sitka_highs.py | c3278286424cdd79abf125b152a39a24a2d95260 | [] | no_license | pk-tec/DataVisualization | 28b87cfcbeb419621df3f4915a6be0f944406bd9 | 79d46a95143e35f5f8eb2152821e5997afbe8646 | refs/heads/main | 2023-03-28T00:50:51.931945 | 2021-03-30T01:08:45 | 2021-03-30T01:08:45 | 352,829,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,218 | py | import csv
from datetime import datetime
import matplotlib.pyplot as plt
# NOTE(review): despite the module name (sitka_highs), the file path and
# chart title below both refer to the Death Valley 2018 data set.
filename = 'data/death_valley_2018_simple.csv'
with open(filename) as f:
    reader = csv.reader(f)
    # Consume the header row so only data rows remain.
    header_row = next(reader)
    # Get dates, and high and low temperature from this file.
    dates, highs, lows = [], [], []
    for row in reader:
        current_date = datetime.strptime(row[2], '%Y-%m-%d')
        try:
            high = int(row[4])
            low = int(row[5])
        except ValueError:
            # Rows with unparsable temperatures are reported and skipped.
            print(f"Missing data for {current_date}")
        else:
            dates.append(current_date)
            highs.append(high)
            lows.append(low)
# Plot the high and low temperature
plt.style.use('seaborn')
fig, ax = plt.subplots()
ax.plot(dates, highs, c='red', alpha=0.5)
ax.plot(dates, lows, c='blue', alpha=0.5)
# Shade the band between the daily high and low series.
plt.fill_between(dates, highs, lows, facecolor='blue', alpha=0.1)
# Format plot.
title = "Daily high and low temperature - 2018\nDeath Valley, CA"
plt.title(title, fontsize=20)
plt.xlabel('', fontsize=16)
fig.autofmt_xdate()
plt.ylabel("Temperature (F)", fontsize=16)
plt.tick_params(axis='both', which='major', labelsize=16)
plt.show() | [
"Pritam8987@gmail.com"
] | Pritam8987@gmail.com |
f41f0f5811f7145e058831e8ebfffdbe5fba97f0 | 37951ec6d5ffa26c26023a832c7d4ccb304d476f | /docs/source/conf.py | 1873daa9ef8998384a81e93f6813747710767133 | [] | no_license | abg/misc | f380699ce7f402c8d12bf0137f1d2213d018049b | db4544d7ab60d0934fbf1c60fa95059885676144 | refs/heads/master | 2020-12-24T17:17:35.882153 | 2011-01-22T08:26:17 | 2011-01-22T08:26:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,100 | py | # -*- coding: utf-8 -*-
#
# config4py documentation build configuration file, created by
# sphinx-quickstart on Wed Jan 19 10:06:36 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'config4py'
copyright = u'2011, Andrew Garner'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0a1'
# The full version, including alpha/beta/rc tags.
release = '1.0a1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'config4pydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'config4py.tex', u'config4py Documentation',
u'Andrew Garner', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'config4py', u'config4py Documentation',
[u'Andrew Garner'], 1)
]
| [
"muzazzi@gmail.com"
] | muzazzi@gmail.com |
b548d643ad30ee98db0062765d29564780e59505 | c9fd02550474644c4e35ffdfd81243f0066bcfbf | /Django/Random_Word/main/apps/random_word/views.py | 98cff6c9a6a3f17d6fcd642393dc2bceafcb1f12 | [] | no_license | Evermark/Python_Assignments | 7f5dfd82ddbd4171b821dabc1af82d6471cd79da | a93f7308b6ffd827350f9e3467df04026141074c | refs/heads/master | 2021-04-30T05:52:33.022302 | 2018-03-26T05:27:59 | 2018-03-26T05:27:59 | 116,930,734 | 0 | 1 | null | 2018-01-20T21:19:35 | 2018-01-10T08:34:33 | Python | UTF-8 | Python | false | false | 530 | py | from django.shortcuts import render, HttpResponse, redirect
from django.utils.crypto import get_random_string
def index(request):
    """Render the page with a fresh 12-character random word.

    Tracks how many words this session has been shown in
    ``request.session['attempt']``.
    """
    if 'attempt' not in request.session:
        request.session['attempt'] = 1
    else:
        request.session['attempt'] += 1
    # Renamed from ``random`` so the local does not shadow the stdlib
    # ``random`` module name.
    context = {'word': get_random_string(length=12)}
    return render(request, 'random_word/index.html', context)
def random_word(request):
    # Alias route: simply bounce back to the main random-word page.
    return redirect('/random_word')
def reset(request):
    # Zero the per-session attempt counter, then return to the main page.
    request.session['attempt'] = 0
    return redirect('/random_word')
| [
"mmathisen@gmail.com"
] | mmathisen@gmail.com |
81753007576d20c13b025493673bf3b5a2da906c | 9cca8e13ea7ff9142ea8b1ebb46ed5a3f7dc118f | /exam1.py | 5d62ffddfab2665bbd50ea9017395816d9de491c | [
"MIT"
] | permissive | elijahanderson/Python-Practice | 594924b6760413837aed405b4db51f97171a26ae | 881f3dad9d6050d4f3c268172187e6a17ed39426 | refs/heads/master | 2021-01-21T21:57:20.594099 | 2018-02-06T16:11:40 | 2018-02-06T16:11:40 | 95,133,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,280 | py | # exam.py - A program to test your programming for exam #1.
# This program is basically a set of functions that do various
# separate things, not one big program.
# The comments with TODO in them below are problems you need to
# finish.
# You might want to go to the bottom to look at the program code
# to see what the program expects the functions to do.
# isEven( n )
# Returns True if n is even, and False if n is odd.
def isEven(n):
    """Return True if n is even, False if n is odd."""
    # The comparison already yields the required bool; no explicit branch.
    return n % 2 == 0
# sumUpEvens( n )
# Returns the sum of all even numbers between 1 and n inclusive.
def sumUpEvens(n):
    """Return the sum of all even numbers between 1 and n inclusive.

    Walks only the even values directly (step-2 range) instead of testing
    every integer, and no longer shadows the builtin ``sum`` (the original
    used ``sum`` as its local accumulator).  Returns 0 when n < 2.
    """
    return sum(range(2, n + 1, 2))
# makeList( s, n )
# Returns a list consisting of n copies of s, where s is converted
# to all capital letters.
def makeList(s, n):
    """Return a list of n copies of s converted to all capital letters.

    Returns an empty list when n <= 0 (list repetition semantics).
    """
    return [s.upper()] * n
# fillDictionary( dict )
# Prompts the user to enter a name of a person, then the name of
# one of that person's pets. Then, add that pair to the dictionary
# (the name is the key and the pet name is the value). Repeat so
# that five name-pet pairs are added to the dictionary.
def fillDictionary(dict) :
    """Prompt for five person/pet name pairs and add them to *dict*.

    The person's name is the key and the pet's name the value.
    NOTE(review): the parameter name shadows the builtin ``dict``, and
    setdefault means a re-entered person keeps their first pet rather
    than being overwritten — confirm that is the intended behavior.
    """
    for i in range(5) :
        person = input('Enter the name of a person: ')
        pet = input('Enter the name of that person\'s pet (note: a person cannot have more than one pet): ')
        dict.setdefault(person, pet)
# Beginning of main program
# Read an integer; negative input is replaced by its absolute value.
print( 'Enter a positive integer:' )
number = int( input( ) )
if number < 0 :
    print('That number is negative. The absolute value of it will be entered instead.')
    number *= -1
# Report the parity of the number.
if ( isEven( number ) ) :
    print( str( number ) + ' is an even number.' )
else :
    print( str( number ) + ' is an odd number.' )
print( 'The sum of all even numbers from 1 to ' + str( number ) + ':' )
print( sumUpEvens( number ) )
# Echo the user's name, upper-cased, `number` times.
print( 'Please enter your name:' )
name = input( )
nameList = makeList( name, number )
print( 'Here is your name in all capitals a number of times:' )
print( nameList )
# Collect five person/pet pairs and show the resulting dictionary.
namesAndPets = { }
fillDictionary( namesAndPets )
print( 'Here is the information about names and pets.' )
print( namesAndPets )
| [
"elijahanderson_2019@depauw.edu"
] | elijahanderson_2019@depauw.edu |
127e521fc174bcb018737f195d7d9d13e672b726 | 9b4fe9c2693abc6ecc614088665cbf855971deaf | /78.subsets.py | e02e0a2c09554ecf70645818837f819efcf53e44 | [
"MIT"
] | permissive | windard/leeeeee | e795be2b9dcabfc9f32fe25794878e591a6fb2c8 | 0dd67edca4e0b0323cb5a7239f02ea46383cd15a | refs/heads/master | 2022-08-12T19:51:26.748317 | 2022-08-07T16:01:30 | 2022-08-07T16:01:30 | 222,122,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,215 | py | # coding=utf-8
#
# @lc app=leetcode id=78 lang=python
#
# [78] Subsets
#
# https://leetcode.com/problems/subsets/description/
#
# algorithms
# Medium (51.03%)
# Likes: 2192
# Dislikes: 54
# Total Accepted: 396.6K
# Total Submissions: 731K
# Testcase Example: '[1,2,3]'
#
# Given a set of distinct integers, nums, return all possible subsets (the
# power set).
#
# Note: The solution set must not contain duplicate subsets.
#
# Example:
#
#
# Input: nums = [1,2,3]
# Output:
# [
# [3],
# [1],
# [2],
# [1,2,3],
# [1,3],
# [2,3],
# [1,2],
# []
# ]
#
#
class Solution(object):
    """Alternative implementations of LeetCode 78 (power set of distinct ints).

    Only ``subsets`` is the active entry point; the underscore-prefixed
    variants are kept for reference (double-underscore names are mangled
    to ``_Solution__subsets`` etc. inside the class).
    """
    def _subsets(self, nums):
        """Breadth-wise combination building, level by subset size.

        :type nums: List[int]
        :rtype: List[List[int]]
        """
        # DFS
        # combinations
        # results are correct, but the order differs from the sample output
        result = temp = [[]]
        last = []
        while temp:
            temp = []
            for num in nums:
                if not last:
                    temp.append(last + [num])
                else:
                    for l in last:
                        # only extend with strictly larger values, so each
                        # combination is generated exactly once
                        if num > max(l):
                            temp.append(l + [num])
            last = temp
            result.extend(last)
        return result
    def __subsets(self, nums):
        """Bitmask enumeration using a value -> bit lookup table.

        :type nums: List[int]
        :rtype: List[List[int]]
        """
        result = []
        length = len(nums)
        data = {value:2**key for key,value in enumerate(nums)}
        for i in range(2**length):
            temp = []
            for key,value in data.items():
                # include the element when its bit is set in mask i
                if value & i != 0:
                    temp.append(key)
            result.append(temp)
        return result
    def ___subsets(self, nums):
        """Bitmask enumeration without the lookup table.

        :type nums: List[int]
        :rtype: List[List[int]]
        """
        result = []
        length = len(nums)
        for i in range(1<<length):
            temp = []
            for key,value in enumerate(nums):
                # & binds tighter than != here: evaluated as (1<<key & i) != 0
                if 1<<key & i != 0:
                    temp.append(value)
            result.append(temp)
        return result
    def ____subsets(self, nums):
        """Iterative doubling: extend every existing subset with each num.

        :type nums: List[int]
        :rtype: List[List[int]]
        """
        # Best of All
        result = [[]]
        for n in nums:
            current = result[:]
            for t in current:
                result.append(t+[n])
        return result
    def _____subsets(self, nums):
        """Recursive version of the doubling approach (see helper).

        :type nums: List[int]
        :rtype: List[List[int]]
        """
        return self.helper(nums, 0, [[]])
    def helper(self, nums, index, result):
        # Double `result` with subsets that include nums[index], then recurse.
        if index >= len(nums):
            return result
        temp = result[:]
        for t in temp:
            result.append(t+[nums[index]])
        return self.helper(nums, index+1, result)
    def subsets(self, nums):
        """Backtracking solution (the active implementation).

        :type nums: List[int]
        :rtype: List[List[int]]
        """
        start = 0
        e = len(nums)
        result = []
        def backtrack(s, p):
            # record the current prefix, then branch on each later element
            result.append(p)
            for i in range(s, e):
                backtrack(i+1, p+[nums[i]])
        backtrack(start, [])
        return result
# if __name__ == "__main__":
# s = Solution()
# print s.subsets([1,2,3])
| [
"windard@qq.com"
] | windard@qq.com |
fca72c404fcf1bdb72672e53cdb4cff8efe5a4ff | 63b4b47117e42ac3e2548a5ce25f27b63bd348c9 | /scripts/bootstrap.py | 8cf7c9b7975c8bffa579fec903e8498688c402bb | [] | no_license | jgraettinger/sentiment | 0b11cd92e7d1ea4fbe70e4a1352a95a85df50b01 | 765a2fbaefa4d9a0a3a1d27d40b3623124dccc53 | refs/heads/master | 2021-01-18T01:44:00.155862 | 2010-09-23T09:05:37 | 2010-09-23T09:05:37 | 615,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,385 | py |
import getty
import vinz
import vinz.featurization.basic_featurize
import vinz.normalization.basic_normalize
import vinz.normalization.twitter_normalize
import vinz.estimation
import vinz.feature_transform
def bootstrap():
    """Build the dependency injector with the app's component bindings.

    Returns a getty.Injector wired with the featurization, normalization
    and feature-transform implementations plus the scalar configuration
    values the estimators expect.
    """
    injector = getty.Injector()

    # Component bindings (interface -> implementation).
    injector.bind(vinz.featurization.InternTable,
                  to=vinz.featurization.InternTable,
                  scope=getty.Singleton)
    injector.bind(vinz.featurization.Featurizer,
                  to=vinz.featurization.basic_featurize.TfIdfFeaturizer)
    injector.bind(vinz.normalization.Normalizer,
                  to=vinz.normalization.twitter_normalize.TwitterNormalizer)
    injector.bind(vinz.feature_transform.FeatureSelector,
                  to=vinz.feature_transform.ProjIGainCutoffTransform)

    # Scalar configuration values, bound by annotation name.
    config_values = (
        ('alpha', 0.04),
        ('min_features', 100),
        ('max_mass_ratio', 0.75),
        ('max_features', 100000),
        ('class_smoothing_factor', 0.02),
        ('n_rand_output_features', 300),
        ('n_pca_output_features', 50),
    )
    for annotation, value in config_values:
        injector.bind_instance(getty.Config, with_annotation=annotation, to=value)

    return injector
| [
"johnny531@gmail.com"
] | johnny531@gmail.com |
5326eeda4f0270ef381bd89bf8bfdc44a76caab8 | 0baf7c1d3860c8f39fdde49eafb4f5033cd34b2f | /06/hw0601.py | 1fc54f39a8c3a794bc95c2b7fbf231cf2ecc2467 | [] | no_license | dchmerenko/courseraPy | 08290a1e1cef573be9a6ca2c2929cac34e493e43 | 6be764eb40076c386370cf19d09d5a306d4f161c | refs/heads/master | 2021-01-01T20:43:56.793375 | 2017-08-03T16:15:17 | 2017-08-03T16:15:17 | 98,897,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | # list merging
def merge(a, b):
    """Merge two pre-sorted lists into one sorted list.

    On ties the element from ``b`` is taken first, matching the original
    strict comparison ``a[i] < b[j]``.

    :param a: first sorted sequence
    :param b: second sorted sequence
    :return: a new sorted list containing every element of a and b
    """
    merged = []
    i = j = 0
    while i < len(a) and j < len(b):
        if a[i] < b[j]:
            merged.append(a[i])
            i += 1
        else:
            merged.append(b[j])
            j += 1
    # Exactly one of these slices is non-empty; extend() replaces the
    # original redundant `if` + `while` tail-copy loops.
    merged.extend(a[i:])
    merged.extend(b[j:])
    return merged
# Read the two already-sorted integer sequences, one per input line.
a = list(map(int, input().split()))
b = list(map(int, input().split()))
# Print the merged result space-separated.
print(*merge(a, b))
| [
"dchmerenko@gmail.com"
] | dchmerenko@gmail.com |
f9c7bea1d2f71bd02daaa0048f7368e29612f381 | 41a0045f0cff201153bc352102f654978d497925 | /trotro.py | 726b5090645f072b3e0af2ce5b52af08c1397833 | [] | no_license | Tejas-Naik/mobile-responsive | a8a3fde3b426130abef2cb30bfb33584a49246f0 | c10b2521c1e80e0258148b54d1cea4bcb4508983 | refs/heads/master | 2023-05-06T05:35:35.113264 | 2021-06-02T06:36:09 | 2021-06-02T06:36:09 | 373,177,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21 | py | from app import app
| [
"RNTejas2005@gmail.com"
] | RNTejas2005@gmail.com |
246e17ff0c48c787a0a932071216fd5a5e87c321 | 770e3f4fcb3d2f96ea8cc36bfa47625778c40c71 | /unit_tests/test_provides.py | 154f566f2b1cbe63d5f075866c676d2654f56ed0 | [] | no_license | openstack-charmers/charm-interface-pacemaker-remote | 8d12a0594bc580f74c9a591b44429320912c8cbf | f1297f72a5c6f8dc4f89461850a7d8ebaa01cf04 | refs/heads/master | 2020-04-30T03:33:29.086571 | 2019-03-20T07:31:55 | 2019-03-20T07:31:55 | 176,589,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,454 | py | import unittest
import mock
with mock.patch('charmhelpers.core.hookenv.metadata') as _meta:
_meta.return_Value = 'ss'
import provides
_hook_args = {}
TO_PATCH = [
]
def mock_hook(*args, **kwargs):
    """Stand-in for charms.reactive.when used while importing provides.

    Instead of registering a reactive handler it records the decorator
    arguments in the module-level _hook_args table, keyed by the name of
    the decorated function, and returns the function unchanged.
    """
    def decorator(func):
        # Only the function object reaches us, so the owning class (if
        # any) cannot be determined here; remember the raw arguments.
        _hook_args[func.__name__] = dict(args=args, kwargs=kwargs)
        return func
    return decorator
class _unit_mock:
    """Tiny stand-in for a relation unit: a name plus received data."""

    def __init__(self, unit_name, received=None):
        # `or` keeps the original semantics: any falsy value (None or an
        # empty mapping) is replaced by a fresh dict.
        self.received = received or {}
        self.unit_name = unit_name
class _relation_mock:
    """Tiny stand-in for a relation: publish dicts, app name and units."""

    def __init__(self, application_name=None, units=None):
        # Fresh publish targets for every relation instance.
        self.to_publish_raw = {}
        self.to_publish = {}
        self.units = units
        self.application_name = application_name
class TestPacemakerRemoteProvides(unittest.TestCase):
    """Unit tests for the pacemaker-remote interface's provides side.

    charms.reactive.when is replaced by mock_hook before `provides` is
    (re)imported, so the reactive decorators become no-ops and the class
    under test can be instantiated directly.
    """
    @classmethod
    def setUpClass(cls):
        cls._patched_hook = mock.patch('charms.reactive.when', mock_hook)
        cls._patched_hook_started = cls._patched_hook.start()
        # force provides to rerun the mock_hook decorator:
        # try except is Python2/Python3 compatibility as Python3 has moved
        # reload to importlib.
        try:
            reload(provides)
        except NameError:
            import importlib
            importlib.reload(provides)
    @classmethod
    def tearDownClass(cls):
        cls._patched_hook.stop()
        cls._patched_hook_started = None
        cls._patched_hook = None
        # and fix any breakage we did to the module
        try:
            reload(provides)
        except NameError:
            import importlib
            importlib.reload(provides)
    def patch(self, method):
        # Patch `method` on self.obj and undo it automatically at teardown.
        _m = mock.patch.object(self.obj, method)
        _mock = _m.start()
        self.addCleanup(_m.stop)
        return _mock
    def setUp(self):
        # Fresh relation object per test, plus any TO_PATCH attributes
        # replaced by mocks bound onto self.
        self.relation_obj = provides.PacemakerRemoteProvides(
            'some-relation',
            [])
        self._patches = {}
        self._patches_start = {}
        self.obj = provides
        for method in TO_PATCH:
            setattr(self, method, self.patch(method))
    def tearDown(self):
        self.relation_obj = None
        for k, v in self._patches.items():
            v.stop()
            setattr(self, k, None)
        self._patches = None
        self._patches_start = None
    def patch_relation_obj(self, attr, return_value=None):
        # Patch an attribute on the relation object itself, recording the
        # patcher so tearDown can stop it.
        mocked = mock.patch.object(self.relation_obj, attr)
        self._patches[attr] = mocked
        started = mocked.start()
        started.return_value = return_value
        self._patches_start[attr] = started
        setattr(self, attr, started)
    def test_publish_info(self):
        # publish_info should place the hostname/stonith/resource flags
        # on the relation's to_publish dict.
        mock_rel = _relation_mock()
        self.relation_obj._relations = [mock_rel]
        self.relation_obj.publish_info(
            'node1.az1.local',
            stonith_hostname='node1.stonith',
            enable_resources=True)
        expect = {
            'remote-hostname': 'node1.az1.local',
            'stonith-hostname': 'node1.stonith',
            'enable-resources': True}
        self.assertEqual(
            mock_rel.to_publish,
            expect)
    def test_get_pacemaker_key(self):
        # A single unit advertising a base64 key: the decoded bytes are
        # returned.
        unit1 = _unit_mock(
            'unit1',
            received={'pacemaker-key': 'cG1ha2Vya2V5MQo='})
        mock_rel = _relation_mock(units=[unit1])
        self.relation_obj._relations = [mock_rel]
        self.assertEqual(
            self.relation_obj.get_pacemaker_key(),
            b'pmakerkey1\n')
    def test_get_pacemaker_key_inconsistent(self):
        # Two units advertising different keys is an error.
        unit1 = _unit_mock(
            'unit1',
            received={'pacemaker-key': 'cG1ha2Vya2V5MQo='})
        unit2 = _unit_mock(
            'unit2',
            received={'pacemaker-key': 'cG1ha2Vya2V5Mgo='})
        mock_rel = _relation_mock(units=[unit1, unit2])
        self.relation_obj._relations = [mock_rel]
        with self.assertRaises(Exception):
            self.relation_obj.get_pacemaker_key()
    def test_get_pacemaker_key_missing(self):
        # No unit has advertised a key yet: None is returned.
        unit1 = _unit_mock(
            'unit1',
            received={})
        unit2 = _unit_mock(
            'unit2',
            received={})
        mock_rel = _relation_mock(units=[unit1, unit2])
        self.relation_obj._relations = [mock_rel]
        self.assertEqual(
            self.relation_obj.get_pacemaker_key(),
            None)
| [
"liam.young@canonical.com"
] | liam.young@canonical.com |
83c5651a93a1bee73e720ee98900ca34bc8bc05b | e7bca56e03831ee604a3765867f235a27e261c85 | /src/utils/finance_metrics.py | f9d11ffbde7a6f5f2cec28be9b77d78e7de5333c | [
"MIT"
] | permissive | WatweA/RNNforPairsTrading | 8bac598424891620e1bf0b31b7400b96d6f90018 | 44379de85712a411c501524473c5569964f1c4b6 | refs/heads/main | 2023-04-10T20:52:22.884222 | 2021-04-26T21:38:17 | 2021-04-26T21:38:17 | 352,525,153 | 1 | 0 | null | 2021-03-30T16:56:34 | 2021-03-29T05:21:40 | Jupyter Notebook | UTF-8 | Python | false | false | 3,561 | py | from typing import Tuple, Dict, Optional
import numpy as np
import pandas as pd
TRADING_DAYS = 252
def to_signal(predicted_returns: np.ndarray,
              signal_dict: Dict[Tuple[Optional[float], Optional[float]], float] = None
              ) -> np.ndarray:
    """
    Generate an array of position signals from an array of predicted returns.

    :param predicted_returns: predicted return values, one per period
    :param signal_dict: maps a half-open range [min, max) to a signal value;
        None means the range is unbounded on that side (e.g. (0.05, None)
        covers all returns >= 0.05). If ranges overlap, the last matching
        entry in iteration order wins. Defaults to an 11-bucket ladder
        spanning all reals with signals in [-1, 1].
    :return: a numpy array with one signal per predicted return
    """
    if signal_dict is None:
        signal_dict = {
            (None, -0.0150): -1.00,
            (-0.0150, -0.0100): -0.75,
            (-0.0100, -0.0075): -0.50,
            (-0.0075, -0.0050): -0.25,
            (-0.0050, -0.0025): -0.10,
            (-0.0025, 0.0025): 0.00,
            (0.0025, 0.0050): 0.10,
            (0.0050, 0.0075): 0.25,
            (0.0075, 0.0100): 0.50,
            (0.0100, 0.0150): 0.75,
            (0.0150, None): 1.00
        }
    signals = []
    for predicted_return in predicted_returns:
        # Fall back to a full long position when no range matches (cannot
        # happen with the default dict, whose ranges cover all reals).
        signal = 1
        for (range_min, range_max), range_signal in signal_dict.items():
            if (range_min is None or range_min <= predicted_return) and \
                    (range_max is None or predicted_return < range_max):
                signal = range_signal
        signals.append(signal)
    # BUG FIX: np.ndarray(signals) interprets the list as a *shape*;
    # np.array builds the array from the collected values.
    return np.array(signals)
def adjusted_returns(signals: np.ndarray,
                     returns_a: np.ndarray,
                     returns_b: np.ndarray
                     ) -> Tuple[np.ndarray, np.ndarray]:
    """
    Generates the adjusted returns for two returns series with a given signal
    for the pairwise returns.
    The signal is applied to returns_a and the negated signal to returns_b
    (long one leg, short the other), and both adjusted return arrays are
    returned as a tuple.

    :param signals: per-period signals used to scale the first returns
        relative to the second
    :param returns_a: the first returns series in the pair
    :param returns_b: the second returns series in the pair
    :return: a tuple of the adjusted returns series (as numpy arrays)
    """
    adjusted_a = []
    adjusted_b = []
    for i, signal in enumerate(signals):
        # BUG FIX: each period i must use that period's return; the
        # original indexed position 1 for every period.
        adjusted_a.append(returns_a[i] * signal)
        adjusted_b.append(returns_b[i] * -1 * signal)
    return np.array(adjusted_a), np.array(adjusted_b)
def cumulative_return(returns_df: pd.DataFrame) -> pd.DataFrame:
    """
    Compute the running cumulative growth of every column.

    :param returns_df: DataFrame of per-period returns; NaNs count as a
        zero return for that period
    :return: DataFrame of cumulative return factors per column
    """
    growth_factors = returns_df.fillna(0).add(1)
    return growth_factors.cumprod(axis=0)
def annualized_return(returns_df: pd.DataFrame) -> pd.DataFrame:
    """
    Annualize the compounded return of every daily-return column.

    :param returns_df: DataFrame of daily returns
    :return: per-column annualized returns
    """
    n_days = len(returns_df.index)
    # Total growth factor over the observed window, per column.
    growth = (returns_df + 1).prod(axis=0)
    # Compound the growth over a full trading year, then back to a return.
    return growth ** (TRADING_DAYS / n_days) - 1
| [
"45186715+WatweA@users.noreply.github.com"
] | 45186715+WatweA@users.noreply.github.com |
329444be3e093f08598e5a613a554721a6d6e9b1 | f99e42d181267f35ffaa7a47106d188bbe9f1094 | /codekata/91.py | 086d614fa3eba2704fdf10a7a701cdfee2d485c5 | [] | no_license | DEEPTHA26/guvi | 92d0992548a8ccbffc5e2ba8b702184cc2c42929 | 84816f470c08ede86a87b010cfd5ef5573d045f8 | refs/heads/master | 2020-06-15T20:29:44.864601 | 2019-06-30T15:18:17 | 2019-06-30T15:18:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | z,x,c=map(int,input().split())
# v: volume of a cuboid with edge lengths z, x and c (read above).
v=z*x*c
# t: total surface area — two faces per pair of dimensions.
t=(2*z*x)+(2*x*c)+(2*c*z)
print(t,v)
| [
"noreply@github.com"
] | noreply@github.com |
4faf6fbf0104e8ad8a9246bb96f76397be73fdd3 | e3e2f29401d9703c7dc8fd87fb04913351135d54 | /codes/delete_repeat_classlayout.py | 7abfbe317472f11dfe4d8ec1dfa3a4d94c468870 | [] | no_license | cooplus-vscape/vscape_analyzer | 2d934442c9596dd8fd93b22fa305a52cae25db5c | c4632577d32b3b73d38db4a7f2056dc1c66cd259 | refs/heads/main | 2023-02-23T19:55:26.460592 | 2021-02-03T09:31:53 | 2021-02-03T09:31:53 | 325,956,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | #coding:utf-8
from globalconfig import *
from DBatom import *
from threading import Thread
import threading
import time
def processitems(content, rangenum, rangenumend, tid):
for cc in range(rangenum, rangenumend):
try:
try:
print content[cc]
except:
continue
fs = {
"classname": content[cc]["classname"]
}
baoliu = content[cc]
databasecl.delete_many(fs)
# databasecl.insert(baoliu,check_keys=False)
databasecl.insert(baoliu, check_keys=False)
except:
continue
print "thread %d done " % tid
pass
if __name__ == "__main__":
NWORKERS = 40
threads = []
databasecl, client = getdbhandler(COOPPLUSDB, CLASSLAYOUT)
fi={}
content = databasecl.find(fi, no_cursor_timeout=True)
# num = content.count()
allnum = content.count()
stepnum = allnum/NWORKERS
rangenum = 0
rangenumend = 0
for n in range(NWORKERS):
rangenum = rangenumend
rangenumend = rangenum+stepnum
t = Thread(target=processitems, args=(
content, rangenum, rangenumend, n,))
t.daemon = True
# print "%d is started" % n
t.start()
threads.append(t)
# raw_input("xxxxxx")
while True:
time.sleep(10)
pass
client.close()
print "__end__"
| [
"vcakesxyz@outlook.com"
] | vcakesxyz@outlook.com |
3050537d9919a0962079d4d4af28cf9ef9a613a5 | e4beaea96286efe42dc53c34a1521950c8f3b14c | /ordersapp/migrations/0001_initial.py | 9286cccef052165142e18b1ef514ffb88432f7ff | [] | no_license | MrWindmark/mercuryShop | 7f71d430d4dc148e4530723265b265c44abde457 | 6de5230bd872279c7cacfa6920a8e06a1e4189c1 | refs/heads/master | 2023-04-12T23:16:11.752814 | 2021-04-14T18:59:25 | 2021-04-14T18:59:25 | 340,908,915 | 0 | 0 | null | 2021-05-14T12:08:01 | 2021-02-21T13:32:39 | CSS | UTF-8 | Python | false | false | 2,148 | py | # Generated by Django 2.2.18 on 2021-04-12 20:19
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('mainapp', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Создан')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Изменён')),
('status', models.CharField(choices=[('FM', 'формируется'), ('STP', 'отправлен в обработку'), ('PD', 'оплачен'), ('PRD', 'обрабатывается'), ('RDY', 'готов к выдаче'), ('CNC', 'отменен')], default='FM', max_length=3, verbose_name='Статус')),
('is_active', models.BooleanField(default=True, verbose_name='Активен')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Заказ',
'verbose_name_plural': 'Заказы',
'ordering': ('-created_at',),
},
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.PositiveIntegerField(default=0, verbose_name='количество')),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orderitems', to='ordersapp.Order')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Product', verbose_name='продукт')),
],
),
]
| [
"agentstorm@ya.ru"
] | agentstorm@ya.ru |
dcb41c3975358829574421abff12c1fd1b3ed047 | d128dc06cff15a60d9d431c03516766d19a72cdc | /101 Tasks/Task 081.py | 0b4e6320771ce66c2b6a53bc0c96c3c0ebbae30c | [] | no_license | NguyenVanDuc2022/Self-study | eb66f06a74dc4900a91ec73705236b2b55e673d2 | a42c5c251ef9339ba33d8944583b239a5f2cddfa | refs/heads/main | 2023-08-28T02:12:29.440153 | 2021-10-06T03:03:25 | 2021-10-06T03:03:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | """
Question 081 - Level 03
Please write a program to generate all sentences where subject is in ["I", "You"] and the object is in ["Hockey",
"Football"].
Hints: Use list[index] notation to get a element from a list.
--- Nguyen Van Duc ---
"""
subjects = ["I", "You"]
verbs = ["Hockey", "Football"]
for i in range(len(subjects)):
for j in range(len(verbs)):
for k in range(len(subjects)):
sentence = "%s %s %s." % (subjects[i], verbs[j], subjects[k])
print(sentence)
| [
"nvduc.ds@hotmail.com"
] | nvduc.ds@hotmail.com |
b332a21d8410d1f50d009e15238f801aced23ad4 | f8565c4d6a96dd91c2a6cd247f180f7c7d1054ff | /changing_internalLines.py | 7f7df0d0c1dc8e9afd73acd5abe3b5562bcd1bec | [] | no_license | AhmedAlhallag/Python_SomeJunk | d625e243c7bb7e78d4ca1d2547f9d4c3e7fb59ee | e45b2e8af5326b7d627edc87345b1a7f5d4b9268 | refs/heads/master | 2022-11-15T07:06:19.064093 | 2020-07-13T17:01:57 | 2020-07-13T17:01:57 | 279,363,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # with open("user_output.txt","r") as file:
fnName = "Hamada"
i = 1
with open("pyt1_edited.py", "w") as edited:
for line in open("pyt1.py", "r").readlines():
comIDX = line.find("#")
if comIDX >= 0:
# we found a comment; remove it
print(f"Found a Comment:x{i}")
i += 1
else:
edited.write(line)
| [
"alhallag@gmail.com"
] | alhallag@gmail.com |
b4edfc32c00e21b7065ff29a25b0321737f977df | aa8098d44e92a1b5aeb9676ca8b15b4c701b297f | /exceptions.py | 9dcddae19e95c786344328998f5dcbce03cbf92f | [
"MIT"
] | permissive | rpierlot/LaPosteClient | 7b331c4a7b3c0d64425b46edf7a5782af28ab2f1 | 70eba4f42fec795b883c26f516a40aa0fa116de1 | refs/heads/master | 2021-08-31T18:23:47.487722 | 2017-12-21T13:29:40 | 2017-12-22T10:33:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | class TrackShipmentError(Exception):
def __init__(self, code, message):
self.code = code
self.message = message
| [
"rpierlot-ext@deezer.com"
] | rpierlot-ext@deezer.com |
5b7ef9e36ad051f8020055d5ddecfbee31905843 | 63289b50fd61f1365476dbee9daf5f64f0632d0e | /FoodApp/migrations/0001_initial.py | 7ee83bcf0b01a0c365bcb3b0303351d3f7b686a9 | [] | no_license | pradeepvarma22/Food_Review | 98b97fba71fcf78fb2335046a312299627b96d65 | 76c96580716398746e1bf78cb5613795a03e8abb | refs/heads/master | 2023-03-01T01:26:48.286331 | 2021-02-06T19:29:07 | 2021-02-06T19:29:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,942 | py | # Generated by Django 3.0.4 on 2021-02-06 08:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='FoodModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('item_name', models.CharField(max_length=200)),
('item_image', models.ImageField(blank=True, upload_to='static/images/')),
('item_desc', models.CharField(max_length=200)),
('createdon', models.DateTimeField(default=django.utils.timezone.now)),
('diet', models.CharField(choices=[('BreakFast', 'BreakFast'), ('Lunch', 'Lunch'), ('Snacks', 'Snacks'), ('Snacks', 'Dinner')], max_length=100)),
],
options={
'ordering': ('-createdon',),
},
),
migrations.CreateModel(
name='ReviewModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('createdon', models.DateTimeField(default=django.utils.timezone.now)),
('text', models.CharField(blank=True, max_length=600)),
('rate', models.SmallIntegerField(choices=[('Poor', 'Poor'), ('Average', 'Average'), ('Good', 'Good'), ('Excellent', 'Excellent')], default='Excellent')),
('foodmodel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FoodApp.FoodModel')),
('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"pradeepvarma107@gmail.com"
] | pradeepvarma107@gmail.com |
cc73f71771bfafd0ba5c10ac52a206b706afb2f9 | 8698ba18025a29a1bc491769b7c0299497c9cc7f | /scrapy_templates/19-design/gooddesignaward/gooddesignaward/middlewares.py | 086971947280f42d53f7ef1c32a65740184ecbbc | [] | no_license | QuincyLi/happy-spiders | 33ea11d3ac570f1c3723de0fb4f58ef6e6fe5b9a | df30bc43f2b99ff70c380bbadeec4bcc285d481b | refs/heads/master | 2020-04-27T11:12:46.608104 | 2019-01-02T09:39:21 | 2019-01-02T09:39:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,615 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class GooddesignawardSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class GooddesignawardDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| [
"caiconghuai@gmail.com"
] | caiconghuai@gmail.com |
5cfdde968cee2e114cd56c641bad909c4f4adb5f | ebc5c949aa4e0e87afb0cd1daf2780e5306a3e02 | /KafkaProducer.py | 1ac7a66a166cf47b7278627d9b56c620c35c2139 | [] | no_license | ecvasile/python-big-data | e1e53c91750ef46bf5430715c756a48a43cfa482 | 6543dd0713212acb2ab76dc965640a004c718cc6 | refs/heads/master | 2023-02-15T22:34:00.882930 | 2021-01-07T16:03:04 | 2021-01-07T16:03:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | from TrafficSimulator import TrafficGenerator
from kafka import KafkaProducer
# create decoder object from pcap file
pkt_obj = enumerate(TrafficGenerator('/home/cloudera-master/2018-04-05_09-36-26_Default_SPW-4.pcap'))
# create Kafka producer
producer = KafkaProducer(bootstrap_servers='cldmaster.local:9092', # batch_size=16384, linger_ms=5,
value_serializer=lambda v: v)
# topic must be the same for the Producer and Consumer
topic = "TM_RAW"
# counter = itertools.count(start=0, step=1)
for _ in range(10):
# for _ in range(863928):
# iterate spw packets one by one
packet = next(pkt_obj)[1]
# send each pcap packet in bytes format
if packet is not None:
producer.send("TM_RAW", packet)
# print(packet)
else:
print('None')
# print(i)
# time.sleep(0.01)
producer.flush() | [
"eduardvasile@JackSparrows.lan"
] | eduardvasile@JackSparrows.lan |
894b732050372338c14fa012e7f9b16f6e1eadbf | 11812a0cc7b818292e601ecdd4aa4c4e03d131c5 | /02_多任务/3_协程/hm_15_使用协程完成多任务终极.py | ed3306cb3d774702e938b02a0d1ebc14291efd90 | [] | no_license | SunshineFaxixi/Python_Learning | f1e55adcfa898489cc9146ccfb220f0b48a31a22 | ab3ca44d013311b6de02124091acc4c36a83c4d9 | refs/heads/master | 2021-08-16T05:47:29.963118 | 2021-01-04T13:48:30 | 2021-01-04T13:48:30 | 238,857,341 | 1 | 0 | null | 2020-03-03T13:53:08 | 2020-02-07T06:21:46 | HTML | UTF-8 | Python | false | false | 409 | py | import gevent
import time
import random
from gevent import monkey
def coroutine_work(coroutine_name):
for i in range(10):
print(coroutine_name, i)
time.sleep(random.random())
def main():
monkey.patch_all() # 打补丁
gevent.joinall([
gevent.spawn(coroutine_work, "work1"),
gevent.spawn(coroutine_work, "work2")
])
if __name__ == "__main__":
main() | [
"xxhan2018@163.com"
] | xxhan2018@163.com |
ce745bc83fd08998d867c0ec245ba218d19c4298 | 6e6ef650d0fd5e5006dab4d755bb4ac77de43072 | /openreview/create_basic_openreview_dataset.py | aba3de742d65cd752a0945164be8a4122fb474c7 | [] | no_license | raymondroc/an-open-review-of-openreview | b2f2a4dcd7badbbd55ab535f4f319ae8d888afa6 | 7d94a42096759b36739090f9801dc2a09dec0380 | refs/heads/main | 2023-01-10T15:22:06.851193 | 2020-11-13T18:39:30 | 2020-11-13T18:39:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,821 | py | from __future__ import print_function
import openreview
import pandas as pd
import pickle
def parse_data():
"""Clean and parse raw data"""
lines = []
header = ("conference,year,paper,authors,"
"emails,ratings,confidences,decisions,"
"cmt_before_review,"
"cmt_between,cmt_after_decision,double_blinded")
lines.append(header)
conference = "ICLR"
def parse_score(line):
return line.split(":")[0]
for year in list(papers.keys()):
for key in list(papers[year].keys()):
# extract title, authors, and emails
paper_content = papers[year][key].content
title = paper_content['title']
authors = ";".join(paper_content['authors'])
emails = ";".join(paper_content['authorids'])
ratings = ""
confidences = ""
decision = "Withdrawn"
cmt_before_review = ""
cmt_between = ""
cmt_after_decision = ""
# check if the confenrence is double blinded based on their website
double_blinded = "no" if year == 2017 else "yes"
if key in decisions[year]:
# extract decision and decision date
decision_content = decisions[year][key].content
if year == 2019:
decision = decision_content['recommendation']
else:
decision = decision_content['decision']
decision_date = decisions[year][key].tcdate
if key in reviews[year]:
# extract rating, confidence and first review date
review_notes = reviews[year][key]
ratings = []
confidences = []
review_date = -1
for review in review_notes:
# first review date
if review_date == -1 or review.tcdate < review_date:
review_date = review.tcdate
# rating
ratings.append(parse_score(review.content["rating"]))
# confidence if exists
if "confidence" in review.content:
confidences.append(parse_score(review.content["confidence"]))
ratings = ";".join(ratings)
confidences = ";".join(confidences)
# extract comment before review, between review and decision,
# and after decision
cmt_before_review = 0
cmt_between = 0
if key in decisions[year]:
cmt_after_decision = 0
for comments in [public_comments, official_comments]:
if year in comments and key in comments[year]:
for comment in comments[year][key]:
if comment.tcdate < review_date:
cmt_before_review += 1
elif key in decisions[year]:
if review_date < comment.tcdate < decision_date:
cmt_between += 1
else:
cmt_after_decision += 1
else:
if review_date < comment.tcdate:
cmt_between += 1
if decision == "Withdrawn":
cmt_after_decision = ""
line = (f"{conference},"
f"{year},"
f"\"{title}\","
f"\"{authors}\","
f"\"{emails}\","
f"{ratings},"
f"{confidences},"
f"\"{decision}\","
f"{cmt_before_review},"
f"{cmt_between},"
f"{cmt_after_decision},"
f"{double_blinded}")
lines.append(line)
return lines
if __name__ == "__main__":
# import raw data
papers, decisions, reviews = None, None, None
public_comments, official_comments = None, None
with open('papers.p', 'rb') as fp:
papers = pickle.load(fp)
with open('decisions.p', 'rb') as fp:
decisions = pickle.load(fp)
with open('reviews.p', 'rb') as fp:
reviews = pickle.load(fp)
with open('public_comments.p', 'rb') as fp:
public_comments = pickle.load(fp)
with open('official_comments.p', 'rb') as fp:
official_comments = pickle.load(fp)
# remove unecessary information that's not relevant to any of the existing papers
for year in papers.keys():
for key in list(decisions[year].keys()):
if key not in papers[year]:
del decisions[year][key]
for key in list(reviews[year].keys()):
if key not in papers[year]:
del reviews[year][key]
if year in public_comments:
for key in list(public_comments[year].keys()):
if key not in papers[year]:
del public_comments[year][key]
if year in official_comments:
for key in list(official_comments[year].keys()):
if key not in papers[year]:
del official_comments[year][key]
# 2018 has an extra decision
del papers[2018]["ry5wc1bCW"]
del decisions[2018]["ry5wc1bCW"]
# parse data
lines = parse_data()
# export to a csv file
with open("openreview.csv", "a") as f:
for line in lines:
f.write(line + "\n")
| [
"kganapathy23@gmail.com"
] | kganapathy23@gmail.com |
db6dac8b0b6013de4ea57a1b57fa20f6b8d368f8 | 0ddcfcbfc3faa81c79e320c34c35a972dab86498 | /puzzles/add_and_search_word.py | 13c99ec33dbc7abde7199d0dc2552efa2636dc28 | [] | no_license | IvanWoo/coding-interview-questions | 3311da45895ac4f3c394b22530079c79a9215a1c | 1312305b199b65a11804a000432ebe28d1fba87e | refs/heads/master | 2023-08-09T19:46:28.278111 | 2023-06-21T01:47:07 | 2023-06-21T01:47:07 | 135,307,912 | 0 | 0 | null | 2023-07-20T12:14:38 | 2018-05-29T14:24:43 | Python | UTF-8 | Python | false | false | 2,277 | py | # https://leetcode.com/problems/add-and-search-word-data-structure-design/
"""
Design a data structure that supports the following two operations:
void addWord(word)
bool search(word)
search(word) can search a literal word or a regular expression string containing only letters a-z or .. A . means it can represent any one letter.
Example:
addWord("bad")
addWord("dad")
addWord("mad")
search("pad") -> false
search("bad") -> true
search(".ad") -> true
search("b..") -> true
Note:
You may assume that all words are consist of lowercase letters a-z.
"""
from dataclasses import dataclass, field
from typing import Any, Dict, Optional
@dataclass
class TrieNode:
children: Dict[str, "TrieNode"] = field(default_factory=dict)
# we don't need to save the val, simply using a isEnd flag is enough
value: Optional[Any] = None
class WordDictionary:
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = TrieNode()
def addWord(self, word: str) -> None:
"""
Adds a word into the data structure.
"""
node = self.root
for char in word:
if char not in node.children:
node.children[char] = TrieNode()
node = node.children[char]
node.value = word
def searchHelper(self, word: str, index: int, node: TrieNode) -> bool:
if index == len(word):
return node.value != None
if word[index] == ".":
return any(
[
self.searchHelper(word, index + 1, node.children[child])
for child in node.children
]
)
if word[index] not in node.children:
return False
return self.searchHelper(word, index + 1, node.children[word[index]])
def search(self, word: str) -> bool:
"""
Returns if the word is in the data structure. A word could contain the dot character '.' to represent any one letter.
"""
return self.searchHelper(word, 0, self.root)
if __name__ == "__main__":
obj = WordDictionary()
for word in ["bad", "dad", "mad", "pad"]:
obj.addWord(word)
for word in ["bad", ".ad", "b.."]:
print(f"{obj.search(word)=}")
| [
"tyivanwu@gmail.com"
] | tyivanwu@gmail.com |
a9dd8620e61118abf0707b5fb0f71735b60984ba | b45d66c2c009d74b4925f07d0d9e779c99ffbf28 | /gp/business_logic/business_objects/monopoly.py | f40bcc4ebbf90d3908c5f5b1da8a279f7018e9f4 | [] | no_license | erezrubinstein/aa | d96c0e39762fe7aaeeadebbd51c80b5e58576565 | a3f59ba59519183257ed9a731e8a1516a4c54b48 | refs/heads/master | 2021-03-12T23:44:56.319721 | 2016-09-18T23:01:17 | 2016-09-18T23:01:17 | 22,665,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | import datetime
__author__ = 'erezrubinstein'
class Monopoly(object):
def __init__(self, store_id, monopoly_type_id, trade_area_id, start_date, end_date):
self.store_id = store_id
self.monopoly_type_id = monopoly_type_id
self.trade_area_id = trade_area_id
self.start_date = start_date
self.end_date = end_date
def __eq__(self, other):
# sometimes mongo selects the start date slightly off. so this just makes sure they're within one seconds
return self.store_id == other.store_id and self.monopoly_type_id == other.monopoly_type_id and self.trade_area_id == other.trade_area_id and \
(other.start_date - self.start_date) < datetime.timedelta(seconds = 1) and \
(other.end_date - self.end_date) < datetime.timedelta(seconds = 1) | [
"erezrubinstein@hotmail.com"
] | erezrubinstein@hotmail.com |
581e1c1d60197ef7fb2dec645612f33e5e7cfc99 | 930b54f510c25ba970bc17684006df28ee627c0f | /diversos/ExampleGenerateLog.py | 0e24bb5483fe04392d95bcb61a81ba1833184ba1 | [] | no_license | fgirardi/SourcePython | 33e3cfcaa328b7943855dabbe219730f338b1b7a | 43538d95c00a67e11154598641f71409f71a856b | refs/heads/master | 2020-04-10T03:58:15.331458 | 2019-08-24T22:04:30 | 2019-08-24T22:04:30 | 124,260,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | import logging
import time
logging.basicConfig(level = logging.INFO, filename = time.strftime("my-%Y-%m-%d.log"))
logging.debug("debug")
logging.info("info")
logging.warning("warning")
logging.error("error")
logging.critical("critical")
| [
"girardi.fabiano@gmail.com"
] | girardi.fabiano@gmail.com |
26241c413c173bca992b68e16ce9ce180b940a87 | e1fcff197b13428fe2e17846d6eac60952b8dcc7 | /src/featurization/featurize.py | 805c901e15d64f7f53f62ad7f9ff4d6e7fca1f40 | [
"MIT"
] | permissive | bejerano-lab/X-CAP | 254ffec90478b21e32bc1e1c3511ed818ccd117f | 4ceeea89b00aa72024c30ae52fc369f360e26390 | refs/heads/main | 2023-04-07T11:43:59.532265 | 2022-06-22T23:52:39 | 2022-06-22T23:52:39 | 499,202,286 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,313 | py | import argparse
import os
import sys
from tqdm import tqdm
import conservation
import gene_essentiality
import locations
import metadata
import omim
import read_through
import reinitiation
import rvis
import splicing
import transcript
import transcript_class
import utils
def init(reference_data, annovar_dir):
gencode_dir = os.path.join(annovar_dir, "gencode33_ensembl99")
gencode_transcripts_path = os.path.join(gencode_dir, "hg38_ensGene.txt")
gencode_mrna_path = os.path.join(gencode_dir, "hg38_ensGeneMrna.fa")
gencode_attributes_path = os.path.join(gencode_dir, "tempdir", "wgEncodeGencodeAttrsV33.txt")
utils.init_transcript_map(gencode_transcripts_path, gencode_mrna_path)
gene_essentiality.init(reference_data)
rvis.init()
omim.init()
transcript_class.init(reference_data)
metadata.init_variant_data(reference_data)
splicing.init(gencode_attributes_path)
conservation.init_bws()
def run_on_data(input_data, reference_data, annovar_dir, is_training_set):
init(reference_data, annovar_dir)
header, feature_table, labels = None, [], []
print("Featurizing data")
for row in tqdm(input_data):
features, names = [], []
#### GENE ESSENTIALITY
gene_oe = gene_essentiality.get_average_gene_oe_lof(row)
transcript_oe = gene_essentiality.get_average_transcript_oe_lof(row)
rvis_percentile = rvis.get_average_rvis(row)
recessive_gene = omim.on_recessive_gene(row)
dominant_gene = omim.on_dominant_gene(row)
monoclass_pathogenic_transcript = transcript_class.on_monoclass_pathogenic_transcript(row, is_training_set)
monoclass_pathogenic_exon = transcript_class.on_monoclass_pathogenic_exon(row, is_training_set)
features += [gene_oe, transcript_oe, rvis_percentile, recessive_gene, dominant_gene,
monoclass_pathogenic_transcript, monoclass_pathogenic_exon]
names += ["gene_oe_lof", "transcript_oe_lof", "rvis", "recessive_gene", "dominant_gene",
"monoclass_pathogenic_transcript", "monoclass_pathogenic_exon"]
#### ZYGOSITY
zygosity = metadata.get_zygosity(row)
features += [zygosity]
names += ["zygosity"]
#### VARIANT LOCATION
distance_from_cds_start = locations.get_distance_from_cds_start(row)
distance_from_cds_end = locations.get_distance_from_cds_end(row)
relative_location_in_cds = locations.get_relative_location_in_cds(row)
features += [distance_from_cds_start, distance_from_cds_end, relative_location_in_cds]
names += ["distance_from_cds_start", "distance_from_cds_end", "relative_location_in_cds"]
distance_from_exon_start = locations.get_distance_from_exon_start(row)
distance_from_exon_end = locations.get_distance_from_exon_end(row)
relative_location_in_exon = locations.get_relative_location_in_exon(row)
exon_length = locations.get_overlapped_exon_length(row)
features += [distance_from_exon_start, distance_from_exon_end, relative_location_in_exon, exon_length]
names += ["distance_from_exon_start", "distance_from_exon_end", "relative_location_in_exon", "exon_length"]
overlapped_exon_num = locations.get_overlapped_exon_num(row)
num_exons = transcript.get_num_exons(row)
on_autosome = locations.on_autosomal_chromosome(row)
on_X = locations.on_X_chromosome(row)
on_Y = locations.on_Y_chromosome(row)
features += [overlapped_exon_num, num_exons, on_autosome, on_X, on_Y]
names += ["overlapped_exon_num", "num_exons", "on_autosome", "on_X", "on_Y"]
#### NMD
dist_from_last_exon_exon_junction = locations.get_distance_from_last_exon_exon_junction(row)
percentage_of_transcripts_with_nmd = locations.get_percentage_of_transcripts_with_nmd(row)
features += [dist_from_last_exon_exon_junction, percentage_of_transcripts_with_nmd]
names += ["dist_from_last_exon_exon_junction", "%_transcripts_with_nmd"]
#### SPLICING
can_be_spliced_out = splicing.can_be_spliced_out(row)
features += [can_be_spliced_out]
names += ["can_be_spliced_out"]
#### STOP CODON READ THROUGH
kmers, stop_codon_encoding = read_through.get_stop_codon(row, num_context=0)
features += stop_codon_encoding
names += kmers
#### TRANSLATION REINITIATION
dist_to_next_start_codon = reinitiation.get_dist_to_next_start_codon(row)
features += [dist_to_next_start_codon]
names += ["dist_to_next_start_codon"]
#### CONSERVATION
overlapped_exon_phylop = conservation.get_mean_phylop_on_overlapped_exon(row)
overlapped_exon_phastcons = conservation.get_mean_phastcons_on_overlapped_exon(row)
upstream_phylop = conservation.get_mean_phylop(row, upstream=True)
upstream_phastcons = conservation.get_mean_phastcons(row, upstream=True)
downstream_phylop = conservation.get_mean_phylop(row, upstream=False)
downstream_phastcons = conservation.get_mean_phastcons(row, upstream=False)
features += [overlapped_exon_phylop, overlapped_exon_phastcons, upstream_phylop, upstream_phastcons,
downstream_phylop, downstream_phastcons]
names += ["overlapped_exon_phylop", "overlapped_exon_phastcons", "upstream_phylop",
"upstream_phastcons", "downstream_phylop", "downstream_phastcons"]
header = names
feature_table.append(features)
labels.append(metadata.get_label(row))
return header, feature_table, labels
def run_on_file(input_filepath, reference_filepath, output_filepath, annovar_dir, is_training_set):
input_data = [line.strip().split('\t') for line in open(input_filepath)]
reference_data = [line.strip().split('\t') for line in open(reference_filepath)]
header, features, labels = run_on_data(input_data, reference_data, annovar_dir, is_training_set)
with open(output_filepath, 'w') as outfile:
header.append("label")
outfile.write('\t'.join(header) + '\n')
for f, l in zip(features, labels):
output = f + [l]
output = '\t'.join(map(str, output))
outfile.write(output + '\n')
def main():
parser = argparse.ArgumentParser(description="X-CAP feature generation tool")
parser.add_argument("input_file", type=str, help="file containing the data to featurize")
parser.add_argument("reference_file", type=str, help="file containing the training dataset")
parser.add_argument("output_file", type=str, help="file where features will be output")
parser.add_argument("annovar_dir", type=str, help="ANNOVAR dir prepared with GencodeV33 transcript set")
parser.add_argument("--is_training_set", type=bool, nargs='?', default=False, const=True,
help="input_filepath contains the training_data")
args = parser.parse_args()
print("Running X-CAP. Input: {}, reference: {}, output: {}, is training set?: {}".format(
args.input_file, args.reference_file, args.output_file, args.is_training_set))
run_on_file(args.input_file, args.reference_file, args.output_file, args.annovar_dir, args.is_training_set)
if __name__ == '__main__':
main()
| [
"rrastogi@stanford.edu"
] | rrastogi@stanford.edu |
cc9b53516891ad24d377268578b0a33545678476 | cc13c060d7d3ed555d68073c770fc0aca3c67f82 | /project-euler/p120.py | 22d9eafb1fc0366c71a382683916e5589a729e70 | [] | no_license | Devang-25/acm-icpc | 8bd29cc90119a6269ccd61729d8e67a8b642b131 | a036bdbba60f8b363ee2d6cb9814e82e5536e02e | refs/heads/master | 2022-04-01T02:52:51.367994 | 2020-01-24T11:46:01 | 2020-01-24T11:46:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | # a + 1
# a^2 + 2a + 1 = 2a + 1
# 2a^2 + 2a = 3a + 1
# ...
# a - 1
# a^2 - 2a + 1 = -2a + 1
# (-2a + 1) (a - 1) = 3a - 1
# (3a - 1)(a - 1) = -4a + 1
def solve(a):
result = 0
for n in range(1, 2 * a):
r = n * a + 1
if n % 2 == 0:
r -= n * a - 1
else:
r += n * a - 1
r %= a * a
if r < 0:
r += a * a
result = max(result, r)
return result
result = 0
for a in range(3, 1001):
result += solve(a)
print(result)
| [
"ftiasch0@gmail.com"
] | ftiasch0@gmail.com |
1c930e410b98d1fae3b2475161e5d1d4f296a1a8 | 3a499c01c976c0d985d9921c64a9ed2bdd8ae457 | /etc/hello.py | 3bea3975c2ade2b3b2307676b6718206ffb3b827 | [] | no_license | kkmjj/sparta_study | 1430ae39d4f38bc4771f0e4b974780ff9037212a | b4dc630d737f90905e45ed34e0949efc69c526cb | refs/heads/master | 2020-09-21T09:04:49.722474 | 2020-02-07T03:49:05 | 2020-02-07T03:49:05 | 224,749,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,807 | py | name = ' bob' # 자료형을 따로 안만들어도 됨
num = 12
print (name, num)
# 리스트
a_list = []
a_list.append(1)
a_list.append([1, 2])
print(a_list)
# dic
a_dict = {}
# 함수
def sum_all(a, b, c):
return a + b + c
#퀴즈
def name(name):
if(name=="김민준"):
print (name)
name("김민준")
name("김준")
#반복문
for_list =[1,2,3,4,5]
def sum(mylist):
sum=0
for i in mylist:
sum =sum+i
print(sum)
sum(for_list)
#range
rang_list =range(10)
print(rang_list[0])
#이름만뽑기
for_list
a = 'spartacodingclub@gmail.com'
#채워야하는 함수
def check_mail(s): # 여기에 코딩을 해주세요 #결과값
for i in a:
if(i=='@'):
print('true')
check_mail(a)
#아래와 같이 출력됩니다
True
#입력값
a = ['사과','감','감','배','포도','포도','딸기','포도','감','수박','딸기']
#채워야하는 함수
fruit = {}
def count_list(a_list): # 여기에 코딩을 해주세요 #결과값
for i in a:
print(i)
if i in fruit:
fruit[i]+=1
else:
fruit[i]=1
count_list(a)
print(fruit)
#아래와 같이 출력됩니다
{'사과': 1, '감': 3, '배': 1, '포도': 3, '딸기': 2, '수박': 1}
#requests
import requests # requests 라이브러리 설치 필요
r = requests.get('http://openapi.seoul.go.kr:8088/6d4d776b466c656533356a4b4b5872/json/RealtimeCityAir/1/99')
rjson = r.json()
print (rjson['RealtimeCityAir']['row'][0]['NO2'])
#크롤링 배우기
import requests
from bs4 import BeautifulSoup
# URL을 읽어서 HTML를 받아오고,
headers = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}
data = requests.get('https://movie.naver.com/movie/sdb/rank/rmovie.nhn?sel=pnt&date=20190909',headers=headers)
# HTML을 BeautifulSoup이라는 라이브러리를 활용해 검색하기 용이한 상태로 만듦
soup = BeautifulSoup(data.text, 'html.parser')
# select를 이용해서, tr들을 불러오기
movies = soup.select('#old_content > table > tbody > tr')
# movies (tr들) 의 반복문을 돌리기
for movie in movies:
# movie 안에 a 가 있으면,
a_tag = movie.select_one('td.title > div > a')
a_ac = movie.select_one('td.point')
if a_ac is not None:
a_ac = movie.select_one('td.point').text
if a_tag is not None:
# a의 text를 찍어본다.
print (a_tag.text, a_ac)
#엑셀 활용하기 openpyxl file -> setting 에서 install
from openpyxl import load_workbook
work_book = load_workbook('prac01.xlsx')
work_sheet = work_book['prac']
# 데이터를 읽어봅니다.
temp_text = work_sheet.cell(row = 1, column = 1).value # 1행 1열을 읽어온다
print (temp_text)
work_sheet.cell(row=3, column=2, value='홍길동')
# 수정한 엑셀파일을 저장합니다.
# 참고: 다른이름으로 저장할 수도 있습니다.
work_book.save('prac01.xlsx')
for movie in movies:
# movie 안에 a 가 있으면,
a_tag = movie.select_one('td.title > div > a')
a_ac = movie.select_one('td.point')
if a_ac is not None:
a_ac = movie.select_one('td.point').text
if a_tag is not None:
# a의 text를 찍어본다.
print (a_tag.text, a_ac)
#실습
i=0
for movie in movies:
a_tag = movie.select_one('td.title > div > a')
a_ac = movie.select_one('td.point')
if a_tag is not None:
i = i + 1
work_sheet.cell(row=i, column=1, value=i)
work_sheet.cell(row=i, column=2, value=a_tag.text)
work_book.save('prac01.xlsx')
if a_ac is not None:
work_sheet.cell(row=i, column=3, value=a_ac.text)
work_book.save('prac01.xlsx') | [
"mjkim1201@naver.com"
] | mjkim1201@naver.com |
fa66f392d5e1dd703fe2551c4d35c4dcbf3660a8 | 275c84881b0eeb06df1ac44fb84c64991d8b41e4 | /env/bin/alembic | ac3b4718fbc778bfe2ad264ccea3949b71d41983 | [] | no_license | natagonz/mambang | ddd37de0b15717a9a3292e381fa5cb482d3fa5ed | cabab22dfaf04d31fb6b6bebbaaa92cffa9b0e0b | refs/heads/master | 2020-03-11T00:36:11.013564 | 2018-04-16T01:36:07 | 2018-04-16T01:36:07 | 129,667,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | #!/var/www/html/ems/env/bin/python2
# -*- coding: utf-8 -*-
# Console-script shim for Alembic, generated for this virtualenv.
import re
import sys
from alembic.config import main
if __name__ == '__main__':
    # Strip the "-script.pyw"/".exe" suffix Windows launchers append to
    # argv[0] so Alembic reports a clean program name, then exit with its
    # return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"root@dev.mambangsehat.com"
] | root@dev.mambangsehat.com | |
91abe388075942a28f543fc7085eb9c91a707e33 | c1aa719c6d0f66c0530e06e478c13f3c1fbcc5de | /Fabrizio/RNN/src_RNN/allprocess/DataSet.py | 3b6004fe9a48ad53cc1845928b82347f1b89738b | [] | no_license | deepbrain2016/websiteclassificator | 5d3c2c77c39a778aa79f483b0bc2eae6618470a2 | dde350f28d6df3fe267ebde7a8658a9dfb0b7e7c | refs/heads/master | 2021-01-12T09:50:45.532510 | 2016-12-15T16:53:18 | 2016-12-15T16:53:18 | 76,276,524 | 0 | 0 | null | 2016-12-29T08:39:40 | 2016-12-12T16:52:42 | Python | UTF-8 | Python | false | false | 4,057 | py | '''
Created on 03/ott/2016
@author: fabrizio
'''
from Parameters import *
from keras.datasets import imdb
from keras.preprocessing import sequence
import pickle,numpy,sys
class DataSet(object):
    '''
    Loads a train/test dataset for the RNN (IMDB via keras, or one of the
    pickled "contex" corpora selected by Parameters.tipoDataSet) and pads
    every sequence to a fixed length.  NOTE(review): Python 2 code
    (statement-form prints).
    '''
    def load_data_contex(self,nomefile):
        # Each pickled row is (id, target, data).
        # NOTE(review): the file handle passed to pickle.load is never closed.
        reader = pickle.load(open(nomefile, 'rb'))
        id=[]
        target=[]
        data=[]
        i_row=0
        for row in reader:
            #print row
            id.append(row[0])
            target.append(row[1])
            data.append(row[2])
            # Hard cap on the number of rows read.
            if (i_row==1000000):
                break
            i_row=i_row+1
        np_data=numpy.array(data)
        #print np_data
        print "shape",np_data.shape
        # Train/test split position controlled by Parameters.test_split.
        row_split=int(len(target)*(1-self.P.test_split))
        id_train=id[:row_split]
        X_train=data[:row_split]
        y_train=target[:row_split]
        id_test=id[row_split:]
        X_test=data[row_split:]
        y_test=target[row_split:]
        print "max_features: ",self.max_features
        print "max_site_length: ",self.max_site_length
        return (id_train,X_train,y_train),(id_test,X_test,y_test)
    def sample_train(self, id_sample):
        # Print one (X, y) sample from the training split.
        print self.dataSet[0][0][id_sample]
        print self.dataSet[0][1][id_sample]
    def sample_test(self, id_sample):
        # Print one (X, y) sample from the test split.
        print self.dataSet[1][0][id_sample]
        print self.dataSet[1][1][id_sample]
    def load_data(self,tipoDataSet):
        # Dispatch on the configured dataset type; the ids returned by
        # load_data_contex are dropped for the non-IMDB branches.
        print('Loading data...')
        if (tipoDataSet=='imdb'):
            (X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=self.max_features)
            '''debug'''
            # X_train=X_train[0:100]
            # y_train=y_train[0:100]
            #
            # X_test=X_test[0:100]
            # y_test=y_test[0:100]
            ''''''
            print ('dimensione_del_vocabolario:'), self.max_features
        if (tipoDataSet=='contex3'):
            (id_train,X_train,y_train),(id_test,X_test,y_test) = self.load_data_contex(self.P.contex3namefile)
            print ('dimensione_del_vocabolario:'), self.max_features
        if (tipoDataSet=='preproc'):
            (id_train,X_train,y_train),(id_test,X_test,y_test) = self.load_data_contex(self.P.preproc)
            print ('dimensione_del_vocabolario:'), self.max_features
        if (tipoDataSet=='contexW2V'):
            (id_train,X_train,y_train),(id_test,X_test,y_test) = self.load_data_contex(self.P.contexW2Vnamefile)
            print ('dimensione_del_vocabolario:'), self.max_features
        print(len(X_train), 'train sequences')
        print(len(X_test), 'test sequences')
        return (X_train, y_train), (X_test, y_test)
    def __init__(self):
        # Pull the run configuration, load the selected dataset, then pad
        # every sequence to max_site_length and cache the result.
        P=Parameters()
        self.P=P
        self.max_features=P.max_features
        self.max_site_length=P.max_site_length
        tipoDataSet=P.tipoDataSet
        (X_train, y_train), (X_test, y_test) =self.load_data(tipoDataSet)
        #print X_train
        print('Pad sequences (samples x time maxlen:)',self.max_site_length)
        print len(X_train[0][0])
        print (X_train[0][0])
        X_train = sequence.pad_sequences(X_train, maxlen=self.max_site_length,dtype="float32")
        print len(X_train[0][0])
        print (X_train[0][998])
        print len(X_test[0][0])
        print (X_test[0][0])
        X_test = sequence.pad_sequences(X_test, maxlen=self.max_site_length,dtype="float32")
        print len(X_test[0][0])
        print (X_test[0][998])
        print('X_train shape:', X_train.shape)
        print('X_test shape:', X_test.shape)
        self.dataSet= (X_train, y_train), (X_test, y_test)
| [
"defausti@istat.it"
] | defausti@istat.it |
51590b835aa43662e38c926541bf933823e59792 | c664cbe6cd433ec5f63272b1ebecb9ae81dfe10a | /python/union-find/percolation.py | 1ed429f3c47c7193189a351d13777a6910cbd3c7 | [] | no_license | paramsingh/algs4 | 9c0e84375d0e87f0b6e92aee9a000b9e4fcd33bb | 2e2db8f9b431bb52023a1065c4fdcef9ffdc7b2a | refs/heads/master | 2021-01-16T21:37:57.951848 | 2015-07-24T14:05:25 | 2015-07-24T14:09:30 | 37,737,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,132 | py | from weightedQuickUnionUF import WeightedQuickUnionUF
class Percolation:
    """N-by-N percolation system backed by weighted quick-union.

    Two union-find structures are maintained: ``_qf`` has both a virtual
    top and a virtual bottom site (used for the percolation test), while
    ``_nf`` has only the virtual top (used for fullness, which avoids the
    "backwash" artefact).
    """

    def __init__(self, n):
        if n <= 0:
            raise ValueError("n should be greater than 0")
        self._grid = [[False for _ in range(n)] for _ in range(n)]
        self._size = n
        # Union-find with virtual top and virtual bottom sites.
        self._qf = WeightedQuickUnionUF(self._size * self._size + 2)
        # Union-find with only the virtual top (eliminates backwash).
        self._nf = WeightedQuickUnionUF(self._size * self._size + 1)
        self._top = self._size * self._size
        self._bottom = self._top + 1

    def _validate(self, i, j):
        """Check if indexes passed are valid, if invalid, throw an IndexError"""
        if i <= 0 or i > self._size or j <= 0 or j > self._size:
            raise IndexError(str(i) + ", " + str(j))

    def _convert(self, i, j):
        """Map a 0-based (row, col) pair to a flat union-find index."""
        return self._size * i + j

    def open(self, i, j):
        """Open site (row i, column j) if it is not open already.

        Bug fix: sites in the top row are now also unioned with the virtual
        top in ``_nf`` (the original only did so in ``_qf`` except for the
        1x1 grid), so ``is_full`` can actually report True.  The four
        copy-pasted edge branches are replaced by one neighbour loop.
        """
        self._validate(i, j)
        if self.is_open(i, j):
            return
        x, y = i - 1, j - 1
        self._grid[x][y] = True
        site = self._convert(x, y)
        # Virtual-site connections for the boundary rows (a 1x1 grid hits both).
        if x == 0:
            self._qf.union(site, self._top)
            self._nf.union(site, self._top)
        if x == self._size - 1:
            self._qf.union(site, self._bottom)
        # Union with each in-bounds, already-open orthogonal neighbour.
        for ni, nj in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):
            if 1 <= ni <= self._size and 1 <= nj <= self._size and self.is_open(ni, nj):
                neighbour = self._convert(ni - 1, nj - 1)
                self._qf.union(site, neighbour)
                self._nf.union(site, neighbour)

    def is_open(self, i, j):
        """Return True if site (i, j) has been opened."""
        self._validate(i, j)
        return self._grid[i - 1][j - 1]

    def is_full(self, i, j):
        """Return True if site (i, j) is connected to the top row."""
        self._validate(i, j)
        return self._nf.connected(self._convert(i - 1, j - 1), self._top)

    def percolates(self):
        """Return True if the virtual top connects to the virtual bottom."""
        return self._qf.connected(self._top, self._bottom)
if __name__ == '__main__':
    # Interactive driver: read a grid size, then open (row, col) sites one
    # pair per line and report whether the system percolates after each.
    # NOTE(review): uses raw_input and statement-form print, so this driver
    # targets Python 2.
    n = int(raw_input("Enter the size: "))
    p = Percolation(n)
    print "Enter 'quit' to quit"
    i = ''
    while i != 'quit':
        x, y = map(int, raw_input().split())
        if not p.is_open(x, y):
            p.open(x, y)
        if p.percolates():
            print "Percolates"
        else:
            print "Does not percolate"
| [
"paramsingh258@gmail.com"
] | paramsingh258@gmail.com |
1dc0dea861966e90029e565da5df0e5edce1c8de | 73e8ce6e3b1b938359928f9c9e1620e1ded0f8de | /pomtracker/extensions.py | 3e7310b5c5d3094dbef354d7eef0b25ad26b90bc | [] | no_license | haydenhw/pomtracker-flask | 78db7de671453ac3a4a2043b0bbb7914f1d4e34e | bb460a09befaf4bd17129b5d0e1b4766d415869d | refs/heads/master | 2023-04-23T21:57:04.172481 | 2021-04-02T15:44:29 | 2021-04-02T15:44:29 | 284,805,426 | 0 | 0 | null | 2021-05-06T20:25:59 | 2020-08-03T20:55:37 | Python | UTF-8 | Python | false | false | 878 | py | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class CrudMixin:
    """Reusable CRUD helpers for Flask-SQLAlchemy model classes.

    Relies on the module-level ``db`` session and on the ``query``
    attribute Flask-SQLAlchemy attaches to each model class.
    """
    @classmethod
    def find_by_id(cls, id_):
        # Single row with this primary key, or None.
        return cls.query.filter_by(id=id_).first()
    @classmethod
    def find_by_user_id(cls, user_id):
        # Every row owned by the given user.
        return cls.query.filter_by(user_id=user_id).all()
    @classmethod
    def create(cls, **kwargs):
        # Instantiate, persist and return a new row in one call.
        item = cls(**kwargs)
        db.session.add(item)
        db.session.commit()
        return item
    @classmethod
    def update_by_id(cls, updates, id_):
        # Apply the column updates to the row with this id and commit.
        cls.query.filter_by(id=id_).update(updates)
        db.session.commit()
        # find the newly updated row and return it
        return cls.query.filter_by(id=id_).first()
    def save_to_db(self) -> None:
        # Persist this instance (insert or update) and commit immediately.
        db.session.add(self)
        db.session.commit()
    def delete_from_db(self) -> None:
        # Remove this instance from the database and commit immediately.
        db.session.delete(self)
        db.session.commit()
| [
"hayden321@gmail.com"
] | hayden321@gmail.com |
fe1cd37ddd5460332b2a2248effe4f29cf9f8f34 | 0f94eef14ffe0e9502e20f20d0e4c74045e46bec | /views/match.py | ffa81cd89a9f70fd063f6e0ac5405ff2a6b565b8 | [] | no_license | Ikeaven/P4_konrath_kevin | 63d5c229f39f64fe6d60ed0a40c63db7c153229c | b65637e3f3dcb9ad88df08902df48cbebc0cf75e | refs/heads/master | 2023-06-17T02:50:44.278125 | 2021-07-02T13:38:33 | 2021-07-02T13:38:33 | 368,557,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | """ Match View Module """
import pandas as pd
from .utilities import UtilitiesView
class MatchView:
    """Console rendering for a single match."""
    def display_match(self, match):
        """Print all match information in a pandas DataFrame.

        Args:
            match (obj): Match instance exposing ``player1``/``player2``
                (each with first_name, last_name, ranking) and the two
                ``score_player*`` attributes.
        """
        UtilitiesView().line_separator()
        print()
        # One DataFrame row: both players side by side with ranking and score.
        data = {
            'Joueur 1': f'{match.player1.first_name} {match.player1.last_name} ',
            'Classement joueur 1': f'{match.player1.ranking}',
            'Score joueur 1': match.score_player1,
            'VS': '|',
            'Joueur 2': f'{match.player2.first_name} {match.player2.last_name} ',
            'Classement joueur 2': f'{match.player2.ranking} ',
            'Score joueur 2': match.score_player2
        }
        # Centre the column headers only while rendering this frame.
        with pd.option_context('display.colheader_justify', 'center'):
            df = pd.DataFrame(data=data, index=['Match =>'])
            print(df)
        UtilitiesView().line_separator()
| [
"36306272+Ikeaven@users.noreply.github.com"
] | 36306272+Ikeaven@users.noreply.github.com |
be841d0c140f1313cdf1a56717dd41a56a9726d5 | 9e68b186856d0cab2513d2d3ca1b383847e5aa11 | /virtual/bin/django-admin.py | 364ad7a2af5440f2efb00decd6b9659835112c31 | [
"MIT"
] | permissive | Yvonne-Ouma/Neighborhood | 9af5a8a2e802ec1cb987b672a012ec9a63a9cb06 | 9dcd5b65c1deb7a72b022e670790d8242d71bebe | refs/heads/master | 2020-04-01T20:58:23.361371 | 2018-10-23T14:34:06 | 2018-10-23T14:34:06 | 153,629,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | #!/home/yyvonne/Desktop/nhood/virtual/bin/python
from django.core import management
# Virtualenv stub for `django-admin`: hand control to Django's CLI dispatcher.
if __name__ == "__main__":
    management.execute_from_command_line()
| [
"yvonneouma98@gmail.com"
] | yvonneouma98@gmail.com |
664eb7abb9eb41389f8841ef285b03ec9b8d74a8 | 5aedbd845fb6341bc9737c534760b51e3baa868c | /rakutenMobileCheck.py | 28f155ba4fd1fecb9e523b04302f0cb23a99e763 | [] | no_license | suzusho515/- | 83ff9b3fb347f5ec17b330c5010919407360051a | 90272043d7611de1ed444e78672fd1bb83d5e41b | refs/heads/main | 2023-08-14T12:47:26.613656 | 2021-10-03T04:13:33 | 2021-10-03T04:13:33 | 412,972,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,115 | py | import json
from time import sleep
import datetime
import win32com.client
import threading
import eel
from common.driver import Driver
from common.logger import set_logger
DATE_FORMAT = '%Y-%m-%d-%H-%M-%S'
class Search:
    """
    Polls a Rakuten Mobile product page and reports stock availability.

    Runs Selenium checks in a loop until the configured end time, mirrors
    progress to the Eel desktop UI, and sends an Outlook e-mail through
    SendEmail whenever any colour/memory variant is in stock.
    """
    def __init__(self, rakuten_url, setting_json):
        self.rakuten_url = rakuten_url
        self.setting_json = setting_json
        self.stock = []
        # NOTE(review): passes the literal string "__name__" rather than the
        # module's __name__ — confirm this is intentional.
        self.logger = set_logger("__name__")
    # Main polling loop.
    def search_rakuten(self):
        # Read the configuration values from setting.json.
        json_parameter = self.get_josn()
        # Loop until the configured end time (or until STOP is pressed).
        while self.end_judge():
            # Show the current processing status in the UI.
            eel.view_status("在庫チェック中")
            # Start the web driver.
            driver = Driver.set_driver(False)
            # Open the product page.
            driver.get(self.rakuten_url)
            self.logger.info("楽天モバイルページへ遷移")
            sleep(5)
            # Record when this stock check ran.
            dt = datetime.datetime
            self.check_time =dt.now().strftime('%Y-%m-%d %H:%M:%S')
            # Grab the handset model name from the page header.
            self.model_name = driver.find_element_by_css_selector(
                "div.rktn-equipment__title-container-position > h1.rktn-equipment__title-container-name").text
            eel.view_log_js(f"{self.check_time}:「{self.model_name}」の在庫チェック開始")
            # Collect one element per colour variant.
            color_table = driver.find_elements_by_css_selector(
                "div.rktn-product-colors__content > div.rktn-product-colors__item")
            # Check stock for each colour block.
            for i, color in enumerate(color_table):
                # (commented-out retry of the lookup, kept from the original)
                # color = driver.find_elements_by_css_selector("div.rktn-product-colors__content")
                # Extract the colour label.
                color_name = color.find_element_by_css_selector(
                    "div.rktn-product-colors__item-content-text").text
                # The "out of stock" badge element is absent for in-stock
                # items, so the lookup below raises when the item is available.
                try:
                    stock_check = color.find_element_by_css_selector(
                        "div.rktn-product-colors__item-status").text
                except:
                    stock_check = "在庫あり"
                    # Remember this colour as in stock.
                    self.stock.append(color_name)
                # Log and mirror the result to the desktop text area.
                self.logger.info(f"「{color_name}」:{stock_check}")
                eel.view_log_js(f"「{color_name}」:{stock_check}")
            sleep(3)
            # Collect one element per memory-size variant.
            memory_table = driver.find_elements_by_css_selector(
                "div.rktn-equipment-memory__memories > div.rktn-equipment-memory__memories-content")
            # Check stock for each memory block.
            for i, memory in enumerate(memory_table):
                # (commented-out retry of the lookup, kept from the original)
                # color = driver.find_elements_by_css_selector("div.rktn-product-colors__content")
                # Extract the memory-size label.
                memory_name = memory.find_element_by_css_selector(
                    "div.rktn-equipment-memory__memories-content-item-info > div.rktn-equipment-memory__memories-content-item-info-value").text
                # As above: the status element only exists when out of stock,
                # so the lookup raises when the item is available.
                try:
                    stock_check = memory.find_element_by_css_selector(
                        "div.rktn-equipment-memory__memories-content-item-status").text
                except:
                    stock_check = "在庫あり"
                    # NOTE(review): this appends `color_name` (left over from
                    # the colour loop) rather than the `memory_name` just
                    # read — looks like a copy/paste bug; confirm.
                    self.stock.append(color_name)
                # Log and mirror the result to the desktop text area.
                self.logger.info(f"「{memory_name}GB」:{stock_check}")
                eel.view_log_js(f"「{memory_name}GB」:{stock_check}")
                sleep(1)
            # Close the browser.
            driver.quit()
            # If anything was in stock, send the notification e-mail.
            if len(self.stock) > 0:
                self.logger.info("メール通知処理")
                sendemail = SendEmail(json_parameter,self.stock,self.check_time,self.model_name)
                sendemail.send_email()
            else:
                self.logger.info("全種類在庫なし")
            # Skip the interval wait once the end time has passed.
            if self.end_judge():
                self.logger.info("インターバル中")
                eel.view_status("インターバル中")
                # Wait for the interval configured in setting.json (seconds).
                sleep(int(json_parameter["environment_preference"]["interval"]))
            # Reset the in-stock list for the next pass.
            self.stock = []
        self.logger.info("終了時刻となったので、処理を終了します")
        eel.view_status("処理停止中")
    # Load the configuration values from setting.json.
    # NOTE(review): name keeps the original `get_josn` misspelling because
    # callers use it; the opened file handle is never closed.
    def get_josn(self):
        json_open = open(self.setting_json, 'r')
        json_load = json.load(json_open)
        return json_load
    # Return True while the current time is before the configured end time.
    def end_judge(self):
        # Current time, truncated to the minute.
        dt = datetime.datetime
        dt_now =dt.now()
        json_parameter = self.get_josn()
        now_time = datetime.datetime(dt_now.year,dt_now.month,dt_now.day,dt_now.hour,dt_now.minute)
        # endTime is stored as a comma-separated "Y,M,D,h,m" string.
        end_time = json_parameter["environment_preference"]["endTime"]
        end_time = end_time.split(",")
        end_time = datetime.datetime(
            int(end_time[0]),int(end_time[1]),int(end_time[2]),int(end_time[3]),int(end_time[4]))
        return now_time < end_time
    # Create the sentinel file used to signal STOP.
    def stop_flag_assign(self):
        self.logger.info("停止判定ファイル生成")
        with open("./stop_flg.dat", 'w', encoding='UTF-8') as f:
            f.write("ダミーファイルです")
class SendEmail():
    """Builds and sends the stock-alert e-mail through the local Outlook client."""
    def __init__(self, json_parameter, stock_list, check_time, model_name):
        self.json_parameter = json_parameter
        self.stock_list = stock_list
        self.check_time = check_time
        self.model_name = model_name
    def send_email(self):
        # Create a new mail item via the Outlook COM interface (item type 0).
        outlook = win32com.client.Dispatch("Outlook.Application")
        mail = outlook.CreateItem(0)
        mail.to = self.json_parameter["environment_preference"]["emailAddress"]
        mail.subject = f"★{self.model_name}の在庫がありました:チェック時間[{self.check_time}]"
        mail.bodyFormat = 1
        # Build the body from the list of in-stock variants, one per line.
        for stock in self.stock_list:
            message = f"「{stock}」:在庫あり" + "\n"
            mail.body += message + "\n"
        mail.Send()
| [
"noreply@github.com"
] | noreply@github.com |
34ce5c921dd9f1b778bc6419d7f3772aa88e3545 | 1193a870dae7233f284b90993d0563a3c6b8bc9d | /number.py | bd9b8255f5e01d3e79ffc32be6dc63c01b656d51 | [] | no_license | sabin-nepal/python | 6892363ffb27d7deb762cdff1b5059b3d2ca41b9 | bfcd8953b30d3bd73d13d092b0b7bfcaf349ccda | refs/heads/master | 2020-04-19T14:50:31.610515 | 2019-02-18T07:20:13 | 2019-02-18T07:20:13 | 168,255,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | #enter 10 number
# Read ten integers from the user into a list (variable names kept from
# the original script; note `list` shadows the builtin).
list = []
for _ in range(10):
    data = int(input('Enter the 10 numbers to be added in list'))
    list.append(data)
print(list)
# Display only the even numbers from the list.
new_list = [number for number in list if number % 2 == 0]
print(new_list)
| [
"sabinnepal2k17@gmail.com"
] | sabinnepal2k17@gmail.com |
d07346a54a738ce1a6f148d25d58bdae099d3070 | e569fed15f699fd5ebd8333671274cf527fabbfb | /Dokkan_Battle/__init__.py | cd1ddc37759d09afd6de2cbff78872b68a36016f | [] | no_license | sagarp-patel/dokkan_set | 09812c142408d720f96ba4d3a75f28605144071c | fc470d2e55d511952719fcc02adf2247db6c3bb4 | refs/heads/master | 2020-05-23T08:20:47.091055 | 2019-05-14T19:36:39 | 2019-05-14T19:36:39 | 186,684,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | #This file will run the application
from DokkanGUI import Window
import sys
from PyQt5 import QtWidgets,QtGui
# Qt application entry point: build the main window and run the event loop,
# exiting the process with Qt's return code.
app = QtWidgets.QApplication(sys.argv)
a_window = Window()
sys.exit(app.exec_())
| [
"sagarp@coyote.csusb.edu"
] | sagarp@coyote.csusb.edu |
2f8969c2405bef1307bb54cc0d04bd39b9cc79cd | 42a2226d9d2d6c6c8e43e07f08bef14ec83ebba6 | /noise_removal/fbs/ebl.py | c2ea756049ff11cb7bc570cb92f797996a25b893 | [] | no_license | sdimitro/minions | 0037adf7f49357562bd70d3c959b83189d338b0f | aab490e5eb9a8d248f880cb87b4c3d76e1179b8c | refs/heads/master | 2020-12-19T22:56:44.404452 | 2017-02-11T16:42:14 | 2017-02-11T16:42:14 | 30,044,992 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py |
from bs4 import BeautifulSoup
import sys
blacklist = [
"place:folder=BOOKMARKS_MENU&folder=UNFILED_BOOKMARKS&folder=TOOLBAR&queryType=1&sort=12&maxResults=10&excludeQueries=1",
"place:type=6&sort=14&maxResults=10",
"http://www.ubuntu.com/",
"http://wiki.ubuntu.com/",
"https://answers.launchpad.net/ubuntu/+addquestion",
"http://www.debian.org/",
"https://one.ubuntu.com/",
"https://www.mozilla.org/en-US/firefox/help/",
"https://www.mozilla.org/en-US/firefox/customize/",
"https://www.mozilla.org/en-US/contribute/",
"https://www.mozilla.org/en-US/about/",
"place:sort=8&maxResults=10",
"https://www.mozilla.org/en-US/firefox/central/"
]
def extract_links(filename):
    """Return the ``href`` of every ``<a>`` tag in the HTML file *filename*.

    Bug fix: the original handed an open file object to BeautifulSoup and
    never closed it; BeautifulSoup parses the whole input in its
    constructor, so the handle is now released deterministically.
    """
    with open(filename) as page:
        soup = BeautifulSoup(page)
    return [link.get('href') for link in soup.find_all('a')]
def filter(links, blacklisted):
    """Return *links* minus the blacklisted URLs (deduplicated, unordered)."""
    return list(set(links).difference(blacklisted))
def print_links(links):
    """Print each link on its own line."""
    for url in links:
        print(url)
def main(filename):
    """Print every link in *filename* that is not on the blacklist."""
    links = extract_links(filename)
    print_links(filter(links, blacklist))

main(sys.argv[1])
| [
"serapheimd@gmail.com"
] | serapheimd@gmail.com |
655844d63f2a4178988a51339f7f95e0c8e8ab96 | 8855e6bc0aa97dbd0ef31ea3dcc3c51d46690891 | /testing/dag_communication.py | 1366a664412d5bc4654e3fb84e3fea12f4932344 | [] | no_license | brian-kalinowski-sonos/airflow-poc | 9be76adb7bb380495cca78f890104205ced16371 | c3e9549cefc1946ea416a78fa6ce63a287ab6ad6 | refs/heads/master | 2021-04-24T05:17:27.201740 | 2020-02-25T18:26:48 | 2020-02-25T18:26:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,206 | py | import unittest
from datetime import datetime
from airflow.models import DagBag,TaskInstance
class TestDagCommiunication(unittest.TestCase):
    """Integration tests for XCom communication in the hello_world_xcoms DAG."""
    # NOTE(review): declared but never referenced by these tests.
    LOAD_SECOND_THRESHOLD = 2
    def setUp(self):
        # Load every DAG from the dagbag and pin the task ids both tests use.
        self.dagbag = DagBag()
        self.emails='airflow@example.com'
        self.dag_id = 'hello_world_xcoms'
        self.from_task ='push_to_xcoms'
        self.to_task1='pull_from_xcoms'
        self.to_task2='templated_xcoms_value'
    def test_xcoms(self):
        """Executing the push task makes its XCom visible to the pull task."""
        dag = self.dagbag.get_dag(self.dag_id)
        push_to_xcoms_task = dag.get_task(self.from_task)
        pull_from_xcoms_task = dag.get_task(self.to_task1)
        execution_date = datetime.now()
        # Run the pushing task for this execution date.
        push_to_xcoms_ti = TaskInstance(task=push_to_xcoms_task, execution_date=execution_date)
        context = push_to_xcoms_ti.get_template_context()
        push_to_xcoms_task.execute(context)
        # A task instance in the same DAG run can now pull the value back.
        pull_from_xcoms_ti = TaskInstance(task=pull_from_xcoms_task, execution_date=execution_date)
        result = pull_from_xcoms_ti.xcom_pull(key="dummyKey")
        self.assertEqual(result, 'dummyValue')
    def test_xcom_in_templated_field(self):
        """The pushed XCom renders inside a templated bash_command field."""
        dag = self.dagbag.get_dag(self.dag_id)
        push_to_xcoms_task = dag.get_task(self.from_task)
        execution_date = datetime.now()
        # Execute the pushing task first so the XCom exists.
        push_to_xcoms_ti = TaskInstance(task=push_to_xcoms_task, execution_date=execution_date)
        context = push_to_xcoms_ti.get_template_context()
        push_to_xcoms_task.execute(context)
        # Render the downstream operator's bash_command template by hand.
        templated_xcoms_value_task = dag.get_task(self.to_task2)
        templated_xcoms_value_ti = TaskInstance(task=templated_xcoms_value_task, execution_date=execution_date)
        context = templated_xcoms_value_ti.get_template_context()
        bash_operator_templated_field = 'bash_command'
        rendered_template = templated_xcoms_value_task.render_template
        bash_command_value = getattr(templated_xcoms_value_task, bash_operator_templated_field)
        bash_command_rendered_value = rendered_template(bash_command_value,context)
        self.assertEqual(bash_command_rendered_value, 'echo dummyValue')
suite = unittest.TestLoader().loadTestsFromTestCase(TestDagCommiunication)
unittest.TextTestRunner(verbosity=2).run(suite) | [
"hardik.furia@sonos.com"
] | hardik.furia@sonos.com |
97fa0e9ee730c87d48b090216f91c70f823bd477 | 682638ecc330000ac70d92db66552546aaa72d2f | /eg.py | b2100faeadce53e2dc2275b1fa9765c09b758f44 | [] | no_license | inirpa/fashion-mnist-keras | 5f2e095af2f7a939d22bdccfe513ee5e8d29a8f2 | b31b522e23d52c2b4838bcf7a7bd773c6a12e37e | refs/heads/master | 2022-01-06T20:17:30.405540 | 2019-02-18T04:12:27 | 2019-02-18T04:12:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,771 | py | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

######### download and extract data set
# Load the fashion-mnist pre-shuffled train data and test data.
# Bug fix: the original called input_data.read_data_sets(...), which returns
# a Datasets object and cannot be unpacked into ((x, y), (x, y)) tuples;
# use the Keras fashion-MNIST loader instead.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
print("x_train shape:", x_train.shape, "y_train shape:", y_train.shape)

# Scale pixels to [0, 1] and add the channel axis expected by Conv2D below.
x_train = x_train.reshape(-1, 28, 28, 1).astype('float32') / 255
x_test = x_test.reshape(-1, 28, 28, 1).astype('float32') / 255

# Bug fix: x_valid / y_valid were used in model.fit but never defined —
# hold out the first 5000 training images for validation.
(x_train, x_valid) = x_train[5000:], x_train[:5000]
(y_train, y_valid) = y_train[5000:], y_train[:5000]

# One-hot encode the integer labels for categorical_crossentropy.
y_train = tf.keras.utils.to_categorical(y_train, 10)
y_valid = tf.keras.utils.to_categorical(y_valid, 10)
y_test = tf.keras.utils.to_categorical(y_test, 10)

model = tf.keras.Sequential()
# Must define the input shape in the first layer of the neural network
model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=2, padding='same', activation='relu', input_shape=(28, 28, 1)))
model.add(tf.keras.layers.MaxPooling2D(pool_size=2))
model.add(tf.keras.layers.Dropout(0.3))
model.add(tf.keras.layers.Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=2))
model.add(tf.keras.layers.Dropout(0.3))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(256, activation='relu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(10, activation='softmax'))
# Take a look at the model summary
model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# Bug fix: `checkpointer` was referenced in the callbacks list but never
# created — save the best weights seen on the validation set.
checkpointer = tf.keras.callbacks.ModelCheckpoint(filepath='model.weights.best.hdf5',
                                                  verbose=1,
                                                  save_best_only=True)

model.fit(x_train,
          y_train,
          batch_size=64,
          epochs=10,
          validation_data=(x_valid, y_valid),
          callbacks=[checkpointer])

# Evaluate the model on test set
score = model.evaluate(x_test, y_test, verbose=0)
# Print test accuracy
print('\n', 'Test accuracy:', score[1])
| [
"nirparai@yahoo.com"
] | nirparai@yahoo.com |
854af7420db7d0f4e86a40c4f14decf87adb1b66 | 283f5fb9b6e17b51ec087cb21d0f8a2ed1441f61 | /src/datasets/k_fold_split.py | 53f7abe49a511767412477f8b27bc5478ee62f74 | [] | no_license | javierleung/bias-detection | 4af1667a637a3b0d0778ef2f1b5bd727018bde00 | 4563c805a6c77a0c3706a3abf8ac84e676edd26a | refs/heads/master | 2022-04-12T17:14:06.975381 | 2020-04-01T13:19:43 | 2020-04-01T13:19:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | """
Utilitary functions for splitting a dataset into subsets.
"""
import torch
from torch.utils.data import Dataset, Subset
def k_fold_split(dataset, k):
    """Return k pairs of (train_subset, test_subset) representing k-fold splits.

    The dataset indices are partitioned into *k* contiguous, near-equal folds
    (the first ``len(dataset) % k`` folds receive one extra sample).  Fold
    ``f`` is the test subset of pair ``f``; the remaining indices form the
    matching train subset.

    Args:
        dataset: any sized, indexable dataset (e.g. ``torch.utils.data.Dataset``).
        k: number of folds; must satisfy ``1 <= k <= len(dataset)``.

    Returns:
        list of ``(train_subset, test_subset)`` tuples of ``Subset`` objects.

    Raises:
        ValueError: if ``k`` is outside ``[1, len(dataset)]``.
    """
    n = len(dataset)
    if not 1 <= k <= n:
        raise ValueError(f"k must be in [1, {n}], got {k}")
    base, extra = divmod(n, k)
    indices = list(range(n))
    splits = []
    start = 0
    for fold in range(k):
        stop = start + base + (1 if fold < extra else 0)
        test_idx = indices[start:stop]
        train_idx = indices[:start] + indices[stop:]
        splits.append((Subset(dataset, train_idx), Subset(dataset, test_idx)))
        start = stop
    return splits
| [
"andrecruz97@gmail.com"
] | andrecruz97@gmail.com |
795985da57f6d924af7ddb13359a42bc964faca8 | 334d0a4652c44d0c313e11b6dcf8fb89829c6dbe | /checkov/terraform/checks/resource/aws/ImagebuilderImageRecipeEBSEncrypted.py | 754146fc760da36332b301b41159066dcef14f23 | [
"Apache-2.0"
] | permissive | schosterbarak/checkov | 4131e03b88ae91d82b2fa211f17e370a6f881157 | ea6d697de4de2083c8f6a7aa9ceceffd6b621b58 | refs/heads/master | 2022-05-22T18:12:40.994315 | 2022-04-28T07:44:05 | 2022-04-28T07:59:17 | 233,451,426 | 0 | 0 | Apache-2.0 | 2020-03-23T12:12:23 | 2020-01-12T20:07:15 | Python | UTF-8 | Python | false | false | 1,199 | py | from typing import Dict, List, Any
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
from checkov.common.models.enums import CheckCategories, CheckResult
class ImagebuilderImageRecipeEBSEncrypted(BaseResourceCheck):
    """Checks aws_imagebuilder_image_recipe EBS mappings for CMK encryption."""
    def __init__(self):
        name = "Ensure that Image Recipe EBS Disk are encrypted with CMK"
        id = "CKV_AWS_200"
        supported_resources = ["aws_imagebuilder_image_recipe"]
        categories = [CheckCategories.ENCRYPTION]
        super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
    def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:
        # FAIL as soon as any ebs mapping lacks `encrypted` or `kms_key_id`;
        # resources without block_device_mapping pass by default.
        if conf.get('block_device_mapping'):
            mappings = conf.get('block_device_mapping')
            for mapping in mappings:
                if mapping.get("ebs"):
                    ebs = mapping["ebs"][0]
                    # NOTE(review): conf values are list-wrapped here, and a
                    # list like [False] is truthy — `encrypted = [False]`
                    # would still pass this truthiness test; confirm against
                    # checkov's conf normalisation.
                    if not ebs.get("encrypted"):
                        return CheckResult.FAILED
                    if not ebs.get("kms_key_id"):
                        return CheckResult.FAILED
        # pass thru
        return CheckResult.PASSED
check = ImagebuilderImageRecipeEBSEncrypted() | [
"noreply@github.com"
] | noreply@github.com |
c39e8230c431368878239dd20fc0eb3c5513713d | 65184b509bd974d0475f7b51c50b7b6701adf57e | /packageDaisy/euler.py | 00a7a1dd501c508f48c4277d09b60813ce668caa | [] | no_license | Locki22/Assignment5 | a043ea23e6c4a881d3b14da432f6d17a6b1802d5 | 0e250d7e9df4a41b7e3735869a6794529a57d754 | refs/heads/master | 2020-06-17T05:16:19.555629 | 2019-07-26T21:06:28 | 2019-07-26T21:06:28 | 195,809,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 15 14:59:51 2019
@author: nickh
"""
def euler(initial, tendency, h=1):
    """Take one forward-Euler integration step.

    Computes ``initial + h * tendency``.

    Arguments
    ---------
    initial : float
        The initial state.
    tendency : float
        The rate of change of the state.

    Keyword arguments
    -----------------
    h = 1 : float
        The timestep duration.
    """
    increment = h * tendency
    return initial + increment
"noreply@github.com"
] | noreply@github.com |
9bdd0b54603f4bced8f4c82edb28d3dca4e88841 | 4a191e5aecd53c4cea28482a0179539eeb6cd74b | /blogproject/settings.py | a7f99cc15f936dff53808f9385c2c2992e57abbc | [] | no_license | jiangjingwei/blogproject | 631a2e8e2f72420cce45ddaf152174852376d831 | daf14e88092dc030a3ab0c295ee06fb6b2164372 | refs/heads/master | 2020-03-14T23:29:08.052253 | 2018-05-10T11:35:59 | 2018-05-10T11:35:59 | 131,846,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,596 | py | """
Django settings for blogproject project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm&=b!x8(eqh&ek!4e_)#h@=g$6sjfd1ulx*exs4$d1!h&tef@@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'localhost', '.jjwxy.com', '139.196.81.14']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'gunicorn',
'blog',
'comments',
'haystack',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blogproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blogproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static")
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'blog.whoosh_cn_backend.WhooshEngine',
'PATH': os.path.join(BASE_DIR, 'whoosh_index'),
},
}
HAYSTACK_SEARCH_RESULTS_PER_PAGE = 10
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor' | [
"270159429@qq.com"
] | 270159429@qq.com |
c5e0dd3c000acac9c2ceaabe7ab9ea965ce8179e | c07516e3cd8a0f94be5e77fc8ff83f4552c145d3 | /Day 5/password-generator.py | b31e56264fa320668901292a68c76fd7b610707e | [] | no_license | zaahidali/100-Days-of-Python | 3556c5db3a054be8b2a60cd780d59e1da1a23d6c | 1613c0e62430affeb4486d24f358e8bc120c5255 | refs/heads/master | 2023-07-13T05:24:04.593860 | 2021-08-24T17:13:16 | 2021-08-24T17:13:16 | 398,355,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,733 | py | #Password Generator Project
import random
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
symbols = ['!', '#', '$', '%', '&', '(', ')', '*', '+']
print("Welcome to the PyPassword Generator!")
nr_letters= int(input("How many letters would you like in your password?\n"))
nr_symbols = int(input(f"How many symbols would you like?\n"))
nr_numbers = int(input(f"How many numbers would you like?\n"))
#Eazy Level - Order not randomised:
#e.g. 4 letter, 2 symbol, 2 number = JduE&!91
#
let = random.sample(letters,nr_letters)
nums = random.sample(numbers,nr_symbols)
syms = random.sample(symbols,nr_numbers)
x = ''.join(let)
print(x)
y = ''.join(nums)
print(y)
z = ''.join(syms)
print(z)
random_password = x+y+z
print(random_password)
llist = list(random_password)
random.shuffle(llist)
print(''.join(llist))
#######
#Pngp97!(
#g!Pnp9(7
random_letters = random.sample(letters,nr_letters)
random_numbers = random.sample(numbers, nr_numbers)
random_symbols = random.sample(symbols, nr_symbols)
generated_password = []
generated_password = random_letters + random_numbers + random_symbols
print("Before: ",''.join(generated_password))
random.shuffle(generated_password)
final_random_password = ''.join(generated_password)
print("Here is your password: {}".format(final_random_password))
#print("After: ",generated_password)
#print(len(random_password))
#Hard Level - Order of characters randomised:
#e.g. 4 letter, 2 symbol, 2 number = g^2jk8&P | [
"zahid.4317@gmail.com"
] | zahid.4317@gmail.com |
94e2a43014b05f292486e68815f57b103be12c22 | f161fa804dc78cb87c160368b24a7ff34bfcd20d | /clubCalendar/clubCalendar/settings.py | c7f75f5230485e48dc77e444618aa74174e0450d | [
"MIT"
] | permissive | AkshilVT/clubCalendar | 42bf47afce2b4fa2d5f50adf5f4a1cad5a74c211 | 3a9d51d3b3586e22d2f16f8b8797508d86300b0a | refs/heads/main | 2023-03-31T11:23:44.556582 | 2021-03-28T08:24:49 | 2021-03-28T08:24:49 | 351,843,313 | 1 | 0 | MIT | 2021-03-26T16:21:54 | 2021-03-26T16:21:54 | null | UTF-8 | Python | false | false | 3,267 | py | """
Django settings for clubCalendar project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!kh&-1avtl5u^lyvt02^!pk+ub!ear5b5se&jqz&6^07(%(02u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'clubs.apps.ClubsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'clubCalendar.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'clubCalendar.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR,'static')
]
STATIC_ROOT = os.path.join(BASE_DIR,'staticfiles')
| [
"72624080+kavania2002@users.noreply.github.com"
] | 72624080+kavania2002@users.noreply.github.com |
0a3714c4393419c790f0b83b5e274f57f3d9effd | c140ad38b1463024e289ceb0d5d6d44a45c91724 | /test/test_sed.py | de9b9a31a4d19a6bce8f59f8af9aff375038c1e6 | [
"Apache-2.0"
] | permissive | NVIDIA/hpc-container-maker | 3a333526decbd18352ef8d1fb3bec0033be221e8 | 60fd2a51c171258a6b3f93c2523101cb7018ba1b | refs/heads/master | 2023-08-21T13:32:27.132476 | 2023-06-12T21:12:40 | 2023-06-12T21:12:40 | 126,385,168 | 419 | 88 | Apache-2.0 | 2023-09-11T18:33:26 | 2018-03-22T19:26:41 | Python | UTF-8 | Python | false | false | 1,626 | py | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Test cases for the sed module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from hpccm.templates.sed import sed
class Test_sed(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
def test_basic(self):
"""Basic sed"""
s = sed()
self.assertEqual(s.sed_step(file='foo',
patterns=[r's/a/A/g',
r's/FOO = BAR/FOO = BAZ/g']),
r'''sed -i -e s/a/A/g \
-e 's/FOO = BAR/FOO = BAZ/g' foo''')
def test_nofile(self):
"""No file specified"""
s = sed()
self.assertEqual(s.sed_step(patterns=[r's/a/A/g']), '')
def test_nopatterns(self):
"""No patterns specified"""
s = sed()
self.assertEqual(s.sed_step(file='foo'), '')
| [
"noreply@github.com"
] | noreply@github.com |
da0e360ef04be5b4a9aef897331aa98e4b9ce97c | 4d93c6999f1c938f12b7ff6fb779557e1a77479f | /chapter11/names.py | 37cab2bbcddd9ca3a3f64613ed94eea1aa8473fc | [] | no_license | MadhuV99/pywork | 5efd1aac74f2c88413bb90bbc9e0d0c250057e7c | 81ea17d8bed89ba57cdd35d2ceb0560f68a21cc8 | refs/heads/main | 2023-01-20T06:50:03.004849 | 2020-11-29T16:01:06 | 2020-11-29T16:01:06 | 312,609,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | # names.py
from name_function import get_formatted_name
print("Enter 'q' at any time to quit.")
while True:
first = input("\nPlease give me a first name: ")
if first.strip().lower() == 'q':
break
last = input("Please give me a last name: ")
if last.strip().lower() == 'q':
break
formatted_name = get_formatted_name(first, last)
print(f"\tNeatly formatted name: {formatted_name}.") | [
"madhuvasudevan@yahoo.com"
] | madhuvasudevan@yahoo.com |
8d9bb6926f1bd85ef8da53778229913d6ac4bc86 | a8160d8e37a227fd4d8b5fed6a1c4c1eef980612 | /dsnalgo/sort_pile_of_cards.py | cd0bcb2f01f2b3b82b47bf0086785a3c148482e7 | [] | no_license | gauravtatke/codetinkering | c5d5e1259592b681573c864bc9aabfbfd5d41cbd | fe3c39e0c1cb689dad5f5305a890f21a3edb2260 | refs/heads/master | 2022-09-29T02:39:50.601689 | 2022-09-21T16:02:51 | 2022-09-21T16:02:51 | 93,332,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py | #!/usr/bin/env python3
# We have N cards with each card numbered from 1 to N. All cards are randomly shuffled. We are allowed only operation moveCard(n) which moves the card with value n to the top of the pile. You are required to find out the minimum number of moveCard() operations required to sort the cards in increasing order.
def minMove(arr):
# start iterating from back and count number of elements already in descending order.
# minimum movement to sort will be n-(num of elem in desc ord) because only those elem need to move
# for e.g. 4, 2, 5, 1, 6, 3 item in desc ord are 3 i.e.{6, 5, 4} so 6 - 3 = 3 movement to sort.
# moveCard(3) -> moveCard(2) -> moveCard(1)
n = len(arr)
count = 0
for i in arr[-1::-1]:
if i == n:
count += 1
n -= 1
return len(arr)-count
def main():
arr1 = [4, 2, 5, 1, 6, 3]
arr2 = [5, 1, 2, 3, 4]
arr3 = [3, 4, 2, 1]
print(minMove(arr1))
print(minMove(arr2))
print(minMove(arr3))
if __name__ == '__main__':
main()
| [
"gauravtatke23@gmail.com"
] | gauravtatke23@gmail.com |
cd7fe7d9b83a05f42d321c44d90d1d46ece98287 | 3dfd14d5b19fffb361b55f55581547e1ee2ff3a3 | /adi/trash/browser/browser.py | 45f138f31c032ecfe60b78a7656656b9d3d5163e | [] | no_license | idgserpro/adi.trash | 5a13036bcfa0c495cc3571d8acc35ee883a3e7d4 | a9c6fb350b6cd4ce4e40646ac12598cc9dbf407f | refs/heads/master | 2021-01-01T18:51:38.472346 | 2017-07-26T18:17:09 | 2017-07-26T18:17:09 | 88,053,711 | 0 | 0 | null | 2017-04-12T13:16:18 | 2017-04-12T13:16:18 | null | UTF-8 | Python | false | false | 3,096 | py | from plone import api
from zope.i18nmessageid import MessageFactory
from Products.statusmessages.interfaces import IStatusMessage
from Products.Five.browser import BrowserView
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
_ = MessageFactory('plone')
class Trash(BrowserView):
def __init__(self, context, request):
self.context = context
self.request = request
self.paths = None
self.navroot = api.portal.get_navigation_root(context)
if 'paths' in self.request.form:
self.paths = self.request.form['paths']
def __call__(self):
trash_id = 'trash'
self.addTrash(trash_id)
landing_url = self.request.get_header('referer')
status = IStatusMessage(self.request)
# We're coming from a folder_contents' delete-button:
if landing_url.endswith('/folder_contents'):
if self.paths:
for path in self.paths:
obj = api.content.get(path)
if self.isTrash(obj, trash_id):
api.content.delete(obj=obj) #, check_linkintegrity=True)
else:
api.content.move(obj, self.navroot.trash)
status.add(_(u'Item(s) deleted.'), type=u'info')
else:
status.add(_(u'Please select one or more items to delete.'), type=u'info')
# We're coming from an item's delete-button:
else:
if self.isTrash(self.context, trash_id):
api.content.delete(obj=self.context) #, check_linkintegrity=True)
else:
api.content.move(self.context, self.navroot.trash)
status.add(_(u'Item(s) deleted.'), type=u'info')
# We want to land on old parent:
landing_url = '/'.join(self.context.absolute_url().split('/')[:-1])
return self.request.response.redirect(landing_url)
def addTrash(self, trash_id):
"""
Create '[NAVROOT_PATH]/[TRASH_ID]', if not existing.
"""
# Do we have a trashcan?
if trash_id not in self.navroot.objectIds():
# No, create it:
trash = self.navroot.invokeFactory('Folder', trash_id)
# Set trash-title:
self.navroot.trash.setTitle('Trash')
# Update title in navroot_catalog:
self.navroot.trash.reindexObject()
def isTrash(self, obj, trash_id):
"""
Check, whether an item lives inside of a trashcan,
or is the trashcan itself. Returns True, if one of
both is true.
"""
TRASH = False
navroot_url = self.navroot.absolute_url()
item_url = self.context.absolute_url()
item_rel_url = item_url[len(navroot_url)+1:]
if item_rel_url == trash_id\
or item_rel_url.startswith(trash_id + '/')\
or obj.id == trash_id:
TRASH = True
return TRASH
| [
"contact@ida-ebkes.eu"
] | contact@ida-ebkes.eu |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.