Dataset schema:

  blob_id               string         length 40-40
  directory_id          string         length 40-40
  path                  string         length 2-616
  content_id            string         length 40-40
  detected_licenses     list           length 0-69
  license_type          string         2 values
  repo_name             string         length 5-118
  snapshot_id           string         length 40-40
  revision_id           string         length 40-40
  branch_name           string         length 4-63
  visit_date            timestamp[us]
  revision_date         timestamp[us]
  committer_date        timestamp[us]
  github_id             int64          2.91k-686M
  star_events_count     int64          0-209k
  fork_events_count     int64          0-110k
  gha_license_id        string         23 values
  gha_event_created_at  timestamp[us]
  gha_created_at        timestamp[us]
  gha_language          string         213 values
  src_encoding          string         30 values
  language              string         1 value
  is_vendor             bool           2 classes
  is_generated          bool           2 classes
  length_bytes          int64          2-10.3M
  extension             string         246 values
  content               string         length 2-10.3M
  authors               list           length 1-1
  author_id             string         length 0-212
73d3562cc8169b63495c78ae59440d22cfde758f
c2a6adcfb2319b67922c646ebfb70a8f189c5e12
/moya/command/sub/filesystems.py
6f9c3ad92bb93bc098833bb4b4daee56548919fe
[ "MIT" ]
permissive
metaperl/moya
d8624664382370ca28502b43a91f69d856b79aa3
960d504e98da643ae870e85a75ad0ed183fabfa2
refs/heads/master
2020-06-24T20:33:53.771757
2016-09-11T14:44:25
2016-09-11T14:44:25
null
0
0
null
null
null
null
UTF-8
Python
false
false
12,106
py
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import

import os

from ...command import SubCommand
from ...wsgi import WSGIApplication
from ...console import Cell
from ...compat import text_type, raw_input

from fs.opener import fsopendir
from fs.errors import FSError
from fs.multifs import MultiFS
from fs.mountfs import MountFS
from fs.path import dirname


def _ls(console, file_paths, dir_paths, format_long=False):
    """Cannibalized from pyfilesystem"""

    dirs = frozenset(dir_paths)
    paths = sorted(file_paths + dir_paths, key=lambda p: p.lower())

    def columnize(paths, num_columns):
        col_height = (len(paths) + num_columns - 1) / num_columns
        columns = [[] for _ in range(num_columns)]
        col_no = 0
        col_pos = 0
        for path in paths:
            columns[col_no].append(path)
            col_pos += 1
            if col_pos >= col_height:
                col_no += 1
                col_pos = 0

        padded_columns = []

        def wrap(path):
            return (path in dirs, path.ljust(max_width))

        for column in columns:
            if column:
                max_width = max([len(path) for path in column])
            else:
                max_width = 1
            max_width = min(max_width, terminal_width)
            padded_columns.append([wrap(path) for path in column])

        return padded_columns

    def condense_columns(columns):
        max_column_height = max([len(col) for col in columns])
        lines = [[] for _ in range(max_column_height)]
        for column in columns:
            for line, (isdir, path) in zip(lines, column):
                line.append((isdir, path))
        for line in lines:
            for i, (isdir, path) in enumerate(line):
                if isdir:
                    console(path, bold=True, fg="blue")
                else:
                    console(path)
                if i < len(line) - 1:
                    console(' ')
            console.nl()

    if format_long:
        for path in paths:
            if path in dirs:
                console(path, bold=True, fg="blue")
            else:
                console(path)
            console.nl()
    else:
        terminal_width = console.width
        path_widths = [len(path) for path in paths]
        smallest_paths = min(path_widths)
        num_paths = len(paths)
        num_cols = min(terminal_width // (smallest_paths + 2), num_paths)
        while num_cols:
            col_height = (num_paths + num_cols - 1) // num_cols
            line_width = 0
            for col_no in range(num_cols):
                try:
                    col_width = max(path_widths[col_no * col_height:(col_no + 1) * col_height])
                except ValueError:
                    continue
                line_width += col_width
                if line_width > terminal_width:
                    break
                line_width += 2
            else:
                if line_width - 1 <= terminal_width:
                    break
            num_cols -= 1
        num_cols = max(1, num_cols)
        columns = columnize(paths, num_cols)
        condense_columns(columns)


class FS(SubCommand):
    """Manage project filesystems"""
    help = "manage project filesystems"

    def add_arguments(self, parser):
        parser.add_argument(dest="fs", nargs="?", default=None, metavar="FILESYSTEM",
                            help="filesystem name")
        parser.add_argument("-l", "--location", dest="location", default=None, metavar="PATH",
                            help="location of the Moya server code")
        parser.add_argument("-i", "--ini", dest="settings", default=None, metavar="SETTINGSPATH",
                            help="path to project settings")
        parser.add_argument("--server", dest="server", default='main', metavar="SERVERREF",
                            help="server element to use")
        parser.add_argument('--ls', dest="listdir", default=None, metavar="PATH",
                            help="list files / directories")
        parser.add_argument("--tree", dest="tree", nargs='?', default=None, const='/',
                            help="display a tree view of the filesystem")
        parser.add_argument("--cat", dest="cat", default=None, metavar="PATH",
                            help="Cat a file to the console")
        parser.add_argument("--syspath", dest="syspath", default=None, metavar="PATH",
                            help="display the system path of a file")
        parser.add_argument("--open", dest="open", default=None, metavar="PATH",
                            help="open a file")
        parser.add_argument("--copy", dest="copy", metavar="DESTINATION or PATH DESTINATION", nargs='+',
                            help="copy contents of a filesystem to PATH, or a file from PATH to DESTINATION")
        parser.add_argument('--extract', dest="extract", metavar="PATH DIRECTORY", nargs=2,
                            help="copy a file from a filesystem, preserving directory structure")
        parser.add_argument("-f", "--force", dest="force", action="store_true", default=False,
                            help="force overwrite of destination even if it is not empty (with --copy)")
        return parser

    def run(self):
        args = self.args
        application = WSGIApplication(self.location,
                                      self.get_settings(),
                                      args.server,
                                      disable_autoreload=True,
                                      master_settings=self.master_settings)
        archive = application.archive

        filesystems = archive.filesystems
        fs = None
        if args.fs:
            try:
                fs = filesystems[args.fs]
            except KeyError:
                self.console.error("No filesystem called '%s'" % args.fs)
                return -1

        if args.tree is not None:
            if fs is None:
                self.console.error("Filesystem required")
                return -1
            with fs.opendir(args.tree) as tree_fs:
                tree_fs.tree()
            return

        if args.listdir:
            if fs is None:
                self.console.error("Filesystem required")
                return -1
            dir_fs = fs.opendir(args.listdir)
            file_paths = dir_fs.listdir(files_only=True)
            dir_paths = dir_fs.listdir(dirs_only=True)
            _ls(self.console, file_paths, dir_paths)

        elif args.cat:
            if fs is None:
                self.console.error("Filesystem required")
                return -1
            contents = fs.getcontents(args.cat)
            self.console.cat(contents, args.cat)

        elif args.open:
            if fs is None:
                self.console.error("Filesystem required")
                return -1
            filepath = fs.getsyspath(args.open, allow_none=True)
            if filepath is None:
                self.console.error("No system path for '%s' in filesystem '%s'" % (args.open, args.fs))
                return -1

            import subprocess
            if os.name == 'mac':
                subprocess.call(('open', filepath))
            elif os.name == 'nt':
                subprocess.call(('start', filepath), shell=True)
            elif os.name == 'posix':
                subprocess.call(('xdg-open', filepath))
            else:
                self.console.error("Moya doesn't know how to open files on this platform (%s)" % os.name)

        elif args.syspath:
            if fs is None:
                self.console.error("Filesystem required")
                return -1
            if not fs.exists(args.syspath):
                self.console.error("No file called '%s' found in filesystem '%s'" % (args.syspath, args.fs))
                return -1
            syspath = fs.getsyspath(args.syspath, allow_none=True)
            if syspath is None:
                self.console.error("No system path for '%s' in filesystem '%s'" % (args.syspath, args.fs))
            else:
                self.console(syspath).nl()

        elif args.copy:
            if fs is None:
                self.console.error("Filesystem required")
                return -1
            if len(args.copy) == 1:
                src = '/'
                dst = args.copy[0]
            elif len(args.copy) == 2:
                src, dst = args.copy
            else:
                self.console.error("--copy requires 1 or 2 arguments")
                return -1

            if fs.isdir(src):
                src_fs = fs.opendir(src)
                dst_fs = fsopendir(dst, create_dir=True)

                if not args.force and not dst_fs.isdirempty('/'):
                    response = raw_input("'%s' is not empty. Copying may overwrite directory contents. Continue? " % dst)
                    if response.lower() not in ('y', 'yes'):
                        return 0

                from fs.utils import copydir
                copydir(src_fs, dst_fs)
            else:
                with fs.open(src, 'rb') as read_f:
                    if os.path.isdir(dst):
                        dst = os.path.join(dst, os.path.basename(src))
                    try:
                        os.makedirs(dst)
                        with open(dst, 'wb') as write_f:
                            while 1:
                                chunk = read_f.read(16384)
                                if not chunk:
                                    break
                                write_f.write(chunk)
                    except IOError as e:
                        self.error('unable to write to {}'.format(dst))

        elif args.extract:
            if fs is None:
                self.console.error("Filesystem required")
                return -1
            src_path, dst_dir_path = args.extract
            src_fs = fs
            dst_fs = fsopendir(dst_dir_path, create_dir=True)

            if not args.force and dst_fs.exists(src_path):
                response = raw_input("'%s' exists. Do you want to overwrite? " % src_path)
                if response.lower() not in ('y', 'yes'):
                    return 0

            dst_fs.makedir(dirname(src_path), recursive=True, allow_recreate=True)
            with src_fs.open(src_path, 'rb') as read_file:
                dst_fs.setcontents(src_path, read_file)

        else:
            table = [[Cell("Name", bold=True),
                      Cell("Type", bold=True),
                      Cell("Location", bold=True)]]
            if fs is None:
                list_filesystems = filesystems.items()
            else:
                list_filesystems = [(args.fs, fs)]
            for name, fs in sorted(list_filesystems):
                if isinstance(fs, MultiFS):
                    location = '\n'.join(mount_fs.desc('/') for mount_fs in fs.fs_sequence)
                    fg = "yellow"
                elif isinstance(fs, MountFS):
                    mount_desc = []
                    for path, dirmount in fs.mount_tree.items():
                        mount_desc.append('%s->%s' % (path, dirmount.fs.desc('/')))
                    location = '\n'.join(mount_desc)
                    fg = "magenta"
                else:
                    syspath = fs.getsyspath('/', allow_none=True)
                    if syspath is not None:
                        location = syspath
                        fg = "green"
                    else:
                        try:
                            location = fs.desc('/')
                        except FSError as e:
                            location = text_type(e)
                            fg = "red"
                        else:
                            fg = "blue"
                table.append([Cell(name),
                              Cell(type(fs).__name__),
                              Cell(location, bold=True, fg=fg)])
            self.console.table(table, header=True)
[ "willmcgugan@gmail.com" ]
willmcgugan@gmail.com
701b994d2b8c8b09b9fbfc52af044fe27f7a4ed4
7f87a68d69e41ddc2e4cf67d019eaf74f823a5e5
/models/post.py
930e32e74d00ff3f299092c59ab27b565f00e492
[]
no_license
wgwjifeng/Publish
5b9ac129bca819077ddc1edaa5c54240a51a5a64
9a0cc10278bdd71b93d539c23daae86c469ff5c9
refs/heads/master
2020-04-04T02:59:45.206878
2016-04-10T13:58:16
2016-04-10T13:58:16
null
0
0
null
null
null
null
UTF-8
Python
false
false
679
py
# -*- coding: utf-8 -*-
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class Post(Base):
    __tablename__ = 'post'
    __table_args__ = {
        'mysql_charset': 'utf8',
    }

    id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    title = sa.Column(sa.String(64))
    content = sa.Column(sa.Text)
    username = sa.Column(sa.String)

    def __init__(self, title=title, content=content, username=username):
        self.title = title
        self.content = content
        self.username = username

    def __repr__(self):
        return '<Post %s> [Content %s]' % (self.title, self.content)
[ "1016903103@qq.com" ]
1016903103@qq.com
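A minimal usage sketch for the Post model above (the import path mirrors /models/post.py; the in-memory SQLite engine and the field values are illustrative, not part of the original file):

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from models.post import Base, Post  # hypothetical import path, matching /models/post.py

engine = create_engine('sqlite:///:memory:')  # throwaway in-memory DB for illustration
Base.metadata.create_all(engine)

Session = sessionmaker(bind=engine)
session = Session()
session.add(Post(title='hello', content='first post', username='alice'))
session.commit()
print(session.query(Post).first())  # -> <Post hello> [Content first post]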
fd634b5a0f7144f8919b3ba701529f52f30990f2
f2770a694a8aedb71c326ced49e76531d69d9f4f
/bin_SPIDERS_AGN/create_gaia_mask_2RXS.py
c1ba0a341fabe82650c3c0d62c2cea216a8ff4d4
[]
no_license
JohanComparat/makeSample
258280ccb0f9dfa7dc13e934f453c520a1f416b8
b15f942f51750f504150f6ec29eba857123c29c4
refs/heads/master
2021-06-07T18:21:27.559530
2020-01-22T08:46:04
2020-01-22T08:46:04
106,016,246
2
2
null
null
null
null
UTF-8
Python
false
false
5,675
py
import astropy.io.fits as fits
import os, sys, glob
from os.path import join
#import pymangle as mangle
import numpy as np
import matplotlib.pyplot as p
from scipy.interpolate import interp1d
import astropy.units as u
import astropy.cosmology as cc
cosmo = cc.Planck13
from astropy.coordinates import SkyCoord
from sklearn.neighbors import BallTree, DistanceMetric
from astropy.table import Table, unique
from math import radians, cos, sin, asin, sqrt, pi

in_dir = '/data36s/comparat/AGN_clustering/catalogs/'
out_dir = '/data36s/comparat/AGN_clustering/angular_clustering/'

deg_to_rad = np.pi / 180.
arcsec = 1. / 3600.
rs = 10**np.arange(-1, 1.6, 0.1) * arcsec

# SDSS WISE CATALOGS
# path_2_file = '/data44s/eroAGN_WG_DATA/DATA/masks/SDSS_WISE_imageprop_nside512.fits'
# hdu_S = fits.open(path_2_file)

# DATA
path_2_data_2rxs = join(in_dir, '2RXS_AllWISE_catalog_paper_2017May26_v5_11_0_sdss_26_VERON_MASKED.fits')

data_file = path_2_data_2rxs
ra_name_data = 'ALLW_RA'
dec_name_data = 'ALLW_DEC'

hduD = fits.open(data_file)
ra_data = hduD[1].data[ra_name_data]
dec_data = hduD[1].data[dec_name_data]
z_data = np.zeros_like(ra_data)
ratelim_data = hduD[1].data['RATELIM']

coords = SkyCoord(ra_data, dec_data, unit='deg', frame='icrs')
bb_data = coords.galactic.b.value
ll_data = coords.galactic.l.value
bb_ecl_data = coords.barycentrictrueecliptic.lat

stars_data = (hduD[1].data['p_any'] > 0.5) & (hduD[1].data['2RXS_ExiML'] > 10)
rt_sel_data = (ratelim_data > 0.015)
x_gal_data = (abs(bb_data) > 20) & (dec_data < 80) & (dec_data > -80) & (bb_ecl_data.value > -80)
selection_data = (x_gal_data) & (stars_data == False) & (rt_sel_data)
N_data = len(ra_data[selection_data])

# GAIA CATALOGS
gaia_dir = '/data44s/eroAGN_WG_DATA/DATA/photometry/catalogs/GAIA/DR2/'
gaia_table_list = np.array(glob.glob(os.path.join(gaia_dir, 'table_*.fits')))
gaia_table_list.sort()

for gaia_file in gaia_table_list[1:][::-1]:
    print(gaia_file)
    hdu_G = fits.open(gaia_file)
    ra_gaia, dec_gaia = hdu_G[1].data['ra'], hdu_G[1].data['dec']
    coords = SkyCoord(ra_gaia, dec_gaia, unit='deg', frame='icrs')
    bb_gaia = coords.galactic.b.value
    ll_gaia = coords.galactic.l.value
    bb_ecl_gaia = coords.barycentrictrueecliptic.lat
    x_gal_gaia = (abs(bb_gaia) > 20) & (dec_gaia < 80) & (dec_gaia > -80) & (bb_ecl_gaia.value > -80)
    selection_gaia = (x_gal_gaia)
    N_gaia = len(ra_gaia[selection_gaia])
    agn_coordinates = deg_to_rad * np.transpose([dec_data[selection_data], ra_data[selection_data]])
    gaia_coordinates = deg_to_rad * np.transpose([dec_gaia[selection_gaia], ra_gaia[selection_gaia]])
    Tree_obj_Gaia = BallTree(gaia_coordinates, metric='haversine')
    Tree_obj_AGN = BallTree(agn_coordinates, metric='haversine')
    test_c = np.array([Tree_obj_Gaia.query_radius(agn_coordinates, r=rr, count_only=True) for rr in rs])
    N_pairs_total = test_c.sum(axis=1)
    Delta_N_pairs = N_pairs_total[1:] - N_pairs_total[:-1]
    area = 4. * np.pi * (rs[1:]**2 - rs[:-1]**2)
    pair_density = Delta_N_pairs / (area * N_data * N_gaia)
    out_data = os.path.join(out_dir, '2RXS_AllWISE_catalog_paper_2017May26_X_GAIA_' + os.path.basename(gaia_file) + '.data')
    np.savetxt(out_data, np.transpose([rs[1:], pair_density]), header='radius_arcsec density')

# EXCEPTION for the first file, which has a broad magnitude range 1-5 that I re-cut
# by hand into two bins, extending the radius.
gaia_file = gaia_table_list[0]
print(gaia_file)
hdu_G = fits.open(gaia_file)
ra_gaia, dec_gaia = hdu_G[1].data['ra'], hdu_G[1].data['dec']
coords = SkyCoord(ra_gaia, dec_gaia, unit='deg', frame='icrs')
bb_gaia = coords.galactic.b.value
ll_gaia = coords.galactic.l.value
bb_ecl_gaia = coords.barycentrictrueecliptic.lat
g_mag = hdu_G[1].data['phot_g_mean_mag']

x_gal_gaia = (abs(bb_gaia) > 20) & (dec_gaia < 80) & (dec_gaia > -80) & (bb_ecl_gaia.value > -80)

mag_sel = (g_mag > 4)
selection_gaia = (x_gal_gaia) & (mag_sel)
N_gaia = len(ra_gaia[selection_gaia])

# Tree to select pairs
# COUNT UNIQUE
agn_coordinates = deg_to_rad * np.transpose([dec_data[selection_data], ra_data[selection_data]])
gaia_coordinates = deg_to_rad * np.transpose([dec_gaia[selection_gaia], ra_gaia[selection_gaia]])
Tree_obj_Gaia = BallTree(gaia_coordinates, metric='haversine')
Tree_obj_AGN = BallTree(agn_coordinates, metric='haversine')
test_c = np.array([Tree_obj_Gaia.query_radius(agn_coordinates, r=rr, count_only=True) for rr in rs])
N_pairs_total = test_c.sum(axis=1)
Delta_N_pairs = N_pairs_total[1:] - N_pairs_total[:-1]
area = 4. * np.pi * (rs[1:]**2 - rs[:-1]**2)
pair_density = Delta_N_pairs / (area * N_data * N_gaia)
out_data = os.path.join(out_dir, '2RXS_AllWISE_catalog_paper_2017May26_X_GAIA_table_4_g_5.fits.data')
np.savetxt(out_data, np.transpose([rs[1:], pair_density]), header='radius_arcsec density')

mag_sel = (g_mag <= 4)
selection_gaia = (x_gal_gaia) & (mag_sel)
N_gaia = len(ra_gaia[selection_gaia])

# Tree to select pairs
# COUNT UNIQUE
agn_coordinates = deg_to_rad * np.transpose([dec_data[selection_data], ra_data[selection_data]])
gaia_coordinates = deg_to_rad * np.transpose([dec_gaia[selection_gaia], ra_gaia[selection_gaia]])
Tree_obj_Gaia = BallTree(gaia_coordinates, metric='haversine')
Tree_obj_AGN = BallTree(agn_coordinates, metric='haversine')
test_c = np.array([Tree_obj_Gaia.query_radius(agn_coordinates, r=rr, count_only=True) for rr in rs])
N_pairs_total = test_c.sum(axis=1)
Delta_N_pairs = N_pairs_total[1:] - N_pairs_total[:-1]
area = 4. * np.pi * (rs[1:]**2 - rs[:-1]**2)
pair_density = Delta_N_pairs / (area * N_data * N_gaia)
out_data = os.path.join(out_dir, '2RXS_AllWISE_catalog_paper_2017May26_X_GAIA_table_1_g_4.fits.data')
np.savetxt(out_data, np.transpose([rs[1:], pair_density]), header='radius_arcsec density')
[ "johan.comparat@gmail.com" ]
johan.comparat@gmail.com
d40c9cc8024fef9549347f41beb2dad2ef0fbe36
79945ed5fa2bdc4adbc96008d40e7b1f4f741b9f
/keyPressed.py
7e26fe461de7dd72666a4ad7d0ec587c3b2b3cd0
[]
no_license
Zaraklin/ContinuousAuthDataAnalyzer
ce4414fc64d2dee0bfae90ff5f0a380c67b5be84
ce2da60582056ebff2774db0ee75d3865f4daafd
refs/heads/master
2021-04-15T02:17:11.488103
2020-03-22T22:44:10
2020-03-22T22:44:10
249,286,439
0
0
null
null
null
null
UTF-8
Python
false
false
2,090
py
class KeyPressed(object):
    def __init__(self, data):
        self.__dict__ = data
        self.intervalBetweenKeyPress = self.__dict__["intervalBetweenKeyPress"]
        self.keyPressed = self.__dict__["keyPressed"]

    def __repr__(self):
        return str(self)

    def __str__(self):
        return str(self.intervalBetweenKeyPress) + "," + "'" + str(self.keyPressed) + "'"

    def __int__(self):
        return int(self.intervalBetweenKeyPress)

    def __lt__(self, other):
        return int(self.intervalBetweenKeyPress) < int(other.intervalBetweenKeyPress)

    def __le__(self, other):
        return int(self.intervalBetweenKeyPress) <= int(other.intervalBetweenKeyPress)

    def __gt__(self, other):
        return int(self.intervalBetweenKeyPress) > int(other.intervalBetweenKeyPress)

    def __ge__(self, other):
        return int(self.intervalBetweenKeyPress) >= int(other.intervalBetweenKeyPress)

    def __eq__(self, other):
        return int(self.intervalBetweenKeyPress) == int(other.intervalBetweenKeyPress)

    def __ne__(self, other):
        return int(self.intervalBetweenKeyPress) != int(other.intervalBetweenKeyPress)

    def __radd__(self, other):
        return self.intervalBetweenKeyPress + int(other)

    def __iadd__(self, other):
        return self.intervalBetweenKeyPress + int(other)

    def __sum__(self, other):
        return self.intervalBetweenKeyPress + int(other)

    def __rsub__(self, other):
        return self.intervalBetweenKeyPress - int(other)

    def __isub__(self, other):
        return self.intervalBetweenKeyPress - int(other)

    def __sub__(self, other):
        return self.intervalBetweenKeyPress - int(other)

    def __rtruediv__(self, other):
        return self.intervalBetweenKeyPress / int(other)

    def __truediv__(self, other):
        return self.intervalBetweenKeyPress / int(other)

    def __rfloordiv__(self, other):
        return self.intervalBetweenKeyPress // int(other)

    def __floordiv__(self, other):
        return self.intervalBetweenKeyPress // int(other)
[ "zaraklin@users.noreply.github.com" ]
zaraklin@users.noreply.github.com
4e2274addc319ca71c1ddd6fb261f8e950e2a1d7
685fa64398a4af7529c16de12edbb430aa8f444c
/OnlineAssesment/Amazon/MaximumUnit.py
9682d0328d9be7eec7ae123f7eda75e144b110a1
[]
no_license
aakashpatel25/InterviewQuestions
2407c83c911587f2e648ae150463412dac5cd626
2f8e8a33747affdabb10f4bc478fe545e7f09a4c
refs/heads/master
2022-12-15T02:18:22.645049
2020-09-19T03:19:53
2020-09-19T03:19:53
296,470,873
0
0
null
null
null
null
UTF-8
Python
false
false
629
py
""" https://leetcode.com/discuss/interview-question/793606/ Approach: Compute the number of units for each box sort it in descending order and fill up the truck till it is full. Time Complexity: O(MlogM) - Sorting would require this complexity Space Complexity: O(M) - Where M sum(boxes) """ def get_max_unit(num, boxes, unit_size, units_per_box, truck_size): units = [] for boxes, unit in zip(boxes, units_per_box): units+= [unit]*boxes units.sort(reverse=True) return sum(units[:truck_size]) assert get_max_unit(3, [1,2,3], 3, [3,2,1], 3) == 7 assert get_max_unit(3, [2,5,3], 3, [3,2,1], 50) == 19
[ "aakashpatel775@gmail.com" ]
aakashpatel775@gmail.com
8780c7cb4a2857a1136d8ebd11d3ba4e48effcab
7c0d51af5c615ee557e4189444ca9c00a53f1b1e
/query.py
d28fdb045cc532b8757b6bdd8022ed052dd220b2
[]
no_license
akhiljain0777/wikigraph
c68fd2787b7fc99a6fd38658bc1e81f522a3d51d
08fd74ea020f5eedec641e5878128985c1e6c758
refs/heads/master
2020-05-21T08:06:34.508241
2015-01-21T18:32:41
2015-01-21T18:32:41
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,697
py
from py2neo import neo4j
import json, time


def find_shortest_path(node1, node2):
    """Connects to graph database, then creates and sends query to graph database.
    Returns the shortest path between two nodes.
    Format: (67149)-[:'LINKS_TO']->(421)"""

    graph_db = neo4j.GraphDatabaseService()
    t0 = time.time()
    query = neo4j.CypherQuery(
        graph_db,
        """MATCH (m:Page {node:{n1}}), (n:Page {node:{n2}}),
           p = shortestPath((m)-[*..10]->(n)) RETURN p""")
    try:
        path = query.execute_one(n1=node1, n2=node2)
    except:
        path = None
    t1 = time.time()
    print "\nShortest Path:", path
    print "Time elapsed: %.2f seconds" % (t1 - t0)
    return path


def parse_node(node, in_path):
    """Extract title and code from a node object. Returns a dict of information."""
    code, deg, title = node.get_properties().values()
    title = title.replace('_', ' ')
    if title == "Basque people":  # special exception for a changed redirect
        title = "Basques"
    node_dict = {'code': int(code), 'title': title, 'degrees': deg, 'group': 'none'}
    if in_path:
        node_dict['group'] = 'path'
    return node_dict


def parse_rel(rel, in_path):
    """Extract node code from a relationship object. Returns a dict of information."""
    start_id = rel.start_node.get_properties()['node']
    end_id = rel.end_node.get_properties()['node']
    rel_dict = {'source': int(start_id), 'target': int(end_id), 'value': 0}
    if in_path:
        rel_dict['value'] = 1
    return rel_dict


def parse_node_objs(node_objs_list, in_path=False):
    """Takes a list of node objects. Returns dict of node dicts."""
    nodes = {}
    for node in node_objs_list:
        node_dict = parse_node(node, in_path=in_path)
        if node_dict['code'] not in nodes:
            nodes[node_dict['code']] = node_dict
    return nodes


def parse_rel_objs(rel_objs_list, in_path=False):
    """Takes a list of relationship objects. Returns list of rel dicts."""
    rel_dict_list = [parse_rel(rel=rel, in_path=in_path) for rel in rel_objs_list]
    return rel_dict_list


def find_other_nodes(node_objs_list):
    """Takes a list of node objects. Returns list of rel dicts and list of node dicts."""
    rels = []
    nodes = []
    for node in node_objs_list:
        for rel in node.match_incoming(limit=8):
            rels.append(rel)
            nodes.append(rel.start_node)
        for rel in node.match_outgoing(limit=8):
            rels.append(rel)
            nodes.append(rel.end_node)
    rel_dict_list = parse_rel_objs(rels)
    node_dict_list = parse_node_objs(nodes)
    return rel_dict_list, node_dict_list


def merge_node_dicts(path_nodes, npath_nodes):
    """Takes and merges the two dictionaries of node dicts. Returns list of node dicts."""
    d = dict(npath_nodes.items() + path_nodes.items())
    node_dict_list = [node_dict for node_dict in d.values()]
    return node_dict_list


def parse_nodes_and_rels(path):
    """Takes a path object. Returns two lists, one for rel dicts and one for node dicts."""
    # rel dict list for main path
    path_rels = parse_rel_objs(rel_objs_list=path.relationships, in_path=True)
    # parse nodes, create list of unique nodes
    path_nodes = parse_node_objs(node_objs_list=path.nodes, in_path=True)
    # this is a quick/dirty way to grab the names for each path node in order
    path_names = []
    for node in path.nodes:
        path_dict = node.get_properties().values()[0]
        path_names.append({'title': path_nodes[int(path_dict)]['title'],
                           'code': path_nodes[int(path_dict)]['code']})
    # rel dict list for secondary rels
    npath_rels, npath_nodes = find_other_nodes(node_objs_list=path.nodes)
    # filter out reversed or duplicate paths in the path rels
    for rel in npath_rels:
        for path in path_rels:
            if rel['source'] == path['target'] and rel['target'] == path['source']:
                rel['value'] = 1  # include it in the path
            if rel['source'] == path['source'] and rel['target'] == path['target']:
                npath_rels.remove(rel)  # remove duplicates
    # combine the two lists for nodes and rels
    rels_list = path_rels + npath_rels
    nodes_list = merge_node_dicts(path_nodes, npath_nodes)
    return rels_list, nodes_list, path_names


def create_lists(node1, node2):
    """Request the shortest path between two nodes from the database.
    Assemble list of nodes and relationships from the path, then process
    to recode their IDs. Write output to a JSON file."""
    path = find_shortest_path(str(node1), str(node2))
    if path:
        rels_list, nodes_list, path_names = parse_nodes_and_rels(path)
        codes = {}
        id_counter = 0
        for node in nodes_list:  # create a dict to translate id codes
            node_id = node['code']
            if node_id not in codes:
                codes[node_id] = id_counter
                id_counter += 1
        for rel in rels_list:  # look up the source and target in codes
            rel['source'] = codes[rel['source']]
            rel['target'] = codes[rel['target']]
        response = """{ "path": %s, "results": {
            "directed": true,
            "nodes": %s,
            "links": %s,
            "multigraph": false }}""" % (json.dumps(path_names),
                                         json.dumps(nodes_list),
                                         json.dumps(rels_list))
    else:
        response = '{ "path": "None", "results": "None" }'
    return response


if __name__ == '__main__':
    print create_lists('335354', '3778612')  # Abraham Lincoln to Astronomy
[ "erabug@gmail.com" ]
erabug@gmail.com
ac7ed1ceb5306083b6350f9abe9ce05b8d24226a
cd713f083d719dcdf219a72f6f7f0f506a128e14
/weather.py
42cfc558f2d5c465ecfc60f2e103c40b290d09d8
[]
no_license
patricia1387/IPK-projekt
2397b469f4a8d5e081b6c9565045acee8b1652a7
f3dc0e77e0d5c202e713fcfa2fbd2bd4ff6f096d
refs/heads/master
2020-05-18T03:02:10.841575
2019-04-29T19:55:40
2019-04-29T19:55:40
184,134,410
0
0
null
null
null
null
UTF-8
Python
false
false
1,176
py
#!/bin/env python3
import sys
import json
from socket import *

if len(sys.argv) != 3:
    sys.stderr.write("Wrong number of arguments\n")
    sys.exit()

CITY = sys.argv[2]
API_KEY = sys.argv[1]
HOST = "api.openweathermap.org"
PORT = 80

message = ('GET /data/2.5/weather?q=' + CITY + '&APPID=' + API_KEY +
           '&units=metric HTTP/1.1\r\nHost:' + HOST + '\r\n\r\n')
message = str.encode(message)

mySocket = socket(AF_INET, SOCK_STREAM)
mySocket.connect((HOST, PORT))
mySocket.sendall(message)
data = mySocket.recv(1024).decode()

# Strip the HTTP headers, then parse the JSON body.
data = json.loads(data[data.find("\r\n\r\n") + 4:])

try:
    description = data['weather'][0]['description']
    temp = data['main']['temp']
    pressure = data['main']['pressure']
    humidity = data['main']['humidity']
    wind_speed = data['wind']['speed'] * 3.6
    wind_deg = data['wind']['deg']
    print(CITY)
    print('{}'.format(description))
    print('Teplota : {} stupnov Celzia'.format(temp))
    print('Tlak : {} hPa'.format(pressure))
    print('Vlhkost : {} %'.format(humidity))
    print('Rychlost vetra : {} km/h'.format(wind_speed))
    print('Smer vetra : {} '.format(wind_deg))
except:
    print("Mesto sa nenašlo: " + CITY)
[ "noreply@github.com" ]
patricia1387.noreply@github.com
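For comparison, a sketch of the same request made through the standard library's http.client, which handles the response framing that the raw-socket version reads by hand (the city and API key are placeholders):

import http.client
import json

conn = http.client.HTTPConnection("api.openweathermap.org")
conn.request("GET", "/data/2.5/weather?q=Bratislava&APPID=YOUR_API_KEY&units=metric")
resp = conn.getresponse()
payload = json.loads(resp.read().decode())
print(payload['main']['temp'])  # temperature in degrees Celsius (metric units)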
adee2f37c5e416cdc0042d01d5194f7e63586daa
71ea474548cd7521723c97b48abe1ff03e54a4a4
/optimal_covering.py
534e24212a673ce559b7ec48820a05ff451e2a1f
[]
no_license
wfedorko/optimal_covering
7d6eaca0c9bb96fc7ac4ba19a83df26c02b858c0
f0ab5559c7b0519df3dd0cd9441f48b7b017f6ce
refs/heads/master
2020-08-09T07:35:11.144149
2019-10-09T22:08:44
2019-10-09T22:08:44
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,182
py
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from shapely.geometry import Polygon
from datetime import datetime
import os
import itertools
import time
import random

from modules.earth_data import clouds
from modules.utils import time_utils
from modules.optimization import genetic_helpers
from modules.utils import retrievals
from modules.optimization import genetic_algorithm


def remove(polygon_to_remove, mask_polys):
    # removes all intersections of the mask with this polygon.
    # that is, after calling this function your mask will have a hole
    # where the polygon was
    removed = []
    for p in mask_polys:
        intersection = poly_minus_intersection(p, polygon_to_remove)
        if intersection.wkt == "GEOMETRYCOLLECTION EMPTY":
            continue
        removed.append(intersection)
    return removed


def poly_minus_intersection(pol1, pol2):
    # removes the intersection of pol1 and pol2 from pol1
    if pol1.intersects(pol2):
        # https://stackoverflow.com/questions/39459496/create-new-shapely-polygon-by-subtracting-the-intersection-with-another-polygon
        no_overlap = (pol1.symmetric_difference(pol2)).difference(pol2)
    else:
        no_overlap = pol1
    return no_overlap


def genetic_lattice(dtime, plot=False, **kwargs):
    ref_time = datetime(2015, 1, 1)
    time = time_utils.Time(dtime, ref_time)
    # mask of lat between 0->90 and lon -180->180
    mask_obj = genetic_helpers.clear_mask(time, plot=plot)
    # polygons describing said mask
    speed = False
    if speed:
        mask_polys = genetic_algorithm.get_mask_points(mask_obj)
    else:
        mask_polys = genetic_algorithm.get_mask_polys(mask_obj)
    # lattice of FOVs generated using that mask and genetic algorithm
    lattice = genetic_helpers.generate_fovs(mask_obj, mask_polys=mask_polys, **kwargs)
    genetic_helpers.plot_individual(lattice, mask_obj, "final", save=False)


def subtraction_lattice(dtime, plot=False):
    ref_time = datetime(2015, 1, 1)
    time = time_utils.Time(dtime, ref_time)
    # mask of lat between 0->90 and lon -180->180
    mask_obj = genetic_helpers.clear_mask(time)
    # coords = mask_obj.coords
    # clear = mask_obj.mask
    # num_ones = len(np.where(clear == 1)[0].flatten())
    # polygons describing said mask
    mask_polys = genetic_algorithm.get_mask_polys(mask_obj)
    # get one point chosen by taking 100 random samples and evaluating each
    best_observations = []
    best_obs_poly = Polygon([(0, 0), (0, 0), (0, 0), (0, 0)])
    members_per_pop = 100
    for fov_num in range(120):
        print("loop", fov_num)
        mask_polys = remove(best_obs_poly, mask_polys)
        best_obs_pop = genetic_helpers.generate_fovs(
            mask_obj, mask_polys=mask_polys, generations=1,
            members_per_pop=members_per_pop,  # num_ones,
            fovs_per_member=1,
            num_parents_mating=0,
            hpixels=64, vpixels=64)
        best_obs_poly = best_obs_pop[0]
        best_observations.append(best_obs_poly)
        # genetics.plot_individual(
        #     best_obs_pop, mask_obj, fov_num, save=False,
        #     mask_polys=mask_polys)
        # print(best_obs_poly)
    best_observations = np.array(best_observations)
    # for obs in best_observations:
    #     print(obs)
    genetic_helpers.plot_individual(best_observations, mask_obj, fov_num, save=True)


if __name__ == "__main__":
    # np.random.seed(1234)
    year = 2015
    month = 4  # np.random.randint(1, 13)
    day = 4    # np.random.randint(1, 29)  # no days above 28 to avoid February
    hour = 8   # np.random.randint(0, 24)
    dtime = datetime(year, month, day, hour=hour)

    #clouds.download_merra_data(year, month)

    #genetic_lattice(
    #    dtime,
    #    generations=400,
    #    members_per_pop=10,
    #    fovs_per_member=30,
    #    num_parents_mating=2,
    #    num_mutations=1
    #)
    genetic_lattice(dtime, generations=0)
[ "callum189@gmail.com" ]
callum189@gmail.com
5853f6d3503fb35e9598547cafd94efd8a76e23c
2bdedcda705f6dcf45a1e9a090377f892bcb58bb
/src/main/output/issue/student_year/change/state/point/moment.py
ee124633f30626ea2579ff142848cd93d68a4ae2
[]
no_license
matkosoric/GenericNameTesting
860a22af1098dda9ea9e24a1fc681bb728aa2d69
03f4a38229c28bc6d83258e5a84fce4b189d5f00
refs/heads/master
2021-01-08T22:35:20.022350
2020-02-21T11:28:21
2020-02-21T11:28:21
242,123,053
1
0
null
null
null
null
UTF-8
Python
false
false
2,730
py
var express = require('express');
let https = require('https');

let body = '';

let host = 'api.microsofttranslator.com';
let path = '/V2/Http.svc/TranslateArray';
let target = 'en';
let params = '';

let ns = "http://schemas.microsoft.com/2003/10/Serialization/Arrays";
let content = '<TranslateArrayRequest>\n' +
    // NOTE: AppId is required, but it can be empty because we are sending the Ocp-Apim-Subscription-Key header.
    '  <AppId />\n' +
    '  <Texts>\n' +
    '    <string xmlns=\"' + ns + '\">돼지</string>\n' +
    '    <string xmlns=\"' + ns + '\">소고기</string>\n' +
    '    <string xmlns=\"' + ns + '\">닭고기</string>\n' +
    '    <string xmlns=\"' + ns + '\">같은 제조시설</string>\n' +
    '  </Texts>\n' +
    '  <To>' + target + '</To>\n' +
    '</TranslateArrayRequest>\n';

module.exports.Translate = async function() {
    GetTranslationsArray();
}

let GetTranslationsArray = function () {
    let request_params = {
        method : 'POST',
        hostname : host,
        path : path + params,
        headers : {
            'Content-Type' : 'text/xml',
            '70de318bb5902f3b5cc0d6119aba0c05' : subscriptionKey,
        }
    };

    let req = https.request(request_params, response_handler);
    req.write(content);
    req.end();
}

let response_handler = function (response) {
    response.on('data', function (d) {
        body += d;
    });
    response.on('end', function () {
        console.log('[[[[[[end]]]]]]' + body);
        return body;
    });
    response.on('error', function (e) {
        console.log('Error: ' + e.message);
    });
};

/*
let response_handler = function (response) {
    let body = '';
    response.on('data', function (d) {
        body += d;
    });
    response.on('end', function () {
        console.log(body);
    });
    response.on('error', function (e) {
        console.log('Error: ' + e.message);
    });
};

module.exports.Translate = function(){
    // Replace the subscriptionKey string value with your valid subscription key.
    let subscriptionKey = '95c07978668892044c722dacc9d7607d';

    let host = 'api.microsofttranslator.com';
    let path = '/V2/Http.svc/Translate';

    //let from = 'unk'; from=' + from + '
    let target = 'en';
    let text = '안녕. 좋은 아침입니다.';

    let params = '?to=' + target + '&text=' + encodeURI(text);

    let request_params = {
        method : 'GET',
        hostname : host,
        path : path + params,
        headers : {
            '53f9bacbf84de83f7081e0f15b562425' : subscriptionKey,
        }
    };

    let req = https.request(request_params, response_handler);
    req.end();

    console.log(req);
    return req;
};
*/
[ "soric.matko@gmail.com" ]
soric.matko@gmail.com
6963950662de49a1b19ec0ddd269e009f1a95243
f19ef1f67be42b17e75bf62fe0c7cc18dedceb81
/shop/migrations/0031_product_likes.py
7801fed65c933e126c9fc3c4a3cde57dac4c6160
[]
no_license
FatfoutiMontassar/django
5ec1164a14a0ed77f673372d9b62367444982c47
f63dfd053314311e24aabf969ed1cb154e186493
refs/heads/master
2020-11-30T00:33:36.161003
2017-08-25T14:54:56
2017-08-25T14:54:56
95,864,961
1
0
null
null
null
null
UTF-8
Python
false
false
564
py
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-07-09 22:23
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('shop', '0030_trader'),
    ]

    operations = [
        migrations.AddField(
            model_name='product',
            name='likes',
            field=models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL),
        ),
    ]
[ "montassar.fatfouti@supcom.tn" ]
montassar.fatfouti@supcom.tn
83b1aa1cb7e5ac74c85cac1b03d678a6da5ecb50
5fffb4c907ca25c8481ae2cf3a9d4b97ae746117
/bloomfilter.py
d563f6f0e3c4f5a4635b0cd538da180ce95cce25
[]
no_license
dengguoguo/algorithm
b7aa47c559339ed64c464ca1fd6763f8d11cd850
316e06bc6ad163eaff8644d1d88a04fccaad06d8
refs/heads/master
2020-04-07T11:45:59.786332
2018-11-20T06:54:12
2018-11-20T06:54:12
158,339,921
2
0
null
null
null
null
UTF-8
Python
false
false
3,569
py
# -*- coding: utf-8 -*-
# @Time    : 2018/8/17 11:42
# @Author  : dengguo
# @File    : bloomfilter.py
# @Software: PyCharm
"""
To check whether an element belongs to a set, the usual approach is to store
all of the elements and compare against them; linked lists, trees and similar
data structures all follow this idea. But as the set grows, the storage it
needs keeps growing and lookups keep slowing down.

A hash table maps an element, via a hash function, to a single point in a bit
array, so membership reduces to checking whether that bit is 1. That is the
basic idea behind a Bloom filter.

The problem hashing faces is collisions. Even assuming a good hash function,
if the bit array has m points, then pushing the collision rate down to, say,
1% means the table can only hold about m/100 elements, which is hardly
space-efficient.

The simple fix is to use several hash functions: if any one of them says the
element is not in the set, it definitely is not; if all of them say it is,
it is probably there, with some small chance they are all wrong.
"""
from bitarray import bitarray
import mmh3


class BloomFilter(set):

    def __init__(self, size, hash_count):
        super(BloomFilter, self).__init__()
        self.bit_array = bitarray(size)
        self.bit_array.setall(0)
        self.size = size
        self.hash_count = hash_count

    def __len__(self):
        return self.size

    def __iter__(self):
        return iter(self.bit_array)

    def add(self, item):
        for ii in range(self.hash_count):
            index = mmh3.hash(item, ii) % self.size
            self.bit_array[index] = 1
        return self

    def __contains__(self, item):
        out = True
        for ii in range(self.hash_count):
            index = mmh3.hash(item, ii) % self.size
            if self.bit_array[index] == 0:
                out = False
        return out


def main():
    bloom = BloomFilter(100, 10)
    animals = ['dog', 'cat', 'giraffe', 'fly', 'mosquito', 'horse', 'eagle',
               'bird', 'bison', 'boar', 'butterfly', 'ant', 'anaconda', 'bear',
               'chicken', 'dolphin', 'donkey', 'crow', 'crocodile']
    # First insertion of animals into the bloom filter
    for animal in animals:
        bloom.add(animal)

    # Membership existence for already inserted animals
    # There should not be any false negatives
    for animal in animals:
        if animal in bloom:
            print('{} is in bloom filter as expected'.format(animal))
        else:
            print('Something went terribly wrong for {}'.format(animal))
            print('FALSE NEGATIVE!')

    # Membership existence for not inserted animals
    # There could be false positives
    other_animals = ['badger', 'cow', 'pig', 'sheep', 'bee', 'wolf', 'fox',
                     'whale', 'shark', 'fish', 'turkey', 'duck', 'dove',
                     'deer', 'elephant', 'frog', 'falcon', 'goat', 'gorilla',
                     'hawk']
    for other_animal in other_animals:
        if other_animal in bloom:
            print('{} is not in the bloom, but a false positive'.format(other_animal))
        else:
            print('{} is not in the bloom filter as expected'.format(other_animal))


if __name__ == '__main__':
    main()
[ "609085005@qq.com" ]
609085005@qq.com
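The trade-off the docstring describes has a standard closed form: with m bits, k hash functions and n inserted items, the false-positive probability is approximately (1 - e^(-kn/m))^k. A quick check of the parameters used in main() above (m=100, k=10, n=19 animals):

from math import exp

def bloom_fp_rate(m_bits, k_hashes, n_items):
    """Classic approximation of a Bloom filter's false-positive probability."""
    return (1 - exp(-k_hashes * n_items / m_bits)) ** k_hashes

print(bloom_fp_rate(100, 10, 19))  # ~0.20: with only 100 bits, expect roughly 1 in 5 false positives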
5a55e22c0047fbb7206c9582f0074d7c4212b789
e8ea8c6049ce3168753d06a23ed44cca7aadf098
/baselines/racing/experiments/acme/__init__.py
216f423713605af9bbba13e06ab3182dd577a776
[]
no_license
luigiberducci/racing_dreamer
a05ec0e8a611d8c819f70c2b9b16bb97f52b2462
6ae736a577573682b17e67e63537c3c00de21b22
refs/heads/main
2023-08-24T09:20:40.262976
2021-10-23T10:32:58
2021-10-23T10:32:58
350,778,233
0
0
null
2021-03-23T16:16:08
2021-03-23T16:16:07
null
UTF-8
Python
false
false
1,932
py
from functools import partial
from typing import Optional, Union, Dict

from baselines.racing import make_mpo_agent
from baselines.racing.algorithms.d4pg import make_d4pg_agent
from baselines.racing.algorithms.lstm_mpo import make_lstm_mpo_agent
from baselines.racing.experiments.acme import SingleAgentExperiment

import tensorflow as tf

physical_devices = tf.config.list_physical_devices('GPU')
for device in physical_devices:
    tf.config.experimental.set_memory_growth(device, True)

from baselines.racing.experiments.util import read_hyperparams


def choose_agent(name: str, param_file: Union[Optional[str], Dict], checkpoint_path: str):
    if param_file:
        if isinstance(param_file, str):
            params = read_hyperparams(param_file)
        elif isinstance(param_file, Dict):
            params = param_file
        else:
            params = {}
    else:
        params = {}
    print(params.keys())
    if name == 'mpo':
        constructor = partial(make_mpo_agent, hyperparams=params, checkpoint_path=checkpoint_path)
    elif name == 'd4pg':
        constructor = partial(make_d4pg_agent, hyperparams=params, checkpoint_path=checkpoint_path)
    elif name == 'lstm-mpo':
        constructor = partial(make_lstm_mpo_agent, hyperparams=params, checkpoint_path=checkpoint_path)
    else:
        raise NotImplementedError(name)
    return constructor


def make_experiment(args, logdir):
    checkpoint_path = f'{logdir}/checkpoints'
    env_config = SingleAgentExperiment.EnvConfig(
        track=args.track,
        task=args.task,
        action_repeat=args.action_repeat,
        training_time_limit=args.training_time_limit,
        eval_time_limit=args.eval_time_limit
    )
    experiment = SingleAgentExperiment(env_config=env_config, seed=args.seed, logdir=logdir)
    agent_ctor = choose_agent(name=args.agent, param_file=args.params, checkpoint_path=checkpoint_path)
    return experiment, agent_ctor
[ "axel.brunnbauer@gmx.at" ]
axel.brunnbauer@gmx.at
20788e9de508e3703d0c702bb4d7af18d499b85f
eab8f79829f1f5d3050a4f47b240b79f9e37dbda
/unused_oil_effect.py
3dfaf355a9044ebf4ae7e2928026e72c616eb274
[]
no_license
ZichangYe/AdvancedPython2021
5c5f3df36db4a34f3531f44b550ae79b0a520b69
a2aad24f2e8c9242c6551724fe9bb4b67171cdc0
refs/heads/main
2023-05-03T12:49:29.758440
2021-05-12T03:41:33
2021-05-12T03:41:33
363,002,008
4
3
null
null
null
null
UTF-8
Python
false
false
1,697
py
import numpy as np
from itertools import product


def img_filter(img, locations, radius=3, level=10):
    '''
    This algorithm selects the most frequent intensity group within the
    kernel and averages r, g, b over that pixel group; it makes the
    original image look like an oil painting.
    :param img: 3d image array: height, width, channel (b, g, r)
    :param locations: list of tuples of face locations (top, right, bottom, left)
    :param radius: half-size of the square kernel, in pixels
    :param level: number of intensity groups the 0-255 range is split into
    :return: filtered copy of the image
    '''
    des_img = np.copy(img)
    for location in locations:
        top, right, bottom, left = location
        if top + radius >= bottom - radius or left + radius >= right - radius:
            continue
        for i, j in product(range(top + radius, bottom - radius),
                            range(left + radius, right - radius)):
            level_counter = np.zeros(level, dtype=np.uint32)
            b_level = np.zeros(level, dtype=np.uint32)
            r_level = np.zeros(level, dtype=np.uint32)
            g_level = np.zeros(level, dtype=np.uint32)
            for m, n in product(range(-radius, radius), repeat=2):
                b, g, r = img[i + m, j + n]
                avg = (b + g + r) / 3.
                pixlv = int(avg / (256 / level))
                level_counter[pixlv] += 1
                b_level[pixlv] += b
                g_level[pixlv] += g
                r_level[pixlv] += r
            most_level_Idx = np.argmax(level_counter)
            most_level_count = level_counter[most_level_Idx]
            des_img[i, j, 0] = b_level[most_level_Idx] // most_level_count
            des_img[i, j, 1] = g_level[most_level_Idx] // most_level_count
            des_img[i, j, 2] = r_level[most_level_Idx] // most_level_count
    return des_img
[ "54514463+PygMali0n@users.noreply.github.com" ]
54514463+PygMali0n@users.noreply.github.com
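A minimal driver for img_filter above; OpenCV is assumed only for image I/O, and the file names and the face box are placeholders (boxes follow the (top, right, bottom, left) order from the docstring):

import cv2  # assumed only for reading/writing the image

img = cv2.imread('input.jpg')        # H x W x 3, BGR, matching img_filter's (b, g, r) layout
face_boxes = [(50, 200, 180, 80)]    # (top, right, bottom, left), e.g. from a face detector
out = img_filter(img, face_boxes, radius=3, level=10)
cv2.imwrite('output.jpg', out)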
cc48c048975e45adac7805c2e2bb0d29228dbacb
74bdcbc8781e850d1088e506ffd758038c7ee7d9
/pytty/lukija.py
d4d613b1c7e9cf900f760151b0c983e52db54b6d
[]
no_license
ToniTSaari/Beginning
0522209d0ceb1367871c96ffcd4b9dc3a4460eaa
16b318e6de49017e3e6b8f9ee60743a0a4ed5ce6
refs/heads/master
2020-07-16T23:30:37.120940
2019-11-15T09:41:56
2019-11-15T09:41:56
205,891,313
0
0
null
null
null
null
UTF-8
Python
false
false
410
py
import os

dir = os.getcwd()
juuri = os.path.abspath('..')
print(dir)
print(juuri)
print(["Tiedosto", "Koko"])
for file in os.listdir(dir):
    content = [file] + [str(os.path.getsize(os.path.join(dir, file)))]
    print(content)
print()
print(["Tiedosto", "Koko"])
for file in os.listdir(juuri):
    content = [file] + [str(os.path.getsize(os.path.join(juuri, file)))]
    print(content)
[ "noreply@github.com" ]
ToniTSaari.noreply@github.com
514580033b9cd1a0d335e94d0eccc4cf36a36f2b
0ae2bb21d7ca71a691e33cb044a0964d380adda2
/EA/LC160IntersectionOfTwoLinkedLists.py
4c7fb25db05293025b1189d2b23daabae55d0cf8
[]
no_license
xwang322/Coding-Interview
5d27ec92d6fcbb7b929dd98bb07c968c1e1b2a04
ee5beb79038675ce73c6d147ba9249d9a5ca346a
refs/heads/master
2020-03-10T08:18:34.980557
2018-06-24T03:37:12
2018-06-24T03:37:12
129,282,263
2
6
null
2018-04-19T19:31:24
2018-04-12T16:41:28
Python
UTF-8
Python
false
false
1,299
py
# Definition for singly-linked list.
# class ListNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.next = None

class Solution(object):
    # O(n) space
    def getIntersectionNode(self, headA, headB):
        if not headA or not headB:
            return None
        visited = set()
        while headA:
            visited.add(headA)  # store the node itself: values may repeat across lists
            headA = headA.next
        while headB:
            if headB in visited:
                return headB
            headB = headB.next
        return None


class Solution(object):
    # O(1) space
    def getIntersectionNode(self, headA, headB):
        if not headA or not headB:
            return None
        length1 = 0
        length2 = 0
        tempA = headA
        tempB = headB
        while tempA:
            length1 += 1
            tempA = tempA.next
        while tempB:
            length2 += 1
            tempB = tempB.next
        if length1 >= length2:
            for i in range(length1 - length2):
                headA = headA.next
        else:
            for i in range(length2 - length1):
                headB = headB.next
        while headA != headB:
            headA = headA.next
            headB = headB.next
        return headA
[ "noreply@github.com" ]
xwang322.noreply@github.com
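A third, widely known variant (not in the file above) also runs in O(1) space but avoids the length counting: advance two pointers and, when one falls off the end of its list, restart it at the head of the other list. Both pointers then traverse lenA + lenB steps, so they meet at the intersection node, or at None if the lists never merge:

class SolutionTwoPointer(object):
    def getIntersectionNode(self, headA, headB):
        a, b = headA, headB
        while a is not b:
            a = a.next if a else headB
            b = b.next if b else headA
        return a  # the intersection node, or None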
d8440c333807f499183b5d7c0271f43a4a5eddf3
bb32566f0c4688292b8f37d29630e0b7a18be24b
/checkio/cipher-map.py
d7b90088c37eff5bec1701186b7ee4eab0cc5e34
[]
no_license
Chencheng78/python-learning
4c7dd3a5ad39ac2e96b9ff0a1b9aabb56e863f7f
63eb7ee9e547f899eafa698556f6adeb518795bb
refs/heads/master
2022-07-27T02:40:03.067820
2022-07-12T09:05:47
2022-07-12T09:05:47
55,224,905
0
1
null
null
null
null
UTF-8
Python
false
false
649
py
def clockwise_90(matrix):
    matrix.reverse()
    return list(map(list, zip(*matrix)))


def recall_password(grille, password):
    cipher_matrix = []
    for i in grille:
        cipher_matrix.append([j for j in i])
    pwd_matrix = []
    for i in password:
        pwd_matrix.append([j for j in i])
    i = 0
    output = ''
    while i < 4:
        for a in range(4):
            for b in range(4):
                if cipher_matrix[a][b] == 'X':
                    output += pwd_matrix[a][b]
        cipher_matrix = clockwise_90(cipher_matrix)
        i += 1
    #print(clockwise_90(cipher_matrix))
    return output


print(recall_password(
    ('X...', '..X.', 'X..X', '....'),
    ('itdf', 'gdce', 'aton', 'qrdi')))
[ "geniuscc7@163.com" ]
geniuscc7@163.com
d6caf303d71c8dc706509a4b217651915c4b7600
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
/3yPvbEjskPuuSSCAC_11.py
2fba61184753e37ef33b7f27aaa7c8a6692740f4
[]
no_license
daniel-reich/ubiquitous-fiesta
26e80f0082f8589e51d359ce7953117a3da7d38c
9af2700dbe59284f5697e612491499841a6c126f
refs/heads/master
2023-04-05T06:40:37.328213
2021-04-06T20:17:44
2021-04-06T20:17:44
355,318,759
0
0
null
null
null
null
UTF-8
Python
false
false
121
py
def trimmed_averages(lst):
    lst.remove(min(lst))
    lst.remove(max(lst))
    avg = sum(lst) / len(lst)
    return round(avg)
[ "daniel.reich@danielreichs-MacBook-Pro.local" ]
daniel.reich@danielreichs-MacBook-Pro.local
6234f0537de000f3fcd0a6ebe6e3d79510ff5394
c9b4179f07e10a657b4d18fb3dea6ed34ccf79c4
/benchmark_system/EmoticonDetector.py
9e69ddb0b2ed6e67170f7bb24f4428d6ad56e3ca
[]
no_license
chialun-yeh/irony-detection
866c3ab00e1218d35579d32f0decd0e83b09c55a
c9556636f4dfcbb9c5d095f55bb960da548e2242
refs/heads/master
2020-04-07T16:31:35.341366
2018-04-16T17:53:09
2018-04-16T17:53:09
124,222,779
0
0
null
null
null
null
UTF-8
Python
false
false
825
py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr  4 15:51:20 2018

@author: xuecho
"""

class EmoticonDetector:
    emoticons = {}

    def __init__(self, emoticon_file="emoticons.txt"):
        from pathlib import Path
        content = Path(emoticon_file).read_text()
        positive = True
        for line in content.split("\n"):
            if "positive" in line.lower():
                positive = True
                continue
            elif "negative" in line.lower():
                positive = False
                continue
            self.emoticons[line] = positive

    def is_positive(self, emoticon):
        if emoticon in self.emoticons:
            return self.emoticons[emoticon]
        return False

    def is_emoticon(self, to_check):
        return to_check in self.emoticons
[ "F.Xu-1@student.tudelft.nl" ]
F.Xu-1@student.tudelft.nl
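A small driver for EmoticonDetector; the emoticons.txt layout, a "positive"/"negative" section header followed by one emoticon per line, is inferred from the parsing loop above rather than documented anywhere:

sample = "positive\n:)\n:-)\nnegative\n:(\n:-("
with open("emoticons.txt", "w") as f:
    f.write(sample)

detector = EmoticonDetector()
print(detector.is_emoticon(":)"))   # True
print(detector.is_positive(":("))   # False: listed under the negative section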
2284ae8e1029a899f3759326e8b0fd32c931c601
6d026cee30d52154e3fd55a92c4981ee532c8084
/Section2/L5 Univeral array/universal_array_function.py
5c12399655d6095a4c5fbd790387f524779bf968
[ "MIT" ]
permissive
Mohit-Sharma1/Takenmind_Internship_assignments
6817934a7c210e1e471da3fb1aebf5f5d87366a3
7099ae3a70fca009f6298482e90e988124868148
refs/heads/master
2022-11-17T18:45:19.488769
2020-07-20T19:03:22
2020-07-20T19:03:22
281,186,240
0
0
null
null
null
null
UTF-8
Python
false
false
434
py
import numpy as np

#arange
#sqrt
#expo
#random
#addition
#maximum
#reference link for many others

A = np.arange(15)
print A

A = np.arange(1, 15, 2)
print A

#sqrt
B = np.sqrt(A)
print "B is"
print B

#exp
C = np.exp(A)
print "C is"
print C

#addition
D = np.add(A, B)
print "D is"
print D

#maximum function
E = np.maximum(A, B)
print "E is"
print E

#additional resources
#scipy.org all functions associated np array
#docs.scipy.org
[ "noreply@github.com" ]
Mohit-Sharma1.noreply@github.com
ceca4d7baec39bcf1dc5d2c847eea6e41fe27d3a
89828b141969c07d00fc7aa8da51f8244babf40b
/catalog/migrations/0002_auto_20210204_2112.py
d2ada785787a7cde2eea2c0c51b1b68d20702899
[]
no_license
dannybarajas/library
0d2288bbbe2e019ceb1f14f106f87c1d1aa4304a
11386cae8283435514437e27ff3f9f9949eea439
refs/heads/master
2023-02-27T05:20:47.280827
2021-02-05T03:33:37
2021-02-05T03:33:37
336,152,919
0
0
null
null
null
null
UTF-8
Python
false
false
840
py
# Generated by Django 3.1.5 on 2021-02-05 02:12

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('catalog', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Language',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text="Enter the book's natural language (e.g. English, French, Japanese etc.)", max_length=200)),
            ],
        ),
        migrations.AddField(
            model_name='book',
            name='language',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.language'),
        ),
    ]
[ "dbarajasm93@gmail.com" ]
dbarajasm93@gmail.com
f3e633a244d1f2f9e6a7415e195e1f9f035238f5
b28e5d26f3ec176970c035d10823ce47fcadcb38
/operations/exchange.py
b0806dcbcb32e17ea89e202d70fbbc1144cb3bf5
[]
no_license
Aldebrand/xchange
da989f63ebc4645874e749fdc4c5e9c3c968cc55
4f7073624601275675077f6106716754c442a74d
refs/heads/main
2023-03-26T05:47:48.501485
2021-03-24T22:08:02
2021-03-24T22:08:02
350,117,788
0
0
null
null
null
null
UTF-8
Python
false
false
1,971
py
from typing import Optional, Tuple

from operations.utils import convert_money, load_config


def exchange(amount: int, origin_currency: str, target_currency: str) -> Tuple[Optional[str], Optional[str]]:
    """
    Exchange a certain amount of money from one currency to another.

    :param amount: Amount of money to exchange
    :param origin_currency: Original currency
    :param target_currency: Target currency
    """
    # Load the configuration before exchanging the money, so the exchange
    # is done with the right commission.
    # If something went wrong, return an error message.
    config = load_config()

    if not config:
        error_msg = 'Invalid configuration file'
        return None, error_msg

    # Convert the money.
    converted_amount = convert_money(origin_currency, target_currency, amount)

    # Check if the conversion completed successfully.
    # If not, return an error message.
    if not converted_amount:
        error_msg = (f'One of the currency codes: {origin_currency} or '
                     f'{target_currency} is invalid')
        return None, error_msg

    # Calculate the amount of money to give back to the user.
    commission = config.get('base_commission', 5)
    commission_percent = commission / 100
    profit = converted_amount * commission_percent
    amount_after_commission = round(converted_amount - profit, 3)

    # Build summary message.
    success_msg = """
    From Amount: {origin_amount}
    From Currency: {origin_currency}
    To Currency: {target_currency}
    Commission: {commission}%
    Amount Before commission: {converted_amount}
    Amount: {amount}
    """.format(origin_amount=amount,
               origin_currency=origin_currency,
               target_currency=target_currency,
               commission=commission,
               converted_amount=converted_amount,
               amount=amount_after_commission)

    return success_msg, None
[ "or@datascience.co.il" ]
or@datascience.co.il
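A sketch of how the (result, error) pair returned above is meant to be consumed; it assumes operations.utils and its configuration file are in place, and the amounts are illustrative:

from operations.exchange import exchange

msg, err = exchange(100, 'USD', 'EUR')
if err:
    print('exchange failed:', err)
else:
    print(msg)  # summary with amounts before/after the configured commission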
5489b75964557fe23e7066825e9c3fdcf32c8609
60b620746561bdcbdfde2106c167095c63ea3448
/Hash Maps/PermutationPalindrome.py
0d53b0d93f0f7e490ec4398ab5f7b31c4c164c07
[]
no_license
salonishah331/CodingChallenges
b20c9f450ce523dd5cca14496ca3b8ed15a78559
1c02aedc08e8c4eac2f59b7c8860b7e89e7cc27e
refs/heads/master
2021-01-14T04:15:14.064951
2020-10-21T20:33:18
2020-10-21T20:33:18
242,596,422
0
0
null
null
null
null
UTF-8
Python
false
false
892
py
'''
Permutation Palindrome:
Write an efficient function that checks whether any permutation of an input
string is a palindrome.

Ex: "ivicc" should return True
    "civil" should return False
'''

def PermutationPalindrome(word):
    perm = {}
    for i in range(len(word)):
        if word[i] not in perm:
            # add to dict
            perm[word[i]] = 1
        else:
            # increment value
            perm[word[i]] += 1
    vals = list(perm.values())
    odd_count = 0
    for elem in vals:
        if elem % 2 != 0:
            odd_count += 1
    return odd_count <= 1


def ver2(word):
    perms = set()
    for char in word:
        if char in perms:
            perms.remove(char)
        else:
            perms.add(char)
    return len(perms) <= 1


def main():
    word = 'salas'
    print(PermutationPalindrome(word))
    print(ver2(word))

main()
[ "noreply@github.com" ]
salonishah331.noreply@github.com
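Both versions above reduce to the same invariant: a permutation of a string can form a palindrome iff at most one character has an odd count. The same check as a compact collections.Counter variant:

from collections import Counter

def permutation_palindrome(word):
    # A permutation can be a palindrome iff at most one character count is odd.
    return sum(count % 2 for count in Counter(word).values()) <= 1

assert permutation_palindrome('ivicc') is True
assert permutation_palindrome('civil') is False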
cacfc13d21332b6a02950212f0f55a6fa2fbf453
8bce93fc8f875780f0737130f457ff9fe0c58836
/deploy.py
9ef305a04d3207d3890f3552a7f06ef5987f6d7d
[ "Apache-2.0" ]
permissive
cundohubs/cundo-lambda-deployer
e992668f0466e28e8995da7707f466ca3dea3f5e
a91565c42cf981962634b1e090f687440a0c0181
refs/heads/master
2020-05-30T19:11:06.200031
2016-06-21T23:11:29
2016-06-21T23:11:29
61,673,382
0
0
null
null
null
null
UTF-8
Python
false
false
11,486
py
#!/usr/bin/env python from git import Repo from boto3 import session from shutil import rmtree import os import zipfile import logging import json import argparse import ConfigParser logging.basicConfig() logger = logging.getLogger() logger.setLevel(logging.INFO) root_dir = "/tmp" class DeploymentConfig: def __init__(self, **config_json): self.__dict__.update(config_json) class Deployment: def __init__(self, name, region, codecommit_arn, aws_access_key_id=None, aws_secret_access_key=None): self.session = session.Session(aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, region_name=region) self._repository_name = name self._codecommit_arn = codecommit_arn self.codecommit = self.session.client('codecommit', region_name=region) self.repository_name = self._codecommit_arn.split(":")[-1] self.local_repo_path = root_dir + "/" + self.repository_name + "/" self._zip_filename = None self._s3_bucket = None self._s3_key = None self._s3_prefix = None self._lambda_config = None self._local_lambda_path_inside_repo = None self._function_name = None def get_deployment_configurations(self, config_file_name='lambda-deploy.json'): config_file = self.local_repo_path + config_file_name with open(config_file) as data_file: data = json.load(data_file) configurations = data["DeploymentConfiguration"] self._s3_prefix = configurations["S3PrefixDeployments"] self._local_lambda_path_inside_repo = configurations["LambdaDirectory"] self._s3_bucket = configurations["S3Bucket"] def git_clone(self): try: response = self.codecommit.get_repository(repositoryName=self._repository_name) # clone_url = response['repositoryMetadata']['cloneUrlHttp'] clone_ssh = response['repositoryMetadata']['cloneUrlSsh'] destination = self.local_repo_path if os.path.exists(destination): self.git_pull(self._repository_name) Repo.clone_from(clone_ssh, destination) return clone_ssh except Exception as e: print(e) print( 'Error getting repository {}. 
Make sure it exists and that your \ repository is in the same region as this function.'.format( self._repository_name)) raise e def configure_lambda_function_deprecated(self, repository): try: self.configure_lambda_function_from_config_json(repository) self._s3_bucket = self._lambda_config.S3Bucket self._local_lambda_path_inside_repo = self._lambda_config.LambdaDirectory self._function_name = self._lambda_config.FunctionName except Exception as e: print (e.message) print("Failed to configure Lambda function from config file") raise e def configure_lambda_function_from_config_json(self, config_file_name='config.json'): config_json_path = self.repository_name + config_file_name try: with open(config_json_path) as data_file: data = json.load(data_file) self._lambda_config = DeploymentConfig(**data) data_file.close() except Exception as e: print(e.message) print("Failed to load configurations from file %s" % config_json_path) # raise e data = {'S3Bucket': 'curalate-lambda-qa', 'LambdaDirectory': 'src/python/', 'FunctionName': 'test_function'} self._lambda_config = DeploymentConfig(**data) def zip_package(self): lambda_directory = self._local_lambda_path_inside_repo try: lambda_path = self.local_repo_path + "/" + lambda_directory.strip('/') self._zip_filename = self._repository_name + '.zip' ziph = zipfile.ZipFile(root_dir + '/' + self._zip_filename, 'w', zipfile.ZIP_DEFLATED) exclude = {'.git'} # ziph is zipfile handle for root, dirs, files in os.walk(lambda_path): dirs[:] = [d for d in dirs if d not in exclude] for f in files: lambda_path_inside_zip = root.replace(lambda_path, self._repository_name) + "/" ziph.write(os.path.join(root, f), lambda_path_inside_zip + f) except Exception as e: print (e.message) raise e def upload_zip_file_to_s3(self, bucket=None): s3_client = self.session.client('s3') if bucket is not None: self._s3_bucket = bucket try: logger.info("Uploading %s to S3..." % self._zip_filename) self._s3_key = self._s3_prefix + self._zip_filename s3_client.upload_file(root_dir + "/" + self._zip_filename, self._s3_bucket, self._s3_key) except Exception as e: print(e.message) raise e def get_s3_key_version(self, s3_key): self._s3_bucket = self._s3_bucket try: logger.info("Getting version id of %s from S3..." 
% (s3_key)) s3_resource = self.session.resource('s3') obj = s3_resource.Object(self._s3_bucket, s3_key) return obj.version_id except Exception, e: print(e.message) raise e def load_lambda_configuration_from_file(self, file_name="lambda-deploy.json"): config_file = self.local_repo_path + file_name with open(config_file) as data_file: data = json.load(data_file) configurations = data["LambdaConfiguration"] configurations["Handler"] = self.repository_name + "/" + configurations["Handler"] # configurations["Code"]["S3ObjectVersion"] = self.get_s3_key_version(self._s3_key) del configurations["Code"]["S3Key"] del configurations["Code"]["S3Bucket"] del configurations["Code"]["S3ObjectVersion"] del configurations["VpcConfig"] return configurations def create_lambda_function(self): parameters = self.load_lambda_configuration_from_file() try: zipdata = file_get_contents(root_dir + "/" + self._zip_filename) lambda_client = self.session.client('lambda') parameters['Code']['ZipFile'] = zipdata lambda_client.create_function(**parameters) except Exception as e: print("Failed to create lambda function from %s/%s" % (self._s3_bucket, self._s3_key)) print ("Parameters: %s" % parameters) print (e.message) raise e def update_lambda_function_code(self, s3_key=None): lambda_client = self.session.client('lambda') s3_key = self._s3_key if s3_key is None else s3_key # prefix, filename = s3_key.split("/", 1) parameters = { 'FunctionName': self._function_name, 'S3Bucket': self._s3_bucket, 'S3Key': s3_key, 'Publish': True } try: lambda_client.update_function_code(**parameters) except Exception as e: print("Failed to publish zip file %s to S3 Bucket %s" % (s3_key, self._s3_bucket)) print (e.message) raise e def rm_local_repo(self): try: rmtree(self.local_repo_path) return True except Exception as e: print (e.message) raise e def git_pull(self): # To do: git pull, not rm the local repo self.rm_local_repo() def file_get_contents(filename): with open(filename) as f: return f.read() def lambda_handler(event, context): # Load credentials only if we are testing. 
Otherwise, use IAM Role event = json.loads(event) if 'Credentials' in event.keys(): aws_access_key_id = event['Credentials']['aws_access_key_id'] aws_secret_access_key = event['Credentials']['aws_secret_access_key'] else: aws_access_key_id = None aws_secret_access_key = None function_arn = event['Records'][0]['eventSourceARN'] arn, provider, service, region, akid, resource_id = function_arn.split(":") # references = {reference['ref'] for reference in event['Records'][0]['codecommit']['references']} # print("References: " + str(references)) # Get the repository from the event and show its git clone URL repository = event['Records'][0]['eventSourceARN'].split(':')[5] deployment = Deployment(repository, region, function_arn, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key) deployment.git_clone() deployment.get_deployment_configurations() # deployment.configure_lambda_function_from_config_json() deployment.zip_package() deployment.upload_zip_file_to_s3() # deployment.load_lambda_configuration_from_file() deployment.create_lambda_function() # deployment.update_lambda_function_code() output = event output['Status'] = "OK" return output class Event: def __init__(self, **entries): self.__dict__.update(entries) class Context: def __init__(self, **entries): self.__dict__.update(entries) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--dryrun", help="Dry run - don't change anything in AWS") parser.add_argument("--account", help="AWS Account Name", default="QA") parser.add_argument("--secretkey", help="AWS Secret Key", default="deprecated") args = parser.parse_args() credentials = ConfigParser.ConfigParser() configs = ConfigParser.ConfigParser() home_dir = os.environ['HOME'] credentials_file = home_dir + "/.aws/credentials" credentials.read(credentials_file) account_name = args.account aws_access_key_id = credentials.get(account_name, "aws_access_key_id") aws_secret_access_key = credentials.get(account_name, "aws_secret_access_key") aws_credentials = dict([ ('aws_access_key_id', aws_access_key_id), ('aws_secret_access_key', aws_secret_access_key)]) sample_arn = "arn:aws:lambda:us-east-1:123456789012:function:tagging-lambda" records = [dict([ ('eventSourceARN', "arn:aws:codecommit:us-east-1:176853725791:tagger-asg"), ('codecommit', dict([ ('references', [dict([('ref', 'ref_1')])]) ])) ])] event = dict([("invokingEvent", dict([("configurationItem", dict([("configuration", dict([("instanceId", "i-deaed543")]) ), ("configurationItemStatus", "OK")]) )])), ("Records", records), ("eventLeftScope", False), ("Credentials", aws_credentials), ]) context = dict([("invoked_function_arn", sample_arn), ("keys", dict([ ("aws_access_key_id", aws_access_key_id), ("aws_secret_access_key", aws_secret_access_key)] )) ]) lambda_handler(json.dumps(event), Context(**context))
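Two fixes worth flagging in the deployment script above, shown as a hedged sketch rather than a full rewrite (logger, self.session, self._s3_bucket and root_dir are names taken from the script itself): file_get_contents opens the ZIP in text mode even though boto3's create_function expects the Code['ZipFile'] payload as bytes, and get_s3_key_version uses the Python 2-only `except Exception, e:` form together with the long-deprecated e.message attribute.

def file_get_contents(filename):
    # Binary mode: create_function(Code={'ZipFile': ...}) needs raw bytes.
    with open(filename, 'rb') as f:
        return f.read()


# Inside the Deployment class:
def get_s3_key_version(self, s3_key):
    try:
        logger.info("Getting version id of %s from S3..." % s3_key)
        obj = self.session.resource('s3').Object(self._s3_bucket, s3_key)
        return obj.version_id
    except Exception as e:  # `except Exception, e:` only parses on Python 2
        print(e)            # e.message does not exist on Python 3 exceptions
        raise               # bare raise keeps the original traceback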
[ "facundo@curalate.com" ]
facundo@curalate.com
3690b622b34c806b02e2997a3f32cb9d737b7b7d
459a338018ba2d1a8ea228045b662fccbced58d0
/ebooksapi/ebooks/migrations/0003_auto_20191111_2146.py
60f2f543c332f59b6be4d55985470a4e7ac4e3cc
[]
no_license
nasutionam/django-web-api
ca75147d219774ca7454a2cf05073194f48643ac
03373c74a981e27f8fa1c56d14e8418801aa8350
refs/heads/master
2022-12-15T14:11:23.490820
2019-11-20T06:17:57
2019-11-20T06:17:57
220,584,142
1
0
null
2022-11-22T04:50:06
2019-11-09T03:09:51
Python
UTF-8
Python
false
false
516
py
# Generated by Django 2.2.7 on 2019-11-11 14:46

import django.core.validators
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('ebooks', '0002_auto_20191111_2102'),
    ]

    operations = [
        migrations.AlterField(
            model_name='review',
            name='rating',
            # Min/MaxValueValidator, not Min/MaxLengthValidator: the length
            # validators call len() and raise TypeError on an integer field.
            field=models.PositiveIntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5)]),
        ),
    ]
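For reference, a minimal sketch of the model declaration this migration implies. Only the rating field and the model name (from model_name='review') are confirmed by the migration itself; any other fields the real Review model carries are not shown here.

from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models


class Review(models.Model):
    # Confirmed by the migration: an integer rating constrained to 1-5.
    rating = models.PositiveIntegerField(
        validators=[MinValueValidator(1), MaxValueValidator(5)]
    )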
[ "nasution.nam@gmail.com" ]
nasution.nam@gmail.com
2e2050dc501bb659f815d5f48c8b65a162ee9cc0
5a8b0fe3e31a6436c159beee3ac07752d8cd2531
/exam.py
fb4b1b4d976e3071c232c143b0798660b6e6b222
[]
no_license
toekhinethein/Sample
49e81442a1171693ee5cd7e547eff56515bfd602
aad6d7c3240193fd8db4ad1d45e5f19759cd3f57
refs/heads/master
2020-12-14T07:13:10.636859
2020-03-07T05:35:08
2020-03-07T05:35:08
234,677,203
0
0
null
null
null
null
UTF-8
Python
false
false
271
py
x = int(input("Please Enter Exam Result: "))
if 0 < x < 50:
    print("F")
elif 50 <= x < 60:
    print("D")
elif 60 <= x < 80:
    print("C")
elif 80 <= x < 90:
    print("B")
elif 90 <= x <= 100:
    print("A")
else:
    print("Check Again")
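The cascade of boundary checks above can be collapsed into one ordered cut-off table; a minimal sketch using the standard-library bisect module, with the same boundaries and the same "Check Again" fallback for out-of-range input:

import bisect


def grade(score):
    """Map a score to a letter grade using the cut-offs from the script above."""
    if not 0 < score <= 100:
        return "Check Again"
    cutoffs = [50, 60, 80, 90]            # lower bounds of D, C, B, A
    letters = ["F", "D", "C", "B", "A"]
    return letters[bisect.bisect_right(cutoffs, score)]


print(grade(int(input("Please Enter Exam Result: "))))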
[ "toekhinetheinthein@gmail.com" ]
toekhinetheinthein@gmail.com
b381305143d97abcbd35226c2553b66154935323
156ede071b75ce824dcac0dddb0162b6f83963ee
/08 Practice Makes Perfect/07_reverse.py
b6b1745341a468b28fbfaa4db8b9e3e47203cb56
[]
no_license
mindovermiles262/codecademy-python
c834b4a7f3b1381dd3e65db27b2edb15eabda9c8
b6a6aa4db10192264ec2c88ddf4d1d16db66c533
refs/heads/master
2021-01-12T03:31:04.388402
2017-01-11T16:58:06
2017-01-11T16:58:06
null
0
0
null
null
null
null
UTF-8
Python
false
false
578
py
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 4 12:34:29 2017

@author: mindovermiles262

Define a function called reverse that takes a string text and returns that
string in reverse. For example: reverse("abcd") should return "dcba".

You may not use reversed or [::-1] to help you with this.

You may get a string containing special characters (for example, !, @, or #).
"""


def reverse(x):
    x = str(x)
    index = len(x) - 1
    reversed_output = []
    for i in x:
        reversed_output.append(x[index])
        index = index - 1
    return ''.join(reversed_output)
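A quick check of reverse() against the exercise's own example, plus the special-character and empty-string cases the docstring warns about:

assert reverse("abcd") == "dcba"
assert reverse("a!b@c#") == "#c@b!a"
assert reverse("") == ""
print("all reverse() checks passed")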
[ "mindovermiles262@gmail.com" ]
mindovermiles262@gmail.com
4ec3271673e917258b23e66d30a21e94fce571aa
e676677d718a3572634b3d5ef75815d3bb18a6c9
/venv/Scripts/keyy.py
3f374ca31b6b1890bb21c03b53bf27ddedce7e6d
[]
no_license
charithcherry/Automation_opencv
ecf525bb3efc288d67b5418204dc65ee40def857
cff68137df79e9c17d7ce8fb669fb009d3396006
refs/heads/master
2022-11-18T03:11:36.185841
2020-07-20T11:20:09
2020-07-20T11:20:09
281,095,998
1
0
null
null
null
null
UTF-8
Python
false
false
154
py
import keyboard

# Register the exit hotkey before entering the loop; in the original this
# line came after `while True` and was never reached.
keyboard.add_hotkey('alt', lambda: exit(0))

while True:
    rk = keyboard.record(until="shift")
    keyboard.play(rk, speed_factor=10)
[ "charithcherry1100gmail.com" ]
charithcherry1100gmail.com
ed32075ba24c826644dd08d780d93081e8b04d3d
60ce3d7cbf8689663fb9f8682fd8fd4a85a8aceb
/.venv/bin/easy_install
806e4fe3a1344f4d5caa8c63ac83405e1c468704
[]
no_license
SlikNik/ghostpost_backend
91c330094b6e1e387803cff770892799f257dacb
810db076516dbd9cfa2dc4e17bce5103ed80748f
refs/heads/master
2022-12-24T00:44:40.042721
2020-10-02T00:21:45
2020-10-02T00:21:45
299,697,335
1
0
null
null
null
null
UTF-8
Python
false
false
295
#!/Users/nikalmorgan/Desktop/Kenzie/submitted/Q4/ghostpost_backend/.venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys

from setuptools.command.easy_install import main

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
[ "nikalmorgan11@gmail.com" ]
nikalmorgan11@gmail.com
dc1a42b7340f04ba0eb9400c2c8855f8a88f2ea9
134ff3c0719d4c0022eb0fb7c859bdbff5ca34b2
/desktop/core/ext-py/Twisted/doc/core/howto/tutorial/listings/finger/finger20.tac
52b955619703f31883d897082674c0c7f37e1c7c
[ "Apache-2.0", "MIT" ]
permissive
civascu/hue
22637f13a4cfc557716557661523131b6ac16da4
82f2de44789ff5a981ed725175bae7944832d1e9
refs/heads/master
2020-03-31T01:50:39.449966
2010-07-21T01:05:50
2010-07-21T01:07:15
788,284
0
0
Apache-2.0
2019-02-04T07:03:12
2010-07-21T07:34:27
Python
UTF-8
Python
false
false
6,918
tac
# Do everything properly, and componentize from twisted.application import internet, service from twisted.internet import protocol, reactor, defer from twisted.words.protocols import irc from twisted.protocols import basic from twisted.python import components from twisted.web import resource, server, static, xmlrpc, microdom from zope.interface import Interface, implements import cgi class IFingerService(Interface): def getUser(user): """Return a deferred returning a string""" def getUsers(): """Return a deferred returning a list of strings""" class IFingerSetterService(Interface): def setUser(user, status): """Set the user's status to something""" def catchError(err): return "Internal error in server" class FingerProtocol(basic.LineReceiver): def lineReceived(self, user): d = self.factory.getUser(user) d.addErrback(catchError) def writeValue(value): self.transport.write(value+'\r\n') self.transport.loseConnection() d.addCallback(writeValue) class IFingerFactory(Interface): def getUser(user): """Return a deferred returning a string""" def buildProtocol(addr): """Return a protocol returning a string""" class FingerFactoryFromService(protocol.ServerFactory): implements(IFingerFactory) protocol = FingerProtocol def __init__(self, service): self.service = service def getUser(self, user): return self.service.getUser(user) components.registerAdapter(FingerFactoryFromService, IFingerService, IFingerFactory) class FingerSetterProtocol(basic.LineReceiver): def connectionMade(self): self.lines = [] def lineReceived(self, line): self.lines.append(line) def connectionLost(self, reason): if len(self.lines) == 2: self.factory.setUser(*self.lines) class IFingerSetterFactory(Interface): def setUser(user, status): """Return a deferred returning a string""" def buildProtocol(addr): """Return a protocol returning a string""" class FingerSetterFactoryFromService(protocol.ServerFactory): implements(IFingerSetterFactory) protocol = FingerSetterProtocol def __init__(self, service): self.service = service def setUser(self, user, status): self.service.setUser(user, status) components.registerAdapter(FingerSetterFactoryFromService, IFingerSetterService, IFingerSetterFactory) class IRCReplyBot(irc.IRCClient): def connectionMade(self): self.nickname = self.factory.nickname irc.IRCClient.connectionMade(self) def privmsg(self, user, channel, msg): user = user.split('!')[0] if self.nickname.lower() == channel.lower(): d = self.factory.getUser(msg) d.addErrback(catchError) d.addCallback(lambda m: "Status of %s: %s" % (msg, m)) d.addCallback(lambda m: self.msg(user, m)) class IIRCClientFactory(Interface): """ @ivar nickname """ def getUser(user): """Return a deferred returning a string""" def buildProtocol(addr): """Return a protocol""" class IRCClientFactoryFromService(protocol.ClientFactory): implements(IIRCClientFactory) protocol = IRCReplyBot nickname = None def __init__(self, service): self.service = service def getUser(self, user): return self.service.getUser(user) components.registerAdapter(IRCClientFactoryFromService, IFingerService, IIRCClientFactory) class UserStatusTree(resource.Resource): def __init__(self, service): resource.Resource.__init__(self) self.service=service # add a specific child for the path "RPC2" self.putChild("RPC2", UserStatusXR(self.service)) # need to do this for resources at the root of the site self.putChild("", self) def _cb_render_GET(self, users, request): userOutput = ''.join(["<li><a href=\"%s\">%s</a></li>" % (user, user) for user in users]) request.write(""" 
<html><head><title>Users</title></head><body> <h1>Users</h1> <ul> %s </ul></body></html>""" % userOutput) request.finish() def render_GET(self, request): d = self.service.getUsers() d.addCallback(self._cb_render_GET, request) # signal that the rendering is not complete return server.NOT_DONE_YET def getChild(self, path, request): return UserStatus(user=path, service=self.service) components.registerAdapter(UserStatusTree, IFingerService, resource.IResource) class UserStatus(resource.Resource): def __init__(self, user, service): resource.Resource.__init__(self) self.user = user self.service = service def _cb_render_GET(self, status, request): request.write("""<html><head><title>%s</title></head> <body><h1>%s</h1> <p>%s</p> </body></html>""" % (self.user, self.user, status)) request.finish() def render_GET(self, request): d = self.service.getUser(self.user) d.addCallback(self._cb_render_GET, request) # signal that the rendering is not complete return server.NOT_DONE_YET class UserStatusXR(xmlrpc.XMLRPC): def __init__(self, service): xmlrpc.XMLRPC.__init__(self) self.service = service def xmlrpc_getUser(self, user): return self.service.getUser(user) def xmlrpc_getUsers(self): return self.service.getUsers() class FingerService(service.Service): implements(IFingerService) def __init__(self, filename): self.filename = filename self._read() def _read(self): self.users = {} for line in file(self.filename): user, status = line.split(':', 1) user = user.strip() status = status.strip() self.users[user] = status self.call = reactor.callLater(30, self._read) def getUser(self, user): return defer.succeed(self.users.get(user, "No such user")) def getUsers(self): return defer.succeed(self.users.keys()) application = service.Application('finger', uid=1, gid=1) f = FingerService('/etc/users') serviceCollection = service.IServiceCollection(application) internet.TCPServer(79, IFingerFactory(f) ).setServiceParent(serviceCollection) internet.TCPServer(8000, server.Site(resource.IResource(f)) ).setServiceParent(serviceCollection) i = IIRCClientFactory(f) i.nickname = 'fingerbot' internet.TCPClient('irc.freenode.org', 6667, i ).setServiceParent(serviceCollection)
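Two details in the listing above are Python 2-only: the implements(...) class advice (modern zope.interface uses the @implementer class decorator instead) and the file(...) builtin in FingerService._read, which becomes open(...) on Python 3. A minimal sketch of the decorator form for one of the adapters:

from zope.interface import implementer


@implementer(IFingerFactory)   # replaces the in-body implements(IFingerFactory)
class FingerFactoryFromService(protocol.ServerFactory):
    protocol = FingerProtocol

    def __init__(self, service):
        self.service = service

    def getUser(self, user):
        return self.service.getUser(user)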
[ "bcwalrus@cloudera.com" ]
bcwalrus@cloudera.com
b397b42327400482c2d0d8bacccc325d4eee2383
c8a04384030c3af88a8e16de4cedc4ef8aebfae5
/stubs/pandas/tests/io/parser/test_c_parser_only.pyi
4f06775773e118e6584efa8effa83be67cd56474
[ "MIT" ]
permissive
Accern/accern-xyme
f61fce4b426262b4f67c722e563bb4297cfc4235
6ed6c52671d02745efabe7e6b8bdf0ad21f8762c
refs/heads/master
2023-08-17T04:29:00.904122
2023-05-23T09:18:09
2023-05-23T09:18:09
226,960,272
3
2
MIT
2023-07-19T02:13:18
2019-12-09T20:21:59
Python
UTF-8
Python
false
false
2,374
pyi
# Stubs for pandas.tests.io.parser.test_c_parser_only (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
# pylint: disable=unused-argument,redefined-outer-name,no-self-use,invalid-name
# pylint: disable=relative-beyond-top-level,line-too-long,arguments-differ
# pylint: disable=no-member,too-few-public-methods,keyword-arg-before-vararg
# pylint: disable=super-init-not-called,abstract-method,redefined-builtin
from typing import Any

def test_buffer_overflow(c_parser_only: Any, malformed: Any) -> None: ...
def test_buffer_rd_bytes(c_parser_only: Any) -> None: ...
def test_delim_whitespace_custom_terminator(c_parser_only: Any) -> None: ...
def test_dtype_and_names_error(c_parser_only: Any) -> None: ...
def test_unsupported_dtype(c_parser_only: Any, match: Any, kwargs: Any) -> None: ...
def test_precise_conversion(c_parser_only: Any) -> None: ...
def test_usecols_dtypes(c_parser_only: Any) -> None: ...
def test_disable_bool_parsing(c_parser_only: Any) -> None: ...
def test_custom_lineterminator(c_parser_only: Any) -> None: ...
def test_parse_ragged_csv(c_parser_only: Any) -> None: ...
def test_tokenize_CR_with_quoting(c_parser_only: Any) -> None: ...
def test_grow_boundary_at_cap(c_parser_only: Any) -> None: ...
def test_parse_trim_buffers(c_parser_only: Any) -> None: ...
def test_internal_null_byte(c_parser_only: Any) -> None: ...
def test_read_nrows_large(c_parser_only: Any) -> None: ...
def test_float_precision_round_trip_with_text(c_parser_only: Any) -> None: ...
def test_large_difference_in_columns(c_parser_only: Any) -> None: ...
def test_data_after_quote(c_parser_only: Any) -> None: ...
def test_comment_whitespace_delimited(c_parser_only: Any, capsys: Any) -> None: ...
def test_file_like_no_next(c_parser_only: Any) -> None: ...
def test_buffer_rd_bytes_bad_unicode(c_parser_only: Any) -> None: ...
def test_read_tarfile(c_parser_only: Any, csv_dir_path: Any, tar_suffix: Any) -> None: ...
def test_bytes_exceed_2gb(c_parser_only: Any) -> None: ...
def test_chunk_whitespace_on_boundary(c_parser_only: Any) -> None: ...
def test_file_handles_mmap(c_parser_only: Any, csv1: Any) -> None: ...
def test_file_binary_mode(c_parser_only: Any) -> None: ...
[ "josua.krause@gmail.com" ]
josua.krause@gmail.com
e22adfda64346dc4ead5ef8e645866f4ad1921df
912fbf0ca814c831d4e1eaf5e919c6f300a5f8de
/Code/Basic/check_firmware.py
41ad479f747d6bc5cc37ce0c3eddca9069950aeb
[ "MIT" ]
permissive
JoelBuenrostro/micropython-for-esp32
2ed503549d2581cbb304f66e8b2a60c98f1df5bb
d4ba9777ec4459b09089762be9287985d19bbe28
refs/heads/master
2020-07-22T03:15:02.558445
2019-10-14T12:24:12
2019-10-14T12:24:12
207,058,087
1
1
null
null
null
null
UTF-8
Python
false
false
434
py
# Chip: ESP32-WROOM-32 (ESP32-D0WDQ6)
# Microprocessor: Dual-Core Xtensa® 32-bit LX6
# Clock: 80MHz to 240MHz
# Crystal: 40MHz
# SPI flash: 4 MB
# Operating voltage: 3.0V-3.6V
# Operating current: 80mA

# Purpose: Check the firmware integrity from a MicroPython REPL prompt
# Notes: If the last output value is True, the firmware is OK. Otherwise, it's
# corrupted and needs to be reflashed correctly.

import esp

esp.check_fw()
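At the REPL this looks roughly like the following. The size and checksum lines are illustrative only (exact output varies by port and firmware build); per the note above, it is the final True that signals an intact image:

>>> import esp
>>> esp.check_fw()
size: 1458224
md5: b'...'
True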
[ "joelbuenrostro@outlook.com" ]
joelbuenrostro@outlook.com
4a4c6c643a4bac875fe8b0b5f38720441793c48a
aa26cd3faeff070f056314f0aec2302ca8b32cef
/bs1.py
fc4c21d0d9da6543df2f4eca78dbb2e72ec3f195
[]
no_license
rcanzlovar/linux-customizations
8f709b1d7fd1e2b61bf5f31c101442f5d3bd5bed
8ace30643a906b6d83eb16609d5fd4ce9bd51ba5
refs/heads/master
2020-06-27T18:03:55.761361
2017-12-11T09:55:24
2017-12-11T09:55:24
97,069,156
0
0
null
null
null
null
UTF-8
Python
false
false
5,398
py
#!/usr/bin/python3 import bs4 as bs import urllib.request import urllib.parse import time values = {} # get the timestamp for the mysql command values["timestamp"] = time.strftime('%Y-%m-%d %H:%M:%S') source = urllib.request.urlopen("http://www.msn.com/en-us/weather/today/Longmont,Colorado,United-States/we-city-40.164,-105.100") # (`timestamp`, # `city_name`, # `city_lat`, # `city_lon`, fields = { "Humidity": "humidity", "Barometer": "barometer", "Wind": "wind", "Visibility": "visibility", "Dew Point": "dewpoint", "Feels Like": "feelslike" } soup = bs.BeautifulSoup(source,'lxml') body = soup.body ###print (body) print ('###start - my locations ###') for item in body.find_all('div', class_="mylocations" ): # get rid of the %99 escaped characters foo = (item.attrs['data-loc']) for thing in urllib.parse.unquote(foo).split("&"): print ("Thing: ",thing) (key,val) = thing.split("=") if key == "c": values["city_name"] = val if key == "lat": values["city_lat"] = val if key == "long": values["city_lon"] = val ## get the temperature for item in body.find_all('span', class_="current" ): values["temperature"] = item.text print ("## Temp ", values["temperature"]) # get the other values print ('### weather-info ###') for item in body.find_all('div', class_="weather-info" ): # print (item.text) # whole item including all the bits # this is the first span in the div.... values["summary"] = item.find_all('span')[0].text # get the other pieces... for thing1 in item.find_all('li'): whole = thing1.text for thing2 in thing1.find_all('span'): label = thing2.text label = label.strip() # print ("span thing: ",thing2.text) value = whole.replace(label,'') # clear extraneous bits value = value.replace("°",'') value = value.replace(" mi",'') value = value.replace(" in",'') value = value.replace("%",'') value = value.strip() print (label + ": ",value) if (fields[label]): values[fields[label]] = value # ['HTML_FORMATTERS', 'XML_FORMATTERS', '__bool__', '__call__', '__class__', '__contains__', '__copy__', '__delattr__', '__delitem__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattr__', '__getattribute__', '__getitem__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__iter__', '__le__', '__len__', '__lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__setitem__', '__sizeof__', '__str__', '__subclasshook__', '__unicode__', '__weakref__', '_all_strings', '_attr_value_as_string', '_attribute_checker', '_find_all', '_find_one', '_formatter_for_name', '_is_xml', '_lastRecursiveChild', '_last_descendant', '_select_debug', '_selector_combinators', '_should_pretty_print', '_tag_name_matches_and', 'append', 'attribselect_re', 'attrs', 'can_be_empty_element', 'childGenerator', 'children', 'clear', 'contents', 'decode', 'decode_contents', 'decompose', 'descendants', 'encode', 'encode_contents', 'extract', 'fetchNextSiblings', 'fetchParents', 'fetchPrevious', 'fetchPreviousSiblings', 'find', 'findAll', 'findAllNext', 'findAllPrevious', 'findChild', 'findChildren', 'findNext', 'findNextSibling', 'findNextSiblings', 'findParent', 'findParents', 'findPrevious', 'findPreviousSibling', 'findPreviousSiblings', 'find_all', 'find_all_next', 'find_all_previous', 'find_next', 'find_next_sibling', 'find_next_siblings', 'find_parent', 'find_parents', 'find_previous', 'find_previous_sibling', 'find_previous_siblings', 'format_string', 'get', 'getText', 'get_attribute_list', 'get_text', 'has_attr', 'has_key', 'hidden', 'index', 'insert', 'insert_after', 
'insert_before', 'isSelfClosing', 'is_empty_element', 'known_xml', 'name', 'namespace', 'next', 'nextGenerator', 'nextSibling', 'nextSiblingGenerator', 'next_element', 'next_elements', 'next_sibling', 'next_siblings', 'parent', 'parentGenerator', 'parents', 'parserClass', 'parser_class', 'prefix', 'preserve_whitespace_tags', 'prettify', 'previous', 'previousGenerator', 'previousSibling', 'previousSiblingGenerator', 'previous_element', 'previous_elements', 'previous_sibling', 'previous_siblings', 'quoted_colon', 'recursiveChildGenerator', 'renderContents', 'replaceWith', 'replaceWithChildren', 'replace_with', 'replace_with_children', 'select', 'select_one', 'setup', 'string', 'strings', 'stripped_strings', 'tag_name_re', 'text', 'unwrap', 'wrap'] keys = "" vals = "" print (values) for key in values: print (key + "=\"" + values[key] + "\",") keys = keys + key + "," vals = vals + values[key] + "," sql = "INSERT into `weather` (" + keys + ") VALUES (" + vals + ")" print ("sql = ",sql) ##insert into table set fieldr=valoe, field2=val2, etc=something # sql = "INSERT INTO `weather` # (`timestamp`, # `city_name`, # `city_lat`, # `city_lon`, # `temperature`, # `humidity`, # `barometer`, # `conditions`, # `visibility`, # `dewpoint`, # `feelslike`) # VALUES # ('" . $timestamp . "', # '" . $weather_values{'city'} . "', # '" . $weather_values{'lat'} . "', # '" . $weather_values{'lon'} . "', # '" . $weather_values{'temperature'} . "', # '" . $weather_values{'humidity'} . "', # '" . $weather_values{'barometer'} . "', # '" . $weather_values{'conditions'} . "', # '" . $weather_values{'visibility'} . "', # '" . $weather_values{'dewpoint'} . "', # '" . $weather_values{'feelslike'} . "');";
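The final INSERT above splices raw values straight into the SQL string, so string fields arrive unquoted, both keys and vals keep a trailing comma, and the statement is injection-prone. A sketch of the parameterized form, assuming a DB-API driver with %s placeholders (MySQLdb/pymysql style); the script only prints the SQL, so the cursor call is left commented:

columns = sorted(values)
column_sql = ", ".join("`%s`" % c for c in columns)
placeholders = ", ".join(["%s"] * len(columns))
sql = "INSERT INTO `weather` (%s) VALUES (%s)" % (column_sql, placeholders)
print("sql =", sql)

# With a real connection the driver quotes and escapes every value:
# cursor.execute(sql, [values[c] for c in columns])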
[ "noreply@github.com" ]
rcanzlovar.noreply@github.com
f2d1fe0367179617efb95be8c474a75140a696e9
089533c91d5506bd19f8af5a9240f4429d747195
/tests/build-workflows.py
399ef0012bbcea2870d237380a4a1f099f358bad
[ "Apache-2.0" ]
permissive
chia-os/pipscoin-blockchain
f55092c6962d18d455d01601c725d3c891077e86
55c09f9b75f912625bc3ed27e46dc85a410392fb
refs/heads/main
2023-08-08T01:30:56.832134
2021-09-02T02:33:47
2021-09-02T02:33:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,370
py
#!/usr/bin/env python3 # Run from the current directory. import argparse import testconfig import logging import subprocess from pathlib import Path from typing import List def subdirs(root_dirs: List[str]) -> List[Path]: dirs: List[Path] = [] for r in root_dirs: dirs.extend(Path(r).rglob("**/")) return [d for d in dirs if not (any(c.startswith("_") for c in d.parts) or any(c.startswith(".") for c in d.parts))] def module_dict(module): return {k: v for k, v in module.__dict__.items() if not k.startswith("_")} def dir_config(dir): import importlib module_name = str(dir).replace("/", ".") + ".config" try: return module_dict(importlib.import_module(module_name)) except ModuleNotFoundError: return {} def read_file(filename): with open(filename) as f: return f.read() return None # Input file def workflow_yaml_template_text(os): return Path(f"runner-templates/build-test-{os}").read_text() # Output files def workflow_yaml_file(dir, os, test_name): return Path(dir / f"build-test-{os}-{test_name}.yml") # String function from test dir to test name def test_name(dir): return str(dir).replace("/", "-") def transform_template(template_text, replacements): t = template_text for r, v in replacements.items(): t = t.replace(r, v) return t def test_files_in_dir(dir): g = dir.glob("test_*.py") return [] if g is None else [f for f in g] # ----- default_replacements = { "INSTALL_TIMELORD": read_file("runner-templates/install-timelord.include.yml").rstrip(), "CHECKOUT_TEST_BLOCKS_AND_PLOTS": read_file("runner-templates/checkout-test-plots.include.yml").rstrip(), "TEST_DIR": "", "TEST_NAME": "", "PYTEST_PARALLEL_ARGS": "", } # ----- # Replace with update_config def generate_replacements(defaults, conf, dir, test_files): assert len(test_files) > 0 replacements = dict(defaults) if not conf["checkout_blocks_and_plots"]: replacements[ "CHECKOUT_TEST_BLOCKS_AND_PLOTS" ] = "# Omitted checking out blocks and plots repo Pipscoin-Network/test-cache" if not conf["install_timelord"]: replacements["INSTALL_TIMELORD"] = "# Omitted installing Timelord" if conf["parallel"]: replacements["PYTEST_PARALLEL_ARGS"] = "-n auto" if conf["job_timeout"]: replacements["JOB_TIMEOUT"] = str(conf["job_timeout"]) test_paths = ["tests/" + str(f) for f in test_files] # We have to list the test files individually until pytest has the # option to only collect tests in the named dir, and not those below replacements["TEST_DIR"] = " ".join(sorted(test_paths)) replacements["TEST_NAME"] = test_name(str(dir)) if "test_name" in conf: replacements["TEST_NAME"] = conf["test_name"] return replacements # Overwrite with directory specific values def update_config(parent, child): if child is None: return parent conf = child for k, v in parent.items(): if k not in child: conf[k] = v return conf def dir_path(string): p = Path(string) if p.is_dir(): return p else: raise NotADirectoryError(string) # args arg_parser = argparse.ArgumentParser(description="Build github workflows") arg_parser.add_argument("--output-dir", "-d", default="../.github/workflows", type=dir_path) arg_parser.add_argument("--verbose", "-v", action="store_true") args = arg_parser.parse_args() if args.verbose: logging.basicConfig(format="%(asctime)s:%(message)s", level=logging.DEBUG) # main test_dirs = subdirs(testconfig.root_test_dirs) for os in testconfig.oses: template_text = workflow_yaml_template_text(os) for dir in test_dirs: test_files = test_files_in_dir(dir) if len(test_files) == 0: logging.info(f"Skipping {dir}: no tests collected") continue conf = 
update_config(module_dict(testconfig), dir_config(dir)) replacements = generate_replacements(default_replacements, conf, dir, test_files) txt = transform_template(template_text, replacements) logging.info(f"Writing {os}-{test_name(dir)}") workflow_yaml_file(args.output_dir, os, test_name(dir)).write_text(txt) out = subprocess.run(["git", "diff", args.output_dir]) if out.stdout: print(out.stdout)
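One quirk at the end of the script above: subprocess.run does not capture output by default, so out.stdout is always None and the trailing `if out.stdout:` can never fire. A sketch of the presumably intended behavior:

# capture_output=True (Python 3.7+) fills out.stdout; text=True decodes to str.
out = subprocess.run(["git", "diff", args.output_dir],
                     capture_output=True, text=True)
if out.stdout:
    print(out.stdout)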
[ "hello@pipscoin.net" ]
hello@pipscoin.net
f1117ca1bf35ce910854edc80c27f58f03f2f6a6
8b4aa3227c54a283e4c61294fb4c475b5df83ec9
/spanning_tree.py
31f3dc6bd6373b6546acef163817264e7a97d965
[]
no_license
dailydmello/PythonComputerNetworkandSecurity
abdbce8433076fabe2695658983bbb5a286aea43
e3ab5fcae9554e9d34824f6cbda7811190792eb4
refs/heads/master
2020-04-09T15:13:33.842245
2018-12-05T21:11:57
2018-12-05T21:11:57
160,419,967
0
0
null
null
null
null
UTF-8
Python
false
false
14,243
py
# Copyright 2012,2013 James McCauley # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Creates a spanning tree. This component uses the discovery component to build a view of the network topology, constructs a spanning tree, and then disables flooding on switch ports that aren't on the tree by setting their NO_FLOOD bit. The result is that topologies with loops no longer turn your network into useless hot packet soup. This component is inspired by and roughly based on the description of Glenn Gibb's spanning tree module for NOX: http://www.openflow.org/wk/index.php/Basic_Spanning_Tree Note that this does not have much of a relationship to Spanning Tree Protocol. They have similar purposes, but this is a rather different way of going about it. """ from pox.core import core import pox.openflow.libopenflow_01 as of from pox.lib.revent import * from collections import defaultdict from pox.openflow.discovery import Discovery from pox.lib.util import dpidToStr from pox.lib.recoco import Timer import time import sys import itertools log = core.getLogger() def diameter (nodes, G): # number of vertices number_of_nodes = len (G.keys()) dist_mat=[] for i in range (0,number_of_nodes): dist_mat.append([0,0,0,0,0,0,0]) #distance matrix for curr_node, node_neighbours in G.iteritems():#assign weights of 1 to nearby nodes #since list indexing starts at 0 for neighbours in node_neighbours: dist_mat[curr_node-1][neighbours-1]=1 for i in range(0,number_of_nodes): #setting nodes not adjacent to infinity for j in range (0,number_of_nodes): if (i!=j and dist_mat[i][j]!=1): dist_mat[i][j]=float('inf') # floyd warshall algorithm will store the distance from current node to every other node in a matrix for a in range (0,number_of_nodes): for b in range (0,number_of_nodes): for c in range (0,number_of_nodes): if (dist_mat[b][c]>dist_mat[b][a]+dist_mat[a][c]): dist_mat[b][c] = dist_mat[b][a] + dist_mat[a][c] #loop through matrix and store largest distance as diameter maximum=0 for i in range (0,number_of_nodes): for j in range (0,number_of_nodes): if maximum < dist_mat[i][j]: maximum = dist_mat[i][j] Diameter = maximum return Diameter visited=[] def check_visited(node): for x in visited: if (x == node): return True return False def check_spanning_tree(nodes, G): stack = [] current = 1 while (len(visited)!=7): # loop to get all the node_neighbours to get into stack for curr_node, node_neighbours in G.iteritems(): if (curr_node==current): for neighbours in node_neighbours: if (check_visited(neighbours)==False): stack.append(neighbours) #add current node to visited to keep track visited.append(current) # if stack is not empty pop top node on stack into current if (len(stack)!=0): current = stack.pop() visited.sort() #sort through stack to check for repeated cycles for x in range (0, len(visited)-1): if (visited[x] == visited[x+1]): isSpanningTree = False return isSpanningTree isSpanningTree = True return isSpanningTree def get_choices (L, excludes): choices = [] for i in range(1, 2**len(L)): l = [] good = True for j in range(len(L) 
+ 1): if (i >= (2 ** j) and i % (2 ** (j + 1)) != 0): if (L[j] > excludes): l.append (L[j]) else: good = False break if (good): choices.append(l) return choices def _calc_mdst (): """ Calculates the minimum diameter spanning tree Returns it as dictionary where the keys are DPID1, and the values are tuples of (DPID2, port-num), where port-num is the port on DPID1 connecting to DPID2. """ def flip (link): return Discovery.Link(link[2],link[3], link[0],link[1]) adj = defaultdict(lambda:defaultdict(lambda:[])) switches = set() # Add all links and switches for l in core.openflow_discovery.adjacency: adj[l.dpid1][l.dpid2].append(l) switches.add(l.dpid1) switches.add(l.dpid2) for s1 in switches: for s2 in switches: if s2 not in adj[s1]: continue if not isinstance(adj[s1][s2], list): continue assert s1 is not s2 good = False for l in adj[s1][s2]: if flip(l) in core.openflow_discovery.adjacency: # This is a good one adj[s1][s2] = l.port1 adj[s2][s1] = l.port2 good = True break if not good: del adj[s1][s2] if s1 in adj[s2]: # Delete the other way too del adj[s2][s1] Choices = {} selectedChoice = {} ls = sorted(list(switches)) for i in ls: Choices[i] = get_choices(list(adj[i]), i) selectedChoice[i] = 0 tree = defaultdict(set) finish = False md = 100 while (not finish): nextTree = defaultdict(set) for i in ls: selectedChoice[i] = selectedChoice[i] + 1 if (selectedChoice[i] >= len(Choices[i])): selectedChoice[i] = 0 if (i == ls[len(ls) - 1]): finish = True else: break graph = {} for j in switches: graph[j] = [] for j in switches: if(len(Choices[j]) > 0): for k in Choices[j][selectedChoice[j]]: if not ((k,adj[j][k]) in nextTree[j]): graph[j].append(k) graph[k].append(j) nextTree[j].add((k,adj[j][k])) nextTree[k].add((j,adj[k][j])) if (check_spanning_tree(ls, graph)): d = diameter(ls, graph) if (d < md): md = d tree = nextTree log.debug("*** SPANNING TREE ***") for sw,ports in tree.iteritems(): log.debug((" %i : " % sw) + " ".join([str(l[0]) for l in sorted(list(ports))])) log.debug("*********************") return tree def _calc_spanning_tree (): """ Calculates the first found spanning tree Returns it as dictionary where the keys are DPID1, and the values are tuples of (DPID2, port-num), where port-num is the port on DPID1 connecting to DPID2. 
""" def flip (link): return Discovery.Link(link[2],link[3], link[0],link[1]) adj = defaultdict(lambda:defaultdict(lambda:[])) switches = set() # Add all links and switches for l in core.openflow_discovery.adjacency: adj[l.dpid1][l.dpid2].append(l) switches.add(l.dpid1) switches.add(l.dpid2) # Cull links -- we want a single symmetric link connecting nodes for s1 in switches: for s2 in switches: if s2 not in adj[s1]: continue if not isinstance(adj[s1][s2], list): continue assert s1 is not s2 good = False for l in adj[s1][s2]: if flip(l) in core.openflow_discovery.adjacency: # This is a good one adj[s1][s2] = l.port1 adj[s2][s1] = l.port2 good = True break if not good: del adj[s1][s2] if s1 in adj[s2]: # Delete the other way too del adj[s2][s1] q = [] more = set(switches) done = set() tree = defaultdict(set) while True: q = sorted(list(more)) + q more.clear() if len(q) == 0: break v = q.pop(False) if v in done: continue done.add(v) for w,p in adj[v].iteritems(): if w in tree: continue more.add(w) tree[v].add((w,p)) tree[w].add((v,adj[w][v])) if True: log.debug("*** SPANNING TREE ***") for sw,ports in tree.iteritems(): #print " ", dpidToStr(sw), ":", sorted(list(ports)) #print " ", sw, ":", [l[0] for l in sorted(list(ports))] log.debug((" %i : " % sw) + " ".join([str(l[0]) for l in sorted(list(ports))])) log.debug("*********************") return tree # Keep a list of previous port states so that we can skip some port mods # If other things mess with port states, these may not be correct. We # could also refer to Connection.ports, but those are not guaranteed to # be up to date. _prev = defaultdict(lambda : defaultdict(lambda : None)) # If True, we set ports down when a switch connects _noflood_by_default = False # If True, don't allow turning off flood bits until a complete discovery # cycle should have completed (mostly makes sense with _noflood_by_default). _hold_down = False # If True, calculate the minimum diameter spanning tree _mdst = False def _handle_ConnectionUp (event): # When a switch connects, forget about previous port states _prev[event.dpid].clear() if _noflood_by_default: con = event.connection log.debug("Disabling flooding for %i ports", len(con.ports)) for p in con.ports.itervalues(): if p.port_no >= of.OFPP_MAX: continue _prev[con.dpid][p.port_no] = False pm = of.ofp_port_mod(port_no=p.port_no, hw_addr=p.hw_addr, config = of.OFPPC_NO_FLOOD, mask = of.OFPPC_NO_FLOOD) con.send(pm) _invalidate_ports(con.dpid) if _hold_down: t = Timer(core.openflow_discovery.send_cycle_time + 1, _update_tree, kw={'force_dpid':event.dpid}) def _handle_LinkEvent (event): # When links change, update spanning tree (dp1,p1),(dp2,p2) = event.link.end if _prev[dp1][p1] is False: if _prev[dp2][p2] is False: # We're disabling this link; who cares if it's up or down? #log.debug("Ignoring link status for %s", event.link) return _update_tree() def _update_tree (force_dpid = None): """ Update spanning tree force_dpid specifies a switch we want to update even if we are supposed to be holding down changes. """ # Get a spanning tree if _mdst: tree = _calc_mdst() else: tree = _calc_spanning_tree() log.debug("Spanning tree updated") # Connections born before this time are old enough that a complete # discovery cycle should have completed (and, thus, all of their # links should have been discovered). 
enable_time = time.time() - core.openflow_discovery.send_cycle_time - 1 # Now modify ports as needed try: change_count = 0 for sw, ports in tree.iteritems(): con = core.openflow.getConnection(sw) if con is None: continue # Must have disconnected if con.connect_time is None: continue # Not fully connected if _hold_down: if con.connect_time > enable_time: # Too young -- we should hold down changes. if force_dpid is not None and sw == force_dpid: # .. but we'll allow it anyway pass else: continue tree_ports = [p[1] for p in ports] for p in con.ports.itervalues(): if p.port_no < of.OFPP_MAX: flood = p.port_no in tree_ports if not flood: if core.openflow_discovery.is_edge_port(sw, p.port_no): flood = True if _prev[sw][p.port_no] is flood: #print sw,p.port_no,"skip","(",flood,")" continue # Skip change_count += 1 _prev[sw][p.port_no] = flood #print sw,p.port_no,flood #TODO: Check results pm = of.ofp_port_mod(port_no=p.port_no, hw_addr=p.hw_addr, config = 0 if flood else of.OFPPC_NO_FLOOD, mask = of.OFPPC_NO_FLOOD) con.send(pm) _invalidate_ports(con.dpid) if change_count: log.info("%i ports changed", change_count) except: _prev.clear() log.exception("Couldn't push spanning tree") _dirty_switches = {} # A map dpid_with_dirty_ports->Timer _coalesce_period = 2 # Seconds to wait between features requests def _invalidate_ports (dpid): """ Registers the fact that port info for dpid may be out of date When the spanning tree adjusts the port flags, the port config bits we keep in the Connection become out of date. We don't want to just set them locally because an in-flight port status message could overwrite them. We also might not want to assume they get set the way we want them. SO, we do send a features request, but we wait a moment before sending it so that we can potentially coalesce several. TLDR: Port information for this switch may be out of date for around _coalesce_period seconds. """ if dpid in _dirty_switches: # We're already planning to check return t = Timer(_coalesce_period, _check_ports, args=(dpid,)) _dirty_switches[dpid] = t def _check_ports (dpid): """ Sends a features request to the given dpid """ _dirty_switches.pop(dpid,None) con = core.openflow.getConnection(dpid) if con is None: return con.send(of.ofp_barrier_request()) con.send(of.ofp_features_request()) log.debug("Requested switch features for %s", str(con)) def launch (no_flood = False, hold_down = False, mdst=False): global _noflood_by_default, _hold_down, _mdst if no_flood is True: _noflood_by_default = True if hold_down is True: _hold_down = True if mdst is True: _mdst = True def start_spanning_tree (): core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp) core.openflow_discovery.addListenerByName("LinkEvent", _handle_LinkEvent) log.debug("Spanning tree component ready") core.call_when_ready(start_spanning_tree, "openflow_discovery")
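The diameter() helper above hard-codes a seven-node distance row ([0,0,0,0,0,0,0]), and check_spanning_tree stops at len(visited) != 7, so the MDST path only works for exactly seven switches. A generalized Floyd-Warshall diameter sketch that sizes itself from the graph dict ({node: [neighbours]} with unit-weight edges, matching the helper's conventions):

def graph_diameter(G):
    """Longest shortest path over a {node: [neighbours]} graph with unit edges."""
    nodes = sorted(G)
    index = {n: i for i, n in enumerate(nodes)}
    n = len(nodes)
    INF = float('inf')
    dist = [[INF] * n for _ in range(n)]
    for v in nodes:
        dist[index[v]][index[v]] = 0
        for w in G[v]:
            dist[index[v]][index[w]] = 1
    for k in range(n):                      # Floyd-Warshall relaxation
        for i in range(n):
            for j in range(n):
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
    return max(max(row) for row in dist)    # inf if the graph is disconnected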
[ "dmelloe@mcmaster.ca" ]
dmelloe@mcmaster.ca
75dfe29367cde603df06f5c09d349672c7e70888
c7e58e7e961c9e8a3f307e423eb9c0d796e123e3
/test_python.py
05675da23381a6e74f88edfce84694e95a9be400
[]
no_license
StuWheel/mount_steel
7632034647a7868fa02070254bdb3b93d09a499f
b6f2158e60a3e48ed643da77e05ed10245d0faa9
refs/heads/master
2021-04-04T14:47:05.315758
2020-03-29T04:41:52
2020-03-29T04:41:52
248,465,489
0
0
null
null
null
null
UTF-8
Python
false
false
22
py
x = 15
print(x)
y = 15
[ "stuart.s.wall@gmail.com" ]
stuart.s.wall@gmail.com
440e2fcbfd242367639df5349316b38f86b6a699
88863cb16f35cd479d43f2e7852d20064daa0c89
/Winton/kimonolabs-api.py
a126fb4eb279151491491f9e15b6d5c4ebc0e053
[]
no_license
chrishefele/kaggle-sample-code
842c3cd766003f3b8257fddc4d61b919e87526c4
1c04e859c7376f8757b011ed5a9a1f455bd598b9
refs/heads/master
2020-12-29T12:18:09.957285
2020-12-22T20:16:35
2020-12-22T20:16:35
238,604,678
3
1
null
null
null
null
UTF-8
Python
false
false
350
py
import urllib2

request = urllib2.Request(
    "https://www.kimonolabs.com/api/csv/2i3ixo7u?apikey=qtyJXfoXzVQrHxFRbRxoZ06S13CAtOjl",
    headers={"authorization": "Bearer BRALL8UhSiBI78UvVPEN0PC7F0fNYtBh"})
contents = urllib2.urlopen(request).read()

print(contents)
print
for line in contents.splitlines():
    print line
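urllib2 is Python 2 only; a hedged Python 3 port of the same request follows. The endpoint and header name are copied from the file above, while the placeholder credentials are stand-ins rather than working keys:

import urllib.request

# Stand-in values; substitute real credentials for your account.
URL = "https://www.kimonolabs.com/api/csv/2i3ixo7u?apikey=YOUR_API_KEY"
TOKEN = "YOUR_BEARER_TOKEN"

request = urllib.request.Request(URL, headers={"authorization": "Bearer " + TOKEN})
contents = urllib.request.urlopen(request).read().decode("utf-8")

print(contents)
print()
for line in contents.splitlines():
    print(line)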
[ "c.hefele@verizon.net" ]
c.hefele@verizon.net
05381db273e480b5a0039ca5f9462b772e1f0806
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
/alipay/aop/api/response/AlipayCommerceReceiptSyncResponse.py
335d068194ecc2851c30cfb4ee5e3179b39e0a1c
[ "Apache-2.0" ]
permissive
alipay/alipay-sdk-python-all
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
1fad300587c9e7e099747305ba9077d4cd7afde9
refs/heads/master
2023-08-27T21:35:01.778771
2023-08-23T07:12:26
2023-08-23T07:12:26
133,338,689
247
70
Apache-2.0
2023-04-25T04:54:02
2018-05-14T09:40:54
Python
UTF-8
Python
false
false
440
py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import json

from alipay.aop.api.response.AlipayResponse import AlipayResponse


class AlipayCommerceReceiptSyncResponse(AlipayResponse):

    def __init__(self):
        super(AlipayCommerceReceiptSyncResponse, self).__init__()

    def parse_response_content(self, response_content):
        response = super(AlipayCommerceReceiptSyncResponse, self).parse_response_content(response_content)
[ "jishupei.jsp@alibaba-inc.com" ]
jishupei.jsp@alibaba-inc.com
a2e98c99e36453844e82eec47239264e0afc73d4
a3abf2477473e8e660f25be0c4de7a9d6daf52ee
/python/aws/kinesis/messages_pb2.py
52eb9a5955987c232237af6366ee2bce6c124f1d
[]
no_license
abferm/Protobufs
b083bd683d50f1e7d7421b3625135a355b793753
4b60fa3861a528f3511bbfa2e37cfbc2582feafc
refs/heads/master
2021-01-12T17:27:33.141067
2016-11-04T22:11:16
2016-11-04T22:11:16
71,573,796
0
0
null
null
null
null
UTF-8
Python
false
true
34,991
py
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: messages.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() import config_pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='messages.proto', package='aws.kinesis.protobuf', serialized_pb=_b('\n\x0emessages.proto\x12\x14\x61ws.kinesis.protobuf\x1a\x0c\x63onfig.proto\"!\n\x03Tag\x12\x0b\n\x03key\x18\x01 \x02(\t\x12\r\n\x05value\x18\x02 \x01(\t\"}\n\x06Record\x12\x1b\n\x13partition_key_index\x18\x01 \x02(\x04\x12\x1f\n\x17\x65xplicit_hash_key_index\x18\x02 \x01(\x04\x12\x0c\n\x04\x64\x61ta\x18\x03 \x02(\x0c\x12\'\n\x04tags\x18\x04 \x03(\x0b\x32\x19.aws.kinesis.protobuf.Tag\"\x7f\n\x10\x41ggregatedRecord\x12\x1b\n\x13partition_key_table\x18\x01 \x03(\t\x12\x1f\n\x17\x65xplicit_hash_key_table\x18\x02 \x03(\t\x12-\n\x07records\x18\x03 \x03(\x0b\x32\x1c.aws.kinesis.protobuf.Record\"\xe6\x03\n\x07Message\x12\n\n\x02id\x18\x01 \x02(\x04\x12\x11\n\tsource_id\x18\x02 \x01(\x04\x12\x35\n\nput_record\x18\x03 \x01(\x0b\x32\x1f.aws.kinesis.protobuf.PutRecordH\x00\x12,\n\x05\x66lush\x18\x04 \x01(\x0b\x32\x1b.aws.kinesis.protobuf.FlushH\x00\x12\x42\n\x11put_record_result\x18\x05 \x01(\x0b\x32%.aws.kinesis.protobuf.PutRecordResultH\x00\x12<\n\rconfiguration\x18\x06 \x01(\x0b\x32#.aws.kinesis.protobuf.ConfigurationH\x00\x12?\n\x0fmetrics_request\x18\x07 \x01(\x0b\x32$.aws.kinesis.protobuf.MetricsRequestH\x00\x12\x41\n\x10metrics_response\x18\x08 \x01(\x0b\x32%.aws.kinesis.protobuf.MetricsResponseH\x00\x12?\n\x0fset_credentials\x18\t \x01(\x0b\x32$.aws.kinesis.protobuf.SetCredentialsH\x00\x42\x10\n\x0e\x61\x63tual_message\"`\n\tPutRecord\x12\x13\n\x0bstream_name\x18\x01 \x02(\t\x12\x15\n\rpartition_key\x18\x02 \x02(\t\x12\x19\n\x11\x65xplicit_hash_key\x18\x03 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x04 \x02(\x0c\"\x1c\n\x05\x46lush\x12\x13\n\x0bstream_name\x18\x01 \x01(\t\"f\n\x07\x41ttempt\x12\r\n\x05\x64\x65lay\x18\x01 \x02(\r\x12\x10\n\x08\x64uration\x18\x02 \x02(\r\x12\x0f\n\x07success\x18\x03 \x02(\x08\x12\x12\n\nerror_code\x18\x04 \x01(\t\x12\x15\n\rerror_message\x18\x05 \x01(\t\"~\n\x0fPutRecordResult\x12/\n\x08\x61ttempts\x18\x01 \x03(\x0b\x32\x1d.aws.kinesis.protobuf.Attempt\x12\x0f\n\x07success\x18\x02 \x02(\x08\x12\x10\n\x08shard_id\x18\x03 \x01(\t\x12\x17\n\x0fsequence_number\x18\x04 \x01(\t\">\n\x0b\x43redentials\x12\x0c\n\x04\x61kid\x18\x01 \x02(\t\x12\x12\n\nsecret_key\x18\x02 \x02(\t\x12\r\n\x05token\x18\x03 \x01(\t\"]\n\x0eSetCredentials\x12\x13\n\x0b\x66or_metrics\x18\x01 \x01(\x08\x12\x36\n\x0b\x63redentials\x18\x02 \x02(\x0b\x32!.aws.kinesis.protobuf.Credentials\"\'\n\tDimension\x12\x0b\n\x03key\x18\x01 \x02(\t\x12\r\n\x05value\x18\x02 \x02(\t\"K\n\x05Stats\x12\r\n\x05\x63ount\x18\x01 \x02(\x01\x12\x0b\n\x03sum\x18\x02 \x02(\x01\x12\x0c\n\x04mean\x18\x03 \x02(\x01\x12\x0b\n\x03min\x18\x04 \x02(\x01\x12\x0b\n\x03max\x18\x05 \x02(\x01\"\x88\x01\n\x06Metric\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x33\n\ndimensions\x18\x02 \x03(\x0b\x32\x1f.aws.kinesis.protobuf.Dimension\x12*\n\x05stats\x18\x03 \x02(\x0b\x32\x1b.aws.kinesis.protobuf.Stats\x12\x0f\n\x07seconds\x18\x04 \x02(\x04\"/\n\x0eMetricsRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07seconds\x18\x02 
\x01(\x04\"@\n\x0fMetricsResponse\x12-\n\x07metrics\x18\x01 \x03(\x0b\x32\x1c.aws.kinesis.protobuf.MetricB)\n\'com.amazonaws.kinesis.producer.protobuf') , dependencies=[config_pb2.DESCRIPTOR,]) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _TAG = _descriptor.Descriptor( name='Tag', full_name='aws.kinesis.protobuf.Tag', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='aws.kinesis.protobuf.Tag.key', index=0, number=1, type=9, cpp_type=9, label=2, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='value', full_name='aws.kinesis.protobuf.Tag.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], oneofs=[ ], serialized_start=54, serialized_end=87, ) _RECORD = _descriptor.Descriptor( name='Record', full_name='aws.kinesis.protobuf.Record', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='partition_key_index', full_name='aws.kinesis.protobuf.Record.partition_key_index', index=0, number=1, type=4, cpp_type=4, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='explicit_hash_key_index', full_name='aws.kinesis.protobuf.Record.explicit_hash_key_index', index=1, number=2, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='data', full_name='aws.kinesis.protobuf.Record.data', index=2, number=3, type=12, cpp_type=9, label=2, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='tags', full_name='aws.kinesis.protobuf.Record.tags', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], oneofs=[ ], serialized_start=89, serialized_end=214, ) _AGGREGATEDRECORD = _descriptor.Descriptor( name='AggregatedRecord', full_name='aws.kinesis.protobuf.AggregatedRecord', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='partition_key_table', full_name='aws.kinesis.protobuf.AggregatedRecord.partition_key_table', index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='explicit_hash_key_table', full_name='aws.kinesis.protobuf.AggregatedRecord.explicit_hash_key_table', index=1, number=2, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, options=None), _descriptor.FieldDescriptor( name='records', full_name='aws.kinesis.protobuf.AggregatedRecord.records', index=2, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], oneofs=[ ], serialized_start=216, serialized_end=343, ) _MESSAGE = _descriptor.Descriptor( name='Message', full_name='aws.kinesis.protobuf.Message', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='id', full_name='aws.kinesis.protobuf.Message.id', index=0, number=1, type=4, cpp_type=4, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='source_id', full_name='aws.kinesis.protobuf.Message.source_id', index=1, number=2, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='put_record', full_name='aws.kinesis.protobuf.Message.put_record', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='flush', full_name='aws.kinesis.protobuf.Message.flush', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='put_record_result', full_name='aws.kinesis.protobuf.Message.put_record_result', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='configuration', full_name='aws.kinesis.protobuf.Message.configuration', index=5, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='metrics_request', full_name='aws.kinesis.protobuf.Message.metrics_request', index=6, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='metrics_response', full_name='aws.kinesis.protobuf.Message.metrics_response', index=7, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='set_credentials', full_name='aws.kinesis.protobuf.Message.set_credentials', index=8, number=9, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, 
is_extendable=False, extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='actual_message', full_name='aws.kinesis.protobuf.Message.actual_message', index=0, containing_type=None, fields=[]), ], serialized_start=346, serialized_end=832, ) _PUTRECORD = _descriptor.Descriptor( name='PutRecord', full_name='aws.kinesis.protobuf.PutRecord', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='stream_name', full_name='aws.kinesis.protobuf.PutRecord.stream_name', index=0, number=1, type=9, cpp_type=9, label=2, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='partition_key', full_name='aws.kinesis.protobuf.PutRecord.partition_key', index=1, number=2, type=9, cpp_type=9, label=2, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='explicit_hash_key', full_name='aws.kinesis.protobuf.PutRecord.explicit_hash_key', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='data', full_name='aws.kinesis.protobuf.PutRecord.data', index=3, number=4, type=12, cpp_type=9, label=2, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], oneofs=[ ], serialized_start=834, serialized_end=930, ) _FLUSH = _descriptor.Descriptor( name='Flush', full_name='aws.kinesis.protobuf.Flush', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='stream_name', full_name='aws.kinesis.protobuf.Flush.stream_name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], oneofs=[ ], serialized_start=932, serialized_end=960, ) _ATTEMPT = _descriptor.Descriptor( name='Attempt', full_name='aws.kinesis.protobuf.Attempt', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='delay', full_name='aws.kinesis.protobuf.Attempt.delay', index=0, number=1, type=13, cpp_type=3, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='duration', full_name='aws.kinesis.protobuf.Attempt.duration', index=1, number=2, type=13, cpp_type=3, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='success', full_name='aws.kinesis.protobuf.Attempt.success', index=2, number=3, type=8, cpp_type=7, label=2, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, options=None), _descriptor.FieldDescriptor( name='error_code', full_name='aws.kinesis.protobuf.Attempt.error_code', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='error_message', full_name='aws.kinesis.protobuf.Attempt.error_message', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], oneofs=[ ], serialized_start=962, serialized_end=1064, ) _PUTRECORDRESULT = _descriptor.Descriptor( name='PutRecordResult', full_name='aws.kinesis.protobuf.PutRecordResult', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='attempts', full_name='aws.kinesis.protobuf.PutRecordResult.attempts', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='success', full_name='aws.kinesis.protobuf.PutRecordResult.success', index=1, number=2, type=8, cpp_type=7, label=2, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='shard_id', full_name='aws.kinesis.protobuf.PutRecordResult.shard_id', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='sequence_number', full_name='aws.kinesis.protobuf.PutRecordResult.sequence_number', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], oneofs=[ ], serialized_start=1066, serialized_end=1192, ) _CREDENTIALS = _descriptor.Descriptor( name='Credentials', full_name='aws.kinesis.protobuf.Credentials', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='akid', full_name='aws.kinesis.protobuf.Credentials.akid', index=0, number=1, type=9, cpp_type=9, label=2, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='secret_key', full_name='aws.kinesis.protobuf.Credentials.secret_key', index=1, number=2, type=9, cpp_type=9, label=2, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='token', full_name='aws.kinesis.protobuf.Credentials.token', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, 
enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], oneofs=[ ], serialized_start=1194, serialized_end=1256, ) _SETCREDENTIALS = _descriptor.Descriptor( name='SetCredentials', full_name='aws.kinesis.protobuf.SetCredentials', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='for_metrics', full_name='aws.kinesis.protobuf.SetCredentials.for_metrics', index=0, number=1, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='credentials', full_name='aws.kinesis.protobuf.SetCredentials.credentials', index=1, number=2, type=11, cpp_type=10, label=2, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], oneofs=[ ], serialized_start=1258, serialized_end=1351, ) _DIMENSION = _descriptor.Descriptor( name='Dimension', full_name='aws.kinesis.protobuf.Dimension', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='aws.kinesis.protobuf.Dimension.key', index=0, number=1, type=9, cpp_type=9, label=2, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='value', full_name='aws.kinesis.protobuf.Dimension.value', index=1, number=2, type=9, cpp_type=9, label=2, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], oneofs=[ ], serialized_start=1353, serialized_end=1392, ) _STATS = _descriptor.Descriptor( name='Stats', full_name='aws.kinesis.protobuf.Stats', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='count', full_name='aws.kinesis.protobuf.Stats.count', index=0, number=1, type=1, cpp_type=5, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='sum', full_name='aws.kinesis.protobuf.Stats.sum', index=1, number=2, type=1, cpp_type=5, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='mean', full_name='aws.kinesis.protobuf.Stats.mean', index=2, number=3, type=1, cpp_type=5, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='min', full_name='aws.kinesis.protobuf.Stats.min', index=3, number=4, type=1, cpp_type=5, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='max', 
full_name='aws.kinesis.protobuf.Stats.max', index=4, number=5, type=1, cpp_type=5, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], oneofs=[ ], serialized_start=1394, serialized_end=1469, ) _METRIC = _descriptor.Descriptor( name='Metric', full_name='aws.kinesis.protobuf.Metric', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='aws.kinesis.protobuf.Metric.name', index=0, number=1, type=9, cpp_type=9, label=2, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='dimensions', full_name='aws.kinesis.protobuf.Metric.dimensions', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='stats', full_name='aws.kinesis.protobuf.Metric.stats', index=2, number=3, type=11, cpp_type=10, label=2, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='seconds', full_name='aws.kinesis.protobuf.Metric.seconds', index=3, number=4, type=4, cpp_type=4, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], oneofs=[ ], serialized_start=1472, serialized_end=1608, ) _METRICSREQUEST = _descriptor.Descriptor( name='MetricsRequest', full_name='aws.kinesis.protobuf.MetricsRequest', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='aws.kinesis.protobuf.MetricsRequest.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='seconds', full_name='aws.kinesis.protobuf.MetricsRequest.seconds', index=1, number=2, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], oneofs=[ ], serialized_start=1610, serialized_end=1657, ) _METRICSRESPONSE = _descriptor.Descriptor( name='MetricsResponse', full_name='aws.kinesis.protobuf.MetricsResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='metrics', full_name='aws.kinesis.protobuf.MetricsResponse.metrics', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], oneofs=[ ], serialized_start=1659, 
serialized_end=1723, ) _RECORD.fields_by_name['tags'].message_type = _TAG _AGGREGATEDRECORD.fields_by_name['records'].message_type = _RECORD _MESSAGE.fields_by_name['put_record'].message_type = _PUTRECORD _MESSAGE.fields_by_name['flush'].message_type = _FLUSH _MESSAGE.fields_by_name['put_record_result'].message_type = _PUTRECORDRESULT _MESSAGE.fields_by_name['configuration'].message_type = config_pb2._CONFIGURATION _MESSAGE.fields_by_name['metrics_request'].message_type = _METRICSREQUEST _MESSAGE.fields_by_name['metrics_response'].message_type = _METRICSRESPONSE _MESSAGE.fields_by_name['set_credentials'].message_type = _SETCREDENTIALS _MESSAGE.oneofs_by_name['actual_message'].fields.append( _MESSAGE.fields_by_name['put_record']) _MESSAGE.fields_by_name['put_record'].containing_oneof = _MESSAGE.oneofs_by_name['actual_message'] _MESSAGE.oneofs_by_name['actual_message'].fields.append( _MESSAGE.fields_by_name['flush']) _MESSAGE.fields_by_name['flush'].containing_oneof = _MESSAGE.oneofs_by_name['actual_message'] _MESSAGE.oneofs_by_name['actual_message'].fields.append( _MESSAGE.fields_by_name['put_record_result']) _MESSAGE.fields_by_name['put_record_result'].containing_oneof = _MESSAGE.oneofs_by_name['actual_message'] _MESSAGE.oneofs_by_name['actual_message'].fields.append( _MESSAGE.fields_by_name['configuration']) _MESSAGE.fields_by_name['configuration'].containing_oneof = _MESSAGE.oneofs_by_name['actual_message'] _MESSAGE.oneofs_by_name['actual_message'].fields.append( _MESSAGE.fields_by_name['metrics_request']) _MESSAGE.fields_by_name['metrics_request'].containing_oneof = _MESSAGE.oneofs_by_name['actual_message'] _MESSAGE.oneofs_by_name['actual_message'].fields.append( _MESSAGE.fields_by_name['metrics_response']) _MESSAGE.fields_by_name['metrics_response'].containing_oneof = _MESSAGE.oneofs_by_name['actual_message'] _MESSAGE.oneofs_by_name['actual_message'].fields.append( _MESSAGE.fields_by_name['set_credentials']) _MESSAGE.fields_by_name['set_credentials'].containing_oneof = _MESSAGE.oneofs_by_name['actual_message'] _PUTRECORDRESULT.fields_by_name['attempts'].message_type = _ATTEMPT _SETCREDENTIALS.fields_by_name['credentials'].message_type = _CREDENTIALS _METRIC.fields_by_name['dimensions'].message_type = _DIMENSION _METRIC.fields_by_name['stats'].message_type = _STATS _METRICSRESPONSE.fields_by_name['metrics'].message_type = _METRIC DESCRIPTOR.message_types_by_name['Tag'] = _TAG DESCRIPTOR.message_types_by_name['Record'] = _RECORD DESCRIPTOR.message_types_by_name['AggregatedRecord'] = _AGGREGATEDRECORD DESCRIPTOR.message_types_by_name['Message'] = _MESSAGE DESCRIPTOR.message_types_by_name['PutRecord'] = _PUTRECORD DESCRIPTOR.message_types_by_name['Flush'] = _FLUSH DESCRIPTOR.message_types_by_name['Attempt'] = _ATTEMPT DESCRIPTOR.message_types_by_name['PutRecordResult'] = _PUTRECORDRESULT DESCRIPTOR.message_types_by_name['Credentials'] = _CREDENTIALS DESCRIPTOR.message_types_by_name['SetCredentials'] = _SETCREDENTIALS DESCRIPTOR.message_types_by_name['Dimension'] = _DIMENSION DESCRIPTOR.message_types_by_name['Stats'] = _STATS DESCRIPTOR.message_types_by_name['Metric'] = _METRIC DESCRIPTOR.message_types_by_name['MetricsRequest'] = _METRICSREQUEST DESCRIPTOR.message_types_by_name['MetricsResponse'] = _METRICSRESPONSE Tag = _reflection.GeneratedProtocolMessageType('Tag', (_message.Message,), dict( DESCRIPTOR = _TAG, __module__ = 'messages_pb2' # @@protoc_insertion_point(class_scope:aws.kinesis.protobuf.Tag) )) _sym_db.RegisterMessage(Tag) Record = 
_reflection.GeneratedProtocolMessageType('Record', (_message.Message,), dict( DESCRIPTOR = _RECORD, __module__ = 'messages_pb2' # @@protoc_insertion_point(class_scope:aws.kinesis.protobuf.Record) )) _sym_db.RegisterMessage(Record) AggregatedRecord = _reflection.GeneratedProtocolMessageType('AggregatedRecord', (_message.Message,), dict( DESCRIPTOR = _AGGREGATEDRECORD, __module__ = 'messages_pb2' # @@protoc_insertion_point(class_scope:aws.kinesis.protobuf.AggregatedRecord) )) _sym_db.RegisterMessage(AggregatedRecord) Message = _reflection.GeneratedProtocolMessageType('Message', (_message.Message,), dict( DESCRIPTOR = _MESSAGE, __module__ = 'messages_pb2' # @@protoc_insertion_point(class_scope:aws.kinesis.protobuf.Message) )) _sym_db.RegisterMessage(Message) PutRecord = _reflection.GeneratedProtocolMessageType('PutRecord', (_message.Message,), dict( DESCRIPTOR = _PUTRECORD, __module__ = 'messages_pb2' # @@protoc_insertion_point(class_scope:aws.kinesis.protobuf.PutRecord) )) _sym_db.RegisterMessage(PutRecord) Flush = _reflection.GeneratedProtocolMessageType('Flush', (_message.Message,), dict( DESCRIPTOR = _FLUSH, __module__ = 'messages_pb2' # @@protoc_insertion_point(class_scope:aws.kinesis.protobuf.Flush) )) _sym_db.RegisterMessage(Flush) Attempt = _reflection.GeneratedProtocolMessageType('Attempt', (_message.Message,), dict( DESCRIPTOR = _ATTEMPT, __module__ = 'messages_pb2' # @@protoc_insertion_point(class_scope:aws.kinesis.protobuf.Attempt) )) _sym_db.RegisterMessage(Attempt) PutRecordResult = _reflection.GeneratedProtocolMessageType('PutRecordResult', (_message.Message,), dict( DESCRIPTOR = _PUTRECORDRESULT, __module__ = 'messages_pb2' # @@protoc_insertion_point(class_scope:aws.kinesis.protobuf.PutRecordResult) )) _sym_db.RegisterMessage(PutRecordResult) Credentials = _reflection.GeneratedProtocolMessageType('Credentials', (_message.Message,), dict( DESCRIPTOR = _CREDENTIALS, __module__ = 'messages_pb2' # @@protoc_insertion_point(class_scope:aws.kinesis.protobuf.Credentials) )) _sym_db.RegisterMessage(Credentials) SetCredentials = _reflection.GeneratedProtocolMessageType('SetCredentials', (_message.Message,), dict( DESCRIPTOR = _SETCREDENTIALS, __module__ = 'messages_pb2' # @@protoc_insertion_point(class_scope:aws.kinesis.protobuf.SetCredentials) )) _sym_db.RegisterMessage(SetCredentials) Dimension = _reflection.GeneratedProtocolMessageType('Dimension', (_message.Message,), dict( DESCRIPTOR = _DIMENSION, __module__ = 'messages_pb2' # @@protoc_insertion_point(class_scope:aws.kinesis.protobuf.Dimension) )) _sym_db.RegisterMessage(Dimension) Stats = _reflection.GeneratedProtocolMessageType('Stats', (_message.Message,), dict( DESCRIPTOR = _STATS, __module__ = 'messages_pb2' # @@protoc_insertion_point(class_scope:aws.kinesis.protobuf.Stats) )) _sym_db.RegisterMessage(Stats) Metric = _reflection.GeneratedProtocolMessageType('Metric', (_message.Message,), dict( DESCRIPTOR = _METRIC, __module__ = 'messages_pb2' # @@protoc_insertion_point(class_scope:aws.kinesis.protobuf.Metric) )) _sym_db.RegisterMessage(Metric) MetricsRequest = _reflection.GeneratedProtocolMessageType('MetricsRequest', (_message.Message,), dict( DESCRIPTOR = _METRICSREQUEST, __module__ = 'messages_pb2' # @@protoc_insertion_point(class_scope:aws.kinesis.protobuf.MetricsRequest) )) _sym_db.RegisterMessage(MetricsRequest) MetricsResponse = _reflection.GeneratedProtocolMessageType('MetricsResponse', (_message.Message,), dict( DESCRIPTOR = _METRICSRESPONSE, __module__ = 'messages_pb2' # 
@@protoc_insertion_point(class_scope:aws.kinesis.protobuf.MetricsResponse) )) _sym_db.RegisterMessage(MetricsResponse) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\'com.amazonaws.kinesis.producer.protobuf')) # @@protoc_insertion_point(module_scope)
[ "aferm@petropower.com" ]
aferm@petropower.com
da25cb5b8c5a7e297cbea47ff6715d495fb036a3
b27308e062c956dae4e9bd6902e1a46a3ee08f2f
/Django/d01/ex03/capital_city.py
6413b8da94500f305cd2be657fb07735be98dbbd
[]
no_license
vlapparov/Ecole42
d25a9af2d68cedbf349597a051bcdfca149b60f0
0760df71e52deee2f1b3c9cb887fc8286f7549a8
refs/heads/master
2020-03-30T12:54:09.289574
2018-10-14T23:47:01
2018-10-14T23:47:01
151,247,020
0
0
null
null
null
null
UTF-8
Python
false
false
578
py
# coding: utf-8

# In[18]:

from sys import argv


def my_var():
    states = {
        "Oregon": "OR",
        "Alabama": "AL",
        "New Jersey": "NJ",
        "Colorado": "CO"
    }
    capital_cities = {
        "OR": "Salem",
        "AL": "Montgomery",
        "NJ": "Trenton",
        "CO": "Denver"
    }
    if(len(argv) != 2):
        return
    else:
        if(argv[1] in states):
            if(states[argv[1]] in capital_cities):
                print(capital_cities[states[argv[1]]])
        else:
            print('Unknown state.')


if __name__ == '__main__':
    my_var()
[ "vlapparov@Valentins-MacBook-Pro-2.local" ]
vlapparov@Valentins-MacBook-Pro-2.local
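A hedged usage sketch for the capital_city.py record above, assuming it is run from the command line with a single state-name argument; the session below is illustrative, not from the source:

# Hypothetical command-line session for capital_city.py:
#   $ python capital_city.py "New Jersey"
#   Trenton
#   $ python capital_city.py Atlantis
#   Unknown state.
#   $ python capital_city.py          # wrong argument count: prints nothing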
8a7547871ef8dbddb0987ec8942ad3ec3d0e0fe2
f5ef9235079706f801de198c70585e68d338beff
/five_votes/example_new.py
1c7ce5d995982aa01f4f7e3aba0cd87b56f9ad26
[]
no_license
garciaclaudio/askthecrowd
1f025f8f5a58ae84d5723bd913b308a3e37a6dd9
7925075d9ccd70707460e451485292618a148f09
refs/heads/master
2021-01-13T01:55:59.328564
2015-12-15T20:34:33
2015-12-15T20:34:33
537,308
0
0
null
null
null
null
UTF-8
Python
false
false
5,694
py
#!/usr/bin/env python
#
# Copyright 2010 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
A barebones AppEngine application that uses Facebook for login.

1. Make sure you add a copy of facebook.py (from python-sdk/src/)
   into this directory so it can be imported.
2. Don't forget to tick Login With Facebook on your facebook app's
   dashboard and place the app's url wherever it is hosted.
3. Place a random, unguessable string as a session secret below in
   the config dict.
4. Fill in the app id and app secret.
5. Change the application name in app.yaml.
"""

FACEBOOK_APP_ID = '331909936825023'
FACEBOOK_APP_SECRET = '9f17f1ecae197ca6bf22d809442be538'

import facebook
import webapp2
import os
import jinja2
import urllib2
import os, sys
import pprint

from google.appengine.ext import db
from webapp2_extras import sessions

config = {}
config['webapp2_extras.sessions'] = dict(secret_key='fbar1sas26786345barfoobfdsazbar67asdasd32')


class User(db.Model):
    id = db.StringProperty(required=True)
    created = db.DateTimeProperty(auto_now_add=True)
    updated = db.DateTimeProperty(auto_now=True)
    name = db.StringProperty(required=True)
    profile_url = db.StringProperty(required=True)
    access_token = db.StringProperty(required=True)


class BaseHandler(webapp2.RequestHandler):
    """Provides access to the active Facebook user in self.current_user

    The property is lazy-loaded on first access, using the cookie saved
    by the Facebook JavaScript SDK to determine the user ID of the active
    user. See http://developers.facebook.com/docs/authentication/ for
    more information.
    """

    @property
    def current_user(self):
        if self.session.get("user"):
            # User is logged in
            return self.session.get("user")
        else:
            # Either user just logged in or just saw the first page
            # We'll see here
            cookie = facebook.get_user_from_cookie(self.request.cookies,
                                                   FACEBOOK_APP_ID,
                                                   FACEBOOK_APP_SECRET)
            if cookie:
                # Okay so user logged in.
                # Now, check to see if existing user
                user = User.get_by_key_name(cookie["uid"])
                if not user:
                    # Not an existing user so get user info
                    graph = facebook.GraphAPI(cookie["access_token"])
                    profile = graph.get_object("me")
                    user = User(
                        key_name=str(profile["id"]),
                        id=str(profile["id"]),
                        name=profile["name"],
                        profile_url=profile["link"],
                        access_token=cookie["access_token"]
                    )
                    user.put()
                elif user.access_token != cookie["access_token"]:
                    user.access_token = cookie["access_token"]
                    user.put()
                # User is now logged in
                self.session["user"] = dict(
                    name=user.name,
                    profile_url=user.profile_url,
                    id=user.id,
                    access_token=user.access_token
                )
                return self.session.get("user")
        return None

    def dispatch(self):
        """
        This snippet of code is taken from the webapp2 framework documentation.
        See more at
        http://webapp-improved.appspot.com/api/webapp2_extras/sessions.html
        """
        self.session_store = sessions.get_store(request=self.request)
        try:
            webapp2.RequestHandler.dispatch(self)
        finally:
            self.session_store.save_sessions(self.response)

    @webapp2.cached_property
    def session(self):
        """
        This snippet of code is taken from the webapp2 framework documentation.
        See more at
        http://webapp-improved.appspot.com/api/webapp2_extras/sessions.html
        """
        return self.session_store.get_session()


class HomeHandler(BaseHandler):
    def get(self):
        print >> sys.stderr, '=============== HERE I AM =============='
        template = jinja_environment.get_template('example_new.html')
        self.response.out.write(template.render(dict(
            facebook_app_id=FACEBOOK_APP_ID,
            current_user=self.current_user
        )))

    def post(self):
        url = self.request.get('url')
        file = urllib2.urlopen(url)
        graph = facebook.GraphAPI(self.current_user['access_token'])
        response = graph.put_photo(file, "Test Image")
        photo_url = ("http://www.facebook.com/"
                     "photo.php?fbid={0}".format(response['id']))
        self.redirect(str(photo_url))


class LogoutHandler(BaseHandler):
    def get(self):
        if self.current_user is not None:
            self.session['user'] = None
        self.redirect('/')


jinja_environment = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__))
)

app = webapp2.WSGIApplication(
    [('/', HomeHandler), ('/logout', LogoutHandler)],
    debug=True,
    config=config
)
[ "claudio.garcia@booking.com" ]
claudio.garcia@booking.com
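A short trace of the login flow implemented by BaseHandler.current_user in the record above, summarized as comments (no new behavior, only what the code already does):

# 1. session["user"] present       -> return it (user already logged in)
# 2. otherwise read the signed cookie set by the Facebook JS SDK
# 3. cookie valid                  -> load or create the User entity and
#                                     refresh its stored access token
# 4. cache a small user dict in the session and return it; if no valid
#    cookie was found, fall through and return None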
a51d66646ddf79561b2c16509843ee824196ca37
f3e7e79c9e71c2e6b713b1bc91feba795dba44d0
/main.py
6a00c94f66918df25f4a4ab106f68c894222834b
[ "MIT" ]
permissive
veeral-agarwal/Genetic-Algorithm
720c12f755bceb653a6a309295befaeb82ac0244
471ecac287d4e6a028e84887dab328ca950ffc5f
refs/heads/main
2023-05-14T16:23:27.099235
2021-06-03T22:48:15
2021-06-03T22:48:15
340,640,810
0
0
null
2021-05-20T21:19:28
2021-02-20T11:43:19
Jupyter Notebook
UTF-8
Python
false
false
9,449
py
import numpy as np from client import * import random import json #13508412130.218098 368125762698.6422 #13560827319.190525 370434930576.4746 #13532626957.355581 369745382579.87744 #13510723304.19212 368296592820.6967 this # f=open("overfit.txt","r") # data=f.read() # data=data.rstrip() # data=data.strip('][').split(', ') # f.close() # for i in range(len(data)): # data[i]=float(data[i]) with open ("TeamName1.json","r") as file: vectors = json.load(file) # data = [[]] # for i in range(len(vectors)): # for j in range(len(vectors[i])): # data[i][j]=float(vectors[i][j]) data = vectors #print(list(data)) pop_size=10 chromosome_size=11 train_factor = 0.5 def mod(val): if val<0: return -1*val return val #change few genes of chromosome #change few genes of chromosome def mutate(chromosome:np.ndarray): mutation_probability = 0.2 for i in range(chromosome_size): l = abs(chromosome[i])/5000 r = -1*l temp = random.uniform(r,l) k = random.uniform(0,1) if k <= mutation_probability: chromosome[i]+=temp if chromosome[i]>10: chromosome[i]=10 elif chromosome[i]<-10: chromosome[i]=-10 #population generation def get_fitness(arr, ind): #getting errors on the data fitness=[0 for i in range(chromosome_size)] j=0 for chromoso in arr: trainerr,validerr=get_errors(key,list(chromoso)) print(trainerr,validerr) fitness[j]=1/(train_factor*trainerr+validerr) j+=1 if ind==1: for m in range(chromosome_size): fitness1[m]=fitness[m] #print(fitness) else: for m in range(chromosome_size): fitness2[m]=fitness[m] #calculate probabilities sum_fit=np.sum(fitness) generation_file.write("respective probabilities :") for k in range(pop_size): probability[k]=fitness[k]/sum_fit generation_file.write(str(probability[k])+"\n") def selection(arr:np.ndarray): for i in range(pop_size): parent1ind=np.random.choice(pop_size,p=probability) parent1=arr[parent1ind] parent2ind=np.random.choice(pop_size,p=probability) parent2=arr[parent2ind] #printing parents after selection : print("printing parents after selection :") print("parent1:") print(parent1) print("parent2:") print(parent2) # appending parents after selection in generation file generation_file.write("parents after selection:\n") generation_file.write("parent1 and its probability : ") generation_file.write(str(parent1)+str(probability[parent1ind])+"\n") generation_file.write("parent2 and its probability: ") generation_file.write(str(parent2)+str(probability[parent2ind])+"\n") new_pop[i]=crossover(parent1,parent2) def crossover(parent1,parent2): mid=random.randint(1,chromosome_size-1) child=np.ones(chromosome_size) for i in range(0,mid+1): child[i]=parent1[i] for j in range(mid,chromosome_size): child[j]=parent2[j] print(child) return child # temp_arr = np.zeros((pop_size,11)) temp_arr = data print(temp_arr) generation_file = open("15_march_generationsat1.txt","a") generations=1 total_generations=2 new_init_pop=np.zeros((pop_size,11)) while(generations!=total_generations): #at last we can put newpop to init pop and start algo again init_pop=np.zeros((pop_size,11)) new_pop=np.zeros((pop_size,11)) fitness1=[0 for i in range(chromosome_size)] fitness2=[0 for i in range(chromosome_size)] probability=[0 for j in range(pop_size)] #copy the original vector to all the population and change few values in the population so that it generates varied initial population,ie we can simply mutate for i in range(pop_size): if generations==0: for j in range (chromosome_size): probab=11 tempp = random.randint(1,10) if tempp < probab: rng = np.random.uniform(low = 0.3, high = 0.90) init_pop[i][j] = rng* 
temp_arr[i][j] else: for j in range (chromosome_size): init_pop[i][j]=temp_arr[i][j] mutate(init_pop[i]) #initial population printing print("initial population :") for lol in init_pop: print(lol) #appending initial populaton in generations file generation_file.write("\n \n \n \ngeneration 1 \n\n") #put here generation number generation_file.write("initial population:\n") for lol in init_pop: generation_file.write(str(lol)+"\n") generation_file.write("\n") # generations+=1 get_fitness(init_pop,1) selection(init_pop) # printing population after crossover print("after crossover:") for lol in new_pop: print(lol) # appending population after crossover in generation file generation_file.write("population after crossover:\n") for lol in new_pop: generation_file.write(str(lol)+"\n") generation_file.write("\n") # mutation for i in range(pop_size): mutate(new_pop[i]) #printing population after mutation print("population after mutation :") for lol in new_pop: print(lol) # appending population after mutation in generation file generation_file.write("population after mutation:\n") for lol in new_pop: generation_file.write(str(lol)+"\n") generation_file.write("\n") #print(new_pop) get_fitness(new_pop,2) #print(fitness1,"initialfitness") #print(fitness2,"childrenfitness") #we need to replace children which are more fit than parents #sort population according to fitness, descending #new_init_pop=np.zeros() finaltup=[] for i in range(pop_size): finaltup.append((fitness1[i],init_pop[i])) for i in range(pop_size): finaltup.append((fitness2[i],new_pop[i])) print("final touple:") print(finaltup) # <<<<<<< HEAD finaltup.sort(reverse=True , key=lambda x:x[0]) # ======= finaltup.sort(reverse=True, key=lambda x:x[0]) #made change here # >>>>>>> 79663dab7f777da52388a064f057b1a35a525a35 print("final sorted touple") print(finaltup) print("FFS , MIXED FITNESS FUNCTIONS IN ORDER") for i in range(pop_size): new_init_pop[i]=finaltup[i][1] print(finaltup[i][0]) ret=submit(key,list(new_init_pop[0])) print(ret) #printing the vector we are submitting print("the vector we are submitting",end=" ") print(new_init_pop[0]) tr ,va = get_errors(key,list(new_init_pop[0])) update = [] for i in range(len(new_init_pop)): update.append(list(new_init_pop[i])) loll = open("vectors15at1.txt","a") loll.write(str(new_init_pop[0] )) loll.close() loll = open("train15at1.txt","a") loll.write(str(tr )+"\n") loll.close() loll = open("validations15at1.txt","a") loll.write(str(va )+"\n") loll.close() generations+=1 with open('TeamName1.json','w') as outfile: json.dump(update,outfile) ''' [ 0.00000000e+00 -1.45791987e-12 -2.28980078e-13 4.62026183e-11 -1.75232807e-10 -1.83669770e-15 8.52944060e-16 2.29423303e-05 -2.04721003e-06 -1.59784330e-08 9.98380485e-10] [ 0.00000000e+00 -1.45791987e-12 -2.28980078e-13 4.62165370e-11 -1.75214813e-10 -1.83669770e-15 8.52944060e-16 2.29423303e-05 -2.04726501e-06 -1.59792834e-08 9.98289696e-10] [ 0.00000000e+00 -1.45799022e-12 -2.28980078e-13 4.62094809e-11 -1.75240463e-10 -1.83689245e-15 8.52944060e-16 2.29423303e-05 -2.04726501e-06 -1.59792834e-08 9.98172827e-10] [ 0.00000000e+00 -1.45799022e-12 -2.28980078e-13 4.62010753e-11 -1.75214358e-10 -1.83695504e-15 8.52944060e-16 2.29423303e-05 -2.04713431e-06 -1.59792834e-08 9.98214034e-10] [ 0.00000000e+00 -1.45799022e-12 -2.28980078e-13 4.62010753e-11 -1.75214813e-10 -1.83705704e-15 8.52993038e-16 2.29424118e-05 -2.04717969e-06 -1.59818356e-08 9.98214034e-10] [ 0.00000000e+00 -1.45823003e-12 -2.28980078e-13 4.62010753e-11 -1.75214813e-10 -1.83669770e-15 
8.52944060e-16 2.29474725e-05 -2.04732743e-06 -1.59792834e-08 9.98214034e-10] [ 0.00000000e+00 -1.45791987e-12 -2.28954113e-13 4.62090729e-11 -1.75214813e-10 -1.83669770e-15 8.52944060e-16 2.29388125e-05 -2.04740215e-06 -1.59784330e-08 9.98386710e-10] [ 0.00000000e+00 -1.45802904e-12 -2.28980078e-13 4.62094809e-11 -1.75240463e-10 -1.83689245e-15 8.52944060e-16 2.29467194e-05 -2.04686387e-06 -1.59777469e-08 9.98214034e-10] [ 0.00000000e+00 -1.45810044e-12 -2.28980250e-13 4.62010753e-11 -1.75232807e-10 -1.83669770e-15 8.52944060e-16 2.29423303e-05 -2.04748606e-06 -1.59784330e-08 9.98153221e-10] [ 0.00000000e+00 -1.45792140e-12 -2.28954583e-13 4.62094809e-11 -1.75255822e-10 -1.83669770e-15 8.52944060e-16 2.29423303e-05 -2.04740927e-06 -1.59789814e-08 9.98501919e-10] ''' ''' [ 0.00000000e+00 -1.45791987e-12 -2.28980078e-13 4.62026183e-11 -1.75232807e-10 -1.83669770e-15 8.52944060e-16 2.29423303e-05 -2.04721003e-06 -1.59784330e-08 9.98380485e-10] the vector we are submitting [ 0.00000000e+00 -4.68244455e-13 -1.23800807e-13 4.62010753e-11 -1.08897836e-10 -1.83645233e-15 2.85985980e-16 2.29457065e-05 -2.04721003e-06 -5.09817427e-09 6.19031957e-10] '''
[ "veeral.agarwal@research.iiit.ac.in" ]
veeral.agarwal@research.iiit.ac.in
363bdeff54855b7013b24e42476b351ed21eff08
34ef54c04b369a6161c6f8a649868a47122a2d89
/.venv/Lib/site-packages/Bio/PDB/parse_pdb_header.py
10c9f1a5d1e7f5373d3275dedda3c4d241e052ba
[ "MIT" ]
permissive
abner-lucas/tp-cruzi-db
f70ad269c50a2db24debd1455daeddaa2ebd3923
595c5c46794ae08a1f19716636eac7430cededa1
refs/heads/bioinformatica
2023-05-18T23:23:23.458394
2021-06-14T02:13:17
2021-06-14T02:13:17
351,864,250
2
2
MIT
2021-06-13T19:52:18
2021-03-26T17:40:20
Python
UTF-8
Python
false
false
11,664
py
#!/usr/bin/env python # Copyright 2004 Kristian Rother. # Revisions copyright 2004 Thomas Hamelryck. # # This file is part of the Biopython distribution and governed by your # choice of the "Biopython License Agreement" or the "BSD 3-Clause License". # Please see the LICENSE file that should have been included as part of this # package. """Parse header of PDB files into a python dictionary. Emerged from the Columba database project www.columba-db.de, original author Kristian Rother. """ import re from Bio import File def _get_journal(inl): # JRNL AUTH L.CHEN,M.DOI,F.S.MATHEWS,A.Y.CHISTOSERDOV, 2BBK 7 journal = "" for l in inl: if re.search(r"\AJRNL", l): journal += l[19:72].lower() journal = re.sub(r"\s\s+", " ", journal) return journal def _get_references(inl): # REMARK 1 REFERENCE 1 1CSE 11 # REMARK 1 AUTH W.BODE,E.PAPAMOKOS,D.MUSIL 1CSE 12 references = [] actref = "" for l in inl: if re.search(r"\AREMARK 1", l): if re.search(r"\AREMARK 1 REFERENCE", l): if actref != "": actref = re.sub(r"\s\s+", " ", actref) if actref != " ": references.append(actref) actref = "" else: actref += l[19:72].lower() if actref != "": actref = re.sub(r"\s\s+", " ", actref) if actref != " ": references.append(actref) return references # bring dates to format: 1909-01-08 def _format_date(pdb_date): """Convert dates from DD-Mon-YY to YYYY-MM-DD format (PRIVATE).""" date = "" year = int(pdb_date[7:]) if year < 50: century = 2000 else: century = 1900 date = str(century + year) + "-" all_months = [ "xxx", "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec", ] month = str(all_months.index(pdb_date[3:6])) if len(month) == 1: month = "0" + month date = date + month + "-" + pdb_date[:2] return date def _chop_end_codes(line): """Chops lines ending with ' 1CSA 14' and the like (PRIVATE).""" return re.sub(r"\s\s\s\s+[\w]{4}.\s+\d*\Z", "", line) def _chop_end_misc(line): """Chops lines ending with ' 14-JUL-97 1CSA' and the like (PRIVATE).""" return re.sub(r"\s+\d\d-\w\w\w-\d\d\s+[1-9][0-9A-Z]{3}\s*\Z", "", line) def _nice_case(line): """Make A Lowercase String With Capitals (PRIVATE).""" line_lower = line.lower() s = "" i = 0 nextCap = 1 while i < len(line_lower): c = line_lower[i] if c >= "a" and c <= "z" and nextCap: c = c.upper() nextCap = 0 elif c in " .,;:\t-_": nextCap = 1 s += c i += 1 return s def parse_pdb_header(infile): """Return the header lines of a pdb file as a dictionary. Dictionary keys are: head, deposition_date, release_date, structure_method, resolution, structure_reference, journal_reference, author and compound. """ header = [] with File.as_handle(infile) as f: for l in f: record_type = l[0:6] if record_type in ("ATOM ", "HETATM", "MODEL "): break else: header.append(l) return _parse_pdb_header_list(header) def _parse_remark_465(line): """Parse missing residue remarks. Returns a dictionary describing the missing residue. The specification for REMARK 465 at http://www.wwpdb.org/documentation/file-format-content/format33/remarks2.html#REMARK%20465 only gives templates, but does not say they have to be followed. So we assume that not all pdb-files with a REMARK 465 can be understood. Returns a dictionary with the following keys: "model", "res_name", "chain", "ssseq", "insertion" """ if line: # Note that line has been stripped. 
assert line[0] != " " and line[-1] not in "\n ", "line has to be stripped" pattern = re.compile( r""" (\d+\s[\sA-Z][\sA-Z][A-Z] | # Either model number + residue name [A-Z]{1,3}) # Or only residue name with 1 (RNA) to 3 letters \s ([A-Za-z0-9]) # A single character chain \s+(\d+[A-Za-z]?)$ # Residue number: A digit followed by an optional # insertion code (Hetero-flags make no sense in # context with missing res) """, re.VERBOSE, ) match = pattern.match(line) if match is None: return None residue = {} if " " in match.group(1): model, residue["res_name"] = match.group(1).split() residue["model"] = int(model) else: residue["model"] = None residue["res_name"] = match.group(1) residue["chain"] = match.group(2) try: residue["ssseq"] = int(match.group(3)) except ValueError: residue["insertion"] = match.group(3)[-1] residue["ssseq"] = int(match.group(3)[:-1]) else: residue["insertion"] = None return residue def _parse_pdb_header_list(header): # database fields pdbh_dict = { "name": "", "head": "", "idcode": "", "deposition_date": "1909-01-08", "release_date": "1909-01-08", "structure_method": "unknown", "resolution": None, "structure_reference": "unknown", "journal_reference": "unknown", "author": "", "compound": {"1": {"misc": ""}}, "source": {"1": {"misc": ""}}, "has_missing_residues": False, "missing_residues": [], } pdbh_dict["structure_reference"] = _get_references(header) pdbh_dict["journal_reference"] = _get_journal(header) comp_molid = "1" last_comp_key = "misc" last_src_key = "misc" for hh in header: h = re.sub(r"[\s\n\r]*\Z", "", hh) # chop linebreaks off # key=re.sub("\s.+\s*","",h) key = h[:6].strip() # tail=re.sub("\A\w+\s+\d*\s*","",h) tail = h[10:].strip() # print("%s:%s" % (key, tail) # From here, all the keys from the header are being parsed if key == "TITLE": name = _chop_end_codes(tail).lower() pdbh_dict["name"] = " ".join([pdbh_dict["name"], name]).strip() elif key == "HEADER": rr = re.search(r"\d\d-\w\w\w-\d\d", tail) if rr is not None: pdbh_dict["deposition_date"] = _format_date(_nice_case(rr.group())) rr = re.search(r"\s+([1-9][0-9A-Z]{3})\s*\Z", tail) if rr is not None: pdbh_dict["idcode"] = rr.group(1) head = _chop_end_misc(tail).lower() pdbh_dict["head"] = head elif key == "COMPND": tt = re.sub(r"\;\s*\Z", "", _chop_end_codes(tail)).lower() # look for E.C. 
numbers in COMPND lines rec = re.search(r"\d+\.\d+\.\d+\.\d+", tt) if rec: pdbh_dict["compound"][comp_molid]["ec_number"] = rec.group() tt = re.sub(r"\((e\.c\.)*\d+\.\d+\.\d+\.\d+\)", "", tt) tok = tt.split(":") if len(tok) >= 2: ckey = tok[0] cval = re.sub(r"\A\s*", "", tok[1]) if ckey == "mol_id": pdbh_dict["compound"][cval] = {"misc": ""} comp_molid = cval last_comp_key = "misc" else: pdbh_dict["compound"][comp_molid][ckey] = cval last_comp_key = ckey else: pdbh_dict["compound"][comp_molid][last_comp_key] += tok[0] + " " elif key == "SOURCE": tt = re.sub(r"\;\s*\Z", "", _chop_end_codes(tail)).lower() tok = tt.split(":") # print(tok) if len(tok) >= 2: ckey = tok[0] cval = re.sub(r"\A\s*", "", tok[1]) if ckey == "mol_id": pdbh_dict["source"][cval] = {"misc": ""} comp_molid = cval last_src_key = "misc" else: pdbh_dict["source"][comp_molid][ckey] = cval last_src_key = ckey else: pdbh_dict["source"][comp_molid][last_src_key] += tok[0] + " " elif key == "KEYWDS": kwd = _chop_end_codes(tail).lower() if "keywords" in pdbh_dict: pdbh_dict["keywords"] += " " + kwd else: pdbh_dict["keywords"] = kwd elif key == "EXPDTA": expd = _chop_end_codes(tail) # chop junk at end of lines for some structures expd = re.sub(r"\s\s\s\s\s\s\s.*\Z", "", expd) # if re.search('\Anmr',expd,re.IGNORECASE): expd='nmr' # if re.search('x-ray diffraction',expd,re.IGNORECASE): expd='x-ray diffraction' pdbh_dict["structure_method"] = expd.lower() elif key == "CAVEAT": # make Annotation entries out of these!!! pass elif key == "REVDAT": rr = re.search(r"\d\d-\w\w\w-\d\d", tail) if rr is not None: pdbh_dict["release_date"] = _format_date(_nice_case(rr.group())) elif key == "JRNL": # print("%s:%s" % (key, tail)) if "journal" in pdbh_dict: pdbh_dict["journal"] += tail else: pdbh_dict["journal"] = tail elif key == "AUTHOR": auth = _nice_case(_chop_end_codes(tail)) if "author" in pdbh_dict: pdbh_dict["author"] += auth else: pdbh_dict["author"] = auth elif key == "REMARK": if re.search("REMARK 2 RESOLUTION.", hh): r = _chop_end_codes(re.sub("REMARK 2 RESOLUTION.", "", hh)) r = re.sub(r"\s+ANGSTROM.*", "", r) try: pdbh_dict["resolution"] = float(r) except ValueError: # print('nonstandard resolution %r' % r) pdbh_dict["resolution"] = None elif hh.startswith("REMARK 465"): if tail: pdbh_dict["has_missing_residues"] = True missing_res_info = _parse_remark_465(tail) if missing_res_info: pdbh_dict["missing_residues"].append(missing_res_info) elif hh.startswith("REMARK 99 ASTRAL"): if tail: remark_99_keyval = tail.replace("ASTRAL ", "").split(": ") if type(remark_99_keyval) == list and len(remark_99_keyval) == 2: if "astral" not in pdbh_dict: pdbh_dict["astral"] = { remark_99_keyval[0]: remark_99_keyval[1] } else: pdbh_dict["astral"][remark_99_keyval[0]] = remark_99_keyval[ 1 ] else: # print(key) pass if pdbh_dict["structure_method"] == "unknown": res = pdbh_dict["resolution"] if res is not None and res > 0.0: pdbh_dict["structure_method"] = "x-ray diffraction" return pdbh_dict
[ "abnerlucas.cad@gmail.com" ]
abnerlucas.cad@gmail.com
c2437a5446c8e2344ca329a820696b24318be6c8
766ed25dc718ba4b9358a13dc5fbff0f846204f8
/iot_temp_mod/iotSupport/getConfig.py
a7d67c20358b076907fcf334ed190f128cb951bd
[]
no_license
MichaelOsowski/iot_temp_mod
01a734f066110c7f45702c612cd684472576a335
1d45e7cb5ca788c4d3332fe5c4ecc5a3d209e87f
refs/heads/master
2021-04-15T13:53:14.758721
2019-07-02T17:37:05
2019-07-02T17:37:05
126,368,053
0
0
null
null
null
null
UTF-8
Python
false
false
443
py
'''
Created on Jan 26, 2016

@author: Michael Osowski
'''
import ConfigParser


def parseConfig(InFile, configGroup):
    confDict = {}
    config = ConfigParser.ConfigParser()
    config.read(InFile)
    options = config.options(configGroup)
    for option in options:
        try:
            confDict[option] = config.get(configGroup, option)
        except:
            print("Missing Config values")
    return confDict
[ "mosowski@newrelic.com" ]
mosowski@newrelic.com
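A hedged usage sketch for getConfig.py above (Python 2, hence the old ConfigParser import name); the ini file name, section, and options are hypothetical:

# Hypothetical settings.ini:
#   [sensor]
#   pin = 4
#   interval = 30
#
# from getConfig import parseConfig
# conf = parseConfig("settings.ini", "sensor")
# # conf == {'pin': '4', 'interval': '30'}  -- ConfigParser.get returns strings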
58794f7b06991f84fe20e97670d5b070e724b7b4
8b37e49a5d061a92816c0e1aaab2bdaa84132ab7
/Interactive_Visualizations_&_Dashboards/Plotly&Flask/PetPals_LT/PetPals/app.py
d4764a9ab94569f1895731533962e6e998063844
[]
no_license
KenT370/Learned_Tech
d4b05e3e26d95e71407ac8b52be5943cf133b774
2b8fb15f3f59840c36220175b536fcb45f571f7d
refs/heads/master
2022-12-10T07:05:48.801794
2019-06-10T16:35:24
2019-06-10T16:35:24
171,935,753
0
0
null
2022-12-08T05:14:19
2019-02-21T19:54:43
Jupyter Notebook
UTF-8
Python
false
false
1,614
py
from flask import Flask, render_template, jsonify, request, redirect
from flask_sqlalchemy import SQLAlchemy
import os
#from flask.ext.heroku import heroku

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://zphvqfcqbejonu:86c66f04c54f05d6fc6ad5adc2683dcb2e3d9bfbbc4c01c42d0005c70ee66807@ec2-54-83-201-84.compute-1.amazonaws.com:5432/d8hgnkhff30apd'
db = SQLAlchemy(app)

from .models import Pet


@app.route('/')
def home():
    return render_template('index.html')


@app.route('/send', methods=['GET', 'POST'])
def send():
    if request.method == 'POST':
        name = request.form['petname']
        lat = request.form['Latitude']
        lon = request.form['Longitude']
        pet = Pet(name=name, lat=lat, lon=lon)
        db.session.add(pet)
        db.session.commit()
        return redirect('/', code=302)
    return render_template('form.html')


@app.route('/api/pals')
def data():
    results = db.session.query(Pet).all()
    name = [var.name for var in results]
    lat = [var.lat for var in results]
    lon = [var.lon for var in results]
    dic = {
        'type': 'scattergeo',
        'mode': 'markers+text',
        'text': name,
        'lon': lon,
        'lat': lat,
        'marker': {'size': 10},
        'name': 'Pet Locations',
        'textposition': 'top center'
    }
    return jsonify(dic)


if __name__ == '__main__':
    app.run()

# class Pet(db.Model):
#     __tablename__ = 'pets'
#     id = db.Column(db.*)
#     lat = db.Column(db.Float)
#     lon = db.Column(db.Float)

#     def __repr__(self):
#         return '<Pet %r>' % (self.name)
[ "kentorres822@gmail.com" ]
kentorres822@gmail.com
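The commented-out Pet model at the bottom of app.py above is incomplete: `db.Column(db.*)` is elided in the source, and the `name` column used by the /send and /api/pals routes is missing. A hedged sketch of what the imported models.py might contain; every column type here is an assumption:

# Hypothetical models.py matching how app.py uses Pet; column types are guesses.
# from app import db
#
# class Pet(db.Model):
#     __tablename__ = 'pets'
#     id = db.Column(db.Integer, primary_key=True)   # assumed primary key
#     name = db.Column(db.String(80))                # used by /send and /api/pals
#     lat = db.Column(db.Float)
#     lon = db.Column(db.Float)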
83828df8c4b7e817787e4b613cd850d74a92d000
445b219933c75009a6c0c813b204df0c720cc4a2
/demo/multi_classification.py
45a674c20e425a7bcfdb8a91f7f0a91a478820aa
[]
no_license
RyotaFuwa/NeuralNetworkFramework
a2fc1e0312f7b3a3db8222dcd085a23ad073709b
9558c195ab796df817cb7d83b9fbb03257eb6b69
refs/heads/master
2022-12-15T09:37:59.685252
2020-09-10T12:36:36
2020-09-10T14:21:28
265,612,918
0
0
null
null
null
null
UTF-8
Python
false
false
547
py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from misc.utils import split_data


def multi_classification(csv_file_path):
    """assuming the csv file has columns: x, y, class"""
    df = pd.read_csv(csv_file_path)
    X = df[['x', 'y']].to_numpy()
    Y = df['class'].to_numpy().reshape((-1, 1))
    plt.scatter(X[:, 0], X[:, 1], c=Y, s=100, marker='o')
    plt.show()
    (train_x, train_y), (test_x, test_y) = split_data(X, Y, ratio=0.75, random=True)


multi_classification('../data/examples/multi_classification/test_data.csv')
[ "ryota.fuwa.1428@gmail.com" ]
ryota.fuwa.1428@gmail.com
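split_data is imported from the repo's misc.utils module, which is not part of this record. A minimal stand-in sketch, assuming only the call signature used above (ratio-based split with optional shuffling); names and behavior are assumptions:

# Hypothetical stand-in for misc.utils.split_data; the real helper may differ.
import numpy as np

def split_data(X, Y, ratio=0.75, random=True):
    n = len(X)
    idx = np.random.permutation(n) if random else np.arange(n)
    cut = int(n * ratio)                      # first `ratio` of samples -> train
    train, test = idx[:cut], idx[cut:]
    return (X[train], Y[train]), (X[test], Y[test])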
50574fb0976254f71acedb7d478c899596de33ee
957f28e9296c9e9bb471b0bf178ca8a53aa21b25
/AI - Module/api/retrievers/__init__.py
a430aa52e6d175529f0650aee410ecdad38f5885
[]
no_license
FelipeCardoso89/HackathonOriginal
e222f37b5ae9d647d28acfda08877740fca206e2
42d7915953920cd34d5bac399fdc63e92790f699
refs/heads/master
2021-08-23T05:05:54.076852
2017-12-03T14:22:16
2017-12-03T14:22:16
112,932,437
0
0
null
null
null
null
UTF-8
Python
false
false
95
py
from .userInteractionIntel import UserInteractionIntel

# __all__ must list export names as strings, not the objects themselves.
__all__ = ["UserInteractionIntel"]
[ "holanda.nayana@gmail.com" ]
holanda.nayana@gmail.com
6df326d71e9954918deec914f12cc7826fbf00c5
021d6b2c5c2b5e2e14c3e48838e040421abbf392
/PhD-GPCR/plotDistancesLikeShaw.py
2be9d7abbdedd80be3fb071af282f96a00c61d46
[]
no_license
victor-gil-sepulveda/PhD-GPCR
b6f44b2a0c70e0f8035cd5c8275e9aa3907c58ab
338357cd01241df833fe6a6e248a9a39465fac3c
refs/heads/master
2021-01-21T14:09:02.109178
2016-04-26T19:18:41
2016-04-26T19:18:41
18,520,998
0
0
null
null
null
null
UTF-8
Python
false
false
4,654
py
""" Created on 13/03/2014 @author: victor """ import prody import os.path from pyRMSD.RMSDCalculator import RMSDCalculator import matplotlib import pylab import os from tools import distance, create_dir data = [ { 'dir':'2rh1_refined', 'pdb_traj':'2rh1_refined.pdb', 'selection':{ 'backbone':"backbone and chain A", 'motif_all': "noh resnum 322:327", # Todos heavy 'motif_backbone': "backbone resnum 322:327", # Solo backbone 'arg131_leu272': "ca resnum 131 272", # No hace falta alignment 'ser207':"backbone resnum 207", # Alignment backbone / RMSD backbone 'ser207_OG':"name OG resnum 207", 'lig_OAE':"name OAE", # Unique name in the ligand 'ile121': "backbone resnum 121", 'phe282': "backbone resnum 282" } } ] cwd = os.getcwd() for datum in data: prot_name = datum['dir'] print "========================\nWorking with %s\n========================"%(prot_name) # Look for the directory and enter it base_dir = os.path.join(cwd, prot_name) os.chdir(base_dir) # Create dirs to put results create_dir("plots") create_dir("selections") # Generate all selections pdb = prody.parsePDB("%s"%datum['pdb_traj']) selections = {} for selection in datum['selection']: print selection selections[selection] = pdb.select(datum['selection'][selection]) prody.writePDB(os.path.join("selections",selection), selections[selection]) ############################# # Motif VS Helix Distance ############################# calculator = RMSDCalculator( calculatorType = "QCP_OMP_CALCULATOR", fittingCoordsets = selections['motif_all'].getCoordsets(), calculationCoordsets = selections['motif_backbone'].getCoordsets()) motif_rmsd = calculator.oneVsTheOthers( conformation_number = 0, get_superposed_coordinates = False) residue_distances = [] for conf in selections['arg131_leu272'].getCoordsets(): arg131 = conf[0] leu272 = conf[1] residue_distances.append(distance(arg131, leu272)) exp_motif_rmsd = [0]+list(motif_rmsd) matplotlib.pyplot.scatter(residue_distances, exp_motif_rmsd) matplotlib.pyplot.savefig(os.path.join("plots", "motif_vs_helix_dist.svg")) matplotlib.pyplot.close() ########################################### # Backbone of ser 207 rmsd Vs ser to ligand distance ########################################### calculator = RMSDCalculator( calculatorType = "QCP_OMP_CALCULATOR", fittingCoordsets = selections['backbone'].getCoordsets(), calculationCoordsets = selections['ser207'].getCoordsets()) ser207_rmsd = calculator.oneVsTheOthers( conformation_number = 0, get_superposed_coordinates = False) # Distance from ligand to serine distances = [] lig_coords = selections["lig_OAE"].getCoordsets() ser_coords = selections["ser207_OG"].getCoordsets() for i, ser_OG in enumerate(ser_coords): lig_OAE = lig_coords[i] distances.append(distance(ser_OG[0], lig_OAE[0])) exp_ser_207_rmsd = [0]+list(ser207_rmsd) matplotlib.pyplot.scatter(distances, exp_ser_207_rmsd) matplotlib.pyplot.savefig(os.path.join("plots", "ser207_dist_lig.svg")) matplotlib.pyplot.close() ########################################### # Ile 121 rmsd vs Phe 282 rmsd ########################################### ile121_coordsets = pdb.select(datum['selection']['ile121']) phe282_coordsets = pdb.select(datum['selection']['phe282']) calculator = RMSDCalculator( calculatorType = "QCP_OMP_CALCULATOR", fittingCoordsets = selections['backbone'].getCoordsets(), calculationCoordsets = selections['ile121'].getCoordsets()) ile121_rmsd = calculator.oneVsTheOthers( conformation_number = 0, get_superposed_coordinates = False) calculator = RMSDCalculator( calculatorType = "QCP_OMP_CALCULATOR", 
fittingCoordsets = selections['backbone'].getCoordsets(), calculationCoordsets = selections['phe282'].getCoordsets()) phe282_rmsd = calculator.oneVsTheOthers( conformation_number = 0, get_superposed_coordinates = False) matplotlib.pyplot.scatter(phe282_rmsd, ile121_rmsd) matplotlib.pyplot.savefig(os.path.join("plots", "ile121_vs_phe282.svg")) matplotlib.pyplot.close()
[ "victor.gil.sepulveda@gmail.com" ]
victor.gil.sepulveda@gmail.com
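The helpers distance and create_dir are imported from a project-local tools module that is not part of this record. A minimal sketch under the assumption that distance is plain Euclidean distance and create_dir is mkdir-if-missing; the real module may differ:

# Hypothetical tools.py stand-ins.
import os
import numpy as np

def distance(a, b):
    # Euclidean distance between two 3D points
    return np.linalg.norm(np.array(a) - np.array(b))

def create_dir(path):
    # Create the directory only if it does not already exist
    if not os.path.exists(path):
        os.makedirs(path)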
e8cc4a779f0748f11f893f138e6faeb4aace28ab
0897e2254eeac5458e4f17caea43871dd72fda9a
/great_expectations/expectations/core/expect_column_values_to_match_regex_list.py
a337613795d3cc82a329be40ed1c5a3f6b35b3eb
[ "Apache-2.0" ]
permissive
NulledExceptions/great_expectations
10f7710d7520f0b57ded539ce33075ddeb47856d
8704c3ecf6632a72c55e012ed117d56f85a21f74
refs/heads/main
2023-09-05T19:30:32.910045
2021-11-18T20:50:42
2021-11-18T20:50:42
431,229,866
1
0
Apache-2.0
2021-11-23T19:35:34
2021-11-23T19:35:34
null
UTF-8
Python
false
false
11,214
py
from typing import Optional from great_expectations.core.expectation_configuration import ExpectationConfiguration from great_expectations.expectations.util import ( add_values_with_json_schema_from_list_in_params, render_evaluation_parameter_string, ) from ...render.renderer.renderer import renderer from ...render.types import RenderedStringTemplateContent from ...render.util import ( num_to_str, parse_row_condition_string_pandas_engine, substitute_none_for_missing, ) from ..expectation import ColumnMapExpectation, InvalidExpectationConfigurationError class ExpectColumnValuesToMatchRegexList(ColumnMapExpectation): """Expect the column entries to be strings that can be matched to either any of or all of a list of regular expressions. Matches can be anywhere in the string. expect_column_values_to_match_regex_list is a \ :func:`column_map_expectation <great_expectations.execution_engine.execution_engine.MetaExecutionEngine .column_map_expectation>`. Args: column (str): \ The column name. regex_list (list): \ The list of regular expressions which the column entries should match Keyword Args: match_on= (string): \ "any" or "all". Use "any" if the value should match at least one regular expression in the list. Use "all" if it should match each regular expression in the list. mostly (None or a float between 0 and 1): \ Return `"success": True` if at least mostly fraction of values match the expectation. \ For more detail, see :ref:`mostly`. Other Parameters: result_format (str or None): \ Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`. include_config (boolean): \ If True, then include the expectation config as part of the result object. \ For more detail, see :ref:`include_config`. catch_exceptions (boolean or None): \ If True, then catch exceptions and include them as part of the result object. \ For more detail, see :ref:`catch_exceptions`. meta (dict or None): \ A JSON-serializable dictionary (nesting allowed) that will be included in the output without \ modification. For more detail, see :ref:`meta`. Returns: An ExpectationSuiteValidationResult Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`. 
See Also: :func:`expect_column_values_to_match_regex \ <great_expectations.execution_engine.execution_engine.ExecutionEngine.expect_column_values_to_match_regex>` :func:`expect_column_values_to_not_match_regex \ <great_expectations.execution_engine.execution_engine.ExecutionEngine .expect_column_values_to_not_match_regex>` """ library_metadata = { "maturity": "production", "package": "great_expectations", "tags": ["core expectation", "column map expectation"], "contributors": [ "@great_expectations", ], "requirements": [], } map_metric = "column_values.match_regex_list" success_keys = ( "regex_list", "match_on", "mostly", ) default_kwarg_values = { "row_condition": None, "condition_parser": None, # we expect this to be explicitly set whenever a row_condition is passed "mostly": 1, "result_format": "BASIC", "include_config": True, "catch_exceptions": True, } def validate_configuration(self, configuration: Optional[ExpectationConfiguration]): super().validate_configuration(configuration) if configuration is None: configuration = self.configuration try: assert "regex_list" in configuration.kwargs, "regex_list is required" assert isinstance( configuration.kwargs["regex_list"], (list, dict) ), "regex_list must be a list of regexes" if ( not isinstance(configuration.kwargs["regex_list"], dict) and len(configuration.kwargs["regex_list"]) > 0 ): for i in configuration.kwargs["regex_list"]: assert isinstance(i, str), "regexes in list must be strings" if isinstance(configuration.kwargs["regex_list"], dict): assert ( "$PARAMETER" in configuration.kwargs["regex_list"] ), 'Evaluation Parameter dict for regex_list kwarg must have "$PARAMETER" key.' except AssertionError as e: raise InvalidExpectationConfigurationError(str(e)) return True @classmethod def _atomic_prescriptive_template( cls, configuration=None, result=None, language=None, runtime_configuration=None, **kwargs, ): runtime_configuration = runtime_configuration or {} include_column_name = runtime_configuration.get("include_column_name", True) include_column_name = ( include_column_name if include_column_name is not None else True ) styling = runtime_configuration.get("styling") params = substitute_none_for_missing( configuration.kwargs, [ "column", "regex_list", "mostly", "match_on", "row_condition", "condition_parser", ], ) params_with_json_schema = { "column": {"schema": {"type": "string"}, "value": params.get("column")}, "regex_list": { "schema": {"type": "array"}, "value": params.get("regex_list"), }, "mostly": {"schema": {"type": "number"}, "value": params.get("mostly")}, "match_on": {"schema": {"type": "string"}, "value": params.get("match_on")}, "row_condition": { "schema": {"type": "string"}, "value": params.get("row_condition"), }, "condition_parser": { "schema": {"type": "string"}, "value": params.get("condition_parser"), }, } if not params.get("regex_list") or len(params.get("regex_list")) == 0: values_string = "[ ]" else: for i, v in enumerate(params["regex_list"]): params["v__" + str(i)] = v values_string = " ".join( ["$v__" + str(i) for i, v in enumerate(params["regex_list"])] ) if params.get("match_on") == "all": template_str = ( "values must match all of the following regular expressions: " + values_string ) else: template_str = ( "values must match any of the following regular expressions: " + values_string ) if params["mostly"] is not None: params["mostly_pct"] = num_to_str( params["mostly"] * 100, precision=15, no_scientific=True ) # params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".") 
template_str += ", at least $mostly_pct % of the time." else: template_str += "." if include_column_name: template_str = "$column " + template_str if params["row_condition"] is not None: ( conditional_template_str, conditional_params, ) = parse_row_condition_string_pandas_engine( params["row_condition"], with_schema=True ) template_str = conditional_template_str + ", then " + template_str params_with_json_schema.update(conditional_params) params_with_json_schema = add_values_with_json_schema_from_list_in_params( params=params, params_with_json_schema=params_with_json_schema, param_key_with_list="regex_list", ) return (template_str, params_with_json_schema, styling) @classmethod @renderer(renderer_type="renderer.prescriptive") @render_evaluation_parameter_string def _prescriptive_renderer( cls, configuration=None, result=None, language=None, runtime_configuration=None, **kwargs, ): runtime_configuration = runtime_configuration or {} include_column_name = runtime_configuration.get("include_column_name", True) include_column_name = ( include_column_name if include_column_name is not None else True ) styling = runtime_configuration.get("styling") params = substitute_none_for_missing( configuration.kwargs, [ "column", "regex_list", "mostly", "match_on", "row_condition", "condition_parser", ], ) if not params.get("regex_list") or len(params.get("regex_list")) == 0: values_string = "[ ]" else: for i, v in enumerate(params["regex_list"]): params["v__" + str(i)] = v values_string = " ".join( ["$v__" + str(i) for i, v in enumerate(params["regex_list"])] ) if params.get("match_on") == "all": template_str = ( "values must match all of the following regular expressions: " + values_string ) else: template_str = ( "values must match any of the following regular expressions: " + values_string ) if params["mostly"] is not None: params["mostly_pct"] = num_to_str( params["mostly"] * 100, precision=15, no_scientific=True ) # params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".") template_str += ", at least $mostly_pct % of the time." else: template_str += "." if include_column_name: template_str = "$column " + template_str if params["row_condition"] is not None: ( conditional_template_str, conditional_params, ) = parse_row_condition_string_pandas_engine(params["row_condition"]) template_str = conditional_template_str + ", then " + template_str params.update(conditional_params) return [ RenderedStringTemplateContent( **{ "content_block_type": "string_template", "string_template": { "template": template_str, "params": params, "styling": styling, }, } ) ]
[ "noreply@github.com" ]
NulledExceptions.noreply@github.com
a49cd1fe7b46fdd509083cfbf90268e706b9a9f7
4b2df15d4943e71e07d9d75fadc4f82b61853276
/dog_image_classifier_project/test_classifier.py
0b3c2a28a95f21f203b039c8ddcee7e18304afd6
[]
no_license
suryakant1698/machineLearningProject
da76801e198afdd8593105a9de1814387003312b
547591a29dbcc8fd0cccce56e78528527154e5fe
refs/heads/master
2020-04-14T06:41:19.101947
2019-03-18T08:18:07
2019-03-18T08:18:07
163,693,104
0
0
null
null
null
null
UTF-8
Python
false
false
1,760
py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# */AIPND-revision/intropyproject-classify-pet-images/test_classifier.py
#
# PROGRAMMER: Jennifer S.
# DATE CREATED: 01/30/2018
# REVISED DATE:             <=(Date Revised - if any)
# PURPOSE: To demonstrate the proper usage of the classifier() function that
#          is defined in classifier.py. This function uses a CNN model
#          architecture that has been pretrained on the ImageNet data to
#          classify images. The only model architectures that this function
#          will accept are: 'resnet', 'alexnet', and 'vgg'. See the example
#          usage below.
#
# Usage: python test_classifier.py    -- will run program from commandline

# Imports classifier function for using pretrained CNN to classify images
from classifier import classifier

# Defines a dog test image from pet_images folder
test_image = "pet_images/mangu.jpg"

# Defines a model architecture to be used for classification
# NOTE: this function only works for model architectures:
#       'vgg', 'alexnet', 'resnet'
model = "vgg"

# Demonstrates classifier() function's usage
# NOTE: image_classification is a text string - it contains mixed-case (both
# lower and upper case letters) image labels that can be separated by commas
# when more than one word can describe a label.
image_classification = classifier(test_image, model)

# prints result from running classifier() function
print("\nResults from test_classifier.py\nImage:", test_image,
      "using model:", model, "was classified as a:", image_classification)
[ "suryakant.rocky@gmail.com" ]
suryakant.rocky@gmail.com
2d79e4e554bf3b88d7a570df54378fe1bf1994f3
83bbb67d205b6488c5c86a9ff5479420994b68ac
/tzq_cvae/dataset.py
e2f43a7eefc86c09c28db556a2197cc1622bf69c
[ "MIT" ]
permissive
enhuiz/cvae
7c74f78af7d3695676eed338b9dcbe85b0dcc7b1
8ccfd12df37996f280ae95302caecc7c7a967b44
refs/heads/main
2023-08-27T22:43:16.718363
2021-09-08T05:21:45
2021-09-08T05:21:45
403,652,796
0
0
null
null
null
null
UTF-8
Python
false
false
1,346
py
import numpy as np
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision import transforms
from functools import cached_property


class MaskedMNIST(MNIST):
    def __init__(self, root, num_masks=3, **kwargs):
        self.num_masks = num_masks
        kwargs.setdefault("transform", transforms.ToTensor())
        kwargs.setdefault("download", True)
        super().__init__(root, **kwargs)

    @cached_property
    def _quads(self):
        return [
            (slice(None), slice(i, i + 14), slice(j, j + 14))
            for i in [0, 14]
            for j in [0, 14]
        ]

    def _sample_quads(self):
        indices = np.random.choice(4, self.num_masks, False)
        return [self._quads[i] for i in indices]

    def __getitem__(self, index):
        x, _ = super().__getitem__(index)
        y = x.clone()
        quads = self._sample_quads()
        for quad in quads:
            x[quad] = -1
        return x, y

    def as_dataloader(self, *args, **kwargs):
        return DataLoader(self, *args, **kwargs)


if __name__ == "__main__":
    import matplotlib.pyplot as plt

    dataset = MaskedMNIST("data/mnist", download=True)
    x, y = dataset[0]
    print(x.shape)
    plt.subplot(121)
    plt.imshow(x[0])
    plt.subplot(122)
    plt.imshow(y[0])
    plt.savefig("test.png")
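# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# `as_dataloader` forwards straight to torch's DataLoader, so the usual batching
# arguments apply; batch_size=32 is an arbitrary choice made here.
if __name__ == "__main__":
    loader = MaskedMNIST("data/mnist").as_dataloader(batch_size=32, shuffle=True)
    x_batch, y_batch = next(iter(loader))  # x_batch: masked inputs, y_batch: clean targets
    print(x_batch.shape, y_batch.shape)    # torch.Size([32, 1, 28, 28]) for both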
[ "niuzhe.nz@outlook.com" ]
niuzhe.nz@outlook.com
52fa8b03611d4b9c943a75dd51236e5aa36ec012
165db92b0b84dd26e3d66b1602a5285e8679c99b
/pset6-python/Mario/mario_less.py
7c30cacbd5f6cfe46167973ff3db800a00503181
[]
no_license
BSanandu88/CS50x
8acee66d8c153293b46917e8f63df110bb2816df
044964ac0ab8afe813b84f2612c6a662c554b925
refs/heads/master
2022-11-21T13:33:56.396233
2020-07-18T04:35:20
2020-07-18T04:35:20
263,544,391
1
0
null
null
null
null
UTF-8
Python
false
false
326
py
from cs50 import get_int


def main():
    while True:
        height = get_int("Height:")
        width = height + 1
        if height >= 0 and height <= 23:
            break
    for i in range(1, height + 1):
        hashes = i + 1
        spaces = width - hashes
        print(" " * spaces, end="")
        print("#" * hashes)


if __name__ == "__main__":
    main()
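# --- Worked example (added for illustration; not part of the original submission) ---
# With the logic above, height 3 gives width = 4 and rows of i + 1 hashes:
#   height=3 ->  "  ##"
#                " ###"
#                "####"
# (the pyramid starts at two hashes because hashes = i + 1)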
[ "noreply@github.com" ]
BSanandu88.noreply@github.com
20331f88d9ab903e1970463cb154cfaa0c020812
ef8c023091909332a8d592446040b780febe7958
/main.py
39a9ada2c9f7fe9025b2302def89a878307e1702
[]
no_license
tungpun/php-functions
128706fb79a9ddac9a22288b7209f8a43b70b117
e7704f6dec52af67515bca2d543ed07128904370
refs/heads/master
2021-01-01T19:40:14.512003
2017-07-28T10:42:05
2017-07-28T10:42:05
98,639,789
0
0
null
null
null
null
UTF-8
Python
false
false
425
py
#!/usr/bin/python 2.7

import requests
from bs4 import BeautifulSoup

if __name__ == '__main__':
    url = "http://php.net/manual/en/indexes.functions.php"
    res = requests.get(url)
    html = res.text
    # parser named explicitly; BeautifulSoup warns when left to guess one
    soup = BeautifulSoup(html, "html.parser")
    li_s = soup.find_all("li")
    lines = soup.find_all("a", {"class": "index"})
    with open('functions.txt', 'w') as f:
        for line in lines:
            f.write(line.text + "\n")
[ "nguyenhuutung@punmac.local" ]
nguyenhuutung@punmac.local
d2b457d289866a682eca245ac46d0b8f83ab0bc6
8ef2194ca22b59e86db1d00c67bcb4aabda102b4
/stackfinder/cli.py
ebfd8327ca8a122d1b944f409d6e5e51eeb1fe90
[ "Apache-2.0" ]
permissive
planetlabs/planet_stack_finder
79438b93ffe4a8e4b68d156240bf4fb4df4b25e7
62cf704b2c178a5f08b14164fbf5cf84550a0a46
refs/heads/master
2021-01-21T06:05:29.545844
2016-03-17T00:09:00
2016-03-17T00:09:00
38,714,586
7
2
null
2016-03-16T23:46:26
2015-07-07T21:00:16
Python
UTF-8
Python
false
false
1,392
py
"""cli interface for stack finding """ import click from stackfinder import findstacks import json @click.command("find-stacks") @click.argument('metadata', default='-', required=False, nargs=1) @click.option('--index', default=0, help='zero indexed stack number ordered by decreasing number of objects in cluster') @click.pass_context def find_stacks(ctx, metadata, index): """ Input is a list of geojson dictionaries. Each dictionary in the list must have keys ['geometry']['coordinates'] or ['coordinates'] e.g. find the deepest stack in a set of planet labs images cat path/to/file.geojson | planet search | find-stacks """ if metadata == '-': src = click.open_file('-') if not src.isatty(): data = src.read() else: click.echo(ctx.get_usage()) ctx.exit(1) else: with open(metadata, 'r') as src: data = src.read() geojson = json.loads(data) scenes_md = [] for i in geojson['features']: scenes_md.append(i) stacks, stack_centers = findstacks(scenes_md, min_depth=2, max_sep_km=2) if len(stacks) < index+1: click.echo("No Stack of that index") exit() # create a feature collection from the stacks FC = { "type": "FeatureCollection", "features": stacks[index] } click.echo(json.dumps(FC))
[ "joshtennefoss@gmail.com" ]
joshtennefoss@gmail.com
55c189bd6dec4acfe1ef58b0fe10bb3a2fa0f677
d490c6408851678b4346ca6ec8dc08d3547b88d3
/Jessie Smith/assignments/ninjagold/server.py
b0943f2c5410a7436e5a92e33d57f4c91079a31d
[ "MIT" ]
permissive
authman/Python201609
e0852163f35d5f6e7e14a8464c4fa92e3d36e9de
6f40c82da395de44de2faca1272b6fe72b99721a
refs/heads/master
2020-09-19T13:26:08.162643
2016-10-06T14:24:27
2016-10-06T14:24:27
67,229,446
0
12
null
2016-10-06T14:24:28
2016-09-02T14:29:17
Python
UTF-8
Python
false
false
1,459
py
from flask import Flask, render_template, request, redirect, session
app = Flask(__name__)
import random
app.secret_key = "ThisIsSecret"


@app.route('/')
def ninjamoney():
    # membership tests replace the Python-2-only dict.has_key()
    if 'count' not in session:
        session['count'] = 0
    if 'message' not in session:
        session['message'] = []
    if 'num' not in session:
        session['num'] = 0
    return render_template("index.html", num=session['num'], count=session['count'],
                           message=reversed(session['message']))


@app.route('/process_money', methods=['POST'])
def process_money():
    if request.form['building'] == 'farm':
        session['num'] = random.randrange(10, 21)
        session['count'] += session['num']
        session['message'].append("You earned " + str(session['num']) + " golds from the farm!")
    elif request.form['building'] == 'cave':
        session['num'] = random.randrange(5, 11)
        session['count'] += session['num']
        session['message'].append("You earned " + str(session['num']) + " golds from the cave!")
    elif request.form['building'] == 'house':
        session['num'] = random.randrange(2, 6)
        session['count'] += session['num']
        session['message'].append("You earned " + str(session['num']) + " golds from the house!")
    elif request.form['building'] == 'casino':
        session['num'] = random.randrange(-51, 51)
        session['count'] += session['num']
        session['message'].append("You earned " + str(session['num']) + " golds from the casino!")
    return redirect('/')


app.run(debug=True)
[ "jessie.smith7543@gmail.com" ]
jessie.smith7543@gmail.com
87872b8b2da163f6bf90bd3e8f26052b225ac28f
379e61bb7708a9f54c50fb7590b28de43e002b76
/variables/greetings_cp3.py
2f11d89db42c5c2b3b810181209db03d46a69988
[]
no_license
JAL-code/python_work
b377fe0328b02b7f36d95570a3d6ed2166d0428b
213f10c981ce94e73e56c76230f624b05e06fddd
refs/heads/main
2023-04-21T11:25:28.574958
2021-05-13T15:29:27
2021-05-13T15:29:27
348,765,295
0
0
null
2021-03-17T17:21:56
2021-03-17T15:46:50
Python
UTF-8
Python
false
false
318
py
# Python Crash Course, Eric Matthes, Chapter 3
# Custom names
my_friends = ['Bob', 'Billy', 'Silly', 'Dot', 'Betty', 'Bailey']
print(f"Hello {my_friends[0]}")
print(f"Hello {my_friends[1]}")
print(f"Hello {my_friends[2]}")
print(f"Hello {my_friends[3]}")
print(f"Hello {my_friends[4]}")
print(f"Hello {my_friends[5]}")
[ "jleffek@live.com" ]
jleffek@live.com
8dff07ab392aba5786ff030694a2bd24cbb14a79
8118f2538d3cbaf254cf21626be6ad73bff43d74
/fastapi-example-type-api/db.py
6f5be30b1d46692e4d9ca3a50b0e320a3931a0bc
[]
no_license
mrchoke/ui-api-datatype-exmaple
4522d52bac33dc875c1e03a492b51a3e6a8a9edf
b48a1a65f615a94014616d02cd3c9ddaef509f25
refs/heads/main
2023-01-23T00:01:13.630523
2020-11-23T01:38:21
2020-11-23T01:38:21
315,074,516
1
0
null
null
null
null
UTF-8
Python
false
false
576
py
import databases
import sqlalchemy

# DATABASE_URL = "mysql://user:passwd@server/db?charset=utf8mb4"
DATABASE_URL = "sqlite:///./todos.db"

database = databases.Database(DATABASE_URL)
metadata = sqlalchemy.MetaData()

notes = sqlalchemy.Table(
    "notes",
    metadata,
    sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
    sqlalchemy.Column("text", sqlalchemy.String(255)),
    sqlalchemy.Column("completed", sqlalchemy.Boolean),
)

engine = sqlalchemy.create_engine(
    DATABASE_URL, connect_args={"check_same_thread": False}
)
metadata.create_all(engine)
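# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# The `databases` library is async; a FastAPI route (or a plain asyncio script,
# as below) would use the table like this. The demo values are invented here;
# only `database` and `notes` come from the module above.
if __name__ == "__main__":
    import asyncio

    async def demo():
        await database.connect()
        await database.execute(query=notes.insert(),
                               values={"text": "buy milk", "completed": False})
        rows = await database.fetch_all(query=notes.select())
        print([dict(row) for row in rows])
        await database.disconnect()

    asyncio.run(demo())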
[ "mrchoke@gmail.com" ]
mrchoke@gmail.com
39bc28da239af8f974569246cabb713160001af2
7e67fcbc6f674ad59c5f94c7284120df092033c6
/privex/cspgen/builder.py
dcebd8f187ebc7c4bc11b42ae81b5528da903166
[ "MIT", "X11" ]
permissive
Privex/cspgen
4460d6423b8c6ba087a9f62ad7d584454b7d89f5
3502bc9f2300b9133afeabdaf26fce86dd8cd043
refs/heads/master
2023-02-13T13:36:23.949517
2021-01-13T05:16:50
2021-01-13T05:16:50
329,161,116
0
0
null
null
null
null
UTF-8
Python
false
false
14,103
py
#!/usr/bin/env python3 """ +===================================================+ | © 2021 Privex Inc. | | https://www.privex.io | +===================================================+ | | | CSPGen - Python Content Sec Policy Generator | | License: X11/MIT | | | | Core Developer(s): | | | | (+) Chris (@someguy123) [Privex] | | | +===================================================+ CSPGen - A Python tool for generating Content Security Policies without constantly repeating yourself. Copyright (c) 2021 Privex Inc. ( https://www.privex.io ) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Except as contained in this notice, the name(s) of the above copyright holders shall not be used in advertising or otherwise to promote the sale, use or other dealings in this Software without prior written authorization. """ import configparser import sys import textwrap from colorama import Fore from os import getenv as env from privex.helpers import empty, empty_if, is_true, is_false, env_bool, T, K, ErrHelpParser from privex.cspgen import version from privex.cspgen.helpers import automark_str, clean_dict, dedup, literal, read_stdin oprint = print from rich import print from pathlib import Path import logging import argparse from privex.loghelper import LogHelper from typing import Union, Optional, List, Tuple, Dict, Set __all__ = [ 'CSPBuilder', 'get_builder', 'main', 'parser', 'log_level', 'PKG_DIR', 'EXAMPLE_DIR', 'EXAMPLE_INI' ] PKG_DIR = Path(__file__).parent.resolve() EXAMPLE_DIR = PKG_DIR / 'examples' EXAMPLE_INI = EXAMPLE_DIR / 'example.ini' log_level = env('LOG_LEVEL', 'WARNING') _lh = LogHelper('privex.cspgen', handler_level=logging.getLevelName(log_level)) _lh.add_console_handler(stream=sys.stderr) log = _lh.get_logger() argc, argv = len(sys.argv), sys.argv class CSPBuilder: def __init__(self, filename: str = None, file_handle = None, contents: Union[str, list, tuple] = None, **kwargs): self.config = configparser.ConfigParser() self.conf_file = None if not empty(filename): self.conf_file = Path(filename).resolve() self.config.read(self.conf_file) elif file_handle is not None: self.config.read_file(file_handle) elif not empty(contents, itr=True): if isinstance(contents, (tuple, list)): contents = "\n".join(contents) self.config.read_string(contents) else: raise ValueError( "CSPBuilder expects either a filename, file handle (open()), or config string " "contents to be passed. All 3 are None / empty. Nothing to parse." 
) self.groups = {} self.config_dict = {} self.flags = '' self.excluded = kwargs.get('excluded', ['flags', 'groups', 'DEFAULT']) self.cleaned = False # self.section_split = kwargs.get('section_split', ': ') self.section_split = kwargs.get('section_split', ' ') @property def sections(self) -> list: return self.config.sections() @property def clean_sections(self) -> list: return [s for s in self.sections if s not in self.excluded] def clean(self): # First we extract 'groups' from the config, replace it's {{markers}}, and then deduplicate all group values if 'groups' in self.sections: self.groups = clean_dict(self.config['groups']) groups = self.groups # Next we iterate over the Config object and extract all sections into a standard dict config_dict = self.config_dict for k, v in self.config.items(): v: configparser.SectionProxy sec_items = dict(v.items()) config_dict[k] = sec_items # Remove auto-added 'DEFAULT' (not used), and 'groups' (already parsed and extracted into self.groups) if 'DEFAULT' in config_dict: del config_dict['DEFAULT'] if 'groups' in config_dict: del config_dict['groups'] # Extract 'flags' if present in the config, replace {{markers}}, and deduplicate it's contents. cflags = '' if 'flags' not in config_dict else config_dict['flags']['flags'] cflags = dedup(automark_str(cflags, groups)) # Then we can simply remove 'flags' from the config dict if 'flags' in config_dict: del config_dict['flags'] # Finally we make sure all local variables are saved back to their appropriate instance attributes self.config_dict = {k: clean_dict(v, groups) for k, v in config_dict.items()} self.flags = cflags self.groups = groups self.cleaned = True return self def autoclean(self): if self.cleaned: return True return self.clean() def str_section(self, name: str): self.autoclean() sec = self.config_dict.get(name, None) if not sec: return None s = f"{name}{self.section_split}{sec.get('zones', '')}" if is_true(sec.get('unsafe-eval', False)): s += " 'unsafe-eval'" if is_true(sec.get('unsafe-inline', False)): s += " 'unsafe-inline'" s += ';' return s def generate(self, output='list', sep=' ', **kwargs): secs = [self.str_section(s) for s in self.clean_sections] secd = dict(zip(self.clean_sections, secs)) secd['flags'] = [s + ';' for s in self.flags.split()] secs += secd['flags'] output = output.lower() if output == 'list': return secs if output == 'tuple': return tuple(secs) if output in ['dict', 'dictionary', 'kv', 'keyval', 'map', 'mapping']: return secd if output in ['str', 'string']: sj = sep.join(secs) if not sj.endswith(';'): sj += ';' return sj raise ValueError(f"Supported: (str, string, list, tuple). 
Unsupported output type: {output}") def __str__(self): return self.generate(output='str') def __iter__(self): yield from self.generate(output='list') def __len__(self): return len(self.generate(output='list')) def __getitem__(self, item:str): self.autoclean() if item in self.sections: return self.config_dict[item] gend = self.generate(output='dict') if item in gend: return gend[item] if item in self.groups: return self.groups[item] raise KeyError(f"Item {item!r} not found in config sections, generated sections, or group keys...") def get_builder(name: str = None, file_handle = None, contents: Union[str, list, tuple] = None, **kwargs) -> CSPBuilder: if empty(name) and file_handle is None and empty(contents, itr=True): name = argv[1] return CSPBuilder(name, file_handle, contents, **kwargs) COPYRIGHT = f""" {Fore.GREEN}Content Security Policy (CSP) Generator{Fore.RESET} {Fore.CYAN}Version: v{version.VERSION} Github: https://github.com/Privex/cspgen License: X11 / MIT (C) 2021 Privex Inc. ( https://www.privex.io ){Fore.RESET} """ parser = ErrHelpParser( formatter_class=argparse.RawDescriptionHelpFormatter, epilog=textwrap.dedent(f""" {COPYRIGHT} {Fore.YELLOW}Generates CSP's based off of one or more INI files, with each CSP "type" (default-src, style-src, etc.) as an INI header, a 'zones' key in each type, containing the various domains you want to allow, 'unsafe-eval = true' / 'unsafe-inline = false' for more clear enabling/disabling unsafe-eval and unsafe-inline per "type", and two special headers:{Fore.RESET} {Fore.BLUE} 'groups' - Groups of variables that can be used in each type's 'zones = ' key, AND can also include other group names (as long as the included vars are defined higher up, and doesn't include the var including it). 'flags' - Contains "flags", which are CSP strings that standalone, such as 'upgrade-insecure-requests', instead of being a key with zones as a value. {Fore.RESET} {Fore.GREEN}Example INI file:{Fore.RESET}""" + """ [groups] # First we define cdn, onions, and i2p cdn = https://cdn.privex.io cdn.privex.i2p files.privex.i2p files.privex.io https://www.privex.io onions = privex3guvvasyer6pxz2fqcgy56auvw5egkir6ykwpptferdcb5toad.onion privexqvhkwdsdnjofrsm7reaixclmzpbpveefiu4uctfm2l4mycnwad.onion i2p = privex.i2p www.privex.i2p pay.privex.i2p # Now we can add our main websites, PLUS the onions, and i2p variables websites = https://www.privex.io https://pay.privex.io https://privex.io {{onions}} {{i2p}} # While defaultsrc will contain 'self' + websites + cdn defaultsrc = 'self' {{websites}} {{cdn}} images = https://i.imgur.com https://ipfs.io https://cloudflare-ipfs.com video = https://youtube.com https://vimeo.com media = {{video}} {{images}} [default-src] # For default-src, we can simply set zones to use the defaultsrc var zones = {{defaultsrc}} # Enable unsafe-inline and disable unsafe-eval for default-src unsafe-inline = true unsafe-eval = false [img-src] zones = {{defaultsrc}} {{images}} {{trustpilot}} [media-src] zones = {{defaultsrc}} {{media}} [flags] # Special header 'flags'. We can set the independent CSP flag 'upgrade-insecure-requests' here. flags = upgrade-insecure-requests """ + f""" {Fore.GREEN}End of config{Fore.RESET} """), ) def read_example_file() -> Tuple[str, Path]: with open(EXAMPLE_INI, 'r') as fh: data = fh.read() return data, EXAMPLE_INI parser.add_argument('--section-sep', type=str, default=' ', dest='section_sep', help="Separator between each CSP section (default-src, media-src, img-src etc.) 
- Textual \\n, \\r, and \\t will " "be auto-converted into the literal characters for newline/carriage return/tab") parser.add_argument('--file-sep', type=str, default='\n\n', dest='file_sep', help="Separator used between each file's config output") parser.add_argument('--version', '-V', action='store_true', default=False, dest='show_version', help="Show version + copyright info") parser.add_argument('--verbose', '-v', action='store_true', default=False, dest='verbose_mode', help="Verbose Mode - Show DEBUG logs") parser.add_argument('--example', '-E', action='store_true', default=False, dest='show_example', help="Output the template example.ini to STDOUT for use as a CSP INI config template") parser.add_argument('filenames', nargs='*', default=[], help="One or more INI files to parse into CSP configs") def main(): global log try: vargs = parser.parse_args() except Exception as e: parser.error(f"{type(e)} - {str(e)}") return sys.exit(1) if vargs.verbose_mode: _lh2 = LogHelper('privex.cspgen', handler_level=logging.DEBUG) _lh2.add_console_handler(stream=sys.stderr) log = _lh2.get_logger() log.debug(f"parser args: {vargs!r}") if vargs.show_version: oprint(COPYRIGHT) return COPYRIGHT if vargs.show_example: exfile, expath = read_example_file() exnote = "#####", "#", "# Privex CSPGen example.ini file", f"# Original Location within Python Package: {expath}", "#", "#####\n" oprint(*exnote, exfile, *exnote, sep="\n") return sys.exit(0) filenames = vargs.filenames file_sep, sec_sep = literal(vargs.file_sep), literal(vargs.section_sep) str_secs = [] list_secs = [] if empty(filenames, itr=True): if sys.stdin.isatty(): parser.error("No filenames specified, and no data piped to stdin") return sys.exit(1) log.debug("Assuming config piped via STDIN. Reading config from stdin.") confd = read_stdin() builder = get_builder(contents=confd) str_secs += [builder.generate('string', sep=sec_sep)] list_secs += [builder.generate('list')] else: for fn in filenames: if fn in ['-', '/dev/stdin', 'STDIN']: log.debug("Assuming config piped via STDIN. Reading config from stdin.") builder = get_builder(contents=read_stdin()) else: builder = get_builder(fn) str_secs += [builder.generate('string', sep=sec_sep)] list_secs += [builder.generate('list')] # oprint('file_sep: ', repr(file_sep)) # oprint('sec_sep: ', repr(sec_sep)) oprint(file_sep.join(str_secs)) return list_secs, str_secs if __name__ == '__main__': main()
[ "info@someguy123.com" ]
info@someguy123.com
89f0820e9acb321370a83a7a3fe8fd4ce48618d2
cf7e903ee2b0af7cd13e6f81efd7816679bd2b7c
/duplicates.py
12ccef17d2a33fb4d501b59df688bcd692a255d4
[]
no_license
uggi121/leetcode
cd2ae44e9e848ff779b167e1526ece832b244c29
577382d86e6482324e312fac423e4be3098099ef
refs/heads/master
2020-07-10T16:08:39.466474
2019-10-29T06:56:17
2019-10-29T06:56:17
204,307,451
0
0
null
null
null
null
UTF-8
Python
false
false
489
py
class Solution(object):
    def removeDuplicates(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        if len(nums) <= 1:
            return len(nums)

        value = None
        insert = 0
        for i in range(len(nums)):
            if nums[i] != value:
                value = nums[i]
                nums[insert] = value
                insert += 1
        return insert
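# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# The pass above compacts a *sorted* list in place and returns the new logical
# length; a quick self-check, assuming the class above:
if __name__ == "__main__":
    nums = [0, 0, 1, 1, 1, 2, 2, 3, 3, 4]
    k = Solution().removeDuplicates(nums)
    print(k, nums[:k])  # expected: 5 [0, 1, 2, 3, 4]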
[ "sudharshanmadhavan@outlook.com" ]
sudharshanmadhavan@outlook.com
8c95e060bc23fd925224c79fd04682aad6794b04
5fcfaabd985b010074b3b078b3d80075d7487767
/nbeam/handlers.py
039394aecc5cebf850f1b6197f8054bb59ca6f7d
[]
no_license
AnyBucket/Neutron-Beam
4071df02857f42fe2dd61abc70acc3802db7f639
3efb46511faa53615517a9314b198c5236012f67
refs/heads/master
2020-12-24T22:28:51.337431
2013-10-05T19:23:51
2013-10-05T19:23:51
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,183
py
import os
import re
import base64
import json

from tornado.web import RequestHandler, StaticFileHandler, HTTPError
from tornado.escape import json_encode, json_decode

from .SimpleAES import SimpleAES
from .views import list_dir, open_file, save_file, rename_file, delete, new_file, new_dir, upload_file, new_url, token, token_valid, create_realtime, stop_realtime
from .views_search import start_search, start_replace, job_status, cancel_job
from .version import VERSION_STRING


class StaticHandler (StaticFileHandler):
    def get (self, path, include_body=True):
        reobj = re.search('/(\S+)/public/(.*)', self.request.path)
        token = reobj.group(1)

        if token_valid(token):
            return super(StaticHandler, self).get(path, include_body)

        raise HTTPError(404)


class MainHandler (RequestHandler):
    def __init__ (self, *args, **kwargs):
        self.config = args[0].config
        self.aes = SimpleAES(args[0].config['key'])

        self.ALLOWED_TASKS = {
            'list': list_dir,
            'open': open_file,
            'save': save_file,
            'rename': rename_file,
            'delete': delete,
            'newfile': new_file,
            'newdir': new_dir,
            'upload': upload_file,
            'newurl': new_url,
            'search': start_search,
            'replace': start_replace,
            'jobstatus': job_status,
            'canceljob': cancel_job,
            'token': token,
            'createRealtime': create_realtime,
            'stopRealtime': stop_realtime,
        }

        super(MainHandler, self).__init__(*args, **kwargs)

    def valid_request (self, rdata):
        if rdata['task'] in self.ALLOWED_TASKS:
            if rdata['email'].lower() == self.config['email'].lower():
                path = None
                if 'file' in rdata:
                    path = rdata['file']
                elif 'dir' in rdata:
                    path = rdata['dir']

                if path:
                    path = os.path.normpath(self.config['dir'] + path)
                    if path.startswith(self.config['dir']):
                        return True

                return True
        return False

    def options (self):
        self.set_header('Access-Control-Allow-Origin', '*')
        self.set_header('Access-Control-Allow-Headers', 'X-CSRFToken')
        self.set_header('Access-Control-Allow-Methods', 'POST, OPTIONS')

    def post (self):
        data = {'status': 'Invalid Request'}
        self.set_header('Content-Type', 'application/json')
        self.set_header('Access-Control-Allow-Origin', '*')

        rdata = self.get_argument("request", '')
        try:
            test = base64.decodestring(rdata)
            rdata = self.aes.decrypt(rdata)
            rdata = json_decode(rdata)
        except:
            pass
        else:
            if self.valid_request(rdata):
                rdata['REMOTE_IP'] = self.request.remote_ip
                response_data = self.ALLOWED_TASKS[rdata['task']](self.config, rdata)
                data = {
                    'response': response_data,
                    'email': self.config['email'],
                }
                j = json_encode(data)
                data = {
                    'encrypted': self.aes.encrypt(j),
                    'beam': rdata['beam'],
                    'status': 'ok',
                    'version': VERSION_STRING,
                }

        j = json_encode(data)
        self.write(j)
        self.finish()
[ "paul.m.bailey@gmail.com" ]
paul.m.bailey@gmail.com
9e9ab762dffaf01f8fe4c9d08fd6301507cdd632
b09d305fd5771a583a205e12328979d79f56254c
/expressnews/expressnews/urls.py
adf0b561c989fd89f8ea99be57a52a07ab708afb
[]
no_license
DhruvikDonga/expressnews
02d25e1516b8f3be18d0b3bbbcc94f34c655694a
7670c763e7aeb04f776c74e99135cef8a6e9fef1
refs/heads/main
2023-04-15T21:01:15.983765
2021-04-29T05:10:01
2021-04-29T05:10:01
362,640,314
3
0
null
null
null
null
UTF-8
Python
false
false
1,114
py
from django.contrib import admin
from django.urls import path
from newsAPI.views import ListNewsView, getfavourite, getfavouritenews
from rest_framework_simplejwt.views import (
    TokenObtainPairView,
    TokenRefreshView)
from restauth.views import ModsView, register, current_user

urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/news/', ListNewsView.as_view()),
    # path('api/newsave/', ListSaveNewsView.as_view()),
    path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
    # Submit your refresh token to this path to obtain a new access token
    path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
    # Return 'Mods' model objects
    path('mods/', ModsView.as_view(), name='mods_view'),
    # Register a new user
    path('register/', register, name='register_view'),
    # user details
    path('userdetails/', current_user, name='userdetails'),
    # post saved news by user
    path('favourite/', getfavourite, name='favourite_post'),
    # get saved news by user
    path('getfavourite/', getfavouritenews, name='favourite_news'),
]
[ "dongadhruvik@gmail.com" ]
dongadhruvik@gmail.com
3de15438cec6eb4fbe69b20384a50ef8466bc557
8c202ab339e4fe4458b7985f789699ee19d07fc0
/test2.py
3681ff9d63e6062e9e66e8cb3168c75146f03de5
[]
no_license
Martijnvandebrug/autosuggest
0a57a3c80afec1c9b7d7a018c47caa5dab4bbcb1
5688112f00c06d4d14c2ab4a27a8bfe49df1e40e
refs/heads/master
2021-01-20T00:00:55.066132
2017-04-22T12:13:43
2017-04-22T12:13:43
89,066,116
0
0
null
null
null
null
UTF-8
Python
false
false
157
py
import operator

d = dict()  # renamed from `dict` to avoid shadowing the built-in
d['test'] = 1
d['test999'] = 2
d['test3'] = 3
d['test64'] = 11

# sort the (key, value) pairs by value; print as a function call so this runs on Python 3
print(sorted(d.items(), key=operator.itemgetter(1)))
[ "martijn.vandebrug@incentro.com" ]
martijn.vandebrug@incentro.com
92e641e29896fe9b94118631c0108a543f516c6c
bd923535ce082dd874591e7db42851d822207b96
/project/scr.py
a26a76ab9d9c57ff3354edc9ae2173b0c7baf847
[]
no_license
amtvj/MINI_PROJECT
b8314825ed09ccc11a3a3b29fd4275eb739f17b1
831a7e1096853fb1af490c0d2f327e644bab11ef
refs/heads/master
2020-07-16T01:00:37.382157
2016-11-16T20:25:15
2016-11-16T20:25:15
73,954,765
0
0
null
null
null
null
UTF-8
Python
false
false
575
py
import scenedetect

scene_list = []  # Scenes will be added to this list in detect_scenes().

for i in range(10):
    scene_list = []
    path = 'ACCEDE0000' + str(i) + '.mp4'  # Path to video file.

    # Usually use one detector, but multiple can be used.
    detector_list = [
        scenedetect.detectors.ThresholdDetector(threshold=8, min_percent=0.5)
    ]

    video_framerate, frames_read = scenedetect.detect_scenes_file(
        path, scene_list, detector_list)

    # scene_list now contains the frame numbers of scene boundaries.
    print(len(scene_list))  # .size()
    print(scene_list)
[ "iit2014110@iiita.ac.in" ]
iit2014110@iiita.ac.in
dedf270ecd85808beb261e2b5286d4273b2404bd
cb44e78c06f0428f02f1648c59463616247eac49
/simulation/microsaccade/simulate_ms.py
cf97936d47e09cd4d562f2c8fa8035032dd88482
[ "MIT" ]
permissive
helloTC/LearnPython
da5106da926735a6c0ff5d9e9f3a8beef0eabe4b
bd5fc977c800f3dc2d239b8cb7ad7e6e1b42fce8
refs/heads/master
2021-01-12T06:08:48.115607
2018-09-23T08:45:23
2018-09-23T08:45:23
77,313,517
0
0
null
null
null
null
UTF-8
Python
false
false
1,287
py
import models
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

cancel_command = 150

perm_t_list = []
perm_m_list = []
for n_perm in range(1000):
    t_list = []
    m_list = []
    is_success_canceled = False
    is_success_canceled_list = []
    t0 = 0
    for i in range(1):
        t, m, is_success_canceled = models.microsaccades(
            cancel_command, last_success_canceled=is_success_canceled, t_step=1)
        t = [j + t0 for j in t]
        t0 = np.max(t)
        t_list += t
        m_list += m
        is_success_canceled_list.append(is_success_canceled)
    print('permutation times: {}'.format(n_perm))
    t_list = np.array(t_list)
    m_list = np.array(m_list)
    pt_t_list = t_list[m_list > 1000]
    pt_m_list = m_list[m_list > 1000]
    perm_t_list.append([int(m) for m in pt_t_list])
    perm_m_list.append(pt_m_list)

plt.figure()
plt.plot(t_list, m_list)
plt.plot(pt_t_list.tolist(), pt_m_list.tolist(), marker='o', color='r', ls='')
plt.show()

flat_perm_t_list = [item for sublist in perm_t_list for item in sublist]
flat_perm_m_list = [item for sublist in perm_m_list for item in sublist]

plt.figure()
bin_perm_t_list = np.bincount(flat_perm_t_list)
p_perm_t_list = 1.0 * bin_perm_t_list / bin_perm_t_list.sum()
plt.plot(p_perm_t_list)
plt.show()
[ "taicheng_huang@sina.cn" ]
taicheng_huang@sina.cn
76f2c9df9de6d2d8b1e534e92216377e35669502
0b876f54170b0bde2c0abd43bbd81389fa2f6322
/TO_GPU/schedulers.py
e358e8124b0e7eaef96cf4f16e143410a11f4548
[ "MIT" ]
permissive
AJSVB/GPBT
2227aecf958a94d07473e76d8254a36e807082b9
746c11d06ecc4c3b62fc0a3290d672d336cbb11e
refs/heads/main
2023-08-25T16:54:04.965149
2021-09-30T18:02:32
2021-09-30T18:02:32
369,235,287
0
0
null
null
null
null
UTF-8
Python
false
false
19,463
py
import copy
import math
import random
import os
import numpy as np
import time
import datetime
import os

import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import numpy as np
import time

# Trials is an object that contains all the informations about the different trials
from hyperopt import hp, fmin, tpe, Trials
from functools import *

# importing this from sklearn
from sklearn import datasets
import pandas as pd
from sklearn.model_selection import train_test_split


# the x variable is the variable with the model configurations
def translation(liste):
    config = {}
    config["lr"] = liste[0]
    config["droupout_prob"] = liste[1]
    config["weight_decay"] = liste[2]
    config["b1"] = liste[3]
    config["b2"] = liste[4]
    config["eps"] = liste[5]
    return config


def test_function(x, models, h, losses, losses_temp, parent_model, k_f, iteration, fsvnlogger, printer, old_time):
    """General function that trains the given model with the given hyperparameters
    and computes the result for the train and validation set.
    This function is called by the Scheduler from initialisation() and loop().

    Args:
        x ([type]): new hyperparameter values
        models ([type]): model
        h ([type]): list of model parameters
        losses ([type]): array with all the losses
        parent_model ([type]): [description]
        k_f ([type]): [description]
        iteration ([type]): [description]
        fsvnlogger ([type]): [description]
        printer ([type]): [description]
    """
    # x = translation(x)
    if isinstance(k_f, list):
        k = k_f[0]
        is_list = True
    else:
        k = k_f
        is_list = False

    # checks if it's the first time and if so it initialises the model
    if iteration == 0:
        models[k] = parent_model[k](x)
    else:
        models[k] = parent_model.adapt(x)

    # here it increases the pointer to each one of the models
    if is_list:
        k_f[0] += 1  # this changes the actual array in the Scheduler
    h[k] = x

    # models[k].train1()
    for i in range(39):
        train_acc, train_los, acc_val, loss_val = models[k].step()
        if printer:
            temp = dict(x)
            temp.update({'train_acc': train_acc})
            temp.update({'train_los': train_los})
            temp.update({'acc_val': acc_val})
            temp.update({'loss_val': loss_val})
            temp.update({'time': time.time() - old_time})
            fsvnlogger.writerow(temp)

    print(k)
    losses_temp[k] = loss_val
    print(np.sort(losses_temp[:k + 1]))

    if iteration != 0 and (k == 0 or (k == 1 and loss_val < losses_temp[0])
                           or (k != 1 and loss_val < np.sort(losses_temp[:k])[math.floor(k / 2)])):
        train_acc1, train_los1, acc_val1, loss_val1 = models[k].step()
        train_acc2, train_los2, acc_val2, loss_val2 = models[k].step()
        train_acc3, train_los3, acc_val3, loss_val3 = models[k].step()
        train_acc4, train_los4, acc_val4, loss_val4 = models[k].step()
        train_acc, train_los, acc_val, loss_val = (
            (train_acc + train_acc1 + train_acc2 + train_acc3 + train_acc4) / 5,
            (train_los + train_los1 + train_los2 + train_los3 + train_los4) / 5,
            (acc_val + acc_val1 + acc_val2 + acc_val3 + acc_val4) / 5,
            (loss_val + loss_val1 + loss_val2 + loss_val3 + loss_val4) / 5)

    if iteration == 0 or (k == 0 or (k == 1 and loss_val < losses_temp[0])
                          or (k != 1 and loss_val < np.sort(losses_temp[:k])[math.floor(k / 2)])):
        acc_test, loss_test = models[k].test1()
        if printer:
            temp = dict(x)
            temp.update({'train_acc': train_acc})
            temp.update({'train_los': train_los})
            temp.update({'acc_val': acc_val})
            temp.update({'loss_val': loss_val})
            temp.update({'acc_test': acc_test})
            temp.update({'loss_test': loss_test})
            temp.update({'time': time.time() - old_time})
            fsvnlogger.writerow(temp)
    else:
        if printer:
            temp = dict(x)
            # acc_test, loss_test = models[k].test1()
            temp.update({'train_acc': train_acc})
            temp.update({'train_los': train_los})
            temp.update({'acc_val': acc_val})
            temp.update({'loss_val': loss_val})
            # temp.update({'acc_test': acc_test})
            # temp.update({'loss_test': loss_test})
            temp.update({'time': time.time() - old_time})
            fsvnlogger.writerow(temp)

    # print("# loss : {}\t test : {}".format(loss, test))
    losses[k] = loss_val
    # print("# Exiting test_function")
    return loss_val


def parent_idxs_choice(sorted_idxs, n_total, **opttional_args):
    """Returns the indexes of the new parents for the next iteration.
    Used in Scheduler.loop()

    Args:
        sorted_idxs (list): Indexes of the sorted loss for the previous iteration.
        n_total (int): Length of the returned array.

    Returns:
        list: List containing the sorted indexes of the new parents for the next iteration.
    """
    acceptance_probability = opttional_args.get("accept_prob", 0.9)
    new_idxs = [{}] * n_total
    i = 0
    j = 0
    while i < n_total and j < len(sorted_idxs) - (n_total - i):
        if random.uniform(0, 1) < acceptance_probability:
            new_idxs[i] = sorted_idxs[j]
            i += 1
        j += 1
    while i < n_total:
        new_idxs[i] = sorted_idxs[j]
        i += 1
        j += 1
    return new_idxs


class Parent():
    """Parent class that handles the passage of Network Configurations
    from one step to the following
    """

    def __init__(self, point_hyperspace, configuration, model, loss):
        # Trials function
        self.point_hyperspace = point_hyperspace
        # list of the different hyperpoints tried for this model
        self.configuration_list = [configuration]
        # list of all the previous losses of this function
        self.loss_list = [loss]
        self.model = model
        self.is_replicated = False

    def update(self, configuration, loss, model):
        self.is_replicated = False
        self.configuration_list.append(configuration)
        self.loss_list.append(loss)
        self.model = model

    def replication(self):
        self.is_replicated = True
        # self.configuration_list.append(self.configuration_list[-1])
        # self.loss_list.append(self.loss_list[-1])  # ?? why is it working now?
        # replication_trials(self.point_hyperspace.trials, n_children)

    def get_last_conf(self):
        return self.configuration_list[-1]

    def get_point_hyperspace(self):
        return self.point_hyperspace

    def get_model(self):
        return self.model

    def get_loss(self):
        return self.loss_list


class Scheduler():
    """Scheduler class that handles all the training process
    for our proposed algorithm (FSVN)
    """

    def __init__(self, model, num_iterations, num_config, oracle, naccache, printer, nott):
        self.oracle = oracle                    # Oracle manages the BO
        self.num_iterations = num_iterations    # total number of iterations
        self.num_config = num_config            # number of configurations at each step
        self.naccache = naccache                # the Naccache constant
        self.n_parents = math.floor(math.sqrt(num_config / naccache))
        self.time = time.time()

        # self.h is for the num_config hyperparameters used at every loop,
        # h is a configuration from the search space
        self.h = [{}] * self.num_config
        # it is a boolean to indicate if the trial should be stored in the .csv file
        self.print = printer

        # this will be called again in the initialization() method
        # self.points_hyperspace = np.empty(num_config)
        # self.hyperspaces is for storing the sqrt(m) hyperspaces used by the algorithm
        self.plot = np.empty(num_iterations)  # this will contain the value for the loss at each iteration
        # ??? there is no need to save all the points in hyperspace
        # self.points_hyperspace = np.empty(num_config)

        # self.parents is the sqrt(m) best models from last iteration that are the parents in the current iteration
        self.parents = [{}] * self.n_parents
        # all the models
        self.models = [model] * self.num_config
        # self.losses remembers the performances of all m models during the current iteration
        # sqrt(m) best from self.models
        self.losses = np.empty(num_config)
        self.losses_temp = np.empty(num_config)

        if not os.path.isdir('./log_data'):
            os.makedirs('./log_data')
        import csv
        temp = [k for k, v in oracle.searchspace.items()]
        temp.append('aiteration')
        temp.append('acc_val')
        temp.append('loss_val')
        temp.append('acc_test')
        temp.append('loss_test')
        temp.append('train_acc')
        temp.append('train_los')
        temp.append('time')
        self.file = open(os.path.join('./log_data', 'log_seed_{0}_{1}.csv'.format(
            1234, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))), 'a')
        self.logger = csv.DictWriter(self.file, delimiter=",",
                                     fieldnames=np.sort(np.array(temp)),
                                     extrasaction='ignore')
        self.logger.writeheader()
        self.file.flush()
        self.nott = nott

        # ??? didn't understand the use of k
        # it is there to have a pointer on k; as a result, for now this is no more than O(sqrt)-parallelisable.
        self.k = [0]

    def initialisation(self):
        """It will initialise the search process creating all the relevant structures
        and it will also compute the first iteration of the algorithm.
        """
        # print("> Entering Scheduler.initialisation()")
        # Database that will save all the evaluated points used by hyperopt
        point_extended_hyperspace = Trials()  # [None, None]

        # defines the test function, partial sets all the different parameters
        fmin_objective = partial(
            test_function,
            models=self.models,
            h=self.h,
            losses=self.losses,
            losses_temp=self.losses_temp,
            parent_model=self.models,
            # train=self.train_loader,
            # val=self.val_loader,
            # test=self.test_loader,
            k_f=self.k,
            iteration=0,
            fsvnlogger=self.logger,
            printer=self.print,
            old_time=self.time
        )

        self.oracle.compute_batch(point_extended_hyperspace, self.num_config, 0, fmin_objective)
        # where are the losses computed? in the `test_function`
        indexes = np.argsort(self.losses)

        # self.out[0] = self.losses[indexes[0:self.n_parents]]  # ??? NOT USED ANYMORE. WHY ?
        # self.points_hyperspace = [point_extended_hyperspace] * self.n_parents

        # we have in models all the models being trained, while the models that
        # can generate all the other models are in self.parents
        # all the parent models have right now the information about all the losses
        self.parents = [
            Parent(
                # copy.deepcopy(point_extended_hyperspace),  # Trials function
                Trials(),
                self.h[indexes[i]],       # the hyperpoint is chosen during fmin
                self.models[indexes[i]],  # saves the model
                self.losses[indexes[i]]   # saves the loss
            )
            for i in range(self.n_parents)
        ]

        # for the plot it only saves the best loss, which is the one we are going to take in the end
        self.plot[0] = self.losses[indexes[0]]

    def close(self):
        for i in self.parents[0].configuration_list:
            self.logger.writerow(i)
        self.file.flush()

    def loop(self):
        """Function to do the training for a number of times defined by the variable self.num_iterations.
        """
        # print("^ Entering Scheduler.loop()")
        for current_iter in range(1, self.num_iterations):
            # it reinitialises the value of k that will be modified in the `test_function`
            self.k[0] = 0
            threshold = 1.7976931348623157e+308
            if current_iter > 1 + self.nott:
                threshold = -self.plot[-1] + self.plot[-2]
            stop = False
            self.losses = np.ones(self.num_config) * 1.7976931348623157e+307

            for idx_parent, parent in enumerate(self.parents):
                if not stop and np.min(self.losses[-self.num_config:]) < -threshold + self.plot[-1]:
                    # print(np.min(self.losses[-self.num_config:]))
                    # print(self.plot[-1])
                    stop = True
                if stop:
                    # print("stopped!")
                    # print("saved trainings : " + str(self.num_config/self.n_parents))
                    break

                # print("^^ current_iter : {:d} and idx_parent : {:d}".format(current_iter, idx_parent))
                point_extended_hyperspace = parent.get_point_hyperspace()
                # print("^^ last loss of parent : {:.4f}".format(parent.get_loss()[-1]))

                fmin_objective = partial(
                    test_function,
                    models=self.models,      # ??? why is it calling all the self.models
                    h=self.h,                # ??? why not using the configuration list in parent
                    losses=self.losses,
                    losses_temp=self.losses_temp,
                    parent_model=parent.get_model(),
                    k_f=self.k,
                    iteration=current_iter,
                    # train=self.train_loader,
                    # val=self.val_loader,
                    # test=self.test_loader,
                    fsvnlogger=self.logger,
                    printer=self.print,
                    old_time=self.time
                )

                if not parent.is_replicated:
                    # point_extended_hyperspace = Trials()  # TODO Delete
                    # parent.point_hyperspace = Trials()  # TODO Delete
                    # print("^^ parent has NOT been replicated")
                    self.oracle.repeat_good(
                        point_extended_hyperspace,     # Trials function for the current parent
                        len(parent.get_loss()),        # number of iterations of the parent
                        fmin_objective,
                        parent.configuration_list[-1]  # it is the last hyperpoint of the parent
                    )

                    # define the number of models to train from this parent in this loop iteration
                    # only the best parent has children
                    if idx_parent == 0:
                        numb_training = self.num_config - (self.n_parents - 1) * \
                            math.floor(self.num_config / self.n_parents) - 1
                    else:
                        numb_training = math.floor(self.num_config / self.n_parents) - 1

                    if not stop and np.min(self.losses[-self.num_config:]) < -threshold + self.plot[-1]:
                        stop = True
                    if stop:
                        # print("stopped!")
                        # print("saved trainings : " + str(numb_training))
                        break

                    # computes the new batch for each one of the parents for every iteration
                    # they are all going to be sons of this same parent since they have the same Trials func
                    self.oracle.compute_batch(
                        point_extended_hyperspace,
                        numb_training,
                        len(parent.get_loss()),
                        fmin_objective
                    )
                else:
                    # print("^^ parent has been replicated")
                    if idx_parent == 0:
                        numb_training = self.num_config - (self.n_parents - 1) * \
                            math.floor(self.num_config / self.n_parents)
                    else:
                        numb_training = math.floor(self.num_config / self.n_parents)
                    # replicated parent
                    self.oracle.compute_batch(
                        point_extended_hyperspace,
                        numb_training,
                        len(parent.get_loss()),
                        fmin_objective
                    )

            combined_losses = np.concatenate(
                (
                    self.losses,
                    [self.parents[i].get_loss()[-1] for i in range(self.n_parents)]
                ), 0
            )
            combined_losses = self.losses
            # sorted_loss = np.sort(combined_losses)
            # sigma = sorted_loss[self.n_parents] - sorted_loss[1]
            # best child has probability >99.5% to be taken
            # all sqrt(n) best children have probability >50% to be taken
            # noised_loss = [np.random.normal(i, sigma/5,) for i in combined_losses]
            # print(combined_losses)
            # print(np.argsort(noised_loss)[:self.n_parents])
            # print(np.argsort(combined_losses)[:self.n_parents])
            ixs_parents = np.argsort(combined_losses)
            # ixs_parents = np.argsort(noised_loss)
            # Improvement proposed by Naccache: non-deterministic selection of children
            parent_idx = ixs_parents[:self.n_parents]
            # parent_idx = parent_idxs_choice(ixs_parents, self.n_parents, accept_prob=0.95)

            temp_parents = [''] * self.n_parents
            for j, x in enumerate(parent_idx):
                x = int(x)
                if x >= self.num_config:
                    print("Do we go there? (replications)")
                    temp_parents[j] = copy.deepcopy(self.parents[x - self.num_config])
                    temp_parents[j].replication()
                else:
                    temp_parents[j] = copy.deepcopy(
                        self.parents[math.floor(x / self.num_config * self.n_parents)])
                    temp_parents[j].update(self.h[x], self.losses[x], self.models[x])
                temp_parents[j].point_hyperspace = Trials()  # TODO Delete
            self.parents = temp_parents

            # updating the best loss for the plot
            self.plot[current_iter] = combined_losses[parent_idx[0]]
            print("iter" + str(current_iter))
            print("parent_idx" + str(combined_losses[parent_idx[0]]))
            self.file.flush()
        # print("^ Exiting Scheduler.loop()")
[ "antoine.scardigli@epfl.ch" ]
antoine.scardigli@epfl.ch
812cfcad42f917b8938602ef5ac9ad34fd9f1a26
85e7eb9cbae5e195a303f737c069b70b20a97015
/人工智慧與機器學習/lab/5-2-loop.py
b4234ef7cb61dce8ea250790e00d21d34e20699a
[]
no_license
Ruby-Dog/III
776473c315c13b166b38115bc84b8cc5440084f6
f9dd5578fc2a0c2130a0591babf045f7ce4f5f6c
refs/heads/master
2022-11-10T16:54:34.190619
2020-07-02T16:32:43
2020-07-02T16:32:43
276,695,108
0
0
null
null
null
null
UTF-8
Python
false
false
372
py
# Ultimate password (number guessing game)
import random

low = 1
high = 100
ans = random.randint(low, high)

while True:
    i = int(input("Enter a number in the range [" + str(low) + "-" + str(high) + "]: "))
    if i == ans:
        print("You guessed it!")
        break
    elif i < ans:
        print("Your guess is too low")
        low = i
    elif i > ans:
        print("Your guess is too high")
        high = i
[ "rubydog0501@gmail.com" ]
rubydog0501@gmail.com
8cbcc47c34e3d8102a34f1d71d6dece5e48648d6
5d35bcc45ebc56ec33f49ea91a68576f9e933558
/stage/settings.py
b1ef01f61fd78ae6a1b81a9d7a9fb61849c6247d
[]
no_license
Taratra-RD/stageproject
1c2eb052709b6432e72ab042017ae3df7fac5b40
81634eba59c8f2f9f5862f7c97a6436cb56c21b3
refs/heads/main
2023-01-01T14:53:16.856683
2020-10-27T06:01:00
2020-10-27T06:01:00
307,600,247
0
0
null
null
null
null
UTF-8
Python
false
false
3,105
py
""" Django settings for stage project. Generated by 'django-admin startproject' using Django 3.1.2. For more information on this file, see https://docs.djangoproject.com/en/3.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.1/ref/settings/ """ from pathlib import Path # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'r)ce#&itin@31m7z-6k-(2w#&0qskfp^qtmo005o#-d)5h@5@s' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'appli.apps.AppliConfig', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'stage.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': ["appli/templates"], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'stage.wsgi.application' # Database # https://docs.djangoproject.com/en/3.1/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': BASE_DIR / 'db.sqlite3', } } # Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.1/howto/static-files/ STATIC_URL = '/static/'
[ "taratra.access4@gmail.com" ]
taratra.access4@gmail.com
e5c3a5017255ad7d4a49726ac7ee6252374c6a5e
1a4b31008e6cde2cc31de2aefad2174717299bf4
/ABC081A.py
204b9f12c1720d11ad07ad5683d12ddccfb6965b
[]
no_license
mayosuke/atcoder
577b060751a830823f300ab1e58ea888672a5dc6
65e747a976cdacc1c2ec669bd33e2f5fdc0faf19
refs/heads/main
2023-05-05T04:34:18.941331
2021-05-31T06:52:54
2021-05-31T06:52:54
344,834,896
0
0
null
null
null
null
UTF-8
Python
false
false
730
py
# https://atcoder.jp/contests/abs/tasks/abc081_a
# ABC081A - Placing Marbles
# Time limit: 2 sec / Memory limit: 256 MB
# Points: 100

# Problem statement
# Snuke has a grid of 3 squares numbered 1, 2, 3. Each square has a 0 or a 1
# written on it; square i shows s_i.
# Snuke places a marble on every square showing a 1. Count how many squares
# receive a marble.

# Constraints
# s1, s2, s3 are each 1 or 0.

# Input
# Input is given from standard input in the following format:
# s1s2s3

# Output
# Print the answer.

print(len([s for s in input() if s == '1']))
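# --- Hedged aside (added for illustration; not the submitted solution) ---
# str.count gives the same answer with less machinery:
#     print(input().count('1'))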
[ "mayo.suke@gmail.com" ]
mayo.suke@gmail.com
72ce4d94ac2d84c72df69cc3963c2d4e59f64193
12216794188a9fc317422f04a2fa3c82e952df00
/buddy_downloader.py
70ef70febfb6ec1d46cdbb8af058d831d8e486c5
[ "MIT" ]
permissive
maciej1x/YouTuBuddy
a31f2b78ea0a51603ad39264d3a97cd64ca932a3
c1d8d5be005886ff4863986f516f6e8c9d1c82de
refs/heads/main
2023-01-31T21:37:12.534093
2020-12-09T10:25:20
2020-12-09T10:25:20
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,072
py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import logging
import os

import moviepy.editor as mp
from pytube import YouTube


class BuddyDownloader:
    """
    Download video and/or audio for given YouTube url

    Returns
    -------
    None.
    """

    def __init__(self) -> None:
        self.logger = logging.getLogger('BuddyDownloader')

    def load_url(self, url: str) -> None:
        """
        Load YouTube URL

        Parameters
        ----------
        url : str
            YouTube url.

        Returns
        -------
        None.
        """
        self.logger.info(f'Getting video for url: {url}')
        self.yt = YouTube(url)

    def download_video(self, output_path=None, filename=None) -> str:
        """
        Download video from previously loaded url

        Parameters
        ----------
        output_path : str, optional
            path to output videofile. The default is None.
        filename : str, optional
            name of videofile. The default is None, which means title of video.

        Returns
        -------
        downloaded_file_path : str
            path to videofile.
        """
        self.logger.info('Downloading video...')
        ys = self.yt.streams.get_highest_resolution()
        downloaded_file_path = ys.download(output_path=output_path, filename=filename)
        self.logger.info('Downloading finished')
        return downloaded_file_path

    def convert_mp4_to_mp3(self, videofile: str) -> None:
        """
        Convert given videofile (mp4) to mp3

        Parameters
        ----------
        videofile : str
            path to videofile.mp4.

        Returns
        -------
        None.
        """
        self.logger.info('Converting video')
        clip = mp.VideoFileClip(videofile)
        self.logger.info('Saving audio')
        audiofilename = os.path.basename(videofile).split('.')[0] + '.mp3'
        clip.audio.write_audiofile(
            os.path.join(os.path.split(videofile)[0], audiofilename)
        )
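# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# A typical call sequence for the class above: load a URL, download the video,
# then extract the audio track. The URL and output directory are placeholders
# invented here, not tested values.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    buddy = BuddyDownloader()
    buddy.load_url('https://www.youtube.com/watch?v=<VIDEO_ID>')  # placeholder URL
    video_path = buddy.download_video(output_path='downloads')
    buddy.convert_mp4_to_mp3(video_path)  # writes an .mp3 next to the video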
[ "ulaszewski.maciej@gmail.com" ]
ulaszewski.maciej@gmail.com
38537c9f9c94431cfa3e7134396ddf062c9de2bb
db4f9b417e5b91d8f83fedddc959201c55db037d
/AddHeightWeightToPlayerDirectory.py
6fd87052275e113a4c5dfbfb99aff6bc41e874b1
[]
no_license
sbattula2/dfsNeuralNet
7e1e7ec666bf0b3fc915b8049880465869c97a37
f2e8fedebcb51609feafdccab766260657b71c59
refs/heads/main
2023-01-22T09:40:40.455702
2020-11-23T18:22:34
2020-11-23T18:22:34
314,943,077
1
0
null
null
null
null
UTF-8
Python
false
false
767
py
# -*- coding: utf-8 -*-
"""
Created on Thu Sep  5 11:30:29 2019

@author: nbatt
"""

import pandas as pd
import numpy as np
import os

df = pd.read_csv('PlayerDirectory.csv')

os.chdir('C:/NBA DFS/Anthropomorphic')
files = os.listdir('C:/NBA DFS/Anthropomorphic')

# df.set_index('Player', inplace=True)
df['Height'] = [1] * df.shape[0]
df['Weight'] = [1] * df.shape[0]

for j in files:
    rosters = pd.read_csv(j)
    # rosters = rosters.set_index('Player')
    for i in rosters['Player']:
        df.loc[df['Player'] == i, 'Height'] = float(rosters.loc[rosters['Player'] == i, 'Ht'])
        df.loc[df['Player'] == i, 'Weight'] = float(rosters.loc[rosters['Player'] == i, 'Wt'])

df = df.loc[df['Tm'] != 'TOT']

os.chdir('C:/NBA DFS')
df.to_csv('PlayerDirectory.csv', index=False)
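# --- Hedged aside (added for illustration; not part of the original script) ---
# The nested loop above does one .loc lookup per player per roster file; a merge
# performs the same join in one pass. Column names ('Player', 'Ht', 'Wt') are
# taken from the loop above; everything else is an assumption, so this is left
# commented out rather than run against the live CSVs.
# heights = pd.concat(pd.read_csv(f) for f in files)[['Player', 'Ht', 'Wt']]
# df = df.merge(heights.rename(columns={'Ht': 'Height', 'Wt': 'Weight'}),
#               on='Player', how='left')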
[ "nbattula2@gmail.com" ]
nbattula2@gmail.com
2e5bd2963984d065453e3cd16b49c9d75b40a786
422e3f7534b7a9007ec02b82e28034583780b9d5
/argtyped/arguments.py
b71f69b22642f7e7dff00af50f2a69258c8b7af4
[ "MIT" ]
permissive
guhur/argtyped
7128cf1ae54f2c69ee05ebb9d5ca267a3ccfa6af
39ce53d5d05004ea3a622c0df9a1499f8a2abbf5
refs/heads/master
2023-01-30T09:54:29.637928
2020-06-16T01:21:51
2020-06-16T01:27:12
319,275,849
0
0
MIT
2020-12-07T09:59:01
2020-12-07T09:59:00
null
UTF-8
Python
false
false
11,317
py
import argparse
import functools
import sys
from collections import OrderedDict
from typing import Any, Callable, Dict, List, Optional, TypeVar

from .custom_types import Switch, is_choices, is_enum, is_optional, unwrap_optional, unwrap_choices

__all__ = [
    "Arguments",
]

T = TypeVar('T')
ConversionFn = Callable[[str], T]


class ArgumentParser(argparse.ArgumentParser):
    r"""A class to override some of ``ArgumentParser``\ 's behaviors.
    """

    def _get_value(self, action, arg_string):
        r"""The original ``_get_value`` method catches exceptions in user-defined ``type_func``\ s
        and ignores the error message. Here we don't do that.
        """
        type_func = self._registry_get('type', action.type, action.type)
        try:
            result = type_func(arg_string)
        except (argparse.ArgumentTypeError, TypeError, ValueError) as e:
            message = f"value '{arg_string}', {e.__class__.__name__}: {str(e)}"
            raise argparse.ArgumentError(action, message)
        return result

    def error(self, message):
        r"""The original ``error`` method only prints the usage and force quits.
        Here we print the full help.
        """
        self.print_help(sys.stderr)
        sys.stderr.write(f"{self.prog}: error: {message}\n")
        self.exit(2)

    def add_switch_argument(self, name: str, default: bool = False) -> None:
        r"""Add a "switch" argument to the parser. A switch argument with name ``"flag"`` has value
        ``True`` if the argument ``--flag`` exists, and ``False`` if ``--no-flag`` exists.
        """
        assert name.startswith("--")
        name = name[2:]
        var_name = name.replace('-', '_')
        self.add_argument(f"--{name}", action="store_true", default=default, dest=var_name)
        self.add_argument(f"--no-{name}", action="store_false", dest=var_name)


def _bool_conversion_fn(s: str) -> bool:
    if s.lower() in ["y", "yes", "true", "ok"]:
        return True
    if s.lower() in ["n", "no", "false"]:
        return False
    raise ValueError(f"Invalid value '{s}' for bool argument")


def _optional_wrapper_fn(fn: ConversionFn[T]) -> ConversionFn[Optional[T]]:
    @functools.wraps(fn)
    def wrapped(s: str) -> Optional[T]:
        if s.lower() == 'none':
            return None
        return fn(s)

    return wrapped


_TYPE_CONVERSION_FN: Dict[type, ConversionFn[Any]] = {
    bool: _bool_conversion_fn,
}


class Arguments:
    r"""A typed version of ``argparse``. It's easier to illustrate using an example:

    .. code-block:: python

        from typing import Optional
        from argtyped import Arguments, Choices, Switch
        from argtyped import Enum, auto

        class LoggingLevels(Enum):
            Debug = auto()
            Info = auto()
            Warning = auto()
            Error = auto()
            Critical = auto()

        class MyArguments(Arguments):
            model_name: str
            hidden_size: int = 512
            activation: Choices['relu', 'tanh', 'sigmoid'] = 'relu'
            logging_level: LoggingLevels = LoggingLevels.Info
            use_dropout: Switch = True
            dropout_prob: Optional[float] = 0.5

        args = MyArguments()

    This is equivalent to the following code with Python built-in ``argparse``:

    .. code-block:: python

        import argparse

        parser = argparse.ArgumentParser()
        parser.add_argument("--model-name", type=str, required=True)
        parser.add_argument("--hidden-size", type=int, default=512)
        parser.add_argument("--activation", choices=["relu", "tanh", "sigmoid"], default="relu")
        parser.add_argument("--logging-level", choices=list(LoggingLevels), default="info")
        parser.add_argument("--use-dropout", action="store_true", dest="use_dropout", default=True)
        parser.add_argument("--no-use-dropout", action="store_false", dest="use_dropout")
        parser.add_argument("--dropout-prob", type=lambda s: None if s.lower() == 'none' else float(s), default=0.5)

        args = parser.parse_args()

    Suppose the following arguments are provided:

    .. code-block:: bash

        python main.py \
            --model-name LSTM \
            --activation sigmoid \
            --logging-level debug \
            --no-use-dropout \
            --dropout-prob none

    the parsed arguments will be:

    .. code-block:: python

        Namespace(model_name="LSTM", hidden_size=512, activation="sigmoid",
                  logging_level="debug", use_dropout=False, dropout_prob=None)

    :class:`Arguments` provides the following features:

    - More concise and intuitive syntax over ``argparse``, less boilerplate code.
    - Arguments take the form of type-annotated class attributes, allowing IDEs to provide autocompletion.
    - Drop-in replacement for ``argparse``, since internally ``argparse`` is used.

    **Note:** Advanced features such as subparsers, groups, argument lists, and custom actions
    are not supported.
    """

    _annotations: 'OrderedDict[str, type]'

    def __init__(self, args: Optional[List[str]] = None):
        annotations: 'OrderedDict[str, type]' = OrderedDict()
        for base in reversed(self.__class__.mro()):
            # Use reversed order so derived classes can override base annotations.
            if base not in [object, Arguments]:
                annotations.update(base.__dict__.get('__annotations__', {}))

        # Check if there are arguments with default values but without annotations.
        for key in dir(self):
            value = getattr(self, key)
            if not key.startswith("__") and not callable(value):
                if key not in annotations:
                    raise ValueError(f"Argument '{key}' does not have type annotation")

        parser = ArgumentParser()
        for arg_name, arg_typ in annotations.items():
            # Check validity of name and type.
            has_default = hasattr(self.__class__, arg_name)
            default_val = getattr(self.__class__, arg_name, None)
            nullable = is_optional(arg_typ)
            if nullable:
                # extract the type wrapped inside `Optional`
                arg_typ = unwrap_optional(arg_typ)

            required = False
            if nullable and not has_default:
                has_default = True
                default_val = None
            elif not nullable and not has_default:
                required = True
            if not nullable and has_default and default_val is None:
                raise ValueError(f"Argument '{arg_name}' has default value of None, but is not nullable")

            parser_arg_name = "--" + arg_name.replace("_", "-")
            parser_kwargs: Dict[str, Any] = {
                "required": required,
            }
            if arg_typ is Switch:  # type: ignore[misc]
                if not isinstance(default_val, bool):
                    raise ValueError(f"Switch argument '{arg_name}' must have a default value of type bool")
                parser.add_switch_argument(parser_arg_name, default_val)
            elif is_choices(arg_typ) or is_enum(arg_typ):
                if is_enum(arg_typ):
                    choices = list(arg_typ)  # type: ignore[call-overload]
                    parser_kwargs["type"] = arg_typ
                else:
                    choices = unwrap_choices(arg_typ)
                    if any(not isinstance(choice, str) for choice in choices):
                        raise ValueError("All choices must be strings")
                parser_kwargs["choices"] = choices
                if has_default:
                    if default_val not in choices:
                        raise ValueError(f"Invalid default value for argument '{arg_name}'")
                    parser_kwargs["default"] = default_val
                parser.add_argument(parser_arg_name, **parser_kwargs)
            else:
                if arg_typ not in _TYPE_CONVERSION_FN and not callable(arg_typ):
                    raise ValueError(f"Invalid type '{arg_typ}' for argument '{arg_name}'")
                conversion_fn = _TYPE_CONVERSION_FN.get(arg_typ, arg_typ)
                if nullable:
                    conversion_fn = _optional_wrapper_fn(conversion_fn)
                parser_kwargs["type"] = conversion_fn
                if has_default:
                    parser_kwargs["default"] = default_val
                parser.add_argument(parser_arg_name, **parser_kwargs)

        if self.__class__.__module__ != "__main__":
            # Usually arguments are defined in the same script that is directly run (__main__).
            # If this is not the case, add a note in the help message indicating where the
            # arguments are defined.
            parser.epilog = f"Note: Arguments defined in {self.__class__.__module__}.{self.__class__.__name__}"

        namespace = parser.parse_args(args)
        self._annotations = annotations
        for arg_name, arg_typ in annotations.items():
            setattr(self, arg_name, getattr(namespace, arg_name))

    def to_dict(self) -> 'OrderedDict[str, Any]':
        r"""Convert the set of arguments to a dictionary.

        :return: An ``OrderedDict`` mapping argument names to values.
        """
        return OrderedDict([(key, getattr(self, key)) for key in self._annotations.keys()])

    def to_string(self, width: Optional[int] = None, max_width: Optional[int] = None) -> str:
        r"""Represent the arguments as a table.

        :param width: Width of the printed table. Defaults to ``None``, which fits the table to
            its contents. An exception is raised when the table cannot be drawn with the given width.
        :param max_width: Maximum width of the printed table. Defaults to ``None``, meaning no limits.
            Must be ``None`` if :arg:`width` is not ``None``.
        """
        if width is not None and max_width is not None:
            raise ValueError("`max_width` must be None when `width` is specified")

        k_col = "Arguments"
        v_col = "Values"
        valid_keys = list(self._annotations.keys())
        valid_vals = [repr(getattr(self, k)) for k in valid_keys]
        max_key = max(len(k_col), max(len(k) for k in valid_keys))
        max_val = max(len(v_col), max(len(v) for v in valid_vals))
        margin_col = 7  # table frame & spaces
        if width is not None:
            max_val = width - max_key - margin_col
        elif max_width is not None:
            max_val = min(max_val, max_width - max_key - margin_col)
        if max_val < len(v_col):
            raise ValueError("Table cannot be drawn under the width constraints")

        def get_row(k: str, v: str) -> str:
            if len(v) > max_val:
                v = v[:((max_val - 5) // 2)] + ' ... ' + v[-((max_val - 4) // 2):]
                assert len(v) == max_val
            return f"║ {k.ljust(max_key)} │ {v.ljust(max_val)} ║\n"

        s = repr(self.__class__) + '\n'
        s += f"╔═{'═' * max_key}═╤═{'═' * max_val}═╗\n"
        s += get_row(k_col, v_col)
        s += f"╠═{'═' * max_key}═╪═{'═' * max_val}═╣\n"
        for k, v in zip(valid_keys, valid_vals):
            s += get_row(k, v)
        s += f"╚═{'═' * max_key}═╧═{'═' * max_val}═╝\n"
        return s
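A minimal usage sketch of the class above, assuming this module is importable; the `TrainArgs` class and all argument values here are hypothetical, not part of the file:

from typing import Optional

class TrainArgs(Arguments):
    model_name: str
    hidden_size: int = 512
    dropout_prob: Optional[float] = 0.5

# An explicit argv list can be passed instead of reading sys.argv.
args = TrainArgs(["--model-name", "LSTM", "--dropout-prob", "none"])
print(args.to_dict())    # OrderedDict([('model_name', 'LSTM'), ('hidden_size', 512), ('dropout_prob', None)])
print(args.to_string())  # renders the box-drawing table built by to_string()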
[ "huzecong@gmail.com" ]
huzecong@gmail.com
b51e7f25b8fbc0a62e9ba18826007922377d9475
f82236cb5a388846ea8c8980ac21d0e11f20171d
/LeetCode/53_最大子序和.py
7c9ca43e86b406922190bad2717d6d9d4269839c
[]
no_license
cp4011/Algorithms
f2889c219d68b597a38d86899d8ff682e386e649
e7214e59640cd24d908a6b95d8876c9db9822d8b
refs/heads/master
2020-04-27T20:21:43.937234
2019-08-18T15:12:09
2019-08-18T15:12:09
174,654,563
3
0
null
null
null
null
UTF-8
Python
false
false
907
py
"""给定一个整数数组 nums ,找到一个具有最大和的连续子数组(子数组最少包含一个元素),返回其最大和。 示例: 输入: [-2,1,-3,4,-1,2,1,-5,4], 输出: 6 解释: 连续子数组 [4,-1,2,1] 的和最大,为 6。 进阶: 如果你已经实现复杂度为 O(n) 的解法,尝试使用更为精妙的分治法求解。 """ class Solution: def maxSubArray(self, nums): """ :type nums: List[int] :rtype: int """ if len(nums) == 1: return nums[0] result_sum = [0 for i in range(len(nums))] result_sum[0] = nums[0] for i in range(1, len(nums)): if result_sum[i - 1] < 0: result_sum[i] = nums[i] else: result_sum[i] = result_sum[i - 1] + nums[i] return max(result_sum) print(list(map(str, [1, 2]))) print(' '.join(["1", "2"]))
[ "957628963@qq.com" ]
957628963@qq.com
3449edc16f2114c5800785635a25a94a9aafedbf
10e34c6c9bb904039677e5221b1cbb0fd604b66b
/lib/__init__.py
c92d8e2e4919c315e0e3bc7a9a2f165bcae2ea21
[]
no_license
jQwotos/webnouncements
4a748f2871a04546ebc4e7754548181bf8eef79c
e8448711fd95bd9fe37605e7412c84f543d505f1
refs/heads/master
2021-01-19T21:06:37.612705
2017-09-23T03:52:57
2017-09-23T03:52:57
80,591,695
0
6
null
2018-10-26T15:06:22
2017-02-01T05:13:02
JavaScript
UTF-8
Python
false
false
18
py
# Blank init file
[ "le.kent.jason@gmail.com" ]
le.kent.jason@gmail.com
158d75b848be7a554a3a613f64f6ee41ace7d699
6dbf099660ee82b72fb2526a3dc242d99c5fb8c8
/nuitka/tree/ReformulationDictionaryCreation.py
982733f1dbd98179389b9f897633514ab7923746
[ "Apache-2.0", "LicenseRef-scancode-warranty-disclaimer" ]
permissive
Hellebore/Nuitka
3544af691bc352769858ec1d44b6e9de46087bcf
252d8e5d24521f8fff38142aa66c6b9063151f57
refs/heads/develop
2021-01-06T15:33:49.111250
2020-02-18T14:24:49
2020-02-18T14:24:49
241,380,473
0
0
Apache-2.0
2020-07-11T17:52:04
2020-02-18T14:21:01
Python
UTF-8
Python
false
false
11,198
py
#     Copyright 2020, Kay Hayen, mailto:kay.hayen@gmail.com
#
#     Part of "Nuitka", an optimizing Python compiler that is compatible and
#     integrates with CPython, but also works on its own.
#
#     Licensed under the Apache License, Version 2.0 (the "License");
#     you may not use this file except in compliance with the License.
#     You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#     Unless required by applicable law or agreed to in writing, software
#     distributed under the License is distributed on an "AS IS" BASIS,
#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#     See the License for the specific language governing permissions and
#     limitations under the License.
#
""" Reformulation of dictionary creations.

Dictionary creations might be directly translated to constants, or they
might become nodes that build dictionaries.

For Python3.5, unpacking can happen while creating dictionaries, these
are being re-formulated to an internal function.

Consult the developer manual for information. TODO: Add ability to sync
source code comments with developer manual sections.
"""

from nuitka.nodes.AssignNodes import (
    StatementAssignmentVariable,
    StatementDelVariable,
    StatementReleaseVariable,
)
from nuitka.nodes.AttributeNodes import ExpressionAttributeLookup
from nuitka.nodes.BuiltinIteratorNodes import ExpressionBuiltinIter1
from nuitka.nodes.BuiltinNextNodes import ExpressionBuiltinNext1
from nuitka.nodes.ConstantRefNodes import makeConstantRefNode
from nuitka.nodes.ContainerMakingNodes import ExpressionMakeTuple
from nuitka.nodes.DictionaryNodes import (
    ExpressionKeyValuePair,
    ExpressionMakeDict,
    StatementDictOperationUpdate,
)
from nuitka.nodes.ExceptionNodes import (
    ExpressionBuiltinMakeException,
    StatementRaiseException,
)
from nuitka.nodes.FunctionNodes import (
    ExpressionFunctionCall,
    ExpressionFunctionCreation,
    ExpressionFunctionRef,
)
from nuitka.nodes.LoopNodes import StatementLoop, StatementLoopBreak
from nuitka.nodes.OperatorNodes import makeBinaryOperationNode
from nuitka.nodes.ReturnNodes import StatementReturn
from nuitka.nodes.TypeNodes import ExpressionBuiltinType1
from nuitka.nodes.VariableRefNodes import (
    ExpressionTempVariableRef,
    ExpressionVariableRef,
)
from nuitka.PythonVersions import python_version
from nuitka.specs.ParameterSpecs import ParameterSpec

from .InternalModule import (
    internal_source_ref,
    makeInternalHelperFunctionBody,
    once_decorator,
)
from .ReformulationTryExceptStatements import makeTryExceptSingleHandlerNode
from .ReformulationTryFinallyStatements import makeTryFinallyStatement
from .TreeHelpers import (
    buildNode,
    buildNodeList,
    makeDictCreationOrConstant,
    makeStatementsSequenceFromStatement,
    makeStatementsSequenceFromStatements,
)


def buildDictionaryNode(provider, node, source_ref):
    if python_version >= 350:
        for key in node.keys:
            if key is None:
                return buildDictionaryUnpacking(
                    provider=provider, node=node, source_ref=source_ref
                )

    return makeDictCreationOrConstant(
        keys=buildNodeList(provider, node.keys, source_ref),
        values=buildNodeList(provider, node.values, source_ref),
        source_ref=source_ref,
    )


@once_decorator
def getDictUnpackingHelper():
    helper_name = "_unpack_dict"

    result = makeInternalHelperFunctionBody(
        name=helper_name,
        parameters=ParameterSpec(
            ps_name=helper_name,
            ps_normal_args=(),
            ps_list_star_arg="args",
            ps_dict_star_arg=None,
            ps_default_count=0,
            ps_kw_only_args=(),
            ps_pos_only_args=(),
        ),
    )

    temp_scope = None

    tmp_result_variable = result.allocateTempVariable(temp_scope, "dict")
    tmp_iter_variable = result.allocateTempVariable(temp_scope, "iter")
    tmp_item_variable = result.allocateTempVariable(temp_scope, "keys")

    loop_body = makeStatementsSequenceFromStatements(
        makeTryExceptSingleHandlerNode(
            tried=StatementAssignmentVariable(
                variable=tmp_item_variable,
                source=ExpressionBuiltinNext1(
                    value=ExpressionTempVariableRef(
                        variable=tmp_iter_variable, source_ref=internal_source_ref
                    ),
                    source_ref=internal_source_ref,
                ),
                source_ref=internal_source_ref,
            ),
            exception_name="StopIteration",
            handler_body=StatementLoopBreak(source_ref=internal_source_ref),
            source_ref=internal_source_ref,
        ),
        makeTryExceptSingleHandlerNode(
            tried=StatementDictOperationUpdate(
                dict_arg=ExpressionTempVariableRef(
                    variable=tmp_result_variable, source_ref=internal_source_ref
                ),
                value=ExpressionTempVariableRef(
                    variable=tmp_item_variable, source_ref=internal_source_ref
                ),
                source_ref=internal_source_ref,
            ),
            exception_name="AttributeError",
            handler_body=StatementRaiseException(
                exception_type=ExpressionBuiltinMakeException(
                    exception_name="TypeError",
                    args=(
                        makeBinaryOperationNode(
                            operator="Mod",
                            left=makeConstantRefNode(
                                constant="""\
'%s' object is not a mapping""",
                                source_ref=internal_source_ref,
                                user_provided=True,
                            ),
                            right=ExpressionMakeTuple(
                                elements=(
                                    ExpressionAttributeLookup(
                                        source=ExpressionBuiltinType1(
                                            value=ExpressionTempVariableRef(
                                                variable=tmp_item_variable,
                                                source_ref=internal_source_ref,
                                            ),
                                            source_ref=internal_source_ref,
                                        ),
                                        attribute_name="__name__",
                                        source_ref=internal_source_ref,
                                    ),
                                ),
                                source_ref=internal_source_ref,
                            ),
                            source_ref=internal_source_ref,
                        ),
                    ),
                    source_ref=internal_source_ref,
                ),
                exception_value=None,
                exception_trace=None,
                exception_cause=None,
                source_ref=internal_source_ref,
            ),
            source_ref=internal_source_ref,
        ),
    )

    args_variable = result.getVariableForAssignment(variable_name="args")

    final = (
        StatementReleaseVariable(
            variable=tmp_result_variable, source_ref=internal_source_ref
        ),
        StatementReleaseVariable(
            variable=tmp_iter_variable, source_ref=internal_source_ref
        ),
        StatementReleaseVariable(
            variable=tmp_item_variable, source_ref=internal_source_ref
        ),
        # We get handed our args responsibility.
        StatementDelVariable(
            variable=args_variable, tolerant=False, source_ref=internal_source_ref
        ),
    )

    tried = makeStatementsSequenceFromStatements(
        StatementAssignmentVariable(
            variable=tmp_iter_variable,
            source=ExpressionBuiltinIter1(
                value=ExpressionVariableRef(
                    variable=args_variable, source_ref=internal_source_ref
                ),
                source_ref=internal_source_ref,
            ),
            source_ref=internal_source_ref,
        ),
        StatementAssignmentVariable(
            variable=tmp_result_variable,
            source=makeConstantRefNode(constant={}, source_ref=internal_source_ref),
            source_ref=internal_source_ref,
        ),
        StatementLoop(body=loop_body, source_ref=internal_source_ref),
        StatementReturn(
            expression=ExpressionTempVariableRef(
                variable=tmp_result_variable, source_ref=internal_source_ref
            ),
            source_ref=internal_source_ref,
        ),
    )

    result.setBody(
        makeStatementsSequenceFromStatement(
            makeTryFinallyStatement(
                provider=result,
                tried=tried,
                final=final,
                source_ref=internal_source_ref,
            )
        )
    )

    return result


def buildDictionaryUnpackingArgs(provider, keys, values, source_ref):
    result = []

    for key, value in zip(keys, values):
        # TODO: We could be a lot cleverer about the dictionaries for non-starred
        # arguments, but lets get this to work first.
        if key is None:
            result.append(buildNode(provider, value, source_ref))
        elif type(key) is str:
            result.append(
                ExpressionMakeDict(
                    pairs=(
                        ExpressionKeyValuePair(
                            key=makeConstantRefNode(
                                constant=key, source_ref=source_ref
                            ),
                            value=buildNode(provider, value, source_ref),
                            source_ref=source_ref,
                        ),
                    ),
                    source_ref=source_ref,
                )
            )
        else:
            result.append(
                ExpressionMakeDict(
                    pairs=(
                        ExpressionKeyValuePair(
                            key=buildNode(provider, key, source_ref),
                            value=buildNode(provider, value, source_ref),
                            source_ref=source_ref,
                        ),
                    ),
                    source_ref=source_ref,
                )
            )

    return result


def buildDictionaryUnpacking(provider, node, source_ref):
    helper_args = buildDictionaryUnpackingArgs(
        provider, node.keys, node.values, source_ref
    )

    result = ExpressionFunctionCall(
        function=ExpressionFunctionCreation(
            function_ref=ExpressionFunctionRef(
                function_body=getDictUnpackingHelper(), source_ref=source_ref
            ),
            defaults=(),
            kw_defaults=None,
            annotations=None,
            source_ref=source_ref,
        ),
        values=(ExpressionMakeTuple(helper_args, source_ref),),
        source_ref=source_ref,
    )

    result.setCompatibleSourceReference(helper_args[-1].getCompatibleSourceReference())

    return result
[ "kay.hayen@gmail.com" ]
kay.hayen@gmail.com
90156dab1baf2565c6bd64e1d84f670ace47b874
c38d3f4bd2b275475430c2a0d64f4de3c3bbe0fb
/python-server/testAnalystics.py
72f2cb8eccdfdcb281c07ce3241b521df1f397f2
[]
no_license
yassinekhaldi00/stage-2019-SpeechRecognition
d27df7bba1a7eb027dc39b6efbf3877c0847b317
741fc175191616a363738d012f5af9312450cbfb
refs/heads/master
2020-06-27T02:07:39.704214
2019-08-19T10:27:43
2019-08-19T10:27:43
199,817,138
0
1
null
null
null
null
UTF-8
Python
false
false
1,509
py
import nltk
import matplotlib.pyplot as plt
import arabic_reshaper
from bidi.algorithm import get_display
import re
import json
from flask import *
import uuid


class analyse():
    def __init__(self, text):
        self.text = text
        # Action definitions (keywords plus regex-extracted properties) live in actions.json.
        self.actions = json.load(open('actions.json', 'r', encoding='utf-8'))
        return super().__init__()

    def onAction(self, n):
        action = self.actions["modules"][n]
        valid = True
        if not action["valid"]:
            # Fill each property by matching its regex against the input text;
            # the action is valid only if every property matched.
            for key in action["properties"]:
                prop = re.search(action["properties"][key]["rex"], self.text)
                if prop:
                    action["properties"][key]["value"] = prop.group()
                else:
                    valid = False
            action["valid"] = valid
        return action

    def takeDecision(self):
        # Pick the first action whose keywords appear in the text.
        for i in range(0, len(self.actions["modules"])):
            for j in self.actions["modules"][i]["keyWords"]:
                if j in self.text:
                    return self.onAction(i)
        return "command non valid"


if __name__ == "__main__":
    app = Flask(__name__)

    @app.route('/takeDecision', methods=['post'])
    def takeDecision():
        text = request.args.get('text')
        print(text)
        analys = analyse(text)
        print(analys.takeDecision())
        return Response(response=json.dumps(analys.takeDecision()), status=200, mimetype="application/json")

    app.run(host='0.0.0.0', port='5000')
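The shape of actions.json is only implied by the lookups above ("modules", "keyWords", "valid", "properties", each property carrying a "rex" regex and a "value" slot); a hypothetical minimal example consistent with that reading:

import re

# Hypothetical actions.json content inferred from the code above;
# all field values here are illustrative only.
sample_actions = {
    "modules": [
        {
            "keyWords": ["call"],
            "valid": False,
            "properties": {
                "number": {"rex": r"\d+", "value": None}
            }
        }
    ]
}

text = "call 12345"
action = sample_actions["modules"][0]
match = re.search(action["properties"]["number"]["rex"], text)
print(match.group() if match else "no match")  # -> 12345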
[ "yassinekhaldi00@gmail.com" ]
yassinekhaldi00@gmail.com
8709d793445d4fd7c5c435c8fe401b825d7f2fc8
177b9b7c6aeba680d3a59155aa9b659c801edb10
/blog01.py
90d3ef8ff4d9f03b9934996abf9a5092e44a02d3
[]
no_license
janaprasanna/Flask-tutorial
2b414c115058786b24dc7deb97434f35fcf6d4c2
d65d587d3115d4f03c8eae03ea1a3f4b5064bcc5
refs/heads/main
2023-06-18T20:04:53.037595
2021-07-21T09:58:15
2021-07-21T09:58:15
384,620,115
1
0
null
null
null
null
UTF-8
Python
false
false
618
py
from flask import Flask, render_template, url_for

app = Flask(__name__)

posts = [
    {
        'author': 'jana',
        'title': 'Blog post 1',
        'content': 'My first blog post',
        'date': '20th june 2021'
    },
    {
        'author': 'prasanna',
        'title': 'Blog post 2',
        'content': 'My second blog post',
        'date': '19th june 2021'
    }
]


@app.route("/")
@app.route("/home")
def home():
    return render_template('home.html', posts=posts)


@app.route("/about")
def about():
    return render_template('about.html', title='about')


if __name__ == '__main__':
    app.run(debug=True)
[ "emailtojana27@gmail.com" ]
emailtojana27@gmail.com
247c2669458198436559ecad64bb3fc6620144d1
af669dbef653dd69474f4c0836582bf14262c80f
/price-test/frame/lib/controllib/pickcase.py
33dfd072cc050da25cfde06bc968b3d3f02c305e
[]
no_license
siki320/fishtest
7a3f91639d8d4cee624adc1d4d05563611b435e9
7c3f024192e1c48214b53bc45105bdf9e746a013
refs/heads/master
2021-01-19T21:58:36.807126
2017-04-19T09:56:37
2017-04-19T09:56:37
88,729,049
0
0
null
null
null
null
GB18030
Python
false
false
10,819
py
# -*- coding: GB18030 -*-
'''
Created on Nov 9, 2011

@author: caiyifeng<caiyifeng>
@summary: base class for picking up and loading test cases
'''
import os
import re
import inspect
import imp
import traceback
import sys
from new import instancemethod

from frame.lib.controllib.case import DtestCase, CasePriority
from frame.lib.commonlib.dlog import dlog
from frame.lib.commonlib.relpath import get_relpath
from frame.lib.controllib.result import CaseResult
from frame.lib.commonlib.utils import get_py_owner


class CasePicker(object):
    def __init__(self):
        self.ignore_list = []   # filter list
        self.tags = []          # list of tag groups; within a group: OR, between groups: AND
        self.testname = "."     # regex matched against test method names
        self.prior = CasePriority.DEFAULT
        self.author = ""

    def read_ignore_list(self, filepath):
        f = open(filepath)
        for line in f:
            line = line.rstrip()
            if line != '':
                # Each line may be an absolute path or a path relative to the launch directory.
                abs_path = os.path.abspath(line)
                self.ignore_list.append(abs_path)
        f.close()

    def parse_tag(self, tag_str):
        '''@summary: parse the tag option'''
        list_ = tag_str.split(",")
        self.tags.append([s.strip() for s in list_])

    def set_testname(self, testname):
        self.testname = testname

    def set_priority(self, str_priority):
        self.prior = CasePriority.str2value(str_priority)

    def set_author(self, author):
        self.author = author

    def pickcases(self, args, suite, result):
        '''
        @summary: add the DtestCases found in args to suite, recursing into subdirectories
        @param suite: a DtestSuite object
        @param result: a DResult object used to record outcomes of the picking process
        '''
        for arg in args:
            spec_tests = []
            if arg.find(":") >= 0:
                # Specific test functions can be named after ":", comma-separated.
                arg, spec_test = arg.split(":")
                spec_tests = spec_test.split(",")
            arg = os.path.abspath(arg)
            relarg = self._get_relpath(arg)
            base = os.path.basename(arg)
            modulename, ext = os.path.splitext(base)
            if os.path.isfile(arg) and ext == ".py" and modulename != "__init__":
                # A python file, and not __init__.py.
                # Filter out ignored cases.
                ignored = False
                for i in self.ignore_list:
                    if arg.startswith(i):
                        # The case path matches the prefix of an ignore line.
                        ignored = True
                        break
                if ignored:
                    dlog.debug("'%s' starts with ignore line. Ignore it", relarg)
                    result.case_result_dict[relarg] = CaseResult(result.SKIP, "Ignored")
                    continue

                # Only load cases owned by the specified author.
                if self.author != "":
                    if get_py_owner(arg) != self.author:
                        continue

                # Import the module.
                try:
                    amod, modulename = self._myimport(arg, modulename)
                except Exception:
                    # Not an importable python module; skip it.
                    dlog.error("Can't import module : %s. Skip it", relarg, exc_info=True)
                    result.case_result_dict[relarg] = CaseResult(result.FAILED, "Syntax_Error",
                                                                 exc_stack=traceback.format_exc())
                    continue

                # Find the test class in the module.
                test_cls_flag = False
                for item in amod.__dict__.keys():
                    # Must be a class, a subclass of DtestCase, and defined in this module.
                    if inspect.isclass(amod.__dict__[item]) and \
                       issubclass(amod.__dict__[item], DtestCase) and \
                       amod.__dict__[item].__dict__['__module__'] == modulename:
                        modclass = amod.__dict__[item]
                        test_cls_flag = True
                        break
                # No test class was found in the given module file.
                if not test_cls_flag:
                    dlog.warning("No test class found in '%s'. Ignore this file", modulename + '.py')
                    result.case_result_dict[relarg] = CaseResult(result.SKIP, "No_Case")
                    continue

                # Instantiate the case class.
                try:
                    acase = modclass()
                    acase.set_filepath(arg)
                    acase.set_desc(amod.__doc__)
                except Exception:
                    # Instantiation failed.
                    dlog.error("Can't instance class : %s. Skip it", relarg, exc_info=True)
                    result.case_result_dict[relarg] = CaseResult(result.FAILED, "Syntax_Error",
                                                                 exc_stack=traceback.format_exc())
                    continue

                if acase.priority & self.prior == 0:
                    dlog.debug("'%s' is not picked because of priority", relarg)
                    continue

                # Filter out cases that are not enabled.
                if not acase.enable:
                    dlog.debug("'%s' is not enabled. Ignore it", relarg)
                    result.case_result_dict[relarg] = CaseResult(result.SKIP, "Disable")
                    continue

                # Filter out cases that do not match the tags.
                if not self._check_meet_tag(acase.tags):
                    dlog.debug("'%s' doesn't meet tags. Ignore it", relarg)
                    result.case_result_dict[relarg] = CaseResult(result.SKIP, "Tag_Filter")
                    continue

                # Collect all testXXX methods of the case.
                tests = []
                prior_que = acase.tests_bvt + acase.tests_high + acase.tests_low
                for propname in dir(acase):
                    if propname.startswith("test"):
                        # An attribute whose name starts with "test".
                        prop = getattr(acase, propname)
                        if not isinstance(prop, instancemethod):
                            # prop is not an instance method.
                            continue
                        if not prior_que:
                            # No queue names any test; handle it the old way.
                            tests.append(prop)
                            continue
                        if propname not in prior_que:
                            # A test not recorded in any queue goes to the default slow queue.
                            acase.tests_low.append(propname)
                        if propname in acase.tests_bvt:
                            if self.prior & CasePriority.BVT:
                                tests.append(prop)
                            continue
                        if propname in acase.tests_high:
                            if self.prior & CasePriority.HIGH:
                                tests.append(prop)
                            continue
                        if propname in acase.tests_low:
                            if self.prior & CasePriority.LOW:
                                tests.append(prop)
                            continue

                # Does a case without priority queues have any testXXX method at all?
                if not prior_que and not tests:
                    dlog.error("Class '%s' has no testXXX method. Skip it", relarg)
                    result.case_result_dict[relarg] = CaseResult(result.FAILED, "No_Test_Method")
                    continue

                # Select tests by the testname regex.
                f_tests = [t for t in tests if re.search(self.testname, t.__name__)]

                if len(spec_tests) > 0:
                    # A test list was given; run only the listed tests.
                    f_tests = []
                    for testname in spec_tests:
                        if not testname.startswith("test"):
                            dlog.error("testname '%s' does not start with test", testname)
                            continue
                        prop = getattr(acase, testname)
                        if not isinstance(prop, instancemethod):
                            dlog.error("testname '%s' not a instancemethod", testname)
                            continue
                        f_tests.append(prop)

                if not f_tests:
                    dlog.debug("Class '%s' has no matching testXXX method. Skip it", relarg)
                    result.case_result_dict[relarg] = CaseResult(result.SKIP, "Testname_Filter")
                    continue

                # Add the case to the suite.
                suite.addcase(acase)
                # Add the case's test methods to the suite.
                suite.add_test_dict(acase, f_tests)

            elif os.path.isdir(arg):
                # A subdirectory; recurse into it.
                subargs = os.listdir(arg)
                subargs.sort()  # keep the execution order of cases fixed
                self.pickcases([arg + '/' + subarg for subarg in subargs], suite, result)

            elif not os.path.exists(arg):
                # The file does not exist.
                dlog.error("Case doesn't exist : %s. Skip it", relarg)
                result.case_result_dict[relarg] = CaseResult(result.FAILED, "Not_Exist")

    def _get_relpath(self, abspath):
        return get_relpath(abspath)

    def _myimport(self, abspath, modulename):
        '''
        @summary: import a case module
        @param abspath: absolute path of the case
        @param modulename: module name
        '''
        fp, pathname, description = imp.find_module(modulename, [os.path.dirname(abspath)])
        i = 0
        while sys.modules.has_key(modulename):
            i += 1
            modulename += '_%d' % i
        m = imp.load_module(modulename, fp, pathname, description)
        fp.close()
        return m, modulename

    def _check_meet_tag(self, case_tags):
        '''@summary: check whether case_tags satisfies the configured tags'''
        # No tags specified: run all cases.
        if not self.tags:
            return True
        case_tags_set = set(case_tags)
        for tag in self.tags:
            # Every tag group must intersect case_tags (groups are AND-ed together).
            tag_set = set(tag)
            if not (tag_set & case_tags_set):
                # No intersection with this group: requirement not met.
                return False
        return True
[ "lisiqi_i@didichuxing.com" ]
lisiqi_i@didichuxing.com
18783951fb6c722cfc6b04ead8cb84df13c3d573
013debf76eb83e6a455214912f83e19287245f83
/MySv/OneWeb/models.py
8bde53c2ef1a48a2c2c5b65ba2f4386b9223a708
[]
no_license
JainPin/WorkWeb
34f73ab261a2de4018b4c5b5c161bc76dff5b22e
4949395ff26fb07b2721d4d5cc42202e0541423d
refs/heads/master
2020-03-21T01:03:10.728980
2018-06-19T18:27:28
2018-06-19T18:27:28
137,921,881
0
0
null
null
null
null
UTF-8
Python
false
false
531
py
from django.db import models


# Create your models here.
class OneWeb(models.Model):
    title = models.CharField(max_length=100)
    content = models.TextField(blank=True)
    photo = models.URLField(blank=True)
    location = models.CharField(max_length=100)
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.title


class Note(models.Model):
    title = models.CharField(max_length=100)
    content = models.TextField(blank=True)

    def __str__(self):
        return self.title
[ "as47288679@gmail.com" ]
as47288679@gmail.com
812dac2b93d0efd7b1fc7e1eb07da70e8fc308e7
a01c1f6a1735edbad2b105107151d158883320dd
/todoapp/tests.py
12f883c868379c25fc22bed78010fa9fff749caa
[]
no_license
nvnvenki/Todo
486a68215f689984d01ab40f3ac485acc16ba993
8fd3ec31414d88007a4fd0adeda27506c75daee1
refs/heads/master
2020-03-30T13:35:48.381339
2014-01-27T07:29:11
2014-01-27T07:29:11
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,140
py
from django.test.client import Client
import json
from django.test.testcases import TestCase


# Create your tests here.
class TestTodo(TestCase):
    def setUp(self):
        self.client = Client()

    def test_get_todos_list(self):
        response = self.client.get("http://127.0.0.1:8000/todos")
        self.assertEqual(response.status_code, 200)

    def test_post_todos_list(self):
        response_post = self.client.post("http://127.0.0.1:8000/todos",
                                         json.dumps({'task_name': 'task', 'time': '1:00'}), "text/json")
        response_get = self.client.get("http://127.0.0.1:8000/todos")
        print response_get
        self.assertEqual(response_get.status_code, 200)

    def test_delete_todo(self):
        response_post1 = self.client.post("http://127.0.0.1:8000/todos",
                                          json.dumps({'task_name': 'task1', 'time': '1:00'}), "text/json")
        # response_delete = self.client.delete("http://127.0.0.1:8000/todos", json.dumps({'task_name':'task'}),"text/json")
        response_delete = self.client.delete("http://127.0.0.1:8000/todos",
                                             json.dumps({'task_name': 'task1'}), "text/json")
        print response_delete
[ "nvnvenki@gmail.com" ]
nvnvenki@gmail.com
dc7cd9af950d48d14e18dea90cf103358effa539
19f3e86df623613a12b8bca7f530eef157c92e86
/github_zen.py
4fd093c54eef7aa561ba81d25e040449767b34f4
[ "MIT" ]
permissive
spaceofmiah/learn_ci_args
919e268de03f016632e98d177e6dd2a14a184efe
2fababa88b1cc327d40add0bb455cdfa08abc16a
refs/heads/main
2023-04-20T12:28:47.162490
2021-05-14T11:59:13
2021-05-14T11:59:13
366,903,488
2
0
null
null
null
null
UTF-8
Python
false
false
2,089
py
""" Simplified approach describing how command line argument are implement in python with argparse library. =================== python version: 3.8 code lesson url: Project Description: Project interacts with github's API and retrieve a provided number of github zen. It can also save the zen's retrieved in a file if stated. =================== """ import argparse import requests import time from datetime import datetime # Instantiate ArgumentParser and provide it with program name & # description. To be shown when --help is called on program module parser = argparse.ArgumentParser( prog="GitZen", description="Zen of Git" ) parser.add_argument( "-n", "--num", type=int, default=1, choices=[ value for value in range(1, 6) ], help= "Defines the number of zen to retrieve. \ Max of 5 and Min of 1. Defaults to 1 if flag not used in invocation" ) parser.add_argument( "out", type=str, choices=[ "log", "txt", ], help = "Defines where zen would be rendered. (required)" ) # Retrieve all values supplied to arguments on program # invocation args = parser.parse_args() zens_to_retrieve = args.num output = args.out # Create a different file name on every run with datetime lib. & # replace all spaces on datetime by underscores and colons # by hyphens. This is so file name can meet supported naming # format. date_time_list = datetime.now().strftime("%c").split(" ") time_list = "_".join(date_time_list).split(":") file_name = "-".join(time_list) file_name_and_extension = f"{file_name}.{output}" # Zen retrieval engine if output != "log": file = open(f"{file_name_and_extension}", "w") while zens_to_retrieve > 0: time.sleep(20) response = requests.get("https://api.github.com/zen") file.write(f"> {response.text}\n") zens_to_retrieve -= 1 file.close() else: while zens_to_retrieve > 0: time.sleep(20) response = requests.get("https://api.github.com/zen") print(f"> {response.text}") zens_to_retrieve -= 1
[ "geek_200@outlook.com" ]
geek_200@outlook.com
d3fa450a89e67993256decec13a1c2d9ab54cb65
d0cb57a09037749c54d45a2177e088ef76f283e3
/gmmclassificationtiedcov.py
338069e4807472ba5f179f573423f8412725aed5
[]
no_license
thebouda/MLProject
b16b71363b7fba6b57480a66fae9a62e14fbf92b
530b67eed1f38df561a5d6356e1b157dc5f916b3
refs/heads/main
2023-05-31T12:14:39.136724
2021-07-06T08:20:57
2021-07-06T08:20:57
363,367,242
0
0
null
null
null
null
UTF-8
Python
false
false
4,747
py
import numpy
import scipy.optimize
# from GMM_load import load_gmm
import matplotlib.pyplot as plt
# import sklearn.datasets as skl
from gmmclassificationgeneralfunctions import gmmValues, computeClassifications, mcol, computeNewCovar, SplitGMM, logpdf_GAU_ND, logpdf_GMM, Estep


# Similar to the EM algorithm, but with a different constraint on the covariance.
def TiedEMalgorithm(X, gmm):
    # flag is used to exit the loop when LLR2 - LLR1 < threshold, here 1e-6
    flag = True
    while (flag):
        (logdens, S) = logpdf_GMM(X, gmm)
        LLR1 = numpy.sum(logdens) / X.shape[1]

        # ------ E-step ----------
        posterior = Estep(logdens, S)

        # ------ M-step ----------
        (w, mu, cov) = TiedMstep(X, S, posterior)
        for g in range(len(gmm)):
            # Update the model parameters that are in gmm
            gmm[g] = (w[g], mu[:, g].reshape((mu.shape[0], 1)), cov[g])
        # Compute the new log densities and the new sub-class conditional densities
        (logdens, S) = logpdf_GMM(X, gmm)
        LLR2 = numpy.sum(logdens) / X.shape[1]
        if (LLR2 - LLR1 < 10 ** (-6)):
            flag = False
    return gmm


def TiedMstep(X, S, posterior):
    psi = 0.01
    Zg = numpy.sum(posterior, axis=1)  # shape (G,)

    Fg = numpy.zeros((X.shape[0], S.shape[0]))  # shape (D, G)
    for g in range(S.shape[0]):
        tempSum = numpy.zeros(X.shape[0])
        for i in range(X.shape[1]):
            tempSum += posterior[g, i] * X[:, i]
        Fg[:, g] = tempSum

    Sg = numpy.zeros((S.shape[0], X.shape[0], X.shape[0]))
    for g in range(S.shape[0]):
        tempSum = numpy.zeros((X.shape[0], X.shape[0]))
        for i in range(X.shape[1]):
            tempSum += posterior[g, i] * numpy.dot(X[:, i].reshape((X.shape[0], 1)),
                                                   X[:, i].reshape((1, X.shape[0])))
        Sg[g] = tempSum

    mu = Fg / Zg
    prodmu = numpy.zeros((S.shape[0], X.shape[0], X.shape[0]))
    for g in range(S.shape[0]):
        prodmu[g] = numpy.dot(mu[:, g].reshape((X.shape[0], 1)),
                              mu[:, g].reshape((1, X.shape[0])))

    cov = Sg / Zg.reshape((Zg.size, 1, 1)) - prodmu

    # Tie the covariances: Zg-weighted sum over components, shared by all of them.
    tsum = numpy.zeros((cov.shape[1], cov.shape[2]))
    for g in range(S.shape[0]):
        tsum += Zg[g] * cov[g]
    for g in range(S.shape[0]):
        cov[g] = 1 / X.shape[1] * tsum
        # Constrain the eigenvalues to be at least psi.
        U, s, Vh = numpy.linalg.svd(cov[g])
        s[s < psi] = psi
        cov[g] = numpy.dot(U, mcol(s) * U.T)

    w = Zg / numpy.sum(Zg)
    return (w, mu, cov)


def LBTiedCov(x, gmm, alpha, iterations, minEigen):
    psi = minEigen
    gmmDiag = [(gmm[0][0], gmm[0][1], computeNewCovar(gmm[0][2], psi))]
    for i in range(iterations):
        gmmDiag = SplitGMM(gmmDiag, alpha)
        gmmDiag = TiedEMalgorithm(x, gmmDiag)
    return gmmDiag


def CovMatrixTied(gmmDiag, sigma):
    gmm = []
    for i in range(len(gmmDiag)):
        gmm.append((gmmDiag[i][0], gmmDiag[i][1], sigma))
    return gmm


def KFoldValidationTiedGMMCovariance(D, L, alpha, minEigen, gmms):
    K = 8
    N = int(D.shape[1] / K)
    numpy.random.seed(0)
    indexes = numpy.random.permutation(D.shape[1])
    errorRates = []
    for i in range(K):
        idxTest = indexes[i * N:(i + 1) * N]
        # Build the training indices on both sides of the test fold.
        if i > 0:
            idxTrainLeft = indexes[0:i * N]
        if (i + 1) < K:
            idxTrainRight = indexes[(i + 1) * N:]
        if i == 0:
            idxTrain = idxTrainRight
        elif i == K - 1:
            idxTrain = idxTrainLeft
        else:
            idxTrain = numpy.hstack([idxTrainLeft, idxTrainRight])
        DTR = D[:, idxTrain]
        LTR = L[idxTrain]
        DTE = D[:, idxTest]
        LTE = L[idxTest]
        # Separate by class.
        DTR0 = DTR[:, LTR == 0]
        DTR1 = DTR[:, LTR == 1]
        # Here the classification starts.
        # Dtotal=[DTR0,DTR1,DTR2,DTR3,DTR4, DTR5, DTR6, DTR7, DTR8, DTR9, DTR10]
        Dtotal = [DTR0, DTR1]
        errors = computeClassifications(gmms, Dtotal, LBTiedCov, minEigen, alpha, DTE, LTE)
        errorRates.append(errors)
        print(errorRates)
    npErrors = numpy.array(errorRates)
    # mean precision for 1 GMM component
    pre1 = npErrors[:, 0][:, 1].sum() / len(npErrors[:, 0][:, 1])
    # mean precision for 2 GMM components
    pre2 = npErrors[:, 1][:, 1].sum() / len(npErrors[:, 0][:, 1])
    # mean precision for 4 GMM components
    pre4 = npErrors[:, 2][:, 1].sum() / len(npErrors[:, 0][:, 1])
    # mean precision for 8 GMM components
    pre8 = npErrors[:, 3][:, 1].sum() / len(npErrors[:, 0][:, 1])
    # mean precision for 16 GMM components
    pre16 = npErrors[:, 4][:, 1].sum() / len(npErrors[:, 0][:, 1])
    print("error full cov:\n")
    print("gmm1: " + str(pre1))
    print("gmm2: " + str(pre2))
    print("gmm4: " + str(pre4))
    print("gmm8: " + str(pre8))
    print("gmm16: " + str(pre16))
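The tying step inside TiedMstep can be seen in isolation with a self-contained numpy sketch (synthetic shapes and values, independent of the helper modules imported above):

import numpy as np

# Synthetic shapes: G components, D features, N samples (all values illustrative).
G, D, N = 3, 4, 100
cov = np.stack([np.eye(D) * (g + 1) for g in range(G)])   # per-component covariances
Zg = np.array([20.0, 30.0, 50.0])                         # responsibility masses, sum to N

# The tied covariance is the Zg-weighted sum of the per-component matrices
# divided by N, mirroring the `tsum` loop inside TiedMstep above.
tied = sum(Zg[g] * cov[g] for g in range(G)) / N
cov_tied = np.repeat(tied[None, :, :], G, axis=0)
print(cov_tied.shape)  # (3, 4, 4) -- every component now shares one covariance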
[ "s288347@studenti.polito.it" ]
s288347@studenti.polito.it
dc0936eb52f68a8cb9685376aa3a3d2da1e4f0ab
b4eed53837564e453b8e46253cfd4384f2eef874
/spark-swat/scripts/grep_opencl_stats.py
68d0db762d5119dccd3a2a17437215135cce6bc3
[ "BSD-2-Clause" ]
permissive
omario61/Execution-Time-Prediction
38a7eb6938018c25384bc0d08f4ad9004c1400df
39318206ba4b6114ee9fb107f2d825828d3462b2
refs/heads/master
2021-01-22T05:33:42.179632
2017-02-13T23:27:22
2017-02-13T23:27:22
81,673,002
0
0
null
null
null
null
UTF-8
Python
false
false
2,256
py
#!/usr/bin/python

import os
import sys


class OpenCLEventInfo:
    def __init__(self, tokens):
        assert tokens[5] == 'total'
        self.total = int(tokens[3])
        timestamp_str = tokens[8]
        self.timestamp = int(timestamp_str[0:len(timestamp_str) - 1])
        self.queued = int(tokens[12])
        self.submitted = int(tokens[17])
        self.run = int(tokens[22])


PADDING = 30


def pad(s):
    assert len(s) <= PADDING
    padding = PADDING - len(s)
    return s + (' ' * padding)


known_labels = ['0: init_write :', '1: init_write :', '2: init_write :',
                '3: init_write :', '4: free_index-in :', '5: run :',
                '6: free_index-out :', '7: heap :', '8: out :', '9: out :']
labels_info = {}
for lbl in known_labels:
    labels_info[lbl] = []

if len(sys.argv) != 2:
    print('usage: grep_opencl_stats.py filename')
    sys.exit(1)

fp = open(sys.argv[1], 'r')
for line in fp:
    tokens = line.split()
    if len(tokens) < 3:
        continue
    lbl = ' '.join(tokens[0:3])
    if lbl in labels_info.keys():
        info = OpenCLEventInfo(tokens)
        labels_info[lbl].append(info)

print(pad('LABEL') + pad('AVG TOTAL') + pad('AVG QUEUED') + pad('AVG SUBMITTED') + pad('AVG RUN'))
nlabels = float(len(known_labels))
sums = [0.0, 0.0, 0.0, 0.0]
for lbl in known_labels:
    infos = labels_info[lbl]
    ninfos = float(len(infos))

    sum_totals = 0.0
    sum_queued = 0.0
    sum_submitted = 0.0
    sum_run = 0.0
    for i in infos:
        sum_totals += i.total
        sum_queued += i.queued
        sum_submitted += i.submitted
        sum_run += i.run

    sums[0] += sum_totals
    sums[1] += sum_queued
    sums[2] += sum_submitted
    sums[3] += sum_run

    print(pad(lbl) + pad(str(sum_totals / ninfos)) + pad(str(sum_queued / ninfos)) +
          pad(str(sum_submitted / ninfos)) + pad(str(sum_run / ninfos)))

print(pad('') + pad('==========') + pad('==========') + pad('==========') + pad('=========='))
print(pad('') + pad(str(sums[0] / nlabels)) + pad(str(sums[1] / nlabels)) +
      pad(str(sums[2] / nlabels)) + pad(str(sums[3] / nlabels)))
fp.close()
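OpenCLEventInfo reads a whitespace-tokenized log line at fixed positions (3, 5, 8, 12, 17, 22, with the timestamp's trailing character stripped); the producer's exact wording is not shown in this file, so the filler words in this parsable sample line are placeholders only:

# Hypothetical line satisfying the fixed token positions used above.
line = ("5: run : 120 ns total , at 1000, queued for = 10 f1 f2 f3 f4 "
        "20 g1 g2 g3 g4 30")
info = OpenCLEventInfo(line.split())
print(info.total, info.timestamp, info.queued, info.submitted, info.run)
# -> 120 1000 10 20 30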
[ "omario61@hotmail.com" ]
omario61@hotmail.com
f1a74b539754bd02d6525b30b90d7b64ce883fb6
37a8d6a938589cb8ccb03db3c3c4582eb3c32ca1
/Exercise_2/DVC_2019_exc2/Code_snippet/selection_not_working/dvc_exc2_skeleton_ver2.py
8c26c6010da200b35d29dab42daf34ded59df22c
[]
no_license
neeraj310/Semster_1_DVC
e1754303a630b87b30491577607a75be1ee4e198
d5bc95c99a4b571e746fd3e147d31d23293576a5
refs/heads/master
2021-01-01T21:34:10.051645
2020-02-09T18:03:58
2020-02-09T18:03:58
239,348,166
0
0
null
null
null
null
UTF-8
Python
false
false
8,995
py
import numpy as np
import pandas as pd
from bokeh.plotting import figure, show, output_file, save, curdoc
from bokeh.models import ColumnDataSource, HoverTool, NumeralTickFormatter
from bokeh.models.widgets import Select
from bokeh.layouts import column, row, gridplot
from datetime import datetime as dt
from math import pi, sqrt
import os

# ==================================================================
# Task1: specify necessary drawing components
# ==================================================================
# starting point: drawing a horizontal bar chart
# hint: https://bokeh.pydata.org/en/latest/docs/gallery/bar_intervals.html

# read the data into a dataframe using the absolute path, as specified in the lecture
__file__ = 'ebd_US-AL-101_201801_201801_relMay-2018.txt'
my_absolute_dirpath = os.path.abspath(os.path.dirname(__file__))
filename = my_absolute_dirpath + '/' + __file__
df = pd.read_csv(filename, delimiter='\t')

# Change column names by replacing space with underscore
df.columns = df.columns.str.replace(' ', '_')

# generate Y axis labels as a sorted list (['2018-01-01', '2018-01-02', '2018-01-03', ...])
# using only the "OBSERVATION DATE" column of the data;
# add two missing dates to make it complete, finally sort the labels
ylabel = df[['OBSERVATION_DATE']]
ylabel = np.sort(ylabel.OBSERVATION_DATE.unique())
missing_dates = ['2018-01-06', '2018-01-16']
ylabel = sorted(np.append(ylabel, missing_dates))

# ======== add control selector for selecting birds based on category ==========
# reference: https://bokeh.pydata.org/en/latest/docs/user_guide/interaction/widgets.html
# how to add an "all" category to the selector:
# reference: https://stackoverflow.com/questions/50603077/bokeh-select-widget-does-not-update-plot
category = []
category.append('All')
category.extend(df['CATEGORY'].unique().tolist())

# ===== optionally: =====
# category = []
# category.append('All')
# category.extend(df['CATEGORY'].unique().tolist())

# add the selector; options can be: category, name, locality, protocol, etc.
# reference: https://bokeh.pydata.org/en/latest/docs/user_guide/interaction/widgets.html
select_category = Select(title="Option:", value="All", options=category)


# ==================================================================
# Task2: define the datasource construction function
# ==================================================================
# input: dataframe, either the original one or the updated one after triggering the selector
# output: column data source used for plotting; in our case it serves as the input
#         for the draw_plot function
def datasource_construct(dfnew):
    # sort dfnew based on "OBSERVATION DATE", then reset the index of the sorted dfnew
    dfnew.sort_values(by=['OBSERVATION_DATE']).reset_index(inplace=True)

    # scale the "OBSERVATION COUNT" column to a reasonable number serving as the radius
    # when drawing the circles later; the recommended scaling per item is "sqrt(item)*2"
    dfnew['OBSERVATION_COUNT_SQRT'] = (np.sqrt((dfnew['OBSERVATION_COUNT'])) * 2)
    counts = dfnew['OBSERVATION_COUNT_SQRT'].tolist()

    # set the color corresponding to the observer number
    observers = sorted(dfnew['NUMBER_OBSERVERS'].tolist())  # the "NUMBER OBSERVERS" column as a list
    colorset = ['lightseagreen', 'tomato', 'blueviolet', 'gold']  # can change to any other colors you like
    choices = {1: colorset[0], 2: colorset[1], 3: colorset[2], 13: colorset[3]}  # 1,2,3,13 are the unique observer numbers
    colors = [choices.get(observers[i], "nothing") for i in range(0, len(observers))]

    # convert the TIME OBSERVATION into minutes for drawing the hbar
    dfnew['TIME_OBSERVATION'] = pd.to_timedelta(dfnew['TIME_OBSERVATIONS_STARTED']) / pd.offsets.Minute(1)
    # add the observation ending minutes for drawing the hbar
    dfnew['TIME_OBSERVATIONS_ENDED'] = dfnew['TIME_OBSERVATION'] + dfnew['DURATION_MINUTES']

    data = {'Year': list(dfnew['OBSERVATION_DATE']),
            'Starts': list(dfnew['TIME_OBSERVATION']),
            'Ends': list(dfnew['TIME_OBSERVATIONS_ENDED']),  # ending time of the observation
            'RealStarts': list(dfnew['TIME_OBSERVATIONS_STARTED']),  # "TIME OBSERVATIONS STARTED" column, for the hover tool
            'Duration': list(dfnew['DURATION_MINUTES']),  # "DURATION MINUTES" column, for the hover tool
            'Counts': counts,  # serves as the size of the circle
            'Name': list(dfnew['COMMON_NAME']),  # "COMMON NAME" column, for the hover tool
            'RealCounts': list(dfnew['OBSERVATION_COUNT']),  # original "OBSERVATION COUNT" column, for the hover tool
            'Observers': observers,
            'Colors': colors
            }

    source = ColumnDataSource(data=data)
    return source


# ==================================================================
# Task3: define the update source function
# ==================================================================
# input: fixed signature; the actual input used inside the function is "value",
#        i.e. the currently selected value of the selector
# output: updated data source, serving as the input of the plot function too,
#         whenever the selector is triggered
# how to add an "all" category to the selector:
# reference: https://stackoverflow.com/questions/50603077/bokeh-select-widget-does-not-update-plot
def update_source(attr, old, new):
    if select_category.value == "All":
        df_filter = df.copy()
    else:
        df_filter = df[df['CATEGORY'] == select_category.value]
    # get a new datasource by calling the datasource_construct function
    new_source = datasource_construct(df_filter)
    # update the source
    source.data.update(new_source.data)
    return


# when the selector is triggered, the .on_change function will be called to run
# the corresponding update function
select_category.on_change('value', update_source)


# ===================================================================================================================
# Task4: define the plot function which is called initially and also whenever the selector is triggered
# ===== plot the data based on time (one month of data) and add other visual encodings for interesting information =====
# ===================================================================================================================
# input: column datasource
# output: plot which should contain two main plots, hbar and circle, for the same data but different attributes
# add a proper hover tool only on the circle plot by using the "renderers" property of the HoverTool
# set up the x axis ticks on a 24 hours scale; read the hint below for more information
def draw_plot(source):
    p = figure(x_range=(0, 1440), y_range=ylabel, plot_width=900, plot_height=500)
    # add more properties for customizing your graph
    # p.sizing_mode = 'scale_width'

    p1 = p.hbar(y='Year', right='Ends', left='Starts', fill_color='Colors', height=0.1,
                alpha=0.1, legend='Observers', source=source)
    p2 = p.circle(x='Starts', y='Year', fill_color='Colors', alpha=0.2, radius='Counts', source=source)

    # some more configuration of the plot, like labels, legend, etc.
    p.title.text = 'Overview of birds observation record in one month'
    p.xaxis.formatter = NumeralTickFormatter(format="00:00:00")
    p.yaxis.major_label_orientation = pi / 4
    p.xaxis.axis_label = 'Times in one day (24 hours)'
    p.legend.title = 'Observer Number'

    # Format the x axis as hours and minutes as shown in the example solution:
    # read "NumeralTickFormatter" in https://bokeh.pydata.org/en/latest/docs/user_guide/styling.html
    # https://bokeh.pydata.org/en/latest/docs/reference/models/formatters.html

    # define the hovertool, and make it effective only for the circle plot p2
    hover = HoverTool(renderers=[p2])
    hover.tooltips = [
        ("Observation Starts", "@RealStarts"),
        ("Duration in Minutes", "@Duration"),
        ("Observation Counts", "@RealCounts"),
        ("Bird's Name", "@Name"),
    ]
    p.add_tools(hover)
    return p


# =============================================================================================
# Task5: draw the figure and add a bokeh server for interactive visualization
# =============================================================================================
# call the datasource_construct function to get the source
source = datasource_construct(df)
# call the draw_plot function to create the plots
p = draw_plot(source)

layout = column(p, row(select_category, width=200))

# ======= using the bokeh server to run the application ========
# reference: https://bokeh.pydata.org/en/latest/docs/user_guide/server.html
curdoc().add_root(layout)

# ======= optional: output the result file ========
output_file('dvc_exc2_skeleton.html')
[ "noreply@github.com" ]
neeraj310.noreply@github.com
c3210ac59878f363588249c84467ca2ad459ffde
312a1c23b78f0616df689b8a396ed562c1971421
/cpmsc131lab6.py
c7bb2d87ccc8b6e29bb79cfa8daa1fa59e62a721
[]
no_license
ck0807/python
a40f3b25020385f69b6b02ae14600a7aae5cae20
b1c8aafaa4a247622163a4055f9b578c7bbbfead
refs/heads/main
2023-06-02T16:05:29.010036
2021-06-19T20:04:44
2021-06-19T20:04:44
378,494,432
0
0
null
null
null
null
UTF-8
Python
false
false
786
py
'''
#1
sum = 0
while True:
    for i in range(1,6):
        x = int(input("Please enter the scores: "))
        sum += x
        ave = sum/i
    print("the average of the five scores is:", sum/5)
    reply = input("Do you like test them again?(y/n)")
    if reply == "n":
        break

#2
import random
row = int(input("Please enter the number of row: "))
column = int(input("Please enter the number of column: "))
for i in range(1,row+1):
    for j in range(1,column+1):
        num=random.randint(1,9)
        print(num,end='')
    print('')
'''

#3
for i in range(1, 6):
    for j in range(1, 6):
        if i == 1 or i == 5:
            print('1', end=' ')
        elif j == 1 or j == 5:
            print('1', end=' ')
        else:
            print('0', end=' ')
    print()
[ "noreply@github.com" ]
ck0807.noreply@github.com
e04ba493da8d6671e2b4d23e6fb91a467b971f83
c2e31f5b341d27de01d4ffa32f24602e27e421f1
/main.py
bbaf9b9c5b69263f4da96f5c6a1f13e216d837ed
[]
no_license
Keylogger-hash/VkFishing
8d4a89f8faeedad83c8ea2515f3a3d9a2fffac8e
23c26cc7b35cd57539647c5a154a05bf6a1ed1e8
refs/heads/main
2023-04-18T18:12:29.219758
2021-04-29T18:09:27
2021-04-29T18:09:27
362,905,563
7
0
null
null
null
null
UTF-8
Python
false
false
1,353
py
from flask import Flask from flask import render_template from flask import request from flask import redirect import requests import re app = Flask(__name__) @app.route('/') def index(): return render_template('login.html') @app.route('/login',methods=['GET','POST']) def login(): if request.method == 'POST': email = request.form['email'] password = request.form['pass'] url = f"https://oauth.vk.com/token?grant_type=password&client_id=2274003&client_secret=hHbZxrka2uZ6jB1inYsH&username={email}&password={password}" response = requests.get(url).json() with open('login.txt','a') as f: try: if response["error"] == "invalid_client": f.write(f"Email:{email}|Password:{password}|Status:error") return redirect('/') elif response["error"] == "need_validation": f.write(f"Email:{email}|Password:{password}|Status:need_validation") return redirect('/') except KeyError: access_token = response["access_token"] user_id = response["user_id"] f.write(f"Email:{email}|Password:{password}|access_token:{access_token}|user_id:{user_id}") return redirect('/') if __name__=="__main__": app.run(debug=True)
[ "pvvolo2200@gmail.com" ]
pvvolo2200@gmail.com
dd767e005486377c3b9a314c8f8fdc130015c866
652e6171022bb844102e191e9459e73ff2d7901b
/nuitka/nodes/BuiltinAllNodes.py
879ac9b2ed0dfff730c42fceb35bd3ec876d1869
[ "LicenseRef-scancode-warranty-disclaimer", "Apache-2.0" ]
permissive
pombredanne/Nuitka
e07ee1ba2c027c25e4feebc9751bbb0c1cb338b1
02e8d59a275cd7fe482cbc8100e753ff5abe39d7
refs/heads/develop
2022-03-16T23:55:49.295972
2022-02-20T14:28:23
2022-02-20T14:28:23
69,127,861
0
0
null
2016-09-24T21:10:20
2016-09-24T21:10:20
null
UTF-8
Python
false
false
3,823
py
#     Copyright 2021, Batakrishna Sahu, mailto:<Batakrishna.Sahu@suiit.ac.in>
#
#     Part of "Nuitka", an optimizing Python compiler that is compatible and
#     integrates with CPython, but also works on its own.
#
#     Licensed under the Apache License, Version 2.0 (the "License");
#     you may not use this file except in compliance with the License.
#     You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#     Unless required by applicable law or agreed to in writing, software
#     distributed under the License is distributed on an "AS IS" BASIS,
#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#     See the License for the specific language governing permissions and
#     limitations under the License.
#
""" Node for the calls to the 'all' built-in.
"""

from nuitka.specs import BuiltinParameterSpecs

from .ExpressionBases import ExpressionBuiltinSingleArgBase
from .ExpressionShapeMixins import ExpressionBoolShapeExactMixin
from .NodeMakingHelpers import (
    makeConstantReplacementNode,
    makeRaiseTypeErrorExceptionReplacementFromTemplateAndValue,
    wrapExpressionWithNodeSideEffects,
)
from .shapes.BuiltinTypeShapes import tshape_str, tshape_unicode


class ExpressionBuiltinAll(
    ExpressionBoolShapeExactMixin, ExpressionBuiltinSingleArgBase
):
    """Builtin All Node class.

    Args:
        ExpressionBase: 'all - expression'

    Returns:
        Node that represents built-in 'all' call.
    """

    kind = "EXPRESSION_BUILTIN_ALL"

    builtin_spec = BuiltinParameterSpecs.builtin_all_spec

    def computeExpression(self, trace_collection):
        value = self.subnode_value
        shape = value.getTypeShape()

        if shape.hasShapeSlotIter() is False:
            # An exception is raised.
            trace_collection.onExceptionRaiseExit(BaseException)

            return makeRaiseTypeErrorExceptionReplacementFromTemplateAndValue(
                template="'%s' object is not iterable",
                operation="all",
                original_node=value,
                value_node=value,
            )

        if shape in (tshape_str, tshape_unicode):
            return (
                wrapExpressionWithNodeSideEffects(
                    new_node=makeConstantReplacementNode(
                        constant=True, node=self, user_provided=False
                    ),
                    old_node=value,
                ),
                "new_constant",
                "Predicted truth value of built-in 'all' string type argument",
            )

        iteration_handle = value.getIterationHandle()

        if iteration_handle is not None:
            all_true = iteration_handle.getAllElementTruthValue()

            if all_true is not None:
                result = wrapExpressionWithNodeSideEffects(
                    new_node=makeConstantReplacementNode(
                        constant=all_true, node=self, user_provided=False
                    ),
                    old_node=value,
                )

                return (
                    result,
                    "new_constant",
                    "Predicted truth value of built-in 'all' argument",
                )

        self.onContentEscapes(trace_collection)

        # All code could be run, note that.
        trace_collection.onControlFlowEscape(self)

        # All exception may be raised.
        trace_collection.onExceptionRaiseExit(BaseException)

        return self, None, None

    def mayRaiseException(self, exception_type):
        """returns boolean True if try/except/finally is needed else False"""
        value = self.subnode_value

        if value.mayRaiseException(exception_type):
            return True

        return not value.getTypeShape().hasShapeSlotIter()
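The string special case in computeExpression rests on a plain-Python fact that is easy to check directly: iterating a str yields one-character strings, which are always truthy, and all() of an empty iterable is vacuously True, so all() over any str is True:

# Why folding all(<str>) to True is always safe:
print(all("abc"))    # True -- each element is a non-empty 1-char string
print(all(""))       # True -- vacuously true for the empty string
print(all("a\0b"))   # True -- even "\0" is a 1-char string, hence truthy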
[ "kay.hayen@gmail.com" ]
kay.hayen@gmail.com
0a0e3f0350453d5a1cdac8e2de2b602591ebbab1
154dce21c0d9df7807e859d9ee04048582501d67
/nqrobo/test/test.py
7a3ed5127f90064732466ca7638b51ca2e4b8ab1
[]
no_license
louischoi0/nqrobo
33ad1ee13d0c5b894c4dba7a2dc805f225ca0a38
b514c60dde6e87d6df08ba3ff18fc2f65c682cab
refs/heads/master
2020-05-23T13:53:43.776232
2019-05-15T09:24:57
2019-05-15T09:24:57
186,788,162
0
0
null
null
null
null
UTF-8
Python
false
false
1,764
py
import pandas as pd
import copy
import numpy as np
from nqrobo.layers.meta import meta
import nqrobo.callback.functions as cb
import sys

from nqrobo.obj.objects import ibkBasicObject
from nqrobo.loader.class_builder import classBuilder
from nqrobo.loader.poolsql import poolSql, poolTsSql
from nqrobo.layers.allocator import baseAllocator
from nqrobo.layers.screener import baseScreener


def test_num_selection_dict(bs):
    bs.add_fund_types("dstock", "stock", "bond", "fstock", "dbond")
    bs.set_num_selection({"dstock": 2, "stock": 1, "bond": 2, "dbond": 1})

    assert (bs.get_type_num_selection("stock") == 1)
    assert (bs.get_type_num_selection("dstock") == 2)
    assert (bs.get_type_num_selection("fstock") == 1)
    assert (bs.get_type_num_selection("dbond") == 1)
    assert (bs.get_type_num_selection("bond") == 2)


def test_scr(bs, input_dict):
    bs.add_filter("stock", {"type": "rvn", "thres": 0.4, "delta": 30, "eval": "rel"})
    bs.add_filter("dstock", {"type": "rvn", "thres": 0.4, "delta": 30, "eval": "rel"})
    bs.add_filter("bond", {"type": "rvn", "thres": 0.4, "delta": 30, "eval": "rel"})

    res = bs.run(**input_dict)
    print(res["fund_pool"])
    print(res["fund_pool"].index.size)
    return res


def test_alloc(allocator, idict):
    allocator.add_type_forms("1104", "stock", "dstock", "bond", "bond")
    allocator.set_default_form("bond", "stock", "stock", "dstock")
    df = allocator.run(**idict)
    print(len(list(df["fund_sets"])))


def test_obj():
    input_dict = {
        "target_id": "1204",
        "robo_ids": ["1104", "1204", "1304", "1404"]
    }
    obj = ibkBasicObject()
    idict = obj.run_id("1104", **input_dict)
    obj.test()


if __name__ == "__main__":
    test_obj()
[ "yerang040231@gamil.com" ]
yerang040231@gamil.com
c0a653b46b68bf77eb2a92bd994bbad4a0296ccf
c1c34824c2c625d849844b7855a74f2d9d1e1a18
/Read_Syste.py
6fa84c06a4e400ca960d3e82813a7dcebe653238
[]
no_license
pgris/Compare_SN
27f5d2a689e9b707e5f4743d1894c0a8a86fe9cd
154c0bc0eef7e48f601cb7159e3020dea1facd2b
refs/heads/master
2021-01-20T01:45:36.205750
2017-05-03T12:26:02
2017-05-03T12:26:02
89,324,633
0
0
null
null
null
null
UTF-8
Python
false
false
6,480
py
from astropy.table import vstack, Table
from astropy.io import ascii
import matplotlib.pyplot as plt
import numpy as np


def Plot_Filters(sela, what_x='', what_y=[], ratio_x=False, ratio_y=False, legx='', legy='', prefixa='LSST::', title=''):
    fige, axe = plt.subplots(ncols=2, nrows=3, figsize=(14, 10))
    fige.suptitle(title)
    for j, band in enumerate(['u', 'g', 'r', 'i', 'z', 'y']):
        selac = sela[np.where(np.logical_and(sela['filter'] == prefixa + band,
                                             sela['e_per_sec'] / sela['e_per_sec_err'] >= 5.))]
        if j < 2:
            k = 0
        if j >= 2 and j < 4:
            k = 1
        if j >= 4:
            k = 2

        if not ratio_x:
            if not ratio_y:
                drawx = selac[what_x]
                drawy_1 = selac[what_y[0]] - selac[what_y[1]]
                drawy_2 = selac[what_y[0]] - selac[what_y[2]]
            else:
                drawx = selac[what_x]
                if what_y[1] != 'filtSkyBrightness':
                    drawy_1 = selac[what_y[1]] / selac[what_y[0]]
                    drawy_2 = selac[what_y[2]] / selac[what_y[0]]
                else:
                    drawy_1 = 0.5 * (selac[what_y[1]] - 21.) / selac[what_y[0]]
                    drawy_2 = 0.5 * (selac[what_y[2]] - 21.) / selac[what_y[0]]
        else:
            if not ratio_y:
                axe[k][j % 2].plot(selac[what_x], selac[what_y[0]] - selac[what_y[1]], 'k.')
                axe[k][j % 2].plot(selac[what_x], selac[what_y[0]] - selac[what_y[2]], 'r.')
            else:
                drawx = selac[what_x] / selac[what_y[0]]
                drawy_1 = selac[what_x] / selac[what_y[0]] - selac[what_x] / selac[what_y[1]]
                drawy_2 = selac[what_x] / selac[what_y[0]] - selac[what_x] / selac[what_y[2]]

        axe[k][j % 2].plot(drawx, drawy_1, 'k.')
        axe[k][j % 2].plot(drawx, drawy_2, 'r.')
        axe[k][j % 2].set_xlabel(r'' + legx, {'fontsize': 12.})
        axe[k][j % 2].set_ylabel(r'' + legy, {'fontsize': 12.})
        axe[k][j % 2].set_title(band, loc='left')


def Plot_Multiple(sela, what_x='', what_y=[], ratio_x=False, ratio_y=False, legx='', legy='', prefixa='LSST::', title=''):
    fige, axe = plt.subplots(ncols=2, nrows=3, figsize=(14, 10))
    colors = ['b', 'g', 'r', 'm', 'c']
    fige.suptitle(title)
    for j, band in enumerate(['u', 'g', 'r', 'i', 'z', 'y']):
        selac = sela[np.where(np.logical_and(sela['filter'] == prefixa + band,
                                             sela['e_per_sec'] / sela['e_per_sec_err'] >= 5.))]
        if j < 2:
            k = 0
        if j >= 2 and j < 4:
            k = 1
        if j >= 4:
            k = 2
        for i in range(1, 6, 1):
            drawx = selac[what_x]
            drawy_1 = selac[what_y[0]] - selac[what_y[1] + '_' + str(i)]
            drawy_2 = selac[what_y[0]] - selac[what_y[2] + '_' + str(i)]
            axe[k][j % 2].plot(drawx, drawy_1, colors[i - 1] + '*', label='$m_{sky}-' + str(0.1 * i) + '$')
            axe[k][j % 2].plot(drawx, drawy_2, colors[i - 1] + 'o', label='$m_{sky}+' + str(0.1 * i) + '$')
        axe[k][j % 2].set_xlabel(r'' + legx, {'fontsize': 12.})
        axe[k][j % 2].set_ylabel(r'' + legy, {'fontsize': 12.})
        axe[k][j % 2].set_title(band, loc='left')
        axe[k][j % 2].legend(loc='upper left', fontsize=5)


def Plot_Multiple_SNR(sela, what_x='', what_y=[], ratio_x=False, ratio_y=False, legx='', legy='', prefixa='LSST::', title=''):
    fige, axe = plt.subplots(ncols=2, nrows=3, figsize=(14, 10))
    colors = ['b', 'g', 'r', 'm', 'c']
    fige.suptitle(title)
    for j, band in enumerate(['u', 'g', 'r', 'i', 'z', 'y']):
        selac = sela[np.where(np.logical_and(sela['filter'] == prefixa + band,
                                             sela['e_per_sec'] / sela['e_per_sec_err'] >= 5.))]
        if j < 2:
            k = 0
        if j >= 2 and j < 4:
            k = 1
        if j >= 4:
            k = 2
        for i in range(1, 6, 1):
            drawx = selac[what_x]
            drawy_1 = selac[what_x] / selac[what_y[1] + '_' + str(i)]
            drawy_2 = selac[what_x] / selac[what_y[2] + '_' + str(i)]
            axe[k][j % 2].plot(drawx, drawy_1, colors[i - 1] + '*', label='$m_{sky}-' + str(0.1 * i) + '$')
            axe[k][j % 2].plot(drawx, drawy_2, colors[i - 1] + 'o', label='$m_{sky}+' + str(0.1 * i) + '$')
        axe[k][j % 2].plot([18, 26], [5., 5.], color='r', linestyle='-', linewidth=1)
        # axe[k][j%2].set_yscale('log')
        axe[k][j % 2].set_ylim([0., 50.])
        axe[k][j % 2].set_xlabel(r'' + legx, {'fontsize': 12.})
        axe[k][j % 2].set_ylabel(r'' + legy, {'fontsize': 12.})
        axe[k][j % 2].set_title(band, loc='left')
        axe[k][j % 2].legend(loc='upper left', fontsize=5)


dir_snsim = 'lc4pg'
table_params = ascii.read(dir_snsim + '/sample.ntuple', fast_reader=False)
for i, name in enumerate(['z', 'dL', 'DayMax', 'X1', 'Color', 'ra', 'dec']):
    table_params.rename_column('col' + str(i + 1), name)

for num in range(len(table_params)):
    file_name = 'LC_systes/lc_' + str(num).zfill(4) + '.txt'
    if num == 0:
        table_syste = ascii.read(file_name, fast_reader=False)
    else:
        table_n = ascii.read(file_name, fast_reader=False)
        table_syste = vstack([table_syste, table_n])
        # print num,len(table_match),len(table_n)
    # if num >=100:
    #     break

print table_syste

Plot_Filters(table_syste, what_x='mag', what_y=['err_mag', 'err_mag_minus_5', 'err_mag_plus_5'])
Plot_Filters(table_syste, what_x='mag', what_y=['err_mag', 'err_mag_minus_5', 'err_mag_plus_5'], ratio_y=True)
Plot_Filters(table_syste, what_x='mag', what_y=['err_mag', 'err_mag_minus_5', 'err_mag_plus_5'], ratio_x=True, ratio_y=True)
Plot_Filters(table_syste, what_x='fiveSigmaDepth', what_y=['fiveSigmaDepth', 'fiveSigmaThrough', 'fiveSigmaThrough'],
             legx='$m_5^{OpSim}$', legy='$m_5^{OpSim}-m_5^{throughput}$')
# Plot_Filters(table_syste,what_x='fiveSigmaThrough',what_y=['fiveSigmaThrough','fiveSigmaThrough_plus','fiveSigmaThrough_minus'],legx='$m_5^{throughput}$',legy='$\Delta m_5$')
Plot_Filters(table_syste, what_x='fiveSigmaDepth', what_y=['fiveSigmaDepth', 'filtSkyBrightness', 'filtSkyBrightness'],
             ratio_y=True)
Plot_Multiple(table_syste, what_x='fiveSigmaThrough',
              what_y=['fiveSigmaThrough', 'fiveSigmaThrough_plus', 'fiveSigmaThrough_minus'],
              legx='$m_5^{throughput}$', legy='$\Delta m_5$')
Plot_Multiple(table_syste, what_x='mag', what_y=['err_mag', 'err_mag_minus', 'err_mag_plus'],
              legx='SN mag', legy='$\Delta \sigma=\sigma_{nom}-\sigma_{sky var.}$ [mag]')
Plot_Multiple(table_syste, what_x='mag', what_y=['err_mag', 'err_mag_minus', 'err_mag_plus'],
              legx='SN mag', legy='$\Delta \sigma=\sigma_{nom}-\sigma_{sky var.}$ [mag]')
Plot_Multiple_SNR(table_syste, what_x='mag', what_y=['err_mag', 'err_mag_minus', 'err_mag_plus'],
                  legx='SN mag', legy='SNR')

plt.show()
[ "gris@clermont.in2p3.fr" ]
gris@clermont.in2p3.fr
305deee2e277804410cb61858df9b9d1312d3dd8
aa60622878511b3a73f88297580b378e9ab985e4
/Logistic_Regression_with_a_Neural_Network_mindset_v6a.py
13a682d4dcf8c10cfada5afb2b54fe2918de3358
[]
no_license
Sandro-Tan/Deep-Learning
a93ebb92116350356456a598c8bff0f63dfbc731
b313a6b6d9d7e1b84149311f8474e2e26ed0b84d
refs/heads/master
2022-07-03T17:23:57.828061
2020-05-08T06:36:55
2020-05-08T06:36:55
262,245,823
0
0
null
null
null
null
UTF-8
Python
false
false
10,664
py
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
from lr_utils import load_dataset

# Loading the data (cat/non-cat)
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()

# Example of a picture
index = 25
plt.imshow(train_set_x_orig[index])
print("y = " + str(train_set_y[:, index]) + ", it's a '" +
      classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.")
# plt.show()

m_train = train_set_x_orig.shape[0]  # (number of training examples)
m_test = test_set_x_orig.shape[0]    # (number of test examples)
num_px = train_set_x_orig.shape[1]   # (= height = width of a training image)

# Remember that train_set_x_orig is a numpy-array of shape
# (m_train, num_px, num_px, 3).
# For instance, you can access m_train by writing train_set_x_orig.shape[0].
print("Number of training examples: m_train = " + str(m_train))
print("Number of testing examples: m_test = " + str(m_test))
print("Height/Width of each image: num_px = " + str(num_px))
print("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print("train_set_x shape: " + str(train_set_x_orig.shape))
print("train_set_y shape: " + str(train_set_y.shape))
print("test_set_x shape: " + str(test_set_x_orig.shape))
print("test_set_y shape: " + str(test_set_y.shape))

# Reshape the training and test examples
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T

print("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print("train_set_y shape: " + str(train_set_y.shape))
print("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print("test_set_y shape: " + str(test_set_y.shape))
print("sanity check after reshaping: " + str(train_set_x_flatten[0:5, 0]))

train_set_x = train_set_x_flatten / 255
test_set_x = test_set_x_flatten / 255


def sigmoid(z):
    """
    Compute the sigmoid of z

    Arguments:
    z -- A scalar or numpy array of any size.

    Return:
    s -- sigmoid(z)
    """
    s = 1 / (1 + np.exp(-z))
    return s


def initialize_with_zeros(dim):
    """
    This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.

    Argument:
    dim -- size of the w vector we want (or number of parameters in this case)

    Returns:
    w -- initialized vector of shape (dim, 1)
    b -- initialized scalar (corresponds to the bias)
    """
    w = np.zeros((dim, 1))
    b = 0
    assert (w.shape == (dim, 1))
    assert (isinstance(b, float) or isinstance(b, int))
    return w, b


def propagate(w, b, X, Y):
    """
    Implement the cost function and its gradient for the propagation explained above

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)

    Return:
    cost -- negative log-likelihood cost for logistic regression
    dw -- gradient of the loss with respect to w, thus same shape as w
    db -- gradient of the loss with respect to b, thus same shape as b

    Tips:
    - Write your code step by step for the propagation. np.log(), np.dot()
    """
    m = X.shape[1]

    # FORWARD PROPAGATION (FROM X TO COST)
    A = sigmoid(np.dot(w.T, X) + b)  # compute activation
    cost = -((np.dot(Y, np.log(A.T)) + np.dot((1 - Y), np.log(1 - A.T)))) / m  # compute cost

    # BACKWARD PROPAGATION (TO FIND GRAD)
    dw = np.dot(X, (A - Y).T) / m
    db = np.sum(A - Y) / m

    assert (dw.shape == w.shape)
    assert (db.dtype == float)
    cost = np.squeeze(cost)
    assert (cost.shape == ())

    grads = {"dw": dw,
             "db": db}
    return grads, cost


# Propagate Test
w, b, X, Y = np.array([[1.], [2.]]), 2., np.array([[1., 2., -1.], [3., 4., -3.2]]), np.array([[1, 0, 1]])
grads, cost = propagate(w, b, X, Y)
# print ("dw = " + str(grads["dw"]))
# print ("db = " + str(grads["db"]))
# print ("cost = " + str(cost))


def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):
    """
    This function optimizes w and b by running a gradient descent algorithm

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of shape (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
    num_iterations -- number of iterations of the optimization loop
    learning_rate -- learning rate of the gradient descent update rule
    print_cost -- True to print the loss every 100 steps

    Returns:
    params -- dictionary containing the weights w and bias b
    grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
    costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.

    Tips:
    You basically need to write down two steps and iterate through them:
        1) Calculate the cost and the gradient for the current parameters. Use propagate().
        2) Update the parameters using gradient descent rule for w and b.
    """
    costs = []
    for i in range(num_iterations):
        # Cost and gradient calculation (≈ 1-4 lines of code)
        grads, cost = propagate(w, b, X, Y)

        # Retrieve derivatives from grads
        dw = grads["dw"]
        db = grads["db"]

        # update rule (≈ 2 lines of code)
        w = w - learning_rate * dw
        b = b - learning_rate * db

        # Record the costs
        if i % 100 == 0:
            costs.append(cost)

        # Print the cost every 100 training iterations
        if print_cost and i % 100 == 0:
            print("Cost after iteration %i: %f" % (i, cost))

    params = {"w": w,
              "b": b}
    grads = {"dw": dw,
             "db": db}
    return params, grads, costs


params, grads, costs = optimize(w, b, X, Y, num_iterations=100, learning_rate=0.009, print_cost=False)
# print ("w = " + str(params["w"]))
# print ("b = " + str(params["b"]))
# print ("dw = " + str(grads["dw"]))
# print ("db = " + str(grads["db"]))


def predict(w, b, X):
    '''
    Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)

    Returns:
    Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
    '''
    m = X.shape[1]
    Y_prediction = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)

    # Compute vector "A" predicting the probabilities of a cat being present in the picture
    A = sigmoid(np.dot(w.T, X) + b)

    for i in range(A.shape[1]):
        # Convert probabilities A[0,i] to actual predictions p[0,i]
        ### START CODE HERE ### (≈ 4 lines of code)
        if 0 < A[0, i] <= 0.5:
            Y_prediction[0, i] = 0
        else:
            Y_prediction[0, i] = 1
        ### END CODE HERE ###

    assert (Y_prediction.shape == (1, m))
    return Y_prediction


# Test
w = np.array([[0.1124579], [0.23106775]])
b = -0.3
X = np.array([[1., -1.1, -3.2], [1.2, 2., 0.1]])
print("predictions = " + str(predict(w, b, X)))


# GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):
    """
    Builds the logistic regression model by calling the function you've implemented previously

    Arguments:
    X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
    Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
    X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
    Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
    num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
    learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
    print_cost -- Set to true to print the cost every 100 iterations

    Returns:
    d -- dictionary containing information about the model.
    """
    ### START CODE HERE ###
    # initialize parameters with zeros (≈ 1 line of code)
    w, b = initialize_with_zeros(X_train.shape[0])

    # Gradient descent (≈ 1 line of code)
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)

    # Retrieve parameters w and b from dictionary "parameters"
    w = parameters["w"]
    b = parameters["b"]

    # Predict test/train set examples (≈ 2 lines of code)
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)
    ### END CODE HERE ###

    # Print train/test Errors
    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))

    d = {"costs": costs,
         "Y_prediction_test": Y_prediction_test,
         "Y_prediction_train": Y_prediction_train,
         "w": w,
         "b": b,
         "learning_rate": learning_rate,
         "num_iterations": num_iterations}
    return d


d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations=2000, learning_rate=0.005, print_cost=True)

'''
Comment: Training accuracy is close to 100%. This is a good sanity check: your model is
working and has high enough capacity to fit the training data. Test accuracy is 68%. It is
actually not bad for this simple model, given the small dataset we used and that logistic
regression is a linear classifier. But no worries, you'll build an even better classifier next
week!

Also, you see that the model is clearly overfitting the training data. Later in this
specialization you will learn how to reduce overfitting, for example by using regularization.
Using the code below (and changing the index variable) you can look at predictions on
pictures of the test set.
'''
[ "noreply@github.com" ]
Sandro-Tan.noreply@github.com
ff4f56323f17a59963ae93f4b4952cb0e3bed3d5
4d5cc9af2d8293d9a257a88249ab2225030fcc9a
/weather_script.py
9cacc86b5eede75f1354459fdd079d96ee9518d6
[]
no_license
Ser3q/Human-Console
ac9b2fecddbc29542cf7090d2ec064dba1a4111b
6a2436bab59ca50b2a6c8da56b7262e1d0a36b4e
refs/heads/master
2020-04-17T17:38:26.244379
2019-01-21T10:19:39
2019-01-21T10:19:39
166,791,530
0
0
null
null
null
null
UTF-8
Python
false
false
6,457
py
import re
import pyttsx3
from urllib.request import Request, urlopen
from weather import Unit, Weather

engine = pyttsx3.init()


def run(text_r):
    if re.match("(pogoda|(prognoza pogod(a|ę|y))|([Cc]zy)? będzie padać)", text_r):
        # Geolocate by scraping the Google result page for "moja lokalizacja" ("my location")
        request = Request("https://www.google.com/search?hl=pl&safe=off&q=moja+lokalizacja")
        request.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7')
        response = urlopen(request).read().decode("utf-8")
        mypos = re.findall(r'alt="Mapa: ((\d{2}-\d{3})|([\w,]+) ([\w ]+))', response)

        weather = Weather(unit=Unit.CELSIUS)
        location = weather.lookup_by_location('{}'.format(mypos[0][3]))
        condition = location.condition

        # Polish descriptions for the Yahoo Weather condition texts.
        # (The original used a long if/elif chain; two branches compared with
        # `... or "literal"`, which is always true, so everything after them
        # was unreachable. The mapping below restores the intended behaviour.)
        conditions_pl = {
            "Heavy Rain": "Silne opady deszczu.",
            "Rain": "Opady deszczu.",
            "Showers": "Przelotne opady deszczu.",
            "Cloudy": "Pochmurno.",
            "Partly Cloudy": "Częściowe zachmurzenie.",
            "Blowing Snow": "Pada śnieg.",
            "Blustery": "Dżdżysto.",
            "Clear": "Bezchmurnie.",
            "Cold": "Chłodno.",
            "Drizzle": "Mżawka.",
            "Fair": "Pogodnie.",
            "Freezing Drizzle": "Zamarzająca mżawka.",
            "Freezing Rain": "Zamarzający deszcz.",
            "Foggy": "Mglisto.",
            "Hail": "Grad.",
            "Haze": "Mgła.",
            "Heavy Snow": "Obfite opady śniegu.",
            "Hot": "Gorąco.",
            "Hurricane": "Ryzyko huraganu.",
            "Isolated Thundershowers": "Pojedyncze burze.",
            "Isolated Thunderstorms": "Pojedyncze burze.",
            "Light Snow Showers": "Lekkie opady śniegu.",
            "Mixed Rain and Hail": "Deszcz z gradem.",
            "Mixed Rain and Sleet": "Deszcz ze śniegiem.",
            "Mixed Rain and Snow": "Deszcz ze śniegiem.",
            "Mostly Cloudy": "Głównie pochmurno.",
            "Scattered Showers": "Przelotne opady deszczu.",
            "Scattered Thunderstorms": "Przelotne burze.",
            "Severe Thunderstorms": "Burze z piorunami.",
            "Sleet": "Śnieg z deszczem.",
            "Smoky": "Kiepska jakość powietrza.",
            "Snow": "Śnieg.",
            "Snow Flurries": "Lekkie opady śniegu.",
            "Snow Showers": "Przelotne opady śniegu.",
            "Sunny": "Słonecznie.",
            "Thundershowers": "Burze.",
            "Thunderstorms": "Burze z piorunami.",
            "Tornado": "Ryzyko tornada.",
            "Tropical Storm": "Burza tropikalna.",
            "Windy": "Wietrznie.",
        }

        forecast = "Prognoza na dziś:\n"
        forecast += conditions_pl.get(condition.text, condition.text)
        forecast += "\nTemperatura: " + condition.temp + " stopni Celsjusza."

        print(forecast)
        engine.say(forecast)
        engine.runAndWait()
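# Example invocation (hypothetical; assumes network access, the `weather-api`
# package that provides `weather.Weather`, and a working pyttsx3 TTS voice):
# run("pogoda")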
[ "kgorny96@gmail.com" ]
kgorny96@gmail.com
add85f2c5514e2f0184f0027f1dbf56e19261a4b
6a112b13994912772471e9c9acae8417ccc2b09e
/4_2.py
75ed9a2068d1915cc9433a6512b04514cf920018
[]
no_license
yuliiadurytska/Durytska_Lab4
1aea7f8329575414de0d662c6f4d6783b9c21411
dff95ecc5640168f2809eca889595a6c81eb981d
refs/heads/master
2020-11-25T09:43:51.327315
2019-12-17T11:39:48
2019-12-17T11:39:48
228,603,517
0
0
null
null
null
null
UTF-8
Python
false
false
494
py
# Read one arithmetic expression per line from lab1.txt, count its
# operations, and evaluate it.
file = open('lab1.txt')
expressions = file.readlines()
file.close()

for expression in expressions:
    mathematical_operations = ['+', '-', '*', '/']
    operations_count = 0
    for operation in mathematical_operations:
        operations_count = operations_count + expression.count(operation)
    print('Вираз:', expression)
    print('Кількість операцій:', operations_count)
    print('Результат обчислення:', eval(expression))
    print()
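# Expected input format (hypothetical lab1.txt contents):
# 2+2*2
# 10/5-1
# For '2+2*2' this prints the expression, an operation count of 2,
# and the evaluated result 6.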
[ "noreply@github.com" ]
yuliiadurytska.noreply@github.com
6a38494d341a006e999caaff0a9d336696bbc2e6
b08e60fe534d799c765a12a5892d104f581f1cb6
/node_modules/node-sass/build/config.gypi
f12cb439c396f1abcfba8b0997f736fed2c6580b
[ "LicenseRef-scancode-unknown-license-reference", "MIT" ]
permissive
andrewmckinnon2/Consensys-final-project-Andrew-McKinnon
e03433c2361a22bde8e37b44dd93b82f363e99ff
e65bcd0b55382fd4e299e0eca713303ccb963c4f
refs/heads/master
2020-03-27T10:00:26.842043
2018-08-28T03:47:14
2018-08-28T03:47:14
146,388,842
0
0
null
null
null
null
UTF-8
Python
false
false
5,342
gypi
# Do not edit. File was generated by node-gyp's "configure" step
{
  "target_defaults": {
    "cflags": [],
    "default_configuration": "Release",
    "defines": [],
    "include_dirs": [],
    "libraries": []
  },
  "variables": {
    "asan": 0,
    "coverage": "false",
    "debug_http2": "false",
    "debug_nghttp2": "false",
    "force_dynamic_crt": 0,
    "host_arch": "x64",
    "icu_data_in": "../../deps/icu-small/source/data/in/icudt60l.dat",
    "icu_endianness": "l",
    "icu_gyp_path": "tools/icu/icu-generic.gyp",
    "icu_locales": "en,root",
    "icu_path": "deps/icu-small",
    "icu_small": "true",
    "icu_ver_major": "60",
    "llvm_version": 0,
    "node_byteorder": "little",
    "node_enable_d8": "false",
    "node_enable_v8_vtunejit": "false",
    "node_install_npm": "true",
    "node_module_version": 59,
    "node_no_browser_globals": "false",
    "node_prefix": "/usr/local",
    "node_release_urlbase": "https://nodejs.org/download/release/",
    "node_shared": "false",
    "node_shared_cares": "false",
    "node_shared_http_parser": "false",
    "node_shared_libuv": "false",
    "node_shared_nghttp2": "false",
    "node_shared_openssl": "false",
    "node_shared_zlib": "false",
    "node_tag": "",
    "node_target_type": "executable",
    "node_use_bundled_v8": "true",
    "node_use_dtrace": "true",
    "node_use_etw": "false",
    "node_use_lttng": "false",
    "node_use_openssl": "true",
    "node_use_perfctr": "false",
    "node_use_v8_platform": "true",
    "node_without_node_options": "false",
    "openssl_fips": "",
    "openssl_no_asm": 0,
    "shlib_suffix": "59.dylib",
    "target_arch": "x64",
    "v8_enable_gdbjit": 0,
    "v8_enable_i18n_support": 1,
    "v8_enable_inspector": 1,
    "v8_no_strict_aliasing": 1,
    "v8_optimized_debug": 0,
    "v8_promise_internal_field_count": 1,
    "v8_random_seed": 0,
    "v8_trace_maps": 0,
    "v8_use_snapshot": "true",
    "want_separate_host_toolset": 0,
    "xcode_version": "7.0",
    "nodedir": "/Users/andrewmckinnon2/.node-gyp/9.7.1",
    "standalone_static_library": 1,
    "libsass_ext": "",
    "libsass_cflags": "",
    "libsass_ldflags": "",
    "libsass_library": "",
    "save_dev": "",
    "legacy_bundling": "",
    "dry_run": "",
    "viewer": "man",
    "only": "",
    "commit_hooks": "true",
    "browser": "",
    "also": "",
    "sign_git_commit": "",
    "rollback": "true",
    "usage": "",
    "audit": "true",
    "globalignorefile": "/usr/local/etc/npmignore",
    "shell": "/bin/bash",
    "maxsockets": "50",
    "init_author_url": "",
    "shrinkwrap": "true",
    "parseable": "",
    "metrics_registry": "https://registry.npmjs.org/",
    "timing": "",
    "init_license": "ISC",
    "if_present": "",
    "sign_git_tag": "",
    "init_author_email": "",
    "cache_max": "Infinity",
    "preid": "",
    "long": "",
    "local_address": "",
    "git_tag_version": "true",
    "cert": "",
    "registry": "https://registry.npmjs.org/",
    "fetch_retries": "2",
    "no_proxy": "",
    "versions": "",
    "message": "%s",
    "key": "",
    "globalconfig": "/usr/local/etc/npmrc",
    "prefer_online": "",
    "logs_max": "10",
    "always_auth": "",
    "global_style": "",
    "cache_lock_retries": "10",
    "update_notifier": "true",
    "heading": "npm",
    "audit_level": "low",
    "searchlimit": "20",
    "read_only": "",
    "offline": "",
    "fetch_retry_mintimeout": "10000",
    "json": "",
    "access": "",
    "allow_same_version": "",
    "https_proxy": "",
    "engine_strict": "",
    "description": "true",
    "userconfig": "/Users/andrewmckinnon2/.npmrc",
    "init_module": "/Users/andrewmckinnon2/.npm-init.js",
    "cidr": "",
    "user": "",
    "node_version": "9.7.1",
    "save": "true",
    "ignore_prepublish": "",
    "editor": "vi",
    "auth_type": "legacy",
    "tag": "latest",
    "script_shell": "",
    "progress": "true",
    "global": "",
    "searchstaleness": "900",
    "optional": "true",
    "ham_it_up": "",
    "save_prod": "",
    "force": "",
    "bin_links": "true",
    "searchopts": "",
    "node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
    "depth": "Infinity",
    "sso_poll_frequency": "500",
    "rebuild_bundle": "true",
    "unicode": "true",
    "fetch_retry_maxtimeout": "60000",
    "tag_version_prefix": "v",
    "strict_ssl": "true",
    "sso_type": "oauth",
    "scripts_prepend_node_path": "warn-only",
    "save_prefix": "^",
    "ca": "",
    "save_exact": "",
    "group": "20",
    "fetch_retry_factor": "10",
    "dev": "",
    "version": "",
    "prefer_offline": "",
    "cache_lock_stale": "60000",
    "otp": "",
    "cache_min": "10",
    "searchexclude": "",
    "cache": "/Users/andrewmckinnon2/.npm",
    "color": "true",
    "package_lock": "true",
    "package_lock_only": "",
    "save_optional": "",
    "ignore_scripts": "",
    "user_agent": "npm/6.4.0 node/v9.7.1 darwin x64",
    "cache_lock_wait": "10000",
    "production": "",
    "send_metrics": "",
    "save_bundle": "",
    "umask": "0022",
    "node_options": "",
    "init_version": "1.0.0",
    "init_author_name": "",
    "git": "git",
    "scope": "",
    "unsafe_perm": "true",
    "tmp": "/var/folders/sl/237_wvw960j_wpq95x2ftmqc0000gn/T",
    "onload_script": "",
    "prefix": "/usr/local",
    "link": ""
  }
}
[ "andrewmckinnon2@gmail.com" ]
andrewmckinnon2@gmail.com
b43732cfbfc7173753f1b264c51da09b36a11fac
633ab8880dc367feefdb6ef565ed0e70a4094bc1
/10001-11000/10951.py
344e4ca8721432ff30ea47445a48febd4a14a805
[]
no_license
winston1214/baekjoon
2e9740ee2824d7777f6e64d50087b5c040baf2c6
20125255cd5b359023a6297f3761b2db1057d67d
refs/heads/master
2023-03-04T09:07:27.688072
2021-02-16T13:51:49
2021-02-16T13:51:49
284,832,623
3
1
null
null
null
null
UTF-8
Python
false
false
142
py
# @Author YoungMinKim
# baekjoon

# Read "a b" pairs and print their sum until end of input
# (input() raises an exception at EOF, which ends the loop).
while True:
    try:
        a, b = map(int, input().split())
        print(a + b)
    except:
        break
[ "winston1214@naver.com" ]
winston1214@naver.com
c7e53240cbdadeb3f3b35fd1a36fd1b855939960
b2fab56bd637f4ac82b2fd0e2f4a12b7a3846458
/LC66_Plus_One.py
9266a16cab22f061f3a615e4b1f71c5e8cf67ffb
[]
no_license
tongyijia/python_leetcode_and_Sword-means-offer
e7957602e04830ea306cfaed3782f55ee7ee5ca2
0456f011a5a50b02bd5056dd1d98827fcd8f90cf
refs/heads/master
2020-09-08T01:51:46.059267
2019-11-22T08:07:46
2019-11-22T08:07:46
220,977,096
0
0
null
null
null
null
UTF-8
Python
false
false
467
py
from typing import List


class Solution:
    def plusOne(self, digits: List[int]) -> List[int]:
        if len(digits) == 0:
            return False
        l1 = [0, 1, 2, 3, 4, 5, 6, 7, 8]
        l2 = [9]
        if digits[-1] in l1:
            # no carry: just increment the last digit
            digits[-1] += 1
            return digits
        elif digits[-1] in l2 and len(digits) == 1:
            # single 9 rolls over to 10
            return [1, 0]
        else:
            # carry: zero the last digit and recurse on the prefix
            digits[-1] = 0
            digits[:len(digits) - 1] = self.plusOne(digits[:len(digits) - 1])
            return digits
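# Quick sanity checks for the recursive carry handling (hypothetical driver,
# not part of the original LeetCode submission):
# print(Solution().plusOne([1, 2, 9]))  # -> [1, 3, 0]
# print(Solution().plusOne([9, 9]))     # -> [1, 0, 0]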
[ "tongyijialinux@163.com" ]
tongyijialinux@163.com
f03561d2788a4bb0b7d5d2706508ce0ede8bbadb
86a2c72a245971348fecb7cf01b838079aa055d9
/produtos/urls.py
15588be5e8115757d6be44ec788272dd219c071b
[ "MIT" ]
permissive
WesGtoX/gestao-clientes
af9c9b637e2ac687479547c2d5ccc82824c49db1
851b1c89e7b86f6d0b60d9b8d9c2bed3b629d8bf
refs/heads/main
2023-03-29T21:24:07.823985
2021-03-25T00:09:54
2021-03-25T00:09:54
215,177,263
0
0
MIT
2021-04-07T01:59:00
2019-10-15T01:21:58
JavaScript
UTF-8
Python
false
false
229
py
from django.urls import path

from produtos import views

urlpatterns = [
    path('list/', views.produtos_list, name='produtos_list'),
    path('produto_bulk/', views.ProdutoBulk.as_view(), name='produto_bulk'),  # CreateView
]
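# For context, a minimal sketch of what the referenced views might look like
# (hypothetical; the actual produtos/views.py is not part of this file — only
# the "# CreateView" hint above is known):
#
# from django.shortcuts import render
# from django.views.generic import CreateView
# from produtos.models import Produto
#
# def produtos_list(request):
#     return render(request, 'produtos/list.html', {'produtos': Produto.objects.all()})
#
# class ProdutoBulk(CreateView):
#     model = Produto
#     fields = '__all__'
#     success_url = '/produtos/list/'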
[ "wesley_gto_x@yahoo.com.br" ]
wesley_gto_x@yahoo.com.br
6571516054a65575f196eb125bf11eb8155fca0e
70e72c1fc10607ca94f03f01672c2c7b85793dc2
/codeowners/__init__.py
79fa59368b5ea8c287ed8919b909f882d989ac10
[ "MIT" ]
permissive
sbdchd/codeowners
6c1d7797e86f759ef62a5467c09bdea7c9b118e9
04b18b38ee0399c258cf3d200bf2eabf7182982c
refs/heads/master
2022-08-08T01:46:53.901640
2022-07-26T05:33:19
2022-07-26T05:33:19
200,386,032
27
14
MIT
2022-07-27T14:33:01
2019-08-03T14:40:16
Python
UTF-8
Python
false
false
6,224
py
import re
from typing import Generator, List, Optional, Pattern, Tuple

from typing_extensions import Literal

__all__ = ["CodeOwners"]

OwnerTuple = Tuple[Literal["USERNAME", "TEAM", "EMAIL"], str]

TEAM = re.compile(r"^@\S+/\S+")
USERNAME = re.compile(r"^@\S+")
EMAIL = re.compile(r"^\S+@\S+")

MASK = "/" * 20


def path_to_regex(pattern: str) -> Pattern[str]:
    """
    ported from
    https://github.com/hmarr/codeowners/blob/d0452091447bd2a29ee508eebc5a79874fb5d4ff/match.go#L33

    MIT License

    Copyright (c) 2020 Harry Marr

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in all
    copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    SOFTWARE.
    """
    regex = ""

    slash_pos = pattern.find("/")
    anchored = slash_pos > -1 and slash_pos != len(pattern) - 1

    regex += r"\A" if anchored else r"(?:\A|/)"

    matches_dir = pattern[-1] == "/"
    matches_no_subdirs = pattern[-2:] == "/*"
    pattern_trimmed = pattern.strip("/")

    in_char_class = False
    escaped = False

    iterator = enumerate(pattern_trimmed)
    for i, ch in iterator:
        if escaped:
            regex += re.escape(ch)
            escaped = False
            continue

        if ch == "\\":
            escaped = True
        elif ch == "*":
            if i + 1 < len(pattern_trimmed) and pattern_trimmed[i + 1] == "*":
                left_anchored = i == 0
                leading_slash = i > 0 and pattern_trimmed[i - 1] == "/"
                right_anchored = i + 2 == len(pattern_trimmed)
                trailing_slash = (
                    i + 2 < len(pattern_trimmed) and pattern_trimmed[i + 2] == "/"
                )

                if (left_anchored or leading_slash) and (
                    right_anchored or trailing_slash
                ):
                    regex += ".*"
                    next(iterator, None)
                    next(iterator, None)
                    continue
            regex += "[^/]*"
        elif ch == "?":
            regex += "[^/]"
        elif ch == "[":
            in_char_class = True
            regex += ch
        elif ch == "]":
            if in_char_class:
                regex += ch
                in_char_class = False
            else:
                regex += re.escape(ch)
        else:
            regex += re.escape(ch)

    if in_char_class:
        raise ValueError(f"unterminated character class in pattern {pattern}")

    if matches_dir:
        regex += "/"
    elif matches_no_subdirs:
        regex += r"\Z"
    else:
        regex += r"(?:\Z|/)"
    return re.compile(regex)


def parse_owner(owner: str) -> Optional[OwnerTuple]:
    if TEAM.match(owner):
        return ("TEAM", owner)
    if USERNAME.match(owner):
        return ("USERNAME", owner)
    if EMAIL.match(owner):
        return ("EMAIL", owner)
    return None


class CodeOwners:
    def __init__(self, text: str) -> None:
        section_name = None
        paths: List[Tuple[Pattern[str], str, List[OwnerTuple], int, Optional[str]]] = []
        for line_num, line in enumerate(text.splitlines(), start=1):
            line = line.strip()
            if line == "" or line.startswith("#"):
                continue
            # Track the GitLab section name (if used)
            # https://docs.gitlab.com/ee/user/project/code_owners.html#code-owners-sections
            elif line.startswith("[") and line.endswith("]"):
                section_name = line[1:-1]
                continue
            elif line.startswith("^[") and line.endswith("]"):
                section_name = line[2:-1]
                continue
            elements = iter(line.replace("\\ ", MASK).split())
            path = next(elements, None)
            if path is None:
                continue
            owners: List[OwnerTuple] = []
            for owner in elements:
                owner_res = parse_owner(owner)
                if owner_res is not None:
                    owners.append(owner_res)
            paths.append(
                (
                    path_to_regex(path),
                    path.replace(MASK, "\\ "),
                    owners,
                    line_num,
                    section_name,
                )
            )
        paths.reverse()
        self.paths = paths

    def matching_lines(
        self, filepath: str
    ) -> Generator[
        Tuple[List[OwnerTuple], Optional[int], Optional[str], Optional[str]], None, None
    ]:
        for pattern, path, owners, line_num, section_name in self.paths:
            if pattern.search(filepath.replace(" ", MASK)) is not None:
                yield (owners, line_num, path, section_name)

    def matching_line(
        self, filepath: str
    ) -> Tuple[List[OwnerTuple], Optional[int], Optional[str], Optional[str]]:
        return next(self.matching_lines(filepath), ([], None, None, None))

    def section_name(self, filepath: str) -> Optional[str]:
        """
        Find the section name of the specified file path.

        None is returned when no matching section information was found
        (or sections are not used in the CODEOWNERS file)
        """
        return self.matching_line(filepath)[3]

    def of(self, filepath: str) -> List[OwnerTuple]:
        return self.matching_line(filepath)[0]
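# Minimal usage sketch (hypothetical CODEOWNERS text; API as defined above).
# Later rules take precedence because `paths` is reversed in __init__:
# owners = CodeOwners("*.py @org/backend\ndocs/ docs@example.com\n")
# print(owners.of("src/app.py"))     # [('TEAM', '@org/backend')]
# print(owners.of("docs/index.md"))  # [('EMAIL', 'docs@example.com')]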
[ "noreply@github.com" ]
sbdchd.noreply@github.com
faf6939df77af078f8359a2e9f30b6df4ffc6cd9
f98960a21f60be089aca5d54bcfc67661a819757
/common/libs/user/UserService.py
5b168a11306cf4fb73e98cdd6c2efbccab5ba2e7
[]
no_license
767073632/question_bank_flask
7d7ed70af3b9f8e6c692e0e556f28a03e0a6398c
d3fae99f389fac408195fbf4d5812b7f9c26c33a
refs/heads/master
2022-12-03T16:52:34.349843
2020-08-23T07:36:48
2020-08-23T07:36:48
null
0
0
null
null
null
null
UTF-8
Python
false
false
683
py
import base64
import hashlib
import string
import random


class UserService():
    @staticmethod
    def gene_auth_code(user_info):
        # md5 of "uid-login_name-login_pwd-login_salt" used as the auth token
        m = hashlib.md5()
        raw = "%s-%s-%s-%s" % (user_info.uid, user_info.login_name, user_info.login_pwd, user_info.login_salt)
        m.update(raw.encode('utf-8'))
        return m.hexdigest()

    @staticmethod
    def gene_pwd(pwd, salt):
        # salted md5 of the base64-encoded password
        m = hashlib.md5()
        raw = "%s-%s" % (base64.encodebytes(pwd.encode('utf-8')), salt)
        m.update(raw.encode('utf-8'))
        return m.hexdigest()

    @staticmethod
    def gene_salt():
        # 16-character alphanumeric salt
        return "".join(random.choices(string.ascii_letters + string.digits, k=16))
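# Usage sketch (hypothetical values; gene_auth_code expects an object exposing
# uid, login_name, login_pwd and login_salt attributes, e.g. an ORM model):
# salt = UserService.gene_salt()
# hashed_pwd = UserService.gene_pwd("s3cret", salt)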
[ "zhilong.wang@ly.com" ]
zhilong.wang@ly.com
b326df97c204e9190ac03e42cf89e198cff1a2b2
a2362576001e0f9e22dc69c623170e108908c1b4
/testing_sys/todos/urls.py
6904f3751932fe8f272a980e49cd322ecafbba5d
[]
no_license
mdigbazova/TestSystem
c1a694eb1877567bcc63a2cc3f615469ba4f8fd9
e5cca7a3aa31f1af4e1f7807895124e36348b9af
refs/heads/master
2022-12-15T22:20:14.812166
2019-06-11T08:14:24
2019-06-11T08:14:24
183,647,017
0
1
null
2022-11-22T03:50:12
2019-04-26T14:53:54
Python
UTF-8
Python
false
false
1,004
py
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from rest_framework.schemas import get_schema_view

from . import views

schema_view = get_schema_view(title='Pastebin API')

urlpatterns = [
    path('schema/', schema_view),
    path('todos_list/', views.TodosList.as_view(), name="todos-list"),
    path('todo/<pk>/', views.TodosDetail.as_view(), name="todos-detail"),
    path('todo/<pk>/highlight/', views.TodoDetail.as_view(), name='todo-detail'),
    path('users/', views.UserList.as_view(), name="user-list"),
    path('user/<pk>/', views.UserDetail.as_view(), name="user-detail"),
    path('', views.api_root),
]

"""Including format_suffix_patterns is an optional choice that provides a
simple, DRY way to refer to a specific file format for a URL endpoint.
It means our API will be able to handle URLs such as
http://example.com/api/items/4.json rather than just
http://example.com/api/items/4.
"""
urlpatterns = format_suffix_patterns(urlpatterns)
[ "mdigbazova@gmail.com" ]
mdigbazova@gmail.com
0cc9f6522d31661b42ca34270fb6039f9874c64e
ad58ceeada5ad7c1a6a9fc370969a7d7306b6397
/graphviz/quoting.py
34b75016bccbee5fbc090ae5efd00e9245582f93
[ "MIT" ]
permissive
kianmeng/graphviz
35e65f9895130ca12c6b4fa54efb064ddbdbcdde
2861bdea2dcc0ec90bd257b56ab546eb95537221
refs/heads/master
2023-08-30T16:48:43.526661
2021-11-02T20:51:16
2021-11-02T20:51:16
424,044,748
0
0
MIT
2021-11-03T00:47:06
2021-11-03T00:47:05
null
UTF-8
Python
false
false
5,327
py
"""Quote strings to be valid DOT identifiers, assemble quoted attribute lists.""" import functools import re import typing from . import tools __all__ = ['quote', 'quote_edge', 'a_list', 'attr_list', 'escape', 'nohtml'] # https://www.graphviz.org/doc/info/lang.html # https://www.graphviz.org/doc/info/attrs.html#k:escString HTML_STRING = re.compile(r'<.*>$', re.DOTALL) ID = re.compile(r'([a-zA-Z_][a-zA-Z0-9_]*|-?(\.[0-9]+|[0-9]+(\.[0-9]*)?))$') KEYWORDS = {'node', 'edge', 'graph', 'digraph', 'subgraph', 'strict'} COMPASS = {'n', 'ne', 'e', 'se', 's', 'sw', 'w', 'nw', 'c', '_'} # TODO QUOTE_OPTIONAL_BACKSLASHES = re.compile(r'(?P<bs>(?:\\\\)*)' r'\\?(?P<quote>")') ESCAPE_UNESCAPED_QUOTES = functools.partial(QUOTE_OPTIONAL_BACKSLASHES.sub, r'\g<bs>\\\g<quote>') def quote(identifier: str, is_html_string=HTML_STRING.match, is_valid_id=ID.match, dot_keywords=KEYWORDS, escape_unescaped_quotes=ESCAPE_UNESCAPED_QUOTES) -> str: r"""Return DOT identifier from string, quote if needed. >>> quote('') '""' >>> quote('spam') 'spam' >>> quote('spam spam') '"spam spam"' >>> quote('-4.2') '-4.2' >>> quote('.42') '.42' >>> quote('<<b>spam</b>>') '<<b>spam</b>>' >>> quote(nohtml('<>')) '"<>"' >>> print(quote('"')) "\"" >>> print(quote('\\"')) "\"" >>> print(quote('\\\\"')) "\\\"" >>> print(quote('\\\\\\"')) "\\\"" """ if is_html_string(identifier) and not isinstance(identifier, NoHtml): pass elif not is_valid_id(identifier) or identifier.lower() in dot_keywords: return f'"{escape_unescaped_quotes(identifier)}"' return identifier def quote_edge(identifier: str) -> str: """Return DOT edge statement node_id from string, quote if needed. >>> quote_edge('spam') 'spam' >>> quote_edge('spam spam:eggs eggs') '"spam spam":"eggs eggs"' >>> quote_edge('spam:eggs:s') 'spam:eggs:s' """ node, _, rest = identifier.partition(':') parts = [quote(node)] if rest: port, _, compass = rest.partition(':') parts.append(quote(port)) if compass: parts.append(compass) return ':'.join(parts) def a_list(label: typing.Optional[str] = None, kwargs=None, attributes=None) -> str: """Return assembled DOT a_list string. >>> a_list('spam', {'spam': None, 'ham': 'ham ham', 'eggs': ''}) 'label=spam eggs="" ham="ham ham"' """ result = [f'label={quote(label)}'] if label is not None else [] if kwargs: items = [f'{quote(k)}={quote(v)}' for k, v in tools.mapping_items(kwargs) if v is not None] result.extend(items) if attributes: if hasattr(attributes, 'items'): attributes = tools.mapping_items(attributes) items = [f'{quote(k)}={quote(v)}' for k, v in attributes if v is not None] result.extend(items) return ' '.join(result) def attr_list(label: typing.Optional[str] = None, kwargs=None, attributes=None) -> str: """Return assembled DOT attribute list string. Sorts ``kwargs`` and ``attributes`` if they are plain dicts (to avoid unpredictable order from hash randomization in Python < 3.7). >>> attr_list() '' >>> attr_list('spam spam', kwargs={'eggs': 'eggs', 'ham': 'ham ham'}) ' [label="spam spam" eggs=eggs ham="ham ham"]' >>> attr_list(kwargs={'spam': None, 'eggs': ''}) ' [eggs=""]' """ content = a_list(label, kwargs, attributes) if not content: return '' return f' [{content}]' class Quote: """Quote strings to be valid DOT identifiers, assemble quoted attribute lists.""" _quote = staticmethod(quote) _quote_edge = staticmethod(quote_edge) _a_list = staticmethod(a_list) _attr_list = staticmethod(attr_list) def escape(s: str) -> 'NoHtml': r"""Return ``s`` as literal disabling special meaning of backslashes and ``'<...>'``. 
see also https://www.graphviz.org/doc/info/attrs.html#k:escString Args: s: String in which backslashes and ``'<...>'`` should be treated as literal. Returns: Escaped string subclass instance. Raises: TypeError: If ``s`` is not a ``str``. Example: >>> import graphviz >>> print(graphviz.escape(r'\l')) \\l """ return nohtml(s.replace('\\', '\\\\')) class NoHtml(str): """String subclass that does not treat ``'<...>'`` as DOT HTML string.""" __slots__ = () def nohtml(s: str) -> NoHtml: """Return copy of ``s`` that will not treat ``'<...>'`` as DOT HTML string in quoting. Args: s: String in which leading ``'<'`` and trailing ``'>'`` should be treated as literal. Returns: String subclass instance. Raises: TypeError: If ``s`` is not a ``str``. Example: >>> import graphviz >>> g = graphviz.Graph() >>> g.node(graphviz.nohtml('<>-*-<>')) >>> print(g.source) # doctest: +NORMALIZE_WHITESPACE graph { "<>-*-<>" } """ return NoHtml(s)
[ "sebastian.bank@uni-leipzig.de" ]
sebastian.bank@uni-leipzig.de
1b9e47c4782c223749556fcc26edde71958cf0b7
09c8e2ead4c8d76e76bfd5fd7b8abaed709b913b
/data_common/machine_learning/utils/word_utils.py
90fe9fd19d8ff9c93cc1a17255f430ed27707ce3
[]
no_license
jiangsiwei2018/BigData
c004f8e1bb290e6591745aec0dcaeb8542765e3b
568a627f32c2f4526d508096e1ded942bdf7b425
refs/heads/master
2023-08-19T14:48:24.244248
2021-10-19T18:56:18
2021-10-19T18:56:18
345,369,365
2
0
null
null
null
null
UTF-8
Python
false
false
2,164
py
def createVocabList(dataSet):
    '''
    Create the list of unique words that appear across all documents.

    Args:
        dataSet: all documents
    Return:
        list of unique words over all documents, i.e. the vocabulary
    '''
    vocabSet = set([])
    # take the union of the two sets
    for document in dataSet:
        vocabSet = vocabSet | set(document)
    return list(vocabSet)


# Bag-of-words model: how many times each word occurs in the document
def bagOfWords2Vec(vocabList, inputSet):
    '''
    Convert an input document into a bag-of-words vector based on the vocabulary.

    Args:
        vocabList: vocabulary
        inputSet: current input document
    Return:
        returnVec: the document converted into a word vector

    Example:
        vocabList = ['I', 'love', 'python', 'and', 'machine', 'learning']
        inputset = ['python', 'machine', 'learning', 'python', 'machine']
        returnVec = [0, 0, 2, 0, 2, 1]
    The vector is as long as the vocabulary; each position holds that word's
    count, and words absent from the vocabulary are reported with a print.
    '''
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1
        else:
            print("the word: %s is not in my vocabulary!" % word)
    return returnVec


# Set-of-words model: whether a word occurs in the document (1) or not (0)
def setOfWord2Vec(vocabList, inputSet):
    '''
    Convert an input document into a set-of-words vector based on the vocabulary.

    Args:
        vocabList: vocabulary
        inputSet: current input document
    Return:
        returnVec: the document converted into a word vector

    Example:
        vocabList = ['I', 'love', 'python', 'and', 'machine', 'learning']
        inputset = ['python', 'machine', 'learning']
        returnVec = [0, 0, 1, 0, 1, 1]
    The vector is as long as the vocabulary; positions of words that occur are
    1, others are 0, and words absent from the vocabulary are reported with a
    print.
    '''
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
        else:
            print("the word: %s is not in my vocabulary!" % word)
    return returnVec
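# Quick demo tying the helpers together (hypothetical toy documents):
# docs = [['python', 'is', 'fun'], ['machine', 'learning', 'in', 'python']]
# vocab = createVocabList(docs)
# print(bagOfWords2Vec(vocab, docs[1]))  # counts per vocabulary word
# print(setOfWord2Vec(vocab, docs[0]))   # 0/1 presence per vocabulary word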
[ "1910479313@qq.com" ]
1910479313@qq.com
2e4a196d264e7391673866250f0d8235569d7a90
6268777dc14e1870f122c3f6d9eb159ae64e71d2
/plugin/py/lib/evernote/edam/limits/constants.py
5894682f0f534b679148c3dee10634237d1500e2
[ "Apache-2.0", "BSD-2-Clause" ]
permissive
vim-scripts/evervim
9b36bb2caf4344850e12a85eac187357fe0d07e8
fd333a0757c27b3fd737786cdf9bb4f6cffadb0c
refs/heads/master
2020-05-19T09:22:53.141709
2012-06-03T00:00:00
2012-06-09T22:20:03
4,316,143
2
0
null
null
null
null
UTF-8
Python
false
false
4,554
py
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#

from thrift.Thrift import *
from ttypes import *

EDAM_ATTRIBUTE_LEN_MIN = 1
EDAM_ATTRIBUTE_LEN_MAX = 4096
EDAM_ATTRIBUTE_REGEX = "^[^\\p{Cc}\\p{Zl}\\p{Zp}]{1,4096}$"
EDAM_ATTRIBUTE_LIST_MAX = 100
EDAM_GUID_LEN_MIN = 36
EDAM_GUID_LEN_MAX = 36
EDAM_GUID_REGEX = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
EDAM_EMAIL_LEN_MIN = 6
EDAM_EMAIL_LEN_MAX = 255
EDAM_EMAIL_LOCAL_REGEX = "^[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+(\\.[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+)*$"
EDAM_EMAIL_DOMAIN_REGEX = "^[A-Za-z0-9-]+(\\.[A-Za-z0-9-]+)*\\.([A-Za-z]{2,})$"
EDAM_EMAIL_REGEX = "^[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+(\\.[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+)*@[A-Za-z0-9-]+(\\.[A-Za-z0-9-]+)*\\.([A-Za-z]{2,})$"
EDAM_TIMEZONE_LEN_MIN = 1
EDAM_TIMEZONE_LEN_MAX = 32
EDAM_TIMEZONE_REGEX = "^([A-Za-z_-]+(/[A-Za-z_-]+)*)|(GMT(-|\\+)[0-9]{1,2}(:[0-9]{2})?)$"
EDAM_MIME_LEN_MIN = 3
EDAM_MIME_LEN_MAX = 255
EDAM_MIME_REGEX = "^[A-Za-z]+/[A-Za-z0-9._+-]+$"
EDAM_MIME_TYPE_GIF = "image/gif"
EDAM_MIME_TYPE_JPEG = "image/jpeg"
EDAM_MIME_TYPE_PNG = "image/png"
EDAM_MIME_TYPE_WAV = "audio/wav"
EDAM_MIME_TYPE_MP3 = "audio/mpeg"
EDAM_MIME_TYPE_AMR = "audio/amr"
EDAM_MIME_TYPE_INK = "application/vnd.evernote.ink"
EDAM_MIME_TYPE_PDF = "application/pdf"
EDAM_MIME_TYPE_DEFAULT = "application/octet-stream"
EDAM_MIME_TYPES = set([
    "image/gif",
    "image/jpeg",
    "image/png",
    "audio/wav",
    "audio/mpeg",
    "audio/amr",
    "application/vnd.evernote.ink",
    "application/pdf",
])
EDAM_COMMERCE_SERVICE_GOOGLE = "Google"
EDAM_COMMERCE_SERVICE_PAYPAL = "Paypal"
EDAM_COMMERCE_SERVICE_GIFT = "Gift"
EDAM_COMMERCE_SERVICE_TRIALPAY = "TrialPay"
EDAM_COMMERCE_SERVICE_TRIAL = "Trial"
EDAM_COMMERCE_SERVICE_GROUP = "Group"
EDAM_COMMERCE_SERVICE_CYBERSOURCE = "CYBERSRC"
EDAM_COMMERCE_DEFAULT_CURRENCY_COUNTRY_CODE = "USD"
EDAM_SEARCH_QUERY_LEN_MIN = 0
EDAM_SEARCH_QUERY_LEN_MAX = 1024
EDAM_SEARCH_QUERY_REGEX = "^[^\\p{Cc}\\p{Zl}\\p{Zp}]{0,1024}$"
EDAM_HASH_LEN = 16
EDAM_USER_USERNAME_LEN_MIN = 1
EDAM_USER_USERNAME_LEN_MAX = 64
EDAM_USER_USERNAME_REGEX = "^[a-z0-9]([a-z0-9_-]{0,62}[a-z0-9])?$"
EDAM_USER_NAME_LEN_MIN = 1
EDAM_USER_NAME_LEN_MAX = 255
EDAM_USER_NAME_REGEX = "^[^\\p{Cc}\\p{Zl}\\p{Zp}]{1,255}$"
EDAM_TAG_NAME_LEN_MIN = 1
EDAM_TAG_NAME_LEN_MAX = 100
EDAM_TAG_NAME_REGEX = "^[^,\\p{Cc}\\p{Z}]([^,\\p{Cc}\\p{Zl}\\p{Zp}]{0,98}[^,\\p{Cc}\\p{Z}])?$"
EDAM_NOTE_TITLE_LEN_MIN = 1
EDAM_NOTE_TITLE_LEN_MAX = 255
EDAM_NOTE_TITLE_REGEX = "^[^\\p{Cc}\\p{Z}]([^\\p{Cc}\\p{Zl}\\p{Zp}]{0,253}[^\\p{Cc}\\p{Z}])?$"
EDAM_NOTE_CONTENT_LEN_MIN = 0
EDAM_NOTE_CONTENT_LEN_MAX = 5242880
EDAM_NOTEBOOK_NAME_LEN_MIN = 1
EDAM_NOTEBOOK_NAME_LEN_MAX = 100
EDAM_NOTEBOOK_NAME_REGEX = "^[^\\p{Cc}\\p{Z}]([^\\p{Cc}\\p{Zl}\\p{Zp}]{0,98}[^\\p{Cc}\\p{Z}])?$"
EDAM_NOTEBOOK_STACK_LEN_MIN = 1
EDAM_NOTEBOOK_STACK_LEN_MAX = 100
EDAM_NOTEBOOK_STACK_REGEX = "^[^\\p{Cc}\\p{Z}]([^\\p{Cc}\\p{Zl}\\p{Zp}]{0,98}[^\\p{Cc}\\p{Z}])?$"
EDAM_PUBLISHING_URI_LEN_MIN = 1
EDAM_PUBLISHING_URI_LEN_MAX = 255
EDAM_PUBLISHING_URI_REGEX = "^[a-zA-Z0-9.~_+-]{1,255}$"
EDAM_PUBLISHING_URI_PROHIBITED = set([
    "..",
])
EDAM_PUBLISHING_DESCRIPTION_LEN_MIN = 1
EDAM_PUBLISHING_DESCRIPTION_LEN_MAX = 200
EDAM_PUBLISHING_DESCRIPTION_REGEX = "^[^\\p{Cc}\\p{Z}]([^\\p{Cc}\\p{Zl}\\p{Zp}]{0,198}[^\\p{Cc}\\p{Z}])?$"
EDAM_SAVED_SEARCH_NAME_LEN_MIN = 1
EDAM_SAVED_SEARCH_NAME_LEN_MAX = 100
EDAM_SAVED_SEARCH_NAME_REGEX = "^[^\\p{Cc}\\p{Z}]([^\\p{Cc}\\p{Zl}\\p{Zp}]{0,98}[^\\p{Cc}\\p{Z}])?$"
EDAM_USER_PASSWORD_LEN_MIN = 6
EDAM_USER_PASSWORD_LEN_MAX = 64
EDAM_USER_PASSWORD_REGEX = "^[A-Za-z0-9!#$%&'()*+,./:;<=>?@^_`{|}~\\[\\]\\\\-]{6,64}$"
EDAM_NOTE_TAGS_MAX = 100
EDAM_NOTE_RESOURCES_MAX = 1000
EDAM_USER_TAGS_MAX = 100000
EDAM_USER_SAVED_SEARCHES_MAX = 100
EDAM_USER_NOTES_MAX = 100000
EDAM_USER_NOTEBOOKS_MAX = 250
EDAM_USER_RECENT_MAILED_ADDRESSES_MAX = 10
EDAM_USER_MAIL_LIMIT_DAILY_FREE = 50
EDAM_USER_MAIL_LIMIT_DAILY_PREMIUM = 200
EDAM_USER_UPLOAD_LIMIT_FREE = 62914560
EDAM_USER_UPLOAD_LIMIT_PREMIUM = 1073741824
EDAM_NOTE_SIZE_MAX_FREE = 26214400
EDAM_NOTE_SIZE_MAX_PREMIUM = 52428800
EDAM_RESOURCE_SIZE_MAX_FREE = 26214400
EDAM_RESOURCE_SIZE_MAX_PREMIUM = 52428800
EDAM_USER_LINKED_NOTEBOOK_MAX = 100
EDAM_NOTEBOOK_SHARED_NOTEBOOK_MAX = 250
[ "scraper@vim-scripts.org" ]
scraper@vim-scripts.org