| column | dtype | stats |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 2–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–69 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–118 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | lengths 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | lengths 2–10.3M |
| authors | list | lengths 1–1 |
| author_id | string | lengths 0–212 |

---

blob_id: 618421f2c2ca41e846e9a550696550ebdaf5f9b0 | directory_id: 05d04370e2547aba75c5a6ddfab52ee66a4421ce | content_id: 35e5668861f40f36e8c3866e01adb1ef928d98ee
path: /ccpweb/ccpweb/views.py
repo_name: story645/ccp-viz-toolkit | branch_name: refs/heads/master | snapshot_id: 4488404518dee1b4dc0cb33ac51c19aa15ee3156 | revision_id: cd75f674d7b17a2bba8ed4bffb6853fae94885c5
detected_licenses: ["BSD-3-Clause"] | license_type: permissive
visit_date: 2020-05-19T09:20:37.152986 | revision_date: 2015-03-26T16:32:14 | committer_date: 2015-03-26T16:32:14
github_id: 32,113,131 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,425 | extension: py
content:
#!/usr/bin/env python
#
# views.py
#
# Hannah Aizenman, 2011-08
#
# http://www.opensource.org/licenses/bsd-license.php
"""Views (web pages) served by ccpweb.
"""
__docformat__ = "restructuredtext"
# http://docs.python.org/library/os.html
import os
# https://docs.pylonsproject.org/projects/pyramid/1.0/api/response.html
from pyramid.response import Response
# http://docs.pylonsproject.org/projects/pyramid/1.0/api/view.html
from pyramid.view import view_config
# http://docs.pylonsproject.org/projects/pyramid/1.0/api/exceptions.html
from pyramid.exceptions import NotFound
from ccplib.datahandlers.ccpdata import CCPData
from ccpweb.resources import DataList, AlgList, Static
from ccpweb import tasks
SITE_LIB_ROOT = os.path.abspath(os.path.dirname(__file__))
# ccpviz.html
@view_config(context=Static, request_method='GET')
def page_view(context, request):
key = request.traversed[0]
pagepath = os.path.join(SITE_LIB_ROOT, key)
try:
page = open(pagepath).read()
return Response(content_type='text/html', body=page)
except IOError, e:
return NotFound()
# list of available datasets
@view_config(context=DataList, request_method='GET', renderer='json')
def get_datalist(context, request):
return tasks.get_configs(" ")
# list of algorithms
@view_config(context=AlgList, request_method='GET', renderer='json')
def get_alglist(context, request):
return tasks.alglist()
# random metadata about the dataset
@view_config(context=CCPData, request_method='GET')
def get_objattrs(context, request):
return Response(tasks.objattrs(context, request))
#bundles time and grid in one request/json object
@view_config(context=CCPData, name='menu', request_method='GET', renderer='json')
def get_dsmenu(context, request):
dsmenu = dict(time=tasks.get_time(context))
dsmenu.update(tasks.get_grid(context))
return dsmenu
# Returns dictionary of valid ranges
@view_config(context=CCPData, name='validrange', request_method='GET', renderer='json')
def get_valid_range(context, request):
return tasks.valid_range(context)
# returns time as a newline-separated string
# used to populate autocomplete
@view_config(context=CCPData, name='time', request_method='GET')
def get_time(context, request):
time = tasks.get_time(context)
return Response("\n".join(time))
# bundles the lat and lon arrays/list into a json object
@view_config(context=CCPData, name='grid', request_method='GET', renderer='json')
def get_grid(context, request):
latlon = tasks.get_grid(context)
return latlon
# returns the data/doesn't quite work as expected
@view_config(context=CCPData, name='data', request_method='GET')
def get_data(context, request):
image = tasks.select_data(context, request.subpath)
return Response(image)
# returns the graph as a response
@view_config(context=CCPData, name='graph', request_method='GET')
def make_graph(context, request):
image = tasks.select_data(context, request.subpath)
graph_obj = tasks.set_graph(context, image.ndim)
return tasks.drawgraph(graph_obj, image, request.subpath)
# 404 page-should be replaced with something fun
@view_config(context='pyramid.exceptions.NotFound')
def notfound_view(self):
return Response('404: Page Not Found.')
# used to test stuff/part of the paster default
def my_view(request):
return {'project':'ccpweb'}
authors: ["story645@gmail.com"] | author_id: story645@gmail.com

---

blob_id: aa5a47cef1ff7e66f6dc3832cbb11801660d4db4 | directory_id: 3e3d49eb90e7f2d69007c047fce5d399c1b3f290 | content_id: 9ee1999e0ba4e72a20338343114c78934643c0ff
path: /timing_inv_scipy_false_double.py
repo_name: paulavillalobosb/MCOC2021-P0 | branch_name: refs/heads/main | snapshot_id: 4a246ee0a950966cd8813e3618f1ea7d9f37f1fc | revision_id: 3f95a52fe06f4cef2f8bd869dc53547081a99c31
detected_licenses: [] | license_type: no_license
visit_date: 2023-08-14T06:47:26.942540 | revision_date: 2021-09-04T03:16:03 | committer_date: 2021-09-04T03:16:03
github_id: 393,170,255 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2021-08-05T21:05:00 | gha_created_at: 2021-08-05T21:04:59 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,042 | extension: py
content:
from time import perf_counter
from numpy import zeros
from numpy import half, single, double, longdouble
from scipy.linalg import inv
from laplaciana import laplaciana
Ns = [1, 5, 10, 15, 20, 30, 40, 50, 60, 70, 80, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 2000, 3000, 4000, 5000]
dts_inv = []
mems = []
corridas = 10
for i in range (corridas):
txt = open(f"Rendimiento{i+1} scipy_false_double.txt", "w")
for N in Ns:
t1 = perf_counter()
A = laplaciana(N, dtype = double)
t2 = perf_counter()
Am1 = inv(A, overwrite_a = False)
t3 = perf_counter()
        dt_ensamblaje = t2 - t1 # Time taken to assemble the matrix
        dt_inversion = t3 - t2 # Time taken to invert the matrix
mem = A.nbytes + Am1.nbytes
print (f"Para N = {N} :")
print (f"Tiempo ensamblaje: {dt_ensamblaje} s")
print (f"Tiempo inversion: {dt_inversion} s")
print (f"Uso memoria: {mem} bytes")
print ("")
dts_inv.append(dt_inversion)
mems.append(mem)
txt.write(f"{N} {dt_inversion} {mem}\n")
txt.close()
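The script depends on a local `laplaciana` module that is not part of this record. A minimal sketch of what it presumably provides (a dense N×N one-dimensional Laplacian built with the requested dtype); the actual definition in MCOC2021-P0 may differ:

```python
# Hypothetical stand-in for the laplaciana module imported above: a dense
# N x N 1-D Laplacian (2 on the diagonal, -1 on the off-diagonals).
import numpy as np

def laplaciana(N, dtype=np.double):
    A = np.zeros((N, N), dtype=dtype)
    for i in range(N):
        A[i, i] = 2
        if i > 0:
            A[i, i - 1] = -1
        if i < N - 1:
            A[i, i + 1] = -1
    return A
```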
authors: ["noreply@github.com"] | author_id: paulavillalobosb.noreply@github.com

---

blob_id: 9fe1ed6f574a42544e5e006c9a68e4d65d323276 | directory_id: 0861a97ef4d89614c5a581eefc00e355d4c1e454 | content_id: 04d2ff162fdcdae695fe691a4d851bdb9a6a478f
path: /data_transformation.py
repo_name: befitz/ISOM837_AnalyticsStoryTelling | branch_name: refs/heads/main | snapshot_id: c3c917764e4d7f89330c374e8dbe062a86e59f37 | revision_id: b13708c46b8c068f1da66fe64d56d0b9ee54fe5f
detected_licenses: [] | license_type: no_license
visit_date: 2023-09-03T06:06:51.653802 | revision_date: 2021-11-17T02:56:04 | committer_date: 2021-11-17T02:56:04
github_id: 425,417,809 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,473 | extension: py
content:
from fred_data_extract import build_seperations_table
import pandas as pd
import numpy as np
def create_transformed_table(table_raw):
"""
Function to transform the variables which improve skewness
args: table_raw (pd.DataFrame)
returns: table_transformed (pd.DataFrame)
"""
table_transformed = pd.DataFrame()
table_transformed['women_ratio'] = table_raw['women_ratio'] #normalizing does not reduce the skewness
table_transformed['hourly_earn'] = table_raw['hourly_earn'] #normalizing does not reduce the skewness
table_transformed['lfp_25_54'] = table_raw['lfp_25_54'] #normalizing does not reduce the skewness
table_transformed['lfp_16_19_inversesqrt'] = 1/np.sqrt(table_raw['lfp_16_19']) #normalizing reduces the skewness
table_transformed['lfp_0ver55_inversesqrt'] = 1/np.sqrt(table_raw['lfp_0ver55']) #normalizing reduces the skewness
table_transformed['cap_utilization'] = table_raw['cap_utilization'] #normalizing does not reduce the skewness
table_transformed['hours_worked'] = table_raw['hours_worked'] #normalizing does not reduce the skewness
table_transformed['cpi'] = table_raw['cpi'] #normalizing does not reduce the skewness
table_transformed['self_employed_inversesqrt'] = 1/np.sqrt(table_raw['self_employed']) #normalizing reduces the skewness
table_transformed['policy_uncertainty_inversesqrt'] = 1/np.sqrt(table_raw['policy_uncertainty']) #normalizing reduces the skewness
table_transformed['job_openings_inversesqrt'] = 1/np.sqrt(table_raw['job_openings']) #normalizing reduces the skewness
table_transformed['quits'] = table_raw['quits'] #normalizing does not reduce the skewness
return table_transformed
def standardize_data(series):
"""
    Function to scale a series of the dataframe to the range [0, 1].
    formula is x_scaled = (x - min) / (max - min)   (min-max scaling, not z-score)
args: series (pd.Series) indexed by 'Date'
returns: stand_series (pd.Series)
"""
xmin = np.min(series)
xmax = np.max(series)
stand_series = []
for i in series:
stand_series.append((i-xmin)/(xmax-xmin))
standardized_series = pd.Series(stand_series)
return standardized_series
def standardize_table(df):
"""
Function to standardize a dataframe
args: df (pd.DataFrame)
returns: table_standardized (pd.DataFrame)
"""
table_standardized= pd.DataFrame()
for column in df.columns:
standard = standardize_data(df[f'{column}'])
table_standardized[f'{column}'] = standard
table_standardized.reset_index()
return table_standardized
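A quick check (hypothetical, not from the repo) that `standardize_data` performs min-max scaling; pandas can express the same computation in one vectorized line:

```python
import pandas as pd

s = pd.Series([3.0, 7.0, 5.0, 9.0])
# Vectorized equivalent of standardize_data: min-max scaling to [0, 1].
vectorized = (s - s.min()) / (s.max() - s.min())
print(list(vectorized))  # [0.0, 0.666..., 0.333..., 1.0]
```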
authors: ["brynnefitzgerald@gmail.com"] | author_id: brynnefitzgerald@gmail.com

---

blob_id: 4b99c672e34294a5f110b6531518b6d7056de15a | directory_id: 1be2cbc9fd62cf77cc05a64807acf7d857b84eee | content_id: a2114330cc418e849f46670ef206413bcc1d54b6
path: /blackopt/config.py
repo_name: ikamensh/blackopt | branch_name: refs/heads/master | snapshot_id: 4fdce2c0147b1a5a85024c9b59925d3d1a35b13f | revision_id: a6ab24ce1be21a5ca9e26d0bb1f59bb50fd007a2
detected_licenses: [] | license_type: no_license
visit_date: 2023-01-23T12:55:42.087216 | revision_date: 2020-12-05T19:18:30 | committer_date: 2020-12-05T19:18:30
github_id: 178,232,685 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2020-10-18T20:57:29 | gha_created_at: 2019-03-28T15:33:53 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 382 | extension: py
content:
import os
default_workspace = "_blackopt_workspace"
_rootdir = default_workspace
def set_rootdir(path):
path = os.path.expanduser(path)
global _rootdir
_rootdir = path
def prepend_rootdir(prefix):
prefix = os.path.expanduser(prefix)
path = os.path.join(prefix, default_workspace)
global _rootdir
_rootdir = path
def get_rootdir():
return _rootdir
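A hypothetical usage sketch for this module; the import path is assumed from the file location /blackopt/config.py:

```python
from blackopt import config  # assumed package layout

config.set_rootdir("~/experiments")   # expanded to an absolute path
print(config.get_rootdir())           # e.g. /home/user/experiments
config.prepend_rootdir("~")           # ~/_blackopt_workspace
print(config.get_rootdir())
```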
authors: ["ikkamens@amazon.com"] | author_id: ikkamens@amazon.com

---

blob_id: fd955ae2db91bc85be3d90b6dca3e3008638e687 | directory_id: 916aca4052eeb9f04bca9082c95b8121d9854b4c | content_id: bc3ad55f522d643d056cc7c9ccb10f9eb4778a73
path: /codility/Iterations/binarygap/BinaryGap.py
repo_name: sirbega/ikre | branch_name: refs/heads/master | snapshot_id: db15fc8e06fe66e1bc0d8d3cfb3b47a29d960666 | revision_id: 3372928b1ac1fcb7e6643ccd709b6d306e1383de
detected_licenses: [] | license_type: no_license
visit_date: 2020-05-31T12:44:28.931216 | revision_date: 2019-11-28T18:32:43 | committer_date: 2019-11-28T18:32:43
github_id: 94,031,020 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 753 | extension: py
content:
#!/usr/bin/env python3
"""Return the binary representation of the input integer."""
def solution(N):
"""Return the longest binary gap of the input integer."""
mod = ""
bi = ""
num = N
while num > 0:
tmp = num % 2
mod = str(tmp)
bi = mod + bi
num = num // 2
broj = bi
duz = len(broj)
tmp = 0
gap = 0
for i in range(0, duz):
for j in range(i, duz):
if broj[j] == "0":
tmp = tmp + 1
if broj[j] == "1":
break
if j < duz:
if broj[j] == "1":
if tmp > gap:
gap = tmp
tmp = 0
return gap
N = int(input("Give me a positive integer! "))
print(solution(N))
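For comparison (not from the repo), the same result can be computed with string operations: strip trailing zeros so only zero-runs bounded by ones on both sides remain, then take the longest such run:

```python
def binary_gap(n: int) -> int:
    # bin(9) == '0b1001'; drop the prefix and any trailing zeros, so the
    # zero-runs between ones are exactly the binary gaps.
    bits = bin(n)[2:].rstrip('0')
    return max((len(run) for run in bits.split('1')), default=0)

assert binary_gap(9) == 2      # 1001
assert binary_gap(529) == 4    # 1000010001
assert binary_gap(20) == 1     # 10100
```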
authors: ["sirbega@gmail.com"] | author_id: sirbega@gmail.com

---

blob_id: ba0d04740f9c5e84c0b1e7a8614b8ae861af8c7b | directory_id: 8b36dacd454c99b6d742bff0aceca7983e9e2f5d | content_id: 7ff088260a8fa45764610d9672a09d429dd7cd59
path: /myportfolio/urls.py
repo_name: netteNz/nettenz-portfolio | branch_name: refs/heads/master | snapshot_id: ec3afa3b170665961c58cfc0d6af2485c22a68b8 | revision_id: ea95d7a1f99f54ceceb4d470de854c06483eaecd
detected_licenses: [] | license_type: no_license
visit_date: 2021-09-23T09:30:24.491470 | revision_date: 2020-05-21T03:01:10 | committer_date: 2020-05-21T03:01:10
github_id: 251,180,385 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2021-09-22T18:49:01 | gha_created_at: 2020-03-30T02:21:47 | gha_language: JavaScript
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 251 | extension: py
content:
from django.contrib import admin
from django.urls import path, include
from blog.views import BlogView, ContentView
urlpatterns = [
path('admin/', admin.site.urls),
path('', BlogView.as_view()),
path('content/', ContentView.as_view())
]
authors: ["lugo.emanuel@gmail.com"] | author_id: lugo.emanuel@gmail.com

---

blob_id: d24e4f1aa4d8188ac9c432b29ddf64d3c59c0c73 | directory_id: ee7a2e1b18c870cb13840fc0d47f217050bac3fe | content_id: 8a5ab1823a968332dc038b17877768849e5edb2f
path: /bnbvolunteer/volunteers/migrations/0016_auto_20150116_1722.py
repo_name: lfzhangMIT/bnb | branch_name: refs/heads/master | snapshot_id: 92344d50599fe915915d63ee3f1c11677f0a4416 | revision_id: 15ea0170f6c495e9f455b2f380b4eb40d466968e
detected_licenses: ["MIT"] | license_type: permissive
visit_date: 2021-05-28T09:20:28.723664 | revision_date: 2015-01-29T19:08:25 | committer_date: 2015-01-29T19:08:25
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 824 | extension: py
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('volunteers', '0015_auto_20150115_1631'),
]
operations = [
migrations.AddField(
model_name='activity',
name='staff',
field=models.ForeignKey(related_name='staff', to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='activity',
name='user',
field=models.ForeignKey(related_name='user', default=b'', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
authors: ["lfzhang@mit.edu"] | author_id: lfzhang@mit.edu

---

blob_id: c8a0aef9d6c8d9789721dd16c42b8132ae7309e8 | directory_id: e9daa35941efb7e1cab3cfee788110bd66d088cc | content_id: 2b6e6bddf516cfc1d347ce1f219c30103158dffe
path: /dataset.py
repo_name: Souli-prgms/Bengali | branch_name: refs/heads/master | snapshot_id: 5763d48861eeb0606105f6345e56b08cef60612d | revision_id: 33de884a7b32de202ebd2627c29a8d50dc9ea54a
detected_licenses: [] | license_type: no_license
visit_date: 2020-12-26T18:33:00.407173 | revision_date: 2020-02-01T11:19:44 | committer_date: 2020-02-01T11:19:44
github_id: 237,598,607 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,828 | extension: py
content:
import os
import torch
import pandas as pd
from torch.utils.data import Dataset, DataLoader
import pickle
import constants
class BengaliDataset(Dataset):
def __init__(self, label_file, index):
self.label_file = label_file
self.labels = self.retrieve_labels()
self.images = None
with open(constants.PICKLE_FILES[index], 'rb') as file:
self.images = pickle.load(file)
self.num_samples = self.images.shape[0]
self.index = index
def __len__(self):
return self.num_samples
def __getitem__(self, idx):
img = (self.images[idx] - constants.MEAN) / constants.STD
labels = self.get_label(idx)
return torch.from_numpy(img).unsqueeze(0), torch.tensor(labels)
def retrieve_labels(self):
labels = pd.read_csv(self.label_file)
for label in constants.CATEGORIES:
labels[label] = labels[label].astype('uint8')
return labels
def get_label(self, index):
img_name = "Train_{}".format(index + self.num_samples * self.index)
labels = []
for l in constants.CATEGORIES:
label = self.labels.loc[self.labels['image_id'] == img_name][l].iloc[0]
labels.append(label)
return labels
def get_datasets(index):
ds = BengaliDataset(os.path.join(constants.DATA_DIR, constants.TRAIN_CSV), index)
nb_train = int((1.0 - constants.VALID_RATIO) * len(ds))
nb_valid = len(ds) - nb_train
return torch.utils.data.dataset.random_split(ds, [nb_train, nb_valid])
def get_dataloaders(index):
train_ds, valid_ds = get_datasets(index)
return DataLoader(dataset=train_ds, batch_size=constants.BS, shuffle=True), DataLoader(dataset=valid_ds, batch_size=constants.BS, shuffle=False)
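A hypothetical driver for this record's API; the `constants` module (DATA_DIR, TRAIN_CSV, PICKLE_FILES, MEAN, STD, CATEGORIES, VALID_RATIO, BS) is assumed to exist, since it is not part of this file:

```python
# Hypothetical usage; requires constants.py and the pickled image arrays.
train_loader, valid_loader = get_dataloaders(index=0)
images, labels = next(iter(train_loader))
print(images.shape, labels.shape)  # (BS, 1, H, W) and (BS, len(CATEGORIES))
```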
authors: ["soulignach@gmail.com"] | author_id: soulignach@gmail.com

---

blob_id: 3cc86621a38c55c60af190e6064d74da255a9e2b | directory_id: 14d8adc86adc14c1d64a5550b1bbd5663e984545 | content_id: 97ed76da8e3052e379884a175fa1c814d6591641
path: /combination_sum_ii.py
repo_name: milllu/leetcode | branch_name: refs/heads/master | snapshot_id: e1b68ef7774cc0c1b49325ec1b87280d27570d94 | revision_id: 458b3e72cd82a203b10bdca747c4c3ba85708f75
detected_licenses: [] | license_type: no_license
visit_date: 2020-03-30T23:41:46.180308 | revision_date: 2018-10-11T01:08:31 | committer_date: 2018-10-11T01:08:31
github_id: 151,709,941 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,375 | extension: py
content:
"""
给定一个数组 candidates 和一个目标数 target ,找出 candidates 中所有可以使数字和为 target 的组合。
candidates 中的每个数字在每个组合中只能使用一次。
说明:
所有数字(包括目标数)都是正整数。
解集不能包含重复的组合。
示例 1:
输入: candidates = [10,1,2,7,6,1,5], target = 8,
所求解集为:
[
[1, 7],
[1, 2, 5],
[2, 6],
[1, 1, 6]
]
示例 2:
输入: candidates = [2,5,2,1,2], target = 5,
所求解集为:
[
[1,2,2],
[5]
]
"""
class Solution(object):
def combinationSum2(self, nums, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
def dfs(dic, target, lst, suml):
if suml == target:
lst.sort()
if lst not in result:
result.append(lst)
return
if suml > target:
return
for key in dic:
if dic[key] > 0:
dic[key] -= 1
dfs(dic, target, lst+[key], suml+key)
dic[key] += 1
dic = {}
for num in nums:
dic[num] = dic.get(num, 0) + 1
result = []
dfs(dic, target, [], 0)
return result
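A quick check of the translated examples above (hypothetical driver, not in the repo):

```python
s = Solution()
print(s.combinationSum2([10, 1, 2, 7, 6, 1, 5], 8))
# e.g. [[1, 7], [1, 2, 5], [2, 6], [1, 1, 6]] in some order
print(s.combinationSum2([2, 5, 2, 1, 2], 5))
# e.g. [[1, 2, 2], [5]] in some order
```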
authors: ["3351440959@qq.com"] | author_id: 3351440959@qq.com

---

blob_id: c98d6c778d6329030608bdebead5f8f1899b0e82 | directory_id: 3b0e5028888bbed694dd33e2fed89f594d5f11e4 | content_id: 6b8484b0cf0f6c39df45f0ea3f90114cc110afb0
path: /vscode/extensions/donjayamanne.python-0.5.8/pythonFiles/completion.py
repo_name: gustavosoares/dotfiles | branch_name: refs/heads/master | snapshot_id: 670cb6c89117fd69f4eb2fca6859029dda7da1c2 | revision_id: c2eb5332711819a52fa22878e7f5cd2fb79e3633
detected_licenses: ["MIT"] | license_type: permissive
visit_date: 2022-05-03T06:04:01.726283 | revision_date: 2019-06-14T18:44:04 | committer_date: 2019-06-14T18:44:04
github_id: 67,193,472 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2022-04-22T20:44:35 | gha_created_at: 2016-09-02T05:41:06 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 20,922 | extension: py
content:
import os
import io
import re
import sys
import json
import traceback
WORD_RE = re.compile(r'\w')
# _serialize_methods below filters params with ARGUMENT_RE, which this copy of
# the file never defines; a plausible definition is assumed here so the module
# can run.
ARGUMENT_RE = re.compile(r'\w+')
class JediCompletion(object):
basic_types = {
'module': 'import',
'instance': 'variable',
'statement': 'value',
'param': 'variable',
}
def __init__(self):
self.default_sys_path = sys.path
self._input = io.open(sys.stdin.fileno(), encoding='utf-8')
def _get_definition_type(self, definition):
is_built_in = definition.in_builtin_module
# if definition.type not in ['import', 'keyword'] and is_built_in():
# return 'builtin'
if definition.type in ['statement'] and definition.name.isupper():
return 'constant'
return self.basic_types.get(definition.type, definition.type)
def _additional_info(self, completion):
"""Provide additional information about the completion object."""
if not hasattr(completion, '_definition') or completion._definition is None:
return ''
if completion.type == 'statement':
nodes_to_display = ['InstanceElement', 'String', 'Node', 'Lambda',
'Number']
return ''.join(c.get_code() for c in
completion._definition.children if type(c).__name__
in nodes_to_display).replace('\n', '')
return ''
@classmethod
def _get_top_level_module(cls, path):
"""Recursively walk through directories looking for top level module.
        Jedi will use the current filepath to look for other modules at the
        same path, but it will not be able to see modules **above**, so our
        goal is to find the highest python module available from filepath.
"""
_path, _ = os.path.split(path)
if os.path.isfile(os.path.join(_path, '__init__.py')):
return cls._get_top_level_module(_path)
return path
def _generate_signature(self, completion):
"""Generate signature with function arguments.
"""
if completion.type in ['module'] or not hasattr(completion, 'params'):
return ''
return '%s(%s)' % (
completion.name,
', '.join(p.description for p in completion.params if p))
def _get_call_signatures(self, script):
"""Extract call signatures from jedi.api.Script object in failsafe way.
Returns:
Tuple with original signature object, name and value.
"""
_signatures = []
try:
call_signatures = script.call_signatures()
except KeyError:
call_signatures = []
for signature in call_signatures:
for pos, param in enumerate(signature.params):
if not param.name:
continue
if param.name == 'self' and pos == 0:
continue
if WORD_RE.match(param.name) is None:
continue
try:
name, value = param.description.split('=')
except ValueError:
name = param.description
value = None
if name.startswith('*'):
continue
_signatures.append((signature, name, value))
return _signatures
def _get_call_signatures_with_args(self, script):
"""Extract call signatures from jedi.api.Script object in failsafe way.
Returns:
Array with dictionary
"""
_signatures = []
try:
call_signatures = script.call_signatures()
except KeyError:
call_signatures = []
for signature in call_signatures:
sig = {"name": "", "description": "", "docstring": "",
"paramindex": 0, "params": [], "bracketstart": []}
sig["description"] = signature.description
sig["docstring"] = signature.docstring()
sig["raw_docstring"] = signature.docstring(raw=True)
sig["name"] = signature.name
sig["paramindex"] = signature.index
sig["bracketstart"].append(signature.index)
_signatures.append(sig)
for pos, param in enumerate(signature.params):
if not param.name:
continue
if param.name == 'self' and pos == 0:
continue
if WORD_RE.match(param.name) is None:
continue
try:
name, value = param.description.split('=')
except ValueError:
name = param.description
value = None
# if name.startswith('*'):
# continue
#_signatures.append((signature, name, value))
sig["params"].append({"name": name, "value": value, "docstring": param.docstring(
), "description": param.description})
return _signatures
def _serialize_completions(self, script, identifier=None, prefix=''):
"""Serialize response to be read from VSCode.
Args:
script: Instance of jedi.api.Script object.
identifier: Unique completion identifier to pass back to VSCode.
prefix: String with prefix to filter function arguments.
Used only when fuzzy matcher turned off.
Returns:
Serialized string to send to VSCode.
"""
_completions = []
for signature, name, value in self._get_call_signatures(script):
if not self.fuzzy_matcher and not name.lower().startswith(
prefix.lower()):
continue
_completion = {
'type': 'property',
'raw_type': '',
'rightLabel': self._additional_info(signature)
}
# we pass 'text' here only for fuzzy matcher
if value:
_completion['snippet'] = '%s=${1:%s}$0' % (name, value)
_completion['text'] = '%s=%s' % (name, value)
else:
_completion['snippet'] = '%s=$1$0' % name
_completion['text'] = name
_completion['displayText'] = name
if self.show_doc_strings:
_completion['description'] = signature.docstring()
_completion['raw_docstring'] = signature.docstring(raw=True)
else:
_completion['description'] = self._generate_signature(
signature)
_completions.append(_completion)
try:
completions = script.completions()
except KeyError:
completions = []
for completion in completions:
if self.show_doc_strings:
description = completion.docstring()
else:
description = self._generate_signature(completion)
_completion = {
'text': completion.name,
'type': self._get_definition_type(completion),
'raw_type': completion.type,
'description': description,
'raw_docstring': completion.docstring(raw=True),
'rightLabel': self._additional_info(completion)
}
if any([c['text'].split('=')[0] == _completion['text']
for c in _completions]):
# ignore function arguments we already have
continue
_completions.append(_completion)
return json.dumps({'id': identifier, 'results': _completions})
def _serialize_methods(self, script, identifier=None, prefix=''):
_methods = []
try:
completions = script.completions()
except KeyError:
return []
for completion in completions:
if completion.name == '__autocomplete_python':
instance = completion.parent().name
break
else:
instance = 'self.__class__'
for completion in completions:
params = []
if hasattr(completion, 'params'):
params = [p.description for p in completion.params
if ARGUMENT_RE.match(p.description)]
if completion.parent().type == 'class':
_methods.append({
'parent': completion.parent().name,
'instance': instance,
'name': completion.name,
'params': params,
'moduleName': completion.module_name,
'fileName': completion.module_path,
'line': completion.line,
'column': completion.column,
})
return json.dumps({'id': identifier, 'results': _methods})
def _serialize_arguments(self, script, identifier=None):
"""Serialize response to be read from VSCode.
Args:
script: Instance of jedi.api.Script object.
identifier: Unique completion identifier to pass back to VSCode.
Returns:
Serialized string to send to VSCode.
"""
return json.dumps({"id": identifier, "results": self._get_call_signatures_with_args(script)})
def _top_definition(self, definition):
for d in definition.goto_assignments():
if d == definition:
continue
if d.type == 'import':
return self._top_definition(d)
else:
return d
return definition
def _extract_range(self, definition):
"""Provides the definition range of a given definition
For regular symbols it returns the start and end location of the
characters making up the symbol.
For scoped containers it will return the entire definition of the
scope.
The scope that jedi provides ends with the first character of the next
scope so it's not ideal. For vscode we need the scope to end with the
last character of actual code. That's why we extract the lines that
make up our scope and trim the trailing whitespace.
"""
from jedi import common
from jedi.parser.utils import load_parser
# get the scope range
try:
if definition.type in ['class', 'function'] and hasattr(definition, '_definition'):
scope = definition._definition
start_line = scope.start_pos[0] - 1
start_column = scope.start_pos[1]
end_line = scope.end_pos[0] - 1
end_column = scope.end_pos[1]
# get the lines
path = definition._definition.get_parent_until().path
parser = load_parser(path)
lines = common.splitlines(parser.source)
lines[end_line] = lines[end_line][:end_column]
# trim the lines
lines = lines[start_line:end_line + 1]
lines = '\n'.join(lines).rstrip().split('\n')
end_line = start_line + len(lines) - 1
end_column = len(lines[-1]) - 1
else:
symbol = definition._name
start_line = symbol.start_pos[0] - 1
start_column = symbol.start_pos[1]
end_line = symbol.end_pos[0] - 1
end_column = symbol.end_pos[1]
return {
'start_line': start_line,
'start_column': start_column,
'end_line': end_line,
'end_column': end_column
}
except Exception as e:
return {
'start_line': definition.line - 1,
'start_column': definition.column,
'end_line': definition.line - 1,
'end_column': definition.column
}
def _get_definitions(self, definitions, identifier=None):
"""Serialize response to be read from VSCode.
Args:
definitions: List of jedi.api.classes.Definition objects.
identifier: Unique completion identifier to pass back to VSCode.
Returns:
Serialized string to send to VSCode.
"""
_definitions = []
for definition in definitions:
try:
if definition.module_path:
if definition.type == 'import':
definition = self._top_definition(definition)
if not definition.module_path:
continue
try:
parent = definition.parent()
container = parent.name if parent.type != 'module' else ''
except Exception:
container = ''
_definition = {
'text': definition.name,
'type': self._get_definition_type(definition),
'raw_type': definition.type,
'fileName': definition.module_path,
'container': container,
'range': self._extract_range(definition)
}
_definitions.append(_definition)
except Exception as e:
pass
return _definitions
def _serialize_definitions(self, definitions, identifier=None):
"""Serialize response to be read from VSCode.
Args:
definitions: List of jedi.api.classes.Definition objects.
identifier: Unique completion identifier to pass back to VSCode.
Returns:
Serialized string to send to VSCode.
"""
_definitions = []
for definition in definitions:
try:
if definition.module_path:
if definition.type == 'import':
definition = self._top_definition(definition)
if not definition.module_path:
continue
try:
parent = definition.parent()
container = parent.name if parent.type != 'module' else ''
except Exception:
container = ''
_definition = {
'text': definition.name,
'type': self._get_definition_type(definition),
'raw_type': definition.type,
'fileName': definition.module_path,
'container': container,
'range': self._extract_range(definition)
}
_definitions.append(_definition)
except Exception as e:
pass
return json.dumps({'id': identifier, 'results': _definitions})
def _serialize_tooltip(self, definitions, identifier=None):
_definitions = []
for definition in definitions:
signature = definition.name
description = None
if definition.type in ['class', 'function']:
signature = self._generate_signature(definition)
description = definition.docstring(raw=True).strip()
if not description and not hasattr(definition, 'get_line_code'):
# jedi returns an empty string for compiled objects
description = definition.docstring().strip()
if definition.type == 'module':
signature = definition.full_name
description = definition.docstring(raw=True).strip()
if not description and not definition.get_line_code():
# jedi returns an empty string for compiled objects
description = definition.docstring().strip()
_definition = {
'type': self._get_definition_type(definition),
'description': description,
'signature': signature
}
_definitions.append(_definition)
return json.dumps({'id': identifier, 'results': _definitions})
def _serialize_usages(self, usages, identifier=None):
_usages = []
for usage in usages:
_usages.append({
'name': usage.name,
'moduleName': usage.module_name,
'fileName': usage.module_path,
'line': usage.line,
'column': usage.column,
})
return json.dumps({'id': identifier, 'results': _usages})
def _deserialize(self, request):
"""Deserialize request from VSCode.
Args:
request: String with raw request from VSCode.
Returns:
Python dictionary with request data.
"""
return json.loads(request)
def _set_request_config(self, config):
"""Sets config values for current request.
This includes sys.path modifications which is getting restored to
default value on each request so each project should be isolated
from each other.
Args:
config: Dictionary with config values.
"""
sys.path = self.default_sys_path
self.use_snippets = config.get('useSnippets')
self.show_doc_strings = config.get('showDescriptions', True)
self.fuzzy_matcher = config.get('fuzzyMatcher', False)
jedi.settings.case_insensitive_completion = config.get(
'caseInsensitiveCompletion', True)
for path in config.get('extraPaths', []):
if path and path not in sys.path:
sys.path.insert(0, path)
def _process_request(self, request):
"""Accept serialized request from VSCode and write response.
"""
request = self._deserialize(request)
self._set_request_config(request.get('config', {}))
path = self._get_top_level_module(request.get('path', ''))
if path not in sys.path:
sys.path.insert(0, path)
lookup = request.get('lookup', 'completions')
if lookup == 'names':
return self._write_response(self._serialize_definitions(
jedi.api.names(
source=request.get('source', None),
path=request.get('path', ''),
all_scopes=True),
request['id']))
script = jedi.api.Script(
source=request.get('source', None), line=request['line'] + 1,
column=request['column'], path=request.get('path', ''))
if lookup == 'definitions':
defs = self._get_definitions(script.goto_definitions(), request['id'])
if len(defs) == 0:
defs = self._get_definitions(script.goto_assignments(), request['id'])
return self._write_response(json.dumps({'id': request['id'], 'results': defs}))
if lookup == 'tooltip':
return self._write_response(self._serialize_tooltip(
script.goto_definitions(), request['id']))
elif lookup == 'arguments':
return self._write_response(self._serialize_arguments(
script, request['id']))
elif lookup == 'usages':
return self._write_response(self._serialize_usages(
script.usages(), request['id']))
elif lookup == 'methods':
return self._write_response(
self._serialize_methods(script, request['id'],
request.get('prefix', '')))
else:
return self._write_response(
self._serialize_completions(script, request['id'],
request.get('prefix', '')))
def _write_response(self, response):
sys.stdout.write(response + '\n')
sys.stdout.flush()
def watch(self):
while True:
try:
self._process_request(self._input.readline())
except Exception:
sys.stderr.write(traceback.format_exc() + '\n')
sys.stderr.flush()
if __name__ == '__main__':
jediPreview = False
cachePrefix = 'v'
    if len(sys.argv) > 1 and sys.argv[1] == 'preview':
jediPath = os.path.join(os.path.dirname(__file__), 'preview')
jediPreview = True
    elif len(sys.argv) > 2 and sys.argv[1] == 'custom':
jediPath = sys.argv[2]
jediPreview = True
cachePrefix = 'custom_v'
else:
jediPath = os.path.join(os.path.dirname(__file__), 'release')
sys.path.insert(0, jediPath)
import jedi
if jediPreview:
jedi.settings.cache_directory = os.path.join(
jedi.settings.cache_directory, cachePrefix + jedi.__version__.replace('.', ''))
# remove jedi from path after we import it so it will not be completed
sys.path.pop(0)
JediCompletion().watch()
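The class reads one JSON request per stdin line and writes one JSON response per stdout line. A request of the shape `_process_request` expects (all values hypothetical) looks like this:

```python
import json

request = {
    "id": 1,
    "lookup": "completions",       # or: names, definitions, tooltip, arguments, usages, methods
    "source": "import os\nos.pa",  # buffer contents
    "path": "/tmp/example.py",
    "line": 1,                     # 0-based; the script adds 1 for jedi
    "column": 5,
    "prefix": "pa",
    "config": {"useSnippets": False, "extraPaths": []},
}
print(json.dumps(request))  # one line on stdin -> one JSON line on stdout
```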
authors: ["gustavosoares@gmail.com"] | author_id: gustavosoares@gmail.com

---

blob_id: 327c77ed90386f136794d5cc8bff500841a33e77 | directory_id: b797527c6f73940ca689c54cd39ee340bd20bb94 | content_id: 5fe98e8c50a2b3fbec83993a42c23db079181f91
path: /venv/Scripts/pip3.7-script.py
repo_name: xiaohanZhang1993/Alien | branch_name: refs/heads/master | snapshot_id: cda241b1fa20c402c473adc07509bbc5366b9f3f | revision_id: 56708730a5575f22c6c35869b8ada18a10245c49
detected_licenses: [] | license_type: no_license
visit_date: 2022-09-04T01:00:03.615416 | revision_date: 2020-05-28T08:57:39 | committer_date: 2020-05-28T08:57:39
github_id: 267,540,927 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 402 | extension: py
content:
#!D:\zxhDemo\PyDemo\Alien\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
authors: ["15901192657@163.com"] | author_id: 15901192657@163.com

---

blob_id: 765285890e983bd8e8cc25a9fff7a46290138ba2 | directory_id: 2b84403b0dd9a0a5ffc13e868280af16964de03b | content_id: e83ceeb2553e6cb5efd7cc8375fd206f64b29035
path: /assg.py
repo_name: akhil8031/Python-Basics | branch_name: refs/heads/master | snapshot_id: 6de2f93d85b67095f32526c7ceb46c03588f390e | revision_id: 52a8eb64a5e6ddcc12a7ed36103495ef5e2f3894
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-19T03:52:26.659429 | revision_date: 2017-04-05T17:55:51 | committer_date: 2017-04-05T17:55:51
github_id: 87,339,929 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 305 | extension: py
content:
import urllib
from BeautifulSoup import *
url=raw_input('Enter url-')
c=raw_input('Count : ')
count=int(c)
p=raw_input('pos : ')
pos=int(p)
i=1
print url
while i<=count:
html = urllib.urlopen(url).read()
soup = BeautifulSoup(html)
tags = soup('a')
url=tags[pos-1].get('href',None)
print url
i=i+1
authors: ["akhil_jain96@ymail.com"] | author_id: akhil_jain96@ymail.com

---

blob_id: aace7c3ac8ae4dfbaf9a425ce523bb342eaafc68 | directory_id: 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | content_id: 1d84ed11150bebd234261af2a75e6b07e15f708b
path: /sdBs/AllRun/pg_2204+034/sdB_pg_2204+034_coadd.py
repo_name: tboudreaux/SummerSTScICode | branch_name: refs/heads/master | snapshot_id: 73b2e5839b10c0bf733808f4316d34be91c5a3bd | revision_id: 4dd1ffbb09e0a599257d21872f9d62b5420028b0
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-20T18:07:44.723496 | revision_date: 2016-08-08T16:49:53 | committer_date: 2016-08-08T16:49:53
github_id: 65,221,159 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 429 | extension: py
content:
from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[331.818708,3.705497], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_pg_2204+034/sdB_pg_2204+034_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_pg_2204+034/sdB_pg_2204+034_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
authors: ["thomas@boudreauxmail.com"] | author_id: thomas@boudreauxmail.com

---

blob_id: 5b790c05efde005633fb1c19cb300766e68666b7 | directory_id: f5439c1a7a15c4eb709da6f32eb252679a1d44bd | content_id: 6717fa4ba4ccf1d88609971f2dbf28a8e0289d70
path: /preprocessing/kmeans.py
repo_name: Veranep/rideshare-replication | branch_name: refs/heads/master | snapshot_id: fa8b0c6535fb864d4597d01603e9391269361b87 | revision_id: 2c6cd7d2aa26402445a6f306c0244018f36682b0
detected_licenses: [] | license_type: no_license
visit_date: 2022-05-21T10:01:53.750758 | revision_date: 2022-04-28T10:53:35 | committer_date: 2022-04-28T10:53:35
github_id: 443,341,991 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2022-04-28T10:53:37 | gha_created_at: 2021-12-31T12:35:20 | gha_language: Jupyter Notebook
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 565 | extension: py
content:
import pickle
from sklearn.cluster import KMeans
def write_kmeans():
zone_lat_long = open("../data/paper_replication/zone_latlong.csv").read().split("\n")
d = {}
coords = []
for i in zone_lat_long:
if i!='':
a,b,c = i.split(",")
d[a] = (float(b),float(c))
coords.append((float(b),float(c)))
regions = KMeans(n_clusters=10).fit(coords)
labels = regions.labels_
centers = regions.cluster_centers_
pickle.dump(labels,open("../data/paper_replication/new_labels.pkl","wb"))
write_kmeans()
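The expected layout of `zone_latlong.csv` is implied by the parsing above; a hypothetical sanity check of that assumed format:

```python
# Assumed format of zone_latlong.csv: one "zone_id,lat,lon" row per line.
sample = "1,40.7128,-74.0060\n2,40.7306,-73.9866\n"
for row in sample.split("\n"):
    if row != "":
        zone, lat, lon = row.split(",")
        print(zone, float(lat), float(lon))
```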
authors: ["veraneplenbroek@icloud.com"] | author_id: veraneplenbroek@icloud.com

---

blob_id: c0bdac944aed5cb00d3ab2541709a23fecbc22e3 | directory_id: 50948d4cb10dcb1cc9bc0355918478fb2841322a | content_id: bf4a4ba184e7533ad006b1884070f8b6fb8071ac
path: /azure-mgmt-network/azure/mgmt/network/v2018_11_01/models/application_gateway_rewrite_rule_set.py
repo_name: xiafu-msft/azure-sdk-for-python | branch_name: refs/heads/master | snapshot_id: de9cd680b39962702b629a8e94726bb4ab261594 | revision_id: 4d9560cfd519ee60667f3cc2f5295a58c18625db
detected_licenses: ["MIT"] | license_type: permissive
visit_date: 2023-08-12T20:36:24.284497 | revision_date: 2019-05-22T00:55:16 | committer_date: 2019-05-22T00:55:16
github_id: 187,986,993 | star_events_count: 1 | fork_events_count: 0
gha_license_id: MIT | gha_event_created_at: 2020-10-02T01:17:02 | gha_created_at: 2019-05-22T07:33:46 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,111 | extension: py
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayRewriteRuleSet(SubResource):
"""Rewrite rule set of an application gateway.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:param rewrite_rules: Rewrite rules in the rewrite rule set.
:type rewrite_rules:
list[~azure.mgmt.network.v2018_11_01.models.ApplicationGatewayRewriteRule]
:ivar provisioning_state: Provisioning state of the rewrite rule set
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:vartype provisioning_state: str
:param name: Name of the rewrite rule set that is unique within an
Application Gateway.
:type name: str
:ivar etag: A unique read-only string that changes whenever the resource
is updated.
:vartype etag: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'rewrite_rules': {'key': 'properties.rewriteRules', 'type': '[ApplicationGatewayRewriteRule]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ApplicationGatewayRewriteRuleSet, self).__init__(**kwargs)
self.rewrite_rules = kwargs.get('rewrite_rules', None)
self.provisioning_state = None
self.name = kwargs.get('name', None)
self.etag = None
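A minimal construction sketch, matching the kwargs-based `__init__` above (all values hypothetical):

```python
rule_set = ApplicationGatewayRewriteRuleSet(
    id="/subscriptions/.../rewriteRuleSets/example",
    name="example-rule-set",
    rewrite_rules=[],  # list of ApplicationGatewayRewriteRule
)
# provisioning_state and etag are read-only, populated by the server.
print(rule_set.name, rule_set.provisioning_state)  # example-rule-set None
```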
authors: ["lmazuel@microsoft.com"] | author_id: lmazuel@microsoft.com

---

blob_id: 4a023c9917b1ece7ff0f3b855279ef7f22908d65 | directory_id: 3edfb949e51ca489d4d18baa60dde4c3cbeaa14a | content_id: 6e38e4ecb36822d4769a04d8befe78b7f0a679a5
path: /TextProcessing10.py
repo_name: BartKlak/Python_TextProcessing | branch_name: refs/heads/main | snapshot_id: f0d5083f3c9a75b9860a016c269db380919930b3 | revision_id: 2293bdedfbe231e06aa0968297419445e3867e9e
detected_licenses: [] | license_type: no_license
visit_date: 2022-12-28T03:22:02.792036 | revision_date: 2020-10-15T13:28:00 | committer_date: 2020-10-15T13:28:00
github_id: 304,333,092 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 722 | extension: py
content:
#coding: utf-8
import sys, codecs
def palindrome_j(text):
    # text from input() is already str in Python 3; no decode needed
final_text = ""
c = False
if text.endswith(u'。') or text.endswith(u'?') or text.endswith(u'!'):
if text.endswith(u'。'):
b = u'。'
c = True
elif text.endswith(u'?'):
b = u'?'
c = True
elif text.endswith(u'!'):
b = u'!'
c = True
else:
c = False
text = text[:-1]
for a in text:
final_text = a + final_text
if c == True:
final_text = final_text + b
return final_text
###
text = input()
final_text = palindrome_j(text)
print(final_text)
authors: ["noreply@github.com"] | author_id: BartKlak.noreply@github.com

---

blob_id: 680b6a92527d1990409c17c06dc480420d55e2ce | directory_id: ac8af2ee72e946de29e18a24a30aa0a3c0d9ac25 | content_id: 34c200412b9e67cd4faa93167ceb0c32e44dee3f
path: /Image/EncoderDecoderImage.py
repo_name: SaraHisham/NeuralNetwork-Based-Generic-Compression | branch_name: refs/heads/master | snapshot_id: 668c33ee18cf437cf7485ab94a94757271eb069f | revision_id: b3a5c2575f7ef005979535557c8cf5ab1b8861ae
detected_licenses: [] | license_type: no_license
visit_date: 2022-06-07T18:58:19.275782 | revision_date: 2020-05-05T22:51:43 | committer_date: 2020-05-05T22:51:43
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,714 | extension: py
content:
import tensorflow as tf
import cv2
import numpy as np
import matplotlib.pyplot as plt
import math
from PIL import Image
def encode_image(in_file, out_file):
# Load the trained model
loaded_model = tf.keras.models.load_model('image_autoencoder.h5')
# Encoder Model
encoder = tf.keras.Model(loaded_model.input, loaded_model.layers[9].output)
fragments_array = []
original_image = Image.open(in_file)
original_image = original_image.convert('RGB')
# Original_Image = Original_Image.convert("RGB")
width, height = original_image.size # Fetch image size
# Crop input image into 32x32 blocks
width_fragments_count = math.ceil(width / 32) # No. of horizontal blocks
height_fragments_count = math.ceil(height / 32) # No. of vertical blocks
# Setting the points for cropped image
x1 = 0
y1 = 0
x2 = 32
y2 = 32
fragments_count = 0
for i in range(0, height_fragments_count):
for j in range(0, width_fragments_count):
Image_Fragment = original_image.crop((x1, y1, x2, y2)) # Crop the first block
Image_Fragment_Array = np.array(Image_Fragment) # Save it into a numpy array
fragments_count += 1
fragments_array.append(Image_Fragment_Array) # Add the first block numpy array to Input Image
# Increment the points to catch the next fragment (32x32x3 fragments)
x1 = x1 + 32
x2 = x2 + 32
x1 = 0
y1 = y1 + 32
x2 = 32
y2 = y2 + 32
# Convert input image into numpy array
fragments_array = np.array(fragments_array)
fragments_array = fragments_array.astype('float32') / 255
# Output of the decoder
prediction_encoded = encoder.predict(fragments_array)
# Input image dimensions array
size = [width_fragments_count, height_fragments_count]
# Save output to the decoder and input image dimension arrays into a compressed file
np.savez_compressed(out_file, Pred=prediction_encoded, Size=size, Type=[0])
def decode_image(in_file, out_file):
# Load the trained model
loaded_model = tf.keras.models.load_model('image_autoencoder.h5')
# Decoder Model
decoder_input = tf.keras.Input(shape=(8, 8, 2))
decoder_layer_1 = loaded_model.layers[10]
decoder_layer_2 = loaded_model.layers[11]
decoder_layer_3 = loaded_model.layers[12]
decoder_layer_4 = loaded_model.layers[13]
decoder_layer_5 = loaded_model.layers[14]
decoder_layer_6 = loaded_model.layers[15]
decoder_layer_7 = loaded_model.layers[16]
decoder_layer_8 = loaded_model.layers[17]
decoder = tf.keras.Model(decoder_input, decoder_layer_8(
decoder_layer_7(
decoder_layer_6(
decoder_layer_5(
decoder_layer_4(
decoder_layer_3(
decoder_layer_2(
decoder_layer_1(
decoder_input)))))))))
# Load the compressed file
compressed_file = np.load(in_file + ".npz")
# Load the Input to Decoder from the compressed file
prediction = decoder.predict(compressed_file['Pred'])
# Load the Original Image Size from the compressed file
size_decoded = compressed_file['Size']
# Assign the Image Size parameters to Length and Width
width_fragments_count = size_decoded[0] # Width
height_fragments_count = size_decoded[1] # Length
# Initialize arrays to load the Decoded Image
vertical_concatenated_image = []
horizontal_concatenated_image = []
# Initialize counter to load the Decoded Image
fragments_count = 0
# Loop to load and concatenate the image from the Decoder
for i in range(0, (width_fragments_count * height_fragments_count)):
image_fragment_array = prediction[i].reshape(32, 32, 3) # Load 32x32 block from the decoder
horizontal_concatenated_image.append(image_fragment_array) # Push 32x32 block into Horizontal Concatenate Array
fragments_count = fragments_count + 1 # Increment the Counter
if fragments_count == width_fragments_count: # Check if Image Width is reached by the Counter
fragments_count = 0 # Initialize Counter
im_h = cv2.hconcat(horizontal_concatenated_image) # Concatenate Horizontally
vertical_concatenated_image.append(im_h) # Push Widthx32 blocks into Vertical Concatenate Array
horizontal_concatenated_image.clear() # Clear the Horizontal Concatenate Array
reconstructed_image = cv2.vconcat(vertical_concatenated_image) # Concatenate Vertically
plt.imsave(out_file + ".tiff", reconstructed_image) # Save the Output Image
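A hypothetical round trip, assuming the trained `image_autoencoder.h5` sits next to the script and `input.png` exists:

```python
# Compress input.png into sample.npz, then reconstruct a .tiff from it.
encode_image("input.png", "sample")      # np.savez_compressed appends .npz
decode_image("sample", "reconstructed")  # reads sample.npz, writes reconstructed.tiff
```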
authors: ["noreply@github.com"] | author_id: SaraHisham.noreply@github.com

---

blob_id: 75adb90a03bc1dd4b3be5b75369e93d8a907d7a6 | directory_id: 97c330276400e6bbf28f2e290232737e38da08cb | content_id: d640c795374fff81a6ffec659a335941ce7834b0
path: /ok/client/models/concept_case.py
repo_name: dudduss/Ants | branch_name: refs/heads/master | snapshot_id: 5e70818a8c5a17ee71e98db5c1f2312d3b9cdc5e | revision_id: 4d64c1e450fc0aee7d82fd43fd1c1251573e984a
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-10T21:04:01.132400 | revision_date: 2014-12-29T22:11:01 | committer_date: 2014-12-29T22:11:01
github_id: 28,608,245 | star_events_count: 2 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,132 | extension: py
content:
"""TestCase for conceptual tests.
ConceptTestCases are designed to be natural language tests that help
students understand high-level understanding. As such, these test cases
focus mainly on unlocking. When used in the grading protocol,
ConceptTestCases simply display the answer if already unlocked.
"""
from client.models import serialize
from client.protocols import grading
from client.protocols import unlock
from client.utils import formatting
class ConceptCase(grading.GradedTestCase, unlock.UnlockTestCase):
"""TestCase for conceptual questions."""
type = 'concept'
REQUIRED = {
'type': serialize.STR,
'question': serialize.STR,
'answer': serialize.STR,
}
OPTIONAL = {
'locked': serialize.BOOL_FALSE,
'choices': serialize.SerializeArray(serialize.STR),
'never_lock': serialize.BOOL_FALSE,
'hidden': serialize.BOOL_FALSE,
}
def __init__(self, **fields):
super().__init__(**fields)
self['question'] = formatting.dedent(self['question'])
self['answer'] = formatting.dedent(self['answer'])
######################################
# Protocol interface implementations #
######################################
def on_grade(self, logger, verbose, interact, timeout):
"""Implements the GradedTestCase interface."""
print('Q: ' + self['question'])
print('A: ' + self['answer'])
print()
return False
def should_grade(self):
return not self['locked']
def on_unlock(self, logger, interact_fn):
"""Implements the UnlockTestCase interface."""
print('Q: ' + self['question'])
print()
answer = interact_fn(self['answer'], self['choices'])
self['answer'] = answer
self['locked'] = False
def on_lock(self, hash_fn):
#TODO(soumya): Make this a call to normalize after it's moved to an appropriate place.
if self['choices']:
self['answer'] = hash_fn("".join(self['answer']))
else:
self['answer'] = hash_fn(self['answer'])
self['locked'] = True
authors: ["duddus@berkeley.edu"] | author_id: duddus@berkeley.edu

---

blob_id: f628da0c9740b5ad71322a330b762fa7226e6c0b | directory_id: 564efe38e9e6f2d21f0dcd7ee87c7dc4feeccf5a | content_id: 30234d16ddf7bf3ce30e5cd31ab2c02b9f35d071
path: /05-Lists/112-remove-outliers.py
repo_name: khalid-hussain/python-workbook | branch_name: refs/heads/master | snapshot_id: 79210ffa920a5ef2c44f00e9a806bdfe0bfa8e6f | revision_id: eac2b2678530fec76a14805aeb5c9ff3ea02da5d
detected_licenses: [] | license_type: no_license
visit_date: 2020-09-12T23:15:10.381083 | revision_date: 2020-08-25T10:42:59 | committer_date: 2020-08-25T10:42:59
github_id: 222,589,124 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 614 | extension: py
content:
def removeOutliers(theList, n):
theList.sort()
for i in range(n):
theList.pop()
theList.reverse()
for i in range(n):
theList.pop()
def main():
theCount = 0
myList = []
while True:
theInt = input('Input an integer (blank to quit): ')
if theInt == '':
break
else:
theCount = theCount + 1
myList.append(int(theInt))
if theCount < 4:
print('You have input less than 4 values. Try again.')
else:
removeOutliers(myList, 2)
print(myList)
if __name__ == "__main__":
main()
authors: ["mohammad.khalid.hussain@gmail.com"] | author_id: mohammad.khalid.hussain@gmail.com

---

blob_id: 2fcd7e6b876d749e2093d3fd619eeecf1d36ff0d | directory_id: 8e814a581f30bd4536460b468cfc689f893a6122 | content_id: 1092d58483c4ccbfd76e9d549ee4d9f06711c31c
path: /run.py
repo_name: oopaze/palapas_bot | branch_name: refs/heads/main | snapshot_id: f4ba7e39b2749a84607b8114f4179cea113e283d | revision_id: f3745abe1ea0fd4acb47b7aa53a4e54b88a43269
detected_licenses: [] | license_type: no_license
visit_date: 2023-07-22T02:14:22.151484 | revision_date: 2021-09-04T19:33:23 | committer_date: 2021-09-04T19:33:23
github_id: 399,289,028 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 70 | extension: py
content:
import os
import app
if __name__ == '__main__':
bot = app.run()
authors: ["jgomes@geobit.com.br"] | author_id: jgomes@geobit.com.br

---

blob_id: 6b5e099ccb9a5b1a542954e0ee18584f02edbe2a | directory_id: 5fde19b9fd338a59607135401d80c7894688b5c5 | content_id: f97bf7b34d1c3605c6f5322710f7f9222c539461
path: /modules/topic_modeling.py
repo_name: mdahwireng/Corona-virus-tweet-analysis | branch_name: refs/heads/master | snapshot_id: 86e07585ad432e306a2477ae062d7b55a260a021 | revision_id: 6a75ff86ad40cc338b5102385ecca1fe8365fad8
detected_licenses: [] | license_type: no_license
visit_date: 2023-06-09T08:16:18.461046 | revision_date: 2021-06-30T19:40:11 | committer_date: 2021-06-30T19:40:11
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,685 | extension: py
content:
from gensim import corpora
from gensim.models import ldamodel, CoherenceModel
from gensim.parsing.preprocessing import remove_stopwords
import pyLDAvis.gensim_models as gensimvis
import pyLDAvis
class TweetDFTopicModeling:
"""
this function will create topic modeling model parsed data for modeling
Return
------
dict
"""
def __init__(self, df):
self.data = df
def prepare_data(self, data=None, output=False)->None:
"""
this function will prepare data for creating topic modeling model
Return
------
None
"""
        if data is None:
data = self.data
#Converting tweets to list of words For feature engineering
sentence_list = [remove_stopwords(tweet) for tweet in data['cleaned_tweet']]
word_list = [sent.split() for sent in sentence_list]
#Create dictionary which contains Id and word
word_to_id = corpora.Dictionary(word_list)
corpus_1= [word_to_id.doc2bow(tweet) for tweet in word_list]
data = {'word_list':word_list, 'word_to_id':word_to_id, 'corpus':corpus_1 }
self.data = data
if output:
return data
def creat_topic_model (self)->dict:
"""
this function will create topic modeling model
Return
------
dict
"""
data = self.data
corpus = data['corpus']
word_to_id = data['word_to_id']
lda_model = ldamodel.LdaModel(corpus,
id2word=word_to_id,
num_topics=3,
random_state=100,
update_every=1,
chunksize=50,
passes=50,
alpha='auto',
per_word_topics=True)
self.model = lda_model
return lda_model
def viz_lda_topics(self, lda_model, data = None, streamlit=False):
        if data is None:
data = self.data
corpus = data['corpus']
word_to_id = data['word_to_id']
if streamlit:
LDAvis_prepared = gensimvis.prepare(lda_model, corpus, word_to_id)
html_string = pyLDAvis.prepared_data_to_html(LDAvis_prepared)
return html_string
# Visualize the topics
pyLDAvis.enable_notebook()
LDAvis_prepared = gensimvis.prepare(lda_model, corpus, word_to_id)
return LDAvis_prepared
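A hypothetical end-to-end call, assuming a DataFrame with a `cleaned_tweet` column:

```python
import pandas as pd

df = pd.DataFrame({"cleaned_tweet": [
    "corona virus cases rising",
    "new vaccine trial results",
    "stay home wash hands",
]})
tm = TweetDFTopicModeling(df)
tm.prepare_data()             # builds word list, dictionary, corpus
lda = tm.creat_topic_model()  # fits the 3-topic LDA
print(lda.print_topics())
```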
authors: ["kaaymike@hotmail.co.uk"] | author_id: kaaymike@hotmail.co.uk

---

blob_id: a21618f0ce0aa6432175d36b0042e7df8e21bb69 | directory_id: c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | content_id: dac9afb7dc0a8d107113bc4bc06e8af627553a69
path: /cases/synthetic/prime-big-291.py
repo_name: Virtlink/ccbench-chocopy | branch_name: refs/heads/main | snapshot_id: c3f7f6af6349aff6503196f727ef89f210a1eac8 | revision_id: c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
detected_licenses: [] | license_type: no_license
visit_date: 2023-04-07T15:07:12.464038 | revision_date: 2022-02-03T15:42:39 | committer_date: 2022-02-03T15:42:39
github_id: 451,969,776 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,479 | extension: py
content:
# Get the n-th prime starting from 2
def get_prime(n:int) -> int:
candidate:int = 2
found:int = 0
while True:
if is_prime(candidate):
found = found + 1
if found == n:
return candidate
candidate = candidate + 1
return 0 # Never happens
def is_prime(x:int) -> bool:
div:int = 2
div2:int = 2
div3:int = 2
div4:int = 2
div5:int = 2
while div < x:
if x % div == 0:
return False
div = div + 1
return True
def is_prime2(x:int, x2:int) -> bool:
div:int = 2
div2:int = 2
div3:int = 2
div4:int = 2
div5:int = 2
while div < x:
if x % div == 0:
return False
div = div + 1
return True
$Definition
def is_prime4(x:int, x2:int, x3:int, x4:int) -> bool:
div:int = 2
div2:int = 2
div3:int = 2
div4:int = 2
div5:int = 2
while div < x:
if x % div == 0:
return False
div = div + 1
return True
def is_prime5(x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
div:int = 2
div2:int = 2
div3:int = 2
div4:int = 2
div5:int = 2
while div < x:
if x % div == 0:
return False
div = div + 1
return True
# Input parameter
n:int = 15
n2:int = 15
n3:int = 15
n4:int = 15
n5:int = 15
# Run [1, n]
i:int = 1
i2:int = 1
i3:int = 1
i4:int = 1
i5:int = 1
# Crunch
while i <= n:
print(get_prime(i))
i = i + 1
authors: ["647530+Virtlink@users.noreply.github.com"] | author_id: 647530+Virtlink@users.noreply.github.com

---

blob_id: 439f5f98233e4a3cae705adbae6d64ead367bd92 | directory_id: 58d5b73c0b7d29da81be6aaacfd995762c2e8a4e | content_id: 3874d0d94f9c7f0963c164c3358e776e61912422
path: /Exercício63.py
repo_name: DevRaphael7/exercercios-Python | branch_name: refs/heads/master | snapshot_id: 92d40076c9364159939bd0db8a49125bec757591 | revision_id: ecfe3e469ee8d2dbae8c7cfa921c3a78e4f91996
detected_licenses: [] | license_type: no_license
visit_date: 2023-03-29T12:09:00.692504 | revision_date: 2021-04-06T19:36:09 | committer_date: 2021-04-06T19:36:09
github_id: 354,956,754 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 534 | extension: py
content:
salario = float(input('Salario do funcionário: '))
if salario < 500:
novo_salario = (salario * 0.05) + salario
elif salario >= 500 and salario < 1200:
novo_salario = (salario * 0.12) + salario
if salario < 600:
aux_escolar = 150
print('Auxílio escolar: %.2f R$' % aux_escolar)
novo_salario += aux_escolar
elif salario > 600:
aux_escolar = 100
print('Auxílio escolar: %.2f R$' % aux_escolar)
novo_salario += aux_escolar
elif salario > 1200:
print('Sem bonificação')
print('Novo salario: %.2f R$' % novo_salario)
authors: ["raphaelramalho1989@gmail.com"] | author_id: raphaelramalho1989@gmail.com

---

blob_id: 731068a69d0161d159f5927e1f21c41ba145374c | directory_id: 5782615a0c591a68ec416c32b5de84cd7d821f86 | content_id: 3a576936269a30dd67e581b2650c4ae1ea1c8ddf
path: /regex.py
repo_name: richardcardona/learnpython-exercises | branch_name: refs/heads/master | snapshot_id: 2d8c6d99cc9bb43d43357999bcb9d7d37d6d0e8e | revision_id: 7941b2a6e9110e4ce54e576477c35ec524f5a676
detected_licenses: ["Apache-2.0"] | license_type: permissive
visit_date: 2016-09-06T21:21:39.173502 | revision_date: 2013-12-23T04:38:50 | committer_date: 2013-12-23T04:38:50
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 806 | extension: py
content:
#!/usr/bin/env python
# Example
import re
pattern = re.compile(r"\[(on|off)\]") # Slight optimization
re.search(pattern, "Mono: Playback 65 [75%] [-16.50dB] [on]")
# Returns a Match object!
re.search(pattern, "Nada...:-(")
# Doesn't return anything.
# End Example
# Exercise: make a regular expression that will match an email
def test_email(your_pattern):
pattern = re.compile(your_pattern)
emails = ["john@example.com", "python-list@python.org", '"wha.t.`1an?ug{}ly@email.com"']
for email in emails:
if not re.match(pattern, email):
print "You failed to match %s" % (email)
elif not your_pattern:
print "Forgot to enter a pattern!"
else:
print "Pass"
pattern = r"[\"A-Za-z\d\-\._\{\}\?\`]+@[A-Za-z0-9\-._\"]+"
test_email(pattern)
authors: ["rcardona@zenoss.com"] | author_id: rcardona@zenoss.com

---

blob_id: ccc52c7268b209204e24e471f4f03112e1051a6d | directory_id: 78fda3f59f3e74ad638f50320fbd17dce0daaa9f | content_id: 621be741a0f4ad6af3d936c9900881731524b835
path: /PcHome.py
repo_name: Pika0908/getTitle | branch_name: refs/heads/master | snapshot_id: 48979584d38f9873b0f748c930f87784ae3964ec | revision_id: 693daa12aa1bef1b08a7c44f5d109607534b49ba
detected_licenses: [] | license_type: no_license
visit_date: 2020-05-09T04:17:26.894916 | revision_date: 2019-04-12T10:16:50 | committer_date: 2019-04-12T10:16:50
github_id: 180,983,977 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 811 | extension: py
content:
import requests
from lxml import etree
import urllib
def url( word ):
return urllib.parse.quote( word )
import base64
def b64( word ):
b =base64.b64encode( word.encode('utf-8') ).decode( "utf-8" )
return b
class Craw():
def __init__(self, keyword ):
self.keyword = keyword
def getTitle( self, link ):
res = requests.get( link )
res.encoding = 'big5'
pa = etree.HTML( res.text )
tmp1 = pa.xpath( '//div[contains(@id, "con_")]/a' )
for title in tmp1:
print( "".join( title.itertext() ) )
keyword_in = input("keyword: ")
keyword_tmp = url( keyword_in )
keyword = b64 ( keyword_tmp )
link = "https://www.pcstore.com.tw/adm/psearch.htm?store_k_word=" + keyword + "&slt_k_option=1"
test_1 = Craw( keyword )
test_1.getTitle( link )
authors: ["noreply@github.com"] | author_id: Pika0908.noreply@github.com

---

blob_id: 4d64441abe0869be9f47a8f134236aca6d023928 | directory_id: 9f5c33c8fe13682c0212046f376ed7585a99fff9 | content_id: ab80f14785575ce67dd32af16b2fbc73339dd790
path: /venv/Scripts/pwiz.py
repo_name: ajsmash7/Lab4part2 | branch_name: refs/heads/master | snapshot_id: a421ca77a401cdd6b69d7427cfcfbe44ac0bc852 | revision_id: 016dc6ff7f32504aa54c0da721303ee2c2fbb894
detected_licenses: [] | license_type: no_license
visit_date: 2020-05-03T12:57:31.400564 | revision_date: 2019-03-31T04:03:23 | committer_date: 2019-03-31T04:03:23
github_id: 178,640,582 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 7,123 | extension: py
content:
#!C:\Users\Ashley Johnson\PycharmProjects\Lab4part2\venv\Scripts\python.exe
import datetime
import sys
from getpass import getpass
from optparse import OptionParser
from peewee import *
from peewee import print_
from peewee import __version__ as peewee_version
from playhouse.reflection import *
TEMPLATE = """from peewee import *%s
database = %s('%s', **%s)
class UnknownField(object):
def __init__(self, *_, **__): pass
class BaseModel(Model):
class Meta:
database = database
"""
DATABASE_ALIASES = {
MySQLDatabase: ['mysql', 'mysqldb'],
PostgresqlDatabase: ['postgres', 'postgresql'],
SqliteDatabase: ['sqlite', 'sqlite3'],
}
DATABASE_MAP = dict((value, key)
for key in DATABASE_ALIASES
for value in DATABASE_ALIASES[key])
def make_introspector(database_type, database_name, **kwargs):
if database_type not in DATABASE_MAP:
err('Unrecognized database, must be one of: %s' %
', '.join(DATABASE_MAP.keys()))
sys.exit(1)
schema = kwargs.pop('schema', None)
DatabaseClass = DATABASE_MAP[database_type]
db = DatabaseClass(database_name, **kwargs)
return Introspector.from_database(db, schema=schema)
def print_models(introspector, tables=None, preserve_order=False,
include_views=False):
database = introspector.introspect(table_names=tables,
include_views=include_views)
print_(TEMPLATE % (
introspector.get_additional_imports(),
introspector.get_database_class().__name__,
introspector.get_database_name(),
repr(introspector.get_database_kwargs())))
    def _print_table(table, seen, accum=None):
        accum = accum or []
        foreign_keys = database.foreign_keys[table]
        for foreign_key in foreign_keys:
            dest = foreign_key.dest_table
            # In the event the destination table has already been pushed
            # for printing, then we have a reference cycle.
            if dest in accum and table not in accum:
                print_('# Possible reference cycle: %s' % dest)
            # If this is not a self-referential foreign key, and we have
            # not already processed the destination table, do so now.
            if dest not in seen and dest not in accum:
                seen.add(dest)
                if dest != table:
                    _print_table(dest, seen, accum + [table])
        print_('class %s(BaseModel):' % database.model_names[table])
        columns = database.columns[table].items()
        if not preserve_order:
            columns = sorted(columns)
        primary_keys = database.primary_keys[table]
        for name, column in columns:
            skip = all([
                name in primary_keys,
                name == 'id',
                len(primary_keys) == 1,
                column.field_class in introspector.pk_classes])
            if skip:
                continue
            if column.primary_key and len(primary_keys) > 1:
                # If we have a CompositeKey, then we do not want to explicitly
                # mark the columns as being primary keys.
                column.primary_key = False
            print_('    %s' % column.get_field())
        print_('')
        print_('    class Meta:')
        print_('        table_name = \'%s\'' % table)
        multi_column_indexes = database.multi_column_indexes(table)
        if multi_column_indexes:
            print_('        indexes = (')
            for fields, unique in sorted(multi_column_indexes):
                print_('            ((%s), %s),' % (
                    ', '.join("'%s'" % field for field in fields),
                    unique,
                ))
            print_('        )')
        if introspector.schema:
            print_('        schema = \'%s\'' % introspector.schema)
        if len(primary_keys) > 1:
            pk_field_names = sorted([
                field.name for col, field in columns
                if col in primary_keys])
            pk_list = ', '.join("'%s'" % pk for pk in pk_field_names)
            print_('        primary_key = CompositeKey(%s)' % pk_list)
        elif not primary_keys:
            print_('        primary_key = False')
        print_('')
        seen.add(table)
    seen = set()
    for table in sorted(database.model_names.keys()):
        if table not in seen:
            if not tables or table in tables:
                _print_table(table, seen)
def print_header(cmd_line, introspector):
timestamp = datetime.datetime.now()
print_('# Code generated by:')
print_('# python -m pwiz %s' % cmd_line)
print_('# Date: %s' % timestamp.strftime('%B %d, %Y %I:%M%p'))
print_('# Database: %s' % introspector.get_database_name())
print_('# Peewee version: %s' % peewee_version)
print_('')
def err(msg):
sys.stderr.write('\033[91m%s\033[0m\n' % msg)
sys.stderr.flush()
def get_option_parser():
parser = OptionParser(usage='usage: %prog [options] database_name')
ao = parser.add_option
ao('-H', '--host', dest='host')
ao('-p', '--port', dest='port', type='int')
ao('-u', '--user', dest='user')
ao('-P', '--password', dest='password', action='store_true')
engines = sorted(DATABASE_MAP)
ao('-e', '--engine', dest='engine', default='postgresql', choices=engines,
help=('Database type, e.g. sqlite, mysql or postgresql. Default '
'is "postgresql".'))
ao('-s', '--schema', dest='schema')
ao('-t', '--tables', dest='tables',
help=('Only generate the specified tables. Multiple table names should '
'be separated by commas.'))
ao('-v', '--views', dest='views', action='store_true',
help='Generate model classes for VIEWs in addition to tables.')
ao('-i', '--info', dest='info', action='store_true',
help=('Add database information and other metadata to top of the '
'generated file.'))
ao('-o', '--preserve-order', action='store_true', dest='preserve_order',
help='Model definition column ordering matches source table.')
return parser
def get_connect_kwargs(options):
ops = ('host', 'port', 'user', 'schema')
kwargs = dict((o, getattr(options, o)) for o in ops if getattr(options, o))
if options.password:
kwargs['password'] = getpass()
return kwargs
if __name__ == '__main__':
raw_argv = sys.argv
parser = get_option_parser()
options, args = parser.parse_args()
if len(args) < 1:
err('Missing required parameter "database"')
parser.print_help()
sys.exit(1)
connect = get_connect_kwargs(options)
database = args[-1]
tables = None
if options.tables:
tables = [table.strip() for table in options.tables.split(',')
if table.strip()]
introspector = make_introspector(options.engine, database, **connect)
if options.info:
cmd_line = ' '.join(raw_argv[1:])
print_header(cmd_line, introspector)
print_models(introspector, tables, options.preserve_order, options.views)
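# Example invocations (illustrative; the database names are placeholders,
# the flags are the ones defined in get_option_parser above):
#   python -m pwiz -e sqlite my_app.db > models.py
#   python -m pwiz -e mysql -H localhost -u root -P -t users,orders my_db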
|
[
"wt6268bu@go.minneapolis.edu"
] |
wt6268bu@go.minneapolis.edu
|
0d5fa245a529ddeeda28d724b8adc4bcffa098d1
|
efd2ec91bb1861f571a9ac6a663b72203eeaf8c1
|
/env/lib/python3.6/tempfile.py
|
e5f4dd3d9c1adb7fde84d96552564a983ab0c6e4
|
[] |
no_license
|
bopopescu/DjangoFirst
|
de2cfdf6dcf9d72e211b3374865f4b38dd2465df
|
d2776f44d15024d6ed03d184e27269dff0c53d2a
|
refs/heads/master
| 2021-10-28T08:48:15.508077
| 2019-04-23T06:29:28
| 2019-04-23T06:29:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 47
|
py
|
/home/satish/anaconda/lib/python3.6/tempfile.py
|
[
"satish.kumar@egovernments.org"
] |
satish.kumar@egovernments.org
|
4510030ace0a00577d04e5e396c4983e2c081ec9
|
5143d1523f4d97162e053a5ec3bd9b27f984e8ff
|
/adding.py
|
5bd494a5ec79393e15bda2833b7546271d4fcf82
|
[] |
no_license
|
Crolabear/record1
|
674c61f59146383ae3eba07a574373ff1874e69a
|
179409b1ada938d59334f61fafe4af9287169599
|
refs/heads/master
| 2021-01-10T18:19:24.374436
| 2016-04-06T03:36:50
| 2016-04-06T03:36:50
| 44,950,104
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,744
|
py
|
# the goal of this is to find different ways to add some integers to a specified number within a fix number of operation
def checkDuplicateElements(list1):
    # returns 1 if list1 contains a duplicate element, 0 otherwise
    l2 = []
    for k in range(len(list1)):
        for item in l2:
            if list1[k] == item:
                return 1  # duplicate found
        l2.append(list1[k])
    return 0
def pairWiseSum(l11,l22):
# produce a list of ordered pairs? if input list is length m, and n, then i get mxn pairs
# make sure both l1, and l2 are string, not numbers
    l1 = [str(x) for x in l11]  # use lists (not map objects) so l2 can be iterated repeatedly
    l2 = [str(x) for x in l22]
output = []
for item1 in l1:
temp = []
for item2 in l2:
a=item1+item2
temp.append(a)
output.extend(temp)
return output
def addString(string1):
# this adds up the digits in a string of numbers;
count = 0
for item in string1:
count = count + int(item)
return count
def main():
numberPool = [1,2,3,4]
numberOfOperation = 3
goal = 8
comp = [numberPool]*(numberOfOperation+1)
temp = comp[0]
for i in range(1,len(comp)):
temp = pairWiseSum(temp,comp[i])
sumz = map(lambda x: (x,addString(x)),temp)
match = []
for item in sumz:
if item[1] == goal:
match.append(item[0])
return match
# there are k different base situations, k = length of numberPool
# number of ways to get to 5 is number of ways to get to 4, number of ways to get to 3* number of ways to get to 2
# so if k = 10, we have to find number of ways to get to 1,2,3,4,5,6,7,8,9. Then
# 1*9 + 2*8 + 3*7 + 4*6 + 5*5 where 1 is Number of ways to get 1, 2 is number of ways to get 2 etc...
# so i want to use a dynamic programming approach, and save all the results along the way to get to 9
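# A minimal sketch of the dynamic-programming idea outlined above (a
# hypothetical helper, not part of the original script). ways[s] counts the
# ordered ways to write s as a sum of values from numberPool; the fixed
# number-of-operations constraint used in main() is ignored here.
def count_ways(numberPool, goal):
    ways = [0] * (goal + 1)
    ways[0] = 1  # one way to reach 0: pick nothing
    for s in range(1, goal + 1):
        ways[s] = sum(ways[s - n] for n in numberPool if n <= s)
    return ways[goal]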
|
[
"crolabear@gmail.com"
] |
crolabear@gmail.com
|
c61d44bcc8be1346b3b1a8bb2742d5847838cc8a
|
2b115f9c5929fedd06d4dd6969100ab2df484adf
|
/messenger/urls.py
|
9903187e19c8320adcd0f81ea319f70a102f44a3
|
[] |
no_license
|
sorXCode/Avina-API
|
0b790bfd8ac8b9c84d1db45db0819e0585d954b9
|
7687ba7434b77d6c33944c65fff0409459a9d5ce
|
refs/heads/master
| 2023-04-11T01:17:35.570752
| 2021-04-14T15:01:45
| 2021-04-14T15:01:45
| 342,429,666
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from .views import StartMessaging, Conversation
urlpatterns = [
path('', StartMessaging.as_view()),
path('<str:product_uid>', Conversation.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
[
"sorxcode@gmail.com"
] |
sorxcode@gmail.com
|
938122dec190bda5aaa71ba0b043213672f7cde2
|
7f9bff4b21048faa813f4c67c5316e899ca5de20
|
/scrape.py
|
76723569acc6445960263fbf1ec2a51a069cdadd
|
[] |
no_license
|
neilnach/DLeagueAnalysis
|
c0fb43dda4a29b4c9d417316653d602f942da4d8
|
67cb72cd5ce244c56cd7f9ad3d5b83ace498d4de
|
refs/heads/master
| 2021-01-24T04:01:53.820714
| 2019-01-29T22:24:06
| 2019-01-29T22:24:06
| 122,917,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,515
|
py
|
import csv
import requests
from bs4 import BeautifulSoup
#opening csv
outfile = open("./D-League.csv", "w")
writer = csv.writer(outfile)
writer.writerow(["Player", "Split", "Team", "GP", "GS", "Min", "FGM", "FGA", "FG%", "3PM", "3PA",
"3P%", "FTM", "FTA", "FT%", "ORB", "DRB", "TRB", "AST", "STL", "BLK", "PF", "TOV", "PTS"])
list_of_urls = ["https://basketball.realgm.com/player/Semaj-Christon/D-League/26729/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Luke-Babbitt/D-League/2185/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Miles-Plumlee/D-League/3654/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Shane-Larkin/D-League/28945/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Abdel-Nader/D-League/31320/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Guerschon-Yabusele/D-League/29646/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Marcus-Morris/D-League/4078/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Quincy-Acy/D-League/4130/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Joe-Harris/D-League/23549/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Sean-Kilpatrick/D-League/4347/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Jeremy-Lin/D-League/10168/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Treveon-Graham/D-League/31269/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Jeremy-Lamb/D-League/22394/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Johnny-OBryant/D-League/24170/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Julyan-Stone/D-League/8270/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Justin-Holiday/D-League/6968/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/David-Nwaba/D-League/58185/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Yogi-Ferrell/D-League/24263/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Salah-Mejri/D-League/24116/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Dwight-Powell/D-League/9346/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Seth-Curry/D-League/4873/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Josh-McRoberts/D-League/6/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Will-Barton/D-League/9269/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Malik-Beasley/D-League/73131/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Juancho-Hernangomez/D-League/52816/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Reggie-Bullock/D-League/7120/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Henry-Ellenson/D-League/54842/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Langston-Galloway/D-League/22568/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Reggie-Jackson/D-League/3771/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Stanley-Johnson/D-League/26756/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Jon-Leuer/D-League/4000/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Boban-Marjanovic/D-League/2413/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Ish-Smith/D-League/3665/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Eric-Moreland/D-League/23255/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Tarik-Black/D-League/9359/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Damien-Wilkins/D-League/295/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Willie-Reed/D-League/10776/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Jordan-Clarkson/D-League/22892/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Tyler-Ennis/D-League/27049/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Ivica-Zubac/D-League/49290/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/James-Ennis/D-League/31122/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/James-Johnson/D-League/1604/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Tyler-Johnson/D-League/23365/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Josh-Richardson/D-League/28787/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Okaro-White/D-League/9344/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Hassan-Whiteside/D-League/7693/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/AJ-Hammons/D-League/24294/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Rodney-McGruder/D-League/4210/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/DeAndre-Liggins/D-League/5713/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Cole-Aldrich/D-League/2159/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Tyus-Jones/D-League/26752/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Shabazz-Muhammad/D-League/24245/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Jordan-Crawford/D-League/4441/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Cheick-Diallo/D-League/49975/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Darius-Miller/D-League/2364/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Alexis-Ajinca/D-League/775/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Solomon-Hill/D-League/6653/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Ron-Baker/D-League/30538/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Tim-Hardaway-Jr/D-League/9386/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Ramon-Sessions/D-League/14/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Lance-Thomas/D-League/1948/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Dakari-Johnson/D-League/24676/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Khem-Birch/D-League/24151/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Justin-Anderson/D-League/24291/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Robert-Covington/D-League/11707/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Richaun-Holmes/D-League/42932/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Eric-Bledsoe/D-League/5714/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Derrick-Jones/D-League/59508/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Tyler-Ulis/D-League/52567/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/TJ-Warren/D-League/24292/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Alan-Williams/D-League/31153/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Kosta-Koufos/D-League/778/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Skal-Labissiere/D-League/42074/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Malachi-Richardson/D-League/49322/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Garrett-Temple/D-League/1757/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/George-Papagiannis/D-League/32198/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Brandon-Paul/D-League/2273/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Alfonzo-McKinnie/D-League/22478/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Lucas-Nogueira/D-League/24240/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/KJ-McDaniels/D-League/28823/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Carrick-Felix/D-League/23244/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Tim-Frazier/D-League/3977/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Mike-Scott/D-League/3753/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Kent-Bazemore/D-League/18858/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/DeAndre-Bembry/D-League/49891/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Ryan-Kelly/D-League/2193/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Mike-Muscala/D-League/17065/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Taurean-Prince/D-League/42410/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Dennis-Schroder/D-League/26503/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Avery-Bradley/D-League/2194/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Jae-Crowder/D-League/22407/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Gerald-Green/D-League/354/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Demetrius-Jackson/D-League/43395/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Jordan-Mickey/D-League/28236/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Terry-Rozier/D-League/24644/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Marcus-Smart/D-League/24275/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/James-Young/D-League/24681/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Isaiah-Canaan/D-League/12049/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Cristiano-Felicio/D-League/24840/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Jerian-Grant/D-League/9389/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Cameron-Payne/D-League/57476/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Bobby-Portis/D-League/41433/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Denzel-Valentine/D-League/44077/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Paul-Zipser/D-League/26506/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Kay-Felder/D-League/57281/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Dahntay-Jones/D-League/445/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Edy-Tavares/D-League/39662/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Ian-Clark/D-League/14156/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Damian-Jones/D-League/42379/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Shaun-Livingston/D-League/377/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Kevon-Looney/D-League/41494/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/James-McAdoo/D-League/9304/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Patrick-McCaw/D-League/80940/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Patrick-Beverley/D-League/1684/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Clint-Capela/D-League/28284/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Sam-Dekker/D-League/28239/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Montrezl-Harrell/D-League/24290/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Chinanu-Onuaku/D-League/54847/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Isaiah-Taylor/D-League/56862/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Troy-Williams/D-League/24319/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Kyle-Wiltjer/D-League/24155/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Aaron-Brooks/D-League/53/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Rakeem-Christmas/D-League/24156/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Georges-Niang/D-League/42429/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Glenn-Robinson/D-League/28500/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Joe-Young/D-League/22869/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Alan-Anderson/D-League/939/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Brice-Johnson/D-League/24658/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Diamond-Stone/D-League/41493/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Wade-Baldwin-IV/D-League/81056/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Troy-Daniels/D-League/18948/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Deyonta-Davis/D-League/73162/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/JaMychal-Green/D-League/2187/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Andrew-Harrison/D-League/24306/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Jarell-Martin/D-League/41442/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Wayne-Selden/D-League/24687/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Khris-Middleton/D-League/4294/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Gary-Payton-II/D-League/80992/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Rashad-Vaughn/D-League/42082/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Josh-Huestis/D-League/23259/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Andre-Roberson/D-League/23283/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Allen-Crabbe/D-League/9300/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Ed-Davis/D-League/2174/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Festus-Ezeli/D-League/5962/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Jake-Layman/D-League/41248/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/CJ-McCollum/D-League/17251/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Shabazz-Napier/D-League/22395/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Tim-Quarterman/D-League/56701/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Noah-Vonleh/D-League/24323/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Kyle-Anderson/D-League/24265/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Joel-Anthony/D-League/789/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Davis-Bertans/D-League/24343/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Dewayne-Dedmon/D-League/28991/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Bryn-Forbes/D-League/42884/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Danny-Green/D-League/1642/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Patty-Mills/D-League/1632/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Dejounte-Murray/D-League/73136/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Jonathon-Simmons/D-League/30740/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Bruno-Caboclo/D-League/59172/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/DeMarre-Carroll/D-League/1631/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Cory-Joseph/D-League/7108/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Lucas-Nogueira/D-League/24240/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Patrick-Patterson/D-League/2152/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Jakob-Poeltl/D-League/53231/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Norman-Powell/D-League/24195/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Pascal-Siakam/D-League/81426/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Fred-VanVleet/D-League/44094/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Delon-Wright/D-League/56842/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Joel-Bolomboy/D-League/45041/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Alec-Burks/D-League/4169/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Rudy-Gobert/D-League/25858/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Shelvin-Mack/D-League/9419/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Raulzinho-Neto/D-League/24092/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Marcin-Gortat/D-League/145/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Brandon-Jennings/D-League/1609/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Ian-Mahinmi/D-League/264/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Sheldon-Mac/D-League/24189/Career/By_Season/Per_Game/Regular_Season",
"https://basketball.realgm.com/player/Chris-McCullough/D-League/41445/Career/By_Season/Per_Game/Regular_Season",
]
for x in list_of_urls:
#url = "https://basketball.realgm.com/player/Chris-McCullough/D-League/41445/Career/By_Season/Per_Game/Regular_Season"
response = requests.get(x)
html = response.content
soup = BeautifulSoup(html, "lxml")
print ("Scraping Player")
list_of_cells = []
#parse the url
splitList = x.split("/")
playerName = splitList[4].split("-")
print(playerName[0])
print(playerName[1])
fullName = playerName[0] + " " + playerName[1]
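    # Note: three-part names in these URLs (e.g. Tim-Hardaway-Jr) lose their
    # suffix here, since only the first two tokens are joined.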
list_of_cells.append(fullName)
print(splitList)
table = soup.findAll('tr', attrs={'class': 'per_game'})[1]
for row in table.findAll('td'):
list_of_cells.append(row.text)
    writer.writerow(list_of_cells)
# close the output file so all buffered rows are flushed to disk
outfile.close()
|
[
"nnachnan@purdue.edu"
] |
nnachnan@purdue.edu
|
30609fa30e639e7b15f7080d945b32a768d2b7bd
|
51ae7595b30725ac64f207aada4b24ee14a67c28
|
/django_lv2/new_populate.py
|
819d735af36b656f6aad8c7121152bdf085b4823
|
[] |
no_license
|
aniketgarg22/django-deployment-example
|
0889429aefb9ef5a3bff185a2c56ffff8d745797
|
405aefae263f03e40f8e5ad5ccb574f04e2e3eb4
|
refs/heads/master
| 2023-01-13T08:50:34.019429
| 2020-11-15T07:26:55
| 2020-11-15T07:26:55
| 312,978,676
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 568
|
py
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", 'django_lv2.settings')
import django
django.setup()
from djangoapp.models import User
from faker import Faker
fakegen = Faker()
def populate(N=5):
for entry in range(N):
fake_name = fakegen.name().split()
fake_first_name = fake_name[0]
fake_last_name = fake_name[1]
fake_email = fakegen.email()
user = User.objects.get_or_create(first_name=fake_first_name, last_name=fake_last_name, email=fake_email)[0]
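        # get_or_create returns an (object, created) tuple; [0] keeps the User instance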
if __name__ == '__main__':
print("POPULATING DB")
populate(20)
print("COMPLETE!")
|
[
"aniketgarg@Anikets-MacBook-Air.local"
] |
aniketgarg@Anikets-MacBook-Air.local
|
51e08bae586536d9a36d487813134e6cc5884797
|
397bea9873d99bd57838ee20b9159cb75ec94862
|
/lab8/text_recognizer/tests/support/create_emnist_lines_support_files.py
|
a5b833120bc7c0d8460e88e411789819c6d59dc1
|
[] |
no_license
|
richardyantas/text-recognizer-project
|
a6e4de23ee04c3872b033604aaedbe26b48e2948
|
46eee48583391c5d2ccfd304aab0f07f8a7651dd
|
refs/heads/master
| 2020-04-27T03:53:28.642746
| 2019-03-06T00:05:15
| 2019-03-06T00:05:15
| 174,038,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 846
|
py
|
from pathlib import Path
import shutil
import numpy as np
from text_recognizer.datasets import EmnistLinesDataset
import text_recognizer.util as util
SUPPORT_DIRNAME = Path(__file__).parents[0].resolve() / 'emnist_lines'
def create_emnist_lines_support_files():
shutil.rmtree(SUPPORT_DIRNAME, ignore_errors=True)
SUPPORT_DIRNAME.mkdir()
dataset = EmnistLinesDataset()
dataset.load_or_generate_data()
for ind in [0, 1, 3]:
image = dataset.x_test[ind]
print(image.sum(), image.dtype)
label = ''.join(
dataset.mapping[label]
for label in np.argmax(dataset.y_test[ind], axis=-1).flatten()
).strip(' _')
print(label)
util.write_image(image, str(SUPPORT_DIRNAME / f'{label}.png'))
if __name__ == '__main__':
create_emnist_lines_support_files()
|
[
"richardyantas5@gmail.com"
] |
richardyantas5@gmail.com
|
be478fa628908fb468452b0e8d62065396268a9c
|
ab53f5af878bbbf23c5b94e14febebdceae8303e
|
/test/test_windows.py
|
179515d8e12ddd9d30190e7fb56863c2d2dfa205
|
[
"BSD-2-Clause"
] |
permissive
|
nadyacla/jsonformatter
|
dd0445505530f05017fda5f3cf7f17f6a83ad677
|
a5561e58de0857766d8cd10729ebe261f9fe8296
|
refs/heads/master
| 2023-06-21T17:40:04.755771
| 2021-01-02T01:32:21
| 2021-01-02T01:32:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,872
|
py
|
#!/usr/bin/env python
# -*- coding: gbk -*-
"""
File: jsonformatter.py
Author: Me
Email: yourname@email.com
Github: https://github.com/yourname
Description: jsonformatter.py
"""
import datetime
import logging
import os
import random
import unittest
from collections import OrderedDict
from logging.config import fileConfig
if __file__ == 'test_windows.py':
import sys
sys.path.insert(0, '..')
from jsonformatter import JsonFormatter, basicConfig
class JsonFormatterTest(unittest.TestCase):
def test_default_config(self):
root = logging.getLogger()
root.setLevel(logging.INFO)
datefmt = None
sh = logging.StreamHandler()
formatter = JsonFormatter()
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
root.addHandler(sh)
root.info("test %s config", 'default')
def test_string_format(self):
STRING_FORMAT = '''{
"Name": "name",
"Levelno": "levelno",
"Levelname": "levelname",
"Pathname": "pathname",
"Filename": "filename",
"Module": "module",
"Lineno": "lineno",
"FuncName": "funcName",
"Created": "created",
"Asctime": "asctime",
"Msecs": "msecs",
"RelativeCreated": "relativeCreated",
"Thread": "thread",
"ThreadName": "threadName",
"Process": "process",
"Message": "message"
}'''
root = logging.getLogger()
root.setLevel(logging.INFO)
datefmt = None
sh = logging.StreamHandler()
formatter = JsonFormatter(STRING_FORMAT, datefmt)
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
root.addHandler(sh)
root.info("test %s format", 'string')
def test_format_style(self):
FORMT_STYLE = {
"name": "name",
"levelno": "levelno",
"levelname": "levelname",
"pathname": "pathname",
"filename": "filename",
"module": "module",
"lineno": "lineno",
"funcName": "funcName",
"created": "created",
"asctime": "asctime",
"msecs": "msecs",
"relativeCreated": "relativeCreated",
"thread": "thread",
"threadName": "threadName",
"process": "process",
"message": "{message}"
}
root = logging.getLogger()
root.setLevel(logging.INFO)
datefmt = None
sh = logging.StreamHandler()
formatter = JsonFormatter(FORMT_STYLE, datefmt, '{')
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
root.addHandler(sh)
root.info("test %s style", 'format')
def test_template_style(self):
TEMPLATE_STYLE = {
"name": "name",
"levelno": "levelno",
"levelname": "levelname",
"pathname": "pathname",
"filename": "filename",
"module": "module",
"lineno": "lineno",
"funcName": "funcName",
"created": "created",
"asctime": "asctime",
"msecs": "msecs",
"relativeCreated": "relativeCreated",
"thread": "thread",
"threadName": "threadName",
"process": "process",
"message": "${message}"
}
root = logging.getLogger()
root.setLevel(logging.INFO)
datefmt = None
sh = logging.StreamHandler()
formatter = JsonFormatter(TEMPLATE_STYLE, datefmt, '$')
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
root.addHandler(sh)
root.info("test %s style", 'template')
def test_percent_style_unicode(self):
root = logging.getLogger()
root.setLevel(logging.INFO)
formatter = JsonFormatter(
"""{"log":"%(message)s"}""", style="%", encoding='gbk', ensure_ascii=False)
sh = logging.StreamHandler()
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
root.addHandler(sh)
        root.info('test percent style unicode: %s', '中文')
def test_format_style_unicode(self):
root = logging.getLogger()
root.setLevel(logging.INFO)
formatter = JsonFormatter(
"""{"log":"{message}"}""", style="{", encoding='gbk', ensure_ascii=False)
sh = logging.StreamHandler()
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
root.addHandler(sh)
        root.info('test format style unicode: %s', '中文')
def test_template_style_unicode(self):
root = logging.getLogger()
root.setLevel(logging.INFO)
formatter = JsonFormatter(
"""{"log":"${message}"}""", style="$", encoding='gbk', ensure_ascii=False)
sh = logging.StreamHandler()
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
root.addHandler(sh)
        root.info('test template style unicode: %s', '中文')
def test_dict_format(self):
DICT_FORMAT = {
"name": "name",
"levelno": "levelno",
"levelname": "levelname",
"pathname": "pathname",
"filename": "filename",
"module": "module",
"lineno": "lineno",
"funcName": "funcName",
"created": "created",
"asctime": "asctime",
"msecs": "msecs",
"relativeCreated": "relativeCreated",
"thread": "thread",
"threadName": "threadName",
"process": "process",
"message": "message"
}
root = logging.getLogger()
root.setLevel(logging.INFO)
datefmt = None
sh = logging.StreamHandler()
formatter = JsonFormatter(DICT_FORMAT, datefmt)
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
root.addHandler(sh)
root.info("test %s format", 'dict')
def test_ordered_dict_format(self):
ORDERED_DICT_FORMAT = OrderedDict([
("name", "name"),
("levelno", "levelno"),
("levelname", "levelname"),
("pathname", "pathname"),
("filename", "filename"),
("module", "module"),
("lineno", "lineno"),
("funcName", "funcName"),
("created", "created"),
("asctime", "asctime"),
("msecs", "msecs"),
("relativeCreated", "relativeCreated"),
("thread", "thread"),
("threadName", "threadName"),
("process", "process"),
("message", "message")
])
root = logging.getLogger()
root.setLevel(logging.INFO)
datefmt = None
sh = logging.StreamHandler()
formatter = JsonFormatter(ORDERED_DICT_FORMAT, datefmt)
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
root.addHandler(sh)
root.info("test %s format", 'ordered dict')
def test_log_exception(self):
root = logging.getLogger()
root.setLevel(logging.INFO)
sh = logging.StreamHandler()
formatter = JsonFormatter()
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
root.addHandler(sh)
try:
1 / 0
except Exception as e:
root.exception('test log exception')
def test_record_custom_attrs(self):
RECORD_CUSTOM_ATTRS = {
'asctime': lambda: datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S.%f'),
'user id': lambda: str(random.random())[2:10]
}
RECORD_CUSTOM_FORMAT = OrderedDict([
("user id", "user id"), # new custom attrs
("name", "name"),
("levelno", "levelno"),
("levelname", "levelname"),
("pathname", "pathname"),
("filename", "filename"),
("module", "module"),
("lineno", "lineno"),
("funcName", "funcName"),
("created", "created"),
("asctime", "asctime"), # use custom format replace default.
("msecs", "msecs"),
("relativeCreated", "relativeCreated"),
("thread", "thread"),
("threadName", "threadName"),
("process", "process"),
("message", "message")
])
root = logging.getLogger()
root.setLevel(logging.INFO)
datefmt = None
sh = logging.StreamHandler()
formatter = JsonFormatter(
RECORD_CUSTOM_FORMAT, datefmt, record_custom_attrs=RECORD_CUSTOM_ATTRS)
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
root.addHandler(sh)
root.info('test record custom attrs')
def test_multi_value_in_one_key(self):
MULTI_VALUE_FORMAT = {
"multi value": "%(name)s - %(levelno)s - %(levelname)s - %(pathname)s - %(filename)s - %(module)s - %(lineno)d - %(funcName)s - %(created)f - %(asctime)s - %(msecs)d - %(relativeCreated)d - %(thread)d - %(threadName)s - %(process)d - %(message)s"
}
root = logging.getLogger()
root.setLevel(logging.INFO)
datefmt = None
sh = logging.StreamHandler()
formatter = JsonFormatter(
MULTI_VALUE_FORMAT, datefmt)
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
root.addHandler(sh)
root.info('test multi value in one key')
def test_json_dumps_parameter_indent(self):
root = logging.getLogger()
root.setLevel(logging.INFO)
datefmt = None
sh = logging.StreamHandler()
formatter = JsonFormatter(indent=4)
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
root.addHandler(sh)
        root.info('test json dumps parameter `indent`: 4')
def test_json_dumps_parameter_ensure_ascii_false(self):
root = logging.getLogger()
root.setLevel(logging.INFO)
sh = logging.StreamHandler()
formatter = JsonFormatter(ensure_ascii=False, encoding='gbk')
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
root.addHandler(sh)
        root.info('test json dumps parameter `ensure_ascii` False: 中文')
def test_file_config(self):
fileConfig(os.path.join(os.path.dirname(
__file__), 'logger_config.ini'))
root = logging.getLogger('root')
root.info('test file config')
def test_multi_handlers(self):
root = logging.getLogger()
root.setLevel(logging.INFO)
sh = logging.StreamHandler()
formatter = JsonFormatter(ensure_ascii=False, encoding='gbk')
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
root.addHandler(sh)
sh = logging.StreamHandler()
formatter = JsonFormatter(ensure_ascii=False, encoding='gbk')
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
root.addHandler(sh)
sh = logging.StreamHandler()
formatter = logging.Formatter('%(message)s')
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
root.addHandler(sh)
root.info('test multi handlers')
def test_mix_extra(self):
root = logging.getLogger()
root.setLevel(logging.INFO)
sh = logging.StreamHandler()
formatter = JsonFormatter(
ensure_ascii=False, encoding='gbk', mix_extra=True, indent=4)
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
root.addHandler(sh)
root.info(
'test mix extra in fmt',
extra={
'extra1': 'extra content 1',
'extra2': 'extra content 2'
})
root.info(
'test mix extra in fmt',
extra={
'extra3': 'extra content 3',
'extra4': 'extra content 4'
})
def test_mix_extra_position_is_head(self):
root = logging.getLogger()
root.setLevel(logging.INFO)
sh = logging.StreamHandler()
formatter = JsonFormatter(
ensure_ascii=False, encoding='gbk', mix_extra=True, mix_extra_position='head', indent=4)
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
root.addHandler(sh)
root.info('test mix extra position is head',
extra={'extra': 'extra content'})
def test_mix_extra_multi_formatter(self):
root = logging.getLogger()
root.setLevel(logging.INFO)
fh = logging.FileHandler('jsonformatter.log')
formatter = JsonFormatter(
ensure_ascii=False, mix_extra=True, indent=4)
fh.setFormatter(formatter)
fh.setLevel(logging.INFO)
root.addHandler(fh)
sh = logging.StreamHandler()
formatter = JsonFormatter(
ensure_ascii=False, mix_extra=True, indent=4)
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
root.addHandler(sh)
root.info('test mix extra multi formatter',
extra={'extra': 'extra content'})
def test_mix_extra_position_is_mix(self):
root = logging.getLogger()
root.setLevel(logging.INFO)
sh = logging.StreamHandler()
formatter = JsonFormatter(
ensure_ascii=False, encoding='gbk', mix_extra=True, mix_extra_position='mix', indent=4)
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
root.addHandler(sh)
root.info('test mix extra position is mix',
extra={'extra': 'extra content'})
def test_record_custom_attrs_with_kwargs(self):
def _new_custom_attribute_status(**record_attrs):
if record_attrs['levelname'] in ['ERROR', 'CRITICAL']:
return 'failed'
else:
return 'success'
RECORD_CUSTOM_ATTRS = {
'status': _new_custom_attribute_status,
}
RECORD_CUSTOM_FORMAT = OrderedDict([
("status", "status"), # new custom attribute
("log", "message")
])
root = logging.getLogger()
root.setLevel(logging.INFO)
datefmt = None
sh = logging.StreamHandler()
formatter = JsonFormatter(
RECORD_CUSTOM_FORMAT,
record_custom_attrs=RECORD_CUSTOM_ATTRS
)
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
root.addHandler(sh)
root.info('test record custom attrs with kwargs')
root.error('test record custom attrs with kwargs')
def test_basic_config_level(self):
basicConfig(level=logging.INFO)
logging.info('basic config level')
def test_basic_config_format(self):
basicConfig(
level=logging.INFO,
format="""{
"levelname": "levelname",
"name": "name",
"log": "message"
}"""
)
logging.info('basic config format')
def tearDown(self):
root = logging.getLogger()
# remove handlers
root.handlers = []
if __name__ == '__main__':
unittest.main()
|
[
"my_colorful_days@163.com"
] |
my_colorful_days@163.com
|
6d18d34dfc24af3535f595e30436cb9f835c5d28
|
c69db65f1dbb1a817bf67ada19ddf419d167ef8e
|
/ks_crm/urls.py
|
759e86e9782869913ad1f31a56380154e87dbf1c
|
[] |
no_license
|
saury2013/ks_edu
|
8c7c32d15904e1c71c5db7cf5ac2a810acf30d52
|
66c1d0c0cd6345d2833fc583dbf77a68477fdb3c
|
refs/heads/master
| 2020-03-12T13:32:21.497020
| 2018-08-30T13:46:46
| 2018-08-30T13:46:46
| 130,644,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,379
|
py
|
"""ThreeFish URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from ks_crm import views
urlpatterns = [
url(r'^$', views.index,name='crm_home'),
url(r'^stu_index$', views.stu_index,name='stu_index'),
url(r'^testing_system', views.testing_system,name='testing_system'),
url(r'^lesson_video', views.lesson_video,name='lesson_video'),
url(r'^new_article', views.add_news,name='new_article'),
url(r'^new_action', views.add_action,name='new_action'),
url(r'^faq_editors', views.add_FAQ,name='faq_editors'),
url(r'^new_course', views.add_course,name='new_course'),
url(r'^profile_editors', views.profile_modify,name='profile_editors'),
url(r'^image_upload/(\w+)/', views.image_upload),
]
|
[
"saury2011@sina.com"
] |
saury2011@sina.com
|
8f7dcc5a6f73539920f68c2f94233a37e626e3f4
|
9a22ecc2c9d063b398834bae6948187c84227695
|
/Hookworm_InputData.py
|
02f12b721658923feea9f4a1f34f26840c3bd149
|
[] |
no_license
|
natalieolson/HPM573_PROJECT
|
ea92115eac44e1f99f85b59154625a2f2d408b0a
|
2d9542914e8a6b7f19188dbbbbb6b1c9b94163d7
|
refs/heads/master
| 2020-03-14T17:30:21.100982
| 2018-05-01T14:20:14
| 2018-05-01T14:20:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
import numpy as np
# simulation settings
POP_SIZE = 500 # cohort population size
SIM_LENGTH = 10 # length of simulation (years)
ALPHA = 0.05 # significance level for calculating confidence intervals
DISCOUNT = 0.03 # annual discount rate
DELTA_T = 1/12
PSA_ON = False
#Transmission Rate (Rate of infection S-->I)
a = -np.log(1-(6.5/100))
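# -log(1 - p) converts an annual infection probability p (here 6.5%) into the
# equivalent continuous rate, since p = 1 - exp(-rate).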
#Natural Recovery Rate
b = 1/2
#Probability of Treatment(coverage)
c = 0.562 * 1
#Probability of Effective Treatment
d = 0.784 * 52
e = 52 * (1-0.784)
# transition matrix
TRANS_MATRIX = [
[None, a, 0], #Susceptible
[b, None, c], #Infected
[d, e, None] #Treatment
]
TRANS_MATRIX_SEMI = [
[None, a, 0], #Susceptible
[b, None, 2*c], #Infected
[d, e, None] #Treatment
]
# annual cost of each health state
ANNUAL_STATE_COST = [
0, # Susceptible
0, # Infected
.50, #Treatment
]
# annual health utility of each health state
ANNUAL_STATE_UTILITY = [
1.00, # Susceptible
0.902, # Infected
1.00, #Treatment
]
# annual drug costs
MDA_COST = 0.5 #per person
RR_TREAT = 0.562
RR_RECOVERY = 0.784
|
[
"noreply@github.com"
] |
natalieolson.noreply@github.com
|
a1d6925b48ced31e74c4adeb3aa9fa318d89ddab
|
42ea67fa053b8dd01e0b85b1ec731cd10f34723e
|
/Classification/Adult/script.py
|
889fe460d6128a60688b98a479fabc6bc2ea2d50
|
[] |
no_license
|
Jinsu-Han/DATAMINING
|
cbfafb962ead7bcba918c7dd37b52236dcf59cb2
|
2148dbb8a87ab6038ad731bf4f1254ad094e8c7f
|
refs/heads/master
| 2021-05-16T06:54:29.025746
| 2017-06-10T22:53:05
| 2017-06-10T22:53:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,963
|
py
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
import random
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import (accuracy_score,
average_precision_score,
auc,
#auc_score,
classification_report,
confusion_matrix,
explained_variance_score,
f1_score,
fbeta_score,
hamming_loss,
hinge_loss,
jaccard_similarity_score,
log_loss,
matthews_corrcoef,
mean_squared_error,
mean_absolute_error,
precision_recall_curve,
precision_recall_fscore_support,
precision_score,
recall_score,
r2_score,
roc_auc_score,
roc_curve,
zero_one_loss)
train=pd.read_csv('data.csv')
test=pd.read_csv('data.csv')
train['type']='Train' #Create a flag for Train and Test Data set
test['type']='Test'
fullData = pd.concat([train,test],axis=0) #Combined both Train and Test Data set
fullData.columns # This will show all the column names
print(fullData.head(5)) # Show first 5 records of dataframe
print(fullData.describe()) #You can look at summary of numerical fields by using describe() function
#ID_col = ['REF_NO']
target_col = ["income"]
cat_cols = ['workclass','education','marital-status','occupation','relationship', 'race','sex','native-country']
num_cols= list(set(list(fullData.columns))-set(cat_cols))
other_col=['type'] #Test and Train Data set identifier
fullData.isnull().any()#Will return the feature with True or False,True means have missing value else False
num_cat_cols = num_cols+cat_cols # Combined numerical and Categorical variables
#Create a new variable for each variable having missing value with VariableName_NA
# and flag missing value with 1 and other with 0
for var in num_cat_cols:
if fullData[var].isnull().any()==True:
fullData[var+'_NA']=fullData[var].isnull()*1
#Impute numerical missing values with mean
fullData[num_cols] = fullData[num_cols].fillna(fullData[num_cols].mean(),inplace=True)
#Impute categorical missing values with -9999
fullData[cat_cols] = fullData[cat_cols].fillna(value = -9999)
#create label encoders for categorical features
for var in cat_cols:
number = LabelEncoder()
fullData[var] = number.fit_transform(fullData[var].astype('str'))
#Target variable is also a categorical so convert it
#fullData["Account.Status"] = number.fit_transform(fullData["Account.Status"].astype('str'))
train=fullData[fullData['type']=='Train']
test=fullData[fullData['type']=='Test']
train['is_train'] = np.random.uniform(0, 1, len(train)) <= .75
Train, Validate = train[train['is_train']==True], train[train['is_train']==False]
#features=list(set(list(fullData.columns))-set(ID_col)-set(target_col)-set(other_col))
features=list(set(list(fullData.columns))-set(target_col)-set(other_col))
x_train = Train[list(features)].values
y_train = Train["income"].values
x_validate = Validate[list(features)].values
y_validate = Validate["income"].values
x_test=test[list(features)].values
random.seed(100)
rf = RandomForestClassifier(n_estimators=1000)
rf.fit(x_train, y_train)
status = rf.predict_proba(x_validate)
fpr, tpr, _ = roc_curve(y_validate, status[:,1], pos_label='T')
roc_auc = auc(fpr, tpr)
print(roc_auc)
final_status = rf.predict_proba(x_test)
test["income"]=final_status[:,1]
test.to_csv('model_output.csv',columns=['income'])
|
[
"mohammadnrdn@gmail.com"
] |
mohammadnrdn@gmail.com
|
6d4a8f2e86197d24d64165978b5ee9b80a879c81
|
90f468d1a8328421041e1d1eb17e8333228a3d84
|
/instituteapp/admin.py
|
571823c429dbf451d9fefbb14b706bb8334e1307
|
[] |
no_license
|
nikhilpilla/remote_repository
|
27c339a373823efedb3aaf87e0b4c15ccc4fb48e
|
1015cc6b9b3bae82cb30fe894aa4d957484e8650
|
refs/heads/master
| 2020-12-20T09:07:01.865114
| 2020-01-24T14:14:19
| 2020-01-24T14:14:19
| 236,024,557
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
from django.contrib import admin
from instituteapp.models import ServicerData
class ServicerDataAdmin(admin.ModelAdmin):
list_display = ['courseno',
'coursename',
'fee',
'faculty',
'timeduration']
admin.site.register(ServicerData,ServicerDataAdmin)
|
[
"nikhiljaikumar.pilla@gmail.com"
] |
nikhiljaikumar.pilla@gmail.com
|
17ccb12c8a2aa66962df220602332c829a79fc87
|
a56fdac4317ee4a7b1c9f70395d7ed8b074e9359
|
/pizzacart/migrations/0006_alter_product_desc.py
|
9f9514db020e78de6a2d552e682c21a9b8fea043
|
[] |
no_license
|
akshaysaini13/Pizza-App
|
2496a8017acca966ef827c457100458f8838692a
|
0d56736db9cd7079fe54102966a54e01fa1d4211
|
refs/heads/master
| 2023-08-28T19:52:54.052654
| 2021-10-23T16:18:12
| 2021-10-23T16:18:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
# Generated by Django 3.2.6 on 2021-10-23 07:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pizzacart', '0005_alter_product_piztype'),
]
operations = [
migrations.AlterField(
model_name='product',
name='desc',
field=models.TextField(),
),
]
|
[
"akshaysaini13@gmail.com"
] |
akshaysaini13@gmail.com
|
20ca100f65320d6d541360471bad566804fcf5e0
|
996cb7f9a28ad5f5cd7e355ef82545f5a7c43100
|
/python/problems/034_problem.py
|
31de1b530e737b09b201d0925e0c041ff6106753
|
[] |
no_license
|
rhedshi/project-euler
|
abec8888136362ff7a5b06e656c1e1fa9e5a7318
|
3e841c2de10d5b1616e179e3fe95d3e8939de47c
|
refs/heads/master
| 2023-05-24T16:14:12.036456
| 2021-12-02T02:25:22
| 2021-12-02T02:26:53
| 80,599,212
| 3
| 0
| null | 2021-12-01T18:40:17
| 2017-02-01T07:44:55
|
Python
|
UTF-8
|
Python
| false
| false
| 480
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Problem 34 - Digit factorials
=============================
145 is a curious number, as 1! + 4! + 5! = 1 + 24 + 120 = 145.
Find the sum of all numbers which are equal to the sum of the factorial of their
digits.
Note: as 1! = 1 and 2! = 2 are not sums they are not included.
"""
from utils.number import is_factorion
total = 0  # avoid shadowing the built-in sum()
for n in range(10, 100000):
    if is_factorion(n):
        total += n
print(total)
# Answer: 40730
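# For reference, a minimal sketch with the semantics is_factorion is assumed
# to have (the real helper lives in utils.number):
from math import factorial
def is_factorion_sketch(n):
    return n == sum(factorial(int(d)) for d in str(n))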
|
[
"rhed@fb.com"
] |
rhed@fb.com
|
32445e94f16c9d5cb84226c5780b74192610469b
|
96cc32daf3a1cc0406b3f15f6c2a3e3ae7158d1e
|
/V2RaycSpider1225/src/config.py
|
060def8b9bce1bb68a814197a7a1c70f84666c18
|
[
"MIT"
] |
permissive
|
mcyrj/V2RayCloudSpider
|
272999de21443d8b450ef0dcbf3cc1f16d63a3db
|
c1808571b7d244f0ac4bd50078661a3d5a2bc254
|
refs/heads/master
| 2023-06-11T09:55:53.498179
| 2021-07-11T09:02:35
| 2021-07-11T09:02:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,864
|
py
|
"""
读取yaml配置文件,生成全局静态变量。
"""
import os
import shutil
from sys import platform
import pytz
import yaml
# ---------------------------------------------------
# TODO Configuration file index
# The default single-machine configuration file is config.yaml
# ---------------------------------------------------
# When running several processes on one machine, each process should have its own
# startup configuration file (.yaml); change the filename user_ points to accordingly, e.g.:
# config_1.yaml user_= os.path.join(os.path.dirname(__file__), 'config_1.yaml')
# config_master.yaml user_= os.path.join(os.path.dirname(__file__), 'config_master.yaml')
user_ = os.path.join(os.path.dirname(__file__), 'config.yaml')
# Filename of the configuration template; do not change it unless you have a special need
sample_ = os.path.join(os.path.dirname(__file__), 'config-sample.yaml')
try:
    if not os.path.exists(sample_):
        print(">>> Please do not delete the generated configuration template config-sample.yaml; make sure it is located in the project root")
        raise FileNotFoundError
    elif os.path.exists(sample_) and not os.path.exists(user_):
        print(">>> The config.yaml configuration file is missing from the project root")
        shutil.copy(sample_, user_)
        print(">>> Initializing startup parameters... ")
        print(">>> Configure the startup parameters according to the docs https://github.com/QIN2DIM/V2RayCloudSpider")
        exit()
    elif os.path.exists(sample_) and os.path.exists(user_):
        # Read the yaml configuration variables
        with open(user_, 'r', encoding='utf8') as stream:
            config_ = yaml.load(stream.read(), Loader=yaml.FullLoader)
            if __name__ == '__main__':
                print(f'>>> Reading configuration file {config_}')
except FileNotFoundError:
    try:
        import requests
        res_ = requests.get("http://123.56.77.6:8888/down/AgIHWQ6QXtLg")
        with open(sample_, 'wb') as fp:
            fp.write(res_.content)
        print(">>> Configuration template fetched successfully; please restart the project")
    except Exception as e_:
        print(e_)
        print('>>> Automatic fetch of the configuration template failed; please check the local network')
    finally:
        exit()
"""
================================================ ʕ•ﻌ•ʔ ================================================
(·▽·)欢迎使用V2Ray云彩姬,请跟随提示合理配置项目启动参数
================================================ ʕ•ﻌ•ʔ ================================================
# TODO 2020/11/22
# -- Modify
# -- Panel -> UpdatedModule
# -- Optimize -> Atomic operation
# -- Expand -> action slaver
# TODO v_1.0.2.11162350.11-beta
# --请不要在<小带宽的国内云服务器>上部署此项目,推荐利用性能闲置的非大陆IP的VPS运行项目(非生产环境)
# --若您的服务器配置 ~= `Linux 1xCPU 1GxRAM` 请勿在生产环境中开启coroutine_speed_up
# --若您的服务器配置过于豪华或至少有 2xCPU 2GxRAM,请将所有加速特效拉满(脚本引入了一定的鲁棒均衡模组,可以保证稳定采集)
# TODO v_5.0.3-beta
# -- 尝试引入6进程多哨兵模式
"""
# TODO (√) required; (*) optional
# ---------------------------------------------------
# TODO (√) Function Authority -- feature permissions
# ---------------------------------------------------
# SINGLE_DEPLOYMENT deployment mode: True for standalone (default), False for distributed
SINGLE_DEPLOYMENT = config_['SINGLE_DEPLOYMENT']
# ENABLE_DEPLOY switch for the single-node scheduled deployment tasks
ENABLE_DEPLOY: dict = config_['ENABLE_DEPLOY']
# ENABLE_KERNEL server kernel switches
ENABLE_KERNEL: dict = config_['ENABLE_KERNEL']
# ENABLE_SERVER deploy Flask
ENABLE_SERVER: bool = config_['ENABLE_SERVER']
# ENABLE_COROUTINE coroutine speed-up
ENABLE_COROUTINE: bool = config_['ENABLE_COROUTINE']
# ENABLE_DEBUG Flask DEBUG
ENABLE_DEBUG: bool = not ENABLE_DEPLOY
# ENABLE_REBOUND data rebound
# After the master server goes down, the subscriptions held by the slave are cleaned and sent back.
# This option reverses the master-slave relationship; enable it manually only when the master has lost its data.
# Once the data has been written back, set ENABLE_REBOUND to False immediately and restart the project.
ENABLE_REBOUND: bool = config_['ENABLE_REBOUND']
# ---------------------------------------------------
# TODO (√)SINGLE_TASK_CAP -- 单类订阅的队列容载极限
# 当某种链接的数量达到这个阈值则不会启动该类链接的采集任务
# <Performance limit of 1xCPU 1GRAM VPS KVM>
# Defaults type:int = 25
# 个人使用 推荐SINGLE_TASK_CAP不超过3
# ---------------------------------------------------
SINGLE_TASK_CAP: int = config_['SINGLE_TASK_CAP']
# ---------------------------------------------------
# TODO (√)DEPLOY_INTERVAL -- schedule任务间隔,单位秒(s)
# 定时任务中采集任务频次: 1轮/INTERVAL_ACTION
# Defaults type:int = 5 * 60 s 既30分钟
#
# 定时任务中数据备份/过期移除频次: 1轮/INTERVAL_REFRESH
# Defaults type:int = 60 * 60 s 既1小时
# 定时任务中数据解耦/检查频次: 1轮/INTERVAL_REFRESH
# Defaults type:int = 1 * 60 s 既1分钟
# 为保证系统高可用性,请不要让任务巡回频次低于以上预设值
# ---------------------------------------------------
LAUNCH_INTERVAL: dict = config_['LAUNCH_INTERVAL']
# ---------------------------------------------------
# TODO (√) Redis Cluster Configuration (SSH-BASE)
# If you are unsure how to configure a remote Redis connection, search for a guide or see:
# https://shimo.im/docs/5bqnroJYDbU4rGqy/
# ---------------------------------------------------
# TODO (√) Settings of the Master-Redis responsible for leading the workflow
REDIS_MASTER: dict = config_['REDIS_MASTER']
# TODO (*) Settings of the Slave-Redis responsible for data disaster tolerance (DDT)
REDIS_SLAVER_DDT: dict = config_['REDIS_SLAVER_DDT']
# TODO (x) This configuration is not applicable in the current version
MYSQL_CONFIG: dict = config_['MYSQL_CONFIG']
# ---------------------------------------------------
# TODO (√)API for Flask(SSH-BASE)
# ---------------------------------------------------
API_HOST: str = REDIS_MASTER["host"]
API_DEBUG: bool = ENABLE_DEBUG
API_THREADED: bool = True
API_PORT: int = config_['API_PORT']
OPEN_HOST: str = "127.0.0.1" if API_DEBUG or "win" in platform else "0.0.0.0"
# ---------------------------------------------------
# Route interfaces (public)
# ---------------------------------------------------
ROUTE_API = {
"capture_subscribe": "/v2raycs/api/capture_subscribe",
"version_manager": "/v2raycs/api/version_manager",
"get_subs_num": "/v2raycs/api/get_sbus_num"
}
# ---------------------------------------------------
# Task queues
# ---------------------------------------------------
SEQ_TEST = {
"v2ray": True,
"ssr": True,
"trojan": False,
}
CRAWLER_SEQUENCE = [protocol.lower() for protocol, enabled in SEQ_TEST.items() if enabled]
# ---------------------------------------------------
# TODO (*) Noticer -- sends panic alerts, by default to yourself
# 1. If you only need to notify yourself, ServerChan is recommended; of the two channels it has the higher priority.
# 2. This section is optional; if left empty, no alerts are sent.
# ---------------------------------------------------
# ---------------------------------------------------
# TODO > Push via email -- QQ Mail is recommended; tutorial for enabling its SMTP service:
# https://service.mail.qq.com/cgi-bin/help?subtype=1&&id=28&&no=1001256
# ---------------------------------------------------
SMTP_ACCOUNT: dict = config_['SMTP_ACCOUNT']
# ---------------------------------------------------
# TODO > Push via <ServerChan> -- fill in your own key as SERVER_CHAN_SCKEY
# http://sc.ftqq.com/3.version
# ---------------------------------------------------
SERVER_CHAN_SCKEY: str = config_['SERVER_CHAN_SCKEY']
# ---------------------------------------------------
# TODO (√) CHROMEDRIVER_PATH -- path to ChromeDriver
# This project drives google-chrome; make sure Chrome and the matching version of chromedriver are installed in your environment.
# 1. Set up the google-chrome environment
#   1.1 Install Chrome
#       Unless you have special requirements, fetch the latest release:
#       >> Windows -> https://www.google.cn/chrome/index.html
#       >> Linux -> https://shimo.im/docs/5bqnroJYDbU4rGqy/
#   1.2 Install chromedriver
#       Check your Chrome version and install the chromedriver build matching it and your OS:
#       >> http://npm.taobao.org/mirrors/chromedriver/
#   1.3 Configure environment variables
#       (1) Put the matching chromedriver into the project's `./BusinessCentralLayer/` directory.
#       (2) Configure the Chrome environment variable. On Windows, add the path up to the Application folder to the system Path, e.g.:
#           C:\Program Files\Google\Chrome\Application
# 2. Notes
#   -- The project is developed and tested on Windows and deployed on Linux; on macOS or other systems, adjust the startup parameters according to the error messages.
#   -- If Chrome is installed somewhere other than the path above, adjust accordingly.
#   -- If you cannot determine your Chrome version, or still hit unexpected problems after the referenced blog, open an issue or search for a fix.
#       >> Project: https://github.com/QIN2DIM/V2RayCloudSpider
# ---------------------------------------------------
"""
================================================== ʕ•ﻌ•ʔ ==================================================
如果您并非<V2RayCloudSpider>项目开发者 请勿修改以下变量的默认参数
================================================== ʕ•ﻌ•ʔ ==================================================
Enjoy it -> ♂ main.py
"""
# ---------------------------------------------------
# (*) Redis BusinessLogicLayer Server Configuration (SSH-Advanced)
# ---------------------------------------------------
# (*) Core settings of the Master's secret key. Do not modify!
REDIS_SECRET_KEY: str = "v2rayc_spider:{}"
# ---------------------------------------------------
# Project number and version; the version must match the project (file name) version. Do not modify!
# version naming scheme: k.u.r -b
#   --k kernel: kernel-level update
#   --u update: new feature/module/mechanism added
#   --r repair: fix for a known bug, or a minor change
#   --b branch: branch type, either beta (testing) or release (stable)
# project_num naming scheme: K25 -- K increments with every kernel-level update, e.g.:
#   "4.u.r 0925" -> "5.u.r 1025"
# ---------------------------------------------------
project_num = "1225"
version = "5.1.0"
# ---------------------------------------------------
# Time zones
TIME_ZONE_CN = pytz.timezone("Asia/Shanghai")
TIME_ZONE_NY = pytz.timezone("America/New_York")
# Task switches: when the global switch is off, disable every scheduled task
if not ENABLE_DEPLOY['global']:
    for task_name in ENABLE_DEPLOY['tasks']:
        ENABLE_DEPLOY['tasks'][task_name] = False
# Master-slave reversal
if ENABLE_REBOUND:
REDIS_MASTER, REDIS_SLAVER_DDT = REDIS_SLAVER_DDT, REDIS_MASTER
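# Hedged sketch (assumption): judging only from the config_ lookups above, a
# minimal config.yaml must define at least these top-level keys; the values
# and sub-keys shown are illustrative, not the project's documented defaults:
#
#   SINGLE_DEPLOYMENT: true
#   ENABLE_DEPLOY: {global: true, tasks: {collector: true, refresh: true}}
#   ENABLE_KERNEL: {}
#   ENABLE_SERVER: true
#   ENABLE_COROUTINE: false
#   ENABLE_REBOUND: false
#   SINGLE_TASK_CAP: 3
#   LAUNCH_INTERVAL: {collector: 300, refresh: 3600, decouple: 60}
#   REDIS_MASTER: {host: "127.0.0.1", port: 6379, password: "", db: 0}
#   REDIS_SLAVER_DDT: {}
#   MYSQL_CONFIG: {}
#   API_PORT: 8000
#   SMTP_ACCOUNT: {}
#   SERVER_CHAN_SCKEY: ""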
|
[
"62018067+QIN2DIM@users.noreply.github.com"
] |
62018067+QIN2DIM@users.noreply.github.com
|
3c6be4d87f52e0e542c15e1383f89d83b2ac5584
|
9fa5d9ef9f9a4896126f42d2d16e87948a73579d
|
/build_xclient_all.py
|
a4a0968b88b5a11f61430e93a6aeea2c74583ff0
|
[] |
no_license
|
ReadingCode/xware
|
6c3272171922014f49216c36f70de31756ccd2b1
|
6a7edf746822d38c084867a7ce9930cc5a764da9
|
refs/heads/master
| 2020-03-26T05:03:53.876231
| 2013-05-29T06:09:32
| 2013-05-29T06:09:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
#!/usr/bin/env python
import os
import sys
commands = [
'scons -f xclient.scons unicode=yes release=no',
'scons -f xclient.scons unicode=no release=no',
'scons -f xclient.scons unicode=yes release=yes',
'scons -f xclient.scons unicode=no release=yes']
if __name__ == '__main__':
for command in commands:
os.system(' '.join([command] + sys.argv[1:]))
|
[
"stoneyrh@163.com"
] |
stoneyrh@163.com
|
537981f5f8ae26ec5e8ef25268157569c2eef36e
|
f45851b43325ab6cb76035f4cb6156f3e95d0ca2
|
/programming101/small.py
|
82609fad1e3a4071b7c9f039c6f06cb9a34171fe
|
[] |
no_license
|
csreyno/Digital-Crafts-Classes
|
8a6ba104a1921694521a199c075040afe429aaf4
|
216fd7744fee9dd127d2ff36192cad057dc08f2f
|
refs/heads/master
| 2023-01-21T06:45:52.727514
| 2020-11-24T21:36:30
| 2020-11-24T21:36:30
| 303,484,128
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,066
|
py
|
# #exercise 1
# name = input("what's your name?\n")
# print(f"Hello, {name}!")
# #exercise 2 - convert all lowercase to uppercase, letters already uppercase remain uppercase
# name = input("WHAT'S YOUR NAME?\n")
# print(f"HELLO, {name.upper()}!")
# print(f"YOUR NAME HAS {len(name)} LETTERS IN IT! AWESOME!")
# #exercise 3
# name = input("What is your name?\n")
# subject = input("what is you favorite subject in school?\n")
# print(f"{name}'s favorite subject in school is {subject}.")
# #exercise 4
# day = int(input("What day is it using (0-6) in place of the day?\n"))
# if day == 0:
# print("Sunday")
# elif day == 1:
# print("Monday")
# elif day == 2:
# print("Tuesday")
# elif day == 3:
# print("Wendsday")
# elif day == 4:
# print("Thursday")
# elif day == 5:
# print("Friday")
# elif day == 6:
# print("Saturday")
# #better programming practice for above
# day = int(input("What day is it using (0-6) in place of the day?\n"))
# days = ["Sunday", "Monday", "Tues", "Weds", "Thurs", "Fri", "Sat"]
# #Exercise 5
# day = int(input("What day is it using (0-6) in place of the day?\n"))
# if day == 0:
# print("Sleep in")
# elif day == 6:
# print("Sleep in")
# elif day in (1, 2, 3, 4, 5):  # note: `day == 1 or 2` is always truthy; use a membership test
# print("GO TO WORK!")
# #exercise 6
# celsius = int(input("What is temp in degrees C? "))
# fahr = (celsius * 9/5) + 32
# print("%i F" % (fahr))
# # print(int(celsius * 9/5 + 32) + str(F)) how to get this to concatenate correctly?
# # answer: convert the number to a string first, e.g. print(str(int(celsius * 9/5 + 32)) + " F")
# #exercise 7
# i = 1
# while i < 11:
# print(i)
# i += 1
# # #exercise 8
# start = int(input("Start from: "))
# end = int(input("End on: "))
# while start <= end:
# print(start)
# start += 1
# #exercise 9 - - prob needed a 'while' loop here
# print("*****\n*****\n*****\n*****\n*****\n")
# or
# i = 0
# while i < 5:
# print("*" * 5) # or print("******")
# i += 1
# or
# rows = 5
# stars = "*****\n"
# print(stars * rows)
#exercise 10 - create a X * X size square of * at input of a number
i = int(input("How big is the square? "))
stars = "*" * i
for _ in range(i):  # print i rows of i stars to complete the square
    print(stars)
|
[
"chad.s.reynolds@gmail.com"
] |
chad.s.reynolds@gmail.com
|
150eb61a972bb02a429f3eb79cb6df50bb234b19
|
a01feaa3f62764bd73bdc4c57b32f6fd4019e3e9
|
/dat/week3.py
|
bfb2032451c440af7a63dd7415325654439700bc
|
[] |
no_license
|
jonathanfspencer/data-analyst
|
dcbc085902ace68df18f1fe38630002da4e6f4ce
|
8938b0e3dd24420371fd1a505f2dd1d6d5a13b63
|
refs/heads/master
| 2023-04-10T09:21:07.591711
| 2021-04-20T18:52:00
| 2021-04-20T18:52:00
| 335,087,934
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,220
|
py
|
# -*- coding: utf-8 -*-
"""
Jonathan Spencer
Week 3 Assignment
"""
import pandas
import numpy
import seaborn
import scipy
import matplotlib.pyplot as plt
#Set PANDAS to show all columns in DataFrame
pandas.set_option('display.max_columns', None)
#Set PANDAS to show all rows in DataFrame
pandas.set_option('display.max_rows', None)
# bug fix for display formats to avoid run time errors
pandas.set_option('display.float_format', lambda x:'%f'%x)
data = pandas.read_csv('dat/gapminder.csv', low_memory=False)
data = data.replace(r'^\s*$', numpy.NaN, regex=True)
data['incomeperperson'] = pandas.to_numeric(data['incomeperperson'])
data['femaleemployrate'] = pandas.to_numeric(data['femaleemployrate'])
# H0: There is no correlation between whether most women in a country are employed and the income per capita
# H1: There is a correlation between whether most women in a country are employed and the income per capita
#subset data to remove rows where any of the variables contain missing data
sub1=data.dropna(how='any', subset=['incomeperperson', 'femaleemployrate'])
sub1=sub1[sub1['incomeperperson'] > 0]
# Make a scatter plot to visualize the relationship
fig1, scat1 = plt.subplots()
scat1 = seaborn.regplot(x="femaleemployrate", y="incomeperperson", fit_reg=True, data=sub1, ax=scat1)
scat1.set_xlabel('Female Employment Rate')
scat1.set_ylabel('Income Per Capita')
scat1.set_title('Female Employment Rate and Income Per Capita')
fig1.savefig('dat/femgdp.png')
# Perform a Pearson Correlation Coefficient Test
print ('Association between female employment rate and income per capita')
print (scipy.stats.pearsonr(sub1['femaleemployrate'], sub1['incomeperperson']))
# I realized from the visualization that many countries have a severely depressed GDP relative to others
# H0: There is no correlation between whether most women in a country are employed and the income per capita in countries where the income per capita is greater than $1000USD
# H1: There is a correlation between whether most women in a country are employed and the income per capita in countries where the income per capita is greater than $1000USD
#subset data to remove rows where any of the variables contain missing data
sub2=data.dropna(how='any', subset=['incomeperperson', 'femaleemployrate'])
sub2=sub2[sub2['incomeperperson'] > 1000]
# Make a scatter plot to visualize the relationship
fig2, scat2 = plt.subplots()
scat2 = seaborn.regplot(x="femaleemployrate", y="incomeperperson", fit_reg=True, data=sub2, ax=scat2)
scat2.set_xlabel('Female Employment Rate')
scat2.set_ylabel('Income Per Capita')
scat2.set_title('Female Employment Rate and Income Per Capita')
fig2.savefig('dat/femgdp1000.png')
# Perform a Pearson Correlation Coefficient Test
print ('Association between female employment rate and income per capita in countries where the income per capita is greater than $1000USD')
print (scipy.stats.pearsonr(sub2['femaleemployrate'], sub2['incomeperperson']))
# There was a stronger correlation at $1000 min gdp, so let's try at $5000
# H0: There is no correlation between whether most women in a country are employed and the income per capita in countries where the income per capita is greater than $5000USD
# H1: There is a correlation between whether most women in a country are employed and the income per capita in countries where the income per capita is greater than $5000USD
#subset data to remove rows where any of the variables contain missing data
sub3=data.dropna(how='any', subset=['incomeperperson', 'femaleemployrate'])
sub3=sub3[sub3['incomeperperson'] > 5000]
# Make a scatter plot to visualize the relationship
fig3, scat3 = plt.subplots()
scat3 = seaborn.regplot(x="femaleemployrate", y="incomeperperson", fit_reg=True, data=sub3, ax=scat3)
scat3.set_xlabel('Female Employment Rate')
scat3.set_ylabel('Income Per Capita')
scat3.set_title('Female Employment Rate and Income Per Capita')
fig3.savefig('dat/femgdp5000.png')
# Perform a Pearson Correlation Coefficient Test
print ('Association between female employment rate and income per capita in countries where the income per capita is greater than $5000USD')
print (scipy.stats.pearsonr(sub3['femaleemployrate'], sub3['incomeperperson']))
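# Hedged illustration (added): pearsonr returns the correlation coefficient r
# and its two-sided p-value; squaring r gives the fraction of variance in
# income per capita explained by the female employment rate in each subset.
r, p = scipy.stats.pearsonr(sub3['femaleemployrate'], sub3['incomeperperson'])
print ('r = %f, r^2 = %f, p = %f' % (r, r * r, p))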
|
[
"jonathanfspencer@gmail.com"
] |
jonathanfspencer@gmail.com
|
6186282c3446f83d419a1ac0c4b554890f932503
|
b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1
|
/tensorflow/python/framework/dtypes.py
|
f1c6251c483d69a34f5710f9b346fb9ccd2b713e
|
[
"Apache-2.0"
] |
permissive
|
uve/tensorflow
|
e48cb29f39ed24ee27e81afd1687960682e1fbef
|
e08079463bf43e5963acc41da1f57e95603f8080
|
refs/heads/master
| 2020-11-29T11:30:40.391232
| 2020-01-11T13:43:10
| 2020-01-11T13:43:10
| 230,088,347
| 0
| 0
|
Apache-2.0
| 2019-12-25T10:49:15
| 2019-12-25T10:49:14
| null |
UTF-8
|
Python
| false
| false
| 24,187
|
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library of dtypes (Tensor element types)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import builtins
from tensorflow.core.framework import types_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.util.tf_export import tf_export
_np_bfloat16 = pywrap_tensorflow.TF_bfloat16_type()
@tf_export("dtypes.DType", "DType")
class DType(object):
"""Represents the type of the elements in a `Tensor`.
The following `DType` objects are defined:
* `tf.float16`: 16-bit half-precision floating-point.
* `tf.float32`: 32-bit single-precision floating-point.
* `tf.float64`: 64-bit double-precision floating-point.
* `tf.bfloat16`: 16-bit truncated floating-point.
* `tf.complex64`: 64-bit single-precision complex.
* `tf.complex128`: 128-bit double-precision complex.
* `tf.int8`: 8-bit signed integer.
* `tf.uint8`: 8-bit unsigned integer.
* `tf.uint16`: 16-bit unsigned integer.
* `tf.uint32`: 32-bit unsigned integer.
* `tf.uint64`: 64-bit unsigned integer.
* `tf.int16`: 16-bit signed integer.
* `tf.int32`: 32-bit signed integer.
* `tf.int64`: 64-bit signed integer.
* `tf.bool`: Boolean.
* `tf.string`: String.
* `tf.qint8`: Quantized 8-bit signed integer.
* `tf.quint8`: Quantized 8-bit unsigned integer.
* `tf.qint16`: Quantized 16-bit signed integer.
* `tf.quint16`: Quantized 16-bit unsigned integer.
* `tf.qint32`: Quantized 32-bit signed integer.
* `tf.resource`: Handle to a mutable resource.
* `tf.variant`: Values of arbitrary types.
The `tf.as_dtype()` function converts numpy types and string type
names to a `DType` object.
"""
def __init__(self, type_enum):
"""Creates a new `DataType`.
NOTE(mrry): In normal circumstances, you should not need to
construct a `DataType` object directly. Instead, use the
`tf.as_dtype()` function.
Args:
type_enum: A `types_pb2.DataType` enum value.
Raises:
TypeError: If `type_enum` is not a value `types_pb2.DataType`.
"""
# TODO(mrry): Make the necessary changes (using __new__) to ensure
# that calling this returns one of the interned values.
type_enum = int(type_enum)
if (type_enum not in types_pb2.DataType.values() or
type_enum == types_pb2.DT_INVALID):
raise TypeError("type_enum is not a valid types_pb2.DataType: %s" %
type_enum)
self._type_enum = type_enum
@property
def _is_ref_dtype(self):
"""Returns `True` if this `DType` represents a reference type."""
return self._type_enum > 100
@property
def _as_ref(self):
"""Returns a reference `DType` based on this `DType`."""
if self._is_ref_dtype:
return self
else:
return _INTERN_TABLE[self._type_enum + 100]
@property
def base_dtype(self):
"""Returns a non-reference `DType` based on this `DType`."""
if self._is_ref_dtype:
return _INTERN_TABLE[self._type_enum - 100]
else:
return self
@property
def real_dtype(self):
"""Returns the dtype correspond to this dtype's real part."""
base = self.base_dtype
if base == complex64:
return float32
elif base == complex128:
return float64
else:
return self
@property
def is_numpy_compatible(self):
return self._type_enum not in _NUMPY_INCOMPATIBLE
@property
def as_numpy_dtype(self):
"""Returns a `numpy.dtype` based on this `DType`."""
return _TF_TO_NP[self._type_enum]
@property
def as_datatype_enum(self):
"""Returns a `types_pb2.DataType` enum value based on this `DType`."""
return self._type_enum
@property
def is_bool(self):
"""Returns whether this is a boolean data type"""
return self.base_dtype == bool
@property
def is_integer(self):
"""Returns whether this is a (non-quantized) integer type."""
return (self.is_numpy_compatible and not self.is_quantized and
np.issubdtype(self.as_numpy_dtype, np.integer))
@property
def is_floating(self):
"""Returns whether this is a (non-quantized, real) floating point type."""
return ((self.is_numpy_compatible and
np.issubdtype(self.as_numpy_dtype, np.floating)) or
self.base_dtype == bfloat16)
@property
def is_complex(self):
"""Returns whether this is a complex floating point type."""
return self.base_dtype in (complex64, complex128)
@property
def is_quantized(self):
"""Returns whether this is a quantized data type."""
return self.base_dtype in _QUANTIZED_DTYPES_NO_REF
@property
def is_unsigned(self):
"""Returns whether this type is unsigned.
Non-numeric, unordered, and quantized types are not considered unsigned, and
this function returns `False`.
Returns:
Whether a `DType` is unsigned.
"""
try:
return self.min == 0
except TypeError:
return False
@property
def min(self):
"""Returns the minimum representable value in this data type.
Raises:
TypeError: if this is a non-numeric, unordered, or quantized type.
"""
if (self.is_quantized or
self.base_dtype in (bool, string, complex64, complex128)):
raise TypeError("Cannot find minimum value of %s." % self)
# there is no simple way to get the min value of a dtype, we have to check
# float and int types separately
try:
return np.finfo(self.as_numpy_dtype()).min
except: # bare except as possible raises by finfo not documented
try:
return np.iinfo(self.as_numpy_dtype()).min
except:
if self.base_dtype == bfloat16:
return _np_bfloat16(float.fromhex("-0x1.FEp127"))
raise TypeError("Cannot find minimum value of %s." % self)
@property
def max(self):
"""Returns the maximum representable value in this data type.
Raises:
TypeError: if this is a non-numeric, unordered, or quantized type.
"""
if (self.is_quantized or
self.base_dtype in (bool, string, complex64, complex128)):
raise TypeError("Cannot find maximum value of %s." % self)
# there is no simple way to get the max value of a dtype, we have to check
# float and int types separately
try:
return np.finfo(self.as_numpy_dtype()).max
except: # bare except as possible raises by finfo not documented
try:
return np.iinfo(self.as_numpy_dtype()).max
except:
if self.base_dtype == bfloat16:
return _np_bfloat16(float.fromhex("0x1.FEp127"))
raise TypeError("Cannot find maximum value of %s." % self)
@property
def limits(self, clip_negative=True):
"""Return intensity limits, i.e.
(min, max) tuple, of the dtype.
Args:
clip_negative : bool, optional If True, clip the negative range (i.e.
return 0 for min intensity) even if the image dtype allows negative
values. Returns
min, max : tuple Lower and upper intensity limits.
"""
min, max = dtype_range[self.as_numpy_dtype] # pylint: disable=redefined-builtin
if clip_negative:
min = 0 # pylint: disable=redefined-builtin
return min, max
def is_compatible_with(self, other):
"""Returns True if the `other` DType will be converted to this DType.
The conversion rules are as follows:
```python
DType(T) .is_compatible_with(DType(T)) == True
```
Args:
other: A `DType` (or object that may be converted to a `DType`).
Returns:
True if a Tensor of the `other` `DType` will be implicitly converted to
this `DType`.
"""
other = as_dtype(other)
return self._type_enum in (other.as_datatype_enum,
other.base_dtype.as_datatype_enum)
def __eq__(self, other):
"""Returns True iff this DType refers to the same type as `other`."""
if other is None:
return False
try:
dtype = as_dtype(other).as_datatype_enum
return self._type_enum == dtype # pylint: disable=protected-access
except TypeError:
return False
def __ne__(self, other):
"""Returns True iff self != other."""
return not self.__eq__(other)
@property
def name(self):
"""Returns the string name for this `DType`."""
return _TYPE_TO_STRING[self._type_enum]
def __str__(self):
return "<dtype: %r>" % self.name
def __repr__(self):
return "tf." + self.name
def __hash__(self):
return self._type_enum
def __reduce__(self):
return as_dtype, (self.name,)
@property
def size(self):
if (self._type_enum == types_pb2.DT_VARIANT or
self._type_enum == types_pb2.DT_RESOURCE):
return 1
return np.dtype(self.as_numpy_dtype).itemsize
# Define data type range of numpy dtype
dtype_range = {
np.bool_: (False, True),
np.bool8: (False, True),
np.uint8: (0, 255),
np.uint16: (0, 65535),
np.int8: (-128, 127),
np.int16: (-32768, 32767),
np.int64: (-2**63, 2**63 - 1),
np.uint64: (0, 2**64 - 1),
np.int32: (-2**31, 2**31 - 1),
np.uint32: (0, 2**32 - 1),
np.float32: (-1, 1),
np.float64: (-1, 1)
}
# Define standard wrappers for the types_pb2.DataType enum.
resource = DType(types_pb2.DT_RESOURCE)
tf_export("dtypes.resource", "resource").export_constant(__name__, "resource")
variant = DType(types_pb2.DT_VARIANT)
tf_export("dtypes.variant", "variant").export_constant(__name__, "variant")
float16 = DType(types_pb2.DT_HALF)
tf_export("dtypes.float16", "float16").export_constant(__name__, "float16")
half = float16
tf_export("dtypes.half", "half").export_constant(__name__, "half")
float32 = DType(types_pb2.DT_FLOAT)
tf_export("dtypes.float32", "float32").export_constant(__name__, "float32")
float64 = DType(types_pb2.DT_DOUBLE)
tf_export("dtypes.float64", "float64").export_constant(__name__, "float64")
double = float64
tf_export("dtypes.double", "double").export_constant(__name__, "double")
int32 = DType(types_pb2.DT_INT32)
tf_export("dtypes.int32", "int32").export_constant(__name__, "int32")
uint8 = DType(types_pb2.DT_UINT8)
tf_export("dtypes.uint8", "uint8").export_constant(__name__, "uint8")
uint16 = DType(types_pb2.DT_UINT16)
tf_export("dtypes.uint16", "uint16").export_constant(__name__, "uint16")
uint32 = DType(types_pb2.DT_UINT32)
tf_export("dtypes.uint32", "uint32").export_constant(__name__, "uint32")
uint64 = DType(types_pb2.DT_UINT64)
tf_export("dtypes.uint64", "uint64").export_constant(__name__, "uint64")
int16 = DType(types_pb2.DT_INT16)
tf_export("dtypes.int16", "int16").export_constant(__name__, "int16")
int8 = DType(types_pb2.DT_INT8)
tf_export("dtypes.int8", "int8").export_constant(__name__, "int8")
string = DType(types_pb2.DT_STRING)
tf_export("dtypes.string", "string").export_constant(__name__, "string")
complex64 = DType(types_pb2.DT_COMPLEX64)
tf_export("dtypes.complex64",
"complex64").export_constant(__name__, "complex64")
complex128 = DType(types_pb2.DT_COMPLEX128)
tf_export("dtypes.complex128",
"complex128").export_constant(__name__, "complex128")
int64 = DType(types_pb2.DT_INT64)
tf_export("dtypes.int64", "int64").export_constant(__name__, "int64")
bool = DType(types_pb2.DT_BOOL) # pylint: disable=redefined-builtin
tf_export("dtypes.bool", "bool").export_constant(__name__, "bool")
qint8 = DType(types_pb2.DT_QINT8)
tf_export("dtypes.qint8", "qint8").export_constant(__name__, "qint8")
quint8 = DType(types_pb2.DT_QUINT8)
tf_export("dtypes.quint8", "quint8").export_constant(__name__, "quint8")
qint16 = DType(types_pb2.DT_QINT16)
tf_export("dtypes.qint16", "qint16").export_constant(__name__, "qint16")
quint16 = DType(types_pb2.DT_QUINT16)
tf_export("dtypes.quint16", "quint16").export_constant(__name__, "quint16")
qint32 = DType(types_pb2.DT_QINT32)
tf_export("dtypes.qint32", "qint32").export_constant(__name__, "qint32")
resource_ref = DType(types_pb2.DT_RESOURCE_REF)
variant_ref = DType(types_pb2.DT_VARIANT_REF)
bfloat16 = DType(types_pb2.DT_BFLOAT16)
tf_export("dtypes.bfloat16", "bfloat16").export_constant(__name__, "bfloat16")
float16_ref = DType(types_pb2.DT_HALF_REF)
half_ref = float16_ref
float32_ref = DType(types_pb2.DT_FLOAT_REF)
float64_ref = DType(types_pb2.DT_DOUBLE_REF)
double_ref = float64_ref
int32_ref = DType(types_pb2.DT_INT32_REF)
uint32_ref = DType(types_pb2.DT_UINT32_REF)
uint8_ref = DType(types_pb2.DT_UINT8_REF)
uint16_ref = DType(types_pb2.DT_UINT16_REF)
int16_ref = DType(types_pb2.DT_INT16_REF)
int8_ref = DType(types_pb2.DT_INT8_REF)
string_ref = DType(types_pb2.DT_STRING_REF)
complex64_ref = DType(types_pb2.DT_COMPLEX64_REF)
complex128_ref = DType(types_pb2.DT_COMPLEX128_REF)
int64_ref = DType(types_pb2.DT_INT64_REF)
uint64_ref = DType(types_pb2.DT_UINT64_REF)
bool_ref = DType(types_pb2.DT_BOOL_REF)
qint8_ref = DType(types_pb2.DT_QINT8_REF)
quint8_ref = DType(types_pb2.DT_QUINT8_REF)
qint16_ref = DType(types_pb2.DT_QINT16_REF)
quint16_ref = DType(types_pb2.DT_QUINT16_REF)
qint32_ref = DType(types_pb2.DT_QINT32_REF)
bfloat16_ref = DType(types_pb2.DT_BFLOAT16_REF)
_NUMPY_INCOMPATIBLE = frozenset([
types_pb2.DT_VARIANT, types_pb2.DT_VARIANT_REF, types_pb2.DT_RESOURCE,
types_pb2.DT_RESOURCE_REF
])
# Maintain an intern table so that we don't have to create a large
# number of small objects.
_INTERN_TABLE = {
types_pb2.DT_HALF: float16,
types_pb2.DT_FLOAT: float32,
types_pb2.DT_DOUBLE: float64,
types_pb2.DT_INT32: int32,
types_pb2.DT_UINT8: uint8,
types_pb2.DT_UINT16: uint16,
types_pb2.DT_UINT32: uint32,
types_pb2.DT_UINT64: uint64,
types_pb2.DT_INT16: int16,
types_pb2.DT_INT8: int8,
types_pb2.DT_STRING: string,
types_pb2.DT_COMPLEX64: complex64,
types_pb2.DT_COMPLEX128: complex128,
types_pb2.DT_INT64: int64,
types_pb2.DT_BOOL: bool,
types_pb2.DT_QINT8: qint8,
types_pb2.DT_QUINT8: quint8,
types_pb2.DT_QINT16: qint16,
types_pb2.DT_QUINT16: quint16,
types_pb2.DT_QINT32: qint32,
types_pb2.DT_BFLOAT16: bfloat16,
types_pb2.DT_RESOURCE: resource,
types_pb2.DT_VARIANT: variant,
types_pb2.DT_HALF_REF: float16_ref,
types_pb2.DT_FLOAT_REF: float32_ref,
types_pb2.DT_DOUBLE_REF: float64_ref,
types_pb2.DT_INT32_REF: int32_ref,
types_pb2.DT_UINT32_REF: uint32_ref,
types_pb2.DT_UINT8_REF: uint8_ref,
types_pb2.DT_UINT16_REF: uint16_ref,
types_pb2.DT_INT16_REF: int16_ref,
types_pb2.DT_INT8_REF: int8_ref,
types_pb2.DT_STRING_REF: string_ref,
types_pb2.DT_COMPLEX64_REF: complex64_ref,
types_pb2.DT_COMPLEX128_REF: complex128_ref,
types_pb2.DT_INT64_REF: int64_ref,
types_pb2.DT_UINT64_REF: uint64_ref,
types_pb2.DT_BOOL_REF: bool_ref,
types_pb2.DT_QINT8_REF: qint8_ref,
types_pb2.DT_QUINT8_REF: quint8_ref,
types_pb2.DT_QINT16_REF: qint16_ref,
types_pb2.DT_QUINT16_REF: quint16_ref,
types_pb2.DT_QINT32_REF: qint32_ref,
types_pb2.DT_BFLOAT16_REF: bfloat16_ref,
types_pb2.DT_RESOURCE_REF: resource_ref,
types_pb2.DT_VARIANT_REF: variant_ref,
}
# Standard mappings between types_pb2.DataType values and string names.
_TYPE_TO_STRING = {
types_pb2.DT_HALF: "float16",
types_pb2.DT_FLOAT: "float32",
types_pb2.DT_DOUBLE: "float64",
types_pb2.DT_INT32: "int32",
types_pb2.DT_UINT8: "uint8",
types_pb2.DT_UINT16: "uint16",
types_pb2.DT_UINT32: "uint32",
types_pb2.DT_UINT64: "uint64",
types_pb2.DT_INT16: "int16",
types_pb2.DT_INT8: "int8",
types_pb2.DT_STRING: "string",
types_pb2.DT_COMPLEX64: "complex64",
types_pb2.DT_COMPLEX128: "complex128",
types_pb2.DT_INT64: "int64",
types_pb2.DT_BOOL: "bool",
types_pb2.DT_QINT8: "qint8",
types_pb2.DT_QUINT8: "quint8",
types_pb2.DT_QINT16: "qint16",
types_pb2.DT_QUINT16: "quint16",
types_pb2.DT_QINT32: "qint32",
types_pb2.DT_BFLOAT16: "bfloat16",
types_pb2.DT_RESOURCE: "resource",
types_pb2.DT_VARIANT: "variant",
types_pb2.DT_HALF_REF: "float16_ref",
types_pb2.DT_FLOAT_REF: "float32_ref",
types_pb2.DT_DOUBLE_REF: "float64_ref",
types_pb2.DT_INT32_REF: "int32_ref",
types_pb2.DT_UINT32_REF: "uint32_ref",
types_pb2.DT_UINT8_REF: "uint8_ref",
types_pb2.DT_UINT16_REF: "uint16_ref",
types_pb2.DT_INT16_REF: "int16_ref",
types_pb2.DT_INT8_REF: "int8_ref",
types_pb2.DT_STRING_REF: "string_ref",
types_pb2.DT_COMPLEX64_REF: "complex64_ref",
types_pb2.DT_COMPLEX128_REF: "complex128_ref",
types_pb2.DT_INT64_REF: "int64_ref",
types_pb2.DT_UINT64_REF: "uint64_ref",
types_pb2.DT_BOOL_REF: "bool_ref",
types_pb2.DT_QINT8_REF: "qint8_ref",
types_pb2.DT_QUINT8_REF: "quint8_ref",
types_pb2.DT_QINT16_REF: "qint16_ref",
types_pb2.DT_QUINT16_REF: "quint16_ref",
types_pb2.DT_QINT32_REF: "qint32_ref",
types_pb2.DT_BFLOAT16_REF: "bfloat16_ref",
types_pb2.DT_RESOURCE_REF: "resource_ref",
types_pb2.DT_VARIANT_REF: "variant_ref",
}
_STRING_TO_TF = {
value: _INTERN_TABLE[key] for key, value in _TYPE_TO_STRING.items()
}
# Add non-canonical aliases.
_STRING_TO_TF["half"] = float16
_STRING_TO_TF["half_ref"] = float16_ref
_STRING_TO_TF["float"] = float32
_STRING_TO_TF["float_ref"] = float32_ref
_STRING_TO_TF["double"] = float64
_STRING_TO_TF["double_ref"] = float64_ref
# Numpy representation for quantized dtypes.
#
# These are magic strings that are used in the swig wrapper to identify
# quantized types.
# TODO(mrry,keveman): Investigate Numpy type registration to replace this
# hard-coding of names.
_np_qint8 = np.dtype([("qint8", np.int8)])
_np_quint8 = np.dtype([("quint8", np.uint8)])
_np_qint16 = np.dtype([("qint16", np.int16)])
_np_quint16 = np.dtype([("quint16", np.uint16)])
_np_qint32 = np.dtype([("qint32", np.int32)])
# _np_bfloat16 is defined by a module import.
# Custom struct dtype for directly-fed ResourceHandles of supported type(s).
np_resource = np.dtype([("resource", np.ubyte)])
# Standard mappings between types_pb2.DataType values and numpy.dtypes.
_NP_TO_TF = {
np.float16: float16,
np.float32: float32,
np.float64: float64,
np.int32: int32,
np.int64: int64,
np.uint8: uint8,
np.uint16: uint16,
np.uint32: uint32,
np.uint64: uint64,
np.int16: int16,
np.int8: int8,
np.complex64: complex64,
np.complex128: complex128,
np.object_: string,
np.string_: string,
np.unicode_: string,
np.bool_: bool,
_np_qint8: qint8,
_np_quint8: quint8,
_np_qint16: qint16,
_np_quint16: quint16,
_np_qint32: qint32,
_np_bfloat16: bfloat16,
}
# Map (some) NumPy platform dtypes to TF ones using their fixed-width
# synonyms. Note that platform dtypes are not always simples aliases,
# i.e. reference equality is not guaranteed. See e.g. numpy/numpy#9799.
for pdt in [
np.intc,
np.uintc,
np.int_,
np.uint,
np.longlong,
np.ulonglong,
]:
if pdt not in _NP_TO_TF:
_NP_TO_TF[pdt] = next(
_NP_TO_TF[dt] for dt in _NP_TO_TF if dt == pdt().dtype)
TF_VALUE_DTYPES = set(_NP_TO_TF.values())
_TF_TO_NP = {
types_pb2.DT_HALF:
np.float16,
types_pb2.DT_FLOAT:
np.float32,
types_pb2.DT_DOUBLE:
np.float64,
types_pb2.DT_INT32:
np.int32,
types_pb2.DT_UINT8:
np.uint8,
types_pb2.DT_UINT16:
np.uint16,
types_pb2.DT_UINT32:
np.uint32,
types_pb2.DT_UINT64:
np.uint64,
types_pb2.DT_INT16:
np.int16,
types_pb2.DT_INT8:
np.int8,
# NOTE(touts): For strings we use np.object as it supports variable length
# strings.
types_pb2.DT_STRING:
np.object,
types_pb2.DT_COMPLEX64:
np.complex64,
types_pb2.DT_COMPLEX128:
np.complex128,
types_pb2.DT_INT64:
np.int64,
types_pb2.DT_BOOL:
np.bool,
types_pb2.DT_QINT8:
_np_qint8,
types_pb2.DT_QUINT8:
_np_quint8,
types_pb2.DT_QINT16:
_np_qint16,
types_pb2.DT_QUINT16:
_np_quint16,
types_pb2.DT_QINT32:
_np_qint32,
types_pb2.DT_BFLOAT16:
_np_bfloat16,
# Ref types
types_pb2.DT_HALF_REF:
np.float16,
types_pb2.DT_FLOAT_REF:
np.float32,
types_pb2.DT_DOUBLE_REF:
np.float64,
types_pb2.DT_INT32_REF:
np.int32,
types_pb2.DT_UINT32_REF:
np.uint32,
types_pb2.DT_UINT8_REF:
np.uint8,
types_pb2.DT_UINT16_REF:
np.uint16,
types_pb2.DT_INT16_REF:
np.int16,
types_pb2.DT_INT8_REF:
np.int8,
types_pb2.DT_STRING_REF:
np.object,
types_pb2.DT_COMPLEX64_REF:
np.complex64,
types_pb2.DT_COMPLEX128_REF:
np.complex128,
types_pb2.DT_INT64_REF:
np.int64,
types_pb2.DT_UINT64_REF:
np.uint64,
types_pb2.DT_BOOL_REF:
np.bool,
types_pb2.DT_QINT8_REF:
_np_qint8,
types_pb2.DT_QUINT8_REF:
_np_quint8,
types_pb2.DT_QINT16_REF:
_np_qint16,
types_pb2.DT_QUINT16_REF:
_np_quint16,
types_pb2.DT_QINT32_REF:
_np_qint32,
types_pb2.DT_BFLOAT16_REF:
_np_bfloat16,
}
_QUANTIZED_DTYPES_NO_REF = frozenset([qint8, quint8, qint16, quint16, qint32])
_QUANTIZED_DTYPES_REF = frozenset(
[qint8_ref, quint8_ref, qint16_ref, quint16_ref, qint32_ref])
QUANTIZED_DTYPES = _QUANTIZED_DTYPES_REF.union(_QUANTIZED_DTYPES_NO_REF)
tf_export(
"dtypes.QUANTIZED_DTYPES",
v1=["dtypes.QUANTIZED_DTYPES",
"QUANTIZED_DTYPES"]).export_constant(__name__, "QUANTIZED_DTYPES")
_PYTHON_TO_TF = {
builtins.float: float32,
builtins.bool: bool,
builtins.object: string
}
_ANY_TO_TF = {}
_ANY_TO_TF.update(_INTERN_TABLE)
_ANY_TO_TF.update(_STRING_TO_TF)
_ANY_TO_TF.update(_PYTHON_TO_TF)
_ANY_TO_TF.update(_NP_TO_TF)
# Ensure no collisions.
assert len(_ANY_TO_TF) == sum(
len(d) for d in [_INTERN_TABLE, _STRING_TO_TF, _PYTHON_TO_TF, _NP_TO_TF])
@tf_export("dtypes.as_dtype", "as_dtype")
def as_dtype(type_value):
"""Converts the given `type_value` to a `DType`.
Args:
type_value: A value that can be converted to a `tf.DType` object. This may
currently be a `tf.DType` object, a [`DataType`
enum](https://www.tensorflow.org/code/tensorflow/core/framework/types.proto),
a string type name, or a `numpy.dtype`.
Returns:
A `DType` corresponding to `type_value`.
Raises:
TypeError: If `type_value` cannot be converted to a `DType`.
"""
if isinstance(type_value, DType):
return type_value
if isinstance(type_value, np.dtype):
try:
return _NP_TO_TF[type_value.type]
except KeyError:
pass
try:
return _ANY_TO_TF[type_value]
except KeyError:
pass
raise TypeError("Cannot convert value %r to a TensorFlow DType." %
(type_value,))
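# Hedged usage sketch (illustrative addition, not part of upstream TensorFlow):
# as_dtype() accepts DType objects, strings, and numpy dtypes interchangeably
# and always returns the interned singleton, so identity comparisons hold.
if __name__ == "__main__":
  assert as_dtype("float32") is float32
  assert as_dtype(np.int64) is int64
  assert as_dtype(float16) is float16
  print("as_dtype round-trips OK")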
|
[
"v-grniki@microsoft.com"
] |
v-grniki@microsoft.com
|
6b27894d6781b7a1a591111f28f575c8857b9a50
|
fb3061b2e8752e59e59d0f54f8a9637c25bcc8f4
|
/Tristan/randomnumber.py
|
0dd78cf9ab6872d179441ebf8ea0bba5d6a197ba
|
[] |
no_license
|
idcrypt3/camp_2019_07_14
|
25383c67dbd2d44ad9e5966b2666d545688a4d36
|
708cccb313181fc15dc7aa45699024b9b44ba275
|
refs/heads/master
| 2020-06-20T06:21:37.907263
| 2019-07-19T18:53:17
| 2019-07-19T18:53:17
| 197,023,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 42
|
py
|
import random
print(random.randint(1, 20))
|
[
"idcrypt3@gmail.com"
] |
idcrypt3@gmail.com
|
f94dc3e35df3d080642dc8f8fd2a3ffb9b4675a5
|
0d2c2ffe431b159a87bcd78c97147422dce8d778
|
/GUI学习/01PyQt5快速开发与实战/ch05高级界面控件/11timer2.py
|
c00045f390bd96d04ec0f63ccf8a09b77033800c
|
[] |
no_license
|
YuanXianguo/Python-Project-ITC
|
9e297fc1e1e8ec2b136e6e8b1db0afaaba81c16c
|
afd14cbe501147ec66b4aa0c1c7907b3ae41d148
|
refs/heads/master
| 2020-04-16T13:54:33.727825
| 2019-12-20T02:16:52
| 2019-12-20T02:16:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 508
|
py
|
import sys
from PyQt5.QtWidgets import QApplication, QLabel
from PyQt5.QtCore import Qt, QTimer
def test():
print(1)
if __name__ == '__main__':
app = QApplication(sys.argv)
    label = QLabel('<font color=red size=128><b>'
                   'Hello PyQt, this window will disappear in 3 seconds!</b></font>')
    # frameless, splash-style window
    label.setWindowFlags(Qt.SplashScreen | Qt.FramelessWindowHint)
    label.show()
    # fire test() 3 seconds (3000 ms) later; swap in app.quit to actually close the window
    QTimer.singleShot(3000, test)
sys.exit(app.exec_())
|
[
"736913978@qq.com"
] |
736913978@qq.com
|
6f08a86ea414a778c093cdd193e66adf1fa27fb9
|
6219e6536774e8eeb4cadc4a84f6f2bea376c1b0
|
/scraper/storage_spiders/kuchevn.py
|
a9cc915cd7f15c2358aed743c2373312c26e7f93
|
[
"MIT"
] |
permissive
|
nguyenminhthai/choinho
|
109d354b410b92784a9737f020894d073bea1534
|
d2a216fe7a5064d73cdee3e928a7beef7f511fd1
|
refs/heads/master
| 2023-05-07T16:51:46.667755
| 2019-10-22T07:53:41
| 2019-10-22T07:53:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,034
|
py
|
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='page_title']/h1",
'price' : "//meta[@property='og:price:amount']/@content",
'category' : "",
'description' : "//div[@class='tab-container']/div[@class='pd_description_content tab-content clearfix ui-tabs-panel ui-widget-content ui-corner-bottom']",
'images' : "//meta[@property='og:image'][1]/@content",
'canonical' : "//link[@rel='canonical']/@href",
'base_url' : "",
'brand' : "",
'in_stock' : "",
'guarantee' : "",
'promotion' : ""
}
name = 'kuche.vn'
allowed_domains = ['kuche.vn']
start_urls = ['http://kuche.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = ['']
rules = [
#Rule(LinkExtractor(), 'parse_item'),
#Rule(LinkExtractor(), 'parse'),
    Rule(LinkExtractor(allow=[r'/[a-zA-Z0-9-]+($|\?page=\d+$)']), 'parse_item_and_links'),
]
|
[
"nguyenchungthuy.hust@gmail.com"
] |
nguyenchungthuy.hust@gmail.com
|
d4cef37cd7a9b3cca60e775fd7bc904a27ceeef8
|
e30cb969dde7e4267519f1c6921d4ab724fff2ea
|
/order/queueMessaging/connector.py
|
48a866cbb9b599d8821e4b2b2875ad3c73de257c
|
[] |
no_license
|
Platonov2/nepomoika
|
8e13a11b8fd0137e959867a0e9a922fabf150aa0
|
e5d4503c436aaf7131e0c1fe9d5a8cd7764f29e2
|
refs/heads/master
| 2023-03-26T03:05:25.099971
| 2021-03-30T05:41:43
| 2021-03-30T05:41:43
| 340,888,187
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 751
|
py
|
import pika
import os
class Connector:
    def __init__(self):
        self.credentials = pika.PlainCredentials(os.environ['RABBITMQ_DEFAULT_USER'], os.environ['RABBITMQ_DEFAULT_PASS'])
        self.connection = None
        self.channel = None
    def connect(self):
        self.connection = pika.BlockingConnection(pika.ConnectionParameters('rabbit', 5672, '/',
                                                                            credentials=self.credentials))
        self.channel = self.connection.channel()
    def perform_setup_queue_infrastructure(self):
        if self.channel is None:
            raise RuntimeError("First you need to use connect method")
        self.channel.queue_declare(queue='message_queue_order', durable=True)
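# Hedged usage sketch (assumes a reachable RabbitMQ host named 'rabbit' and the
# two RABBITMQ_* environment variables set, as the class above requires):
if __name__ == "__main__":
    connector = Connector()
    connector.connect()
    connector.perform_setup_queue_infrastructure()
    connector.channel.basic_publish(exchange='',
                                    routing_key='message_queue_order',
                                    body='hello')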
|
[
"uytera12@yandex.ru"
] |
uytera12@yandex.ru
|
d5574e4e9fc5e91915817a374d447d7d44106be0
|
ca08a9b9c05d95aa7532098231dd9ef17ad3247c
|
/models/model_SR.py
|
5908a801a21f2f7816f5318f3bcefd3529e06dd5
|
[] |
no_license
|
Mbamin/successor-features-A2C
|
ae8aa3e107e730843e4ad04d16c76dba660ae29f
|
cc0875d4fa8e6f9550475d6073a1de8e219a9c38
|
refs/heads/master
| 2022-12-26T21:22:50.527379
| 2020-10-01T19:59:14
| 2020-10-01T19:59:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,739
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
import torch_ac
#import torch_rbf as rbf
import numpy as np
#from utils.ssps import *
from gym.spaces import Discrete, Box
# Function from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/model.py
def init_params(m):
classname = m.__class__.__name__
if classname.find("Linear") != -1:
m.weight.data.normal_(0, 1)
m.weight.data *= 1 / torch.sqrt(m.weight.data.pow(2).sum(1, keepdim=True))
if m.bias is not None:
m.bias.data.fill_(0)
class SRModel(nn.Module, torch_ac.RecurrentACModel):
def __init__(self, obs_space, action_space, use_memory=False, use_text=False,
input_type="image", feature_learn="curiosity"):
super().__init__()
# Decide which components are enabled
self.use_text = use_text
self.use_memory = use_memory
self.n_actions = action_space.n
self.feature_learn = feature_learn
if input_type == "image":
self.feature_in = ImageInput(obs_space,use_memory,use_text)
elif input_type=="flat":
self.feature_in = FlatInput(obs_space,use_memory,use_text)
self.image_embedding_size = self.feature_in.input_embedding_size
self.embedding_size = self.feature_in.embedding_size
if feature_learn=="reconstruction" and input_type=="image":
self.feature_out = ImageReconstruction()
elif feature_learn=="reconstruction" and input_type=="flat":
self.feature_out = FlatReconstruction(obs_space["image"].shape[0])
elif feature_learn=="curiosity":
self.feature_out = Curiosity(self.embedding_size,self.n_actions)
# Define reward model
self.reward = nn.Linear(self.embedding_size, 1, bias=False)
# Define SR model
self.SR = nn.Sequential(
nn.Linear(self.embedding_size, self.embedding_size),
nn.Tanh()
)
# Define actor's model
if type(action_space) == Box:
self.actor = ContinuousActor(self.embedding_size,self.n_actions)
self.continuous_action = True
else:
self.actor = DiscreteActor(self.embedding_size,self.n_actions)
self.continuous_action = False
# Initialize parameters correctly
self.apply(init_params)
@property
def memory_size(self):
return 2*self.semi_memory_size
@property
def semi_memory_size(self):
return self.image_embedding_size
def forward(self, obs, action=None, next_obs=None, memory=None):
embedding, memory = self.feature_in(obs, memory)
if action is not None:
next_embedding, _ = self.feature_in(next_obs, memory)
predictions = self.feature_out(embedding, next_embedding = next_embedding,action=action, next_obs=next_obs, memory=memory)
else:
predictions = None
dist = self.actor(embedding)
successor = self.SR(embedding) + embedding
r = self.reward(embedding)
reward = r.squeeze(1)
with torch.no_grad():
value = self.reward(successor)
value = value.squeeze(1)
return dist, value, embedding, predictions, successor, reward, memory
### Modules for getting feature representation from raw input
## Add-ons that use memory (LSTMs) and text input
class InputModule(nn.Module):
def __init__(self,obs_space, input_embedding_size, use_text, use_memory):
super(InputModule, self).__init__()
self.use_text = use_text
self.use_memory = use_memory
self.input_embedding_size = input_embedding_size
# Define memory
if self.use_memory:
self.memory_rnn = nn.LSTMCell(self.input_embedding_size, self.input_embedding_size)
# Define text embedding
if self.use_text:
self.word_embedding_size = 32
self.word_embedding = nn.Embedding(obs_space["text"], self.word_embedding_size)
self.text_embedding_size = 128
self.text_rnn = nn.GRU(self.word_embedding_size, self.text_embedding_size, batch_first=True)
# Resize image embedding
self.embedding_size = self.input_embedding_size
if self.use_text:
self.embedding_size += self.text_embedding_size
    def forward(self, obs, x, memory):
        if self.use_memory:
            semi = self.input_embedding_size  # each half of the memory tensor holds one LSTM state
            hidden = (memory[:, :semi].clone(), memory[:, semi:].clone())
            hidden = self.memory_rnn(x, hidden)
            embedding = hidden[0].clone()
            memory = torch.cat(hidden, dim=1)
        else:
            embedding = x
        if self.use_text:
            embed_text = self._get_embed_text(obs.text)
            embedding = torch.cat((embedding, embed_text), dim=1)
        return embedding, memory
    def _get_embed_text(self, text):
        # the text GRU and word embedding live on this module
        _, hidden = self.text_rnn(self.word_embedding(text))
        return hidden[-1]
## Features from image data
class ImageInput(nn.Module):
def __init__(self, obs_space, use_text, use_memory):
super(ImageInput, self).__init__()
self.use_text = use_text
self.use_memory = use_memory
n = obs_space["image"][0]
m = obs_space["image"][1]
self.input_embedding_size = ((n-1)//2-2)*((m-1)//2-2)*64
self.image_conv = nn.Sequential(
nn.Conv2d(3, 16, (2, 2)),
nn.ReLU(),
nn.MaxPool2d((2, 2)),
nn.Conv2d(16, 32, (2, 2)),
nn.ReLU(),
nn.Conv2d(32, 64, (2, 2)),
nn.Tanh()
)
self.other = InputModule(obs_space, self.input_embedding_size, use_text, use_memory)
self.embedding_size = self.other.embedding_size
def forward(self, obs, memory):
x = obs.image.transpose(1, 3).transpose(2, 3)
embedding = self.image_conv(x)
x = embedding.reshape(embedding.shape[0], -1)
embedding, memory = self.other(obs, x, memory)
return embedding, memory
## Features from flat input (e.g. one hot, ssps)
class FlatInput(nn.Module):
def __init__(self, obs_space, use_text, use_memory, input_embedding_size=200, hidden_size=256):
super(FlatInput, self).__init__()
self.input_dim = obs_space["image"][0]
        self.input_embedding_size = self.input_dim  # the input_embedding_size parameter is left unused; features keep the input dimension
        self.layers = nn.Sequential(
            nn.Linear(self.input_dim, self.input_embedding_size),
            nn.Tanh()
        )
self.other = InputModule(obs_space, self.input_embedding_size, use_text, use_memory)
self.embedding_size = self.other.embedding_size
def forward(self, obs, memory):
x = obs.image
x = self.layers(x)
embedding, memory = self.other(obs, x, memory)
return embedding, memory
### Modules for getting predictions used for feature learning
## Auto encoder type
class ImageReconstruction(nn.Module):
def __init__(self):
super(ImageReconstruction, self).__init__()
self.decoder = nn.Sequential(
nn.ConvTranspose2d(64, 32, (2, 2)),
nn.ReLU(),
nn.ConvTranspose2d(32, 16, (2, 2)),
nn.ReLU(),
nn.ConvTranspose2d(16, 16, (4, 4)),
nn.ReLU(),
nn.ConvTranspose2d(16, 3, (2, 2))
)
def forward(self, embedding, **kwargs):
obs_pred = self.decoder(embedding.reshape(-1,64,1,1))
obs_pred = obs_pred.transpose(3, 2).transpose(1, 3)
return obs_pred
class FlatReconstruction(nn.Module):
    def __init__(self, output_size):
        super(FlatReconstruction, self).__init__()
        # decode the flat embedding back to an observation of size output_size;
        # for flat inputs the embedding dimension equals the input dimension,
        # so a single linear layer suffices
        self.decoder = nn.Sequential(
            nn.Linear(output_size, output_size)
        )
    def forward(self, embedding, **kwargs):
        obs_pred = self.decoder(embedding)
        return obs_pred
## curiosity type
class Curiosity(nn.Module):
def __init__(self,embedding_size,n_actions):
super(Curiosity, self).__init__()
self.embedding_size = embedding_size
self.n_actions = n_actions
self.forward_model = nn.Sequential(
nn.Linear(self.embedding_size + self.n_actions, 256),
nn.ReLU(),
nn.Linear(256, self.embedding_size),
nn.Tanh()
)
self.inverse_model = nn.Sequential(
nn.Linear(self.embedding_size*2, 64),
nn.ReLU(),
nn.Linear(64, self.n_actions),
nn.LogSigmoid()
)
def forward(self, embedding, next_embedding, action, next_obs, memory):
if self.n_actions > 1:
action = F.one_hot(action.long(), num_classes=self.n_actions).float()
else:
action = action.float()
forward_input = torch.cat((embedding, action), 1)
next_obs_pred = self.forward_model(forward_input)
inverse_input = torch.cat((embedding, next_embedding), 1)
action_pred = self.inverse_model(inverse_input)
return [next_embedding, next_obs_pred, action_pred]
## Actor Modules
class DiscreteActor(nn.Module):
def __init__(self,embedding_size, n_actions):
super(DiscreteActor, self).__init__()
self.n_actions = n_actions
self.embedding_size = embedding_size
self.actor_layers = nn.Sequential(
nn.Linear(self.embedding_size, 64),
nn.Tanh(),
nn.Linear(64, self.n_actions)
)
def forward(self, embedding):
x = self.actor_layers(embedding)
dist = Categorical(logits=F.log_softmax(x, dim=1))
return dist
class ContinuousActor(nn.Module):
def __init__(self,embedding_size, n_actions):
super(ContinuousActor, self).__init__()
self.embedding_size = embedding_size
self.n_actions = n_actions
self.actor = nn.Sequential(
nn.Linear(self.embedding_size, 40),
nn.ReLU()
)
self.mean = nn.Sequential(
nn.Linear(40, self.n_actions),
)
self.var = nn.Sequential(
nn.Linear(40, self.n_actions),
nn.Softplus()
)
def forward(self, embedding):
x = self.actor(embedding)
mean = self.mean(x)
scale = self.var(x) + 1e-7
dist = torch.distributions.normal.Normal(mean, scale)
return dist
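# Hedged note (added): SRModel.forward above follows the successor-feature
# decomposition V(s) = w . psi(s), where psi(s) = SR(phi(s)) + phi(s) is the
# (residual) successor representation of the state features phi(s) and w is
# the learned reward head. A minimal standalone illustration of that value
# computation, with made-up sizes:
if __name__ == "__main__":
    emb = torch.randn(4, 8)                          # phi(s): a batch of state features
    sr_head = nn.Sequential(nn.Linear(8, 8), nn.Tanh())
    w = nn.Linear(8, 1, bias=False)                  # reward weights
    psi = sr_head(emb) + emb                         # successor features, residual form
    value = w(psi).squeeze(1)                        # V(s) = w . psi(s)
    print(value.shape)                               # torch.Size([4])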
|
[
"nicole.s.dumont@gmail.com"
] |
nicole.s.dumont@gmail.com
|
23fb0fe5d6efc75907bc0f232f107ae59b59db7e
|
85f09715fec929f094b8b0144624731f0d7ad44f
|
/tests/test_factories.py
|
edea1e00945c3718f8bf03528abb51cee47dab48
|
[] |
no_license
|
Bernardstanislas/api-particulier-portail
|
76c87183c03917ade91e1d935cafff573f87815a
|
237d02527ee948cd4ec9f702736b4127966d2f9a
|
refs/heads/master
| 2022-10-22T01:58:15.675400
| 2020-06-15T14:29:46
| 2020-06-15T14:31:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
from app.db import Api
from security.hasher import hash_api_key
from gateway.models import ApiKey
def test_fixtures_loading():
assert len(Api.query.all()) > 0
assert len(ApiKey.query.all()) > 0
def test_api_key_generation(test_api_key):
assert test_api_key
def test_api_key_hash(test_api_key, test_api_key_value):
assert hash_api_key(test_api_key_value) == test_api_key.hashed_key
|
[
"bernardstanislas@gmail.com"
] |
bernardstanislas@gmail.com
|
a5b65d3bad29412dc9af65b993d04e460edb0739
|
ec6079855db9e3e873cc9ada966da2e0f7be2632
|
/Competitive Programming/CodeChefContestApr21/WordsFrSameBaseAlphabet.py
|
2816e461422a6b7c542b1b483321f4101d20091d
|
[] |
no_license
|
tylerprehl/Computer-Science-Club
|
92e5856a16c07728a076e0c8443458355ad86e20
|
2ce670ead87cf42c5087675e1a8b026d64d70311
|
refs/heads/master
| 2023-05-22T20:52:31.150087
| 2021-06-09T20:06:45
| 2021-06-09T20:06:45
| 344,685,573
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,181
|
py
|
'''
Specification
In this problem you are asked to write a program to determine if two words
come from the same base alphabet. When one word is a rearrangement of
letters of the other, the two words are called “anagrams”, like “rose” and
“sore”. But this problem asks you to determine if two words use the same
letters, not necessarily use equal numbers of them. For example, “curse”
and “rescue” come from the same base alphabet, but “cure” does not, since
its base alphabet does not contain the “s” that both “curse” and “rescue”
use.
Input Format
The input consists of multiple lines, each line with a pair of words
separated by spaces. The words have only lowercase letters.
Output:
Output Yes or No for each line of input.
'''
while True:
try:
inp = input()
except EOFError:
break
if inp == "":
break
splitInp = inp.split()
words = []
for word in splitInp:
letters = set()
for letter in word:
letters.add(letter)
words.append(letters)
# print(words)
if words[0] == words[1]:
print("Yes")
else:
print("No")
|
[
"tyler@prehl.us"
] |
tyler@prehl.us
|
f2bc688ca9d8b92a0e9c371c827cc04b050a745e
|
63cf26ee97b2c2d97aa951c319b75c340cd839ec
|
/catalog/migrations/0006_auto_20181105_0804.py
|
9b94939975ccba435e6ac58660e687de5e57e0c1
|
[] |
no_license
|
Junlli/django_local_library
|
4a7a2a155380251e8eff42347cc5a195a30e6adf
|
c54541bf922d0bb180228263038f505e4f54bab6
|
refs/heads/master
| 2020-03-27T22:21:59.610300
| 2018-11-28T16:06:18
| 2018-11-28T16:06:18
| 147,224,496
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 545
|
py
|
# Generated by Django 2.0.7 on 2018-11-05 08:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalog', '0005_auto_20180829_1126'),
]
operations = [
migrations.AlterModelOptions(
name='author',
options={'ordering': ['last_name', 'first_name']},
),
migrations.AddField(
model_name='book',
name='image',
field=models.ImageField(blank=True, upload_to='profile_image'),
),
]
|
[
"279110089@qq.com"
] |
279110089@qq.com
|
a11c216ccd83de27c2498fc31e7adcb24de5c462
|
69f83bcff8a2bd9c8ef082a2141a39a5322c4b2a
|
/pyenv/env/lib/python2.7/site-packages/transport/tester.py
|
a0c92e0c385d219b834498f737ba0f7ed0dcd5a7
|
[] |
no_license
|
md848-cornell/NRF-ROKIX-sensor-mesh
|
ab12f6572a992ed5c468eb08b8c4586b52b411b2
|
b244207af0fb0fce6e2722c384d3c6c25d5ac025
|
refs/heads/master
| 2020-05-21T10:56:15.013174
| 2019-05-16T16:12:11
| 2019-05-16T16:12:11
| 186,021,295
| 0
| 1
| null | 2020-03-07T21:39:41
| 2019-05-10T16:35:25
|
C
|
UTF-8
|
Python
| false
| false
| 679
|
py
|
"""
Copyright (c) 2017 Nordic Semiconductor ASA
CoAP transport class for tests.
"""
from transport.base import TransportBase
from ipaddress import ip_address
class TesterTransport(TransportBase):
def __init__(self, port=None):
TransportBase.__init__(self, port)
self.tester_opened = False
self.tester_data = None
self.tester_remote = None
self.output_count = 0
def open(self):
self.tester_opened = True
def close(self):
self.tester_opened = False
def send(self, data, dest):
self.tester_data = data
self.tester_remote = dest
self.output_count += 1
|
[
"Mike DiDomenico"
] |
Mike DiDomenico
|
4808cbaedeec5b5afd0caf7172bca3b9c3bb2900
|
557ca4eae50206ecb8b19639cab249cb2d376f30
|
/Chapter04/spiral.py
|
b642ee9c1d01400018b8cff8264cad308b034929
|
[] |
no_license
|
philipdongfei/Think-python-2nd
|
781846f455155245e7e82900ea002f1cf490c43f
|
56e2355b8d5b34ffcee61b38fbfd200fd6d4ffaf
|
refs/heads/master
| 2021-01-09T19:57:49.658680
| 2020-03-13T06:32:11
| 2020-03-13T06:32:11
| 242,441,512
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 340
|
py
|
import turtle
def draw_spiral(t, n, length=3, a=0.1, b=0.0002):
    """Draw a spiral with n segments of the given length.
    a and b control how tightly the spiral winds: the turn angle per segment
    is 1 / (a + b * theta), so it shrinks as the cumulative angle theta grows.
    """
    theta = 0.0
    for i in range(n):
        t.fd(length)
        dtheta = 1 / (a + b * theta)
        t.lt(dtheta)
        theta += dtheta
def main():
bob = turtle.Turtle()
draw_spiral(bob, n=1000)
turtle.mainloop()
if __name__ == "__main__":
main()
|
[
"philip.dongfei@gmail.com"
] |
philip.dongfei@gmail.com
|
342a379d6b9f105f42149eb9a3a20f5b3dffb719
|
8371e46c6e96aea5da8bed8b0dcd34a57b144fb1
|
/Django/mainsite/todo/views.py
|
cf81e056b0c40c78932c6a31992dbaa118bf1fef
|
[] |
no_license
|
PDXChloe/PDXcodeguild
|
4bfcd31072bfd17cb7959f71adfd867ff9e5d9ac
|
23ca3dc245bf51be932790d03e3333d89c462180
|
refs/heads/master
| 2021-08-24T08:13:36.027155
| 2017-12-08T20:30:14
| 2017-12-08T20:30:14
| 105,682,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,229
|
py
|
from django.http import HttpResponse, HttpResponseRedirect
from .models import TodoItem
from django.shortcuts import render
from django.urls import reverse
# Create your views here.
def index(request):
# print(request.method)
# print(request.body)
# print(request.GET)
# name
#
# output = '<html><head></head><body>'
# output += '<ul>'
#
# for i in range(100):
# output += '<li>{i}</li>'
# output += '</body></html>'
# print(output)
# return HttpResponse('Hello World')
# todo_items = TodoItem.objects.all()
#
# output = '<html><head></head><body>'
# output += "<ul>"
#
# for todo_item in todo_items:
# # print(todo_item.todo_text)
# output += f'<li>{todo_item.todo_text}</li>'
# output += '</ul>'
# output += '</body></html>'
# return HttpResponse(output)
todo_items = TodoItem.objects.all()
context = {"todo_items": todo_items}
return render(request, 'todo/index.html', context)
def savetodo(request):
todo_text = request.POST['todo_text']
todo_item = TodoItem(todo_text=todo_text)
todo_item.save() #saving new item into database
return HttpResponseRedirect(reverse('todo:index'))
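# Hedged sketch (added): savetodo expects a POST carrying a `todo_text` field.
# The template todo/index.html is not in this snapshot; form markup along these
# lines (URL name assumed) would drive it:
#
#   <form action="{% url 'todo:savetodo' %}" method="post">
#     {% csrf_token %}
#     <input type="text" name="todo_text">
#     <button type="submit">Add</button>
#   </form>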
|
[
"chloe_elliott@me.com"
] |
chloe_elliott@me.com
|
9a5aafa5fcb58b756ab4bcfb7ed48e4dac8cf7e7
|
c0bd54ef52021c47a9112e27fa4104f7f9ae571f
|
/trainer/tests.py
|
6e2e4649c58ed45fd30c8a10130b41948d4c3667
|
[] |
no_license
|
coolshan008/rpserver
|
00582e858e80c97a78b4fd1748e00f8fe92fef67
|
f2587f5cb85a53c0c368b5f901fc21ad77e04068
|
refs/heads/master
| 2020-12-24T20:42:46.465265
| 2016-12-04T15:58:32
| 2016-12-04T15:58:32
| 56,149,095
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,762
|
py
|
from models import TUPLE
import pickle
import gl
from django.http import HttpResponse
output_file = open('test/output', 'a')
def flash():
    # Re-open the shared log file and start a new section in it.
    global output_file
    output_file = open('test/output', 'a')
    output_file.write('\n\n')
def test(request):
    try:
        flash()
        precision = randomTest()
    finally:
        output_file.close()
    return HttpResponse(str(precision))
def randomTest():
    # Load one trained model (pair list, thresholds, weights) per room.
    Ts = dict()
    Thres = dict()
    Weights = dict()
    for i in range(0, gl.NUMBER_OF_PIS):
        modelf = open('model/room' + str(i), 'rb')  # pickle needs binary mode
        Ts[i] = pickle.load(modelf)
        Thres[i] = pickle.load(modelf)
        Weights[i] = pickle.load(modelf)
        modelf.close()
    tuples = TUPLE.objects.filter(Used=0)
    total = 0.0
    correct_count = 0
    for tp in tuples:
        correct_no = tp.Correct_no
        delta = dict()
        vote = dict()
        ssi_array = pickle.loads(tp.Array)
        debug(str(correct_no) + str(ssi_array))
        # Each room casts a weighted vote from its threshold tests on the
        # pairwise signal-strength differences.
        for i in range(0, gl.NUMBER_OF_PIS):
            vote[i] = 0.0
            for j in range(len(Ts[i])):
                delta[Ts[i][j]] = ssi_array[Ts[i][j][0]] - ssi_array[Ts[i][j][1]]
                if delta[Ts[i][j]] > Thres[i][j]:
                    vote[i] += Weights[i][j]
        # Predict the room with the highest vote.
        max_vote = -1
        position = -1
        for key in vote.keys():
            if vote[key] > max_vote:
                max_vote = vote[key]
                position = key
        if position == correct_no:
            correct_count += 1
        total += 1
    return correct_count / total
def debug(obj):
    output_file.write(str(obj) + '\n')
|
[
"coolshan008@gmail.com"
] |
coolshan008@gmail.com
|
84c6051cd1c083c73006b2058485e017d4f6a001
|
4d259f441632f5c45b94e8d816fc31a4f022af3c
|
/eventlet/prod_worker.py
|
51d9d239ff441f414a29933caf1e28379ec9f8d3
|
[] |
no_license
|
xiaoruiguo/lab
|
c37224fd4eb604aa2b39fe18ba64e93b7159a1eb
|
ec99f51b498244c414b025d7dae91fdad2f8ef46
|
refs/heads/master
| 2020-05-25T01:37:42.070770
| 2016-05-16T23:24:26
| 2016-05-16T23:24:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
from eventlet.queue import Queue
import eventlet
q = Queue()
def worker():
    # Consume one item, then burn CPU to simulate work.
    while True:
        q.get()
        a = 0
        for i in range(1000000):
            a = a + 1
        print('get')
def producer():
    # Burn CPU to simulate work, then publish one item.
    while True:
        a = 0
        for i in range(1000000):
            a = a + 1
        q.put('lol')
        print('put')
eventlet.spawn(worker)
eventlet.spawn(producer)
eventlet.sleep(30)
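# Note: eventlet greenthreads are cooperative and only switch on hub
# operations such as q.get() blocking on an empty queue or an explicit
# eventlet.sleep(); the pure-CPU counting loops above never yield. One way
# to force a switch per iteration, if fairer scheduling were wanted:
#
#     for i in range(1000000):
#         a = a + 1
#         eventlet.sleep(0)  # yield to the hub so other greenthreads run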
|
[
"junmein@junmeinde-macbook-pro-3.local"
] |
junmein@junmeinde-macbook-pro-3.local
|
c9851d71ff6de782f266a5a05d35eb235639a7e9
|
7c2959d41ab8814cf7cdc56ea067ec0bfe558438
|
/easy/one.py
|
b695db11ed3dcd14df1731de587f6208f28e425f
|
[] |
no_license
|
AtiqulHaque/HackerRank
|
306f515c24e54708461967011c2fddfc6057fed3
|
d2c1189dfce913a8c204351470866a446fd0d88c
|
refs/heads/master
| 2020-03-30T12:06:44.295949
| 2018-10-04T13:42:20
| 2018-10-04T13:42:20
| 151,208,575
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
# file one.py
def func():
    print("func() in one.py")
print("top-level in one.py")
if __name__ == "__main__":
    def anotherFun():
        print("Another function")
    print("one.py is being run directly")
    anotherFun()
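# Note: because of the __name__ guard, importing this module only defines
# func() and runs the top-level print; anotherFun exists solely when the
# file is executed directly. A hypothetical interpreter session:
#
#     >>> import one          # prints "top-level in one.py"
#     >>> one.func()          # prints "func() in one.py"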
|
[
"md_atiqulhaque@yahoo.com"
] |
md_atiqulhaque@yahoo.com
|
25bd19ebec3d335bb1ab4630ad5ef6a7c9856ce5
|
fde8c89b352076f95cc16e589b1baf18f7befb51
|
/dulwich/pack.py
|
878162b964ef50a9c5bcebdcb0a02cf5529b4243
|
[] |
no_license
|
571451370/devstack_mitaka
|
b11145256deab817bcdf60a01a67bb6b2f9ddb52
|
1bdd3f2598f91c1446b85c5b6def7784a2f6ab02
|
refs/heads/master
| 2020-08-26T12:53:07.482514
| 2017-04-12T01:32:55
| 2017-04-12T01:32:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 66,284
|
py
|
# pack.py -- For dealing with packed git objects.
# Copyright (C) 2007 James Westby <jw+debian@jameswestby.net>
# Copyright (C) 2008-2013 Jelmer Vernooij <jelmer@samba.org>
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Classes for dealing with packed git objects.
A pack is a compact representation of a bunch of objects, stored
using deltas where possible.
They have two parts, the pack file, which stores the data, and an index
that tells you where the data is.
To find an object you look in all of the index files until you find a
match for the object name. The offset obtained from the index is then
used as a pointer into the corresponding pack file.
"""
from collections import defaultdict
import binascii
from io import BytesIO, UnsupportedOperation
from collections import (
deque,
)
import difflib
import struct
from itertools import chain
try:
from itertools import imap, izip
except ImportError:
# Python3
imap = map
izip = zip
import os
import sys
try:
import mmap
except ImportError:
has_mmap = False
else:
has_mmap = True
# For some reason the above try, except fails to set has_mmap = False for plan9
if sys.platform == 'Plan9':
has_mmap = False
from hashlib import sha1
from os import (
SEEK_CUR,
SEEK_END,
)
from struct import unpack_from
import zlib
from dulwich.errors import (
ApplyDeltaError,
ChecksumMismatch,
)
from dulwich.file import GitFile
from dulwich.lru_cache import (
LRUSizeCache,
)
from dulwich.objects import (
ShaFile,
hex_to_sha,
sha_to_hex,
object_header,
)
OFS_DELTA = 6
REF_DELTA = 7
DELTA_TYPES = (OFS_DELTA, REF_DELTA)
DEFAULT_PACK_DELTA_WINDOW_SIZE = 10
def take_msb_bytes(read, crc32=None):
    """Read bytes marked with most significant bit.

    :param read: Read function
    :param crc32: Optional starting CRC32 to update with the bytes read, or
        None to skip CRC computation
    :return: Tuple of (list of byte values read, updated CRC32 or None)
    """
ret = []
while len(ret) == 0 or ret[-1] & 0x80:
b = read(1)
if crc32 is not None:
crc32 = binascii.crc32(b, crc32)
ret.append(ord(b[:1]))
return ret, crc32
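# Example (illustrative, not part of the API): take_msb_bytes keeps reading
# while the high bit of each byte is set, which is how pack headers encode
# variable-length integers.
#
#     from io import BytesIO
#     buf = BytesIO(bytes([0x91, 0x2e]))  # 0x91 has the MSB set, 0x2e does not
#     ret, _ = take_msb_bytes(buf.read)
#     assert ret == [0x91, 0x2e]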
class UnpackedObject(object):
"""Class encapsulating an object unpacked from a pack file.
These objects should only be created from within unpack_object. Most
members start out as empty and are filled in at various points by
read_zlib_chunks, unpack_object, DeltaChainIterator, etc.
End users of this object should take care that the function they're getting
this object from is guaranteed to set the members they need.
"""
__slots__ = [
'offset', # Offset in its pack.
'_sha', # Cached binary SHA.
'obj_type_num', # Type of this object.
'obj_chunks', # Decompressed and delta-resolved chunks.
'pack_type_num', # Type of this object in the pack (may be a delta).
'delta_base', # Delta base offset or SHA.
'comp_chunks', # Compressed object chunks.
'decomp_chunks', # Decompressed object chunks.
'decomp_len', # Decompressed length of this object.
'crc32', # CRC32.
]
# TODO(dborowitz): read_zlib_chunks and unpack_object could very well be
# methods of this object.
def __init__(self, pack_type_num, delta_base, decomp_len, crc32):
self.offset = None
self._sha = None
self.pack_type_num = pack_type_num
self.delta_base = delta_base
self.comp_chunks = None
self.decomp_chunks = []
self.decomp_len = decomp_len
self.crc32 = crc32
if pack_type_num in DELTA_TYPES:
self.obj_type_num = None
self.obj_chunks = None
else:
self.obj_type_num = pack_type_num
self.obj_chunks = self.decomp_chunks
self.delta_base = delta_base
def sha(self):
"""Return the binary SHA of this object."""
if self._sha is None:
self._sha = obj_sha(self.obj_type_num, self.obj_chunks)
return self._sha
def sha_file(self):
"""Return a ShaFile from this object."""
return ShaFile.from_raw_chunks(self.obj_type_num, self.obj_chunks)
# Only provided for backwards compatibility with code that expects either
# chunks or a delta tuple.
def _obj(self):
"""Return the decompressed chunks, or (delta base, delta chunks)."""
if self.pack_type_num in DELTA_TYPES:
return (self.delta_base, self.decomp_chunks)
else:
return self.decomp_chunks
def __eq__(self, other):
if not isinstance(other, UnpackedObject):
return False
for slot in self.__slots__:
if getattr(self, slot) != getattr(other, slot):
return False
return True
def __ne__(self, other):
return not (self == other)
def __repr__(self):
data = ['%s=%r' % (s, getattr(self, s)) for s in self.__slots__]
return '%s(%s)' % (self.__class__.__name__, ', '.join(data))
_ZLIB_BUFSIZE = 4096
def read_zlib_chunks(read_some, unpacked, include_comp=False,
buffer_size=_ZLIB_BUFSIZE):
"""Read zlib data from a buffer.
This function requires that the buffer have additional data following the
compressed data, which is guaranteed to be the case for git pack files.
:param read_some: Read function that returns at least one byte, but may
return less than the requested size.
:param unpacked: An UnpackedObject to write result data to. If its crc32
attr is not None, the CRC32 of the compressed bytes will be computed
using this starting CRC32.
After this function, will have the following attrs set:
* comp_chunks (if include_comp is True)
* decomp_chunks
* decomp_len
* crc32
:param include_comp: If True, include compressed data in the result.
:param buffer_size: Size of the read buffer.
:return: Leftover unused data from the decompression.
:raise zlib.error: if a decompression error occurred.
"""
if unpacked.decomp_len <= -1:
raise ValueError('non-negative zlib data stream size expected')
decomp_obj = zlib.decompressobj()
comp_chunks = []
decomp_chunks = unpacked.decomp_chunks
decomp_len = 0
crc32 = unpacked.crc32
while True:
add = read_some(buffer_size)
if not add:
raise zlib.error('EOF before end of zlib stream')
comp_chunks.append(add)
decomp = decomp_obj.decompress(add)
decomp_len += len(decomp)
decomp_chunks.append(decomp)
unused = decomp_obj.unused_data
if unused:
left = len(unused)
if crc32 is not None:
crc32 = binascii.crc32(add[:-left], crc32)
if include_comp:
comp_chunks[-1] = add[:-left]
break
elif crc32 is not None:
crc32 = binascii.crc32(add, crc32)
if crc32 is not None:
crc32 &= 0xffffffff
if decomp_len != unpacked.decomp_len:
raise zlib.error('decompressed data does not match expected size')
unpacked.crc32 = crc32
if include_comp:
unpacked.comp_chunks = comp_chunks
return unused
def iter_sha1(iter):
"""Return the hexdigest of the SHA1 over a set of names.
:param iter: Iterator over string objects
:return: 40-byte hex sha1 digest
"""
sha = sha1()
for name in iter:
sha.update(name)
return sha.hexdigest().encode('ascii')
def load_pack_index(path):
"""Load an index file by path.
    :param path: Path to the index file
:return: A PackIndex loaded from the given path
"""
with GitFile(path, 'rb') as f:
return load_pack_index_file(path, f)
def _load_file_contents(f, size=None):
try:
fd = f.fileno()
except (UnsupportedOperation, AttributeError):
fd = None
# Attempt to use mmap if possible
if fd is not None:
if size is None:
size = os.fstat(fd).st_size
if has_mmap:
try:
contents = mmap.mmap(fd, size, access=mmap.ACCESS_READ)
except mmap.error:
# Perhaps a socket?
pass
else:
return contents, size
contents = f.read()
size = len(contents)
return contents, size
def load_pack_index_file(path, f):
"""Load an index file from a file-like object.
:param path: Path for the index file
:param f: File-like object
:return: A PackIndex loaded from the given file
"""
contents, size = _load_file_contents(f)
if contents[:4] == b'\377tOc':
version = struct.unpack(b'>L', contents[4:8])[0]
if version == 2:
return PackIndex2(path, file=f, contents=contents,
size=size)
else:
raise KeyError('Unknown pack index format %d' % version)
else:
return PackIndex1(path, file=f, contents=contents, size=size)
def bisect_find_sha(start, end, sha, unpack_name):
"""Find a SHA in a data blob with sorted SHAs.
:param start: Start index of range to search
:param end: End index of range to search
:param sha: Sha to find
:param unpack_name: Callback to retrieve SHA by index
:return: Index of the SHA, or None if it wasn't found
"""
assert start <= end
while start <= end:
i = (start + end) // 2
file_sha = unpack_name(i)
if file_sha < sha:
start = i + 1
elif file_sha > sha:
end = i - 1
else:
return i
return None
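# Example (illustrative): bisect_find_sha only needs a callback mapping an
# index to a 20-byte name, so a plain sorted list demonstrates it:
#
#     names = [b'\x01' * 20, b'\x05' * 20, b'\x09' * 20]
#     assert bisect_find_sha(0, len(names) - 1, b'\x05' * 20,
#                            names.__getitem__) == 1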
class PackIndex(object):
"""An index in to a packfile.
Given a sha id of an object a pack index can tell you the location in the
packfile of that object if it has it.
"""
def __eq__(self, other):
if not isinstance(other, PackIndex):
return False
for (name1, _, _), (name2, _, _) in izip(self.iterentries(),
other.iterentries()):
if name1 != name2:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __len__(self):
"""Return the number of entries in this pack index."""
raise NotImplementedError(self.__len__)
def __iter__(self):
"""Iterate over the SHAs in this pack."""
return imap(sha_to_hex, self._itersha())
def iterentries(self):
"""Iterate over the entries in this pack index.
:return: iterator over tuples with object name, offset in packfile and
crc32 checksum.
"""
raise NotImplementedError(self.iterentries)
def get_pack_checksum(self):
"""Return the SHA1 checksum stored for the corresponding packfile.
:return: 20-byte binary digest
"""
raise NotImplementedError(self.get_pack_checksum)
def object_index(self, sha):
"""Return the index in to the corresponding packfile for the object.
Given the name of an object it will return the offset that object
lives at within the corresponding pack file. If the pack file doesn't
have the object then None will be returned.
"""
if len(sha) == 40:
sha = hex_to_sha(sha)
return self._object_index(sha)
    def _object_index(self, sha):
        """See object_index.

        :param sha: A *binary* SHA string (20 bytes long)
        """
raise NotImplementedError(self._object_index)
def objects_sha1(self):
"""Return the hex SHA1 over all the shas of all objects in this pack.
:note: This is used for the filename of the pack.
"""
return iter_sha1(self._itersha())
def _itersha(self):
"""Yield all the SHA1's of the objects in the index, sorted."""
raise NotImplementedError(self._itersha)
class MemoryPackIndex(PackIndex):
"""Pack index that is stored entirely in memory."""
def __init__(self, entries, pack_checksum=None):
"""Create a new MemoryPackIndex.
:param entries: Sequence of name, idx, crc32 (sorted)
:param pack_checksum: Optional pack checksum
"""
self._by_sha = {}
for name, idx, crc32 in entries:
self._by_sha[name] = idx
self._entries = entries
self._pack_checksum = pack_checksum
def get_pack_checksum(self):
return self._pack_checksum
def __len__(self):
return len(self._entries)
    def _object_index(self, sha):
        # _by_sha maps each binary object name directly to its pack offset.
        return self._by_sha[sha]
def _itersha(self):
return iter(self._by_sha)
def iterentries(self):
return iter(self._entries)
class FilePackIndex(PackIndex):
"""Pack index that is based on a file.
    Lookups start with the fan-out table: 256 four-byte big-endian counts
    indexed by the first byte of the SHA. The entry for byte i marks the end
    of the group of SHAs that start with i, and the entry for byte i-1 marks
    that group's start. SHAs are sorted within each group, so those start and
    end offsets bound a bisection search that tells whether the value is
    present.
"""
def __init__(self, filename, file=None, contents=None, size=None):
"""Create a pack index object.
Provide it with the name of the index file to consider, and it will map
it whenever required.
"""
self._filename = filename
# Take the size now, so it can be checked each time we map the file to
# ensure that it hasn't changed.
if file is None:
self._file = GitFile(filename, 'rb')
else:
self._file = file
if contents is None:
self._contents, self._size = _load_file_contents(self._file, size)
else:
self._contents, self._size = (contents, size)
def __eq__(self, other):
# Quick optimization:
if (isinstance(other, FilePackIndex) and
self._fan_out_table != other._fan_out_table):
return False
return super(FilePackIndex, self).__eq__(other)
def close(self):
self._file.close()
if getattr(self._contents, "close", None) is not None:
self._contents.close()
def __len__(self):
"""Return the number of entries in this pack index."""
return self._fan_out_table[-1]
def _unpack_entry(self, i):
"""Unpack the i-th entry in the index file.
:return: Tuple with object name (SHA), offset in pack file and CRC32
checksum (if known).
"""
raise NotImplementedError(self._unpack_entry)
def _unpack_name(self, i):
"""Unpack the i-th name from the index file."""
raise NotImplementedError(self._unpack_name)
def _unpack_offset(self, i):
"""Unpack the i-th object offset from the index file."""
raise NotImplementedError(self._unpack_offset)
def _unpack_crc32_checksum(self, i):
"""Unpack the crc32 checksum for the i-th object from the index file."""
raise NotImplementedError(self._unpack_crc32_checksum)
def _itersha(self):
for i in range(len(self)):
yield self._unpack_name(i)
def iterentries(self):
"""Iterate over the entries in this pack index.
:return: iterator over tuples with object name, offset in packfile and
crc32 checksum.
"""
for i in range(len(self)):
yield self._unpack_entry(i)
def _read_fan_out_table(self, start_offset):
ret = []
for i in range(0x100):
fanout_entry = self._contents[start_offset+i*4:start_offset+(i+1)*4]
ret.append(struct.unpack('>L', fanout_entry)[0])
return ret
def check(self):
"""Check that the stored checksum matches the actual checksum."""
actual = self.calculate_checksum()
stored = self.get_stored_checksum()
if actual != stored:
raise ChecksumMismatch(stored, actual)
def calculate_checksum(self):
"""Calculate the SHA1 checksum over this pack index.
:return: This is a 20-byte binary digest
"""
return sha1(self._contents[:-20]).digest()
def get_pack_checksum(self):
"""Return the SHA1 checksum stored for the corresponding packfile.
:return: 20-byte binary digest
"""
return bytes(self._contents[-40:-20])
def get_stored_checksum(self):
"""Return the SHA1 checksum stored for this index.
:return: 20-byte binary digest
"""
return bytes(self._contents[-20:])
    def _object_index(self, sha):
        """See object_index.

        :param sha: A *binary* SHA string (20 bytes long)
        """
assert len(sha) == 20
idx = ord(sha[:1])
if idx == 0:
start = 0
else:
start = self._fan_out_table[idx-1]
end = self._fan_out_table[idx]
i = bisect_find_sha(start, end, sha, self._unpack_name)
if i is None:
raise KeyError(sha)
return self._unpack_offset(i)
class PackIndex1(FilePackIndex):
"""Version 1 Pack Index file."""
def __init__(self, filename, file=None, contents=None, size=None):
super(PackIndex1, self).__init__(filename, file, contents, size)
self.version = 1
self._fan_out_table = self._read_fan_out_table(0)
def _unpack_entry(self, i):
(offset, name) = unpack_from('>L20s', self._contents,
(0x100 * 4) + (i * 24))
return (name, offset, None)
def _unpack_name(self, i):
offset = (0x100 * 4) + (i * 24) + 4
return self._contents[offset:offset+20]
def _unpack_offset(self, i):
offset = (0x100 * 4) + (i * 24)
return unpack_from('>L', self._contents, offset)[0]
def _unpack_crc32_checksum(self, i):
# Not stored in v1 index files
return None
class PackIndex2(FilePackIndex):
"""Version 2 Pack Index file."""
def __init__(self, filename, file=None, contents=None, size=None):
super(PackIndex2, self).__init__(filename, file, contents, size)
if self._contents[:4] != b'\377tOc':
raise AssertionError('Not a v2 pack index file')
(self.version, ) = unpack_from(b'>L', self._contents, 4)
if self.version != 2:
raise AssertionError('Version was %d' % self.version)
self._fan_out_table = self._read_fan_out_table(8)
self._name_table_offset = 8 + 0x100 * 4
self._crc32_table_offset = self._name_table_offset + 20 * len(self)
self._pack_offset_table_offset = (self._crc32_table_offset +
4 * len(self))
self._pack_offset_largetable_offset = (self._pack_offset_table_offset +
4 * len(self))
def _unpack_entry(self, i):
return (self._unpack_name(i), self._unpack_offset(i),
self._unpack_crc32_checksum(i))
def _unpack_name(self, i):
offset = self._name_table_offset + i * 20
return self._contents[offset:offset+20]
def _unpack_offset(self, i):
offset = self._pack_offset_table_offset + i * 4
offset = unpack_from('>L', self._contents, offset)[0]
if offset & (2**31):
offset = self._pack_offset_largetable_offset + (offset&(2**31-1)) * 8
offset = unpack_from('>Q', self._contents, offset)[0]
return offset
def _unpack_crc32_checksum(self, i):
return unpack_from('>L', self._contents,
self._crc32_table_offset + i * 4)[0]
def read_pack_header(read):
"""Read the header of a pack file.
:param read: Read function
:return: Tuple of (pack version, number of objects). If no data is available
to read, returns (None, None).
"""
header = read(12)
if not header:
return None, None
if header[:4] != b'PACK':
raise AssertionError('Invalid pack header %r' % header)
(version,) = unpack_from(b'>L', header, 4)
if version not in (2, 3):
raise AssertionError('Version was %d' % version)
(num_objects,) = unpack_from(b'>L', header, 8)
return (version, num_objects)
def chunks_length(chunks):
if isinstance(chunks, bytes):
return len(chunks)
else:
return sum(imap(len, chunks))
def unpack_object(read_all, read_some=None, compute_crc32=False,
include_comp=False, zlib_bufsize=_ZLIB_BUFSIZE):
"""Unpack a Git object.
:param read_all: Read function that blocks until the number of requested
bytes are read.
:param read_some: Read function that returns at least one byte, but may not
return the number of bytes requested.
:param compute_crc32: If True, compute the CRC32 of the compressed data. If
False, the returned CRC32 will be None.
:param include_comp: If True, include compressed data in the result.
:param zlib_bufsize: An optional buffer size for zlib operations.
    :return: A tuple of (unpacked, unused), where unused is the unused data
        leftover from decompression, and unpacked is an UnpackedObject with
        the following attrs set:
* obj_chunks (for non-delta types)
* pack_type_num
* delta_base (for delta types)
* comp_chunks (if include_comp is True)
* decomp_chunks
* decomp_len
* crc32 (if compute_crc32 is True)
"""
if read_some is None:
read_some = read_all
if compute_crc32:
crc32 = 0
else:
crc32 = None
bytes, crc32 = take_msb_bytes(read_all, crc32=crc32)
type_num = (bytes[0] >> 4) & 0x07
size = bytes[0] & 0x0f
for i, byte in enumerate(bytes[1:]):
size += (byte & 0x7f) << ((i * 7) + 4)
raw_base = len(bytes)
if type_num == OFS_DELTA:
bytes, crc32 = take_msb_bytes(read_all, crc32=crc32)
raw_base += len(bytes)
if bytes[-1] & 0x80:
raise AssertionError
delta_base_offset = bytes[0] & 0x7f
for byte in bytes[1:]:
delta_base_offset += 1
delta_base_offset <<= 7
delta_base_offset += (byte & 0x7f)
delta_base = delta_base_offset
elif type_num == REF_DELTA:
delta_base = read_all(20)
if compute_crc32:
crc32 = binascii.crc32(delta_base, crc32)
raw_base += 20
else:
delta_base = None
unpacked = UnpackedObject(type_num, delta_base, size, crc32)
unused = read_zlib_chunks(read_some, unpacked, buffer_size=zlib_bufsize,
include_comp=include_comp)
return unpacked, unused
def _compute_object_size(value):
"""Compute the size of a unresolved object for use with LRUSizeCache."""
(num, obj) = value
if num in DELTA_TYPES:
return chunks_length(obj[1])
return chunks_length(obj)
class PackStreamReader(object):
"""Class to read a pack stream.
The pack is read from a ReceivableProtocol using read() or recv() as
appropriate.
"""
def __init__(self, read_all, read_some=None, zlib_bufsize=_ZLIB_BUFSIZE):
self.read_all = read_all
if read_some is None:
self.read_some = read_all
else:
self.read_some = read_some
self.sha = sha1()
self._offset = 0
self._rbuf = BytesIO()
# trailer is a deque to avoid memory allocation on small reads
self._trailer = deque()
self._zlib_bufsize = zlib_bufsize
def _read(self, read, size):
"""Read up to size bytes using the given callback.
As a side effect, update the verifier's hash (excluding the last 20
bytes read).
:param read: The read callback to read from.
:param size: The maximum number of bytes to read; the particular
behavior is callback-specific.
"""
data = read(size)
# maintain a trailer of the last 20 bytes we've read
n = len(data)
self._offset += n
tn = len(self._trailer)
if n >= 20:
to_pop = tn
to_add = 20
else:
to_pop = max(n + tn - 20, 0)
to_add = n
self.sha.update(bytes(bytearray([self._trailer.popleft() for _ in range(to_pop)])))
self._trailer.extend(data[-to_add:])
# hash everything but the trailer
self.sha.update(data[:-to_add])
return data
def _buf_len(self):
buf = self._rbuf
start = buf.tell()
buf.seek(0, SEEK_END)
end = buf.tell()
buf.seek(start)
return end - start
@property
def offset(self):
return self._offset - self._buf_len()
def read(self, size):
"""Read, blocking until size bytes are read."""
buf_len = self._buf_len()
if buf_len >= size:
return self._rbuf.read(size)
buf_data = self._rbuf.read()
self._rbuf = BytesIO()
return buf_data + self._read(self.read_all, size - buf_len)
def recv(self, size):
"""Read up to size bytes, blocking until one byte is read."""
buf_len = self._buf_len()
if buf_len:
data = self._rbuf.read(size)
if size >= buf_len:
self._rbuf = BytesIO()
return data
return self._read(self.read_some, size)
def __len__(self):
return self._num_objects
def read_objects(self, compute_crc32=False):
"""Read the objects in this pack file.
:param compute_crc32: If True, compute the CRC32 of the compressed
data. If False, the returned CRC32 will be None.
:return: Iterator over UnpackedObjects with the following members set:
offset
obj_type_num
obj_chunks (for non-delta types)
delta_base (for delta types)
decomp_chunks
decomp_len
crc32 (if compute_crc32 is True)
:raise ChecksumMismatch: if the checksum of the pack contents does not
match the checksum in the pack trailer.
:raise zlib.error: if an error occurred during zlib decompression.
:raise IOError: if an error occurred writing to the output file.
"""
pack_version, self._num_objects = read_pack_header(self.read)
if pack_version is None:
return
for i in range(self._num_objects):
offset = self.offset
unpacked, unused = unpack_object(
self.read, read_some=self.recv, compute_crc32=compute_crc32,
zlib_bufsize=self._zlib_bufsize)
unpacked.offset = offset
# prepend any unused data to current read buffer
buf = BytesIO()
buf.write(unused)
buf.write(self._rbuf.read())
buf.seek(0)
self._rbuf = buf
yield unpacked
if self._buf_len() < 20:
# If the read buffer is full, then the last read() got the whole
# trailer off the wire. If not, it means there is still some of the
# trailer to read. We need to read() all 20 bytes; N come from the
# read buffer and (20 - N) come from the wire.
self.read(20)
pack_sha = bytearray(self._trailer)
if pack_sha != self.sha.digest():
raise ChecksumMismatch(sha_to_hex(pack_sha), self.sha.hexdigest())
class PackStreamCopier(PackStreamReader):
"""Class to verify a pack stream as it is being read.
The pack is read from a ReceivableProtocol using read() or recv() as
appropriate and written out to the given file-like object.
"""
def __init__(self, read_all, read_some, outfile, delta_iter=None):
"""Initialize the copier.
:param read_all: Read function that blocks until the number of requested
bytes are read.
:param read_some: Read function that returns at least one byte, but may
not return the number of bytes requested.
:param outfile: File-like object to write output through.
:param delta_iter: Optional DeltaChainIterator to record deltas as we
read them.
"""
super(PackStreamCopier, self).__init__(read_all, read_some=read_some)
self.outfile = outfile
self._delta_iter = delta_iter
def _read(self, read, size):
"""Read data from the read callback and write it to the file."""
data = super(PackStreamCopier, self)._read(read, size)
self.outfile.write(data)
return data
def verify(self):
"""Verify a pack stream and write it to the output file.
See PackStreamReader.iterobjects for a list of exceptions this may
throw.
"""
if self._delta_iter:
for unpacked in self.read_objects():
self._delta_iter.record(unpacked)
else:
for _ in self.read_objects():
pass
def obj_sha(type, chunks):
"""Compute the SHA for a numeric type and object chunks."""
sha = sha1()
sha.update(object_header(type, chunks_length(chunks)))
if isinstance(chunks, bytes):
sha.update(chunks)
else:
for chunk in chunks:
sha.update(chunk)
return sha.digest()
def compute_file_sha(f, start_ofs=0, end_ofs=0, buffer_size=1<<16):
"""Hash a portion of a file into a new SHA.
:param f: A file-like object to read from that supports seek().
:param start_ofs: The offset in the file to start reading at.
:param end_ofs: The offset in the file to end reading at, relative to the
end of the file.
:param buffer_size: A buffer size for reading.
:return: A new SHA object updated with data read from the file.
"""
sha = sha1()
f.seek(0, SEEK_END)
length = f.tell()
if (end_ofs < 0 and length + end_ofs < start_ofs) or end_ofs > length:
raise AssertionError(
"Attempt to read beyond file length. "
"start_ofs: %d, end_ofs: %d, file length: %d" % (
start_ofs, end_ofs, length))
todo = length + end_ofs - start_ofs
f.seek(start_ofs)
while todo:
data = f.read(min(todo, buffer_size))
sha.update(data)
todo -= len(data)
return sha
class PackData(object):
"""The data contained in a packfile.
Pack files can be accessed both sequentially for exploding a pack, and
directly with the help of an index to retrieve a specific object.
The objects within are either complete or a delta against another.
The header is variable length. If the MSB of each byte is set then it
indicates that the subsequent byte is still part of the header.
For the first byte the next MS bits are the type, which tells you the type
of object, and whether it is a delta. The LS byte is the lowest bits of the
size. For each subsequent byte the LS 7 bits are the next MS bits of the
size, i.e. the last byte of the header contains the MS bits of the size.
For the complete objects the data is stored as zlib deflated data.
The size in the header is the uncompressed object size, so to uncompress
you need to just keep feeding data to zlib until you get an object back,
or it errors on bad data. This is done here by just giving the complete
buffer from the start of the deflated object on. This is bad, but until I
get mmap sorted out it will have to do.
Currently there are no integrity checks done. Also no attempt is made to
try and detect the delta case, or a request for an object at the wrong
position. It will all just throw a zlib or KeyError.
"""
def __init__(self, filename, file=None, size=None):
"""Create a PackData object representing the pack in the given filename.
The file must exist and stay readable until the object is disposed of. It
must also stay the same size. It will be mapped whenever needed.
Currently there is a restriction on the size of the pack as the python
mmap implementation is flawed.
"""
self._filename = filename
self._size = size
self._header_size = 12
if file is None:
self._file = GitFile(self._filename, 'rb')
else:
self._file = file
(version, self._num_objects) = read_pack_header(self._file.read)
self._offset_cache = LRUSizeCache(1024*1024*20,
compute_size=_compute_object_size)
self.pack = None
@property
def filename(self):
return os.path.basename(self._filename)
@classmethod
def from_file(cls, file, size):
return cls(str(file), file=file, size=size)
@classmethod
def from_path(cls, path):
return cls(filename=path)
def close(self):
self._file.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def _get_size(self):
if self._size is not None:
return self._size
self._size = os.path.getsize(self._filename)
if self._size < self._header_size:
errmsg = ('%s is too small for a packfile (%d < %d)' %
(self._filename, self._size, self._header_size))
raise AssertionError(errmsg)
return self._size
def __len__(self):
"""Returns the number of objects in this pack."""
return self._num_objects
def calculate_checksum(self):
"""Calculate the checksum for this pack.
:return: 20-byte binary SHA1 digest
"""
return compute_file_sha(self._file, end_ofs=-20).digest()
def get_ref(self, sha):
"""Get the object for a ref SHA, only looking in this pack."""
# TODO: cache these results
if self.pack is None:
raise KeyError(sha)
try:
offset = self.pack.index.object_index(sha)
except KeyError:
offset = None
if offset:
type, obj = self.get_object_at(offset)
elif self.pack is not None and self.pack.resolve_ext_ref:
type, obj = self.pack.resolve_ext_ref(sha)
else:
raise KeyError(sha)
return offset, type, obj
def resolve_object(self, offset, type, obj, get_ref=None):
"""Resolve an object, possibly resolving deltas when necessary.
:return: Tuple with object type and contents.
"""
# Walk down the delta chain, building a stack of deltas to reach
# the requested object.
base_offset = offset
base_type = type
base_obj = obj
delta_stack = []
while base_type in DELTA_TYPES:
prev_offset = base_offset
if get_ref is None:
get_ref = self.get_ref
if base_type == OFS_DELTA:
(delta_offset, delta) = base_obj
# TODO: clean up asserts and replace with nicer error messages
                assert isinstance(base_offset, int)
                assert isinstance(delta_offset, int)
base_offset = base_offset - delta_offset
base_type, base_obj = self.get_object_at(base_offset)
assert isinstance(base_type, int)
elif base_type == REF_DELTA:
(basename, delta) = base_obj
assert isinstance(basename, bytes) and len(basename) == 20
base_offset, base_type, base_obj = get_ref(basename)
assert isinstance(base_type, int)
delta_stack.append((prev_offset, base_type, delta))
# Now grab the base object (mustn't be a delta) and apply the
# deltas all the way up the stack.
chunks = base_obj
for prev_offset, delta_type, delta in reversed(delta_stack):
chunks = apply_delta(chunks, delta)
# TODO(dborowitz): This can result in poor performance if
# large base objects are separated from deltas in the pack.
# We should reorganize so that we apply deltas to all
# objects in a chain one after the other to optimize cache
# performance.
if prev_offset is not None:
self._offset_cache[prev_offset] = base_type, chunks
return base_type, chunks
def iterobjects(self, progress=None, compute_crc32=True):
self._file.seek(self._header_size)
for i in range(1, self._num_objects + 1):
offset = self._file.tell()
unpacked, unused = unpack_object(
self._file.read, compute_crc32=compute_crc32)
if progress is not None:
progress(i, self._num_objects)
yield (offset, unpacked.pack_type_num, unpacked._obj(),
unpacked.crc32)
self._file.seek(-len(unused), SEEK_CUR) # Back up over unused data.
def _iter_unpacked(self):
# TODO(dborowitz): Merge this with iterobjects, if we can change its
# return type.
self._file.seek(self._header_size)
if self._num_objects is None:
return
for _ in range(self._num_objects):
offset = self._file.tell()
unpacked, unused = unpack_object(
self._file.read, compute_crc32=False)
unpacked.offset = offset
yield unpacked
self._file.seek(-len(unused), SEEK_CUR) # Back up over unused data.
def iterentries(self, progress=None):
"""Yield entries summarizing the contents of this pack.
:param progress: Progress function, called with current and total
object count.
:return: iterator of tuples with (sha, offset, crc32)
"""
num_objects = self._num_objects
resolve_ext_ref = (
self.pack.resolve_ext_ref if self.pack is not None else None)
indexer = PackIndexer.for_pack_data(
self, resolve_ext_ref=resolve_ext_ref)
for i, result in enumerate(indexer):
if progress is not None:
progress(i, num_objects)
yield result
def sorted_entries(self, progress=None):
"""Return entries in this pack, sorted by SHA.
:param progress: Progress function, called with current and total
object count
:return: List of tuples with (sha, offset, crc32)
"""
ret = sorted(self.iterentries(progress=progress))
return ret
def create_index_v1(self, filename, progress=None):
"""Create a version 1 file for this data file.
:param filename: Index filename.
:param progress: Progress report function
:return: Checksum of index file
"""
entries = self.sorted_entries(progress=progress)
with GitFile(filename, 'wb') as f:
return write_pack_index_v1(f, entries, self.calculate_checksum())
def create_index_v2(self, filename, progress=None):
"""Create a version 2 index file for this data file.
:param filename: Index filename.
:param progress: Progress report function
:return: Checksum of index file
"""
entries = self.sorted_entries(progress=progress)
with GitFile(filename, 'wb') as f:
return write_pack_index_v2(f, entries, self.calculate_checksum())
def create_index(self, filename, progress=None,
version=2):
"""Create an index file for this data file.
:param filename: Index filename.
:param progress: Progress report function
:return: Checksum of index file
"""
if version == 1:
return self.create_index_v1(filename, progress)
elif version == 2:
return self.create_index_v2(filename, progress)
else:
raise ValueError('unknown index format %d' % version)
def get_stored_checksum(self):
"""Return the expected checksum stored in this pack."""
self._file.seek(-20, SEEK_END)
return self._file.read(20)
def check(self):
"""Check the consistency of this pack."""
actual = self.calculate_checksum()
stored = self.get_stored_checksum()
if actual != stored:
raise ChecksumMismatch(stored, actual)
def get_object_at(self, offset):
"""Given an offset in to the packfile return the object that is there.
Using the associated index the location of an object can be looked up,
and then the packfile can be asked directly for that object using this
function.
"""
try:
return self._offset_cache[offset]
except KeyError:
pass
assert offset >= self._header_size
self._file.seek(offset)
unpacked, _ = unpack_object(self._file.read)
return (unpacked.pack_type_num, unpacked._obj())
class DeltaChainIterator(object):
"""Abstract iterator over pack data based on delta chains.
Each object in the pack is guaranteed to be inflated exactly once,
regardless of how many objects reference it as a delta base. As a result,
memory usage is proportional to the length of the longest delta chain.
Subclasses can override _result to define the result type of the iterator.
By default, results are UnpackedObjects with the following members set:
* offset
* obj_type_num
* obj_chunks
* pack_type_num
* delta_base (for delta types)
* comp_chunks (if _include_comp is True)
* decomp_chunks
* decomp_len
* crc32 (if _compute_crc32 is True)
"""
_compute_crc32 = False
_include_comp = False
def __init__(self, file_obj, resolve_ext_ref=None):
self._file = file_obj
self._resolve_ext_ref = resolve_ext_ref
self._pending_ofs = defaultdict(list)
self._pending_ref = defaultdict(list)
self._full_ofs = []
self._shas = {}
self._ext_refs = []
@classmethod
def for_pack_data(cls, pack_data, resolve_ext_ref=None):
walker = cls(None, resolve_ext_ref=resolve_ext_ref)
walker.set_pack_data(pack_data)
for unpacked in pack_data._iter_unpacked():
walker.record(unpacked)
return walker
def record(self, unpacked):
type_num = unpacked.pack_type_num
offset = unpacked.offset
if type_num == OFS_DELTA:
base_offset = offset - unpacked.delta_base
self._pending_ofs[base_offset].append(offset)
elif type_num == REF_DELTA:
self._pending_ref[unpacked.delta_base].append(offset)
else:
self._full_ofs.append((offset, type_num))
def set_pack_data(self, pack_data):
self._file = pack_data._file
def _walk_all_chains(self):
for offset, type_num in self._full_ofs:
for result in self._follow_chain(offset, type_num, None):
yield result
for result in self._walk_ref_chains():
yield result
assert not self._pending_ofs
def _ensure_no_pending(self):
if self._pending_ref:
raise KeyError([sha_to_hex(s) for s in self._pending_ref])
def _walk_ref_chains(self):
if not self._resolve_ext_ref:
self._ensure_no_pending()
return
for base_sha, pending in sorted(self._pending_ref.items()):
if base_sha not in self._pending_ref:
continue
try:
type_num, chunks = self._resolve_ext_ref(base_sha)
except KeyError:
# Not an external ref, but may depend on one. Either it will get
# popped via a _follow_chain call, or we will raise an error
# below.
continue
self._ext_refs.append(base_sha)
self._pending_ref.pop(base_sha)
for new_offset in pending:
for result in self._follow_chain(new_offset, type_num, chunks):
yield result
self._ensure_no_pending()
def _result(self, unpacked):
return unpacked
def _resolve_object(self, offset, obj_type_num, base_chunks):
self._file.seek(offset)
unpacked, _ = unpack_object(
self._file.read, include_comp=self._include_comp,
compute_crc32=self._compute_crc32)
unpacked.offset = offset
if base_chunks is None:
assert unpacked.pack_type_num == obj_type_num
else:
assert unpacked.pack_type_num in DELTA_TYPES
unpacked.obj_type_num = obj_type_num
unpacked.obj_chunks = apply_delta(base_chunks,
unpacked.decomp_chunks)
return unpacked
def _follow_chain(self, offset, obj_type_num, base_chunks):
# Unlike PackData.get_object_at, there is no need to cache offsets as
# this approach by design inflates each object exactly once.
todo = [(offset, obj_type_num, base_chunks)]
for offset, obj_type_num, base_chunks in todo:
unpacked = self._resolve_object(offset, obj_type_num, base_chunks)
yield self._result(unpacked)
unblocked = chain(self._pending_ofs.pop(unpacked.offset, []),
self._pending_ref.pop(unpacked.sha(), []))
todo.extend(
(new_offset, unpacked.obj_type_num, unpacked.obj_chunks)
for new_offset in unblocked)
def __iter__(self):
return self._walk_all_chains()
def ext_refs(self):
return self._ext_refs
class PackIndexer(DeltaChainIterator):
"""Delta chain iterator that yields index entries."""
_compute_crc32 = True
def _result(self, unpacked):
return unpacked.sha(), unpacked.offset, unpacked.crc32
class PackInflater(DeltaChainIterator):
"""Delta chain iterator that yields ShaFile objects."""
def _result(self, unpacked):
return unpacked.sha_file()
class SHA1Reader(object):
"""Wrapper around a file-like object that remembers the SHA1 of its data."""
def __init__(self, f):
self.f = f
self.sha1 = sha1(b'')
def read(self, num=None):
data = self.f.read(num)
self.sha1.update(data)
return data
def check_sha(self):
stored = self.f.read(20)
if stored != self.sha1.digest():
raise ChecksumMismatch(self.sha1.hexdigest(), sha_to_hex(stored))
def close(self):
return self.f.close()
def tell(self):
return self.f.tell()
class SHA1Writer(object):
"""Wrapper around a file-like object that remembers the SHA1 of its data."""
def __init__(self, f):
self.f = f
self.length = 0
self.sha1 = sha1(b'')
def write(self, data):
self.sha1.update(data)
self.f.write(data)
self.length += len(data)
def write_sha(self):
sha = self.sha1.digest()
assert len(sha) == 20
self.f.write(sha)
self.length += len(sha)
return sha
def close(self):
sha = self.write_sha()
self.f.close()
return sha
def offset(self):
return self.length
def tell(self):
return self.f.tell()
def pack_object_header(type_num, delta_base, size):
"""Create a pack object header for the given object info.
:param type_num: Numeric type of the object.
:param delta_base: Delta base offset or ref, or None for whole objects.
:param size: Uncompressed object size.
:return: A header for a packed object.
"""
header = []
c = (type_num << 4) | (size & 15)
size >>= 4
while size:
header.append(c | 0x80)
c = size & 0x7f
size >>= 7
header.append(c)
if type_num == OFS_DELTA:
ret = [delta_base & 0x7f]
delta_base >>= 7
while delta_base:
delta_base -= 1
ret.insert(0, 0x80 | (delta_base & 0x7f))
delta_base >>= 7
header.extend(ret)
elif type_num == REF_DELTA:
assert len(delta_base) == 20
header += delta_base
return bytearray(header)
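# Worked example (illustrative): a whole object of type 1 (commit) with size
# 100 packs into two bytes; 0x94 carries the type and the low four size bits
# with the continuation bit set, and 0x06 carries the remaining size bits.
#
#     assert pack_object_header(1, None, 100) == bytearray([0x94, 0x06])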
def write_pack_object(f, type, object, sha=None):
    """Write pack object to a file.

    :param f: File to write to
    :param type: Numeric type of the object
    :param object: Object to write
    :param sha: Optional SHA-1 object to update with the bytes written
    :return: CRC32 checksum of the data written
    """
if type in DELTA_TYPES:
delta_base, object = object
else:
delta_base = None
header = bytes(pack_object_header(type, delta_base, len(object)))
comp_data = zlib.compress(object)
crc32 = 0
for data in (header, comp_data):
f.write(data)
if sha is not None:
sha.update(data)
crc32 = binascii.crc32(data, crc32)
return crc32 & 0xffffffff
def write_pack(filename, objects, deltify=None, delta_window_size=None):
    """Write a new pack data file.

    :param filename: Path to the new pack file (without .pack extension)
    :param objects: Iterable of (object, path) tuples to write.
        Should provide __len__
    :param deltify: Whether to deltify pack objects
    :param delta_window_size: Delta window size
    :return: Tuple with checksum of pack file and index file
    """
with GitFile(filename + '.pack', 'wb') as f:
entries, data_sum = write_pack_objects(f, objects,
delta_window_size=delta_window_size, deltify=deltify)
entries = sorted([(k, v[0], v[1]) for (k, v) in entries.items()])
with GitFile(filename + '.idx', 'wb') as f:
return data_sum, write_pack_index_v2(f, entries, data_sum)
def write_pack_header(f, num_objects):
"""Write a pack header for the given number of objects."""
f.write(b'PACK') # Pack header
f.write(struct.pack(b'>L', 2)) # Pack version
f.write(struct.pack(b'>L', num_objects)) # Number of objects in pack
def deltify_pack_objects(objects, window_size=None):
"""Generate deltas for pack objects.
:param objects: An iterable of (object, path) tuples to deltify.
:param window_size: Window size; None for default
:return: Iterator over type_num, object id, delta_base, content
delta_base is None for full text entries
"""
if window_size is None:
window_size = DEFAULT_PACK_DELTA_WINDOW_SIZE
    # Build a list of objects ordered by the magic Linus heuristic
    # (type, path, descending size); it places good delta-base candidates
    # next to each other.
magic = []
for obj, path in objects:
magic.append((obj.type_num, path, -obj.raw_length(), obj))
magic.sort()
possible_bases = deque()
for type_num, path, neg_length, o in magic:
raw = o.as_raw_string()
winner = raw
winner_base = None
for base in possible_bases:
if base.type_num != type_num:
continue
delta = create_delta(base.as_raw_string(), raw)
if len(delta) < len(winner):
winner_base = base.sha().digest()
winner = delta
yield type_num, o.sha().digest(), winner_base, winner
possible_bases.appendleft(o)
while len(possible_bases) > window_size:
possible_bases.pop()
def write_pack_objects(f, objects, delta_window_size=None, deltify=False):
"""Write a new pack data file.
:param f: File to write to
:param objects: Iterable of (object, path) tuples to write.
Should provide __len__
    :param delta_window_size: Sliding window size for searching for deltas;
        set to None for the default window size.
:param deltify: Whether to deltify objects
:return: Dict mapping id -> (offset, crc32 checksum), pack checksum
"""
if deltify:
pack_contents = deltify_pack_objects(objects, delta_window_size)
else:
pack_contents = (
(o.type_num, o.sha().digest(), None, o.as_raw_string())
for (o, path) in objects)
return write_pack_data(f, len(objects), pack_contents)
def write_pack_data(f, num_records, records):
"""Write a new pack data file.
:param f: File to write to
:param num_records: Number of records
:param records: Iterator over type_num, object_id, delta_base, raw
:return: Dict mapping id -> (offset, crc32 checksum), pack checksum
"""
# Write the pack
entries = {}
f = SHA1Writer(f)
write_pack_header(f, num_records)
for type_num, object_id, delta_base, raw in records:
offset = f.offset()
if delta_base is not None:
try:
base_offset, base_crc32 = entries[delta_base]
except KeyError:
type_num = REF_DELTA
raw = (delta_base, raw)
else:
type_num = OFS_DELTA
raw = (offset - base_offset, raw)
crc32 = write_pack_object(f, type_num, raw)
entries[object_id] = (offset, crc32)
return entries, f.write_sha()
def write_pack_index_v1(f, entries, pack_checksum):
"""Write a new pack index file.
:param f: A file-like object to write to
:param entries: List of tuples with object name (sha), offset_in_pack,
and crc32_checksum.
:param pack_checksum: Checksum of the pack file.
:return: The SHA of the written index file
"""
f = SHA1Writer(f)
fan_out_table = defaultdict(lambda: 0)
for (name, offset, entry_checksum) in entries:
fan_out_table[ord(name[:1])] += 1
# Fan-out table
for i in range(0x100):
f.write(struct.pack('>L', fan_out_table[i]))
fan_out_table[i+1] += fan_out_table[i]
for (name, offset, entry_checksum) in entries:
        if not (offset <= 0xffffffff):
            raise TypeError("pack format 1 only supports offsets that fit in 32 bits")
f.write(struct.pack('>L20s', offset, name))
assert len(pack_checksum) == 20
f.write(pack_checksum)
return f.write_sha()
def _delta_encode_size(size):
ret = bytearray()
c = size & 0x7f
size >>= 7
while size:
ret.append(c | 0x80)
c = size & 0x7f
size >>= 7
ret.append(c)
return ret
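# Worked example (illustrative): delta sizes use the same little-endian,
# seven-bits-per-byte scheme; 1000 = 104 + 7 * 128, so it encodes as 0xe8
# (104 with the continuation bit set) followed by 0x07.
#
#     assert _delta_encode_size(1000) == bytearray([0xe8, 0x07])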
# The length of delta compression copy operations in version 2 packs is limited
# to 64K. To copy more, we use several copy operations. Version 3 packs allow
# 24-bit lengths in copy operations, but we always make version 2 packs.
_MAX_COPY_LEN = 0xffff
def _encode_copy_operation(start, length):
scratch = []
op = 0x80
for i in range(4):
if start & 0xff << i*8:
scratch.append((start >> i*8) & 0xff)
op |= 1 << i
for i in range(2):
if length & 0xff << i*8:
scratch.append((length >> i*8) & 0xff)
op |= 1 << (4+i)
return bytearray([op] + scratch)
def create_delta(base_buf, target_buf):
"""Use python difflib to work out how to transform base_buf to target_buf.
:param base_buf: Base buffer
:param target_buf: Target buffer
"""
assert isinstance(base_buf, bytes)
assert isinstance(target_buf, bytes)
out_buf = bytearray()
# write delta header
out_buf += _delta_encode_size(len(base_buf))
out_buf += _delta_encode_size(len(target_buf))
# write out delta opcodes
seq = difflib.SequenceMatcher(a=base_buf, b=target_buf)
for opcode, i1, i2, j1, j2 in seq.get_opcodes():
# Git patch opcodes don't care about deletes!
#if opcode == 'replace' or opcode == 'delete':
# pass
if opcode == 'equal':
# If they are equal, unpacker will use data from base_buf
# Write out an opcode that says what range to use
copy_start = i1
copy_len = i2 - i1
while copy_len > 0:
to_copy = min(copy_len, _MAX_COPY_LEN)
out_buf += _encode_copy_operation(copy_start, to_copy)
copy_start += to_copy
copy_len -= to_copy
if opcode == 'replace' or opcode == 'insert':
# If we are replacing a range or adding one, then we just
# output it to the stream (prefixed by its size)
s = j2 - j1
o = j1
while s > 127:
out_buf.append(127)
out_buf += bytearray(target_buf[o:o+127])
s -= 127
o += 127
out_buf.append(s)
out_buf += bytearray(target_buf[o:o+s])
return bytes(out_buf)
def apply_delta(src_buf, delta):
"""Based on the similar function in git's patch-delta.c.
:param src_buf: Source buffer
:param delta: Delta instructions
"""
if not isinstance(src_buf, bytes):
src_buf = b''.join(src_buf)
if not isinstance(delta, bytes):
delta = b''.join(delta)
out = []
index = 0
delta_length = len(delta)
def get_delta_header_size(delta, index):
size = 0
i = 0
while delta:
cmd = ord(delta[index:index+1])
index += 1
size |= (cmd & ~0x80) << i
i += 7
if not cmd & 0x80:
break
return size, index
src_size, index = get_delta_header_size(delta, index)
dest_size, index = get_delta_header_size(delta, index)
assert src_size == len(src_buf), '%d vs %d' % (src_size, len(src_buf))
while index < delta_length:
cmd = ord(delta[index:index+1])
index += 1
if cmd & 0x80:
cp_off = 0
for i in range(4):
if cmd & (1 << i):
x = ord(delta[index:index+1])
index += 1
cp_off |= x << (i * 8)
cp_size = 0
# Version 3 packs can contain copy sizes larger than 64K.
for i in range(3):
if cmd & (1 << (4+i)):
x = ord(delta[index:index+1])
index += 1
cp_size |= x << (i * 8)
if cp_size == 0:
cp_size = 0x10000
if (cp_off + cp_size < cp_size or
cp_off + cp_size > src_size or
cp_size > dest_size):
break
out.append(src_buf[cp_off:cp_off+cp_size])
elif cmd != 0:
out.append(delta[index:index+cmd])
index += cmd
else:
raise ApplyDeltaError('Invalid opcode 0')
if index != delta_length:
raise ApplyDeltaError('delta not empty: %r' % delta[index:])
if dest_size != chunks_length(out):
raise ApplyDeltaError('dest size incorrect')
return out
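# Round-trip sanity check (illustrative): a delta produced by create_delta
# should rebuild the target when applied to the base.
#
#     base = b'the quick brown fox'
#     target = b'the quick red fox jumps'
#     delta = create_delta(base, target)
#     assert b''.join(apply_delta(base, delta)) == target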
def write_pack_index_v2(f, entries, pack_checksum):
"""Write a new pack index file.
:param f: File-like object to write to
:param entries: List of tuples with object name (sha), offset_in_pack, and
crc32_checksum.
:param pack_checksum: Checksum of the pack file.
:return: The SHA of the index file written
"""
f = SHA1Writer(f)
f.write(b'\377tOc') # Magic!
f.write(struct.pack('>L', 2))
fan_out_table = defaultdict(lambda: 0)
for (name, offset, entry_checksum) in entries:
fan_out_table[ord(name[:1])] += 1
# Fan-out table
largetable = []
for i in range(0x100):
f.write(struct.pack(b'>L', fan_out_table[i]))
fan_out_table[i+1] += fan_out_table[i]
for (name, offset, entry_checksum) in entries:
f.write(name)
for (name, offset, entry_checksum) in entries:
f.write(struct.pack(b'>L', entry_checksum))
for (name, offset, entry_checksum) in entries:
if offset < 2**31:
f.write(struct.pack(b'>L', offset))
else:
f.write(struct.pack(b'>L', 2**31 + len(largetable)))
largetable.append(offset)
for offset in largetable:
f.write(struct.pack(b'>Q', offset))
assert len(pack_checksum) == 20
f.write(pack_checksum)
return f.write_sha()
write_pack_index = write_pack_index_v2
class Pack(object):
"""A Git pack object."""
def __init__(self, basename, resolve_ext_ref=None):
self._basename = basename
self._data = None
self._idx = None
self._idx_path = self._basename + '.idx'
self._data_path = self._basename + '.pack'
self._data_load = lambda: PackData(self._data_path)
self._idx_load = lambda: load_pack_index(self._idx_path)
self.resolve_ext_ref = resolve_ext_ref
@classmethod
def from_lazy_objects(self, data_fn, idx_fn):
"""Create a new pack object from callables to load pack data and
index objects."""
ret = Pack('')
ret._data_load = data_fn
ret._idx_load = idx_fn
return ret
@classmethod
def from_objects(self, data, idx):
"""Create a new pack object from pack data and index objects."""
ret = Pack('')
ret._data_load = lambda: data
ret._idx_load = lambda: idx
return ret
def name(self):
"""The SHA over the SHAs of the objects in this pack."""
return self.index.objects_sha1()
@property
def data(self):
"""The pack data object being used."""
if self._data is None:
self._data = self._data_load()
self._data.pack = self
self.check_length_and_checksum()
return self._data
@property
def index(self):
"""The index being used.
:note: This may be an in-memory index
"""
if self._idx is None:
self._idx = self._idx_load()
return self._idx
def close(self):
if self._data is not None:
self._data.close()
if self._idx is not None:
self._idx.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def __eq__(self, other):
return isinstance(self, type(other)) and self.index == other.index
def __len__(self):
"""Number of entries in this pack."""
return len(self.index)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self._basename)
def __iter__(self):
"""Iterate over all the sha1s of the objects in this pack."""
return iter(self.index)
def check_length_and_checksum(self):
"""Sanity check the length and checksum of the pack index and data."""
assert len(self.index) == len(self.data)
idx_stored_checksum = self.index.get_pack_checksum()
data_stored_checksum = self.data.get_stored_checksum()
if idx_stored_checksum != data_stored_checksum:
raise ChecksumMismatch(sha_to_hex(idx_stored_checksum),
sha_to_hex(data_stored_checksum))
def check(self):
"""Check the integrity of this pack.
:raise ChecksumMismatch: if a checksum for the index or data is wrong
"""
self.index.check()
self.data.check()
for obj in self.iterobjects():
obj.check()
# TODO: object connectivity checks
def get_stored_checksum(self):
return self.data.get_stored_checksum()
def __contains__(self, sha1):
"""Check whether this pack contains a particular SHA1."""
try:
self.index.object_index(sha1)
return True
except KeyError:
return False
def get_raw(self, sha1):
offset = self.index.object_index(sha1)
obj_type, obj = self.data.get_object_at(offset)
type_num, chunks = self.data.resolve_object(offset, obj_type, obj)
return type_num, b''.join(chunks)
def __getitem__(self, sha1):
"""Retrieve the specified SHA1."""
type, uncomp = self.get_raw(sha1)
return ShaFile.from_raw_string(type, uncomp, sha=sha1)
def iterobjects(self):
"""Iterate over the objects in this pack."""
return iter(PackInflater.for_pack_data(
self.data, resolve_ext_ref=self.resolve_ext_ref))
def pack_tuples(self):
"""Provide an iterable for use with write_pack_objects.
:return: Object that can iterate over (object, path) tuples
and provides __len__
"""
class PackTupleIterable(object):
def __init__(self, pack):
self.pack = pack
def __len__(self):
return len(self.pack)
def __iter__(self):
return ((o, None) for o in self.pack.iterobjects())
return PackTupleIterable(self)
def keep(self, msg=None):
"""Add a .keep file for the pack, preventing git from garbage collecting it.
:param msg: A message written inside the .keep file; can be used later to
determine whether or not a .keep file is obsolete.
:return: The path of the .keep file, as a string.
"""
keepfile_name = '%s.keep' % self._basename
with GitFile(keepfile_name, 'wb') as keepfile:
if msg:
keepfile.write(msg)
keepfile.write(b'\n')
return keepfile_name
try:
from dulwich._pack import apply_delta, bisect_find_sha
except ImportError:
pass
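# Usage sketch (illustrative; 'pack-1234abcd' is a hypothetical basename for
# an on-disk pack-1234abcd.pack / pack-1234abcd.idx pair):
#
#     with Pack('pack-1234abcd') as p:
#         p.check()              # verify index and data checksums
#         for hex_sha in p:      # iterate over the SHAs in the index
#             obj = p[hex_sha]   # inflate the object, resolving any deltas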
|
[
"tony.pig@gmail.com"
] |
tony.pig@gmail.com
|
c806d03688663a7c63addca02154f9c4fd0d7d4f
|
8103c04c3478910903189e4a99d528159e1ae661
|
/Versoes.py
|
705419c7cf369350832204aca9ff5a1154d9127a
|
[] |
no_license
|
franciscopereira32/Cadastro.py-1.0
|
31c7d57eac6eff8c4b235e5f8e7ed51b9042b93e
|
86fdf04f21e373d5fbbb5981a77a7df6de976ab2
|
refs/heads/main
| 2023-03-15T04:27:33.615602
| 2021-03-16T19:01:03
| 2021-03-16T19:01:03
| 348,455,661
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,194
|
py
|
from Banco import Banco
class Versoes(object):
def __init__(self, swversao = "", produto = "", hw = "",
nb = "", fornecedor = ""):
self.info = {}
self.swversao = swversao
self.produto= produto
self.hw = hw
self.nb = nb
self.fornecedor = fornecedor
    def inserirVersoes(self):
        banco = Banco()
        try:
            c = banco.conexao.cursor()
            # Parameterized query instead of string concatenation, to avoid
            # SQL injection; this assumes the driver behind Banco uses the
            # qmark ("?") paramstyle, as sqlite3 does.
            c.execute("insert into versoes (swversao, produto, hw, nb, fornecedor) values (?, ?, ?, ?, ?)",
                      (self.swversao, self.produto, self.hw, self.nb, self.fornecedor))
            banco.conexao.commit()
            c.close()
            return "Version registered successfully!"
        except Exception:
            return "An error occurred while inserting the version"
    def updateVersaoes(self):
        banco = Banco()
        try:
            c = banco.conexao.cursor()
            c.execute("update versoes set produto = ?, hw = ?, nb = ? , fornecedor = ? where swversao = ?",
                      (self.produto, self.hw, self.nb, self.fornecedor, self.swversao))
            banco.conexao.commit()
            c.close()
            return "Version updated successfully!"
        except Exception:
            return "An error occurred while updating the version"
    def deleteVersoes(self):
        banco = Banco()
        try:
            c = banco.conexao.cursor()
            c.execute("delete from versoes where swversao = ?", (self.swversao,))
            banco.conexao.commit()
            c.close()
            return "Version deleted successfully!"
        except Exception:
            return "An error occurred while deleting the version"
    def buscarVersoes(self, swversao):
        banco = Banco()
        try:
            c = banco.conexao.cursor()
            c.execute("select * from versoes where swversao = ?", (swversao,))
            for linha in c:
                self.swversao = linha[0]
                self.produto = linha[1]
                self.hw = linha[2]
                self.nb = linha[3]
                self.fornecedor = linha[4]
            c.close()
            return "Search completed successfully!"
        except Exception:
            return "An error occurred while searching for the version"
|
[
"noreply@github.com"
] |
franciscopereira32.noreply@github.com
|
c0fc94a656f1cee1a7c8ee20e88f8085721c9112
|
c67f449dc7187f154df7093a95ddcc14a3f0a18f
|
/youngseokcoin/test/functional/net.py
|
a4a15da130025b87c9678b54942f92c910989ea7
|
[
"MIT"
] |
permissive
|
youngseokaaa-presentation/A_system_to_ensure_the_integrity_of_Internet_of_things_by_using_Blockchain
|
cee9ba19e9d029759fc2fe4a43235c56fd9abe43
|
b2a47bc63386b5a115fc3ce62997034ebd8d4a1e
|
refs/heads/master
| 2023-02-17T07:58:43.043470
| 2021-01-11T05:40:28
| 2021-01-11T05:40:28
| 295,317,246
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,225
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Youngseokcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC calls related to net.
Tests correspond to code in rpc/net.cpp.
"""
import time
from test_framework.test_framework import YoungseokcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes_bi,
p2p_port,
)
class NetTest(YoungseokcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def run_test(self):
self._test_connection_count()
self._test_getnettotals()
self._test_getnetworkinginfo()
self._test_getaddednodeinfo()
self._test_getpeerinfo()
def _test_connection_count(self):
# connect_nodes_bi connects each node to the other
assert_equal(self.nodes[0].getconnectioncount(), 2)
def _test_getnettotals(self):
# check that getnettotals totalbytesrecv and totalbytessent
# are consistent with getpeerinfo
peer_info = self.nodes[0].getpeerinfo()
assert_equal(len(peer_info), 2)
net_totals = self.nodes[0].getnettotals()
assert_equal(sum([peer['bytesrecv'] for peer in peer_info]),
net_totals['totalbytesrecv'])
assert_equal(sum([peer['bytessent'] for peer in peer_info]),
net_totals['totalbytessent'])
# test getnettotals and getpeerinfo by doing a ping
# the bytes sent/received should change
# note ping and pong are 32 bytes each
self.nodes[0].ping()
time.sleep(0.1)
peer_info_after_ping = self.nodes[0].getpeerinfo()
net_totals_after_ping = self.nodes[0].getnettotals()
for before, after in zip(peer_info, peer_info_after_ping):
assert_equal(before['bytesrecv_per_msg']['pong'] + 32, after['bytesrecv_per_msg']['pong'])
assert_equal(before['bytessent_per_msg']['ping'] + 32, after['bytessent_per_msg']['ping'])
assert_equal(net_totals['totalbytesrecv'] + 32*2, net_totals_after_ping['totalbytesrecv'])
assert_equal(net_totals['totalbytessent'] + 32*2, net_totals_after_ping['totalbytessent'])
def _test_getnetworkinginfo(self):
assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
self.nodes[0].setnetworkactive(False)
assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False)
timeout = 3
while self.nodes[0].getnetworkinfo()['connections'] != 0:
# Wait a bit for all sockets to close
assert timeout > 0, 'not all connections closed in time'
timeout -= 0.1
time.sleep(0.1)
self.nodes[0].setnetworkactive(True)
connect_nodes_bi(self.nodes, 0, 1)
assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
def _test_getaddednodeinfo(self):
assert_equal(self.nodes[0].getaddednodeinfo(), [])
# add a node (node2) to node0
ip_port = "127.0.0.1:{}".format(p2p_port(2))
self.nodes[0].addnode(ip_port, 'add')
# check that the node has indeed been added
added_nodes = self.nodes[0].getaddednodeinfo(ip_port)
assert_equal(len(added_nodes), 1)
assert_equal(added_nodes[0]['addednode'], ip_port)
# check that a non-existant node returns an error
assert_raises_rpc_error(-24, "Node has not been added",
self.nodes[0].getaddednodeinfo, '1.1.1.1')
def _test_getpeerinfo(self):
peer_info = [x.getpeerinfo() for x in self.nodes]
# check both sides of bidirectional connection between nodes
# the address bound to on one side will be the source address for the other node
assert_equal(peer_info[0][0]['addrbind'], peer_info[1][0]['addr'])
assert_equal(peer_info[1][0]['addrbind'], peer_info[0][0]['addr'])
if __name__ == '__main__':
NetTest().main()
|
[
"youngseokaaa@gmail.com"
] |
youngseokaaa@gmail.com
|
7ffbc5519c0ad6ddc3bad591dcb77a78f27c5333
|
3ef2b760c607b80e82f4941358e6c8cc67659d3b
|
/lib/utils.py
|
7f4c4d61999620f3ee10ab50df5b07eccd97081b
|
[] |
no_license
|
LAYTAT/Senti-Attend
|
a4e2494fea22fc0861513c5637a1af91233fd2a5
|
bf076fdb1831d27a4452e12943cb04477c8adaa3
|
refs/heads/master
| 2020-07-06T17:20:23.386645
| 2019-08-09T01:53:09
| 2019-08-09T01:53:09
| 203,089,460
| 1
| 0
| null | 2019-08-19T03:09:37
| 2019-08-19T03:09:35
| null |
UTF-8
|
Python
| false
| false
| 4,859
|
py
|
import numpy as np
import cPickle as pickle
import hickle
import time
import os
def load_coco_data(data_path='./data', split='train'):
start_t = time.time()
data = {}
if split == "debug":
split = 'val'
with open(os.path.join(os.path.join(data_path, 'train'), 'word_to_idx.pkl'), 'rb') as f:
data['word_to_idx'] = pickle.load(f)
data_path = os.path.join(data_path, split)
data['features'] = hickle.load(os.path.join(data_path, '%s.features.hkl' % split))
with open(os.path.join(data_path, '%s.file.names.pkl' % split), 'rb') as f:
data['file_names'] = pickle.load(f)
with open(os.path.join(data_path, '%s.captions.pkl' % split), 'rb') as f:
data['captions'] = pickle.load(f)
with open(os.path.join(data_path, '%s.image.idxs.pkl' % split), 'rb') as f:
data['image_idxs'] = pickle.load(f)
if split == 'train':
with open(os.path.join(data_path, 'word_to_idx.pkl'), 'rb') as f:
data['word_to_idx'] = pickle.load(f)
for k, v in data.iteritems():
if type(v) == np.ndarray:
print k, type(v), v.shape, v.dtype
else:
print k, type(v), len(v)
end_t = time.time()
print "Elapse time: %.2f" % (end_t - start_t)
return data
def load_inference_data(data_path='./data'):
start_t = time.time()
data = {}
data['features'] = hickle.load(os.path.join(data_path, 'inference.features.hkl'))
with open(os.path.join(data_path, 'inference.file.names.pkl'), 'rb') as f:
data['file_names'] = pickle.load(f)
with open(os.path.join(data_path, 'inference.image.idxs.pkl'), 'rb') as f:
data['image_idxs'] = pickle.load(f)
    for k, v in data.iteritems():
        if type(v) == np.ndarray:
            print k, type(v), v.shape, v.dtype
        else:
            print k, type(v), len(v)
    end_t = time.time()
    print "Elapsed time: %.2f" % (end_t - start_t)
    return data
def decode_captions(captions, idx_to_word):
if captions.ndim == 1:
T = captions.shape[0]
N = 1
else:
N, T = captions.shape
decoded = []
for i in range(N):
words = []
for t in range(T):
if captions.ndim == 1:
word = idx_to_word[captions[t]]
else:
word = idx_to_word[captions[i, t]]
if word == '<END>':
words.append('.')
break
if word != '<NULL>':
words.append(word)
decoded.append(' '.join(words))
return decoded
def decode_captions_for_blue(captions, idx_to_word):
if captions.ndim == 1:
T = captions.shape[0]
N = 1
else:
N, T = captions.shape
decoded = []
masks = []
for i in range(N):
words = []
mask = []
for t in range(T):
if captions.ndim == 1:
word = idx_to_word[captions[t]]
else:
word = idx_to_word[captions[i, t]]
if word == '<END>':
words.append('.')
mask.append(1)
break
if word != '<NULL>':
words.append(word)
mask.append(1)
decoded.append(' '.join(words))
mask.extend([0]*(T-len(mask)))
masks.append(mask)
return masks, decoded
def sample_coco_minibatch(data, batch_size):
data_size = data['features'].shape[0]
mask = np.random.choice(data_size, batch_size)
features = data['features'][mask]
file_names = data['file_names'][mask]
return features, file_names
def sample_coco_minibatch_inference(data, batch_size):
mask = np.array([0,1,2,3,4,5,6,7,8,9])
features = data['features'][mask]
file_names = data['file_names'][mask]
return features, file_names
def write_bleu(scores, path, epoch, senti):
if epoch == 0 and senti == [1]:
file_mode = 'w'
else:
file_mode = 'a'
with open(os.path.join(path, 'val.bleu.scores.txt'), file_mode) as f:
f.write('Epoch %d\n' %(epoch+1))
if senti==[1]:
f.write('positive\n')
else:
f.write('negative\n')
f.write('Bleu_1: %f\n' %scores['Bleu_1'])
f.write('Bleu_2: %f\n' %scores['Bleu_2'])
f.write('Bleu_3: %f\n' %scores['Bleu_3'])
f.write('Bleu_4: %f\n' %scores['Bleu_4'])
f.write('METEOR: %f\n' %scores['METEOR'])
f.write('ROUGE_L: %f\n' %scores['ROUGE_L'])
f.write('CIDEr: %f\n\n' %scores['CIDEr'])
def load_pickle(path):
with open(path, 'rb') as f:
file = pickle.load(f)
print ('Loaded %s..' % path)
return file
def save_pickle(data, path):
with open(path, 'wb') as f:
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
print ('Saved %s..' % path)
|
[
"odnzmi@gmail.com"
] |
odnzmi@gmail.com
|
1148775b89c0a6ca12c5770758ed5ae05744f99e
|
5e31e137bfda29c946024f01dd43e18e24e4e6df
|
/ProjetoDjango/Ouvidoria/admin.py
|
6277653ad0e856e4e57a1ef19da9ea702b2f0999
|
[] |
no_license
|
osvaldoj12/ProjetoDjango
|
0584a5702a7f096590591a0b32a949c7cb5e4e61
|
12ee83c1d07a1fc0d91719d10c7ddc9f8e6ab239
|
refs/heads/master
| 2020-09-27T12:57:58.539685
| 2019-12-07T14:50:37
| 2019-12-07T14:50:37
| 226,521,894
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 304
|
py
|
from django.contrib import admin
from Ouvidoria.models import Sugestoes, Elogios, Reclamacoes, Denuncias, Funcionario
# Register your models here.
admin.site.register(Sugestoes)
admin.site.register(Elogios)
admin.site.register(Reclamacoes)
admin.site.register(Denuncias)
admin.site.register(Funcionario)
|
[
"osvaldojuniorlima@hotmail.com"
] |
osvaldojuniorlima@hotmail.com
|
fcf6ea3c41543c2039b67ee2278d7ee410816469
|
d27a77f8dfec5f510aed31dabf2fddd30c0dae5f
|
/brosys/urls.py
|
a8a4ef1d346daade000689680f91b68f455c151a
|
[] |
no_license
|
MixBars/brosys
|
3ec1e82927ab45cad7993d5e8d557845c8ca10d3
|
40fef7a33294b9c087d7094eac9c27cf8e00be19
|
refs/heads/master
| 2023-09-06T09:43:14.677035
| 2021-11-02T22:53:02
| 2021-11-02T22:53:02
| 424,001,266
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,064
|
py
|
"""brosys URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls import url
from django.views.static import serve
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('core.urls')),
url(r'^media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT}),
url(r'^static/(?P<path>.*)$', serve, {'document_root': settings.STATIC_ROOT}),
]
|
[
"portalmixbars@gmail.com"
] |
portalmixbars@gmail.com
|
b80973b7ecf39e1f551d773d6bf72d6bdd294317
|
ea6ffbcf929941513d171f9ebf959ec42d633612
|
/TradingNews/wsgi.py
|
e8869caa62e0b989d57df9c7820c1c9c82106312
|
[] |
no_license
|
Souce01/TradingNews
|
43c82496d6da3cf773e993da9cf8ca1f5a1a78ff
|
392497419c44fe170ba425e86db40afce29b1857
|
refs/heads/master
| 2023-07-24T05:28:42.955035
| 2021-08-27T02:57:45
| 2021-08-27T02:57:45
| 287,260,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for TradingNews project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'TradingNews.settings')
application = get_wsgi_application()
|
[
"alexandresoucy7@gmail.com"
] |
alexandresoucy7@gmail.com
|
29ff7530a12d24ef2ff8f6e0744b6cf91faba8cf
|
a5b09349bb10685621788f815acfcef23e93b540
|
/tests/test_set_item.py
|
6e7fa7390740d64e413f0be77568016de3a82fe9
|
[] |
no_license
|
yishuihanhan/slimurl
|
05d95edf3b83a118bc22a4fda4f0e8ca9d4662f7
|
d6ee69b0c825dcc4129bb265bd97e61218b73ccc
|
refs/heads/master
| 2020-04-02T08:34:55.228207
| 2017-01-10T10:09:50
| 2017-01-10T10:09:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 639
|
py
|
#!/usr/bin/env python
# encoding: utf-8
from slimurl import URL
def check_set(url, args, result):
key, value = args
url[key] = value
assert url == result
def test_set():
cases = [
["http://example.net/", ('foo', 'bar'), "http://example.net/?foo=bar"],
["http://example.net/", ('foo', (0, 1)), "http://example.net/?foo=0&foo=1"],
["http://example.net/", ('foo', ("0", "1")), "http://example.net/?foo=0&foo=1"],
["http://example.net/", ('foo', (0, "1")), "http://example.net/?foo=0&foo=1"],
]
for url, args, result in cases:
yield check_set, URL(url), args, URL(result)
|
[
"me@mosquito.su"
] |
me@mosquito.su
|
75dbe56cf58aa518de51464a64dfaa8d7f95feea
|
9e929843f73b099456bab9df1b08971288e3b839
|
/tests/integration_tests/vectors_tests/test_lower_than_or_equals.py
|
bedb21d4a788496d3c2bbb9138f82d33ab29733b
|
[
"MIT"
] |
permissive
|
lycantropos/cppstd
|
fd20a37c46bd730d15b6e5c34e795f39907fad75
|
2a44dad540a8cc36e7fac8805cf2f5402be34aee
|
refs/heads/master
| 2023-01-11T01:13:25.821513
| 2020-11-12T23:19:40
| 2020-11-12T23:19:40
| 302,339,499
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
from hypothesis import given
from tests.utils import (BoundPortedVectorsPair,
equivalence)
from . import strategies
@given(strategies.vectors_pairs, strategies.vectors_pairs)
def test_basic(first_pair: BoundPortedVectorsPair,
second_pair: BoundPortedVectorsPair) -> None:
first_bound, first_ported = first_pair
second_bound, second_ported = second_pair
assert equivalence(first_bound <= second_bound,
first_ported <= second_ported)
|
[
"azatibrakov@gmail.com"
] |
azatibrakov@gmail.com
|
d6bc7164d5e3d4930bcdbb6d3a9ddfdd071805e2
|
98b3ba54c5c078f5b6973b633ced5ae5fdce39b1
|
/A基础知识/错误调试和测试/错误处理.py
|
2654ede49800b1b81d1dc4ecb350879119285d32
|
[] |
no_license
|
FutaoSmile/pythonLearn
|
97766292aad100b210dfa04e7bbfc0757418f915
|
0cbafc47826ebad95b534020e81ce995a4d472bd
|
refs/heads/master
| 2021-07-21T19:35:20.337829
| 2021-07-15T02:51:41
| 2021-07-15T02:51:41
| 202,057,691
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 893
|
py
|
import logging
logging.basicConfig(level=logging.INFO)
try:
    a = 0 / int(input('Please enter a number'))
    # Multiple exception types can be caught at the same time
except ZeroDivisionError as ze:
    print('A division-by-zero error occurred', ze)
    logging.exception('A division-by-zero error occurred: %s', ze)
# Multiple exception types can be caught at the same time
except ValueError as ve:
    print('An integer conversion error occurred', ve)
    logging.exception('An integer conversion error occurred: %s', ve)
# No exception occurred
else:
    print('No exception occurred, good job')
    logging.info('No exception occurred, good job')
finally:
    print('This line must always be printed')
    logging.info('This line must always be printed')
# All error types inherit from the BaseException class
# If an error is not caught, it keeps propagating upward until the Python interpreter
# catches it, prints an error message, and the program exits
# Reading the traceback from top to bottom shows the whole chain of calls that raised the error
|
[
"1185172056@qq.com"
] |
1185172056@qq.com
|
52a625ea02f8c1c83dee9dfc16faba640999ba8c
|
5ed2a2f1f5915786f0ee1d084582dd251a5c8fff
|
/honour roll calc.py
|
1dee679d3cd59db698ffe0f2f353956e7c63d98d
|
[] |
no_license
|
khematil/Python-Averag-Calc
|
aaae00f023bea9f0f1858f54e8de34e981e723ec
|
f0397671dbc84d03ce10ee796a5a231b312e74ee
|
refs/heads/master
| 2020-04-23T16:55:48.589983
| 2019-02-18T16:11:38
| 2019-02-18T16:11:38
| 171,314,813
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,653
|
py
|
#Honour Roll Calculator
sum = 0 #counter
while True:
try:
coursenumbers = int(raw_input("How many courses were there? (1-8)")) #asks user for course amnt
while coursenumbers >=9 or coursenumbers <=0: #while loop so you cant enter anything more than 9 or less than 0
print "Stop right there, you must enter a number between 1-8! Try again!"
coursenumbers = int(raw_input("How many courses were there? (1-8)"))
else: #code to prevent it from entering strings into the input
break
except ValueError:
print "Stop right there, you must enter a number between 1-8! Try again!"
for i in range(coursenumbers): #for loop. loops the amount of courses there are
mark = input("What were your averages? (0-100)") #input for averages
while mark >100 or mark <0: #while loop so you cant enter more than 100, less than 0
print "Stop right there, you must enter a number between 0-100! Try again!"
mark = input("What were your averages? (0-100)")
  if mark <=100 and mark >=0: #if it meets the range 0 - 100 then add that number to sum
sum=sum+mark
totalaverage = float(sum)/coursenumbers #divide sum by course numbers to get average
print "The amount of course(s) that you have is {0}".format(coursenumbers)
print "Your total average is {0}".format(totalaverage)
if totalaverage >= 80: #if you have more than an 80 average you get honour roll
print "Congratulations you made honour roll."
else: #otherwise you didnt get honour roll
print "You didn't make honour roll! :C, that must really suck man."
|
[
"noreply@github.com"
] |
khematil.noreply@github.com
|
b9464a20171c79cf512a724b383ba1aa947fb884
|
324aad96376dce516263e795599bdf3d40978154
|
/alien_invasion.py
|
bd5a0b04865a0759aba88251c4b906610b717dd6
|
[] |
no_license
|
andrewsanc/ALIENS
|
6dd983614028052eb2abb0338f075e39b3ad04fb
|
3e6bf3789576214d87c68bdbf4aef731d3f32418
|
refs/heads/master
| 2020-03-18T22:30:40.616197
| 2018-05-29T20:37:55
| 2018-05-29T20:37:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 438
|
py
|
import sys
import pygame
def run_game():
# Initialize game and create a screen object
pygame.init()
screen = pygame.display.set_mode((1200, 800))
pygame.display.set_caption("Alien Invasion")
# Start the main loop for the game
while True:
# Watch for keyboard and mouse events
        for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
pygame.display.flip()
run_game()
|
[
"druu831@gmail.com"
] |
druu831@gmail.com
|
1c1917ab1339c7cbb623080afbb9a8125b03933c
|
7c25e479b21b1e3e69a6be140f6511a892901182
|
/python_practice/middle_linked_list.py
|
5379e1fe8bdae8d8f5d08bb398c0fd1504ec458c
|
[] |
no_license
|
ahrav/Python-Django
|
6be3e3b5a39a6eabcf2b97b071232f8b85de64d3
|
8a2a3f706aab557b872f27e780bd7e4ebd274f72
|
refs/heads/master
| 2022-09-09T01:29:31.391833
| 2019-05-23T03:34:15
| 2019-05-23T03:34:15
| 181,137,783
| 0
| 0
| null | 2022-08-23T00:22:08
| 2019-04-13T07:40:44
|
Python
|
UTF-8
|
Python
| false
| false
| 670
|
py
|
class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def push(self, new_data):
new_node = Node(new_data)
new_node.next = self.head
self.head = new_node
def printMiddle(self):
slow_ptr = self.head
fast_ptr = self.head
        # Return None for an empty list instead of raising AttributeError.
        if self.head is None:
            return None
        while fast_ptr is not None and fast_ptr.next is not None:
            fast_ptr = fast_ptr.next.next
            slow_ptr = slow_ptr.next
        return slow_ptr.data
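# Quick demo (illustrative addition, not in the original file): pushing 1..5
# builds the list 5 -> 4 -> 3 -> 2 -> 1, whose middle element is 3.
if __name__ == '__main__':
    llist = LinkedList()
    for value in [1, 2, 3, 4, 5]:
        llist.push(value)
    print(llist.printMiddle())  # -> 3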
|
[
"ahravdutta02@gmail.com"
] |
ahravdutta02@gmail.com
|
8ffd41f7dfd8ec01e5a83eabf84d1c32b0c453f6
|
85bf023f561caa580703347798cdbbbc4afc0c5f
|
/leetcode/53.Maximum Subarray/maximum_subarray_greed.py
|
0d16e1a32a34041804dbe297c065f776c3158318
|
[] |
no_license
|
Math312/Algorithm-python
|
f346b74ec0b6ee8cce9cbf820c39a9a63c4db35f
|
f047150ffa71d4c0c4bcfc9ce6d1ea155e361655
|
refs/heads/master
| 2020-11-24T23:29:38.582202
| 2020-03-03T10:49:55
| 2020-03-03T10:49:55
| 228,387,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
py
|
class Solution(object):
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
sum_num = 0
max_num = nums[0]
for i in nums:
sum_num += i
max_num = max(sum_num, max_num)
if sum_num < 0:
sum_num = 0
return max_num
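# Quick check (illustrative addition, not in the original file) using the
# classic example: the best subarray [4, -1, 2, 1] sums to 6.
if __name__ == '__main__':
    print(Solution().maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # -> 6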
|
[
"894688078@qq.com"
] |
894688078@qq.com
|
4b30d61e07bfa3a4839fcb6fe9d0d2e52479a80d
|
401f783a202949adbf144b5780bcd87a6daf2299
|
/code/python/Day-69/SnakeGame.py
|
c61b7e9c28275ea314027b26a33f30786ac67215
|
[] |
no_license
|
TalatWaheed/100-days-code
|
1934c8113e6e7be86ca86ea66c518d2f2cedf82a
|
b8fd92d4ddb6adc4089d38ac7ccd2184f9c47919
|
refs/heads/master
| 2021-07-04T14:28:45.363798
| 2019-03-05T13:49:55
| 2019-03-05T13:49:55
| 140,101,486
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,380
|
py
|
# SNAKES GAME
# Use ARROW KEYS to play, SPACE BAR for pausing/resuming and Esc Key for exiting
import curses
from curses import KEY_RIGHT, KEY_LEFT, KEY_UP, KEY_DOWN
from random import randint
curses.initscr()
win = curses.newwin(20, 60, 0, 0)
win.keypad(1)
curses.noecho()
curses.curs_set(0)
win.border(0)
win.nodelay(1)
key = KEY_RIGHT
score = 0
snake = [[4,10], [4,9], [4,8]]
food = [10,20]
win.addch(food[0], food[1], '*')
while key != 27:
win.border(0)
win.addstr(0, 2, 'Score : ' + str(score) + ' ')
win.addstr(0, 27, ' SNAKE ')
win.timeout(150 - (len(snake)/5 + len(snake)/10)%120)
prevKey = key
event = win.getch()
key = key if event == -1 else event
if key == ord(' '):
key = -1
while key != ord(' '):
key = win.getch()
key = prevKey
continue
if key not in [KEY_LEFT, KEY_RIGHT, KEY_UP, KEY_DOWN, 27]:
key = prevKey
snake.insert(0, [snake[0][0] + (key == KEY_DOWN and 1) + (key == KEY_UP and -1), snake[0][1] + (key == KEY_LEFT and -1) + (key == KEY_RIGHT and 1)])
if snake[0][0] == 0: snake[0][0] = 18
if snake[0][1] == 0: snake[0][1] = 58
if snake[0][0] == 19: snake[0][0] = 1
if snake[0][1] == 59: snake[0][1] = 1
# Exit if snake crosses the boundaries (Uncomment to enable)
#if snake[0][0] == 0 or snake[0][0] == 19 or snake[0][1] == 0 or snake[0][1] == 59: break
if snake[0] in snake[1:]: break
if snake[0] == food:
food = []
score += 1
while food == []:
food = [randint(1, 18), randint(1, 58)]
if food in snake: food = []
win.addch(food[0], food[1], '*')
else:
last = snake.pop()
win.addch(last[0], last[1], ' ')
win.addch(snake[0][0], snake[0][1], '#')
curses.endwin()
print("\nScore - " + str(score))
print("http://bitemelater.in\n")
|
[
"noreply@github.com"
] |
TalatWaheed.noreply@github.com
|
505790ad49da2ead0214eb1ec70ebbceb9c7d4d8
|
7aa98923157b2709cb29e13864deec811286d0d5
|
/10.9.2.py
|
7eadeb9b0080ec1037c726d08ae8f57ef6631f02
|
[] |
no_license
|
htdhret/D1047291
|
83a638d5995b649ea8622c400edf251455f9a696
|
aaae5df5b633d31da5aa50d030d98563ff8c7a23
|
refs/heads/main
| 2023-08-02T13:56:46.822167
| 2021-10-09T12:13:13
| 2021-10-09T12:13:13
| 415,295,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 333
|
py
|
x = int(input("Enter a year: "))
if (x % 4) == 0:
    if (x % 100) == 0:
        if (x % 400) == 0:
            print("{0} is a leap year".format(x))
        else:
            print("{0} is a common year".format(x))
            print(x % 400 + x)
    else:
        print("{0} is a leap year".format(x))
else:
    print("{0} is a common year".format(x))
|
[
"noreply@github.com"
] |
htdhret.noreply@github.com
|
5227e683eeda405361ef120fe342105988983379
|
cc93d8c24a3b6082823b12edb9d4328fa4616e04
|
/run_inference.py
|
f04b99f080c56da4623ff2e2538b2fe0c4319fb7
|
[] |
no_license
|
C2H5OHlife/UnsupDepth
|
31c1e615e5361a8ff657555e8cb7b8c34566feb3
|
710160e88e3817382d88851328bbbf0e46f53ec8
|
refs/heads/master
| 2020-04-11T10:51:17.839474
| 2018-12-17T01:45:35
| 2018-12-17T01:45:35
| 161,726,934
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,653
|
py
|
import torch
from imageio import imread, imsave
from scipy.misc import imresize
import numpy as np
from path import Path
import argparse
from tqdm import tqdm
from models import DispNetS, DispResNet
from utils import tensor2array
parser = argparse.ArgumentParser(description='Inference script for DispNet learned with \
Structure from Motion Learner inference on KITTI and CityScapes Dataset',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--output-disp", action='store_true', help="save disparity img")
parser.add_argument("--output-depth", action='store_true', help="save depth img")
parser.add_argument("--pretrained", required=True, type=str, help="pretrained DispNet path")
parser.add_argument("--img-height", default=128, type=int, help="Image height")
parser.add_argument("--img-width", default=416, type=int, help="Image width")
parser.add_argument("--no-resize", action='store_true', help="no resizing is done")
parser.add_argument("--dataset-list", default='./kitti_eval/test_files_eigen.txt', type=str, help="Dataset list file")
parser.add_argument("--dataset-dir", default='K:/Dataset/KITTI', type=str, help="Dataset directory")
parser.add_argument("--output-dir", default='E:/PCProjects/output_origin', type=str, help="Output directory")
parser.add_argument("--img-exts", default=['png', 'jpg', 'bmp'], nargs='*', type=str, help="images extensions to glob")
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
@torch.no_grad()
def main():
args = parser.parse_args()
if not(args.output_disp or args.output_depth):
print('You must at least output one value !')
return
# disp_net = DispNetS().to(device)
disp_net = DispResNet(3, alpha=1).to(device)
weights = torch.load(args.pretrained)
disp_net.load_state_dict(weights['state_dict'])
disp_net.eval()
dataset_dir = Path(args.dataset_dir)
output_dir = Path(args.output_dir)
output_disp = output_dir / 'disp'
output_depth = output_dir / 'depth'
output_disp.makedirs_p()
output_depth.makedirs_p()
if args.dataset_list is not None:
with open(args.dataset_list, 'r') as f:
test_files = [dataset_dir/file for file in f.read().splitlines()]
else:
test_files = sum([dataset_dir.files('*.{}'.format(ext)) for ext in args.img_exts], [])
print('{} files to test'.format(len(test_files)))
count = 0
for file in tqdm(test_files, ncols=100):
img = imread(file).astype(np.float32)
h,w,_ = img.shape
if (not args.no_resize) and (h != args.img_height or w != args.img_width):
img = imresize(img, (args.img_height, args.img_width)).astype(np.float32)
img = np.transpose(img, (2, 0, 1))
tensor_img = torch.from_numpy(img).unsqueeze(0)
tensor_img = ((tensor_img/255 - 0.5)/0.2).to(device)
output = disp_net(tensor_img)[0][0]
if args.output_disp:
disp = (255*tensor2array(output, max_value=None, colormap='bone', channel_first=False)).astype(np.uint8)
img = np.transpose(img, (1, 2, 0))
im_save = np.concatenate((disp, img), axis=1).astype(np.uint8)
imsave(output_disp/'{}_disp{}'.format(count, file.ext), im_save)
if args.output_depth:
depth = 1/output
depth = (255*tensor2array(depth, max_value=1, colormap='rainbow', channel_first=False)).astype(np.uint8)
imsave(output_depth/'{}_depth{}'.format(count, file.ext), depth)
count += 1
if __name__ == '__main__':
main()
|
[
"zhoulingtao7658@Hotmail.com"
] |
zhoulingtao7658@Hotmail.com
|
ca7ca9fadb6b8350641f806e917ff8ca39079686
|
ecf0f11c093ae4e134683afe13701df7ed7985e5
|
/testpackage/pytestargsusing.py
|
9d7ea480a1625925d1b299e65f791e000b53d13b
|
[] |
no_license
|
yeazhang/myhomework
|
9bd88208ce3927084f0e3eddc050afc6263901ec
|
7f50fb1c9d1cc48d09f9313033631c5aae8b3f89
|
refs/heads/main
| 2023-03-01T14:11:35.881451
| 2021-02-06T15:05:33
| 2021-02-06T15:05:33
| 331,968,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 113
|
py
|
# Parametrization: argnames can be a comma-separated string, a list, or a tuple
# argvalues holds the parametrized values: a list, or a list of tuples
|
[
"2818645781@qq.com"
] |
2818645781@qq.com
|
f84891006c72890c8a4c8b22fee0ceb75c90f1c3
|
d436742b111df6d31ace5a274d18037ff34c9f7d
|
/penv/price_generators.py
|
5d4cabf44e8f6d6845b2e0bc20bde60e1ab1c6d8
|
[
"MIT"
] |
permissive
|
maxclchen/penv
|
ca21a12f25ca35804b452241b912ca8f83c0ccaa
|
e9745e5ca7025ee575fe30da6a849b17c14dee9a
|
refs/heads/main
| 2023-08-22T04:44:16.281487
| 2021-04-12T19:01:10
| 2021-04-12T19:01:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,659
|
py
|
import numpy as np
import pandas as pd
from scipy.stats import truncnorm
from tensortrade.feed import Stream
class MultiGBM(Stream[np.array]):
def __init__(self, s0: np.array, drift: np.array, volatility: np.array, rho: np.array, n: int):
super().__init__()
self.n = n
self.m = len(s0)
self.i = 0
self.dt = 1 / n
self.s0 = s0.reshape(-1, 1)
self.mu = drift.reshape(-1, 1)
self.v = volatility.reshape(-1, 1)
V = (self.v@self.v.T)*rho
self.A = np.linalg.cholesky(V)
self.x = None
def forward(self) -> np.array:
self.i += 1
if self.x is None:
self.x = self.s0.flatten().astype(float)
return self.x
dw = np.random.normal(loc=0, scale=np.sqrt(self.dt), size=[self.m, 1])
s = np.exp((self.mu - (1 / 2)*self.v**2)*self.dt + (self.A@dw)).T
s = s.flatten()
self.x *= s
return self.x
def has_next(self):
return self.i < self.n
def reset(self):
super().reset()
self.i = 0
self.x = None
def multi_corr_gbm(s0: np.array, drift: np.array, volatility: np.array, rho: np.array, n: int):
m = len(s0)
assert drift.shape == (m,)
assert volatility.shape == (m,)
assert rho.shape == (m, m)
dt = 1 / n
s0 = s0.reshape(-1, 1) # Shape: (m, 1)
mu = drift.reshape(-1, 1) # Shape: (m, 1)
v = volatility.reshape(-1, 1) # Shape: (m, 1)
V = (v@v.T)*rho
A = np.linalg.cholesky(V) # Shape: (m, m)
dW = np.random.normal(loc=0, scale=np.sqrt(dt), size=[m, n]) # Shape (m, n)
S = np.exp((mu - (1 / 2)*v**2)*dt + (A@dW)).T
S = np.vstack([np.ones(m), S])
S = s0.T*S.cumprod(0)
return S
def make_multi_gbm_price_curve(n: int):
rho = np.array([
[ 1. , -0.34372319, 0.23809065, -0.21918481],
[-0.34372319, 1. , -0.07774865, -0.17430333],
[ 0.23809065, -0.07774865, 1. , -0.17521052],
[-0.21918481, -0.17430333, -0.17521052, 1. ]
])
s0 = np.array([50, 48, 45, 60])
drift = np.array([0.13, 0.16, 0.10, 0.05])
volatility = np.array([0.25, 0.20, 0.30, 0.15])
P = multi_corr_gbm(s0, drift, volatility, rho, n)
prices = pd.DataFrame(P).astype(float)
prices.columns = ["p1", "p2", "p3", "p4"]
#prices = prices.ewm(span=50).mean()
return prices
def make_shifting_sine_price_curves(n: int, warmup: int = 0):
n += 1
slide = 2*np.pi*(warmup / n)
steps = n + warmup
x = np.linspace(-slide, 2*np.pi, num=steps)
x = np.repeat(x, 4).reshape(steps, 4)
s0 = np.array([50, 48, 45, 60]).reshape(1, 4)
shift = np.array([0, np.pi / 2, np.pi, 3*np.pi / 2]).reshape(1, 4)
freq = np.array([1, 4, 3, 2]).reshape(1, 4)
y = s0 + 25*np.sin(freq*(x - shift))
prices = pd.DataFrame(y, columns=["p1", "p2", "p3", "p4"])
return prices
class MultiSinePriceCurves(Stream[np.array]):
def __init__(self, s0: np.array, shift: np.array, freq: np.array, n: int, warmup: int = 0):
super().__init__()
self.s0 = s0
self.shift = shift
self.freq = freq
self.steps = n + warmup + 1
self.i = 0
self.m = len(s0)
self.x = np.linspace(-2*np.pi*(warmup / n), 2*np.pi, num=self.steps)
def forward(self) -> np.array:
rv = truncnorm.rvs(a=-10, b=10, size=self.m)
v = self.s0 + 25*np.sin(self.freq*(self.x[self.i] - self.shift)) + rv
self.i += 1
return v
def has_next(self):
return self.i < self.steps
def reset(self):
super().reset()
self.i = 0
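# Usage sketch (illustrative addition, not in the original module): simulate
# roughly one trading year of daily prices for the four correlated assets
# configured in make_multi_gbm_price_curve.
if __name__ == '__main__':
    prices = make_multi_gbm_price_curve(n=252)
    print(prices.head())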
|
[
"mwbrulhardt@gmail.com"
] |
mwbrulhardt@gmail.com
|
1a862f916178e0640ace21aa06f612164fe5255b
|
ffec472086272974ffb7fc2e2c59848223b38c96
|
/hackerRank-solutions/Compare the Triplets.py
|
15cae3d18e34d0a9cc768b209e7a7a4e724d2fa9
|
[] |
no_license
|
senatn/learning-python
|
24b5b360cb4c8ef8cebd8b34f983fd81c956a747
|
f35d690d130bc68252386f01a9b399c5453ca6f6
|
refs/heads/master
| 2023-01-12T03:50:08.211506
| 2023-01-03T00:46:38
| 2023-01-03T00:46:38
| 179,149,508
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 365
|
py
|
def compareTriplets(a, b):
score = [0,0]
for i in range(3):
if a[i] > b[i]:
score[0] += 1
if a[i] < b[i]:
score[1] += 1
return score
if __name__ == '__main__':
a = list(map(int, input().rstrip().split()))
b = list(map(int, input().rstrip().split()))
result = compareTriplets(a, b)
print(result)
|
[
"noreply@github.com"
] |
senatn.noreply@github.com
|
0085d294a98c3bbe2a203b31818b30a734454fc5
|
d20137e064ff0a935c93902a9f2499b8d3ace063
|
/tensorflow/Linear_Regression/2_tensorflow_tutrial.py
|
778124c72c1efdcc8989ddaea5fcb27e3f0b3814
|
[] |
no_license
|
futo0713/Tensorflow_validation
|
99a948a67cd6a6bd9f35275cb849b6ada202b72d
|
769f8e10aeb0837c35b19e77c326683cea83afa4
|
refs/heads/master
| 2020-06-13T20:09:13.177525
| 2019-07-06T04:20:03
| 2019-07-06T04:20:03
| 194,775,083
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,292
|
py
|
import time
s = time.time()
#to turn off the warning
#----------------------------------------------------------
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#----------------------------------------------------------
#load dataset
#==========================================================
import pickle
name = 'dataset_linear'
dataset_dir = 'C:\\Users\FutoshiTsutsumi\Desktop\python_test\\tensorflow\dataset_sample\pickle'
save_file = dataset_dir + '\dataset_linear.pkl'
with open(save_file, 'rb') as f:
dataset = pickle.load(f)
x = dataset[0]
t = dataset[1]
#==========================================================
#I store x in placeholder and set a variable w.
#I multiply these two.
import tensorflow as tf
holder_x = tf.placeholder(tf.float32)
holder_t = tf.placeholder(tf.float32)
w = tf.Variable(tf.random_normal([1],mean=0.0, stddev=1.0))
mul_op = holder_x * w
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
result_w = sess.run(w)
print(result_w)
result_x = sess.run(holder_x, feed_dict={holder_x: x})
print(result_x)
result = sess.run(mul_op, feed_dict={holder_x: x})
print(result)
#Time required
e = time.time()
print('time:' + str(e-s))
|
[
"noreply@github.com"
] |
futo0713.noreply@github.com
|
d8968b27ed11019d2fdbaca25eff49da686fa2bf
|
69d138e53b2cac30b9fa12fc125f8ac57783358a
|
/employee/migrations/0020_auto_20190314_0954.py
|
48b046f3a233f9d08ec2c3532279306f599269e3
|
[] |
no_license
|
PallaGangadhar/employee
|
32544da737417b4f90fdcb5f28deb111892c8ffe
|
af40c851b77bc7c466054d961ee5ab30bfbe1d53
|
refs/heads/master
| 2020-04-27T17:38:24.583249
| 2019-03-22T04:54:20
| 2019-03-22T04:54:20
| 174,495,537
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
# Generated by Django 2.1.5 on 2019-03-14 09:54
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('employee', '0019_auto_20190314_0933'),
]
operations = [
migrations.AlterField(
model_name='leave',
name='leave_date_one',
field=models.DateField(blank=True, default=datetime.date.today, null=True),
),
]
|
[
"c100-120@C100-120s-iMac.local"
] |
c100-120@C100-120s-iMac.local
|
04cbebd041955bd31731244dfbe22af47d532a32
|
1744cf93475d0d62913cc95225ac144351dbd576
|
/mafia_bot/bot.py
|
f524de545876de6bf3a2dd49402b953b66d01cc1
|
[] |
no_license
|
hilleri123/mafia_bot
|
7fc8e58d04a3568accf4b1ebaa297f917811f0ec
|
acd22fd45053cb20de86e7b0d89c5878696670f9
|
refs/heads/master
| 2022-12-27T02:37:55.941779
| 2020-10-11T16:36:15
| 2020-10-11T16:36:15
| 303,147,211
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,704
|
py
|
import discord
from discord.ext import commands
from config import settings
import random
bot = commands.Bot(command_prefix = settings['prefix'])
@bot.command()
async def stupid_help(ctx):
await ctx.send(
        'help - for help, obviously.\n'+
        'check [civ, maf, com=1, don=1] - to send the roles out, dummy.\n'
)
text_tokens = ['Civilian', 'Mafia', 'Commissar', 'Mafia Don']
@bot.command()
async def check(ctx, *args):
roles = []
print(args)
for i, role in enumerate(text_tokens):
if i < len(args):
roles.extend([role]*int(args[i]))
else:
roles.append(role)
random.shuffle(roles)
print(roles)
author = ctx.author
channel = author.voice.channel
print(author, dir(channel), channel.members)
for i, member in enumerate(channel.members):
        # member.mention ('<@id>') never parses as an int; match the numeric
        # nick prefix instead, as shuffle() below assumes nicks are numbered.
        if i == int(member.nick.split()[0]):
member_role = roles[i]
print(i, member.mention, member_role)
            await member.send(f'{author.mention} loser, you are {member_role}')
@bot.command()
async def shuffle(ctx):
author = ctx.author
channel = author.voice.channel
print(channel.members)
print(filter(lambda member: member.mention.split()[0].isnumeric(), channel.members))
members = list(filter(lambda member: member.nick.split()[0].isnumeric(), channel.members))
random.shuffle(members)
print(members)
for i, member in enumerate(members):
tmp = list(member.nick.split()[1:])
tmp.insert(0, str(i+1))
print(tmp)
nick = " ".join(tmp)
await member.edit(nick=nick)
|
[
"shurik@skotch.nirvana"
] |
shurik@skotch.nirvana
|
544fa44c5473f9a479f1b85eb8a92d0430190313
|
5a69e7f47d6f30e434bff6a4742c61bb4a0b07b6
|
/pythonWeb1/zHomeWork1.py
|
de52cb50eca6f1969c92b29255b9b3268a31a45c
|
[] |
no_license
|
Islotus/LearnPythonWeb
|
eb85cff9a53ed241992b4f982597825be6507fba
|
c601d7d55954e0160ff4c5167508ff16461b830f
|
refs/heads/master
| 2022-12-30T06:04:50.188180
| 2018-01-21T12:44:50
| 2018-01-21T12:44:50
| 118,332,090
| 0
| 1
| null | 2022-12-15T14:38:12
| 2018-01-21T12:18:56
|
Python
|
UTF-8
|
Python
| false
| false
| 2,332
|
py
|
#coding: utf-8
import socket
"""
2017/02/16
作业 1
资料:
在 Python3 中,bytes 和 str 的互相转换方式是
str.encode('utf-8')
bytes.decode('utf-8')
send 函数的参数和 recv 函数的返回值都是 bytes 类型
其他请参考上课内容, 不懂在群里发问, 不要憋着
"""
# 1
# Complete this function
def protocol_of_url(url):
    '''
    url is a string; its possible values include
    'g.cn'
    'g.cn/'
    'g.cn:3000'
    'g.cn:3000/search'
    'http://g.cn'
    'https://g.cn'
    'http://g.cn/'
    Return the string for the protocol, 'http' or 'https'
    '''
    pass
# 2
# Complete this function
def host_of_url(url):
    '''
    url is a string; its possible values include
    'g.cn'
    'g.cn/'
    'g.cn:3000'
    'g.cn:3000/search'
    'http://g.cn'
    'https://g.cn'
    'http://g.cn/'
    Return the string for the host, e.g. 'g.cn'
    '''
    pass
# 3
# Complete this function
def port_of_url(url):
    '''
    url is a string; its possible values include
    'g.cn'
    'g.cn/'
    'g.cn:3000'
    'g.cn:3000/search'
    'http://g.cn'
    'https://g.cn'
    'http://g.cn/'
    Return the string for the port, e.g. '80' or '3000'
    Note: as the class materials say, 80 is the default port
    '''
    pass
# 4
# Complete this function
def path_of_url(url):
    '''
    url is a string; its possible values include
    'g.cn'
    'g.cn/'
    'g.cn:3000'
    'g.cn:3000/search'
    'http://g.cn'
    'https://g.cn'
    'http://g.cn/'
    Return the string for the path, e.g. '/' or '/search'
    Note: as the class materials say, when no path is given the default path is '/'
    '''
    pass
# 5
# Complete this function
def parsed_url(url):
    '''
    url is a string; its possible values include
    'g.cn'
    'g.cn/'
    'g.cn:3000'
    'g.cn:3000/search'
    'http://g.cn'
    'https://g.cn'
    'http://g.cn/'
    Return a tuple of the form (protocol, host, port, path)
    '''
    pass
# 6
# Wrap sending an HTTP request to the server and receiving the data into a function
# defined as follows
def get(url):
    '''
    This function connects to the server over a socket, in the same way as the
    client.py code from class, fetches the data the server returns, and returns it
    Note: the returned data is of type bytes
    '''
    pass
# Usage
def main():
url = 'http://movie.douban.com/top250'
r = get(url)
print(r)
if __name__ == '__main__':
main()
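# One possible solution sketch (an illustrative addition, kept separate so the
# exercise stubs above stay intact). It only handles the url shapes listed in
# the docstrings: no query strings, userinfo, or IPv6 hosts.
def _parsed_url_sketch(url):
    protocol = 'http'
    if '://' in url:
        protocol, url = url.split('://', 1)
    if '/' in url:
        host, path = url.split('/', 1)
        path = '/' + path
    else:
        host, path = url, '/'
    port = '80'
    if ':' in host:
        host, port = host.split(':', 1)
    return (protocol, host, port, path)
# _parsed_url_sketch('g.cn:3000/search') -> ('http', 'g.cn', '3000', '/search')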
|
[
"2791201278@qq.com"
] |
2791201278@qq.com
|
61f629b40dc83c829a630b41609a344feeee182d
|
55e03c505043914590fce17c3626a8197a66d22e
|
/singleton.py
|
a5e9c6946feb230885b724a7f86b813bd400773c
|
[
"MIT"
] |
permissive
|
dcesarz/-MIRROR-design_patterns_factories
|
17c575a96d29cec9af09604554752eff1d787b73
|
e276e25586dc631309febb0fe9eb5f1454627107
|
refs/heads/main
| 2023-04-05T23:06:32.393099
| 2021-04-30T09:45:58
| 2021-04-30T09:45:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
# https://stackoverflow.com/questions/6760685/creating-a-singleton-in-python
# metaclass used for singleton implementation
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
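# Usage sketch (illustrative addition, not in the original file): any class
# that declares Singleton as its metaclass hands back one shared instance.
class _Config(metaclass=Singleton):
    def __init__(self):
        self.settings = {}

assert _Config() is _Config()  # both calls return the same cached instance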
|
[
"noreply@github.com"
] |
dcesarz.noreply@github.com
|
4edb5b20ad57964ba3c34820c435749c0c57a3c6
|
6863b1dd96103bb17e4f91d38763f7226184811c
|
/migrations/0001_initial.py
|
e1cdb55428e94f9bd24c275de17aad8b6a544af7
|
[] |
no_license
|
gkrnours/django-foreignkey-example
|
d0fc00a847fdf11b28fe35d024be09f16e103d46
|
305562db83b7b1b7271ab7c58673340622062d30
|
refs/heads/master
| 2021-01-16T23:17:43.524009
| 2017-02-22T16:33:28
| 2017-02-22T16:33:28
| 82,827,066
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 998
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-02-22 11:09
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=150)),
],
),
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=150)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='books', to='author.Author')),
],
),
]
|
[
"gkr@jellycopter.net"
] |
gkr@jellycopter.net
|
7c729a0fc6594a57b9d3c905a92694febaab14da
|
edcd3847115ae76b68195bdda54e04ed06a04318
|
/bulbs/content/south_migrations/0004_add_groups.py
|
85024af72caab5aaadc7f311d651cd4656d9300a
|
[
"MIT"
] |
permissive
|
NeuralNoise/django-bulbs
|
b146725fdcb3bfb4158aec90d5fc0b7830d8fcbe
|
0c0e6e3127a7dc487b96677fab95cacd2b3806da
|
refs/heads/master
| 2021-06-07T08:11:31.996448
| 2016-09-21T19:35:32
| 2016-09-21T19:35:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,829
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
PERM_CONF = {
"publish_content": "Can publish content",
"publish_own_content": "Can publish own content",
"change_content": "Can change content",
"promote_content": "Can promote content"
}
GROUP_CONF = dict(
contributor=(),
author=("publish_own_content",),
editor=(
"publish_content",
"change_content",
"promote_content",
),
admin=(
"publish_content",
"change_content",
"promote_content",
)
)
content_ct, _ = orm["contenttypes.ContentType"].objects.get_or_create(
model="content", app_label="content"
)
for group_name, group_perms in GROUP_CONF.items():
group, _ = orm["auth.Group"].objects.get_or_create(
name=group_name
)
for perm_name in group_perms:
perm, _ = orm["auth.Permission"].objects.get_or_create(
content_type=content_ct,
codename=perm_name,
defaults={
"name": PERM_CONF[perm_name]
}
)
group.permissions.add(perm)
def backwards(self, orm):
"Write your backwards methods here."
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'content.content': {
'Meta': {'object_name': 'Content'},
'_thumbnail': ('djbetty.fields.ImageField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'feature_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['content.FeatureType']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indexed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_content.content_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'subhead': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['content.Tag']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
u'content.featuretype': {
'Meta': {'object_name': 'FeatureType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'content.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_content.tag_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['content']
symmetrical = True
|
[
"v.forgione@gmail.com"
] |
v.forgione@gmail.com
|
66058275ffb49f9d792cd59de52a36ef53c76667
|
af88433af66ed8234f1aa4373c44d5d41721f98f
|
/4pairs_joiner.py
|
9a99a90bad393e6f5b46881476b00b0ddf27dabb
|
[] |
no_license
|
chohlasa/citibike_routes
|
e2f16fbb6c8412d1193ed008f31acf5fe2455ce8
|
de92224b29b1986914d5cad35bf0f614c75147e9
|
refs/heads/master
| 2016-09-05T22:14:22.809021
| 2014-04-20T18:43:03
| 2014-04-20T18:43:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,233
|
py
|
# This script takes the parsed bikeroutes (which are simply pairs of
# lat/long points) and first filters out all the duplicates. This
# includes lines with two identical endpoints (adds the ridecount),
# and lines in opposite directions but two identical endpoints (adds
# the ridecount).
# Input file: 'processed_data/coordinate_pairs.csv'
# Output file: 'processed_data/coordinate_pairs_flattened.csv'
import pandas as pd
pd.set_option('display.width', 1000)
# JOINING LINES
# Starting with lines with same start and end points
pairs = pd.io.parsers.read_table('coordinate_pairs.csv', sep=',')
print pairs[pairs['start_lat'] == 40.71210]
print "The output above..."
print "Should be much longer than the output below."
pairs = pairs.groupby(['start_lat','start_long','end_lat','end_long']).sum()
pairs["index"] = pairs.reset_index().index
print pairs[pairs.index.get_level_values('start_lat') == 40.71210]
# Much more complicated: matching lines with inverted identical start
# and end points
print "\nThese are the reverse pairs from the same point."
print pairs[pairs.index.get_level_values('end_lat') == 40.71210]
inv_pairs = pairs.copy()
inv_pairs.reset_index(inplace=True)
inv_pairs.columns = ['end_lat', 'end_long', 'start_lat', \
'start_long', 'ride_count', 'index']
inv_pairs.set_index(['start_lat', 'start_long', 'end_lat', 'end_long'], \
inplace=True)
merged_df = inv_pairs.join(pairs, how="inner", rsuffix="_orig", lsuffix="_inv")
merged_df = merged_df[merged_df['index_orig'] < merged_df['index_inv']]
# print merged_df[merged_df.index.get_level_values('start_lat') == 40.71210]
# print merged_df[merged_df.index.get_level_values('end_lat') == 40.71210]
pairs = pairs[~pairs['index'].isin(merged_df['index_inv'])]
merged_df = merged_df.drop(['index_inv', 'ride_count_orig', 'index_orig'], axis=1)
pairs = pairs.join(merged_df, how="left")
pairs = pairs.fillna(0)
pairs['ride_count'] = pairs['ride_count'] + pairs['ride_count_inv']
pairs = pairs.drop(['ride_count_inv', 'index'], axis=1)
pairs = pairs.reset_index()
pairs.sort('ride_count', ascending=True, inplace=True)
#print pairs
pairs.to_csv('processed_data/coordinate_pairs_flattened.csv', index=False)
|
[
"chohlasa@gmail.com"
] |
chohlasa@gmail.com
|
a9fbb50669b46c5cb2fc455b2c7aa5aba75b5d1d
|
438abf2253e8015492ea1a5c6441c707a14909bc
|
/homework01/ceasar.py
|
672617ac5d7849eff2cd8489d617ebd3fedc1ee2
|
[] |
no_license
|
emina13/cs102
|
508f2d5883fd8a97879b8508d60ab6684a653752
|
05ce638aa62d3ec8f9c5225c4953d3febd7b86be
|
refs/heads/master
| 2020-08-06T02:26:25.536254
| 2020-06-28T18:07:21
| 2020-06-28T18:07:21
| 212,799,968
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,169
|
py
|
# -*- coding: utf-8 -*-
def encrypt_caesar(plaintext):
"""
>>> encrypt_caesar("PYTHON")
'SBWKRQ'
>>> encrypt_caesar("python")
'sbwkrq'
>>> encrypt_caesar("Python3.6")
'Sbwkrq3.6'
>>> encrypt_caesar("")
''
"""
new_data = []
for char in plaintext:
        if 'a' <= char <= 'z':
            new_char = chr((ord(char) % ord('a') + 3) % 26 + ord('a'))
        elif 'A' <= char <= 'Z':
            new_char = chr((ord(char) % ord('A') + 3) % 26 + ord('A'))
        else:
            new_char = char
        new_data.append(new_char)
return "".join(new_data)
def decrypt_caesar(plaintext):
"""
>>> decrypt_caesar("SBWKRQ")
'PYTHON'
>>> decrypt_caesar("sbwkrq")
'python'
>>> decrypt_caesar("Sbwkrq3.6")
'Python3.6'
>>> decrypt_caesar("")
''
"""
new_data = []
for char in plaintext:
        if 'a' <= char <= 'z':
            new_char = chr((ord(char) % ord('a') - 3) % 26 + ord('a'))
        elif 'A' <= char <= 'Z':
            new_char = chr((ord(char) % ord('A') - 3) % 26 + ord('A'))
        else:
            new_char = char
        new_data.append(new_char)
return "".join(new_data)
|
[
"noreply@github.com"
] |
emina13.noreply@github.com
|
9ecfc23fc63a0e911bb99ee9a2c99df53c0a1829
|
44531cd3494bea5d3278c058f7adea6246bce676
|
/Invisible-Clock/background.py
|
b6329c454428cdbeb575ac871df3cd9142e18041
|
[] |
no_license
|
Kowsihan-sk/Machine-Learning-Projects
|
9521de7645cc77524ed94cb10898af0eb0301c0a
|
a547336ec0a32cf42a439aaf56107b101337e9c8
|
refs/heads/master
| 2022-12-02T10:15:23.608311
| 2020-07-28T02:01:27
| 2020-07-28T02:01:27
| 283,067,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
import cv2
cap = cv2.VideoCapture(0) # webcam
while cap.isOpened():
ret, back = cap.read() # reading from webcam
if ret:
        # ret is True if the frame was read successfully
        # back is the frame the camera captured
cv2.imshow("image", back)
if cv2.waitKey(5) == ord('q'):
# save the image
cv2.imwrite('bgimage.jpg', back)
break
cap.release()
cv2.destroyAllWindows()
|
[
"kowsihan2sk@gmail.com"
] |
kowsihan2sk@gmail.com
|
50d5f6ee92ac6e4055b601e0cb08de38b79a0d78
|
17381e0cc68b141a487338e25a4f348b9362faf5
|
/plugins/check_nrpe_CN.py
|
e615f03095b1f683c2f6a1ad88a02db4a6b9dcda
|
[] |
no_license
|
amgaa/ClusterNap
|
9c662772db5e9b833c46af55130e9df5ffb79bd7
|
725427631de874aa0b70aad7f1879df0e86acb6f
|
refs/heads/master
| 2021-07-11T11:01:56.980416
| 2016-11-28T09:23:31
| 2016-11-28T09:23:31
| 10,011,357
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,596
|
py
|
#!/usr/bin/env python
# A ClusterNap compatible check_nrpe plugin
import sys
import os
import subprocess
import socket
clusternap_dir = os.path.dirname(os.path.abspath(__file__)) + "/../state/nodes/"
nodes = os.listdir(clusternap_dir)
plugin_dir = "/usr/lib/nagios/plugins/"
orig_plugin = plugin_dir + "check_nrpe"
args = sys.argv[1:]
cmd = orig_plugin
# Get unique service name we are checking
service_name = ""
block = -1
for i in range(0, len(args)):
if args[i] == "-s":
service_name = args[i+1]
block = i
# Erase service name from arguments
if block != -1:
args.pop(block)
args.pop(block)
# Run the original check_nrpe command
for arg in args:
cmd += " " + arg
ret = subprocess.Popen(cmd, shell=True)
ret.wait()
# Set values for ClusterNap. 1 is for ON, 0 is for OFF, and -1 is for Unknown states.
if ret.returncode == 0 or ret.returncode == 1: # OK or WARNING
val = "1"
elif ret.returncode == 2: # CRITICAL
val = "0"
elif ret.returncode == 3: # UNKNOWN
val = "-1"
else:
val = "-1"
#Check if service is defined in argument and exists in ClusterNap configuration.
#If so, put the corresponding value to the state config file in ClusterNap.
if service_name != "" and service_name in nodes:
f=open( clusternap_dir + service_name, "w" )
f.write( val + "\n" )
f.close()
else:
print "Warning in check_nrpe_CN.py. Your service \"" +service_name + "\" is not defined in ClusterNap.\n"
# Exit with same value as check_nrpe
sys.exit(ret.returncode)
|
[
"amgaa.hpc@gmail.com"
] |
amgaa.hpc@gmail.com
|
3e119cfa6d182283fd4105948f57cc3949392715
|
f6cadae90eb8fa3766acbb00a6c65fd2dfb3cb8d
|
/source/aiocomments/views/user_requests.py
|
5a2aab955d11fec38b9d07f0c05db87df8cf20f5
|
[] |
no_license
|
RTyy/aiocomments
|
e6d3883777116a81653f12f498473883cebecbc4
|
2cb470462dbc1eaafbad1453f8f9050dc33d2412
|
refs/heads/master
| 2020-05-27T21:26:26.497154
| 2017-07-10T09:02:01
| 2017-07-10T09:02:01
| 94,754,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,798
|
py
|
"""User Requests Controller."""
import aiofiles
import trafaret as t
from aiohttp.web import StreamResponse, FileResponse
from datetime import datetime
from core.exceptions import CoreException
from core.db import acquire_connection
from ..consumers import DlResponseConsumer
from ..models import UserDlRequest, DlRequest, Comment, Instance, EventLog
@acquire_connection
async def get_user_dlrequests(request, db):
"""Return a list of previously created user request."""
# use trafaret as validator
trafaret = t.Dict({
t.Key('user_id'): t.Int,
})
try:
req = trafaret.check(request.match_info)
requests = DlRequest.list(db).raw.select(
DlRequest.id, DlRequest.itype_id, DlRequest.i_id,
DlRequest.author_id, DlRequest.start, DlRequest.end,
DlRequest.fmt, DlRequest.created) \
.filter(
UserDlRequest.user_id == req['user_id']) \
.order_by(UserDlRequest.created.desc())
return await requests
except t.DataError as e:
raise CoreException(400, 'Bad Request', e.as_dict())
@acquire_connection
async def download(request, db):
"""Prepare and return report according to request params."""
# use trafaret as validator
trafaret = t.Dict({
t.Key('user_id'): t.Int,
t.Key('start', optional=True, default=None): (t.Int | t.Null),
t.Key('end', optional=True, default=None): (t.Int | t.Null),
t.Key('author_id', optional=True, default=None): (t.Int | t.Null),
t.Key('i_id', optional=True, default=None): (t.Int | t.Null),
# 0 or unspecified means "comment"
t.Key('itype_id', optional=True, default=0): t.Int,
})
trafaret_format = t.Dict({
# t.Key('format', optional=True, default='xml'): t.Enum('xml'),
t.Key('format', optional=True,
default='xml'): lambda d: \
DlRequest.Format.by_verbose(d, DlRequest.Format.XML),
})
try:
req = trafaret.check(request.query)
if not req['i_id'] and not req['author_id']:
raise CoreException(400, 'Bad Request', {
                '_': 'Instance or Author should be specified.'})
req_fmt = trafaret_format.check(request.match_info).get('format')
root = None
# try to get previously stored request
try:
# make a filter
flt = DlRequest.fmt == req_fmt
if req['i_id']:
# make sure that requested instance exists.
if req['itype_id'] == 0:
root = await Comment.list(db).get(
Comment.id == req['i_id'])
else:
root = await Instance.list(db).get(
(Instance.i_id == req['i_id']) &
(Instance.itype_id == req['itype_id']))
flt &= (DlRequest.i_id == req['i_id']) \
& (DlRequest.itype_id == req['itype_id'])
if req['author_id']:
flt &= DlRequest.author_id == req['author_id']
if req['start'] is not None:
req['start'] = datetime.fromtimestamp(req['start'] / 1000)
flt &= (DlRequest.start == req['start'])
if req['end'] is not None:
req['end'] = datetime.fromtimestamp(req['end'] / 1000)
flt &= (DlRequest.end == req['end'])
dlreq = await DlRequest.list(db).get(flt)
# get user download request
try:
udlreq = await UserDlRequest.list(db).get(
(UserDlRequest.user_id == req['user_id']) &
(UserDlRequest.dlrequest_id == dlreq.id)
)
except UserDlRequest.DoesNotExist:
                # create a new one
udlreq = UserDlRequest(user_id=req['user_id'],
dlrequest_id=dlreq.id)
await udlreq.save(db)
except DlRequest.DoesNotExist:
# create both new download request and its link to the user
dlreq = DlRequest(**req)
dlreq.fmt = req_fmt
await dlreq.save(db, request.app['fs'])
udlreq = UserDlRequest(user_id=req['user_id'],
dlrequest_id=dlreq.id)
await udlreq.save(db)
except (Comment.DoesNotExist, Instance.DoesNotExist):
raise CoreException(404, 'Root Instance Not Found')
# proceed with request validation
# make sure there are no events that could affect
# previously generated report
if dlreq.state == DlRequest.State.VALID:
# build events query based on DlRequest params
events = EventLog.list(db).filter(EventLog.e_date > dlreq.created)
if root is not None:
events = events.filter(EventLog.tree_id == root.tree_id)
if dlreq.author_id:
events = events.filter(EventLog.author_id == dlreq.author_id)
if dlreq.start:
if dlreq.end:
events = events.filter(
EventLog.comment_cdate.between(dlreq.start, dlreq.end))
else:
events = events.filter(
EventLog.comment_cdate >= dlreq.start)
elif dlreq.end:
events = events.filter(EventLog.comment_cdate <= dlreq.end)
# check the number of events which affected
# previously generated report
if await events.count() > 0:
                # mark the report invalid if at least one such event is found
dlreq.state = DlRequest.State.INVALID
await dlreq.save(db, request.app['fs'])
# prepare requested report
report_filename = 'report'
report_filepath = request.app['fs'].path(dlreq.filename)
# if req['author_id']:
# report_filename += '-user%s' % req['author_id']
# if req['i_id']:
# report_filename += '-comment%s' % i_id if req['type_id'] == 0 \
# else '-instance%s(%s)' % (req['i_id'], req['itype_id'])
headers = {
'Content-Type': 'text/xml',
'Content-Disposition':
'attachment; filename="%s.xml"' % report_filename,
'Cache-Control': 'no-cache',
'Connection': 'keep-alive'
}
        # dlreq.state tells us whether the requested report was already
        # generated and whether it is still valid (no updates or new comments
        # were created within the period specified by the request)
if dlreq.state == DlRequest.State.VALID:
# return a pure FileResponse
return FileResponse(report_filepath, headers=headers)
else:
stream = StreamResponse(status=200, reason='OK', headers=headers)
# stream.enable_chunked_encoding()
await stream.prepare(request)
            # wait here for the message from the report builder
            # over the local pubsub service
await DlResponseConsumer(dlreq.id, loop=request.app.loop).run()
# stream generated report file
async with aiofiles.open(request.app['fs'].path(dlreq.filename),
'r') as fd:
while True:
chunk = await fd.read(1024)
if not chunk:
break
stream.write(chunk.encode('utf-8'))
                    # yield to the event loop so other tasks can run
await stream.drain()
await stream.write_eof()
return stream
except t.DataError as e:
raise CoreException(400, 'Bad Request', e.as_dict())
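A rough client-side sketch of the download handler above (the URL path is an assumption; the parameter names and millisecond timestamps come from the trafaret validators):

# GET /download/xml?user_id=42&author_id=7&start=1489536000000
# 'format' is taken from the route match_info, the rest from the query
# string; at least one of i_id / author_id is required, and start/end
# are epoch milliseconds (the handler divides them by 1000).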
|
[
"a@itd.su"
] |
a@itd.su
|
0b62db421f1b592394ee367b6221c3e0068a2a38
|
8477ff7ec00bc8307c51d7b5d0af7c35e0fa4758
|
/elecciones/admin.py
|
00a0af34cd8cdf926d224b134d34c7ff55482b93
|
[] |
no_license
|
ciudadanointeligente/votainteligente-primarias
|
afc512fb47075cbc31419361a51b9857d9c8a064
|
11b91990d99b41541b899bfc68d2cbc8fb64a4e1
|
refs/heads/master
| 2021-01-10T19:26:04.374281
| 2013-09-10T20:43:19
| 2013-09-10T20:43:19
| 10,316,015
| 0
| 0
| null | 2013-09-10T20:43:20
| 2013-05-27T14:06:21
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,770
|
py
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from models import *
class ColectivoAdmin(admin.ModelAdmin):
model = Colectivo
admin.site.register(Colectivo,ColectivoAdmin)
class IndiceInline(admin.TabularInline):
model = Indice
class CandidatoInline(admin.TabularInline):
model = Candidato
extra = 0
class PreguntaInline(admin.TabularInline):
model = Pregunta
class RespuestaInline(admin.TabularInline):
model = Respuesta
readonly_fields = ['candidato']
extra = 0
class EleccionAdmin(admin.ModelAdmin):
search_fields = ['nombre', 'candidato__nombre']
inlines = [
CandidatoInline,
IndiceInline
]
admin.site.register(Eleccion, EleccionAdmin)
# admin action for bulk approval of questions
def aprobar_preguntas(modeladmin, request, queryset):
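    # loop per object (instead of queryset.update) because enviar() must run for each question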
for obj in queryset:
obj.enviar()
obj.procesada=True
obj.aprobada=True
obj.save()
aprobar_preguntas.short_description = "Aprobar Preguntas para enviar"
class PreguntaAdmin(admin.ModelAdmin):
model = Pregunta
list_display = ['texto_pregunta', 'aprobada', 'procesada']
ordering = ['aprobada','procesada']
# readonly_fields = ['procesada']
actions = [aprobar_preguntas]
inlines = [RespuestaInline]
    # special hook for approving and sending the question's mail from the admin
def save_model(self, request, obj, form, change):
if obj.aprobada and not obj.procesada:
obj.enviar()
obj.procesada=True
obj.save()
admin.site.register(Pregunta, PreguntaAdmin)
class AreaAdmin(admin.ModelAdmin):
pass
admin.site.register(Area, AreaAdmin)
class DatoAdmin(admin.ModelAdmin):
pass
admin.site.register(Dato, DatoAdmin)
class ContactoAdmin(admin.ModelAdmin):
search_fields = ['valor', 'candidato__nombre']
admin.site.register(Contacto, ContactoAdmin)
|
[
"falvarez@votainteligente.cl"
] |
falvarez@votainteligente.cl
|
ef6fafef4b74a8b781be2dd4dc742f5cb6dea588
|
1b9badf4c5dddc6a9e850ef99ad9f8a9ea449283
|
/data_structure/string/reverse_words.py
|
1be9b33c8aca44acc473bd5b3f9a4ff93203a77f
|
[] |
no_license
|
contactshadab/data-structure-algo-python
|
e066a9f75f7063bb7317b22e9198f439f4a8001f
|
57cf2d4c28e26a94259a4eb03703f97f3ab5f351
|
refs/heads/main
| 2023-02-24T01:41:01.142659
| 2021-01-07T17:38:05
| 2021-01-07T17:38:05
| 305,084,168
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
# Run time complexity: O(n), where n is the length of the text
def reverse(text):
    if text is None:
raise Exception('Illegal argument')
result = []
words = text.split(' ')
for i in range(len(words)-1, -1, -1):
result.append(words[i])
return ' '.join(result)
if __name__ == "__main__":
print(reverse("Reverse me"))
print(reverse(""))
# print(reverse(None)) # Exception: Illegal argument
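The same word reversal can be written more idiomatically with slicing; a minimal equivalent sketch:

# def reverse(text):
#     if text is None:
#         raise Exception('Illegal argument')
#     return ' '.join(text.split(' ')[::-1])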
|
[
"58193713+contactshadab@users.noreply.github.com"
] |
58193713+contactshadab@users.noreply.github.com
|
840d64cf18e623fd8f1c7d4f4111377ff7fe60b7
|
2e62b9e4c92e054c80000b3d7084f74168262b68
|
/Ch03/destination_miles.py
|
370be7aac203394ab7fb676c1d874d8ba92174ea
|
[] |
no_license
|
SegoviaJ/MyPythonCourse
|
e429de06c87920bc6667c17d24080270f5da5d6e
|
5d6f980b1e7dee1de3e257c5feb97b1dbbf15d32
|
refs/heads/master
| 2020-07-31T01:08:22.513060
| 2019-09-27T21:58:21
| 2019-09-27T21:58:21
| 210,427,871
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
total_distance = int(input("How far from origin to destination? "))
travelled = int(input("How far have you gone? "))
remaining = total_distance-travelled
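# abs() keeps the distance non-negative even if you have already passed the destination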
print(abs(remaining),'miles to go')
|
[
"james.segovia@hotmail.com"
] |
james.segovia@hotmail.com
|
6f22947c146cdb3d4b4e9218a0d8cdabc51ca34a
|
a211aafcd8ae2369cd3289fca6ced287ee09cc26
|
/algos_2/sprint_6_hashes/task_e.py
|
881eb70ff3dc32e6e169dd04b5e31c413aba9309
|
[] |
no_license
|
vitt0ri0/yalgorithms
|
562d27449bbc0907c37d632b6eff4d5dbf9da208
|
1ff6bdc92b61baf03463f299e62c6199d05be040
|
refs/heads/master
| 2023-03-21T21:07:02.339838
| 2021-03-21T13:00:43
| 2021-03-21T13:00:43
| 287,198,070
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 932
|
py
|
def task_e(arr):
if len(arr) < 2:
return 0
prev1 = arr[0]
prev2 = None
counter = 0
i = 1
mmax = 0
while i < len(arr):
el = arr[i]
el2 = arr[i-1]
if counter:
a = (el, el2) == (prev2, prev1)
b = (el, el2) == (prev1, prev2)
if a or b:
counter += 1
prev2 = el
prev1 = el2
i += 1
else:
counter = 0
prev1 = el
elif el != prev1:
counter = 1
prev2 = prev1
prev1 = el
i += 1
else:
prev1 = el
if counter > mmax:
mmax = counter
i += 1
return mmax
if __name__ == '__main__':
    # sample input: 1 1 1 0 1 0 1 1 0 1 0 1 0 1 1 0
    n = input()  # element count (unused; the split below determines the length)
    arr = list(map(int, input().split()))
res = task_e(arr)
print(res)
|
[
"vitt0ri0.progr@gmail.com"
] |
vitt0ri0.progr@gmail.com
|
d1e21770e28bf318bb9670ca416bde39191d4f7d
|
6e0108c11132e63c81adbfab4309011b1f9f6dda
|
/tests/python/extra/clear_leaves.py
|
4d2f1e3a58a3fcb2fd07655efd2646b28d0a5f5f
|
[
"Apache-2.0"
] |
permissive
|
scottdonaldau/ledger-qrl
|
c28a614ae52c44e53947e444abf078ec27041815
|
7a3b933b84065b9db2b775d50205efcdbed2399e
|
refs/heads/master
| 2020-04-12T07:12:25.687015
| 2018-12-19T02:55:43
| 2018-12-19T02:55:43
| 162,360,262
| 0
| 0
|
Apache-2.0
| 2018-12-19T00:15:27
| 2018-12-19T00:15:27
| null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
from pyledgerqrl.ledgerqrl import *
import time  # explicit import so time.time() below does not depend on the star import
dev = LedgerQRL()
start = time.time()
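# write 256 zeroed leaves: a one-byte leaf index followed by 32 zero bytes each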
for i in range(256):
data = bytearray([i]) + bytearray.fromhex("00" * 32)
answer = dev.send(INS_TEST_WRITE_LEAF, data)
assert len(answer) == 0
answer = dev.send(INS_TEST_PK_GEN_1)
|
[
"lenijuan@gmail.com"
] |
lenijuan@gmail.com
|
416b6e3bfd65de33b7549956af8ebdb24642ebc6
|
6cfcf1b6ef7afe49eebe4edbd21184fc914c7755
|
/Exercicios/ex024.py
|
a340c88d759c0fc679b1a895d2a9b21fbe5500a7
|
[] |
no_license
|
mmagalha/Python
|
254df310a2a4f79258638321094296860bf4097a
|
95cbcc57c6f26d37954bc8835da885d32b4e607e
|
refs/heads/master
| 2021-05-02T07:42:56.872807
| 2018-02-12T01:05:02
| 2018-02-12T01:05:02
| 118,944,792
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 208
|
py
|
# Python Exercise 024: write a program that reads the name of a city and says whether or not it starts with "SANTO".
cidade = str(input("Digite o nome da sua cidade: ")).upper()
print(cidade[:5] == "SANTO")
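The slice comparison can also be expressed with str.startswith, which reads a little more directly:

# print(cidade.startswith("SANTO"))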
|
[
"mmagalha@gmail.com"
] |
mmagalha@gmail.com
|
a41d7737fdb64767088b4153d8994a0422a6044c
|
ca2dbcfeac6ab571a19bd7d91b7234fd461d09e3
|
/contact/settings.py
|
f6b23ebea5443fb592009997c1e7ce9e73093d67
|
[] |
no_license
|
RahulSinghDhek/test-contact
|
51ebcc85e32a3d4fc86cb978824337b444f077be
|
ff14bb369e4caae6cd4db95388f7c87bf65c3227
|
refs/heads/master
| 2020-04-20T02:18:27.516767
| 2019-01-31T17:44:41
| 2019-01-31T17:44:41
| 168,568,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,655
|
py
|
"""
Django settings for contact project.
Generated by 'django-admin startproject' using Django 2.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+mg_ysn-@n6l*ltqbi59wn(b(9pt32ugy_l!ztko^ux0nl80@k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'contactlistdhek.herokuapp.com']  # hostnames only, no scheme or trailing slash
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'phonebook',
'rest_framework.authtoken'
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
],
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 10
}
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'contact.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'contact.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'test', # Or path to database file if using sqlite3.
'USER': 'postgres', # Not used with sqlite3.
'PASSWORD': '1234', # Not used with sqlite3.
'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '5432', # Set to empty string for default. Not used with sqlite3.
}
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
# https://docs.djangoproject.com/en/1.11/howto/static-files/
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra lookup directories for collectstatic to find static files
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
# Add configuration for static files storage using whitenoise
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
import dj_database_url
prod_db = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(prod_db)
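A note on the last three lines above: dj_database_url.config() reads the DATABASE_URL environment variable and returns an empty dict when it is unset, so the hard-coded Postgres settings survive locally while the environment wins in production. A minimal sketch (hypothetical credentials):

# export DATABASE_URL=postgres://user:secret@host:5432/dbname
# dj_database_url.config(conn_max_age=500) then returns a settings dict
# (ENGINE, NAME, USER, PASSWORD, HOST, PORT, plus CONN_MAX_AGE=500) that
# DATABASES['default'].update(...) merges over the defaults above.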
|
[
"rdhek@qti.qualcomm.com"
] |
rdhek@qti.qualcomm.com
|
e4f2ffff2bf16986aa3131d71811dfb973f65ef7
|
9d126bd1569104d953f59005cae73197678c1566
|
/n-apekatter.py
|
35d7c584585e02c3aa2ee4b038a0ad99058296a4
|
[] |
no_license
|
supermons/n-apekatter
|
75e84a9b5b9b3833e06b8a8bbdd00cb8716a4737
|
db78878e3e8913117e9b3a04e36ad67b686e9844
|
refs/heads/master
| 2020-04-01T07:54:38.056944
| 2018-10-14T19:35:57
| 2018-10-14T19:35:57
| 153,009,295
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 496
|
py
|
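# Prints the Norwegian children's rhyme counting down n little monkeys teasing the crocodile.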
n = 5
while n != 0:
if n == 1:
print("En liten apekatt sitter i ett tre")
print("Den erter krokodillen: 'Du kan ikke ta meg ned'")
else:
print(str(n) + " små apekatter sitter i ett tre")
print("De erter krokodillen: 'Du kan ikke ta oss ned'")
print("Sa kom krokodillen, så diger og så svær og slurp!")
n = n - 1
if n == 0:
print("Da var det ingen apekatter der")
else:
print("Så var de bare " + str(n) + " apekatter der")
print("\n")
|
[
"noreply@github.com"
] |
supermons.noreply@github.com
|
222b65cae3e331d1bb9d612840a859b2b8569ee0
|
238eef9423ef2a909d4531bb70b02afda7b25a38
|
/sutils/logger.py
|
33e816e792fd1f10ed271bc0cadfd1b99d080cd0
|
[] |
no_license
|
sunqiang85/spytorch_v2
|
7f8f2209e5c6bc266484a6eebc5992d7accc3616
|
635c18cf2dd56bd92079fe761c70e73743a7890a
|
refs/heads/master
| 2021-05-21T14:51:52.986344
| 2020-04-03T11:13:57
| 2020-04-03T11:13:57
| 252,686,933
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
from torch.utils.tensorboard import SummaryWriter
import logging
class Logger():
def __init__(self, __C):
self.__C = __C
self.tensorboard = SummaryWriter(__C.tensorboard_path)
self.filelogger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
fh = logging.FileHandler(__C.log_path, mode='w')
        formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
        fh.setFormatter(formatter)  # attach the formatter; without this it was created but never used
        self.filelogger.addHandler(fh)
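A minimal usage sketch for the Logger above (the config object and its attribute names are inferred from the constructor):

# class Cfg:
#     tensorboard_path = './runs'
#     log_path = './train.log'
# logger = Logger(Cfg())
# logger.filelogger.info('epoch 0 done')          # -> ./train.log
# logger.tensorboard.add_scalar('loss', 0.5, 0)   # -> TensorBoard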
|
[
"sunqiang85@163.com"
] |
sunqiang85@163.com
|
1b0b318977f920b37ea3424f58bc5c6b179df0c8
|
1eaf99e876b5fc5b05de1b41014dca6efc6601f1
|
/cupcake.py
|
23923a460a24f77dd8a4d3e9eda0372799595992
|
[] |
no_license
|
puspita-sahoo/codechef_program
|
5466dfc675e11b276a76d30fd8a3787fa106590a
|
1fae1e9b89ebedb963216e5e79a673716e8b5cc9
|
refs/heads/master
| 2023-08-21T15:01:42.804814
| 2021-09-10T17:47:26
| 2021-09-10T17:47:26
| 405,164,524
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 94
|
py
|
T = int(input())
for i in range(T):
N = int(input())
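    # floor(N/2) + 1 is the package size that maximizes the cupcakes left over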
ap = (N//2) + 1
print(ap)
|
[
"puspita.private@gmail.com"
] |
puspita.private@gmail.com
|