blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3df0a1ceeeace730a593525fe10af770673e4a9f
|
57522f0bdc09c57e32f8a8e34e4c5da64aedbc86
|
/ERROR_UNUSUAL/ERROE_FROM_PYTHON.py
|
6ce889b0debfc93ea890cfb822d651f105813efb
|
[] |
no_license
|
Alexanderklau/Start_again-python-
|
97b30345e2ef13d4552d7efd82498e7e615c262e
|
7ffbc2a6d53e1cff1c57258169c66bbab87210bc
|
refs/heads/master
| 2021-01-19T03:27:53.865013
| 2017-05-01T13:47:31
| 2017-05-01T13:47:31
| 87,314,045
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 286
|
py
|
# -*-coding:utf-8 -*-
__author__ = 'Yemilice_lau'
# Common built-in exception types, for reference:
# NameError
# ZeroDivisionError
# SyntaxError
# KeyError
# IOError
# AttributeError
# ImportError
# Exception-detection demo (Python 2 syntax).
try:
    try_site()  # watch this call for exceptions
except IOError,e:
    print 'Error is:',e
# if __name__ == '__main__':
|
[
"429095816@qq.com"
] |
429095816@qq.com
|
e54187180a4b3c1c7bb03c5ea33d3a7e525b28f0
|
7327ec847993aee7d19f647499a99aaa335894f0
|
/ExceptionsClasses.py
|
5f39dbc10b34c380ebfc8c145d7c28bb7d60478b
|
[] |
no_license
|
chetat/chingu-journal
|
ae56749fd62076ab31398afbcd78acef22519033
|
1a6ef77075e866d08613a884d474303e96cb7aa8
|
refs/heads/master
| 2023-02-05T08:13:38.987587
| 2019-08-29T22:33:36
| 2019-08-29T22:33:36
| 200,136,244
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,728
|
py
|
from flask import jsonify
class _APIError(Exception):
    """Base class for the API's HTTP error hierarchy.

    Each subclass fixes a default ``status_code``; callers may override it
    per instance via the ``status_code`` argument.  ``to_dict`` produces the
    JSON-serialisable body returned to the client.

    The original file repeated this identical body nine times; it is now
    factored into one base class.  Every public class name, its default
    status code, and the (message, status_code, payload) signature are
    unchanged, so existing ``except``/``raise`` sites keep working.
    """

    status_code = 500  # subclasses override with their HTTP status

    def __init__(self, message, status_code=None, payload=None):
        Exception.__init__(self)
        self.message = message
        if status_code is not None:
            self.status_code = status_code
        self.payload = payload

    def to_dict(self):
        """Return the error as a dict: the payload plus a 'message' key."""
        rv = dict(self.payload or ())
        rv['message'] = self.message
        return rv


class BadRequest(_APIError):
    status_code = 400


class ResourceExist(_APIError):
    status_code = 409


class UnAuthorized(_APIError):
    status_code = 401


class NotAcceptable(_APIError):
    status_code = 406


class UnprocessableEntity(_APIError):
    status_code = 422


class NotFound(_APIError):
    status_code = 404


class InternalServerError(_APIError):
    status_code = 500


class Forbiden(_APIError):  # sic: misspelled name kept for backward compatibility
    status_code = 403


class MethodNotAllowed(_APIError):
    status_code = 405
|
[
"yekuwilfred@gmail.com"
] |
yekuwilfred@gmail.com
|
4d080f5004c60a7a65725ed543f71140381ea82a
|
665455c521cc7cf76c5436337ed545de90976af4
|
/cohesity_management_sdk/models/protection_runs_stats.py
|
07e3d61720f3193a37daf1c0aaeaedb77522bcca
|
[
"Apache-2.0"
] |
permissive
|
hsantoyo2/management-sdk-python
|
d226273bc8eedcf9220ea4999a6f0b9a1a30d99c
|
0093194d125fc6746f55b8499da1270c64f473fc
|
refs/heads/master
| 2023-03-01T06:09:39.644085
| 2021-01-15T08:23:16
| 2021-01-15T08:23:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,044
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Cohesity Inc.
class ProtectionRunsStats(object):
    """Protection Runs statistics response.

    Holds the counts of archival, backup and replication Runs reported by
    the API.

    Attributes:
        num_archival_runs (long|int): count of archival Runs.
        num_backup_runs (long|int): count of backup Runs.
        num_replication_runs (long|int): count of replication Runs.
    """

    # Maps model attribute names to their JSON names in the API payload.
    _names = {
        "num_archival_runs": 'numArchivalRuns',
        "num_backup_runs": 'numBackupRuns',
        "num_replication_runs": 'numReplicationRuns'
    }

    def __init__(self,
                 num_archival_runs=None,
                 num_backup_runs=None,
                 num_replication_runs=None):
        """Constructor for the ProtectionRunsStats class"""
        self.num_archival_runs = num_archival_runs
        self.num_backup_runs = num_backup_runs
        self.num_replication_runs = num_replication_runs

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build an instance from a deserialized API response.

        Args:
            dictionary (dict): keys must match the API property names.

        Returns:
            ProtectionRunsStats or None: None when *dictionary* is None.
        """
        if dictionary is None:
            return None
        return cls(dictionary.get('numArchivalRuns'),
                   dictionary.get('numBackupRuns'),
                   dictionary.get('numReplicationRuns'))
|
[
"ashish@cohesity.com"
] |
ashish@cohesity.com
|
0c27d20fdc87eca7ffd5e3dd23dc7183700d8b76
|
503d2f8f5f5f547acb82f7299d86886691966ca5
|
/atcoder/abc152_a.py
|
9ff50dea04ec97c69fb7147455ba99f12c361aed
|
[] |
no_license
|
Hironobu-Kawaguchi/atcoder
|
3fcb649cb920dd837a1ced6713bbb939ecc090a9
|
df4b55cc7d557bf61607ffde8bda8655cf129017
|
refs/heads/master
| 2023-08-21T14:13:13.856604
| 2023-08-12T14:53:03
| 2023-08-12T14:53:03
| 197,216,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 143
|
py
|
# https://atcoder.jp/contests/abc152/tasks/abc152_a
# ABC152 A: print 'Yes' when both players have the same score, else 'No'.
N, M = map(int, input().split())
if N == M:
    print('Yes')
else:
    print('No')
|
[
"hironobukawaguchi3@gmail.com"
] |
hironobukawaguchi3@gmail.com
|
0fd81e0c525d6d7d7955212ffee3b926f8fce3b1
|
d039da1c0b99e2642d3c354de9faa6f427141ee3
|
/problems/leetcode/AddBinary.py
|
808381e8e1d6c8ea369dad3c843f8d22c2e756a4
|
[
"MIT"
] |
permissive
|
qicst23/pyshua
|
5a3e317823d0620d2034adfe345eddd6a722c7ff
|
4ae7bb8b626f233ebc2267024ba67dcfe49051ed
|
refs/heads/master
| 2016-09-15T20:26:16.694738
| 2013-12-15T04:50:04
| 2013-12-15T04:50:04
| 15,198,867
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,325
|
py
|
from problems.leetcode.LeetcodeProblem import LeetcodeProblem
class AddBinary(LeetcodeProblem):
    """Leetcode 'Add Binary': add two binary strings digit by digit."""

    def solve(self, a, b):
        """Return the binary-string sum of binary strings *a* and *b*.

        Walks both strings from the least-significant end, carrying as in
        schoolbook addition, then reverses the accumulated digits.
        """
        array = []
        i = len(a) - 1
        j = len(b) - 1
        plusOne = 0  # carry from the previous digit pair
        while i >= 0 and j >= 0:
            d1 = 1 if a[i] == '1' else 0
            d2 = 1 if b[j] == '1' else 0
            d = d1 + d2 + plusOne
            # Floor division: the original "d / 2" is true division on
            # Python 3 and turns the carry into a float; "//" is identical
            # for non-negative ints on Python 2.
            plusOne = d // 2
            d %= 2
            array.append(str(d))
            i -= 1
            j -= 1
        while i >= 0:
            # *a* still has digits; keep propagating the carry.
            d1 = 1 if a[i] == '1' else 0
            d = d1 + plusOne
            plusOne = d // 2
            d %= 2
            array.append(str(d))
            i -= 1
        while j >= 0:
            # *b* still has digits; keep propagating the carry.
            d2 = 1 if b[j] == '1' else 0
            d = d2 + plusOne
            plusOne = d // 2
            d %= 2
            array.append(str(d))
            j -= 1
        if plusOne:
            array.append('1')
        array.reverse()
        return ''.join(array)

    def verify(self, original_input, input, s1, s2):
        """Outputs match iff the two strings are equal."""
        return s1 == s2

    def input(self):
        """Return (a, b) string pairs parsed from the input file."""
        from Parser import parseTwoStrings
        return parseTwoStrings(open(self.inputPath))

    def output(self):
        """Yield expected output strings from the reference file."""
        from Parser import parseString
        for o in parseString(open(self.outputPath)):
            yield o[0]


problem = AddBinary
|
[
"baidingding7@gmail.com"
] |
baidingding7@gmail.com
|
92d380a45dfc641f0c4dd4893b526402f12b7a81
|
8e4e612bd50302fce4c9b2496bd7fa58b7151f92
|
/docs/examples/metaflow/src/deploy.py
|
06b83b50c0ba1757b7c065fd062ee2a633d81915
|
[
"Apache-2.0"
] |
permissive
|
yaliqin/tempo
|
1b30db685adcb37d2d46c356fc3b347579654d89
|
0878ae32ed6163a1c5115f20167d991a28535364
|
refs/heads/master
| 2023-09-02T10:51:22.167955
| 2021-11-10T07:53:26
| 2021-11-10T07:53:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,441
|
py
|
import tempfile
from typing import Tuple
import numpy as np
from metaflow import FlowSpec, IncludeFile
from tempo.metaflow.utils import create_s3_folder, save_pipeline_with_conda, upload_s3_folder
from tempo.serve.model import Model
from tempo.serve.pipeline import Pipeline, PipelineModels
from tempo.serve.utils import pipeline
# Name of the S3 folder / pipeline artifact.
PipelineFolder = "classifier"


def get_tempo_artifacts(
    flow_spec: FlowSpec, sklearn_model: Model, xgboost_model: Model, conda_env_path: IncludeFile
) -> Tuple[Pipeline, bool]:
    """Build and persist the Iris classifier pipeline.

    Creates a local temp folder and an S3 folder for the pipeline artifact,
    saves the pipeline with its conda environment, and uploads it when an
    S3 URL was obtained.

    Returns the pipeline and a flag that is True when a remote S3 URL exists.
    """
    classifier_local_path = tempfile.mkdtemp()
    classifier_url = create_s3_folder(flow_spec, PipelineFolder)

    @pipeline(
        name="classifier",
        uri=classifier_url,
        local_folder=classifier_local_path,
        models=PipelineModels(sklearn=sklearn_model, xgboost=xgboost_model),
        description="A pipeline to use either an sklearn or xgboost model for Iris classification",
    )
    def classifier(payload: np.ndarray) -> Tuple[np.ndarray, str]:
        # Use the sklearn result when it predicts class 1; otherwise defer
        # to the xgboost model.
        res1 = classifier.models.sklearn(input=payload)
        if res1[0] == 1:
            return res1, "sklearn prediction"
        else:
            return classifier.models.xgboost(input=payload), "xgboost prediction"

    save_pipeline_with_conda(classifier, classifier_local_path, conda_env_path)
    if classifier_url:
        upload_s3_folder(flow_spec, PipelineFolder, classifier_local_path)
    return classifier, classifier_url != ""
|
[
"cc@seldon.io"
] |
cc@seldon.io
|
6e061aed0334c5aabbf9c797a0301cfaf8794128
|
0e806bd0081741b64e499cc5aa5160e3441faf05
|
/setup.py
|
e7bd2684c8cbd49b5e06af2799c78c537af52f41
|
[
"BSD-3-Clause"
] |
permissive
|
AngeloKandah/py4web
|
8e36f749707c807d462daca690d4284223688434
|
8fc8349f7f3d87dd3d98bd256980a9f83af40361
|
refs/heads/master
| 2023-05-28T12:35:50.647129
| 2021-06-07T23:53:18
| 2021-06-07T23:53:18
| 370,612,357
| 0
| 0
|
BSD-3-Clause
| 2021-05-25T08:01:09
| 2021-05-25T08:01:09
| null |
UTF-8
|
Python
| false
| false
| 1,573
|
py
|
"""
The future of web2py
"""
import subprocess
import re
from setuptools import setup
def get_version():
    """Return the ``__version__`` string declared in py4web/__init__.py.

    Raises IndexError if no ``__version__ = '...'`` assignment is found.
    """
    # Raw string: the original plain string contained "\s" and "\=", which
    # are invalid escape sequences (DeprecationWarning, later SyntaxError).
    regex = re.compile(r"__version__\s*\=\s*['\"](?P<version>.+?)['\"]")
    return regex.findall(open("py4web/__init__.py").read())[0]
# Package metadata and build configuration for py4web.
setup(
    name="py4web",
    version=get_version(),  # read from py4web/__init__.py
    url="https://github.com/web2py/py4web",
    license="BSD",
    author="Massimo Di Pierro",
    author_email="massimo.dipierro@gmail.com",
    maintainer="Massimo Di Pierro",
    maintainer_email="massimo.dipierro@gmail.com",
    description="Experimental py4web (a better web2py)",
    packages=["py4web", "py4web.utils", "py4web.utils.auth_plugins"],
    # Ship the bundled web assets with the package.
    package_data={"py4web": ["assets/*"],},
    install_requires=[
        "bottle",
        "click",
        "gunicorn",
        "gevent",
        "threadsafevariable",
        "pydal",
        "pyjwt",
        "yatl",
        "tornado",
        "pluralize",
        "requests",
        "watchgod",
    ],
    # Installs the `py4web` command-line entry point.
    entry_points={"console_scripts": ["py4web=py4web.core:cli"],},
    zip_safe=False,
    platforms="any",
    classifiers=[
        "Development Status :: 1 - Planning",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
        "Topic :: Database :: Front-Ends",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
|
[
"massimo.dipierro@gmail.com"
] |
massimo.dipierro@gmail.com
|
af81e0db25a608e71b0508000dc02e02530b7234
|
75db8f938e8f766ad6977b813c4170490ea570c0
|
/images/img.py
|
84bd12f6906bac9e98b587f98385a64d9154a869
|
[] |
no_license
|
Nzparra/Chatbot_Haana
|
da0df1c012a969c023e13e9a495263ca68a083ed
|
7965876b68b579c0cbc248e31fe91dc35aaa0fed
|
refs/heads/main
| 2023-04-17T11:58:44.057523
| 2021-05-05T21:38:11
| 2021-05-05T21:38:11
| 364,453,302
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
from tkinter import *
# Registry of known images: name -> [filename, cached PhotoImage (or None)].
imagelist = {
    'banner': ['banner.jpg', None],
    'Haanna': ['Haana.png', None],
}


def get(name):
    """Return the PhotoImage registered under *name*, loading it lazily.

    Unknown names yield None.  A loaded image is cached in ``imagelist``
    so each file is read at most once.
    """
    entry = imagelist.get(name)
    if entry is None:
        return None
    if entry[1] is None:
        print('loading image:', name)
        entry[1] = PhotoImage(file=entry[0])
    return entry[1]
|
[
"nzparra@gmail.com"
] |
nzparra@gmail.com
|
8c4a0a056672ebe9c3ab02af2964b3c37ab112a1
|
402cb8ac32c5ca7a53f5875688d1ebba1e96474b
|
/set103.py
|
20f04fb7293b6b0248c32679c6948d0789c03aa5
|
[] |
no_license
|
Srija-U/codekataplayer
|
c073a13d8621f641a8aba8f23ebee4e1b673d58f
|
392f24f35f178b034cfb76d2acc31bbc4b3a5814
|
refs/heads/master
| 2020-05-02T10:59:45.052802
| 2019-07-22T00:27:46
| 2019-07-22T00:27:46
| 177,914,184
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
# Codekata set103: swap adjacent elements pairwise; when the count is odd,
# the trailing element keeps its position.
n=int(input())
t=0
# Odd count: process the first n-1 elements and remember the leftover.
if(n%2==1):
    n=n-1
    t=1
l=[int(i) for i in input().split()]
r=[]
for i in range(n):
    if(i%2==0):
        r.append(l[i+1])  # even index takes its right neighbour
    else:
        r.append(l[i-1])  # odd index takes its left neighbour
if(t==1):
    r.append(l[n])  # re-attach the unswapped last element
print(sep=" ",*r)
|
[
"noreply@github.com"
] |
Srija-U.noreply@github.com
|
018cb05750fc80d8c67df193536f4cdb378257ce
|
4d327de5447519d3c00e6572f74362380783006f
|
/source/res/scripts/client/gui/impl/windows/__init__.py
|
62086e5e820d17c53b13514fc18efc2662b63020
|
[] |
no_license
|
XFreyaX/WorldOfTanks-Decompiled
|
706ac55d919b766aa89f90c97a75672bf2142611
|
5025466edd0dd3e5e50a6c60feb02ae793f6adac
|
refs/heads/master
| 2021-09-21T15:10:32.655452
| 2018-08-28T07:34:00
| 2018-08-28T07:34:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,054
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/impl/windows/__init__.py
from frameworks.wulf import WindowFlags
from gui.impl.windows.content_menu_id import ContextMenuID
from gui.impl.windows.context_menu_window import ContextMenuContent, ContextMenuWindow
from gui.impl.windows.popup_window import PopUpWindow
from gui.impl.windows.main_window import MainWindow
from gui.impl.windows.service_window import ServiceWindow
from gui.impl.windows.standard_window import StandardWindow
from gui.impl.windows.tooltip_window import SimpleToolTipWindow, ToolTipWindow
from gui.impl.windows.window_view import WindowView
class UserWindowFlags(WindowFlags):
    """Game-specific window flags layered on top of wulf's WindowFlags."""
    # NOTE(review): 65536 = 1 << 16 and 131072 = 1 << 17; these look like
    # user-defined bits placed above the framework's type bits, with
    # 983040 = 0xF0000 widening the type mask — confirm against wulf.
    LOBBY_MAIN_WND = WindowFlags.MAIN_WINDOW | 65536
    BATTLE_MAIN_WND = WindowFlags.MAIN_WINDOW | 131072
    USER_TYPES_MASK = WindowFlags.WINDOW_TYPE_MASK | 983040


# Public re-exports of the window framework's building blocks.
__all__ = ('ContextMenuID', 'ContextMenuContent', 'ContextMenuWindow', 'MainWindow', 'ServiceWindow', 'StandardWindow', 'SimpleToolTipWindow', 'ToolTipWindow', 'PopUpWindow', 'WindowView')
|
[
"StranikS_Scan@mail.ru"
] |
StranikS_Scan@mail.ru
|
3cea620947c202587408de931f65151901e7d471
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/106/usersdata/191/51970/submittedfiles/questao2.py
|
dc6a45115c8ab2373b6df6e309d2ac7593d76b60
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 861
|
py
|
# -*- coding: utf-8 -*-
# Lottery check: read six bet numbers and six drawn numbers, then print
# 'terna' when the first three bets all occur among the drawn numbers.
ap1=int(input('digite o primeiro numero apostado:'))
ap2=int(input('digite o segundo numero apostado:'))
ap3=int(input('digite o terceiro numero apostado:'))
ap4=int(input('digite o quarto numero apostado:'))
ap5=int(input('digite o quinto numero apostado:'))
ap6=int(input('digite o sexto numero apostado:'))
s1=int(input('digite o primeiro numero sorteado:'))
s2=int(input('digite o segundo numero sorteado:'))
s3=int(input('digite o terceiro numero sorteado:'))
s4=int(input('digite o quarto numero sorteado:'))
s5=int(input('digite o quintonumero sorteado:'))
s6=int(input('digite o sexto numero sorteado:'))
# NOTE(review): only ap1..ap3 are tested; ap4..ap6 never affect the result —
# confirm whether an "any three of six" check was intended.
if (ap1==s1 or ap1==s2 or ap1==s3 or ap1==s4 or ap1==s5 or ap1==s6) and (ap2==s1 or ap2==s2 or ap2==s3 or ap2==s4 or ap2==s5 or ap2==s6) and (ap3==s1 or ap3==s2 or ap3==s3 or ap3==s4 or ap3==s5 or ap3==s6):
    print('terna')  # was print(terna) — NameError: name 'terna' is not defined
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
4ccdd823740fe9f5733cbda40c7455622cb8a1b9
|
4908b1d34d69c1cb652f25049552562574e1075f
|
/2020/Day-21/Allergen_Assessment/solve_1.py
|
bc99cdd01a13e1505ee0f04c259e5b8bf69fda85
|
[
"MIT"
] |
permissive
|
sreekesari-vangeepuram/adventofcode
|
3d4ad98a25a30640182d928538b421e00ad8259d
|
645531be0208affe042ac0328105b9ef3cfc9dbf
|
refs/heads/main
| 2023-07-26T13:36:03.036721
| 2021-08-11T08:27:25
| 2021-08-11T08:27:25
| 317,850,039
| 1
| 0
|
MIT
| 2021-08-11T08:27:26
| 2020-12-02T12:08:13
|
Go
|
UTF-8
|
Python
| false
| false
| 1,369
|
py
|
#!/usr/bin/env python
def pair_up(ingredients_list):
    """Map each allergen to its candidate ingredients.

    Each input line looks like "a b c (contains dairy, fish)".  For every
    allergen, the candidate set is the intersection of the ingredient sets
    of all foods listing that allergen.  Also returns the flat list of all
    ingredient occurrences (duplicates preserved).
    """
    candidates = dict()
    seen_ingredients = list()
    for line in ingredients_list:
        left, right = line.replace(")", "").split(" (contains ")
        ingredients = set(left.split())
        allergens = {a.strip() for a in right.split(",")}
        seen_ingredients.extend(ingredients)
        for allergen in allergens:
            # First sighting starts from this food's ingredients; later
            # sightings narrow the candidate set by intersection.
            previous = candidates.get(allergen, ingredients)
            candidates[allergen] = previous & ingredients
    return candidates, seen_ingredients
# Parse the puzzle input: one food per line.
ingredients_list = open("input.txt").read().strip().split("\n")
pairs, all_ingredients = pair_up(ingredients_list)

verified_allergens, verified_ingredients = set(), set()
# Repeatedly resolve allergens whose candidate set has narrowed to a single
# ingredient, pruning resolved ingredients from the remaining candidates.
while len(pairs.keys()) != 0:
    for allergen, ingredients in pairs.items():
        if len(ingredients) == 1:
            verified_allergens.add(allergen)
            verified_ingredients.add(ingredients.pop())
        else:
            pairs[allergen] = ingredients - verified_ingredients
    for allergen in verified_allergens:
        if allergen in pairs.keys():
            _ = pairs.pop(allergen)

# Count how often the allergen-free ingredients appear across all foods.
unmatched_ingredients = set(all_ingredients) - verified_ingredients
appearances = sum(all_ingredients.count(ingredient) for ingredient in unmatched_ingredients)
print(f"Count of the [duplicate] unmatched ingredinets: {appearances}")
|
[
"kesari.vangeepuram@gmail.com"
] |
kesari.vangeepuram@gmail.com
|
d0f36bff5a8e9441f03620fa0d8be3b18a40d2c2
|
79f42fd0de70f0fea931af610faeca3205fd54d4
|
/base_lib/ChartDirector/pythondemo/shadowpie.py
|
c14b2d3667d78add97f394492cf2a5f7860ad9dc
|
[
"IJG"
] |
permissive
|
fanwen390922198/ceph_pressure_test
|
a900a6dc20473ae3ff1241188ed012d22de2eace
|
b6a5b6d324e935915090e791d9722d921f659b26
|
refs/heads/main
| 2021-08-27T16:26:57.500359
| 2021-06-02T05:18:39
| 2021-06-02T05:18:39
| 115,672,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 950
|
py
|
#!/usr/bin/python
from pychartdir import *
def createChart(chartIndex) :
    """Render one 3D shadow-mode pie chart and write shadowpie<index>.png.

    :param chartIndex: 0..3 — selects the tilt angle (45, 135, 225, 315 deg).
    """
    # the tilt angle of the pie
    angle = chartIndex * 90 + 45
    # The data for the pie chart
    data = [25, 18, 15, 12, 8, 30, 35]
    # Create a PieChart object of size 110 x 110 pixels
    c = PieChart(110, 110)
    # Set the center of the pie at (55, 55) and the radius to 36 pixels
    c.setPieSize(55, 55, 36)
    # Set the depth, tilt angle and 3D mode of the 3D pie (-1 means auto
    # depth, the final 1 means the 3D effect is in shadow mode)
    c.set3D(-1, angle, 1)
    # Add a title showing the shadow angle
    c.addTitle("Shadow @ %s deg" % (angle), "arial.ttf", 8)
    # Set the pie data
    c.setData(data)
    # Disable the sector labels by setting the color to Transparent
    c.setLabelStyle("", 8, Transparent)
    # Output the chart
    c.makeChart("shadowpie%s.png" % chartIndex)


# Render all four tilt variants.
createChart(0)
createChart(1)
createChart(2)
createChart(3)
|
[
"fanwen@sscc.com"
] |
fanwen@sscc.com
|
0b581dfd0400c5c54324568a3983a2c3fb21fe1e
|
6d8d05e6fce7ff4a6b58c4ab021ea605e8d00878
|
/PDF/urls.py
|
2a48ebda94a510517922c077bbd612edf326d994
|
[] |
no_license
|
joy1954islam/How-to-create-PDF-files-in-a-Django-project
|
727cfd758123392b37d4f7e625c58901d0b7ef9b
|
442a438536ce290009d3bf2559c7bcdfef4cefbf
|
refs/heads/main
| 2022-12-29T17:18:52.812433
| 2020-10-14T18:07:59
| 2020-10-14T18:07:59
| 304,097,011
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,191
|
py
|
"""PDF URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from PDFApps import views
# URL routes for the PDF demo project.
urlpatterns = [
    path('admin/', admin.site.urls),
    # List all stored PDF records.
    path('',views.PDFLISTVIEW.as_view(),name='pdf_list_view'),
    # Render one record as a PDF document.
    path('view/<int:pk>/',views.render_pdf_view,name='pdf_view'),
    path('create/',views.PDFCreate.as_view(),name='create'),
]
# Serve uploaded media and static assets directly (development convenience).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
[
"joyislam1954@gmail.com"
] |
joyislam1954@gmail.com
|
3969ff07af1dcbc0f0f9d92c21c339379ec8c241
|
fbb141c9b99c4c08ce2c0acfe13630d694d98744
|
/Week_02-Hash&stack&queue/08_nTreeLevelorder.py
|
885576cab7b3f2b497b3db36a89c5a61006c158a
|
[] |
no_license
|
huixian3/algorithm017
|
1534bc8a0364595b056e0f346cfe9fa8b8fee3bd
|
f43c99dc7810de863f8cd79115e272ac65ce9257
|
refs/heads/master
| 2023-04-02T07:10:03.670003
| 2021-04-13T14:38:36
| 2021-04-13T14:38:36
| 297,989,771
| 0
| 0
| null | 2020-09-23T14:05:41
| 2020-09-23T14:05:40
| null |
UTF-8
|
Python
| false
| false
| 1,504
|
py
|
#!/usr/bin/python3
# coding=utf-8
# TODO
# BFS 使用队列实现 queue deque
# 栈 用于深度优先搜索
import collections
class Node(object):
    """Node of an N-ary tree: a value plus a list of child nodes."""
    def __init__(self, val=None, children=None):
        self.val = val
        self.children = children
class Solution(object):
    """N-ary tree level-order traversal (Leetcode 429)."""

    def levelOrder(self, root):
        """
        :type root: Node
        :rtype: List[List[int]]

        Return the node values grouped by depth.  Implemented as a preorder
        DFS that records each node's depth; because children are visited
        left-to-right, each level bucket ends up in level order.

        The original method also carried two alternative BFS
        implementations after the ``return`` statement — unreachable dead
        code — which have been removed.
        """
        def _collect(node, depth):
            # First visit at this depth: open a new level bucket.
            if len(result) == depth:
                result.append([])
            result[depth].append(node.val)
            for child in node.children:
                _collect(child, depth + 1)

        if not root:
            return list()
        result = []
        _collect(root, 0)
        return result
|
[
"zhanhuixian@meituan.com"
] |
zhanhuixian@meituan.com
|
352d33ea6c330f082f7b8af1d5d5a548eca76fd4
|
9b2bb0c822a2d637354c92eea8dddbdbbfea89d2
|
/Generic/common/registration/api/signup_completion/apidocumentation_signup_completion.py
|
c16f8bf0315cf8af42f9934bb40848107e28edbb
|
[] |
no_license
|
archiemb303/common_backend_django
|
69d299c9bc564ef520b9c9130e7f5abd7ff68306
|
36eb9931f330e64902354c6fc471be2adf4b7049
|
refs/heads/master
| 2023-06-26T19:25:27.355021
| 2021-07-24T06:23:49
| 2021-07-24T06:23:49
| 389,017,642
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,123
|
py
|
# Sample Input:
# {
# "APIDetails":{
# "token_type":1,
# "token_vendor_id":1,
# "token_string":"sdxfcgvbhjnmklasdfghjk",
# "dev_key":"sjdkljagagerukjdgjncjdsnjkfhkjasdghreuiuie@#$%$dgd#$@d234"
# },
# "APIParams": {
# "first_name":"raj",
# "last_name":"raj",
# "email_id":"raj111@mailinator.com",
# "activation_key": "d36Ej82HhRWjqIUi9baKOm4MA3gy0KLb",
# "password": "Sac$2045",
# "sex": "male",
# "date_of_birth": "1985-07-04",
# "orientation": "straight",
# "city_id":1
#
#
# }
# }
# Sample Output:
# {
# "AuthenticationDetails": {
# "Status": "Success",
# "Message": "ApiDetails fine to process"
# },
# "Payload": {
# "Status": "Success",
# "Message": "Congratulations, you areregistered successfully with genericbackend",
# "Payload": {
# "profile_id": 145,
# "first_name": "raj",
# "last_name": "raj",
# "sex": "male",
# "date_of_birth": "1985-07-04",
# "orientation": "straight",
# "web_profile_key": "6DX5SFX9mFpkRBpkSBAPPux3C4UmF2rp",
# "android_app_profile_key": "6DX5SFX9mFpkRBpkSBAPPux3C4UmF2rp",
# "ios_app_profile_key": "6DX5SFX9mFpkRBpkSBAPPux3C4UmF2rp",
# "global_profile_key": "6DX5SFX9mFpkRBpkSBAPPux3C4UmF2rp",
# "added_date": "2020-05-30T19:09:42.607003Z",
# "added_by": "EV_raj111@mailinator.com",
# "last_modified_date": "2020-05-30T19:09:42.607003Z",
# "last_modified_by": "EV_raj111@mailinator.com",
# "city_id_id": 1,
# "dp_flag_id": 1,
# "profile_status_id": 1,
# "profile_completion_status_id": 1
# }
# }
# }
#Sample Failed Output:
# {
# "AuthenticationDetails": {
# "Status": "Success",
# "Message": "ApiDetails fine to process"
# },
# "Payload": {
# "Status": "Failure",
# "Message": "invalid activation key",
# "Payload": null
# }
# }
|
[
"archiemb303@gmail.com"
] |
archiemb303@gmail.com
|
8078afef56368c393a9ce27d6c53b841545144d9
|
cad9c13ad5864317d7687b44f39db42a402f36f0
|
/lec05_module/module04.py
|
96b6933788465ef6d16690f7b9e549ba9a7b5e99
|
[] |
no_license
|
handaeho/lab_python
|
12b686eb0d57358509f2d0cd607064deced5b25d
|
da068ea62682ffa70c7d23dde4ef132c49a81364
|
refs/heads/master
| 2020-11-26T08:22:27.656109
| 2020-04-13T02:28:47
| 2020-04-13T02:28:47
| 229,013,932
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
"""
module04.py
utils 패키지 안의 mymath01, mymath02 모듈을 사용하고자 한다.
"""
# from 패키지 import 모듈
from utils import mymath01
from utils import mymath02
print(mymath01.pi)
print(mymath02.div(10, 20))
|
[
"mrdh94@naver.com"
] |
mrdh94@naver.com
|
0370295f0a04bef959d0c9e07c0a941f55f712d2
|
0c6dd99ecc2c3228ed9d47f30b26b4ef6b207b67
|
/integration_tests/conftest.py
|
01fbb282c4bb6499015e574d83d96c6d67f630bf
|
[
"BSD-3-Clause"
] |
permissive
|
yaelmi3/backslash
|
68d14bbca63d2fe7c160418768d5573f92dcdfb5
|
3d3a10c07a01a8a3a1214a85ace70566b10697a2
|
refs/heads/master
| 2020-12-28T22:47:13.782026
| 2019-08-06T10:35:37
| 2019-08-06T10:35:37
| 245,207,397
| 0
| 0
|
NOASSERTION
| 2020-03-05T16:02:12
| 2020-03-05T16:02:11
| null |
UTF-8
|
Python
| false
| false
| 2,482
|
py
|
import json
import time
import subprocess
import requests
import pytest
from urlobject import URLObject
_docker_running = False
def pytest_addoption(parser):
    """Register command-line options for the integration-test run."""
    parser.addoption(
        "--app-url", action="store", default=None, help="Integration App URL"
    )
    # Credentials used to bootstrap/log in to the app under test.
    parser.addoption("--admin-username", action="store", default="admin@localhost")
    parser.addoption("--admin-password", action="store", default="12345678")
@pytest.fixture(autouse=True, scope="session")
def cleanup_docker(request):
    """Session-wide finalizer: tear down docker-compose if it was started."""
    @request.addfinalizer
    def cleanup():
        if _docker_running:
            _stop_docker()
@pytest.fixture(scope="session")
def integration_url(request, timeout=30):
    """Session fixture: the integration app's URL, polled until reachable.

    Retries GETs (3 s apart) until the app answers OK or *timeout* seconds
    elapse, then runs first-time setup if the instance needs it.

    Raises RuntimeError when no --app-url was given or the URL never
    becomes available.
    """
    url = request.config.getoption("--app-url")
    if url is None:
        raise RuntimeError("No integration URL provided")
    end_time = time.time() + timeout
    retry = 0
    while time.time() < end_time:
        # Sleep only *between* attempts — the original incremented the
        # counter first, so it slept 3 s before even the first request.
        if retry > 0:
            time.sleep(3)
        retry += 1
        try:
            resp = requests.get(url)
        except requests.RequestException:
            continue
        if resp.ok:
            returned = URLObject(url)
            _do_setup_if_needed(returned)
            return returned
    raise RuntimeError(f"URL {url} did not become available in time")
def _do_setup_if_needed(url):
    """Run the app's one-time setup API when it reports setup is needed."""
    with requests.Session() as s:
        s.headers.update({"Content-type": "application/json"})
        # get_app_config tells us whether the instance is still unconfigured.
        if s.post(url.add_path("api/get_app_config"), data="{}").json()["result"][
            "setup_needed"
        ]:
            resp = s.post(
                url.add_path("api/setup"),
                data=json.dumps(
                    {
                        "config": {
                            "admin_user_email": "admin@localhost",
                            "admin_user_password": "12345678",
                        }
                    }
                ),
            )
            resp.raise_for_status()
def _start_docker():
    """Build and start the docker-compose stack (idempotent)."""
    global _docker_running
    if _docker_running:
        return
    # NOTE(review): the flag is set both before and after the compose calls;
    # the early set ensures the session finalizer tears down a partially
    # started stack even if "build"/"up" raises.
    _docker_running = True
    _run_docker_compose("build")
    _run_docker_compose("up -d")
    _docker_running = True


def _stop_docker():
    """Tear down the docker-compose stack and clear the running flag."""
    global _docker_running
    _run_docker_compose("down")
    _docker_running = False


def _run_docker_compose(cmd):
    """Run a docker-compose subcommand against the testing compose files."""
    subprocess.run(
        f"docker-compose -f docker/docker-compose.yml -f docker/docker-compose-testing-override.yml -p backslash-testing {cmd}",
        shell=True,
        check=True,
    )
|
[
"vmalloc@gmail.com"
] |
vmalloc@gmail.com
|
ba1d47b84928ddee170164408faa078f1f1dd689
|
3d7a3cb5044ad1334353fd06e6c4d8aa0990de89
|
/tests/test_templatetags.py
|
f88ecbfa12a27294518006f9316519055bbfb499
|
[
"MIT"
] |
permissive
|
Convious/django-concurrency
|
ed46faf91e54be58d7ed8e030b764d9537bed240
|
815230336aa173bd73df1f411a77434944958c39
|
refs/heads/develop
| 2023-05-11T07:10:46.448831
| 2019-08-27T17:29:39
| 2019-08-27T17:29:39
| 225,925,608
| 0
| 0
|
MIT
| 2023-05-02T05:10:32
| 2019-12-04T17:53:48
| null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pytest
from demo.models import SimpleConcurrentModel
from concurrency.templatetags.concurrency import identity, is_version, version
logger = logging.getLogger(__name__)
@pytest.fixture
def obj():
    """A freshly created SimpleConcurrentModel row."""
    return SimpleConcurrentModel.objects.create()


@pytest.mark.django_db
def test_identity(obj):
    # identity() renders "<pk>,<version>".
    assert identity(obj).split(',') == [str(obj.pk), str(obj.version)]


@pytest.mark.django_db
def test_version(obj):
    # version() returns the instance's current version number.
    assert version(obj) == obj.version


@pytest.mark.django_db
def test_is_version(obj):
    # is_version() recognises the model's concurrency version field.
    assert is_version(obj._concurrencymeta.field)
|
[
"s.apostolico@gmail.com"
] |
s.apostolico@gmail.com
|
9d1001b8abd5c5a9bd0ccdf3d5411d67d97ac9cc
|
61a21ed2dcdfe9a43588c5582eea38ce8fdfcbf2
|
/akshare/bond/bond_futures.py
|
404381fb3be49c7c36c462965ae3ccc93718a333
|
[
"MIT"
] |
permissive
|
huanghyw/akshare
|
44187c6c56872d499651bb62c178ee837c776388
|
ed84e937773c0420cc003793d74b73e64223e08b
|
refs/heads/master
| 2023-04-22T07:06:08.929307
| 2021-05-02T16:05:59
| 2021-05-02T16:05:59
| 319,346,216
| 13
| 5
|
MIT
| 2021-05-02T16:05:59
| 2020-12-07T14:32:08
| null |
UTF-8
|
Python
| false
| false
| 1,089
|
py
|
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/10/10 13:42
Desc: 国债期货可交割券相关指标
http://www.csindex.com.cn/zh-CN/bond-valuation/bond-futures-deliverable-coupons-related-indicators?date=2020-09-22
"""
import pandas as pd
import requests
def bond_futures_deliverable_coupons(trade_date: str = "2020-09-23") -> pd.DataFrame:
    """Fetch deliverable-coupon indicators for Chinese treasury-bond futures.

    Queries the CSIndex bond-valuation page for the given trading day and
    returns the first HTML table found in the response.

    :param trade_date: trading day to query, formatted ``YYYY-MM-DD``
    :type trade_date: str
    :return: indicator table for the deliverable coupons
    :rtype: pandas.DataFrame
    """
    url = "http://www.csindex.com.cn/zh-CN/bond-valuation/bond-futures-deliverable-coupons-related-indicators"
    response = requests.get(url, params={"date": trade_date})
    # The page carries exactly one data table; read_html parses them all.
    tables = pd.read_html(response.text)
    return tables[0]
if __name__ == '__main__':
bond_futures_deliverable_coupons_df = bond_futures_deliverable_coupons(trade_date="2020-09-22")
print(bond_futures_deliverable_coupons_df)
|
[
"jindaxiang@163.com"
] |
jindaxiang@163.com
|
99f34fa74eaf2e0f18522bb675fe078774e0b38b
|
ebfcae1c5ba2997b2ac4471d5bedc3f5daffcb31
|
/repos/simpleapi-master/example_project/client/python/flask/testclient.py
|
467379407d15db6ef518a0bf3549f60eed722cb8
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
babiato/flaskapp1
|
84de2d0b26a54f5820d3bbe97926782ad41e005c
|
530beb9e3b8516e0e93960b99521c23a523ef546
|
refs/heads/master
| 2023-02-26T16:36:49.760632
| 2021-02-04T09:08:40
| 2021-02-04T09:08:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
# -*- coding: utf-8 -*-
import sys
sys.path.append("../../../../")
from simpleapi.client import Client, RemoteException
calculator = Client(ns='http://localhost:5000/api/',
transport_type='xml', timeout=60)
print "5 + 5 =", calculator.add(a=5, b=16)
|
[
"jinxufang@tencent.com"
] |
jinxufang@tencent.com
|
74db1bd87aa3d7a2b8f7237fe22f7e0d6381601d
|
f1d9df04036fc43c9e5cc7998b83261f4daa94b8
|
/tests/base_test_case.py
|
618b5e7cad75c6d2d82d4d926a30004158dacbc8
|
[] |
no_license
|
Eaterator/web
|
019eb6547995be30b3468e5c44ecc52f05858fb4
|
9c598607f76ad770c66d85c47ffcec05f92f4d66
|
refs/heads/master
| 2021-01-09T20:30:13.417308
| 2017-04-25T02:44:35
| 2017-04-25T02:44:35
| 81,286,177
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,825
|
py
|
from datetime import datetime
import json
import random
from application.app import create_app
from application.auth.models import Role, User
from application.auth.auth_utilities import PasswordUtilities, ACCESS_TOKEN_HEADER
from application.recipe.models import Ingredient, IngredientRecipe, Recipe
from application.auth.roles import ROLES
random.seed(1000)
class BaseTempDBTestCase:
"""
Creates helper functions to create fresh DB instance between tests, and helper function to populate
necessary data for tests to avoid mock responses to try to imitate real responses in production.
"""
def setUpDB(self):
from tests.tsting_config import SQLALCHEMY_DATABASE_URI
from application import config
config.SQLALCHEMY_DATABASE_URI = SQLALCHEMY_DATABASE_URI
self.app = create_app(app_config=config)
from application.base_models import db
self.db = db
with self.app.app_context():
self.db.session.remove()
self.db.session.close()
self.db.drop_all()
self.db.create_all()
self.roles = self.ingredients = self.recipes = None
self.test_client = self.app.test_client()
def create_recipes(self):
self.recipes = []
with self.app.app_context():
for recipe in RECIPES:
new_recipe = Recipe(**recipe)
self.db.session.add(new_recipe)
self.recipes.append(new_recipe)
self.db.session.commit()
def create_ingredients(self):
with self.app.app_context():
self.ingredients = []
for ingredient in INGREDIENTS:
new_ingredient = Ingredient(name=ingredient)
self.db.session.add(new_ingredient)
self.ingredients.append(new_ingredient)
self.db.session.commit()
def create_recipe_ingredients(self):
with self.app.app_context():
num_ingredients = 0
for recipe in self.recipes:
if num_ingredients < 5:
num_ingredients += 1
for ingredient in random.sample(self.ingredients, num_ingredients):
new_ingredient_recipe = IngredientRecipe(
ingredient=ingredient.pk,
recipe=recipe.pk
)
self.db.session.add(new_ingredient_recipe)
self.db.session.commit()
def create_roles(self):
with self.app.app_context():
self.roles = {}
for role in ROLES:
self.roles[role["name"]] = Role(**role)
self.db.session.add(self.roles[role["name"]])
self.db.session.commit()
def create_user(self, user_payload, role):
with self.app.app_context():
user = User(**user_payload)
user.password = PasswordUtilities.generate_password(user.password)
user.role = role.pk
self.db.session.add(user)
self.db.session.commit()
def create_regular_user(self):
with self.app.app_context():
if not self.roles:
self.create_roles()
self.create_user(TEST_REGULAR_USER, self.roles["regular"])
return TEST_REGULAR_USER
def create_business_user(self):
with self.app.app_context():
if not self.roles:
self.create_roles()
self.create_user(TEST_BUSINESS_USER, self.roles['corporate'])
return TEST_BUSINESS_USER
def create_admin_user(self):
with self.app.app_context():
if not self.roles:
self.create_roles()
self.create_user(TEST_ADMIN_USER, self.roles['admin'])
return TEST_ADMIN_USER
def get_jwt_token(self, user):
resp = self.test_client.post('/auth/',
data=json.dumps({
"username": user["username"],
"password": user["password"]
}),
content_type="application/json"
)
return json.loads(resp.data.decode('utf-8')).get(ACCESS_TOKEN_HEADER), resp
def tearDownDB(self):
with self.app.app_context():
self.db.session.remove()
self.db.session.close()
self.db.drop_all()
INGREDIENTS = ["chicken", "potato", "pepper", "onion", "carrot", "celery", "beef", "pork"]
RECIPES = [
{"title": "chicken with onions"},
{"title": "chicken with peppers"},
{"title": "chicken with potato"},
{"title": "chicken with potato and peppers"},
{"title": "onion with peppers and onion"},
{"title": "peppers with potato and onion"},
{"title": "chicken with onions"},
{"title": "chicken with peppers"},
{"title": "chicken with potato"},
{"title": "chicken with potato and peppers"},
{"title": "onion with peppers and onion"},
{"title": "peppers with potato and onion"},
]
TEST_REGISTER_USER_PAYLOAD = dict(
username="testuser@123.com",
password="TestUser123!",
confirm="TestUser123!",
first_name="test",
last_name="user",
date_of_birth=datetime(1991, 1, 1)
)
TEST_REGULAR_USER = dict(
username="testregularuser@123.com",
password="TestUser123!",
first_name="test",
last_name="user",
date_of_birth=datetime(1991, 1, 1)
)
TEST_BUSINESS_USER = dict(
username="testbusinessuser@123.com",
password="TestUser123!",
first_name="test",
last_name="user",
date_of_birth=datetime(1991, 1, 1)
)
TEST_ADMIN_USER = dict(
username="testadminuser@123.com",
password="TestUser123!",
first_name="test",
last_name="user",
date_of_birth=datetime(1991, 1, 1)
)
|
[
"currahl@yahoo.ca"
] |
currahl@yahoo.ca
|
c40a4d51b0c5e6ad6b99005c2341a3eccd0a3b90
|
75dcb56e318688499bdab789262839e7f58bd4f6
|
/_algorithms_challenges/leetcode/LeetCode/111 Minimum Depth of Binary Tree.py
|
f77921ed53bd989cfd4ee577d98dec3b06404713
|
[] |
no_license
|
syurskyi/Algorithms_and_Data_Structure
|
9a1f358577e51e89c862d0f93f373b7f20ddd261
|
929dde1723fb2f54870c8a9badc80fc23e8400d3
|
refs/heads/master
| 2023-02-22T17:55:55.453535
| 2022-12-23T03:15:00
| 2022-12-23T03:15:00
| 226,243,987
| 4
| 1
| null | 2023-02-07T21:01:45
| 2019-12-06T04:14:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 875
|
py
|
"""
Given a binary tree, find its minimum depth.
The minimum depth is the number of nodes along the shortest path from the root node down to the nearest leaf node.
"""
__author__ = 'Danyang'
class TreeNode(object):
    """A binary-tree node: a payload value plus two child links."""

    def __init__(self, x):
        # Children start detached; callers wire them up explicitly.
        self.val = x
        self.left = self.right = None
class Solution(object):
    """Compute the minimum depth of a binary tree (root to nearest leaf)."""

    def minDepth(self, root):
        """
        :param root: TreeNode
        :return: integer
        """
        return self.fathom(root, 0)

    def fathom(self, root, depth):
        """
        DFS: descend only through existing children so a one-sided node
        is not mistaken for a leaf.
        """
        if not root:
            return depth
        left, right = root.left, root.right
        if right and not left:
            return self.fathom(right, depth + 1)
        if left and not right:
            return self.fathom(left, depth + 1)
        candidates = (self.fathom(left, depth + 1),
                      self.fathom(right, depth + 1))
        return min(candidates)
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
efd5603bda7dcc385b754567a05f493a43bc8d0b
|
ed48e992ad5fbb579afae6d0f7e6df775f8d306e
|
/lib/util.py
|
d5df4f598dd4accf11d107af1830400849d1696e
|
[] |
no_license
|
sterlingbaldwin/e3sm_to_cmip
|
6a23b92145c042af16979ff67b17555c3a9222e8
|
bc5bcfaad5901eb6f07ab450eeab20144f4029cb
|
refs/heads/master
| 2020-03-12T12:08:30.743456
| 2018-04-25T23:27:47
| 2018-04-25T23:27:47
| 130,611,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
import sys
import traceback
def format_debug(e):
    """
    Return a string of an exception's relevant information.

    :param e: the exception instance being handled (must be called from
        inside an ``except`` block so ``sys.exc_info()`` is populated)
    :return: a multi-line diagnostic string
    """
    exc_type, exc_value, tb = sys.exc_info()
    return """
1: {doc}
2: {exec_info}
3: {exec_0}
4: {exec_1}
5: {lineno}
6: {stack}
""".format(
        doc=e.__doc__,
        exec_info=(exc_type, exc_value, tb),
        exec_0=exc_type,
        exec_1=exc_value,
        # Python 3 removed the traceback.tb_lineno() helper; the line
        # number lives on the traceback object itself.
        lineno=tb.tb_lineno if tb is not None else None,
        # print_tb() writes to stdout and returns None, which made the
        # original always render "6: None"; format_tb() returns the text.
        stack=''.join(traceback.format_tb(tb)))
|
[
"baldwin32@llnl.gov"
] |
baldwin32@llnl.gov
|
f8108f1761c2e56a4f7449a8e629de145543dded
|
e3f5f41b242650b4bef68aa191a5779aedd3e02e
|
/Chapter03/config.py
|
d12aeca3649c2d96b68e8b32d23a06c74a03f285
|
[
"MIT"
] |
permissive
|
PacktPublishing/Mastering-Flask-Web-Development-Second-Edition
|
d4675c047bb51b0154958205f53c962ab4d32e4c
|
c3174127b40f8af1e2ab5e614994ffed7acbc11b
|
refs/heads/master
| 2023-05-11T00:23:30.213655
| 2023-01-18T09:14:14
| 2023-01-18T09:14:14
| 154,667,293
| 168
| 131
|
MIT
| 2023-05-01T20:52:13
| 2018-10-25T12:30:58
|
Python
|
UTF-8
|
Python
| false
| false
| 483
|
py
|
class Config(object):
    """Base settings shared by every environment."""
    # Number of blog posts shown per paginated page.
    POSTS_PER_PAGE = 10
class ProdConfig(Config):
    """Production settings."""
    # NOTE(review): secret key is hard-coded in source; in a real
    # deployment it should come from the environment, not the repo.
    SECRET_KEY = '\xcb\xd7\x8a.\x82\x9c1Lu\xf1&2\xf6i\xfa\x8e\xb1\xc9t^\xccW\xdbw'
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_DATABASE_URI = 'sqlite:///database.db'
class DevConfig(Config):
    """Development settings: debug on, modification tracking enabled."""
    DEBUG = True
    # NOTE(review): hard-coded secret key — acceptable only for local dev.
    SECRET_KEY = '\xa8\xcc\xeaP+\xb3\xe8 |\xad\xdb\xea\xd0\xd4\xe8\xac\xee\xfaW\x072@O3'
    SQLALCHEMY_TRACK_MODIFICATIONS = True
    SQLALCHEMY_DATABASE_URI = 'sqlite:///database.db'
|
[
"noreply@github.com"
] |
PacktPublishing.noreply@github.com
|
ccf8ee337ea275a691e6d35ae32738e200fead01
|
afc8d5a9b1c2dd476ea59a7211b455732806fdfd
|
/Configurations/ZH3l/STXS_nanoAOD/v5/Full2018nano_STXS_1p1/aliases.py
|
303e9601b0371fd3ca5cf4bb282c6e5665853816
|
[] |
no_license
|
latinos/PlotsConfigurations
|
6d88a5ad828dde4a7f45c68765081ed182fcda21
|
02417839021e2112e740607b0fb78e09b58c930f
|
refs/heads/master
| 2023-08-18T20:39:31.954943
| 2023-08-18T09:23:34
| 2023-08-18T09:23:34
| 39,819,875
| 10
| 63
| null | 2023-08-10T14:08:04
| 2015-07-28T07:36:50
|
Python
|
UTF-8
|
Python
| false
| false
| 2,914
|
py
|
#Aliases (mostly btag)
mc = [skey for skey in samples if skey not in ('Fake', 'DATA')]
#2017
#bWP = '0.1522' #Loose
bWP = '0.4941'
aliases['bVeto'] = {
'expr': '(Sum$( CleanJet_pt > 20.0 && Jet_btagDeepB[CleanJet_jetIdx] > '+bWP+' ) == 0)'
}
aliases['dphilmet_mme'] = {
'expr': '( (abs(PuppiMET_phi-Lepton_phi[]) > 3.14159)*(abs(PuppiMET_phi-Lepton_phi[]) - 3.14159) + (abs(PuppiMET_phi-Lepton_phi[]) < 3.14159)*abs(PuppiMET_phi-Lepton_phi[]))*(abs(Lepton_pdgId[]) == 11 && abs(Lepton_pdgId[0] * Lepton_pdgId[1] * Lepton_pdgId[2]) == 13*13*11) + -999*(abs(Lepton_pdgId[]) != 11 || abs(Lepton_pdgId[0] * Lepton_pdgId[1] * Lepton_pdgId[2]) != 13*13*11)'
}
aliases['pt_e_mme'] = {
'expr': '(Lepton_pt[])*(abs(Lepton_pdgId[]) == 11 && abs(Lepton_pdgId[0] * Lepton_pdgId[1] * Lepton_pdgId[2]) == 13*13*11) + -999*(abs(Lepton_pdgId[]) != 11 || abs(Lepton_pdgId[0] * Lepton_pdgId[1] * Lepton_pdgId[2]) != 13*13*11)'
}
# Temporary patch for BTV postprocessor bug (no SF for eta < 0, <= 102X_nAODv5_Full2018v5)
#2017
btagSFSource = '%s/src/PhysicsTools/NanoAODTools/data/btagSF/DeepCSV_94XSF_V2_B_F.csv' % os.getenv('CMSSW_BASE')
aliases['Jet_btagSF_shapeFix'] = {
'linesToAdd': [
'gSystem->Load("libCondFormatsBTauObjects.so");',
'gSystem->Load("libCondToolsBTau.so");',
'gSystem->AddIncludePath("-I%s/src");' % os.getenv('CMSSW_RELEASE_BASE'),
'.L %s/src/PlotsConfigurations/Configurations/patches/btagsfpatch.cc+' % os.getenv('CMSSW_BASE')
],
'class': 'BtagSF',
'args': (btagSFSource,),
'samples': mc
}
aliases['btagSF'] = {
'expr': '( TMath::Exp(Sum$( TMath::Log( (CleanJet_pt>20 && abs(CleanJet_eta)<2.5)*Jet_btagSF_shapeFix[CleanJet_jetIdx]+1*(CleanJet_pt<20 || abs(CleanJet_eta)>2.5) ) ) ) )',
'samples': mc
}
systs = ['jes','lf','hf','lfstats1','lfstats2','hfstats1','hfstats2','cferr1','cferr2']
for s in systs:
aliases['Jet_btagSF_shapeFix_up_%s' % s] = {
'class': 'BtagSF',
'args': (btagSFSource, 'up_' + s),
'samples': mc
}
aliases['Jet_btagSF_shapeFix_down_%s' % s] = {
'class': 'BtagSF',
'args': (btagSFSource, 'down_' + s),
'samples': mc
}
aliases['btagSF'+s+'up'] = {
'expr': aliases['btagSF']['expr'].replace('shapeFix','shapeFix_up_'+s),
'samples':mc
}
aliases['btagSF'+s+'down'] = {
'expr': aliases['btagSF']['expr'].replace('shapeFix','shapeFix_down_'+s),
'samples':mc
}
aliases['EleWPTight'] = {
'expr' : '(abs(Lepton_pdgId[0])==13 || Electron_cutBased[Lepton_electronIdx[0]]>=4) \
&& (abs(Lepton_pdgId[1])==13 || Electron_cutBased[Lepton_electronIdx[1]]>=4) \
&& (abs(Lepton_pdgId[2])==13 || Electron_cutBased[Lepton_electronIdx[2]]>=4)',
}
aliases['genZPt'] = {
'expr': 'Sum$(GenPart_pt*(abs(GenPart_pdgId)==23&&((GenPart_statusFlags&8192)==8192)))',
'samples' : mc
}
|
[
"Susan.Dittmer@cern.ch"
] |
Susan.Dittmer@cern.ch
|
0f549c7b98395f5a504787ed41be7b2cb7f15398
|
3d5bcd57b893c95bbcbfafe77bbc33c65432c9ed
|
/Algorithms/LeetCode/L1268suggestedProducts.py
|
6022d3b1d95efdd64998c9ee172cc452f33506fb
|
[] |
no_license
|
arunachalamev/PythonProgramming
|
c160f34c7cb90e82cd0d4762ff9dcb4abadf9c1c
|
ea188aaa1b72511aeb769a2829055d0aae55e73e
|
refs/heads/master
| 2021-06-04T03:50:37.976293
| 2020-11-12T19:52:28
| 2020-11-12T19:52:28
| 97,364,002
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,009
|
py
|
# Given an array of strings products and a string searchWord. We want to design a system that suggests
# at most three product names from products after each character of searchWord is typed.
# Suggested products should have common prefix with the searchWord. If there are more than three products
# with a common prefix return the three lexicographically minimums products.
# Return list of lists of the suggested products after each character of searchWord is typed.
def suggestedProducts(products, searchWord):
    """Return up to three lexicographically-smallest products per typed prefix.

    For each successive prefix of ``searchWord``, collect the products that
    start with that prefix and keep at most the first three in sorted order.

    :param products: list of product-name strings (left unmodified)
    :param searchWord: the word being typed, one character at a time
    :return: list of suggestion lists, one per prefix of ``searchWord``
    """
    # Sort a copy: the original called products.sort(), mutating the
    # caller's list as a side effect.
    ranked = sorted(products)
    prefix = ""
    output = []
    for char in searchWord:
        prefix += char
        matches = [name for name in ranked if name.startswith(prefix)]
        # At most three lexicographic minimums are suggested.
        output.append(matches[:3])
    return output
print (suggestedProducts(["mobile","mouse","moneypot","monitor","mousepad"],"mouse"))
print (suggestedProducts(["havana"],"havana"))
print (suggestedProducts(["havana"],"titanic"))
|
[
"arunachalamev@gmail.com"
] |
arunachalamev@gmail.com
|
a3a3f127f0d5d5d6cd29ffc6073cb8100e216345
|
976f270299c39d9c1c20a3ac3022ac1a32fc2f68
|
/project/helper.py
|
d4455cd0799708cf80401e7ac4100741675059e2
|
[
"Apache-2.0"
] |
permissive
|
yazici/starthinker
|
958bfe35e4a8a422c7f4146c8eb36de05c2e6761
|
bdbac52ee57add39f71c37e599fbf5eb03782e20
|
refs/heads/master
| 2020-04-29T21:23:36.852893
| 2019-03-11T07:31:24
| 2019-03-11T07:31:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,488
|
py
|
###########################################################################
#
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
"""Evaluate the validity of a json file. Helps in debugging recipes.
Print the line and character position of any errors in the given json file.
Arguments
file - path to JSON file to be evaluated
Example
python project/helper.py project/sample.json
"""
import argparse
from starthinker.util.project import get_project
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('file', help='A JSON file.')
parser.add_argument('--debug', '-d', help='Debug mode, do not scrub newlines.', action='store_true')
args = parser.parse_args()
try:
project = get_project(args.file, debug=args.debug)
print 'JSON OK:', args.file
except Exception, e:
print 'JSON ERROR:', args.file, str(e)
|
[
"kenjora@kenjora-macbookair.roam.corp.google.com"
] |
kenjora@kenjora-macbookair.roam.corp.google.com
|
79bf0be94707c72897e7b2b72d58a43560d61f7d
|
5708bbb9da243bec789a3ddff394e12cf89c956e
|
/tests/write_separate_files.py
|
58e04e16985c97f39e36590ecec220a4b00e12f5
|
[] |
no_license
|
webclinic017/CacheFS
|
39d9f2898ab3f9cf3dc0d1dd64a4a323be8fe09d
|
8a50cfe0301e1938753817138411dcc4b0a68bcd
|
refs/heads/master
| 2023-03-14T20:14:00.018678
| 2020-08-04T12:48:47
| 2020-08-04T12:48:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
import sys
import os
import shutil
num = int(sys.argv[1])
assert 0<num<20
port = 1234 + num
path = '/data/cache%d/yxr' % num
if not os.path.isdir(path):
os.makedirs(path)
def app(env, start_response):
    """WSGI app: persist each request body to a file named by the URL path.

    The destination is ``path + PATH_INFO``; the response body is the
    destination file name.
    """
    # NOTE(review): PATH_INFO is not sanitised — '..' segments could
    # escape `path`; confirm only trusted clients reach this server.
    name = path + env['PATH_INFO']
    with open(name, 'w') as f:
        # Stream the request body straight to disk without buffering it all.
        shutil.copyfileobj(env['wsgi.input'], f)
    # NOTE(review): Content-Length is len(name) and a plain str (not an
    # iterable of bytes) is returned — bjoern appears to accept this, but
    # it is not strictly WSGI-conformant; verify.
    start_response('200 ok', [('Content-Length', str(len(name)))])
    return name
import bjoern
bjoern.run(app, '0.0.0.0', port)
|
[
"617080352@qq.com"
] |
617080352@qq.com
|
75efac61085d8599a4694b34ba6e566b551e5886
|
6773e281d2000faf724713571a326fe5440acce2
|
/phone/migrations/0002_auto_20210320_2137.py
|
fff685720e89d0996234dbaf8a556e1cac735d1a
|
[] |
no_license
|
abhisheksahu92/Phone-Directory
|
4a16f69504e5153c29fbeceaad0c4a7e19f72c1c
|
87ed73c015caf3c05364e02b6364756d2e1f87a4
|
refs/heads/main
| 2023-04-20T09:56:56.697106
| 2021-05-10T12:35:10
| 2021-05-10T12:35:10
| 349,765,236
| 0
| 0
| null | 2021-05-10T12:33:51
| 2021-03-20T15:37:39
|
HTML
|
UTF-8
|
Python
| false
| false
| 405
|
py
|
# Generated by Django 3.1.7 on 2021-03-20 16:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('phone', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='phonemodel',
name='middle_name',
field=models.CharField(blank=True, max_length=10, null=True),
),
]
|
[
"70888785+abhisheksahu92@users.noreply.github.com"
] |
70888785+abhisheksahu92@users.noreply.github.com
|
dd68c7efbbedecd20d7e2eff84b6e32ac1718b24
|
1065a2782e4947b5bf14ec4536e4ad7addc7aec3
|
/strategy/crystalball/cbparameters.py
|
8c14b84a9ff8bc41f5a523fbe7de690157eb87d1
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
Johannesduvenage/siis
|
0bf6875d4a5f3638cadb01ed5541aab29ba1d77a
|
57e537cf9b6a71c8ad0b3bb0759772d126496a17
|
refs/heads/master
| 2020-09-10T21:51:56.814014
| 2019-11-13T23:57:34
| 2019-11-13T23:57:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,831
|
py
|
# @date 2019-01-19
# @author Frederic SCHERMA
# @license Copyright (c) 2019 Dream Overflow
# Crystal ball strategy indicator default parameters.
DEFAULT_PARAMS = {
"reversal": True,
"pyramided": 0,
"hedging": False,
"max-trades": 3, # max number of simultaned trades for a same market
"trade-delay": 30, # at least wait 30 seconds before sending another signal
"base-timeframe": "t", # process each time strategy receive a tick
"min-traded-timeframe": "1m",
"max-traded-timeframe": "4h",
"min-vol24h": 100, # 300 BTC per 24h
"min-price": 0.00000069, # or 69 sats (to binary otherwise)
"timeframes": {
"4hour": {
"timeframe": "4h",
"mode": "A",
"depth": 22,
"history": 22,
"update-at-close": False,
"signal-at-close": True,
"indicators": {
"price": ("price", 2,),
"volume": ("volume", 0,),
"rsi": ("rsi", 21,),
"pivotpoint": ("pivotpoint", 5,),
"tomdemark": ("tomdemark", 9),
"atr": ("atr", 14, 1.0), # was 1.5 , but too large else
"bbawe": ("bbawe", 20, 2.0, 3.0, 5, 34, False),
},
"constants": {
"rsi_low": 0.3,
"rsi_high": 0.7,
}
},
"hourly": {
"timeframe": "1h",
"mode": "A",
"depth": 22,
"history": 22,
"update-at-close": False,
"signal-at-close": True,
"indicators": {
"price": ("price", 2,),
"volume": ("volume", 0,),
"rsi": ("rsi", 21,),
"pivotpoint": ("pivotpoint", 5,),
"tomdemark": ("tomdemark", 9),
"atr": ("atr", 14, 1.0), # was 1.5 , but too large else
"bbawe": ("bbawe", 20, 2.0, 3.0, 5, 34, False),
},
"constants": {
"rsi_low": 0.3,
"rsi_high": 0.7,
}
},
"15min": {
"timeframe": "15m",
"mode": "A",
"depth": 22,
"history": 22,
"update-at-close": False,
"signal-at-close": True,
"indicators": {
"price": ("price", 2,),
"volume": ("volume", 0,),
"rsi": ("rsi", 21,),
"pivotpoint": ("pivotpoint", 5,),
"tomdemark": ("tomdemark", 9),
"atr": ("atr", 14, 1.0),
"bbawe": ("bbawe", 20, 2.0, 3.0, 5, 34, False),
},
"constants": {
"rsi_low": 0.3,
"rsi_high": 0.7,
}
},
"5min": {
"timeframe": "5m",
"mode": "A",
"depth": 22,
"history": 22,
"update-at-close": False,
"signal-at-close": True,
"indicators": {
"price": ("price", 2,),
"volume": ("volume", 0,),
"rsi": ("rsi", 21,),
"pivotpoint": ("pivotpoint", 5,),
"tomdemark": ("tomdemark", 9),
"atr": ("atr", 14, 3.0),
"bbawe": ("bbawe", 20, 2.0, 3.0, 5, 34, False),
},
"constants": {
"rsi_low": 0.3,
"rsi_high": 0.7,
}
},
"2min":{
"timeframe": "2m",
"mode": "A",
"depth": 22,
"history": 22,
"update-at-close": False,
"signal-at-close": True,
"indicators": {
"price": ("price", 2,),
"volume": ("volume", 0,),
"rsi": ("rsi", 21,),
"pivotpoint": ("pivotpoint", 5,),
"tomdemark": ("tomdemark", 9),
"atr": ("atr", 14, 3.0),
"bbawe": ("bbawe", 20, 2.0, 3.0, 5, 34, False),
},
"constants": {
"rsi_low": 0.3,
"rsi_high": 0.7,
}
},
"1min": {
"timeframe": "1m",
"mode": "A",
"depth": 22,
"history": 22,
"update-at-close": True,
"signal-at-close": True,
"indicators": {
"price": ("price", 2,),
"volume": ("volume", 0,),
"rsi": ("rsi", 21,),
"pivotpoint": ("pivotpoint", 5,),
"tomdemark": ("tomdemark", 9),
"atr": ("atr", 14, 3.0),
"bbawe": ("bbawe", 20, 2.0, 3.0, 5, 34, False),
},
"constants": {
"rsi_low": 0.3,
"rsi_high": 0.7,
}
}
}
}
|
[
"frederic.scherma@gmail.com"
] |
frederic.scherma@gmail.com
|
09402cd9cb8b46c4bea215f5fb80144e37a7266b
|
045025f41201dba54c005dd0601d97ccdedf3062
|
/ScrapePlugins/MangaPark/FeedLoader.py
|
fd8b09f1220b87bdd841d7a1030882891c7728a3
|
[] |
no_license
|
Gazzilow/MangaCMS
|
6720c45c63c0429cc0e0a37d99738bfb553ca98b
|
e620bfef62c9b3d4678b635a2ea6463ffb26fc34
|
refs/heads/master
| 2021-01-22T09:40:17.277953
| 2015-12-19T21:33:25
| 2015-12-19T21:33:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,396
|
py
|
import webFunctions
import bs4
import re
import urllib.parse
import time
import calendar
import dateutil.parser
import runStatus
import settings
import datetime
import ScrapePlugins.RetreivalDbBase
class FeedLoader(ScrapePlugins.RetreivalDbBase.ScraperDbBase):
    """Scrapes the MangaPark "latest" feed and queues new chapter links in the DB."""

    loggerPath = "Main.Manga.Mp.Fl"
    pluginName = "MangaPark Link Retreiver"
    tableKey = "mp"
    dbName = settings.dbName
    wg = webFunctions.WebGetRobust(logPath=loggerPath+".Web")
    tableName = "MangaItems"

    urlBase = "http://mangapark.com/"
    feedUrl = "http://mangapark.com/latest"

    def closeDB(self):
        """Close the database connection held by this scraper."""
        self.log.info( "Closing DB...",)
        self.conn.close()
        self.log.info( "done")

    def checkMatureAgree(self, page, url):
        """Step through the mature-content agreement page if it was served."""
        if "This series contains mature contents" in page:
            self.log.info("Need to step through mature agreement page.")
            # Re-request with the consent flag posted.
            page = self.wg.getpage(url, postData={"adult" : "true"})
        return page

    def getItemPages(self, info):
        """Scrape one series page and return a list of chapter item dicts.

        :param info: ``(url, series_name)`` tuple as produced by getSeriesUrls
        :return: list of dicts with originName/sourceUrl/seriesName/retreivalTime
        """
        url, series = info
        page = self.wg.getpage(url)
        page = self.checkMatureAgree(page, url)

        soup = bs4.BeautifulSoup(page)
        # Prefer the on-page title over the feed-supplied name.
        series = soup.find("h1", class_="title")
        container = soup.find("div", class_="list")
        seriesName = series.get_text().strip()
        segmentDivs = container.find_all("div", class_="group", recursive=False)
        ret = []
        for segment in segmentDivs:
            chaps = segment.find_all("div", class_="element")
            for chap in chaps:
                dlLink = chap.find("div", class_="icon_wrapper").a["href"]
                dlTitle = chap.find("div", class_="title").get_text()
                dlTitle = dlTitle.replace(":", " -")  # Can't have colons in filenames
                item = {}
                chapDate = chap.find("div", class_="meta_r")
                datestr = list(chapDate)[-1]
                # str.strip() returns a new string; the original discarded
                # the result, leaving the ", " padding on the date text.
                datestr = datestr.strip(", ")
                date = dateutil.parser.parse(datestr, fuzzy=True)

                item["originName"] = "{series} - {file}".format(series=seriesName, file=dlTitle)
                item["sourceUrl"] = dlLink
                item["seriesName"] = seriesName
                item["retreivalTime"] = calendar.timegm(date.timetuple())
                ret.append(item)
        return ret

    def getSeriesUrls(self):
        """Fetch the "latest" feed and return ``(url, title)`` tuples."""
        ret = []
        soup = self.wg.getSoup(self.feedUrl)
        content = soup.find('div', class_='ls1')
        divs = content.find_all("div", class_="item")
        for div in divs:
            # First a in the div is the title image
            url = div.a["href"]
            url = urllib.parse.urljoin(self.urlBase, url)
            text = div.a['title']
            ret.append((url, text))
        return ret

    def getAllItems(self):
        """Walk every series in the feed and collect all chapter items."""
        self.log.info( "Loading Mc Items")
        ret = []

        seriesPages = self.getSeriesUrls()
        for item in seriesPages:
            itemList = self.getItemPages(item)
            for item in itemList:
                ret.append(item)
            # Honour the global shutdown flag between series.
            if not runStatus.run:
                self.log.info( "Breaking due to exit flag being set")
                break
        self.log.info("Found %s total items", len(ret))
        return ret

    def go(self):
        """Entry point: fetch the feed and insert new links into the DB."""
        self.resetStuckItems()
        self.log.info("Getting feed items")

        feedItems = self.getAllItems()
        self.log.info("Processing feed Items")

        self.processLinksIntoDB(feedItems)
        self.log.info("Complete")
if __name__ == "__main__":
import utilities.testBase as tb
with tb.testSetup(startObservers=False):
mon = FeedLoader()
# mon.getSeriesUrls()
mon.getItemPages(('http://mangapark.com/manga/zai-x-10-yamauchi-yasunobu', 'Zai x 10'))
# mon.go()
|
[
"something@fake-url.com"
] |
something@fake-url.com
|
6605bcf24b243c6cbe8ac5a6bd439b0d5204fe19
|
dd05adda58c40b3a6593d89c53be1ce64df2be0a
|
/partd/encode.py
|
3e3a15f7966f18e7ac388517e92437fc832a5ecd
|
[] |
permissive
|
dask/partd
|
4fb98971d4f9b891ca944584517be3157e06de81
|
4183caf149b686538752608bac6acbaa052dba23
|
refs/heads/main
| 2023-07-20T04:38:51.234966
| 2023-07-17T21:12:35
| 2023-07-17T21:12:35
| 35,185,364
| 94
| 35
|
BSD-3-Clause
| 2023-07-17T21:12:37
| 2015-05-06T22:08:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,287
|
py
|
from .core import Interface
from .file import File
from toolz import valmap
from .utils import frame, framesplit
class Encode(Interface):
    """Partd wrapper that encodes values on write and decodes on read.

    Each value is passed through ``encode`` and length-prefix framed before
    being handed to the underlying partd; on retrieval the frames are
    split, decoded, and recombined with ``join``.
    """

    def __init__(self, encode, decode, join, partd=None):
        # Default to a file-backed partd when given nothing or a path string.
        if not partd or isinstance(partd, str):
            partd = File(partd)
        self.partd = partd
        self.encode = encode  # serializer producing bytes for one value
        self.decode = decode  # inverse of ``encode``
        self.join = join      # combines a list of decoded chunks into one value
        Interface.__init__(self)

    def __getstate__(self):
        return self.__dict__

    __setstate__ = Interface.__setstate__

    def append(self, data, **kwargs):
        """Encode and frame every value in ``data`` before appending."""
        data = valmap(self.encode, data)
        data = valmap(frame, data)
        self.partd.append(data, **kwargs)

    def _get(self, keys, **kwargs):
        """Fetch raw chunks; split frames, decode each, and join per key."""
        raw = self.partd._get(keys, **kwargs)
        # NOTE: the comprehension variable shadows the imported ``frame``
        # helper within this expression.
        return [self.join([self.decode(frame) for frame in framesplit(chunk)])
                for chunk in raw]

    def delete(self, keys, **kwargs):
        """Delete the given keys from the underlying partd."""
        return self.partd.delete(keys, **kwargs)

    def _iset(self, key, value, **kwargs):
        # Single-value set: encode and frame exactly as append does.
        return self.partd.iset(key, frame(self.encode(value)), **kwargs)

    def drop(self):
        """Discard all stored data in the underlying partd."""
        return self.partd.drop()

    @property
    def lock(self):
        # Expose the underlying partd's lock unchanged.
        return self.partd.lock

    def __exit__(self, *args):
        # Dropping on exit makes Encode usable as a temporary store.
        self.drop()
        self.partd.__exit__(*args)
|
[
"mrocklin@gmail.com"
] |
mrocklin@gmail.com
|
6028ec0837d82763699b394753bc8459206e12af
|
8b2ff2f65b8bc55d1a4643e29cc6f30d6cba5688
|
/live/django_form/articles/views.py
|
425af1be08ecd3ce793270e9b42caa24c3d60822
|
[] |
no_license
|
chloe-codes1/Django
|
a177161a89b3f3592b04c5711246847d4f0dd32f
|
496d7219d7f9aa7269d160f46f2efa4b9bf07431
|
refs/heads/master
| 2023-08-07T21:27:34.523326
| 2023-05-27T13:10:22
| 2023-05-27T13:10:22
| 250,606,673
| 0
| 1
| null | 2023-09-04T08:49:57
| 2020-03-27T17:57:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,759
|
py
|
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST
from .models import Article
from .forms import ArticleForm
# Create your views here.
def index(request):
articles = Article.objects.order_by('-pk')
context = {
'articles': articles
}
return render(request, 'articles/index.html', context)
def create(request):
if request.method == 'POST':
# POST /articles/new -> (구) create() 함수
form = ArticleForm(request.POST)
# 검증하기
if form.is_valid():
article = form.save()
# -> article은 Article instance를 return 함
return redirect('articles:index')
else:
# GET /articles/new
form = ArticleForm()
# 공용 context
context = {
'form': form
}
return render(request, 'articles/form.html', context)
def detail(request, pk):
article = get_object_or_404(Article, id=pk)
context = {
'article': article
}
return render(request, 'articles/detail.html', context)
@require_POST
def delete(request,pk):
article = get_object_or_404(Article, id=pk)
article.delete()
return redirect('articles:index')
def update(request, pk):
    """Edit an existing article.

    GET renders the form pre-filled from the article; POST validates the
    submitted data and saves it back onto the same row.
    """
    article = get_object_or_404(Article, id=pk)
    if request.method == 'POST':
        # Bind the POST data to the existing instance. Without
        # ``instance=article`` the ModelForm would CREATE a new Article
        # on save() instead of updating this one.
        form = ArticleForm(request.POST, instance=article)
        if form.is_valid():
            article = form.save()
            return redirect('articles:detail', article.pk)
    else:
        # On GET, pre-populate the form from the current instance.
        form = ArticleForm(instance=article)
    context = {
        'form': form
    }
    return render(request, 'articles/form.html', context)
|
[
"juhyun.kim@lindsey.edu"
] |
juhyun.kim@lindsey.edu
|
b8f094584d71326d60fc7d0ae1dc6345b4a8c508
|
db12b990924703cd74748d8585cd9c11fafa6746
|
/h2o-k8s/tests/clustering/assisted-clustering.py
|
7a76ab10eaba664de63a610c5d9aaabf790e4cb0
|
[
"Apache-2.0"
] |
permissive
|
h2oai/h2o-3
|
919019a8f297eec676011a9cfd2cc2d97891ce14
|
d817ab90c8c47f6787604a0b9639b66234158228
|
refs/heads/master
| 2023-08-17T18:50:17.732191
| 2023-08-17T16:44:42
| 2023-08-17T16:44:42
| 17,371,412
| 6,872
| 2,345
|
Apache-2.0
| 2023-09-14T18:05:40
| 2014-03-03T16:08:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,324
|
py
|
import requests
import argparse
import sys
import os
import time
from kubernetes import client, config, watch
def wait_deployment_ready(deployment_name: str, namespace: str) -> client.V1Deployment:
"""
Waits until a deployment of given name is reported to be in status `Ready` by Kubernetes.
A deployment is ready once all it's underlying pods are ready. This means there is H2O running inside each pod,
and the clustering REST API is listening for an incoming flatfile.
:param deployment_name: Name of the H2O deployment to find the correct H2O deployment
:param namespace: Namespace the deployment belongs to.
:return: An instance of V1Deployment, if found.
"""
print("Waiting for H2O deployment to be ready")
v1_apps = client.AppsV1Api()
w = watch.Watch()
for deployment in w.stream(v1_apps.list_namespaced_deployment, namespace,
field_selector="metadata.name={}".format(deployment_name), _request_timeout=360):
deployment = deployment["object"]
status: client.V1DeploymentStatus = deployment.status
if status.ready_replicas == status.replicas:
print("H2O deployment ready")
return deployment
def create_h2o_cluster(deployment_name: str, namespace: str) -> [str]:
"""
Orchestrates the creation/clustering of an H2O cluster.
:param deployment_name: Name of the H2O deployment to find the correct H2O deployment
:param namespace: Namespace the deployment belongs to.
:return: A list of pod IPs (IPv4), each IP in a separate string.
"""
config.load_incluster_config()
print("Kubeconfig Loaded")
deployment = wait_deployment_ready(deployment_name, namespace)
print(deployment)
return cluster_deployment_pods(deployment, namespace)
def cluster_deployment_pods(deployment: client.V1Deployment, namespace: str) -> [str]:
    """
    Orchestrates the clustering process of H2O nodes running inside Kubernetes pods.
    Pods are located via the deployment selector's "app" label - this is dependent
    on the configuration of the resource.
    :param deployment: H2O Deployment resource
    :param namespace: Namespace of the deployment resource
    :return: A list of pod IPs (IPv4) clustered, each IP in a separate string.
    """
    app_label = deployment.spec.selector.match_labels["app"]
    ips = get_pod_ips_by_label(app_label, namespace)
    print("Detected pod_ips: {}".format(ips))
    send_ips_to_pods(ips)
    return ips
def get_deployment(deployment_name: str, namespace: str) -> client.V1Deployment:
    """
    Finds the H2O deployment inside the Kubernetes cluster within the given namespace.
    Exits the process with status code 1 (failed test) when it does not exist.
    :param deployment_name: Name of the H2O deployment to find the correct H2O deployment
    :param namespace: Namespace the deployment belongs to.
    :return: An instance of V1Deployment, if found.
    """
    apps_api = client.AppsV1Api()
    found = apps_api.read_namespaced_deployment(deployment_name, namespace)
    if found is not None:
        return found
    print("Deployment '{}' does not exist".format(deployment_name))
    sys.exit(1)
def send_ips_to_pods(pod_ips):
    """
    Performs the actual clustering by sending every H2O pod's ClusterIP to each of the pods in
    the form of a flatfile, as defined by H2O's NetworkInit.java class.
    Exits the process with status code 1 if any pod rejects the flatfile.
    :param pod_ips: A list of pod IPs (IPv4), each IP in a separate string.
    """
    # One "<ip>:54321" record per line; join() guarantees no \n after the last record.
    flatfile_body = "\n".join("{}:54321".format(ip) for ip in pod_ips)
    headers = {"accept": "*/*",
               "Content-Type": "text/plain"}
    for pod_ip in pod_ips:
        url = "http://{}:8080/clustering/flatfile".format(pod_ip)
        response = requests.post(url, headers=headers, data=flatfile_body)
        if response.status_code != 200:
            # Bug fix: the original message left the '{}' placeholder unfilled.
            print("Unexpected response code from pod '{}'".format(pod_ip))
            sys.exit(1)
def check_h2o_clustered(pod_ips):
    """
    Checks that each H2O pod, identified by its Kubernetes ClusterIP, reports a healthy cluster
    of the expected size. If any node is unresponsive or reports a wrong cluster status, this
    script exits with status code 1.
    :param pod_ips: A list of pod IPs (IPv4), each IP in a separate string.
    """
    for pod_ip in pod_ips:
        url = "http://{}:8080/cluster/status".format(pod_ip)
        response = None
        max_retries = 360
        for _ in range(max_retries):
            response = requests.get(url)
            if response.status_code == 200:
                break
            time.sleep(1)
        # Bug fix: the original `while retries < max_retries` never incremented `retries`,
        # so an unresponsive pod looped forever; and since `response` is non-None after the
        # first attempt, the original timeout branch could never trigger. Check the status
        # code instead.
        if response is None or response.status_code != 200:
            print("Unable to obtain /cluster/status response from pod '{}' in time.".format(pod_ip))
            sys.exit(1)
        response_json = response.json()
        if len(response_json["unhealthy_nodes"]) > 0:
            print("Unhealthy nodes detected in the cluster: {}".format(response_json["unhealthy_nodes"]))
            sys.exit(1)
        if len(response_json["healthy_nodes"]) != len(pod_ips):
            print("Healthy cluster with less node reported by node {}. IPs: {}".format(pod_ip,
                                                                                       response_json[
                                                                                           "healthy_nodes"]))
            sys.exit(1)
        print("Pod {} reporting healthy cluster:\n{}".format(pod_ip, response_json))
def get_pod_ips_by_label(pod_label: str, namespace: str) -> [str]:
    """
    Lists the ClusterIPs of all pods carrying the given "app" label.
    :param pod_label: A label of the H2O Pods used in Kubernetes to filter the pods by.
    :param namespace: Kubernetes namespace the pods have been deployed to.
    :return: A list of pod IPs (IPv4), each IP in a separate string.
    """
    core_api = client.CoreV1Api()
    pods = core_api.list_namespaced_pod(watch=False, namespace=namespace,
                                        label_selector="app={}".format(pod_label),
                                        _request_timeout=360)
    return [pod.status.pod_ip for pod in pods.items]
if __name__ == '__main__':
    # Performs assisted clustering on an H2O cluster inside Kubernetes.
    # To keep this script simple (it is scoped to the h2o-k8s test suite only), all H2O nodes
    # are assumed to run on the default H2O port (54321), exposed by the pods.
    parser = argparse.ArgumentParser("H2O Assisted clustering test script")
    parser.add_argument("deployment_name",
                        help="Name of the H2O Deployment in K8S. Used as a label to find the H2O pods to cluster",
                        metavar="L", type=str)
    parser.add_argument("--namespace", required=True, help="Namespace the H2O has been deployed to",
                        type=str)
    parsed = parser.parse_args()
    print("Attempting to cluster H2O")
    pod_ips = create_h2o_cluster(parsed.deployment_name, parsed.namespace)
    check_h2o_clustered(pod_ips)
|
[
"noreply@github.com"
] |
h2oai.noreply@github.com
|
30fd9db05df431710018b687699b91fdd66bbcdb
|
850d778687e3692ab2a38d4d2227391d92c21e6b
|
/atcoder.jp/abc051/abc051_c/Main.py
|
98d9a7d6607c0487dc39d1ee50d494f2896dd483
|
[] |
no_license
|
Valkyrja3607/AtCoder
|
77e2e5e66c0e8e12bb902c35f679119c6576fad7
|
9218a50b1eb83e4498845d15d9dda41fab90ed73
|
refs/heads/master
| 2023-07-15T20:38:52.911301
| 2018-05-30T17:56:22
| 2018-05-30T17:56:22
| 294,980,006
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
# AtCoder abc051_c "Back and Forth": print a walk that travels from (sx, sy) to
# (tx, ty) and back twice without reusing any edge.
sx,sy,tx,ty=map(int,input().split())
# Offsets from start to target; assumes sx < tx and sy < ty so both are
# positive — TODO confirm against the problem constraints.
x=tx-sx
y=ty-sy
# First round trip hugs the rectangle; the second detours one cell outside it
# ("L"*(x+1) ... "DR" ...) so no edge is traversed twice.
print("U"*y+"R"*x+"D"*y+"L"*(x+1)+"U"*(y+1)+"R"*(x+1)+"DR"+"D"*(y+1)+"L"*(x+1)+"U")
|
[
"purinjolly@gmail.com"
] |
purinjolly@gmail.com
|
d5de6683f124bae4468b8e2827a58b7b9fde5378
|
0b514feea82eaa2e341130d9e23d13d72271d644
|
/3.Python_Coding_Basic/Step_01/Unit_11-1.py
|
79a6192f84771f6e99d0e62b4531770a79637d29
|
[] |
no_license
|
Jerrykim91/FromZero
|
f8478012130948a11978a46ab6ec7922cb354a8f
|
fdd5a0716b29c77019cfcd1e1eab7ed4afd1aed4
|
refs/heads/master
| 2022-12-25T15:04:22.656462
| 2020-10-10T14:35:06
| 2020-10-10T14:35:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,919
|
py
|
# User-defined helper package
from fun_pkg.random_num import random_num # generates a list of random numbers
# Unit_11-1.py
# Using indexes
txt_index = """
# 인덱스 사용하기
=> 시퀸스 객체에 [](대괄호)를 붙이고 []안에 각 요소의 인텍스를 지정하면 해당 요소에 접근가능
- 시퀸즈 객체에 들어있는 요소에 접근하는 방법을 확인
- 시퀸즈 객체의 각 요소는 순서는 정해져 있으며 이 순서를 인덱스라고 함
시퀸스 객체[인덱스]
## 인덱스( index, 색인 )
- 위칫값을 뜻하는데 국어사전 옆면에 ㄱ, ㄴ, ㄷ으로 표시해 놓은것과 비슷
- 주의 ) 시퀸스 객체의 인덱스는 항상 0부터 시작한다는거
"""
print(txt_index)
print('-'*40)
# Using indexes - 1
a = [38,26,53,72,19]
print('-'*20)
print(a[0]) # prints the first element of the list => index 0
print('-'*20)
print(a[2]) # prints the third element of the list => index 2
print('-'*20)
print(a[4]) # prints the fifth element of the list => index 4
print('-'*20)
# Tuple
b = (38,26,53,72,19)
print(b[2]) # prints the third element of the tuple => index 2
print('-'*20)
ran = range(0, 10, 2) # 0 up to (but excluding) 10 in steps of 2
r = list(ran)
print(ran[2])
print('-'*20)
print( r ) # confirms that print(ran[2]) produced the expected value
print('-'*40)
# String indexing
hello = 'hello world!'
print( hello[2] )
print( hello[5] ) # whitespace counts as a character too
print('-'*20)
hello = list(hello) # inspect the individual characters
print(hello)
print('-'*40)
# When no index is given on a sequence object
# check using c
c = [ 38, 26, 53, 72, 19]
print(c) # printing c directly outputs the whole list
print('-'*40)
# The __getitem__ method
txt_getitem = """
# __getitem__ 메소드
시퀸즈 객체에서 대괄호를 사용하면
실제로는 __getitem__ 메소드가 호출되어 요소를 가져옴
직접 호출도 가능
시퀸스 객체. __getitem__(index)
__getitem__메서드를 이용한 추가적인 것은
unit_39(이터레이터)에서 추가로 설명
일단 아래에 __getitem__메서드를 이용해 호출 해보겠음
"""
print(txt_getitem)
print('-'*40)
# The __getitem__ method - practice
a = list(range(10))
# print( a )
print(a.__getitem__(5))
print('-'*40)
# Negative indexes
# fetch some random numbers with the helper imported above
ran = random_num(5)
print(ran, ran[-2], ran[-1])
print('-'*40)
# Negative indexes on a tuple
ran_1 = tuple(ran)
print(ran_1,ran_1[-3])
print('-'*40)
r = range(1,11,2)
print(r[-2])
print('-'*40)
hello = 'hello world!'
print(hello[-1])
print('-'*40)
# When the index goes out of range
# it raises an error => IndexError: list index out of range
# the list index exceeded the valid range
# Accessing the last element
a = random_num(10)
print(a,'\n 요소 길이 : ', len(a),'\n 마지막요소 : ',a[len(a)-1])
# a[len(a)-1] is a common way to compute the last index!
print('-'*40)
# Assigning a value to an element
"""
- 시퀸즈 객체[인텍스] = 값
"""
tmp = []
zro = [0] * 5
rdm_num = random_num(5)
print('rdm_num :',rdm_num)
# print(zro)
for i in range(len(zro)) :
    # print('zro[%d] = ' % i, zro[i])
    print(i)
    for j in range(len(rdm_num)):
        # print(rdm_num)
        # dum = 0
        pass
        # print('test : rdm_num[%d] = ' % i, rdm_num[i])
        # works fine
    zro[i] = rdm_num[i]
    print('rdm_num[%d] = ' % i, rdm_num[i])
    tmp.append(zro[i])
print('zro[i]만 추출 :', tmp)
print('tmp[0] : ', tmp[0])
print('tmp[4] : ', tmp[4])
print('-'*40)
# Deleting an element with del
"""
del 시퀸즈 객체[인덱스]
"""
print(tmp) # before
del tmp[2]
print(tmp) # after -> the element at index 2 is gone
print('-'*40)
|
[
"sun4131@gmail.com"
] |
sun4131@gmail.com
|
06297336808d521782cd2cb87a60fbda8e1592f6
|
b253bec2667e0cff7cc2306761ff01d6522606d1
|
/setup.py
|
48e34c07fad71e830862c2ed272485d56c152d32
|
[
"MIT"
] |
permissive
|
pmp-p/fstrings_helper
|
dc50b9cc2a8c61faa16c911fe9fc70f3ccfc4ba1
|
49cc9e17aca04b73fc2b9717742af9d2053f366b
|
refs/heads/master
| 2020-03-31T14:59:20.785649
| 2019-07-01T15:22:27
| 2019-07-01T15:22:27
| 152,318,729
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,268
|
py
|
import distutils
import os.path
from setuptools import setup
from setuptools.command.install import install as _install
PTH = (
'try:\n'
' import future_fstrings\n'
'except ImportError:\n'
' pass\n'
'else:\n'
' future_fstrings.register()\n'
)
class install(_install):
    """Custom install command that writes a .pth bootstrap file (via setuptools'
    extra_path mechanism) so PTH runs at every interpreter startup."""

    def initialize_options(self):
        _install.initialize_options(self)
        # Use this prefix to get loaded as early as possible
        # (.pth files are processed in alphabetical order).
        name = 'aaaaa_' + self.distribution.metadata.name
        contents = 'import sys; exec({!r})\n'.format(PTH)
        # extra_path makes setuptools emit '<name>.pth' with `contents`.
        self.extra_path = (name, contents)

    def finalize_options(self):
        _install.finalize_options(self)
        install_suffix = os.path.relpath(
            self.install_lib, self.install_libbase,
        )
        if install_suffix == '.':
            distutils.log.info('skipping install of .pth during easy-install')
        elif install_suffix == self.extra_path[1]:
            # setuptools appended the extra_path subdir to install_lib; strip it
            # back off so modules land directly next to the .pth file.
            self.install_lib = self.install_libbase
            distutils.log.info(
                "will install .pth to '%s.pth'",
                os.path.join(self.install_lib, self.extra_path[0]),
            )
        else:
            raise AssertionError(
                'unexpected install_suffix',
                self.install_lib, self.install_libbase, install_suffix,
            )
# Distribution metadata. The custom `install` command above arranges for the
# .pth bootstrap file to be installed alongside the package.
setup(
    name='future_fstrings',
    description='A backport of fstrings to python<3.6',
    url='https://github.com/asottile/future-fstrings',
    version='0.4.4',
    author='Anthony Sottile',
    author_email='asottile@umich.edu',
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
    # tokenize-rt is only needed where native f-strings are unavailable.
    extras_require={':python_version<"3.6"': ['tokenize-rt']},
    py_modules=['future_fstrings'],
    entry_points={'console_scripts': [
        'future-fstrings-show=future_fstrings:main',
    ]},
    cmdclass={'install': install},
)
|
[
"asottile@umich.edu"
] |
asottile@umich.edu
|
2d6d228c3a12cc76f0283c0d637c76d28e52964f
|
7e36343736b542e4cdf660bea3e25aa2e88036f2
|
/dp/509.fibonacci-number.py
|
63ddcf119cb447455e6cb27e1069f83dcea0bcef
|
[] |
no_license
|
soaringk/Leetcode-Log
|
2d1b52b5fa189951e6c2b1d8b67000bb99c43f06
|
a820f55bd05f43f3b923b0fbe9dc059fe9fdb7e8
|
refs/heads/master
| 2023-03-23T12:22:25.352632
| 2021-03-15T13:38:56
| 2021-03-15T13:38:56
| 332,972,723
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
#
# @lc app=leetcode id=509 lang=python3
#
# [509] Fibonacci Number
#

# @lc code=start
class Solution:
    def fib(self, n: int) -> int:
        """Return the n-th Fibonacci number, with fib(1) == fib(2) == 1.

        Non-positive n yields 0.
        """
        if n < 1:
            return 0
        # Advance the pair (F(k-1), F(k)) from (F(0), F(1)) exactly n-1 times.
        previous, current = 0, 1
        for _ in range(n - 1):
            previous, current = current, previous + current
        return current
# @lc code=end
|
[
"k3vin.zhang@gmail.com"
] |
k3vin.zhang@gmail.com
|
8eddb740d27e5bceed86e8295591b36306f6bfe3
|
531caac957596fc623e534bce734ef6b45be0b07
|
/tests/operators/vector/test_smooth_l1_loss_grad_001.py
|
694c06266318023b1ea9df25b4479d5e79b82912
|
[
"Apache-2.0",
"Zlib",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] |
permissive
|
wxyhv/akg
|
02e64d81bbb84472e0bf1c57a691b688ea743d6e
|
fc9b6f5b6fa024da89bf90466a815359ca54015d
|
refs/heads/master
| 2023-03-11T02:59:18.472826
| 2021-02-23T07:44:16
| 2021-02-23T07:44:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,837
|
py
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
from base import TestBase
import pytest
from test_run.smooth_l1_loss_grad_run import smooth_l1_loss_grad_run
############################################################
# TestCase= class: put to tests/*/
############################################################
class TestCase(TestBase):
    """Operator test suite for smooth_l1_loss_grad: builds the argument tables
    that TestBase.common_run feeds to smooth_l1_loss_grad_run per target."""
    def setup(self):
        """Registers the case with the harness and builds the per-target arg lists."""
        case_name = "test_smooth_l1_loss_grad"
        case_path = os.getcwd()
        # params init
        self.params_init(case_name, case_path)
        self.caseresult = True
        self._log.info("============= {0} Setup case============".format(self.casename))
        kernel = smooth_l1_loss_grad_run
        # NOTE(review): kernel_name is never used below — presumably kept for
        # symmetry with sibling suites; confirm before removing.
        kernel_name = "smooth_l1_loss_grad"
        self.testarg = [
            ## testflag,opfuncname,testRunArgs, dimArgs
        ]
        self.testarg_cloud = [
            ## testflag,opfuncname,testRunArgs, dimArgs
            ("test_smooth_l1_loss_grad_05_fp32", kernel, ((1, 16, 4), "float16")),
        ]
        self.testarg_rpc_cloud = [
            ## testflag,opfuncname,testRunArgs, dimArgs
            ("test_smooth_l1_loss_grad_01_fp16", kernel, ((8, 4718, 4), "float16")),
            ("test_smooth_l1_loss_grad_02_fp32", kernel, ((8, 4718, 4), "float32")),
            ("test_smooth_l1_loss_grad_03_fp16", kernel, ((8, 8732, 4), "float16")),
            ("test_smooth_l1_loss_grad_04_fp16", kernel, ((8, 8732, 4), "float32")),
            # ("test_smooth_l1_loss_grad_05_fp16_pad", kernel, ((8, 8732, '4,16'), "float16")), # multicore wrong
            ("test_smooth_l1_loss_grad_06_fp16", kernel, ((32, 8732, 4), "float16")),
            ("test_smooth_l1_loss_grad_07_fp16", kernel, ((32, 8732, 4), "float32")),
        ]
        return
    def test_run(self):
        """Runs the default (local) argument table."""
        self.common_run(self.testarg)
    def test_run_cloud(self):
        """Runs the cloud argument table."""
        self.common_run(self.testarg_cloud)
    def test_run_rpc_cloud(self):
        """Runs the rpc-cloud argument table."""
        self.common_run(self.testarg_rpc_cloud)
    def teardown(self):
        """
        clean environment
        :return:
        """
        self._log.info("============= {0} Teardown============".format(self.casename))
        return
|
[
"ckey.chengbin@huawei.com"
] |
ckey.chengbin@huawei.com
|
f456cae0b212c1aafc8d42125def987bb22a525a
|
91cac04472c4f9a874f9c4c3393d239c2a364bb3
|
/Scripts/heft/dynamic_hetero_resources_exp2.py
|
a23c566f840113592dbce19df5966e41618d3ed7
|
[
"MIT"
] |
permissive
|
radical-experiments/campaign_manager
|
a1ab2fd9e8510919bf6d4b5fb1241aa0e5730208
|
337660cf07a97933b9b516d6612353bd3f6592a8
|
refs/heads/master
| 2023-01-13T19:28:37.590334
| 2020-11-11T15:37:57
| 2020-11-11T15:37:57
| 257,754,780
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,916
|
py
|
from radical.cm.planner import HeftPlanner
from random import gauss
import pandas as pd
import numpy as np
import sys
from time import time
def campaign_creator(num_workflows):
    """Build a homogeneous campaign of `num_workflows` workflows.

    Each workflow dict carries a 1-based id, a fixed operation count of 75000
    and a None description.

    :return: (campaign, num_oper) — the workflow dicts and their op counts.
    """
    campaign = []
    operations = []
    for idx in range(num_workflows):
        entry = {'description': None,
                 'id': idx + 1,
                 'num_oper': 75000}
        campaign.append(entry)
        operations.append(entry['num_oper'])
    return campaign, operations
def resdf_to_dict(res_df, size, prev_set=None):
    """
    Converts a resource DataFrame into the list-of-dicts format the planner expects.

    When `size` equals the number of rows in `res_df`, one resource entry is created per row
    using the row's mean performance. Otherwise the existing `prev_set` is extended with
    `size - len(prev_set)` new resources whose performance is sampled (gauss) around the
    performance of the DataFrame rows, cycled in order.

    :param res_df: DataFrame with 'PFlops Mean' and 'Pflops STD' columns, one row per resource.
    :param size: Total number of resources requested.
    :param prev_set: Previously built resource list to extend (required when size != len(res_df)).
    :return: List of {'id': int, 'performance': float} dicts of length `size`.
    """
    if size == len(res_df):
        resources = []
        for i in range(size):
            point = res_df.loc[i]
            resources.append({'id': i + 1,
                              'performance': point['PFlops Mean']})
        return resources
    new_res = size - len(prev_set)
    additions = []
    for i in range(new_res):
        # Generalized: cycle through all rows of res_df instead of the original
        # hard-coded first 4 rows, so the table size is no longer assumed.
        point = res_df.loc[i % len(res_df)]
        additions.append({'id': len(prev_set) + i + 1,
                          'performance': gauss(point['PFlops Mean'], point['Pflops STD'])})
    return prev_set + additions
def get_makespan(curr_plan, dyn_resources, used_resources):
    '''
    Calculate the campaign makespan of a plan under noisy resource performance.

    Each (workflow, resource) placement adds num_oper / gauss(perf, perf * 0.0644)
    time units to its resource; the makespan is the largest accumulated usage.

    :param curr_plan: iterable of placements; placement[0] is the workflow dict,
                      placement[1] a resource dict carrying a 1-based 'id'.
    :param dyn_resources: sized collection whose length gives the resource count.
    :param used_resources: resource dicts (indexed by 'id' - 1) with mean 'performance'.
    :return: maximum accumulated execution time over all resources (float).
    '''
    resource_usage = [0] * len(dyn_resources)
    for placement in curr_plan:
        workflow = placement[0]
        idx = placement[1]['id'] - 1
        perf = used_resources[idx]['performance']
        # Sample actual throughput around the nominal performance (~6.44% std dev).
        # (The original also kept a per-resource counter `tmp_idx` that was never
        # read — removed as dead code.)
        resource_usage[idx] += workflow['num_oper'] / gauss(perf, perf * 0.0644)
    return max(resource_usage)
if __name__ == "__main__":
    # Experiment driver: plan a 1024-workflow homogeneous campaign with HEFT on
    # growing heterogeneous resource pools; record plan, makespan and planning time.
    repetitions = int(sys.argv[1])
    # Dynamic performance traces; only the first res_num rows are used per run.
    dyn_resources = np.load('../../Data/homogeneous_resources_dyn.npy')
    total_resources = pd.read_csv('../../Data/heterogeneous_resources.csv')
    num_resources = [4, 8, 16, 32, 64, 128]
    results = pd.DataFrame(columns=['size','planner','plan','makespan','time'])
    campaign, num_oper = campaign_creator(num_workflows=1024)
    resources = None
    for res_num in num_resources:
        print('Number of resources: %d' % res_num)
        # Grow the resource set incrementally so earlier resources are reused.
        resources = resdf_to_dict(res_df=total_resources, size=res_num, prev_set=resources)
        for _ in range(repetitions):
            planner = HeftPlanner(campaign=campaign, resources=resources, num_oper=num_oper, sid='test1')
            # Time only the planning step itself.
            tic = time()
            plan = planner.plan()
            toc = time()
            makespan = get_makespan(plan, dyn_resources[0:res_num,:], used_resources=resources)
            results.loc[len(results)]= [res_num, 'HEFT', plan, makespan, toc - tic]
            del planner
    results.to_csv('../../Data/heft/DynHeteroResources_StHomoCampaignsHEFT.csv', index=False)
|
[
"i.paraskev@rutgers.edu"
] |
i.paraskev@rutgers.edu
|
9012c87a2aee27b2fd02601681c078389d4b09e1
|
9b30c078a08cb7168529fbe0954fd5f1ad8867c1
|
/bitonic_tour/bitonic_tour.py
|
fe88fa5a19ebc8f4cc00f11ee9a6f83019be6720
|
[
"BSD-3-Clause"
] |
permissive
|
PythonicNinja/BitonicTour
|
8beaac4e0994b2d2a89b52c550abf23a148cdbb2
|
7936d186e49f3543dded3296e3f00280eb231c6f
|
refs/heads/master
| 2021-01-19T22:33:09.657804
| 2015-04-28T12:58:23
| 2015-04-28T12:58:23
| 34,730,090
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,201
|
py
|
# -*- coding: utf-8 -*-
from collections import namedtuple
import matplotlib.pyplot as plt
import sys
import math
class Point(object):
    """A 2-D point supporting Euclidean distance."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __str__(self):
        return '({},{})'.format(self.x, self.y)

    def distance(self, other):
        """Return the Euclidean distance between this point and *other*."""
        dx = self.x - other.x
        dy = self.y - other.y
        return math.sqrt(dx ** 2 + dy ** 2)
class Result(object):
    """An edge between two points together with its length ``d``."""

    def __init__(self, p1, p2, d=None):
        self.p1 = p1
        self.p2 = p2
        # Falls back to the endpoint distance whenever no truthy d is given
        # (matching the original `if not d:` behaviour, including d == 0).
        self.d = d if d else self.p1.distance(p2)

    def __str__(self):
        return '({} <- {} -> {})'.format(self.p1, self.d, self.p2)

    def __repr__(self):
        return str(self)
def bitonic_tour(points):
    """
    Classic O(n^2) dynamic program for the bitonic tour over `points`.

    :param points: list of Point objects sorted by x coordinate (at least 2).
    :return: list `b` where b[j] is the Result edge chosen for prefix j
             (b[0] is unused and stays None).
    """
    b = [None for _ in points]
    b[1] = Result(points[0], points[1])
    for j in range(2, len(points)):
        # Bug fixes vs. the original: sys.maxint does not exist on Python 3
        # (sys.maxsize does, on both 2 and 3), and the comparison must use the
        # current minimum's distance `minimum.d`, not the Result object itself
        # (float < instance is a TypeError on Python 3 and arbitrary on 2).
        minimum = Result(None, None, sys.maxsize)
        suma = 0
        for i in range(j - 2, -1, -1):
            d = Result(points[i], points[j], (b[i + 1].d + points[i].distance(points[j]) + suma))
            if d.d < minimum.d:
                minimum = d
            suma += points[i].distance(points[i + 1])
        b[j] = minimum
    return b
def bitonic_tour_with_final(points):
    """Return the DP table plus the closed-tour length (last edge added back)."""
    table = bitonic_tour(points)
    closing_edge = points[-1].distance(points[-2])
    total = table[-1].d + closing_edge
    return namedtuple('Result', 'b final')(table, total)
def plot(points):
    '''
    Plots the points and the edges of their bitonic tour with matplotlib.

    :param points: list of Point objects sorted by x coordinate.
    '''
    bitonic_path = bitonic_tour(points)
    for point in points:
        plt.plot(point.x, point.y, marker='o', linestyle='.', color='r', label='Points')
    for path in bitonic_path:
        # b[0] of the DP table is None, so only draw actual Result edges.
        if isinstance(path, Result):
            pair = [path.p1, path.p2]
            x_s = [(p.x, p.x) for p in pair]
            y_s = [(p.y, p.y) for p in pair]
            # NOTE(review): a 'Points'/'Path' label is attached to every artist,
            # so the legend will contain duplicate entries — confirm if intended.
            plt.plot(x_s, y_s, marker='o', linestyle='-', color='b', label='Path')
    plt.xlabel('X')
    plt.ylabel('Y')
    plt.title('Bitonic Path')
    plt.legend()
    plt.show()
if __name__ == '__main__':
    # Demo input: points already ordered by ascending x, as the DP requires.
    points = [
        Point(0, 6),
        Point(1, 0),
        Point(2, 3),
        Point(5, 4),
        Point(6, 1),
        Point(7, 5),
        Point(8, 2)
    ]
    plot(points)
|
[
"vojtek.nowak@gmail.com"
] |
vojtek.nowak@gmail.com
|
f5b25c538987fd6ce5b14f35adb94b5f6c313ba9
|
40aa8243e28eaecc895e0187e1e12e3b66195003
|
/Practice/Learn Python the Hard Way/ex44d.py
|
1dc4d868c58bdd6125687a0af81238ae30f54a2d
|
[] |
no_license
|
Toruitas/Python
|
94c5dc82736914203e8b712055b824796816c145
|
2d1ea9cdad9501ae9d934c90d516a5a846d2a631
|
refs/heads/master
| 2016-09-05T20:10:41.584677
| 2015-06-01T03:14:51
| 2015-06-01T03:14:51
| 17,966,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 776
|
py
|
__author__ = 'Stuart'
class Parent(object):
    """Base class for the inheritance exercise: each method just announces itself."""

    def implicit(self):
        print("PARENT implicit()")

    def override(self):
        print("PARENT override()")

    def altered(self):
        print('PARENT altered()')
class Child(Parent):
    """Overrides `override` completely and wraps the inherited `altered`."""

    def override(self):
        print("CHILD override()")

    def altered(self):
        print("CHILD, BEFORE PARENT altered()")
        # Delegate to the superclass implementation between the two markers.
        super(Child, self).altered()
        print("CHILD,AFTER PARENT altered()")
class Child2(Parent):
    """Stores `stuff` and then runs the parent initializer."""

    def __init__(self, stuff):
        self.stuff = stuff
        # Bug fix: the original called super(Child, stuff).__init__(), naming the
        # wrong class and passing the argument instead of the instance, which
        # raises TypeError as soon as Child2 is instantiated.
        super(Child2, self).__init__()
# Demo: exercise each method on a Parent and a Child instance to show implicit
# inheritance, overriding, and cooperative (super) calls.
dad = Parent()
son = Child()
print("--")
dad.implicit()
son.implicit()  # inherited: prints the PARENT message
print("--")
dad.override()
son.override()  # overridden: prints the CHILD message
print("--")
dad.altered()
son.altered()   # wraps the parent implementation between two CHILD messages
|
[
"Toruitas@gmail.com"
] |
Toruitas@gmail.com
|
2d1a46124b1bcd05edae440c40989402e8038634
|
258b656d1b6864726864f89d4c8dc38fc633a48f
|
/odoo_addons_others/opyme_banks_cards_payments/tests/__init__.py
|
537843db122d3786e710767444e8f3bffed770b5
|
[] |
no_license
|
test-odoorosario/opt
|
c17e1c1767710ca8e13a799644fb85b07e83639b
|
77921b4d965f2e4c081d523b373eb306a450a873
|
refs/heads/master
| 2022-12-02T04:36:04.685119
| 2019-07-11T17:17:20
| 2019-07-11T17:17:20
| 196,436,293
| 0
| 1
| null | 2022-11-22T00:30:40
| 2019-07-11T17:13:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,013
|
py
|
# coding: utf-8
##############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from . import set_up
from . import bank_card_coupon_test
from . import account_payment_test
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"root@ip-172-31-8-107.sa-east-1.compute.internal"
] |
root@ip-172-31-8-107.sa-east-1.compute.internal
|
f92c7ef64554501dda97c9d6219ae15b22fe6c96
|
49f61714a6f78d984fd2194d6064d84e891bc5b7
|
/2019-1/221/users/3746/codes/1818_2570.py
|
99c933c1cdb0a6149f2fcf9608a84330f969d007
|
[] |
no_license
|
psbarros/Variaveis3
|
b5c4e1517e7d94a846ee03791d25d5821a1c651c
|
3dcf6f810709ce03c78335acf9533e008a2ae125
|
refs/heads/master
| 2023-06-13T07:05:00.878430
| 2021-07-06T17:51:37
| 2021-07-06T17:51:37
| 383,549,597
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
from numpy import *
# SECURITY: eval() on raw user input executes arbitrary code — tolerable only in
# a classroom exercise, never with untrusted input.
x = array(eval(input("Digite uma sequencia: ")))
# Arithmetic mean of the sequence.
m = sum(x)/size(x)
desvio = 1
# Accumulate the product of absolute deviations from the mean.
for i in arange(size(x)):
    desvio *= abs(x[i] - m)
# Geometric mean of the absolute deviations, printed to 3 decimals.
p = ( desvio ) ** (1/len(x))
print(round(p,3))
|
[
"psb@icomp.ufam.edu.br"
] |
psb@icomp.ufam.edu.br
|
f6e27ff9b7c2954f1e1efd0374cea3fab623326c
|
0174ee349709eb578b25741998b8e32c5f64647e
|
/ENV_estimation/Matlab_env_training/steps_torch_env_BEGAN/NET/Net_full_cnn_deep_64filters.py
|
a08e8c9b2255821a7ec05186ba04328bf0fa5f7f
|
[] |
no_license
|
iiscleap/Joint_FDLP_envelope_dereverberation_E2E_ASR
|
8cbe2e97a86cd2abc4a33daccf98eed136e330ab
|
4ab007d4b946f848f8f6fe1a1576d4053c295116
|
refs/heads/master
| 2023-08-16T17:42:26.626435
| 2021-10-04T06:28:02
| 2021-10-04T06:28:02
| 413,298,107
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,045
|
py
|
import torch
import torch.nn as nn
class Net (nn.Module):
def __init__(self):
super(Net,self).__init__()
#self.B4 = nn.BatchNorm2d(8,track_running_stats=False)
self.conv1 = nn.Conv2d(1,64,kernel_size=(41,3), padding=(20,1))
self.drop1 = nn.Dropout(0.2)
self.conv2 = nn.Conv2d(64,64,kernel_size=(41,3), padding=(20,1))
self.drop2 = nn.Dropout(0.2)
self.conv3 = nn.Conv2d(64,32,kernel_size=(41,3),padding=(20,1))
self.drop3 = nn.Dropout(0.2)
self.conv4 = nn.Conv2d(32,16,kernel_size=(41,3), padding=(20,1))
self.drop4 = nn.Dropout(0.2)
self.conv5 = nn.Conv2d(16,1,kernel_size=(41,3), padding=(20,1))
self.relu = nn.ReLU()
#self.tanh = nn.Tanh()
def forward(self, x):
x = self.drop1(self.relu(self.conv1(x)))
x = self.drop2(self.relu(self.conv2(x)))
x = self.drop3(self.relu(self.conv3(x)))
x = self.drop4(self.relu(self.conv4(x)))
x = self.conv5(x)
return x
|
[
"anirudhsreeram@gmail.com"
] |
anirudhsreeram@gmail.com
|
85ba744861e35db375cefd578b8d05b380b43753
|
988176bcdae841e08106b0fe5cf07aabbc210c83
|
/task is to tell all the numbers ranging from 1-n with the fact that absolute diff between consecutive digits is 1.py
|
29235440a84cbcc7ec6fc56e6544024acddf10d3
|
[] |
no_license
|
gopiprasad008/GUVI_CODEKATA_PYTHON_CODE
|
ce1a63c7eea2a099c01748162c1deb47172dcd0a
|
78f374e344df25aab181408d8f41b3ebe03b34ef
|
refs/heads/master
| 2023-03-16T00:27:31.539524
| 2020-05-16T11:46:08
| 2020-05-16T11:46:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
# Print every number in 1..n whose adjacent digits all differ by exactly 1,
# or -1 when no multi-digit candidate can exist (n < 10).
limit = int(input())
if limit < 10:
    print(-1)
else:
    stepping = []
    for number in range(1, limit + 1):
        digits = str(number)
        # A valid number needs at least two digits, and every adjacent pair
        # must differ by exactly 1.
        valid = len(digits) > 1 and all(
            abs(int(a) - int(b)) == 1 for a, b in zip(digits, digits[1:])
        )
        if valid:
            stepping.append(digits)
    print(*stepping)
|
[
"noreply@github.com"
] |
gopiprasad008.noreply@github.com
|
9d9d8148a72ea925742ef997d1ff6ecf05914f8c
|
ef32b87973a8dc08ba46bf03c5601548675de649
|
/pytglib/api/types/passport_required_element.py
|
3961e39bab983a8c1c4d49907f8eb8b77f522af3
|
[
"MIT"
] |
permissive
|
iTeam-co/pytglib
|
1a7580f0e0c9e317fbb0de1d3259c8c4cb90e721
|
d3b52d7c74ee5d82f4c3e15e4aa8c9caa007b4b5
|
refs/heads/master
| 2022-07-26T09:17:08.622398
| 2022-07-14T11:24:22
| 2022-07-14T11:24:22
| 178,060,880
| 10
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
from ..utils import Object
class PassportRequiredElement(Object):
    """
    Contains a description of the required Telegram Passport element that was requested by a service

    Attributes:
        ID (:obj:`str`): ``PassportRequiredElement``

    Args:
        suitable_elements (List of :class:`telegram.api.types.passportSuitableElement`):
            List of Telegram Passport elements any of which is enough to provide

    Returns:
        PassportRequiredElement

    Raises:
        :class:`telegram.Error`
    """
    ID = "passportRequiredElement"

    def __init__(self, suitable_elements, **kwargs):
        # list of passportSuitableElement
        self.suitable_elements = suitable_elements

    @staticmethod
    def read(q: dict, *args) -> "PassportRequiredElement":
        parsed = [Object.read(element) for element in q.get('suitable_elements', [])]
        return PassportRequiredElement(parsed)
|
[
"me@amirh.co"
] |
me@amirh.co
|
d7fcea89d8f0962d52b1409db8a2a0f4cdcfd64a
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02601/s340228332.py
|
23c95eace5ee6c08286bda4d45d58f8a21a5abd7
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
import sys
sys.setrecursionlimit(10 ** 8)
input = sys.stdin.readline
def main():
    """Read A B C and K, then decide whether at most K doublings of B and C
    can establish A < B < C; prints "Yes" or "No"."""
    a, b, c = (int(x) for x in input().split())
    k = int(input())
    doublings = 0
    # Double B until it exceeds A, then double C until it exceeds B.
    while b <= a:
        doublings += 1
        b *= 2
    while c <= b:
        doublings += 1
        c *= 2
    print("Yes" if doublings <= k else "No")
if __name__ == '__main__':
    main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
1d7b78b317883196e980830a250cee69aa6b6255
|
b461ea3843d2772545886faaa041250c9b8d9634
|
/backend/test_mobile_app_4_28824/settings.py
|
4a1fa7cdeab6acca5a17d509a16ae0f01e96a9d2
|
[] |
no_license
|
crowdbotics-apps/test-mobile-app-4-28824
|
66f92e3dbe0008ad32b43d468f66c254c0904d1c
|
05ef5ad9d4b3d54106fda9f874277abd25f83dcf
|
refs/heads/master
| 2023-06-22T06:20:36.513365
| 2021-07-23T16:01:48
| 2021-07-23T16:01:48
| 385,782,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,206
|
py
|
"""
Django settings for test_mobile_app_4_28824 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
from modules.manifest import get_modules
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'test_mobile_app_4_28824.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test_mobile_app_4_28824.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
bc1f1845e476f67df90a1de79d7693592e018721
|
b1c99061474c4e2f2653f6f3d83931c949c58b13
|
/Basic/chapter4/duplicate/DictMethod2.py
|
208133bf693ddfa70ca63628644e696dff14c016
|
[] |
no_license
|
hyperaeon/python
|
df75346040a5ccc588e21b0d761493c59e1a4fe3
|
21d10ef7af3227d29092a6720666c0db8e418ec4
|
refs/heads/master
| 2016-09-14T08:58:53.794960
| 2016-04-26T05:34:56
| 2016-04-26T05:34:56
| 57,100,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,009
|
py
|
__author__ = 'hzliyong'
people = {
'Alice': {
'phone' : '2323',
'addr' : 'Foo drive 23'
},
'Beth' : {
'phone' : '9102',
'addr' : 'Bar street 32'
},
'Cecil' : {
'phone' : '3158',
'addr' : 'Baz avenue 90'
}
}
labels = {
'phone': 'phone number',
'addr' : 'address'
}
name = str(input("name:"))
request = str(input('phone number (p) or address(a)?'))
key = request
if request == 'p' :
key = 'phone'
elif request == 'a':
key = 'addr'
person = people.get(name, {})
label = labels.get(key, key)
result = person.get(key, 'not available')
print("%s's %s is %s." %(name,label, result))
d = {'title':'asdfasf', 'url': 'foasefsf'}
print(d.items())
it = d.__iter__()
print(list(it))
d = {'x':1, 'y':2}
print(d.pop('x'))
print(d)
d = {}
d.setdefault('name', 'N/A')
print(d)
d['name'] = 'Gumby'
d.setdefault('name', 'NA')
print(d)
d = {
'title':'pheone',
'url':'fasdfas'
}
x = {'title':'safasdfsdf'}
d.update(x)
print(d)
|
[
"hzliyong@corp.netease.com"
] |
hzliyong@corp.netease.com
|
d6c5877baea26781df716bd69865cb4b57600719
|
8ef8e6818c977c26d937d09b46be0d748022ea09
|
/cv/ocr/satrn/pytorch/base/models/recognizer/satrn.py
|
be984093faaef6b6e108e026315a40335620cde5
|
[
"Apache-2.0"
] |
permissive
|
Deep-Spark/DeepSparkHub
|
eb5996607e63ccd2c706789f64b3cc0070e7f8ef
|
9d643e88946fc4a24f2d4d073c08b05ea693f4c5
|
refs/heads/master
| 2023-09-01T11:26:49.648759
| 2023-08-25T01:50:18
| 2023-08-25T01:50:18
| 534,133,249
| 7
| 6
|
Apache-2.0
| 2023-03-28T02:54:59
| 2022-09-08T09:07:01
|
Python
|
UTF-8
|
Python
| false
| false
| 238
|
py
|
from models.builder import DETECTORS
from .encode_decode_recognizer import EncodeDecodeRecognizer
@DETECTORS.register_module()
class SATRN(EncodeDecodeRecognizer):
"""Implementation of `SATRN <https://arxiv.org/abs/1910.04396>`_"""
|
[
"jia.guo@iluvatar.ai"
] |
jia.guo@iluvatar.ai
|
32041dd783fe53575a0f49eadcca2ce9733f4d6f
|
4a48593a04284ef997f377abee8db61d6332c322
|
/python/gradio/authentication_single_user.py
|
e018fe42e2780394ae124de3611a6f3d0acac8dc
|
[
"MIT"
] |
permissive
|
jeremiedecock/snippets
|
8feaed5a8d873d67932ef798e16cb6d2c47609f0
|
b90a444041c42d176d096fed14852d20d19adaa7
|
refs/heads/master
| 2023-08-31T04:28:09.302968
| 2023-08-21T07:22:38
| 2023-08-21T07:22:38
| 36,926,494
| 26
| 9
|
MIT
| 2023-06-06T02:17:44
| 2015-06-05T10:19:09
|
Python
|
UTF-8
|
Python
| false
| false
| 346
|
py
|
#!/usr/bin/env python3
# To run this demo, type in a terminal: gradio authentication_single_user.py
# See: https://www.gradio.app/guides/sharing-your-app#authentication
import gradio as gr
def greet(name):
return "Hello " + name + "!"
demo = gr.Interface(fn=greet, inputs="text", outputs="text")
demo.launch(auth=("admin", "pass1234"))
|
[
"jd.jdhp@gmail.com"
] |
jd.jdhp@gmail.com
|
d4185600be3f5118a23c40c7bfea74284068f38d
|
2b0eab74af8d23244ff11699830f9bb10fbd717a
|
/experiences/serializers/experience_serializer.py
|
5cc50421a259b063d5d0b16570a1bd2d818eddc3
|
[] |
no_license
|
alexandrenorman/mixeur
|
c7e25cd20b03c78b361cb40e3e359a6dc5d9b06b
|
95d21cd6036a99c5f399b700a5426e9e2e17e878
|
refs/heads/main
| 2023-03-13T23:50:11.800627
| 2021-03-07T15:49:15
| 2021-03-07T15:49:15
| 345,384,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,548
|
py
|
# -*- coding: utf-8 -*-
from helpers.serializers import AutoModelSerializer
from experiences.models import Experience
# from experiences.serializers.tag_select_serializer import TagSelectSerializer
from .assignment_tag_serializer import AssignmentTagSerializer
from .experience_sponsor_serializer import ExperienceSponsorSerializer
from .experience_tag_serializer import ExperienceTagSerializer
from .job_tag_serializer import JobTagSerializer
from .partner_tag_serializer import PartnerTagSerializer
from .public_tag_serializer import PublicTagSerializer
from .year_tag_serializer import YearTagSerializer
from accounts.serializers.user_simple_serializer import UserSimpleSerializer
from helpers.strings import truncate_html
class ExperienceSerializer(AutoModelSerializer):
model = Experience
referent = UserSimpleSerializer(required=False)
def get_owning_group(self, obj):
return obj.owning_group.pk
def get_assignments(self, obj):
assignments = obj.assignments.all()
serializer = AssignmentTagSerializer(assignments, many=True)
return serializer.data
def get_sponsors(self, obj):
sponsors = obj.sponsors.all()
serializer = ExperienceSponsorSerializer(sponsors, many=True)
return serializer.data
def get_jobs(self, obj):
jobs = obj.jobs.all()
serializer = JobTagSerializer(jobs, many=True)
return serializer.data
def get_partners(self, obj):
partners = obj.partners.all()
serializer = PartnerTagSerializer(partners, many=True)
return serializer.data
def get_publics(self, obj):
publics = obj.publics.all()
serializer = PublicTagSerializer(publics, many=True)
return serializer.data
def get_tags(self, obj):
tags = obj.tags.all()
serializer = ExperienceTagSerializer(tags, many=True)
return serializer.data
def get_years(self, obj):
years = obj.years.all()
serializer = YearTagSerializer(years, many=True)
return serializer.data
def get_image1(self, obj):
if obj.image1:
return obj.image1.url
else:
return None
def get_image2(self, obj):
if obj.image2:
return obj.image2.url
else:
return None
def get_description_truncated(self, obj):
return truncate_html(obj.description, length=200, ellipsis=" […]")
def get_role_truncated(self, obj):
return truncate_html(obj.role, length=200, ellipsis=" […]")
|
[
"norman@xael.org"
] |
norman@xael.org
|
9064a6404cbbf0636c8390857c56ffc3aafddc45
|
42a812ac785752921dcdddd4ae56064b51452b39
|
/bulletin/post/service.py
|
63027a0b640318fac71d029488fe033e2db32ed7
|
[] |
no_license
|
Pre-Onboarding-Listerine/aimmo-assignment-team-1
|
e4a15d3e71f1985febf911360691389f5996f0fb
|
d94dd7482f065ac1b020bb500984740c13af14e6
|
refs/heads/main
| 2023-09-02T12:23:49.693075
| 2021-11-03T00:25:18
| 2021-11-03T00:25:18
| 423,444,898
| 1
| 3
| null | 2021-11-02T16:35:38
| 2021-11-01T11:46:19
|
Python
|
UTF-8
|
Python
| false
| false
| 2,259
|
py
|
from datetime import datetime
from typing import Optional
from .dto.deleted_post_id import DeletedPostId
from .dto.list_params import ListParams
from .dto.post_changes import PostChanges
from .dto.post_content import PostContents
from member.models import Member
from .dto.post_list import PostList
from .exceptions import PostNotFoundException
from .models.posting import Posting
from security.exceptions import UnauthorizedException
class PostService:
def write(self, contents: PostContents, author: Member):
new_posting = Posting(
member=author,
title=contents.title,
content=contents.content
)
Posting.add(new_posting)
def edit(self, changes: PostChanges, updater: Member):
posting = Posting.get_by_id(post_id=changes.id)
if posting is None:
raise PostNotFoundException
if posting.member != updater:
raise UnauthorizedException
edited = Posting(
id=changes.id,
member=updater,
title=changes.title if changes.title else posting.title,
content=changes.content if changes.content else posting.content,
created_at=posting.created_at,
updated_at=datetime.utcnow(),
comments=posting.comments,
hits=posting.hits
)
edited.save()
def remove(self, deleted_post_id: DeletedPostId, deleter: Member):
target = Posting.get_by_id(post_id=deleted_post_id.id)
if target is None:
raise PostNotFoundException
if target.member != deleter:
raise UnauthorizedException
target.delete()
def details(self, post_id: int, member: Optional[Member]):
target = Posting.get_by_id(post_id=post_id)
if target is None:
raise PostNotFoundException
if member not in target.hit_members:
target.hits += 1
if member:
target.hit_members.append(member)
target.member.save()
target.save()
return target.to_details()
def list(self, params: ListParams):
postings = Posting.get_partial(params)
return PostList(posts=[posting.to_details() for posting in postings])
|
[
"rlawndhks217@gmail.com"
] |
rlawndhks217@gmail.com
|
0a518aaafd69d81b11d9bc75f1a6d5ceb74437be
|
a57e66be33512a7e2e99adb6f597151b56c4c373
|
/psextractzip.py
|
7ca1385fe50f200807f7ee27cb2f74eb2b6a7aa5
|
[] |
no_license
|
ravijaya/sep28
|
17025ea0997a371f54a6374f90d4bf56e0206840
|
8907f4b03ac2c4b2f1806d0d7cf3fd6aa680680c
|
refs/heads/master
| 2022-12-18T23:58:12.989144
| 2020-09-30T12:13:15
| 2020-09-30T12:13:15
| 299,901,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from zipfile import ZipFile
from os.path import isdir
from os import mkdir
extract_path = '/tmp/catch32'
if not isdir(extract_path):
mkdir(extract_path)
zf = ZipFile('source.zip', mode='r')
zf.extractall(extract_path)
zf.close()
|
[
"ravi.goglobium@gmail.com"
] |
ravi.goglobium@gmail.com
|
f3cdc3c4ed9fb20772082063893b5fe2e7011323
|
c8a7ccfb42628d1100562a053c4334488e1bf239
|
/bf_transpiler.py
|
c9b0b6600a3268218f8306c93743175946997ef0
|
[
"CC0-1.0"
] |
permissive
|
LyricLy/python-snippets
|
8487619a916e33e02b5772aba577d9dafdfd803b
|
9d868b7bbccd793ea1dc513f51290963584a1dee
|
refs/heads/master
| 2020-04-08T01:57:22.511167
| 2018-11-24T08:12:20
| 2018-11-24T08:12:20
| 158,916,096
| 1
| 0
|
CC0-1.0
| 2018-11-24T08:16:59
| 2018-11-24T08:16:59
| null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
#!/usr/bin/env python3
# encoding: utf-8
import sys
print('#include <stdio.h>')
# https://en.wikipedia.org/wiki/Brainfuck#Commands
print('char array[30000] = {0};')
print('char *ptr=array;')
print('int main(void) {')
toke1s = {
'>': '++ptr;',
'<': '--ptr;',
'+': '++*ptr;',
'-': '--*ptr;',
'.': 'putchar(*ptr);',
',': '*ptr=getchar();',
'[': 'while (*ptr) {',
']': '}'}
for line in sys.stdin:
for char in line:
print(toke1s.get(char, ''))
print('return 0;}')
|
[
"bmintz@protonmail.com"
] |
bmintz@protonmail.com
|
871a7bf6dcd9ae919c3619cdc9d1c3a241dab433
|
f6cb3563a412f148a8a9f47204ac1e2226ae7b2e
|
/models/encoders/core/multitask_lstm.py
|
7710216ba0ae581443b27d07387a2b26dbe0f6d4
|
[
"MIT"
] |
permissive
|
sundogrd/tensorflow_end2end_speech_recognition
|
424789888a54d7149aa9a35a68e781df553abfd9
|
61e4a65fb5c9f3d9f690d713dcd77a48b1de0a14
|
refs/heads/master
| 2020-05-17T19:21:34.206076
| 2019-04-28T14:03:12
| 2019-04-28T14:03:12
| 183,913,207
| 0
| 0
|
MIT
| 2019-04-28T13:40:36
| 2019-04-28T13:40:36
| null |
UTF-8
|
Python
| false
| false
| 5,376
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Multi-task unidirectional LSTM encoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from models.encoders.core.lstm import basiclstmcell, lstmcell, lstmblockcell, lstmblockfusedcell, cudnnlstm
class MultitaskLSTMEncoder(object):
"""Multi-task unidirectional LSTM encoder.
Args:
num_units (int): the number of units in each layer
num_proj (int): the number of nodes in recurrent projection layer
num_layers_main (int): the number of layers of the main task
num_layers_sub (int): the number of layers of the sub task
lstm_impl (string, optional): a base implementation of LSTM.
- BasicLSTMCell: tf.contrib.rnn.BasicLSTMCell (no peephole)
- LSTMCell: tf.contrib.rnn.LSTMCell
- LSTMBlockCell: tf.contrib.rnn.LSTMBlockCell
- LSTMBlockFusedCell: under implementation
- CudnnLSTM: under implementation
Choose the background implementation of tensorflow.
use_peephole (bool): if True, use peephole
parameter_init (float): the range of uniform distribution to
initialize weight parameters (>= 0)
clip_activation (float): the range of activation clipping (> 0)
time_major (bool, optional): if True, time-major computation will be
performed
name (string, optional): the name of encoder
"""
def __init__(self,
num_units,
num_proj,
num_layers_main,
num_layers_sub,
lstm_impl,
use_peephole,
parameter_init,
clip_activation,
time_major=False,
name='multitask_lstm_encoder'):
assert num_proj != 0
self.num_units = num_units
if lstm_impl != 'LSTMCell':
self.num_proj = None
else:
self.num_proj = num_proj
# TODO: fix this
self.num_layers_main = num_layers_main
self.num_layers_sub = num_layers_sub
self.lstm_impl = lstm_impl
self.use_peephole = use_peephole
self.parameter_init = parameter_init
self.clip_activation = clip_activation
self.time_major = time_major
self.name = name
if self.num_layers_sub < 1 or self.num_layers_main < self.num_layers_sub:
raise ValueError(
'Set num_layers_sub between 1 to num_layers_main.')
def __call__(self, inputs, inputs_seq_len, keep_prob, is_training):
"""Construct model graph.
Args:
inputs (placeholder): A tensor of size`[B, T, input_size]`
inputs_seq_len (placeholder): A tensor of size` [B]`
keep_prob (placeholder, float): A probability to keep nodes
in the hidden-hidden connection
is_training (bool):
Returns:
outputs: A tensor of size `[T, B, input_size]` in the main task
final_state: A final hidden state of the encoder in the main task
outputs_sub: A tensor of size `[T, B, input_size]` in the sub task
final_state_sub: A final hidden state of the encoder in the sub task
"""
initializer = tf.random_uniform_initializer(
minval=-self.parameter_init, maxval=self.parameter_init)
if self.lstm_impl == 'BasicLSTMCell':
outputs, final_state, outputs_sub, final_state_sub = basiclstmcell(
self.num_units, self.num_layers_main,
inputs, inputs_seq_len, keep_prob, initializer,
self.time_major, self.num_layers_sub)
elif self.lstm_impl == 'LSTMCell':
outputs, final_state, outputs_sub, final_state_sub = lstmcell(
self.num_units, self.num_proj, self.num_layers_main,
self.use_peephole, self.clip_activation,
inputs, inputs_seq_len, keep_prob, initializer,
self.time_major, self.num_layers_sub)
elif self.lstm_impl == 'LSTMBlockCell':
outputs, final_state, outputs_sub, final_state_sub = lstmblockcell(
self.num_units, self.num_layers_main,
self.use_peephole, self.clip_activation,
inputs, inputs_seq_len, keep_prob, initializer,
self.time_major, self.num_layers_sub)
elif self.lstm_impl == 'LSTMBlockFusedCell':
outputs, final_state, outputs_sub, final_state_sub = lstmblockfusedcell(
self.num_units, self.num_layers_main,
inputs, inputs_seq_len, keep_prob, initializer,
self.time_major, self.num_layers_sub)
elif self.lstm_impl == 'CudnnLSTM':
outputs, final_state, outputs_sub, final_state_sub = cudnnlstm(
self.num_units, self.num_layers_main,
inputs, inputs_seq_len, keep_prob, initializer,
self.time_major, self.num_layers_sub)
else:
raise IndexError(
'lstm_impl is "BasicLSTMCell" or "LSTMCell" or ' +
'"LSTMBlockCell" or "LSTMBlockFusedCell" or ' +
'"CudnnLSTM".')
return outputs, final_state, outputs_sub, final_state_sub
|
[
"hiro.mhbc@gmail.com"
] |
hiro.mhbc@gmail.com
|
6229624933460871ef10628976ea5a6527362b59
|
4d5a312ddd0d158fd76652a582cfc18c291a7a71
|
/tests/test_faust_poly_wavetable.py
|
6a24d4d662d2df1932f8bbf8f5d98e4a1b1f61a7
|
[
"MIT"
] |
permissive
|
startreker-shzy/DawDreamer
|
6ba12e1f25e949c38851d1c31edc2307ea8d34f4
|
d503fedc67db57630ccce14d71ea64512ab0728b
|
refs/heads/main
| 2023-06-15T02:32:18.301901
| 2021-07-11T21:24:36
| 2021-07-11T21:24:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,275
|
py
|
import pytest
import numpy as np
from scipy.io import wavfile
from os.path import abspath
from utils import *
import dawdreamer as daw
BUFFER_SIZE = 1
def _test_faust_poly_wavetable(wavecycle, output_path, lagrange_order=4):
engine = daw.RenderEngine(SAMPLE_RATE, BUFFER_SIZE)
dsp_path = abspath("faust_dsp/polyphonic_wavetable.dsp")
faust_processor = engine.make_faust_processor("faust", "")
faust_processor.num_voices = 8
dsp_code = open(dsp_path).read()
waveform_length = wavecycle.shape[0]
wavecycle = ", ".join([str(num) for num in wavecycle.tolist()])
# print(wavecycle)
dsp_code = """
LAGRANGE_ORDER = {LAGRANGE_ORDER}; // lagrange order. [2-4] are good choices.
CYCLE_SEQ = waveform{{{CYCLE_SEQ}}} : !, _;
CYCLE_LENGTH = {CYCLE_LENGTH};
""".format(LAGRANGE_ORDER=lagrange_order,
CYCLE_LENGTH=waveform_length,
CYCLE_SEQ=wavecycle) + dsp_code
# print('dsp code: ')
# print(dsp_code)
assert(faust_processor.set_dsp_string(dsp_code))
assert(faust_processor.compiled)
desc = faust_processor.get_parameters_description()
# (MIDI note, velocity, start sec, duration sec)
faust_processor.add_midi_note(60, 60, 0.0, .25)
faust_processor.add_midi_note(64, 80, 0.5, .5)
faust_processor.add_midi_note(67, 127, 0.75, .5)
assert(faust_processor.n_midi_events == 3*2) # multiply by 2 because of the off-notes.
graph = [
(faust_processor, [])
]
assert(engine.load_graph(graph))
render(engine, file_path='output/'+output_path, duration=3.)
def test_faust_poly_wavetable_sine():
waveform_length = 4
wavecycle = np.sin(np.pi*2.*np.arange(waveform_length)/float(waveform_length))
output_path = 'test_faust_poly_wavetable_sine_4.wav'
# this won't sound like a perfect sine wave, but it only used 4 samples!
_test_faust_poly_wavetable(wavecycle, output_path)
waveform_length = 2048
wavecycle = np.sin(np.pi*2.*np.arange(waveform_length)/float(waveform_length))
output_path = 'test_faust_poly_wavetable_sine_2048.wav'
# this should sound perfect.
_test_faust_poly_wavetable(wavecycle, output_path)
def test_faust_poly_wavetable_saw():
waveform_length = 4
wavecycle = -1.+2.*np.arange(waveform_length)/float(waveform_length)
output_path = 'test_faust_poly_wavetable_saw.wav'
_test_faust_poly_wavetable(wavecycle, output_path)
|
[
"email@example.com"
] |
email@example.com
|
4164c98dac604edeb930b9597057603d7aa4b6c8
|
2f638d47a9681cbb2caab865702ddca39a0456d3
|
/djangocms_misc/tests/test_app/migrations/0003_auto_20171014_0359.py
|
22686a37b2f487eb82f492007080c823b3032a0b
|
[
"MIT"
] |
permissive
|
bnzk/djangocms-misc
|
b0d1a1950b3d8c7752ea661c74bc08bfbd0360a6
|
8869384305ef7ff8538af986f4854bcfde7257de
|
refs/heads/develop
| 2023-06-08T10:12:11.275012
| 2023-05-30T13:00:34
| 2023-05-30T13:00:34
| 66,085,267
| 1
| 1
|
MIT
| 2023-02-04T07:49:28
| 2016-08-19T13:43:34
|
Python
|
UTF-8
|
Python
| false
| false
| 631
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-10-14 03:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('test_app', '0002_auto_20170805_0136'),
]
operations = [
migrations.AddField(
model_name='testpluginmodel',
name='field1_de',
field=models.CharField(default='', max_length=64, null=True),
),
migrations.AddField(
model_name='testpluginmodel',
name='field1_en',
field=models.CharField(default='', max_length=64, null=True),
),
]
|
[
"bnzk@bnzk.ch"
] |
bnzk@bnzk.ch
|
466ee9b5ce41e5e937ad981d0f953b9efbc02ad1
|
9f3991f4e7b405c04f2ef03ac7747b5a69d26b4b
|
/basic_grammar/higher-order function/higher-order function.py
|
c7024ce852e83bc73b790378586dcc8aa44b349a
|
[] |
no_license
|
zcxyun/pythonDemo
|
f66eb5e6e4274db2137480786eae4d6ca7e73163
|
adf18cf6b58282a7f2f9203aa09d5cb60ced2e35
|
refs/heads/master
| 2021-07-29T19:06:52.481792
| 2021-07-27T16:10:38
| 2021-07-27T16:10:38
| 101,542,803
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,379
|
py
|
# 测试 map list #################################################
##numList = [1,2,3,4,5,6,7]
##def f(x):
## return x*x
##L = []
##for n in [1,2,3,4,5,6,7]:
## L.append(f(n))
##print(L)
##
##n = map(f, [1,2,3,4,5,6,7])
##print(list(n))
##from functools import reduce
##def add(x, y):
## return x * 10 + y
##print(reduce(add, numList))
# strList = ['adamdfdfdsFFF', 'LISAddfdfdFFFf', 'FbdddFarT']
##def normalize(name):
## strL = []
## for i in range(len(name)):
## if i == 0:
## strL.append(name[i].upper())
## else:
## strL.append(name[i].lower())
## str = ''.join(strL)
## return str
##print(list(map(normalize, strList)))
##def normalize(name):
## return name[:1].upper() + name[1:].lower()
##print(list(map(normalize, strList)))
# 测试reduce() ##########################################
from functools import reduce
def add(x, y):
return x + y
def fn(x, y):
return x * 10 + y
##print(reduce(fn, range(0,10,2)))
def char2num(s):
return {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}[s]
def str2int(s):
return reduce(lambda x, y: x * 10 + y, map(char2num, s))
##print(str2int('12435345435') == int( '12435345435'))
# ---------------------------------------------------
# kv ={'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}
# def strToFloat(s):
# return reduce(lambda x,y : x*10+y,map(lambda a: kv[a],s.replace('.',"")))/(10**len(s[s.find('.')+1:]))
# print('\"12.12\" = %.2f' % strToFloat('12.12'))
# 测试filter() ######################################
def is_odd(n):
return n % 2 == 1
##print(list(filter(is_odd, [1, 2, 4, 5, 6, 9, 10, 15])))
# 用filter() 求素数 ###################################
def _odd_iter():
n = 1
while True:
n = n + 2
yield n
def _not_divisible(n):
return lambda x: x % n > 0
def primes():
yield 2
it = _odd_iter() # 初始序列
while True:
n = next(it)
yield n
it = filter(_not_divisible(n), it)
##for n in primes():
## if n < 100:
## print(n)
## else:
## break
# closure ########################################
##def count():
## fs = []
## for i in range(1, 4):
## def f():
## return i * i
## fs.append(f)
## return fs
def count():
fs = []
def f(j):
def g():
return j * j
return g
for i in range(1, 4):
fs.append(f(i))
return fs
##
##f1, f2, f3 = count()
##print(f1(), f2(), f3())
# decorator ######################################
##def log(func):
## def wrapper(*args, **kw):
## print('call %s():' % func.__name__)
## return func(*args, **kw)
## return wrapper
##@log
##def now():
## print('2014-4-4')
##now()
##def log(text):
## def decorator(func):
## def wrapper(*args, **kw):
## print('%s %s():' % (text, func.__name__))
## return func(*args, **kw)
## return wrapper
## return decorator
##@log('exe')
##def now():
## print('3028-3-2')
##now()
##print(now.__name__)
import functools
def log(func):
@functools.wraps(func)
def wrapper(*args, **kw):
print('call %s():' % func.__name__)
return func(*args, **kw)
return wrapper
@log
def now():
print('3028-3-2')
now()
print(now.__name__)
|
[
"zcxyun@126.com"
] |
zcxyun@126.com
|
0ab5b9a6dbcb10aaa152146be20bec7498d73b63
|
e50cacdd30e7050a3f5db46003ff531b57c939e8
|
/regressao_valorGlobal.py
|
a12ff05224d6b5365a245c890b48cec48a665c46
|
[] |
no_license
|
C4st3ll4n/rn_videogame
|
bd9972d54efcf9fd4d695b14c468db82cc17d77c
|
2175cb2146069671451d8f277df64ccc325683ac
|
refs/heads/master
| 2020-05-02T09:02:37.103831
| 2019-03-26T19:53:42
| 2019-03-26T19:53:42
| 177,859,205
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,358
|
py
|
import pandas as pd
from keras.layers import Dense, Dropout, Activation, Input
from keras.models import Model
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
base = pd.read_csv('games.csv')
base = base.dropna(axis=0)
base = base.drop('Other_Sales', axis=1)
base = base.drop('Developer', axis = 1)
base = base.drop('Name', axis = 1)
base = base.drop('NA_Sales', axis=1)
base = base.drop('EU_Sales', axis=1)
base = base.drop('JP_Sales', axis=1)
base = base.loc[base['Global_Sales'] > 0.01]
previsores = base.iloc[:,[0,1,2,3,5,6,7,8]].values
vendaGlobal = base.iloc[:,4]
lb = LabelEncoder()
previsores[:, 0] = lb.fit_transform(previsores[:, 0])
previsores[:, 2] = lb.fit_transform(previsores[:, 2])
previsores[:, 3] = lb.fit_transform(previsores[:, 3])
previsores[:, 5] = lb.fit_transform(previsores[:, 5])
ohe = OneHotEncoder(n_values='auto',categorical_features=[0,2,3,5])
previsores = ohe.fit_transform(previsores).toarray()
ce = Input(shape=(396,))
co1 = Dense(units=186, activation='sigmoid')(ce)
co2 = Dense(units=186, activation='sigmoid')(co1)
cs1 = Dense(units=1, activation='linear')(co2)
regressor = Model(inputs = ce,
outputs=cs1)
regressor.compile(optimizer='adam', loss='mae')
regressor.fit(previsores, [vendaGlobal], epochs=1000, batch_size=1000 )
previsaoGlobal = regressor.predict(previsores)
|
[
"p13dr0h@gmail.com"
] |
p13dr0h@gmail.com
|
13f5f3247507241ab44d566d44aa6d25d184cb5d
|
2eeeefe48c56d0dfae4fd568dbaee3c8d2cf3463
|
/0Demo/BottleDemo/BottleDemo.py
|
e35c154d100e30eada7d29ec775a34fc90da646c
|
[] |
no_license
|
lijianmingCN/pybase
|
f6377f7944c043f7241452fcffccc3f49ef0cef9
|
7286a022ff7f40a7289cf69d73e8418a1ecf7b88
|
refs/heads/master
| 2021-01-02T08:10:42.215672
| 2017-08-01T03:16:29
| 2017-08-01T03:16:29
| 98,953,479
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,812
|
py
|
# -*- coding: utf-8 -*-
# Bottle demo app (Python 2): routing, templates, forms, static files,
# error handling, redirects, charset control and file downloads.
from bottle import route,run,template,get,post,request,response,static_file,error,abort,redirect


# Simple fixed-text route.
@route('/index')
def index():
    return "Hi 码农!!!"


# Dynamic route: <name> is captured from the URL and fed to an inline template.
@route('/hello/<name>')
def hello(name):
    return template('<b>Hello {{name}}</b>!', name=name)


# GET: render the login form.
@get('/login') # or @route('/login')
def login():
    return '''
        <form action="/login" method="post">
            Username: <input name="username" type="text" />
            Password: <input name="password" type="password" />
            <input value="Login" type="submit" />
        </form>
    '''


# POST: demo "authentication" -- accepts any username equal to its password.
@post('/login') # or @route('/login', method='POST')
def do_login():
    username = request.forms.get('username')
    password = request.forms.get('password')
    print username
    print password
    if username == password:
        return "<p>Your login information was correct.</p>"
    else:
        return "<p>Login failed.</p>"


# Serve JS assets from ./static/js.
@route('/static/js/<filename>')
def server_static(filename):
    return static_file(filename,root='./static/js')


# Custom 404 page.
@error(404)
def error404(error):
    return 'Nothing here, sorry'


# Demonstrates aborting a request with an explicit HTTP status code.
@route('/restricted')
def restricted():
    abort(401, "Sorry, access denied.")


# Demonstrates a redirect.
@route('/wrong/url')
def wrong():
    redirect("/login")


# Demonstrates overriding the response charset.
# NOTE(review): charset is set to ISO-8859-1 while the body text says
# ISO-8859-15 -- confirm which encoding is actually intended.
@route('/iso')
def get_iso():
    response.charset = 'ISO-8859-1'
    return u'This will be sent with ISO-8859-15 encoding.'


# Serve only PNG images (filename constrained by regex).
@route('/image/<filename:re:.*\.png>')
def send_image(filename):
    return static_file(filename, root='./image', mimetype='image/png')


# Serve arbitrary files under ./file.
@route('/file/<filename:path>')
def send_static(filename):
    return static_file(filename, root='./file')


# Force a download (Content-Disposition: attachment) instead of inline display.
@route('/download/<filename:path>')
def download(filename):
    return static_file(filename, root='./file', download=filename)


run(host='0.0.0.0', port=8080)
|
[
"lijianming@baidu.com"
] |
lijianming@baidu.com
|
be7c25b3c60424dd6ae4afe74366a35ed9503d7d
|
0c10d5c3dedd8e17275146e596edc6104b6ca325
|
/flask_benchmark/client.py
|
7e48a30f6cd58893d35350763555c95e7beeada8
|
[
"MIT"
] |
permissive
|
mpetyx/python-rpc-frameworks-comparison
|
f5cf01dfd1bb61fca4e8bbcd4bf08b471ef6a02a
|
5b69a5aef1f8ead2fb88e744b6b1787b27165898
|
refs/heads/master
| 2020-03-27T13:15:32.841893
| 2018-08-30T17:08:40
| 2018-08-30T17:08:40
| 146,599,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
__author__ = 'mpetyx (Michael Petychakis)'
__version__ = "1.0.0"
__maintainer__ = "Michael Petychakis"
__email__ = "hello@apilama.com"
__status__ = "Production"
import grpc
# import the generated classes
import requests
# import the original calculator.py
# HTTP client used to benchmark the Flask server; mirrors the gRPC client's
# interface so the benchmark harness can swap transports transparently.
class Client:
    def __init__(self, channel=None):
        # `channel` is accepted only for interface parity with the gRPC
        # client; it is unused for plain HTTP.
        pass

    def squareRoot(self, number):
        # Calls the local Flask endpoint and returns the raw response body.
        return requests.get('http://127.0.0.1:5000/int/'+str(number)).text
# import sys
# sys.path.append(".")
# import timer
#
#
# with timer.MyTimer():
# print(Client().squareRoot(64))
|
[
"mpetyx@gmail.com"
] |
mpetyx@gmail.com
|
7bd668d47a2122f2507dd518f36921016a1c4664
|
fe1768a66ce67f9ee7f42aeeff46d8d7773a14e2
|
/flod_sak/alembic/versions/20150929-0733-23adc825714b_add_column_avskrevet_rapportkrav_.py
|
5c1f0169e9fc832120381f60072894e43d01bb9e
|
[
"BSD-2-Clause-Views"
] |
permissive
|
Trondheim-kommune/Tilskuddsbasen
|
eb7f6ee026aba86165b4d6d6c3f73b16d7d3a9f8
|
4f8ce270ef7296069f8e43bfb4bf6a570a7a35d4
|
refs/heads/master
| 2022-09-20T10:51:50.247573
| 2017-05-29T19:09:16
| 2017-05-29T19:09:16
| 49,863,325
| 0
| 0
|
NOASSERTION
| 2022-09-16T17:45:47
| 2016-01-18T08:39:22
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 695
|
py
|
"""add column avskrevet_rapportkrav_kommentar
Revision ID: 23adc825714b
Revises: 4bb0faf5d52
Create Date: 2015-09-29 07:33:26.377207
"""
# revision identifiers, used by Alembic.
revision = '23adc825714b'
down_revision = '4bb0faf5d52'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable ``avskrevet_rapportkrav_kommentar`` column to ``soknader``."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('soknader', sa.Column('avskrevet_rapportkrav_kommentar', sa.String(length=700), nullable=True))
    ### end Alembic commands ###


def downgrade():
    """Drop the ``avskrevet_rapportkrav_kommentar`` column again."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('soknader', 'avskrevet_rapportkrav_kommentar')
    ### end Alembic commands ###
|
[
"teeejay@gmail.com"
] |
teeejay@gmail.com
|
4f1802317263e6acc86168a31df2eaccc57b3b4a
|
537345f90de44dac4e2a20037d21f858f82e3120
|
/deMultiGene_afterGb2fasta.py
|
3edebf3809218662f2a12160c63ae7ae475ea1de
|
[] |
no_license
|
kaiyaprovost/misc_scripts
|
f8fc8ca646c5c97ad3495e612bc9656e2b8d238c
|
5c460ea608c13ff271fa6772fe548b89aa68c225
|
refs/heads/master
| 2021-11-11T15:33:34.211463
| 2021-11-10T23:11:56
| 2021-11-10T23:11:56
| 237,049,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,895
|
py
|
# -*- coding: utf-8 -*-
"""
Program to remove multiple genes from a FASTA file
Requires that the first term of the FASTA file is the gene identity
Edited on 10 dec 2016 - NOT TESTED
@author: kprovost
"""
def main():
    """Split multi-gene FASTA files into one file per gene.

    Scans every ``*.fa*`` file in the target directory, parses the gene
    name out of each ``>`` header line (format ``>GENE from DESCRIPTION``),
    and appends each record to ``<dir>/genes/<input-stem><GENE>.fa``.
    The target directory comes from ``sys.argv[1]`` and defaults to
    ``<cwd>/concatGenbankFiles/``.
    """
    import sys
    import glob
    import os
    try:
        path = sys.argv[1]
        print("\tPath is: ", path)
    except IndexError:  # was a bare `except:`; only a missing argv entry is expected
        print("Path not given, using current working directory + concatGenbankFiles")
        path = os.getcwd() + "/concatGenbankFiles/"
    outpath = path + "/genes/"
    if not os.path.exists(outpath):
        print("creating folder: ", outpath)
        os.makedirs(outpath)
    os.chdir(path)
    for filename in glob.glob("*.fa*"):
        print("FILE IS ", filename)
        # Sequence lines seen before the first ">" header land in a "temp"
        # file rather than being silently dropped.
        currentFile = "temp"
        with open(filename, "r") as infile:
            lines = infile.readlines()
        for line in lines:
            if line[0] == ">":
                gene, descrip = line.split(" from ", 1)
                # Normalise the gene name: strip the ">", whitespace and
                # punctuation, and upper-case it.
                gene = gene[1:].strip().replace(" ", "").replace("/", "").upper().replace("-", "").replace("\t", "")
                gene = gene.replace(",", "").replace(":", "").replace("'", "").replace(";", "").replace("_", "")
                descrip = descrip.strip().upper()
                # NOTE(review): [:-7] assumes a 7-character suffix on the
                # input filename (e.g. "_gb.fasta" stems) -- confirm.
                currentFile = filename[:-7] + gene + ".fa"
            # Header and sequence lines both go to the current gene file;
            # the two duplicated open/write branches were merged into one.
            with open(outpath + currentFile, "a") as outfile:
                outfile.write(line)


if __name__ == "__main__":
    main()
|
[
"17089935+kaiyaprovost@users.noreply.github.com"
] |
17089935+kaiyaprovost@users.noreply.github.com
|
6c060566f70b199370d957617bc8d3b868155b0d
|
121a41645d86109d35d9bfffd0a499a0815cec04
|
/scripts/list.py
|
02eea217866ebb6395c056d98a7c235ffefae52f
|
[
"MIT"
] |
permissive
|
shrut1996/docker-lambda
|
f1bd30a8f81ca57dc138a7f84862567b265a9694
|
b93b60c774bac9d2a59644adc850ad4fcad99b71
|
refs/heads/master
| 2023-02-02T13:06:07.637913
| 2020-12-23T20:57:15
| 2020-12-23T20:57:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,113
|
py
|
import json
from boto3.session import Session as boto3_session
AWS_REGIONS = [
"ap-northeast-1",
"ap-northeast-2",
"ap-south-1",
"ap-southeast-1",
"ap-southeast-2",
"ca-central-1",
"eu-central-1",
"eu-north-1",
"eu-west-1",
"eu-west-2",
"eu-west-3",
"sa-east-1",
"us-east-1",
"us-east-2",
"us-west-1",
"us-west-2",
]
layers = [
"gdal24",
"gdal31",
"gdal32",
"gdal24-al2",
"gdal31-al2",
"gdal32-al2",
]
def main():
    """Print (as JSON) the latest version and ARN of each GDAL Lambda layer per region."""
    results = []
    for region in AWS_REGIONS:
        res = {"region": region, "layers": []}
        session = boto3_session(region_name=region)
        client = session.client("lambda")
        for layer in layers:
            response = client.list_layer_versions(LayerName=layer)
            # NOTE(review): assumes list_layer_versions returns the newest
            # version first -- confirm against the Lambda API documentation.
            latest = response["LayerVersions"][0]
            res["layers"].append(dict(
                name=layer,
                arn=latest["LayerVersionArn"],
                version=latest["Version"]
            ))
        results.append(res)
    print(json.dumps(results))


if __name__ == '__main__':
    main()
|
[
"vincent.sarago@gmail.com"
] |
vincent.sarago@gmail.com
|
58b2b7a50bc30bf790f06105fb090b6e5bf93764
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_wildernesses.py
|
f8915af5d463f35dae83accf49c99e5c9c60a17f
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
# class header
class _WILDERNESSES():
    """Word entry for "WILDERNESSES", an other-form (plural) of "wilderness"."""

    def __init__(self,):
        self.name = "WILDERNESSES"
        # Bug fix: the original assigned the bare name `wilderness`, which is
        # undefined here and raised NameError on instantiation.  Store the
        # base word as a string instead (it already appears in `basic`).
        self.definitions = 'wilderness'
        self.parents = []
        self.childen = []  # (sic) original attribute name kept for compatibility
        self.properties = []
        self.jsondata = {}
        self.basic = ['wilderness']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
d0be8abb5d97d27ebae00c6ee20aff4ae9dfa35c
|
be5ea20226c37d81f1ccb2f704d8825d36e88765
|
/04. Encapsulation/LAB/demo.py
|
482f5ae858cdc0745c96c0db38ed2daa2978bfc7
|
[] |
no_license
|
dimDamyanov/PythonOOP
|
3845e450e5a48fef4f70a186664e07c0cd60e09b
|
723204f5b7e953874fac9314e48eb1d1628d6ff5
|
refs/heads/main
| 2023-04-07T18:00:36.735248
| 2021-04-19T20:57:14
| 2021-04-19T20:57:14
| 341,329,346
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,602
|
py
|
class Person:
    """A person with a validated first name and age.

    Raises ValueError for a falsy first name or an age outside
    [MIN_AGE, MAX_AGE].
    """

    MIN_AGE = 0    # inclusive lower bound for a valid age
    MAX_AGE = 150  # inclusive upper bound for a valid age

    def __init__(self, first_name, last_name, age, city=None):
        self.first_name = first_name
        self.last_name = last_name
        self.set_age(age)
        # Bug fix: `city` was accepted by the constructor but silently
        # discarded; keep it so callers passing a city actually store it.
        self.city = city

    @property
    def full_name(self):
        """Return "<first_name> <last_name>"."""
        return f'{self.first_name} {self.last_name}'

    @property
    def first_name(self):
        return self.__first_name

    @first_name.setter
    def first_name(self, new_name):
        # Reject falsy names (None or the empty string).
        if not new_name:
            raise ValueError('Name cannot be None')
        self.__first_name = new_name

    @property
    def last_name(self):
        return self.__last_name

    @last_name.setter
    def last_name(self, new_name):
        self.__last_name = new_name

    def set_age(self, new_age):
        """Store the age, raising ValueError when outside [MIN_AGE, MAX_AGE]."""
        if new_age < Person.MIN_AGE or Person.MAX_AGE < new_age:
            raise ValueError(f'Age must be between {Person.MIN_AGE} and {Person.MAX_AGE}')
        self.__age = new_age

    def get_age(self):
        """Return the stored age."""
        return self.__age
# Smoke test: construct a person, then rename via the property setters.
pesho = Person('Pesho', 'Ivanov', 1)
print(pesho.__dict__)
print(pesho.full_name)
pesho.first_name = 'Ivan'
pesho.last_name = 'Peshov'
print(pesho.__dict__)
print(pesho.full_name)
|
[
"dim.damianov@gmail.com"
] |
dim.damianov@gmail.com
|
f629d3d3a29b863c872af3c01286eb1e11683ac8
|
cbda89443b351bb2047180dad4e300c13dc3df7f
|
/Crystals/Morpurgo_all_sp_Reorgs_qsplit_Molscreen_largepiatoms_sq/Jobs/Pc/Pc_neut_neut_inner3_outer0/Pc_neut_neut_inner3_outer0.py
|
a687794c0396bfb9bcc5416ff9532b00704451b0
|
[] |
no_license
|
sheridanfew/pythonpolarisation
|
080f52979f98d26360a46412a10c8e3f51ee4549
|
178e2684e9a239a8e60af5f7b1eb414ac5f31e92
|
refs/heads/master
| 2021-07-10T01:07:40.978790
| 2021-03-11T16:56:37
| 2021-03-11T16:56:37
| 96,101,351
| 0
| 0
| null | 2017-07-03T13:37:06
| 2017-07-03T10:54:52
| null |
UTF-8
|
Python
| false
| false
| 5,755
|
py
|
import sys
sys.path.append('../../../../../')
from BasicElements import *
from BasicElements.Register import GetRegister
from BasicElements.MoleculeFactory import ReadMoleculeType
from BasicElements.MoleculeFactory import GetMolecule
from BasicElements.Crystal import *
from Polarizability.GetDipoles import get_dipoles,split_dipoles_onto_atoms
from Polarizability import *
from Polarizability.GetEnergyFromDips import *
from Polarizability.JMatrix import JMatrix
import numpy as np
from math import *
from time import gmtime, strftime
import os

# Polarisation-energy calculation (Python 2) for a neutral pentacene (Pc)
# crystal: builds the crystal from unit-cell parameters, solves for induced
# dipoles, and writes energies/dipoles/translation vectors to disk.
print strftime("%a, %d %b %Y %X +0000", gmtime())

# Charge-state label -> molecular charge.
qdict={"anion": -1.0, "neut": 0.0, "cation": 1.0}

name='Pc_neut_neut_inner3_outer0'
#For crystals here, all cubic and centred at centre
insize=3
#number of TVs in each dir central mol is from edge of inner region
outsize=0
state='neut'
mols_cen=['sp_Pc_mola_neut.xyz','sp_Pc_molb_neut.xyz']
mols_sur=['sp_Pc_mola_neut.xyz','sp_Pc_molb_neut.xyz']
mols_outer=['sp_Pc_mola_neut.xyz','sp_Pc_molb_neut.xyz']
Natoms=22
#From cif:
'''
Pc
_cell_length_a 7.900
_cell_length_b 6.060
_cell_length_c 16.010
_cell_angle_alpha 101.90
_cell_angle_beta 112.60
_cell_angle_gamma 85.80
_cell_volume 692.384
'''
#Get translation vectors:
# Cell lengths converted from Angstrom to Bohr (divide by the Bohr radius).
a=7.900/0.5291772109217
b=6.060/0.5291772109217
c=16.010/0.5291772109217
alpha=101.90*(pi/180)
beta=112.60*(pi/180)
# NOTE(review): gamma is set to 90 deg although the cif block above gives
# 85.80 -- confirm this is intentional.
gamma=90*(pi/180)
cif_unit_cell_volume=692.384/(a*b*c*(0.5291772109217**3))
cell_volume=sqrt(1 - (cos(alpha)**2) - (cos(beta)**2) - (cos(gamma)**2) + (2*cos(alpha)*cos(beta)*cos(gamma)))
#Converts frac coords to carts
matrix_to_cartesian=np.matrix( [[a, b*cos(gamma), c*cos(beta)],
                                [0, b*sin(gamma), c*(cos(alpha) - cos(beta)*cos(gamma))/sin(gamma)],
                                [0, 0, c*cell_volume/sin(gamma)]])
#carts to frac
matrix_to_fractional=matrix_to_cartesian.I
#TVs, TV[0,1,2] are the three translation vectors.
TV=matrix_to_cartesian.T

# Interaction cutoff (Bohr) passed to get_dipoles.
cut=8.0

totsize=insize+outsize
#number of TVs in each dir nearest c inner mol is from edge of outer region
cenpos=[totsize,totsize,totsize]
length=[2*totsize+1,2*totsize+1,2*totsize+1]
maxTVs=insize
outer_maxTVs=insize+outsize
#for diamond outer, don't specify for cube and will fill to cube edges.
print 'name: ',name,'mols_cen: ', mols_cen,' mols_sur: ',mols_sur,' TVs: ', TV

# Place Molecules
prot_neut_cry=Crystal(name=name,mols_cen=mols_cen,mols_sur=mols_sur,cenpos=cenpos,length=length,TVs=TV,maxTVs=maxTVs,mols_outer=mols_outer,outer_maxTVs=outer_maxTVs)
#prot_neut_cry._mols contains all molecules.
#mols[0] contains a list of all molecules in position a, mols[1] all mols in pos'n b, etc.
#mols[0][x,y,z] contains molecule a in position x,y,z
#mols may as such be iterated over in a number of ways to consider different molecules.
prot_neut_cry().print_posns()

#Calculate Properties:
print strftime("%a, %d %b %Y %X +0000", gmtime())
E0 = np.matrix([0.,0.,0.])
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc jm'
screenradius=1.6623/(Natoms**2)
# Thole paper screenradius value for fit to components of pol. tensor divided by no. atoms in mol. We choose this screenradius value for smearing of charge as, with near planar mols, in some dirs we have molecule-like polarisabilities with near atom-like separations.
#This form of screenradius will result in charge being smeared along the separation axis of molecules by NAtoms*(Thole's value for a single atom)
jm = JMatrix(jmtype='TholeExp',screenradius=screenradius)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc dips:'
d = get_dipoles(E0=E0,jm=jm._m,cutoff=cut)
print strftime("%a, %d %b %Y %X +0000", gmtime())
Efield = get_electric_field(E0)
potential = get_potential()
print strftime("%a, %d %b %Y %X +0000", gmtime())
#print 'dips', d
print 'splitting dips onto atoms'
split_d = split_dipoles_onto_atoms(d)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'summing dips:'
tot = np.matrix([0.,0.,0.])
for dd in split_d:
    tot += dd
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'total dip moment', tot

# Energies converted from Hartree to eV (x 27.211).
Uqq = np.multiply(get_U_qq(potential=potential),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqq', Uqq
Uqd = np.multiply(get_U_qdip(dips=d,Efield=Efield),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqd', Uqd
Udd = np.multiply(get_U_dipdip(jm=jm._m,dips=d.T),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Udd', Udd
energyev = Udd+Uqd+Uqq
print 'energyev', energyev
energy=energyev/27.211
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Making .dat cross sections for gnuplot'

# print TVs
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_TVs.dat' % name, 'w')
TVstr=str(str(TV[0,0]) + ' ' + str(TV[0,1]) + ' ' + str(TV[0,2]) + '\n' + str(TV[1,0]) + ' ' + str(TV[1,1]) + ' ' + str(TV[1,2]) + '\n' + str(TV[2,0]) + ' ' + str(TV[2,1]) + ' ' + str(TV[2,2])+ '\n')
f.write(TVstr)
f.flush()
f.close()

# print dipoles
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_dipoles.dat' % name, 'w')
for dd in split_d:
    dstr=str(dd)
    f.write(dstr)
    f.write('\n')
f.flush()
f.close()

# print properties for charge in centrepos
time=strftime("%a, %d %b %Y %X +0000", gmtime())
f = open('%s_properties.csv' % name, 'w')
f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\tenergyev\tUqq\tUqd\tUdd\tTotdip_x\tTotdip_y\tTotdip_z')
f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,energyev,Uqq,Uqd,Udd,tot[0,0],tot[0,1],tot[0,2]))
f.flush()
f.close()

print 'Job Completed Successfully.'
|
[
"sheridan.few@gmail.com"
] |
sheridan.few@gmail.com
|
959cb13f99cd46f5fe46d460b2415eabb7781041
|
2dd5e62eddf7679d195efcad62d3b61c8617c5b7
|
/practice/Practice_6/Practice_6_solutions (2)/sum_of_digits.py
|
fa6eec35300ed0d8242ab0eec86e3c44381b9fcc
|
[] |
no_license
|
unswit/COMP9021_2
|
03b38314a4723a4eafbb758954bc9f78f1556b0e
|
713f2c3b31a2e5081e0602a19660056eb71b1144
|
refs/heads/master
| 2021-04-24T11:53:07.760756
| 2020-03-25T23:47:03
| 2020-03-25T23:47:03
| 250,114,841
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,450
|
py
|
# COMP9021 Practice 6 - Solutions
'''
Prompts the user for two numbers, say available_digits and desired_sum, and outputs
the number of ways of selecting digits from available_digits that sum up to desired_sum.
'''
import sys
def solve(available_digits, desired_sum):
    """Count the ways of picking digits of *available_digits* that sum to *desired_sum*.

    Each decimal digit position is a distinct item, so repeated digit
    values are counted separately.  This is an iterative subset-sum
    formulation of the original take-or-skip recursion.
    """
    # ways[total] = number of digit subsets seen so far that sum to `total`.
    # The empty subset gives a sum of 0.
    ways = {0: 1}
    remaining = available_digits
    while remaining:
        digit = remaining % 10
        remaining //= 10
        # Every existing subset can either skip this digit (kept as-is)
        # or include it (shifting its total up by `digit`).
        updated = dict(ways)
        for total, count in ways.items():
            updated[total + digit] = updated.get(total + digit, 0) + count
        ways = updated
    return ways.get(desired_sum, 0)
# Read the digit pool; the sign is discarded (abs) so "-12" behaves like "12".
try:
    available_digits = abs(int(input('Input a number that we will use as available digits: ')))
except ValueError:
    print('Incorrect input, giving up.')
    sys.exit()
# Read the target sum (may be negative; solve() then finds no solutions).
try:
    desired_sum = int(input('Input a number that represents the desired sum: '))
except ValueError:
    print('Incorrect input, giving up.')
    sys.exit()
nb_of_solutions = solve(available_digits, desired_sum)
# Grammar-aware report of the count.
if nb_of_solutions == 0:
    print('There is no solution.')
elif nb_of_solutions == 1:
    print('There is a unique solution.')
else:
    print(f'There are {nb_of_solutions} solutions.')
|
[
"55487375+ahcjdxzd@users.noreply.github.com"
] |
55487375+ahcjdxzd@users.noreply.github.com
|
395b9d03b55ce1b219ff5076d2f1a45722b308ec
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-network/azure/mgmt/network/v2018_06_01/models/frontend_ip_configuration_py3.py
|
54bdb1af858c0479b850d858e454f80f14f3222f
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 4,850
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource_py3 import SubResource
class FrontendIPConfiguration(SubResource):
    """Frontend IP address of the load balancer.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar inbound_nat_rules: Read only. Inbound rules URIs that use this
     frontend IP.
    :vartype inbound_nat_rules:
     list[~azure.mgmt.network.v2018_06_01.models.SubResource]
    :ivar inbound_nat_pools: Read only. Inbound pools URIs that use this
     frontend IP.
    :vartype inbound_nat_pools:
     list[~azure.mgmt.network.v2018_06_01.models.SubResource]
    :ivar outbound_nat_rules: Read only. Outbound rules URIs that use this
     frontend IP.
    :vartype outbound_nat_rules:
     list[~azure.mgmt.network.v2018_06_01.models.SubResource]
    :ivar load_balancing_rules: Gets load balancing rules URIs that use this
     frontend IP.
    :vartype load_balancing_rules:
     list[~azure.mgmt.network.v2018_06_01.models.SubResource]
    :param private_ip_address: The private IP address of the IP configuration.
    :type private_ip_address: str
    :param private_ip_allocation_method: The Private IP allocation method.
     Possible values are: 'Static' and 'Dynamic'. Possible values include:
     'Static', 'Dynamic'
    :type private_ip_allocation_method: str or
     ~azure.mgmt.network.v2018_06_01.models.IPAllocationMethod
    :param subnet: The reference of the subnet resource.
    :type subnet: ~azure.mgmt.network.v2018_06_01.models.Subnet
    :param public_ip_address: The reference of the Public IP resource.
    :type public_ip_address:
     ~azure.mgmt.network.v2018_06_01.models.PublicIPAddress
    :param provisioning_state: Gets the provisioning state of the public IP
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param name: The name of the resource that is unique within a resource
     group. This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    :param zones: A list of availability zones denoting the IP allocated for
     the resource needs to come from.
    :type zones: list[str]
    """

    # Attributes marked readonly are populated by the service; client-set
    # values for them are ignored when serializing a request.
    _validation = {
        'inbound_nat_rules': {'readonly': True},
        'inbound_nat_pools': {'readonly': True},
        'outbound_nat_rules': {'readonly': True},
        'load_balancing_rules': {'readonly': True},
    }

    # Maps Python attribute names to REST JSON paths and msrest wire types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'inbound_nat_rules': {'key': 'properties.inboundNatRules', 'type': '[SubResource]'},
        'inbound_nat_pools': {'key': 'properties.inboundNatPools', 'type': '[SubResource]'},
        'outbound_nat_rules': {'key': 'properties.outboundNatRules', 'type': '[SubResource]'},
        'load_balancing_rules': {'key': 'properties.loadBalancingRules', 'type': '[SubResource]'},
        'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
        'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'Subnet'},
        'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'PublicIPAddress'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'zones': {'key': 'zones', 'type': '[str]'},
    }

    def __init__(self, *, id: str=None, private_ip_address: str=None, private_ip_allocation_method=None, subnet=None, public_ip_address=None, provisioning_state: str=None, name: str=None, etag: str=None, zones=None, **kwargs) -> None:
        super(FrontendIPConfiguration, self).__init__(id=id, **kwargs)
        # Read-only collections start as None; the service fills them in.
        self.inbound_nat_rules = None
        self.inbound_nat_pools = None
        self.outbound_nat_rules = None
        self.load_balancing_rules = None
        self.private_ip_address = private_ip_address
        self.private_ip_allocation_method = private_ip_allocation_method
        self.subnet = subnet
        self.public_ip_address = public_ip_address
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
        self.zones = zones
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
017cf440806ff69d7a9e676592b6f089ffd064d5
|
9b1446b26e81a79c303f9799fb6a91785c7adb03
|
/.history/Code/sample_20200121111638.py
|
ffbcc7d22240798053cb4ec12fdcdd1fb8d207b8
|
[] |
no_license
|
SamirIngley/CS1.2-Tweet-Gen
|
017ea15b1113881a156ff24682828bc654eb6c81
|
bcd95fa63e05849cbf8e36230d8e31032b99daaa
|
refs/heads/master
| 2020-12-14T20:19:57.733290
| 2020-08-04T23:19:23
| 2020-08-04T23:19:23
| 234,856,234
| 0
| 0
| null | 2020-06-05T21:13:04
| 2020-01-19T07:05:55
|
Python
|
UTF-8
|
Python
| false
| false
| 700
|
py
|
import random

from histogram import list_hist, dict_hist
from stochastic import stoch
def prob_sample(histo):
    ''' Input a histogram. Output a randomly chosen word from the histogram
        relative to its frequency in the body of text.
    '''
    words = stoch(histo)
    # print(words)
    # Throw a "dart" at 0..100 and walk the (word, frequency) pairs.
    dart = random.randint(0, 100)
    counter = 0
    word = None
    # NOTE(review): this looks like weighted (dart-throwing) sampling, but
    # the loop keeps iterating over ALL pairs and only updates while
    # counter < dart, so it effectively returns the first word whose
    # cumulative frequency passes the dart -- assuming stoch() yields
    # (word, frequency) pairs whose frequencies total ~100.  Confirm both
    # the intended stopping behaviour and stoch()'s output contract.
    for pair in words:
        if counter < dart:
            counter += pair[1]
            word = pair[0]
    return dart, counter, word
if __name__ == "__main__":
    # Demo: sample from both the list- and dict-based histograms of source.txt.
    # NOTE(review): dict_hist is used below but is not imported at the top of
    # this file (only list_hist is) -- this raises NameError unless the
    # import is added.
    listo_histo = list_hist("source.txt")
    dicto_histo = dict_hist("source.txt")
    print(dicto_histo)
    print(prob_sample(listo_histo))
    print(prob_sample(dicto_histo))
|
[
"samir.ingle7@gmail.com"
] |
samir.ingle7@gmail.com
|
5f5cd720a6ca6b6a01cdd4604aebc12afb3faa52
|
c21f50c5090ca09386d868c0820873b4c9e0a7aa
|
/setup.py
|
354963e580052371621028baec4bd762a02f9d34
|
[] |
no_license
|
sujanshresthanet/api-wrappers-python
|
38d192a7033ec40f591db1dede1661787b5cf025
|
bbe56940f83ca426aac25843b3410a3d164be2e3
|
refs/heads/master
| 2023-04-10T02:34:19.039252
| 2017-10-03T14:36:34
| 2017-10-03T14:36:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 804
|
py
|
# coding: utf-8
"""
Moosend API
TODO: Add a description
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import sys
from setuptools import setup, find_packages
# Distribution metadata for the generated Moosend API wrapper package.
NAME = "moosend-api-wrapper"
VERSION = "1.0.0"

# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools

# Runtime dependencies of the swagger-codegen-generated client.
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"]

setup(
    name=NAME,
    version=VERSION,
    description="Moosend API",
    author_email="",
    url="",
    keywords=["Swagger", "Moosend API"],
    install_requires=REQUIRES,
    packages=find_packages(),
    include_package_data=True,
    long_description="""\
TODO: Add a description
"""
)
|
[
"theo@moosend.com"
] |
theo@moosend.com
|
0a9d68e7bafd2655f96aa01861d275921ad3c066
|
ee8c4c954b7c1711899b6d2527bdb12b5c79c9be
|
/assessment2/amazon/run/core/controllers/cautious.py
|
1fae757d76605f6891ed5e82fbdebe496de34115
|
[] |
no_license
|
sqlconsult/byte
|
02ac9899aebea4475614969b594bfe2992ffe29a
|
548f6cb5038e927b54adca29caf02c981fdcecfc
|
refs/heads/master
| 2021-01-25T14:45:42.120220
| 2018-08-11T23:45:31
| 2018-08-11T23:45:31
| 117,135,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
#!/usr/bin/env python3
from flask import Blueprint, Flask, render_template, request, url_for

# Blueprint for routes served under the /cautious URL prefix.
controller = Blueprint('cautious', __name__, url_prefix='/cautious')

# Scaffolding for a future title-lookup route (left commented by the author):
# @controller.route('/<string:title>', methods=['GET'])
# def lookup(title):
#     if title == 'Republic':                      # TODO 2
#         return render_template('republic.html')  # TODO 2
#     else:
#         pass
|
[
"sqlconsult@hotmail.com"
] |
sqlconsult@hotmail.com
|
5820d7928b355d65f1bfbf6b21ee461d09f9afca
|
d6dda992a02fc351810e75856ffb00eb391607d7
|
/saleor/saleor/product/migrations/0084_auto_20181227_1246.py
|
452f7bbc928496828099163ed0636c6f98d9b228
|
[
"BSD-3-Clause"
] |
permissive
|
rds0751/harshnew
|
f99f26a49c58810c27855ef5b0809a6b9d5b1df8
|
5b49102afdb26587223ff7549ce6c5c8820b272e
|
refs/heads/master
| 2022-12-15T09:40:38.749726
| 2018-12-28T06:31:05
| 2018-12-28T06:31:05
| 162,980,274
| 0
| 0
| null | 2022-12-08T01:30:01
| 2018-12-24T10:42:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,112
|
py
|
# Generated by Django 2.1.4 on 2018-12-27 07:16
from django.db import migrations, models
import django_prices.models
class Migration(migrations.Migration):
    """Auto-generated migration: adds an integer ``GST`` field to Product and
    switches the money fields on Product/ProductVariant to INR currency.
    """

    dependencies = [
        ('product', '0083_auto_20181226_1158'),
    ]

    operations = [
        # New nullable integer field on Product.
        # NOTE(review): the unit (percentage vs amount) of GST is not stated
        # anywhere in this migration -- confirm against the model definition.
        migrations.AddField(
            model_name='product',
            name='GST',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='product',
            name='price',
            field=django_prices.models.MoneyField(currency='INR', decimal_places=2, max_digits=12),
        ),
        migrations.AlterField(
            model_name='productvariant',
            name='cost_price',
            field=django_prices.models.MoneyField(blank=True, currency='INR', decimal_places=2, max_digits=12, null=True),
        ),
        migrations.AlterField(
            model_name='productvariant',
            name='price_override',
            field=django_prices.models.MoneyField(blank=True, currency='INR', decimal_places=2, max_digits=12, null=True),
        ),
    ]
|
[
"rsingh_bemba16@thapar.edu"
] |
rsingh_bemba16@thapar.edu
|
9555328ed60bf5236e6a59ad31192c7dd7c909b8
|
e40111dda0ad509d474adfe4c52ae9b5525f388e
|
/show_weather/models.py
|
f686f6d78d17794aa0d1b756e6587e61883f1c30
|
[] |
no_license
|
XeyyamSherif/Weather-App
|
2fb997fcfb5a6885ffffbf05e6ebe2127fd2bccf
|
6de019cf289ff60d299b9f1e58c1f8c04fa3517f
|
refs/heads/master
| 2023-01-23T06:57:55.655632
| 2020-12-04T20:10:42
| 2020-12-04T20:10:42
| 318,623,089
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
from django.db import models


# Bug fix: unresolved git merge-conflict markers (<<<<<<< / ======= />>>>>>>)
# had been committed to this file, making it a Python syntax error.  Both
# conflict sides contained the identical model, so the conflict resolves to
# this single definition.
class added_cities(models.Model):
    """A city the user has added to the weather watch list."""

    # Display name of the city as entered by the user.
    city_name = models.CharField(max_length=100)
    # Date the city was added.
    added_time = models.DateField()
|
[
"you@example.com"
] |
you@example.com
|
fb81fc3745a2e0bb6e0650163a7b729c67d49b21
|
8e221c0f0528a2c26029508ae4f618021bb81788
|
/infra_macros/fbcode_macros/tests/ocaml_library_test.py
|
34067e01873c5f218fe7f1f727a28225e1a9e26a
|
[
"BSD-3-Clause"
] |
permissive
|
nataliejameson/buckit
|
4f7127c941e6ad621fd50424b6d171dd715f8bc5
|
83b4ba7fc7a7a9d28b7a66117de6d6beccfdf7f8
|
refs/heads/master
| 2021-10-09T12:12:31.062734
| 2018-12-21T04:38:35
| 2018-12-21T04:40:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,237
|
py
|
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import, division, print_function, unicode_literals
import tests.utils
from tests.utils import dedent
class OcamlLibraryTest(tests.utils.TestCase):
    """Verifies that a BUCK file using the ``ocaml_library`` macro parses."""

    # Build-def bundles made available to the test project's BUCK files.
    includes = [("@fbcode_macros//build_defs:ocaml_library.bzl", "ocaml_library")]

    @tests.utils.with_project()
    def test_ocaml_library_parses(self, root):
        # Write a minimal BUCK file exercising the macro, then assert that
        # `buck audit` accepts it (i.e. the macro evaluates without error).
        root.addFile(
            "BUCK",
            dedent(
                """
                load("@fbcode_macros//build_defs:ocaml_library.bzl", "ocaml_library")
                ocaml_library(
                    name = "foo",
                    srcs = [
                        "foo.ml",
                        "thrift_IDL.ml",
                    ],
                    warnings_flags = "-27-42",
                    deps = [
                        "//dep:hh_json",
                    ],
                    external_deps = [],
                )
                """
            ),
        )
        self.assertSuccess(root.runAudit(["BUCK"]))
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
72a4ca7de3bc83da307ab4b894c973fd5fde295d
|
b326dac9d39e3004b6bc5ff6cb95adbd0766811c
|
/aleph/logic/datasets.py
|
caaf9eeec4e1794c762c9c0391a29f8fe382098e
|
[
"MIT"
] |
permissive
|
singingwolfboy/aleph
|
9c73d93f73cb6a7f4df9c648e347ec605cf00964
|
5f30d22062815eb91d22705420c0815743a27478
|
refs/heads/master
| 2021-01-19T11:22:03.889408
| 2017-04-07T14:32:01
| 2017-04-10T09:06:23
| 87,958,403
| 0
| 0
| null | 2017-04-11T16:53:33
| 2017-04-11T16:53:33
| null |
UTF-8
|
Python
| false
| false
| 1,988
|
py
|
import time
import logging
from random import randrange
from elasticsearch import ElasticsearchException
from elasticsearch.helpers import BulkIndexError
from aleph.core import celery, datasets
from aleph.index import index_items
log = logging.getLogger(__name__)
QUEUE_PAGE = 1000
@celery.task()
def load_rows(dataset_name, query_idx, rows):
    """Load a single batch of QUEUE_PAGE rows from the given query."""
    dataset = datasets.get(dataset_name)
    query = list(dataset.queries)[query_idx]
    entities = {}
    links = []
    for row in rows:
        # Entities generated from this row, keyed by entity name, so links
        # below can resolve their endpoints.
        entity_map = {}
        for entity in query.entities:
            data = entity.to_index(row)
            if data is not None:
                entity_map[entity.name] = data
                entities[data['id']] = data
        for link in query.links:
            # Each link is indexed in both directions.
            for inverted in [False, True]:
                data = link.to_index(row, entity_map, inverted=inverted)
                if data is not None:
                    links.append(data)
    while True:
        # Retry indexing indefinitely on Elasticsearch failures with a
        # random 60-180s backoff.  NOTE(review): there is no retry cap, so
        # a persistent failure keeps the worker busy forever.
        try:
            index_items(entities, links)
            break
        except (ElasticsearchException, BulkIndexError) as exc:
            delay = randrange(60, 180)
            log.info("%s - Sleep %ss...", exc, delay)
            time.sleep(delay)
    log.info("[%r] Indexed %s rows as %s entities, %s links...",
             dataset_name, len(rows), len(entities), len(links))
def load_dataset(dataset):
    """Queue indexing tasks for every entity and link in *dataset*.

    Rows are accumulated into batches of QUEUE_PAGE and dispatched to the
    load_rows celery task; a progress line is logged every 10000 rows.
    """
    for query_idx, query in enumerate(dataset.queries):
        batch = []
        for row_idx, row in enumerate(query.iterrows()):
            batch.append(row)
            if len(batch) >= QUEUE_PAGE:
                load_rows.delay(dataset.name, query_idx, batch)
                batch = []
            if row_idx != 0 and row_idx % 10000 == 0:
                log.info("Tasked %s rows...", row_idx)
        # Flush the final partial batch, if any.
        if batch:
            load_rows.delay(dataset.name, query_idx, batch)
|
[
"friedrich@pudo.org"
] |
friedrich@pudo.org
|
4f679263e2a338a3037aebf258f4d69fe4fd38b1
|
c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce
|
/flask/flaskenv/Lib/site-packages/tensorflow/python/feature_column/feature_column.py
|
4aacbcbb511263ab68f2ebae137b68e0ba1b7c28
|
[] |
no_license
|
AhsonAslam/webapi
|
54cf7466aac4685da1105f9fb84c686e38f92121
|
1b2bfa4614e7afdc57c9210b0674506ea70b20b5
|
refs/heads/master
| 2020-07-27T06:05:36.057953
| 2019-09-17T06:35:33
| 2019-09-17T06:35:33
| 208,895,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:e9f74559da9c8672bb5a313b705ca9f480e002b13b23285d68619b4547aeeacf
size 126015
|
[
"github@cuba12345"
] |
github@cuba12345
|
448193cf4f842da161a46960e4711499950d8a94
|
9ca9cad46f2358717394f39e2cfac2af4a2f5aca
|
/Week05/02_manual_gradient/02_manual_gradient_YYH.py
|
6273ab431684da732c6fefec84adb301c33e7bac
|
[] |
no_license
|
Artinto/Python_and_AI_Study
|
ddfd165d1598914e99a125c3019a740a7791f6f6
|
953ff3780287825afe9ed5f9b45017359707d07a
|
refs/heads/main
| 2023-05-05T15:42:25.963855
| 2021-05-24T12:24:31
| 2021-05-24T12:24:31
| 325,218,591
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,416
|
py
|
plt.show()  # display the figure built by the plotting code earlier in the file

# Training data: paired x/y samples (the underlying relationship is y = 2x).
x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]

w = 1.0  # a random guess: random value (initial weight before training)
# our model forward pass #선형함수 설정
def forward(x):
    """Linear model: predict y as w * x using the module-level weight w."""
    return w * x
# Loss function #loss 구하기
def loss(x, y):
    """Squared error between the model's prediction for x and the target y."""
    residual = forward(x) - y
    return residual * residual
# compute gradient #기울기를 구하는 것으로 loss의 변화량을 w의 변화량으로 나눈 식
def gradient(x, y):  # d_loss/d_w
    """Analytic derivative of the squared loss w.r.t. w: 2x(xw - y)."""
    return 2 * x * (x * w - y)
# Before training #
print("Prediction (before training)", 4, forward(4))
# Print the model's prediction for x=4 before any training happens.

# Training loop
for epoch in range(10):  # run gradient descent for 10 epochs
    for x_val, y_val in zip(x_data, y_data):
        # Compute derivative w.r.t to the learned weights
        # Update the weights
        # Compute the loss and print progress
        grad = gradient(x_val, y_val)  # gradient for this single sample
        w = w - 0.01 * grad  # nudge w toward the minimum (learning rate 0.01)
        print("\tgrad: ", x_val, y_val, round(grad, 2))  # rounded to 2 decimal places
        l = loss(x_val, y_val)  # loss for this sample (after the update)
    print("progress:", epoch, "w=", round(w, 2), "loss=", round(l, 2))

# After training
print("Predicted score (after training)", "4 hours of studying: ", forward(4))
# Check the prediction for x=4 again, now with the trained weight.
|
[
"noreply@github.com"
] |
Artinto.noreply@github.com
|
c3b4c58d5324c2b1d589cf777773eb97388e1235
|
7b3711d4c6d7284255ba0270d49d120f984bf7c6
|
/problems/099_recovery_binary_search_tree.py
|
14246631d7cb839d132d8494b5e492975510c78c
|
[] |
no_license
|
loganyu/leetcode
|
2d336f30feb55379aaf8bf0273d00e11414e31df
|
77c206305dd5cde0a249365ce7591a644effabfc
|
refs/heads/master
| 2023-08-18T09:43:10.124687
| 2023-08-18T00:44:51
| 2023-08-18T00:44:51
| 177,875,222
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,443
|
py
|
'''
You are given the root of a binary search tree (BST), where exactly two nodes of the tree were swapped by mistake. Recover the tree without changing its structure.
Follow up: A solution using O(n) space is pretty straight forward. Could you devise a constant space solution?
Example 1:
Input: root = [1,3,null,null,2]
Output: [3,1,null,null,2]
Explanation: 3 cannot be a left child of 1 because 3 > 1. Swapping 1 and 3 makes the BST valid.
Example 2:
Input: root = [3,1,4,null,null,2]
Output: [2,1,4,null,null,3]
Explanation: 2 cannot be in the right subtree of 3 because 2 < 3. Swapping 2 and 3 makes the BST valid.
Constraints:
The number of nodes in the tree is in the range [2, 1000].
-231 <= Node.val <= 231 - 1
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
# iterative
class Solution:
    def recoverTree(self, root: TreeNode) -> None:
        """
        Do not return anything, modify root in-place instead.
        """
        # Iterative inorder traversal. In a valid BST the inorder sequence is
        # strictly increasing, so the two swapped nodes appear as inversions
        # (a node smaller than its inorder predecessor).
        stack = []
        x = y = pred = None  # x/y: the two swapped nodes; pred: previous inorder node
        while stack or root:
            while root:
                stack.append(root)
                root = root.left
            root = stack.pop()
            if pred and root.val < pred.val:
                # Inversion: the second offender is always the current node.
                y = root
                if x is None:
                    x = pred  # first inversion pins x to the predecessor
                else:
                    break  # second inversion: both nodes known, stop early
            pred = root
            root = root.right
        # Swap the values to repair the tree without changing its structure.
        x.val, y.val = y.val, x.val
# recursive
class Solution:
    def recoverTree(self, root: TreeNode) -> None:
        """
        Do not return anything, modify root in-place instead.
        """
        def find_two_swapped(root: TreeNode):
            # Recursive inorder walk recording the two out-of-order nodes
            # into the enclosing x/y/pred via nonlocal.
            nonlocal x, y, pred
            if root is None:
                return
            find_two_swapped(root.left)
            if pred and root.val < pred.val:
                y = root
                # First inversion fixes x; on the second both nodes are
                # known, so unwind immediately.
                if x is None:
                    x = pred
                else:
                    return
            pred = root
            find_two_swapped(root.right)

        x = y = pred = None
        find_two_swapped(root)
        # Swap values in place to restore BST order.
        x.val, y.val = y.val, x.val
# Morris Inorder Traversal
class Solution:
    def recoverTree(self, root: TreeNode) -> None:
        """
        Do not return anything, modify root in-place instead.
        """
        # Morris traversal: O(1) extra space inorder walk using temporary
        # "threads" from each left subtree's rightmost node back to its root.
        x = y = predecessor = pred = None
        while root:
            if root.left:
                # Find root's inorder predecessor within its left subtree.
                predecessor = root.left
                while predecessor.right and predecessor.right != root:
                    predecessor = predecessor.right
                if predecessor.right is None:
                    # First visit: thread predecessor -> root, descend left.
                    predecessor.right = root
                    root = root.left
                else:
                    # Second visit: left subtree done; visit root and unthread.
                    if pred and root.val < pred.val:
                        y = root
                        if x is None:
                            x = pred
                    pred = root
                    predecessor.right = None
                    root = root.right
            else:
                # No left child: visit root directly.
                if pred and root.val < pred.val:
                    y = root
                    if x is None:
                        x = pred
                pred = root
                root = root.right
        # Swap the two offending values to repair the BST.
        x.val, y.val = y.val, x.val
|
[
"logan.yu@cadre.com"
] |
logan.yu@cadre.com
|
d878db88ea2851312cf55dc51d7c157b4ade92fb
|
7c1b599c5b5be0ec5ad142e52e5c15a7c9c8ea12
|
/venv/Lib/site-packages/tensorflow/contrib/autograph/converters/directives.py
|
66fd7519478c8046de11a5e758049de8851325a4
|
[] |
no_license
|
namtran98/NSTAR---MuddHacks
|
88d602a0847bb923088c7f0be6d5c2980b11a36d
|
cbc04873e1f02cb6b62a7b77c5c44eb4e9422ab8
|
refs/heads/master
| 2020-04-21T19:41:49.889253
| 2019-02-09T01:02:37
| 2019-02-09T01:02:37
| 169,816,481
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,136
|
py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles directives.
This converter removes the directive functions from the code and moves the
information they specify into AST annotations. It is a specialized form of
static analysis, one that is specific to AutoGraph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.autograph.core import converter
from tensorflow.contrib.autograph.lang import directives
from tensorflow.contrib.autograph.pyct import anno
from tensorflow.python.util import tf_inspect
ENCLOSING_LOOP = 'enclosing_loop'
def _map_args(call_node, function):
  """Maps AST call nodes to the actual function's arguments.

  Args:
    call_node: ast.Call
    function: Callable[..., Any], the actual function matching call_node

  Returns:
    Dict[Text, ast.AST], mapping each of the function's argument names to
    the respective AST node.

  Raises:
    ValueError: if the default arguments are not correctly set
  """
  args = call_node.args
  kwds = {kwd.arg: kwd.value for kwd in call_node.keywords}
  call_args = tf_inspect.getcallargs(function, *args, **kwds)

  # Keyword arguments not specified in kwds will be mapped to their defaults,
  # which are Python values. Since we don't currently have a way to transform
  # those into AST references, we simply remove them. By convention, directives
  # use UNSPECIFIED as default value for for optional arguments. No other
  # defaults should be present.
  unexpected_defaults = []
  for k in call_args:
    if (k not in kwds
        and call_args[k] not in args
        and call_args[k] is not directives.UNSPECIFIED):
      unexpected_defaults.append(k)
  if unexpected_defaults:
    # Materialize the zip: under Python 3 a bare zip object would render as
    # "<zip object at 0x...>" in the error message instead of the pairs.
    raise ValueError('Unexpected keyword argument values, %s, for function %s'
                     % (list(zip(unexpected_defaults,
                                 [call_args[k] for k in unexpected_defaults])),
                        function))
  return {k: v for k, v in call_args.items() if v is not directives.UNSPECIFIED}
class DirectivesTransformer(converter.Base):
  """Parses compiler directives and converts them into AST annotations."""

  def _process_symbol_directive(self, call_node, directive):
    # Symbol directives (e.g. set_element_type) attach their arguments to
    # every recorded definition of the symbol passed as first argument.
    if len(call_node.args) < 1:
      raise ValueError('"%s" requires a positional first argument'
                       ' as the target' % directive.__name__)
    target = call_node.args[0]
    defs = anno.getanno(target, anno.Static.ORIG_DEFINITIONS)
    for def_ in defs:
      def_.directives[directive] = _map_args(call_node, directive)
    return call_node

  def _process_statement_directive(self, call_node, directive):
    # Statement directives annotate the enclosing loop node instead.
    if self.local_scope_level < 1:
      raise ValueError(
          '"%s" must be used inside a statement' % directive.__name__)
    target = self.get_local(ENCLOSING_LOOP)
    node_anno = anno.getanno(target, converter.AgAnno.DIRECTIVES, {})
    node_anno[directive] = _map_args(call_node, directive)
    anno.setanno(target, converter.AgAnno.DIRECTIVES, node_anno)
    return call_node

  def visit_Expr(self, node):
    # Only bare call expressions whose resolved value is a known directive
    # are absorbed; everything else is visited normally.
    if isinstance(node.value, gast.Call):
      call_node = node.value
      if anno.hasanno(call_node.func, 'live_val'):
        live_val = anno.getanno(call_node.func, 'live_val')
        if live_val is directives.set_element_type:
          call_node = self._process_symbol_directive(call_node, live_val)
        elif live_val is directives.set_loop_options:
          call_node = self._process_statement_directive(call_node, live_val)
        else:
          return self.generic_visit(node)
        return None  # Directive calls are not output in the generated code.
    return self.generic_visit(node)

  # TODO(mdan): This will be insufficient for other control flow.
  # That means that if we ever have a directive that affects things other than
  # loops, we'll need support for parallel scopes, or have multiple converters.
  def _track_and_visit_loop(self, node):
    # Remember the loop node so statement directives in its body can find it.
    self.enter_local_scope()
    self.set_local(ENCLOSING_LOOP, node)
    node = self.generic_visit(node)
    self.exit_local_scope()
    return node

  def visit_While(self, node):
    return self._track_and_visit_loop(node)

  def visit_For(self, node):
    return self._track_and_visit_loop(node)
def transform(node, ctx):
  """Run the directives converter over `node` and return the rewritten AST."""
  transformer = DirectivesTransformer(ctx)
  return transformer.visit(node)
|
[
"ntranmn@gmail.com"
] |
ntranmn@gmail.com
|
f1ce015e673a7f4d1b931c5fdc1c510b8411d01e
|
79bb7105223895235263fd391906144f9f9645fd
|
/python/client/graph_util_test.py
|
4102b27588d68b3537a230b099e8874e5a466156
|
[] |
no_license
|
ml-lab/imcl-tensorflow
|
f863a81bfebe91af7919fb45036aa05304fd7cda
|
54ab3ec2e32087ce70ecae2f36b56a8a92f2ba89
|
refs/heads/master
| 2021-01-22T06:37:18.129405
| 2016-06-08T15:53:28
| 2016-06-08T15:53:28
| 63,518,098
| 1
| 2
| null | 2016-07-17T06:29:14
| 2016-07-17T06:29:13
| null |
UTF-8
|
Python
| false
| false
| 8,123
|
py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.graph_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.client import graph_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import math_ops # pylint: disable=unused-import
from tensorflow.python.ops import state_ops
# Utility device function to use for testing
def test_device_func_pin_variable_to_cpu(op):
if op.device:
return op.device
return "/cpu:0" if op.node_def.op == "Variable" else op.device
class DeviceFunctionsTest(tf.test.TestCase):
  """Exercises device functions and graph_util's sub-graph/const transforms."""

  def testTwoDeviceFunctions(self):
    # The pinning device function only affects Variable ops created while
    # it is active; an explicit inner device (GPU:0) takes precedence.
    with ops.Graph().as_default() as g:
      var_0 = state_ops.variable_op([1], dtype=dtypes.float32)
      with g.device(test_device_func_pin_variable_to_cpu):
        var_1 = state_ops.variable_op([1], dtype=dtypes.float32)
      var_2 = state_ops.variable_op([1], dtype=dtypes.float32)
      var_3 = state_ops.variable_op([1], dtype=dtypes.float32)
      with g.device(test_device_func_pin_variable_to_cpu):
        var_4 = state_ops.variable_op([1], dtype=dtypes.float32)
        with g.device("/device:GPU:0"):
          var_5 = state_ops.variable_op([1], dtype=dtypes.float32)
        var_6 = state_ops.variable_op([1], dtype=dtypes.float32)

    self.assertDeviceEqual(var_0.device, None)
    self.assertDeviceEqual(var_1.device, "/device:CPU:0")
    self.assertDeviceEqual(var_2.device, None)
    self.assertDeviceEqual(var_3.device, None)
    self.assertDeviceEqual(var_4.device, "/device:CPU:0")
    self.assertDeviceEqual(var_5.device, "/device:GPU:0")
    self.assertDeviceEqual(var_6.device, "/device:CPU:0")

  def testNestedDeviceFunctions(self):
    # Nested device functions/strings merge; the innermost setting wins for
    # ops it applies to.
    with tf.Graph().as_default():
      var_0 = tf.Variable(0)
      with tf.device(test_device_func_pin_variable_to_cpu):
        var_1 = tf.Variable(1)
        with tf.device(lambda op: "/gpu:0"):
          var_2 = tf.Variable(2)
          with tf.device("/gpu:0"):  # Implicit merging device function.
            var_3 = tf.Variable(3)

    self.assertDeviceEqual(var_0.device, None)
    self.assertDeviceEqual(var_1.device, "/device:CPU:0")
    self.assertDeviceEqual(var_2.device, "/device:GPU:0")
    self.assertDeviceEqual(var_3.device, "/device:GPU:0")

  def testExplicitDevice(self):
    # Plain device strings are applied verbatim to ops created under them.
    with ops.Graph().as_default() as g:
      const_0 = constant_op.constant(5.0)
      with g.device("/device:GPU:0"):
        const_1 = constant_op.constant(5.0)
      with g.device("/device:GPU:1"):
        const_2 = constant_op.constant(5.0)
      with g.device("/device:CPU:0"):
        const_3 = constant_op.constant(5.0)
      with g.device("/device:CPU:1"):
        const_4 = constant_op.constant(5.0)
      with g.device("/job:ps"):
        const_5 = constant_op.constant(5.0)

    self.assertDeviceEqual(const_0.device, None)
    self.assertDeviceEqual(const_1.device, "/device:GPU:0")
    self.assertDeviceEqual(const_2.device, "/device:GPU:1")
    self.assertDeviceEqual(const_3.device, "/device:CPU:0")
    self.assertDeviceEqual(const_4.device, "/device:CPU:1")
    self.assertDeviceEqual(const_5.device, "/job:ps")

  def testDefaultDevice(self):
    # A graph-wide device function provides the default; explicit inner
    # device strings still override it.
    with ops.Graph().as_default() as g, g.device(
        test_device_func_pin_variable_to_cpu):
      with g.device("/job:ps"):
        const_0 = constant_op.constant(5.0)
      with g.device("/device:GPU:0"):
        const_1 = constant_op.constant(5.0)
      with g.device("/device:GPU:1"):
        const_2 = constant_op.constant(5.0)
      with g.device("/device:CPU:0"):
        const_3 = constant_op.constant(5.0)
      with g.device("/device:CPU:1"):
        const_4 = constant_op.constant(5.0)
      with g.device("/replica:0"):
        const_5 = constant_op.constant(5.0)

    self.assertDeviceEqual(const_0.device, "/job:ps")
    self.assertDeviceEqual(const_1.device, "/device:GPU:0")
    self.assertDeviceEqual(const_2.device, "/device:GPU:1")
    self.assertDeviceEqual(const_3.device, "/device:CPU:0")
    self.assertDeviceEqual(const_4.device, "/device:CPU:1")
    self.assertDeviceEqual(const_5.device, "/replica:0")

  def testExtractSubGraph(self):
    # Build a small graph by hand and check that extract_sub_graph keeps
    # exactly the transitive dependencies of the requested node.
    graph_def = tf.GraphDef()
    n1 = graph_def.node.add()
    n1.name = "n1"
    n1.input.extend(["n5"])
    n2 = graph_def.node.add()
    n2.name = "n2"
    # Take the first output of the n1 node as the input.
    n2.input.extend(["n1:0"])
    n3 = graph_def.node.add()
    n3.name = "n3"
    # Add a control input (which isn't really needed by the kernel, but
    # rather to enforce execution order between nodes).
    n3.input.extend(["^n2"])
    n4 = graph_def.node.add()
    n4.name = "n4"
    # It is fine to have a loops in the graph as well.
    n5 = graph_def.node.add()
    n5.name = "n5"
    n5.input.extend(["n1"])
    sub_graph = graph_util.extract_sub_graph(graph_def, ["n3"])
    # n4 is unreachable from n3 and must be dropped.
    self.assertEqual("n1", sub_graph.node[0].name)
    self.assertEqual("n2", sub_graph.node[1].name)
    self.assertEqual("n3", sub_graph.node[2].name)
    self.assertEqual("n5", sub_graph.node[3].name)

  def testConvertVariablesToConsts(self):
    with tf.Graph().as_default():
      variable_node = tf.Variable(1.0, name="variable_node")
      _ = tf.Variable(1.0, name="unused_variable_node")
      output_node = tf.mul(variable_node, 2.0, name="output_node")
      with tf.Session() as sess:
        init = tf.initialize_variables([variable_node])
        sess.run(init)
        output = sess.run(output_node)
        self.assertNear(2.0, output, 0.00001)
        variable_graph_def = sess.graph.as_graph_def()
        # First get the constant_graph_def when variable_names_whitelist is set,
        # note that if variable_names_whitelist is not set an error will be
        # thrown because unused_variable_node is not initialized.
        constant_graph_def = graph_util.convert_variables_to_constants(
            sess, variable_graph_def, ["output_node"],
            variable_names_whitelist=set(["variable_node"]))
        # Then initialize the unused variable, and get another
        # constant_graph_def when variable_names_whitelist is not set.
        sess.run(tf.initialize_all_variables())
        constant_graph_def_without_variable_whitelist = (
            graph_util.convert_variables_to_constants(
                sess, variable_graph_def, ["output_node"]))
        # The unused variable should be cleared so the two graphs should be
        # equivalent.
        self.assertEqual(str(constant_graph_def),
                         str(constant_graph_def_without_variable_whitelist))

    # Now we make sure the variable is now a constant, and that the graph still
    # produces the expected result.
    with tf.Graph().as_default():
      _ = tf.import_graph_def(constant_graph_def, name="")
      self.assertEqual(4, len(constant_graph_def.node))
      for node in constant_graph_def.node:
        self.assertNotEqual("Variable", node.op)
      with tf.Session() as sess:
        output_node = sess.graph.get_tensor_by_name("output_node:0")
        output = sess.run(output_node)
        self.assertNear(2.0, output, 0.00001)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  tf.test.main()
|
[
"mrlittlezhu@gmail.com"
] |
mrlittlezhu@gmail.com
|
aa7fa5e63735dc329ad367ab2a01e383f1fbe5b4
|
5883449aa14eb5e8b3fa6ad4d03d1dfacc40ccee
|
/Amazon_Framework/DentOsTestbedLib/src/dent_os_testbed/lib/bridge/linux/linux_bridge_fdb.py
|
c6d5ff30762ba7407ed6f5b67e51f6d9d3121cfe
|
[
"Apache-2.0"
] |
permissive
|
tld3daniel/testing
|
826183f30d65f696e8476d4a584c4668355e0cb3
|
e4c8221e18cd94e7424c30e12eb0fb82f7767267
|
refs/heads/master
| 2023-09-01T12:39:26.845648
| 2021-08-11T15:53:16
| 2021-08-11T15:53:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,467
|
py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# generated using file ./gen/model/linux/network/bridge/bridge.yaml
#
# DONOT EDIT - generated by diligent bots
from dent_os_testbed.lib.test_lib_object import TestLibObject
class LinuxBridgeFdb(TestLibObject):
    """
    The corresponding commands display fdb entries, add new entries, append entries, and delete old ones.

    Subclasses implement the format_*/parse_* hooks; format_command and
    parse_output dispatch to them by command name.
    """

    def format_update(self, command, *argv, **kwarg):
        raise NotImplementedError

    def parse_update(self, command, output, *argv, **kwarg):
        raise NotImplementedError

    def format_show(self, command, *argv, **kwarg):
        raise NotImplementedError

    def parse_show(self, command, output, *argv, **kwarg):
        raise NotImplementedError

    def format_command(self, command, *argv, **kwarg):
        # Mutating commands share one formatter; "show" has its own.
        if command in ("add", "append", "delete", "replace"):
            return self.format_update(command, *argv, **kwarg)
        if command in ("show",):
            return self.format_show(command, *argv, **kwarg)
        raise NameError("Cannot find command " + command)

    def parse_output(self, command, output, *argv, **kwarg):
        # Mirrors format_command's dispatch for parsing command output.
        if command in ("add", "append", "delete", "replace"):
            return self.parse_update(command, output, *argv, **kwarg)
        if command in ("show",):
            return self.parse_show(command, output, *argv, **kwarg)
        raise NameError("Cannot find command " + command)
|
[
"muchetan@amazon.com"
] |
muchetan@amazon.com
|
793f2059789077f913c11bee3d42d0eaf8f85304
|
920f0fbb7064f2017ff62da372eaf79ddcc9035b
|
/lc_ladder/company/sq/Pancake_Sort.py
|
c805df046a36a6f61d1095423c2e4997897d9117
|
[] |
no_license
|
JenZhen/LC
|
b29a1c45d8c905680c7b4ad0017516b3dca80cc4
|
85219de95e41551fce5af816b66643495fe51e01
|
refs/heads/master
| 2021-06-03T10:03:02.901376
| 2020-08-05T19:44:48
| 2020-08-05T19:44:48
| 104,683,578
| 3
| 1
| null | 2020-08-05T19:44:50
| 2017-09-24T23:30:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,558
|
py
|
#! /usr/local/bin/python3
# Requirement
# Example
# 一堆 pancakes, 根据大小排序,但是要先 implement swap(List<Pancake> pancakes, int end),
# 把从 index 0 ~ end 的pancakes的顺序全部倒过来,然后用这个方法正常排序pancakes, 最后size 从小到大。
# 比如 {3, 2, 5, 4, 6} swap(pancakes, 3) 之后是 {4, 5, 2, 3, 6} implement 一个方法 不停地 swap 得到 {2, 3, 4, 5, 6}, 数字表示对应 pancake size
"""
Algo: Sorting
D.S.:
Solution:
Swap Sort
Time: O(N^2)
Corner cases:
"""
def findMaxIdx(arr, cnt):
    """Return the index of the largest element in arr[0..cnt] (inclusive).

    Ties keep the earliest index, matching the original linear scan.
    Fix: the original bound its running maximum to a local named ``max``,
    shadowing the builtin; renamed to best_val/best_idx.
    """
    best_idx = 0
    best_val = arr[0]
    for i in range(1, cnt + 1):
        if arr[i] > best_val:
            best_val = arr[i]
            best_idx = i
    return best_idx
def reverse(arr, cnt):
    """Reverse arr[0..cnt] (inclusive) in place; elements past cnt are untouched."""
    arr[:cnt + 1] = arr[cnt::-1]
def pancakeSort(arr):
    """Sort *arr* ascending in place using only prefix reversals; return it."""
    if not arr:
        return arr
    # Shrink the unsorted prefix from the right: flip the largest remaining
    # element to the front, then flip it into its final position.
    for last in range(len(arr) - 1, 0, -1):
        top = findMaxIdx(arr, last)
        if top == last:
            continue  # already in place, nothing to flip
        reverse(arr, top)
        reverse(arr, last)
    return arr
# Test Cases
if __name__ == "__main__":
    # Covers the edge cases: empty, single element, already sorted,
    # reverse sorted, and a general shuffle.
    testCases = [
        [],
        [1],
        [1, 2],
        [3, 2, 1],
        [2, 1, 5, 3, 4]
    ]
    for arr in testCases:
        res = pancakeSort(arr)  # sorts in place and returns the same list
        print(res)
|
[
"jenzhen.nyc89@yahoo.com"
] |
jenzhen.nyc89@yahoo.com
|
de30fb950229b9c7d7b72c825c41a5a03d2d1876
|
7e471dd6d2b4d6a429941ec43c6048397a0b5456
|
/Arrays/Array Manipulation.py
|
1c529a6a8811d87b745e728674c5eea4e52d5ec8
|
[] |
no_license
|
Benson1198/Practice-Problems
|
ab0c4629e456a990310b2c365401b6b46cd66982
|
505a2d0f4941e564516c9e521178270cdc5c0744
|
refs/heads/master
| 2022-11-28T18:00:16.714068
| 2020-08-11T17:59:13
| 2020-08-11T17:59:13
| 270,238,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 306
|
py
|
def arrayManipulation(n, queries):
    """Return the maximum element of an n-length zero array after all queries.

    Each query [a, b, k] adds k to every element with 1-based index a..b
    (inclusive). A difference array makes the whole pass O(n + len(queries)).

    Fixes over the original:
    - the end-of-range cancellation was guarded by ``if arr[q[1]] <= n``,
      which compared an accumulated *value* against n (and indexed out of
      bounds when b == n); the -k entry must always be recorded just past b.
    - the max pass only advanced the running total when it increased, which
      corrupted the prefix sum whenever a range ended.
    """
    # diff[i] is the change applied when moving from position i-1 to i.
    diff = [0] * (n + 1)
    for a, b, k in queries:
        diff[a - 1] += k
        diff[b] -= k  # diff has n+1 slots, so index b (== up to n) is safe
    best = running = 0
    for delta in diff:
        running += delta  # prefix sum reconstructs the actual value
        if running > best:
            best = running
    return best
|
[
"34964177+Benson1198@users.noreply.github.com"
] |
34964177+Benson1198@users.noreply.github.com
|
440e11f75f0b47160e56760518042862b3d06cbd
|
331fca39c99354bb96f9f07a2309c59c34a9fb15
|
/lib/python2.6/site-packages/tg/test_stack/rendering/templates/mako_noop.mak.py
|
49e125649ea0213411dc208654116ae26a254256
|
[] |
no_license
|
rudyvallejos/GestionItems
|
d368a940a63cae9a2e5845cdf50db6b232aa9871
|
1eb56b582f0539c883a4914ad48291941b3c6c38
|
refs/heads/master
| 2016-09-06T08:37:46.582041
| 2011-07-09T18:39:02
| 2011-07-09T18:39:02
| 1,662,813
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 744
|
py
|
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 5
_modified_time = 1254065369.406944
_template_filename='tg/test_stack/rendering/templates/mako_noop.mak'
_template_uri='mako_noop.mak'
_template_cache=cache.Cache(__name__, _modified_time)
_source_encoding='utf-8'
from webhelpers.html import escape
_exports = []
def render_body(context,**pageargs):
    """Render the template body (auto-generated by Mako -- do not hand-edit)."""
    context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)
        __M_writer = context.writer()
        # SOURCE LINE 2
        __M_writer(u'\n<p>This is the mako index page</p>')
        return ''
    finally:
        # Always pop the render frame, even if writing raised.
        context.caller_stack._pop_frame()
|
[
"rudy.vallejos@gmail.com"
] |
rudy.vallejos@gmail.com
|
41f369934b8f755ab549927bdfe7383246b9ca49
|
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
|
/ml-flask/Lib/site-packages/jinja2/compiler.py
|
b1f370e1ab080128788c5a1572565d1ae353aa48
|
[
"MIT"
] |
permissive
|
YaminiHP/SimilitudeApp
|
8cbde52caec3c19d5fa73508fc005f38f79b8418
|
005c59894d8788c97be16ec420c0a43aaec99b80
|
refs/heads/master
| 2023-06-27T00:03:00.404080
| 2021-07-25T17:51:27
| 2021-07-25T17:51:27
| 389,390,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:4daf56d4b8ade78db022d0075e50dc834b043ac143322ac276514f1c0bab838a
size 66284
|
[
"yamprakash130@gmail.com"
] |
yamprakash130@gmail.com
|
56261f33345742b0b0cdbd398c61208e19eac3cf
|
50d3c1e9f524e5d9ee2684222ec872d1514c661f
|
/pygitversion/__init__.py
|
25e746ad3c6e4afc686b7aaa8712ad4a2d88aeed
|
[
"MIT"
] |
permissive
|
RadioAstronomySoftwareGroup/pygitversion
|
ceca85a6a0df835039918ffea30ea04646080822
|
36c78c87bb15f8d23418698ad4acb14391a77795
|
refs/heads/master
| 2021-07-12T23:51:30.834527
| 2019-12-16T13:27:49
| 2019-12-16T13:27:49
| 207,653,583
| 1
| 2
|
NOASSERTION
| 2020-10-02T15:54:07
| 2019-09-10T20:09:36
|
Python
|
UTF-8
|
Python
| false
| false
| 602
|
py
|
# -*- mode: python; coding: utf-8 -*
# Copyright (c) 2019 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
def branch_scheme(version):
    """Local version scheme that adds the branch name for absolute reproducibility.

    Exact tags (or versions with no node) get a date-stamped local part;
    master builds carry just the node hash; any other branch also embeds
    the branch name. Dirty working trees get a ".dirty" suffix.
    """
    if version.exact or version.node is None:
        return version.format_choice("", "+d{time:{time_format}}", time_format="%Y%m%d")
    if version.branch == "master":
        return version.format_choice("+{node}", "+{node}.dirty")
    return version.format_choice("+{node}.{branch}", "+{node}.{branch}.dirty")
|
[
"steven.murray@curtin.edu.au"
] |
steven.murray@curtin.edu.au
|
47b62311375e0cec75989ac45dbfcd2d5c15e413
|
c074fb834cb4a8ac75d107146df10f9496590792
|
/subscriptions/migrations/0007_auto_20201008_0023.py
|
30d39f427afe15cb651a5be18c08001ded97e71f
|
[
"Unlicense"
] |
permissive
|
jmhubbard/quote_of_the_day_custom_user
|
4d5ffd4183d7e6290161b84cae2aa1f7ad621a99
|
27024b2953c1c94fd2970563c3ab31ad444912b6
|
refs/heads/master
| 2023-02-19T00:59:27.372671
| 2021-01-10T02:45:56
| 2021-01-10T02:45:56
| 293,443,918
| 1
| 0
|
Unlicense
| 2020-12-03T17:59:59
| 2020-09-07T06:41:25
|
Python
|
UTF-8
|
Python
| false
| false
| 463
|
py
|
# Generated by Django 3.1.2 on 2020-10-08 00:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.2): re-declare Subscription.status.

    Choices are 0=Unknown, 1=Subscribed, 2=Unsubscribed; the default
    becomes 2 (Unsubscribed).
    """

    dependencies = [
        ('subscriptions', '0006_auto_20200915_0434'),
    ]

    operations = [
        migrations.AlterField(
            model_name='subscription',
            name='status',
            field=models.IntegerField(choices=[(0, 'Unknown'), (1, 'Subscribed'), (2, 'Unsubscribed')], default=2),
        ),
    ]
|
[
"jasonhubb@gmail.com"
] |
jasonhubb@gmail.com
|
8b649adf9ab63a818ffabb3813e55d2ac9b06e8a
|
3246503531497b8287b834038fcb0c5212d7db23
|
/awx/main/migrations/0156_capture_mesh_topology.py
|
90f5a5e0a26c00e35828acb499a24e15b010c10d
|
[
"Apache-2.0"
] |
permissive
|
AlexSCorey/awx
|
ba19ff243e6e831e45fd43154477d0c180b728b9
|
268a4ad32d8f48a40e9837f6eb3504caa736e243
|
refs/heads/devel
| 2023-08-08T23:29:13.932495
| 2022-08-11T12:03:35
| 2022-08-11T12:03:35
| 171,529,429
| 1
| 1
|
NOASSERTION
| 2023-03-01T04:05:10
| 2019-02-19T18:43:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,574
|
py
|
# Generated by Django 2.2.20 on 2021-12-17 19:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 2.2.20): capture mesh topology.

    Re-declares Instance.node_type with the mesh roles, introduces the
    InstanceLink through-model (unique source/target pairs), and wires the
    Instance.peers many-to-many through it.
    """

    dependencies = [
        ('main', '0155_improved_health_check'),
    ]

    operations = [
        migrations.AlterField(
            model_name='instance',
            name='node_type',
            field=models.CharField(
                choices=[
                    ('control', 'Control plane node'),
                    ('execution', 'Execution plane node'),
                    ('hybrid', 'Controller and execution'),
                    ('hop', 'Message-passing node, no execution capability'),
                ],
                default='hybrid',
                max_length=16,
            ),
        ),
        migrations.CreateModel(
            name='InstanceLink',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('source', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='main.Instance')),
                ('target', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reverse_peers', to='main.Instance')),
            ],
            options={
                'unique_together': {('source', 'target')},
            },
        ),
        migrations.AddField(
            model_name='instance',
            name='peers',
            field=models.ManyToManyField(through='main.InstanceLink', to='main.Instance'),
        ),
    ]
|
[
"jeff.bradberry@gmail.com"
] |
jeff.bradberry@gmail.com
|
6b464c01d7971b6cccec176a98a2c279bd71b542
|
f11600b9a256bf6a2b584d127faddc27a0f0b474
|
/normal/880.py
|
1836e72c5ff4d8f702b6338f04d808f0d45ac0c5
|
[] |
no_license
|
longhao54/leetcode
|
9c1f0ce4ca505ec33640dd9b334bae906acd2db5
|
d156c6a13c89727f80ed6244cae40574395ecf34
|
refs/heads/master
| 2022-10-24T07:40:47.242861
| 2022-10-20T08:50:52
| 2022-10-20T08:50:52
| 196,952,603
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
class Solution:
    def decodeAtIndex(self, S: str, K: int) -> str:
        """Return the K-th (1-based) letter of the string encoded by S.

        In S a letter appends itself and a digit d repeats everything decoded
        so far d times. The decoded string can be astronomically long, so only
        its length is tracked, never the string itself.

        Fix: the original used float division (``size /= int(c)``) on the way
        back, which silently loses precision once the decoded length exceeds
        2**53; integer floor division is exact here because the decoded length
        is always divisible by the repeat factor.
        """
        size = 0
        # Forward pass: size = decoded length after consuming each character.
        for c in S:
            if c.isdigit():
                size *= int(c)
            else:
                size += 1
        # Backward pass: shrink K modulo the current decoded length until it
        # lands exactly on a letter.
        for c in reversed(S):
            K %= size
            if K == 0 and c.isalpha():
                return c
            if c.isdigit():
                size //= int(c)  # exact: undo the repetition
            else:
                size -= 1
|
[
"jinlha@jiedaibao.com"
] |
jinlha@jiedaibao.com
|
e0e993f713a245f48dddc49afc3028d72bc1d18e
|
e1b1163fcf4c6fea7dcb3d4e403ef05578426412
|
/erpnext_mfg/erpnext_mfg/doctype/replenishment/replenishment.py
|
ef337439b2864fdc7fded014a15ddf6337a20ba7
|
[
"MIT"
] |
permissive
|
amutaher/erpnext_mfg
|
e5e5eb822ed2f0b012c5fb6d202e4ae9e20843a9
|
0a4be3dea07802a37d947ed3e838719f10dc16e3
|
refs/heads/main
| 2023-07-11T14:46:14.707841
| 2021-08-09T12:13:38
| 2021-08-09T12:13:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,197
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Bai Web and Mobile Lab and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from erpnext_mfg.api.replenishment import with_qty_details
class Replenishment(Document):
    """Single DocType driving replenishment-rule maintenance for a warehouse."""

    def on_update(self):
        # Replenishment is a Single doctype used as a scratch work area, so
        # wipe its stored state (and child rows) after every save.
        frappe.db.sql("DELETE FROM tabSingles WHERE doctype = 'Replenishment'")
        frappe.db.sql("DELETE FROM `tabReplenishment Item`")

    def _set_items(self, items):
        # Enrich each raw row with quantity details and append it to the
        # child table; remind the user that nothing is persisted yet.
        for item in items:
            filled_item = with_qty_details(item, self.warehouse)
            self.append("items", filled_item)
        frappe.msgprint(
            _("Please click <strong>Update</strong> in order to save your changes.")
        )

    def _set_order_qty(self):
        # order_qty = max_qty - projected_qty, treating missing values as 0.
        for item in self.items:
            item.order_qty = (item.max_qty or 0) - (item.projected_qty or 0)

    @frappe.whitelist()
    def load_items(self):
        # Reset the grid from the rules already stored for this warehouse.
        self.items = []
        self._set_items(_get_replenishment_rules(self.warehouse))

    @frappe.whitelist()
    def pull_from_work_order(self, work_order):
        # Seed the grid from a Work Order's required items.
        required_items = _with_item_reorder_details(
            _get_required_items_by_work_order(work_order)
        )
        self._set_items(required_items)
        self._set_order_qty()  # this should be after all the details is set

    @frappe.whitelist()
    def pull_from_bin(self):
        # Seed the grid from requested quantities recorded in Bin.
        requested_items = _with_item_reorder_details(
            _get_bin_requested_items_by_warehouse(self.warehouse)
        )
        self._set_items(requested_items)
        self._set_order_qty()

    @frappe.whitelist()
    def pull_from_reorder_details(self):
        # Seed the grid from Item Reorder settings for this warehouse.
        requested_items = _get_item_reorder_details_by_warehouse(self.warehouse)
        self._set_items(requested_items)
        self._set_order_qty()

    @frappe.whitelist()
    def update_replenishment_rules(self):
        # Sync stored Replenishment Rules to match the grid: validate, then
        # delete stale rules, update existing ones, and create missing ones.
        if not self.warehouse:
            frappe.throw(_("Please set your warehouse"))
        _validate_items(self.items)
        _clear_replenishment_rules(self.items, self.warehouse)
        _update_replenishment_rules(self.items, self.warehouse)
        _create_replenishment_rules(self.items, self.warehouse)
        frappe.msgprint(_("Replenishment Rules are updated."))
def _get_replenishment_rules(warehouse):
    """Fetch every Replenishment Rule defined for *warehouse*."""
    wanted_fields = ["item", "min_qty", "max_qty", "order_qty", "supplier"]
    return frappe.get_all(
        "Replenishment Rule",
        filters={"warehouse": warehouse},
        fields=wanted_fields,
    )
def _get_required_items_by_work_order(work_order):
    """Return the required items of *work_order*, mapped to worksheet fields."""
    return frappe.get_all(
        "Work Order Item",
        fields=["item_code as item", "required_qty as max_qty"],
        filters={"parent": work_order},
    )
def _get_bin_requested_items_by_warehouse(warehouse):
    """Return requested (indented) quantities from Bin for *warehouse*."""
    query = """
        SELECT
            item_code as item,
            indented_qty as order_qty
        FROM `tabBin`
        WHERE warehouse=%s
    """
    return frappe.db.sql(query, warehouse, as_dict=1)
def _get_item_reorder_details_by_warehouse(warehouse):
    """Return reorder level/qty for purchase items stocked in *warehouse*."""
    query = """
        SELECT
            ir.parent as item,
            ir.warehouse_reorder_level as min_qty,
            ir.warehouse_reorder_qty as max_qty
        FROM `tabItem Reorder` AS ir
        INNER JOIN `tabItem` i ON i.name = ir.parent
        WHERE ir.warehouse=%s
        AND i.is_purchase_item = 1
    """
    return frappe.db.sql(query, warehouse, as_dict=1)
def _clear_replenishment_rules(items, warehouse):
    """Delete Replenishment Rules of *warehouse* whose item is absent from *items*.

    Uses a set for O(1) membership tests instead of scanning the item list
    once per existing rule (the original was O(rules * items)).
    """
    existing_rules = frappe.get_all(
        "Replenishment Rule",
        fields=["name", "item"],
        filters={"warehouse": warehouse},
    )
    kept_items = {x.get("item") for x in items}
    for rule in existing_rules:
        if rule.get("item") not in kept_items:
            frappe.delete_doc("Replenishment Rule", rule.get("name"))
def _create_replenishment_rules(items, warehouse):
    """Insert a Replenishment Rule for every item that has none yet in *warehouse*.

    Uses a set for O(1) membership tests instead of a list scan per item.
    """
    existing_rules = frappe.get_all(
        "Replenishment Rule",
        fields=["item"],
        filters={"warehouse": warehouse},
    )
    existing_items = {x.get("item") for x in existing_rules}
    for item in items:
        if item.get("item") not in existing_items:
            rule = _get_replenishment_rule(item)
            frappe.get_doc(
                {**rule, "doctype": "Replenishment Rule", "warehouse": warehouse}
            ).insert()
def _update_replenishment_rules(items, warehouse):
    """Update the existing Replenishment Rules of *warehouse* from *items*.

    Items without an existing rule are skipped (creation is handled by
    _create_replenishment_rules).
    """
    existing_rules = frappe.get_all(
        "Replenishment Rule",
        fields=["name", "item"],
        filters={"warehouse": warehouse},
    )
    existing = {x.get("item"): x.get("name") for x in existing_rules}
    for item in items:
        name = existing.get(item.get("item"))
        # A truthy `name` already implies the item is a key of `existing`,
        # so the original's redundant `in item_names` check is dropped.
        if name:
            frappe.db.set_value(
                "Replenishment Rule", name, _get_replenishment_rule(item)
            )
def _validate_items(items):
item_names = [x.get("item") for x in items]
tmp_duplicates = []
for item in item_names:
if item not in tmp_duplicates:
tmp_duplicates.append(item)
else:
frappe.throw(
_(
"Unable to update rules. There are item <strong>{}</strong> in multiples.".format(
item
)
)
)
for item in items:
if not item.supplier:
frappe.throw(
_(
"Please set the supplier on Item <strong>{}</strong>".format(
item.item
)
)
)
def _get_replenishment_rule(item):
item_dict = item.as_dict()
unused_keys = [
"name",
"owner",
"creation",
"modified",
"modified_by",
"parent",
"parentfield",
"parenttype",
"idx",
"docstatus",
"doctype",
"__islocal",
"projected_qty",
"actual_qty",
"__unsaved",
]
for x in unused_keys:
if x in item_dict:
del item_dict[x]
return item_dict
def _get_reorder_details(items):
    """Map item code -> {min_qty, max_qty} taken from Item Reorder records."""
    parent_items = [row.get("item") for row in items]
    reorder_rows = frappe.get_all(
        "Item Reorder",
        filters={"parent": ["in", parent_items]},
        fields=["parent", "warehouse_reorder_level", "warehouse_reorder_qty"],
    )
    details = {}
    for row in reorder_rows:
        details[row.get("parent")] = {
            "min_qty": row.get("warehouse_reorder_level"),
            "max_qty": row.get("warehouse_reorder_qty"),
        }
    return details
def _with_item_reorder_details(items):
    """Fill min_qty/max_qty on each item from its Item Reorder record (0 if none)."""
    details = _get_reorder_details(items)
    for entry in items:
        reorder = details.get(entry.item)
        entry["min_qty"] = reorder.get("min_qty") if reorder else 0
        entry["max_qty"] = reorder.get("max_qty") if reorder else 0
    return items
|
[
"irayspacii@gmail.com"
] |
irayspacii@gmail.com
|
a309bbc972daaa409266e621f066fe03d4044950
|
453d0bd78a145a323785a8f86efffbbed36e2aff
|
/estimagic/tests/parameters/test_kernel_transformations.py
|
6eec2853685a1bc4f6573b8d8ad5aa3fed5fc602
|
[
"BSD-3-Clause"
] |
permissive
|
vishalbelsare/estimagic
|
2da897ac8f11cccf764a775c6aea23097082e862
|
afae1be3a1566056d11962c495b67e64bc4a0822
|
refs/heads/master
| 2023-08-17T04:41:04.179038
| 2021-09-27T09:50:22
| 2021-09-27T09:50:22
| 286,576,997
| 0
| 0
|
BSD-3-Clause
| 2021-09-27T10:40:04
| 2020-08-10T20:55:11
|
Python
|
UTF-8
|
Python
| false
| false
| 3,940
|
py
|
from functools import partial
from itertools import product
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal as aaae
import estimagic.parameters.kernel_transformations as kt
from estimagic.differentiation.derivatives import first_derivative
from estimagic.parameters.kernel_transformations import cov_matrix_to_sdcorr_params
to_test = list(product(range(10, 30, 5), [1234, 5471]))
def get_internal_cholesky(dim, seed=0):
    """Return random internal cholesky values given dimension."""
    np.random.seed(seed)
    lower = np.tril(np.random.randn(dim, dim))
    return lower[np.tril_indices(dim)]
def get_external_covariance(dim, seed=0):
    """Return random external covariance values given dimension."""
    np.random.seed(seed)
    sample = np.random.randn(dim, 1000)
    return np.cov(sample)[np.tril_indices(dim)]
def get_internal_probability(dim, seed=0):
    """Return random internal positive values given dimension."""
    np.random.seed(seed)
    return np.random.uniform(size=dim)
def get_external_probability(dim, seed=0):
    """Return random internal positive values that sum to one."""
    raw = get_internal_probability(dim, seed)
    return raw / raw.sum()
def get_external_sdcorr(dim, seed=0):
    """Return random external sdcorr values given dimension."""
    np.random.seed(seed)
    sample = np.random.randn(dim, 1000)
    return cov_matrix_to_sdcorr_params(np.cov(sample))
@pytest.mark.parametrize("dim, seed", to_test)
def test_covariance_from_internal_jacobian(dim, seed):
    """Analytic jacobian of covariance_from_internal matches numerical one.

    Bug fix: the parametrized *seed* was previously ignored (the helper was
    called with its default seed=0), so the seed axis added no coverage.
    """
    internal = get_internal_cholesky(dim, seed)
    func = partial(kt.covariance_from_internal, **{"constr": None})
    numerical_deriv = first_derivative(func, internal)
    deriv = kt.covariance_from_internal_jacobian(internal, None)
    aaae(deriv, numerical_deriv["derivative"], decimal=3)
@pytest.mark.parametrize("dim, seed", to_test)
def test_covariance_to_internal_jacobian(dim, seed):
    """Analytic jacobian of covariance_to_internal matches numerical one.

    Bug fix: forward the parametrized *seed* (previously silently dropped).
    """
    external = get_external_covariance(dim, seed)
    func = partial(kt.covariance_to_internal, **{"constr": None})
    numerical_deriv = first_derivative(func, external)
    deriv = kt.covariance_to_internal_jacobian(external, None)
    aaae(deriv, numerical_deriv["derivative"], decimal=3)
@pytest.mark.parametrize("dim, seed", to_test)
def test_probability_from_internal_jacobian(dim, seed):
    """Analytic jacobian of probability_from_internal matches numerical one.

    Bug fix: forward the parametrized *seed* (previously silently dropped).
    """
    internal = get_internal_probability(dim, seed)
    func = partial(kt.probability_from_internal, **{"constr": None})
    numerical_deriv = first_derivative(func, internal)
    deriv = kt.probability_from_internal_jacobian(internal, None)
    aaae(deriv, numerical_deriv["derivative"], decimal=3)
@pytest.mark.parametrize("dim, seed", to_test)
def test_probability_to_internal_jacobian(dim, seed):
    """Analytic jacobian of probability_to_internal matches numerical one.

    Bug fix: forward the parametrized *seed* (previously silently dropped).
    """
    external = get_external_probability(dim, seed)
    func = partial(kt.probability_to_internal, **{"constr": None})
    numerical_deriv = first_derivative(func, external)
    deriv = kt.probability_to_internal_jacobian(external, None)
    aaae(deriv, numerical_deriv["derivative"], decimal=3)
@pytest.mark.parametrize("dim, seed", to_test)
def test_sdcorr_from_internal_jacobian(dim, seed):
    """Analytic jacobian of sdcorr_from_internal matches numerical one.

    Bug fix: forward the parametrized *seed* (previously silently dropped).
    """
    internal = get_internal_cholesky(dim, seed)
    func = partial(kt.sdcorr_from_internal, **{"constr": None})
    numerical_deriv = first_derivative(func, internal)
    deriv = kt.sdcorr_from_internal_jacobian(internal, None)
    aaae(deriv, numerical_deriv["derivative"], decimal=3)
@pytest.mark.parametrize("dim, seed", to_test)
def test_sdcorr_to_internal_jacobian(dim, seed):
    """Analytic jacobian of sdcorr_to_internal matches numerical one.

    Bug fix: forward the parametrized *seed* (previously silently dropped).
    """
    external = get_external_sdcorr(dim, seed)
    func = partial(kt.sdcorr_to_internal, **{"constr": None})
    numerical_deriv = first_derivative(func, external)
    deriv = kt.sdcorr_to_internal_jacobian(external, None)
    aaae(deriv, numerical_deriv["derivative"], decimal=3)
|
[
"noreply@github.com"
] |
vishalbelsare.noreply@github.com
|
45a8ad2f96978babc0f6b9c6b8ee581c042201d2
|
01b281b97b733a8b537bd997110412763a9188d9
|
/timetracker/timetracker/tests.py
|
3b61d410fb193503b134f4d296a9933a8d09a083
|
[] |
no_license
|
simonecastellazzi/lesson-two
|
1e81dd25c9ae8fb967fb91b9185f7770e6cd20f0
|
30221d469ba9b9690af05d426d273b40e4e1fb95
|
refs/heads/master
| 2020-12-11T02:13:56.379185
| 2015-07-23T08:50:01
| 2015-07-23T08:50:22
| 39,557,689
| 2
| 0
| null | 2015-07-23T09:16:43
| 2015-07-23T09:16:43
| null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
from django.test import TestCase, Client
class LessonOneTests(TestCase):
    """Smoke test for the lesson-one greeting view."""

    def test_hello_melbdjango(self):
        """The root view should echo the ?name= query parameter back."""
        client = Client()
        response = client.get('/?name=melbdjango')
        self.assertTrue('melbdjango' in str(response.content))
|
[
"curtis@tinbrain.net"
] |
curtis@tinbrain.net
|
94d549582831c32cc8be6b022bd0d3b28e113706
|
c96dba0f44c7996748ebcbf5c0c2489766ea65b4
|
/scripts/NetworkStats.py
|
e530ca1b48216edd1419d450b48e248ea31902fa
|
[] |
no_license
|
JKOK005/3dcnn-clone
|
7189b0c3fa9c5e648931c887360639101ac27131
|
0c91ed60c164c9e683d6932b6dbd3f2accae9e8e
|
refs/heads/master
| 2021-01-01T19:13:17.085176
| 2017-07-27T13:51:28
| 2017-07-27T13:51:28
| 98,540,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,224
|
py
|
import os
import numpy as np
from UtilitiesScript import *
from settings import params
class NetworkStats(object):
    """Binary-classification statistics (confusion matrix, precision/recall/F1)."""

    @staticmethod
    def __consoleLog(true_pos_count, false_pos_count, false_neg_count, true_neg_count, precision, recall, f1):
        """Print confusion-matrix counts and derived metrics."""
        print("True positive count: {0}".format(true_pos_count))
        print("False positive count: {0}".format(false_pos_count))
        print("False negative count: {0}".format(false_neg_count))
        print("True negative count: {0}".format(true_neg_count))
        print("Precision: {0}".format(precision))
        print("Recall: {0}".format(recall))
        print("F1: {0} \n".format(f1))

    @staticmethod
    def report(predicted, ground_truth, thresh, disp=True):
        """Score *predicted* probabilities against binary *ground_truth* at *thresh*.

        Returns a dict with counts, precision, recall, f1 and false_pos_rate
        (the original returned None and discarded false_pos_rate, so the
        return value is backward compatible for callers that ignore it).
        Fixes: every ratio is computed in float (false_pos_rate used integer
        division under Python 2) and an empty denominator yields 0.0 instead
        of raising ZeroDivisionError.
        """
        pred_pos = np.where(predicted >= thresh)
        true_nodules = np.where(ground_truth == 1)
        true_pos = np.intersect1d(pred_pos, true_nodules)
        false_pos = np.setxor1d(pred_pos, true_pos)
        false_neg = np.setxor1d(true_nodules, true_pos)

        sample_count = len(predicted)
        true_pos_count = len(true_pos)
        false_pos_count = len(false_pos)
        false_neg_count = len(false_neg)
        true_neg_count = sample_count - true_pos_count - false_pos_count - false_neg_count

        def _ratio(num, den):
            # Safe float ratio: 0.0 on an empty denominator.
            return float(num) / den if den else 0.0

        precision = _ratio(true_pos_count, true_pos_count + false_pos_count)
        recall = _ratio(true_pos_count, true_pos_count + false_neg_count)
        f1 = _ratio(2 * precision * recall, precision + recall)
        false_pos_rate = _ratio(false_pos_count, false_pos_count + true_neg_count)

        if disp:
            NetworkStats.__consoleLog(true_pos_count, false_pos_count, false_neg_count, true_neg_count, precision, recall, f1)
        return {
            "true_pos_count": true_pos_count,
            "false_pos_count": false_pos_count,
            "false_neg_count": false_neg_count,
            "true_neg_count": true_neg_count,
            "precision": precision,
            "recall": recall,
            "f1": f1,
            "false_pos_rate": false_pos_rate,
        }
if __name__ == "__main__":
    # Model directory where the evaluated .csv file is stored.
    model_dir = "set_p_6228_n_31140_raw_data_aug"
    # Name of the prediction file inside that model directory.
    file_str = "predicted_03-0.0421-0.0596.hd5.csv"

    result_path = os.path.join(params["sc_test"]["results_dir"], model_dir, file_str)
    predicted = CsvUtilities.read(result_path, class_field="class")
    ground_truth = CsvUtilities.read(params["backup"]["label"], class_field="class")

    # Sweep decision thresholds across (0, 1) and report stats for each.
    for threshold in np.arange(0.01, 1, 0.01):
        print("Threshold {0}".format(threshold))
        NetworkStats.report(predicted, ground_truth, threshold, disp=True)
|
[
"JKOK005@e.ntu.edu.sg"
] |
JKOK005@e.ntu.edu.sg
|
eb7d120c25c64f029b2468e43a512848be088d9a
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2466/60799/234954.py
|
cf99f1d1117b7d983a73f03f3f7cafce84cebb35
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 341
|
py
|
def count_triangles(sides):
    """Count index triples i<j<k of ascending *sides* forming a valid triangle.

    For an ascending triple the only binding triangle inequality is
    sides[i] + sides[j] > sides[k].
    """
    n = len(sides)
    total = 0
    for i in range(n - 2):
        for j in range(i + 1, n - 1):
            for k in range(j + 1, n):
                if sides[i] + sides[j] > sides[k]:
                    total += 1
    return total


def main():
    """Read T test cases; for each, print the number of valid triangles."""
    t = int(input())
    for _ in range(t):
        input()  # the length line is unused; the values line is split directly
        # Bug fix: sort numerically. The original sorted the raw strings,
        # which misorders multi-digit values (e.g. "10" < "2") and breaks
        # the triangle inequality check. Also avoids shadowing builtin `list`.
        sides = sorted(int(v) for v in input().split())
        print(count_triangles(sides))


if __name__ == "__main__":
    main()
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
286854fe25353d24896d5afa386aafa689d2098d
|
37468a4d833cb26fbf1e3989166a54b77600c8fe
|
/hw03/Etch-A-Sketch.py
|
984f39bc1346d92e8de9e49b0436838d1c66cecf
|
[] |
no_license
|
AugustofMay/ECE434-Embedded-Linux
|
c5b9338ade80f5a5cdab31c6624c32b7ff2c5bc2
|
7b628f7f5ec740460a067f44a0fb9a24c1614246
|
refs/heads/master
| 2020-03-27T18:33:21.219335
| 2018-10-10T13:54:44
| 2018-10-10T13:54:44
| 146,929,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,179
|
py
|
#!/usr/bin/env python3
# Etch-A-Sketch on a BeagleBone: two rotary encoders move a cursor on an
# 8x8 bi-color LED matrix, and a TMP101 temperature sensor erases the
# drawing when its reading rises above a fixed threshold.
# From: https://adafruit-beaglebone-io-python.readthedocs.io/en/latest/Encoder.html
from Adafruit_BBIO.Encoder import RotaryEncoder, eQEP2, eQEP1
import smbus
import time

# All three peripherals sit on I2C bus 2.
bus = smbus.SMBus(2)
matrix = 0x70    # LED matrix backpack address
tmp101_1 = 0x4a  # second TMP101 sensor address (declared but not read below)
tmp101_0 = 0x48  # TMP101 sensor used for the erase gesture

# Instantiate the class to access channel eQEP2, and initialize that channel
Vertical = RotaryEncoder(eQEP2)
Horizontal = RotaryEncoder(eQEP1)
Vertical.setAbsolute()
Horizontal.setAbsolute()
Vertical.enable()
Horizontal.enable()

# Cursor state: cursorX indexes a byte in the display buffer (stepped by 2
# below, so it stays on one color plane); cursorY is a one-bit mask within
# that byte, shifted up/down as the vertical encoder turns.
cursorX = 1
cursorY = 0x80

# The first byte is GREEN, the second is RED
clear = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
state = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]

# Start oscillator (p10)
bus.write_byte_data(matrix, 0x21, 0)
# Disp on, blink off (p11)
bus.write_byte_data(matrix, 0x81, 0)
# Full brightness (p15)
bus.write_byte_data(matrix, 0xe7, 0)
# time.sleep(5)
# bus.read_byte_data(tmp101_0, 0)
# Blank the display, then push the (initially empty) drawing buffer.
bus.write_i2c_block_data(matrix, 0, clear)
bus.write_i2c_block_data(matrix, 0, state)

while True:
    # Get the current position
    print("Vertical: " + str(Vertical.position))
    print("Horizontal: " + str(Horizontal.position))
    temp = bus.read_byte_data(tmp101_0, 0)
    print("Temperature: " + str(temp))
    # Erase gesture: above the raw-reading threshold, clear the screen and
    # reset the local drawing buffer.  NOTE(review): 0x1c is a raw register
    # value, presumably ~28 degC — confirm against the TMP101 datasheet.
    if(temp > 0x1c):
        bus.write_i2c_block_data(matrix, 0, clear)
        state = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
    # Vertical encoder shifts the cursor bit within its byte, clamped to
    # the 8-bit range; position is consumed and reset each pass.
    if(Vertical.position > 0):
        if(cursorY < 0x80):
            cursorY = cursorY << 1
    elif(Vertical.position < 0):
        if(cursorY > 0x01):
            cursorY = cursorY >> 1
    Vertical.position = 0
    # Horizontal encoder steps the byte index by 2 (staying on one color
    # plane), clamped to the buffer bounds.
    if(Horizontal.position > 0):
        if(cursorX < 15):
            cursorX = cursorX + 2
    elif(Horizontal.position < 0):
        if(cursorX > 1):
            cursorX = cursorX - 2
    Horizontal.position = 0
    # Draw: set the cursor bit in the buffer and push the whole frame.
    state[cursorX] = state[cursorX] | cursorY
    bus.write_i2c_block_data(matrix, 0, state)
    print("cursorX: " + str(cursorX))
    print("cursorY: " + str(cursorY))
    time.sleep(0.25)
|
[
"debian@beaglebone.localdomain"
] |
debian@beaglebone.localdomain
|
8339a891b2162c3bce0e2559e5008b1466ff62a9
|
f3d38d0e1d50234ce5f17948361a50090ea8cddf
|
/백준/Bronze/Bronze 2/10040번 ; 투표.py
|
86be452918990773f6ae4dc27000d405347a7588
|
[] |
no_license
|
bright-night-sky/algorithm_study
|
967c512040c183d56c5cd923912a5e8f1c584546
|
8fd46644129e92137a62db657187b9b707d06985
|
refs/heads/main
| 2023-08-01T10:27:33.857897
| 2021-10-04T14:36:21
| 2021-10-04T14:36:21
| 323,322,211
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
# https://www.acmicpc.net/problem/10040
# The first line gives the number of games N and the number of committee
# members M (1 <= N, M <= 1000).
N, M = map(int, input().split(' '))
A = []
for i in range(N):
    # Bug fix: Python lists have no `push` method — the original raised
    # AttributeError on the first iteration. Use `append`.
    A.append(int(input()))
|
[
"bright_night_sky@naver.com"
] |
bright_night_sky@naver.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.