Dataset schema (columns in row order, with the viewer's type and observed-range metadata):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 3 to 288
- content_id: string, length 40
- detected_licenses: list, length 0 to 112
- license_type: string, 2 classes
- repo_name: string, length 5 to 115
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, 684 classes
- visit_date: timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38
- revision_date: timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00
- committer_date: timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06
- github_id: int64, 4.92k to 681M, nullable
- star_events_count: int64, 0 to 209k
- fork_events_count: int64, 0 to 110k
- gha_license_id: string, 22 classes
- gha_event_created_at: timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable
- gha_created_at: timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable
- gha_language: string, 147 classes
- src_encoding: string, 25 classes
- language: string, 1 value
- is_vendor: bool
- is_generated: bool
- length_bytes: int64, 128 to 12.7k
- extension: string, 142 classes
- content: string, length 128 to 8.19k
- authors: list, length 1
- author_id: string, length 1 to 132

Each record below lists its cell values in this column order, separated by `|` markers, with the file content and the author list given in full; `null` marks an empty cell.
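The rows that follow can also be consumed programmatically; the snippet below is a minimal sketch of streaming such a table with the Hugging Face `datasets` library. The dataset identifier `"org/python-code-files"` is a hypothetical placeholder (the real id is not given here), and only columns named in the schema above are accessed.

```python
from datasets import load_dataset

# Hypothetical dataset id: replace with the actual identifier of this table.
ds = load_dataset("org/python-code-files", split="train", streaming=True)

for row in ds:
    # Each row pairs repository/commit metadata with one source file.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:200])  # first 200 characters of the file
    break
```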
87dec6ef073bd02fe7709d0c6b011cbfd0f6b878
|
7861798672463b239a3102b8407ec56c593c2811
|
/setup.py
|
0130f5685229a7217c8d3b52ceae52cde7687776
|
[] |
no_license
|
nag92/strokeRehabSystem
|
33b38cb41de4a357e1a9c99cb30b5608d97932b7
|
f51752bd355c91e162f94c26b4078e7d7bcee744
|
refs/heads/master
| 2020-03-31T14:25:06.853916
| 2018-05-02T15:35:50
| 2018-05-02T15:35:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['strokeRehabSystem'],
package_dir={'': 'src'})
setup(**setup_args)
|
[
"nagoldfarb@wpi.edu"
] |
nagoldfarb@wpi.edu
|
0224791ecdacf52585dc82bcf696f6feda3eb560
|
b0c02d7ca86c1ef84af18a8c701702e8bb212b64
|
/robotcar/robot_demo.py
|
d835d77755ccbeb16aa91e9b243d69dbd81e23e3
|
[] |
no_license
|
flashypepo/myMicropython-Examples
|
24fa2f372e68742abe0f74913df000dfe64a9e55
|
b2b63df865b5ad471b351ca5f279135025859f5d
|
refs/heads/master
| 2021-09-24T18:52:18.083444
| 2018-10-13T11:59:19
| 2018-10-13T11:59:19
| 98,223,412
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 846
|
py
|
import machine, motor, bot, time
print('creating i2c and motors ...')
i2c = machine.I2C(scl=machine.Pin(5), sda=machine.Pin(4))
motors = motor.DCMotors(i2c) #creates motors object
LEFT=0 #M0 - left motor
RIGHT=3 #M4 - right motor
print('creating robot ...')
robot = bot.Robot(motors, LEFT, RIGHT) # creates robot
dt = 3 # duration in seconds
print('robot moves ...')
robot.left(2000, dt) #turn left
time.sleep(0.3)
robot.right(2000, dt) # turn right
time.sleep(0.3)
robot.forward(2000, dt) #forward
time.sleep(0.3)
robot.backward(2000, dt) #backwards
time.sleep(0.3)
print('robot demo ...')
speed = 3000 #motorspeed
for i in range(3):
robot.left(speed, dt)
time.sleep(0.3)
robot.right(speed, dt)
time.sleep(0.3)
robot.forward(speed, dt)
time.sleep(0.3)
robot.backward(speed, dt)
time.sleep(1.0)
print('done')
|
[
"peter@pepo.nl"
] |
peter@pepo.nl
|
6fa666ea6d1840544f96f471b1e3fa431e6625ce
|
2b468b1d22ecc5668529255676a1d43936829074
|
/codes/personal_backend/support/test/api/account/test_account_staff_api.py
|
77c65d5ee97fc600c4f0bfb8569cff2aaa68c41e
|
[] |
no_license
|
MaseraTiGo/4U
|
5ac31b4cccc1093ab9a07d18218c3d8c0157dc9c
|
f572830aa996cfe619fc4dd8279972a2f567c94c
|
refs/heads/master
| 2023-07-26T09:44:21.014294
| 2023-07-13T03:43:34
| 2023-07-13T03:43:34
| 149,217,706
| 0
| 0
| null | 2020-06-05T20:38:16
| 2018-09-18T02:34:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,720
|
py
|
# coding=UTF-8
import json
from support.common.testcase.api_test_case import APITestCase
'''
class Add(APITestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_account_staff_add(self):
"""test account staff to add"""
flag = "user"
api = "account.staff.add"
user_info = json.dumps({
'username': "fengshiyu002",
'name': "冯时宇002",
'birthday': "2018-04-16",
'phone': "15232626262",
'email': "2058556456@qq.com",
'gender': "man",
'number': "008",
'identity': "123456789",
'role_ids' :[1,17],
'department_ids' :[1,7],
})
result = self.access_api(flag = flag, api = api, user_info = user_info)
class UpdatePassword(APITestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_account_staff_update_password(self):
"""test account staff to update password"""
flag = "user"
api = "account.staff.update.password"
uid = 2
newpassword = "e10adc3949ba59abbe56e057f20f883e"
oldpassword = "123456"
result = self.access_api(flag = flag, api = api, oldpassword = oldpassword, \
newpassword = newpassword)
'''
class Generate(APITestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_account_staff_generate(self):
"""test account staff to generate"""
flag = "user"
api = "account.staff.generate"
staff_id = 11
username = "fsy"
result = self.access_api(flag = flag, api = api, staff_id = staff_id)
|
[
"344627181@qq.com"
] |
344627181@qq.com
|
87b3c9e11b14cb7d689ba36d1587e35e28f58976
|
c97b9ae1bf06757ba61f90905e4d9b9dd6498700
|
/venv/Lib/site-packages/skimage/draw/draw_nd.py
|
03c268fb11faaef98beb8414071d9f7ed38a343a
|
[] |
no_license
|
Rahulk1p/image-processor
|
f7ceee2e3f66d10b2889b937cdfd66a118df8b5d
|
385f172f7444bdbf361901108552a54979318a2d
|
refs/heads/main
| 2023-03-27T10:09:46.080935
| 2021-03-16T13:04:02
| 2021-03-16T13:04:02
| 348,115,443
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:c5e7f1c5ad5f275def9df43f330f4af4782e674274fb765bbb93af0c05902092
size 3841
|
[
"rksc.k1p@gmail.com"
] |
rksc.k1p@gmail.com
|
86ce704f77b7c265463560e188583cbaa2aac01e
|
f29d69eea45f4383db37b1b6876be4bcfd286312
|
/user_portrait_0320/user_portrait/cron/network/cron_network.py
|
ed9c43bd9b965c0a28d58ca37f802ddade6ad69a
|
[] |
no_license
|
xuzhiq/user_portrait_ending2
|
5ac9952cf275923677d6e2f575289236df4dde9b
|
f2978135ff672f58090e202e588f7321ed121477
|
refs/heads/master
| 2021-05-31T05:15:21.316687
| 2016-05-11T11:56:38
| 2016-05-11T11:56:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,804
|
py
|
# -*- coding:utf-8 -*-
import tempfile
import sys
import json
import time
import tempfile
reload(sys)
sys.path.append('../../')
from spam.pagerank_for_portrait import pagerank
from time_utils import ts2datetime, datetime2ts, ts2date
from keywords_utils import get_task_information, identify_task_exist,\
compute_network_task, write_tmp_file, save_task_results,\
push_task_information
#use to read task information from queue
def scan_network_keywords_task():
#step1: read task information from redis queue
#step2: identify the task information is exist in es
#step3: compute the network trend task
while True:
#read task informaiton from redis queue
network_task_information = get_task_information()
print network_task_information
#when redis queue null - file break
if not network_task_information:
break
#identify the task is exist in es
exist_mark = identify_task_exist(network_task_information)
print 'exist_mark:', exist_mark
if exist_mark:
print 'step 1: compute', ts2date(time.time())
results = compute_network_task(network_task_information)
if results:
tmp_file = tempfile.NamedTemporaryFile(delete=False)
write_tmp_file(tmp_file, results)
tmp_file.close()
if not tmp_file:
return
input_tmp_path = tmp_file.name
print input_tmp_path
ITER_COUNT = 10
TOP_N = 50
print 'step 2: pagerank', ts2date(time.time())
all_uids_count, dg_sorted_uids, pr_sorted_uids = pagerank(ITER_COUNT, input_tmp_path, TOP_N, 'keywords')
#save results
print 'step 3: save', ts2date(time.time())
save_mark = save_task_results(dg_sorted_uids, pr_sorted_uids, network_task_information)
print 'save done', ts2date(time.time())
#identify save status
if not save_mark:
#status fail: push task information to redis queue
push_mark = push_task_information(network_task_information)
if not push_mark:
print 'error push task queue'
else:
#if no exist - pass
pass
if __name__=='__main__':
log_time_ts = time.time()
log_time_date = ts2date(log_time_ts)
print 'cron/network/cron_network.py&start&' + log_time_date
try:
scan_network_keywords_task()
except Exception, e:
print e, '&error&', ts2date(time.time())
log_time_ts = time.time()
log_time_date = ts2date(log_time_ts)
print 'cron/network/cron_network.py&end&' + log_time_date
|
[
"lijiahongasdf@163.com"
] |
lijiahongasdf@163.com
|
220d3da93147ba464b5fd1a2eeefdba19a37c65f
|
26552adb0d8889affd40e009d3c311e41a873e43
|
/Python_Solutions/9095.py
|
6b8aaf7f8f39b7b0b8e579984c319a8acee871ab
|
[] |
no_license
|
Isaac-Lee/BOJ-Algorithm
|
3b9b64aba9ab3b48d15133cbf5ad122822e441d0
|
27f0339195c48f416e672390758e85305203b71a
|
refs/heads/main
| 2022-06-29T21:36:11.500158
| 2022-06-25T06:35:05
| 2022-06-25T06:35:05
| 203,349,860
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
import sys
def make123(n):
if memo[n] > 0:
return memo[n]
if n == 1 or n == 0:
memo[n] = 1
return memo[n]
memo[n] += make123(n-1)
if n-3 >= 0:
memo[n] += make123(n-3)
if n-2 >= 0:
memo[n] += make123(n-2)
return memo[n]
if __name__ == "__main__":
n = int(sys.stdin.readline())
for _ in range(n):
k = int(sys.stdin.readline())
memo = [0] * (k + 1)
print(make123(k))
|
[
"yy0221ss@gmail.com"
] |
yy0221ss@gmail.com
|
c015abc83aad9d4d4eb62342b203ad222667c74b
|
24684138f7a74672e084511e2f0202680b318112
|
/lib/nmdc_metaassembly/nmdc_metaassemblyImpl.py
|
e0e0904a89147ec217adef5203a3d73a74881dd3
|
[
"MIT"
] |
permissive
|
microbiomedata/nmdc_kbase_metaassembly
|
531abc003bace8ead6334966f90a8e925bd583ca
|
2cb091007e556933e90c7c342a3e800d931e15ca
|
refs/heads/master
| 2023-03-16T06:16:24.445768
| 2021-03-05T16:56:48
| 2021-03-05T16:56:48
| 341,439,883
| 0
| 2
|
MIT
| 2021-02-24T18:53:34
| 2021-02-23T05:31:18
|
Python
|
UTF-8
|
Python
| false
| false
| 2,579
|
py
|
# -*- coding: utf-8 -*-
#BEGIN_HEADER
import logging
import os
from installed_clients.KBaseReportClient import KBaseReport
from nmdc_metaassembly.assemble import nmdc_mg_assembly
#END_HEADER
class nmdc_metaassembly:
'''
Module Name:
nmdc_metaassembly
Module Description:
A KBase module: nmdc_metaassembly
'''
######## WARNING FOR GEVENT USERS ####### noqa
# Since asynchronous IO can lead to methods - even the same method -
# interrupting each other, you must be *very* careful when using global
# state. A method could easily clobber the state set by another while
# the latter method is running.
######################################### noqa
VERSION = "0.0.1"
GIT_URL = ""
GIT_COMMIT_HASH = ""
#BEGIN_CLASS_HEADER
#END_CLASS_HEADER
# config contains contents of config file in a hash or None if it couldn't
# be found
def __init__(self, config):
#BEGIN_CONSTRUCTOR
self.callback_url = os.environ['SDK_CALLBACK_URL']
self.shared_folder = config['scratch']
logging.basicConfig(format='%(created)s %(levelname)s: %(message)s',
level=logging.INFO)
print(os.getcwd())
self.asu = nmdc_mg_assembly(self.callback_url, self.shared_folder)
#END_CONSTRUCTOR
pass
def run_nmdc_metaassembly(self, ctx, params):
"""
This example function accepts any number of parameters and returns results in a KBaseReport
:param params: instance of mapping from String to unspecified object
:returns: instance of type "ReportResults" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN run_nmdc_metaassembly
os.chdir(self.shared_folder)
output = self.asu.assemble(params)
#END run_nmdc_metaassembly
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method run_nmdc_metaassembly return value ' +
'output is not type dict as required.')
# return the results
return [output]
def status(self, ctx):
#BEGIN_STATUS
returnVal = {'state': "OK",
'message': "",
'version': self.VERSION,
'git_url': self.GIT_URL,
'git_commit_hash': self.GIT_COMMIT_HASH}
#END_STATUS
return [returnVal]
|
[
"scanon@lbl.gov"
] |
scanon@lbl.gov
|
b07717ae965c5aa2e55fdbcbf027e893ba95b680
|
46ae8264edb9098c9875d2a0a508bc071201ec8b
|
/res/scripts/client/gui/scaleform/daapi/view/battlegas_attack.py
|
c45a4342116a35a23080122c5c705cc4d96ee7d0
|
[] |
no_license
|
Difrex/wotsdk
|
1fc6156e07e3a5302e6f78eafdea9bec4c897cfb
|
510a34c67b8f4c02168a9830d23f5b00068d155b
|
refs/heads/master
| 2021-01-01T19:12:03.592888
| 2016-10-08T12:06:04
| 2016-10-08T12:06:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,223
|
py
|
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/battle/gas_attack.py
from gui.Scaleform.locale.FALLOUT import FALLOUT
from gui.battle_control import g_sessionProvider
from gui.shared.utils.plugins import IPlugin
from gui import makeHtmlString
from helpers import i18n
class GasAttackPlugin(IPlugin):
def start(self):
super(GasAttackPlugin, self).start()
self._parentObj.movie.falloutItems.as_loadGasItems(i18n.makeString(FALLOUT.SAFEZONE_MESSAGE), self.__getPanelText())
g_sessionProvider.getGasAttackCtrl().start(self._parentObj)
def stop(self):
g_sessionProvider.getGasAttackCtrl().stop()
super(GasAttackPlugin, self).stop()
def __getPanelText(self):
infoStr = i18n.makeString(FALLOUT.GASATTACKPANEL_SAFEZONE_MESSAGE)
return (FALLOUT.GASATTACKPANEL_START_TITLE,
FALLOUT.GASATTACKPANEL_START_MESSAGE,
FALLOUT.GASATTACKPANEL_GASATTACK_TITLE,
FALLOUT.GASATTACKPANEL_GASATTACK_MESSAGE,
FALLOUT.GASATTACKPANEL_INSIDE_TITLE,
FALLOUT.GASATTACKPANEL_INSIDE_MESSAGE,
FALLOUT.GASATTACKPANEL_SAFEZONE_TITLE,
makeHtmlString('html_templates:battle/gasAtackPanel', 'safeZone', infoStr))
|
[
"m4rtijn@gmail.com"
] |
m4rtijn@gmail.com
|
c0604ecc3e5fec3aa2883092810bbfee31e16a8e
|
f50368f3165c182a0adc914dec56f0cc03d9fb5a
|
/visual_mpc/envs/sawyer_robot/vanilla_sawyer_env.py
|
dc1c25c0a1aabdcacfee00d56d4d3d2dbb6b5243
|
[
"MIT"
] |
permissive
|
anestisdotpy/visual_foresight
|
16ea71f938458a35892c1f557903ed885810dda3
|
957df706b4c7a11b7a0c9ba2de15853df62cd4ed
|
refs/heads/master
| 2020-06-22T05:59:10.578361
| 2019-07-18T20:23:51
| 2019-07-18T20:23:51
| 197,651,312
| 0
| 0
| null | 2019-07-18T20:17:26
| 2019-07-18T20:17:26
| null |
UTF-8
|
Python
| false
| false
| 487
|
py
|
from .base_sawyer_env import BaseSawyerEnv
import copy
class VanillaSawyerEnv(BaseSawyerEnv):
def __init__(self, env_params, _=None):
self._hyper = copy.deepcopy(env_params)
BaseSawyerEnv.__init__(self, env_params)
self._adim, self._sdim = self._base_adim, self._base_sdim
def _next_qpos(self, action):
assert action.shape[0] == self._base_adim, "Action should have shape (5,)"
return self._previous_target_qpos * self.mode_rel + action
|
[
"sdasari@berkeley.edu"
] |
sdasari@berkeley.edu
|
970d1f55ec7af00a76cc77d234fbbdb727238328
|
43ae032297b492fbdf2df478588d2367f59d0b6b
|
/4 - Classes-inheritance-oops/8-classes-inheritance-multiple-classes.py
|
14e655efe6e7abee7b0671b4f7bac92126fa490d
|
[] |
no_license
|
thippeswamydm/python
|
59fa4dbb2899894de5481cb1dd4716040733c378
|
db03b49eb531e75b9f738cf77399a9813d16166b
|
refs/heads/master
| 2020-07-05T06:57:18.575099
| 2019-10-23T04:30:27
| 2019-10-23T04:30:27
| 202,562,414
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 788
|
py
|
# Describes multiple inheritance of class from multiple classes
# Declaration of first parent
class Parent:
# Will not be accessible in the child class
__test = None
def __init__(self, val):
self.val = val
def printValFoo(self):
print(self.val)
# Note: dunder-style names (double underscores on both sides) are not name-mangled, so unlike __test this method is still accessible in the child class
def __printValFoo__(self):
print(self.val)
# Declaration of second parent
class ParentTwo:
def __init__(self, val):
self.val = val
def printValFoos(self):
print(self.val)
# Simple inheritance of Foo class by DerivedChild
class DerivedChild(Parent, ParentTwo):
def negateVal(self):
self.val = -self.val
# Instantiating the class and accessing methods
obj1 = DerivedChild('test')
obj1.printValFoo()
obj1.printValFoos()
|
[
"ganeshsurfs@gmail.com"
] |
ganeshsurfs@gmail.com
|
a583b094e10e271c7a87b12272c61dc1263274db
|
350ade9361645f87d96589a0c90c76d8a951832b
|
/search.py
|
103e3e43619fb4d0f0232b3c8f8036cdd866439b
|
[] |
no_license
|
dongzeyuan/Practise
|
becf7c7ca15928213aa22ae15bd8b3f1f9b7dc8b
|
ecef4466d30c5c9e88e766b4f3df6db24959b9d3
|
refs/heads/master
| 2021-09-21T02:06:24.629708
| 2018-08-19T08:50:02
| 2018-08-19T08:50:02
| 119,028,934
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,776
|
py
|
import wx
class ExamplePanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
# create some sizers
mainSizer = wx.BoxSizer(wx.VERTICAL)
grid = wx.GridBagSizer(vgap=5, hgap=5)  # virtual grid sizer; the row and column gaps are set here
hSizer = wx.BoxSizer(wx.HORIZONTAL)
self.quote = wx.StaticText(self, label="面板示例:", pos=(20, 20))
grid.Add(self.quote, pos=(0, 0))
# a multi-line text control used to show how events work in the program
self.logger = wx.TextCtrl(self, pos=(300, 20), size=(
200, 300), style=wx.TE_MULTILINE | wx.TE_READONLY)
# a button
self.button = wx.Button(self, label="保存", pos=(200, 325))
self.Bind(wx.EVT_BUTTON, self.Onclick, self.button)
# edit controls
self.lblname = wx.StaticText(self, label="Your Name:", pos=(20, 60))
grid.Add(self.lblname, pos=(1, 0))
self.editname = wx.TextCtrl(
self, value="input your name", pos=(140, 60), size=(140, -1))
grid.Add(self.editname, pos=(1, 1))
self.Bind(wx.EVT_TEXT, self.EvtText, self.editname)
self.Bind(wx.EVT_CHAR, self.EvtChar, self.editname)
# combo box control
self.sampleList = ['friends', 'advertising',
'web search', 'Yellow Pages']
self.lblhear = wx.StaticText(
self, label="Select the topic ?", pos=(20, 90))
grid.Add(self.lblhear, pos=(3, 0))
self.edithear = wx.ComboBox(self, pos=(150, 90), size=(
95, -1), choices=self.sampleList, style=wx.CB_DROPDOWN)
grid.Add(self.edithear, pos=(3, 1))
self.Bind(wx.EVT_COMBOBOX, self.EvtComboBox, self.edithear)
self.Bind(wx.EVT_TEXT, self.EvtText, self.edithear)
# add some spacing to the sizer
grid.Add((10, 40), pos=(2, 0))  # the spacer's width and height are set here
# checkbox
self.insure = wx.CheckBox(
self, label="Do you want Insured Shipment ?", pos=(20, 180))
grid.Add(self.insure, pos=(4, 0), span=(1, 2))
self.Bind(wx.EVT_CHECKBOX, self.EvtCheckBox, self.insure)
# radio buttons
radioList = ['blue', 'red', 'yellow', 'orange',
'green', 'purple', 'navy blue', 'black', 'gray']
rb = wx.RadioBox(self, label="What color would you like ?", pos=(20, 210), choices=radioList, majorDimension=3,
style=wx.RA_SPECIFY_COLS)
grid.Add(rb, pos=(5, 0), span=(1, 2))
self.Bind(wx.EVT_RADIOBOX, self.EvtRadioBox, rb)
hSizer.Add(grid, 0, wx.ALL, 5)
hSizer.Add(self.logger)
mainSizer.Add(hSizer, 0, wx.ALL, 5)
mainSizer.Add(self.button, 0, wx.CENTER)
self.SetSizerAndFit(mainSizer)
def EvtRadioBox(self, event):
self.logger.AppendText('EvtRadioBox: %d\n' % event.GetInt())
def EvtComboBox(self, event):
self.logger.AppendText('EvtComboBox: %s\n' % event.GetString())
def Onclick(self, event):
self.logger.AppendText(' Click on object with Id %d\n' % event.GetId())
def EvtText(self, event):
self.logger.AppendText('EvtText: %s\n' % event.GetString())
def EvtChar(self, event):
self.logger.AppendText('EvtChar: %d\n' % event.GetKeyCode())
event.Skip()
def EvtCheckBox(self, event):
self.logger.AppendText('EvtCheckBox: %d\n' % event.IsChecked())
# a panel with switchable (notebook) navigation
app = wx.App(False)
frame = wx.Frame(None, title="NoteBook示例", size=(600, 400))
nb = wx.Notebook(frame)
nb.AddPage(ExamplePanel(nb), "Absolute Positioning")
nb.AddPage(ExamplePanel(nb), "Page Two")
nb.AddPage(ExamplePanel(nb), "Page Three")
frame.Show()
app.MainLoop()
|
[
"dongfujing88@gmail.com"
] |
dongfujing88@gmail.com
|
db221693696bd678140b5b4dffd264e3e5f29f9b
|
e3fe6ea7a67f19f35d3edc4e82a900f988a710d1
|
/ML/regression/optimizer.py
|
b643b44b5a2f13b5ddebd8f647bb964b5519cff8
|
[] |
no_license
|
fooSynaptic/NLP_utils
|
8258b857458d2021b4ead31680b06edaed031fcd
|
9a02d3caf9f97b1fc777ffeefba87be7a44fe262
|
refs/heads/master
| 2022-08-29T03:45:01.977332
| 2022-07-28T15:09:12
| 2022-07-28T15:09:12
| 163,344,219
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,274
|
py
|
# encoding=utf-8
# /usr/bin/python3
import numpy as np
"""vanilla linear regression with gradient descent"""
class linReg():
def __init__(self, num_inputs):
self.w = np.random.rand(num_inputs, )
self.b = np.random.rand(1, )
def squared_loss(self, y_hat, y):
squared_err = (y_hat - y.reshape(y_hat.shape)) ** 2 / 2
res = np.sqrt(np.mean(squared_err))
return res
def grad(self, X, y, W = None):
return np.array([
np.mean(X[:, 0] * np.mean(self.y_hat(X) - y)),
np.mean(X[:, 1] * np.mean(self.y_hat(X) - y)),
1 * np.mean(self.y_hat(X) - y)
])
def batchGD(self, grad, lr):
self.w -= (lr * grad)[:2]
self.b -= (lr * grad)[-1]
def y_hat(self, X):
return X @ self.w + self.b
def parameters(self):
return [self.w, self.b]
"""linear regression with momentum"""
class momentumLinreg(linReg):
def __init__(self, num_inputs):
super(momentumLinreg, self).__init__(num_inputs)
self.wv = np.random.rand(num_inputs, )
self.bv = np.random.rand(1, )
self.momentum = 0.5
def sgd_momentum(self, grad, lr):
# update momentum v
self.wv = self.wv * self.momentum + lr * grad[:2]
self.bv = self.bv * self.momentum + lr * grad[-1]
# update parameters
self.w -= self.wv
self.b -= self.bv
""" adagrad enable the param update with different learning rate """
class AdaGradLinreg(linReg):
def __init__(self, num_inputs):
super(AdaGradLinreg, self).__init__(num_inputs)
# according to linreg, grad is a vector with 3 dimension so
self.S = np.zeros(num_inputs+1)
def sgd_AdaGrad(self, grad, lr, sigma = 1E-6):
# update adagrad vector
self.S += grad ** 2
# update parameters
adagrad = (lr / np.sqrt(self.S + sigma)) * grad
self.w -= adagrad[:2]
self.b -= adagrad[-1]
"""RMSProp- little improvement for adaGrad, avoid too small learning rate """
class RMSPropLinreg(linReg):
def __init__(self, num_inputs):
super(RMSPropLinreg, self).__init__(num_inputs)
# according to linreg, grad is a vector with 3 dimension so
self.S1 = np.zeros(num_inputs)
self.S2 = np.zeros(1)
self.gama = 0.9
def sgd_RMSProp(self, grad, lr, sigma = 1E-6):
self.S1 = self.gama*self.S1 + ((1-self.gama)*grad**2)[:2]
self.S2 = self.gama*self.S2 + ((1-self.gama)*grad**2)[-1]
# update parameters
self.w -= (lr / np.sqrt(self.S1 + sigma)) * grad[:2]
self.b -= (lr / np.sqrt(self.S2 + sigma)) * grad[-1]
"""AdaDelta Solving the problem when it's hard to find global optimization"""
class AdaDeltaLinreg(linReg):
def __init__(self, num_inputs):
super(AdaDeltaLinreg, self).__init__(num_inputs)
self.S1 = np.zeros(2)
self.S2 = np.zeros(1)
self.delta = np.zeros(num_inputs+1)
def sgd_AdaDelta(self, grad, sigma = 1E-5, ro=0.9):
# update S
self.S1 = ro*self.S1 + ((1-ro)*grad**2)[:2]
self.S2 = ro*self.S2 + ((1-ro)*grad**2)[-1]
#fix grad
grad1 = np.sqrt((self.delta[:2]+sigma)/(self.S1+sigma)) * grad[:2]
grad2 = np.sqrt((self.delta[-1]+sigma)/(self.S2+sigma)) * grad[-1]
# update parameters
self.w -= grad1
self.b -= grad2
# update delta
self.delta = ro*self.delta + (1-ro)*np.concatenate([grad1, grad2])**2
"""Adam: RMSProp-Improvement for batch grad"""
class AdamLinreg(linReg):
def __init__(self, num_inputs):
super(AdamLinreg, self).__init__(num_inputs)
self.S = np.zeros(num_inputs+1)
self.V = np.zeros(num_inputs+1)
self.t = 1
def sgd_Adam(self, grad, lr, beta1=0.9, beta2=0.999, sigma=1E-6):
self.V = beta1*self.V + (1-beta1)*grad
self.S = beta2*self.S + (1-beta2) * grad**2
### bias fix
Vfix = self.V / (1- beta1**self.t)
Sfix = self.S / (1- beta2**self.t)
self.t += 1
# fix grad
grad = (lr*Vfix)/(np.sqrt(Sfix)+sigma) * grad
# update parameters
self.w -= grad[:2]
self.b -= grad[-1]
|
[
"hujiaxin@ajmide.com"
] |
hujiaxin@ajmide.com
|
8a9bb8495e53836929e9fd749a53a604f9cec9a4
|
43fac2df4893f5b5448dd64f3c9e59ddebca7efe
|
/andromap/polytools.py
|
d8d3e5933a73f9d6a0e4bb11d12400f27bf2b6e7
|
[
"BSD-3-Clause"
] |
permissive
|
jonathansick/andromap
|
8a159eb7a85ab5ea7602cdff1755ba411ef38d97
|
b8905a39e0a45c7d803e45ce9a78c64ecac00bee
|
refs/heads/master
| 2021-01-18T23:01:36.996791
| 2017-01-27T05:32:06
| 2017-01-27T05:32:06
| 14,810,726
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 849
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
Tools for working with polygons through shapely
2013-12-11 - Created by Jonathan Sick
"""
from shapely.geometry import Polygon, MultiPolygon
from shapely.ops import cascaded_union
import numpy as np
def close_vertices(polygon):
"""Make the last vertex the same as the first."""
polygon.append(polygon[0])
return polygon
def polygon_union(polygons):
"""Make the union of polygons. Returns a list of all isolated polygon
unions."""
shapely_polys = [Polygon(p) for p in polygons]
multipoly = MultiPolygon(shapely_polys)
u = cascaded_union(multipoly)
if isinstance(u, MultiPolygon):
vert_seq = []
for p in u:
vert_seq.append(np.array(p.exterior.coords[:]))
return vert_seq
else:
return [np.array(u.exterior.coords[:])]
|
[
"jonathansick@mac.com"
] |
jonathansick@mac.com
|
3a5c2d05027ad0b4d276f391d8f18fc7563905d0
|
ba0731b2dbc4c1529eaaa79811ec15754c19b4cd
|
/extractors/refextract/extract/routes.py
|
da9b4cb042c0fb5305c062073e04e91f29db71c1
|
[
"MIT"
] |
permissive
|
arXiv/arxiv-references
|
35f87084cf91947c572faf1a86f119b308fada66
|
a755aeaa864ff807ff16ae2c3960f9fee54d8dd8
|
refs/heads/master
| 2022-12-21T02:34:57.166298
| 2018-05-04T20:30:48
| 2018-05-04T20:30:48
| 94,906,433
| 8
| 6
|
MIT
| 2022-12-08T02:06:20
| 2017-06-20T15:26:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,709
|
py
|
"""HTTP routes for refextract API."""
import os
from refextract import extract_references_from_file
from flask.json import jsonify
from flask import Blueprint, request, current_app
from werkzeug.utils import secure_filename
from werkzeug.datastructures import FileStorage
import logging
HTTP_200_OK = 200
HTTP_400_BAD_REQUEST = 400
HTTP_500_INTERNAL_SERVER_ERROR = 500
blueprint = Blueprint('refextract', __name__, url_prefix='/refextract')
def getLogger():
"""Create a logger based on application configuration."""
default_format = '%(asctime)s - %(name)s - %(levelname)s: %(message)s'
try:
log_level = int(current_app.config.get('LOGLEVEL', logging.INFO))
log_format = current_app.config.get('LOGFORMAT', default_format)
log_file = current_app.config.get('LOGFILE')
except AttributeError:
log_level = logging.INFO
log_format = default_format
log_file = None
logging.basicConfig(format=log_format)
logger = logging.getLogger(__name__)
logger.setLevel(log_level)
if log_file is not None:
logger.addHandler(logging.FileHandler(log_file))
return logger
def handle_upload(uploaded_file: FileStorage) -> str:
"""Store an uploaded file."""
filename = secure_filename(uploaded_file.filename)
if not filename.endswith('.pdf'):
raise ValueError('Unsupported file type')
filepath = os.path.join(current_app.config['UPLOAD_PATH'], filename)
uploaded_file.save(filepath)
return filepath
def cleanup_upload(filepath: str) -> None:
"""Remove uploaded file."""
if os.path.exists(filepath):
os.remove(filepath)
return
@blueprint.route('/status', methods=['GET'])
def status() -> tuple:
"""Health check endpoint."""
return jsonify({'iam': 'ok'}), HTTP_200_OK
@blueprint.route('/extract', methods=['POST'])
def extract() -> tuple:
"""Handle a request for reference extraction for a POSTed PDF."""
logger = getLogger()
if 'file' not in request.files:
return jsonify({'explanation': 'No file found'}), HTTP_400_BAD_REQUEST
try:
filepath = handle_upload(request.files['file'])
except ValueError as e:
return jsonify({'explanation': str(e)}), HTTP_400_BAD_REQUEST  # ValueError has no .msg attribute
try:
response_data = extract_references_from_file(filepath)
status = HTTP_200_OK
except Exception as e:
response_data = {'explanation': 'refextract failed: %s' % e}
status = HTTP_500_INTERNAL_SERVER_ERROR
finally:
try:
cleanup_upload(filepath)
except IOError as e:
logger.warning('Could not remove file %s: %s' % (filepath, e))
return jsonify(response_data), status
|
[
"brp53@cornell.edu"
] |
brp53@cornell.edu
|
ca14cfcc4f0020bf14072b28b13b5efd0c0a1140
|
2daa3894e6d6929fd04145100d8a3be5eedbe21c
|
/tests/real-life/test_sof_example.py
|
67de4d5643861c429a1c60920db8424086e6352e
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Henri-Lo/pyaf
|
a1f73a0cc807873bd7b79648fe51de9cfd6c126a
|
08c968425d85dcace974d90db7f07c845a0fe914
|
refs/heads/master
| 2021-07-01T12:27:31.600232
| 2017-09-21T11:19:04
| 2017-09-21T11:19:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,452
|
py
|
import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import datetime
#get_ipython().magic('matplotlib inline')
trainfile = "data/sof_example.csv";
df = pd.read_csv(trainfile, sep=r',', engine='python', skiprows=0);
df['Date'] = df['Date'].apply(lambda x : datetime.datetime.strptime(x, "%m/%d/%Y"))
print(df.head());
lDateVar = 'Date'
lSignalVar = 'Used'
lEngine = autof.cForecastEngine()
lEngine
H = 10;
#lEngine.mOptions.enable_slow_mode();
lEngine.mOptions.mDebugPerformance = True;
lEngine.train(df , lDateVar , lSignalVar, H);
lEngine.getModelInfo();
print(lEngine.mSignalDecomposition.mTrPerfDetails.head());
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
lEngine.standrdPlots("outputs/sof_example");
dfapp_in = df.copy();
dfapp_in.tail()
#H = 12
dfapp_out = lEngine.forecast(dfapp_in, H);
#dfapp_out.to_csv("outputs/sof_example_apply_out.csv")
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[lDateVar , lSignalVar,
lSignalVar + '_Forecast' ,
lSignalVar + '_Forecast_Lower_Bound',
lSignalVar + '_Forecast_Upper_Bound']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(2*H));
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
a1eb66dbe95cb27bee4d6db88aae787c660163b8
|
5b49b2f8934e21445757a36174d924bb713ad810
|
/elvers/rules/rcorrector/rcorrector-wrapper.py
|
d38953bd8abd25a4ea0521807acf6cc1283d78a0
|
[
"BSD-3-Clause"
] |
permissive
|
maligang/elvers
|
bf982442d08411a7081f186c7b76ac09035b0466
|
b2dab8092351f6db13c437c89004b897fb8a40d7
|
refs/heads/master
| 2022-12-11T12:45:06.905102
| 2020-08-05T17:09:50
| 2020-08-05T17:09:50
| 290,427,388
| 0
| 0
|
NOASSERTION
| 2020-08-26T07:30:10
| 2020-08-26T07:30:09
| null |
UTF-8
|
Python
| false
| false
| 2,509
|
py
|
__author__ = "N .Tessa Pierce"
__copyright__ = "Copyright 2019, N. Tessa Pierce"
__email__ = "ntpierce@gmail.com"
__license__ = "MIT"
from os import path
from snakemake.shell import shell
extra = snakemake.params.get("extra", "")
log = snakemake.log_fmt_shell(stdout=True, stderr=True)
outdir = path.dirname(snakemake.output.get('r1'))
r1 = snakemake.input.get("r1")
r2 = snakemake.input.get("r2")
r = snakemake.input.get("r")
def move_files(outdir, in_list, out_list):
for f, o in zip(in_list, out_list):
f = path.join(outdir, f)
shell("cp {f} {o}")
shell("rm -f {f}")
def build_default_outname(infile):
# Rcorrector outputs gzipped files IF input files are gzipped
end = '.gz' if infile.endswith('.gz') else ''
return(path.basename(infile.rsplit('.f')[0]) + '.cor.fq' + end)
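# (Worked example added for clarity, not in the original wrapper; filenames are illustrative.)
#   build_default_outname('sample_R1.fastq.gz') -> 'sample_R1.cor.fq.gz'
#   build_default_outname('sample_R1.fastq')    -> 'sample_R1.cor.fq'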
assert (r1 is not None and r2 is not None) or r is not None, "either r1 and r2 (paired), or r (unpaired) are required as input"
if r1:
# handle inputs
r1 = [snakemake.input.r1] if isinstance(snakemake.input.r1, str) else snakemake.input.r1
r2 = [snakemake.input.r2] if isinstance(snakemake.input.r2, str) else snakemake.input.r2
assert len(r1) == len(r2), "input-> equal number of files required for r1 and r2"
r1_cmd = ' -1 ' + ",".join(r1)
r2_cmd = ' -2 ' + ",".join(r2)
read_cmd = " ".join([r1_cmd,r2_cmd])
# handle outputs
r1_out = [snakemake.output.r1] if isinstance(snakemake.output.r1, str) else snakemake.output.r1
r2_out = [snakemake.output.r2] if isinstance(snakemake.output.r2, str) else snakemake.output.r2
r1_default, r2_default = [], []
for f in r1:
r1_default+= [build_default_outname(f)]
for f in r2:
r2_default+= [build_default_outname(f)]
if r:
# handle inputs
assert r1 is None and r2 is None, "cannot handle mixed paired/unpaired input files. Please input either r1,r2 (paired) or r (unpaired)"
r = [snakemake.input.r] if isinstance(snakemake.input.r, str) else snakemake.input.r
read_cmd = ' -r ' + ",".join(r)
# handle outputs
r_out = [snakemake.output.r] if isinstance(snakemake.output.r, str) else snakemake.output.r
r_default = []
for f in r:
r_default += [build_default_outname(f)]
shell("run_rcorrector.pl {read_cmd} -od {outdir} {snakemake.params.extra} -t {snakemake.threads} {log}")
if r1_default:
move_files(outdir, r1_default, r1_out)
move_files(outdir, r2_default, r2_out)
elif r_default:
move_files(outdir, r_default, r_out)
|
[
"ntpierce@gmail.com"
] |
ntpierce@gmail.com
|
0540f3bb95f151504c0503e6e8179fec0de7207e
|
f98f4aaeca3ac841905e0cd8547bbf41944fe690
|
/编程语言/Python/Python编程从入门到实践/第一部分_基础知识/第10章_文件和异常/10_6.py
|
3835fd1a20ce17bffe0d169f134703a891d57347
|
[] |
no_license
|
zhb339/book-learning
|
64f433b1ee1f66f3120828352df3b533be4cf9de
|
5273fc8d11b2d602484dbe95e55f1e931858382f
|
refs/heads/master
| 2020-03-29T10:48:22.771146
| 2018-11-07T13:46:53
| 2018-11-07T13:46:53
| 149,823,496
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
while True:
try:
num1 = int(input("Enter first num: "))
num2 = int(input("Enter second num: "))
total = num1 + num2
print("The sum of two numbers is " + str(total) + ".")
except:
print("Please enter number!")
|
[
"551788157@qq.com"
] |
551788157@qq.com
|
2beb8e21a0723043fb3bf99c43435fcea367c31d
|
1be37064cf303b79cf83ab1d5d120a8db6dbeaa4
|
/fastparquet/thrift_structures.py
|
280b8583627af5b1b4c31b246d18bc52192c32fd
|
[
"Apache-2.0"
] |
permissive
|
mrocklin/fastparquet
|
02e0252609b8825b2ad89c0b21b3a6e20bc34f94
|
b0171ba3a7894f4860c1ce761ad47f451fc4cbc8
|
refs/heads/master
| 2020-12-24T08:54:09.977491
| 2016-11-08T17:14:38
| 2016-11-08T17:14:38
| 73,311,617
| 2
| 0
| null | 2016-11-09T18:46:52
| 2016-11-09T18:46:51
| null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
import os
import thriftpy
THRIFT_FILE = os.path.join(os.path.dirname(__file__), "parquet.thrift")
parquet_thrift = thriftpy.load(THRIFT_FILE, module_name="parquet_thrift") # pylint: disable=invalid-name
|
[
"martin.durant@utoronto.ca"
] |
martin.durant@utoronto.ca
|
e763f92c41f17a6f2fdbc603b1a4ce8c2d338382
|
9a5438bdb8e84d0167ddea5458a7f729fdd54121
|
/metadata/tests/test_models/test_meta/test_attribute.py
|
5d8ef2b6856c0df33ffb8ea2141cb61013e7a8aa
|
[] |
no_license
|
Grusinator/MetaDataApi
|
740fd2be4cb97b670f827a071a0ac8c50f79f8ff
|
081f881c735466ed1dbbd68646b821299c5168f8
|
refs/heads/master
| 2023-07-25T23:58:22.179717
| 2020-03-15T09:36:05
| 2020-03-15T09:36:05
| 149,087,967
| 5
| 1
| null | 2023-07-25T15:39:12
| 2018-09-17T07:45:09
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,856
|
py
|
import unittest
from datetime import datetime
import django
from django.test import TransactionTestCase
class TestAttribute(TransactionTestCase):
@classmethod
def setUpClass(cls):
super(TestAttribute, cls).setUpClass()
django.setup()
@unittest.skip
def test_datatype_to_data_object(self):
self.fail()
@unittest.skip
def test_exists_by_label(self):
self.fail()
@unittest.skip
def test_exists(self):
self.fail()
@unittest.skip
def test_assert_data_type(self):
self.fail()
def test_all_instances(self):
from metadata.tests import LoadTestData
schema = LoadTestData.init_foaf()
from metadata.models import SchemaNode, SchemaAttribute
obj = SchemaNode(label="test", schema=schema)
obj.save()
from metadata.models import BaseAttribute, Node
from metadata.models import FileAttribute
from metadata.models import ImageAttribute
for InstanceType in set(BaseAttribute.get_all_instance_types()) - {FileAttribute,
ImageAttribute}:
data_type = InstanceType.get_data_type()
att = SchemaAttribute(
label="test_%s" % str(data_type),
data_type=data_type,
object=obj,
)
att.save()
obj_inst = Node(base=obj)
obj_inst.save()
value = data_type(2011, 4, 3) if data_type is datetime else data_type()
att_inst = InstanceType(
value=value,
base=att,
object=obj_inst
)
att_inst.save()
instances = BaseAttribute.get_all_instances_from_base(att)
self.assertListEqual([att_inst], instances)
|
[
"grusinator@gmail.com"
] |
grusinator@gmail.com
|
be4958352ebe1cd5a7ed7e0e5a5a17eb690f7a54
|
9e26e9b8e1e0f7bbf116fdf5c92b4e3006d385ef
|
/user/views.py
|
a70640e853d683e97575775ec1e3f0e5b67f3000
|
[] |
no_license
|
talhajubair100/e-commerce
|
fd77877b877a91a447e9b17ab58a4d59cc1456df
|
e7620b0971489ce558a97dd8d9988562663fe560
|
refs/heads/main
| 2023-02-15T08:57:29.308811
| 2021-01-06T18:25:35
| 2021-01-06T18:25:35
| 317,015,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,539
|
py
|
from django.contrib.auth.forms import PasswordChangeForm
from .forms import UserUpdateForm, ProfileUpdateForm
from django.shortcuts import redirect, render
from django.http.response import HttpResponse, HttpResponseRedirect
from product.models import Category, Comment
from django.contrib.auth import authenticate, login, logout, update_session_auth_hash
from django.contrib import messages
from .models import UserProfile
from order.models import Order, OrderProduct
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required
def user_profile(request):
category = Category.objects.all()
current_user = request.user
profile = UserProfile.objects.get(user_id=current_user.id)
context = {'category': category, 'profile': profile}
return render(request, 'user_profile.html', context)
@login_required
def user_update(request):
if request.method == 'POST':
user_form = UserUpdateForm(request.POST, instance=request.user)
profile_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.userprofile)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile_form.save()
messages.success(request, 'Your account has been update')
return redirect('/user')
else:
category = Category.objects.all()
user_form = UserUpdateForm(instance=request.user)
profile_form = ProfileUpdateForm(instance=request.user.userprofile)
context = {'user_form': user_form, 'profile_form': profile_form, 'category': category}
return render(request, 'user_update.html', context)
@login_required
def user_password(request):
if request.method == 'POST':
form = PasswordChangeForm(request.user, request.POST)
if form.is_valid():
user = form.save()
update_session_auth_hash(request, user) #important
messages.success(request, "Your password was successfully update !")
return HttpResponseRedirect('/user')
else:
messages.error(request, 'Please correct the error.<br>' + str(form.errors))
return HttpResponseRedirect('/user/password')
else:
category = Category.objects.all()
form = PasswordChangeForm(request.user)
context = {'category': category, 'form': form}
return render(request, 'user_password.html', context)
@login_required
def user_orders(request):
category = Category.objects.all()
current_user = request.user
orders = Order.objects.filter(user_id=current_user.id).order_by('-create_at')
context = {'category': category, 'orders': orders}
return render(request, 'user_orders.html', context)
@login_required
def order_details(request, id):
category = Category.objects.all()
current_user = request.user
orders = Order.objects.get(user_id=current_user.id, id=id)
orderitems = OrderProduct.objects.filter(order_id=id)
context = {'category': category, 'orders': orders, 'orderitems': orderitems}
return render(request, 'user_order_detail.html', context)
@login_required
def user_orders_product(request):
category = Category.objects.all()
current_user = request.user
order_product = OrderProduct.objects.filter(user_id=current_user.id).order_by('-id')
context = {'category': category, 'order_product': order_product}
return render(request, 'user_orders_product.html', context)
@login_required
def user_order_product_details(request, id, oid):
category = Category.objects.all()
current_user = request.user
orders = Order.objects.get(user_id=current_user.id, id=oid)
orderitems = OrderProduct.objects.filter(id=id, user_id=current_user.id)
context = {'category': category, 'orderitems': orderitems, 'orders': orders}
return render(request, 'user_order_detail.html', context)
@login_required
def user_comments(request):
category = Category.objects.all()
current_user = request.user
comments = Comment.objects.filter(user_id=current_user.id)
context = {'category': category, 'comments': comments}
return render(request, 'user_comments.html', context)
@login_required
def user_delete_comment(request, id):
current_user = request.user
Comment.objects.filter(id=id ,user_id=current_user.id).delete()
messages.success(request, "Secessfully delete comment....")
return HttpResponseRedirect('user/comments/')
# def login_view(request):
# if request.method == 'POST':
# username = request.POST['username']
# password = request.POST['password']
# user = authenticate(request, username=username, password=password)
# if user is not None:
# login(request, user)
# current_user = request.user
# userprofile = UserProfile.objects.get(user_id=current_user.id)
# request.session['userimage'] = userprofile.image.url
# return HttpResponseRedirect("/")
# else:
# messages.warning(request, "Login Error !! Username or Password is incorrect")
# return HttpResponseRedirect("/login")
# category = Category.objects.all()
# context = {'category': category}
# return render (request, 'login.html', context)
def logout_view(request):
logout(request)
return HttpResponseRedirect("/")
# def signup_view(request):
# if request.method == "POST":
# form = SignUpForm(request.POST)
# if form.is_valid():
# form.save() #signup complete here
# # this code for auto login
# username = form.cleaned_data.get('username')
# password = form.cleaned_data.get('password1')
# user = authenticate(username=username, password=password)
# login(request, user)
# # Create data in profile table for user
# current_user = request.user
# data=UserProfile()
# data.user_id=current_user.id
# data.image="media/users/user.jpg"
# data.save()
# messages.success(request, 'Your account has been created!')
# return HttpResponseRedirect("/login/")
# else:
# messages.warning(request,form.errors)
# return HttpResponseRedirect('/signup')
# form = SignUpForm()
# category = Category.objects.all()
# context = {'category': category, 'form': form}
# return render (request, 'signup.html', context)
|
[
"talhajubair100.bd@gmail.com"
] |
talhajubair100.bd@gmail.com
|
8a809d0d0ffedb0a224620d12bc9ab3749e4ff9e
|
5f7c40860e584cb86e140d0819397a75f93e28ea
|
/motorcycles.py
|
3f2b9124a2a2adecfbcef98ba88d8e0607f78df7
|
[] |
no_license
|
bailijiang/Python_exercise
|
cb149db1bc770fc684bf461ebae1b6dd53bf10a5
|
744a2a3b44036495ae28ec87fea98c04d008b099
|
refs/heads/master
| 2021-09-02T03:30:21.512752
| 2017-12-29T23:39:05
| 2017-12-29T23:39:05
| 114,254,360
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
__author__ = 'Bryan'
motorcycles = ['honda', 'yamaha', 'suzuki']
print(motorcycles)
motorcycles[0] = 'ducati'
print(motorcycles)
motorcycles.append('ducati')
print(motorcycles)
motorcycles.insert(1, 'BRZ')
print(motorcycles)
del motorcycles[0]
print(motorcycles)
poped_motocycles = motorcycles.pop()
print(poped_motocycles)
first_owned = motorcycles.pop(0)
print(first_owned.title())
motorcycles.remove('suzuki')
print(motorcycles)
|
[
"bailijiang@hotmail.com"
] |
bailijiang@hotmail.com
|
fe5700984481c97ee479bdc3980dd0df676e42cf
|
9658ec133fd777d349487d7f06df74e4c99f5613
|
/src/microplot/explorer/examples/demo_mono_bitmap.py
|
1606e08cf9d539ddfa43668d0769153d2c67744e
|
[
"MIT"
] |
permissive
|
Palmbear/microplot
|
6a6d0fc0be7318d95974c8541b2db2cf4390b6e8
|
9cb07c1f1a231b81f4517417c8a3a0fff6aeb3b1
|
refs/heads/master
| 2023-07-01T22:59:50.577556
| 2021-03-19T15:42:14
| 2021-03-19T15:42:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 307
|
py
|
import math
from plotter import Plotter
from plots import LinePlot
def run():
sines = list(math.sin(math.radians(x))
for x in range(0, 361, 5))
plot = LinePlot([sines],'MicroPlot line')
plotter = Plotter()
plot.plot(plotter)
plotter.write_mono_bitmap('demo-mono.bmp')
|
[
"romilly.cocking@gmail.com"
] |
romilly.cocking@gmail.com
|
660d07bde49e51d20f8d3401e32430d4edd6b1ee
|
0f36de8254443fd6bf0ae5fe0c1dc1a3bda619d2
|
/devel/.private/differential_robot_185104iaib/lib/python2.7/dist-packages/differential_robot_185104iaib/msg/_counter_message.py
|
34df98f8e6c0f454f8f33569e3add0f1ab2278c6
|
[] |
no_license
|
alekal1/ros
|
88c5d2a1b013e36b4acecfc5f131ce0113088bc6
|
e17174b023f60aab8982195a6427e569b01ab9d0
|
refs/heads/master
| 2023-01-24T03:39:57.639016
| 2020-11-21T20:01:26
| 2020-11-21T20:01:26
| 296,635,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,007
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from differential_robot_185104iaib/counter_message.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class counter_message(genpy.Message):
_md5sum = "9acad0024d496a45d7194e5310734a3c"
_type = "differential_robot_185104iaib/counter_message"
_has_header = False # flag to mark the presence of a Header object
_full_text = """
int32 count_left
int32 count_right
"""
__slots__ = ['count_left','count_right']
_slot_types = ['int32','int32']
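# (Added note.) Example construction with keyword arguments, which the
# constructor docstring below recommends:
#   msg = counter_message(count_left=3, count_right=5)
# serialize() packs the two int32 fields with struct format '<2i'
# (8 bytes, little-endian), matching the end += 8 reads in deserialize().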
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
count_left,count_right
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(counter_message, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.count_left is None:
self.count_left = 0
if self.count_right is None:
self.count_right = 0
else:
self.count_left = 0
self.count_right = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_2i().pack(_x.count_left, _x.count_right))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
_x = self
start = end
end += 8
(_x.count_left, _x.count_right,) = _get_struct_2i().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_2i().pack(_x.count_left, _x.count_right))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
_x = self
start = end
end += 8
(_x.count_left, _x.count_right,) = _get_struct_2i().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_2i = None
def _get_struct_2i():
global _struct_2i
if _struct_2i is None:
_struct_2i = struct.Struct("<2i")
return _struct_2i
|
[
"sasha.aleksandov@gmail.com"
] |
sasha.aleksandov@gmail.com
|
516036f8a0bc3f47d5d771b46101d15d00bbf14f
|
22295cda10cf11472fee987093e0b245f6f96ef3
|
/nick/twophoton/imag003_depth_report_signals.py
|
40243b4470c47a81c063c4888295fcf8c03f4476
|
[] |
no_license
|
sjara/jaratest
|
aecb9e3bcc1ff91db35e7cd551c0f4f3da0b690a
|
09bf2c76bd5bf45191a2c37c14171ae1e8902c4b
|
refs/heads/master
| 2023-08-11T09:55:17.684814
| 2023-08-03T22:03:31
| 2023-08-03T22:03:31
| 63,100,718
| 2
| 5
| null | 2023-04-11T18:14:08
| 2016-07-11T20:43:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,259
|
py
|
import os
import numpy as np
from scipy import io
from jaratoolbox import loadbehavior
from jaratoolbox import extraplots
from matplotlib import pyplot as plt
from skimage.external import tifffile
dataDir = '/home/nick/data/2pdata/imag003/'
# sessionsToPlot = [0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 14, 15, 17, 18, 19]
sessionsToPlot = [8]
# session = '002_019'
for ses in sessionsToPlot:
Fn = 'imag003_002_{0:03d}_rigid.signals.mat'.format(ses)
#Read the file with the extracted signals
sigMat = os.path.join(dataDir, Fn)
sigData = io.loadmat(sigMat)
#Get number of frames and extracted ROIs
signals = sigData['sig']
nFrames, nROIs = np.shape(signals)
minSig = np.min(signals.ravel())
maxSig = np.max(signals.ravel())
sdSig = np.std(signals.ravel())
timebase = np.arange(nFrames)
plt.clf()
for indROI in range(nROIs):
yOffset = (4*sdSig) * indROI
plt.plot(timebase, signals[:,indROI]+yOffset, 'k')
plt.title('imag003_002_{0:03d}'.format(ses))
plt.xlabel('Frame')
extraplots.boxoff(plt.gca(), yaxis=False)
plt.gca().set_yticks([])
# plt.show()
plt.tight_layout()
plt.show()
# plt.savefig('/home/nick/data/2pdata/depthReportFigs/{0:03d}.png'.format(ses))
|
[
"nickponvert@gmail.com"
] |
nickponvert@gmail.com
|
269bf66d37d33754342bddb0248ac1f47af381c5
|
0246c0ded95e8cfb1139159c267f6c024ca83f7a
|
/5kyu_rgb_to_hex_conversion.py
|
1296ce04e6bd1bdea4d07c74e38cbcc1fec3c027
|
[] |
no_license
|
cromox1/KodPerang_kata
|
e40a4f2c3f4eba029caabc3312236aac1bea43bd
|
242effc71fef40c0c9a7dc46653845a20da5f239
|
refs/heads/main
| 2023-01-21T00:18:00.232300
| 2020-11-26T12:13:17
| 2020-11-26T12:13:17
| 312,572,025
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,449
|
py
|
def rgb(r, g, b):
int_to_hex = lambda number: "%0.2X" % number if (0 <= number <= 255) else '00' if number < 0 else 'FF'
return int_to_hex(r) + int_to_hex(g) + int_to_hex(b)
# def int_to_hex(num):
# if num <= 0:
# num = 0
# elif num >= 255:
# num = 255
# return "%0.2X" % num
class Test:
def assert_equals(value, expected):
from nose.tools import assert_equal
try:
assert_equal(value, expected)
print('EQUAL --> v =', value, " == x =", expected)
except:
message = ' // # ' + str(value) + ' should == ' + str(expected)
print('UNEQUAL!! --> v =', value, " != x =", expected, message)
@classmethod
def describe(cls, param):
print(param)
# Test.assert_equals(rgb(0,0,0),"000000", "testing zero values")
# Test.assert_equals(rgb(1,2,3),"010203", "testing near zero values")
# Test.assert_equals(rgb(255,255,255), "FFFFFF", "testing max values")
# Test.assert_equals(rgb(254,253,252), "FEFDFC", "testing near max values")
# Test.assert_equals(rgb(-20,275,125), "00FF7D", "testing out of range values")
Test.assert_equals(rgb(0,0,0),"000000")
Test.assert_equals(rgb(1,2,3),"010203")
Test.assert_equals(rgb(255,255,255), "FFFFFF")
Test.assert_equals(rgb(254,253,252), "FEFDFC")
Test.assert_equals(rgb(-20,275,125), "00FF7D")
Test.assert_equals(rgb(148, 0, 211), "9400D3")
Test.assert_equals(rgb(254,253,300), "FEFDFF")
|
[
"xixa01@yahoo.co.uk"
] |
xixa01@yahoo.co.uk
|
d57bb865836da6b50b08d1e0795f50aef737cded
|
74f0c966d09786f447ad60bf837ea342cb405874
|
/neutron/db/sqlalchemyutils.py
|
b720554d8bfe626a674b13cc47b1c6054d97fa9f
|
[
"Apache-2.0"
] |
permissive
|
mrwukang/neutron
|
ad354d19a6ba4ec9a92b4e54d02cf1bbfd66e47e
|
ebdb2ad1213eaf09c6a3f061a94ff4453c3e7506
|
refs/heads/master
| 2020-04-11T18:24:09.601969
| 2019-01-02T15:42:05
| 2019-01-02T15:42:05
| 161,997,228
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,493
|
py
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from six import moves
import sqlalchemy
from sqlalchemy.orm import properties
from neutron._i18n import _
from neutron.common import exceptions as n_exc
LOG = logging.getLogger(__name__)
def paginate_query(query, model, limit, sorts, marker_obj=None):
"""Returns a query with sorting / pagination criteria added.
Pagination works by requiring a unique sort key, specified by sorts.
(If sort keys is not unique, then we risk looping through values.)
We use the last row in the previous page as the 'marker' for pagination.
So we must return values that follow the passed marker in the order.
With a single-valued sort key, this would be easy: sort_key > X.
With a compound-values sort key, (k1, k2, k3) we must do this to repeat
the lexicographical ordering:
(k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
The reason of didn't use OFFSET clause was it don't scale, please refer
discussion at https://lists.launchpad.net/openstack/msg02547.html
We also have to cope with different sort directions.
Typically, the id of the last row is used as the client-facing pagination
marker, then the actual marker object must be fetched from the db and
passed in to us as marker.
:param query: the query object to which we should add paging/sorting
:param model: the ORM model class
:param limit: maximum number of items to return
:param sorts: array of attributes and direction by which results should
be sorted
:param marker: the last item of the previous page; we returns the next
results after this value.
:rtype: sqlalchemy.orm.query.Query
:return: The query with sorting/pagination added.
"""
if not sorts:
return query
# A primary key must be specified in sort keys
assert not (limit and
len(set(dict(sorts).keys()) &
set(model.__table__.primary_key.columns.keys())) == 0)
# Add sorting
for sort_key, sort_direction in sorts:
sort_dir_func = sqlalchemy.asc if sort_direction else sqlalchemy.desc
try:
sort_key_attr = getattr(model, sort_key)
except AttributeError:
# Extension attribute doesn't support for sorting. Because it
# existed in attr_info, it will be caught here
msg = _("%s is invalid attribute for sort_key") % sort_key
raise n_exc.BadRequest(resource=model.__tablename__, msg=msg)
if isinstance(sort_key_attr.property, properties.RelationshipProperty):
msg = _("The attribute '%(attr)s' is reference to other "
"resource, can't used by sort "
"'%(resource)s'") % {'attr': sort_key,
'resource': model.__tablename__}
raise n_exc.BadRequest(resource=model.__tablename__, msg=msg)
query = query.order_by(sort_dir_func(sort_key_attr))
# Add pagination
if marker_obj:
marker_values = [getattr(marker_obj, sort[0]) for sort in sorts]
# Build up an array of sort criteria as in the docstring
criteria_list = []
for i, sort in enumerate(sorts):
crit_attrs = [(getattr(model, sorts[j][0]) == marker_values[j])
for j in moves.range(i)]
model_attr = getattr(model, sort[0])
if sort[1]:
crit_attrs.append((model_attr > marker_values[i]))
else:
crit_attrs.append((model_attr < marker_values[i]))
criteria = sqlalchemy.sql.and_(*crit_attrs)
criteria_list.append(criteria)
f = sqlalchemy.sql.or_(*criteria_list)
query = query.filter(f)
if limit:
query = query.limit(limit)
return query
|
[
"wukangmr@163.com"
] |
wukangmr@163.com
|
fa882c3cca9a31dfa9f0468d57cc0eba36f3d1b4
|
a9f4434d3b410886ffc10aa5aede3634692152b6
|
/0218/ex_if4.py
|
7c7e8186913a879087edcca4ecb26393f4434e91
|
[] |
no_license
|
parka01/python_ex
|
d3690dcd8753864c335bf7782553719a072bd01d
|
a5811487516eb9ef86d5ae93e9060cac267b87ce
|
refs/heads/main
| 2023-03-13T08:35:03.837790
| 2021-02-26T03:40:41
| 2021-02-26T03:40:41
| 339,892,972
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
age=int(input('나이를 입력하세요: '))
if age>=65:
print('지하철 경로 우대 승차권 발급')
else:
print('일반 승차권 발급')
print('자동 발매기를 이용해주셔서 감사합니다.')
|
[
"68191916+parka01@users.noreply.github.com"
] |
68191916+parka01@users.noreply.github.com
|
facce1dd0ae0e979ec432e8a3d4697e93750f24b
|
312ab41033c2cb043d617d3e633c166503fd280c
|
/Informatikk/Bachelor/H2017/ITGK/Eksamensøving/Øving 6/Mynter.py
|
473758343370ecab300a3a1f0300f96a8752c55d
|
[] |
no_license
|
skanin/NTNU
|
cb9b833d9de0d504965979584370b8f353435cd1
|
e4023856f69060f8d3d09ff4990e29f7a75d98b1
|
refs/heads/master
| 2022-01-30T14:31:22.947512
| 2022-01-20T14:11:14
| 2022-01-20T14:11:14
| 113,476,017
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 631
|
py
|
def count_coins(coins):
summen = 0
for coin in coins:
summen += coin
return summen
def num_coins(numbers):
    # Greedy breakdown of each amount into 20-, 10-, 5- and 1-kroner coins
    # (a minimal fix of the original sketch; counts are appended in that order).
    coins = []
    for num in numbers:
        temp = []
        rest = num
        tjue = rest // 20
        rest %= 20
        tiere = rest // 10
        rest %= 10
        femere = rest // 5
        enere = rest % 5
        temp.append(tjue)
        temp.append(tiere)
        temp.append(femere)
        temp.append(enere)
        coins.append(temp)
    return coins
print(num_coins([63, 55]))
|
[
"sander.b.lindberg@gmail.com"
] |
sander.b.lindberg@gmail.com
|
5816095d02686182c4cc3f9e02f65d91965b2fe6
|
e6f1137903b9658e5e3c1ee51201a931894303b9
|
/util/melt/layers/__init__.py
|
4324748862ecebf1960d5f5dae2b09531e22e1a5
|
[] |
no_license
|
fword/hasky
|
8ed69ef85bb34823d9ade27bb3b19aac02872440
|
d3c680ffa04f7487b931a5575977798157b42b7e
|
refs/heads/master
| 2021-01-23T01:18:49.275631
| 2017-03-18T13:01:27
| 2017-03-18T13:01:27
| 85,898,744
| 1
| 1
| null | 2017-03-23T02:39:06
| 2017-03-23T02:39:06
| null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
#!/usr/bin/env python
# ==============================================================================
# \file __init__.py
# \author chenghuige
# \date 2016-08-16 16:36:38.289129
# \Description
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from melt.layers.layers import *
from melt.layers.optimizers_backward_compat import *
#TODO
#if int(tf.__version__.split('.')[1]) > 10:
# from melt.layers.optimizers import *
#else:
# from melt.layers.optimizers_backward_compat import *
|
[
"29109317@qq.com"
] |
29109317@qq.com
|
12773f151707596988078c66d295a28579590999
|
6515dee87efbc5edfbf4c117e262449999fcbb50
|
/eet/Distant_Barcodes.py
|
9c536919456a6b4b5ac94f5b3db75a3e8443f3df
|
[] |
no_license
|
wangyunge/algorithmpractice
|
24edca77e180854b509954dd0c5d4074e0e9ef31
|
085b8dfa8e12f7c39107bab60110cd3b182f0c13
|
refs/heads/master
| 2021-12-29T12:55:38.096584
| 2021-12-12T02:53:43
| 2021-12-12T02:53:43
| 62,696,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,302
|
py
|
"""
In a warehouse, there is a row of barcodes, where the ith barcode is barcodes[i].
Rearrange the barcodes so that no two adjacent barcodes are equal. You may return any answer, and it is guaranteed an answer exists.
Example 1:
Input: barcodes = [1,1,1,2,2,2]
Output: [2,1,2,1,2,1]
Example 2:
Input: barcodes = [1,1,1,1,2,2,3,3]
Output: [1,3,1,3,1,2,1,2]
"""
# DFS
class Solution(object):
def rearrangeBarcodes(self, barcodes):
"""
:type barcodes: List[int]
:rtype: List[int]
"""
# count
table = {}
for bar in barcodes:
cnt = table.get(bar, 0)
table[bar] = cnt + 1
        def _dfs(path, left):
            # path starts with a sentinel, so a complete arrangement holds
            # len(barcodes) + 1 entries
            if len(path) == len(barcodes) + 1:
                return path
            for key, cnt in left.items():
                if cnt > 0 and key != path[-1]:
                    left[key] -= 1
                    found = _dfs(path + [key], left)
                    left[key] += 1
                    if found:
                        return found
            return None
        res = _dfs([None], table)
        return res[1:]
# Priority Queue
class Solution(object):
def rearrangeBarcodes(self, barcodes):
"""
:type barcodes: List[int]
:rtype: List[int]
"""
# Task Completion
#Fill the most frequency first with every (len(s) / most_freq)-1 positions
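        # A minimal sketch of the greedy idea hinted above, using a max-heap
        # (heapq with negated counts) instead of explicit position filling;
        # this completion is an assumption, not the original author's code.
        import heapq
        from collections import Counter
        counts = Counter(barcodes)
        heap = [(-cnt, code) for code, cnt in counts.items()]
        heapq.heapify(heap)
        res = []
        prev = None  # entry held back so the same barcode is never adjacent
        while heap:
            cnt, code = heapq.heappop(heap)
            res.append(code)
            if prev:
                heapq.heappush(heap, prev)
            prev = (cnt + 1, code) if cnt + 1 < 0 else None
        return res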
|
[
"wangyunge1@yahoo.com"
] |
wangyunge1@yahoo.com
|
5f6c2cc23bfa6be883b2211c1cbe82dfdc70fee1
|
7b437e095068fb3f615203e24b3af5c212162c0d
|
/enaml/qt/qt_menu.py
|
eed0ca7ad1bdec1ef6ddb3227edc9944a2341163
|
[
"BSD-3-Clause"
] |
permissive
|
ContinuumIO/enaml
|
d8200f97946e5139323d22fba32c05231c2b342a
|
15c20b035a73187e8e66fa20a43c3a4372d008bd
|
refs/heads/master
| 2023-06-26T16:16:56.291781
| 2013-03-26T21:13:52
| 2013-03-26T21:13:52
| 9,047,832
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,532
|
py
|
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QMenu
from atom.api import Typed
from enaml.widgets.menu import ProxyMenu
from .qt_action import QtAction
from .qt_action_group import QtActionGroup
from .qt_toolkit_object import QtToolkitObject
class QCustomMenu(QMenu):
""" A custom subclass of QMenu which adds some convenience apis.
"""
def __init__(self, *args, **kwargs):
""" Initialize a QCustomMenu.
Parameters
----------
*args, **kwargs
The positional and keyword arguments needed to initialize
a QMenu.
"""
super(QCustomMenu, self).__init__(*args, **kwargs)
self._is_context_menu = False
#--------------------------------------------------------------------------
# Private API
#--------------------------------------------------------------------------
def _onShowContextMenu(self, pos):
""" A private signal handler for displaying the context menu.
This handler is connected to the context menu requested signal
on the parent widget when this menu is marked as a context
menu.
"""
parent = self.parentWidget()
if parent is not None:
global_pos = parent.mapToGlobal(pos)
self.exec_(global_pos)
#--------------------------------------------------------------------------
# Public API
#--------------------------------------------------------------------------
def isContextMenu(self):
""" Whether this menu acts as a context menu for its parent.
Returns
-------
result : bool
True if this menu acts as a context menu, False otherwise.
"""
return self._is_context_menu
def setContextMenu(self, context):
""" Set whether this menu acts as a context menu for its parent.
Parameters
----------
context : bool
True if this menu should act as a context menu, False
otherwise.
"""
old_context = self._is_context_menu
self._is_context_menu = context
if old_context != context:
parent = self.parentWidget()
if parent is not None:
handler = self._onShowContextMenu
if context:
parent.setContextMenuPolicy(Qt.CustomContextMenu)
parent.customContextMenuRequested.connect(handler)
else:
parent.setContextMenuPolicy(Qt.DefaultContextMenu)
parent.customContextMenuRequested.disconnect(handler)
def removeActions(self, actions):
""" Remove the given actions from the menu.
Parameters
----------
actions : iterable
An iterable of QActions to remove from the menu.
"""
remove = self.removeAction
for action in actions:
remove(action)
class QtMenu(QtToolkitObject, ProxyMenu):
""" A Qt implementation of an Enaml ProxyMenu.
"""
#: A reference to the widget created by the proxy.
widget = Typed(QCustomMenu)
#--------------------------------------------------------------------------
# Initialization API
#--------------------------------------------------------------------------
def create_widget(self):
""" Create the underlying menu widget.
"""
self.widget = QCustomMenu(self.parent_widget())
def init_widget(self):
""" Initialize the widget.
"""
super(QtMenu, self).init_widget()
d = self.declaration
self.set_title(d.title)
self.set_enabled(d.enabled)
self.set_visible(d.visible)
self.set_context_menu(d.context_menu)
def init_layout(self):
""" Initialize the layout of the widget.
"""
super(QtMenu, self).init_layout()
widget = self.widget
for child in self.children():
if isinstance(child, QtMenu):
widget.addMenu(child.widget)
elif isinstance(child, QtAction):
widget.addAction(child.widget)
elif isinstance(child, QtActionGroup):
widget.addActions(child.actions())
#--------------------------------------------------------------------------
# Child Events
#--------------------------------------------------------------------------
def find_next_action(self, child):
""" Get the QAction instance which follows the child.
Parameters
----------
child : QtToolkitObject
The child of interest.
Returns
-------
result : QAction or None
The QAction which comes immediately after the actions of the
given child, or None if no actions follow the child.
"""
found = False
for dchild in self.children():
if found:
if isinstance(dchild, QtMenu):
return dchild.widget.menuAction()
if isinstance(dchild, QtAction):
return dchild.widget
if isinstance(dchild, QtActionGroup):
acts = dchild.actions()
if len(acts) > 0:
return acts[0]
else:
found = dchild is child
def child_added(self, child):
""" Handle the child added event for a QtMenu.
"""
super(QtMenu, self).child_added(child)
if isinstance(child, QtMenu):
before = self.find_next_action(child)
self.widget.insertMenu(before, child.widget)
elif isinstance(child, QtAction):
before = self.find_next_action(child)
self.widget.insertAction(before, child.widget)
elif isinstance(child, QtActionGroup):
before = self.find_next_action(child)
self.widget.insertActions(before, child.actions())
def child_removed(self, child):
""" Handle the child removed event for a QtMenu.
"""
super(QtMenu, self).child_removed(child)
if isinstance(child, QtMenu):
self.widget.removeAction(child.widget.menuAction())
elif isinstance(child, QtAction):
self.widget.removeAction(child.widget)
elif isinstance(child, QtActionGroup):
self.widget.removeActions(child.actions())
#--------------------------------------------------------------------------
# ProxyMenu API
#--------------------------------------------------------------------------
def set_title(self, title):
""" Set the title of the underlying widget.
"""
self.widget.setTitle(title)
def set_visible(self, visible):
""" Set the visibility on the underlying widget.
"""
self.widget.menuAction().setVisible(visible)
def set_enabled(self, enabled):
""" Set the enabled state of the widget.
"""
self.widget.setEnabled(enabled)
def set_context_menu(self, context):
""" Set whether or not the menu is a context menu.
"""
self.widget.setContextMenu(context)
|
[
"sccolbert@gmail.com"
] |
sccolbert@gmail.com
|
75c0f023556c19ce152dd2ed15092cba945a4cb9
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_enchant.py
|
30a2682435ccb97a29ce664927597299802bbdb4
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
#calss header
class _ENCHANT():
def __init__(self,):
self.name = "ENCHANT"
self.definitions = [u'to attract or please someone very much: ', u'to have a magical effect on someone or something']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
dd825169a7e52a6ad884d02fdd6500cf7257a189
|
3ea99519e25ec1bb605947a94b7a5ceb79b2870a
|
/modern_python/modernpython/lib/mypy/typeshed/stdlib/3.4/tracemalloc.pyi
|
462b03ca1c46a82c921e5591f87aaf07cc326d2c
|
[] |
no_license
|
tech-cow/spazzatura
|
437c7502a0654a3d3db2fd1e96ce2e3e506243c0
|
45fc0932186d2ef0c5044745a23507a692cfcc26
|
refs/heads/master
| 2022-09-01T12:01:11.309768
| 2018-11-15T04:32:03
| 2018-11-15T04:32:03
| 130,414,653
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,328
|
pyi
|
# Stubs for tracemalloc (Python 3.4+)
import sys
from typing import Any, List, Optional, Sequence, Tuple, Union
def clear_traces() -> None: ...
def get_object_traceback(obj: object) -> Optional[Traceback]: ...
def get_traceback_limit() -> int: ...
def get_traced_memory() -> Tuple[int, int]: ...
def get_tracemalloc_memory() -> int: ...
def is_tracing() -> bool: ...
def start(nframe: int = ...) -> None: ...
def stop() -> None: ...
def take_snapshot() -> Snapshot: ...
if sys.version_info >= (3, 6):
class DomainFilter:
inclusive = ... # type: bool
domain = ... # type: int
def __init__(self, inclusive: bool, domain: int) -> None: ...
class Filter:
if sys.version_info >= (3, 6):
domain = ... # type: Optional[int]
inclusive = ... # type: bool
lineno = ... # type: Optional[int]
filename_pattern = ... # type: str
all_frames = ... # type: bool
def __init__(self, inclusive: bool, filename_pattern: str, lineno: Optional[int] = ..., all_frames: bool = ..., domain: Optional[int] = ...) -> None: ...
class Frame:
filename = ... # type: str
lineno = ... # type: int
class Snapshot:
def compare_to(self, old_snapshot: Snapshot, key_type: str, cumulative: bool = ...) -> List[StatisticDiff]: ...
def dump(self, filename: str) -> None: ...
if sys.version_info >= (3, 6):
def filter_traces(self, filters: Sequence[Union[DomainFilter, Filter]]) -> Snapshot: ...
else:
def filter_traces(self, filters: Sequence[Filter]) -> Snapshot: ...
@classmethod
def load(cls, filename: str) -> Snapshot: ...
def statistics(self, key_type: str, cumulative: bool = ...) -> List[Statistic]: ...
traceback_limit = ... # type: int
traces = ... # type: Sequence[Trace]
class Statistic:
count = ... # type: int
size = ... # type: int
traceback = ... # type: Traceback
class StatisticDiff:
count = ... # type: int
count_diff = ... # type: int
size = ... # type: int
size_diff = ... # type: int
traceback = ... # type: Traceback
class Trace:
size = ... # type: int
traceback = ... # type: Traceback
class Traceback(Sequence[Frame]):
def format(self, limit: Optional[int] = ...) -> List[str]: ...
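# Illustrative usage of the runtime module these stubs describe (comments only,
# since a .pyi stub file should not contain executable code):
#
#   import tracemalloc
#   tracemalloc.start()
#   data = [bytes(1000) for _ in range(100)]
#   snapshot = tracemalloc.take_snapshot()
#   for stat in snapshot.statistics("lineno")[:3]:
#       print(stat)
#   tracemalloc.stop()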
|
[
"yuzhoujr@yuzhou-7480.internal.synopsys.com"
] |
yuzhoujr@yuzhou-7480.internal.synopsys.com
|
69d1f1d6c3c6c4c911b19d88c4953d469307148f
|
1e0ae1f039668a65e480065d671235fc0fff9b52
|
/s11day2/backend/b2.py
|
bb0b3891a46e924deb3ce5866e6a53261cb4ac70
|
[] |
no_license
|
aixocm/svndata
|
a4da91c3c9e1d376abfd46e7cecc3c5c2e340e83
|
ee205301f3a1ce11acef98bba927877cb7c4fb0b
|
refs/heads/master
| 2021-01-21T04:39:41.607117
| 2016-07-01T01:48:36
| 2016-07-01T01:48:36
| 47,066,006
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,613
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#date:2016-1-11
import time
import hashlib
import pickle
import ConfigParser
import day
info = {}
def regiest():
global info
while True:
username = raw_input('please input your username:')
passwd = raw_input('please input your password:')
if not (username and passwd):
print 'your username or password is empty'
continue
else:
with open('user.txt','a+') as f:
string = f.read()
if string == "":
hash = hashlib.md5('JJ')
hash.update(passwd)
ret = hash.hexdigest()
info[username]=[ret,15000]
f = open('user.txt','wb')
pickle.dump(info,f)
f.close()
print 'regiest is sucessful!'
day.month_fun(username)
# dayday.dayday_fun(username)
else:
f=open('user.txt','rb')
info = pickle.load(f)
f.close()
if username in info.keys():
print 'This user is already exist!'
continue
else:
hash = hashlib.md5('JJ')
hash.update(passwd)
ret = hash.hexdigest()
info[username]=[ret,15000]
f = open('user.txt','ab')
pickle.dump(info,f)
f.close()
print 'regiest is sucessful!'
day.month_fun(username)
# dayday.dayday_fun(username)
def login():
global info
global username
f=open('user.txt','rb')
info = pickle.load(f)
f.close()
username = raw_input('please input your name:')
passwd = raw_input('please input your password:')
if username not in info.keys():
print 'please regiest!'
regiest()
else:
hash = hashlib.md5('JJ')
hash.update(passwd)
ret = hash.hexdigest()
if username in info.keys() and ret in info[username][0]:
print 'login successful!'
return True
else:
print 'login is failure'
return False
def get_money(username):
global info
if info[username][1] < 0:
print 'sorry,please the money'
else:
num = int(raw_input('please input your money num:'))
if info[username][1] - num*1.05 >= 0:
info[username][1] -= num*1.05
f = open('user.txt','wb')
pickle.dump(info,f)
f.close()
print 'get money is sucessful!'
else:
print 'sorry,you get money is too much'
def return_money(username):
global info
value_add=int(raw_input('please input your money:'))
with open('add.txt','a+') as f:
value=int(f.read())
with open('add.txt','wb') as f:
        f.write(str(value + value_add))
info[username][1]=15000+value_add+value-Sum
f = open('user.txt','wb')
pickle.dump(info,f)
f.close()
print '已充值!'
def account_list(username):
global info
global Sum
with open('add.txt','a+') as f:
value=f.read()
if value == "":
value=0
with open('add.txt','wb') as f:
        f.write(str(value))
config = ConfigParser.ConfigParser()
config.read(username)
month_list=config.sections()
Sum=0
for month in month_list:
key_list=config.items(month)
print month,"的账单如下:"
for opt in key_list:
print opt[0],":",opt[1]
Sum+=config.getint(month,opt[0])
if 15000 - Sum + int(value) < 0:
print '你已经欠款',15000 - Sum + int(value)
k=raw_input('if you add your money,please input A')
if k == 'A':
return_money(username)
else:
print 'your input is error'
else:
info[username][1]-=Sum
f = open('user.txt','wb')
pickle.dump(info,f)
f.close()
def Main():
    flag = raw_input("if you regiest,please input 'R',if you login,please input 'L':")
    if flag == 'R':
        regiest()
    elif flag == 'L':
        if login():
            select = raw_input("get money input G,query your account_list input Q:")
            if select == 'G':
                get_money(username)
            if select == 'Q':
                if int(time.time()) > 0:
                    account_list(username)
                else:
                    print 'date is do not to'
    else:
        print 'your input is error'
Main()
|
[
"1755897532@qq.com"
] |
1755897532@qq.com
|
ded09e44634a6c69a66cad4017e63a15258b6b35
|
67d4025c8b006a4342ce78bf4831d50432a4ed7a
|
/flow controls/current.py
|
b858a670d03af0b448bdb3d27cb8ffeb47d14c81
|
[] |
no_license
|
Anju-PT/pythonfilesproject
|
fd21484e98dc6c27fd9fd00feea0ede76ca8db17
|
fee8351a1610921700ee15ea88c6f765fe3d21cd
|
refs/heads/master
| 2023-04-29T18:13:46.490301
| 2021-05-19T08:06:51
| 2021-05-19T08:06:51
| 368,791,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,493
|
py
|
#year
#month
#date
#birth year
#month
#date
#print year
cyear=int(input("enter current year"))
cmonth=int(input("enter current month"))
cdate=int(input("enter current date"))
byear=int(input("enter birth year"))
bmonth=int(input("enter birth month"))
bdate=int(input("enter birth date"))
dyear=cyear-byear
dmonth=cmonth-bmonth
ddate=cdate-bdate
if(dyear>=1):
if(dmonth>=0):
if(ddate>=0):
print("age:",dyear,"years",dmonth,"months",ddate,"days")
else:
dmonth=dmonth-1
ddate=31+ddate
print("age:",dyear,"years",dmonth,"months",ddate,"days")
elif(dmonth<0):
if(ddate>=0):
dyear=dyear-1
dmonth=12+dmonth
#ddate=ddate
print("age:",dyear,"years",dmonth,"months",ddate,"days")
else:
dyear=dyear-1
dmonth-=1
dmonth=12+dmonth
ddate=31+ddate
print("age:",dyear,"years",dmonth,"months",ddate,"days")
else:
print("error")
elif(dyear==0):
if(dmonth<=0):
if(ddate<=0):
print("error")
else:
print(dyear,"years",dmonth,"months",ddate,"days old")
elif(dmonth>0):
if(ddate<=0):
dmonth=dmonth-1
ddate=31+ddate
print(dyear,"years",dmonth,"months",ddate,"days")
else:
print(dyear,"years",dmonth,"months",ddate, "days")
else:
print("error")
else:
print("invalid date of birth")
|
[
"anjuthankachen2012@gmail.com"
] |
anjuthankachen2012@gmail.com
|
c2f220b3b5f0bf91bfad97fd447ebc7840a3f497
|
f04fb8bb48e38f14a25f1efec4d30be20d62388c
|
/牛客Top200/41最长无重复子数组.py
|
925b34c978b77ae75f52c81087d78fbe3d6366fb
|
[] |
no_license
|
SimmonsChen/LeetCode
|
d8ef5a8e29f770da1e97d295d7123780dd37e914
|
690b685048c8e89d26047b6bc48b5f9af7d59cbb
|
refs/heads/master
| 2023-09-03T01:16:52.828520
| 2021-11-19T06:37:19
| 2021-11-19T06:37:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
class Solution:
    # Use a queue: while the current number is already in the queue, keep popping from
    # the front until it is gone, then enqueue the current number.
    # After each enqueue, record the current queue length; the answer is the maximum seen.
def maxLength(self, arr):
n = len(arr)
if n < 2: return n
queue = []
ans = 0
for number in arr:
while number in queue:
queue.pop(0)
queue.append(number)
ans = max(ans, len(queue))
return ans
if __name__ == '__main__':
s = Solution()
print(s.maxLength([2, 2, 3, 4, 3]))
|
[
"15097686925@163.com"
] |
15097686925@163.com
|
df1e1b2efa5dc475ee5b76e61cad469f2164c2bf
|
052a89753a7917b7fa0ccdf5718d5250a1379d2c
|
/bin/explode.py
|
792bb4609f5f71183a72a0d26597362977ad7e7b
|
[] |
no_license
|
bopopescu/aws.example.com
|
25e2efda3bd9ae2a257c34904ccb53043fe20b55
|
97254868688c3c3a991843fcacc973c93b366700
|
refs/heads/master
| 2022-11-22T07:06:30.386034
| 2016-10-25T15:22:14
| 2016-10-25T15:22:14
| 282,553,417
| 0
| 0
| null | 2020-07-26T01:22:26
| 2020-07-26T01:22:25
| null |
UTF-8
|
Python
| false
| false
| 2,499
|
py
|
#!/Users/deanarmada/Desktop/projects/python-projects/aws.example.com/bin/python
#
# The Python Imaging Library
# $Id$
#
# split an animation into a number of frame files
#
from __future__ import print_function
from PIL import Image
import os
import sys
class Interval(object):
def __init__(self, interval="0"):
self.setinterval(interval)
def setinterval(self, interval):
self.hilo = []
for s in interval.split(","):
if not s.strip():
continue
try:
v = int(s)
if v < 0:
lo, hi = 0, -v
else:
lo = hi = v
except ValueError:
i = s.find("-")
lo, hi = int(s[:i]), int(s[i+1:])
self.hilo.append((hi, lo))
if not self.hilo:
self.hilo = [(sys.maxsize, 0)]
def __getitem__(self, index):
for hi, lo in self.hilo:
if hi >= index >= lo:
return 1
return 0
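# Example (illustrative): Interval("1-10,5,-15")[7] == 1 because frame 7 falls in
# the 1-10 range, while Interval("5")[7] == 0; an empty specification matches
# every frame index.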
# --------------------------------------------------------------------
# main program
html = 0
if sys.argv[1:2] == ["-h"]:
html = 1
del sys.argv[1]
if not sys.argv[2:]:
print()
print("Syntax: python explode.py infile template [range]")
print()
print("The template argument is used to construct the names of the")
print("individual frame files. The frames are numbered file001.ext,")
print("file002.ext, etc. You can insert %d to control the placement")
print("and syntax of the frame number.")
print()
print("The optional range argument specifies which frames to extract.")
print("You can give one or more ranges like 1-10, 5, -15 etc. If")
print("omitted, all frames are extracted.")
sys.exit(1)
infile = sys.argv[1]
outfile = sys.argv[2]
frames = Interval(",".join(sys.argv[3:]))
try:
# check if outfile contains a placeholder
outfile % 1
except TypeError:
file, ext = os.path.splitext(outfile)
outfile = file + "%03d" + ext
ix = 1
im = Image.open(infile)
if html:
file, ext = os.path.splitext(outfile)
html = open(file+".html", "w")
html.write("<html>\n<body>\n")
while True:
if frames[ix]:
im.save(outfile % ix)
print(outfile % ix)
if html:
html.write("<img src='%s'><br>\n" % outfile % ix)
try:
im.seek(ix)
except EOFError:
break
ix += 1
if html:
html.write("</body>\n</html>\n")
|
[
"deanarmada@gmail.com"
] |
deanarmada@gmail.com
|
36d1293350f5b67bf202eb4d0c02875b0fb70e92
|
63f6ce221f6ac10f33761b6e57cf47725fd3d1cb
|
/08_Inteligencia_Artificial/01_Mineracao_de_Emocoes_em_Textos/base.py
|
d5d26c33f96c0e78d5a32f8964b53b1eae32c865
|
[] |
no_license
|
iamferreirajp/python-notebook
|
7dcc4305fec2a133a28b5449d77d486dcaca2f5f
|
c9056024bc7a7715db3607ec00f886a7b9eaf0c0
|
refs/heads/master
| 2020-05-03T09:54:43.612078
| 2019-03-24T07:07:21
| 2019-03-24T07:07:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,330
|
py
|
base = [
('eu sou admirada por muitos','alegria'),
('me sinto completamente amado','alegria'),
('amar e maravilhoso','alegria'),
('estou me sentindo muito animado novamente','alegria'),
('eu estou muito bem hoje','alegria'),
('que belo dia para dirigir um carro novo','alegria'),
('o dia está muito bonito','alegria'),
('estou contente com o resultado do teste que fiz no dia de ontem','alegria'),
('o amor e lindo','alegria'),
('nossa amizade e amor vai durar para sempre', 'alegria'),
('estou amedrontado', 'medo'),
('ele esta me ameacando a dias', 'medo'),
('isso me deixa apavorada', 'medo'),
('este lugar e apavorante', 'medo'),
('se perdermos outro jogo seremos eliminados e isso me deixa com pavor', 'medo'),
('tome cuidado com o lobisomem', 'medo'),
('se eles descobrirem estamos encrencados', 'medo'),
('estou tremendo de medo', 'medo'),
('eu tenho muito medo dele', 'medo'),
('estou com medo do resultado dos meus testes', 'medo')
]
stopwords = [
'a', 'agora', 'algum', 'alguma', 'aquele', 'aqueles', 'de', 'deu', 'do', 'e', 'estou', 'esta', 'esta',
'ir', 'meu', 'muito', 'mesmo', 'no', 'nossa', 'o', 'outro', 'para', 'que', 'sem', 'talvez', 'tem', 'tendo',
'tenha', 'teve', 'tive', 'todo', 'um', 'uma', 'umas', 'uns', 'vou'
]
|
[
"victorhad@gmail.com"
] |
victorhad@gmail.com
|
73cb855b2bd5a9631ecd288feca60ceac271a3a9
|
291ab4b5b1b99d0d59ce2fb65efef04b84fd78bd
|
/tmp_testdir/postgresDB/test5b_manual_dbtable_insert_date_time.py
|
ca1b04857efca706986da6e210cb68cdea5f6927
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
cromox1/Trading212
|
15b5ea55d86e7063228f72dd92525e1fca693338
|
68f9b91098bc9184e16e9823a5e07e6b31e59602
|
refs/heads/main
| 2023-04-17T23:03:07.078229
| 2021-05-05T23:02:54
| 2021-05-05T23:02:54
| 320,100,427
| 0
| 2
| null | 2021-04-13T07:03:41
| 2020-12-09T22:58:06
|
Python
|
UTF-8
|
Python
| false
| false
| 2,736
|
py
|
import psycopg2
import datetime
def connect_db(host, user, pswd):
conn = psycopg2.connect(host=host, user=user, password=pswd)
cur = conn.cursor()
return cur, conn
def connect_db_commit_close(cur, conn):
conn.commit()
cur.close()
conn.close()
def create_table_if_noexist(dbtable):
sqlcommand = """CREATE TABLE IF NOT EXISTS """ + dbtable + """
(currencyID int,
Currency varchar(10),
CurrentValue numeric(9,5),
CurrentMin numeric(9,5),
CurrentMax numeric(9,5),
CurrentAverage numeric(9,5),
Date date);"""
conn_db = connect_db("localhost", "postgres", "H0meBase")
cur = conn_db[0]
conn = conn_db[1]
print(sqlcommand)
cur.execute(sqlcommand)
connect_db_commit_close(cur, conn)
def put_values_to_dbtable(dbtable, values):
id = values[0]
currency = values[1]
valuex = values[2]
valuemin = values[3]
valuemax = values[4]
valueaverage = values[5]
date1 = values[6]
time1 = values[7]
date2 = datetime.datetime.strptime(date1 + ' ' + time1, '%Y-%m-%d %H:%M:%S')
print('date2 = ', type(date2))
# print('time = ', type(time))
sqlcommand = "INSERT INTO " + str(dbtable) \
+ '\n' + "SELECT DISTINCT " + '\n' + str(id) + ", \n'" + str(currency) + "', \n" + str(valuex) \
+ ", \n" + str(valuemin) + ", \n" + str(valuemax) + ", \n" + str(valueaverage) + ", \n" \
                 + "'" + str(date2) + "'" \
+ "\nFROM " + str(dbtable) \
+ '\n' + "WHERE NOT EXISTS(SELECT DISTINCT currencyID FROM " + str(dbtable) \
+ " WHERE currencyID = " + str(id) + ");"
# + str(date) + ", \n'" + str(time) \
# sqlcommand = """INSERT INTO """ + str(dbtable) + """
# SELECT DISTINCT """ + str(id) + ',' + str(currency) + ',' + str(valuex) + ',' + str(valuemin) + ',' + \
# str( valuemax) + ',' + str(valueaverage) + ',' + str(date) \
# + ' FROM ' + str(dbtable) + """
# WHERE NOT EXISTS(SELECT DISTINCT PersonID FROM dailyfxcurrency WHERE id = """ + str(id) + """);"""
conn_db = connect_db("localhost", "postgres", "H0meBase")
cur = conn_db[0]
conn = conn_db[1]
print(sqlcommand)
cur.execute(sqlcommand)
connect_db_commit_close(cur, conn)
# TEST
dbtouse = 'dbtesttwo'
create_table_if_noexist(dbtouse)
# DATE = 2020-12-16 / 14:56
# GBP-USD = 1.354775 / min 1.336765 / max 1.354775 / avg 1.3462247916666668
# datatext = (20201216145600, 'GBP-USD' , 1.354775 , 1.336765 , 1.354775 , 1.3462247916666668 , "2020-12-16\n14:56:00")
datatext = (20201216145600, 'GBP-USD' , 1.35477 , 1.33676 , 1.35477 , 1.34622 , "2020-12-16", "14:56:00")
put_values_to_dbtable(dbtouse, datatext)
|
[
"xixa01@yahoo.co.uk"
] |
xixa01@yahoo.co.uk
|
6238e82437788028beba37bd4dc6074632053960
|
bccfab4d853f7417401a084be95de293e66ccd2a
|
/mySpider/spiders/Museum72.py
|
7cda6f18480e54909d7211b01dccc6d7a37022cd
|
[] |
no_license
|
CS1803-SE/The-First-Subsystem
|
a8af03ce04a9de72a6b78ece6411bac4c02ae170
|
4829ffd6a83133479c385d6afc3101339d279ed6
|
refs/heads/main
| 2023-05-06T02:32:08.751139
| 2021-05-24T06:09:37
| 2021-05-24T06:09:37
| 363,400,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,504
|
py
|
#lay
from ..items import *
class Museum72(scrapy.Spider):
name = "Museum72"
allowed_domains = ['nbmuseum.cn']
start_urls = ['http://nbmuseum.cn/col/col41/index.html']
custom_settings = {
'ITEM_PIPELINES': {
'mySpider.pipelines.MuseumPipeLine': 300,
},
'DOWNLOADER_MIDDLEWARES': {
'mySpider.middlewares.DefaultMiddleware': 0,
},
}
def parse(self, response, **kwargs):
item = MuseumBasicInformationItem()
item["museumID"] = 72
item["museumName"] = "宁波博物院"
item["address"] = "宁波市鄞州区首南中路1000号"
# str(response.xpath(
# "/html/body/div[3]/div[2]/div/div[1]/div[7]/dl[1]/dd[3]/text()").extract_first()).replace("\n", "")
item["openingTime"] = "参观入场时间9:00—16:00,闭馆时间17:00。周一闭馆(国家法定节假日除外)。"
# str(response.xpath(
# "/html/body/div[3]/div[2]/div/div[1]/div[7]/dl[2]/dd[3]/text()").extract_first()).replace("\n", "")
item["consultationTelephone"] = "(0574)82815588"
item["publicityVideoLink"] = None
item["longitude"] = "121.551803"
item["latitude"] = "29.821188"
item["introduction"] = response.xpath(
'//*[@id="zoom"]/p[1]/text()').extract()
# str(response.xpath("/html/body/div[3]/div[2]/div/div[1]/div[4]/div[2]").xpath(
# "string(.)").extract_first()).split("\n")[0]
yield item
|
[
"1300978939@qq.com"
] |
1300978939@qq.com
|
03d6b9442657118869f4d81fda133e26f127d88b
|
645cd832def2330ea923d69fcc82cf5b7719415b
|
/python/smqtk/algorithms/relevancy_index/__init__.py
|
36a407c72a844017a2550d0d6d0e1b72ee4d9c2c
|
[] |
no_license
|
mrG7/SMQTK
|
3103b6c59e347930a330e3284b288cb7af20f3b6
|
19917babd63767726b1bc21a184a5006366b59af
|
refs/heads/master
| 2021-01-20T11:35:21.861708
| 2015-10-02T21:50:44
| 2015-10-02T21:50:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,950
|
py
|
import abc
import logging
from smqtk.algorithms import SmqtkAlgorithm
__author__ = "paul.tunison@kitware.com"
class RelevancyIndex (SmqtkAlgorithm):
"""
Abstract class for IQR index implementations.
Similar to a traditional nearest-neighbors algorithm, An IQR index provides
a specialized nearest-neighbors interface that can take multiple examples of
positively and negatively relevant exemplars in order to produce a [0, 1]
ranking of the indexed elements by determined relevancy.
"""
def __len__(self):
return self.count()
@abc.abstractmethod
def count(self):
"""
:return: Number of elements in this index.
:rtype: int
"""
@abc.abstractmethod
def build_index(self, descriptors):
"""
Build the index based on the given iterable of descriptor elements.
Subsequent calls to this method should rebuild the index, not add to it.
:raises ValueError: No data available in the given iterable.
:param descriptors: Iterable of descriptor elements to build index over.
:type descriptors: collections.Iterable[smqtk.representation.DescriptorElement]
"""
@abc.abstractmethod
def rank(self, pos, neg):
"""
Rank the currently indexed elements given ``pos`` positive and ``neg``
negative exemplar descriptor elements.
:param pos: Iterable of positive exemplar DescriptorElement instances.
This may be optional for some implementations.
:type pos: collections.Iterable[smqtk.representation.DescriptorElement]
:param neg: Iterable of negative exemplar DescriptorElement instances.
This may be optional for some implementations.
:type neg: collections.Iterable[smqtk.representation.DescriptorElement]
:return: Map of indexed descriptor elements to a rank value between
[0, 1] (inclusive) range, where a 1.0 means most relevant and 0.0
meaning least relevant.
:rtype: dict[smqtk.representation.DescriptorElement, float]
"""
def get_relevancy_index_impls(reload_modules=False):
"""
Discover and return ``RelevancyIndex`` implementation classes found in the
given plugin search directory. Keys in the returned map are the names of the
discovered classes, and the paired values are the actual class type objects.
We look for modules (directories or files) that start with an alphanumeric
character ('_' prefixed files/directories are hidden, but not recommended).
Within a module we first look for a helper variable by the name
``RELEVANCY_INDEX_CLASS``, which can either be a single class object or
an iterable of class objects, to be exported. If the variable is set to
None, we skip that module and do not import anything. If the variable is not
present, we look for a class by the same name and casing as the module. If
neither are found, the module is skipped.
:param reload_modules: Explicitly reload discovered modules from source.
:type reload_modules: bool
:return: Map of discovered class object of type ``RelevancyIndex`` whose
keys are the string names of the classes.
:rtype: dict of (str, type)
"""
from smqtk.utils.plugin import get_plugins
import os
this_dir = os.path.abspath(os.path.dirname(__file__))
helper_var = "RELEVANCY_INDEX_CLASS"
def class_filter(cls):
log = logging.getLogger('.'.join([__name__,
'get_relevancy_index_impls',
'class_filter']))
if not cls.is_usable():
log.warn("Class type '%s' not usable, filtering out.",
cls.__name__)
return False
return True
return get_plugins(__name__, this_dir, helper_var, RelevancyIndex,
class_filter, reload_modules)
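# A minimal illustrative sketch (an assumption, not part of SMQTK) of what an
# implementation could look like; the configuration plumbing required by
# SmqtkAlgorithm is omitted, and DescriptorElement.vector() returning a numpy
# array is assumed, so this is shown as comments rather than live code.
#
#   class MeanCosineRelevancyIndex (RelevancyIndex):
#
#       def count(self):
#           return len(getattr(self, '_elems', ()))
#
#       def build_index(self, descriptors):
#           self._elems = list(descriptors)
#           if not self._elems:
#               raise ValueError("No descriptor elements provided")
#
#       def rank(self, pos, neg):
#           import numpy
#           p = numpy.mean([e.vector() for e in pos], axis=0)
#           scores = {}
#           for e in self._elems:
#               v = e.vector()
#               s = numpy.dot(p, v) / (numpy.linalg.norm(p) * numpy.linalg.norm(v) + 1e-12)
#               scores[e] = (s + 1.0) / 2.0  # map cosine [-1, 1] into [0, 1]
#           return scores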
|
[
"paul.tunison@kitware.com"
] |
paul.tunison@kitware.com
|
e1bfa27cf691a0b6ee3d8d8a074da682433cef02
|
f20931826a557f0d884f8b46de259840c29b7428
|
/meiduo_mall/meiduo_mall/utils/authenticate.py
|
2079f8c659a70dbebbb5291a0c6b7c9cbcc4867e
|
[] |
no_license
|
zy723/meiduo_project
|
38ccecc2fa1d61f2eb848ebc572dd43d45a534c8
|
f50a8105c63554b57419cb3494c3d323bb343f9c
|
refs/heads/master
| 2022-12-15T02:34:42.578549
| 2020-05-20T16:56:27
| 2020-05-20T16:56:27
| 248,264,846
| 0
| 0
| null | 2022-12-12T20:28:41
| 2020-03-18T15:08:40
|
TSQL
|
UTF-8
|
Python
| false
| false
| 2,024
|
py
|
"""
Add support for administrator users logging in with their account.
The login view of the JWT extension, on receiving a username and password, also calls
authenticate() provided by Django's authentication system to check whether the
username and password are correct.
By modifying the authentication backend of Django's auth system (mainly the
authenticate method) we can let the login account be either a username or a mobile number.
Modifying the backend requires inheriting from django.contrib.auth.backends.ModelBackend
and overriding the authenticate method.
Parameters of authenticate(self, request, username=None, password=None, **kwargs):
request   the request object of this authentication attempt
username  the account provided for this authentication attempt
password  the password provided for this authentication attempt
We only want administrator users to be able to log in to our admin site, so we have to
adjust Django's original user verification method.
Approach for overriding authenticate:
look up the User object by the username parameter, adding is_staff=True to the query;
if a User object is found, call its check_password method to verify the password.
"""
from django.contrib.auth.backends import ModelBackend
from users.models import User
class MeiduoModelBackend(ModelBackend):
def authenticate(self, request, username=None, password=None, **kwargs):
        # Check whether the request was sent through the Vue front-end component
if request is None:
try:
user = User.objects.get(username=username, is_staff=True)
except:
return None
            # Check the password
if user.check_password(password):
return user
else:
            # The username variable may hold a username or a mobile number; decide which, then query
try:
user = User.objects.get(username=username)
except:
                # If no matching record is found, return None for the later check
return None
            # Verify the password
if user.check_password(password):
return user
else:
return None
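# To activate this backend, list it in settings.py (the dotted path below is an
# assumption about the project's import root):
#
#   AUTHENTICATION_BACKENDS = ['meiduo_mall.utils.authenticate.MeiduoModelBackend']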
|
[
"zy723@vip.qq.com"
] |
zy723@vip.qq.com
|
9e819a87ebc1032db2785b8533da9e1f29cd9fe5
|
f62e4c46fb0f98879fb63977fa29631b02e3928c
|
/16 задание/РекурсФункцСТекст_005.py
|
555d533ac95655abf5e3a748654d72868d9b32bc
|
[] |
no_license
|
SeveralCamper/USE-2020-2021
|
c34f4d7a2c3e0f51529141781f523b63242a835d
|
ac1122649f2fd431a91af5dda5662492e2565109
|
refs/heads/master
| 2023-09-03T13:36:05.822568
| 2021-10-27T12:54:10
| 2021-10-27T12:54:10
| 392,303,515
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
# Task 16, problem No. 9163
# Below is a recursive algorithm F (the original task shows it in five programming languages).
# What is the sum of all the numbers printed on the screen when F(1) is called?
count = 0
def F(n):
global count
count += n
print(n)
if n < 4:
F(n + 1)
F(n + 3)
print(F(1), count)
# Answer: 25
|
[
"mikha.alkhimovich@mail.ru"
] |
mikha.alkhimovich@mail.ru
|
270526ead40fed7395ab36a2f0e5538850c9fcd5
|
43575c1324dc0760958a110d7f056bce88422a03
|
/listing/arrayqueue.py
|
fe91f1bc669035d14da292523bec642ed81d0941
|
[] |
no_license
|
nicolas4d/Data-Structures-and-Algorithms-Using-Python
|
1ffd74d26f09de2057bdc53998a56e56ed77c1de
|
a879ce6fd4033867783ee487d57d459b029eb5f8
|
refs/heads/master
| 2020-09-24T12:48:30.726766
| 2019-12-31T03:15:44
| 2019-12-31T03:15:44
| 225,761,970
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,219
|
py
|
# Implementation of the Queue ADT using a circular array.
from array import Array
class Queue :
# Creates an empty queue.
def __init__( self, maxSize ) :
self._count = 0
self._front = 0
self._back = maxSize - 1
self._qArray = Array( maxSize )
# Returns True if the queue is empty.
def isEmpty( self ) :
return self._count == 0
# Returns True if the queue is full.
def isFull( self ) :
return self._count == len(self._qArray)
# Returns the number of items in the queue.
def __len__( self ) :
return self._count
# Adds the given item to the queue.
def enqueue( self, item ):
assert not self.isFull(), "Cannot enqueue to a full queue."
maxSize = len(self._qArray)
self._back = (self._back + 1) % maxSize
self._qArray[self._back] = item
self._count += 1
# Removes and returns the first item in the queue.
def dequeue( self ):
assert not self.isEmpty(), "Cannot dequeue from an empty queue."
item = self._qArray[ self._front ]
maxSize = len(self._qArray)
self._front = (self._front + 1) % maxSize
self._count -= 1
return item
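# Brief usage sketch (assumes the Array class imported above is available, as
# elsewhere in these listings).
if __name__ == "__main__":
    q = Queue(3)
    for value in (10, 20, 30):
        q.enqueue(value)
    assert q.isFull() and len(q) == 3
    print(q.dequeue())          # 10 -- the front index wraps around the array
    q.enqueue(40)               # reuses the slot freed by the dequeue
    while not q.isEmpty():
        print(q.dequeue())      # 20, 30, 40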
|
[
"nicolas4d@foxmail.com"
] |
nicolas4d@foxmail.com
|
ed2bd9596b3c087bafd28769728ffae53934a728
|
e12e1e738d06dbbcdb7f3d051614e7aa493f795d
|
/mysite/config/settings.py
|
41635b0505022517d3d2ca9886160f51f35e9721
|
[] |
no_license
|
suhjohn/ec2-deploy-mysite
|
34c13e1ae3ff33ca14a6223ee8036432ea98d460
|
fb3c33cb64ecfa673f16da0385942f76bde748a1
|
refs/heads/master
| 2021-07-19T17:57:53.701059
| 2017-10-27T05:33:27
| 2017-10-27T05:33:27
| 108,366,401
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,380
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8!yz3f*(+w^kkhls0sl3)lfngzupjo(rsydyr2(89ci7!av(_w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'localhost',
'.ap-northeast-2.compute.amazonaws.com',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_DIR = os.path.join(BASE_DIR, "static")
# List of directories where Django will search for and collect static files
STATICFILES_DIRS = [
STATIC_DIR,
]
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = '/media/'
|
[
"johnsuh94@gmail.com"
] |
johnsuh94@gmail.com
|
c663cfef1a695d5be22587d9ff42d87025c79fdc
|
9edaf93c833ba90ae9a903aa3c44c407a7e55198
|
/netex/models/destination_display_variant_ref.py
|
3255e0537fdd4c971133e232146768cd1aa74573
|
[] |
no_license
|
tefra/xsdata-samples
|
c50aab4828b8c7c4448dbdab9c67d1ebc519e292
|
ef027fe02e6a075d8ed676c86a80e9647d944571
|
refs/heads/main
| 2023-08-14T10:31:12.152696
| 2023-07-25T18:01:22
| 2023-07-25T18:01:22
| 222,543,692
| 6
| 1
| null | 2023-06-25T07:21:04
| 2019-11-18T21:00:37
|
Python
|
UTF-8
|
Python
| false
| false
| 332
|
py
|
from dataclasses import dataclass
from .destination_display_variant_ref_structure import DestinationDisplayVariantRefStructure
__NAMESPACE__ = "http://www.netex.org.uk/netex"
@dataclass
class DestinationDisplayVariantRef(DestinationDisplayVariantRefStructure):
class Meta:
namespace = "http://www.netex.org.uk/netex"
|
[
"chris@komposta.net"
] |
chris@komposta.net
|
3a2ad1e33b7dc2a198f28492c836efb94a98b834
|
3562fa51db47b1b1e97785191f0c04644d47c283
|
/python/plat3/2152.py
|
62489943de0aeb7ac35dc6bf78cf5e4aa950e1b3
|
[] |
no_license
|
seono/algorithm
|
c74181d564525e3a0214824c4a619c51cd52a042
|
78a252b29290eaa1ea076d76cd83e5dbbb7d8d89
|
refs/heads/master
| 2021-07-13T07:13:41.523888
| 2021-04-24T14:05:00
| 2021-04-24T14:05:00
| 244,609,427
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,697
|
py
|
import sys
from collections import deque
sys.setrecursionlimit(100000)
input = sys.stdin.readline
N, M, S, T = map(int,input().split())
adj = [[] for _ in range(N+1)]
for i in range(M):
s,t = map(int,input().split())
adj[s].append(t)
cnt,SN = 0,0
dfsn = [0]*(N+1)
scc_arr = []
scc_num = [0]*(N+1)
finished = [False]*(N+1)
st = []
def scc(idx):
global cnt,SN
dfsn[idx] = cnt+1
cnt+=1
st.append(idx)
result = dfsn[idx]
for nx in adj[idx]:
if dfsn[nx]==0:result = min(result,scc(nx))
elif not finished[nx]: result = min(result, dfsn[nx])
if result == dfsn[idx]:
curSCC = []
while True:
t = st.pop()
curSCC.append(t)
finished[t]=True
scc_num[t]=SN
if t==idx:break
scc_arr.append(curSCC)
SN+=1
return result
for i in range(1,N+1):
if dfsn[i]==0:scc(i)
new_adj = [[] for _ in range(SN)]
indgree = [0]*SN
finished = [0]*SN
new_s,new_t = scc_num[S],scc_num[T]
for i,tmp in enumerate(scc_arr):
for n in tmp:
for nx in adj[n]:
if scc_num[nx]==i:continue
new_adj[i].append(scc_num[nx])
indgree[scc_num[nx]]+=1
def dfs():
can = [False]*SN
can[new_s]=True
finished[new_s]=len(scc_arr[new_s])
q = deque([])
for i in range(SN):
if not indgree[i]: q.append(i)
while q:
n = q.popleft()
for nx in new_adj[n]:
if can[n]:
finished[nx]=max(finished[nx],finished[n]+len(scc_arr[nx]))
can[nx]=True
indgree[nx]-=1
if indgree[nx]==0:
q.append(nx)
return finished[new_t]
print(dfs())
|
[
"tjsh0111@gmail.com"
] |
tjsh0111@gmail.com
|
a32df99969cc2b00821ca9dfd9e146584b61aad7
|
ed63b9b615c0f1484746e87b54a0c0b233ddf5c2
|
/tests/test_parser.py
|
90b755aedee63b62e364f59f3cb3c53381aaf2e0
|
[
"MIT"
] |
permissive
|
timofurrer/embedeval
|
ae02026da6cd5601b16afe1cbb543552cbf461ac
|
08a69c950c9a0ac59a8a0ca728af2627c7bcc43a
|
refs/heads/master
| 2020-07-29T10:41:56.322842
| 2020-01-20T10:32:46
| 2020-01-20T10:32:51
| 209,766,108
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,581
|
py
|
"""
embedeval
~~~~~~~~~
NLP Embedding Evaluation Tool
:copyright: (c) 2019 by Timo Furrer <tuxtimo@gmail.com>
:license: MIT, see LICENSE for more details.
"""
import textwrap
import uuid
import numpy as np
import pytest
from embedeval.parsers.word2vec_gensim import load_embedding as gensim_load_embedding
from embedeval.parsers.word2vec_simple import load_embedding as simple_load_embedding
def create_tmp_word_embedding(path, embedding_content):
"""Create a temporary Word Embedding file"""
# FIXME(TF): maybe refactor interface so that file system can be avoided in unit tests.
created_file = path / str(uuid.uuid4())
with open(created_file, "w", encoding="utf-8") as embedding_file:
embedding_file.write(textwrap.dedent(embedding_content).strip())
return created_file
@pytest.mark.parametrize(
"load_embedding_func",
[
pytest.param(simple_load_embedding, id="simple parser"),
pytest.param(gensim_load_embedding, id="gensim parser"),
],
)
def test_should_parse_word2vec_with_single_entry(load_embedding_func, tmp_path):
"""Loading a Word2Vec Embedding should pass for single word"""
# GIVEN
word2vec_path = create_tmp_word_embedding(
tmp_path,
"""
1 2
word 1.0 2.0
""",
)
# WHEN
embedding = load_embedding_func(word2vec_path)
# THEN
assert embedding.get_words() == ["word"]
assert np.array_equal(embedding.get_word_vector("word"), np.array([1.0, 2.0]))
@pytest.mark.parametrize(
"load_embedding_func",
[
pytest.param(simple_load_embedding, id="simple parser"),
pytest.param(gensim_load_embedding, id="gensim parser"),
],
)
def test_should_parse_word2vec_with_multiple_entires(load_embedding_func, tmp_path):
"""Loading a Word2Vec Embedding should pass for multiple word entries"""
# GIVEN
word2vec_path = create_tmp_word_embedding(
tmp_path,
"""
4 2
word1 1.0 2.0
word2 3.0 4.0
word3 5.0 6.0
word4 7.0 8.0
""",
)
# WHEN
embedding = load_embedding_func(word2vec_path)
# THEN
assert embedding.get_words() == ["word1", "word2", "word3", "word4"]
assert np.array_equal(embedding.get_word_vector("word1"), np.array([1.0, 2.0]))
assert np.array_equal(embedding.get_word_vector("word2"), np.array([3.0, 4.0]))
assert np.array_equal(embedding.get_word_vector("word3"), np.array([5.0, 6.0]))
assert np.array_equal(embedding.get_word_vector("word4"), np.array([7.0, 8.0]))
|
[
"tuxtimo@gmail.com"
] |
tuxtimo@gmail.com
|
1e7fd967ad595fb9792cb574c9219de21724fb93
|
ac652ff7636d4c3336918d0f96aa8ea1bba3ab28
|
/fastvid/posts/serializers/postmodel.py
|
c0152d21e28e0e4c646800e2244e7b172f680400
|
[] |
no_license
|
pn101/fastvid
|
eebff58e9dd6b967a52361713ed34462e0713d88
|
9f57c577c558906e3fd5c3ab44f112588ae84ed2
|
refs/heads/develop
| 2021-01-20T18:58:34.398325
| 2016-07-05T09:29:49
| 2016-07-05T09:29:49
| 61,545,668
| 0
| 0
| null | 2016-06-20T13:14:42
| 2016-06-20T12:22:08
| null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
from rest_framework import serializers
from posts.models import Post
class PostModelSerializer(serializers.ModelSerializer):
username = serializers.CharField(source='user.username')
class Meta:
model = Post
fields = [
'pk',
'username',
'title',
'content',
'youtube_original_url',
'youtube_embed_url',
]
|
[
"philipnam101@gmail.com"
] |
philipnam101@gmail.com
|
1e3299112d0d4a422e71d7d55d2a4869b4e74dc6
|
917e376668f325c0452fe05fcf3f6348a6ac4336
|
/tests/xla_interpreter_test.py
|
d3b758aa0cb0e09d3959f3ad74c8e0192d75cc0a
|
[
"Apache-2.0"
] |
permissive
|
wusixer/jax
|
5f8d78a89679db74d0d62806725cc820246d4b4e
|
66de981e1dfbe04a41b2c003f171fea7bb92585f
|
refs/heads/main
| 2023-06-15T09:10:45.599555
| 2021-07-06T01:58:11
| 2021-07-06T01:58:11
| 383,305,925
| 0
| 0
|
NOASSERTION
| 2021-07-06T01:32:55
| 2021-07-06T01:32:55
| null |
UTF-8
|
Python
| false
| false
| 1,252
|
py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from absl.testing import absltest
from jax import test_util as jtu
from jax._src import api
from jax.interpreters import xla
class XlaInterpreterTest(jtu.JaxTestCase):
@unittest.skipIf(not xla._ALLOW_ARG_PRUNING, "Test requires jaxlib 0.1.66")
def test_prune_jit_args(self):
def f(*args):
return args[0]
closed_jaxpr = api.make_jaxpr(f)(*range(10))
pruned_jaxpr, kept_const_idx, kept_var_idx = xla._prune_unused_inputs(
closed_jaxpr.jaxpr)
assert len(pruned_jaxpr.invars) == 1
assert kept_const_idx == set()
assert kept_var_idx == {0}
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
|
[
"no-reply@google.com"
] |
no-reply@google.com
|
c35af357be8ae118dde3133aaaff753973cb786b
|
f756eedd0fd4cee9b369978d1e20287dd579e4da
|
/InmoovScript/services/7_Inmoov.py
|
ac1ef25dc0796c3cb41c2579757db642007edf0a
|
[] |
no_license
|
linuxrodo/inmoov
|
cf02421443d6976f153a64c898e2c209e32cc246
|
fe8391d6d59ccdf6bdf5b382872fdf4bf77f4b09
|
refs/heads/master
| 2021-01-21T09:06:52.835689
| 2017-02-23T04:07:38
| 2017-02-23T04:07:38
| 82,861,443
| 0
| 0
| null | 2017-02-22T23:05:24
| 2017-02-22T23:05:24
| null |
UTF-8
|
Python
| false
| false
| 934
|
py
|
# ##############################################################################
# INMOOV SERVICE
# ##############################################################################
# ##############################################################################
# MRL SERVICE CALL
# ##############################################################################
inMoov=i01
#varduinoright = Runtime.start("varduinoright","VirtualArduino")
#varduinoright.connect(MyRightPort)
#varduinoleft = Runtime.start("varduinoleft","VirtualArduino")
#varduinoleft.connect(MyLeftPort)
#Inmoov Left / right arduino connect
if ScriptType=="RightSide" or ScriptType=="Full":
right = Runtime.createAndStart("i01.right", "Arduino")
RightPortIsConnected=CheckArduinos(right,MyRightPort)
if ScriptType=="LeftSide" or ScriptType=="Full":
left = Runtime.createAndStart("i01.left", "Arduino")
LeftPortIsConnected=CheckArduinos(left,MyLeftPort)
|
[
"moz4r@free.fr"
] |
moz4r@free.fr
|
5d7a771e779f0b24d4bc1ae2bf01ac98e9d0c325
|
9423dd5312d6c05f61ec902a26ff627c6ef58f97
|
/Python/functions/get_middle_point.py
|
6d5bb3a5c5f3d271d454c9c6da74dc57df5a617c
|
[] |
no_license
|
NehvedovichVlad/small_tasks
|
01c093b07d521da59c559591559d61e81829df0f
|
1c4085e3a2f0a4530c82f57b98f0f83b18e68567
|
refs/heads/main
| 2023-03-17T15:49:04.480092
| 2021-03-11T20:29:05
| 2021-03-11T20:29:05
| 308,935,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,013
|
py
|
""""
Середина отрезка
Напишите функцию get_middle_point(x1, y1, x2, y2),
которая принимает в качестве аргументов координаты концов отрезка
(x_1; \, y_1)(x 1;y1) и (x_2; \, y_2)(x2;y2)
и возвращает координаты точки являющейся серединой данного отрезка.
"""
# -------------------------------------------------------------------------------------------------
# Option 1
def get_middle_point(x1, y1, x2, y2):
return (x1+x2)/2, (y1+y2)/2
x_1, y_1 = int(input()), int(input())
x_2, y_2 = int(input()), int(input())
x, y = get_middle_point(x_1, y_1, x_2, y_2)
print(x, y)
# -------------------------------------------------------------------------------------------------
# Option 2
def get_middle_point(x1, y1, x2, y2):
return (x1 + x2) / 2, (y1 + y2) / 2
print(*get_middle_point(int(input()), int(input()), int(input()), int(input())))
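# Worked example: for the endpoints (0, 0) and (2, 4) the midpoint is
# ((0 + 2) / 2, (0 + 4) / 2) = (1.0, 2.0), so get_middle_point(0, 0, 2, 4)
# returns (1.0, 2.0).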
|
[
"vladislavnehvedovich@gmail.com"
] |
vladislavnehvedovich@gmail.com
|
cec69182b84e9aa6bff4f48d54f59182d811ddf5
|
de847b2e9a5236887fb6a164fedc0e0c86b84e6c
|
/pythonturorial/workshopprograms/userinput.py
|
0b0ce93aae289361bd5e6a95386c281114c27be5
|
[] |
no_license
|
raghuprasadks/pythonmicrosoftexam
|
9a6bcafcdbc5bb6727278f421bb1a31dc5b7427b
|
68dacab8aa98d0ff39f1f36c3ce8e666be3760a0
|
refs/heads/master
| 2020-09-15T02:51:06.809959
| 2020-02-12T01:18:42
| 2020-02-12T01:18:42
| 223,330,626
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
name = input("Enter your name")
print(type(name))
print('your name is ',name)
age = int(input("enter your age"))
print('your age is ',age)
nextyear = age +1
print('your age after one year',nextyear)
amount = float(input("Enter the payment made for purchase of fruits"))
print('float conversion',amount)
print("Enter names of your friends")
friends = eval(input("Enter names as a list"))
print('evaluated as list ',type(friends))
print('here comes your friends ',friends)
|
[
"prasadraghuks@gmail.com"
] |
prasadraghuks@gmail.com
|
8e9aecb12e6e5e2f8c0bc687ca323a81ccf17b40
|
4935e2ef7994222178f950319f9f8d3e2adfa543
|
/summer/2018_07_26/4sum-ii.py
|
93f49d2327c742fa53619df9e5a30f374a233dd2
|
[] |
no_license
|
shaheming/leecode
|
e853b59469b97ca97a5b4ecd80497b3dac3fb10f
|
a8b59573dc201438ebd5a5ab64e9ac61255a4abd
|
refs/heads/master
| 2021-07-03T03:57:22.718410
| 2019-04-06T18:19:53
| 2019-04-06T18:19:53
| 140,241,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
#这个问题本来是一个 O(N^4) 但是通过拆解可以拆解为两个 O(2*N^2) 的问题
class Solution:
def fourSumCount(self, A, B, C, D):
"""
:type A: List[int]
:type B: List[int]
:type C: List[int]
:type D: List[int]
:rtype: int
"""
count = 0
dicA, dicB, dicC, dicD = {}, {}, {}, {}
for a in A:
for b in B:
if a + b in dicA:
dicA[a + b] += 1
else:
dicA[a + b] = 1
for c in C:
for d in D:
if -(c + d) in dicA:
count += dicA[-(c + d)]
return count
|
[
"shmsand@gmail.com"
] |
shmsand@gmail.com
|
4d7505d380777b2beba7bed17181483a5992b5c4
|
da9b9f75a693d17102be45b88efc212ca6da4085
|
/sdk/appconfiguration/azure-appconfiguration/setup.py
|
cbab0ebedf6658d7f5da30366ae1070a9eca46c9
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
elraikhm/azure-sdk-for-python
|
e1f57b2b4d8cc196fb04eb83d81022f50ff63db7
|
dcb6fdd18b0d8e0f1d7b34fdf82b27a90ee8eafc
|
refs/heads/master
| 2021-06-21T22:01:37.063647
| 2021-05-21T23:43:56
| 2021-05-21T23:43:56
| 216,855,069
| 0
| 0
|
MIT
| 2019-10-22T16:05:03
| 2019-10-22T16:05:02
| null |
UTF-8
|
Python
| false
| false
| 3,251
|
py
|
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import sys
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-appconfiguration"
PACKAGE_PPRINT_NAME = "App Configuration Data"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, '_version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('HISTORY.md', encoding='utf-8') as f:
history = f.read()
exclude_packages = [
'tests',
'samples',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
]
if sys.version_info < (3, 5, 3):
exclude_packages.extend([
'*.aio',
'*.aio.*'
])
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft {} Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + history,
long_description_content_type='text/markdown',
license='MIT License',
author='Microsoft Corporation',
author_email='azpysdkhelp@microsoft.com',
url='https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/appconfiguration/azure-appconfiguration',
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=exclude_packages),
install_requires=[
"msrest>=0.6.10",
"azure-core<2.0.0,>=1.0.0b5",
],
extras_require={
":python_version<'3.0'": ['azure-nspkg'],
":python_version<'3.4'": ['enum34>=1.0.4'],
":python_version<'3.5'": ['typing'],
"async:python_version>='3.5'": [
'aiohttp>=3.0',
'aiodns>=2.0'
],
}
)
|
[
"noreply@github.com"
] |
elraikhm.noreply@github.com
|
32c207f3631eab9b520c22cef2980be18016e080
|
8b7d98c5077d1607568460ce5ae8da801b11293a
|
/accounts/forms.py
|
f47b51e9149eed83879485476cefed208ceca865
|
[] |
no_license
|
Th0rn-dev/kiteupru
|
de0e93fd791522433e2ab34efac1e86a0cb0f613
|
df240ff50f51b390f7e27ca35841c6482642d97d
|
refs/heads/master
| 2023-05-04T13:44:05.561708
| 2021-05-30T19:01:59
| 2021-05-30T19:45:47
| 372,293,086
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
from django import forms
from .models import Profile
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ('avatar',)
|
[
"11_ka@rambler.ru"
] |
11_ka@rambler.ru
|
21190bb8e62dd782eafae6f70363d5471f54ebd4
|
39b35326534d6efa8a60344ef59eac3d8cea562f
|
/crudpj/crudpj/wsgi.py
|
b639603353b77ea9655595ff087206ea6ebb8995
|
[] |
no_license
|
Hyo-gyeong/Django_review
|
8635e8311111cab56066c6b87429c7f57c5e42c3
|
8b59d717c0c8c4404230c8eaa42e6074cacdd712
|
refs/heads/master
| 2021-01-03T08:32:06.706689
| 2020-08-31T04:55:59
| 2020-08-31T04:55:59
| 240,000,924
| 0
| 0
| null | 2020-08-17T19:21:30
| 2020-02-12T11:53:19
|
Python
|
UTF-8
|
Python
| false
| false
| 405
|
py
|
"""
WSGI config for crudpj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'crudpj.settings')
application = get_wsgi_application()
|
[
"cdkrcd8@gmail.com"
] |
cdkrcd8@gmail.com
|
3d6106b6e7e3d37af803f11255cad2346a387720
|
434a76f2a39b6152e18f25c092e2d3e272bcaa7d
|
/api/views/blockchains/resources.py
|
b30b53e5a6ba756d1d935653476963c9e299f4e2
|
[
"Apache-2.0"
] |
permissive
|
DaCeige/machinaris
|
fce98168d0ec288b47c37662079cbb928975badc
|
2d3837c8af00bb41162f8be1cbf6eaf1cb6c6fdb
|
refs/heads/main
| 2023-08-24T13:10:22.511119
| 2021-10-07T18:55:25
| 2021-10-07T18:55:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,326
|
py
|
import datetime as dt
from flask.views import MethodView
from api import app
from api.extensions.api import Blueprint, SQLCursorPage
from common.extensions.database import db
from common.models import Blockchain
from .schemas import BlockchainSchema, BlockchainQueryArgsSchema
blp = Blueprint(
'Blockchains',
__name__,
url_prefix='/blockchains',
description="Operations on blockchains"
)
@blp.route('/')
class Blockchains(MethodView):
@blp.etag
@blp.arguments(BlockchainQueryArgsSchema, location='query')
@blp.response(200, BlockchainSchema(many=True))
@blp.paginate(SQLCursorPage)
def get(self, args):
return db.session.query(Blockchain).filter_by(**args)
@blp.etag
@blp.arguments(BlockchainSchema)
@blp.response(201, BlockchainSchema)
def post(self, new_item):
item = db.session.query(Blockchain).filter(Blockchain.hostname==new_item['hostname'], \
Blockchain.blockchain==new_item['blockchain']).first()
if item: # upsert
new_item['created_at'] = item.created_at
new_item['updated_at'] = dt.datetime.now()
BlockchainSchema().update(item, new_item)
else: # insert
item = Blockchain(**new_item)
db.session.add(item)
db.session.commit()
return item
@blp.route('/<hostname>/<blockchain>')
class BlockchainsByHostname(MethodView):
@blp.etag
@blp.response(200, BlockchainSchema)
def get(self, hostname):
return db.session.query(Blockchain).get_or_404(hostname)
@blp.etag
@blp.arguments(BlockchainSchema)
@blp.response(200, BlockchainSchema)
def put(self, new_item, hostname, blockchain):
item = db.session.query(Blockchain).get_or_404(hostname)
new_item['hostname'] = item.hostname
new_item['created_at'] = item.created_at
new_item['updated_at'] = dt.datetime.now()
blp.check_etag(item, BlockchainSchema)
BlockchainSchema().update(item, new_item)
db.session.add(item)
db.session.commit()
return item
@blp.etag
@blp.response(204)
def delete(self, hostname):
item = db.session.query(Blockchain).get_or_404(hostname)
blp.check_etag(item, BlockchainSchema)
db.session.delete(item)
db.session.commit()
|
[
"guydavis.ca@gmail.com"
] |
guydavis.ca@gmail.com
|
eb072ee218d2a1895d7da00df4591fd81018b7c7
|
584db1be8b6bdedaa56d186692ad72da5ee07164
|
/patron/tests/unit/virt/xenapi/test_driver.py
|
f8674f0cec1082ea4a77834f9a8001aa2c43c8e8
|
[
"Apache-2.0"
] |
permissive
|
casbin/openstack-patron
|
66006f57725cf1c3d735cd5529d3459fd77384c8
|
b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25
|
refs/heads/master
| 2023-05-31T05:23:37.721768
| 2015-12-31T12:18:17
| 2015-12-31T12:18:17
| 382,054,546
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,308
|
py
|
# Copyright (c) 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import mock
from oslo_utils import units
from patron.compute import arch
from patron.tests.unit.virt.xenapi import stubs
from patron.virt import driver
from patron.virt import fake
from patron.virt import xenapi
from patron.virt.xenapi import driver as xenapi_driver
class XenAPIDriverTestCase(stubs.XenAPITestBaseNoDB):
"""Unit tests for Driver operations."""
def _get_driver(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.flags(connection_url='test_url',
connection_password='test_pass', group='xenserver')
return xenapi.XenAPIDriver(fake.FakeVirtAPI(), False)
def host_stats(self, refresh=True):
return {'host_memory_total': 3 * units.Mi,
'host_memory_free_computed': 2 * units.Mi,
'disk_total': 5 * units.Gi,
'disk_used': 2 * units.Gi,
'disk_allocated': 4 * units.Gi,
'host_hostname': 'somename',
'supported_instances': arch.X86_64,
'host_cpu_info': {'cpu_count': 50},
'cpu_model': {
'vendor': 'GenuineIntel',
'model': 'Intel(R) Xeon(R) CPU X3430 @ 2.40GHz',
'topology': {
'sockets': 1,
'cores': 4,
'threads': 1,
},
'features': [
'fpu', 'de', 'tsc', 'msr', 'pae', 'mce',
'cx8', 'apic', 'sep', 'mtrr', 'mca',
'cmov', 'pat', 'clflush', 'acpi', 'mmx',
'fxsr', 'sse', 'sse2', 'ss', 'ht',
'nx', 'constant_tsc', 'nonstop_tsc',
'aperfmperf', 'pni', 'vmx', 'est', 'ssse3',
'sse4_1', 'sse4_2', 'popcnt', 'hypervisor',
'ida', 'tpr_shadow', 'vnmi', 'flexpriority',
'ept', 'vpid',
],
},
'vcpus_used': 10,
'pci_passthrough_devices': '',
'host_other-config': {'iscsi_iqn': 'someiqn'}}
def test_available_resource(self):
driver = self._get_driver()
driver._session.product_version = (6, 8, 2)
self.stubs.Set(driver.host_state, 'get_host_stats', self.host_stats)
resources = driver.get_available_resource(None)
self.assertEqual(6008002, resources['hypervisor_version'])
self.assertEqual(50, resources['vcpus'])
self.assertEqual(3, resources['memory_mb'])
self.assertEqual(5, resources['local_gb'])
self.assertEqual(10, resources['vcpus_used'])
self.assertEqual(3 - 2, resources['memory_mb_used'])
self.assertEqual(2, resources['local_gb_used'])
self.assertEqual('xen', resources['hypervisor_type'])
self.assertEqual('somename', resources['hypervisor_hostname'])
self.assertEqual(1, resources['disk_available_least'])
def test_overhead(self):
driver = self._get_driver()
instance = {'memory_mb': 30720, 'vcpus': 4}
# expected memory overhead per:
# https://wiki.openstack.org/wiki/XenServer/Overhead
expected = ((instance['memory_mb'] * xenapi_driver.OVERHEAD_PER_MB) +
(instance['vcpus'] * xenapi_driver.OVERHEAD_PER_VCPU) +
xenapi_driver.OVERHEAD_BASE)
expected = math.ceil(expected)
overhead = driver.estimate_instance_overhead(instance)
self.assertEqual(expected, overhead['memory_mb'])
def test_set_bootable(self):
driver = self._get_driver()
self.mox.StubOutWithMock(driver._vmops, 'set_bootable')
driver._vmops.set_bootable('inst', True)
self.mox.ReplayAll()
driver.set_bootable('inst', True)
def test_post_interrupted_snapshot_cleanup(self):
driver = self._get_driver()
fake_vmops_cleanup = mock.Mock()
driver._vmops.post_interrupted_snapshot_cleanup = fake_vmops_cleanup
driver.post_interrupted_snapshot_cleanup("context", "instance")
fake_vmops_cleanup.assert_called_once_with("context", "instance")
def test_public_api_signatures(self):
inst = self._get_driver()
self.assertPublicAPISignatures(driver.ComputeDriver(None), inst)
def test_get_volume_connector(self):
ip = '123.123.123.123'
driver = self._get_driver()
self.flags(connection_url='http://%s' % ip,
connection_password='test_pass', group='xenserver')
self.stubs.Set(driver.host_state, 'get_host_stats', self.host_stats)
connector = driver.get_volume_connector({'uuid': 'fake'})
self.assertIn('ip', connector)
self.assertEqual(connector['ip'], ip)
self.assertIn('initiator', connector)
self.assertEqual(connector['initiator'], 'someiqn')
def test_get_block_storage_ip(self):
my_ip = '123.123.123.123'
connection_ip = '124.124.124.124'
driver = self._get_driver()
self.flags(connection_url='http://%s' % connection_ip,
group='xenserver')
self.flags(my_ip=my_ip, my_block_storage_ip=my_ip)
ip = driver._get_block_storage_ip()
self.assertEqual(connection_ip, ip)
def test_get_block_storage_ip_conf(self):
driver = self._get_driver()
my_ip = '123.123.123.123'
my_block_storage_ip = '124.124.124.124'
self.flags(my_ip=my_ip, my_block_storage_ip=my_block_storage_ip)
ip = driver._get_block_storage_ip()
self.assertEqual(my_block_storage_ip, ip)
|
[
"hsluoyz@qq.com"
] |
hsluoyz@qq.com
|
0c31c2c12ba0cee2fca07eaa29b494befb80343a
|
1626e16760c9c5b5dc9bd7c345871c716d5ffd99
|
/Problems/0001_0099/0037_Sudoku_Solver/Project_Python3/Solution1.py
|
55219e728189a2d7038f6a589b53cbfbcce69186
|
[] |
no_license
|
NobuyukiInoue/LeetCode
|
94ddb19e63cb8d0775cdc13f311fe90c87a1d718
|
3f0ffd519404165fd1a735441b212c801fd1ad1e
|
refs/heads/master
| 2023-09-01T07:38:50.939942
| 2023-08-23T09:51:17
| 2023-08-23T09:51:17
| 158,100,912
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,391
|
py
|
class Solution:
# def solveSudoku(self, board: List[List[str]]) -> None:
def solveSudoku(self, board):
"""
Do not return anything, modify board in-place instead.
"""
from collections import defaultdict
nums = [str(i) for i in range(1, 10)]
rows, cols, cells, empty = defaultdict(set), defaultdict(set), defaultdict(set), set()
for i in range(9):
for j in range(9):
if board[i][j] == '.':
empty.add((i, j))
else:
rows[i].add(board[i][j])
cols[j].add(board[i][j])
cells[i//3, j//3].add(board[i][j])
def fill():
i, j = max(empty, key=lambda x: len(rows[x[0]]) + len(cols[x[1]]) + len(cells[x[0]//3, x[1]//3]))
empty.remove((i, j))
for num in nums:
if not (num in rows[i] or num in cols[j] or num in cells[i//3, j//3]):
board[i][j] = num; rows[i].add(num); cols[j].add(num); cells[i//3, j//3].add(num)
if not empty: return True
if fill(): return True
board[i][j] = '.'; rows[i].remove(num); cols[j].remove(num); cells[i//3, j//3].remove(num)
empty.add((i, j))
return False
if not empty: return
_ = fill()
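# A minimal usage sketch (a standard example puzzle; the driver below is hypothetical and not part of
# the original file). solveSudoku fills the board in place.
if __name__ == "__main__":
    board = [
        ["5", "3", ".", ".", "7", ".", ".", ".", "."],
        ["6", ".", ".", "1", "9", "5", ".", ".", "."],
        [".", "9", "8", ".", ".", ".", ".", "6", "."],
        ["8", ".", ".", ".", "6", ".", ".", ".", "3"],
        ["4", ".", ".", "8", ".", "3", ".", ".", "1"],
        ["7", ".", ".", ".", "2", ".", ".", ".", "6"],
        [".", "6", ".", ".", ".", ".", "2", "8", "."],
        [".", ".", ".", "4", "1", "9", ".", ".", "5"],
        [".", ".", ".", ".", "8", ".", ".", "7", "9"],
    ]
    Solution().solveSudoku(board)
    print("\n".join(" ".join(row) for row in board))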
|
[
"spring555@gmail.com"
] |
spring555@gmail.com
|
dc1572244f1304493d64d667155fcbbc94bf2c68
|
30f8afce1ba484183d8e1e14aae76cabb2d92354
|
/pythonNet/day2/server_udp.py
|
1d475122c9fb66755a8bb12c1c143cd7db4d6ed6
|
[] |
no_license
|
brooot/Python_Base_Codes
|
d83e8c3b8a37b86672412c812fdb0d47deb67836
|
a864685e160b5df4162a6f9fb910627eda702aaf
|
refs/heads/master
| 2023-04-10T20:08:39.161289
| 2021-03-25T12:59:23
| 2021-03-25T12:59:23
| 200,570,412
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 491
|
py
|
#!/usr/bin/env python3
from socket import *
# Create the socket object
sockfd = socket(AF_INET, SOCK_DGRAM)
# Bind the address
IP = '0.0.0.0'
PORT = 8888
ADDR = (IP, PORT)
sockfd.bind(ADDR)
while True:
    # Receive data (unlike TCP, recvfrom also returns the sender's address)
data, addr = sockfd.recvfrom(1024)
message = "已收到来自%s的数据:%s" % (addr, data.decode())
print(message)
    # Send a reply
send_message = "已经收到您的数据。".encode()
sockfd.sendto(send_message, addr)
sockfd.close()
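# A minimal matching client sketch (hypothetical, meant for a separate script; it assumes the echo
# server above is already running on localhost:8888):
#
#   from socket import socket, AF_INET, SOCK_DGRAM
#
#   client = socket(AF_INET, SOCK_DGRAM)
#   client.sendto("hello".encode(), ("127.0.0.1", 8888))
#   reply, server_addr = client.recvfrom(1024)
#   print(reply.decode())
#   client.close()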
|
[
"1442704671@qq.com"
] |
1442704671@qq.com
|
1818810ee229cd68db13a66efefecbe5872edcc2
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_35/216.py
|
d0006fc021592ec292be67dc3cf4606ceec3d5d5
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,591
|
py
|
import string
class GraphNode(object):
def __init__(self, x_pos, y_pos):
self.x_pos = x_pos
self.y_pos = y_pos
self.flows_to = None
self.flows_from = []
self.label = None
def set_flows_to(self, other):
assert self.flows_to != other
self.flows_to = other
other.flows_from.append(self)
def label_node(node, label):
if node.label is None:
node.label = label
if node.flows_to:
label_node(node.flows_to, label)
for from_node in node.flows_from:
label_node(from_node, label)
else:
if node.label != label:
print "Relabeling of node"
adsafa
def label_nodes(h, w, node_map):
current_label = 0
for i in xrange(h):
for j in xrange(w):
label = string.lowercase[current_label]
node = node_map[i][j]
if node.label is None:
label_node(node, label)
current_label += 1
def flow_water(w,h, height_map, node_map):
for i in xrange(h):
for j in xrange(w):
lowest = height_map[i][j]
flow_to = None
if i - 1 >= 0:
if height_map[i-1][j] < lowest:
lowest = height_map[i-1][j]
flow_to = node_map[i-1][j]
if j - 1 >= 0:
if height_map[i][j-1] < lowest:
lowest = height_map[i][j-1]
flow_to = node_map[i][j-1]
if j + 1 < w:
if height_map[i][j+1] < lowest:
lowest = height_map[i][j+1]
flow_to = node_map[i][j+1]
if i + 1 < h:
if height_map[i+1][j] < lowest:
lowest = height_map[i+1][j]
flow_to = node_map[i+1][j]
if flow_to is not None:
node_map[i][j].set_flows_to(flow_to)
def main():
number_of_cases = int(raw_input())
for case_number in range(1, number_of_cases+1):
h,w = map(int, raw_input().split())
print 'Case #%d:' % (case_number,)
height_map = []
node_map = []
for i in xrange(h):
height_map.append(raw_input().split())
line = []
for j in xrange(w):
line.append(GraphNode(i,j))
node_map.append(line)
flow_water(w, h, height_map, node_map)
label_nodes(h, w, node_map)
for node_line in node_map:
for node in node_line:
print node.label,
print
main()
#w, h = 3,3
#height_map = []
#node_map = []
#height_map.append([9,6,3])
#height_map.append([5,9,6])
#height_map.append([3,5,9])
#for i in xrange(h):
#line = []
#for j in xrange(w):
#line.append(GraphNode(i,j))
#node_map.append(line)
#flow_water(w, h, height_map, node_map)
#label_nodes(h, w, node_map)
#for node_line in node_map:
#for node in node_line:
#print node.label,
#print
##if node.flows_to:
##print node.x_pos, node.y_pos, node.flows_to.x_pos, node.flows_to.y_pos, node.label
##else:
##print node.x_pos, node.y_pos, -1, -1, node.label
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
661caf7b460c7daa1b1dcd64f2926900fa1374e5
|
2286b880df34e1bfabe79b3605de287040404560
|
/02-02/todolist/task/urls.py
|
97bdb244e32e547aaa634f5ef9fd3c9aa9311fa6
|
[] |
no_license
|
iklimah27/praxis-academy-2
|
e5d8b08807980d6fd8ff6ab73caa6ea18083c7f8
|
925853b520c9a8d7a87d8980d7fedfa604d3b4c8
|
refs/heads/master
| 2022-12-25T01:54:45.572190
| 2020-10-15T07:22:06
| 2020-10-15T07:22:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from django.contrib import admin
from django.urls import path
from django.shortcuts import render
from . import views
urlpatterns = [
path('', views.index),
path('<id>/', views.detail),
path('<id>/delete/', views.delete),
]
|
[
"hatami391998@gmail.com"
] |
hatami391998@gmail.com
|
1fe2656260edd35919c9745fc47bafc67970c346
|
c9c5463996bf9e2adcd4918857382121b0a5aa56
|
/leetcode/堆/重构字符串.py
|
0147b66b14e19194532f2ddae5788e111bc1a915
|
[] |
no_license
|
Da1anna/Data-Structed-and-Algorithm_python
|
fdf370c355248081990c57c1c8eb5e05c4781e2b
|
cce067ef4374128924018b00c5ea77d2e869a834
|
refs/heads/master
| 2022-12-27T13:24:36.084657
| 2020-10-13T02:39:39
| 2020-10-13T02:39:39
| 174,938,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,278
|
py
|
'''
Given a string S, check whether the letters can be rearranged so that no two adjacent characters are the same.
If it is possible, output any feasible result. If it is not possible, return an empty string.
Example 1:
Input: S = "aab"
Output: "aba"
Example 2:
Input: S = "aaab"
Output: ""
Note:
S contains only lowercase letters and its length is in the range [1, 500].
Source: LeetCode (LeetCode-CN)
Link: https://leetcode-cn.com/problems/reorganize-string
The copyright belongs to LeetCode; for commercial reuse please contact them for authorization, and credit the source for non-commercial reuse.
'''
'''
Approach 1: two-pointer swapping. Walk the list; whenever a letter equals the previous one, search from the
current position onward for a letter that can be swapped in, and continue until the end.
This approach fails on 'baaba', because it cannot move the leading b towards the back.
Approach 2: pack each distinct letter with its count into a tuple and push it onto a heap; each round pop the
two distinct letters with the highest counts, append them to the result, update their counts, and repeat
until the heap is empty.
'''
import heapq as hp
class Solution:
    # Approach 1: two-pointer swapping
def reorganizeString_demo(self, S: str) -> str:
lst = list(S)
for i in range(1, len(lst)):
if lst[i] == lst[i - 1]:
j = i+1
while j < len(lst) and lst[j] == lst[i]:
j += 1
if j < len(lst):
lst[i], lst[j] = lst[j], lst[i]
else:
return ''
return ''.join(lst)
    # Approach 2: clever heap use, popping two elements at a time
def reorganizeString(self, S: str) -> str:
        # Special case
if len(S) == 1:
return S
heap = [(-S.count(x),x) for x in set(S)]
for cnt,x in heap:
            # this feasibility check must account for whether len(S) is odd or even
if -cnt >= (len(S)+1)//2 + 1:
return ''
hp.heapify(heap)
res = ''
while len(heap) >= 2:
cnt1, c1 = hp.heappop(heap)
cnt2, c2 = hp.heappop(heap)
res += c1 + c2
if cnt1 + 1:
hp.heappush(heap,(cnt1+1,c1))
if cnt2 + 1:
hp.heappush(heap,(cnt2+1,c2))
return res+heap[0][1] if heap else res
# Quick test
S = 'aaab'
res = Solution().reorganizeString(S)
print(res)
|
[
"1318176575@qq.com"
] |
1318176575@qq.com
|
58f225e91c9707ccec4037ee3789c38ff19785e9
|
799a0af9c05deabe5d5250a10e480ec15ae0216e
|
/Xpath_test/xpath_test_10.py
|
3c4e2e550651ef49c998f95a34ee15717ae8ac84
|
[
"MIT"
] |
permissive
|
waws520waws/waws_spider
|
9b2be28834c08166463fe265e0f5c37a874369c8
|
c6a5988121f32619a5c5134c09fdfd556c696fe7
|
refs/heads/master
| 2021-03-20T06:01:22.041937
| 2020-03-28T02:49:16
| 2020-03-28T02:49:16
| 247,183,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 356
|
py
|
from lxml import etree
"""
Using contains(): it applies when a tag attribute holds several values; the exact-equality pattern used earlier would not match such a value
"""
text = '''
<li class="li li-first"><a href="link.html">first item</a></li>
'''
html = etree.HTML(text)
result = html.xpath('//li[contains(@class, "li")]/a/text()')
print(result)
|
[
"16601203140@163.com"
] |
16601203140@163.com
|
f7c6dff56a5dbfbd57c51b742a1f32e141403c38
|
da2583af7a14f04aed029a79a79224547de4c1f2
|
/rl/policy/gp_linear_mean.py
|
ba4a963f759f350f923730c7a4ecbcfa39d55142
|
[] |
no_license
|
yun-long/rl_prototype
|
4b0af8b817ad1c8bc30a46d7fa2e8f5cd37f7ea1
|
0a86a097d58ce299da90ea346e074f20fe167a5d
|
refs/heads/master
| 2020-04-23T15:37:49.498870
| 2019-02-18T11:28:21
| 2019-02-18T11:28:21
| 171,271,851
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,545
|
py
|
"""
Gaussian policy, linear mean, constant variance
Reference: Jan Peters, A Survey on policy search for robotics
"""
import numpy as np
import time
from rl.policy.base import GaussianPolicy
class GPLinearMean(GaussianPolicy):
def __init__(self, env, featurizer):
#
self.env = env
#
self.num_features = featurizer.num_features
self.num_actions = env.action_space.shape[0]
self.featurizer = featurizer
#
self.Mu_theta = np.random.randn(self.num_features, self.num_actions) / np.sqrt(self.num_features)
self.Sigma_action = np.eye(self.num_actions) * 1e1 # for exploration in parameter space
super().__init__()
def predict_action(self, state):
"""
Exploration in action_space, used for Step-based usually.
:param state:
:return:
"""
featurized_state = self.featurizer.transform(state).T
Mu_action = np.dot(self.Mu_theta.T, featurized_state).reshape(self.num_actions)
try:
action = np.random.multivariate_normal(Mu_action, self.Sigma_action)
except:
raise ValueError
return action
def update_pg(self, alpha_coeff, theta_samples, advantanges):
pass
def update_wml(self, Weights, Phi, A):
T = Phi.shape[0]
phi = Phi.reshape((T, self.num_features))
Q = Weights.reshape(T)
Q = np.diag(Q)
A = A.reshape((T, self.num_actions))
theta_tmp1 = np.linalg.inv(np.dot(phi.T, np.dot(Q, phi)))
theta_tmp2 = np.dot(phi.T, np.dot(Q, A))
self.Mu_theta = np.dot(theta_tmp1, theta_tmp2).reshape(self.Mu_theta.shape)
#
Z = (np.sum(Weights)**2 - np.sum(Weights**2)) / np.sum(Weights)
nume_sum = 0
for i in range(len(Weights)):
tmp = np.outer((A[i] - np.dot(self.Mu_theta.T, phi[i, :])), (A[i] - np.dot(self.Mu_theta.T, phi[i, :])))
tmp = Weights[i] * tmp
nume_sum += tmp
self.Sigma_action = nume_sum / Z
def optimal_policy_demo(self, num_demos):
for i_demo in range(num_demos):
print("Optimal Policy Demo : ", i_demo)
state = self.env.reset()
while True:
action = self.predict_action(state)
next_state, rewards, done, _ = self.env.step(action)
state = next_state
self.env.render()
if done:
time.sleep(1)
break
self.env.render(close=True)
|
[
"yun-long.song@outlook.com"
] |
yun-long.song@outlook.com
|
2949ad30d2c1f779dd0f7906f17943d31c121fb1
|
eac22714038e840028cc5abb72bc750004626ebb
|
/mct_camera_tools/nodes/image_proc_master.py
|
3316d09e4957ac3d0f6500030feeee1cccdedf4f
|
[
"Apache-2.0"
] |
permissive
|
iorodeo/mct
|
79b19f6dab9f6567452df7274d67245bf64b1801
|
fa8b85f36533c9b1486ca4f6b0c40c3daa6f4e11
|
refs/heads/master
| 2022-11-11T18:03:18.178182
| 2014-08-20T19:21:27
| 2014-08-20T19:21:27
| 273,790,182
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,780
|
py
|
#!/usr/bin/env python
from __future__ import print_function
import roslib
roslib.load_manifest('mct_camera_tools')
import rospy
import os
import os.path
import tempfile
import subprocess
from mct_xml_tools import launch
# Services
from mct_msg_and_srv.srv import CommandString
from mct_msg_and_srv.srv import CommandStringResponse
class Image_Proc_Master(object):
"""
Image proc master node. Provides service which launches/kills image_proc nodes for
    every camera with a calibration.
"""
def __init__(self):
self.tmp_dir = tempfile.gettempdir()
self.launch_file = os.path.join(self.tmp_dir,'image_proc.launch')
self.image_proc_popen = None
rospy.on_shutdown(self.clean_up)
rospy.init_node('image_proc_master')
self.camera_srv = rospy.Service(
'image_proc_master',
CommandString,
self.handle_image_proc_srv,
)
def handle_image_proc_srv(self,req):
"""
        Handles requests to launch/kill the image proc nodes.
"""
command = req.command.lower()
response = True
message = ''
if command == 'start':
if self.image_proc_popen is None:
self.launch_image_proc_nodes()
else:
response = False
message = 'image proc nodes already running'
elif command == 'stop':
if self.image_proc_popen is not None:
self.kill_image_proc_nodes()
else:
response = False
message = 'image proc nodes not running'
else:
response = False
            message = 'unknown command: {0}'.format(command)
return CommandStringResponse(response,message)
def launch_image_proc_nodes(self):
"""
Launches the image_proc nodes.
"""
if self.image_proc_popen is None:
launch.create_image_proc_launch(self.launch_file)
self.image_proc_popen = subprocess.Popen(['roslaunch',self.launch_file])
def kill_image_proc_nodes(self):
"""
Kills the image_proc nodes.
"""
if self.image_proc_popen is not None:
self.image_proc_popen.send_signal(subprocess.signal.SIGINT)
self.image_proc_popen = None
try:
os.remove(self.launch_file)
        except OSError as e:
rospy.logwarn('Error removing image_proc launch file: {0}'.format(str(e)))
def clean_up(self):
self.kill_image_proc_nodes()
def run(self):
rospy.spin()
# -----------------------------------------------------------------------------
if __name__ == '__main__':
node = Image_Proc_Master()
node.run()
|
[
"will@iorodeo.com"
] |
will@iorodeo.com
|
dd5617275d2a87e52a380d2ccfcdf4777e0566ba
|
45e376ae66b78b17788b1d3575b334b2cb1d0b1c
|
/checkov/common/graph/checks_infra/debug.py
|
26b247b24b08837b95bd15668c25aedf4d45d7c6
|
[
"Apache-2.0"
] |
permissive
|
bridgecrewio/checkov
|
aeb8febed2ed90e61d5755f8f9d80b125362644d
|
e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d
|
refs/heads/main
| 2023-08-31T06:57:21.990147
| 2023-08-30T23:01:47
| 2023-08-30T23:01:47
| 224,386,599
| 5,929
| 1,056
|
Apache-2.0
| 2023-09-14T20:10:23
| 2019-11-27T08:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 6,589
|
py
|
from __future__ import annotations
import json
import logging
from collections.abc import Iterable
from typing import Any, TYPE_CHECKING
import yaml
from termcolor import colored
from checkov.common.graph.graph_builder import CustomAttributes
from checkov.common.resource_code_logger_filter import add_resource_code_filter_to_logger
from checkov.common.util.env_vars_config import env_vars_config
if TYPE_CHECKING:
from checkov.common.graph.checks_infra.solvers.base_solver import BaseSolver
logger = logging.getLogger(__name__)
add_resource_code_filter_to_logger(logger)
def graph_check(check_id: str, check_name: str) -> None:
if not env_vars_config.EXPERIMENTAL_GRAPH_DEBUG:
return
print(f'\nEvaluating graph policy: "{check_id}" - "{check_name}"')
def resource_types(resource_types: Iterable[str], resource_count: int, operator: str) -> None:
if not env_vars_config.EXPERIMENTAL_GRAPH_DEBUG:
return
resource_types_str = '", "'.join(resource_types)
print(
f'\nFound {resource_count} resources with resource types: "{resource_types_str}" to check against operator: "{operator}"'
)
def attribute_block(
resource_types: Iterable[str],
attribute: str | None,
operator: str,
value: str | list[str] | None,
resource: dict[str, Any],
status: str,
) -> None:
if not env_vars_config.EXPERIMENTAL_GRAPH_DEBUG:
return
attribute_block_conf = _create_attribute_block(
resource_types=resource_types, attribute=attribute, operator=operator, value=value
)
color = "green" if status == "passed" else "red"
print("\nEvaluated block:\n")
print(colored(yaml.dump([attribute_block_conf], sort_keys=False), "blue"))
print("and got:")
print(colored(f'\nResource "{resource[CustomAttributes.ID]}" {status}:', color))
print(colored(json.dumps(resource[CustomAttributes.CONFIG], indent=2), "yellow"))
def connection_block(
resource_types: Iterable[str],
connected_resource_types: Iterable[str],
operator: str,
passed_resources: list[dict[str, Any]],
failed_resources: list[dict[str, Any]],
) -> None:
if not env_vars_config.EXPERIMENTAL_GRAPH_DEBUG:
return
connection_block_conf = _create_connection_block(
resource_types=resource_types,
connected_resource_types=connected_resource_types,
operator=operator,
)
passed_resources_str = '", "'.join(resource[CustomAttributes.ID] for resource in passed_resources)
failed_resources_str = '", "'.join(resource[CustomAttributes.ID] for resource in failed_resources)
print("\nEvaluated blocks:\n")
print(colored(yaml.dump([connection_block_conf], sort_keys=False), "blue"))
print("and got:\n")
print(colored(f'Passed resources: "{passed_resources_str}"', "green"))
print(colored(f'Failed resources: "{failed_resources_str}"', "red"))
def complex_connection_block(
solvers: list[BaseSolver],
operator: str,
passed_resources: list[dict[str, Any]],
failed_resources: list[dict[str, Any]],
) -> None:
if not env_vars_config.EXPERIMENTAL_GRAPH_DEBUG:
return
# to prevent circular dependencies
from checkov.common.checks_infra.solvers.attribute_solvers.base_attribute_solver import BaseAttributeSolver
from checkov.common.checks_infra.solvers.complex_solvers.base_complex_solver import BaseComplexSolver
from checkov.common.checks_infra.solvers.connections_solvers.base_connection_solver import BaseConnectionSolver
from checkov.common.checks_infra.solvers.connections_solvers.complex_connection_solver import (
ComplexConnectionSolver,
)
from checkov.common.checks_infra.solvers.filter_solvers.base_filter_solver import BaseFilterSolver
complex_connection_block = []
for solver in solvers:
if isinstance(solver, BaseAttributeSolver):
block = _create_attribute_block(
resource_types=solver.resource_types,
attribute=solver.attribute,
operator=solver.operator,
value=solver.value,
)
elif isinstance(solver, BaseFilterSolver):
block = _create_filter_block(attribute=solver.attribute, operator=solver.operator, value=solver.value)
elif isinstance(solver, (ComplexConnectionSolver, BaseComplexSolver)):
# ComplexConnectionSolver check needs to be before BaseConnectionSolver, because it is a subclass
block = {solver.operator: ["..." for _ in solver.solvers]}
elif isinstance(solver, BaseConnectionSolver):
block = _create_connection_block(
resource_types=solver.resource_types,
connected_resource_types=solver.connected_resources_types,
operator=solver.operator,
)
else:
logger.info(f"Unsupported solver type {type(solver)} found")
continue
complex_connection_block.append(block)
passed_resources_str = '", "'.join(resource[CustomAttributes.ID] for resource in passed_resources)
failed_resources_str = '", "'.join(resource[CustomAttributes.ID] for resource in failed_resources)
print("\nEvaluated blocks:\n")
print(colored(yaml.dump([{operator: complex_connection_block}], sort_keys=False), "blue"))
print("and got:\n")
print(colored(f'Passed resources: "{passed_resources_str}"', "green"))
print(colored(f'Failed resources: "{failed_resources_str}"', "red"))
def _create_attribute_block(
resource_types: Iterable[str], attribute: str | None, operator: str, value: str | list[str] | None
) -> dict[str, Any]:
attribute_block_conf = {
"cond_type": "attribute",
"resource_types": resource_types,
"attribute": attribute,
"operator": operator,
}
if value is not None:
attribute_block_conf["value"] = value
return attribute_block_conf
def _create_connection_block(
resource_types: Iterable[str], connected_resource_types: Iterable[str], operator: str
) -> dict[str, Any]:
attribute_block_conf = {
"cond_type": "connection",
"resource_types": resource_types,
"connected_resource_types": connected_resource_types,
"operator": operator,
}
return attribute_block_conf
def _create_filter_block(attribute: str | None, operator: str, value: str | list[str]) -> dict[str, Any]:
attribute_block_conf = {
"cond_type": "filter",
"attribute": attribute,
"operator": operator,
"value": value,
}
return attribute_block_conf
|
[
"noreply@github.com"
] |
bridgecrewio.noreply@github.com
|
b9c56ac1d31b2218826dbd63b673f4c3cff2e16a
|
a2f78983557c1ead7b2a7c3e720d4719099878b9
|
/python/ray/experimental/sgd/tf/tf_runner.py
|
384136ba79630ef2660e8ee46da3cf60f3455ccf
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
Senmumu/ray
|
3fc914a0a5d9da8fcaa3411bc04be7fba3ce6bbd
|
130b8f21da4fb5383b079493faaea5d81065b772
|
refs/heads/master
| 2020-07-18T12:08:51.862689
| 2019-09-03T22:36:25
| 2019-09-03T22:36:25
| 206,242,928
| 1
| 0
|
Apache-2.0
| 2019-09-04T05:59:44
| 2019-09-04T05:59:44
| null |
UTF-8
|
Python
| false
| false
| 5,250
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import json
import os
import numpy as np
import ray
import ray.services
from ray.experimental.sgd import utils
logger = logging.getLogger(__name__)
def _try_import_strategy():
"""Late import for Tesnorflow"""
from tensorflow.distribute.experimental import MultiWorkerMirroredStrategy
return MultiWorkerMirroredStrategy
class TFRunner(object):
"""Manages a TensorFlow model for training."""
def __init__(self, model_creator, data_creator, config=None,
verbose=False):
"""Initializes the runner.
Args:
model_creator (dict -> Model): see tf_trainer.py.
data_creator (dict -> tf.Dataset, tf.Dataset): see tf_trainer.py.
config (dict): see tf_trainer.py.
verbose (bool): Outputs training data if true.
"""
self.model_creator = model_creator
self.data_creator = data_creator
self.config = {} if config is None else config
self.epoch = 0
self.verbose = verbose
def setup(self):
"""Initializes the model."""
logger.debug("Creating dataset")
self.train_dataset, self.test_dataset = self.data_creator(self.config)
logger.debug("Creating model")
self.model = self.model_creator(self.config)
def setup_distributed(self, urls, world_rank, world_size):
"""Sets up TensorFLow distributed environment and initializes the model.
Args:
urls (str): the URLs that each node uses to connect.
world_rank (int): the index of the runner.
world_size (int): the total number of runners.
"""
assert len(urls) == world_size
tf_config = {
"cluster": {
"worker": urls
},
"task": {
"index": world_rank,
"type": "worker"
}
}
os.environ["TF_CONFIG"] = json.dumps(tf_config)
MultiWorkerMirroredStrategy = _try_import_strategy()
self.strategy = MultiWorkerMirroredStrategy()
self.train_dataset, self.test_dataset = self.data_creator(self.config)
logger.debug("Creating model with MultiWorkerMirroredStrategy")
with self.strategy.scope():
self.model = self.model_creator(self.config)
# For use in model.evaluate()
self.local_model = None
def step(self):
"""Runs a training epoch and updates the model parameters."""
fit_default_config = {"verbose": self.verbose}
fit_default_config.update(self.config.get("fit_config", {}))
history = self.model.fit(self.train_dataset, **fit_default_config)
if history is None:
stats = {}
else:
stats = {"train_" + k: v[-1] for k, v in history.history.items()}
self.epoch += 1
return stats
def validate(self):
"""Evaluates the model on the validation data set."""
stats = {}
evaluate_config = {"verbose": self.verbose}
evaluate_config.update(self.config.get("evaluate_config", {}))
results = self.model.evaluate(self.test_dataset, **evaluate_config)
if results is None:
# Using local Model since model.evaluate() returns None
# for MultiWorkerMirroredStrategy
logger.warning("Running a local model to get validation score.")
self.local_model = self.model_creator(self.config)
self.local_model.set_weights(self.model.get_weights())
results = self.local_model.evaluate(self.test_dataset,
**evaluate_config)
if isinstance(results, list):
stats = {
"validation_" + k: v
for k, v in zip(self.model.metrics_names, results)
}
else:
stats = {"loss": results}
return stats
def get_state(self):
"""Returns the state of the runner."""
return {
"epoch": self.epoch,
"weights": self.model.get_weights(),
"optimizer_weights": self.model.optimizer.get_weights()
}
def set_state(self, state):
"""Sets the state of the model."""
self.model = self.model_creator(self.config)
self.epoch = state["epoch"]
self.model.set_weights(state["weights"])
# This part is due to ray.get() changing scalar np.int64 object to int
state["optimizer_weights"][0] = np.array(
state["optimizer_weights"][0], dtype=np.int64)
if self.model.optimizer.weights == []:
self.model._make_train_function()
self.model.optimizer.set_weights(state["optimizer_weights"])
def shutdown(self):
"""Attempts to shut down the worker."""
del self.model
del self.train_dataset
del self.test_dataset
def get_node_ip(self):
"""Returns the IP address of the current node."""
return ray.services.get_node_ip_address()
def find_free_port(self):
"""Finds a free port on the current node."""
return utils.find_free_port()
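# A minimal local-usage sketch (hypothetical toy creators, TensorFlow 2.x assumed; in practice the
# trainer in tf_trainer.py drives this class through Ray actors, but the runner also works standalone):
def _toy_model_creator(config):
    import tensorflow as tf
    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
    model.compile(optimizer="sgd", loss="mse")
    return model

def _toy_data_creator(config):
    import tensorflow as tf
    features = tf.random.uniform((32, 4))
    labels = tf.random.uniform((32, 1))
    dataset = tf.data.Dataset.from_tensor_slices((features, labels)).batch(8)
    return dataset, dataset

if __name__ == "__main__":
    runner = TFRunner(_toy_model_creator, _toy_data_creator, config={})
    runner.setup()
    print(runner.step())      # e.g. {'train_loss': ...}
    print(runner.validate())  # e.g. {'loss': ...}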
|
[
"rliaw@berkeley.edu"
] |
rliaw@berkeley.edu
|
b1fc4028b00d66db57ef3d4fca7602a0b3de1815
|
8eb0f65096f9a9fe90a88c85dcdcaf12f9a8a512
|
/apps/maintenance_mode/middleware.py
|
cd0e09b3e8aba28e3af198050f46e5958f5de4a4
|
[
"MIT"
] |
permissive
|
WhitespaceCrew/django-htk
|
57c8cc9ec30b4cd9511b717978758c47144de76f
|
6a7b87a3d0a2e4cb51f6b8059708a26fa8e613df
|
refs/heads/master
| 2020-12-31T01:10:14.900413
| 2016-02-03T19:24:27
| 2016-02-03T19:25:02
| 45,211,442
| 0
| 0
| null | 2015-10-29T21:23:54
| 2015-10-29T21:23:54
| null |
UTF-8
|
Python
| false
| false
| 873
|
py
|
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
from htk.apps.maintenance_mode.utils import is_maintenance_mode
from htk.utils import htk_setting
class MaintenanceModeMiddleware(object):
"""Checks whether HTK_MAINTENANCE_MODE is set
If so, redirects to the HTK_MAINTENANCE_MODE_URL_NAME page
"""
def process_request(self, request):
maintenance_mode_page = reverse(htk_setting('HTK_MAINTENANCE_MODE_URL_NAME'))
response = None
if request.path == maintenance_mode_page:
if not is_maintenance_mode():
response = redirect('/')
else:
# already here
pass
else:
if is_maintenance_mode():
response = redirect(maintenance_mode_page)
else:
pass
return response
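# A hedged configuration sketch (the setting names come from the code above; the dotted middleware path
# is an assumption based on this file's location within django-htk):
#
#   MIDDLEWARE_CLASSES += ('htk.apps.maintenance_mode.middleware.MaintenanceModeMiddleware',)
#   HTK_MAINTENANCE_MODE = True                         # flips the site into maintenance mode
#   HTK_MAINTENANCE_MODE_URL_NAME = 'maintenance_mode'  # must be resolvable via reverse()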
|
[
"jontsai@jonathantsai.com"
] |
jontsai@jonathantsai.com
|
b6b7520917496dbd41f7f57d11d8d68f84434ff7
|
ee179dd9e9b24046508b11a60612da3758c7e122
|
/lib/python2.7/site-packages/nltk/stem/api.py
|
c6032423e84a9a5b8a1985afcf341e4084970792
|
[] |
no_license
|
buhtigexa/Nerit
|
fcd6cb08a0935e5b80392ae2acf68ba52ee8a899
|
d55629f6289c1fa6efe60802a78b79932ff248a2
|
refs/heads/master
| 2021-01-21T13:11:51.105930
| 2015-05-01T23:56:02
| 2015-05-01T23:56:02
| 34,728,820
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 781
|
py
|
# Natural Language Toolkit: Stemmer Interface
#
# Copyright (C) 2001-2014 NLTK Project
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# Edward Loper <edloper@gmail.com>
# Steven Bird <stevenbird1@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
class StemmerI(object):
"""
A processing interface for removing morphological affixes from
words. This process is known as stemming.
"""
def stem(self, token):
"""
Strip affixes from the token and return the stem.
:param token: The token that should be stemmed.
:type token: str
"""
raise NotImplementedError()
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
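# A minimal illustrative implementation (not part of NLTK itself): a naive stemmer that strips a few
# common English suffixes, only to show how the StemmerI interface is meant to be subclassed.
class _NaiveSuffixStemmer(StemmerI):
    _suffixes = ("ing", "ed", "ly", "s")

    def stem(self, token):
        for suffix in self._suffixes:
            if token.endswith(suffix) and len(token) > len(suffix) + 2:
                return token[: -len(suffix)]
        return token

# _NaiveSuffixStemmer().stem("running") returns "runn"; real stemmers such as PorterStemmer do better.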
|
[
"mrodriguez@alumnos.exa.unicen.edu.ar"
] |
mrodriguez@alumnos.exa.unicen.edu.ar
|
37cd36176891ea926eef36e5b677f6b4352ae940
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-as/huaweicloudsdkas/v1/model/pause_scaling_group_request.py
|
c88cd3d690fe6f2fc4753b1834d1bd3b10a9f9f9
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,079
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class PauseScalingGroupRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'scaling_group_id': 'str',
'body': 'PauseScalingGroupOption'
}
attribute_map = {
'scaling_group_id': 'scaling_group_id',
'body': 'body'
}
def __init__(self, scaling_group_id=None, body=None):
"""PauseScalingGroupRequest
The model defined in huaweicloud sdk
        :param scaling_group_id: Scaling group ID
:type scaling_group_id: str
:param body: Body of the PauseScalingGroupRequest
:type body: :class:`huaweicloudsdkas.v1.PauseScalingGroupOption`
"""
self._scaling_group_id = None
self._body = None
self.discriminator = None
self.scaling_group_id = scaling_group_id
if body is not None:
self.body = body
@property
def scaling_group_id(self):
"""Gets the scaling_group_id of this PauseScalingGroupRequest.
        Scaling group ID
:return: The scaling_group_id of this PauseScalingGroupRequest.
:rtype: str
"""
return self._scaling_group_id
@scaling_group_id.setter
def scaling_group_id(self, scaling_group_id):
"""Sets the scaling_group_id of this PauseScalingGroupRequest.
        Scaling group ID
:param scaling_group_id: The scaling_group_id of this PauseScalingGroupRequest.
:type scaling_group_id: str
"""
self._scaling_group_id = scaling_group_id
@property
def body(self):
"""Gets the body of this PauseScalingGroupRequest.
:return: The body of this PauseScalingGroupRequest.
:rtype: :class:`huaweicloudsdkas.v1.PauseScalingGroupOption`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this PauseScalingGroupRequest.
:param body: The body of this PauseScalingGroupRequest.
:type body: :class:`huaweicloudsdkas.v1.PauseScalingGroupOption`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PauseScalingGroupRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
42ee0b0d809863a628c4d9a10375863e7328db4a
|
fb54704d4a6f9475f42b85d8c470e3425b37dcae
|
/medium/ex46.py
|
b8f578eefedb0af0bc3a15588f48718e85d76ec0
|
[] |
no_license
|
ziyuan-shen/leetcode_algorithm_python_solution
|
b2784071a94b04e687fd536b57e8d5a9ec1a4c05
|
920b65db80031fad45d495431eda8d3fb4ef06e5
|
refs/heads/master
| 2021-06-27T05:19:47.774044
| 2021-02-04T09:47:30
| 2021-02-04T09:47:30
| 210,991,299
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
from typing import List

class Solution:
def permute(self, nums: List[int]) -> List[List[int]]:
ans = {(nums[i],): nums[:i] + nums[i+1:] for i in range(len(nums))}
for _ in range(len(nums)-1):
for permute in list(ans):
remaining = ans[permute]
for i in range(len(remaining)):
ans[permute+(remaining[i],)] = remaining[:i] + remaining[i+1:]
ans.pop(permute)
return [list(permute) for permute in ans]
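# A minimal usage sketch (hypothetical input): each partial permutation tuple is extended until all
# elements are placed.
if __name__ == "__main__":
    print(Solution().permute([1, 2, 3]))  # all six orderings of [1, 2, 3]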
|
[
"ziyuan.shen@duke.edu"
] |
ziyuan.shen@duke.edu
|
37434a2d02bf51c411162c56fe9eda123ad980d9
|
bede13ba6e7f8c2750815df29bb2217228e91ca5
|
/advance_cash_flow_statements/wizard/account_account.py
|
8ab4d6059149ffc32c123a592816f6a73772185a
|
[] |
no_license
|
CybroOdoo/CybroAddons
|
f44c1c43df1aad348409924603e538aa3abc7319
|
4b1bcb8f17aad44fe9c80a8180eb0128e6bb2c14
|
refs/heads/16.0
| 2023-09-01T17:52:04.418982
| 2023-09-01T11:43:47
| 2023-09-01T11:43:47
| 47,947,919
| 209
| 561
| null | 2023-09-14T01:47:59
| 2015-12-14T02:38:57
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,562
|
py
|
# -*- coding: utf-8 -*-
#############################################################################
#
# Cybrosys Technologies Pvt. Ltd.
#
# Copyright (C) 2019-TODAY Cybrosys Technologies(<https://www.cybrosys.com>)
# Author: Cybrosys Techno Solutions(<https://www.cybrosys.com>)
#
# You can modify it under the terms of the GNU LESSER
# GENERAL PUBLIC LICENSE (LGPL v3), Version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU LESSER GENERAL PUBLIC LICENSE (LGPL v3) for more details.
#
# You should have received a copy of the GNU LESSER GENERAL PUBLIC LICENSE
# (LGPL v3) along with this program.
# If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
from odoo import api, models, fields
from odoo.tools.misc import get_lang
class AccountCommonReport(models.Model):
_inherit = "account.report"
_description = "Account Common Report"
company_id = fields.Many2one('res.company', string='Company', required=True,
readonly=True,
default=lambda self: self.env.company)
journal_ids = fields.Many2many(
comodel_name='account.journal',
string='Journals',
required=True,
default=lambda self: self.env['account.journal'].search(
[('company_id', '=', self.company_id.id)]),
domain="[('company_id', '=', company_id)]",
)
date_from = fields.Date(string='Start Date')
date_to = fields.Date(string='End Date')
target_move = fields.Selection([('posted', 'All Posted Entries'),
('all', 'All Entries'),
], string='Target Moves', required=True,
default='posted')
@api.onchange('company_id')
def _onchange_company_id(self):
if self.company_id:
self.journal_ids = self.env['account.journal'].search(
[('company_id', '=', self.company_id.id)])
else:
self.journal_ids = self.env['account.journal'].search([])
def _build_contexts(self, data):
result = {}
result['journal_ids'] = 'journal_ids' in data['form'] and data['form'][
'journal_ids'] or False
result['state'] = 'target_move' in data['form'] and data['form'][
'target_move'] or ''
result['date_from'] = data['form']['date_from'] or False
result['date_to'] = data['form']['date_to'] or False
result['strict_range'] = True if result['date_from'] else False
result['company_id'] = data['form']['company_id'][0] or False
return result
def _print_report(self, data):
raise NotImplementedError()
def check_report(self):
print('Account.report')
self.ensure_one()
data = {'ids': self.env.context.get('active_ids', []),
'model': self.env.context.get('active_model', 'ir.ui.menu'),
'form': self.read(
['date_from', 'date_to', 'journal_ids', 'target_move',
'company_id'])[0]}
used_context = self._build_contexts(data)
data['form']['used_context'] = dict(used_context,
lang=get_lang(self.env).code)
return self.with_context(discard_logo_check=True)._print_report(data)
|
[
"ajmal@cybrosys.in"
] |
ajmal@cybrosys.in
|
79180c09bcb81b56e6d9d1043b6380e55871d2a0
|
c7e765a9bed33d3bfb21774e3995bf4a09e04add
|
/adminmgr/media/code/A3/task1/BD_135_703_2371_KhgNwL4.py
|
39a4a494197832cb4b20798bc47cbace9f61a4d5
|
[
"Apache-2.0"
] |
permissive
|
IamMayankThakur/test-bigdata
|
13dd2ac7fb76c9baed6c3a0aa943057a22e2d237
|
7f507918c7bec31c92eedcd94491a83486623049
|
refs/heads/master
| 2022-05-03T00:59:44.127494
| 2022-02-10T19:50:16
| 2022-02-10T19:50:16
| 201,585,028
| 10
| 4
|
Apache-2.0
| 2022-04-22T23:39:45
| 2019-08-10T05:34:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,134
|
py
|
import findspark
findspark.init()
from pyspark import SparkConf,SparkContext
from pyspark.streaming import StreamingContext
from pyspark.sql import Row,SQLContext
import sys
import requests
def func(rdd):
sorted_rdd1 = rdd.sortBy(lambda x: (-x[1],x[0]))
sorted_rdd=sorted_rdd1.filter(lambda y: y[0] !='')
s_list=sorted_rdd.collect()
if(s_list!=[]):
print(s_list[0][0],s_list[1][0],s_list[2][0],s_list[3][0],s_list[4][0],sep=",")
conf=SparkConf()
conf.setAppName("BigData")
sc=SparkContext(conf=conf)
ssc=StreamingContext(sc,int(sys.argv[2]))
ssc.checkpoint("/checkpoint_BIGDATA")
dataStream=ssc.socketTextStream("localhost",9009)
hashtag1=dataStream.window(int(sys.argv[1]),1)
# DStream has no select(); extract field 7 of each record and split it on commas in one flatMap
hashtag3=hashtag1.flatMap(lambda w: w.split(";")[7].split(","))
hashtag4 = hashtag3.map(lambda x: (x,1))
#hashtags=hashtag4.reduceByKey(add)
hashtags=hashtag4.updateStateByKey(lambda new_vals, running: sum(new_vals) + (running or 0))
hashtags.foreachRDD(func)
ssc.start()
ssc.awaitTermination(25)
ssc.stop()
|
[
"ubuntu@ip-172-31-18-251.ap-south-1.compute.internal"
] |
ubuntu@ip-172-31-18-251.ap-south-1.compute.internal
|
71ff48d27a98e522cb1183c1508f3fd16ee598fa
|
521a5abf021aff0e5bec6e4723efb2d95bc1c528
|
/dva/urls.py
|
f5d1f059a314c2edc9fa63fd0894759abd496b16
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
CVML/DeepVideoAnalytics
|
be3ed41968a140328e25c22f2cb2be431a2b172d
|
d0969b503ed68dc9ee26279c341e1540bfefd4f6
|
refs/heads/master
| 2021-07-17T22:19:20.787228
| 2017-10-22T07:55:04
| 2017-10-22T07:55:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,282
|
py
|
"""dva URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
admin.autodiscover()
urlpatterns = [url(r'^admin/', admin.site.urls),
url(r'^api/', include('dvaapp.urls')),
url(r'', include('dvaui.urls'))]+\
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DVA_PRIVATE_ENABLE:
urlpatterns.append(url(r'^apps/', include('dvap.urls')))
if settings.DEBUG and settings.MACOS:
import debug_toolbar
urlpatterns = [url(r'^__debug__/', include(debug_toolbar.urls)),] + urlpatterns
|
[
"akshayubhat@gmail.com"
] |
akshayubhat@gmail.com
|
e9ab407ab62a078acf59f7b0df91d74002198a3b
|
76800454958c36c26d828ee989f181990813955f
|
/tutorial/HungyiLee/rnn/preprocess.py
|
327adbf18ae8258660063e2fe4c3dfa221fd5d67
|
[] |
no_license
|
JuneXia/proml
|
374b27b1d1c2d983aae4fed3141a2864a7196a1b
|
fbe86564013e7556c30d98c702c1ba6251f21851
|
refs/heads/master
| 2022-12-27T23:38:16.838639
| 2020-10-11T10:05:32
| 2020-10-11T10:05:32
| 267,207,978
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,405
|
py
|
# preprocess.py
# This block handles the data preprocessing
import torch
from torch import nn
from gensim.models import Word2Vec
class Preprocess():
def __init__(self, sentences, sen_len, w2v_path="./w2v.model"):
self.w2v_path = w2v_path
self.sentences = sentences
self.sen_len = sen_len
self.idx2word = []
self.word2idx = {}
self.embedding_matrix = []
def get_w2v_model(self):
        # Load the previously trained word-to-vector model
self.embedding = Word2Vec.load(self.w2v_path)
self.embedding_dim = self.embedding.vector_size
def add_embedding(self, word):
        # Add the word to the embedding and give it a randomly generated representation vector
        # word will only ever be "<PAD>" or "<UNK>"
vector = torch.empty(1, self.embedding_dim)
torch.nn.init.uniform_(vector)
self.word2idx[word] = len(self.word2idx)
self.idx2word.append(word)
self.embedding_matrix = torch.cat([self.embedding_matrix, vector], 0)
def make_embedding(self, load=True):
print("Get embedding ...")
        # Fetch the trained Word2vec word embedding
if load:
print("loading word to vec model ...")
self.get_w2v_model()
else:
raise NotImplementedError
        # Build a word2idx dictionary
        # Build an idx2word list
        # Build a word2vector list
for i, word in enumerate(self.embedding.wv.vocab):
print('get words #{}'.format(i+1), end='\r')
#e.g. self.word2index['he'] = 1
#e.g. self.index2word[1] = 'he'
#e.g. self.vectors[1] = 'he' vector
self.word2idx[word] = len(self.word2idx)
self.idx2word.append(word)
self.embedding_matrix.append(self.embedding[word])
print('')
self.embedding_matrix = torch.tensor(self.embedding_matrix)
# 將 "<PAD>" 跟 "<UNK>" 加進 embedding 裡面
self.add_embedding("<PAD>")
self.add_embedding("<UNK>")
print("total words: {}".format(len(self.embedding_matrix)))
return self.embedding_matrix
def pad_sequence(self, sentence):
        # Pad or truncate every sentence to the same length
if len(sentence) > self.sen_len:
sentence = sentence[:self.sen_len]
else:
pad_len = self.sen_len - len(sentence)
for _ in range(pad_len):
sentence.append(self.word2idx["<PAD>"])
assert len(sentence) == self.sen_len
return sentence
def sentence_word2idx(self):
        # Convert the words in each sentence to their corresponding indices
sentence_list = []
for i, sen in enumerate(self.sentences):
print('sentence count #{}'.format(i+1), end='\r')
sentence_idx = []
for word in sen:
if (word in self.word2idx.keys()):
sentence_idx.append(self.word2idx[word])
else:
sentence_idx.append(self.word2idx["<UNK>"])
            # Pad or truncate every sentence to the same length
sentence_idx = self.pad_sequence(sentence_idx)
sentence_list.append(sentence_idx)
return torch.LongTensor(sentence_list)
def labels_to_tensor(self, y):
        # Convert the labels to a tensor
y = [int(label) for label in y]
return torch.LongTensor(y)
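# A minimal usage sketch (toy data; assumes gensim 3.x, matching the .wv.vocab access above, and trains
# a throwaway Word2Vec model so the class has something to load):
if __name__ == "__main__":
    sentences = [["today", "is", "a", "good", "day"], ["it", "is", "raining", "today"]]
    Word2Vec(sentences, size=10, min_count=1).save("./w2v_demo.model")
    preprocess = Preprocess(sentences, sen_len=6, w2v_path="./w2v_demo.model")
    embedding = preprocess.make_embedding(load=True)  # (vocab_size + 2, 10) matrix
    x = preprocess.sentence_word2idx()                # LongTensor of shape (2, 6)
    y = preprocess.labels_to_tensor(["1", "0"])
    print(embedding.shape, x.shape, y)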
|
[
"junxstudio@sina.com"
] |
junxstudio@sina.com
|
9649970d62f951be36ba3cde0f8017e23aa5c6c1
|
7db6c1865cf9102808824ff06cda747b6e572a21
|
/Python/Test/Locust/Test/wsh_loadtest.py
|
53219d5e5215e6e3cee8591f2b5756445323a944
|
[] |
no_license
|
hyteer/testing
|
1f6cabc1d2b67faa4533e6ad7eb5be8c13d542c9
|
1d8b47b3bbb2daf00e4f15b5d18e86111ea4e113
|
refs/heads/master
| 2020-05-21T16:19:08.243676
| 2017-01-03T01:25:17
| 2017-01-03T01:25:17
| 60,914,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,625
|
py
|
from locust import HttpLocust, TaskSet, task
import json
counter = 0
class UserBehavior(TaskSet):
#global counter
def on_start(self):
""" on_start is called when a Locust start before any task is scheduled """
self.login()
def login(self):
res = self.client.post("/login/login-ajax", {"username":"20160912", "password":"123456","captcha":"1"})
print "Response status code:", res.status_code
print "Response content:", res.content
if res:
self.count_test()
def count_test(self):
global counter
counter += 1
print "counter:%d" % counter
@task(2)
def index(self):
res = self.client.get("/")
if res:
self.count_test()
@task(1)
def member_list(self):
res = self.client.post("/member/list-ajax", {"_page":1,"_page_size":20,"nickname":"","group_id":None,"shop_sub_id":"","agent_id":"","is_search":False,"belong_to_staff_id":"","createStart":"","createEnd":"","group_ids":[],"yestoday":False,"user_platform":0,"tags":[]})
content = json.loads(res.content)
errmsg = content["errmsg"]
errcode = content["errcode"]
print "errcode:%s,\nerrmsg:%s" % (errcode,str(errmsg))
self.count_test()
#print "Response status code:", res.status_code
#print "Response content:", res.content
@task(1)
def second_kill_list(self):
res = self.client.get("/second-kill/list")
if res:
self.count_test()
class WebsiteUser(HttpLocust):
task_set = UserBehavior
min_wait = 5000
max_wait = 6000
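# A hedged run sketch (assumes a pre-1.0 Locust release, matching the HttpLocust API used above):
#
#   locust -f wsh_loadtest.py --host=http://target-host --no-web -c 10 -r 2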
|
[
"hyteer@qq.com"
] |
hyteer@qq.com
|
17c928e3cafbcf5f45ca353e1f379ba1e9e04da5
|
01c3ff1d74e754e0d4ce0fb7f8a8b329ec3766e1
|
/python_exercises/19others/fun4.py
|
6f9ac6e1fda53a3f6312dd5622e67ea3388ae8c9
|
[] |
no_license
|
vineel2014/Pythonfiles
|
5ad0a2b824b5fd18289d21aa8306099aea22c202
|
0d653cb9659fe750cf676a70035ab67176179905
|
refs/heads/master
| 2020-04-28T03:56:22.713558
| 2019-03-11T08:38:54
| 2019-03-11T08:38:54
| 123,681,939
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
def product(numbers):
    product_so_far = 1
    for number in numbers:
        product_so_far *= number
    return product_so_far
print(product([1,2,3]))
print(product([7,-4,1,6,0]))
print(product([]))
|
[
"vineel2006@gmail.com"
] |
vineel2006@gmail.com
|
d4d95744592b24613ab6bfde2b48befb4f83d4c9
|
2e6309c8f2126a6196adf9288c31162e6a949c1d
|
/backend/djabgoBlog-master/blog/subscribe/forms.py
|
90898d74a085347109f8f89fa6ee3117df4562b4
|
[] |
no_license
|
triest/codeExample2
|
7ff87908caed3c4f58ff1609187a5af78ce11663
|
6d8a20fdd710d1560a3dbc2dfb4455fcc4d862b1
|
refs/heads/master
| 2020-04-02T03:12:21.027401
| 2018-10-21T11:22:14
| 2018-10-21T11:22:14
| 153,951,115
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from inspect import getmembers
from pprint import pprint
from django.forms import modelform_factory
from django import forms
from django.contrib.auth.models import User
from . import models
#from blog.articles.models import Subscribe as Subscribe1
class SunscribeForm(forms.ModelForm):
class Meta:
model = models.Subscribe
fields = ['name']
|
[
"you@example.com"
] |
you@example.com
|
884b687ae12296554a2ae41f29b26bf4382a86c2
|
e2230f57dd5bb508b5c0f7bf4df6af8ae7b36f83
|
/sympycore/matrices/__init__.py
|
95ab7a348b3211d44be60bb7007659916c36a1b0
|
[
"Apache-2.0"
] |
permissive
|
wenyifdu/pymaclab
|
2fd40192d9dff4fda488f3f9a61e584c1adafd49
|
e5100ad4201bdf59c01dd600ac7e8865664075c3
|
refs/heads/master
| 2021-09-16T11:16:00.680933
| 2018-06-20T07:29:29
| 2018-06-20T07:29:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
""" Provides matrix support.
"""
__docformat__ = "restructuredtext"
from .algebra import Matrix, MatrixBase
from .functions import eye, concatenate, jacobian
from .polyhedra import Polyhedron
|
[
"emscheffel@gmail.com"
] |
emscheffel@gmail.com
|
854d50100ce45aef6d69f163a46a0fc3b8619b8c
|
b65d3777372d25402f11814288d992e6cb29c0a5
|
/tools/Ui_couput.py
|
986eea054ca7ed3f9226970215929d3410e40c05
|
[] |
no_license
|
JoeChen999/tools
|
082ee56982650b0fee9e26aea9baa7c762a0ceed
|
090215568c41beabd6f1fa6b2d0cf253a29ba913
|
refs/heads/master
| 2020-05-25T14:58:51.564658
| 2016-09-22T06:31:30
| 2016-09-22T06:31:30
| 68,891,625
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,448
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/chenbiao/Documents/workspaces/tools/couput.ui'
#
# Created: Thu Sep 5 15:37:23 2013
# by: PyQt4 UI code generator 4.10.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(702, 527)
self.textBrowser = QtGui.QTextBrowser(Dialog)
self.textBrowser.setGeometry(QtCore.QRect(0, 0, 701, 531))
self.textBrowser.setObjectName(_fromUtf8("textBrowser"))
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "Dialog", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Dialog = QtGui.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
|
[
"chenbiao@elex-tech.com"
] |
chenbiao@elex-tech.com
|
20c80bd801a815a348f93512423abbf0ad984c81
|
82228ee4e8941d67bb71020a2917706ba6962989
|
/tests/utils/test_constants.py
|
aa54516e4bb5f8c500795b241090fda9839c7b05
|
[
"MIT"
] |
permissive
|
haoziyeung/puzzle
|
2daccf618347e29980daf4ef7fd25ca431d86ca5
|
9476f05b416d3a5135d25492cb31411fdf831c58
|
refs/heads/master
| 2021-06-17T13:07:27.916154
| 2017-05-17T13:03:10
| 2017-05-17T13:03:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 327
|
py
|
from puzzle.utils.constants import HGNC_TO_OMIM, SEVERITY_DICT
def test_HGNC_TO_OMIM():
assert HGNC_TO_OMIM['CACNA1F'].get('mim_nr') == 300110
assert HGNC_TO_OMIM['ADK'].get('mim_nr') == 102750
def test_SEVERITY_DICT():
assert SEVERITY_DICT['transcript_ablation'] == 0
assert SEVERITY_DICT['start_lost'] == 6
|
[
"robin.andeer@gmail.com"
] |
robin.andeer@gmail.com
|
5267ef629b697a6f0f4684057928eb7df6bedada
|
1c4de938e068c8967288ae06e15d1ea148d92ace
|
/Python_parallel/lu_mpi.py
|
8701770bf65cd8e545e449987031ca7b2c78b8e4
|
[
"MIT"
] |
permissive
|
610yilingliu/LU_Decomposition_MPIVersion
|
3d5ddd578dc703ea830c35a421aab89d9ae614d2
|
0bd8af0e86b5fcc36970bea7bb045c129fdd2238
|
refs/heads/master
| 2023-01-07T22:25:57.883027
| 2020-10-16T08:28:27
| 2020-10-16T08:28:27
| 297,609,839
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,484
|
py
|
import mpi4py
import random
def generate(sz, rg, sl):
"""
:type int sz: size of square matrix(sz * sz)
:type int rg, int sl: range and scale of data. For example if the value field will be [-0.2, 0.2], than
rg = 2, sl = 10
:rtype: (matrix List[List[float]], vector List[float]), float in Python equal to double in C
Way for generate matrix in parallel is already shown in C version, I will not do that again in Python
"""
random.seed(0)
matrix = []
vector = []
for _ in range(sz):
ls = []
for i in range(sz):
ele = (-rg + random.random() * 2 * rg) / sl
ls.append(ele)
matrix.append(ls)
vector.append((-rg + random.random() * 2 * rg) / sl)
return matrix, vector
def re_arrange(M, V):
"""
:type M: List[List[float]] generated matrix
:type V: Lists[float] generated vector. len(vector) == len(matrix)
:rtype (M List[List[float]], V List[float]) rearranged matrix and vector. Ax = b => PAx = Pb
"""
def find_mx(col):
mx = 0
idx = 0
for i in range(col, len(M)):
cur = abs(M[i][col])
if cur > mx:
mx = cur
idx = i
if mx == 0:
print("Invalid Matrix")
exit(0)
return idx
    for i in range(len(M)):
target = find_mx(i)
M[i], M[target] = M[target], M[i]
V[i], V[target] = V[target], V[i]
return M, V
def lu(M):
|
[
"yilingliu1994@gmail.com"
] |
yilingliu1994@gmail.com
|
55d046dd1d43409f967db43d069619a1b9a92d5f
|
d8b5aba2a1f53fbf3fcfc388c26e547afa76b13f
|
/modules/app_email/lib/delphi.py
|
d6e8461d29ca1decad1b18441f6a42b08596eed7
|
[
"Apache-2.0"
] |
permissive
|
dfrc-korea/carpe
|
e88b4e3bcb536355e2a64d00e807bccd631f8c93
|
f9299b8ad0cb2a6bbbd5e65f01d2ba06406c70ac
|
refs/heads/master
| 2023-04-28T01:12:49.138443
| 2023-04-18T07:37:39
| 2023-04-18T07:37:39
| 169,518,336
| 75
| 38
|
Apache-2.0
| 2023-02-08T00:42:41
| 2019-02-07T04:21:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,235
|
py
|
#
# delphi.py, 200611
#
import os
import errno
def CreateDir(dir):
try:
if not os.path.isdir(dir):
os.makedirs(os.path.join(dir))
return True
except OSError as e:
if e.errno != errno.EEXIST: raise
return False
def DirectoryExists(dir):
return os.path.isdir(dir)
def ExtractFilePath(fn):
v = os.path.dirname(fn)
return v if v == '' else v + PathDelimiter
def ExtractFileDir(fn):
return os.path.dirname(fn)
def ExtractFileName(fn):
return os.path.basename(fn)
def ExtractFileExt(fn):
p = fn.rfind('.')
return fn[p:] if p > fn.rfind('=') else ''
def ChangeFileExt(fn, ext):
p = fn.rfind('.')
if p == -1: return ''
return fn[:p] + ext
def FileExists(fn):
return os.path.isfile(fn)
def StrToIntDef(v, default):
try:
return int(v)
except:
return default
def IncludeTrailingBackslash(v):
return os.path.join(v, '')
def ExcludeTrailingBackslash(v):
return v.rstrip(PathDelimiter)
IncludeTrailingPathDelimiter = IncludeTrailingBackslash
ExcludeTrailingPathDelimiter = ExcludeTrailingBackslash
PathDelimiter = '\\'
_NonPathDelimiter = '/'
if os.path.join('_', '').find(PathDelimiter) == -1:
PathDelimiter = '/'
_NonPathDelimiter = '\\'
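# --- illustrative usage (editor's addition, not part of the original module) ---
# Quick sanity check of the Delphi-style helpers defined above.
if __name__ == '__main__':
    print(ExtractFileExt('report.txt'))           # '.txt'
    print(ChangeFileExt('report.txt', '.csv'))    # 'report.csv'
    print(ExtractFileName('/tmp/report.txt'))     # 'report.txt'
    print(StrToIntDef('42', 0), StrToIntDef('x', 0))  # 42 0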
|
[
"jbc0729@gmail.com"
] |
jbc0729@gmail.com
|
8ba102ebdd7f92dc9a0e9dc574bebbf7da51e6a9
|
058ed13ab33e8af7f5c7f6cfb985edcc1f0075b8
|
/.history/application/controllers/sinhvien_20201006230934.py
|
c35efabcab728dece922a76da4e6ab65c34212a1
|
[] |
no_license
|
thinhpayer123/learning-python-vinacomi
|
e89a0b5302fbc42d1998aea82bba8ebea594eee6
|
f4d11998911360e25f68c2c6c0336d32a8c25c65
|
refs/heads/master
| 2023-03-27T08:57:48.600710
| 2020-12-13T15:43:53
| 2020-12-13T15:43:53
| 353,254,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,765
|
py
|
from application.extensions import apimanager
from application.models.model import QRUser, User, UserWallet
from application.extensions import auth
from application.database import db
from gatco.exceptions import ServerError
from sqlalchemy import create_engine
import os
import random
import string
import aiofiles
import time
from gatco.response import json
from application.server import app
from application.config import Config
import psycopg2
config = Config()
import pandas as pd
import xlrd
import qrcode
import shutil
import asyncio
import datetime
import ujson
def auth_func(request=None, **kw):
#uid = auth.current_user(request)
#if uid is None:
# raise ServerError("abc")
pass
@app.route('/api/v1/file/upload', methods=['GET', 'POST'])
async def file_load(request):
path = request.args.get("path", None)
ret = None
# url_qr = config.QR_SERVICE_URL
# url = config.FILE_SERVICE_URL
fsroot = config.FS_ROOT
if request.method == 'POST':
file = request.files.get('file', None)
if file:
extname = request.files.get('file').name
if not os.path.exists(fsroot):
os.makedirs(fsroot)
subPath = ""
if path is not None:
subPath = path + "/"
if not os.path.exists(fsroot + subPath):
os.makedirs(fsroot + subPath)
async with aiofiles.open(fsroot + subPath + extname, 'wb+') as f:
await f.write(file.body)
link_local = fsroot + subPath + extname
print(link_local)
data = pd.read_excel(link_local)
print(data)
df = pd.DataFrame(data, columns=['student_school_year', 'student_class', 'student_id', 'student_name', 'birthday', 'gender','email'])
# print('122112'+df)
# company_id = request.args.get("company_id")
company_id = 'TEST'
# print(company_id)
# result = []
a =df.get(["student_school_year", "student_class", "student_id",'student_name','birthday','gender','email'])
result = df.to_json(orient='records')
result_ujson = ujson.loads(result)
item_result = []
for item in result_ujson:
user_no = item.get("student_id",{})
extra_data = item
new_entry = UserWallet(user_no=user_no,
company_id=company_id,
extra_data=extra_data)
item_result.append(new_entry)
db.session.add_all(item_result)
db.session.commit()
# print(result)
# q = db.session.query(User).with_for_update(nowait=Tre, of=User)
# user_name =
# full_name
# email
# companyid
# alchemyEngine = create_engine('postgresql://icangteen_user:123456abcA@localhost:5432/icangteen', pool_recycle=3600);
# postgreSQLConnection = alchemyEngine.connect();
# postgreSQLTable = 'student';
# df.to_sql(postgreSQLTable, alchemyEngine, if_exists='append', index=False)
# #
ret = {
"notify":"upload file success ",
# "id": id
}
return json(ret)
@app.route('/api/v1/Genqr', methods=['GET' , 'POST'])
async def genqr(request):
fsroot = config.FS_ROOT
url = config.FILE_SERVICE_URL
qr = config.QR_ARCHIVE
# userWallets =[]
# print(id)
if request.method == 'POST':
path = request.args.get('')
userWallets = UserWallet.query.order_by(UserWallet.id).all()
for users in userWallets:
# format_data = ujson.loads
info_user = users.extra_data
student_id = info_user['student_id']
student_school_year = info_user['student_school_year']
student_class = info_user['student_class']
student_name = info_user['student_name']
birthday = info_user['birthday']
company_id = request.args.get('comapny_id')
company_no = request.args.get('comapny_no')
company_type = request.args.get('comapny_type')
current_user = request.args.get('user')
current_user_no = request.args.get('id')
# print(company_id)
# print(',..........'+ str(current_user))
img = qrcode.make(student_school_year + '-' + student_class + '-' + student_id + '-' + student_name + '-' + birthday)
name_img = student_class + '-' + student_id + '-' + student_name + '.png'
link_img = fsroot + 'qrcode/' + name_img
img.save(link_img)
user_wallet = UserWallet()
user_wallet.company_id = company_id
user_wallet.company_no = company_no
user_wallet.company_type = company_type
user_wallet.user_id = current_user
user_wallet.user_no =
user_wallet.created_at = user_wallet.created_at
user_wallet.
user_wallet.nameqr = student_class + '-' + student_id + '-' + student_name
user_wallet.saveDirectory = link_img
db.session.add(qr)
db.session.commit()
zipfile = shutil.make_archive(fsroot, 'zip', fsroot, 'qrcode/')
ret = {
"link": url
}
return json(ret)
# apimanager.create_api(collection_name='qrworker', model=QRworker,
# methods=['GET', 'POST', 'DELETE', 'PUT'],
# url_prefix='/api/v1',
# preprocess=dict(GET_SINGLE=[auth_func], GET_MANY=[auth_func], POST=[auth_func], PUT_SINGLE=[auth_func]),
# )
|
[
"cuongnd@upstart.vn"
] |
cuongnd@upstart.vn
|
987f92a7554c420a41bb61e75372a9b7d318c61d
|
a7ed4da896faab105ff08259a82ae822548d1c2c
|
/6/6_1.py
|
7ed377a9d8fe394412cc185b039795c97bc0a055
|
[] |
no_license
|
nemesmarci/Advent-of-Code-2019
|
e8e59d89be8ed43ce046fd7854b1c9b779a8930e
|
251dc4dc62777a7c0daf7a74d832c0be3d75140e
|
refs/heads/master
| 2021-12-31T10:53:35.924253
| 2019-12-27T17:17:06
| 2021-12-29T19:17:13
| 225,252,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
from space import read_data
objects = read_data()
root = objects['COM']
def traverse(current, level=1):
n = len(current.orbiters) * level
for child in current.orbiters:
n += traverse(child, level + 1)
return n
print(traverse(root))
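# --- editor's sketch (hypothetical mini-check, not part of the original solution) ---
# traverse() counts each object's direct and indirect orbits and sums them.
# A tiny hand-built tree COM)B, B)C should give 1 + 2 = 3 total orbits.
class _Obj(object):
    def __init__(self):
        self.orbiters = []
_com, _b, _c = _Obj(), _Obj(), _Obj()
_com.orbiters.append(_b)
_b.orbiters.append(_c)
assert traverse(_com) == 3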
|
[
"nemes@sch.bme.hu"
] |
nemes@sch.bme.hu
|
0ad05c0dfd6e48742b121e8c44fddf8a92224af1
|
dd770e697daddab20e09fbf8ce199c97ee540c37
|
/bigtop-packages/src/charm/spark/layer-spark/tests/10-test-ha.py
|
99c604f9ffd781c069a3d253e4eb43d243ba046e
|
[
"Apache-2.0",
"FreeBSD-DOC",
"MIT",
"DOC"
] |
permissive
|
PKConsul/bigtop
|
0e7b5133be17a2093c0d5279b000c60b67072a16
|
2f8311b184bf0c5d25756b098895e43b1dbc3c2e
|
refs/heads/master
| 2021-01-20T02:08:29.012667
| 2017-04-22T17:44:30
| 2017-04-23T06:27:13
| 89,379,381
| 1
| 0
| null | 2017-04-25T15:53:29
| 2017-04-25T15:53:29
| null |
UTF-8
|
Python
| false
| false
| 3,718
|
py
|
#!/usr/bin/python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import amulet
import requests
import time
import unittest
class TestDeployment(unittest.TestCase):
"""
Test scaling of Apache Spark in HA mode.
"""
@classmethod
def setUpClass(cls):
cls.d = amulet.Deployment(series='xenial')
cls.d.add('spark-test-ha', 'cs:xenial/spark', units=3)
cls.d.add('zk-test', 'cs:xenial/zookeeper')
cls.d.relate('zk-test:zookeeper', 'spark-test-ha:zookeeper')
cls.d.expose('spark-test-ha')
cls.d.setup(timeout=3600)
cls.d.sentry.wait(timeout=3600)
@classmethod
def tearDownClass(cls):
# NB: seems to be a remove_service issue with amulet. However, the
# unit does still get removed. Pass OSError for now:
# OSError: juju command failed ['remove-application', 'zk-test']:
# ERROR allocation for service ...zk-test... owned by ... not found
try:
cls.d.remove_service('spark-test-ha', 'zk-test')
except OSError as e:
print("IGNORE: Amulet remove_service failed: {}".format(e))
pass
def test_master_selected(self):
"""
Wait for all three spark-test-ha units to agree on a master leader.
Remove the leader unit.
Check that a new leader is elected.
"""
self.d.sentry.wait_for_messages({"spark-test-ha": ["ready (standalone - HA)",
"ready (standalone - HA)",
"ready (standalone - HA)"]}, timeout=900)
print("Waiting for units to agree on master.")
time.sleep(120)
master = ''
masters_count = 0
for unit in self.d.sentry['spark-test-ha']:
ip = unit.info['public-address']
url = 'http://{}:8080'.format(ip)
homepage = requests.get(url)
if 'ALIVE' in homepage.text:
masters_count += 1
master = unit.info['unit_name']
else:
assert 'STANDBY' in homepage.text
assert masters_count == 1
print("Removing master: {} ".format(master))
self.d.remove_unit(master)
time.sleep(120)
self.d.sentry.wait_for_messages({"spark-test-ha": ["ready (standalone - HA)",
"ready (standalone - HA)"]}, timeout=900)
masters_count = 0
for unit in self.d.sentry['spark-test-ha']:
ip = unit.info['public-address']
url = 'http://{}:8080'.format(ip)
homepage = requests.get(url)
if 'ALIVE' in homepage.text:
print("New master is {}".format(unit.info['unit_name']))
masters_count += 1
else:
assert 'STANDBY' in homepage.text
assert masters_count == 1
if __name__ == '__main__':
unittest.main()
|
[
"kevin.monroe@canonical.com"
] |
kevin.monroe@canonical.com
|
fc3ef945c17fc0a2cc07ebc2bd0847622a35d8e1
|
778a420262c8d1547cbcbbe3143cd73a94d9cff6
|
/test/test_common.py
|
bea1a3d8c600fa0d2718f9889976a7f6782f3467
|
[
"Apache-2.0"
] |
permissive
|
ddierkes/bdbag
|
a59ef7c4a7207dd9a90374a021c682b0b4642c54
|
bc35ddcb544f7fa923d4649835ad99a15a778bce
|
refs/heads/master
| 2020-03-17T22:06:22.942490
| 2018-05-03T11:05:57
| 2018-05-03T11:05:57
| 133,989,136
| 0
| 0
| null | 2018-05-18T18:14:03
| 2018-05-18T18:14:03
| null |
UTF-8
|
Python
| false
| false
| 4,281
|
py
|
import os
import gc
import sys
import shutil
import tempfile
import unittest
class BaseTest(unittest.TestCase):
def setUp(self):
if sys.version_info < (3,):
self.assertRaisesRegex = self.assertRaisesRegexp
self.tmpdir = tempfile.mkdtemp(prefix="bdbag_test_")
shutil.copytree(os.path.abspath(os.path.join('test', 'test-data')), os.path.join(self.tmpdir, 'test-data'))
self.test_data_dir = os.path.join(self.tmpdir, 'test-data', 'test-dir')
self.assertTrue(os.path.isdir(self.test_data_dir))
self.test_archive_dir = os.path.join(self.tmpdir, 'test-data', 'test-archives')
self.assertTrue(os.path.isdir(self.test_archive_dir))
self.test_config_dir = os.path.join(self.tmpdir, 'test-data', 'test-config')
self.assertTrue(os.path.isdir(self.test_config_dir))
self.test_http_dir = os.path.join(self.tmpdir, 'test-data', 'test-http')
self.assertTrue(os.path.isdir(self.test_http_dir))
self.test_bag_dir = os.path.join(self.tmpdir, 'test-data', 'test-bag')
self.assertTrue(os.path.isdir(self.test_bag_dir))
self.test_bag_incomplete_dir = os.path.join(self.tmpdir, 'test-data', 'test-bag-incomplete')
self.assertTrue(os.path.isdir(self.test_bag_incomplete_dir))
self.test_bag_fetch_http_dir = os.path.join(self.tmpdir, 'test-data', 'test-bag-fetch-http')
self.assertTrue(os.path.isdir(self.test_bag_fetch_http_dir))
self.test_bag_fetch_ark_dir = os.path.join(self.tmpdir, 'test-data', 'test-bag-fetch-ark')
self.assertTrue(os.path.isdir(self.test_bag_fetch_ark_dir))
self.test_bag_fetch_minid_dir = os.path.join(self.tmpdir, 'test-data', 'test-bag-fetch-minid')
self.assertTrue(os.path.isdir(self.test_bag_fetch_minid_dir))
self.test_bag_fetch_ftp_dir = os.path.join(self.tmpdir, 'test-data', 'test-bag-fetch-ftp')
self.assertTrue(os.path.isdir(self.test_bag_fetch_ftp_dir))
self.test_bag_invalid_structure_manifest_dir = os.path.join(
self.tmpdir, 'test-data', 'test-bag-invalid-structure-manifest')
self.assertTrue(os.path.isdir(self.test_bag_invalid_structure_manifest_dir))
self.test_bag_invalid_structure_filesystem_dir = os.path.join(
self.tmpdir, 'test-data', 'test-bag-invalid-structure-filesystem')
self.assertTrue(os.path.isdir(self.test_bag_invalid_structure_filesystem_dir))
self.test_bag_invalid_structure_fetch_dir = os.path.join(
self.tmpdir, 'test-data', 'test-bag-invalid-structure-fetch')
self.assertTrue(os.path.isdir(self.test_bag_invalid_structure_fetch_dir))
self.test_bag_invalid_state_manifest_fetch_dir = os.path.join(
self.tmpdir, 'test-data', 'test-bag-invalid-state-manifest-fetch')
self.assertTrue(os.path.isdir(self.test_bag_invalid_state_manifest_fetch_dir))
self.test_bag_invalid_state_fetch_filesize_dir = os.path.join(
self.tmpdir, 'test-data', 'test-bag-invalid-state-fetch-filesize')
self.assertTrue(os.path.isdir(self.test_bag_invalid_state_fetch_filesize_dir))
self.test_bag_update_invalid_fetch_dir = os.path.join(
self.tmpdir, 'test-data', 'test-bag-update-invalid-fetch')
self.assertTrue(os.path.isdir(self.test_bag_update_invalid_fetch_dir))
self.test_bag_invalid_state_duplicate_manifest_fetch_dir = os.path.join(
self.tmpdir, 'test-data', 'test-bag-invalid-state-duplicate-manifest-fetch')
self.assertTrue(os.path.isdir(self.test_bag_invalid_state_duplicate_manifest_fetch_dir))
def tearDown(self):
if os.path.isdir(self.tmpdir):
shutil.rmtree(self.tmpdir)
gc.collect()
def assertExpectedMessages(self, messages, output):
for expected in messages:
self.assertIn(expected, output, "Expected \'%s\' in output string." % expected)
def assertUnexpectedMessages(self, messages, output):
for unexpected in messages:
self.assertNotIn(unexpected, output, "Unexpected \'%s\' in output string." % unexpected)
def getTestHeader(self, desc, args=None):
return str('\n\n[%s: %s]\n%s') % (self.__class__.__name__, desc, (' '.join(args) + '\n') if args else "")
|
[
"mikedarcy@users.noreply.github.com"
] |
mikedarcy@users.noreply.github.com
|
e209a95d690abfd4189adb9be07986331f77a9b4
|
f889bc01147869459c0a516382e7b95221295a7b
|
/test/test_customer_data_customer_search_results_interface.py
|
edf62d3b5d9ee180f4386a1c633555c9f563ed2b
|
[] |
no_license
|
wildatheart/magento2-api-client
|
249a86f5c0289743f8df5b0324ccabd76f326512
|
e6a707f85b37c6c3e4ef3ff78507a7deb8f71427
|
refs/heads/master
| 2021-07-14T16:01:17.644472
| 2017-10-18T13:33:08
| 2017-10-18T13:33:08
| 107,412,121
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,167
|
py
|
# coding: utf-8
"""
Magento Community
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.customer_data_customer_search_results_interface import CustomerDataCustomerSearchResultsInterface
class TestCustomerDataCustomerSearchResultsInterface(unittest.TestCase):
""" CustomerDataCustomerSearchResultsInterface unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testCustomerDataCustomerSearchResultsInterface(self):
"""
Test CustomerDataCustomerSearchResultsInterface
"""
# FIXME: construct object with mandatory attributes with example values
#model = swagger_client.models.customer_data_customer_search_results_interface.CustomerDataCustomerSearchResultsInterface()
pass
if __name__ == '__main__':
unittest.main()
|
[
"sander@wildatheart.eu"
] |
sander@wildatheart.eu
|
26a913ad3667d31c973daeb7828df81b0f4ab511
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_199/134.py
|
085bae10457bf1468888eded9f5f112884ecf5f6
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 398
|
py
|
import numpy as np
def mainFunc(S, K):
ret = 0
for i in range(len(S)-K+1):
if S[i]=="-":
ret+=1
for j in range(K):
if S[i+j]=='-': S[i+j]='+'
else: S[i+j]='-'
for i in range(K):
if S[len(S)-1-i]=='-': return "IMPOSSIBLE"
return str(ret)
T = int(raw_input())
for t in range(T):
P = raw_input().split(' ')
print 'Case #' + str(t+1) + ': ' + mainFunc(list(P[0]), int(P[1]))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
2dbd39a7be367e0af9174da5b1b0cbfe9ed4b588
|
8d30f2d627eb15bf0a3ff541812af3a289fffcfc
|
/backend/yfkyfkfu_1352/settings.py
|
bed71fd8664c0a8b11e6d92e1c95522d7680c020
|
[] |
no_license
|
crowdbotics-apps/yfkyfkfu-1352
|
2eda6d948adf2fd9fcace7997d606ff057168e03
|
fd53b7f60397737ad99e7cae6e81c1b91c4ae22c
|
refs/heads/master
| 2022-12-15T03:23:19.526405
| 2019-03-13T18:48:22
| 2019-03-13T18:48:22
| 175,480,158
| 0
| 0
| null | 2022-12-06T15:10:19
| 2019-03-13T18:48:19
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 4,746
|
py
|
"""
Django settings for yfkyfkfu_1352 project.
Generated by 'django-admin startproject' using Django 1.11.16.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
env = environ.Env()
environ.Env.read_env(os.path.join(BASE_DIR, '.env'))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool('DEBUG', default=True)
ALLOWED_HOSTS = ['*']
SITE_ID = 1
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'yfkyfkfu_1352.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'yfkyfkfu_1352.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'yfkyfkfu_1352',
'USER': 'yfkyfkfu_1352',
'PASSWORD': 'yfkyfkfu_1352',
'HOST': 'localhost',
'PORT': '5432',
}
}
if env.str('DATABASE_URL', default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
# allauth
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = None
LOGIN_REDIRECT_URL = '/'
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = env.str('SENDGRID_USERNAME', '')
EMAIL_HOST_PASSWORD = env.str('SENDGRID_PASSWORD', '')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Import local settings
try:
from .local_settings import *
INSTALLED_APPS += DEBUG_APPS
except:
pass
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
f3234921e0a1cd9eaf4c40340a44f85a9b4c646b
|
74c8f10bfc82e762d83db424e5d4f4b3681ffba0
|
/venv/Lib/site-packages/winrt/windows/security/cryptography/certificates/__init__.py
|
df6f418b8fd0c94949f27c7b4d23e5ac4cb7a211
|
[] |
no_license
|
meghnaraswan/HEARTLabHapticSleeveProject
|
1e7bd0710c26dad6588f73f6b1f0741c8e31334d
|
6c2c9a227aaacf34b2205bcb1a856cc70ceccd55
|
refs/heads/main
| 2023-07-04T01:51:01.462007
| 2021-08-17T13:34:43
| 2021-08-17T13:34:43
| 390,823,593
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,949
|
py
|
# WARNING: Please don't edit this file. It was generated by Python/WinRT v0.9.210202.1
import typing, winrt
import enum
_ns_module = winrt._import_ns_module("Windows.Security.Cryptography.Certificates")
try:
import winrt.windows.foundation
except:
pass
try:
import winrt.windows.foundation.collections
except:
pass
try:
import winrt.windows.networking
except:
pass
try:
import winrt.windows.storage.streams
except:
pass
class CertificateChainPolicy(enum.IntEnum):
BASE = 0
SSL = 1
N_T_AUTHENTICATION = 2
MICROSOFT_ROOT = 3
class ChainValidationResult(enum.IntEnum):
SUCCESS = 0
UNTRUSTED = 1
REVOKED = 2
EXPIRED = 3
INCOMPLETE_CHAIN = 4
INVALID_SIGNATURE = 5
WRONG_USAGE = 6
INVALID_NAME = 7
INVALID_CERTIFICATE_AUTHORITY_POLICY = 8
BASIC_CONSTRAINTS_ERROR = 9
UNKNOWN_CRITICAL_EXTENSION = 10
REVOCATION_INFORMATION_MISSING = 11
REVOCATION_FAILURE = 12
OTHER_ERRORS = 13
class EnrollKeyUsages(enum.IntFlag):
NONE = 0
DECRYPTION = 0x1
SIGNING = 0x2
KEY_AGREEMENT = 0x4
ALL = 0xffffff
class ExportOption(enum.IntEnum):
NOT_EXPORTABLE = 0
EXPORTABLE = 1
class InstallOptions(enum.IntFlag):
NONE = 0
DELETE_EXPIRED = 0x1
class KeyProtectionLevel(enum.IntEnum):
NO_CONSENT = 0
CONSENT_ONLY = 1
CONSENT_WITH_PASSWORD = 2
CONSENT_WITH_FINGERPRINT = 3
class KeySize(enum.IntEnum):
INVALID = 0
RSA2048 = 2048
RSA4096 = 4096
class SignatureValidationResult(enum.IntEnum):
SUCCESS = 0
INVALID_PARAMETER = 1
BAD_MESSAGE = 2
INVALID_SIGNATURE = 3
OTHER_ERRORS = 4
Certificate = _ns_module.Certificate
CertificateChain = _ns_module.CertificateChain
CertificateEnrollmentManager = _ns_module.CertificateEnrollmentManager
CertificateExtension = _ns_module.CertificateExtension
CertificateKeyUsages = _ns_module.CertificateKeyUsages
CertificateQuery = _ns_module.CertificateQuery
CertificateRequestProperties = _ns_module.CertificateRequestProperties
CertificateStore = _ns_module.CertificateStore
CertificateStores = _ns_module.CertificateStores
ChainBuildingParameters = _ns_module.ChainBuildingParameters
ChainValidationParameters = _ns_module.ChainValidationParameters
CmsAttachedSignature = _ns_module.CmsAttachedSignature
CmsDetachedSignature = _ns_module.CmsDetachedSignature
CmsSignerInfo = _ns_module.CmsSignerInfo
CmsTimestampInfo = _ns_module.CmsTimestampInfo
KeyAlgorithmNames = _ns_module.KeyAlgorithmNames
KeyAttestationHelper = _ns_module.KeyAttestationHelper
KeyStorageProviderNames = _ns_module.KeyStorageProviderNames
PfxImportParameters = _ns_module.PfxImportParameters
StandardCertificateStoreNames = _ns_module.StandardCertificateStoreNames
SubjectAlternativeNameInfo = _ns_module.SubjectAlternativeNameInfo
UserCertificateEnrollmentManager = _ns_module.UserCertificateEnrollmentManager
UserCertificateStore = _ns_module.UserCertificateStore
|
[
"raswan@chapman.edu"
] |
raswan@chapman.edu
|
f7ad37c473eb788222d0d0e5934e021dc5afeffc
|
238e46a903cf7fac4f83fa8681094bf3c417d22d
|
/VTK/vtk_7.1.1_x64_Release/lib/python2.7/site-packages/twisted/lore/_version.py
|
c81adaac2b5e3ff5d0fc8545fdf68c50dfda705d
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-3-Clause"
] |
permissive
|
baojunli/FastCAE
|
da1277f90e584084d461590a3699b941d8c4030b
|
a3f99f6402da564df87fcef30674ce5f44379962
|
refs/heads/master
| 2023-02-25T20:25:31.815729
| 2021-02-01T03:17:33
| 2021-02-01T03:17:33
| 268,390,180
| 1
| 0
|
BSD-3-Clause
| 2020-06-01T00:39:31
| 2020-06-01T00:39:31
| null |
UTF-8
|
Python
| false
| false
| 271
|
py
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
# This is an auto-generated file. Do not edit it.
"""
Provides Twisted version information.
"""
from twisted.python import versions
version = versions.Version('twisted.lore', 14, 0, 0)
|
[
"l”ibaojunqd@foxmail.com“"
] |
libaojunqd@foxmail.com
|
cd4d183ad4e4c95ed4724893de2871e7b5527640
|
f26ea24cebb60b8b6176ee4d3ecbec477be9b7c6
|
/native_tags/contrib/mapreduce.py
|
50346ee6e3655e78113e5d8117322daf7447de1a
|
[
"BSD-3-Clause"
] |
permissive
|
lukaszb/django-native-tags
|
994fcc0e382ebfd8dfed868a4caeeb33aa78aee5
|
e1987497b652b1939d152cbc6bc281daf727b34c
|
refs/heads/master
| 2021-01-09T06:13:18.485447
| 2010-05-04T20:08:15
| 2010-05-04T20:09:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,172
|
py
|
from native_tags.decorators import function
def get_func(func_name, op=True):
import operator
from native_tags.registry import register
try:
return register['function'][func_name]
except KeyError:
pass
if func_name in __builtins__:
return __builtins__[func_name]
elif hasattr(operator, func_name):
return getattr(operator, func_name)
return lambda: None
def do_map(func_name, *sequence):
"""
Return a list of the results of applying the function to the items of
the argument sequence(s).
Functions may be registered with ``native_tags``
or can be ``builtins`` or from the ``operator`` module
If more than one sequence is given, the
function is called with an argument list consisting of the corresponding
item of each sequence, substituting None for missing values when not all
sequences have the same length. If the function is None, return a list of
the items of the sequence (or a list of tuples if more than one sequence).
Syntax::
{% map [function] [sequence] %}
{% map [function] [item1 item2 ...] %}
For example::
{% map sha1 hello world %}
calculates::
[sha1(hello), sha1(world)]
"""
if len(sequence)==1:
sequence = sequence[0]
return map(get_func(func_name, False), sequence)
do_map = function(do_map, name='map')
def do_reduce(func_name, *sequence):
"""
Apply a function of two arguments cumulatively to the items of a sequence,
from left to right, so as to reduce the sequence to a single value.
Functions may be registered with ``native_tags``
or can be ``builtins`` or from the ``operator`` module
Syntax::
{% reduce [function] [sequence] %}
{% reduce [function] [item1 item2 ...] %}
For example::
{% reduce add 1 2 3 4 5 %}
calculates::
((((1+2)+3)+4)+5) = 15
"""
if len(sequence)==1:
sequence = sequence[0]
return reduce(get_func(func_name), sequence)
do_reduce = function(do_reduce, name='reduce')
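# --- editor's sketch (standalone illustration, not part of the original module) ---
# The same "look a function up by name, then map/reduce over the arguments"
# idea as get_func/do_map/do_reduce above, written so it runs without Django
# or the native_tags registry.
try:
    import builtins                     # Python 3
except ImportError:
    import __builtin__ as builtins      # Python 2
import operator
from functools import reduce as _reduce
def _lookup(name):
    # builtins first, then the operator module (mirrors get_func above)
    return getattr(builtins, name, None) or getattr(operator, name)
if __name__ == '__main__':
    print(list(map(_lookup('abs'), [-1, -2, 3])))    # [1, 2, 3]
    print(_reduce(_lookup('add'), [1, 2, 3, 4, 5]))  # 15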
|
[
"justquick@gmail.com"
] |
justquick@gmail.com
|
808cf496a8bf650c2d81d9c2043e69ea1f1164d0
|
974d04d2ea27b1bba1c01015a98112d2afb78fe5
|
/test/legacy_test/test_dataset_cifar.py
|
1fca233d3be786b2a42f7dea6aed8dae58d6c25b
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle
|
b3d2583119082c8e4b74331dacc4d39ed4d7cff0
|
22a11a60e0e3d10a3cf610077a3d9942a6f964cb
|
refs/heads/develop
| 2023-08-17T21:27:30.568889
| 2023-08-17T12:38:22
| 2023-08-17T12:38:22
| 65,711,522
| 20,414
| 5,891
|
Apache-2.0
| 2023-09-14T19:20:51
| 2016-08-15T06:59:08
|
C++
|
UTF-8
|
Python
| false
| false
| 4,284
|
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from paddle.vision.datasets import Cifar10, Cifar100
class TestCifar10Train(unittest.TestCase):
def test_main(self):
cifar = Cifar10(mode='train')
self.assertTrue(len(cifar) == 50000)
# traversal whole dataset may cost a
# long time, randomly check 1 sample
idx = np.random.randint(0, 50000)
data, label = cifar[idx]
data = np.array(data)
self.assertTrue(len(data.shape) == 3)
self.assertTrue(data.shape[2] == 3)
self.assertTrue(data.shape[1] == 32)
self.assertTrue(data.shape[0] == 32)
self.assertTrue(0 <= int(label) <= 9)
class TestCifar10Test(unittest.TestCase):
def test_main(self):
cifar = Cifar10(mode='test')
self.assertTrue(len(cifar) == 10000)
# traversal whole dataset may cost a
# long time, randomly check 1 sample
idx = np.random.randint(0, 10000)
data, label = cifar[idx]
data = np.array(data)
self.assertTrue(len(data.shape) == 3)
self.assertTrue(data.shape[2] == 3)
self.assertTrue(data.shape[1] == 32)
self.assertTrue(data.shape[0] == 32)
self.assertTrue(0 <= int(label) <= 9)
# test cv2 backend
cifar = Cifar10(mode='test', backend='cv2')
self.assertTrue(len(cifar) == 10000)
# traversal whole dataset may cost a
# long time, randomly check 1 sample
idx = np.random.randint(0, 10000)
data, label = cifar[idx]
self.assertTrue(len(data.shape) == 3)
self.assertTrue(data.shape[2] == 3)
self.assertTrue(data.shape[1] == 32)
self.assertTrue(data.shape[0] == 32)
self.assertTrue(0 <= int(label) <= 99)
with self.assertRaises(ValueError):
cifar = Cifar10(mode='test', backend=1)
class TestCifar100Train(unittest.TestCase):
def test_main(self):
cifar = Cifar100(mode='train')
self.assertTrue(len(cifar) == 50000)
# traversal whole dataset may cost a
# long time, randomly check 1 sample
idx = np.random.randint(0, 50000)
data, label = cifar[idx]
data = np.array(data)
self.assertTrue(len(data.shape) == 3)
self.assertTrue(data.shape[2] == 3)
self.assertTrue(data.shape[1] == 32)
self.assertTrue(data.shape[0] == 32)
self.assertTrue(0 <= int(label) <= 99)
class TestCifar100Test(unittest.TestCase):
def test_main(self):
cifar = Cifar100(mode='test')
self.assertTrue(len(cifar) == 10000)
# traversal whole dataset may cost a
# long time, randomly check 1 sample
idx = np.random.randint(0, 10000)
data, label = cifar[idx]
data = np.array(data)
self.assertTrue(len(data.shape) == 3)
self.assertTrue(data.shape[2] == 3)
self.assertTrue(data.shape[1] == 32)
self.assertTrue(data.shape[0] == 32)
self.assertTrue(0 <= int(label) <= 99)
# test cv2 backend
cifar = Cifar100(mode='test', backend='cv2')
self.assertTrue(len(cifar) == 10000)
# traversal whole dataset may cost a
# long time, randomly check 1 sample
idx = np.random.randint(0, 10000)
data, label = cifar[idx]
self.assertTrue(len(data.shape) == 3)
self.assertTrue(data.shape[2] == 3)
self.assertTrue(data.shape[1] == 32)
self.assertTrue(data.shape[0] == 32)
self.assertTrue(0 <= int(label) <= 99)
with self.assertRaises(ValueError):
cifar = Cifar100(mode='test', backend=1)
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
PaddlePaddle.noreply@github.com
|
a108c73292745efbb8c63a7021a928c423ba5f28
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_136/3242.py
|
f20b77deb7902b6a054756552bf203772edf07d0
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,299
|
py
|
#!/usr/bin/python
def solve_n(case, C, F, X):
t = 0.0 # Time stamp
cc = 0.0 # Number of cookies
cookie_rate = 2.0
solved = False
while not solved:
time_to_next_event = C / cookie_rate
## Will I solve in this round
time_to_solve = t + (X - cc) / cookie_rate
if cc + cookie_rate * time_to_next_event >= X:
t += (X - cc)/cookie_rate
break
cc += cookie_rate * time_to_next_event
t += time_to_next_event
buy_cookie = True
## Should I buy a cookie?
# print "Before Buy cookies: %0.7f at t: %0.7f" % (cc, t)
if buy_cookie:
cc -= C
cookie_rate += F
# print "After Buy cookies: %f at t: %0.7f, rate: %0.7f" % (cc, t, cookie_rate)
new_time_to_solve = t + (X - cc) / cookie_rate
# print time_to_solve, new_time_to_solve
if new_time_to_solve > time_to_solve:
t = time_to_solve
break
print "Case #%d: %0.7f" % (case, t)
def solve(ip):
count = int(ip.readline())
# solve_n(-1, 500, 4.0, 2000.0)
for case in range(count):
C, F, X = map(float, ip.readline().split())
solve_n(case + 1, C, F, X)
if __name__ == "__main__":
import sys
solve(open(sys.argv[1], "r"))
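# --- editor's note (inferred from solve() above; example values are made up) ---
# Input format: first line is the number of cases, then one line per case with
# "C F X" (C is subtracted when buying, F is added to the rate, X is the goal):
#   1
#   30.0 1.0 2.0
# Run as:  python this_file.py input.txt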
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
b270e0997d1a0890099ab916b7ac8762bc7f9c20
|
3fa27b3ad1c1ca90f2bcf311d89fe8c2ca241cb4
|
/Stores/migrations/0002_auto_20201004_2348.py
|
9df9d2685983fb78f3531d6d90e5557aaf8fa728
|
[] |
no_license
|
emperorDuke/django-backend-for-ecommerce
|
717e15d7be899abcd5a4b7a7d2203c612f001aeb
|
83c1ca4d016d876a5c8711ac5cdc448d5a4a533d
|
refs/heads/master
| 2023-02-10T08:57:17.852721
| 2021-01-02T15:49:07
| 2021-01-02T15:49:07
| 271,039,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,066
|
py
|
# Generated by Django 2.2.7 on 2020-10-04 22:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('Users', '0001_initial'),
('Stores', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AddField(
model_name='store',
name='address',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, to='Users.Address'),
),
migrations.AddField(
model_name='store',
name='merchant',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='advert',
name='store',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='Stores.Store', verbose_name='store'),
),
]
|
[
"effiomduke@gmail.com"
] |
effiomduke@gmail.com
|
9b71bfc82bf69157a0a2f7f8ecb174d1811970bd
|
f5d4863b6a62ef19ffc98e4f94f6ade1bc8810d3
|
/Hash Table/274_H-Index.py
|
76ba54016e28d234d66981f9499e284a2c30bef6
|
[] |
no_license
|
xiaomojie/LeetCode
|
138808eb83938f9bd3c2e8a755d908509dff0fd3
|
eedf73b5f167025a97f0905d3718b6eab2ee3e09
|
refs/heads/master
| 2021-06-12T09:26:01.257348
| 2019-10-23T10:41:06
| 2019-10-23T10:41:06
| 76,184,467
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,410
|
py
|
"""
Given an array of citations (each citation is a non-negative integer) of a researcher, write a function to compute the researcher's h-index.
According to the definition of h-index on Wikipedia: "A scientist has index h if h of his/her N papers have at least h citations each, and the other N − h papers have no more than h citations each."
Example:
Input: citations = [3,0,6,1,5]
Output: 3
Explanation: [3,0,6,1,5] means the researcher has 5 papers in total and each of them had
received 3, 0, 6, 1, 5 citations respectively.
Since the researcher has 3 papers with at least 3 citations each and the remaining
two with no more than 3 citations each, her h-index is 3.
Note: If there are several possible values for h, the maximum one is taken as the h-index.
"""
class Solution(object):
    # Method 1
def hIndex1(self, citations):
"""
:type citations: List[int]
:rtype: int
"""
h = 0
citations.sort()
for i in range(len(citations)):
# if len(citations) - i >= citations[i]:
h = max(h, min(citations[i], len(citations) - i))
return h
def hIndex(self, citations):
h = len(citations)
citations.sort()
for i in range(len(citations)):
if citations[i] >= len(citations) - i:
return len(citations) - i
return 0
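# --- illustrative usage (editor's addition, not part of the original solution) ---
# Reproduces the example from the problem statement above.
if __name__ == '__main__':
    s = Solution()
    print(s.hIndex1([3, 0, 6, 1, 5]))  # 3
    print(s.hIndex([3, 0, 6, 1, 5]))   # 3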
|
[
"519399762@qq.com"
] |
519399762@qq.com
|
8ae50cd6b83ea1fc1e05b6127155a800c38eb5e1
|
a3f1e80179c23d9202d72b75dd37a49b44785f45
|
/api/client/swagger_client/models/api_model_script.py
|
a86fc78a1cbbdf8bc28909ede0b049bcda2e34cf
|
[
"Apache-2.0"
] |
permissive
|
pvaneck/mlx
|
b1e82fae5ac8aaa1dddac23aaa38c46f6e6cfc27
|
6edaa0bd77787c56b737322a0c875ae30de6cd49
|
refs/heads/main
| 2023-05-14T06:08:38.404133
| 2021-05-04T01:41:11
| 2021-05-04T01:41:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,189
|
py
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
MLX API
MLX API Extension for Kubeflow Pipelines # noqa: E501
OpenAPI spec version: 0.1.25-related-assets
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ApiModelScript(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'pipeline_stage': 'str',
'execution_platform': 'str',
'script_code': 'str'
}
attribute_map = {
'pipeline_stage': 'pipeline_stage',
'execution_platform': 'execution_platform',
'script_code': 'script_code'
}
def __init__(self, pipeline_stage=None, execution_platform=None, script_code=None): # noqa: E501
"""ApiModelScript - a model defined in Swagger""" # noqa: E501
self._pipeline_stage = None
self._execution_platform = None
self._script_code = None
self.discriminator = None
self.pipeline_stage = pipeline_stage
self.execution_platform = execution_platform
self.script_code = script_code
@property
def pipeline_stage(self):
"""Gets the pipeline_stage of this ApiModelScript. # noqa: E501
pipeline stage that this code sample applies to, either 'train' or 'serve' # noqa: E501
:return: The pipeline_stage of this ApiModelScript. # noqa: E501
:rtype: str
"""
return self._pipeline_stage
@pipeline_stage.setter
def pipeline_stage(self, pipeline_stage):
"""Sets the pipeline_stage of this ApiModelScript.
pipeline stage that this code sample applies to, either 'train' or 'serve' # noqa: E501
:param pipeline_stage: The pipeline_stage of this ApiModelScript. # noqa: E501
:type: str
"""
if pipeline_stage is None:
raise ValueError("Invalid value for `pipeline_stage`, must not be `None`") # noqa: E501
self._pipeline_stage = pipeline_stage
@property
def execution_platform(self):
"""Gets the execution_platform of this ApiModelScript. # noqa: E501
execution platform that this code sample applies to, i.e. 'kubernetes', 'knative' # noqa: E501
:return: The execution_platform of this ApiModelScript. # noqa: E501
:rtype: str
"""
return self._execution_platform
@execution_platform.setter
def execution_platform(self, execution_platform):
"""Sets the execution_platform of this ApiModelScript.
execution platform that this code sample applies to, i.e. 'kubernetes', 'knative' # noqa: E501
:param execution_platform: The execution_platform of this ApiModelScript. # noqa: E501
:type: str
"""
if execution_platform is None:
raise ValueError("Invalid value for `execution_platform`, must not be `None`") # noqa: E501
self._execution_platform = execution_platform
@property
def script_code(self):
"""Gets the script_code of this ApiModelScript. # noqa: E501
the source code to run the model in a pipeline stage # noqa: E501
:return: The script_code of this ApiModelScript. # noqa: E501
:rtype: str
"""
return self._script_code
@script_code.setter
def script_code(self, script_code):
"""Sets the script_code of this ApiModelScript.
the source code to run the model in a pipeline stage # noqa: E501
:param script_code: The script_code of this ApiModelScript. # noqa: E501
:type: str
"""
if script_code is None:
raise ValueError("Invalid value for `script_code`, must not be `None`") # noqa: E501
self._script_code = script_code
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ApiModelScript, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApiModelScript):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
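# --- illustrative usage (editor's addition, not part of the generated client) ---
# Construct the model and dump it back to a dict; the field values here are
# made-up examples.
if __name__ == '__main__':
    script = ApiModelScript(pipeline_stage='train',
                            execution_platform='kubernetes',
                            script_code='print("hello")')
    print(script.to_dict())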
|
[
"82406273+mlx-bot@users.noreply.github.com"
] |
82406273+mlx-bot@users.noreply.github.com
|