blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8876284a7a38ed4b8daedc0a42e8722bf52cf232
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/AbnormalRecovery-bak/bak/XOGW_RESTART_PGJK_IPO.py
|
4445242cb1451a5c0c23ad77987bf2e8101fe124
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,296
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test")
from Autocase_Result.AbnormalRecovery.ARservice.ARmainservice import *
from utils.env_restart import *
from service.ServiceConfig import *
from xtp.api.xtp_test_case import *
from xtp.api.config import ALL_USER
# 执行用例前,将所有数据清除
clear_data_and_restart_all()
class tradeApi(object):
    # Shared holder for the XTP test session: one constants table and one
    # trade API bound to the first configured user. Accessed via class
    # attributes below; never instantiated.
    const = XTPConst()
    trade = XTPTradeApi(ALL_USER[0])
class XOGW_RESTART_PGJK_IPO(unittest.TestCase):
    """Abnormal-recovery case: restart the order gateways while rights-issue
    payment and IPO subscription orders are pending, then verify each user's
    order info survives the restart unchanged.

    NOTE(review): `order_info` / `restart_info` are not defined in this file --
    presumably module-level accumulators provided by the star imports above;
    confirm against ARmainservice.
    """

    def test_XOGW_RESTART_PGJK_IPO(self):
        title = '异常恢复:重启报盘-配股缴款、新股申购'
        logger.warning(title)
        for user in ALL_USER:
            # Log the current user in.
            tradeApi.trade.Login(user)
            wt_reqs = {
                'business_type': tradeApi.const.XTP_BUSINESS_TYPE[
                    'XTP_BUSINESS_TYPE_ALLOTMENT'],
                'order_client_id': 1,
                'market': tradeApi.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
                'ticker': '080001',
                'side': tradeApi.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
                'price_type': tradeApi.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
                'price': 0,
                'quantity': 1000
            }
            # Rights-issue payment orders: on the Shenzhen market these can
            # only end up "not traded" or "cancelled".
            service_insertorder(tradeApi, wt_reqs, user)
            service_insertorder(tradeApi, wt_reqs, user)
            service_cancleorder(tradeApi, wt_reqs, user)  # sic: project API spells "cancle"
            # Shanghai rights-issue payment orders can only be "not traded"
            # or "fully traded"; they cannot be cancelled.
            wt_reqs['ticker'] = '700001'
            wt_reqs['market'] = tradeApi.const.XTP_MARKET_TYPE['XTP_MKT_SH_A']
            for client_id in range(1, 3):
                wt_reqs['order_client_id'] = client_id
                service_insertorder(tradeApi, wt_reqs, user)
            # IPO subscription orders: these only ever stay "not traded".
            wt_reqs['business_type'] = tradeApi.const.XTP_BUSINESS_TYPE[
                'XTP_BUSINESS_TYPE_IPOS']
            wt_reqs['order_client_id'] = 1
            wt_reqs['market'] = tradeApi.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A']
            wt_reqs['ticker'] = '002846'
            service_insertorder(tradeApi, wt_reqs, user)
            wt_reqs['market'] = tradeApi.const.XTP_MARKET_TYPE['XTP_MKT_SH_A']
            wt_reqs['ticker'] = '732818'
            service_insertorder(tradeApi, wt_reqs, user)
            # Snapshot the current user's capital and positions.
            query_capital_stock(tradeApi, order_info, user, wt_reqs['ticker'])
            # Log the current user out.
            tradeApi.trade.Logout()
        # Restart the environment (Shanghai and Shenzhen order gateways).
        xogwsh_restart()
        xogwsz_restart()
        time.sleep(3)
        for user in ALL_USER:
            # After the restart, log in again to receive the order info
            # pushed by the OMS.
            service_restart(tradeApi, user)
            # Re-query the user's capital and positions.
            query_capital_stock(tradeApi, restart_info, user, wt_reqs['ticker'])
        # Compare each user's order info before vs. after the restart.
        result = check_result(order_info, restart_info)
        self.assertEqual(result['结果'], True)
if __name__ == '__main__':
unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
315aad6b034c11627f71ff2689bea84cf59bba2b
|
4a48593a04284ef997f377abee8db61d6332c322
|
/python/pyqt/pyqt5/widget_QTableWidget.py
|
ac6b9f412cde6986dc866f49d1e4a4e191922386
|
[
"MIT"
] |
permissive
|
jeremiedecock/snippets
|
8feaed5a8d873d67932ef798e16cb6d2c47609f0
|
b90a444041c42d176d096fed14852d20d19adaa7
|
refs/heads/master
| 2023-08-31T04:28:09.302968
| 2023-08-21T07:22:38
| 2023-08-21T07:22:38
| 36,926,494
| 26
| 9
|
MIT
| 2023-06-06T02:17:44
| 2015-06-05T10:19:09
|
Python
|
UTF-8
|
Python
| false
| false
| 2,001
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# See https://pythonspot.com/en/pyqt5-table/
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QTableWidget, QTableWidgetItem, QVBoxLayout
from PyQt5.QtCore import pyqtSlot
class MyTableWidget(QWidget):
    """A 4x2 demo table that prints the selected cells on double-click."""

    def __init__(self):
        super().__init__()

        # Build a 4-row, 2-column table.
        self.tableWidget = QTableWidget()
        self.tableWidget.setRowCount(4)
        self.tableWidget.setColumnCount(2)

        # Fill every cell (labels are 1-based to match the display).
        cell_labels = [
            ("Cell (1,1)", "Cell (1,2)"),
            ("Cell (2,1)", "Cell (2,2)"),
            ("Cell (3,1)", "Cell (3,2)"),
            ("Cell (4,1)", "Cell (4,2)"),
        ]
        for row_index, row_labels in enumerate(cell_labels):
            for col_index, label in enumerate(row_labels):
                self.tableWidget.setItem(row_index, col_index, QTableWidgetItem(label))

        # Report double-clicked selections on the console.
        self.tableWidget.doubleClicked.connect(self.on_click)

        # Lay the table out to fill the widget.
        layout = QVBoxLayout()
        layout.addWidget(self.tableWidget)
        self.setLayout(layout)

    @pyqtSlot()
    def on_click(self):
        """Print (row, column, text) for every currently selected item."""
        for selected_item in self.tableWidget.selectedItems():
            print(selected_item.row(), selected_item.column(), selected_item.text())
if __name__ == '__main__':
    app = QApplication(sys.argv)
    widget = MyTableWidget()
    widget.show()
    # Enter the application main loop; event handling starts here.
    # exec_() carries a trailing underscore because `exec` was a Python keyword.
    exit_code = app.exec_()
    # sys.exit() ensures a clean exit and reports the application's
    # exit status to the environment.
    sys.exit(exit_code)
|
[
"jd.jdhp@gmail.com"
] |
jd.jdhp@gmail.com
|
9e90bb8d779df640147384a893d77b07f2666499
|
e44d00ffcea03f8656c40b3d4d993d51a38af3b0
|
/leetcode/June/J30_WordSearch.py
|
b5d00f420d9170ce3aba83d26afc9c78c2c1d6ed
|
[] |
no_license
|
Ayushmanglani/competitive_coding
|
d6beec4f2b24aef34ea44c3a4a72074985b4a766
|
12325b09ae2bc6b169578b6a0a091069e14c9227
|
refs/heads/master
| 2023-06-12T04:43:41.130774
| 2021-07-03T13:01:37
| 2021-07-03T13:01:37
| 262,079,363
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,032
|
py
|
# NOTE(review): this snippet is a function-body fragment -- it ends with a
# bare `return` and reads `words` / `board` from an enclosing scope that is
# not part of this file. It implements a trie + backtracking search for all
# entries of `words` on a 2-D character `board`.
WORD_KEY = '$'  # sentinel key marking "a complete word ends at this trie node"
trie = {}
for word in words:
    node = trie
    for letter in word:
        # retrieve the next node; If not found, create a empty node.
        node = node.setdefault(letter, {})
    # mark the existence of a word in trie node
    node[WORD_KEY] = word

rowNum = len(board)
colNum = len(board[0])
matchedWords = []

def backtracking(row, col, parent):
    # Depth-first walk from (row, col), descending the trie in lockstep.
    letter = board[row][col]
    currNode = parent[letter]
    # check if we find a match of word
    word_match = currNode.pop(WORD_KEY, False)
    if word_match:
        # also we removed the matched word to avoid duplicates,
        # as well as avoiding using set() for results.
        matchedWords.append(word_match)
    # Before the EXPLORATION, mark the cell as visited
    board[row][col] = '#'
    # Explore the neighbors in 4 directions, i.e. up, right, down, left
    for (rowOffset, colOffset) in [(-1, 0), (0, 1), (1, 0), (0, -1)]:
        newRow, newCol = row + rowOffset, col + colOffset
        if newRow < 0 or newRow >= rowNum or newCol < 0 or newCol >= colNum:
            continue
        if not board[newRow][newCol] in currNode:
            continue
        backtracking(newRow, newCol, currNode)
    # End of EXPLORATION, we restore the cell
    board[row][col] = letter
    # Optimization: incrementally remove the matched leaf node in Trie.
    if not currNode:
        parent.pop(letter)

for row in range(rowNum):
    for col in range(colNum):
        # starting from each of the cells
        if board[row][col] in trie:
            backtracking(row, col, trie)
return matchedWords
|
[
"ayush.manglani@gmail.com"
] |
ayush.manglani@gmail.com
|
bcf362efa27bf9b944a7809c71d7d948778c7f5b
|
6351221d588668804e2df01936732eede4d96ed0
|
/leetcode-cn/Python/75.颜色分类.py
|
4313aa9c973f61f6ebfa98ea1f9aed7a874433b6
|
[] |
no_license
|
LogicJake/code-for-interview
|
8e4ec9e24ec661a443ad42aa2496d78a1fbc8a3f
|
5990b09866696c2f3e845047c755fa72553dd421
|
refs/heads/master
| 2021-09-20T20:19:17.118333
| 2021-09-14T13:46:30
| 2021-09-14T13:46:30
| 102,202,212
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
#
# @lc app=leetcode.cn id=75 lang=python3
#
# [75] 颜色分类
#
# @lc code=start
from typing import List
class Solution:
    def sortColors(self, nums: List[int]) -> None:
        """
        Do not return anything, modify nums in-place instead.
        """
        # Dutch national flag partition: everything left of `low` is 0,
        # everything right of `high` is 2, `cur` scans the middle.
        low = 0
        high = len(nums) - 1
        cur = 0
        while cur <= high:
            # Push any run of 2s to the back; the swapped-in value is
            # re-examined because `cur` does not advance here.
            while cur <= high and nums[cur] == 2:
                nums[cur], nums[high] = nums[high], nums[cur]
                high -= 1
            if nums[cur] == 0:
                nums[low], nums[cur] = nums[cur], nums[low]
                low += 1
            cur += 1
# @lc code=end
|
[
"835410808@qq.com"
] |
835410808@qq.com
|
39cf2d0c2245eb8d9b2517f31f7b202604cb3c5d
|
7f6ad639d41ad522ae73cb87ee61da48d83dcd27
|
/hamnadmin/hamnadmin/mailqueue/management/commands/send_queued_mail.py
|
301c769bebde70fefdcdf2b48a8d818b20aa583a
|
[] |
no_license
|
mhagander/hamn
|
0aedaea24c32903480b580273ce272e26cc25d5b
|
c7271662c7726749d11e47f3064bec80b0e95c4a
|
refs/heads/master
| 2023-08-31T05:05:07.160357
| 2023-08-24T09:02:52
| 2023-08-24T09:02:52
| 729,253
| 2
| 2
| null | 2017-06-08T07:32:48
| 2010-06-19T13:48:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,411
|
py
|
# Script to send off all queued email.
#
# This script is intended to be run frequently from cron. We queue things
# up in the db so that they get automatically rolled back as necessary,
# but once we reach this point we're just going to send all of them one
# by one.
#
from django.core.management.base import BaseCommand, CommandError
from django.db import connection
import smtplib
from hamnadmin.mailqueue.models import QueuedMail
class Command(BaseCommand):
    """Management command that drains the queued-mail table.

    Intended to run frequently from cron: mail is queued in the DB so it
    rolls back with the surrounding transaction, and this command sends
    whatever has been committed, one message at a time.
    """

    help = 'Send queued mail'

    def handle(self, *args, **options):
        # Take a PostgreSQL advisory lock so two invocations cannot
        # interleave. The id is an arbitrary constant (we only interlock
        # against ourselves); the lock is released automatically when the
        # session ends.
        cursor = connection.cursor()
        cursor.execute("SELECT pg_try_advisory_lock(72181378)")
        if not cursor.fetchall()[0][0]:
            raise CommandError("Failed to get advisory lock, existing send_queued_mail process stuck?")

        for queued in QueuedMail.objects.all():
            # A fresh SMTP connection per message, deliberately: if delivery
            # fails we raise and retry everything still queued on the next
            # cron run. Local delivery is expected to be reliable.
            smtp_conn = smtplib.SMTP("localhost")
            smtp_conn.sendmail(queued.sender, queued.receiver, queued.fullmsg.encode('utf-8'))
            smtp_conn.close()
            queued.delete()
|
[
"magnus@hagander.net"
] |
magnus@hagander.net
|
c476dffe1424e9b39be4b00edfc9aad451a77a0f
|
a7c4478e6fdec7cf1a5f22b9eba5e11afc537503
|
/app/main/errors.py
|
129fff301ce85ae5f1f6f5d546092fe5a73a525a
|
[] |
no_license
|
deveshaggrawal19/waasle
|
457fe686a18ce9d5162abc9b3fd5041d7938ee23
|
69e00a29175d0771d8ff920397dc08d37d3cc3dc
|
refs/heads/master
| 2021-04-26T22:19:32.317110
| 2016-11-17T18:51:17
| 2016-11-17T18:51:17
| 71,818,197
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 186
|
py
|
from flask import render_template
from ..main import main
@main.app_errorhandler(404)  # App-level handler: routes all 404 errors here
def page_not_found(e):
    """Render the custom 404 page with a 404 status code."""
    body = render_template('404.html')
    return body, 404
|
[
"deveshaggrawal19@gmail.com"
] |
deveshaggrawal19@gmail.com
|
ea193671f595da467bd64814a0cf7ba0d1e8566d
|
74515f9e059aa8a73e63d735abbac69d99713c69
|
/src/tournai/urban/dataimport/interfaces.py
|
c03599111093db2e481981427c2974da1bb70e5b
|
[] |
no_license
|
IMIO/tournai.urban.dataimport_22
|
b7285eaf15aec02dfa778881d4c53b02cfcc1466
|
c1e9db3edeab1da154fdff2d078d88802ea7bb24
|
refs/heads/master
| 2020-12-30T16:02:30.853395
| 2018-03-23T14:14:23
| 2018-03-23T14:14:23
| 90,954,037
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
# -*- coding: utf-8 -*-
"""Module where all interfaces, events and exceptions live."""
from plone.theme.interfaces import IDefaultPloneLayer
from imio.urban.dataimport.interfaces import IUrbanDataImporter
class ITournaiUrbanDataimportLayer(IDefaultPloneLayer):
    """Marker interface that defines a Zope 3 browser layer for this package."""
class ITournaiDataImporter(IUrbanDataImporter):
    """Marker interface for the Tournai "agorawin" data importer."""
|
[
"julien.jaumotte@imio.be"
] |
julien.jaumotte@imio.be
|
d98242cf54552fe3ac8c77d0b97a6bdf536e0756
|
f93998e1c5c5c50bf20aed8d5b3517b12c333fdb
|
/wellsfargo/migrations/0003_auto_20160524_1127.py
|
2d1bd6aa4fbbf69c580061fe78dd97723e4b7eab
|
[
"ISC"
] |
permissive
|
pombredanne/django-oscar-wfrs
|
637130651ab0d15289c4b3b3b86a42ada306fe96
|
991b79d2bd8a22512861bb3117c2bb5444c467b2
|
refs/heads/master
| 2021-01-17T23:41:10.424343
| 2016-05-28T01:20:30
| 2016-05-28T01:20:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,623
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-24 11:27
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a nullable 'user' owner FK to each of the four credit-app models."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('wellsfargo', '0002_auto_20160523_2127'),
    ]

    # The identical field definition is applied to every application model,
    # so build the operations list with a comprehension instead of four
    # copy-pasted AddField blocks.
    operations = [
        migrations.AddField(
            model_name=model_name,
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='credit_applications', to=settings.AUTH_USER_MODEL, verbose_name='Owner'),
        )
        for model_name in ('cacreditapp', 'cajointcreditapp', 'uscreditapp', 'usjointcreditapp')
    ]
|
[
"crgwbr@gmail.com"
] |
crgwbr@gmail.com
|
6d5fc62c7c29032963e791bf480302878fd25bf3
|
e70a5960b60bf6c11df4248625d0188ededdd4c7
|
/Function/GUI/GUI_main/note_string.py
|
f13b73e15b9279706e1d53de8d8a68e661cfbc22
|
[] |
no_license
|
wx2000qq/MoDeng
|
70be2802b6191855667cce5fe3cd89afda5fb9a9
|
9144bb79c237c0361b40f314b2c3123d58ac71cc
|
refs/heads/master
| 2020-11-24T11:52:08.829630
| 2019-12-15T04:54:25
| 2019-12-15T04:54:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,525
|
py
|
# encoding=utf-8
"""
Collects the UI prompt strings in one place so they do not clutter the code.
All names start with `note`.
"""
# Hint printed while the startup charts (MACD/KD/MOM/RSI/SAR) are generated.
note_init_pic = \
"""
----------------------------------------------------------------------------------
小提示:
当前正在生成“三大指数”、“持仓stk”和“关注stk”的小时、日、周和月的MACD图!
以及日级别的KD、MOM、RSI、SAR等指标的图。
小时级别的MACD图半小时更新一次,其余图片启动软件时生成最新的,软件开启后暂不更新!
----------------------------------------------------------------------------------
""" + '\n'
# Daily-line MACD/SAR analysis hint shown when the software first starts.
note_day_analysis = \
"""
----------------------------------------------------------------------------------
小提示:
以下是日线的MACD拐点判断和SAR指标的判断。
红色:未来数日看多
绿色:未来数日看空
需要观察走势图做进一步的主观判断!
----------------------------------------------------------------------------------
""" + '\n'
# Explains the "mid-term level check" price-level ranking output.
note_middle_rank = \
"""
----------------------------------------------------------------------------------
小提示:
所谓“中期水平检测”是对自己的“持仓stk”和“关注stk”的当前价格在两个月内的水平
进行统计排名,由低到高排序,越在前面的,表示当前价格越是处于低位!
level这一列表示处于低位的实际情况,是一个0~100的数,比如12.2表示当前价格只超过了两
个月来12.2%的时间!
----------------------------------------------------------------------------------
""" + '\n'
# Explains the MACD "inflection point" detection alerts.
note_macd_inflection_point = \
"""
----------------------------------------------------------------------------------
小提示:
所谓“拐点检测”是对自己的“持仓stk”和“关注stk”以及“三大指数”的小时级别和半
小时级别的MACD柱子进行分析,找出“开始上涨”和“开始下跌”的情况,在控制台向用户提
示,用户收到提示后可以查看其相应的MACD图,以便对价格走势做进一步的判断!
----------------------------------------------------------------------------------
""" + '\n'
# Explains the half-hour SAR "inflection point" detection alerts.
note_sar_inflection_point = \
"""
----------------------------------------------------------------------------------
小提示:
当前正在对半小时指标进行拐点检测...
所谓“拐点检测”是对自己的“持仓stk”和“关注stk”以及“三大指数”的半
小时级别的SAR等指标进行分析,找出“开始上涨”和“开始下跌”的情况,在控制台向用户提
示,用户收到提示后可以查看其相应的指标图,以便对价格走势做进一步的判断!
----------------------------------------------------------------------------------
""" + '\n'
# Welcome banner for the "灯神" (genie) assistant, listing its commands.
note_dengshen_welcome = \
"""
==================================================
小主您好,我是灯神,很乐意为您效劳!
我的技能:
@ 配置
---------
您可以通过输入
增加持仓 美的集团
删除持仓 美的集团
增加关注 格力电器
删除关注 格力电器
查看关注
查看持仓
来增删、查看 “持仓” 和 “关注”的股票
@ 预测明日大盘
-----------------
输入“预测明日大盘”可以预测明日上证、深证和创业板三大板指的 最高点 最低点 和 收盘价,
可对明日走势略窥一二。
@ 清理
----------
输入“清理”进行清屏
@ 帮助
--------
输入“帮助”打印命令帮助
==================================================
小主请吩咐:
"""
# Full command reference printed by the "帮助" (help) command.
total_cmd = \
"""
所有命令(以美的集团为例)
==================================================
增加持仓 美的集团
删除持仓 美的集团
增加关注 美的集团
删除关注 美的集团
查看关注
查看持仓
查看b记录 美的集团
美的集团 买入 300 54.5 (以一股54.5块钱买入300股美的集团的股票)
美的集团 卖出 500 16.4
清理
帮助
预测大盘指数
==================================================
"""
|
[
"1210055099@qq.com"
] |
1210055099@qq.com
|
2644c8ca38324e9a27a0a32fe48c7fa1e3a4b2ca
|
9d8a3a2c0a15dbf1f90d801e6d705d1212cf09af
|
/services/web__rionegro_com_ar.py
|
cd01900105e6f20fc372cc37095e8cfcbc691854
|
[] |
no_license
|
rudolphos/NewsGrabber
|
f9bddc9a9b3a9e02f716133fd746f48cee635b36
|
86354fb769b2710ac7cdd5bd8795e43158b70ad2
|
refs/heads/master
| 2021-01-12T12:07:55.335079
| 2016-10-09T22:39:17
| 2016-10-09T22:39:17
| 72,316,773
| 0
| 0
| null | 2016-10-30T00:35:08
| 2016-10-30T00:35:08
| null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
# Service definition for the NewsGrabber runner: crawl cadence, seed URLs
# and in-scope URL patterns for www.rionegro.com.ar.
refresh = 4
# presumably a YYYYMMDD.rev version stamp -- TODO confirm against sibling service files
version = 20160403.01
# Seed URLs: the front page plus the latest-news index.
urls = ['http://www.rionegro.com.ar/',
        'http://www.rionegro.com.ar/diario/ultimas-noticias.aspx']
# Any URL on *.rionegro.com.ar is in scope.
regex = [r'^https?:\/\/[^\/]*rionegro\.com\.ar']
videoregex = []
liveregex = []
|
[
"Arkiver@hotmail.com"
] |
Arkiver@hotmail.com
|
28bafc6808151dfca0608b676e7311af110fe7cd
|
926b3c52070f6e309567c8598248fd5c57095be9
|
/src/mmdeploy/mmdeploy/backend/ncnn/quant.py
|
7bddda80b7addab282d7ccd92746cae829ba53ec
|
[
"Apache-2.0"
] |
permissive
|
fengbingchun/PyTorch_Test
|
410f7cd2303707b0141d433fb9d144a961e1f4c8
|
df5c2169f0b699bcd6e74adb4cb0e57f7dcd9348
|
refs/heads/master
| 2023-05-23T16:42:29.711338
| 2023-03-25T11:31:43
| 2023-03-25T11:31:43
| 167,339,907
| 15
| 4
| null | 2023-03-25T11:31:45
| 2019-01-24T09:24:59
|
C++
|
UTF-8
|
Python
| false
| false
| 2,119
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from subprocess import call
from typing import List
import mmcv
from .init_plugins import get_ncnn2int8_path
def get_quant_model_file(onnx_path: str, work_dir: str) -> List[str]:
    """Build the output paths used by ncnn int8 quantization.

    Args:
        onnx_path (str): The path to the fp32 onnx model.
        work_dir (str): The path to the directory for saving the results
            (created if it does not exist).

    Returns:
        List[str]: ``[quant_onnx, quant_table, quant_param, quant_bin]``
        paths derived from the onnx file's base name.
    """
    mmcv.mkdir_or_exist(osp.abspath(work_dir))
    stem = osp.splitext(osp.split(onnx_path)[1])[0]
    suffixes = ('_quant.onnx', '.table', '_int8.param', '_int8.bin')
    return [osp.join(work_dir, stem + suffix) for suffix in suffixes]
def ncnn2int8(param: str, bin: str, table: str, int8_param: str,
              int8_bin: str):
    """Convert an ncnn float model to a quantized int8 model.

    Runs the external ``ncnn2int8`` executable over the float graph/weights
    together with a calibration table, producing low-bit graph/weights.

    Example:
        >>> from mmdeploy.backend.ncnn.quant import ncnn2int8
        >>> param = 'work_dir/end2end.param'
        >>> bin = 'work_dir/end2end.bin'
        >>> table = 'work_dir/end2end.table'
        >>> int8_param = 'work_dir/end2end_int8.param'
        >>> int8_bin = 'work_dir/end2end_int8.bin'
        >>> ncnn2int8(param, bin, table, int8_param, int8_bin)

    Args:
        param (str): The path of ncnn float model graph.
        bin (str): The path of ncnn float weight model weight.
        table (str): The path of ncnn calibration table.
        int8_param (str): The path of ncnn low bit model graph.
        int8_bin (str): The path of ncnn low bit weight model weight.
    """
    # Bind the tool path to a local name instead of shadowing this
    # function's own name (as the original did).
    tool = get_ncnn2int8_path()
    call([tool, param, bin, int8_param, int8_bin, table])
|
[
"fengbingchun@163.com"
] |
fengbingchun@163.com
|
612a9cfe6c7e2307b32cf0a91d982b8221012697
|
5a648d5c62e640a8df8d18549eaf6e84a36dbd28
|
/findk.py
|
eb35c580c67910fc18867b2273ac68599fcf99ef
|
[
"MIT"
] |
permissive
|
quake0day/oj
|
f5f8576f765a76f0f3a8b2c559db06279e93ef25
|
c09333d1738f8735de0d5d825db6f4b707585670
|
refs/heads/master
| 2021-01-21T04:27:34.035319
| 2016-03-30T02:19:15
| 2016-03-30T02:19:15
| 30,592,861
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 650
|
py
|
# Sample inputs: look for a common substring of length k = 3.
a = "leetcode"
b = "codyabs"
k = 3
def findK(a, b, k):
    """Return True if strings a and b share a common substring of length k.

    Builds the set of all length-k substrings of a, then scans b for a hit.
    Returns False (rather than the original's implicit None) when there is
    no match.
    """
    # A string of length m has m - k + 1 substrings of length k; the
    # original iterated xrange(m - k) and silently skipped the last one
    # in both strings (e.g. findK("abc", "xbc", 2) wrongly found nothing).
    seen = set()
    for i in range(len(a) - k + 1):
        seen.add(a[i:i + k])
    for j in range(len(b) - k + 1):
        if b[j:j + k] in seen:
            return True
    return False
def findKDP(a, b, k):
    """DP variant: dp[i][j] is the length of the common suffix of a[:i]
    and b[:j].

    Returns True as soon as a common substring of length >= k is found,
    False otherwise.
    """
    m = len(a)
    n = len(b)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    # The original iterated xrange(1, m) / xrange(1, n), skipping the last
    # character of each string; since dp has m+1 / n+1 rows and columns the
    # bounds must be m + 1 and n + 1.
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if a[i - 1] == b[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
                if dp[i][j] >= k:
                    return True
    return False
# Python 2 print statements (this file predates Python 3 syntax).
print findK(a,b,k)
print findKDP(a,b,k)
|
[
"quake0day@gmail.com"
] |
quake0day@gmail.com
|
fe0cc4da59a94d0df78036600816503cfbc23403
|
b2472967910be9c12576f0f97d33bca0576a8667
|
/atcoder-old/2014/0510_abc008/a.py
|
4994aed624cbe5ab8c5fa344b8f2893e6fc4367a
|
[] |
no_license
|
ykmc/contest
|
85c3d1231e553d37d1235e1b0fd2c6c23f06c1e4
|
69a73da70f7f987eb3e85da503ea6da0744544bd
|
refs/heads/master
| 2020-09-01T22:56:10.444803
| 2020-07-14T11:36:43
| 2020-07-14T11:36:43
| 217,307,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
# Python3 (3.4.3)
import sys
# Contest-style fast input: rebind the builtin to readline.
input = sys.stdin.readline
# -------------------------------------------------------------
# function
# -------------------------------------------------------------
# (no helper functions needed for this problem)
# -------------------------------------------------------------
# main
# -------------------------------------------------------------
# Read two integers S and T and print how many integers lie in the
# inclusive range [S, T].
S,T = map(int,input().split())
print(T-S+1)
|
[
"34961813+ykmc@users.noreply.github.com"
] |
34961813+ykmc@users.noreply.github.com
|
e72b191ffe48b81cecb98695841bbeb849806378
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/pynput/pynput/_util.pyi
|
4202bcb5464e285fb3440fe793aaaedd606e541d
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 2,715
|
pyi
|
import sys
import threading
from _typeshed import Self
from collections.abc import Callable
from queue import Queue
from types import ModuleType, TracebackType
from typing import Any, ClassVar, Generic, TypeVar
from typing_extensions import ParamSpec, TypedDict
_T = TypeVar("_T")
_AbstractListener_T = TypeVar("_AbstractListener_T", bound=AbstractListener)
_P = ParamSpec("_P")
class _RESOLUTIONS(TypedDict):
    # One string per supported backend name; presumably messages describing
    # how to resolve backend import problems -- TODO confirm upstream.
    darwin: str
    uinput: str
    xorg: str
RESOLUTIONS: _RESOLUTIONS
def backend(package: str) -> ModuleType: ...
def prefix(base: type | tuple[type | tuple[Any, ...], ...], cls: type) -> str | None: ...
class AbstractListener(threading.Thread):
    """Stub mirroring pynput's listener base class: a thread with
    start/stop semantics and context-manager support."""

    class StopException(Exception): ...

    _HANDLED_EXCEPTIONS: ClassVar[tuple[type | tuple[Any, ...], ...]]  # undocumented
    _suppress: bool  # undocumented
    _running: bool  # undocumented
    _thread: threading.Thread  # undocumented
    _condition: threading.Condition  # undocumented
    _ready: bool  # undocumented
    _queue: Queue[sys._OptExcInfo | None]  # undocumented
    daemon: bool
    def __init__(self, suppress: bool = ..., **kwargs: Callable[..., bool | None] | None) -> None: ...
    @property
    def suppress(self) -> bool: ...
    @property
    def running(self) -> bool: ...
    def stop(self) -> None: ...
    def __enter__(self: Self) -> Self: ...
    def __exit__(
        self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
    ) -> None: ...
    def wait(self) -> None: ...
    def run(self) -> None: ...
    @classmethod
    def _emitter(cls, f: Callable[_P, _T]) -> Callable[_P, _T]: ...  # undocumented
    def _mark_ready(self) -> None: ...  # undocumented
    def _run(self) -> None: ...  # undocumented
    def _stop_platform(self) -> None: ...  # undocumented
    def join(self, *args: Any) -> None: ...
class Events(Generic[_T, _AbstractListener_T]):
    """Stub for pynput's queue-backed event iterator: usable as a context
    manager and as an iterator (see ``__iter__`` / ``__next__`` /
    ``_event_queue``)."""

    _Listener: type[_AbstractListener_T] | None  # undocumented

    class Event:
        def __eq__(self, other: object) -> bool: ...

    _event_queue: Queue[_T]  # undocumented
    _sentinel: object  # undocumented
    _listener: _AbstractListener_T  # undocumented
    start: Callable[[], None]
    def __init__(self, *args: Any, **kwargs: Any) -> None: ...
    def __enter__(self: Self) -> Self: ...
    def __exit__(
        self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
    ) -> None: ...
    def __iter__(self: Self) -> Self: ...
    def __next__(self) -> _T: ...
    def get(self, timeout: float | None = ...) -> _T | None: ...
    def _event_mapper(self, event: Callable[_P, object]) -> Callable[_P, None]: ...
class NotifierMixin: ...
|
[
"intellij-monorepo-bot-no-reply@jetbrains.com"
] |
intellij-monorepo-bot-no-reply@jetbrains.com
|
251ac003ca5d4741bf87599906a0b4ccc9411585
|
a4f5d92264f6ff32021945fd70041dc90840af49
|
/docstrings/tt_postscript.py
|
55f971456aebc0ad3762348da706a64aa6edf0f4
|
[
"BSD-2-Clause-Views"
] |
permissive
|
matplotlib/freetypy
|
95da1c583f05726de8bd4a18ec5008cd0539909d
|
601be6e816511a304302d6aafdbc24031c4df5df
|
refs/heads/master
| 2023-08-20T05:33:00.601874
| 2017-10-23T18:35:10
| 2017-10-23T18:35:10
| 11,617,229
| 5
| 7
| null | 2017-10-23T18:35:11
| 2013-07-23T19:32:39
|
Python
|
UTF-8
|
Python
| false
| false
| 2,488
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Michael Droettboom All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be interpreted
# as representing official policies, either expressed or implied, of
# the FreeBSD Project.
from __future__ import print_function, unicode_literals, absolute_import
# Docstring constants for the freetypy TT_Postscript binding.
# Each value is presumably attached as the docstring of the matching
# C-level attribute -- keep the string contents unchanged.
TT_Postscript__init__ = """
TrueType PostScript table.
"""
TT_Postscript_format_type = """
Format of this table.
"""
TT_Postscript_italic_angle = """
Italic angle in degrees.
"""
TT_Postscript_underline_position = """
Underline position.
"""
TT_Postscript_underline_thickness = """
Underline thickness.
"""
TT_Postscript_is_fixed_pitch = """
If `True`, the font is monospaced.
"""
TT_Postscript_min_mem_type42 = """
Minimum memory usage when the font is downloaded as a Type 42 font.
"""
TT_Postscript_max_mem_type42 = """
Maximum memory usage when the font is downloaded as a Type 42 font.
"""
TT_Postscript_min_mem_type1 = """
Minimum memory usage when the font is downloaded as a Type 1 font.
"""
TT_Postscript_max_mem_type1 = """
Maximum memory usage when the font is downloaded as a Type 1 font.
"""
|
[
"mdboom@gmail.com"
] |
mdboom@gmail.com
|
db77fde274d2b3dadccad3cddd3774d816d1ebe2
|
f571590e3c1787d183e00b81c408362e65671f76
|
/Exercisebolean.py
|
d76a0e8a16cc68826d3b91a1c9b8e7aaaa67d698
|
[] |
no_license
|
neymarthan/project1
|
0b3d108dd8eb4b6fa5093525d469d978faf88b88
|
5e07f9dff181bb310f3ce2c7818a8c6787d4b116
|
refs/heads/master
| 2022-12-26T08:44:53.464398
| 2020-10-06T09:14:29
| 2020-10-06T09:14:29
| 279,528,778
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
# Read a string from the user and report basic character-class facts.
# Bug fix: str has no .alphanumeric() method -- the original second and
# third checks raised AttributeError. Per the printed messages the intended
# tests are .isalpha() and .islower(). Also renamed the variable so it no
# longer shadows the stdlib `string` module name.
text = input('Enter the string: ')
print('This is what I found about that string:')
if text.isalnum():
    print('The string is alphanumeric')
if text.isalpha():
    print('The string contains only alphabetic characters')
if text.islower():
    print('The letters in the string are all lowercase')
|
[
"INE-02@Admins-iMac-5.local"
] |
INE-02@Admins-iMac-5.local
|
02e6babb21c73f39d790ed41dff2417ab0a89fd8
|
056ff03373c07ec60f715333f8af17ea6ad3c615
|
/labs/test_core.py
|
1bdffa5a16592f3036993621d1b6ab5f58b4bdd1
|
[
"MIT"
] |
permissive
|
MITLLRacecar/racecar-daniel-chuang
|
f7da7f0c6ea7b86c5dff007996d6eb6d7a9de26c
|
5d22aac5cbbd77d9380f3e4afaf3e0009a1791de
|
refs/heads/master
| 2023-06-18T11:10:28.771574
| 2021-07-23T01:30:10
| 2021-07-23T01:30:10
| 383,568,872
| 0
| 0
|
MIT
| 2021-07-23T01:30:10
| 2021-07-06T18:47:38
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,335
|
py
|
"""
Copyright MIT and Harvey Mudd College
MIT License
Summer 2020
A simple program which can be used to manually test racecar_core functionality.
"""
########################################################################################
# Imports
########################################################################################
import sys
sys.path.insert(1, "../library")
import racecar_core
import racecar_utils as rc_utils
########################################################################################
# Global variables
########################################################################################
# Racecar object shared by start() and update().
rc = racecar_core.create_racecar()
# Runtime-tunable state; the real defaults are (re)assigned in start().
max_speed = 0
update_slow_time = 0
show_triggers = False
show_joysticks = False
########################################################################################
# Functions
########################################################################################
def start():
    """Reset speed/telemetry settings and print the controls reference.

    Runs once every time the start button is pressed.
    """
    global max_speed, update_slow_time, show_triggers, show_joysticks

    print("Start function called")

    # Restore the defaults so repeated starts behave identically.
    max_speed = 0.25
    update_slow_time = 0.5
    show_triggers = False
    show_joysticks = False
    rc.set_update_slow_time(update_slow_time)
    rc.drive.set_max_speed(max_speed)
    rc.drive.stop()

    # Print start message
    print(
        ">> Test Core: A testing program for the racecar_core library.\n"
        "\n"
        "Controls:\n"
        " Right trigger = accelerate forward\n"
        " Left trigger = accelerate backward\n"
        " Left joystick = turn front wheels\n"
        " Left bumper = decrease max speed\n"
        " Right bumper = increase max speed\n"
        " Left joystick click = print trigger values\n"
        " Right joystick click = print joystick values\n"
        " A button = Display color image\n"
        " B button = Display depth image\n"
        " X button = Display lidar data\n"
        " Y button = Display IMU data\n"
    )
def update():
"""
After start() is run, this function is run every frame until the back button
is pressed
"""
global max_speed
global update_slow_time
global show_triggers
global show_joysticks
# Check if each button was_pressed or was_released
for button in rc.controller.Button:
if rc.controller.was_pressed(button):
print(f"Button [{button.name}] was pressed")
if rc.controller.was_released(button):
print(f"Button [{button.name}] was released")
# Click left and right joystick to toggle showing trigger and joystick values
left_trigger = rc.controller.get_trigger(rc.controller.Trigger.LEFT)
right_trigger = rc.controller.get_trigger(rc.controller.Trigger.RIGHT)
left_joystick = rc.controller.get_joystick(rc.controller.Joystick.LEFT)
right_joystick = rc.controller.get_joystick(rc.controller.Joystick.RIGHT)
if rc.controller.was_pressed(rc.controller.Button.LJOY):
show_triggers = not show_triggers
if rc.controller.was_pressed(rc.controller.Button.RJOY):
show_joysticks = not show_joysticks
if show_triggers:
print(f"Left trigger: [{left_trigger}]; Right trigger: [{right_trigger}]")
if show_joysticks:
print(f"Left joystick: [{left_joystick}]; Right joystick: [{right_joystick}]")
# Use triggers and left joystick to control car (like default drive)
rc.drive.set_speed_angle(right_trigger - left_trigger, left_joystick[0])
# Change max speed and update_slow time when the bumper is pressed
if rc.controller.was_pressed(rc.controller.Button.LB):
max_speed = max(1 / 16, max_speed / 2)
rc.drive.set_max_speed(max_speed)
update_slow_time *= 2
rc.set_update_slow_time(update_slow_time)
print(f"max_speed set to [{max_speed}]")
print(f"update_slow_time set to [{update_slow_time}] seconds")
if rc.controller.was_pressed(rc.controller.Button.RB):
max_speed = min(1, max_speed * 2)
rc.drive.set_max_speed(max_speed)
update_slow_time /= 2
rc.set_update_slow_time(update_slow_time)
print(f"max_speed set to [{max_speed}]")
print(f"update_slow_time set to [{update_slow_time}] seconds")
# Capture and display color images when the A button is down
if rc.controller.is_down(rc.controller.Button.A):
rc.display.show_color_image(rc.camera.get_color_image())
# Capture and display depth images when the B button is down
elif rc.controller.is_down(rc.controller.Button.B):
depth_image = rc.camera.get_depth_image()
rc.display.show_depth_image(depth_image)
depth_center_distance = rc_utils.get_depth_image_center_distance(depth_image)
print(f"Depth center distance: [{depth_center_distance:.2f}] cm")
# Capture and display Lidar data when the X button is down
elif rc.controller.is_down(rc.controller.Button.X):
lidar = rc.lidar.get_samples()
rc.display.show_lidar(lidar)
lidar_forward_distance = rc_utils.get_lidar_average_distance(lidar, 0)
print(f"LIDAR forward distance: [{lidar_forward_distance:.2f}] cm")
# Show IMU data when the Y button is pressed
if rc.controller.is_down(rc.controller.Button.Y):
a = rc.physics.get_linear_acceleration()
w = rc.physics.get_angular_velocity()
print(
f"Linear acceleration: ({a[0]:5.2f},{a[1]:5.2f},{a[2]:5.2f}); "
+ f"Angular velocity: ({w[0]:5.2f},{w[1]:5.2f},{w[2]:5.2f})"
)
def update_slow():
"""
After start() is run, this function is run at a constant rate that is slower
than update(). By default, update_slow() is run once per second
"""
# Check if each button is_down
for button in rc.controller.Button:
if rc.controller.is_down(button):
print(f"Button [{button.name}] is down")
########################################################################################
# DO NOT MODIFY: Register start and update and begin execution
########################################################################################
if __name__ == "__main__":
rc.set_start_update(start, update, update_slow)
rc.go()
|
[
"66690702+github-classroom[bot]@users.noreply.github.com"
] |
66690702+github-classroom[bot]@users.noreply.github.com
|
fa47f633b556f75ecc66e442fe8d82e3c675b25d
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-sdrs/huaweicloudsdksdrs/v1/model/reverse_protection_group_request_body.py
|
8eae3df950173a01db830f9ccf7363fab8f36972
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,564
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ReverseProtectionGroupRequestBody:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'reverse_server_group': 'ReverseProtectionGroupRequestParams'
}
attribute_map = {
'reverse_server_group': 'reverse-server-group'
}
def __init__(self, reverse_server_group=None):
"""ReverseProtectionGroupRequestBody
The model defined in huaweicloud sdk
:param reverse_server_group:
:type reverse_server_group: :class:`huaweicloudsdksdrs.v1.ReverseProtectionGroupRequestParams`
"""
self._reverse_server_group = None
self.discriminator = None
self.reverse_server_group = reverse_server_group
@property
def reverse_server_group(self):
"""Gets the reverse_server_group of this ReverseProtectionGroupRequestBody.
:return: The reverse_server_group of this ReverseProtectionGroupRequestBody.
:rtype: :class:`huaweicloudsdksdrs.v1.ReverseProtectionGroupRequestParams`
"""
return self._reverse_server_group
@reverse_server_group.setter
def reverse_server_group(self, reverse_server_group):
"""Sets the reverse_server_group of this ReverseProtectionGroupRequestBody.
:param reverse_server_group: The reverse_server_group of this ReverseProtectionGroupRequestBody.
:type reverse_server_group: :class:`huaweicloudsdksdrs.v1.ReverseProtectionGroupRequestParams`
"""
self._reverse_server_group = reverse_server_group
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ReverseProtectionGroupRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
d12b7c803c0ee26c17895955b0173ae850f57ec0
|
96933e173dcebcd611188b6fba982ca9cf975e1c
|
/qa/migrations/0007_remove_question_tags.py
|
fc3e9673ea51d72cfd2f1b2f275a7f9e3423e08b
|
[] |
no_license
|
hift/django-qa
|
7d640181312c672936a6f0b7fa2f8041350a0e4f
|
57bc418e0b9f611872b50968862dd469353cb050
|
refs/heads/master
| 2021-01-10T10:41:16.437404
| 2015-11-22T07:54:48
| 2015-11-22T07:54:48
| 45,435,624
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('qa', '0006_question_tags'),
]
operations = [
migrations.RemoveField(
model_name='question',
name='tags',
),
]
|
[
"="
] |
=
|
f4d577e2f19b9ff16f9ff16c9b44e3f73ed349cd
|
0facb323be8a76bb4c168641309972fa77cbecf2
|
/Configurations/HWWSemiLepHighMass/nanoAODv4v5/2016/Mix/nuisances.py
|
9e45ef184499d676695781cd4842074beab43449
|
[] |
no_license
|
bhoh/SNuAnalytics
|
ef0a1ba9fa0d682834672a831739dfcfa1e7486b
|
34d1fc062e212da152faa83be50561600819df0e
|
refs/heads/master
| 2023-07-06T03:23:45.343449
| 2023-06-26T12:18:28
| 2023-06-26T12:18:28
| 242,880,298
| 0
| 1
| null | 2020-02-25T01:17:50
| 2020-02-25T01:17:49
| null |
UTF-8
|
Python
| false
| false
| 6,767
|
py
|
import os
SITE=os.uname()[1]
xrootdPath=''
if 'iihe' in SITE :
xrootdPath = 'dcap://maite.iihe.ac.be/'
treeBaseDir = '/pnfs/iihe/cms/store/user/xjanssen/HWW2015/'
elif 'cern' in SITE :
treeBaseDir = '/eos/cms/store/group/phys_higgs/cmshww/amassiro/HWWNano/'
elif 'sdfarm' in SITE:
xrootdPath = 'root://cms-xrdr.private.lo:2094'
treeBaseDir = "/xrootd/store/user/jhchoi/Latino/HWWNano/"
eleWP='mva_90p_Iso2016'
muWP='cut_Tight80x'
mc = [skey for skey in samples if skey != 'DATA']
nuisances['lumi_Uncorrelated'] = {
'name': 'lumi_13TeV_2016',
'type': 'lnN',
'samples': dict((skey, '1.022') for skey in mc )
}
nuisances['lumi_XYFact'] = {
'name': 'lumi_13TeV_XYFact',
'type': 'lnN',
'samples': dict((skey, '1.009') for skey in mc)
}
nuisances['lumi_BBDefl'] = {
'name': 'lumi_13TeV_BBDefl',
'type': 'lnN',
'samples': dict((skey, '1.004') for skey in mc )
}
nuisances['lumi_DynBeta'] = {
'name': 'lumi_13TeV_DynBeta',
'type': 'lnN',
'samples': dict((skey, '1.005') for skey in mc )
}
nuisances['lumi_Ghosts'] = {
'name': 'lumi_13TeV_Ghosts',
'type': 'lnN',
'samples': dict((skey, '1.004') for skey in mc )
}
for shift in ['jes', 'lf', 'hf', 'hfstats1', 'hfstats2', 'lfstats1', 'lfstats2', 'cferr1', 'cferr2']:
btag_syst = ['(btagSF%sup)/(btagSF)' % shift, '(btagSF%sdown)/(btagSF)' % shift]
name = 'CMS_btag_%s' % shift
if 'stats' in shift:
name += '_2016'
nuisances['btag_shape_%s' % shift] = {
'name': name,
'kind': 'weight',
'type': 'shape',
'samples': dict((skey, btag_syst) for skey in mc),
}
trig_syst = ['TriggerEffWeight_1l_u/TriggerEffWeight_1l','TriggerEffWeight_1l_d/TriggerEffWeight_1l']
nuisances['trigg'] = {
'name': 'CMS_eff_hwwtrigger_2016',
'kind': 'weight',
'type': 'shape',
'samples': dict((skey, trig_syst) for skey in mc),
}
prefire_syst = ['PrefireWeight_Up/PrefireWeight', 'PrefireWeight_Down/PrefireWeight']
nuisances['prefire'] = {
'name': 'CMS_eff_prefiring_2016',
'kind': 'weight',
'type': 'shape',
'samples': dict((skey, prefire_syst) for skey in mc),
}
eff_e_syst = ['Lepton_tightElectron_'+eleWP+'_TotSF_Up'+'[0]/Lepton_tightElectron_'+eleWP+'_TotSF'+'[0]','Lepton_tightElectron_'+eleWP+'_TotSF_Down'+'[0]/Lepton_tightElectron_'+eleWP+'_TotSF'+'[0]']
nuisances['eff_e'] = {
'name': 'CMS_eff_e_2016',
'kind': 'weight',
'type': 'shape',
'samples': dict((skey, eff_e_syst) for skey in mc),
}
#MCl1loose2016v5__MCCorr2016v5__METup__Semilep2016_whad30__CorrFatJetMass__HMlnjjSel
nuisances['electronpt'] = {
'name': 'CMS_scale_e_2016',
'kind': 'tree',
'type': 'shape',
'samples': dict((skey, ['1', '1']) for skey in mc),
'folderUp': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__ElepTup__Semilep2016_whad30__CorrFatJetMass__HMlnjjSelBWR/',
'folderDown': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__ElepTdo__Semilep2016_whad30__CorrFatJetMass__HMlnjjSelBWR/',
#'AsLnN': '1'
}
eff_m_syst = ['Lepton_tightMuon_'+muWP+'_TotSF_Up'+'[0]/Lepton_tightMuon_'+muWP+'_TotSF'+'[0]','Lepton_tightMuon_'+muWP+'_TotSF_Down'+'[0]/Lepton_tightMuon_'+muWP+'_TotSF'+'[0]']
nuisances['eff_m'] = {
'name': 'CMS_eff_m_2016',
'kind': 'weight',
'type': 'shape',
'samples': dict((skey, eff_m_syst) for skey in mc),
}
nuisances['muonpt'] = {
'name': 'CMS_scale_m_2016',
'kind': 'tree',
'type': 'shape',
'samples': dict((skey, ['1', '1']) for skey in mc),
'folderUp': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__MupTup__Semilep2016_whad30__CorrFatJetMass__HMlnjjSelBWR/',
'folderDown': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__MupTdo__Semilep2016_whad30__CorrFatJetMass__HMlnjjSelBWR/',
#'AsLnN': '1'
}
nuisances['jes'] = {
'name': 'CMS_scale_j_2016',
'kind': 'tree',
'type': 'shape',
'samples': dict((skey, ['1', '1']) for skey in mc),
'folderUp': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__JESup__Semilep2016_whad30__CorrFatJetMass__HMlnjjSelBWR/',
'folderDown': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__JESdo__Semilep2016_whad30__CorrFatJetMass__HMlnjjSelBWR/',
#'AsLnN': '1'
}
nuisances['fatjes'] = {
'name': 'CMS_scale_fatj_2016',
'kind': 'tree',
'type': 'shape',
'samples': dict((skey, ['1', '1']) for skey in mc),
'folderUp': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__Semilep2016_whad30__CorrFatJetMass__FatJetMass_up__HMlnjjSelBWR/',
'folderDown': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__Semilep2016_whad30__CorrFatJetMass__FatJetMass_do__HMlnjjSelBWR/',
#'AsLnN': '1'
}
nuisances['fatjer'] = {
'name': 'CMS_scale_fatjres_2016',
'kind': 'tree',
'type': 'shape',
'samples': dict((skey, ['1', '1']) for skey in mc),
'folderUp': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__Semilep2016_whad30__CorrFatJetMass__FatJetMassRes_up__HMlnjjSelBWR/',
'folderDown': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__Semilep2016_whad30__CorrFatJetMass__FatJetMassRes_do__HMlnjjSelBWR/',
#'AsLnN': '1'
}
nuisances['met'] = {
'name': 'CMS_scale_met_2016',
'kind': 'tree',
'type': 'shape',
'samples': dict((skey, ['1', '1']) for skey in mc),
'folderUp': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__METup__Semilep2016_whad30__CorrFatJetMass__HMlnjjSelBWR/',
'folderDown': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__METdo__Semilep2016_whad30__CorrFatJetMass__HMlnjjSelBWR/',
#'AsLnN': '1'
}
pu_syst=['puWeightUp/puWeight','puWeightDown/puWeight']
nuisances['PU'] = {
'name': 'CMS_PU_2016',
'kind': 'weight',
'type': 'shape',
'samples': dict((skey, pu_syst) for skey in mc),
#'AsLnN': '1',
}
'''
ps_syst=['PSWeight[0]', 'PSWeight[1]', 'PSWeight[2]', 'PSWeight[3]']
nuisances['PS'] = {
'name': 'PS',
'type': 'shape',
'kind': 'weight_envelope',
'samples':dict((skey, ps_syst) for skey in mc),
#'AsLnN': '1'
}
'''
tau21_syst=['tau21SFup','tau21SFdown']
nuisances['tau21'] = {
'name': 'CMS_eff_vtag_tau21_sf_13TeV',
'type': 'lnN',
'samples': dict((skey, '1.04') for skey in mc )
#'Samples': dict((skey, tau21_syst) for skey in mc )
}
|
[
"soarnsoar@gmail.com"
] |
soarnsoar@gmail.com
|
b2ee99ab61869de3e0076e3216e7a06574b7fbc5
|
75e1d9446cb1fca5c6a79ad0ba7f38268df1161f
|
/Python Programs/both-adjacent-elements-odd-or-even.py
|
cc2b13fa70a753c1820a7d007e5bebba5611bfd7
|
[
"CC0-1.0"
] |
permissive
|
muhammad-masood-ur-rehman/Skillrack
|
6e9b6d93680dfef6f40783f02ded8a0d4283c98a
|
71a25417c89d0efab40ee6229ccd758b26ae4312
|
refs/heads/main
| 2023-02-03T16:45:54.462561
| 2020-12-23T08:36:28
| 2020-12-23T08:36:28
| 324,221,340
| 4
| 1
|
CC0-1.0
| 2020-12-24T19:12:54
| 2020-12-24T19:12:54
| null |
UTF-8
|
Python
| false
| false
| 953
|
py
|
Both Adjacent Elements - Odd or Even
Both Adjacent Elements - Odd or Even: Given an array of N positive integers, print the positive integers that have both the adjacent element values as odd or even.
Boundary Condition(s):
3 <= N <= 1000
Input Format:
The first line contains N.
The second line contains N elements separated by space(s).
Output Format:
The first line contains the elements (which have both the adjacent element values as odd or even) separated by a space.
Example Input/Output 1:
Input:
7
10 21 20 33 98 66 29
Output:
21 20 33
Example Input/Output 2:
Input:
5
11 21 30 99 52
Output:
30 99
n=int(input())
l=list(map(int,input().split()))
for i in range(1,len(l)-1):
if( (l[i-1]%2!=0 and l[i+1]%2!=0) or (l[i-1]%2==0 and l[i+1]%2==0)):
print(l[i],end=' ')
a=int(input());l=list(map(int,input().split()))
for i in range(1,a-1):
if (l[i-1]%2 and l[i+1]%2) or (l[i-1]%2==0 and l[i+1]%2==0):
print(l[i],end=' ')
|
[
"36339675+hemanthtejadasari@users.noreply.github.com"
] |
36339675+hemanthtejadasari@users.noreply.github.com
|
226db6ef29278e9c5bed42cffc9f0ecef5632813
|
53c224a6eee8c6869bc5c292cc8783ea934f0656
|
/data_generator.py
|
b0417b0ce56cec970cf6c200a3ac5465138a59c4
|
[
"MIT"
] |
permissive
|
kunato/Deep-Image-Matting
|
05909d276dd86cc3d59eacf1865511375d6b3f54
|
84baf4ce893083a940d9bfe224515f09787e9289
|
refs/heads/master
| 2020-05-22T18:35:43.610713
| 2019-05-14T15:44:34
| 2019-05-14T15:44:34
| 186,475,076
| 0
| 0
|
MIT
| 2019-05-13T18:33:15
| 2019-05-13T18:33:14
| null |
UTF-8
|
Python
| false
| false
| 5,986
|
py
|
import math
import os
import random
from random import shuffle
import cv2 as cv
import numpy as np
from keras.utils import Sequence
from config import batch_size
from config import fg_path, bg_path, a_path
from config import img_cols, img_rows
from config import unknown_code
from utils import safe_crop
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
with open('Combined_Dataset/Training_set/training_fg_names.txt') as f:
fg_files = f.read().splitlines()
with open('Combined_Dataset/Test_set/test_fg_names.txt') as f:
fg_test_files = f.read().splitlines()
with open('Combined_Dataset/Training_set/training_bg_names.txt') as f:
bg_files = f.read().splitlines()
with open('Combined_Dataset/Test_set/test_bg_names.txt') as f:
bg_test_files = f.read().splitlines()
def get_alpha(name):
fg_i = int(name.split("_")[0])
name = fg_files[fg_i]
filename = os.path.join('data/mask', name)
alpha = cv.imread(filename, 0)
return alpha
def get_alpha_test(name):
fg_i = int(name.split("_")[0])
name = fg_test_files[fg_i]
filename = os.path.join('data/mask_test', name)
alpha = cv.imread(filename, 0)
return alpha
def composite4(fg, bg, a, w, h):
fg = np.array(fg, np.float32)
bg_h, bg_w = bg.shape[:2]
x = 0
if bg_w > w:
x = np.random.randint(0, bg_w - w)
y = 0
if bg_h > h:
y = np.random.randint(0, bg_h - h)
bg = np.array(bg[y:y + h, x:x + w], np.float32)
alpha = np.zeros((h, w, 1), np.float32)
alpha[:, :, 0] = a / 255.
im = alpha * fg + (1 - alpha) * bg
im = im.astype(np.uint8)
return im, a, fg, bg
def process(im_name, bg_name):
im = cv.imread(fg_path + im_name)
a = cv.imread(a_path + im_name, 0)
h, w = im.shape[:2]
bg = cv.imread(bg_path + bg_name)
bh, bw = bg.shape[:2]
wratio = w / bw
hratio = h / bh
ratio = wratio if wratio > hratio else hratio
if ratio > 1:
bg = cv.resize(src=bg, dsize=(math.ceil(bw * ratio), math.ceil(bh * ratio)), interpolation=cv.INTER_CUBIC)
return composite4(im, bg, a, w, h)
def generate_trimap(alpha):
fg = np.array(np.equal(alpha, 255).astype(np.float32))
# fg = cv.erode(fg, kernel, iterations=np.random.randint(1, 3))
unknown = np.array(np.not_equal(alpha, 0).astype(np.float32))
unknown = cv.dilate(unknown, kernel, iterations=np.random.randint(1, 20))
trimap = fg * 255 + (unknown - fg) * 128
return trimap.astype(np.uint8)
# Randomly crop (image, trimap) pairs centered on pixels in the unknown regions.
def random_choice(trimap, crop_size=(320, 320)):
crop_height, crop_width = crop_size
y_indices, x_indices = np.where(trimap == unknown_code)
num_unknowns = len(y_indices)
x, y = 0, 0
if num_unknowns > 0:
ix = np.random.choice(range(num_unknowns))
center_x = x_indices[ix]
center_y = y_indices[ix]
x = max(0, center_x - int(crop_width / 2))
y = max(0, center_y - int(crop_height / 2))
return x, y
class DataGenSequence(Sequence):
def __init__(self, usage):
self.usage = usage
filename = '{}_names.txt'.format(usage)
with open(filename, 'r') as f:
self.names = f.read().splitlines()
np.random.shuffle(self.names)
def __len__(self):
return int(np.ceil(len(self.names) / float(batch_size)))
def __getitem__(self, idx):
i = idx * batch_size
length = min(batch_size, (len(self.names) - i))
batch_x = np.empty((length, img_rows, img_cols, 4), dtype=np.float32)
batch_y = np.empty((length, img_rows, img_cols, 2), dtype=np.float32)
for i_batch in range(length):
name = self.names[i]
fcount = int(name.split('.')[0].split('_')[0])
bcount = int(name.split('.')[0].split('_')[1])
im_name = fg_files[fcount]
bg_name = bg_files[bcount]
image, alpha, fg, bg = process(im_name, bg_name)
# crop size 320:640:480 = 1:1:1
different_sizes = [(320, 320), (480, 480), (640, 640)]
crop_size = random.choice(different_sizes)
trimap = generate_trimap(alpha)
x, y = random_choice(trimap, crop_size)
image = safe_crop(image, x, y, crop_size)
alpha = safe_crop(alpha, x, y, crop_size)
trimap = generate_trimap(alpha)
# Flip array left to right randomly (prob=1:1)
if np.random.random_sample() > 0.5:
image = np.fliplr(image)
trimap = np.fliplr(trimap)
alpha = np.fliplr(alpha)
batch_x[i_batch, :, :, 0:3] = image / 255.
batch_x[i_batch, :, :, 3] = trimap / 255.
mask = np.equal(trimap, 128).astype(np.float32)
batch_y[i_batch, :, :, 0] = alpha / 255.
batch_y[i_batch, :, :, 1] = mask
i += 1
return batch_x, batch_y
def on_epoch_end(self):
np.random.shuffle(self.names)
def train_gen():
return DataGenSequence('train')
def valid_gen():
return DataGenSequence('valid')
def shuffle_data():
num_fgs = 431
num_bgs = 43100
num_bgs_per_fg = 100
num_valid_samples = 8620
names = []
bcount = 0
for fcount in range(num_fgs):
for i in range(num_bgs_per_fg):
names.append(str(fcount) + '_' + str(bcount) + '.png')
bcount += 1
from config import num_valid_samples
valid_names = random.sample(names, num_valid_samples)
train_names = [n for n in names if n not in valid_names]
shuffle(valid_names)
shuffle(train_names)
with open('valid_names.txt', 'w') as file:
file.write('\n'.join(valid_names))
with open('train_names.txt', 'w') as file:
file.write('\n'.join(train_names))
if __name__ == '__main__':
filename = 'merged/357_35748.png'
bgr_img = cv.imread(filename)
bg_h, bg_w = bgr_img.shape[:2]
print(bg_w, bg_h)
|
[
"foamliu@yeah.net"
] |
foamliu@yeah.net
|
2e3c056ddb9c2a6b10f4f8034e24097ff42c81da
|
2dfbb97b47fd467f29ffb26faf9a9f6f117abeee
|
/leetcode/1191.py
|
9521fa28400a3f61456c8bb3b23adb9a49256601
|
[] |
no_license
|
liuweilin17/algorithm
|
0e04b2d36dfb6b7b1b0e0425daf69b62273c54b5
|
d3e8669f932fc2e22711e8b7590d3365d020e189
|
refs/heads/master
| 2020-12-30T11:03:40.085105
| 2020-04-10T03:46:01
| 2020-04-10T03:46:01
| 98,844,919
| 3
| 1
| null | 2018-10-05T03:01:02
| 2017-07-31T03:35:14
|
C++
|
UTF-8
|
Python
| false
| false
| 1,534
|
py
|
###########################################
# Let's Have Some Fun
# File Name: 1191.py
# Author: Weilin Liu
# Mail: liuweilin17@qq.com
# Created Time: Sun Sep 15 11:17:08 2019
###########################################
#coding=utf-8
#!/usr/bin/python
#1191. K-Concatenation Maximum Sum
class Solution:
# notice that Kadane's algorithm is used to find the maximum sum of subarray in O(n) time
def kConcatenationMaxSum(self, arr: List[int], k: int) -> int:
N = len(arr)
if N < 1 or k < 1:
return 0
max_so_far = 0
# case 1&2, max arr is in arr or in 2 arr
new_arr = arr if k == 1 else arr * 2
max_end_here = 0
for a in new_arr:
max_end_here = max(a, a+max_end_here)
max_so_far = max(max_end_here, max_so_far)
sum_v = sum(arr)
if sum_v > 0 and k > 2: # several arr in the middle and we remove the smallest prefix and postfix of the first arr and last arr respectively
print(">0")
# minimum prefix sum
min_pre = 0
t = 0
for i in range(N):
t += arr[i]
min_pre = min(min_pre, t)
# minimum postfix sum
min_post = 0
t = 0
for i in range(N-1, -1, -1):
t += arr[i]
min_post = min(min_post, t)
print(min_pre, min_post)
max_so_far = max(max_so_far, sum_v * k - min_pre - min_post)
return max_so_far % (pow(10, 9) + 7)
|
[
"liuweilin17@qq.com"
] |
liuweilin17@qq.com
|
6366a51b34c9707afc49632e677013a815ca55db
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/jSjjhzRg5MvTRPabx_19.py
|
eb3db41a4579e7f71b9bfce3b4dbb68b86841701
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
"""
Given a list of strings (nouns), list them up in a complete sentence.
### Examples
sentence(["orange", "apple", "pear"]) ➞ "An orange, an apple and a pear."
sentence(["keyboard", "mouse"]) ➞ "A keyboard and a mouse."
sentence(["car", "plane", "truck", "boat"]) ➞ "A car, a plane, a truck and a boat."
### Notes
* The sentence starts with a **capital letter**.
* Do not change **the order** of the words.
* **A/An** should be correct in all places.
* Put commas between nouns, except between the last two (there you put "and").
* The sentence ends with a `.`
* There are at least two nouns given.
* Every given word is lowercase.
"""
def sentence(nouns):
nouns = [
"an " + noun if noun[0] in "aeiou" else "a " + noun
for noun in nouns
]
nouns[-1] = "and " + nouns[-1] + "."
nouns[0] = nouns[0][1:]
return "A" + ", ".join(nouns[:-1]) + " " + nouns[-1]
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
133ecc63c3b8010b2e081e25503fe33369029499
|
f9a5e7233875989f994438ce267907d8210d60a1
|
/test/pump_sensor/metalearning/knn_ranking/RMSE/k=5/univariate_statistical_test_F-test/sensor_prediction_F-test_AUCROC.py
|
07f3ad3de4c6f7b7527d340a0b8f360e9da3c1b9
|
[] |
no_license
|
renoslyssiotis/When-are-Machine-learning-models-required-and-when-is-Statistics-enough
|
da8d53d44a69f4620954a32af3aacca45e1ed641
|
6af1670a74345f509c86b7bdb4aa0761c5b058ff
|
refs/heads/master
| 2022-08-29T20:21:57.553737
| 2020-05-26T18:03:46
| 2020-05-26T18:03:46
| 256,439,921
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,201
|
py
|
import sys, os, pickle
from pathlib import PurePath
current_dir = os.path.realpath(__file__)
p = PurePath(current_dir)
sys.path.append(str(p.parents[7])+'/metalearners/knn_ranking_method/RMSE')
from KNN_ranking_k_5_RMSE import KNN_ranking
#Load the selected meta-dataset after performing zero-variance threshold
with open(str(p.parents[7])+'/analysis/feature_selection/univariate_selection/ANOVA_X_f1_202.pickle', 'rb') as handle:
metadataset_feature_selected = pickle.load(handle)
#=====================META-FEATURE EXTRACTION==================================
with open(str(p.parents[5])+'/actual/sensor_metafeatures_202.pickle', 'rb') as handle:
meta_features = pickle.load(handle)
#nested_results is a nested dictionary with all the AUC-ROC performances for each dataset and all models
with open(str(p.parents[6])+'/nested_results_roc.pickle', 'rb') as handle:
nested_results_roc = pickle.load(handle)
"""
Remove the meta-features which are not in the meta-dataset
(i.e. the features which have not been selected in the feature selection process)
"""
metafeatures_to_be_removed = []
for metafeature in meta_features.keys():
if metafeature in metadataset_feature_selected.columns:
pass
else:
metafeatures_to_be_removed.append(metafeature)
[meta_features.pop(key) for key in metafeatures_to_be_removed]
#========================META-LEARNING: RANKING================================
#KNN Ranking Method
top1, top2, top3 = KNN_ranking(metadataset_feature_selected, meta_features, nested_results_roc)
print("==========================================")
print(" AUC-ROC ")
print("==========================================")
print("Top 1 predicted model: " + top1)
print("Top 2 predicted model: " + top2)
print("Top 3 predicted model: " + top3)
#Actual results
with open(str(p.parents[5])+'/actual/sensor_top_3_roc.pickle', 'rb') as handle:
actual_results = pickle.load(handle)
print("==========================================")
print("Top 1 ACTUAL model: " + actual_results[0])
print("Top 2 ACTUAL model: " + actual_results[1])
print("Top 3 ACTUAL model: " + actual_results[2])
|
[
"rl554@cam.ac.uk"
] |
rl554@cam.ac.uk
|
ccdaa4456177883986864fd3be8c8e5ff907ebe3
|
5e944167564f1c85431b2244cb9181a058b0ceeb
|
/homework1/exercise1.py
|
cdcc8f97bf7ceb91f7685291401cf5dc92596cd6
|
[] |
no_license
|
uwhpsc-2016/homework1_solution
|
4519ddec35e29b0b15561cd5b066a593edb3c499
|
d2b68d2c6aaf6a84d34405ec3b352a6ecc6c346c
|
refs/heads/master
| 2020-12-26T21:37:31.910378
| 2016-05-05T02:58:46
| 2016-05-05T02:58:46
| 55,077,710
| 0
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 804
|
py
|
def collatz_step(n):
"""Returns the result of the Collatz function.
The Collatz function C : N -> N is used in `collatz` to
generate collatz sequences.
Parameters
----------
n : int
Returns
-------
int
"""
if (n < 1):
raise ValueError('n must be >= 1')
if (n == 1):
return 1
if (n % 2 == 0):
return n/2
elif (n % 2 == 1):
return 3*n + 1
def collatz(n):
"""Returns the Collatz sequence beginning with `n`.
It is conjectured that Collatz sequences all end with `1`.
Parameters
----------
n : int
Returns
-------
sequence : list
A Collatz sequence.
"""
sequence = [n]
while (n > 1):
n = collatz_step(n)
sequence.append(n)
return sequence
|
[
"cswiercz@gmail.com"
] |
cswiercz@gmail.com
|
6ff1497c503be08c386828ec59da8a6dcd17b03b
|
5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5
|
/blimgui/dist/OpenGL/raw/GLX/_glgets.py
|
76bcbaca63d3763cb619e8e125e243064e9565cc
|
[
"MIT"
] |
permissive
|
juso40/bl2sdk_Mods
|
8422a37ca9c2c2bbf231a2399cbcb84379b7e848
|
29f79c41cfb49ea5b1dd1bec559795727e868558
|
refs/heads/master
| 2023-08-15T02:28:38.142874
| 2023-07-22T21:48:01
| 2023-07-22T21:48:01
| 188,486,371
| 42
| 110
|
MIT
| 2022-11-20T09:47:56
| 2019-05-24T20:55:10
|
Python
|
UTF-8
|
Python
| false
| false
| 279
|
py
|
"""glGet* auto-generation of output arrays (DO NOT EDIT, AUTOGENERATED)"""
try:
from OpenGL.raw.GL._lookupint import LookupInt as _L
except ImportError:
def _L(*args):
raise RuntimeError( "Need to define a lookupint for this api" )
_glget_size_mapping = _m = {}
|
[
"justin.sostmann@googlemail.com"
] |
justin.sostmann@googlemail.com
|
5a8684e4aee28c9d8a04f66bf08b0763bd885b1b
|
d04f2c6d22ec189cd725cf2e7c882e841cbada67
|
/nonlineer-3.py
|
5098029431ddac53df11179b3f63fffc6f3d2471
|
[
"Unlicense"
] |
permissive
|
nyucel/numerical-methods
|
e2d0c13b7ae752da4d765bc76a04499ad998da6f
|
14824fa3b85b4337b9c95c0b79b2b91a644ac18d
|
refs/heads/master
| 2021-12-15T11:55:12.250619
| 2018-04-17T18:58:21
| 2018-04-17T18:58:21
| 82,589,234
| 52
| 83
|
Unlicense
| 2023-08-15T22:28:25
| 2017-02-20T18:28:56
|
Python
|
UTF-8
|
Python
| false
| false
| 254
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
def f(x,y):
return(x**2+x*y-10)
def g(x,y):
return(y+3*x*y**2-57)
xi = float(input("x için başlangıç değerini girin: "))
yi = float(input("y için başlangıç değerini girin: "))
print(xi,yi)
|
[
"necdetyucel@gmail.com"
] |
necdetyucel@gmail.com
|
5123cbb3e967f205b9a8fe82e3a467da31dd9ff5
|
1508b3e3f56e750e38db4334343beedcbb2f9c95
|
/519/client.py
|
20c724b5ee583cac67e228e1cc2fa1481b071e6d
|
[] |
no_license
|
kellyseeme/pythonexample
|
3bb325e31c677160c1abd6c3f314f7ef3af55daa
|
3eab43cdfa5c59a0f4553de84c9de21e5ded44bb
|
refs/heads/master
| 2021-01-21T13:52:43.076697
| 2016-05-30T06:32:37
| 2016-05-30T06:32:37
| 51,348,486
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,237
|
py
|
#!/usr/bin/env python
# Python 2 file-transfer client: a small REPL that sends "put <file>" /
# "get <file>" commands to the server at HOST:PORT over a raw socket.
import socket
import time
import processbar
import time  # NOTE(review): duplicate import of `time` (already imported above)

HOST = '192.168.1.60'
PORT = 9999


def recv_all(socketobj, file_name, file_size):
    # Receive `file_size` bytes in 1024-byte chunks and write them to
    # `file_name`, updating a textual progress bar along the way.
    # NOTE(review): opens the file in text mode 'w'; binary payloads would
    # be corrupted on Windows — confirm intended data is text.
    f = open(file_name,'w')
    while file_size > 0:
        if file_size <= 1024:
            # Last chunk: show the bar as complete and stop.
            processbar.progressbar(10,10)
            data = socketobj.recv(1024)
            f.write(data)
            break
        elif file_size > 1024:
            # NOTE(review): progress fraction is 1024*10/file_size of the
            # *remaining* size, not bytes received so far — verify.
            processbar.progressbar(1024*10/file_size,10)
            data = socketobj.recv(1024)
            f.write(data)
            file_size -= 1024
    f.close()


s = socket.socket()
s.connect((HOST,PORT))

# Command loop: empty input or "exit" terminates the client.
while True:
    commands = raw_input('>>>')
    if commands == 'exit' or not commands:break
    s.sendall(commands)
    options = commands.strip().split(' ')
    if len(options) == 2:
        file_name = options[1]
        if options[0] == 'put':
            # Send the file length first, then the payload; the sleeps give
            # the server time to read the two messages separately.
            # NOTE(review): file handle is never closed here.
            f = open(file_name)
            data = f.read()
            time.sleep(0.2)
            s.send(str(len(data)))
            time.sleep(0.2)
            s.send(data)
            print s.recv(1024)
        elif options[0] == 'get':
            # Server sends the size first; then stream the file contents.
            file_size = int(s.recv(1024))
            recv_all(s,file_name,file_size)
            print s.recv(1024)
        else:
            pass
|
[
"root@python.(none)"
] |
root@python.(none)
|
1356e86996b557a0fb21231df0e57fbd65351d5c
|
f99cca94f74c69bc518e298c14140534e18eabd3
|
/OrcApi/Run/Test/TestServiceRun.py
|
9c2a51fb2de8a7362bf3f8d3d40beca6dcf424bc
|
[] |
no_license
|
pubselenium/OrcTestToolsKit
|
d6d838d9937d2c4d86941e317cb3ff096b58e52d
|
f3ccbbceaed4f4996f6907a2f4880c2fd3f82bbb
|
refs/heads/master
| 2021-04-29T05:15:53.240714
| 2016-12-30T09:42:53
| 2016-12-30T09:42:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
import unittest
from OrcLib.LibTest import OrcTest
from OrcApi.Run.RunDefMod import RunCore
class TestService(unittest.TestCase):
    """Exercises RunCore's list search/save round-trips for cases and batches."""

    def test_get_case_list(self):
        """Search the run list for a CASE id and dump the result to ./ccc.xml."""
        OrcTest.test_print_begin()
        run_core = RunCore()
        run_core.search_list("CASE", 2000000001)
        run_core.save_list("./ccc.xml")
        OrcTest.test_print_end()

    def test_get_batch_list(self):
        """Search the run list for a batch id and dump the result to ./abc.xml."""
        OrcTest.test_print_begin()
        run_core = RunCore()
        run_core.search_list("batch", 1000000008)
        run_core.save_list("./abc.xml")
        OrcTest.test_print_end()
|
[
"orange21cn@126.com"
] |
orange21cn@126.com
|
3313864acba61b42751b05e32ea6f94bb50c4c20
|
a2e638cd0c124254e67963bda62c21351881ee75
|
/Extensions/Default/FPythonCode/ColumnDefinitionUtils.py
|
d2f8e37da770a652927cd264b31a059f22ee2a8e
|
[] |
no_license
|
webclinic017/fa-absa-py3
|
1ffa98f2bd72d541166fdaac421d3c84147a4e01
|
5e7cc7de3495145501ca53deb9efee2233ab7e1c
|
refs/heads/main
| 2023-04-19T10:41:21.273030
| 2021-05-10T08:50:05
| 2021-05-10T08:50:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
def GetPairOffParentReference(settlement):
    """Depth-first search for the first truthy PairOffParent reference.

    Checks the settlement itself first, then recurses into its children in
    order; returns None when no node in the subtree has a pair-off parent.
    """
    own_ref = settlement.PairOffParent()
    if own_ref:
        return own_ref
    for child in settlement.Children():
        child_ref = GetPairOffParentReference(child)
        if child_ref:
            return child_ref
    return None
|
[
"81222178+nenchoabsa@users.noreply.github.com"
] |
81222178+nenchoabsa@users.noreply.github.com
|
0da214ab4195098228be3d27bdd5023c72c5940a
|
b9de33c6fb310ef69cba728b9de1a31165c3a031
|
/chapter_32/class-gotchas-super-multiple-inheritance.py
|
e2a2df1637b44a0d8209f30663b0da3f7c77b3d2
|
[] |
no_license
|
bimri/learning-python
|
2fc8c0be304d360b35020a0dfc16779f78fb6848
|
5f2fcc9a08f14e1d848530f84ce3b523d1f72aad
|
refs/heads/master
| 2023-08-12T20:30:09.754468
| 2021-10-15T20:53:49
| 2021-10-15T20:53:49
| 377,515,946
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,261
|
py
|
"Multiple Inheritance: Order Matters"
# Python always searches superclasses from left to right, according to their order in the header line.
# class ListTree:
#     def __str__(self): ...
# class Super:
#     def __str__(self): ...
# class Sub(ListTree, Super):      # Get ListTree's __str__ by listing it first
# x = Sub()                        # Inheritance searches ListTree before Super
"""
But now suppose Super and ListTree have their own versions of other same-named
attributes, too. If we want one name from Super and another from ListTree, the order
in which we list them in the class header won’t help—we will have to override inheritance
by manually assigning to the attribute name in the Sub class:
"""
# class ListTree:
#     def __str__(self): ...
#     def other(self): ...
# class Super:
#     def __str__(self): ...
#     def other(self): ...
# class Sub(ListTree, Super):      # Get ListTree's __str__ by listing it first
#     other = Super.other          # But explicitly pick Super's version of other
#     def __init__(self):
#         ...
# x = Sub()                        # Inheritance searches Sub before ListTree/Super
"""
Here, the assignment to other within the Sub class creates Sub.other—a reference back
to the Super.other object. Because it is lower in the tree, Sub.other effectively hides
ListTree.other, the attribute that the inheritance search would normally find. Similarly,
if we listed Super first in the class header to pick up its other, we would need to
select ListTree’s method explicitly:
"""
# class Sub(Super, ListTree):      # Get Super's other by order
#     __str__ = Lister.__str__     # Explicitly pick Lister.__str__

"Scopes in Methods and Classes"


# Example 1: class defined inside a function — the class body can still see
# the enclosing function's names via the LEGB enclosing (E) scope.
def generate():
    class Spam:                       # Spam is a name in generate's local scope
        count = 1
        def method(self):
            print(Spam.count)         # Visible in generate's scope, per LEGB rule (E)
    return Spam()

generate().method()


# Example 2: generate() references Spam defined later at module level; this
# works because the lookup happens when generate() is *called*, after the
# class statement below has executed.
def generate():
    return Spam()

class Spam():                         # Define at top level of module
    count = 1
    def method(self):
        print(Spam.count)             # Visible in module, per LEGB rule (E)

generate().method()


# Example 3: a class factory — returns the class object itself, and the
# method closes over the factory's `label` argument.
def generate(label):                  # Returns a class instead of instance
    class Spam:                       # Define in module scope
        count = 1
        def method(self):
            print("%s=%s" % (label, Spam.count))
    return Spam

if __name__ == "__main__":
    aClass = generate("Gotchas")      # Generate a class
    I = aClass()                      # Create an instance of the class
    I.method()                        # Call the method
|
[
"bimri@outlook.com"
] |
bimri@outlook.com
|
313570f597eb15ae6444830bf79aed976b250e96
|
80a3d98eae1d755d6914b5cbde63fd10f5cc2046
|
/autox/autox_video/mmaction2/mmaction/models/localizers/ssn.py
|
3136d651f6d76f4be04410605d7dcf7a2d0a34a4
|
[
"Apache-2.0"
] |
permissive
|
4paradigm/AutoX
|
efda57b51b586209e1d58e1dab7d0797083aadc5
|
7eab9f4744329a225ff01bb5ec360c4662e1e52e
|
refs/heads/master
| 2023-05-24T00:53:37.109036
| 2023-02-14T14:21:50
| 2023-02-14T14:21:50
| 388,068,949
| 752
| 162
|
Apache-2.0
| 2022-07-12T08:28:09
| 2021-07-21T09:45:41
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,160
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from .. import builder
from ..builder import LOCALIZERS
from .base import BaseTAGClassifier
@LOCALIZERS.register_module()
class SSN(BaseTAGClassifier):
    """Temporal Action Detection with Structured Segment Networks.

    Args:
        backbone (dict): Config for building backbone.
        cls_head (dict): Config for building classification head.
        in_channels (int): Number of channels for input data.
            Default: 3.
        spatial_type (str): Type of spatial pooling.
            Default: 'avg'.
        dropout_ratio (float): Ratio of dropout.
            Default: 0.5.
        loss_cls (dict): Config for building loss.
            Default: ``dict(type='SSNLoss')``.
        train_cfg (dict | None): Config for training. Default: None.
        test_cfg (dict | None): Config for testing. Default: None.
    """

    def __init__(self,
                 backbone,
                 cls_head,
                 in_channels=3,
                 spatial_type='avg',
                 dropout_ratio=0.5,
                 loss_cls=dict(type='SSNLoss'),
                 train_cfg=None,
                 test_cfg=None):
        super().__init__(backbone, cls_head, train_cfg, test_cfg)

        # The test-time FC head is built lazily on the first forward_test call.
        self.is_test_prepared = False
        self.in_channels = in_channels

        # Spatial pooling over the backbone feature map; 'avg'/'max' use a
        # fixed 7x7 window, any other value disables pooling.
        self.spatial_type = spatial_type
        if self.spatial_type == 'avg':
            self.pool = nn.AvgPool2d((7, 7), stride=1, padding=0)
        elif self.spatial_type == 'max':
            self.pool = nn.MaxPool2d((7, 7), stride=1, padding=0)
        else:
            self.pool = None

        # Dropout applied to pooled features during training (0 disables it).
        self.dropout_ratio = dropout_ratio
        if self.dropout_ratio != 0:
            self.dropout = nn.Dropout(p=self.dropout_ratio)
        else:
            self.dropout = None
        self.loss_cls = builder.build_loss(loss_cls)

    def forward_train(self, imgs, proposal_scale_factor, proposal_type,
                      proposal_labels, reg_targets, **kwargs):
        """Define the computation performed at every call when training."""
        # Collapse leading batch/proposal dims into one before the backbone.
        imgs = imgs.reshape((-1, self.in_channels) + imgs.shape[4:])

        x = self.extract_feat(imgs)

        if self.pool:
            x = self.pool(x)
        if self.dropout is not None:
            x = self.dropout(x)

        activity_scores, completeness_scores, bbox_preds = self.cls_head(
            (x, proposal_scale_factor))

        loss = self.loss_cls(activity_scores, completeness_scores, bbox_preds,
                             proposal_type, proposal_labels, reg_targets,
                             self.train_cfg)
        loss_dict = dict(**loss)

        return loss_dict

    def forward_test(self, imgs, relative_proposal_list, scale_factor_list,
                     proposal_tick_list, reg_norm_consts, **kwargs):
        """Define the computation performed at every call when testing."""
        num_crops = imgs.shape[0]
        imgs = imgs.reshape((num_crops, -1, self.in_channels) + imgs.shape[3:])
        num_ticks = imgs.shape[1]

        output = []
        # Feed proposal ticks through the backbone in minibatches to bound
        # peak memory usage.
        minibatch_size = self.test_cfg.ssn.sampler.batch_size
        for idx in range(0, num_ticks, minibatch_size):
            chunk = imgs[:, idx:idx +
                         minibatch_size, :, :, :].view((-1, ) + imgs.shape[2:])
            x = self.extract_feat(chunk)
            if self.pool:
                x = self.pool(x)
            # Merge crop to save memory.
            x = x.reshape((num_crops, x.size(0) // num_crops, -1)).mean(dim=0)
            output.append(x)
        output = torch.cat(output, dim=0)

        # Inputs arrive with a leading batch dim of 1; drop it.
        relative_proposal_list = relative_proposal_list.squeeze(0)
        proposal_tick_list = proposal_tick_list.squeeze(0)
        scale_factor_list = scale_factor_list.squeeze(0)
        reg_norm_consts = reg_norm_consts.squeeze(0)

        if not self.is_test_prepared:
            # One-time conversion of the consensus module into a test-time FC.
            self.is_test_prepared = self.cls_head.prepare_test_fc(
                self.cls_head.consensus.num_multipliers)

        (output, activity_scores, completeness_scores,
         bbox_preds) = self.cls_head(
             (output, proposal_tick_list, scale_factor_list), test_mode=True)

        relative_proposal_list = relative_proposal_list.cpu().numpy()
        activity_scores = activity_scores.cpu().numpy()
        completeness_scores = completeness_scores.cpu().numpy()
        reg_norm_consts = reg_norm_consts.cpu().numpy()

        if bbox_preds is not None:
            bbox_preds = bbox_preds.view(-1, self.cls_head.num_classes, 2)
            # De-normalize regression outputs: pred * consts[1] + consts[0]
            # (presumably std and mean respectively — confirm with SSNLoss).
            bbox_preds[:, :, 0] = (
                bbox_preds[:, :, 0] * reg_norm_consts[1, 0] +
                reg_norm_consts[0, 0])
            bbox_preds[:, :, 1] = (
                bbox_preds[:, :, 1] * reg_norm_consts[1, 1] +
                reg_norm_consts[0, 1])
            bbox_preds = bbox_preds.cpu().numpy()

        result = [
            dict(
                relative_proposal_list=relative_proposal_list,
                activity_scores=activity_scores,
                completeness_scores=completeness_scores,
                bbox_preds=bbox_preds)
        ]
        return result
|
[
"caixiaochen@4ParadigmdeMacBook-Pro.local"
] |
caixiaochen@4ParadigmdeMacBook-Pro.local
|
4f7d9e2c17b601aaa0a2a0c3417e9963182cc6cf
|
9b1e97850f55d839c1c6f7d93187af90bf9120a5
|
/0x0F-python-object_relational_mapping/model_state.py
|
ee5227fff95d5feb70eb50c3b7eeefad7a80192e
|
[] |
no_license
|
PilarPinto/holbertonschool-higher_level_programming
|
543271fb7f85a23745f54ac44e2fd1ef0ff452ce
|
8be531a14a280235c2a9cee7f072d88cea8b9921
|
refs/heads/master
| 2020-09-29T00:19:01.460334
| 2020-05-15T01:58:51
| 2020-05-15T01:58:51
| 226,900,539
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
#!/usr/bin/python3
'''Using sqlalchemy for State definition'''
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class State(Base):
    '''ORM model mapped to the `states` table: an integer primary key
    and a required name of up to 128 characters.'''
    __tablename__ = 'states'
    id = Column(Integer, unique=True, nullable=False, primary_key=True)
    name = Column(String(128), nullable=False)
|
[
"piapintoch@unal.edu.co"
] |
piapintoch@unal.edu.co
|
237f5c9434aa81b5dc82ca5b556e349347c56299
|
3ae73fa03a2e99bb108a923606c293674b3db304
|
/Django/beltreview bck_up/apps/login_reg/migrations/0001_initial.py
|
aeabc3aa8caef9da17f0c79b92cb6f91ada3f336
|
[] |
no_license
|
asdfkerub/DojoAssignments
|
51bef584783d799469db85ff66983bac4f404e7f
|
1eb0b5fa8ac881ce6d0b6765b104f806bdb71f5c
|
refs/heads/master
| 2021-01-11T16:47:51.207662
| 2017-03-06T01:11:28
| 2017-03-06T01:11:28
| 79,671,651
| 0
| 0
| null | 2017-03-06T01:11:29
| 2017-01-21T20:43:04
|
Python
|
UTF-8
|
Python
| false
| false
| 875
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-22 20:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration for the login_reg app: creates the
    # `User` table. Avoid manual edits; regenerate via makemigrations instead.
    # NOTE(review): `password` is a plain CharField — presumably hashed by the
    # application before save; verify against the registration view.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=45)),
                ('alias', models.CharField(max_length=45)),
                ('email', models.CharField(max_length=255)),
                ('password', models.CharField(max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
|
[
"q.kerubkim@gmail.com"
] |
q.kerubkim@gmail.com
|
54e239592280383f005aee0b8c80612e8cc24ee2
|
e05e2d26e38ce80530e3458ce3c8e02f16e5cbe6
|
/CoinAnalysis/vs_non.py
|
9d16ac66cce5ad8d5800e2c994c9734be85bd377
|
[] |
no_license
|
jegutman/hearthstone_decks
|
96acca7e040cb9b89253a867217655ce8cdf2756
|
95d4563c46618a9efccc10dbb34094258ec5bce7
|
refs/heads/master
| 2020-12-25T08:16:29.892068
| 2019-09-12T05:28:16
| 2019-09-12T05:28:16
| 102,289,609
| 3
| 0
| null | 2018-07-06T22:35:17
| 2017-09-03T19:40:44
|
Python
|
UTF-8
|
Python
| false
| false
| 2,182
|
py
|
# Win-rate analysis of aggro decks vs non-aggro decks, split by which player
# goes first. Reads matchup rows from CoinData.csv and prints a per-deck table
# of first/second win percentage and the difference between them.
from archetypes import aggro

archetypes = []
data = {}
line_data = []
decks = []

# Parse CSV rows of (deck_a, deck_b, first_flag, win_pct, games); '#' lines
# are comments. Only keep rows where deck_a is aggro and deck_b is not.
with open('CoinData.csv') as f:
    for line in f:
        if line[0] == "#":
            continue
        tmp = line.strip().split(',')
        deck_a, deck_b, first, pct, games = tmp
        if deck_a not in aggro or deck_b in aggro: continue
        if deck_a not in decks:
            decks.append(deck_a)
            print(deck_a)
        for d in (deck_a, deck_b):
            if d not in archetypes:
                assert d != '10', line
                archetypes.append(d)
            if d not in data:
                data[d] = {}
        if deck_b not in data[deck_a]:
            # Slot 0 = going second (has coin), slot 1 = going first.
            data[deck_a][deck_b] = [(), ()]
        first = int(first)
        has_coin = 1 - first
        pct = float(pct)
        games = int(games)
        data[deck_a][deck_b][first] = (pct, games)
        line_data.append((deck_a, deck_b, first, pct, games))

# Aggregate wins and game counts per (deck, first/second) key.
diffs = {}
deck_stats = {}
games_count = {}
for deck_a, deck_b, first, pct, games in line_data:
    key = (deck_a, first)
    # Convert the percentage back into an (approximate) win count.
    deck_stats[key] = deck_stats.get(key, 0) + int(round(pct * games / 100))
    games_count[key] = games_count.get(key, 0) + games

overall = []
for i in decks:
    pct_1 = round(float(deck_stats[(i, 1)] / games_count[(i,1)]) * 100, 1)
    pct_0 = round(float(deck_stats[(i, 0)] / games_count[(i,0)]) * 100, 1)
    #min_g = min(games_count[(i,1)], games_count[(i,0)])
    g_1 = games_count[(i,1)]
    g_0 = games_count[(i,0)]
    diff = round(pct_1 - pct_0, 1)
    #print("%-25s" % i, pct_1, pct_0, "%5.1f" % diff, "%6s" % min_g)
    #overall.append((i, pct_1, pct_0, diff, min_g))
    overall.append((i, pct_1, pct_0, diff, g_1, g_0))

#for i, pct_1, pct_0, diff, min_g in sorted(overall, key=lambda x:x[-2], reverse=True):
#    print("%-25s" % i, pct_1, pct_0, "%5.1f" % diff, "%6s" % min_g)

# Header row reuses the same loop variables with column-label strings,
# then print decks sorted by the first/second win-rate difference.
i, pct_1, pct_0, diff, g_1, g_0 = "deck,1st ,2nd ,diff,g_1,g_2".split(',')
print("%-25s" % i, pct_1, pct_0, "%5s" % diff, "%6s" % g_1, "%6s" % g_0)
for i, pct_1, pct_0, diff, g_1, g_0 in sorted(overall, key=lambda x:x[3], reverse=True):
    print("%-25s" % i.replace(' ', '_'), pct_1, pct_0, "%5.1f" % diff, "%6s" % g_1, "%6s" % g_0)
|
[
"jegutman@gmail.com"
] |
jegutman@gmail.com
|
554fb914629e5e2cba22ade77f00e4a6143b04ab
|
bdce502dce36a5f53ed7e376c5783c8bcbe6a98e
|
/migrations/versions/55bd2e159b91_added_type_to_coach.py
|
0b8c796002c87ae489801a7c293d32952ecaf4fb
|
[
"MIT"
] |
permissive
|
jeffthemaximum/jeffPD
|
b05b02300653b34c235adb2de46c91e18604bcf4
|
4ac2584117c45c70b77bebe64676b0138577e14f
|
refs/heads/master
| 2021-01-01T19:38:30.041034
| 2015-11-17T14:35:57
| 2015-11-17T14:35:57
| 41,260,368
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 754
|
py
|
"""added type to coach

Revision ID: 55bd2e159b91
Revises: 1abfb1cdc0ea
Create Date: 2015-10-10 11:52:37.381983

"""

# revision identifiers, used by Alembic.
revision = '55bd2e159b91'
down_revision = '1abfb1cdc0ea'

from alembic import op
import sqlalchemy as sa


def upgrade():
    # Adds the nullable coaches.coach_type column and an index on
    # logs.timestamp.
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('coaches', sa.Column('coach_type', sa.String(length=64), nullable=True))
    op.create_index('ix_logs_timestamp', 'logs', ['timestamp'], unique=False)
    ### end Alembic commands ###


def downgrade():
    # Reverses upgrade(): drops the index and the column, in reverse order.
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index('ix_logs_timestamp', 'logs')
    op.drop_column('coaches', 'coach_type')
    ### end Alembic commands ###
|
[
"frey.maxim@gmail.com"
] |
frey.maxim@gmail.com
|
979ad3afa724f60333fec8a0444de42bd250d08f
|
9a46784244d544445c01c6f0d564f4da65efcfaf
|
/CodeUltimateFlaskCourse/06. Member API/authentication/app.py
|
a94fe21a10ee8b8a8bc2c9372c5c21fd4617e606
|
[] |
no_license
|
ammbyrne/Flask
|
f55a606ec234c6a00b4d264a48e11b2f487d4ef7
|
7922ab46b8a4c388346043d2393173e7e49e43bb
|
refs/heads/main
| 2023-04-19T16:07:08.224824
| 2021-05-07T03:21:44
| 2021-05-07T03:21:44
| 365,101,770
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,120
|
py
|
# Minimal Flask + SQLite "members" CRUD API with hard-coded HTTP Basic auth.
from flask import Flask, g, request, jsonify
from database import get_db

app = Flask(__name__)

# NOTE(review): plaintext credentials in source; move to config/env and use a
# constant-time comparison in production.
api_username = 'admin'
api_password = 'password'


@app.teardown_appcontext
def close_db(error):
    # Close the per-request SQLite connection stored on flask.g, if one exists.
    if hasattr(g, 'sqlite_db'):
        g.sqlite_db.close()


@app.route('/member', methods=['GET'])
def get_members():
    """List all members (the only endpoint that checks credentials).

    NOTE(review): the DB query runs *before* the credential check, and
    request.authorization is None when no Authorization header is sent,
    which makes the attribute access below raise (HTTP 500) — verify.
    """
    db = get_db()
    members_cur = db.execute('select id, name, email, level from members')
    members = members_cur.fetchall()

    return_values = []
    for member in members:
        member_dict = {}
        member_dict['id'] = member['id']
        member_dict['name'] = member['name']
        member_dict['email'] = member['email']
        member_dict['level'] = member['level']
        return_values.append(member_dict)

    username = request.authorization.username
    password = request.authorization.password

    if username == api_username and password == api_password:
        return jsonify({'members' : return_values, 'username' : username, 'password' : password})
    return jsonify({'message' : 'Authentication failed!'}), 403


@app.route('/member/<int:member_id>', methods=['GET'])
def get_member(member_id):
    """Fetch one member by id. NOTE(review): an unknown id makes fetchone()
    return None and the subscripting below raise — verify intended 404 handling."""
    db = get_db()
    member_cur = db.execute('select id, name, email, level from members where id = ?', [member_id])
    member = member_cur.fetchone()
    return jsonify({'member' : {'id' : member['id'], 'name' : member['name'], 'email' : member['email'], 'level' : member['level']}})


@app.route('/member', methods=['POST'])
def add_member():
    """Insert a member from the JSON body, then echo back the stored row
    (re-selected by name)."""
    new_member_data = request.get_json()

    name = new_member_data['name']
    email = new_member_data['email']
    level = new_member_data['level']

    db = get_db()
    db.execute('insert into members (name, email, level) values (?, ?, ?)', [name, email, level])
    db.commit()

    member_cur = db.execute('select id, name, email, level from members where name = ?', [name])
    new_member = member_cur.fetchone()

    return jsonify({'member' : {'id' : new_member['id'], 'name' : new_member['name'], 'email' : new_member['email'], 'level' : new_member['level']}})


@app.route('/member/<int:member_id>', methods=['PUT', 'PATCH'])
def edit_member(member_id):
    """Full update of one member's name/email/level from the JSON body."""
    new_member_data = request.get_json()

    name = new_member_data['name']
    email = new_member_data['email']
    level = new_member_data['level']

    db = get_db()
    db.execute('update members set name = ?, email = ?, level = ? where id = ?', [name, email, level, member_id])
    db.commit()

    member_cur = db.execute('select id, name, email, level from members where id = ?', [member_id])
    member = member_cur.fetchone()

    return jsonify({'member' : {'id' : member['id'], 'name' : member['name'], 'email' : member['email'], 'level' : member['level']}})


@app.route('/member/<int:member_id>', methods=['DELETE'])
def delete_member(member_id):
    """Delete one member by id (idempotent: deleting a missing id still 200s)."""
    db = get_db()
    db.execute('delete from members where id = ?', [member_id])
    db.commit()
    return jsonify({'message' : 'The member has been deleted!'})


if __name__ == '__main__':
    app.run(debug=True)
|
[
"andy_m_byrne@yahoo.co.uk"
] |
andy_m_byrne@yahoo.co.uk
|
d21741515b51c9b3f25b2293bec7070258246c98
|
a829617f9ad158df80a569dd02a99c53639fa2c6
|
/test/hep/table/exception1.py
|
826950668cd50b0628b9344a6d73d01ea3f9fb31
|
[] |
no_license
|
alexhsamuel/pyhep
|
6db5edd03522553c54c8745a0e7fe98d96d2b7ae
|
c685756e9065a230e2e84c311a1c89239c5d94de
|
refs/heads/master
| 2021-01-10T14:24:08.648081
| 2015-10-22T13:18:50
| 2015-10-22T13:18:50
| 44,745,881
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 897
|
py
|
#-----------------------------------------------------------------------
# imports
#-----------------------------------------------------------------------
from __future__ import division
import hep.table
from hep.test import compare, assert_
#-----------------------------------------------------------------------
# test
#-----------------------------------------------------------------------
# Build a two-column table, project the expression "x / y" over every row,
# and verify that rows raising ZeroDivisionError are skipped when
# handle_expr_exceptions is enabled.
schema = hep.table.Schema()
schema.addColumn("x", "float32")
schema.addColumn("y", "float32")
table = hep.table.create("exception1.table", schema)
table.append(x=5, y=2)
table.append(x=3, y=4)
table.append(x=0, y=4)
table.append(x=3, y=0)
table.append(x=4, y=3)

# Accumulator for the projected values.
# FIX: renamed from `sum`, which shadowed the builtin of the same name.
total = 0


def callback(value, weight):
    # Each row carries unit weight; accumulate the evaluated expression.
    global total
    assert_(weight == 1)
    total += value


hep.table.project(table, [("x / y", callback)],
                  handle_expr_exceptions=True)
# Row (3, 0) divides by zero and must be skipped; row (0, 4) contributes 0.
compare(total, 5 / 2 + 3 / 4 + 0 + 4 / 3)
|
[
"alex@alexsamuel.net"
] |
alex@alexsamuel.net
|
5b22188159510783109706d9d6aee73b30184cd5
|
7ac1f3e38dab2899d6dc0d02cc1ace3934fb0805
|
/pygame/tank_game/code.py
|
250360ff8a4e559005ce17747cd5bdc67f609b6e
|
[] |
no_license
|
amanbhal/pythonCodes
|
3fd9357211fe7d06c6972e7a4f469df1ff3cf60a
|
49d17ce395d15e7c8497af8455790ecb876a0d49
|
refs/heads/master
| 2016-08-12T06:12:19.108863
| 2015-11-16T20:42:11
| 2015-11-16T20:42:11
| 46,301,101
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
# Python 2 script: read an integer and print the bitwise complement of its
# binary digits (each '0' becomes '1' and vice versa), both as a digit list
# and as the resulting integer.
num = int(raw_input())
bnum = bin(num)
convert = []
# bnum[2:] skips the '0b' prefix produced by bin().
for x in bnum[2:]:
    if x=='0':
        convert.append('1')
    else:
        convert.append('0')
print convert
convert = "".join(convert)
result = int(convert,2)
print result
|
[
"amandeep.bhal92@gmail.com"
] |
amandeep.bhal92@gmail.com
|
b8830ee9a2275eae167cf660353d0f991769fe44
|
19f1612a24a343198302fe1b88d15a2d94a5d91f
|
/Mod_Divmod.py
|
67fedd76cd0160039410121cd5b1209d8232ae5e
|
[] |
no_license
|
TheShubham-K/HackerRank
|
0a8f15051e5466292d880ba3d334bc19733c4ab7
|
a51bcfa4dee85258787cc5bc96976045b05a963f
|
refs/heads/master
| 2022-11-09T16:24:35.595762
| 2020-06-29T15:56:39
| 2020-06-29T15:56:39
| 266,571,298
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
# HackerRank Mod Divmod: read two integers and print the integer quotient,
# the remainder, and the (quotient, remainder) tuple from divmod.
from __future__ import division
n = int(input())
m = int(input())
ans = divmod(n,m)
print(str(ans[0])+"\n"+str(ans[1])+"\n"+str(ans))
|
[
"subham.kumar032@gmail.com"
] |
subham.kumar032@gmail.com
|
1e3807db28c7349317eeba39285686bc12b95757
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-storage/azure/mgmt/storage/v2019_04_01/models/immutability_policy_properties_py3.py
|
feb15f507cfde35317d9e13017493cd6dc5c0c5a
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,478
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ImmutabilityPolicyProperties(Model):
    """The properties of an ImmutabilityPolicy of a blob container.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param immutability_period_since_creation_in_days: Required. The
     immutability period for the blobs in the container since the policy
     creation, in days.
    :type immutability_period_since_creation_in_days: int
    :ivar state: The ImmutabilityPolicy state of a blob container, possible
     values include: Locked and Unlocked. Possible values include: 'Locked',
     'Unlocked'
    :vartype state: str or
     ~azure.mgmt.storage.v2019_04_01.models.ImmutabilityPolicyState
    :ivar etag: ImmutabilityPolicy Etag.
    :vartype etag: str
    :ivar update_history: The ImmutabilityPolicy update history of the blob
     container.
    :vartype update_history:
     list[~azure.mgmt.storage.v2019_04_01.models.UpdateHistoryProperty]
    """

    # NOTE: AutoRest-generated model — manual edits will be lost on regeneration.
    _validation = {
        'immutability_period_since_creation_in_days': {'required': True},
        'state': {'readonly': True},
        'etag': {'readonly': True},
        'update_history': {'readonly': True},
    }

    # Maps Python attribute names to REST payload keys and wire types.
    _attribute_map = {
        'immutability_period_since_creation_in_days': {'key': 'properties.immutabilityPeriodSinceCreationInDays', 'type': 'int'},
        'state': {'key': 'properties.state', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'update_history': {'key': 'updateHistory', 'type': '[UpdateHistoryProperty]'},
    }

    def __init__(self, *, immutability_period_since_creation_in_days: int, **kwargs) -> None:
        super(ImmutabilityPolicyProperties, self).__init__(**kwargs)
        self.immutability_period_since_creation_in_days = immutability_period_since_creation_in_days
        # Server-populated read-only fields start as None on the client.
        self.state = None
        self.etag = None
        self.update_history = None
|
[
"laurent.mazuel@gmail.com"
] |
laurent.mazuel@gmail.com
|
8c492766c5f8adb62877bbbcc99d29864d40fc45
|
5a29fbaa46a71eff0ac677b42e393b449e313085
|
/upsea/Ea_11_Dma_pg_01/EA/Analyzer.py
|
2ce51633ca7eb8fbc4a8352f2e2861911de253d8
|
[
"MIT"
] |
permissive
|
UpSea/PyAlgoTradeMid
|
548d181d5d18448f75f205214e9d19b7356a5730
|
c8edcbc089d92dbfbb8bb25af92a039146f6c6da
|
refs/heads/master
| 2021-01-20T19:57:21.406976
| 2016-07-25T17:23:00
| 2016-07-25T17:23:00
| 62,429,518
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,460
|
py
|
import numpy as np
import matplotlib.dates as mpd
import sys,os
xpower = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,os.pardir,os.pardir,'thirdParty','pyqtgraph-0.9.10'))
sys.path.append(xpower)
import pyqtgraph as pg
xpower = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,'BaseClass'))
sys.path.append(xpower)
from midBaseAnalyzer import midBaseAnalyzer as midBaseAnalyzer
class Analyzer(midBaseAnalyzer):
    """Plots DMA-strategy output: EMA indicator curves and buy/sell arrows.

    Expects ``self.results`` (provided by midBaseAnalyzer) to be a pandas
    DataFrame indexed by date, optionally carrying 'long_ema'/'short_ema'
    columns and boolean 'buy'/'sell' columns.
    """

    # ----------------------------------------------------------------------
    def indicatorsPlot(self, ax):
        """Draw the long/short EMA curves on *ax* when both columns exist."""
        date = np.array([mpd.date2num(d) for d in self.results.index])
        if 'long_ema' in self.results and 'short_ema' in self.results:
            ax.plot(date, self.results['long_ema'])
            ax.plot(date, self.results['short_ema'])

    def signalPlot(self, ax, yBuy=None, ySell=None):
        """Add buy (red, pointing up) and sell (green, pointing down) arrows.

        yBuy/ySell: optional arrays of y-coordinates for the arrows; when
        omitted they default to the 'long_ema' values at the buy/sell rows.
        """
        # FIX: removed an unused `date` array that was computed here, and
        # replaced `yBuy == None` with an identity check — `== None` raises
        # ("truth value of an array is ambiguous") when callers pass numpy
        # arrays, which is exactly what the defaults below produce.
        if 'buy' in self.results and 'sell' in self.results:
            if yBuy is None or ySell is None:
                # NOTE(review): the original tested 'long_ema' twice in one
                # condition; one occurrence may have been meant to be
                # 'short_ema' — confirm with the author.
                if 'long_ema' in self.results:
                    yBuy = np.array(self.results['long_ema'][self.results.buy])
                    ySell = np.array(self.results['long_ema'][self.results.sell])
            if yBuy is not None or ySell is not None:
                if 'long_ema' in self.results:
                    xBuy = np.array([mpd.date2num(d) for d in self.results.ix[self.results.buy].index])
                    for x1, y1 in zip(xBuy, yBuy):
                        a1 = pg.ArrowItem(angle=90, tipAngle=60, headLen=5, tailLen=0, tailWidth=5, pen={'color': 'r', 'width': 1})
                        ax.addItem(a1)
                        a1.setPos(x1, y1)
                    xSell = np.array([mpd.date2num(d) for d in self.results.ix[self.results.sell].index])
                    for x1, y1 in zip(xSell, ySell):
                        a1 = pg.ArrowItem(angle=-90, tipAngle=60, headLen=5, tailLen=0, tailWidth=5, pen={'color': 'g', 'width': 1})
                        ax.addItem(a1)
                        a1.setPos(x1, y1)
|
[
"upsea@upsea.cn"
] |
upsea@upsea.cn
|
3e799491be2198eeecb6afab23a3bc4df7ac236a
|
d785e993ed65049c82607a1482b45bddb2a03dda
|
/nano2017/cfg_fr_2018/ZZTo4L_ext2_cfg.py
|
ada0a293740e2508a83fed849b413d5fd23bc72b
|
[] |
no_license
|
PKUHEPEWK/ssww
|
eec02ad7650014646e1bcb0e8787cf1514aaceca
|
a507a289935b51b8abf819b1b4b05476a05720dc
|
refs/heads/master
| 2020-05-14T04:15:35.474981
| 2019-06-28T23:48:15
| 2019-06-28T23:48:15
| 181,696,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,358
|
py
|
# CRAB3 job configuration: runs the NanoAOD fake-rate script over the 2018
# ZZTo4L ext2 sample and stages output to the T2_CN_Beijing site.
from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
# NOTE(review): the imported `config` is immediately shadowed by the fresh
# Configuration() below — presumably intentional; verify.
config = Configuration()

config.section_("General")
config.General.requestName = 'ZZTo4L_ext2_2018'
config.General.transferLogs= False

config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'PSet.py'
config.JobType.scriptExe = 'crab_script_fr_2018.sh'
# haddnano.py is shipped with the job; see inline note from the author below.
config.JobType.inputFiles = ['crab_script_fr_2018.py','ssww_keep_and_drop_2018.txt','ssww_output_branch_selection_2018.txt','haddnano.py'] #hadd nano will not be needed once nano tools are in cmssw
config.JobType.sendPythonFolder = True

config.section_("Data")
config.Data.inputDataset = '/ZZTo4L_TuneCP5_13TeV_powheg_pythia8/RunIIAutumn18NanoAODv4-Nano14Dec2018_102X_upgrade2018_realistic_v16_ext2-v1/NANOAODSIM'
#config.Data.inputDBS = 'phys03'
config.Data.inputDBS = 'global'
# Split by files, 20 files per job, over the whole dataset (-1 = all units).
config.Data.splitting = 'FileBased'
#config.Data.splitting = 'EventAwareLumiBased'
config.Data.unitsPerJob = 20
config.Data.totalUnits = -1
config.Data.outLFNDirBase ='/store/user/%s/nano_fr_2018_v0' % (getUsernameFromSiteDB())
config.Data.publication = False
config.Data.outputDatasetTag = 'ZZTo4L_ext2_2018'

config.section_("Site")
config.Site.storageSite = "T2_CN_Beijing"
#config.Site.storageSite = "T2_CH_CERN"

#config.section_("User")
#config.User.voGroup = 'dcms'
|
[
"jiexiao@pku.edu.cn"
] |
jiexiao@pku.edu.cn
|
f74eb99bfc1bda4bca7cabb334cbd30400f2bc04
|
73e53e16fc1557447ac8b6d280d916adaa36c846
|
/server
|
e15c737999e7404d2c732e48fad8b6e4ab1df6f6
|
[] |
no_license
|
apkallum/monadical.com
|
abc92cdd6ce49c7d6024df710ec67de102c787ed
|
7b8fa76072ad0eeae2cb515591b345ce29a64dd6
|
refs/heads/master
| 2020-08-24T18:53:32.470134
| 2019-10-22T03:42:32
| 2019-10-22T03:42:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,706
|
#!/usr/bin/env python3
import sys
import json
from datetime import datetime
from flask import Flask, render_template, redirect
### Config
app = Flask(__name__)

CONFIG_FILE = 'content.json'        # site content source: PAGES and POSTS
HOST = 'http://127.0.0.1:5000'      # base URL used only for --pages/--posts listings
def load_config(fname=CONFIG_FILE):
    """Read the content.json file and load it as a dictionary.

    Raises OSError if the file is missing and json.JSONDecodeError if it
    is malformed.
    """
    # Explicit UTF-8 so parsing doesn't depend on the platform's default
    # locale encoding (content.json may contain non-ASCII text).
    with open(fname, 'r', encoding='utf-8') as f:
        return json.load(f)
CONFIG = load_config(CONFIG_FILE)
# Index both collections by URL for O(1) route lookup.
PAGES = {page['url']: page for page in list(CONFIG['PAGES'].values())}   # {url: {page_data}}
POSTS = {post['url']: post for post in list(CONFIG['POSTS'].values())}   # {url: {post_data}}


### Routes
# Similar to wordpress, pages and posts are separate. Every page has its own template
# in templates/page.html, but all posts use the same template + an iframe URL for the
# post content

@app.route('/')
def index():
    # Root redirects to the static-style index page route below.
    return redirect("/index.html")

@app.route('/favicon.ico')
def favicon():
    return redirect("/static/favicon.ico")

@app.route('/<path>')
def render_page(path):
    # NOTE(review): an unknown path raises KeyError here (HTTP 500, not 404)
    # — confirm whether a 404 handler is expected.
    page = PAGES[f'/{path}']
    return render_template(page['template'], now=datetime.now(), **CONFIG, **page)

@app.route('/posts/<path>')
def render_post(path):
    print(path)
    post = POSTS[f'/posts/{path}']
    return render_template('post.html', now=datetime.now(), **CONFIG, **post)


if __name__ == '__main__':
    # CLI helpers: list page/post URLs, or run the dev server by default.
    if len(sys.argv) > 1 and sys.argv[1] == '--pages':
        # just print list of page urls
        print('\n'.join(HOST + url for url in PAGES.keys()))
    elif len(sys.argv) > 1 and sys.argv[1] == '--posts':
        # just print list of post urls
        print('\n'.join(HOST + url for url in POSTS.keys()))
    else:
        # run the flask http server
        app.run()
|
[
"git@nicksweeting.com"
] |
git@nicksweeting.com
|
|
5f05a3951089f1baf3863c3630cf00d923676bdb
|
37f1563cdacf4b37b5b927b892538218aae79c77
|
/hard/array/firstMissingPositive.py
|
b4c1f73b2ee27e1d1e11c3720b64cf11a4bd523c
|
[] |
no_license
|
unsortedtosorted/elgoog
|
9dee49a20f981305910a8924d86e8f2a16fe14c2
|
5be9fab24c0c1fd9d5dc7a7bdaca105f1ca873ee
|
refs/heads/master
| 2020-04-15T00:51:12.114249
| 2019-05-19T04:37:24
| 2019-05-19T04:37:24
| 164,254,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 709
|
py
|
"""
41. First Missing Positive
Runtime : O(N)
"""
class Solution(object):
    def firstMissingPositive(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        # Cyclic sort: move every value v in 1..n into slot v-1 (in place),
        # then the first slot whose value mismatches its position is the answer.
        if not nums:
            return 1
        n = len(nums)
        for idx in range(n):
            # Keep swapping until this slot holds the right value or an
            # out-of-range value that cannot be placed.
            while nums[idx] != idx + 1 and 0 < nums[idx] <= n:
                val = nums[idx]
                nums[idx], nums[val - 1] = nums[val - 1], val
                if nums[idx] == nums[val - 1]:
                    break  # duplicate value: another swap would loop forever
        for pos, val in enumerate(nums, 1):
            if val != pos:
                return pos
        return n + 1
|
[
"noreply@github.com"
] |
unsortedtosorted.noreply@github.com
|
55412c60ad3960f8a8780d3ffcf2369eac11a1b9
|
98c86ee65aac21c8363f627f99b9da3acd777b35
|
/Actividades en Clases/Actividad 04/Solución/botella.py
|
43df3345b727c6c4c291d9e75fc5811d85f464a2
|
[] |
no_license
|
bcsaldias/syllabus
|
ef7e5eff0c8fc1ab5a28d12cc3f18ae998ad5c52
|
ce30d74fc62861c3464301b5277ca68545209371
|
refs/heads/master
| 2021-01-24T01:11:22.739918
| 2015-03-20T00:24:54
| 2015-03-20T00:24:54
| 32,551,385
| 1
| 0
| null | 2015-03-19T23:20:50
| 2015-03-19T23:20:50
| null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
__author__ = 'patricio_lopez'


class Botella:
    """A soda bottle with a fixed brand label and a size in litres."""

    def __init__(self, litros=1):
        self.litros = litros

    @property
    def etiqueta(self):
        """Brand label; the same for every bottle."""
        return "DCC-Cola"

    def beber(self):
        """Print a message for drinking from the bottle."""
        mensaje = "Deliciosa bebida {}".format(self.etiqueta)
        print(mensaje)

    def __str__(self):
        return "{} de {} litros.".format(self.etiqueta, self.litros)
|
[
"lopezjuripatricio@gmail.com"
] |
lopezjuripatricio@gmail.com
|
cbe81a3493a79fc65b094d0b27ab6eec20764273
|
638929e3a47b9ea8c0cc98336edca104c6af5e3a
|
/lib_catalog/catalog/migrations/0001_initial.py
|
172abe90b7e0935c09374bc69fe83df2b3708d7c
|
[] |
no_license
|
burbaljaka/lib_catalog
|
190e944c798c8d80685c5c9a65b663fa116f5404
|
15e971b6d17dfc8f01959ba538b304969c0f51a9
|
refs/heads/master
| 2023-06-01T06:39:47.841908
| 2022-05-17T19:52:46
| 2022-05-17T19:52:46
| 217,097,504
| 0
| 1
| null | 2023-05-07T02:35:21
| 2019-10-23T15:57:50
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,038
|
py
|
# Generated by Django 3.0.5 on 2020-05-02 10:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the catalog app.

    Creates the lookup tables first (Author, BBK, IssueCity, KeyWord,
    PublishingHouse), then Book, which references all of them through
    FK / M2M relations.  Auto-generated by Django 3.0.5; edit with care.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('author_code', models.CharField(blank=True, max_length=200, null=True)),
            ],
        ),
        migrations.CreateModel(
            # BBK: library-classification code table.
            name='BBK',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=20)),
                ('description', models.CharField(blank=True, max_length=2000, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='IssueCity',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='KeyWord',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='PublishingHouse',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            # Book comes last so its relations can point at the tables above.
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('issue_year', models.IntegerField(blank=True, null=True)),
                ('description', models.CharField(blank=True, max_length=2000, null=True)),
                ('place', models.CharField(blank=True, max_length=200, null=True)),
                ('pages', models.IntegerField(blank=True, null=True)),
                ('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.Author')),
                ('bbk', models.ManyToManyField(blank=True, to='catalog.BBK')),
                ('issue_city', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.IssueCity')),
                ('keywords', models.ManyToManyField(blank=True, to='catalog.KeyWord')),
                ('publishing_house', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.PublishingHouse')),
            ],
        ),
    ]
|
[
"kapitonov.timur@gmail.com"
] |
kapitonov.timur@gmail.com
|
7e125b5f5c7032b1a6311ab010c2cd68bed0d063
|
4fdb8e90ab2bed9bc534155806314d4b6d0047ae
|
/database/migrations/0009_auto_20150224_1925.py
|
e56dc8666a88493c29a8c0aee5569d0da248cc57
|
[] |
no_license
|
gbriones1/MODB
|
8ca04df5bc665d5b3dcc3a4f89fa167b21047d7d
|
b2aa15efe155a1e813917c720107c33cb56eef1b
|
refs/heads/master
| 2021-01-18T23:21:16.545638
| 2016-07-28T15:42:37
| 2016-07-28T15:42:37
| 32,994,355
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 782
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Rename lending_product.lending_reg to .lending, add a required
    stock counter to product, and make is_used default to False."""

    dependencies = [
        ('database', '0008_auto_20150223_1141'),
    ]

    operations = [
        migrations.RenameField(
            model_name='lending_product',
            old_name='lending_reg',
            new_name='lending',
        ),
        migrations.AddField(
            model_name='product',
            name='stock',
            # default=0 backfills existing rows; preserve_default=False
            # drops the default again after the migration runs.
            field=models.IntegerField(default=0),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='product',
            name='is_used',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
    ]
|
[
"gabriel.briones.sayeg@intel.com"
] |
gabriel.briones.sayeg@intel.com
|
64ac34971a4420b2371d118512fb9cd91ef116ce
|
5a9464a9d1543a072adf9c0dc07cbf3d3f5e5207
|
/src/examples_sensor_tests.py
|
45016d5ba083c576414ced1228d1329e54f55497
|
[] |
no_license
|
moyersjm/rosebotics2
|
e4f30196fd153a3ef86de4157cbfef31b6622096
|
15a008fba44b770540ea943fe495629a368af560
|
refs/heads/master
| 2020-04-02T13:45:37.339009
| 2018-11-14T19:43:53
| 2018-11-14T19:43:53
| 154,495,978
| 1
| 1
| null | 2018-10-24T12:20:01
| 2018-10-24T12:20:01
| null |
UTF-8
|
Python
| false
| false
| 2,216
|
py
|
"""
Capstone Project. Code for testing basics.
Author: David Mutchler, based on work by Dave Fisher and others.
Fall term, 2018-2019.
"""
import rosebotics_even_newer as rb
import time
def main():
    """Entry point: run the interactive sensor test loop."""
    run_test_sensors()
def run_test_sensors():
    """ Print sensor values each time the user presses the ENTER key.

    Loops forever (until the user enters 'q'), taking one reading of each
    sensor on the Snatch3r robot per iteration.
    """
    robot = rb.Snatch3rRobot()
    while True:
        print()
        print("Touch sensor (value, is_pressed):",
              robot.touch_sensor.get_value(),
              robot.touch_sensor.is_pressed())
        print("Color sensor (reflected intensity, color):",
              robot.color_sensor.get_reflected_intensity(),
              robot.color_sensor.get_color())
        print("Camera:", robot.camera.get_biggest_blob())
        print("Brick buttons:",
              robot.brick_button_sensor.is_back_button_pressed(),
              robot.brick_button_sensor.is_top_button_pressed(),
              robot.brick_button_sensor.is_bottom_button_pressed(),
              robot.brick_button_sensor.is_left_button_pressed(),
              robot.brick_button_sensor.is_right_button_pressed())
        # ----------------------------------------------------------------------
        #   On each run, use just ONE of the following 3 sensors:
        # ----------------------------------------------------------------------
        # NOTE(review): presumably the IR sensor hardware can serve only one
        # of these three modes at a time -- confirm against the robot docs.
        print("Proximity sensor (inches):",
              robot.proximity_sensor.get_distance_to_nearest_object_in_inches())
        # print("Beacon sensor (cm, degrees):",
        #       robot.beacon_sensor.get_distance_to_beacon(),
        #       robot.beacon_sensor.get_heading_to_beacon())
        # print("Beacon button sensor (top/bottom red, top/bottom blue):",
        #       robot.beacon_button_sensor.is_top_red_button_pressed(),
        #       robot.beacon_button_sensor.is_bottom_red_button_pressed(),
        #       robot.beacon_button_sensor.is_top_blue_button_pressed(),
        #       robot.beacon_button_sensor.is_bottom_blue_button_pressed())
        character = input(
            "Press the ENTER (return) key to get next sensor reading, or q to quit: ")
        if character == "q":
            break
main()
|
[
"mutchler@rose-hulman.edu"
] |
mutchler@rose-hulman.edu
|
54448b37275c1c6533fe3de3b724a8161ddad67e
|
caceb60f71165772b6d6155f619e79189e7c80a9
|
/第一期/上海-棒棒糖/第二次任务-每日代码练习/2017-1/1-24/str__test.py
|
92856b7febfee9c2c466704f18d9f9ea1ffca57c
|
[
"Apache-2.0"
] |
permissive
|
beidou9313/deeptest
|
ff41999bb3eb5081cdc8d7523587d7bc11be5fea
|
e046cdd35bd63e9430416ea6954b1aaef4bc50d5
|
refs/heads/master
| 2021-04-26T23:06:08.890071
| 2019-04-03T02:18:44
| 2019-04-03T02:18:44
| 123,931,080
| 0
| 0
|
Apache-2.0
| 2018-03-05T14:25:54
| 2018-03-05T14:25:53
| null |
UTF-8
|
Python
| false
| false
| 186
|
py
|
class Student(object):
    """Minimal demo of customising str()/print() output via __str__."""

    def __init__(self, name):
        self.name = name

    def __str__(self):
        # print(student) shows the name instead of the default
        # "<__main__.Student object at 0x...>" representation.
        return self.name

    # Uncomment to make repr() match str() as well:
    # __repr__ = __str__


s = Student('Michael')
print(s)
|
[
"879106261@qq.com"
] |
879106261@qq.com
|
6dc60740658b89808ba69fe241f7f9dd670cacca
|
2acf2f926441eadb1c32879bfa6f0e800055b9d9
|
/oblig6/gaussian_white_noise.py
|
475e30a109b61bf14bcbcf5cec905bcee957d390
|
[] |
no_license
|
Linueks/fys2130
|
26b400bbf878ef56d26fdc618f85b62a44515eff
|
761bef68476cb210266758ea00e17020e417a174
|
refs/heads/main
| 2023-02-24T10:05:33.951748
| 2021-01-23T13:23:34
| 2021-01-23T13:23:34
| 332,214,599
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,583
|
py
|
'''
Program that generates Gaussian white noise.
Each frequency component is generated randomly b/w 0 and
the a value assigne by Normal distribution.
Each phase component is generated randombly b/w 0 and 2pi.
The finished signal is Fourier transformed.
Sebastian G. Winther-Larsen (2017)
'''
import numpy as np
from matplotlib import pyplot as plt
def gaussian_white_noise(f_sample, N, f_center, f_width):
    """Generate band-limited Gaussian white noise.

    Each positive-frequency component gets a random amplitude drawn
    uniformly between 0 and a Gaussian envelope centred on ``f_center``
    (std dev ``f_width / 2``), and a random phase in [0, 2*pi).  The upper
    half of the spectrum is replaced with the complex conjugate of the
    lower half so that the inverse FFT is real.

    :param f_sample: sampling frequency [Hz]
    :param N: number of samples (length of the spectrum)
    :param f_center: centre frequency of the Gaussian envelope [Hz]
    :param f_width: full width of the envelope [Hz]
    :return: tuple ``(y, q)``: the complex spectrum and the real time-domain
        signal (inverse FFT scaled by 200)
    """
    # Parameters and necessary arrays
    # (unused T/t and n_center/n_sigma intermediates removed)
    f_sigma = f_width / 2
    y = np.zeros(N, 'complex')
    f = np.linspace(0, f_sample * (N - 1) / N, N)

    # Random amplitude under the Gaussian envelope, fully random phase.
    gauss = np.exp(-(f - f_center) * (f - f_center) / (f_sigma * f_sigma))
    amplitude = np.random.rand(N) * gauss
    phase = np.random.rand(N) * 2 * np.pi
    y.real = amplitude * np.cos(phase)
    y.imag = amplitude * np.sin(phase)

    # Must mirror lower half (as conjugates) so the inverse FFT is real.
    n_half = np.round(N / 2)
    for i in range(int(n_half - 1)):
        y[int(N - i - 1)] = np.conjugate(y[int(i + 1)])
    y[int(n_half)] = y[int(n_half)].real  # Nyquist bin must be real
    y[0] = 0.0                            # no DC component

    q = np.real(np.fft.ifft(y) * 200)
    return y, q
if __name__ == '__main__':
    # Demo: noise at CD sample rate, centred at 5 kHz, 500 Hz wide envelope.
    y, q = gaussian_white_noise(44100, 2**16, 5000, 500)
    #plt.plot(y.real)
    plt.plot(q)
    plt.show()
|
[
"noreply@github.com"
] |
Linueks.noreply@github.com
|
9faa3bfbaffa598e35f9d61fdffcf2ea69476498
|
c0d9e2b2135956031bbad6abef22be5a205696db
|
/src/Inc/Import.py
|
af20358268dbff2b4c09395c2ae747c47d55b783
|
[] |
no_license
|
VladVons/py-relay
|
22758805b796c23546c97f8f42c664a2ff1b4fba
|
c57c205c49b7bbb6a91c98ec326b02a36c3daaef
|
refs/heads/master
| 2023-02-05T22:15:40.884753
| 2019-03-08T06:48:07
| 2019-03-08T06:48:07
| 127,041,974
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,221
|
py
|
'''
Copyright: (c) 2017, Vladimir Vons
Author: Vladimir Vons <VladVons@gmail.com>
Created: 2017.10.20
License: GNU, see LICENSE for more details
Description:
Import = TDynImport()
Import.ParseDir('Plugin/Devices')
Import.GetInstance(ClassName)
TClass = Import.GetInstance(ClassName)
Result = TClass(None)
'''
import os
import sys
import re
#
from Inc.Log import Log
# nuitka --module Manager.py
# from XXX import *
class TDynImport():
    """Registry of classes discovered by scanning .py files on disk,
    with dynamic import / class-object lookup by class name."""

    def __init__(self):
        self.Clear()

    def Clear(self):
        """Reset the registry: {ClassName: {'Module': ..., 'Path': ...}}."""
        self.Classes = {}

    def AddClass(self, aClassName, aModule, aPath = './'):
        """Register a class and make its directory importable.

        Raises if the name is already registered: a duplicate would
        silently shadow the earlier definition.
        """
        Data = self.Classes.get(aClassName)
        if (Data):
            Msg = Log.Print(1, 'e', self.__class__.__name__, 'AddClass()', 'Class %s already exists in' % aClassName, Data)
            raise Exception(Msg)
        self.Classes[aClassName] = {'Module':aModule, 'Path': aPath}
        if (aPath not in sys.path):
            sys.path.insert(0, aPath)

    def ParseDir(self, aDir = '.'):
        """Walk aDir recursively and register every `class Xxx(...)` found
        in a .py file (naive line-regex scan, not an AST parse)."""
        for Root, Dirs, Files in os.walk(aDir):
            for File in Files:
                FilePath = Root + '/' + File
                FileName, FileExt = os.path.splitext(File)
                if (FileExt == '.py'):
                    # Read whole file; with-statement keeps it closed on error.
                    with open(FilePath, "r") as hFile:
                        Lines = hFile.readlines()
                    for Line in Lines:
                        if ('class ' in Line):
                            # Raw string avoids invalid-escape warnings for \s
                            Data = re.search(r'(class\s+)(.+)\(', Line)
                            if (Data):
                                ClassName = Data.group(2)
                                self.AddClass(ClassName, FileName, Root)

    def GetAttr(self, aClassName, aModuleName):
        """Import aModuleName and return its attribute aClassName."""
        Module = __import__(aModuleName)
        Result = getattr(Module, aClassName)
        return Result

    def GetInstance(self, aClassName, aModuleName = ''):
        """Resolve a class object by name.

        Lookup order: explicit module name, then the parsed registry,
        then this module's globals.
        """
        if (aModuleName):
            # Bug fix: previously called the nonexistent self.FromModule(),
            # raising AttributeError whenever a module name was supplied.
            Result = self.GetAttr(aClassName, aModuleName)
        elif (aClassName in self.Classes):
            Module = self.Classes[aClassName]['Module']
            Result = self.GetAttr(aClassName, Module)
        else:
            Result = globals()[aClassName]
        return Result
|
[
"vladvons@gmail.com"
] |
vladvons@gmail.com
|
b3b0f9e662f65e09d8aae750a859c059a09e7cb8
|
0ea22107790ef695ad80ddba9d6a6c1ae95e7c6e
|
/kalibr-cde/cde-root/opt/ros/hydro/lib/python2.7/dist-packages/geometry_msgs/msg/_PointStamped.py
|
b28032463564931641d9984243a9a26ef4912b22
|
[] |
no_license
|
wangrui996/camera_imu_calibration
|
0f9bc0cf737641b352fa71ae9710c735da69a732
|
4296aeac1001f21502355d8ca98d4ae214e30ffc
|
refs/heads/main
| 2023-06-19T15:19:42.618423
| 2021-07-15T06:52:20
| 2021-07-15T06:52:20
| 386,193,412
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,270
|
py
|
"""autogenerated by genpy from geometry_msgs/PointStamped.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import std_msgs.msg
class PointStamped(genpy.Message):
    # Auto-generated genpy message class for geometry_msgs/PointStamped:
    # a 3-D Point plus a std_msgs/Header (sequence, timestamp, frame id).
    # Generated code -- keep wire format changes out of here.
    _md5sum = "c63aecb41bfdfd6b7e1fac37c7cbe7bf"
    _type = "geometry_msgs/PointStamped"
    _has_header = True #flag to mark the presence of a Header object
    _full_text = """# This represents a Point with reference coordinate frame and timestamp
Header header
Point point
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.secs: seconds (stamp_secs) since epoch
# * stamp.nsecs: nanoseconds since stamp_secs
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
"""
    __slots__ = ['header','point']
    _slot_types = ['std_msgs/Header','geometry_msgs/Point']

    def __init__(self, *args, **kwds):
        """
        Constructor. Any message fields that are implicitly/explicitly
        set to None will be assigned a default value. The recommend
        use is keyword arguments as this is more robust to future message
        changes. You cannot mix in-order arguments and keyword arguments.
        The available fields are:
           header,point
        :param args: complete set of field values, in .msg order
        :param kwds: use keyword arguments corresponding to message field names
        to set specific fields.
        """
        if args or kwds:
            super(PointStamped, self).__init__(*args, **kwds)
            #message fields cannot be None, assign default values for those that are
            if self.header is None:
                self.header = std_msgs.msg.Header()
            if self.point is None:
                self.point = geometry_msgs.msg.Point()
        else:
            self.header = std_msgs.msg.Header()
            self.point = geometry_msgs.msg.Point()

    def _get_types(self):
        """
        internal API method
        """
        return self._slot_types

    def serialize(self, buff):
        """
        serialize message into buffer
        :param buff: buffer, ``StringIO``
        """
        try:
            _x = self
            # header: seq + stamp.secs + stamp.nsecs as three little-endian uint32
            buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
            _x = self.header.frame_id
            length = len(_x)
            # frame_id is a uint32 length prefix followed by utf-8 bytes
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.pack('<I%ss'%length, length, _x))
            _x = self
            # point: x, y, z as three little-endian float64
            buff.write(_struct_3d.pack(_x.point.x, _x.point.y, _x.point.z))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))

    def deserialize(self, str):
        """
        unpack serialized message in str into this message instance
        :param str: byte array of serialized message, ``str``
        """
        # NOTE: the parameter name `str` shadows the builtin (generated code).
        try:
            if self.header is None:
                self.header = std_msgs.msg.Header()
            if self.point is None:
                self.point = geometry_msgs.msg.Point()
            end = 0
            _x = self
            start = end
            end += 12
            (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.header.frame_id = str[start:end].decode('utf-8')
            else:
                self.header.frame_id = str[start:end]
            _x = self
            start = end
            end += 24
            (_x.point.x, _x.point.y, _x.point.z,) = _struct_3d.unpack(str[start:end])
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) #most likely buffer underfill

    def serialize_numpy(self, buff, numpy):
        """
        serialize message with numpy array types into buffer
        :param buff: buffer, ``StringIO``
        :param numpy: numpy python module
        """
        # Identical wire layout to serialize(); numpy variant kept for API parity.
        try:
            _x = self
            buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
            _x = self.header.frame_id
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.pack('<I%ss'%length, length, _x))
            _x = self
            buff.write(_struct_3d.pack(_x.point.x, _x.point.y, _x.point.z))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))

    def deserialize_numpy(self, str, numpy):
        """
        unpack serialized message in str into this message instance using numpy for array types
        :param str: byte array of serialized message, ``str``
        :param numpy: numpy python module
        """
        try:
            if self.header is None:
                self.header = std_msgs.msg.Header()
            if self.point is None:
                self.point = geometry_msgs.msg.Point()
            end = 0
            _x = self
            start = end
            end += 12
            (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.header.frame_id = str[start:end].decode('utf-8')
            else:
                self.header.frame_id = str[start:end]
            _x = self
            start = end
            end += 24
            (_x.point.x, _x.point.y, _x.point.z,) = _struct_3d.unpack(str[start:end])
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) #most likely buffer underfill
# Pre-compiled struct formats (little-endian): three uint32 for the header
# seq/stamp fields, three float64 for the point coordinates.
_struct_I = genpy.struct_I
_struct_3I = struct.Struct("<3I")
_struct_3d = struct.Struct("<3d")
|
[
"wangrui957@163.com"
] |
wangrui957@163.com
|
5714dbdd0d286db2b3c543a6e67af447e969ff56
|
7780f59da5cac72501b4f9b0bb0b96e8f3ded6e1
|
/tests/test_cinema.py
|
446816a08120ffe33fed7e2b9150514842fff8fb
|
[] |
no_license
|
kimbugp/movie-bookings
|
7286593b10897c27d936650e538e84e2cbd12791
|
63121b88c6e022fcb849ff7ab8da7be9844f391a
|
refs/heads/master
| 2022-12-10T01:41:01.648098
| 2020-01-21T08:36:17
| 2020-01-21T08:36:17
| 199,335,252
| 0
| 2
| null | 2022-12-08T03:17:06
| 2019-07-28T20:43:15
|
Python
|
UTF-8
|
Python
| false
| false
| 4,209
|
py
|
import json
from .basetest import BaseTestCase
class TestCinema(BaseTestCase):
    """Cinema endpoint tests: auth, permission, listing and creation."""

    def test_create_cinema_fails_with_no_authentication(self, test_client):
        # POST without an auth header must be rejected.
        data = json.dumps({})
        response = test_client.post(
            "/api/v1/cinema",
            data=data,
            headers={"Content-Type": "application/json"},
        )
        self.assertEqual(response.status_code, 401)

    def test_create_cinema_with_no_permissions_fails(self, test_client):
        response = test_client.post("/api/v1/cinema")
        self.assertEqual(response.status_code, 401)

    def test_get_cinema(self, test_client, auth_header):
        response = test_client.get("/api/v1/cinema", headers=auth_header)
        self.assertEqual(response.status_code, 200)

    def test_create_show_time_fails_with_cinema_hall_already_filled(
        self, test_client, auth_header, cinema
    ):
        # Re-posting the fixture's payload duplicates the cinema hall.
        _, data = cinema
        response = test_client.post(
            "/api/v1/cinema", data=data, headers=auth_header
        )
        assert response.status_code == 400

    # NOTE(review): missing the `test_` prefix, so pytest never collects this
    # method -- presumably intended to be `test_create_cinema_succeeds`; confirm
    # the expected payload still holds before renaming.
    def create_cinema_succeeds(self, cinema):
        response, data = cinema
        self.assertEqual(response.status_code, 201)
        self.assertEqual(
            response.json,
            {
                "seats": [
                    {"name": "A", "number": [1, 2]},
                    {"name": "B", "number": [1, 2]},
                ],
                "id": 4,
                "name": "Simon Peter",
                "description": "sdfgd",
            },
        )
class TestUpdateCinema(BaseTestCase):
    """PUT /api/v1/cinema/<id> tests: seat replacement, missing id,
    and duplicate-seat rejection."""

    def test_update_cinema_by_id_succeeds(
        self, test_client, cinema, auth_header
    ):
        # Replacing the seat map should return the full expanded seat rows.
        data = json.dumps(
            {
                "seats": [
                    {"name": "C", "number": [1, 2]},
                    {"name": "D", "number": [1, 2]},
                ]
            }
        )
        response = test_client.put(
            "/api/v1/cinema/1", data=data, headers=auth_header
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.json,
            {
                "cinema": {
                    "id": 1,
                    "name": "Cinema1",
                    "description": "SOme data",
                    "seats": [
                        {
                            "id": 12,
                            "name": "C",
                            "number": "1",
                            "cinema_hall": 1,
                        },
                        {
                            "id": 13,
                            "name": "C",
                            "number": "2",
                            "cinema_hall": 1,
                        },
                        {
                            "id": 14,
                            "name": "D",
                            "number": "1",
                            "cinema_hall": 1,
                        },
                        {
                            "id": 15,
                            "name": "D",
                            "number": "2",
                            "cinema_hall": 1,
                        },
                    ],
                }
            },
        )

    def test_update_cinema_by_id_fails(self, test_client, cinema, auth_header):
        # Unknown cinema id -> 404.
        _, data = cinema
        response = test_client.put(
            "/api/v1/cinema/100", data=data, headers=auth_header
        )
        self.assertEqual(response.status_code, 404)

    def test_update_cinema_by_id_fails_wth_same_seats(
        self, test_client, auth_header
    ):
        # Posting seats that already exist violates the unique constraint.
        data = json.dumps(
            {
                "seats": [
                    {"name": "A", "number": [1, 2]},
                    {"name": "B", "number": [1, 2]},
                ]
            }
        )
        response = test_client.put(
            "/api/v1/cinema/4", data=data, headers=auth_header
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json,
            {
                "error": " Key (name, cinema_hall, number)=(A, 4, 1) already exists.\n",
                "message": "",
            },
        )
|
[
"kimbsimon2@gmail.com"
] |
kimbsimon2@gmail.com
|
d3f3032c4627dd131772f1abe4d43e3da33a3083
|
cfd4cc45dc558eba3c45797dbb5335e51a90b581
|
/gopython3/core/tests/test_unit.py
|
02e68291594bb6c1efdd15444a5a57d4ed507f59
|
[
"MIT"
] |
permissive
|
futurecolors/gopython3
|
8724ae4e458e156b82161d6b4083ac7c5a4f3eeb
|
cfff51f86edb962bba2a51c8f7691454af54809d
|
refs/heads/dev
| 2020-05-18T21:50:11.638433
| 2014-01-21T18:28:01
| 2014-01-21T18:28:01
| 12,966,604
| 1
| 0
| null | 2014-05-17T08:38:23
| 2013-09-20T05:52:19
|
Python
|
UTF-8
|
Python
| false
| false
| 4,991
|
py
|
# coding: utf-8
from unittest.mock import patch
from django.utils import timezone
import warnings
from collections import namedtuple
from django.test import TestCase
from ..factories import SpecFactory, JobFactory
from ..models import Job, Package, Spec
from ..tasks import query_pypi
def fake_distributions(*distributions):
    """Build stand-in pip distribution objects from 'name==version' strings."""
    Distribution = namedtuple('Distribution', ['name', 'version'])
    return [Distribution(name, version)
            for name, version in (spec.split('==') for spec in distributions)]
def fake_requirement(name, specs):
    """Build a stand-in pip requirement object with no extras."""
    Requirement = namedtuple('Requirement', ['name', 'specs', 'extras'])
    return Requirement(name=name, specs=specs, extras=[])
class JobTest(TestCase):
    """Job.objects.create_from_requirements parses a requirements.txt blob
    into one Line per installable requirement (comments and unsupported
    directives skipped)."""

    def setUp(self):
        self.reqs_txt = """
-r some_missing_file
django>=1.4,<1.5
Django-Geoip==0.3
# tests below
coverage
coveralls>0.2
# TODO: VCS
        """

    def test_can_be_created_from_requirements_txt(self):
        with warnings.catch_warnings():
            # We're ignoring -r not being parsed
            # "Recursive requirements not supported. Skipping."
            warnings.simplefilter("ignore", category=UserWarning)
            job = Job.objects.create_from_requirements(self.reqs_txt)
        assert job.requirements == self.reqs_txt
        assert list(map(str, job.lines.all().order_by('pk'))) == [
            'django>=1.4,<1.5',
            'Django-Geoip==0.3',
            'coverage',
            'coveralls>0.2']
class JobStatusTest(TestCase):
    """Job.status aggregation: pending while lines are unparsed, running
    while any spec is unfinished, success when everything is done."""

    def test_completed_if_no_specs_no_lines(self):
        job = JobFactory()
        assert job.status == 'success', 'No specs, no lines'

    def test_pending_if_unparsed_lines(self):
        job = JobFactory(lines=['spanish=42,inquisition==7'])
        assert job.status == 'pending', 'It has 2 unparsed lines'

    def test_pending_if_pending_specs(self):
        job = JobFactory(specs=['foo=1,bar==2'])
        assert job.status == 'running', 'It has 2 unfinished specs, but lines are parsed'

    def test_running_if_running_and_finished_specs(self):
        job = JobFactory(specs=['foo=1,bar==2'])
        spec = job.specs.first()
        spec.status = 'running'
        spec.save()
        # Re-fetch so the aggregated status reflects the saved spec.
        job = Job.objects.get(pk=job.pk)
        assert job.status == 'running', 'Job has started, but has not finished yet'

    # NOTE(review): the method name says "running if one spec pending" but the
    # body marks all specs success and asserts 'success' -- name looks stale.
    def test_running_if_one_spec_pending(self):
        job = JobFactory(specs=['foo=1,bar==2'])
        job.specs.all().update(status='success')
        job = Job.objects.get(pk=job.pk)
        assert job.status == 'success', 'One spec pending'

    def test_running_if_finished_and_pending_specs(self):
        job = JobFactory(specs=['steve==1', 'jobs==2'])
        spec = job.specs.first()
        spec.status = 'finished'
        spec.save()
        assert job.status == 'running', 'One spec has finished, but 1 line is not parsed yet'

    def test_completed_if_specs_completed(self):
        job = JobFactory(specs=['foo=1,bar==2'])
        job.specs.all().update(status='success')
        job = Job.objects.get(pk=job.pk)
        assert job.status == 'success', 'All specs have finished'
class JobSpecTest(TestCase):
    """Line.set_distribution creates Package/Spec rows and reports whether
    each was newly created, deduplicating existing ones."""

    def test_process_requirement(self):
        job = JobFactory(lines=['Django==1.5.4'])
        package, package_created, spec, spec_created = job.lines.all()[0].set_distribution(*fake_distributions('Django==1.5.4'))
        assert list(map(str, Package.objects.all())) == ['Django']
        assert list(map(str, job.specs.all())) == ['Django==1.5.4']
        assert package_created
        assert spec_created

    def test_does_not_create_duplicate_specs(self):
        # Pre-existing spec with the same name/version must be reused.
        spec = SpecFactory(version='0.2.19', package__name='lettuce')
        job = JobFactory(lines=['lettuce==0.2.19'])
        same_package, package_created, same_spec, spec_created = job.lines.all()[0].set_distribution(*fake_distributions('lettuce==0.2.19'))
        assert not package_created
        assert not spec_created
        assert Spec.objects.count() == 1
        assert Package.objects.count() == 1
        assert job.specs.all().first().version == spec.version
        assert job.specs.all().first().package.name == spec.package.name
        assert spec.pk == same_spec.pk
        assert same_package.pk == same_spec.package.pk
class PypiTaskTest(TestCase):
    """query_pypi task stores the PyPI metadata on the Spec row."""

    @patch('api.PyPI.get_info')
    def test_updates_spec(self, get_info_mock):
        last_release_date = timezone.now()
        py3_versions = ['3', '3.2', '3.3']
        get_info_mock.return_value = {
            'last_release_date': last_release_date,
            'py3_versions': py3_versions,
        }
        spec = SpecFactory(version='0.2.19', package__name='lettuce')
        # Task returns the raw PyPI payload...
        assert query_pypi(spec.pk) == get_info_mock.return_value
        # ...and persists it onto the Spec.
        spec = Spec.objects.get(pk=spec.pk)
        assert spec.release_date == last_release_date
        assert spec.python_versions == py3_versions
|
[
"baryshev@gmail.com"
] |
baryshev@gmail.com
|
675b01dcde98168800671ad211778faa2ce9b622
|
5b9ac627bf39b01917f75d18d8ca83211a04c718
|
/cahoots/confidence/normalizers/character.py
|
eb3454f1cbed230ef870e1d916a85b2295aff56f
|
[
"MIT"
] |
permissive
|
SerenitySoftware/cahoots
|
dbfa109e4c65d20ef01c2d97d3087e7a8aede838
|
866336c51436343ff5e56f83f89dddc82a5693a3
|
refs/heads/master
| 2021-05-28T20:00:32.827485
| 2015-08-23T00:31:11
| 2015-08-23T00:31:11
| 21,884,335
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,149
|
py
|
"""
The MIT License (MIT)
Copyright (c) Serenity Software, LLC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from cahoots.confidence.normalizers.base import BaseNormalizer
class CharacterWithoutBoolean(BaseNormalizer):
    """If we get a character and not a boolean, we boost char confidence"""

    @staticmethod
    def test(types, _):
        """
        We want to normalize if there is a character and not a boolean
        :param types: list of result types
        :type types: list
        :param all_types: list of result types + subtypes
        :type all_types: list
        :return: if this normalizer should normalize this result set
        :rtype: bool
        """
        if 'Boolean' in types:
            return False
        return 'Character' in types

    @staticmethod
    def normalize(results):
        """
        setting char confidence to 100% if there's no boolean result
        :param results: list of results we want to normalize
        :type results: list
        :return: the normalized results
        :rtype: list
        """
        for result in results:
            if result.type == 'Character':
                result.confidence = 100
        return results
|
[
"ryan.vennell@gmail.com"
] |
ryan.vennell@gmail.com
|
fcc28c4fb295fc56e743a94f929317d9aac54d4f
|
085ce75a507df6e755cabb7a65c4a2a8c98762ba
|
/dockerfiles/root/.pycharm_helpers/python_stubs/-252567642/_yaml/__init__/SequenceNode.py
|
2cc52374a86f60654ef8c8168c3539260e1b19ab
|
[] |
no_license
|
Arhzi/habr-docker-article
|
d44302db1fe157d81fe0818e762e82218f50e31f
|
6fb094860b612e307beadaeb22981aa0ee64e964
|
refs/heads/master
| 2021-01-23T20:41:47.398025
| 2015-12-10T08:56:33
| 2015-12-10T08:56:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 585
|
py
|
# encoding: utf-8
# module _yaml
# from /usr/local/lib/python2.7/site-packages/_yaml.so
# by generator 1.137
# no doc
# imports
import yaml as yaml # /usr/local/lib/python2.7/site-packages/yaml/__init__.pyc
import __builtin__ as __builtins__ # <module '__builtin__' (built-in)>
import yaml.error as __yaml_error
import yaml.events as __yaml_events
import yaml.nodes as __yaml_nodes
import yaml.tokens as __yaml_tokens
class SequenceNode(__yaml_nodes.CollectionNode):
    # Auto-generated IDE stub for the C-accelerated _yaml.SequenceNode;
    # mirrors yaml.nodes.SequenceNode.
    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    # YAML node kind tag used by composers/serializers.
    id = 'sequence'
|
[
"sirnikolasd@yandex.ru"
] |
sirnikolasd@yandex.ru
|
dbed30c53f73497797ddbe0811d3e196d96974ef
|
288865b3b519222370b00cda04ffab96f46b046d
|
/dd/deal/urls.py
|
96efada9c080d2357cb3a4f0720c588ba3262eac
|
[] |
no_license
|
bcattle/dolores-deals
|
9c49daefb83f35eff65262dd14d5756a06eea66f
|
d45914c9afbeca9dbd655eee5b8ba021b2e07760
|
refs/heads/master
| 2020-12-24T16:59:28.981973
| 2011-05-08T08:29:47
| 2011-05-08T08:29:47
| 1,445,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 309
|
py
|
from django.conf.urls.defaults import *
# URL routes for the deal app (legacy string-view `patterns()` API,
# removed in Django 1.10 -- keep in mind if upgrading).
urlpatterns = patterns('deal.views',
    # regex, view fxn, args, label for get_absolute_url()
    (r'^(?P<city_slug>[-\w]+)/(?P<neighborhood_slug>[-\w]+)/(?P<deal_slug>[-\w]+)/$',
        'show_deal', { 'template_name': 'deal.html' }, 'deal_page'),
    # Bare root falls back to a default deal.
    (r'^$', 'default_deal'),
)
|
[
"bryan.cattle@gmail.com"
] |
bryan.cattle@gmail.com
|
41eeb3b1e3a7cf61c0e2b16ae63ce4c9826894f2
|
d785e993ed65049c82607a1482b45bddb2a03dda
|
/loose/loose_SingleMuon_G_cfg.py
|
46a90679fcd22958110855f1922ec226777629c0
|
[] |
no_license
|
PKUHEPEWK/ssww
|
eec02ad7650014646e1bcb0e8787cf1514aaceca
|
a507a289935b51b8abf819b1b4b05476a05720dc
|
refs/heads/master
| 2020-05-14T04:15:35.474981
| 2019-06-28T23:48:15
| 2019-06-28T23:48:15
| 181,696,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,424
|
py
|
from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
# CRAB3 job configuration: skim the 2016 Run G SingleMuon NanoAOD dataset
# with the loose selection script, writing NanoAOD back to CERNBox.
config = Configuration()

config.section_("General")
config.General.requestName = 'loose_SingleMuon_G'
config.General.transferLogs= True

config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'PSet.py'
config.JobType.scriptExe = 'crab_script.sh'
config.JobType.inputFiles = ['crab_loose_data_script.py','ssww_keep_and_drop.txt','ssww_output_branch_selection.txt','haddnano.py'] #hadd nano will not be needed once nano tools are in cmssw
config.JobType.sendPythonFolder = True

config.section_("Data")
config.Data.inputDataset = '/SingleMuon/Run2016G-Nano14Dec2018-v1/NANOAOD'
#config.Data.inputDBS = 'phys03'
config.Data.inputDBS = 'global'
config.Data.splitting = 'LumiBased'
#config.Data.splitting = 'EventAwareLumiBased'
config.Data.unitsPerJob = 60
# Golden JSON: only certified luminosity sections are processed.
config.Data.lumiMask = 'https://cms-service-dqm.web.cern.ch/cms-service-dqm/CAF/certification/Collisions16/13TeV/ReReco/Final/Cert_271036-284044_13TeV_23Sep2016ReReco_Collisions16_JSON.txt'
config.Data.outLFNDirBase = '/store/user/%s/nano2016' % (getUsernameFromSiteDB())
config.Data.publication = False
config.Data.outputDatasetTag = 'loose_SingleMuon_G'

config.section_("Site")
config.Site.storageSite = "T2_CH_CERNBOX"
#config.Site.storageSite = "T2_CH_CERN"
#config.section_("User")
#config.User.voGroup = 'dcms'
|
[
"jiexiao@pku.edu.cn"
] |
jiexiao@pku.edu.cn
|
5f8ee58ee1001869ac9653bf17b97fc00ea6d69b
|
c9952dcac5658940508ddc139344a7243a591c87
|
/tests/lab09/test_ch09_t01_befor_we_begin.py
|
deb2a8cafff91ac4cfaf68983409b92dd410c622
|
[] |
no_license
|
wongcyrus/ite3101_introduction_to_programming
|
5da1c15212528423b3df91997327fe148abef4de
|
7cd76d0861d5355db5a6e2e171735bee2e78f829
|
refs/heads/master
| 2023-08-31T17:27:06.193049
| 2023-08-21T08:30:26
| 2023-08-21T08:30:26
| 136,574,036
| 3
| 2
| null | 2023-08-21T08:30:28
| 2018-06-08T06:06:49
|
Python
|
UTF-8
|
Python
| false
| false
| 384
|
py
|
import unittest
from tests.unit_test_helper.console_test_helper import *
class TestOutput(unittest.TestCase):
    """Runs the lab09 student script and checks its captured stdout."""

    def test(self):
        # execfile (project helper) executes the script and returns its
        # globals, locals, source text and captured console output.
        temp_globals, temp_locals, content, output = execfile("lab09/ch09_t01_befor_we_begin.py")
        # Expected output: the five names, one per line.
        expected = """Adam
Alex
Mariah
Martine
Columbus
"""
        self.assertEqual(expected, output)


if __name__ == '__main__':
    unittest.main()
|
[
"cywong@vtc.edu.hk"
] |
cywong@vtc.edu.hk
|
9bd90299eaa46d54955252ca6e3183a2d1ae3d21
|
f72c689bd0d756b4817cc03cb434a228343c8936
|
/test/functional/rpc_getchaintips.py
|
c71c2d08d2237c1b28f7c8b61f61971edd4ffe9d
|
[
"MIT"
] |
permissive
|
CircuitProject/Circuit-Core
|
7f68a8b4cb180a715cb24e247b899d8d8dc29e95
|
831dc33d57050ea2955983b2e8f1fc088a819e97
|
refs/heads/main
| 2023-04-09T00:08:37.954538
| 2021-04-12T19:09:42
| 2021-04-12T19:09:42
| 357,308,816
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,177
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the getchaintips RPC.
- introduce a network split
- work on chains of different lengths
- join the network together again
- verify that getchaintips now returns two chain tips.
"""
from test_framework.test_framework import CircuitTestFramework
from test_framework.util import assert_equal
class GetChainTipsTest (CircuitTestFramework):
    """Exercise the getchaintips RPC across a network split and rejoin."""

    def set_test_params(self):
        self.num_nodes = 4

    def run_test (self):
        # Fresh chain: a single active tip at the premined height of 200.
        tips = self.nodes[0].getchaintips ()
        assert_equal (len (tips), 1)
        assert_equal (tips[0]['branchlen'], 0)
        assert_equal (tips[0]['height'], 200)
        assert_equal (tips[0]['status'], 'active')

        # Split the network and build two chains of different lengths.
        self.split_network ()

        self.nodes[0].generate(10)
        self.nodes[2].generate(20)
        # Sync each half independently; the halves diverge on purpose.
        self.sync_all([self.nodes[:2], self.nodes[2:]])

        # First half only sees its own 10-block extension (height 210).
        tips = self.nodes[1].getchaintips ()
        assert_equal (len (tips), 1)
        shortTip = tips[0]
        assert_equal (shortTip['branchlen'], 0)
        assert_equal (shortTip['height'], 210)
        assert_equal (tips[0]['status'], 'active')

        # Second half built 20 blocks (height 220): the eventual winner.
        tips = self.nodes[3].getchaintips ()
        assert_equal (len (tips), 1)
        longTip = tips[0]
        assert_equal (longTip['branchlen'], 0)
        assert_equal (longTip['height'], 220)
        assert_equal (tips[0]['status'], 'active')

        # Join the network halves and check that we now have two tips
        # (at least at the nodes that previously had the short chain).
        self.join_network ()
        tips = self.nodes[0].getchaintips ()
        assert_equal (len (tips), 2)
        assert_equal (tips[0], longTip)

        assert_equal (tips[1]['branchlen'], 10)
        assert_equal (tips[1]['status'], 'valid-fork')
        # After normalising branchlen/status the fork must equal the old
        # short tip, proving it is the same chain that lost the race.
        tips[1]['branchlen'] = 0
        tips[1]['status'] = 'active'
        assert_equal (tips[1], shortTip)


if __name__ == '__main__':
    GetChainTipsTest ().main ()
|
[
"development@SpectreSecurity.io"
] |
development@SpectreSecurity.io
|
5f6cb6c94ca4f16d8f6f26845918c9f4b4708db8
|
54d2887e3c910f68366bd0aab3c692d54245e22a
|
/abc/abc_042_125/abc067/c.py
|
b4ed060ee9fec1215b76ac1a828de13f9a096cb0
|
[] |
no_license
|
Kevinrobot34/atcoder
|
7aec367fd2c6b589e9d583dae7b3c7520ce9fa12
|
482ea508f098f81e4f19522fe518dd22c781aca9
|
refs/heads/master
| 2022-07-10T23:44:45.290022
| 2022-06-29T11:30:26
| 2022-06-29T11:30:26
| 158,081,477
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
# Split the array into a non-empty prefix and suffix and print the minimum
# absolute difference between their sums.
n = int(input())
a = list(map(int, input().split()))
total = sum(a)
best = 10 ** 15
prefix = 0
for v in a[:n - 1]:
    prefix += v
    # |prefix - (total - prefix)| == |2*prefix - total|
    best = min(best, abs(2 * prefix - total))
print(best)
|
[
"kevinrobot34@yahoo.co.jp"
] |
kevinrobot34@yahoo.co.jp
|
59cbd00de94ea0b4a3b7608732f8325b879b67fe
|
1ddbd4f7194fb52ea8344e8f80dcbd87e8d41cfc
|
/restconf/pagination.py
|
67556c262f5a6216f1bf85d067790445fbcf1af1
|
[] |
no_license
|
TruthTheDeveloper/Huggie-backend
|
cc9444571a5e147e789c2dcfaae51a694d37d917
|
8a686beb2635557f4235047cde3eccd79d3ea3b7
|
refs/heads/master
| 2023-08-20T06:12:31.465811
| 2021-10-31T18:21:15
| 2021-10-31T18:21:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
from rest_framework import pagination
class CFEAPIPagination(pagination.PageNumberPagination):
    """Default API pagination: page-number style, 10 items per page."""
    page_size = 10
    # NOTE(review): the commented settings below are LimitOffsetPagination
    # options, not PageNumberPagination — kept for reference only.
    # default_limit = 6
    # max_limit = 20
    # limit_query_param = 'lim'
|
[
"henrysempire111gmail.com"
] |
henrysempire111gmail.com
|
8ceb95bec95f8eb8abb7c04384ce7ca03d720ffd
|
48faee5b845e43e6c102cb027f43c8b886ecaa5e
|
/utils/ansible_drive/test/ansibleApi_pbtest.py
|
106e2ab5ed5e1582930d2804371155ee451f57a4
|
[] |
no_license
|
hornLK/LonedayAdmin
|
66c0a8b978967a0144a216f621c872a6d2197229
|
36ba3fe763788423801ad5ab14462624114da804
|
refs/heads/master
| 2022-12-26T06:57:47.675915
| 2018-05-15T13:08:34
| 2018-05-15T13:08:34
| 131,375,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,149
|
py
|
import json
from ansible import constants as C
from collections import namedtuple
from ansible.parsing.dataloader import DataLoader
from ansible.vars import VariableManager
from ansible.playbook.play import Play
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.utils.ssh_functions import check_for_controlpersist
from ansible.plugins.callback import CallbackBase
def inventory_list(url,api_key):
    """Fetch the host inventory from the CMDB API and return the host IPs
    as one comma-separated string.

    Authentication: an MD5 digest of "<secret>|<timestamp>" is sent in the
    X-Http-Secretkey header as "<md5hex>|<timestamp>".

    Fix: time, hashlib and requests were used but never imported anywhere
    in this file (only json is imported at the top), so this function
    raised NameError; they are imported locally here.
    """
    import time
    import hashlib
    import requests

    SECRET_API_KEY = api_key
    time_span = time.time()
    secret_data = "%s|%f" % (SECRET_API_KEY, time_span)
    hash_obj = hashlib.md5(secret_data.encode("utf-8"))
    encryption = hash_obj.hexdigest()
    send_data = encryption + "|" + str(time_span)
    headers = {'content-type': 'application/json', "X-Http-Secretkey": send_data}
    res = requests.get(url, headers=headers)
    # Response body is a JSON list of host dicts; keep only the IPs.
    dict_json = [host.get("hostIP") for host in json.loads(res.text)]
    re_str = ",".join(dict_json)
    return re_str
loader = DataLoader()

api_key = "0a37511d-be7d-4fdd-ab17-28b6c659d763"
url = "http://192.168.220.3:8890/apiv1/auths/host/list/"

# Minimal stand-in for ansible's CLI options object; only the fields read
# by the executor machinery are supplied.
Options = namedtuple('Options', ['connection', 'module_path', 'forks',
                                 'become', 'become_method', 'become_user',
                                 'check', 'diff'])
options = Options(connection='192.168.220.3',
                  module_path=['/path/to/mymodules'], forks=100, become=None,
                  become_method=None, become_user=None, check=False,
                  diff=False)
passwords = dict(vault_pass='123123')

# NOTE(review): ResultCallback is defined *below* this statement, so this
# line raises NameError when the module runs top-to-bottom — it must be
# moved after the class definition.
results_callback = ResultCallback()
class ResultCallback():
    """Debug callback that dumps the playbook object on playbook start.

    NOTE(review): does not inherit from ansible's CallbackBase (imported at
    the top of this file) — confirm whether that is intentional.
    """
    def v2_playbook_on_start(self,playbook):
        print(playbook)
        print(dir(playbook))
class LkPlaybookExecutor(PlaybookExecutor):
    """PlaybookExecutor variant that lets the caller inject a custom stdout
    callback before delegating to the stock constructor."""

    def __init__(self,playbooks,inventory,variable_manager,loader,options,passwords,stdout_callback=None):
        # Stash the callback first; the base __init__ builds the task queue
        # manager which presumably reads self._stdout_callback — TODO
        # confirm against the installed ansible version.
        self._stdout_callback=stdout_callback
        super(LkPlaybookExecutor,self).__init__(playbooks, inventory,
                                                variable_manager, loader,
                                                options, passwords)
|
[
"bjlkq546449541@gmail.com"
] |
bjlkq546449541@gmail.com
|
08442036c4b07c98ed97b71bbee402abbcfc2004
|
be9a56d49a308b5d70c57989d11c7e6207d9d349
|
/pynext/stats.py
|
45a94ac1e650e4253f9a9df750d0fddbc56f5793
|
[] |
no_license
|
jjgomezcadenas/pynextsw
|
ab7e9823f8eb12424084c849c7c099ac6a64351b
|
84db6ce3eb2cac3567dce9950a35fbbe4027f0fd
|
refs/heads/master
| 2020-12-14T14:38:48.940016
| 2020-02-10T17:39:51
| 2020-02-10T17:39:51
| 234,772,846
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,040
|
py
|
import numpy as np
from typing import Tuple, List
from numpy import sqrt
NN = np.nan
from . pynext_types import Number, Array, Str, Range
def in_range(data, minval=-np.inf, maxval=np.inf):
    """Element-wise membership test for the half-open interval [minval, maxval).

    Parameters
    ----------
    data : np.ndarray
        Data set of arbitrary dimension.
    minval : int or float, optional
        Inclusive lower bound (defaults to -inf).
    maxval : int or float, optional
        Exclusive upper bound (defaults to +inf).

    Returns
    -------
    np.ndarray
        Boolean mask with the same shape as ``data``: True where the value
        lies inside the interval, False elsewhere.
    """
    above_min = minval <= data
    below_max = data < maxval
    return above_min & below_max
def relative_error_ratio(a : float, sigma_a: float, b :float, sigma_b : float) ->float:
    """Relative uncertainty of a ratio a/b: the quadrature sum of the two
    relative errors, sqrt((sigma_a/a)**2 + (sigma_b/b)**2)."""
    rel_a = sigma_a / a
    rel_b = sigma_b / b
    return sqrt(rel_a ** 2 + rel_b ** 2)
def mean_and_std(x : np.array, range_ : Tuple[Number, Number])->Tuple[Number, Number]:
    """Computes mean and std for an array within a range: takes into account nans"""
    # NN is np.nan (module-level alias): the all-NaN / empty fallback result.
    mu = NN
    std = NN
    if np.count_nonzero(np.isnan(x)) == len(x):  # all elements are nan
        mu = NN
        std = NN
    elif np.count_nonzero(np.isnan(x)) > 0:
        # NOTE(review): when only *some* elements are NaN, the range_ filter
        # is skipped and the nan-aware statistics run over the full array —
        # confirm this asymmetry is intentional.
        mu = np.nanmean(x)
        std = np.nanstd(x)
    else:
        x = np.array(x)
        if len(x) > 0:
            # Restrict to values inside [range_[0], range_[1]).
            y = x[in_range(x, *range_)]
            if len(y) == 0:
                # Empty slice: warn and fall back to the unfiltered data.
                print(f'warning, empty slice of x = {x} in range = {range_}')
                print(f'returning mean and std of x = {x}')
                y = x
            mu = np.mean(y)
            std = np.std(y)
    return mu, std
def gaussian_experiment(nevt : Number = 1e+3,
mean : float = 100,
std : float = 10)->np.array:
Nevt = int(nevt)
e = np.random.normal(mean, std, Nevt)
return e
def gaussian_experiments(mexperiments : Number = 1000,
                         nsample : Number = 1000,
                         mean : float = 1e+4,
                         std : float = 100)->List[np.array]:
    """Repeat gaussian_experiment ``mexperiments`` times, each experiment
    drawing ``nsample`` values from N(mean, std)."""
    experiments = []
    for _ in range(mexperiments):
        experiments.append(gaussian_experiment(nsample, mean, std))
    return experiments
def gaussian_experiments_variable_mean_and_std(mexperiments : Number = 1000,
                                               nsample : Number = 100,
                                               mean_range : Range =(100, 1000),
                                               std_range : Range =(1, 50))->Tuple[np.array, np.array, List[np.array]]:
    """Generate pseudo-experiments with uniformly drawn means and stds.

    ``nsample`` means and ``nsample`` stds are drawn from ``mean_range`` and
    ``std_range``; one experiment of ``mexperiments`` events is generated for
    every (mean, std) pair, i.e. nsample**2 experiments in total.

    Fix: the return annotation previously said ``List[np.array]`` but the
    function returns a 3-tuple ``(means, stds, exps)``.

    NOTE(review): the roles of mexperiments (events per experiment) and
    nsample (number of mean/std draws) look swapped relative to their names;
    behaviour is kept as-is for compatibility — confirm with callers.
    """
    Nevt = int(mexperiments)
    sample = int(nsample)
    stds = np.random.uniform(low=std_range[0], high=std_range[1], size=sample)
    means = np.random.uniform(low=mean_range[0], high=mean_range[1], size=sample)
    exps = [gaussian_experiment(Nevt, mean, std) for mean in means for std in stds]
    return means, stds, exps
def smear_e(e : np.array, std : float)->np.array:
    """Gaussian-smear every energy in ``e`` with fixed resolution ``std``."""
    smeared = [np.random.normal(energy, std) for energy in e]
    return np.array(smeared)
|
[
"jjgomezcadenas@gmail.com"
] |
jjgomezcadenas@gmail.com
|
0ad83e8f3d57405b7257baea33455b48fb6456a6
|
efb9647a0c0f8f80e5c25abacbb097e9d74dc042
|
/hooks/push-git-commit-ectomy-gh-pages
|
0a8f6f141f5c5c8113aa28c73a0de44df1effa49
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"Apache-2.0"
] |
permissive
|
charlesreid1/b-captain-hook
|
3bf38f24b9cd017e36f90e3481dd2780e553c8bc
|
361f59c21a733a484f48e9bd60bce2d94dbf7b1b
|
refs/heads/master
| 2020-03-17T06:00:49.865927
| 2019-07-13T06:06:30
| 2019-07-13T06:06:30
| 133,337,907
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,007
|
#!/usr/bin/env python
# Webhook handler: fired on push to git-commit-ectomy @ gh-pages; keeps the
# published htdocs checkout in sync with the upstream repository.

from datetime import datetime

repo = "git-commit-ectomy"
org = "charlesreid1"
branch = "gh-pages"

action = 'push'
name = '%s'%(repo)
git_url = 'https://git.charlesreid1.com/%s/%s.git'%(org,repo)
logfile = '/tmp/{action}-{name}-{branch}.log'.format(action=action,
                                                    name=name,
                                                    branch=branch)

# NOTE(review): indentation below is reconstructed — every statement that
# writes to `f` must live inside this `with`, otherwise the handle is closed.
with open(logfile,'w') as f:
    f.write("\n")
    f.write("-"*40)
    f.write(datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
    f.write("\n")

    import subprocess
    import os

    # ------------------
    # pages
    #
    # for a hypothetical repo "project":
    #
    # .git dir: /www/pages.charlesreid1.com/git.project
    # htdocs dir: /www/pages.charlesreid1.com/htdocs/project

    root = '/www'
    pages = 'pages.charlesreid1.com'
    basedir = os.path.join(root,pages)
    workdir = os.path.join(basedir,"htdocs",name)
    gitdir = os.path.join(basedir,"git.%s"%(name))

    if( os.path.isdir( gitdir )
        and os.path.isdir( os.path.join(basedir,"htdocs")) ):

        # pull: both dirs exist, so update the existing checkout
        pullcmd = ["git","--git-dir=%s"%(gitdir),"--work-tree=%s"%(workdir),"pull","origin","gh-pages"]
        f.write("About to run the command:\n")
        f.write(" $ " + " ".join(pullcmd))
        f.write("\n")
        #subprocess.call(pullcmd)
        # NOTE(review): p.stdout.readline() returns bytes under Python 3
        # while f is a text handle — confirm this hook runs under Python 2.
        p = subprocess.Popen(pullcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        f.write(p.stdout.readline())
        f.write(p.stderr.readline())

    else:

        # clone: first deployment, keep the git dir separate from the work tree
        mkdircmd = ["mkdir","-p",basedir]
        clonecmd = ["git","clone","--separate-git-dir=%s"%(gitdir),"-b","gh-pages",git_url,workdir]
        f.write("About to run the command:\n")
        f.write(" $ " + " ".join(clonecmd))
        f.write("\n")
        p = subprocess.Popen(clonecmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        f.write(p.stdout.readline())
        f.write(p.stderr.readline())
|
[
"charlesreid1@gmail.com"
] |
charlesreid1@gmail.com
|
|
32c3a1099738887f52f94cc6ce3f142833c8b14a
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-gsn-edf/gsn-edf_ut=2.5_rd=1_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=68/params.py
|
3d804381e49cd62d377929a2bbcf69c13ca14dea
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
# Parameters for one scheduling trial (GSN-EDF, trial 68) — a bare dict
# literal, presumably parsed by the experiment harness (e.g. via
# ast.literal_eval) — TODO confirm against the consumer.
{'cpus': 4,
 'duration': 30,
 'final_util': '2.537238',
 'max_util': '2.5',
 'periods': 'harmonic-2',
 'release_master': False,
 'res_distr': '1',
 'res_nmb': '4',
 'res_weight': '0.04',
 'scheduler': 'GSN-EDF',
 'trial': 68,
 'utils': 'uni-medium-3'}
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
2661e70feec5cebeefae3f67c6b64bbe35929ef3
|
f700710ad4f7b776a2715c3bded94f6e763703b3
|
/BucketConfig.py
|
426a618961853f48fcbef5b45de3590a09d85638
|
[
"Apache-2.0"
] |
permissive
|
hasithadkr7/udp_150
|
445496d7d1eb316dd787a1fadafc70627cad9abb
|
b88e27cd254e12c97a4120e311d7269b1f7cf724
|
refs/heads/master
| 2020-03-09T11:05:13.655372
| 2018-04-26T06:51:25
| 2018-04-26T06:51:25
| 128,752,816
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 731
|
py
|
#!/bin/python
# Constants for retrieving rain-cell and mean-rainfall files from Google
# Cloud Storage buckets.

# Common.
FILE_GEN_TIME = '18:00'
BUCKET_NAME = 'curwsl_nfs_1'
WRF_NODE = 'wrf0'
INITIAL_PATH_PREFIX = 'results/'
KEY_FILE_PATH = '/hec-hms/uwcc-admin.json'

# For Rain cell
RAIN_CELL_DIR = '/hec-hms/Raincell/'
WRF_RAINCELL_FILE_ZIP = 'RAINCELL_150m.zip'
WRF_RAIN_CELL_FILE = 'RAINCELL_150m.DAT'
RAIN_CELL_FILE = 'RAINCELL.DAT'
# Days to shift back when resolving dated paths.
# (Fix: this constant was assigned twice with the same value; the duplicate
# assignment further down has been removed.)
DEFAULT_DATE_SHIFT = 1

# For Mean-Rf
MEAN_REF_DIR = '/hec-hms/Meanref/'
MEAN_REF_FILE = 'kub_mean_rf.txt'

# For RF data
RF_DIR = '/hec-hms/Rainfall/'
RF_FILE = 'Norwood_stations_rf.txt'

# For sub-basin mean RF data
SUB_REF_DIR = '/hec-hms/Subref/'
SUB_REF_FILE = 'klb_mean_rf.txt'

RF_FILE_SUFFIX = 'stations_rf.txt'
|
[
"hasithadkr7@gmail.com"
] |
hasithadkr7@gmail.com
|
2d2d33b1f96726237fe2033b2cfd6180cb799052
|
74768f285874ee5d7606cde6efc21e291782996b
|
/web/dispatch/resource/dispatch.py
|
7b2cc6650ae1e75a541e322212cc6912ee919c08
|
[
"MIT"
] |
permissive
|
marrow/web.dispatch.resource
|
7354ec6b124b7c17744a810f5823c7856a2b6992
|
5f4e0a8ddbedba2390d9aaa0b8bf26292e8605f9
|
refs/heads/master
| 2023-01-24T11:26:03.140864
| 2016-09-26T15:02:39
| 2016-09-26T15:02:39
| 32,566,614
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,822
|
py
|
# encoding: utf-8
import warnings
if __debug__:
from collections import deque
from functools import partial
from inspect import isclass, ismethod
from .exc import InvalidMethod
log = __import__('logging').getLogger(__name__)
def invalid_method(*args, **kw):
    """Terminal endpoint yielded when the request verb has no handler;
    raises InvalidMethod regardless of arguments."""
    raise InvalidMethod()
class ResourceDispatch(object):
    """Dispatcher that walks a request path + HTTP verb onto a resource
    object, yielding (consumed_path_element, handler, is_endpoint) steps."""

    __slots__ = ()  # stateless: instances carry no attributes

    def __repr__(self):
        return "ResourceDispatch(0x{id})".format(id=id(self), self=self)

    def __call__(self, context, obj, path):
        # Lower-cased request verb, e.g. 'get', 'post'.
        verb = getattr(context, 'environ', context)['REQUEST_METHOD'].lower()

        if __debug__:
            # Development nicety: cast str/iterable paths to deque, warning
            # that production code must pass a deque.
            if not isinstance(path, deque):  # pragma: no cover
                warnings.warn(
                    "Your code is not providing the path as a deque; this will be cast in development but"
                    "will explode gloriously if run in a production environment.",
                    RuntimeWarning, stacklevel=1
                )
                if isinstance(path, str):
                    path = deque(path.split('/')[1 if not path or path.startswith('/') else 0:])
                else:
                    path = deque(path)

        log.debug("Preparing resource dispatch. " + repr(obj), extra=dict(
            dispatcher = repr(self),
            context = repr(context),
            obj = repr(obj),
            path = list(path),
            verb = verb,
        ))

        if isclass(obj):
            obj = obj(context, None, None)
            yield None, obj, False  # Announce class instantiation.

        context.resource = obj
        consumed = None
        Resource = getattr(obj, '__resource__', None)
        # Public attributes form the routable surface; OPTIONS is always
        # allowed and HEAD piggybacks on GET when GET exists.
        safe = {i for i in dir(obj) if i[0] != '_'} | {'options'}
        if 'get' in safe: safe.add('head')

        if 'collection' not in context:
            context.collection = None

        if 'response' in context:
            # Advertise the allowed verbs on the response.
            context.response.allow = {i.upper() for i in safe if ismethod(getattr(obj, i, None)) or i in {'head', 'options'}}

        if path and path[0] in safe:
            # Next path element names a public attribute: dispatch to it.
            consumed = attr = path.popleft()
            attr = getattr(obj, attr, None)

            # head/options fall back to the generic implementations below.
            if not attr and consumed in {'head', 'options'}:
                attr = partial(getattr(self, consumed), obj)

            if isclass(attr):
                yield consumed, attr(context, obj, None), False
                return

            yield consumed, attr, True
            return

        if path and Resource:
            # Treat the next path element as a key into the collection.
            context.collection = obj

            try:
                obj = Resource(context, obj, obj[path[0]])
            except KeyError:
                pass
            else:
                yield path.popleft(), obj, False
                return

        if verb and verb in safe:
            # No path left: dispatch on the HTTP verb itself.
            obj = getattr(obj, verb, None)

            if not obj and verb in {'head', 'options'}:
                obj = partial(getattr(self, verb), obj)

            yield None, obj, True
            return

        # Nothing matched: yield the 405-style terminal handler.
        yield None, invalid_method, True

    def head(self, obj, *args, **kw):
        """Allow the get method to set headers, but return no content.

        This performs an internal GET and strips the body from the response.
        """
        obj.get(*args, **kw)
        return

    def options(self, obj, *args, **kw):
        """The allowed methods are present in the returned headers."""
        return None
|
[
"alice@gothcandy.com"
] |
alice@gothcandy.com
|
4e7c5cb3a9bddaad548a8537a1f053a9bb28304a
|
3416464630bc3322dd677001811de1a6884c7dd0
|
/dynamic_program/q121_bestTimeToBuyAndSellStock/dp_solution.py
|
e9bd3a005e1a0987481b0a013628cfb1511719b8
|
[] |
no_license
|
ttomchy/LeetCodeInAction
|
f10403189faa9fb21e6a952972d291dc04a01ff8
|
14a56b5eca8d292c823a028b196fe0c780a57e10
|
refs/heads/master
| 2023-03-29T22:10:04.324056
| 2021-03-25T13:37:01
| 2021-03-25T13:37:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
FileName: dp_solution.py
Description:
Author: Barry Chow
Date: 2020/10/19 10:52 PM
Version: 0.1
"""
class Solution(object):
    def maxProfit(self, prices):
        """Single-transaction stock profit.

        Track the cheapest price seen so far; the answer is the largest gap
        between any later price and that running minimum.

        :type prices: List[int]
        :rtype: int
        """
        if not prices:
            return 0
        best = 0
        cheapest = prices[0]
        for price in prices[1:]:
            if price <= cheapest:
                cheapest = price
            elif price - cheapest > best:
                best = price - cheapest
        return best
|
[
"zhouenguo@163.com"
] |
zhouenguo@163.com
|
5052bed389896a4f70a830f17f2280b6968dce56
|
38238f576b302835a285954711c62c69e65009c0
|
/about_page/migrations/0007_auto_20201124_1313.py
|
02fac6e66687346980f0c787624df864fb9ac062
|
[] |
no_license
|
iamdarshan7/New
|
ca107680c247fa94340bfc3937edc6dff7b8060e
|
9075f50438e3f9911dd0b27d7c5e2806f25f4d3c
|
refs/heads/master
| 2023-01-21T00:01:31.489631
| 2020-11-27T05:00:48
| 2020-11-27T05:00:48
| 316,404,056
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,277
|
py
|
# Generated by Django 2.2.17 on 2020-11-24 13:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds the Teamsec1/Teamsec2 team-section models and
    relaxes aboutsec6.sec6_title to allow blank/null values."""

    dependencies = [
        ('about_page', '0006_auto_20201124_0926'),
    ]

    operations = [
        migrations.CreateModel(
            name='Teamsec1',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sec1_title', models.CharField(blank=True, max_length=100, null=True)),
                ('sec1_image', models.ImageField(upload_to='Images/')),
            ],
        ),
        migrations.CreateModel(
            name='Teamsec2',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sec2_title', models.CharField(blank=True, max_length=100, null=True)),
                ('sec2_name', models.CharField(blank=True, max_length=100, null=True)),
                ('sec2_image', models.ImageField(upload_to='Images/')),
            ],
        ),
        migrations.AlterField(
            model_name='aboutsec6',
            name='sec6_title',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
    ]
|
[
"darshanthapa872@gmail.com"
] |
darshanthapa872@gmail.com
|
7c81a30415a63c9cc197660d8bea9891378f1cb6
|
de712ec0d239fc36f7d7b4b11e9c0e6d6a45458b
|
/src/aptus/gui/help.py
|
d48d15c5dc912347cfb79a493963db3f7e5c2509
|
[] |
no_license
|
nedbat/aptus
|
b76f241df5aedc2dc92ffe1f6b6bfe222aca0810
|
b58a914efa868ce85151ba8f0361912d77c3a2cb
|
refs/heads/master
| 2023-08-20T03:40:57.405977
| 2023-08-08T14:13:26
| 2023-08-08T14:13:26
| 276,628,031
| 21
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,265
|
py
|
""" Help dialog for Aptus.
"""
import webbrowser
import sys
import numpy
import wx
import wx.html2
import wx.lib.layoutf
from PIL import Image
from aptus import data_file, __version__
from aptus.options import AptusOptions
class HtmlDialog(wx.Dialog):
    """ A simple dialog for displaying HTML, with clickable links that launch
    a web browser, or change the page displayed in the dialog.
    """
    def __init__(self, parent, caption, pages, subs=None,
        pos=wx.DefaultPosition, size=(500,530),
        style=wx.DEFAULT_DIALOG_STYLE):
        # pages: name -> HTML fragment; each page is prefixed by pages['head'].
        # subs: %-format substitutions applied to the assembled HTML.
        wx.Dialog.__init__(self, parent, -1, caption, pos, size, style)
        if pos == (-1, -1):
            self.CenterOnScreen(wx.BOTH)

        self.pages = pages
        self.subs = subs or {}

        self.html = wx.html2.WebView.New(self)
        self.html.Bind(wx.html2.EVT_WEBVIEW_NAVIGATING, self.on_navigating)

        ok = wx.Button(self, wx.ID_OK, "OK")
        ok.SetDefault()

        # Layout constraints: the WebView fills the dialog above the OK button.
        lc = wx.lib.layoutf.Layoutf('t=t#1;b=t5#2;l=l#1;r=r#1', (self,ok))
        self.html.SetConstraints(lc)

        self.set_page('interactive')

        lc = wx.lib.layoutf.Layoutf('b=b5#1;r=r5#1;w!80;h*', (self,))
        ok.SetConstraints(lc)

        self.SetAutoLayout(1)
        self.Layout()

    def on_navigating(self, event):
        # External links open in the system browser; 'internal:' links switch
        # pages inside the dialog; both navigations are vetoed.
        url = event.GetURL()
        if url == "":
            event.Veto()
        elif url.startswith(("http:", "https:")):
            webbrowser.open(url)
            event.Veto()
        elif url.startswith('internal:'):
            self.set_page(url.split(':')[1])

    def set_page(self, pagename):
        # Assemble head + page body, apply %-substitutions, then display.
        html = self.pages['head'] + self.pages[pagename]
        html = html % self.subs
        self.html.SetPage(html, "")
# The help text

is_mac = ('wxMac' in wx.PlatformInfo)

# Substitutions applied to every help page (see HtmlDialog.set_page).
TERMS = {
    'ctrl': 'cmd' if is_mac else 'ctrl',  # platform-appropriate modifier key name
    'iconsrc': data_file('icon48.png'),
    'version': __version__,
    'python_version': sys.version,
    'wx_version': wx.__version__,
    'numpy_version': numpy.__version__,
    'pil_version': Image.__version__,
}
HELP_PAGES = {
'head': """\
<style>
kbd {
display: inline-block;
background: #f0f0f0;
border: 2px solid #888;
border-color: #888 #333 #333 #888;
border-radius: .25em;
padding: .1em .25em;
margin: .1em;
}
</style>
<table width='100%%'>
<tr>
<td width='50' valign='top'><img src='%(iconsrc)s'/></td>
<td valign='top'>
<b>Aptus %(version)s</b>, Mandelbrot set explorer.<br>
Copyright 2007-2020, Ned Batchelder.<br>
<a href='https://nedbatchelder.com/code/aptus'>http://nedbatchelder.com/code/aptus</a>
</td>
</tr>
</table>
<p>
<a href='internal:interactive'>Interactive</a> |
<a href='internal:command'>Command line</a> |
<a href='internal:about'>About</a></p>
<hr>
""",
'interactive': """
<p><b>Interactive controls:</b></p>
<blockquote>
<kbd>a</kbd>: set the angle of rotation.<br>
<kbd>c</kbd>: toggle continuous coloring.<br>
<kbd>f</kbd>: toggle full-screen display.<br>
<kbd>h</kbd> or <kbd>?</kbd>: show this help.<br>
<kbd>i</kbd>: set the limit on iterations.<br>
<kbd>j</kbd>: jump among a few pre-determined locations.<br>
<kbd>n</kbd>: create a new window.<br>
<kbd>o</kbd>: open a saved settings or image file.<br>
<kbd>r</kbd>: redraw the current image.<br>
<kbd>s</kbd>: save the current image or settings.<br>
<kbd>w</kbd>: set the window size.<br>
<kbd><</kbd> or <kbd>></kbd>: switch to the next palette.<br>
<kbd>,</kbd> or <kbd>.</kbd>: cycle the current palette one color.<br>
<kbd>;</kbd> or <kbd>'</kbd>: stretch the palette colors (+%(ctrl)s: just a little), if continuous.<br>
<kbd>[</kbd> or <kbd>]</kbd>: adjust the hue of the palette (+%(ctrl)s: just a little).<br>
<kbd>{</kbd> or <kbd>}</kbd>: adjust the saturation of the palette (+%(ctrl)s: just a little).<br>
<kbd>0</kbd> (zero): reset all palette adjustments.<br>
<kbd>space</kbd>: drag mode: click to drag the image to a new position.<br>
<kbd>shift</kbd>: indicate a point of interest for Julia set and point info.<br>
<b>left-click</b>: zoom in (+%(ctrl)s: just a little).<br>
<b>right-click</b>: zoom out (+%(ctrl)s: just a little).<br>
<b>left-drag</b>: select a new rectangle to display.<br>
<b>middle-drag</b>: drag the image to a new position.<br>
</blockquote>
<p><b>Tool windows: press a key to toggle on and off:</b></p>
<blockquote>
<kbd>J</kbd> (shift-j): Show a Julia set for the current (shift-hovered) point.<br>
<kbd>l</kbd> (ell): Show zoom snapshots indicating the current position.<br>
<kbd>p</kbd>: Show a list of palettes that can be applied to the current view.<br>
<kbd>q</kbd>: Show point info for the current (shift-hovered) point.<br>
<kbd>v</kbd>: Show statistics for the latest calculation.
</blockquote>
""",
'command': """
<p>On the command line, use <tt><b>--help</b></tt> to see options:</p>
<pre>""" + AptusOptions(None).options_help() + "</pre>",
'about': """
<p>Built with
<a href='http://python.org'>Python</a>, <a href='http://wxpython.org'>wxPython</a>,
<a href='http://numpy.scipy.org/'>numpy</a>, and
<a href='http://www.pythonware.com/library/pil/handbook/index.htm'>PIL</a>.</p>
<p>Thanks to Rob McMullen and Paul Ollis for help with the drawing code.</p>
<hr>
<p>Installed versions:</p>
<p>
Aptus: %(version)s<br>
Python: %(python_version)s<br>
wx: %(wx_version)s<br>
numpy: %(numpy_version)s<br>
PIL: %(pil_version)s
</p>
""",
}
class HelpDlg(HtmlDialog):
    """ The help dialog for Aptus.
    """
    def __init__(self, parent):
        # Wider than the HtmlDialog default so the key tables fit.
        HtmlDialog.__init__(self, parent, "Aptus", HELP_PAGES, subs=TERMS, size=(650,530))
|
[
"ned@nedbatchelder.com"
] |
ned@nedbatchelder.com
|
ed02247eb0a5e87b05a7dea03227101bca64ab60
|
71ed291b47017982a38524b4ff8fe94aa947cc55
|
/String/LC389. Find the difference.py
|
46b2982b623687adc315be5bab3e80fd64c6bc44
|
[] |
no_license
|
pingting420/LeetCode_Algorithms
|
da83b77e8f37bd4f461b0a7e59c804871b6151e5
|
f8786864796027cf4a7a8b0ad76e0b516cd99b54
|
refs/heads/main
| 2023-07-17T22:46:08.803128
| 2021-09-02T22:06:38
| 2021-09-02T22:06:38
| 375,401,021
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
from collections import Counter


def findTheDifference(s, t):
    """Return the single extra character present in t but not in s.

    Fixes: `COunter` typo (NameError) corrected to `Counter`, and the
    Counter import that was missing from this snippet has been added.
    """
    s = Counter(s)
    t = Counter(t)
    # Multiset subtraction leaves only the surplus character.
    for i in (t - s):
        return i


def findTheDifference(s, t):
    # One-liner variant of the same multiset subtraction.
    return list(Counter(t) - Counter(s))[0]


def findTheDifference(s, t):
    # Count-comparison variant; as the last definition, this one wins.
    for i in set(t):
        if s.count(i) != t.count(i):
            return i
|
[
"bellapingting@gmial.com"
] |
bellapingting@gmial.com
|
dd408d7bb7c75f2b873fb135914f22ae124a2df8
|
5d622c4b5df54f880f9476931ffb697afc63a9e2
|
/src/runtime/workflows/guard_engine/guard_engine_wf.py
|
e1df46e5ac3932440dee2b15b1ee176242671edd
|
[] |
no_license
|
anirudh458/final-lab-test
|
96d27219387c91f7f7fd346899324dd672eb21cb
|
e9aab1cd5c82993941d605cfa4a045a8db01036f
|
refs/heads/master
| 2021-06-16T19:06:41.569044
| 2017-05-31T19:22:59
| 2017-05-31T19:22:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,220
|
py
|
from runtime.components.guard.guard import Guard
from runtime.components.engine.engine import Guard
from runtime.emgrs.svem.svem import EntityMgr
from runtime.components import guard
from runtime.components import engine
class GuardSysWf():
    """Workflow wiring a Guard (validator) and an Engine (executor) around a
    shared entity manager, with command handlers registered on both."""

    def __init__(self):
        em = EntityMgr()
        # NOTE(review): both file-level imports bind the name `Guard` — the
        # second (`from runtime.components.engine.engine import Guard`)
        # shadows the first, and `Engine` below is undefined; that import
        # should presumably bring in `Engine`.
        guard = Guard(em)
        engine = Engine(em)

        # set up routes in the guard
        # NOTE(review): `Cmd` is never imported in this module — confirm
        # where the command enum should come from.
        guard.add_command_handler(Cmd.add_user, guard.add_user.add_user.AddUser.do)
        guard.add_command_handler(Cmd.del_user, guard.del_user.del_user.DelUser.do)
        guard.add_command_handler(Cmd.show_users, guard.show_users.show_users.ShowUsers.do)

        # set up routes in the engine
        engine.add_command_handler(Cmd.add_user, engine.add_user.add_user.AddUser.do)
        engine.add_command_handler(Cmd.del_user, engine.del_user.del_user.DelUser.do)
        engine.add_command_handler(Cmd.show_users, engine.show_users.show_users.ShowUsers.do)

        self.em = em
        self.guard = guard
        self.engine = engine

    def run(instr):
        # NOTE(review): missing `self` parameter — called as a method,
        # `instr` receives the instance and the `self` references below
        # raise NameError; also `self.sys` is never assigned (presumably
        # `self.engine` was intended).
        result = None
        try:
            # action same as instr
            action = self.guard.do(instr)
            result = self.sys.do(action)
        except Exception as e:
            result = e
        finally:
            return result
|
[
"ravulaanirudh7@gmail.com"
] |
ravulaanirudh7@gmail.com
|
44f7af07f42fb608cedc3c5e1f67676fcd65524f
|
ebc00ddf4c8c5f5076471e8b8d56c2b634c51230
|
/test/functional/mempool_persist.py
|
9e50e3a3ba0e5b22e31c19a362dc431b32c0d05b
|
[
"MIT"
] |
permissive
|
BlockMechanic/rain
|
584a9e245cfb7ab5fb1add97b699b86833bfbc5b
|
e8818b75240ff9277b0d14d38769378f05d0b525
|
refs/heads/master
| 2021-07-03T03:48:53.977665
| 2021-03-04T01:28:20
| 2021-03-04T01:28:20
| 228,412,343
| 0
| 0
|
MIT
| 2019-12-16T15:03:28
| 2019-12-16T15:03:27
| null |
UTF-8
|
Python
| false
| false
| 6,556
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Rain Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool persistence.
By default, raind will dump mempool on shutdown and
then reload it on startup. This can be overridden with
the -persistmempool=0 command line option.
Test is as follows:
- start node0, node1 and node2. node1 has -persistmempool=0
- create 5 transactions on node2 to its own address. Note that these
are not sent to node0 or node1 addresses because we don't want
them to be saved in the wallet.
- check that node0 and node1 have 5 transactions in their mempools
- shutdown all nodes.
- startup node0. Verify that it still has 5 transactions
in its mempool. Shutdown node0. This tests that by default the
mempool is persistent.
- startup node1. Verify that its mempool is empty. Shutdown node1.
This tests that with -persistmempool=0, the mempool is not
dumped to disk when the node is shut down.
- Restart node0 with -persistmempool=0. Verify that its mempool is
empty. Shutdown node0. This tests that with -persistmempool=0,
the mempool is not loaded from disk on start up.
- Restart node0 with -persistmempool. Verify that it has 5
transactions in its mempool. This tests that -persistmempool=0
does not overwrite a previously valid mempool stored on disk.
- Remove node0 mempool.dat and verify savemempool RPC recreates it
and verify that node1 can load it and has 5 transactions in its
mempool.
- Verify that savemempool throws when the RPC is called if
node1 can't write to disk.
"""
from decimal import Decimal
import os
from test_framework.test_framework import RainTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, wait_until
class MempoolPersistTest(RainTestFramework):
    def set_test_params(self):
        # Three nodes; node1 runs with mempool persistence disabled so the
        # test can contrast persistent vs non-persistent behaviour.
        self.num_nodes = 3
        self.extra_args = [[], ["-persistmempool=0"], []]
    def skip_test_if_missing_module(self):
        # The test sends wallet transactions, so a wallet build is required.
        self.skip_if_no_wallet()
def run_test(self):
chain_height = self.nodes[0].getblockcount()
assert_equal(chain_height, 200)
self.log.debug("Mine a single block to get out of IBD")
self.nodes[0].generate(1)
self.sync_all()
self.log.debug("Send 5 transactions from node2 (to its own address)")
for i in range(5):
last_txid = self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10"))
node2_balance = self.nodes[2].getbalance()
self.sync_all()
self.log.debug("Verify that node0 and node1 have 5 transactions in their mempools")
assert_equal(len(self.nodes[0].getrawmempool()), 5)
assert_equal(len(self.nodes[1].getrawmempool()), 5)
self.log.debug("Prioritize a transaction on node0")
fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
assert_equal(fees['base'], fees['modified'])
self.nodes[0].prioritisetransaction(txid=last_txid, fee_delta=1000)
fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified'])
self.log.debug("Stop-start the nodes. Verify that node0 has the transactions in its mempool and node1 does not. Verify that node2 calculates its balance correctly after loading wallet transactions.")
self.stop_nodes()
# Give this node a head-start, so we can be "extra-sure" that it didn't load anything later
# Also don't store the mempool, to keep the datadir clean
self.start_node(1, extra_args=["-persistmempool=0"])
self.start_node(0)
self.start_node(2)
wait_until(lambda: self.nodes[0].getmempoolinfo()["loaded"], timeout=1)
wait_until(lambda: self.nodes[2].getmempoolinfo()["loaded"], timeout=1)
assert_equal(len(self.nodes[0].getrawmempool()), 5)
assert_equal(len(self.nodes[2].getrawmempool()), 5)
# The others have loaded their mempool. If node_1 loaded anything, we'd probably notice by now:
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.log.debug('Verify prioritization is loaded correctly')
fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified'])
# Verify accounting of mempool transactions after restart is correct
self.nodes[2].syncwithvalidationinterfacequeue() # Flush mempool to wallet
assert_equal(node2_balance, self.nodes[2].getbalance())
self.log.debug("Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file.")
self.stop_nodes()
self.start_node(0, extra_args=["-persistmempool=0"])
wait_until(lambda: self.nodes[0].getmempoolinfo()["loaded"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.log.debug("Stop-start node0. Verify that it has the transactions in its mempool.")
self.stop_nodes()
self.start_node(0)
wait_until(lambda: self.nodes[0].getmempoolinfo()["loaded"])
assert_equal(len(self.nodes[0].getrawmempool()), 5)
mempooldat0 = os.path.join(self.nodes[0].datadir, 'regtest', 'mempool.dat')
mempooldat1 = os.path.join(self.nodes[1].datadir, 'regtest', 'mempool.dat')
self.log.debug("Remove the mempool.dat file. Verify that savemempool to disk via RPC re-creates it")
os.remove(mempooldat0)
self.nodes[0].savemempool()
assert os.path.isfile(mempooldat0)
self.log.debug("Stop nodes, make node1 use mempool.dat from node0. Verify it has 5 transactions")
os.rename(mempooldat0, mempooldat1)
self.stop_nodes()
self.start_node(1, extra_args=[])
wait_until(lambda: self.nodes[1].getmempoolinfo()["loaded"])
assert_equal(len(self.nodes[1].getrawmempool()), 5)
self.log.debug("Prevent raind from writing mempool.dat to disk. Verify that `savemempool` fails")
# to test the exception we are creating a tmp folder called mempool.dat.new
# which is an implementation detail that could change and break this test
mempooldotnew1 = mempooldat1 + '.new'
os.mkdir(mempooldotnew1)
assert_raises_rpc_error(-1, "Unable to dump mempool to disk", self.nodes[1].savemempool)
os.rmdir(mempooldotnew1)
# Run the test directly when executed as a script.
if __name__ == '__main__':
    MempoolPersistTest().main()
|
[
"blockmecha@gmail.com"
] |
blockmecha@gmail.com
|
1362efbb6d53f3383cea29321ab304f0e370154a
|
8dc6423cca2eb626b1f9ce76d576e95ac17181f8
|
/news/migrations/0003_auto_20180919_1420.py
|
0e3a984792ef8d8547e078b029e9116b1aab56dd
|
[] |
no_license
|
codeSapience/django_news_app
|
07d67c44105ee30626e740ec6c534d7d6cc07ee4
|
2a55d0503d128d88d4c7b03b766d68c9c99516a0
|
refs/heads/master
| 2020-03-29T01:49:12.729978
| 2018-10-05T00:00:15
| 2018-10-05T00:00:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 576
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-09-19 14:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: orders News by newest-first pub_date
    # and makes the slug field optional (blank/null allowed).

    dependencies = [
        ('news', '0002_auto_20180919_1304'),
    ]
    operations = [
        # Meta-only change: default ordering, no database schema impact.
        migrations.AlterModelOptions(
            name='news',
            options={'ordering': ['-pub_date']},
        ),
        # Relaxes the slug column to allow empty/NULL values.
        migrations.AlterField(
            model_name='news',
            name='slug',
            field=models.SlugField(blank=True, null=True),
        ),
    ]
|
[
"brittoakintade@gmail.com"
] |
brittoakintade@gmail.com
|
35123285e1569f4fd529e804592e81ec15765527
|
2a61b02c26e77686e38cd9039e6f4b0530ddb7c9
|
/bitbots_navigation/bitbots_localization/src/bitbots_localization/localization_dsd/actions/initialize.py
|
01deaa97243938ce01c27e519039c7f8aa8a5bea
|
[
"MIT"
] |
permissive
|
fly-pigTH/bitbots_thmos_meta
|
931413e86929751024013b8e35f87b799243e22c
|
f45ccc362dc689b69027be5b0d000d2a08580de4
|
refs/heads/master
| 2023-08-27T02:58:08.397650
| 2021-10-22T17:17:11
| 2021-10-22T17:17:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,154
|
py
|
import rospy
from dynamic_stack_decider.abstract_action_element import AbstractActionElement
from bitbots_localization.srv import ResetFilter
class AbstractInitialize(AbstractActionElement):
    """Base class for localization-reset DSD actions.

    Subclasses implement perform() and call the 'reset_localization'
    service with different reset modes.
    """
    def __init__(self, blackboard, dsd, parameters=None):
        # Bug fix: the original hard-coded parameters=None in the super()
        # call, silently discarding any parameters the DSD passed in.
        super(AbstractInitialize, self).__init__(blackboard, dsd, parameters=parameters)
        self.called = False
        self.last_service_call = 0  # timestamp of the last service call
        self.time_between_calls = 2  # [s]
        self.first_perform = True

    def perform(self, reevaluate=False):
        """Must be overridden by concrete initialize actions."""
        raise NotImplementedError
class DoNothing(AbstractInitialize):
    """No-op action: leaves the localization filter untouched."""
    def perform(self, reevaluate=False):
        rospy.logdebug("doing nothing")
class InitPose(AbstractInitialize):
    """Fully reset the localization filter (reset mode 0)."""
    def perform(self, reevaluate=False):
        rospy.logdebug("initializing pose")
        rospy.wait_for_service('reset_localization')
        proxy = rospy.ServiceProxy('reset_localization', ResetFilter)
        try:
            response = proxy(0, None, None)
        except rospy.ServiceException as e:
            rospy.logerr(f"Service call failed: {e}")
        else:
            return response.success
class InitLeftHalf(AbstractInitialize):
    """Reset the localization filter to the left half of the field (mode 1)."""
    def perform(self, reevaluate=False):
        rospy.logdebug("initializing left half")
        rospy.wait_for_service('reset_localization')
        proxy = rospy.ServiceProxy('reset_localization', ResetFilter)
        try:
            response = proxy(1, None, None)
        except rospy.ServiceException as e:
            rospy.logerr(f"Service call failed: {e}")
        else:
            return response.success
class InitRightHalf(AbstractInitialize):
    """Reset the localization filter to the right half of the field (mode 2)."""
    def perform(self, reevaluate=False):
        rospy.logdebug("initializing right half")
        rospy.wait_for_service('reset_localization')
        proxy = rospy.ServiceProxy('reset_localization', ResetFilter)
        try:
            response = proxy(2, None, None)
        except rospy.ServiceException as e:
            rospy.logerr(f"Service call failed: {e}")
        else:
            return response.success
class InitPosition(AbstractInitialize):
    """Reset the filter around the pose stored on the blackboard (mode 3)."""
    def perform(self, reevaluate=False):
        self.do_not_reevaluate()
        rospy.logdebug("initializing position")
        rospy.wait_for_service('reset_localization')
        proxy = rospy.ServiceProxy('reset_localization', ResetFilter)
        try:
            # Response is intentionally ignored; the action pops regardless.
            proxy(3, self.blackboard.poseX, self.blackboard.poseY)
        except rospy.ServiceException as e:
            rospy.logerr(f"Service call failed: {e}")
        return self.pop()
class InitSide(AbstractInitialize):
    """Reset the filter for a side-line placement in our half.

    NOTE(review): this issues reset mode 0, the same full reset as InitPose;
    confirm whether a dedicated side-line reset mode was intended.
    """
    def perform(self, reevaluate=False):
        self.do_not_reevaluate()
        rospy.logdebug("initializing on the side line of our half")
        rospy.wait_for_service('reset_localization')
        proxy = rospy.ServiceProxy('reset_localization', ResetFilter)
        try:
            proxy(0, None, None)
        except rospy.ServiceException as e:
            rospy.logerr(f"Service call failed: {e}")
        return self.pop()
|
[
"759074800@qq.com"
] |
759074800@qq.com
|
e9a5a4858ee18294253987862fd245e034788500
|
57c13a2500561e72e382489c23e9c0b8347be605
|
/network_programming/chat_project/chat_server.py
|
bd32ca8fa7b9bb2c5a8bc51e0d00e53983448314
|
[] |
no_license
|
linheimx/python_master
|
7403d7af639e31810c90b2fba14972a6d3dcfcec
|
7fb7c467bedaff1515975807552a0ba05e30f15e
|
refs/heads/master
| 2021-01-21T21:54:55.537994
| 2016-12-23T15:05:14
| 2016-12-23T15:05:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,058
|
py
|
import sys
import socket
import select
HOST = ""  # bind to all interfaces
PORT = 9090  # TCP port the chat server listens on
SOCKET_LIST = []  # every socket watched by select(), including the listener
RECV_BUFFER = 4096  # max bytes read per recv() call
def chat_server():
    """Run the chat server: accept clients and relay messages between them.

    Blocks forever in the select loop; the trailing close() is only reached
    if the loop is ever broken out of.

    Fixes over the original:
    - bare ``except:`` replaced with targeted exception handling;
    - a socket that errors is now removed from SOCKET_LIST (previously it
      stayed registered, causing a tight error loop) and is closed
      (previously the file descriptor leaked).
    """
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
    server_socket.bind((HOST, PORT))
    server_socket.listen(10)
    SOCKET_LIST.append(server_socket)
    print("Chat server started on port ", str(PORT))
    while True:
        # timeout=0 makes this a busy poll; kept as-is to preserve behavior.
        ready_to_read, _, _ = select.select(SOCKET_LIST, [], [], 0)
        for sock in ready_to_read:
            if sock == server_socket:
                # A new connection request was received.
                sockfd, addr = server_socket.accept()
                SOCKET_LIST.append(sockfd)
                print("Client {} connected".format(addr))
                msg = "[{}] entered our chatting room".format(addr)
                print(msg)
                broadcast(server_socket, sockfd, msg)
            else:
                # Process data received from an existing client.
                try:
                    data = sock.recv(RECV_BUFFER)
                    if data:
                        msg = "[{}]{}".format(sock.getpeername(), data.decode("utf-8"))
                        print(msg)
                        broadcast(server_socket, sock, msg)
                        continue
                except (OSError, UnicodeDecodeError):
                    pass
                # Empty recv (orderly shutdown) or socket/decode error:
                # deregister, close the descriptor, and tell the others.
                if sock in SOCKET_LIST:
                    SOCKET_LIST.remove(sock)
                sock.close()
                broadcast(server_socket, sock, "Client offline\n")
    server_socket.close()
def broadcast(server_sock, sock, msg):
    """Send *msg* to every connected client except the listener and the sender.

    A socket whose send() fails is logged, removed from SOCKET_LIST and
    closed.

    Fix over the original: iterate over a snapshot of SOCKET_LIST.  The
    original removed entries from the list *while iterating it*, which
    silently skips the element following each removal.
    """
    for s in list(SOCKET_LIST):
        if s != server_sock and s != sock:
            try:
                s.send(msg.encode("utf-8"))
            except Exception as e:
                print(e)
                # Guard the remove: the socket may already have been dropped.
                if s in SOCKET_LIST:
                    SOCKET_LIST.remove(s)
                s.close()
# Script entry point: run the server until interrupted.
if __name__ == "__main__":
    sys.exit(chat_server())
|
[
"bonwho09@gmail.com"
] |
bonwho09@gmail.com
|
7ce31686205d472fb1883b4327ca1d1dd6db0ec6
|
d859e135cb2c7bc4b5d3c62c99c3ca49784b6ca3
|
/linehaul/cli.py
|
c4e3fbce803f1a4da2f591f4440d478ca743b71a
|
[
"Apache-2.0"
] |
permissive
|
reaperhulk/linehaul
|
c7dfe2de163d5062572b2fd1626c69d3fae592fd
|
1058adfdedec3c75f5e4f32108ff727fcddd4d9d
|
refs/heads/master
| 2021-01-13T03:18:07.795816
| 2016-07-01T12:31:36
| 2016-07-01T12:31:36
| 77,585,767
| 0
| 0
| null | 2016-12-29T05:37:23
| 2016-12-29T05:37:23
| null |
UTF-8
|
Python
| false
| false
| 4,335
|
py
|
#!/usr/bin/env python3.5
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging.config
import click
import prometheus_client
import raven
from . import _tls as tls
from ._click import AsyncCommand
from ._server import Server
from .bigquery import BigQueryClient
from .core import Linehaul
# Package version as reported to Sentry releases.
__version__ = raven.fetch_package_version("linehaul")
@click.command(
    cls=AsyncCommand,
    context_settings={"auto_envvar_prefix": "LINEHAUL"},
)
@click.option("--bind", default="0.0.0.0")
@click.option("--port", type=int, default=512)
@click.option("--token")
@click.option("--account")
@click.option("--key", type=click.File("r"))
@click.option("--reuse-port/--no-reuse-port", default=True)
@click.option(
    "--tls-ciphers",
    default="ECDHE+CHACHA20:ECDH+AES128GCM:ECDH+AES128:!SHA:!aNULL:!eNULL",
)
@click.option(
    "--tls-certificate",
    type=click.Path(
        exists=True,
        dir_okay=False,
        readable=True,
        resolve_path=True,
    ),
)
@click.option("--metrics-port", type=int, default=12000)
@click.option("--sentry-dsn")
@click.option("--sentry-ua-dsn")
@click.option("--log-file")
@click.argument("table", envvar="BIGQUERY_TABLE")
@click.pass_context
async def main(ctx, bind, port, token, account, key, reuse_port, tls_ciphers,
               tls_certificate, metrics_port, sentry_dsn, sentry_ua_dsn,
               log_file, table):
    """Run the linehaul log-ingestion server.

    Configures logging (console or file, plus Sentry), starts a Prometheus
    metrics HTTP server, connects to BigQuery, and then serves until the
    task is cancelled.  Options can also be supplied via LINEHAUL_* env
    vars (auto_envvar_prefix above); TABLE comes from BIGQUERY_TABLE.
    """
    # Configure logging
    target_logger = "logfile" if log_file else "console"
    logging.config.dictConfig({
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "console": {
                "format": "[%(asctime)s][%(levelname)s] %(name)s "
                          "%(filename)s:%(funcName)s:%(lineno)d | %(message)s",
                "datefmt": "%H:%M:%S",
            },
        },
        "handlers": {
            "console": {
                "level": "DEBUG",
                "class": "logging.StreamHandler",
                "formatter": "console",
            },
            "logfile": {
                "level": "DEBUG",
                "class": "logging.handlers.WatchedFileHandler",
                "formatter": "console",
                "filename": log_file or "/dev/null",
            },
            "sentry": {
                "level": "ERROR",
                "class": "raven.handlers.logging.SentryHandler",
                "dsn": sentry_dsn,
                "release": __version__,
            },
            "ua_sentry": {
                "level": "ERROR",
                "class": "raven.handlers.logging.SentryHandler",
                "dsn": sentry_ua_dsn,
                "release": __version__,
            },
        },
        "loggers": {
            # Root logger: everything goes to the chosen target + Sentry.
            "": {
                "handlers": [target_logger, "sentry"],
                "level": "DEBUG",
                "propagate": False,
            },
            # User-agent parse failures get their own Sentry project.
            "linehaul.user_agents": {
                "handlers": [target_logger, "ua_sentry"],
                "level": "DEBUG",
                "propagate": False,
            },
        },
    })
    # Start up our metrics server in another thread.
    prometheus_client.start_http_server(metrics_port)
    # TABLE is "dataset:table"; the key file holds the service-account key.
    bqc = BigQueryClient(*table.split(":"), client_id=account, key=key.read())
    if tls_certificate is not None:
        ssl_context = tls.create_context(tls_certificate, tls_ciphers)
    else:
        ssl_context = None
    with Linehaul(token=token, bigquery=bqc, loop=ctx.event_loop) as lh:
        async with Server(lh, bind, port,
                          reuse_port=reuse_port,
                          ssl=ssl_context,
                          loop=ctx.event_loop) as s:
            try:
                await s.wait_closed()
            except asyncio.CancelledError:
                click.echo(click.style("Shutting Down...", fg="yellow"))
|
[
"donald@stufft.io"
] |
donald@stufft.io
|
0b5ef3d75664e973d42db706c3d83768ccc1934e
|
4f74e6d72b98cd1da2190313e4a7eb9d342cc93d
|
/environments/admin.py
|
352cb786ec086a6c618fb44c7ec20f14f7ba1fc8
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
adamgogogo/glitchtip-backend
|
ef0c529b71d5a4632a235b40a10e0b428a1cee3a
|
ee71d1b732d92868189d520aa111c09b116b7b22
|
refs/heads/master
| 2023-02-01T23:10:53.734450
| 2020-12-19T19:32:10
| 2020-12-19T19:32:10
| 323,588,534
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 318
|
py
|
from django.contrib import admin
from .models import Environment, EnvironmentProject
# Idiom: use the @admin.register decorator instead of separate
# admin.site.register() calls -- equivalent registration, less boilerplate.
@admin.register(Environment)
class EnvironmentAdmin(admin.ModelAdmin):
    """Default admin interface for Environment; no customizations yet."""


@admin.register(EnvironmentProject)
class EnvironmentProjectAdmin(admin.ModelAdmin):
    """Default admin interface for EnvironmentProject; no customizations yet."""
|
[
"david@burkesoftware.com"
] |
david@burkesoftware.com
|
7f4adc0c433e1c8f76de5eb5b022daf9210bc848
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_1674486_0/Python/fantastication/diamond.py
|
4d2de5d2b9dc57390ec5ef8ae812eb4b981604ce
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,110
|
py
|
def check(n, path):
    """Return True if class *n* (1-indexed) reaches some ancestor along two
    different inheritance paths ("diamond" inheritance).

    Reads the module-level ``classes`` list (classes[i] = direct parents of
    class i+1).  *path* accumulates every ancestor seen so far and is shared
    across the sibling recursions below: an ancestor reachable from two
    different parents is appended once and then found again via ``in path``
    -- that repetition IS the detection mechanism, not a bug.
    NOTE(review): the second loop does not skip parent == -1 the way the
    first one does; presumably -1 never occurs in the parsed input -- verify.
    """
    #print "path to", n, ": ", path
    parents = classes[n-1]
    for parent in parents:
        if parent == -1:
            continue
        if parent in path:
            #print "FOUND", path
            return True
        else:
            path.append(parent)
            #print "adding", parent
    for parent in parents:
        if check(parent, path):
            return True
    return False
# NOTE: Python 2 script (uses xrange).  Reads a Code Jam-style case file from
# input.txt and writes one Yes/No answer per case to output.txt.  The file
# handles are never closed explicitly (process exit cleans them up).
inp = open("input.txt", "r")
out = open("output.txt", "w")
num_cases = int(inp.readline())
for case in xrange(num_cases):
    #print "\nnew case:", case+1
    num_classes = int(inp.readline())
    # classes[i] = list of direct parents of class i+1; first number on each
    # input line is the parent count, followed by the parent ids.
    classes = []
    for i in xrange(num_classes):
        cl = [int(a) for a in inp.readline().split()]
        if cl[0]:
            classes.append(cl[1:])
        else:
            classes.append([])
    #print "classes:", classes
    hooray = False
    for i in xrange(num_classes):
        # Fresh path per starting class; check() reads the global `classes`.
        if check(i + 1, []):
            hooray = True
            break
    if hooray:
        out.write("Case #{0}: Yes\n".format(case+1))
    else:
        out.write("Case #{0}: No\n".format(case+1))
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
f2776cf8026c189c2a2aa1e53aa4d94fd55e6f58
|
c825ab84e533f4f306656a758ee469a27c5b232b
|
/mysite/settings.py
|
1833cee2fa370af6a81c1ff217ece67f36bc030f
|
[] |
no_license
|
wataru-ikeda/my-first-blog
|
e6244c54b509d0c3e6dd42c1c49f90b6ec93397a
|
d2f293a112ae9454006edd5647fc5c992673c0f9
|
refs/heads/master
| 2021-05-25T07:56:25.395351
| 2020-04-08T03:12:01
| 2020-04-08T03:12:01
| 253,728,389
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,200
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'ekqh#8x!38-pkcd0%m1u4!32-l!mc^%o-@6*@@176$#1+)=ag0'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG=True combined with a public pythonanywhere host below
# exposes stack traces to visitors -- disable before going live.
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Local blog application.
    'blog.apps.BlogConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"you@example.com"
] |
you@example.com
|
c2867740cb5f78444e1cd652e06ea817001d4ae3
|
07bdfcb29eda2048278dff96f7e2d3a52a199ece
|
/Backend Python/ExamSystemPython/examsystemapp/api/college.py
|
d67de71b0f26e4f0526d89cd1be488c466285a78
|
[] |
no_license
|
shreyassiddanagoudar/shreya
|
5ac11e4ba0312b55c7a221d82b6c36cafcd291ab
|
c51bc363e74a300d97b6d78d4f5aee043762ac38
|
refs/heads/master
| 2023-07-15T09:38:19.653419
| 2021-08-28T09:48:09
| 2021-08-28T09:48:09
| 361,653,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,134
|
py
|
"""
Created By : <Auto generated code>
Created On :
Reviewed By :
Reviewed On :
Version :
"""
import json
from django.http import HttpRequest
from examsystemapp.api.base_controller import BaseController
from examsystemapp.models.college import CollegeModel
from examsystemapp.services.college_service import CollegeService
from examsystemapp.utils.constants.constants import DataTypes, HttpMethodType, AppConstants
from examsystemapp.utils.helpers.general_helper import IntHelper, FloatHelper
from examsystemapp.utils.helpers.request_helper import RequestConfig, ParamsObject
class College(BaseController):
    """REST-style controller exposing CRUD endpoints for colleges.

    Each handler decodes the request, delegates to CollegeService, and
    wraps the result with BaseController.send_response.

    Refactor: add() and update() previously duplicated thirteen identical
    field-copy lines; both now share _college_from_json().
    """

    # Payload keys copied verbatim from the incoming JSON onto CollegeModel.
    _JSON_FIELDS = (
        "universityid", "name", "code", "addr1", "addr2", "addr3",
        "cityid", "stateid", "pincode", "phone", "email", "logo", "url",
    )

    def __init__(self, request):
        BaseController.__init__(self, request)

    @classmethod
    def _college_from_json(cls, college_json, include_id=False):
        """Build a CollegeModel from a decoded payload dict.

        include_id: also copy "collegeid" (needed by update, not by add).
        """
        college_object: CollegeModel = CollegeModel()
        if include_id:
            college_object.collegeid = college_json.get("collegeid")
        for field_name in cls._JSON_FIELDS:
            setattr(college_object, field_name, college_json.get(field_name))
        return college_object

    def add(self, request: HttpRequest):
        """Create a new college from the posted "college_json" payload."""
        college_json = json.loads(request.POST.get("college_json"))
        college_object = self._college_from_json(college_json)
        college_service: CollegeService = CollegeService()
        college_object = college_service.add(college_object)
        return self.send_response(college_object)

    def update(self, request: HttpRequest):
        """Update the college identified by "collegeid" in the payload."""
        college_json = json.loads(request.POST.get("college_json"))
        college_object = self._college_from_json(college_json, include_id=True)
        college_service: CollegeService = CollegeService()
        college_object = college_service.update(college_object)
        return self.send_response(college_object)

    def delete(self, request: HttpRequest):
        """Delete the college identified by "collegeid" in the payload."""
        college_json = json.loads(request.POST.get("college_json"))
        college_object: CollegeModel = CollegeModel()
        college_object.collegeid = college_json.get("collegeid")
        college_service: CollegeService = CollegeService()
        college_object = college_service.delete(college_object)
        return self.send_response(college_object)

    def get(self, request: HttpRequest):
        """Fetch a single college by the required integer "id" parameter."""
        params = [
            {"id": RequestConfig(from_session=False, nullable=False, datatype=DataTypes.INT)}
        ]
        params: ParamsObject = self.convert_params(request, HttpMethodType.get, params)
        college_service: CollegeService = CollegeService()
        data = college_service.get(params)
        return self.send_response(data)

    def get_list(self, request: HttpRequest):
        """Fetch colleges matching a comma-separated "ids" string."""
        params = [
            {"ids": RequestConfig(from_session=False, nullable=False, datatype=DataTypes.STRING, default='')}
        ]
        params: ParamsObject = self.convert_params(request, HttpMethodType.get, params)
        college_service: CollegeService = CollegeService()
        data = college_service.get_list(params)
        return self.send_response(data)

    def get_object(self, request: HttpRequest):
        """Fetch a single college object (no request parameters)."""
        params = []
        params: ParamsObject = self.convert_params(request, HttpMethodType.get, params)
        college_service: CollegeService = CollegeService()
        data = college_service.get_object(params)
        return self.send_response(data)

    def get_list_object(self, request: HttpRequest):
        """Fetch the full list of college objects (no request parameters)."""
        params = []
        params: ParamsObject = self.convert_params(request, HttpMethodType.get, params)
        college_service: CollegeService = CollegeService()
        data = college_service.get_list_object(params)
        return self.send_response(data)

    def get_list_object_page(self, request: HttpRequest):
        """Fetch a filtered, paginated list of colleges.

        NOTE(review): three parameter names below carry trailing spaces
        ("CollegeName ", "UniversityID ", "CityID ").  They are kept
        byte-identical here because the service layer may expect them;
        confirm and normalize across both layers together.
        """
        params = [
            {"CollegeName ": RequestConfig(from_session=False, nullable=True, datatype=DataTypes.STRING, default=None)},
            {"Code": RequestConfig(from_session=False, nullable=True, datatype=DataTypes.INT, default=None)},
            {"UniversityID ": RequestConfig(from_session=False, nullable=True, datatype=DataTypes.INT, default=None)},
            {"StateID": RequestConfig(from_session=False, nullable=True, datatype=DataTypes.INT, default=None)},
            {"CityID ": RequestConfig(from_session=False, nullable=True, datatype=DataTypes.INT, default=None)},
            {"page_num": RequestConfig(from_session=False, nullable=True, datatype=DataTypes.INT, default=1)},
            {"page_size": RequestConfig(from_session=False, nullable=True, datatype=DataTypes.INT, default=10)},
        ]
        params: ParamsObject = self.convert_params(request, HttpMethodType.get, params)
        college_service: CollegeService = CollegeService()
        data = college_service.get_list_object_paginated(params)
        return self.send_response(data)
|
[
"noreply@github.com"
] |
shreyassiddanagoudar.noreply@github.com
|
4ef1d2aaba39ed43d41d7f9b2fb9488b5ee99223
|
cf0f3f1bb02048d99be4e74254a4e48f4ca78ac6
|
/0x1F-pascal_triangle/0-pascal_triangle.py
|
0a8d2ae715fa29481d1397bef23064c44f83ae94
|
[] |
no_license
|
andreammgcol/holbertonschool-interview
|
89277dc9aebb0f36d77b995b58f6d060c48692bc
|
01bc3b29f44f8b76a56879b00bc77d2f9a919306
|
refs/heads/master
| 2023-07-20T13:23:31.172414
| 2021-08-26T04:42:55
| 2021-08-26T04:42:55
| 280,991,551
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
#!/usr/bin/python3
""" Pascal triangle """
def pascal_triangle(n):
    """Return Pascal's triangle with *n* rows as a list of lists of ints.

    Row i (0-indexed) has i + 1 entries; each interior entry is the sum of
    the two entries above it.  For n <= 0 an empty list is returned.

    Improvement over the original: builds each row in a single pass from the
    previous row instead of first filling every row with 1s and then
    patching the interior in a second sweep.
    """
    if n <= 0:
        return []
    triangle = [[1]]
    for _ in range(n - 1):
        prev = triangle[-1]
        # Interior entries are pairwise sums of the previous row, framed by 1s.
        row = [1] + [prev[j] + prev[j + 1] for j in range(len(prev) - 1)] + [1]
        triangle.append(row)
    return triangle
|
[
"amendez72@misena.edu.co"
] |
amendez72@misena.edu.co
|
0ec96347624c3779acb977f9f453bee286bcb934
|
80d50ea48e10674b1b7d3f583a1c4b7d0b01200f
|
/examples/v2/incidents/DeleteIncident.py
|
a134c05b864f6bbffc44a61f18cc8ae6dcb87435
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] |
permissive
|
DataDog/datadog-api-client-python
|
3e01fa630278ad0b5c7005f08b7f61d07aa87345
|
392de360e7de659ee25e4a6753706820ca7c6a92
|
refs/heads/master
| 2023-09-01T20:32:37.718187
| 2023-09-01T14:42:04
| 2023-09-01T14:42:04
| 193,793,657
| 82
| 36
|
Apache-2.0
| 2023-09-14T18:22:39
| 2019-06-25T22:52:04
|
Python
|
UTF-8
|
Python
| false
| false
| 555
|
py
|
"""
Delete an existing incident returns "OK" response
"""
from os import environ
from datadog_api_client import ApiClient, Configuration
from datadog_api_client.v2.api.incidents_api import IncidentsApi
# there is a valid "incident" in the system
INCIDENT_DATA_ID = environ["INCIDENT_DATA_ID"]
configuration = Configuration()
# delete_incident is marked unstable in the client and must be opted into
# explicitly before it can be called.
configuration.unstable_operations["delete_incident"] = True
with ApiClient(configuration) as api_client:
    api_instance = IncidentsApi(api_client)
    # Issues the DELETE request; raises on any non-success HTTP status.
    api_instance.delete_incident(
        incident_id=INCIDENT_DATA_ID,
    )
|
[
"noreply@github.com"
] |
DataDog.noreply@github.com
|
0ea01cbe7bd8170f064d5e9e92d39c5a5fe6765e
|
bdc12ac21a4c7b83a43258b46d6008c5f36a71e6
|
/edmondderothschild/spiders/spider.py
|
9d9460836280ea4016ffed933a6d247278938580
|
[] |
no_license
|
hristo-grudev/edmondderothschild
|
068132e79dd176721b885f238f0cf342e7e57988
|
da231da1f56c62760872f5bcc3a70aaae040add8
|
refs/heads/main
| 2023-03-24T06:17:20.193318
| 2021-03-26T08:49:17
| 2021-03-26T08:49:17
| 351,717,760
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,736
|
py
|
import re
import scrapy
from scrapy.loader import ItemLoader
from scrapy.spiders import XMLFeedSpider
from w3lib.html import remove_tags
from ..items import EdmondderothschildItem
from itemloaders.processors import TakeFirst
import requests
import xmltodict
class EdmondderothschildSpider(XMLFeedSpider):
    """Scrape news articles from the Edmond de Rothschild news API.

    GetAll returns XML with one <IdNewsContent> node per article; each id
    is then resolved through GetByID and the article fields are extracted
    with regexes over the raw XML payload.
    """
    name = 'edmondderothschild'
    start_urls = ['https://news.edmond-de-rothschild.com/api/ComNewsClient/News/GetAll?languageCode=fr&idPictureFormat=2&countryId=1&pageSize=999999&pageIndex=0&tags=undefined&businessId=undefined']
    itertag = 'IdNewsContent'

    # Entity-like fragments left over from the API's escaping, replaced in
    # this exact insertion order (order matters: e.g. '#58;' must be stripped
    # before the bare '&' removal turns '&#58;' into ':').
    # NOTE(review): the '<' -> '<' and '>' -> '>' pairs are no-ops; they look
    # like mangled '&lt;'/'&gt;' mappings -- confirm against the raw feed.
    _CLEANUP_MAP = {'#58;': ':', 'quot;': '"', '#160;': '', '<': '<', '>': '>', '&': '', 'bull;': '', 'acute;': '´', 'grave;': '`', 'rsquo;': '`', 'circ;': 'ˆ', 'nbsp;': ' '}

    def parse_node(self, response, node):
        """Turn each news id from the feed into a GetByID request."""
        news_id = node.xpath('//text()').get()
        url = f'https://news.edmond-de-rothschild.com/api/ComNewsClient/News/GetByID?IdNews={news_id}'
        yield scrapy.Request(url, callback=self.parse_link)

    def parse_link(self, response):
        """Extract title, date and cleaned description from a GetByID response."""
        data = response.xpath('//*').get()
        title = re.findall(r'<Title>(.*?)</Title>', data, re.DOTALL)[0]
        date = re.findall(r'<PublishingDate>(.*?)</PublishingDate>', data, re.DOTALL)[0]
        description = re.findall(r'<Content>(.*?)</Content>', data, re.DOTALL)[0]
        # The map keys are literal text, so plain str.replace is both simpler
        # and safer than the original per-key re.sub calls.
        for fragment, replacement in self._CLEANUP_MAP.items():
            description = description.replace(fragment, replacement)
        description = remove_tags(description)
        # Debug print removed; route the cleaned text through spider logging.
        self.logger.debug(description)
        item = ItemLoader(item=EdmondderothschildItem(), response=response)
        item.default_output_processor = TakeFirst()
        item.add_value('title', title)
        item.add_value('description', description)
        item.add_value('date', date)
        return item.load_item()
|
[
"hr.grudev@gmail.com"
] |
hr.grudev@gmail.com
|
632e2a617586abeeb950bd4e2968256a403701b3
|
4a014a10f3e144bc778149f8bf6f763581ece2b0
|
/src/latexify/config.py
|
88ab7e04d0614f1cb1c100563ad6a6c4acad5b3e
|
[
"Apache-2.0"
] |
permissive
|
google/latexify_py
|
394f4608e09e45ac6b8091d6734cf6bf06fa548d
|
b3ae7fa9483f7055e692bef8acc9c0ec8e91c51f
|
refs/heads/main
| 2023-09-02T06:49:55.607974
| 2023-01-14T10:57:55
| 2023-01-14T10:57:55
| 282,443,202
| 5,202
| 265
|
Apache-2.0
| 2023-01-14T10:57:56
| 2020-07-25T12:50:56
|
Python
|
UTF-8
|
Python
| false
| false
| 2,790
|
py
|
"""Definition of the Config class."""
from __future__ import annotations
import dataclasses
from typing import Any
@dataclasses.dataclass(frozen=True)
class Config:
    """Configurations to control the behavior of latexify.

    Attributes:
        expand_functions: If set, the names of the functions to expand.
        identifiers: If set, the mapping to replace identifier names in the
            function. Keys are the original names of the identifiers,
            and corresponding values are the replacements.
            Both keys and values have to represent valid Python identifiers:
            ^[A-Za-z_][A-Za-z0-9_]*$
        prefixes: Prefixes of identifiers to trim. E.g., if "foo.bar" in prefixes,
            all identifiers of the form "foo.bar.suffix" are replaced by "suffix".
        reduce_assignments: If True, assignment statements are used to synthesize
            the final expression.
        use_math_symbols: Whether to convert identifiers with a math symbol
            surface (e.g., "alpha") to the LaTeX symbol (e.g., "\\alpha").
        use_set_symbols: Whether to use set symbols or not.
        use_signature: Whether to add the function signature before the
            expression or not.
    """

    expand_functions: set[str] | None
    identifiers: dict[str, str] | None
    prefixes: set[str] | None
    reduce_assignments: bool
    use_math_symbols: bool
    use_set_symbols: bool
    use_signature: bool

    def merge(self, *, config: Config | None = None, **kwargs) -> Config:
        """Merge configuration based on old configuration and field values.

        Args:
            config: If None, the merged one will merge defaults and field values,
                instead of merging old configuration and field values.
            **kwargs: Members to modify. This value precedes both self and config.

        Returns:
            A new Config object
        """
        # Per-field precedence: kwargs > config > self; a kwargs value of
        # None means "not supplied".
        base = self if config is None else config
        merged = {}
        for fld in dataclasses.fields(self):
            supplied = kwargs.get(fld.name)
            merged[fld.name] = getattr(base, fld.name) if supplied is None else supplied
        return Config(**merged)

    @staticmethod
    def defaults() -> Config:
        """Generates a Config with default values.

        Returns:
            A new Config with default values
        """
        return Config(
            expand_functions=None,
            identifiers=None,
            prefixes=None,
            reduce_assignments=False,
            use_math_symbols=False,
            use_set_symbols=False,
            use_signature=True,
        )
|
[
"noreply@github.com"
] |
google.noreply@github.com
|
623feb305b11da8bd2283836b273711652544b52
|
f64e31cb76909a6f7fb592ad623e0a94deec25ae
|
/leetcode/p1710_maximum_units_on_a_truck.py
|
8e89618e55fad5153cf50d1bc81468436154aa8e
|
[] |
no_license
|
weak-head/leetcode
|
365d635cb985e1d154985188f6728c18cab1f877
|
9a20e1835652f5e6c33ef5c238f622e81f84ca26
|
refs/heads/main
| 2023-05-11T14:19:58.205709
| 2023-05-05T20:57:13
| 2023-05-05T20:57:13
| 172,853,059
| 0
| 1
| null | 2022-12-09T05:22:32
| 2019-02-27T05:58:54
|
Python
|
UTF-8
|
Python
| false
| false
| 460
|
py
|
from typing import List
import heapq
def maximumUnits(boxTypes: List[List[int]], truckSize: int) -> int:
    """Greedily load boxes with the highest units-per-box first.

    Each boxTypes entry is [numberOfBoxes, unitsPerBox]; at most truckSize
    boxes fit on the truck.

    Time: O(n log n) for the sort; Space: O(n) for the sorted copy.
    """
    total_units = 0
    capacity = truckSize
    for count, units in sorted(boxTypes, key=lambda box: box[1], reverse=True):
        if capacity <= 0:
            break
        taken = min(count, capacity)
        total_units += taken * units
        capacity -= taken
    return total_units
|
[
"zinchenko@live.com"
] |
zinchenko@live.com
|
3c3751974ba7fde06708dc6a41c2bedc1bc225c7
|
159aed4755e47623d0aa7b652e178296be5c9604
|
/data/scripts/templates/object/tangible/lair/base/shared_poi_all_lair_bones_large_evil_fire_red.py
|
0d8c6fc34668df5aa6df3b2b1c35b2da4808979c
|
[
"MIT"
] |
permissive
|
anhstudios/swganh
|
fb67d42776864b1371e95f769f6864d0784061a3
|
41c519f6cdef5a1c68b369e760781652ece7fec9
|
refs/heads/develop
| 2020-12-24T16:15:31.813207
| 2016-03-08T03:54:32
| 2016-03-08T03:54:32
| 1,380,891
| 33
| 44
| null | 2016-03-08T03:54:32
| 2011-02-18T02:32:45
|
Python
|
UTF-8
|
Python
| false
| false
| 464
|
py
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build the Tangible for this lair-bones POI template.

	`kernel` is supplied by the swgpy template loader; unused here.
	NOTE: this file is autogenerated (see header) — hand edits belong
	only between the BEGIN/END MODIFICATIONS markers.
	"""
	result = Tangible()

	result.template = "object/tangible/lair/base/shared_poi_all_lair_bones_large_evil_fire_red.iff"
	result.attribute_template_id = -1  # -1: no attribute template attached
	result.stfName("lair_n","bones")  # display name looked up in the lair_n STF table

	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####

	return result
|
[
"rwl3564@rit.edu"
] |
rwl3564@rit.edu
|
2ed0e34a43fa2c125006f672440b3da4ab09d4ba
|
cba7110bb180886c22bb3cb844d7f9ff5efee428
|
/petit_lisp.py
|
6ab3ee8d08b510a5981f4dca6235640722ddfbbd
|
[
"CC0-1.0"
] |
permissive
|
aroberge/lispy-experiments
|
d41df042e5737d7d99ac1f03a081e8ce5aed2585
|
c54da34500e95150c2ef9c6057339525edf1e03f
|
refs/heads/master
| 2021-01-19T21:51:58.194552
| 2015-02-25T21:41:55
| 2015-02-25T21:41:55
| 31,174,203
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,700
|
py
|
'''New class based version
'''
import sys
from src.file_loader import FileLoader
from src.python_utils import python_fns
from src.parser import Parser
from src.repl import InteractiveInterpreter
loader = FileLoader()
STRINGS = {}
class Env(dict):
    """An environment: a dict of {'var': val} bindings chained to an outer Env."""

    def __init__(self, params=(), args=(), outer=None):
        super().__init__()
        for name, value in zip(params, args):
            self[name] = value
        self.outer = outer

    def find(self, var):
        """Return the innermost Env (self or an ancestor) where var is bound."""
        scope = self
        while scope is not None:
            if var in scope:
                return scope
            scope = scope.outer
        raise ValueError("{} is not defined".format(var))
class Procedure(object):
    """A user-defined lisp procedure (closure).

    Stores the parameter names, the unevaluated body, and the defining
    environment. `evaluate` and `env_cls` are injected by the interpreter
    at construction time so this module avoids importing it back.
    """
    def __init__(self, params, body, env,
                 opt_param=False,
                 evaluate=None,
                 env_cls=None):
        # opt_param is either False or the index where the "." marker sat,
        # i.e. the count of required parameters before the rest-args list.
        self.params, self.body, self.env = params, body, env
        self.opt_param = opt_param
        self.evaluate = evaluate
        self.env_cls = env_cls
    def __call__(self, *args):
        # Evaluate the body in a fresh environment binding params to args,
        # chained to the closure's defining environment.
        if self.opt_param:
            args = self.pack_args(args)
        return self.evaluate(self.body, self.env_cls(self.params, args, self.env))
    def pack_args(self, args):
        '''ensures that any extra arguments are packed into a list'''
        if len(args) < self.opt_param:
            # Fewer arguments than required parameters.
            raise Exception("Not enough arguments supplied to procedure.")
        elif len(args) == self.opt_param:
            # Exactly the required ones: rest-args becomes an empty list.
            newargs = list(args)
            newargs.append([])
            return tuple(newargs)
        elif ((len(args) > self.opt_param + 1) or
              (not isinstance(args[self.opt_param], list))):
            # Extra positional arguments: gather them into a single list.
            newargs = [arg for arg in args[:self.opt_param]]
            newargs.append(list(args[self.opt_param:]))
            return tuple(newargs)
        else:
            # Already packed: one list occupies the rest-args slot.
            return args
    @staticmethod
    def set_docstring(obj, s):
        '''Sets the docstring of an object; useful for user-defined procedures'''
        # strings are stored with enclosing double quote characters
        obj.__doc__ = s[1:-1]
class Lisp:
    '''Core list/atom primitives exposed to lisp programs.

    Each static method backs a form written as (proc expr1 expr2 ...):
    the interpreter evaluates every argument first, then dispatches the
    resulting values to the method as positional arguments.
    '''
    @staticmethod
    def begin(*expr):
        '''(begin expr1 ... expr_last) ==> evaluates all, yields expr_last'''
        last = expr[-1]
        return last
    @staticmethod
    def is_atom(atom):
        '''(atom? expr) ==> true when expr is not a list'''
        if isinstance(atom, list):
            return False
        return True
    @staticmethod
    def are_equal(val1, val2):
        '''(eq? expr1 expr2) ==> true when both are atoms and equal'''
        if isinstance(val1, list):
            return False
        return val1 == val2
    @staticmethod
    def car(*expr):
        '''(car (exp1 exp2 exp3 ...)) ==> exp1'''
        sequence = expr[0]
        return sequence[0]
    @staticmethod
    def cdr(*expr):
        '''(cdr (exp1 exp2 exp3 ...)) ==> (exp2 exp3 ...)'''
        sequence = expr[0]
        return list(sequence[1:])
    @staticmethod
    def cons(*expr):
        '''(cons expr lst) ==> lst with expr prepended'''
        head, tail = expr[0], expr[1]
        if not isinstance(tail, list):
            raise ValueError("Second argument of cons must be a list.")
        return [head] + tail
lisp_procs = {
    'begin': Lisp.begin,
    'atom?': Lisp.is_atom,
    'eq?': Lisp.are_equal,
    'car': Lisp.car,
    'cdr': Lisp.cdr,
    'cons': Lisp.cons
}
def display(s):
    '''Print the contents of a lisp string literal.

    Strings arrive with their enclosing double quotes still attached
    (no escaped-quote support); strip them before printing.'''
    text = s[1:-1]
    print(text)
def common_env(env):
    """Populate *env* with the interpreter's built-in names and return it.

    Update order matters: python_fns and then lisp_procs are applied after
    the literal dict, so later tables win on key collisions.
    """
    env.update({
        '__True__': True,
        '__False__': False,
        '_DEBUG': False,
        'quit': exit,
        'print': display,
        'load': loader.load,
        'set-docstring': Procedure.set_docstring
    })
    env.update(python_fns)
    env.update(lisp_procs)
    return env
exit.__doc__ = "Quits the repl."  # friendlier help text for (quit) in the repl
global_env = common_env(Env())  # the single top-level environment
def evaluate(x, env=None):
    """Evaluate parsed expression *x* in *env* (defaults to global_env).

    Special forms (undefined?, quote, define, set!, lambda, cond, if) are
    handled inline; any other list is a procedure application.
    """
    if env is None:
        env = global_env
    if isinstance(x, str):          # variable reference
        if x in STRINGS:            # interned string literal from the parser
            return STRINGS[x]
        return env.find(x)[x]
    elif not isinstance(x, list):   # constant literal
        return x
    elif x[0] == 'undefined?':      # (undefined? x)
        try:
            _ = env.find(x[1])
            return False  # found ... so it is defined
        except ValueError:
            return True
    elif x[0] == 'quote':           # (quote exp), or 'exp
        (_, exp) = x
        return exp
    elif x[0] == 'define':          # (define var exp) — binds in current scope
        (_, var, exp) = x
        env[var] = evaluate(exp, env)
    elif x[0] == 'set!':            # (set! var exp) — rebinds where var lives
        (_, var, exp) = x
        env.find(var)[var] = evaluate(exp, env)
    elif x[0] == 'lambda':          # (lambda (params*) body)
        (_, params, body) = x
        opt_param = False
        if '.' in params:
            # "." marks the boundary before a rest-args parameter.
            opt_param = params.index('.')
            params.pop(opt_param)
        return Procedure(params, body, env, opt_param, evaluate, Env)
    elif x[0] == 'cond':            # (cond (p1 e1) ... (pn en))
        for (p, e) in x[1:]:
            if evaluate(p, env):
                return evaluate(e, env)
    elif x[0] == 'if':              # (if test if_true other)
        (_, test, if_true, other) = x
        return evaluate((if_true if evaluate(test, env) else other), env)
    else:                           # ("procedure" exp*)
        exps = [evaluate(exp, env) for exp in x]
        procedure = exps.pop(0)
        # Builtins declaring a keyword-only `env` parameter receive the
        # current environment.
        if (hasattr(procedure, '__kwdefaults__')
                and procedure.__kwdefaults__ is not None
                and "env" in procedure.__kwdefaults__):
            if env is None:
                env = global_env
            return procedure(*exps, env=env)
        else:
            return procedure(*exps)
# Wire parser and loader together; STRINGS is shared so string literals
# produced by the parser resolve during evaluate().
parse = Parser(STRINGS).parse
loader.evaluate = evaluate
loader.parse = parse
if __name__ == "__main__":
    # Load the file named on the command line, or the default prelude,
    # then drop into the interactive REPL.
    if len(sys.argv) > 1:
        loader.load(sys.argv[1])
    else:
        loader.load("src/default_language.lisp")
    interpreter = InteractiveInterpreter(evaluate, parse, global_env)
    interpreter.start()
|
[
"andre.roberge@gmail.com"
] |
andre.roberge@gmail.com
|
e33f6c598c699f04928d5a390d5e4325a4948d24
|
9b4de05054f37a65dce49857fb6a809a370b23ca
|
/gd/migrations/0017_auto_20171223_1605.py
|
97c3517179868639f2ab467e37bf45e031c80896
|
[] |
no_license
|
susahe/gis
|
f6b03b8f23abf7ca22c0069a4cdf603bfe879808
|
6b8d433cd5f672994ac138c1b656136425d0c345
|
refs/heads/master
| 2021-05-12T01:50:12.862559
| 2018-01-27T02:25:31
| 2018-01-27T02:25:31
| 117,569,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,530
|
py
|
# Generated by Django 2.0 on 2017-12-23 16:05
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0 (2017-12-23): swaps the Grama Seva
    # division field labels for Sinhala verbose names. The verbose_name
    # strings are user-facing labels (some contain U+200D joiners) and
    # must not be edited by hand.

    dependencies = [
        ('gd', '0016_auto_20171223_1601'),
    ]

    operations = [
        migrations.AlterField(
            model_name='gramasevadivision',
            name='gs_end_date',
            field=models.DateTimeField(blank=True, verbose_name='සේවය අවසන් කල දිනය '),
        ),
        migrations.AlterField(
            model_name='gramasevadivision',
            name='gs_fname',
            field=models.CharField(max_length=100, verbose_name='ග්\u200dරාමසේවක මහතාගේ මුල් නම'),
        ),
        migrations.AlterField(
            model_name='gramasevadivision',
            name='gs_lname',
            field=models.CharField(max_length=300, verbose_name='ග්\u200dරාමසේවක මහතාගේ වාසගම'),
        ),
        migrations.AlterField(
            model_name='gramasevadivision',
            name='gs_oname',
            field=models.CharField(max_length=300, verbose_name='ග්\u200dරාමසේවක මහතාගේ අනිකුත් නම්'),
        ),
        migrations.AlterField(
            model_name='gramasevadivision',
            name='gs_start_date',
            field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='සේවය පටන් ගත් දිනය'),
        ),
    ]
|
[
"sumudu.susahe@gmail.com"
] |
sumudu.susahe@gmail.com
|
54c6f8673ede0ff92aae2a33401611442277cef8
|
c8e3ce59771a46723eb460dadc7136ce4337567b
|
/wordcloud_yelp_pos.py
|
df0e8b9012472da71ebf042b39ed6ff59d675dfb
|
[
"MIT"
] |
permissive
|
elisetnp/stylistic-word-clouds
|
658c4b5e4bcf903f670078d8d6ebd4a25224afd1
|
0ecc7fa5632cd21ed9b24ccad9e27448a15eed81
|
refs/heads/master
| 2022-01-18T17:18:51.055745
| 2016-05-08T04:46:00
| 2016-05-08T04:46:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,289
|
py
|
import numpy as np
import csv
import random
from PIL import Image
from wordcloud import WordCloud, STOPWORDS
from palettable.colorbrewer.sequential import Greens_9
def color_func(word, font_size, position, orientation, random_state=None, **kwargs):
    """wordcloud color callback: pick a random mid-to-dark ColorBrewer green.

    Indices 2-8 of the Greens_9 palette skip the lightest shades. The
    signature matches wordcloud's color_func contract; all arguments are
    ignored here.
    """
    return tuple(Greens_9.colors[random.randint(2,8)])
# Input data and assets. NOTE(review): absolute paths are machine-specific;
# adjust before running elsewhere.
csv_path = "yelp_words_by_stars_1gram.csv"
fa_path = "/Users/maxwoolf/Downloads/exported2048/"
font_path = "/Users/maxwoolf/Fonts/OpenSans-CondBold.ttf"
icon = "smile-o"

# Collect (WORD, count) pairs for 5-star reviews, skipping stopwords.
words_array = []
with open(csv_path, 'rb') as csvfile:  # NOTE(review): 'rb' + csv is Py2-style; Py3 needs newline='' text mode
    reader = csv.DictReader(csvfile)
    for row in reader:
        # Fixed: the original compared with `is '5'`, which only works by
        # CPython's short-string interning; use value equality instead.
        if row['stars'] == '5' and row['word'] not in STOPWORDS:
            words_array.append((row['word'].upper(), float(row['count'])))

# Flatten the transparent Font Awesome icon onto a white RGB mask.
# http://stackoverflow.com/questions/7911451/pil-convert-png-or-gif-with-transparency-to-jpg-without
icon_path = fa_path + "%s.png" % icon
icon = Image.open(icon_path)
mask = Image.new("RGB", icon.size, (255,255,255))
mask.paste(icon,icon)
mask = np.array(mask)

wc = WordCloud(font_path=font_path, background_color="white", max_words=2000, mask=mask,
               max_font_size=300, random_state=42)
# generate word cloud
wc.generate_from_frequencies(words_array)
wc.recolor(color_func=color_func, random_state=3)
wc.to_file("yelp_pos_wordcloud.png")
|
[
"max@minimaxir.com"
] |
max@minimaxir.com
|
74db49958179e9efa98ebfc30bb65ded9c8eee31
|
08b0c27ce98495c0889d7b768ac7d2a97beff158
|
/廖雪峰-python/廖雪峰-面向对象之多重继承.py
|
aa05ccb03ba657e9c34a5bba6dd5ff9567191adf
|
[] |
no_license
|
jetli123/python_files
|
7d3834b8e3f8b8dca5109c2d6aeb8d0fcdb852c3
|
7f5b787820cca2cf5820a1cdf3fed77e5185f04e
|
refs/heads/master
| 2020-03-27T01:36:02.526294
| 2019-07-11T06:58:35
| 2019-07-11T06:58:35
| 145,726,196
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,165
|
py
|
# -*- coding: utf-8 -*-
__author__ = 'JetLi'
"""继承是面向对象编程的一个重要的方式,因为通过继承,子类就可以扩
展父类的功能。"""
"""
Dog - 狗狗;
Bat - 蝙蝠;
Parrot - 鹦鹉;
Ostrich - 鸵鸟。
Mammal 哺乳类:能跑的哺乳类,能飞的哺乳类;
Bird 鸟类:能跑的鸟类,能飞的鸟类。
"""
# 采用多重继承。首先,主要的类层次仍按照哺乳类和鸟类设计
class Animal(object):
    """Root of the animal class hierarchy."""


class Mammal(Animal):
    """Major category: mammals."""


class Bird(Animal):
    """Major category: birds."""
"""现在,我们要给动物再加上 Runnable 和 Flyable 的功能,只需要先定义
好 Runnable 和 Flyable 的类:"""
class RunnableMixin(object):
    """Mix-in granting the ability to run."""

    @staticmethod
    def run():
        print('Running...')


class FlyableMixin(object):
    """Mix-in granting the ability to fly."""

    @staticmethod
    def fly():
        print('Flying...')
class Dog(Mammal, RunnableMixin):
    """A mammal that gains running via RunnableMixin."""


class Bat(Mammal, FlyableMixin):
    """A mammal that gains flight via FlyableMixin."""


class Parrot(Bird, FlyableMixin):
    """A bird that gains flight via FlyableMixin."""


class Ostrich(Bird, RunnableMixin):
    """A bird that gains running via RunnableMixin."""
# Demo: each animal exposes the ability contributed by its mix-in.
b = Bat()
b.fly()
c = Dog()
c.run()
d = Parrot()
d.fly()
e = Ostrich()
e.run()
"""如果需要“混入”额外的功能,通过多重继承就可以Python3 基础教程【完整版】 http://www.yeayee.com/
195/531
实现,比如,让 Ostrich 除了继承自 Bird 外,再同时继承 Runnable。这
种设计通常称之为 MixIn。"""
"""MixIn 的目的就是给一个类增加多个功能,这样,在设计类的时候,我
们优先考虑通过多重继承来组合多个 MixIn 的功能,而不是设计多层次
的复杂的继承关系"""
# 比如,编写一个多进程模式的 TCP 服务,定义如下:
class ForkingMixin(object):
    """Placeholder mix-in sketching a forking (multi-process) strategy."""


class TcpServer(object):
    """Placeholder base class sketching a TCP server."""


class MyTCPServer(TcpServer, ForkingMixin):
    """Multi-process TCP server composed via the MixIn pattern."""
"""小结
由于 Python 允许使用多重继承,因此, MixIn 就是一种常见的设计。
只允许单一继承的语言(如 Java)不能使用 MixIn 的设计。"""
|
[
"Jet_Oracle@outlook.com"
] |
Jet_Oracle@outlook.com
|
919ee4eab014fe4cd9950bf760b3ed95385fdfe2
|
79f42fd0de70f0fea931af610faeca3205fd54d4
|
/base_lib/daemonize.py
|
bcabe9a83f6ac73cf1fcc28f101b0bb623286a53
|
[] |
no_license
|
fanwen390922198/ceph_pressure_test
|
a900a6dc20473ae3ff1241188ed012d22de2eace
|
b6a5b6d324e935915090e791d9722d921f659b26
|
refs/heads/main
| 2021-08-27T16:26:57.500359
| 2021-06-02T05:18:39
| 2021-06-02T05:18:39
| 115,672,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,682
|
py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Copyright (C) 2018 - All Rights Reserved
# project: ceph_pressure_test
# file: daemonize.py
# time: 2019/9/5 14:05
# author: fanwen
# desc:
# !/usr/bin/env python
# coding: utf-8
import sys
import os
# 将当前进程fork为一个守护进程
# 注意:如果你的守护进程是由inetd启动的,不要这样做!inetd完成了
# 所有需要做的事情,包括重定向标准文件描述符,需要做的事情只有chdir()和umask()了
def daemonize(stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
    """Detach the current process into a daemon (classic double fork).

    Redirects the standard file descriptors to the given paths (default
    /dev/null). Do not use this when the daemon is launched by inetd —
    inetd already handles descriptor redirection; only chdir() and
    umask() would remain.

    Args:
        stdin/stdout/stderr: paths the standard streams are redirected to.
    """
    # First fork: the parent exits, so the child can never regain a
    # controlling terminal through the original session.
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)  # first parent exits
    except OSError as e:  # fixed: `except OSError, e` is Python-2-only syntax
        sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
        sys.exit(1)
    # Detach from the parent environment.
    os.chdir("/")   # keep no directory in use, so filesystems can be unmounted
    os.umask(0)     # reset umask for full control over created-file permissions
    os.setsid()     # become session leader, dropping the controlling tty
    # Second fork: ensure the daemon is not a session leader and thus can
    # never (re)acquire a controlling terminal.
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)  # second parent exits
    except OSError as e:
        sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
        sys.exit(1)
    # Now a daemon: redirect the standard file descriptors.
    for f in sys.stdout, sys.stderr:
        f.flush()
    si = open(stdin, 'r')
    so = open(stdout, 'a+')
    # NOTE(review): buffering=0 with mode 'a+' is Python-2 behavior; Python 3
    # only allows unbuffered opens in binary mode — confirm target version.
    se = open(stderr, 'a+', 0)
    os.dup2(si.fileno(), sys.stdin.fileno())   # dup2 atomically closes + duplicates
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
# Example daemon body: prints a counter and a timestamp once per second.
def main():
    """Run forever, writing one numbered timestamp line per second."""
    import time
    sys.stdout.write('Daemon started with pid %d\n' % os.getpid())
    sys.stdout.write('Daemon stdout output\n')
    sys.stderr.write('Daemon stderr output\n')
    c = 0
    while True:  # infinite loop; output lands in the redirected stdout log
        sys.stdout.write('%d: %s\n' % (c, time.ctime()))
        sys.stdout.flush()  # flush per line so the log file updates live
        c = c + 1
        time.sleep(1)
if __name__ == "__main__":
    # Detach with stdout/stderr sent to log files, then run the demo loop.
    daemonize('/dev/null', '/tmp/daemon_stdout.log', '/tmp/daemon_error.log')
    main()
|
[
"fanwen@sscc.com"
] |
fanwen@sscc.com
|
85d021bf63d7f990e9182b73040daae662c6324f
|
82e19f3738f47bc517fcb6dd1bf480117bdc8825
|
/0x07-python-test_driven_development/5-text_indentation.py
|
9457c9692e0add48c1f4ddabf3c780864188b0bb
|
[] |
no_license
|
PierreBeaujuge/holbertonschool-higher_level_programming
|
ce6cfaf09fd0fefff8047c23320009ffae9f6e79
|
a133bfd68e3ec1f9430d6c722dd96d13f117c8cf
|
refs/heads/master
| 2021-07-08T00:10:20.568509
| 2020-11-14T20:26:56
| 2020-11-14T20:26:56
| 207,353,878
| 0
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 833
|
py
|
#!/usr/bin/python3
"""
Module that handles
printing
of a text
"""
def text_indentation(text):
    """Print text with two newlines after each '.', '?' and ':'.

    Runs of spaces and tabs immediately following a separator are skipped,
    so the next sentence starts flush at the left margin.

    Args:
        text (str): the text to print.

    Raises:
        TypeError: if text is not a string.
    """
    if not isinstance(text, str):
        raise TypeError("text must be a string")
    new_text = ""
    i = 0
    while i < len(text):
        # Fixed: original compared characters with `is`, which only works
        # because CPython interns one-character strings; use membership.
        if text[i] in '.?:':
            new_text += text[i] + '\n\n'
            # Skip any run of spaces/tabs right after the separator.
            while i < len(text) - 1 and text[i + 1] in ' \t':
                i += 1
        else:
            new_text += text[i]
        i += 1
    print(new_text, end='')
|
[
"pierre.beaujuge@gmail.com"
] |
pierre.beaujuge@gmail.com
|
c8e277f57fbd493bde3ddc1f1f68158067e231ac
|
e48375c39c0d1fc71742b1964dffdd3af0ff86c0
|
/nlu/components/classifiers/token_bert_healthcare/token_bert_healthcare.py
|
dfcbf72f27c66728a4a68aba94343643ceb788d4
|
[
"Apache-2.0"
] |
permissive
|
ahmedlone127/nlu
|
b8da5a84f0e47640cb09616559bf8b84c259f278
|
614bc2ff94c80a7ebc34a78720ef29a1bf7080e0
|
refs/heads/master
| 2023-02-09T05:10:29.631583
| 2022-05-20T15:16:33
| 2022-05-20T15:16:33
| 325,437,640
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 604
|
py
|
class TokenBertHealthcare:
    """Factory for Spark NLP MedicalBertForTokenClassifier annotators.

    The sparknlp_jsl import happens inside each method so this module can
    be imported without the licensed library installed.
    """

    @staticmethod
    def get_default_model():
        """Return the default pretrained token classifier, wired to
        read 'sentence'/'token' columns and emit 'ner'."""
        from sparknlp_jsl.annotator import MedicalBertForTokenClassifier
        classifier = MedicalBertForTokenClassifier.pretrained()
        return classifier.setInputCols("sentence", "token").setOutputCol("ner")

    @staticmethod
    def get_pretrained_model(name, language, bucket=None):
        """Return a named pretrained token classifier from the given
        language/bucket, wired like the default model."""
        from sparknlp_jsl.annotator import MedicalBertForTokenClassifier
        classifier = MedicalBertForTokenClassifier.pretrained(name, language, bucket)
        return classifier.setInputCols("sentence", "token").setOutputCol("ner")
|
[
"christian.kasim.loan@gmail.com"
] |
christian.kasim.loan@gmail.com
|
373e06129c3d09a2092239b0a9dd19e72f5ca703
|
e8e9bab84754786e68e32ad5bba9a1f93dd36df1
|
/python/rr.py
|
b851fac9403a587338b012a5479bea4efa44772f
|
[] |
no_license
|
0vermind/eddie
|
98026246bca34dd9a67b91113cf03bce6743489d
|
38f43fb296a916fde7721543b942a59fffb9e871
|
refs/heads/master
| 2021-05-10T00:23:39.336437
| 2018-01-24T12:26:31
| 2018-01-24T12:26:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,921
|
py
|
"""
Usage:
df = pd.read_csv("data.csv")
renko = Renko(df)
renko.brick_size = 2
bricks = renko.get_bricks()
print(bricks)
"""
import sys
import datetime as dt
import numpy as np
import pandas as pd
import nsepy
class Renko:
    """Build Renko bricks from an OHLC DataFrame.

    Usage: construct with a DataFrame holding 'date' and OHLC columns, set
    brick_size, then call get_bricks(). Only the period-close algorithm is
    implemented; chart_type PRICE_MOVEMENT would dispatch to a method that
    does not exist here (presumably defined elsewhere — TODO confirm).
    """
    # Chart-type selectors.
    PERIOD_CLOSE = 1
    PRICE_MOVEMENT = 2
    # Extra bricks discounted when the trend reverses direction.
    TREND_CHANGE_DIFF = 2
    # Defaults; callers typically override brick_size after construction.
    brick_size = 1
    chart_type = PERIOD_CLOSE
    required_columns = {'open', 'high', 'low', 'close'}
    def __init__(self, df):
        # df: raw input; rdf: working frame; bdf: resulting bricks frame.
        self.df = df
        self._validate_df()
        self.rdf = df
        self.bdf = None
    def _validate_df(self):
        """Raise ValueError unless all OHLC columns are present."""
        if not self.required_columns.issubset(self.df.columns):
            raise ValueError('DataFrame should have OHLC {} columns'.format(self.required_columns))
    def get_bricks(self):
        """Compute and return the bricks DataFrame (stored in self.bdf)."""
        if self.chart_type == self.PERIOD_CLOSE:
            self.period_close_bricks()
        else:
            self.price_movement_bricks()
        return self.bdf
    def period_close_bricks(self):
        """Derive bricks from period closing prices (mutates rdf, then bdf)."""
        brick_size = self.brick_size
        self.rdf = self.rdf[['date', 'close']]
        # close_s1: close-to-close change; close_r: close rounded down to grid.
        self.rdf.loc[:, 'close_s1'] = self.rdf['close'] - self.rdf['close'].shift()
        # self.rdf.dropna(inplace=True)
        self.rdf.loc[:, 'close_r'] = (self.rdf['close'] // self.brick_size) * self.brick_size
        self.rdf.loc[:, 'close_r_s1'] = (self.rdf['close_s1'] // self.brick_size) * self.brick_size
        self.filter_noise()
        # rtc flags rows where the brick direction flips sign.
        bricks = self.rdf['bricks']
        asign = np.sign(bricks)
        self.rdf.loc[:, 'rtc'] = ((np.roll(asign, 1) - asign) != 0).astype(int)
        # On reversals, shave TREND_CHANGE_DIFF bricks off the move.
        self.rdf.loc[:, 'u_bricks'] = self.rdf.loc[self.rdf['rtc'] == 1, 'bricks']
        self.rdf.loc[:, 'u_bricks'] = self.rdf['u_bricks'].apply(
            lambda x: x - self.TREND_CHANGE_DIFF if x > 0 else x + self.TREND_CHANGE_DIFF
        )
        self.rdf.loc[self.rdf['rtc'] == 0, 'u_bricks'] = self.rdf['bricks']
        self.rdf = self.rdf[['close_r', 'u_bricks', 'date']]
        self.rdf = self.rdf[self.rdf['u_bricks'] != 0]
        self.rdf.reset_index(inplace=True)
        self.rdf.dropna(inplace=True)
        self.calculate_bricks_from_diff()
        self.shift_bricks()
    def shift_bricks(self):
        """Realign brick levels so the last brick tracks the latest close."""
        shift = self.df['close'].iloc[-1] - self.bdf['close'].iloc[-1]
        if abs(shift) < self.brick_size:
            return
        step = shift // self.brick_size
        self.bdf[['open', 'close']] += step * self.brick_size
    def calculate_bricks_from_diff(self):
        """Expand per-row brick counts (u_bricks) into open/close rows in bdf."""
        brick_size = self.brick_size
        columns = ['open', 'close', 'date']
        # Seed row [0, 0, 0] keeps concat simple; it survives into bdf.
        self.bdf = pd.DataFrame(
            columns=columns,
            data=[[0, 0, 0]],
        )
        prev_bricks = 1
        # Start from the first close snapped down to the brick grid.
        cls = (self.df['close'].iloc[0] // brick_size) * brick_size
        for index, row in self.rdf.iterrows():
            bricks = row['u_bricks']
            date = row['date']
            data = []
            for i in range(int(abs(bricks))):
                # First brick after a direction flip skips one grid level.
                if prev_bricks * bricks < 0 and i == 0 :
                    cls = cls + brick_size * (bricks / abs(bricks))
                r = [
                    cls,
                    cls + (brick_size * (bricks / abs(bricks))),
                    date
                ]
                data.append(r)
                cls = r[1]
            prev_bricks = bricks
            # print(data)
            sdf = pd.DataFrame(data=data, columns=columns)
            self.bdf = pd.concat([self.bdf, sdf])
        return self.bdf
    def filter_noise(self):
        """Iteratively drop single-brick reversals (noise) from rdf."""
        df = self.rdf
        brick_size = self.brick_size
        # cr_diff: grid-level change; bricks: signed brick count per row;
        # tc: True where the direction flips relative to the previous row.
        df.loc[:, 'cr_diff'] = df['close_r'] - df['close_r'].shift()
        df = df[df['cr_diff'] != 0]
        df.loc[:, 'bricks'] = df.loc[:, ('cr_diff', )] / brick_size
        df.loc[:, 'bricks_s1'] = df['bricks'].shift()
        df.loc[:, 'tc'] = np.where((df['bricks'] * df['bricks_s1']) < 0, True, False)
        # Repeat until no one-brick reversal remains (removals can create
        # new adjacencies, so recompute each pass).
        while True:
            df.loc[:, 'cr_diff'] = df['close_r'] - df['close_r'].shift()
            df = df[df['cr_diff'] != 0]
            df['bricks'] = df.loc[:, ('cr_diff', )] / brick_size
            df['bricks_s1'] = df['bricks'].shift()
            df['tc'] = np.where((df['bricks'] * df['bricks_s1']) < 0, True, False)
            filtered_df = df[(~df['tc']) | ~(abs(df['bricks']) == 1)]
            if len(df) == len(filtered_df):
                break
            df = filtered_df
        self.rdf = df
# Script entry: read OHLC data from a local CSV (first CLI argument) or
# fall back to downloading SBIN history from NSE via nsepy.
if len(sys.argv) > 1:
    fname = sys.argv[1]
    print('Reading local file {}'.format(fname))
    df = pd.read_csv(sys.argv[1])
else:
    print('Downloading data from nsepy')
    df = nsepy.get_history(
        symbol='SBIN',
        start=dt.date(2017,1,1),
        end=dt.date(2018,1,19)
    )
    if df.empty:
        print('No data is received from nsepy. Exiting...')
        sys.exit()
    df.reset_index(inplace=True)
# Renko expects lower-case column names ('open', 'high', 'low', 'close').
df.columns = [i.lower() for i in df.columns]
renko = Renko(df)
renko.brick_size = 4
r = renko.get_bricks()
print(r.tail(20))
|
[
"anand21nanda@gmail.com"
] |
anand21nanda@gmail.com
|
baf9232934abe8f004c202e7807716b4d6876a09
|
a5a943391577a3b7222533d335ec0eda6cc1bc33
|
/src/robot/version.py
|
3be9a14084a8b351caae9ad094fb248c1aa7bd42
|
[
"Apache-2.0",
"CC-BY-3.0"
] |
permissive
|
miktuy/robotframework
|
2164aa67d779ec2a3511181bb7d01a7ad2bc45a9
|
15e11c63be0e7a4ce8401c7d47346a7dc8c81bf5
|
refs/heads/master
| 2023-01-31T11:19:18.816499
| 2023-01-18T17:34:13
| 2023-01-18T17:34:13
| 298,874,288
| 0
| 0
|
Apache-2.0
| 2023-01-26T12:49:01
| 2020-09-26T18:20:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,408
|
py
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
# Version number typically updated by running `invoke set-version <version>`.
# Run `invoke --help set-version` or see tasks.py for details.
VERSION = '6.1.dev1'


def get_version(naked=False):
    """Return the version string; naked=True strips any pre-release suffix."""
    if not naked:
        return VERSION
    # Split on the first pre-release marker (a/b/rc/.dev); keep the prefix.
    return re.split('(a|b|rc|.dev)', VERSION)[0]


def get_full_version(program=None, naked=False):
    """Return '<program> <version> (<interpreter> <py-version> on <platform>)'."""
    parts = (program or '',
             get_version(naked),
             get_interpreter(),
             sys.version.split()[0],
             sys.platform)
    return ('%s %s (%s %s on %s)' % parts).strip()


def get_interpreter():
    """Return 'PyPy' when running on PyPy, otherwise 'Python'."""
    return 'PyPy' if 'PyPy' in sys.version else 'Python'
|
[
"peke@iki.fi"
] |
peke@iki.fi
|
a816b9eb7cb4c15572bcc2fde5429a9a3fef50b8
|
2827d7a837eb29c3cb07793ab6d3d5a753e18669
|
/alipay/aop/api/request/AlipayOfflineMarketProductBatchqueryRequest.py
|
5b6524909b5e0b93c45834d283793cf3e6e236ce
|
[
"Apache-2.0"
] |
permissive
|
shaobenbin/alipay-sdk-python
|
22e809b8f5096bec57d2bb25414f64bdc87fa8b3
|
5232ad74dff2e8a6e0e7646ab3318feefa07a37d
|
refs/heads/master
| 2020-03-21T04:51:39.935692
| 2018-06-21T07:03:31
| 2018-06-21T07:03:31
| 138,131,022
| 0
| 0
| null | 2018-06-21T06:50:24
| 2018-06-21T06:50:24
| null |
UTF-8
|
Python
| false
| false
| 4,015
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayOfflineMarketProductBatchqueryModel import AlipayOfflineMarketProductBatchqueryModel
class AlipayOfflineMarketProductBatchqueryRequest(object):
    """Request wrapper for the alipay.offline.market.product.batchquery API.

    Holds the business payload plus transport-level fields (version,
    terminal info, callback URLs, custom params) and serializes them to
    the flat parameter dict the gateway expects via get_params().
    """

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain dict to convert.
        if isinstance(value, AlipayOfflineMarketProductBatchqueryModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayOfflineMarketProductBatchqueryModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        # Fixed: previously returned self._notify_url (copy-paste bug), so
        # a value set through the setter was never readable.
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Silently ignore non-dict values (upstream SDK convention).
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        """Attach one extra text parameter, creating the dict lazily."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Serialize all populated fields into the gateway parameter dict.

        biz_model takes the P_BIZ_CONTENT slot; a separate biz_content is
        JSON-encoded (or passed through when it has no to_alipay_dict).
        """
        params = dict()
        params[P_METHOD] = 'alipay.offline.market.product.batchquery'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """No file attachments for this API; always an empty dict."""
        multipart_params = dict()
        return multipart_params
|
[
"liuqun.lq@alibaba-inc.com"
] |
liuqun.lq@alibaba-inc.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.