ledger = {n: idx + 1 for idx, n in enumerate([1, 17, 0, 10, 18, 11, 6])}
spoken_number = list(ledger)[-1]
for turn in range(len(ledger), 30000000):
    ledger[spoken_number], spoken_number = turn, turn - ledger.get(spoken_number, turn)
    if turn == 2020 - 1:
        print(f"Part 1: {spoken_number}")  # 595
print(f"Part 2: {spoken_number}")  # 1708310
|
error_map = {
0: None,
-1: "连接服务失败",
-2: "链路认证失败",
-3: "主机地址不可用",
-4: "发送数据错误",
-5: "测试编号不合法",
-6: "没准备好测试网络",
-7: "当前网络测试还没结束",
-8: "没用可用的接入前置",
-9: "数据路径不可用",
-10: "重复登录",
-11: "内部错误",
-12: "上一次请求还没有结束",
-13: "输入参数非法",
-14: "授权码不合法",
-15: "授权码超期",
-16: "授权码类型不匹配",
-17: "API还没有准备好",
-18: "UDP端口监听失败",
-19: "UDP正在监听",
-20: "接口未实现",
-21: "每次登陆只允许调用一次",
-22: "超过下单频率。",
-10000: "输入数据为NULL",
-10001: "输入错误的:TAPIYNFLAG",
-10002: "输入错误的:TAPILOGLEVEL",
-10003: "输入错误的:TAPICommodityType",
-10004: "输入错误的:TAPICallOrPutFlagType",
-12001: "输入错误的:TAPIAccountType",
-12003: "输入错误的:TAPIAccountState",
-12004: "输入错误的:TAPIAccountFamilyType",
-12005: "输入错误的:TAPIOrderTypeType",
-12006: "输入错误的:TAPIOrderSourceType",
-12007: "输入错误的:TAPITimeInForceType",
-12008: "输入错误的:TAPISideType",
-12009: "输入错误的:TAPIPositionEffectType",
-12010: "输入错误的:TAPIHedgeFlagType",
-12011: "输入错误的:TAPIOrderStateType",
-12012: "输入错误的:TAPICalculateModeType",
-12013: "输入错误的:TAPIMatchSourceType",
-12014: "输入错误的:TAPIOpenCloseModeType",
-12015: "输入错误的:TAPIFutureAlgType",
-12016: "输入错误的:TAPIOptionAlgType",
-12017: "输入错误的:TAPIBankAccountLWFlagType",
-12021: "输入错误的:TAPIMarginCalculateModeType",
-12022: "输入错误的:TAPIOptionMarginCalculateModeType",
-12023: "输入错误的:TAPICmbDirectType",
-12024: "输入错误的:TAPIDeliveryModeType",
-12025: "输入错误的:TAPIContractTypeType",
-12035: "输入错误的:TAPITacticsTypeType",
-12036: "输入错误的:TAPIORDERACT",
-12041: "输入错误的:TAPITriggerConditionType",
-12042: "输入错误的:TAPITriggerPriceTypeType",
-12043: "输入错误的:TAPITradingStateType",
-12044: "输入错误的:TAPIMarketLevelType",
-12045: "输入错误的:TAPIOrderQryTypeType",
1: "主动断开",
2: "被动断开",
3: "读错误",
4: "写错误",
5: "缓冲区满",
6: "异步操作错误",
7: "解析数据错误",
8: "连接超时",
9: "初始化失败",
10: "已经连接",
11: "工作线程已结束",
12: "操作正在进行,请稍后重试",
13: "心跳检测失败",
10001: "登录过程执行错误",
10002: "登录用户不存在",
10003: "需要进行动态认证",
10004: "登录用户未授权",
10005: "登录模块不正确",
10006: "需要强制修改密码",
10007: "登录状态禁止登陆",
10008: "登录密码不正确",
10009: "没有该模块登录权限",
10010: "登录数量超限",
10011: "登录用户不在服务器标识下可登录用户列表中",
10012: "登录用户已被冻结",
10013: "密码错误,用户冻结",
10014: "客户状态不允许登录",
10015: "需要进行二次认证",
10016: None,
10017: None,
10018: "登录用户密码超过有效天数",
10101: "登录用户信息查询失败",
11001: "数据库操作失败",
11501: "登录用户下属所有资金账号查询失败",
11701: "登录用户密码修改失败",
11702: "登录用户密码修改失败——原始密码错误",
11703: "登录用户密码修改失败——不能与前n次密码相同",
11704: "新密码不符合密码复杂度要求",
20201: "资金账号信息查询失败",
20701: "客户交易编码查询失败",
22801: "合约信息查询失败",
22901: "特殊期权标的查询失败",
25501: "品种委托类型查询失败",
25601: "品种委托时间有效性查询失败",
28901: "用户下单频率查询失败",
60001: "资金账号不存在",
60002: "资金账号状态不正确",
60003: "资金账号交易中心不一致",
60004: "资金账号无期权交易权限",
60005: "资金账号无品种交易权限",
60006: "资金账号无开仓权限",
60007: "资金账号风控项检查失败",
60011: "下单无效的合约",
60021: "客户权限禁止交易",
60022: "客户品种分组禁止交易",
60023: "客户合约特设禁止交易",
60024: "系统权限禁止交易",
60031: "持仓量超过最大限制",
60032: "下单超过单笔最大量",
60033: "下单合约无交易路由",
60034: "委托价格超出偏离范围",
60035: "超过GiveUp最大持仓量",
60036: "下单自动审核失败",
60037: "LME未准备就绪",
60038: "平仓方式错误",
60039: "下单对应的父账号资金不足",
60040: "互换单的合约格式错误",
60051: "下单资金不足",
60052: "手续费参数错误",
60053: "保证金参数错误",
60061: "撤单无此系统号",
60062: "此状态不允许撤单",
60063: "录单不允许撤单",
60071: "此状态不允许改单",
60072: "人工单不允许改单",
60081: "已删除报单不能转移",
60082: "人工单不允许改单",
60091: "录单重复",
60092: "保证金参数错误",
60100: "操作账号只可查询",
60101: "合约行情价格修改失败",
60102: "即使子帐号又是做市商不能应价",
60103: "下单找不到交易编码",
60104: "操作账号只可开仓",
60105: "操作账号没有上期挂单查询权限",
60106: "限期有效单不能小于当前交易日",
60107: "该编码不允许申请或拆分组合",
60108: "非本服务器标记下的账号不允许操作",
60109: "行权或弃权量超过可用量",
60110: "没有订单审核权限",
60111: "下单超过上手单笔最大量",
60115: "非大连应价单不允许两笔委托量不一致",
60117: "申请不允许重复提交",
60118: "超过账号下单频率限制",
60119: "组合表不存在的组合方向或投保标志",
61001: "订单操作频率过高",
61002: "委托查询返回前不能进行下次查询",
72001: "超过行情最大总订阅数",
72002: "超过该交易所行情最大订阅数",
72101: "没有该行情的订阅权限",
72102: "没有该交易所下行情的订阅权限",
72103: "品种不存在",
72104: "合约可能不存在",
83001: "不支持的行情协议",
14001: "二次验证失败",
14002: "二次验证超时",
11000: "数据库连接失败",
11002: "不允许一对多",
11003: "删除失败-存在关联信息,",
11004: "删除分组失败-分组有下属或在操作员下属中",
12001: "登录用户密码修改失败-原始密码错误",
12002: "登录用户密码修改失败-不能与前n次密码相同",
12003: "登录用户密码修改失败-新密码不符合密码复杂度要求",
13001: "一个币种组只能设置一个基币",
13002: "基币只能是美元或港币",
60012: "LME未准备就绪",
60013: "不支持的下单类型",
60014: "错误的埋单类型",
60015: "不合法的委托类型",
60025: "客户权限只可平仓",
60026: "客户合约特设只可平仓",
60027: "系统权限只可平仓",
60028: "只可平仓提前天数限制只可平仓",
60029: "客户品种风控权限禁止交易",
60030: "客户品种风控权限只可平仓",
60041: "未登录网关",
60042: "未找到网关信息",
60054: "总基币资金不足",
60055: "超过保证金额度",
60056: "总基币超过开仓比例限制",
60057: "独立币种组超过开仓比例限制",
60058: "风险阵列参数错误",
60073: "风险报单不允许改单",
60074: "成交量大于改单量",
60075: "预埋单不允许改单",
60112: "下单超过上手最大持仓量",
60121: "开平方式错误",
60122: "委托平仓持仓不足",
60123: "成交平仓失败",
60131: "未找到本地委托",
60132: "与网关断开连接",
60141: "录单成交重复",
60142: "录单成交未找到对应委托",
60143: "录单成交合约不存在",
60144: "录单成交参数错误",
60145: "录单成交委托状态错误",
60151: "成交删除未找到成交",
60152: "此状态成交不可删",
60161: "不允许录入此状态订单",
60162: "错误的修改订单请求",
60163: "订单不可删,存在对应成交",
60164: "不合法的委托状态",
60165: "此状态不允许订单转移",
60166: "订单不允许删除",
60171: "做市商双边撤单未找到委托",
60172: "做市商双边撤单客户不一致",
60173: "做市商双边撤单品种不一致",
60174: "做市商双边撤单合约不一致",
60175: "做市商双边撤单买卖方向相同",
60176: "做市商双边撤单买卖方向错误",
60177: "做市商单边检查未通过",
60181: "埋单激活失败,订单未找到",
60182: "埋单激活失败,非有效状态",
80001: "网关未就绪,未连接上手",
80002: "品种错误",
80003: "合约错误",
80004: "报单字段有误",
80005: "价格不合法",
80006: "数量不合法",
80007: "报单类型不合法",
80008: "委托模式不合法",
80009: "委托不存在(改单、撤单)",
80010: "发送报单失败",
80011: "被上手拒绝",
90001: "前置不允许该模块登录",
90002: "一次请求太多数据",
90003: "前置没有所要数据",
90004: "所查询的操作员信息不存在",
90011: "前置与交易断开",
90012: "前置与管理断开",
90021: "下属资金账号不存在",
90022: "该操作员不允许交易",
90023: "查询频率过快",
90024: "该授权不予许登录",
90025: "自成交验证不通过",
-23: "查询频率太快。",
-24: "不符合调用条件。",
-25: "改单撤单时没有找到对应订单。",
-26: "日志路径为空。",
-27: "打开日志文件失败",
-28: "没有交易员登录权限",
-29: "没有订单录入或者成交录入",
-30: "没有订单修改和订单删除权限,成交删除权限",
-31: "没有订单转移权限",
-32: "成交录入时系统号为空",
-33: "成交删除时成交号为空。",
-34: "成交删除时没有找到对应的成交",
-35: "订单修改时客户账号变动。",
-36: "订单转移时客户账号没有变动",
-37: "修改的电话密码位数不对或者包含特殊字符。",
-38: "未绑定的二次认证信息",
-39: "二次认证有效期内不能再申请二次认证码",
-40: "没有设置客户密码的权限。",
-41: "风险保单单客户无法撤销或更改",
-42: "改单是客户账号填写与订单客户账号不一致",
-11001: "输入错误的:TAPIBucketDateFlag",
-11002: "输入错误的:TAPIHisQuoteType",
-12002: "输入错误的:TAPIUserTypeType",
-12018: "输入错误的:TAPIBankAccountStateType",
-12019: "输入错误的:TAPIBankAccountSwapStateType",
-12020: "输入错误的:TAPIBankAccountTransferStateType",
-12026: "输入错误的:TAPIPartyTypeType",
-12027: "输入错误的:TAPIPartyCertificateTypeType",
-12028: "输入错误的:TAPIMsgReceiverType",
-12029: "输入错误的:TAPIMsgTypeType",
-12030: "输入错误的:TAPIMsgLevelType",
-12031: "输入错误的:TAPITransferDirectType",
-12032: "输入错误的:TAPITransferStateType",
-12033: "输入错误的:TAPITransferTypeType",
-12034: "输入错误的:TAPITransferDeviceIDType",
-12037: "输入错误的:TAPIBillTypeType",
-12038: "输入错误的:TAPIBillFileTypeType",
-12039: "输入错误的:TAPIOFFFlagType",
-12040: "输入错误的:TAPICashAdjustTypeType",
-12046: "输入错误的: ClientID,ClientID包含特殊字符。",
-13001: "历史行情查询参数不合法",
-13002: "价格和数量中包含NAN或者INF不合法的数值",
-12047: "输入错误的到期日",
-12048: "错误的密码类型",
-12049: "错误的结算数据类型",
}
|
n = int(input())
arr = map(int, input().split())
list1 = list(arr)
print(list1)
new_list = set(list1)
print(new_list)
new_list.remove(max(new_list))
print(max(new_list))
|
MODEL_FOLDER_NAME = 'models'
FASTTEXT_MODEL_NAME = 'fasttext.magnitude'
GLOVE_MODEL_NAME = 'glove.magnitude'
WORD2VEC_MODEL_NAME = 'word2vec.magnitude'
ELMO_MODEL_NAME = 'elmo.magnitude'
BERT_MODEL_NAME = ''
FLAIR_MODEL_NAME = 'news-forward'
COVE_MODEL_NAME = ''
UNIVERSAL_SENTENCE_ENCODER_MODEL_NAME = 'https://tfhub.dev/google/universal-sentence-encoder/1'
CLASSIFIER_ALIAS_DICT = dict({'Random Forest': 'randomforest',
'Logistic Regression': 'logitreg',
'AdaBoost':'adaboost',
'GradientBoost':'gradboost',
'Support Vector Machine (Linear Kernel)':'svm',
'Stochastic Gradient Descent': 'svm',
'SGD Classifier': 'svm'})
|
# coding=utf-8
class App:
    TESTING = True
    HOST_URL = "http://pay.lvye.com"
    PAYEE = '169658002'


class PayClientConfig:
    CHANNEL_NAME = 'lvye_pay_test'
    ROOT_URL = "http://pay.lvye.com/api/__"
    CHECKOUT_URL = 'http://pay.lvye.com/__/checkout/{sn}'
|
def calc_gc(sequence):
    sequence = sequence.upper()                    # make all chars uppercase
    n = sequence.count('T') + sequence.count('A')  # count only A, T,
    m = sequence.count('G') + sequence.count('C')  # C, and G -- nothing else (no Ns, Rs, Ws, etc.)
    return float(m) / float(n + m) if n + m else 0


def test_1():  # test handling N
    result = round(calc_gc('NATGC'), 2)
    assert result == 0.5, result


def test_2():  # test handling lowercase
    result = round(calc_gc('natgc'), 2)
    assert result == 0.5, result
|
# Use this to take notes on the Edpuzzle video. Try each example rather than just watching it - you will get much more out of it!
#
student = {'name': 'John', 'age': 25, 'courses': ['math', 'CompSci']}
for key, value in student.items():
    print(key, value)
|
# Comparison operators
def operate_compare(num):
    if num >= 100:   # num is 100 or greater
        print(f' 100 <= num({num})')
    elif num >= 50:  # num is 50 or greater (but less than 100)
        print(f' 50 <= num({num}) < 100')
    elif num > 0:    # num is greater than 0 (0 excluded)
        print(f' 0 < num({num}) < 50')
    elif num == 0:   # num is exactly 0
        print(f' num({num}) == 0')
    else:            # num is less than 0
        print(f' num({num}) < 0')


print('num = 1000')
operate_compare(1000)
print('num = 70')
operate_compare(70)
print('num = 0')
operate_compare(0)
print('num = -100')
operate_compare(-100)
|
PARSING_SCHEME = {
'name': 'a',
'games_played': 'td[data-stat="g"]:first',
'minutes_played': 'td[data-stat="mp"]:first',
'field_goals': 'td[data-stat="fg"]:first',
'field_goal_attempts': 'td[data-stat="fga"]:first',
'field_goal_percentage': 'td[data-stat="fg_pct"]:first',
'three_point_field_goals': 'td[data-stat="fg3"]:first',
'three_point_field_goal_attempts': 'td[data-stat="fg3a"]:first',
'three_point_field_goal_percentage': 'td[data-stat="fg3_pct"]:first',
'two_point_field_goals': 'td[data-stat="fg2"]:first',
'two_point_field_goal_attempts': 'td[data-stat="fg2a"]:first',
'two_point_field_goal_percentage': 'td[data-stat="fg2_pct"]:first',
'free_throws': 'td[data-stat="ft"]:first',
'free_throw_attempts': 'td[data-stat="fta"]:first',
'free_throw_percentage': 'td[data-stat="ft_pct"]:first',
'offensive_rebounds': 'td[data-stat="orb"]:first',
'defensive_rebounds': 'td[data-stat="drb"]:first',
'total_rebounds': 'td[data-stat="trb"]:first',
'assists': 'td[data-stat="ast"]:first',
'steals': 'td[data-stat="stl"]:first',
'blocks': 'td[data-stat="blk"]:first',
'turnovers': 'td[data-stat="tov"]:first',
'personal_fouls': 'td[data-stat="pf"]:first',
'points': 'td[data-stat="pts"]:first',
'opp_minutes_played': 'td[data-stat="mp"]:first',
'opp_field_goals': 'td[data-stat="opp_fg"]:first',
'opp_field_goal_attempts': 'td[data-stat="opp_fga"]:first',
'opp_field_goal_percentage': 'td[data-stat="opp_fg_pct"]:first',
'opp_three_point_field_goals': 'td[data-stat="opp_fg3"]:first',
'opp_three_point_field_goal_attempts': 'td[data-stat="opp_fg3a"]:first',
'opp_three_point_field_goal_percentage':
'td[data-stat="opp_fg3_pct"]:first',
'opp_two_point_field_goals': 'td[data-stat="opp_fg2"]:first',
'opp_two_point_field_goal_attempts': 'td[data-stat="opp_fg2a"]:first',
'opp_two_point_field_goal_percentage': 'td[data-stat="opp_fg2_pct"]:first',
'opp_free_throws': 'td[data-stat="opp_ft"]:first',
'opp_free_throw_attempts': 'td[data-stat="opp_fta"]:first',
'opp_free_throw_percentage': 'td[data-stat="opp_ft_pct"]:first',
'opp_offensive_rebounds': 'td[data-stat="opp_orb"]:first',
'opp_defensive_rebounds': 'td[data-stat="opp_drb"]:first',
'opp_total_rebounds': 'td[data-stat="opp_trb"]:first',
'opp_assists': 'td[data-stat="opp_ast"]:first',
'opp_steals': 'td[data-stat="opp_stl"]:first',
'opp_blocks': 'td[data-stat="opp_blk"]:first',
'opp_turnovers': 'td[data-stat="opp_tov"]:first',
'opp_personal_fouls': 'td[data-stat="opp_pf"]:first',
'opp_points': 'td[data-stat="opp_pts"]:first'
}
SCHEDULE_SCHEME = {
'game': 'th[data-stat="g"]:first',
'date': 'td[data-stat="date_game"]:first',
'time': 'td[data-stat="game_start_time"]:first',
'boxscore': 'td[data-stat="box_score_text"]:first',
'location': 'td[data-stat="game_location"]:first',
'opponent_abbr': 'td[data-stat="opp_id"]:first',
'opponent_name': 'td[data-stat="opp_name"]:first',
'result': 'td[data-stat="game_result"]:first',
'points_scored': 'td[data-stat="pts"]:first',
'points_allowed': 'td[data-stat="opp_pts"]:first',
'wins': 'td[data-stat="wins"]:first',
'losses': 'td[data-stat="losses"]:first',
'streak': 'td[data-stat="game_streak"]:first'
}
BOXSCORE_SCHEME = {
'date': 'div[class="scorebox_meta"]',
'location': 'div[class="scorebox_meta"]',
'away_name': 'a[itemprop="name"]:first',
'home_name': 'a[itemprop="name"]:last',
'winning_name': '',
'winning_abbr': '',
'losing_name': '',
'losing_abbr': '',
'summary': 'table#line_score',
'pace': 'td[data-stat="pace"]:first',
'away_record': 'div[class="table_wrapper"] h2',
'away_minutes_played': 'tfoot td[data-stat="mp"]',
'away_field_goals': 'tfoot td[data-stat="fg"]',
'away_field_goal_attempts': 'tfoot td[data-stat="fga"]',
'away_field_goal_percentage': 'tfoot td[data-stat="fg_pct"]',
'away_two_point_field_goals': 'tfoot td[data-stat="fg2"]',
'away_two_point_field_goal_attempts': 'tfoot td[data-stat="fg2a"]',
'away_two_point_field_goal_percentage': 'tfoot td[data-stat="fg2_pct"]',
'away_three_point_field_goals': 'tfoot td[data-stat="fg3"]',
'away_three_point_field_goal_attempts': 'tfoot td[data-stat="fg3a"]',
'away_three_point_field_goal_percentage': 'tfoot td[data-stat="fg3_pct"]',
'away_free_throws': 'tfoot td[data-stat="ft"]',
'away_free_throw_attempts': 'tfoot td[data-stat="fta"]',
'away_free_throw_percentage': 'tfoot td[data-stat="ft_pct"]',
'away_offensive_rebounds': 'tfoot td[data-stat="orb"]',
'away_defensive_rebounds': 'tfoot td[data-stat="drb"]',
'away_total_rebounds': 'tfoot td[data-stat="trb"]',
'away_assists': 'tfoot td[data-stat="ast"]',
'away_steals': 'tfoot td[data-stat="stl"]',
'away_blocks': 'tfoot td[data-stat="blk"]',
'away_turnovers': 'tfoot td[data-stat="tov"]',
'away_personal_fouls': 'tfoot td[data-stat="pf"]',
'away_points': 'tfoot td[data-stat="pts"]',
'away_true_shooting_percentage': 'tfoot td[data-stat="ts_pct"]',
'away_effective_field_goal_percentage': 'tfoot td[data-stat="efg_pct"]',
'away_three_point_attempt_rate': 'tfoot td[data-stat="fg3a_per_fga_pct"]',
'away_free_throw_attempt_rate': 'tfoot td[data-stat="fta_per_fga_pct"]',
'away_offensive_rebound_percentage': 'tfoot td[data-stat="orb_pct"]',
'away_defensive_rebound_percentage': 'tfoot td[data-stat="drb_pct"]',
'away_total_rebound_percentage': 'tfoot td[data-stat="trb_pct"]',
'away_assist_percentage': 'tfoot td[data-stat="ast_pct"]',
'away_steal_percentage': 'tfoot td[data-stat="stl_pct"]',
'away_block_percentage': 'tfoot td[data-stat="blk_pct"]',
'away_turnover_percentage': 'tfoot td[data-stat="tov_pct"]',
'away_offensive_rating': 'tfoot td[data-stat="off_rtg"]',
'away_defensive_rating': 'tfoot td[data-stat="def_rtg"]',
'home_record': 'div[class="table_wrapper"] h2',
'home_minutes_played': 'tfoot td[data-stat="mp"]',
'home_field_goals': 'tfoot td[data-stat="fg"]',
'home_field_goal_attempts': 'tfoot td[data-stat="fga"]',
'home_field_goal_percentage': 'tfoot td[data-stat="fg_pct"]',
'home_two_point_field_goals': 'tfoot td[data-stat="fg2"]',
'home_two_point_field_goal_attempts': 'tfoot td[data-stat="fg2a"]',
'home_two_point_field_goal_percentage': 'tfoot td[data-stat="fg2_pct"]',
'home_three_point_field_goals': 'tfoot td[data-stat="fg3"]',
'home_three_point_field_goal_attempts': 'tfoot td[data-stat="fg3a"]',
'home_three_point_field_goal_percentage': 'tfoot td[data-stat="fg3_pct"]',
'home_free_throws': 'tfoot td[data-stat="ft"]',
'home_free_throw_attempts': 'tfoot td[data-stat="fta"]',
'home_free_throw_percentage': 'tfoot td[data-stat="ft_pct"]',
'home_offensive_rebounds': 'tfoot td[data-stat="orb"]',
'home_defensive_rebounds': 'tfoot td[data-stat="drb"]',
'home_total_rebounds': 'tfoot td[data-stat="trb"]',
'home_assists': 'tfoot td[data-stat="ast"]',
'home_steals': 'tfoot td[data-stat="stl"]',
'home_blocks': 'tfoot td[data-stat="blk"]',
'home_turnovers': 'tfoot td[data-stat="tov"]',
'home_personal_fouls': 'tfoot td[data-stat="pf"]',
'home_points': 'div[class="score"]',
'home_true_shooting_percentage': 'tfoot td[data-stat="ts_pct"]',
'home_effective_field_goal_percentage': 'tfoot td[data-stat="efg_pct"]',
'home_three_point_attempt_rate': 'tfoot td[data-stat="fg3a_per_fga_pct"]',
'home_free_throw_attempt_rate': 'tfoot td[data-stat="fta_per_fga_pct"]',
'home_offensive_rebound_percentage': 'tfoot td[data-stat="orb_pct"]',
'home_defensive_rebound_percentage': 'tfoot td[data-stat="drb_pct"]',
'home_total_rebound_percentage': 'tfoot td[data-stat="trb_pct"]',
'home_assist_percentage': 'tfoot td[data-stat="ast_pct"]',
'home_steal_percentage': 'tfoot td[data-stat="stl_pct"]',
'home_block_percentage': 'tfoot td[data-stat="blk_pct"]',
'home_turnover_percentage': 'tfoot td[data-stat="tov_pct"]',
'home_offensive_rating': 'tfoot td[data-stat="off_rtg"]',
'home_defensive_rating': 'tfoot td[data-stat="def_rtg"]'
}
BOXSCORE_ELEMENT_INDEX = {
'date': 0,
'location': 1,
'home_record': -1,
'home_minutes_played': 7,
'home_field_goals': 7,
'home_field_goal_attempts': 7,
'home_field_goal_percentage': 7,
'home_two_point_field_goals': 7,
'home_two_point_field_goal_attempts': 7,
'home_two_point_field_goal_percentage': 7,
'home_three_point_field_goals': 7,
'home_three_point_field_goal_attempts': 7,
'home_three_point_field_goal_percentage': 7,
'home_free_throws': 7,
'home_free_throw_attempts': 7,
'home_free_throw_percentage': 7,
'home_offensive_rebounds': 7,
'home_defensive_rebounds': 7,
'home_total_rebounds': 7,
'home_assists': 7,
'home_steals': 7,
'home_blocks': 7,
'home_turnovers': 7,
'home_personal_fouls': 7,
'home_points': -1,
'home_true_shooting_percentage': 7,
'home_effective_field_goal_percentage': 7,
'home_three_point_attempt_rate': 7,
'home_free_throw_attempt_rate': 7,
'home_offensive_rebound_percentage': 7,
'home_defensive_rebound_percentage': 7,
'home_total_rebound_percentage': 7,
'home_assist_percentage': 7,
'home_steal_percentage': 7,
'home_block_percentage': 7,
'home_turnover_percentage': 7,
'home_offensive_rating': 7,
'home_defensive_rating': 7
}
PLAYER_SCHEME = {
'summary': '[data-template="Partials/Teams/Summary"]',
'season': 'th[data-stat="season"]:first',
'name': 'h1',
'team_abbreviation': 'td[data-stat="team_id"]',
'position': 'td[data-stat="pos"]',
'height': 'span[itemprop="height"]',
'weight': 'span[itemprop="weight"]',
'birth_date': 'td[data-stat=""]',
'nationality': 'td[data-stat=""]',
'age': 'nobr',
'games_played': 'td[data-stat="g"]',
'games_started': 'td[data-stat="gs"]',
'minutes_played': 'td[data-stat="mp"]',
'field_goals': 'td[data-stat="fg"]',
'field_goal_attempts': 'td[data-stat="fga"]',
'field_goal_percentage': 'td[data-stat="fg_pct"]',
'three_pointers': 'td[data-stat="fg3"]',
'three_point_attempts': 'td[data-stat="fg3a"]',
'three_point_percentage': 'td[data-stat="fg3_pct"]',
'two_pointers': 'td[data-stat="fg2"]',
'two_point_attempts': 'td[data-stat="fg2a"]',
'two_point_percentage': 'td[data-stat="fg2_pct"]',
'effective_field_goal_percentage': 'td[data-stat="efg_pct"]',
'free_throws': 'td[data-stat="ft"]',
'free_throw_attempts': 'td[data-stat="fta"]',
'free_throw_percentage': 'td[data-stat="ft_pct"]',
'offensive_rebounds': 'td[data-stat="orb"]',
'defensive_rebounds': 'td[data-stat="drb"]',
'total_rebounds': 'td[data-stat="trb"]',
'assists': 'td[data-stat="ast"]',
'steals': 'td[data-stat="stl"]',
'blocks': 'td[data-stat="blk"]',
'turnovers': 'td[data-stat="tov"]',
'personal_fouls': 'td[data-stat="pf"]',
'points': 'td[data-stat="pts"]',
'player_efficiency_rating': 'td[data-stat="per"]',
'true_shooting_percentage': 'td[data-stat="ts_pct"]',
'three_point_attempt_rate': 'td[data-stat="fg3a_per_fga_pct"]',
'free_throw_attempt_rate': 'td[data-stat="fta_per_fga_pct"]',
'offensive_rebound_percentage': 'td[data-stat="orb_pct"]',
'defensive_rebound_percentage': 'td[data-stat="drb_pct"]',
'total_rebound_percentage': 'td[data-stat="trb_pct"]',
'assist_percentage': 'td[data-stat="ast_pct"]',
'steal_percentage': 'td[data-stat="stl_pct"]',
'block_percentage': 'td[data-stat="blk_pct"]',
'turnover_percentage': 'td[data-stat="tov_pct"]',
'usage_percentage': 'td[data-stat="usg_pct"]',
'offensive_win_shares': 'td[data-stat="ows"]',
'defensive_win_shares': 'td[data-stat="dws"]',
'win_shares': 'td[data-stat="ws"]',
'win_shares_per_48_minutes': 'td[data-stat="ws_per_48"]',
'offensive_box_plus_minus': 'td[data-stat="obpm"]',
'defensive_box_plus_minus': 'td[data-stat="dbpm"]',
'box_plus_minus': 'td[data-stat="bpm"]',
'defensive_rating': 'td[data-stat="def_rtg"]',
'offensive_rating': 'td[data-stat="off_rtg"]',
'boxscore_box_plus_minus': 'td[data-stat="plus_minus"]',
'value_over_replacement_player': 'td[data-stat="vorp"]',
'shooting_distance': 'td[data-stat="avg_dist"]',
'percentage_shots_two_pointers': 'td[data-stat="fg2a_pct_fga"]',
'percentage_zero_to_three_footers': 'td[data-stat="pct_fga_00_03"]',
'percentage_three_to_ten_footers': 'td[data-stat="pct_fga_03_10"]',
'percentage_ten_to_sixteen_footers': 'td[data-stat="pct_fga_10_16"]',
'percentage_sixteen_foot_plus_two_pointers':
'td[data-stat="pct_fga_16_xx"]',
'percentage_shots_three_pointers': 'td[data-stat="fg3a_pct_fga"]',
'field_goal_perc_zero_to_three_feet': 'td[data-stat="fg_pct_00_03"]',
'field_goal_perc_three_to_ten_feet': 'td[data-stat="fg_pct_03_10"]',
'field_goal_perc_ten_to_sixteen_feet': 'td[data-stat="fg_pct_10_16"]',
'field_goal_perc_sixteen_foot_plus_two_pointers':
'td[data-stat="fg_pct_16_xx"]',
'two_pointers_assisted_percentage': 'td[data-stat="fg2_pct_ast"]',
'percentage_field_goals_as_dunks': 'td[data-stat="pct_fg2_dunk"]',
'dunks': 'td[data-stat="fg2_dunk"]',
'three_pointers_assisted_percentage': 'td[data-stat="fg3_pct_ast"]',
'percentage_of_three_pointers_from_corner':
'td[data-stat="pct_fg3a_corner"]',
'three_point_shot_percentage_from_corner':
'td[data-stat="fg3_pct_corner"]',
'half_court_heaves': 'td[data-stat="fg3a_heave"]',
'half_court_heaves_made': 'td[data-stat="fg3_heave"]',
'point_guard_percentage': 'td[data-stat="pct_1"]',
'shooting_guard_percentage': 'td[data-stat="pct_2"]',
'small_forward_percentage': 'td[data-stat="pct_3"]',
'power_forward_percentage': 'td[data-stat="pct_4"]',
'center_percentage': 'td[data-stat="pct_5"]',
'on_court_plus_minus': 'td[data-stat="plus_minus_on"]',
'net_plus_minus': 'td[data-stat="plus_minus_net"]',
'passing_turnovers': 'td[data-stat="tov_bad_pass"]',
'lost_ball_turnovers': 'td[data-stat="tov_lost_ball"]',
'other_turnovers': 'td[data-stat="tov_other"]',
'shooting_fouls': 'td[data-stat="fouls_shooting"]',
'blocking_fouls': 'td[data-stat="fouls_blocking"]',
'offensive_fouls': 'td[data-stat="fouls_offensive"]',
'take_fouls': 'td[data-stat="fouls_take"]',
'points_generated_by_assists': 'td[data-stat="astd_pts"]',
'shooting_fouls_drawn': 'td[data-stat="drawn_shooting"]',
'and_ones': 'td[data-stat="and1s"]',
'shots_blocked': 'td[data-stat="fga_blkd"]',
'salary': 'td[data-stat="salary"]',
'field_goals_per_poss': 'td[data-stat="fg_per_poss"]',
'field_goal_attempts_per_poss': 'td[data-stat="fga_per_poss"]',
'three_pointers_per_poss': 'td[data-stat="fg3_per_poss"]',
'three_point_attempts_per_poss': 'td[data-stat="fg3a_per_poss"]',
'two_pointers_per_poss': 'td[data-stat="fg2_per_poss"]',
'two_point_attempts_per_poss': 'td[data-stat="fg2a_per_poss"]',
'free_throws_per_poss': 'td[data-stat="ft_per_poss"]',
'free_throw_attempts_per_poss': 'td[data-stat="fta_per_poss"]',
'offensive_rebounds_per_poss': 'td[data-stat="orb_per_poss"]',
'defensive_rebounds_per_poss': 'td[data-stat="drb_per_poss"]',
'total_rebounds_per_poss': 'td[data-stat="trb_per_poss"]',
'assists_per_poss': 'td[data-stat="ast_per_poss"]',
'steals_per_poss': 'td[data-stat="stl_per_poss"]',
'blocks_per_poss': 'td[data-stat="blk_per_poss"]',
'turnovers_per_poss': 'td[data-stat="tov_per_poss"]',
'personal_fouls_per_poss': 'td[data-stat="pf_per_poss"]',
'points_per_poss': 'td[data-stat="pts_per_poss"]'
}
NATIONALITY = {
'ao': 'Angola',
'ag': 'Antigua and Barbuda',
'ar': 'Argentina',
'au': 'Australia',
'at': 'Austria',
'bs': 'Bahamas',
'be': 'Belgium',
'ba': 'Bosnia and Herzegovina',
'br': 'Brazil',
'bg': 'Bulgaria',
'cm': 'Cameroon',
'ca': 'Canada',
'td': 'Chad',
'co': 'Colombia',
'cv': 'Cape Verde',
'cn': 'China',
'hr': 'Croatia',
'cu': 'Cuba',
'cz': 'Czech Republic',
'cd': 'Democratic Republic of Congo',
'dk': 'Denmark',
'dm': 'Dominica',
'do': 'Dominican Republic',
'eg': 'Egypt',
'ee': 'Estonia',
'fi': 'Finland',
'fr': 'France',
'gf': 'French Guiana',
'ga': 'Gabon',
'ge': 'Georgia',
'de': 'Germany',
'gh': 'Ghana',
'gr': 'Greece',
'gp': 'Guadeloupe',
'gn': 'Guinea',
'gy': 'Guyana',
'ht': 'Haiti',
'hu': 'Hungary',
'is': 'Iceland',
'ie': 'Ireland',
'ir': 'Islamic Republic of Iran',
'il': 'Israel',
'it': 'Italy',
'jm': 'Jamaica',
'jp': 'Japan',
'lv': 'Latvia',
'lb': 'Lebanon',
'lt': 'Lithuania',
'lu': 'Luxembourg',
'ml': 'Mali',
'mq': 'Martinique',
'mx': 'Mexico',
'me': 'Montenegro',
'ma': 'Morocco',
'nl': 'Netherlands',
'nz': 'New Zealand',
'ng': 'Nigeria',
'no': 'Norway',
'pa': 'Panama',
'pl': 'Poland',
'pr': 'Puerto Rico',
'ke': 'Kenya',
'kr': 'Republic of Korea',
'mk': 'Republic of Macedonia',
'cg': 'Republic of Congo',
'ro': 'Romania',
'ru': 'Russian Federation',
'lc': 'Saint Lucia',
'vc': 'Saint Vincent and the Grenadines',
'sd': 'Sudan',
'sn': 'Senegal',
'rs': 'Serbia',
'sk': 'Slovakia',
'si': 'Slovenia',
'za': 'South Africa',
'ss': 'South Sudan',
'es': 'Spain',
'se': 'Sweden',
'ch': 'Switzerland',
'tw': 'Taiwan',
'tt': 'Trinidad and Tobago',
'tn': 'Tunisia',
'tr': 'Turkey',
'us': 'United States of America',
'vi': 'U.S. Virgin Islands',
'ua': 'Ukraine',
'gb': 'United Kingdom',
'tz': 'United Republic of Tanzania',
'uy': 'Uruguay',
've': 'Venezuela'
}
SEASON_PAGE_URL = 'http://www.basketball-reference.com/leagues/NBA_%s.html'
SCHEDULE_URL = 'http://www.basketball-reference.com/teams/%s/%s_games.html'
BOXSCORE_URL = 'https://www.basketball-reference.com/boxscores/%s.html'
BOXSCORES_URL = ('https://www.basketball-reference.com/boxscores/'
'?month=%s&day=%s&year=%s')
PLAYER_URL = 'https://www.basketball-reference.com/players/%s/%s.html'
ROSTER_URL = 'https://www.basketball-reference.com/teams/%s/%s.html'
|
class NuGetPackage(GitHubTarballPackage):
    def __init__(self):
        GitHubTarballPackage.__init__(self,
            'mono', 'nuget',
            '2.8.5',
            'ea1d244b066338c9408646afdcf8acae6299f7fb',
            configure='')

    def build(self):
        self.sh('%{make} PREFIX=%{package_prefix}')

    def install(self):
        self.sh('%{makeinstall} PREFIX=%{staged_prefix}')


NuGetPackage()
|
def test():
    assert (
        "doc1.similarity(doc2)" in __solution__ or "doc2.similarity(doc1)" in __solution__
    ), "你有计算两个doc之间的相似度吗?"
    assert (
        0 <= float(similarity) <= 1
    ), "相似度分数是一个浮点数。你确定你计算正确了吗?"
    __msg__.good("棒棒哒!")
|
class Cat:
    def __init__(self, name, age):
        self.name = name
        self.age = age

    def info(self):
        print(f"I am a cat. My name is {self.name}. I am {self.age} years old.")

    def make_sound(self):
        print("Meow")


class Dog:
    def __init__(self, name, age):
        self.name = name
        self.age = age

    def info(self):
        print(f"I am a dog. My name is {self.name}. I am {self.age} years old.")

    def make_sound(self):
        print("Bark")


cat1 = Cat("Kitty", 2.5)
cat2 = Cat("Catty", 3.0)
dog1 = Dog("Fluffy", 4)
dog2 = Dog("Doggy", 4.5)

animals = [cat1, cat2, dog1, dog2]
for animal in animals:
    animal.make_sound()
    animal.info()
    animal.make_sound()
|
#!/usr/bin/python
# Copyright 2015 Neuhold Markus and Kleinsasser Mario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
smsgatewayabspath = None
watchdogThread = None
watchdogThreadNotify = None
# For route-based watchdogs
watchdogRouteThread = {}
watchdogRouteThreadNotify = {}
watchdogRouteThreadQueue = {}
routerThread = None
rdb = None
cleanupseconds = None
wisid = None
wisport = None
wisipaddress = None
pissendtimeout = None
ldapenabled = None
ldapserver = None
ldapbasedn = None
ldapusers = None
sslenabled = None
sslcertificate = None
sslprivatekey = None
sslcertificatechain = None
validusernameregex = None
validusernamelength = None
version = None
|
class DistributedRouter:
    def allow_migrate(self, db, app_label, model_name=None, **hints):
        if model_name in ['user', 'settings']:
            return db == 'parser'
        if model_name in ['search', 'betdata']:
            return db == 'betdata'
        return True
|
contador_externo = 0
contador_interno = 0
while contador_externo < 5:
    while contador_interno < 6:
        print(contador_externo, contador_interno)
        contador_interno += 1
    contador_externo += 1
    contador_interno = 0
|
# Python: QuickSort
def quick_sort(arr):
    start = 0
    end = len(arr) - 1
    __quick_sort(arr, start, end)


def __quick_sort(arr, start, end):
    if start < end:
        partition_index = __partition(arr, start, end)
        __quick_sort(arr, start, partition_index - 1)
        __quick_sort(arr, partition_index + 1, end)


def __partition(arr, start, end):
    pivot_elm = arr[end]
    partition_index = start
    for i in range(start, end):
        if arr[i] <= pivot_elm:
            arr[i], arr[partition_index] = arr[partition_index], arr[i]
            partition_index += 1
    arr[end], arr[partition_index] = arr[partition_index], arr[end]
    return partition_index
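

# Example usage: quick_sort sorts the list in place, so mutate a list and print it.
if __name__ == '__main__':
    data = [9, 3, 7, 1, 8, 2]
    quick_sort(data)
    print(data)  # expected: [1, 2, 3, 7, 8, 9]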
|
def heapify(array, size, index):
    largest = index
    left = 2 * index + 1
    right = 2 * index + 2
    if left < size and array[index] < array[left]:
        largest = left
    if right < size and array[largest] < array[right]:
        largest = right
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, size, largest)


def heap_sort(array):
    size = len(array)
    for index in range(size // 2 - 1, -1, -1):
        heapify(array, size, index)
    for index in range(size - 1, 0, -1):
        array[index], array[0] = array[0], array[index]
        heapify(array, index, 0)


if __name__ == '__main__':
    array = [19, 50, 27, 7, 2020, 14, 18, 1, 12, 23, 200, 2201]
    heap_sort(array)
    print(array)
|
'''
Description : Use Of Local Scope
Function Date : 05 Feb 2021
Function Author : Prasad Dangare
Input : Int
Output : Int
'''
x = 50


def func(x):
    print('x is', x)
    x = 2
    print('Changed local x to', x)


func(x)
print('x is still', x)
|
"""
321. Create Maximum Number
Given two arrays of length m and n with digits 0-9 representing two numbers. Create the maximum number of length k <= m + n from digits of the two. The relative order of the digits from the same array must be preserved. Return an array of the k digits.
Note: You should try to optimize your time and space complexity.
"""
# simple math problem
# Runtime: 420 ms, faster than 69.02% of Python3 online submissions for Create Maximum Number.
# Memory Usage: 14 MB, less than 53.99% of Python3 online submissions for Create Maximum Number.
class Solution:
def maxNumber(self, nums1: List[int], nums2: List[int], k: int) -> List[int]:
def getK(nums, k):
n = len(nums)
to_pop = n - k
ans = []
for num in nums:
while len(ans) > 0 and num > ans[-1] and to_pop > 0:
to_pop -= 1
ans.pop()
ans.append(num)
return ans[:k]
def getMax(nums1, nums2):
ans = []
while nums1 and nums2:
if nums1 > nums2:
ans.append(nums1.pop(0))
else:
ans.append(nums2.pop(0))
if nums1:
ans += nums1
else:
ans += nums2
return ans
n1 = len(nums1)
n2 = len(nums2)
ans = []
for k1 in range(k+1):
k2 = k - k1
if k1 > n1 or k2 > n2:
continue
ans = max(ans, getMax(getK(nums1, k1), getK(nums2, k2)))
return ans
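

# Example usage (the classic test case for this problem): merge digits from both
# arrays into the largest possible 5-digit sequence.
if __name__ == '__main__':
    print(Solution().maxNumber([3, 4, 6, 5], [9, 1, 2, 5, 8, 3], 5))  # [9, 8, 6, 5, 3]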
|
print('Enter the name of cat 1:')
catName1 = input()
print('Enter the name of cat 2:')
catName2 = input()
print('Enter the name of cat 3:')
catName3 = input()
print('Enter the name of cat 4:')
catName4 = input()
print('Enter the name of cat 5:')
catName5 = input()
print('Enter the name of cat 6:')
catName6 = input()
print(catName1 + ' ' + catName2 + ' ' + catName3 + ' ' +
catName4 + ' ' + catName5 + ' ' + catName6)
|
# -*- coding: utf-8 -*-
#
# __init__.py
#
# This module is part of skxtend.
#
"""
Initializer of skxtend tests.
"""
__author__ = 'Severin E. R. Langberg'
__email__ = 'Langberg91@gmail.no'
__status__ = 'Operational'
|
TYPE_NAME = "mock"
def handler(value, **kwargs):
    return "mock"
|
# eta is the learning rate (shrinkage applied to each boosting round); smaller values make learning more conservative
import xgboost as xgb
import pandas as pd

# X (features) and y (target) are assumed to be defined earlier, e.g. loaded from the housing dataset.
# Create your housing DMatrix: housing_dmatrix
housing_dmatrix = xgb.DMatrix(data=X, label=y)

# Create the parameter dictionary for each tree (boosting round)
params = {"objective": "reg:linear", "max_depth": 3}

# Create list of eta values and empty list to store final round rmse per xgboost model
eta_vals = [0.001, 0.01, 0.1]
best_rmse = []

# Systematically vary the eta
for curr_val in eta_vals:
    params["eta"] = curr_val

    # Perform cross-validation: cv_results
    cv_results = xgb.cv(
        dtrain=housing_dmatrix, params=params, nfold=3, num_boost_round=10,
        early_stopping_rounds=5, metrics="rmse", seed=123, as_pandas=True
    )

    # Append the final round rmse to best_rmse
    best_rmse.append(cv_results["test-rmse-mean"].tail().values[-1])

# Print the resultant DataFrame
print(pd.DataFrame(list(zip(eta_vals, best_rmse)), columns=["eta", "best_rmse"]))
|
class ProjectAttributes:
    def __init__(self, project_instance):
        self.project_instance = project_instance
        self.proj_name = project_instance.get_project_name()
        self.proj_loc = project_instance.get_project_loc()
        self.source_file_count = len(project_instance.get_project_source_files().get_files())

    def get_project_name(self):
        """
        Returns the project name.
        :param: None
        :returns: Name of the project
        :rtype: str
        """
        return self.proj_name

    def get_project_loc(self):
        """
        Returns the project kloc.
        :param: None
        :returns: KLOC of the project
        :rtype: float
        """
        return self.proj_loc

    def get_source_file_count(self):
        """
        Returns the count of source files in the project.
        :param: None
        :returns: Count of source files in the project
        :rtype: int
        """
        return self.source_file_count
|
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
# Solution1: No more to say.
class Solution:
"""
@param inorder: A list of integers that inorder traversal of a tree
@param postorder: A list of integers that postorder traversal of a tree
@return: Root of a tree
"""
def buildTree(self, inorder, postorder):
if inorder and postorder:
middle = TreeNode(postorder[-1])
index = inorder.index(postorder[-1])
middle.left = self.buildTree(inorder[:index], postorder[:index])
middle.right = self.buildTree(inorder[index+1:], postorder[index:-1])
return middle
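

# Example usage sketch: TreeNode is only described in the docstring above, so a
# matching class is defined here to make the call runnable.
class TreeNode:
    def __init__(self, val):
        self.val = val
        self.left, self.right = None, None


if __name__ == '__main__':
    root = Solution().buildTree([1, 2, 3], [1, 3, 2])
    print(root.val, root.left.val, root.right.val)  # 2 1 3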
|
# GB: abbreviation for the Chinese national standard ("Guobiao")
def make_page_type(txt, doc_type):
    if "目次" in txt:
        return "GB_ML"
    if "术语" in txt and "条文说明" not in txt:
        return "GB_SY"
    else:
        return "GB_ZW"
|
#
# PySNMP MIB module NBASE-EXP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/NBASE-EXP-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:17:07 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ObjectIdentity, enterprises, iso, Integer32, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Counter32, TimeTicks, Counter64, Gauge32, ModuleIdentity, Unsigned32, IpAddress, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "enterprises", "iso", "Integer32", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Counter32", "TimeTicks", "Counter64", "Gauge32", "ModuleIdentity", "Unsigned32", "IpAddress", "MibIdentifier")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
nbase = MibIdentifier((1, 3, 6, 1, 4, 1, 629))
nbSwitchG1 = MibIdentifier((1, 3, 6, 1, 4, 1, 629, 1))
nbsMegaMibs = MibIdentifier((1, 3, 6, 1, 4, 1, 629, 1, 16))
nbsExpansionPortMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 629, 1, 16, 1))
nbsAtmLanePortMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 629, 1, 16, 2))
nbsFddiPortMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 629, 1, 16, 3))
nbsExpPortMaxNum = MibScalar((1, 3, 6, 1, 4, 1, 629, 1, 16, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nbsExpPortMaxNum.setStatus('mandatory')
nbsExpPortTable = MibTable((1, 3, 6, 1, 4, 1, 629, 1, 16, 1, 2), )
if mibBuilder.loadTexts: nbsExpPortTable.setStatus('mandatory')
if mibBuilder.loadTexts: nbsExpPortTable.setDescription('A table of Expansion Ports in the devices.')
nbsExpPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 629, 1, 16, 1, 2, 1), ).setIndexNames((0, "NBASE-EXP-MIB", "nbsExpPortTblPortNumber"))
if mibBuilder.loadTexts: nbsExpPortEntry.setStatus('mandatory')
if mibBuilder.loadTexts: nbsExpPortEntry.setDescription('Contains the features general to NBase Expansion port modules.')
nbsExpPortTblPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 629, 1, 16, 1, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nbsExpPortTblPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: nbsExpPortTblPortNumber.setDescription('The Port Number of the Expansion Port. This port number is the same as the port number used for all other purposes.')
nbsExpPortTblHwType = MibTableColumn((1, 3, 6, 1, 4, 1, 629, 1, 16, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("other", 1), ("cpu-card", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nbsExpPortTblHwType.setStatus('mandatory')
if mibBuilder.loadTexts: nbsExpPortTblHwType.setDescription('The Hardware Type of the Expansion port.')
nbsExpPortTblSwType = MibTableColumn((1, 3, 6, 1, 4, 1, 629, 1, 16, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("atm-lec", 2), ("atm-mpoa", 3), ("fddi", 4), ("wan-router", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nbsExpPortTblSwType.setStatus('mandatory')
if mibBuilder.loadTexts: nbsExpPortTblSwType.setDescription('The Software Type of the Expansion port.')
nbsExpPortTblSquall = MibTableColumn((1, 3, 6, 1, 4, 1, 629, 1, 16, 1, 2, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nbsExpPortTblSquall.setStatus('mandatory')
if mibBuilder.loadTexts: nbsExpPortTblSquall.setDescription('The Squall Module, if any, attached to this Expansion Port.')
nbsExpPortTblHwVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 629, 1, 16, 1, 2, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nbsExpPortTblHwVersion.setStatus('mandatory')
if mibBuilder.loadTexts: nbsExpPortTblHwVersion.setDescription('A description of the Hardware Version of the Expansion Port.')
nbsExpPortTblMCodeVrsn = MibTableColumn((1, 3, 6, 1, 4, 1, 629, 1, 16, 1, 2, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nbsExpPortTblMCodeVrsn.setStatus('mandatory')
if mibBuilder.loadTexts: nbsExpPortTblMCodeVrsn.setDescription('A description of the Hardware Version of the Expansion Port.')
nbsExpPortTblSwVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 629, 1, 16, 1, 2, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nbsExpPortTblSwVersion.setStatus('mandatory')
if mibBuilder.loadTexts: nbsExpPortTblSwVersion.setDescription('A description of the Software Version of the Expansion Port.')
nbsExpPortTblStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 629, 1, 16, 1, 2, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("ok", 2), ("error", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nbsExpPortTblStatus.setStatus('mandatory')
if mibBuilder.loadTexts: nbsExpPortTblStatus.setDescription('The status of the Expansion Port.')
nbsExpPortTftpSwFileName = MibTableColumn((1, 3, 6, 1, 4, 1, 629, 1, 16, 1, 2, 1, 9), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nbsExpPortTftpSwFileName.setStatus('mandatory')
if mibBuilder.loadTexts: nbsExpPortTftpSwFileName.setDescription('The Software File Name for the Expansion Port. This is the remote file name string provided to the TFTP client application when starting a Firmware Update process. This value is stored in the system NVRAM as well as in the SNMP Agent current configuration.')
nbsExpPortInitDownload = MibTableColumn((1, 3, 6, 1, 4, 1, 629, 1, 16, 1, 2, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("active", 1), ("inactive", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nbsExpPortInitDownload.setStatus('mandatory')
if mibBuilder.loadTexts: nbsExpPortInitDownload.setDescription('This is used to initiate a download session from the TFTP server. The filename which will be requested my be modified via the nbsExpPortTftpSwFileName object. Note that the only writeable value is active(1), if no session is active at this moment.')
nbsAtmLanePortMaxNum = MibScalar((1, 3, 6, 1, 4, 1, 629, 1, 16, 2, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nbsAtmLanePortMaxNum.setStatus('mandatory')
nbsAtmLanePortTable = MibTable((1, 3, 6, 1, 4, 1, 629, 1, 16, 2, 2), )
if mibBuilder.loadTexts: nbsAtmLanePortTable.setStatus('mandatory')
if mibBuilder.loadTexts: nbsAtmLanePortTable.setDescription('A table of Lan Emulation Clients, indexed by the physical port number.')
nbsAtmLanePortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 629, 1, 16, 2, 2, 1), ).setIndexNames((0, "NBASE-EXP-MIB", "nbsAtmLanePortNumber"))
if mibBuilder.loadTexts: nbsAtmLanePortEntry.setStatus('mandatory')
if mibBuilder.loadTexts: nbsAtmLanePortEntry.setDescription('Contains the features specific to the ATM Lan Emulation Client.')
nbsAtmLanePortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 629, 1, 16, 2, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nbsAtmLanePortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: nbsAtmLanePortNumber.setDescription('The Port Number of the Lan Emulation Client.')
laneLecsAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 629, 1, 16, 2, 2, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(20, 20)).setFixedLength(20)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: laneLecsAddress.setStatus('mandatory')
if mibBuilder.loadTexts: laneLecsAddress.setDescription('The ATM Address (20 Octet string) of the desired Lan Emulation Configuration Server.')
sonetCircuitId = MibTableColumn((1, 3, 6, 1, 4, 1, 629, 1, 16, 2, 2, 1, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sonetCircuitId.setStatus('mandatory')
if mibBuilder.loadTexts: sonetCircuitId.setDescription('The Circuit Identifier, if any for the SONET interface. This information is typically provided by the owner of the SONET physical line.')
signalingStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 629, 1, 16, 2, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: signalingStatus.setStatus('mandatory')
if mibBuilder.loadTexts: signalingStatus.setDescription('The Status of the ATM UNI signaling between the uplink and the ATM switch')
nbsFddiPortMaxNum = MibScalar((1, 3, 6, 1, 4, 1, 629, 1, 16, 3, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nbsFddiPortMaxNum.setStatus('mandatory')
nbsFddiPortTable = MibTable((1, 3, 6, 1, 4, 1, 629, 1, 16, 3, 2), )
if mibBuilder.loadTexts: nbsFddiPortTable.setStatus('mandatory')
if mibBuilder.loadTexts: nbsFddiPortTable.setDescription('A table of FDDI ports, indexed by the physical port number.')
nbsFddiPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 629, 1, 16, 3, 2, 1), ).setIndexNames((0, "NBASE-EXP-MIB", "nbsFddiPortNumber"))
if mibBuilder.loadTexts: nbsFddiPortEntry.setStatus('mandatory')
if mibBuilder.loadTexts: nbsFddiPortEntry.setDescription('Contains the features specific to the FDDI Port.')
nbsFddiPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 629, 1, 16, 3, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nbsFddiPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: nbsFddiPortNumber.setDescription('The Port Number of the Lan Emulation Client.')
nbsFddiSmtIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 629, 1, 16, 3, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nbsFddiSmtIndex.setStatus('mandatory')
if mibBuilder.loadTexts: nbsFddiSmtIndex.setDescription('The FDDI MIB SMT index number of this port')
mibBuilder.exportSymbols("NBASE-EXP-MIB", nbsFddiPortMIB=nbsFddiPortMIB, nbsFddiPortEntry=nbsFddiPortEntry, laneLecsAddress=laneLecsAddress, nbsExpPortInitDownload=nbsExpPortInitDownload, signalingStatus=signalingStatus, nbsMegaMibs=nbsMegaMibs, nbsAtmLanePortNumber=nbsAtmLanePortNumber, nbsExpPortMaxNum=nbsExpPortMaxNum, nbsAtmLanePortMIB=nbsAtmLanePortMIB, nbsExpPortTftpSwFileName=nbsExpPortTftpSwFileName, nbsFddiPortTable=nbsFddiPortTable, nbsFddiPortNumber=nbsFddiPortNumber, nbSwitchG1=nbSwitchG1, nbsExpPortTable=nbsExpPortTable, nbsExpPortTblStatus=nbsExpPortTblStatus, nbsExpansionPortMIB=nbsExpansionPortMIB, nbsExpPortTblPortNumber=nbsExpPortTblPortNumber, nbsExpPortTblSwType=nbsExpPortTblSwType, nbsExpPortEntry=nbsExpPortEntry, sonetCircuitId=sonetCircuitId, nbase=nbase, nbsAtmLanePortTable=nbsAtmLanePortTable, nbsExpPortTblHwVersion=nbsExpPortTblHwVersion, nbsExpPortTblSquall=nbsExpPortTblSquall, nbsFddiPortMaxNum=nbsFddiPortMaxNum, nbsExpPortTblHwType=nbsExpPortTblHwType, nbsExpPortTblSwVersion=nbsExpPortTblSwVersion, nbsExpPortTblMCodeVrsn=nbsExpPortTblMCodeVrsn, nbsAtmLanePortEntry=nbsAtmLanePortEntry, nbsFddiSmtIndex=nbsFddiSmtIndex, nbsAtmLanePortMaxNum=nbsAtmLanePortMaxNum)
|
#
# PySNMP MIB module EXTREME-LACP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/EXTREME-LACP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:54:07 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint")
extremeAgent, = mibBuilder.importSymbols("EXTREME-BASE-MIB", "extremeAgent")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Bits, Integer32, Counter64, ObjectIdentity, ModuleIdentity, NotificationType, IpAddress, TimeTicks, Gauge32, Counter32, Unsigned32, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Integer32", "Counter64", "ObjectIdentity", "ModuleIdentity", "NotificationType", "IpAddress", "TimeTicks", "Gauge32", "Counter32", "Unsigned32", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso")
TruthValue, RowStatus, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "RowStatus", "DisplayString", "TextualConvention")
extremeLacp = ModuleIdentity((1, 3, 6, 1, 4, 1, 1916, 1, 19))
if mibBuilder.loadTexts: extremeLacp.setLastUpdated('0502151530Z')
if mibBuilder.loadTexts: extremeLacp.setOrganization('Extreme Networks, Inc.')
class LacpGroupId(DisplayString):
    status = 'current'
    subtypeSpec = DisplayString.subtypeSpec + ValueSizeConstraint(1, 32)


class LacpMemberPort(TextualConvention, Unsigned32):
    status = 'current'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(0, 4294967295)
extremeLacpTable = MibTable((1, 3, 6, 1, 4, 1, 1916, 1, 19, 1), )
if mibBuilder.loadTexts: extremeLacpTable.setStatus('current')
extremeLacpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1916, 1, 19, 1, 1), ).setIndexNames((0, "EXTREME-LACP-MIB", "extremeLacpGroup"), (0, "EXTREME-LACP-MIB", "extremeLacpMemberPort"))
if mibBuilder.loadTexts: extremeLacpEntry.setStatus('current')
extremeLacpGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 1916, 1, 19, 1, 1, 1), LacpGroupId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: extremeLacpGroup.setStatus('current')
extremeLacpMemberPort = MibTableColumn((1, 3, 6, 1, 4, 1, 1916, 1, 19, 1, 1, 2), LacpMemberPort()).setMaxAccess("readonly")
if mibBuilder.loadTexts: extremeLacpMemberPort.setStatus('current')
extremeLacpAggStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1916, 1, 19, 1, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: extremeLacpAggStatus.setStatus('current')
mibBuilder.exportSymbols("EXTREME-LACP-MIB", extremeLacpGroup=extremeLacpGroup, LacpGroupId=LacpGroupId, extremeLacpAggStatus=extremeLacpAggStatus, extremeLacpEntry=extremeLacpEntry, extremeLacpTable=extremeLacpTable, LacpMemberPort=LacpMemberPort, extremeLacpMemberPort=extremeLacpMemberPort, PYSNMP_MODULE_ID=extremeLacp, extremeLacp=extremeLacp)
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is used as the top-level gyp file for building WebView in the Android
# tree. It should depend only on native code, as we cannot currently generate
# correct makefiles to build Java code via gyp in the Android tree.
{
  'targets': [
    {
      'target_name': 'All',
      'type': 'none',
      'dependencies': [
        'android_webview.gyp:libwebviewchromium',
        # Needed by android_webview_java
        '../base/base.gyp:base_java_activity_state',
        '../base/base.gyp:base_java_memory_pressure_level_list',
        '../content/content.gyp:page_transition_types_java',
        '../content/content.gyp:result_codes_java',
        '../content/content.gyp:speech_recognition_error_java',
        '../net/net.gyp:certificate_mime_types_java',
        '../net/net.gyp:cert_verify_result_android_java',
        '../net/net.gyp:net_errors_java',
        '../net/net.gyp:private_key_types_java',
      ],
    },  # target_name: All
  ],  # targets
}
|
class Solution:
    # @param {integer[]} nums
    # @return {integer}
    def majorityElement(self, nums):
        # Boyer-Moore voting: keep a candidate and a counter.
        candidate = None
        count = 0
        for num in nums:
            if num == candidate:
                count += 1
            elif count == 0:
                candidate = num
                count = 1
            else:
                count -= 1
        return candidate
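

# Example usage: the majority element appears more than n/2 times, which is what
# the voting scheme above relies on.
if __name__ == '__main__':
    print(Solution().majorityElement([2, 2, 1, 1, 1, 2, 2]))  # 2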
|
class Sql:
    custlist = "SELECT * FROM cust"
    custlistone = "SELECT * FROM cust WHERE id= '%s' "
    custinsert = "INSERT INTO cust VALUES ('%s','%s','%s')"
    custdelete = "DELETE FROM cust WHERE id= '%s' "
    custupdate = "UPDATE cust SET pwd='%s',name='%s' WHERE id='%s' "
    itemlist = "SELECT * FROM item"
    itemlistone = "SELECT * FROM item WHERE id= %d "
    iteminsert = "INSERT INTO item VALUES (NULL,'%s',%d,'%s',CURRENT_DATE())"
    itemdelete = "DELETE FROM item WHERE id= %d "
    itemupdate = "UPDATE item SET name='%s',price=%d, imgname='%s' WHERE id= %d "
|
'''
@author: Tibor Hercz // Tiboonn
@Link: https://github.com/Tiboonn/AWS-DeepRacer
@License: N/D
'''


def reward_function(params):
    '''
    Example of rewarding the agent to follow center line
    '''
    # Read input parameters
    track_width = params['track_width']
    distance_from_center = params['distance_from_center']
    all_wheels_on_track = params['all_wheels_on_track']
    steering = abs(params['steering_angle'])
    speed = params['speed']
    is_left_of_center = params['is_left_of_center']

    # Calculate 4 markers that are at varying distances away from the center line
    marker_1 = 0.1 * track_width
    marker_2 = 0.15 * track_width
    marker_3 = 0.25 * track_width
    marker_4 = 0.5 * track_width

    # Give higher reward if the car is closer to center line and vice versa
    if not all_wheels_on_track:
        reward = 1e-3
        return reward
    elif distance_from_center <= marker_1:
        reward = 1.0 * speed
        if is_left_of_center:
            reward = reward + 0.1
    elif distance_from_center <= marker_2:
        reward = 0.8 * speed
        if is_left_of_center:
            reward = reward + 0.1
    elif distance_from_center <= marker_3:
        reward = 0.3 * speed
        if is_left_of_center:
            reward = reward + 0.1
    elif distance_from_center <= marker_4:
        reward = 0.1 * speed
        if is_left_of_center:
            reward = reward + 0.1
    else:
        reward = 1e-3  # likely crashed/ close to off track

    ABS_STEERING_THRESHOLD = 15
    if steering > ABS_STEERING_THRESHOLD:
        reward *= 0.8

    return float(reward)
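

# Minimal local smoke test (not part of the DeepRacer environment): the params
# dict below is made up, using only the keys the function reads above.
if __name__ == '__main__':
    sample_params = {
        'track_width': 1.0,
        'distance_from_center': 0.05,
        'all_wheels_on_track': True,
        'steering_angle': 5.0,
        'speed': 2.0,
        'is_left_of_center': True,
    }
    print(reward_function(sample_params))  # 1.0 * 2.0 + 0.1 = 2.1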
|
vowels = set(['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U'])
punc = set(['.', '!', '?', ' '])

while True:
    sIn = input().strip()
    lis = []
    last = 0
    for i, char in enumerate(sIn):
        if char in punc:
            if i - last > 0:
                lis.append(sIn[last: i])
            last = i + 1
    if sIn[-1] not in punc:
        lis.append(sIn[last:])
    wordVal = 0
    hashOf = 0
    for word in lis:
        hashOf += wordVal
        wordVal = 0
        vowelC = 1
        for letter in word:
            if letter in vowels:
                wordVal += vowelC
                vowelC += 1
            else:
                wordVal += ord(letter)
        hashOf += wordVal
    if sIn[-1] in punc:
        hashOf += wordVal
    for char in sIn:
        if char in punc:
            hashOf += ord(char)
    print(f'The hash is {hashOf % 100}.')
    if input().strip() == 'n': break
|
"""
Defines two dictionaries for converting
between text and integer sequences.
"""
char_map_str = """
' 0
<SPACE> 1
a 2
b 3
c 4
d 5
e 6
f 7
g 8
h 9
i 10
j 11
k 12
l 13
m 14
n 15
o 16
p 17
q 18
r 19
s 20
t 21
u 22
v 23
w 24
x 25
y 26
z 27
"""
# the "blank" character is mapped to 28
char_map = {}
index_map = {}
for line in char_map_str.strip().split('\n'):
    ch, index = line.split()
    char_map[ch] = int(index)
    index_map[int(index)+1] = ch
index_map[2] = ' '
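

# Example usage: map a transcript to an integer sequence and back, treating a
# space as the <SPACE> token defined above (decoding uses the +1 offset in index_map).
if __name__ == '__main__':
    text = "hi there"
    int_sequence = [char_map['<SPACE>'] if c == ' ' else char_map[c] for c in text]
    print(int_sequence)  # [9, 10, 1, 21, 9, 6, 19, 6]
    print(''.join(index_map[i + 1] for i in int_sequence))  # "hi there"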
|
# -*- coding: utf-8 -*-
"""
Script that asks the user for their name and an integer in the console, then prints
the user's name on separate lines as many times as the number indicates.
"""
name = input("¿Como te llamas? ")          # input reads data from the console; Python needs no trailing semicolon
n = input("Introduce un numero entero: ")  # input reads data from the console
print((name + "\n") * int(n))              # "\n" is a newline; * repeats the string; int() converts n to an integer
|
@graph
def context_from_path():
    sg = Shotgun()
    sgfs = SGFS(root=sandbox, shotgun=sg)
    fix = Fixture(sg)
    proj = fix.Project('Example Project')
    seq = proj.Sequence("AA")
    shot = seq.Shot('AA_001')
    step = fix.Step('Anm')
    task = shot.Task('Do Work', id=123, step=step)
    task2 = shot.Task('Do More Work', id=234, step=step)

    ctx = sgfs.context_from_entities([task])
    yield ctx.dot()

    ctx = sgfs.context_from_entities([task, task2])
    yield ctx.dot()
|
# -*- coding: utf-8 -*-
def command():
    return "create-farm"


def init_argument(parser):
    parser.add_argument("--farm-name", required=True)
    parser.add_argument("--template-no", required=True)
    parser.add_argument("--comment")


def execute(requester, args):
    farm_name = args.farm_name
    template_no = args.template_no
    comment = args.comment

    parameters = {}
    parameters["FarmName"] = farm_name
    parameters["TemplateNo"] = template_no
    if comment is not None:
        parameters["Comment"] = comment

    return requester.execute("/CreateFarm", parameters)
|
#!/usr/bin/env python
print("This is example file 3")
|
#!/usr/bin/env python
#####################################
# Installation module for empire
#####################################
# AUTHOR OF MODULE NAME
AUTHOR="Ian Smith"
# DESCRIPTION OF THE MODULE
DESCRIPTION="This module will install/update Empire - post exploitation python/powershell for windows and nix/osx"
# INSTALL TYPE GIT, SVN, FILE DOWNLOAD
# OPTIONS = GIT, SVN, FILE
INSTALL_TYPE="GIT"
# LOCATION OF THE FILE OR GIT/SVN REPOSITORY
REPOSITORY_LOCATION="https://github.com/BC-SECURITY/Empire"
# WHERE DO YOU WANT TO INSTALL IT
INSTALL_LOCATION="empire3"
# DEPENDS FOR DEBIAN INSTALLS
DEBIAN="git"
FEDORA="git"
# COMMANDS TO RUN AFTER
AFTER_COMMANDS='cd {INSTALL_LOCATION},echo -e "\n" | ./setup/install.sh'
# DON'T RUN AFTER COMMANDS ON UPDATE
BYPASS_UPDATE="NO"
# LAUNCHER
LAUNCHER="empire"
|
def imosh_test(
        name,
        srcs=[],
        data=[],
        **kargs):
    if len(srcs) != 1:
        fail("Exactly one source file must be given.")
    native.genrule(
        name = name + "_genrule_sh",
        srcs = ["//bin:imosh_test_generate"],
        outs = [name + "_genrule.sh"],
        cmd = "$(BINDIR)/bin/imosh_test_generate " +
              PACKAGE_NAME + "/" + srcs[0] + " >$@",
    )
    native.sh_test(
        name = name,
        srcs = [name + "_genrule.sh"],
        data = [":" + srcs[0]] + data + ["//bin:imosh"],
        **kargs)
|
# -*- coding: utf-8 -*-
name = 'usdview'
version = '20.05'
requires = [
'pyside-1.2',
'usd-20.05',
'ocio_configs',
'turret_usd'
]
def commands():
env.DEFAULT_USD.set('{root}/bin/DefaultUSD.usda')
|
'''5.10 Modify the program from the listing so that it accepts answers in uppercase or lowercase letters for every question.
Listing 5.10 – Counting correct answers
pontos = 0
questão = 1
while questão <= 3:
resposta = input(f"Resposta da questão {questão}: ")
if questão == 1 and resposta == "b":
pontos = pontos + 1
if questão == 2 and resposta == "a":
pontos = pontos + 1
if questão == 3 and resposta == "d":
pontos = pontos + 1
questão = questão + 1
print(f"O aluno fez {pontos} ponto(s)") '''
pontos = 0
questão = 1
while questão <= 3:
resposta = input(f"Resposta da questão {questão}: ")
if questão == 1 and (resposta == "b" or resposta == "B"):
pontos = pontos + 1
if questão == 2 and (resposta == "a" or resposta == "A"):
pontos = pontos + 1
if questão == 3 and (resposta == "d" or resposta == "D"):
pontos = pontos + 1
questão = questão + 1
print(f"O aluno fez {pontos} ponto(s)")
|
def make_ends(nums):
first = nums[0]
last = nums[len(nums)-1]
newArr = []
newArr.append(first)
newArr.append(last)
return newArr
|
class EnumBase(object):
class Meta:
allowed_types = tuple()
zero_value = None
@classmethod
def names(klass, with_zero_value=True):
def _get_names():
names = []
for n in dir(klass):
if '__' not in n and n != 'Meta':
value = getattr(klass, n)
if isinstance(value, klass.Meta.allowed_types):
if with_zero_value or value != klass.Meta.zero_value:
names.append(n)
return names
if with_zero_value:
if not hasattr(klass, '_cache__iterable_names_with_zero'):
klass._cache__iterable_names_with_zero = _get_names()
return klass._cache__iterable_names_with_zero
else:
if not hasattr(klass, '_cache__iterable_names_without_zero'):
klass._cache__iterable_names_without_zero = _get_names()
return klass._cache__iterable_names_without_zero
@classmethod
def values(klass):
return tuple(klass.iter())
@classmethod
def lookup(klass, instance):
d = {}
for n in klass.names():
v = getattr(klass, n)
if not isinstance(v, klass.Meta.allowed_types):
continue
d[v] = n
return d[instance]
@classmethod
def reverse_lookup(klass, name):
return dict(klass.choices(with_zero_value=True))[name]
@classmethod
def iter(klass):
values = [getattr(klass, n) for n in klass.names()]
values.sort()
for v in values:
if v == klass.Meta.zero_value:
continue
yield v
@classmethod
def next_value(cls, cur_value):
index_of = cls.all().index(cur_value)
        return cls.all()[(index_of + 1) % len(cls.all())]
@classmethod
def all(klass, with_zero_value=True):
def _get_values():
values = [getattr(klass, n) for n in klass.names(with_zero_value=with_zero_value) if isinstance(getattr(klass, n), klass.Meta.allowed_types)]
values.sort()
return values
if with_zero_value:
if not hasattr(klass, '_cache__iterable_values_with_zero'):
klass._cache__iterable_values_with_zero = _get_values()
return klass._cache__iterable_values_with_zero
else:
if not hasattr(klass, '_cache__iterable_values_without_zero'):
klass._cache__iterable_values_without_zero = _get_values()
return klass._cache__iterable_values_without_zero
@classmethod
def all_set(cls):
if not hasattr(cls, "_cache__iterable_values_set"):
cls._cache__iterable_values_set = set(cls.all())
return cls._cache__iterable_values_set
@classmethod
def choices(klass, reverse=False, with_zero_value=False):
lst = []
for n in klass.names(with_zero_value=with_zero_value):
v = getattr(klass, n)
if reverse:
lst.append((v, n))
else:
lst.append((n, v))
return lst
class IntEnum(EnumBase):
class Meta:
allowed_types = (int, long,)
zero_value = 0
class StringEnum(EnumBase):
class Meta:
allowed_types = (basestring,)
zero_value = ''
class BooleanEnum(EnumBase):
class Meta:
allowed_types = (bool,)
zero_value = None
class ListEnum(EnumBase):
class Meta:
allowed_types = (list, tuple,)
zero_value = []
class FloatEnum(EnumBase):
class Meta:
allowed_types = (float,)
zero_value = 0.0
"""
Some helper functions that make using `enum` classes in Python a little easier
"""
def generate_enum_reverse_lookup(klass):
"""
    Returns a lookup dict to verify that a certain value exists in an enum class
    and to map the value back to its attribute name in that class
"""
dct = {}
for k in dir(klass):
if k.startswith('__'):
continue
val = getattr(klass, k, None)
if not isinstance(val, (int, long)):
continue
if val in dct:
            raise ValueError('Duplicate value %r: each value may appear only once in a reverse lookup' % val)
dct[val] = k
return dct
def generate_choices_tuple(klass, reverse=False):
lst = []
for prop in dir(klass):
if prop.startswith('_'):
continue
attr = getattr(klass, prop)
if not isinstance(attr, (long,int)):
continue
if reverse:
lst.append((attr, prop))
else:
lst.append((prop, attr))
return tuple(lst)
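# A minimal usage sketch (an illustration, not part of the original module; note that
# the module targets Python 2, since it references `long` and `basestring`):
class Color(IntEnum):
    NONE = 0
    RED = 1
    GREEN = 2
    BLUE = 3

# Color.names()    -> ['BLUE', 'GREEN', 'NONE', 'RED']   (dir() order, zero value included)
# Color.values()   -> (1, 2, 3)                          (iter() skips the zero value)
# Color.choices()  -> [('BLUE', 3), ('GREEN', 2), ('RED', 1)]
# Color.lookup(2)  -> 'GREEN'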
|
# intro to function
def my_function():
print("Hello, this is function")
# calling function
my_function()
|
class Solution:
def numDifferentIntegers(self, word: str) -> int:
stripped = {}
i = 0
for c in word:
if c in {"0":1, "1":1, "2":1, "3":1, "4":1, "5":1, "6":1, "7":1, "8":1, "9":1}:
if i in stripped:
if stripped[i] == "0":
stripped[i] = c
else:
stripped[i] = stripped[i] + c
else:
stripped[i] = c
else:
i = i + 1
counter = {}
for key in stripped:
if stripped[key] not in counter:
counter[stripped[key]] = 1
else:
counter[stripped[key]] = counter[stripped[key]] + 1
return len(counter)
|
# -*- coding: utf-8 -*-
# define a function
def my_abs(x):
if x >= 0:
return x
else:
return -x
    print('end.')  # never executed: both branches above have already returned
print(my_abs(-10))
def nop():
pass
def my_opt_abs(x):
if not isinstance(x, (int, float)):
raise TypeError('bad operand type for my_opt_abs(): %s' % type(x))
if x >= 0:
return x
else:
return -x
print(my_opt_abs((1, 2, 3)))
|
"""
Illustrates how to embed Beaker cache functionality within
the Query object, allowing full cache control as well as the
ability to pull "lazy loaded" attributes from long term cache
as well.
In this demo, the following techniques are illustrated:
* Using custom subclasses of Query
* Basic technique of circumventing Query to pull from a
custom cache source instead of the database.
* Rudimentary caching with Beaker, using "regions" which allow
global control over a fixed set of configurations.
* Using custom MapperOption objects to configure options on
a Query, including the ability to invoke the options
deep within an object graph when lazy loads occur.
E.g.::
# query for Person objects, specifying cache
q = Session.query(Person).options(FromCache("default", "all_people"))
# specify that each Person's "addresses" collection comes from
# cache too
q = q.options(RelationCache("default", "by_person", Person.addresses))
# query
print q.all()
To run, both SQLAlchemy and Beaker (1.4 or greater) must be
installed or on the current PYTHONPATH. The demo will create a local
directory for datafiles, insert initial data, and run. Running the
demo a second time will utilize the cache files already present, and
exactly one SQL statement against two tables will be emitted - the
displayed result however will utilize dozens of lazyloads that all
pull from cache.
The demo scripts themselves, in order of complexity, are run as follows::
python examples/beaker_caching/helloworld.py
python examples/beaker_caching/relation_caching.py
python examples/beaker_caching/advanced.py
python examples/beaker_caching/local_session_caching.py
Listing of files:
environment.py - Establish data / cache file paths, and configurations,
bootstrap fixture data if necessary.
meta.py - Represent persistence structures which allow the usage of
Beaker caching with SQLAlchemy. Introduces a query option called
FromCache.
model.py - The datamodel, which represents Person that has multiple
Address objects, each with PostalCode, City, Country
fixture_data.py - creates demo PostalCode, Address, Person objects
in the database.
helloworld.py - the basic idea.
relation_caching.py - Illustrates how to add cache options on
relation endpoints, so that lazyloads load from cache.
advanced.py - Further examples of how to use FromCache. Combines
techniques from the first two scripts.
local_session_caching.py - Grok everything so far? This example
creates a new Beaker container that will persist data in a dictionary
which is local to the current session. remove() the session
and the cache is gone.
"""
|
## @package serde
# Module caffe2.python.predictor.serde
def serialize_protobuf_struct(protobuf_struct):
return protobuf_struct.SerializeToString()
def deserialize_protobuf_struct(serialized_protobuf, struct_type):
deser = struct_type()
deser.ParseFromString(serialized_protobuf)
return deser
|
# You can import and use this in a DAG in the parent folder like usual in
# Python, i.e. `import python_callables.compliance`
def check_port_22_open():
pass
|
#!/usr/bin/python
with open("vita.md") as fp:
lines = fp.readlines()
lines2 = lines[6:]
lines3 = lines[8:]
# print lines2
with open("vita_noyaml.md", "w") as fp:
fp.writelines(lines2)
# print lines3
with open("vita_noyaml_nocvaspdf.md", "w") as fp:
fp.writelines(lines3)
# onepage
with open("vita_onepage.md") as fp:
lines = fp.readlines()
lines2 = lines[5:]
lines3 = lines[7:]
with open("vita_onepage_noyaml.md", "w") as fp:
fp.writelines(lines2)
with open("vita_onepage_nocvaspdf.md", "w") as fp:
fp.writelines(lines3)
|
__author__ = 'nikaashpuri'
'''
TCP_SERVER_IP = '162.251.84.104'
SYSTEM_PATH_TO_APPEND = '/public_html/aquabrim_project'
LOG_FILE_LOCATION = '/logs/django_log'
TCP_SERVER_FILE_PATH = '/public_html/aquabrim_project/machine/tcp_ip_server.py'
DATABASE_PATH = '/sites_database/dev.db'
'''
TCP_SERVER_IP = 'localhost'
LOG_FILE_LOCATION = '/Users/nikaashpuri/Documents/alibi_projects/aquabrim_project/logs/django_log'
TCP_SERVER_FILE_PATH = '/Users/nikaashpuri/Documents/alibi_projects/aquabrim_project/machine/tcp_ip_server.py'
SYSTEM_PATH_TO_APPEND = '/Users/nikaashpuri/Documents/alibi_projects/aquabrim_project/'
DATABASE_PATH = 'dev.db'
TCP_SERVER_PORT = 40000
NUMBER_OF_SERVER_START_ATTEMPTS = 5
|
class Solution:
# @param candidates, a list of integers
# @param target, integer
# @return a list of lists of integers
def combinationSum(self, candidates, target):
if not candidates:
return []
candidates.sort()
n = len(candidates)
res = []
def solve(start, target, tmp):
if target < 0:
return
if target == 0:
res.append(tmp[:])
return
for i in xrange(start, n):
tmp.append(candidates[i])
solve(i, target-candidates[i], tmp)
tmp.pop()
solve(0, target, [])
return res
|
# Unless explicitly stated otherwise all files in this repository are licensed
# under the Apache License Version 2.0.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2018 Datadog, Inc.
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
BASIC_METRICS = {
'cpu.extra': {
's_type' : 'delta',
'unit' : 'millisecond',
'rollup' : 'summation',
'entity' : ['VirtualMachine']
},
'cpu.ready': {
's_type' : 'delta',
'unit' : 'millisecond',
'rollup' : 'summation',
'entity' : ['VirtualMachine', 'HostSystem']
},
'cpu.usage': {
's_type' : 'rate',
'unit' : 'percent',
'rollup' : 'average',
'entity' : ['VirtualMachine', 'HostSystem']
},
'cpu.usagemhz': {
's_type' : 'rate',
'unit' : 'megaHertz',
'rollup' : 'average',
'entity' : ['VirtualMachine', 'HostSystem', 'ResourcePool']
},
'disk.commandsAborted': {
's_type' : 'delta',
'unit' : 'number',
'rollup' : 'summation',
'entity' : ['VirtualMachine', 'HostSystem', 'Datastore']
},
'disk.deviceLatency': {
's_type' : 'absolute',
'unit' : 'millisecond',
'rollup' : 'average',
'entity' : ['HostSystem']
},
'disk.deviceReadLatency': {
's_type' : 'absolute',
'unit' : 'millisecond',
'rollup' : 'average',
'entity' : ['HostSystem']
},
'disk.deviceWriteLatency': {
's_type' : 'absolute',
'unit' : 'millisecond',
'rollup' : 'average',
'entity' : ['HostSystem']
},
'disk.queueLatency': {
's_type' : 'absolute',
'unit' : 'millisecond',
'rollup' : 'average',
'entity' : ['HostSystem']
},
'disk.totalLatency': {
's_type' : 'absolute',
'unit' : 'millisecond',
'rollup' : 'average',
        'entity' : ['HostSystem', 'Datastore']
},
'mem.active': {
's_type' : 'absolute',
'unit' : 'kiloBytes',
'rollup' : 'average',
'entity' : ['VirtualMachine', 'HostSystem', 'ResourcePool']
},
'mem.compressed': {
's_type' : 'absolute',
'unit' : 'kiloBytes',
'rollup' : 'average',
'entity' : ['VirtualMachine', 'HostSystem', 'ResourcePool']
},
'mem.consumed': {
's_type' : 'absolute',
'unit' : 'kiloBytes',
'rollup' : 'average',
'entity' : ['VirtualMachine', 'HostSystem', 'ResourcePool']
},
'mem.overhead': {
's_type' : 'absolute',
'unit' : 'kiloBytes',
'rollup' : 'average',
'entity' : ['VirtualMachine', 'HostSystem', 'ResourcePool']
},
'mem.vmmemctl': {
's_type' : 'absolute',
'unit' : 'kiloBytes',
'rollup' : 'average',
'entity' : ['VirtualMachine', 'HostSystem', 'ResourcePool']
},
'network.received': {
's_type' : 'rate',
'unit' : 'kiloBytesPerSecond',
'rollup' : 'average',
'entity' : ['VirtualMachine', 'HostSystem']
},
'network.transmitted': {
's_type' : 'rate',
'unit' : 'kiloBytesPerSecond',
'rollup' : 'average',
'entity' : ['VirtualMachine', 'HostSystem']
},
'net.received': {
's_type' : 'rate',
'unit' : 'kiloBytesPerSecond',
'rollup' : 'average',
'entity' : ['VirtualMachine', 'HostSystem']
},
'net.transmitted': {
's_type' : 'rate',
'unit' : 'kiloBytesPerSecond',
'rollup' : 'average',
'entity' : ['VirtualMachine', 'HostSystem']
},
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
fileptr = open("file2.txt", "a")
fileptr.write("Python has an easy syntax and user-friendly interaction.")
fileptr.close()
|
main_menu = [
["1", "Spam Tools", "Amino-Tools"],
["2", "Chat Tools"],
["3", "Activity Tools"],
["4", "profile Tools"],
["5", "raid Tools"],
["0", "Exit"]
]
spam_tools_menu = [
["1", "Spam Bot", "Amino-Tools"],
["2", "Wiki Spam Bot"],
["3", "Wall Spam Bot"],
["4", "Blog Spam Bot"]
]
chat_tools_menu = [
["1", "ChatId Finder", "Amino-Tools"],
["2", "Crash Chat Description"],
["3", "Transfer Fake Coins"]
]
activity_tools_menu = [
["1", "Invite Bot", "Amino-Tools"],
["2", "Like Bot"],
["3", "Follow Bot"],
["4", "Unfollow Bot"]
]
profile_tools_menu = [
["1", "Blogs Spam Bot", "Amino-Tools"],
["2", "Wiki Spam Bot"]
]
raid_tools_menu = [
["1", "Spam System Messages", "Amino-Tools"],
["2", "Send System Message"],
["3", "Spam With Join And Leave"],
["4", "Join Active Chats"]
]
chat_id_finder_menu = [
["1", "Get Public Chats ChatId", "Amino-Tools"],
["2", "Get Joined Chats ChatId"]
]
chat_invite_bot_menu = [
["1", "Invite Online Users", "Amino-Tools"],
["2", "Invite Recent Users"]
]
follow_bot_menu = [
["1", "Follow Online Users", "Amino-Tools"],
["2", "Follow Recent Users"]
]
|
# Write a program with a function called somaImposto. The function has two formal parameters: taxaImposto, the sales tax expressed as a percentage, and custo, the cost of an item before tax. The function "changes" the value of custo to include the sales tax.
def somaImposto(taxaImposto,custo):
taxaImposto = custo*taxaImposto/100
custo = custo+taxaImposto
return custo
taxaImposto = float(input('Digite o valor do imposto: '))
custo = float(input('E o custo: '))
print('O valor final é {}'.format(somaImposto(taxaImposto,custo)))
|
# Good morning! Here's your coding interview problem for today.
# This problem was asked by Google.
# A unival tree (which stands for "universal value") is a tree where all nodes under it have the same value.
# Given the root to a binary tree, count the number of unival subtrees.
# For example, the following tree has 5 unival subtrees:
# 0
# / \
# 1 0
# / \
# 1 0
# / \
# 1 1
class node:
def __init__(self,value,left=None,right=None):
self.value = value
self.left = left
self.right = right
def print(self):
print(self.left, '<--',self.value, '-->',self.right)
tree = node(False,node(True),node(False,node(True,node(True),node(True)),node(False)))
count = 0
def unival(tree):
    # Returns True when the subtree rooted at `tree` is unival, and counts it.
    global count
    if tree is None:
        return True
    left_unival = unival(tree.left)
    right_unival = unival(tree.right)
    # A subtree is unival when both children are unival subtrees and every
    # existing child carries the same value as this node.
    if (left_unival and right_unival
            and (tree.left is None or tree.left.value == tree.value)
            and (tree.right is None or tree.right.value == tree.value)):
        count += 1
        return True
    return False
unival(tree)
print(count)
|
"""
By default, Disco looks at an input URL and extracts its scheme in order to figure out which input stream to use.
When Disco determines the URL scheme, it tries to import the name `input_stream` from `disco.schemes.scheme_[SCHEME]`, where `[SCHEME]` is replaced by the scheme identified.
For instance, an input URL of `http://discoproject.org` would import and use :func:`disco.schemes.scheme_http.input_stream`.
"""
|
# -*- coding: utf-8 -*-
# declare a dictionary
dict1 = {'key1': 'value1', 'key2': 1}
print("字典示例:" + str(dict1))
# add a key-value pair
dict1['key3'] = 'value3'
print("添加键值对后:" + str(dict1))
# modify the value of a key
dict1['key1'] = 'new_value1'
print("修改键对应值后:" + str(dict1))
# delete a key-value pair
del dict1['key1']
print("删除键值对后:" + str(dict1))
# iterate over the dictionary
for key, value in dict1.items():
print("%s=%s" % (key, str(value)))
|
# Natural Language Toolkit: Shoebox Errors
#
# Copyright (C) 2001-2006 NLTK Project
# Author: Stuart Robinson <Stuart.Robinson@mpi.nl>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
This module provides Shoebox exceptions.
"""
# ---------------------------------------------------------------------
# CLASS: ShoeboxError
# DESC: ???
# ---------------------------------------------------------------------
class ShoeboxError(Exception):
"""
This is the base class for all Shoebox errors.
"""
def __init__(self):
self._msg = ""
# ---------------------------------------------
# CLASS: ValidationError
# DESC: ???
# ---------------------------------------------
class NonUniqueEntryError(ShoeboxError):
"""
???
"""
    def __init__(self):
pass
class ValidationError(ShoeboxError):
def __init__(self):
pass
def setField(self, field):
self._field = field
def getField(self):
return self._field
# ---------------------------------------------
# CLASS: NoMetadataFound
# DESC: ???
# ---------------------------------------------
class NoMetadataFound(ValidationError):
def __init__(self, field):
self._field = field
class FieldError(ShoeboxError):
def __init__(self):
pass
    def __str__(self):
return self.get_message()
class NonUniqueFieldError(FieldError):
"""
Error raised when an attempt is made to retrieve a unique field which has more than one value
"""
def __init__(self, entry):
self._entry = entry
def setEntry(self, entry):
self._entry = entry
def getEntry(self):
return self._entry
# ---------------------------------------------
# CLASS: BadFieldValue
# DESC: ???
# ---------------------------------------------
class BadFieldValueError(ValidationError, FieldError):
FIELD_VALUE_ERROR_RANGE_SET = '1'
FIELD_VALUE_ERROR_NO_WORD_WRAP = '2'
FIELD_VALUE_ERROR_EMPTY_VALUE = '3'
FIELD_VALUE_ERROR_SINGLE_WORD = '4'
errorTypes = {
'1': "Range Set",
'2': "No Word Wrap",
'3': "Empty Value",
'4': "Single Word"
}
def __init__(self, errorType, entry, field, fmMetadata):
self._entry = entry
self._errorType = errorType
self._field = field
self._fmMetadata = fmMetadata
def __str__(self):
e = self.getEntry()
f = self.getField()
typ = self.getErrorDescription()
s = "'%s' error in '\\%s' field of record %i!\nRecord:\n%s" % (typ, f.getMarker(), e.getNumber(), e.getRawText())
return s
def getFieldMarkerMetadata(self):
return self._fmMetadata
def setFieldMarkerMetadata(self, fmMetadata):
self._fmMetadata = fmMetadata
def getErrorDescription(self):
try:
return self.errorTypes[self.getErrorType()]
except:
return None
def getErrorType(self):
return self._errorType
def setErrorType(self, errorType):
self._errorType = errorType
def getEntry(self):
return self._entry
def setEntry(self, entry):
self._entry = entry
|
#Declare variables to hold the file name and access mode
fileName = "GuestList.txt"
accessMode = "w"
#Open the file for writing
myFile = open(fileName, accessMode)
#Write the guest names and ages to the file
#I can write an entire record in one write statement
myFile.write("Doyle McCarty,27\n")
myFile.write("Jodi Mills,25\n")
myFile.write("Nicholas Rose,32\n")
#I could write the name and age in separate write statements
myFile.write("Kian Goddard")
myFile.write(",36\n")
myFile.write("Zuha Hanania")
myFile.write(",26\n")
#Close the file
myFile.close()
|
""" fizzbuzz for 1 to 100
fizz on 3
buzz on 5
fizzbuzz on 15
"""
for i in range(1, 101):
if i % 15 == 0:
print("fizzbuzz")
continue
if i % 3 == 0:
print('fizz')
continue
if i % 5 == 0:
print('buzz')
continue
print(i)
|
##################################################
# This script was written to present information and conditionals about my hometown, Goiânia,
# as the second submission for the pre-course of Programming at IAAC, by professor Diego Pajarito.
##################################################
#
##################################################
# Author: Laura Guimarães
# Copyright: Copyright 2020, IAAC
# Credits: [Institute for Advanced Architecture of Catalonia - IAAC, Advanced Architecture group]
# Maintainer: Laura Guimarães
# Email: laura.figueiredo@students.iaac.net
# Status: development
##################################################
# End of header section
my_city_name = 'Goiânia'
my_city_population = 1536000
my_current_city_name = 'Barcelona'
my_current_city_population = 5500000
if my_city_population > 10000000:
print('My city is a megacity.')
elif my_city_population > 1500000:
print('My city is a large metropolitan area.')
if my_city_population > my_current_city_population:
print('My city is bigger than my current city.')
else:
print('My current city is bigger than my city.')
elif my_city_population > 500000:
print('My city is a metropolitan area.')
elif my_city_population > 200000:
print('My city is a medium size urban area.')
elif my_city_population > 50000:
print('My city is a small urban area.')
else:
print ('My city is a small city.')
|
#Date: 033122
#Difficulty: Medium
class Solution(object):
def longestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
start=end=0
for i in range(len(s)):
length1=self.expandFromMiddle(s,i,i)
length2=self.expandFromMiddle(s,i,i+1)
maxLength=max(length1,length2)
if maxLength>end-start+1:
start=i-(maxLength-1)//2
end=i+maxLength//2
return s[start:end+1]
def expandFromMiddle(self,s,left,right):
while left>=0 and right<len(s) and s[left]==s[right]:
left-=1
right+=1
return right-left-1
|
def set_dimensions(dimensions):
"""
    Build the list of dimension dicts expected by the Reporting API
    @dimensions: list of dimension names
"""
result = []
for i in dimensions:
d = {
'name': i
}
result.append(d)
return result
def set_metrics(metrics):
"""
    Build the list of metric dicts expected by the Reporting API
    @metrics: list of metric expressions
"""
result = []
for i in metrics:
d = {
'expression': i
}
result.append(d)
return result
def set_date_range(start_date, end_date):
"""
    Build the date range list expected by the Reporting API
@start_date: string date "yyyy-mm-dd"
@end_date: string date "yyyy-mm-dd"
"""
return [{'startDate': start_date, 'endDate': end_date}]
def get_report(analytics, view_id, dimensions, metrics, start_date, end_date, sampling_level="LARGE",
metric_filter=None, dimension_filter=None, segments=None,page_token=None):
"""
Use the Analytics Service Object to query the Analytics Reporting API V4.
@analytics: result of initialize_api function
@view_id: Id of Customer's Google Analytics View
@dimensions: list of dimensions (set at the top of the script)
@metrics: list of metrics (set at the top of the script)
@start_date: string date "yyyy-mm-dd"
@end_date: string date "yyyy-mm-dd"
    @sampling_level: sampling level, "LARGE" by default
    @page_token: token for the next page of results, if any
    return: API response
"""
body = {
'reportRequests': [
{
'viewId': view_id,
'dateRanges': set_date_range(start_date, end_date),
'dimensions': set_dimensions(dimensions),
'metrics': set_metrics(metrics),
'samplingLevel': sampling_level,
"pageToken": page_token
}]
}
if metric_filter:
body["reportRequests"][0]["metricFilterClauses"] = metric_filter
if dimension_filter:
body["reportRequests"][0]["dimensionFilterClauses"] = dimension_filter
if segments:
body["reportRequests"][0]["segments"] = segments
response = analytics.reports().batchGet(body=body).execute()
return response
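# A minimal usage sketch (the view id, dimension and metric names below are
# placeholders; `analytics` is assumed to be an authorized Analytics Reporting
# API v4 service object, e.g. the result of an initialize_api helper):
def example_report(analytics):
    response = get_report(
        analytics,
        view_id="XXXXXXXX",
        dimensions=["ga:date", "ga:sourceMedium"],
        metrics=["ga:sessions", "ga:users"],
        start_date="2020-01-01",
        end_date="2020-01-31",
    )
    for report in response.get("reports", []):
        for row in report.get("data", {}).get("rows", []):
            print(row["dimensions"], row["metrics"][0]["values"])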
|
# based on: https://github.com/tigertv/secretpy/blob/master/secretpy/ciphers/autokey.py
class cipher_autokey:
def process(self, alphabet, key, text, isEncrypt):
ans = ""
for i in range(len(text)):
m = text[i]
if i < len(key):
k = key[i]
else:
if isEncrypt == 1:
k = text[i - len(key)]
else:
k = ans[i - len(key)]
try:
alphI = alphabet.index(m)
except ValueError:
                raise Exception("Can't find char '" + m + "' of text in alphabet!")
try:
alphI += isEncrypt * alphabet.index(k)
except ValueError:
                raise Exception("Can't find char '" + k + "' of key in alphabet!")
alphI = alphI % len(alphabet)
enc = alphabet[alphI]
ans += enc
return ans
def encrypt(self, text, key, alphabet=u"abcdefghijklmnopqrstuvwxyz"):
return self.process(alphabet, key, text, 1)
def decrypt(self, text, key, alphabet=u"abcdefghijklmnopqrstuvwxyz"):
return self.process(alphabet, key, text, -1)
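# A quick self-check sketch (the sample plaintext and key are chosen here, not part
# of the original file):
if __name__ == "__main__":
    cipher = cipher_autokey()
    encrypted = cipher.encrypt(u"attackatdawn", u"queenly")
    print(encrypted)                               # qnxepvytwtwp
    print(cipher.decrypt(encrypted, u"queenly"))   # attackatdawn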
|
class Settings:
"""
    The Settings class offers a convenient way for child classes to assign
    values from a dictionary to the attributes whose names match the keys
"""
def setProperties(self, settings: dict):
"""
        Set attribute values from the dictionary for every key that matches an existing attribute name
        ---
        Parameters:
        -settings: dictionary whose keys may match attribute names
         and whose values are the values to assign
"""
if not settings:
return
for key in settings:
if hasattr(self, key):
setattr(self, key, settings[key])
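# A minimal usage sketch (the ServerSettings class and its fields are illustrative
# assumptions, not part of the original module):
class ServerSettings(Settings):
    def __init__(self, settings: dict = None):
        self.host = "localhost"
        self.port = 8080
        self.setProperties(settings)

# Only keys matching existing attributes are applied; unknown keys are ignored.
config = ServerSettings({"port": 9090, "unknown_key": True})
print(config.host, config.port)  # localhost 9090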
|
"""
Note: V 1.0.0 Originally, the data-filling methods were developed by Eric Alfaro and Javier Soley in SCILAB
The Python version was developed by Rolando Duarte and Erick Rivera
Centro de Investigaciones Geofísicas (CIGEFI)
Universidad de Costa Rica (UCR)
"""
"""
MIT License
Copyright 2021 Rolando Jesus Duarte Mejias and Erick Rivera Fernandez
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class Preprocessing:
"""
    Preprocesses a dataframe by replacing missing values with mean values.
    There is also a function to reverse the dataframe when necessary
"""
def __init__(self):
pass
def changeNanMean(self, serie):
"""
        Finds missing value indexes and replaces the missing values with the mean
        Parameters
        ----------
        serie: pandas Series
            pandas Series with missing values
        Returns
        -------
        serie: pandas Series
            pandas Series with missing values replaced by the mean
        nanIndex: array
            missing value indexes
"""
serie = serie.copy()
nanIndex = serie[serie.isna()].index
serie.fillna(value = serie.mean(axis=0), axis=0, inplace=True)
return serie, nanIndex
def reverseChangeNanMean(self, serie):
"""
        Reverses the pandas Series, then finds missing value indexes and replaces the missing values with the mean
        Parameters
        ----------
        serie: pandas Series
            pandas Series with missing values
        Returns
        -------
        reverseSerie: pandas Series
            reversed pandas Series with missing values replaced by the mean
        reverseNanIndex: array
            missing value indexes
"""
reverseSerie= serie[::-1].copy()
reverseSerie.index = serie.index
reverseSerie, reverseNanIndex = self.changeNanMean(reverseSerie)
return reverseSerie, reverseNanIndex
def changeDfNanMean(self, df):
"""
        Finds missing value indexes and replaces the missing values with the column means
        Parameters
        ----------
        df: pandas dataframe
            pandas dataframe with missing values
        Returns
        -------
        df: pandas dataframe
            pandas dataframe with missing values replaced by their respective column mean
        nanIndexColumns: array
            missing value indexes for each column
"""
df = df.copy()
nanIndexColumns = [df[column][df[column].isna()].index for column in df.columns]
df.fillna(value = df.mean(axis=0), axis=0, inplace=True)
return df, nanIndexColumns
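# A minimal usage sketch (the sample data is chosen here; pandas and numpy are
# assumed to be available, as the module operates on pandas objects):
if __name__ == "__main__":
    import numpy as np
    import pandas as pd

    pre = Preprocessing()
    serie = pd.Series([1.0, np.nan, 3.0, np.nan, 5.0])
    filled, nan_index = pre.changeNanMean(serie)
    print(filled.tolist())   # [1.0, 3.0, 3.0, 3.0, 5.0]
    print(list(nan_index))   # [1, 3]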
|
class User:
    user_list = []
def __init__(self, user_name, email, password):
'''
saving user credentials into user_list for login
'''
self.user_name = user_name
self.email = email
self.password = password
def save_user(self):
'''
saving a user into our list of users
'''
User.user_list.append(self)
def delete_user(self):
'''
delete a user from our list of users
'''
User.user_list.remove(self)
    @classmethod
    def check_existing_user(cls, user_name):
        '''
        check whether a user with the given user_name has already been saved
        '''
        for user in cls.user_list:
            if user.user_name == user_name:
                return True
        return False
    @classmethod
    def display_users(cls):
        '''
        return the list of saved users
        '''
        return cls.user_list
    @classmethod
    def find_by_password(cls, user_name, password):
        '''
        Return the saved user whose user_name and password match the given
        credentials, or None if there is no such user.
        '''
        for user in cls.user_list:
            if user.user_name == user_name and user.password == password:
                return user
        return None
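# A short usage sketch (the credentials are placeholders):
if __name__ == "__main__":
    new_user = User("jdoe", "jdoe@example.com", "s3cret")
    new_user.save_user()
    print(User.check_existing_user("jdoe"))          # True
    print(User.find_by_password("jdoe", "s3cret"))   # the saved User instance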
|
# @Fábio C Nunes - 19.06.20
pessoa = {}
grupo = []
media = 0
lista_mulheres = []
while True:
#Entrada de dados.
print('-' * 20)
print('Cadastro de pessoas')
pessoa['nome'] = str(input('Nome: '))
pessoa['idade'] = int(input('Idade: '))
sexo = ' '
while sexo not in 'MF':
sexo = str(input('Sexo: ')).upper().strip()[0]
pessoa['sexo'] = sexo
grupo.append(pessoa.copy())
pessoa.clear()
c = ' '
print('-' * 20)
while c not in 'SN':
c = str(input('Deseja continuar? S/N: ')).strip().upper()[0]
if 'N' in c:
break
print('-' * 20)
# Number of people registered.
print(f'A.) {len(grupo)} pessoas foram cadastradas')
# Average age of the group.
for i in range(0, len(grupo)):
media += (grupo[i]['idade'])
media = media / len(grupo)
print(f'B.) A média de idade do grupo cadastrado é igual {media:.2f} anos.')
# Show a list with all the women.
for i in range (0, len(grupo)):
if grupo[i]['sexo'] == 'F':
lista_mulheres.append(grupo[i]['nome'])
print('-' * 20)
print('{:^20}'.format('C.) Lista de Mulheres'))
print('-' * 20)
for i in range(0, len(lista_mulheres)):
print(lista_mulheres[i])
# Show a list of the people older than the average age
print('-' * 20)
print('{:^20}'.format('Pessoas acima da média de idade'))
print('-' * 20)
for i in range(0, len(grupo)):
if grupo[i]['idade'] > media:
print(f'Nome: {grupo[i]["nome"]:<15}', end = '')
print(f'Idade: {grupo[i]["idade"]:<15}', end = '')
print(f'Sexo: {grupo[i]["sexo"]:^2}', end = '')
print('')
print('-' * 20)
|
init_config = {
'username': 'email@gmail.com',
'pwd': 'password',
'mongodb': {
'host': 'mongodb://localhost:27017/'
}
}
|
#!/usr/bin/env python
__all__ = ["dendrogram", "dotplot", "drawable", "letter", "logo"]
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__contributors__ = [
"Peter Maxwell",
"Gavin Huttley",
"Rob Knight",
"Zongzhi Liu",
"Matthew Wakefield",
"Stephanie Wilson",
"Rahul Ghangas",
"Sheng Han Moses Koh",
]
__license__ = "BSD-3"
__version__ = "2020.7.2a"
__status__ = "Production"
|
#
# PySNMP MIB module Dell-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Dell-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:55:18 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Bits, TimeTicks, Counter64, NotificationType, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, iso, Counter32, enterprises, ModuleIdentity, Unsigned32, Gauge32, IpAddress, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "TimeTicks", "Counter64", "NotificationType", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "iso", "Counter32", "enterprises", "ModuleIdentity", "Unsigned32", "Gauge32", "IpAddress", "Integer32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
class Percents(Integer32):
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 100)
class NetNumber(OctetString):
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(4, 4)
fixedLength = 4
class VlanPriority(Integer32):
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 7)
rnd = ModuleIdentity((1, 3, 6, 1, 4, 1, 89))
rnd.setRevisions(('2007-01-02 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: rnd.setRevisionsDescriptions(('Initial revision.',))
if mibBuilder.loadTexts: rnd.setLastUpdated('200701020000Z')
if mibBuilder.loadTexts: rnd.setOrganization('Dell')
if mibBuilder.loadTexts: rnd.setContactInfo('www.dell.com')
if mibBuilder.loadTexts: rnd.setDescription('This private MIB module defines Dell private MIBs.')
rndNotifications = ObjectIdentity((1, 3, 6, 1, 4, 1, 89, 0))
if mibBuilder.loadTexts: rndNotifications.setStatus('current')
if mibBuilder.loadTexts: rndNotifications.setDescription(" All the rnd notifications will reside under this branch as specified in RFC2578 'Structure of Management Information Version 2 (SMIv2)' 8.5")
rndMng = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 1))
rndDeviceParams = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 2))
rndBootP = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 24))
ipSpec = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 26))
rsTunning = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 29))
rndApplications = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 35))
rsUDP = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 42))
swInterfaces = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 43))
rlIPmulticast = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 46))
rlFFT = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 47))
vlan = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 48))
rlRmonControl = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 49))
rlBrgMacSwitch = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 50))
rlExperience = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 51))
rlCli = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 52))
rlPhysicalDescription = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 53))
rlIfInterfaces = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 54))
rlMacMulticast = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 55))
rlGalileo = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 56))
rlpBridgeMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 57))
rlTelnet = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 58))
rlPolicy = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 59))
rlArpSpoofing = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 60))
rlMir = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 61))
rlIpMRouteStdMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 62))
rl3sw2swTables = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 63))
rlGvrp = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 64))
rlDot3adAgg = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 65))
rlEmbWeb = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 66))
rlSwPackageVersion = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 67))
rlBroadcom = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 68))
rlMultiSessionTerminal = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 69))
rlRCli = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 70))
rlBgp = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 71))
rlAgentsCapabilitiesGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 72))
rlAggregateVlan = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 73))
rlGmrp = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 75))
rlDhcpCl = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 76))
rlStormCtrl = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 77))
rlSsh = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 78))
rlAAA = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 79))
rlRadius = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 80))
rlTraceRoute = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 81))
rlSyslog = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 82))
rlEnv = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 83))
rlSmon = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 84))
rlSocket = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 85))
rlDigitalKeyManage = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 86))
rlCopy = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 87))
rlQosCliMib = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 88))
rlMngInf = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 89))
rlPhy = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 90))
rlJumboFrames = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 91))
rlTimeSynchronization = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 92))
rlDnsCl = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 93))
rlCDB = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 94))
rldot1x = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 95))
rlFile = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 96))
rlAAAEap = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 97))
rlSNMP = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 98))
rlSsl = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 100))
rlMacBasePrio = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 101))
rlWlanAccessPoint = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 102))
rlLocalization = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 103))
rlRs232 = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 104))
rlNicRedundancy = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 105))
rlAmap = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 106))
rlStack = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 107))
rlPoe = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 108))
rlUPnP = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 109))
rlLldp = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 110))
rlOib = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 111))
rlBridgeSecurity = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 112))
rlDhcpSpoofing = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 113))
rlBonjour = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 114))
rlLinksysSmartMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 115))
rlBrgMulticast = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 116))
rlBrgMcMngr = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 117))
rlGlobalIpAddrTable = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 118))
dlPrivate = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 119))
rlSecuritySuiteMib = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 120))
rlIntel = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 121))
rlTunnel = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 122))
rlAutoUpdate = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 123))
rlCpuCounters = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 124))
rlLbd = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 127))
rlErrdisableRecovery = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 128))
rlIPv6 = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 129))
rlActionAcl = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 130))
rlSafeGuard = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 131))
rlProtectedPorts = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 132))
rlBanner = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 133))
rlGreenEth = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 134))
rlDlf = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 135))
rlVlanTrunking = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 136))
rlCdp = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 137))
rlTrafficSeg = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 138))
rlImpbFeatures = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 139))
rlSmartPorts = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 140))
rlStatistics = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 141))
rlDeleteImg = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 142))
rlCustom1BonjourService = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 143))
rlSpecialBpdu = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 144))
rlTBIMib = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 145))
rlWeightedRandomTailDrop = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 146))
rlsFlowMib = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 147))
rlPfcMib = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 148))
rlEee = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 149))
rlEventsMib = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 150))
rlWlanMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 200))
rlEtsMib = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 201))
rlQcnMib = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 202))
rlSctMib = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 203))
rlSysmngMib = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 204))
rlFip = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 205))
rlDebugCapabilities = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 206))
rlIpStdAcl = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 207))
rlWBA = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 208))
rlSecSd = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 209))
rlOspf = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 210))
rlRtRedist = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 211))
rlIpPrefList = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 212))
rlVoipSnoop = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 213))
rlDhcpv6 = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 214))
rlIpv6Fhs = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 215))
rlInventoryEnt = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 217))
rlUdld = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 218))
rndEndOfMibGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 1000))
mibBuilder.exportSymbols("Dell-MIB", rlTBIMib=rlTBIMib, rlCDB=rlCDB, rndNotifications=rndNotifications, rlExperience=rlExperience, rlRCli=rlRCli, rlSecuritySuiteMib=rlSecuritySuiteMib, rlDhcpCl=rlDhcpCl, rlWeightedRandomTailDrop=rlWeightedRandomTailDrop, rlAgentsCapabilitiesGroups=rlAgentsCapabilitiesGroups, rlPolicy=rlPolicy, rlGvrp=rlGvrp, rlEtsMib=rlEtsMib, rl3sw2swTables=rl3sw2swTables, rlImpbFeatures=rlImpbFeatures, Percents=Percents, rlTunnel=rlTunnel, rlTrafficSeg=rlTrafficSeg, rlIfInterfaces=rlIfInterfaces, rlGreenEth=rlGreenEth, rlDhcpSpoofing=rlDhcpSpoofing, rlRs232=rlRs232, rlPoe=rlPoe, rlFip=rlFip, rlStormCtrl=rlStormCtrl, rlQosCliMib=rlQosCliMib, rlIpv6Fhs=rlIpv6Fhs, rlCli=rlCli, rlSNMP=rlSNMP, rlFile=rlFile, rlRmonControl=rlRmonControl, NetNumber=NetNumber, rlDigitalKeyManage=rlDigitalKeyManage, rlDhcpv6=rlDhcpv6, rlEmbWeb=rlEmbWeb, rndMng=rndMng, rlIPv6=rlIPv6, rlBgp=rlBgp, rlTimeSynchronization=rlTimeSynchronization, rlIpPrefList=rlIpPrefList, rlDot3adAgg=rlDot3adAgg, rlQcnMib=rlQcnMib, rlBroadcom=rlBroadcom, rlNicRedundancy=rlNicRedundancy, rlCopy=rlCopy, rlTelnet=rlTelnet, rlFFT=rlFFT, rlIpStdAcl=rlIpStdAcl, rlLinksysSmartMIB=rlLinksysSmartMIB, rlAAA=rlAAA, rlCpuCounters=rlCpuCounters, rlDebugCapabilities=rlDebugCapabilities, ipSpec=ipSpec, rsTunning=rsTunning, rlGmrp=rlGmrp, rlCustom1BonjourService=rlCustom1BonjourService, PYSNMP_MODULE_ID=rnd, rlUPnP=rlUPnP, rlVoipSnoop=rlVoipSnoop, rlSmon=rlSmon, rlBrgMacSwitch=rlBrgMacSwitch, rlSecSd=rlSecSd, rlSsl=rlSsl, rlSocket=rlSocket, rlLbd=rlLbd, rlBanner=rlBanner, rlPhysicalDescription=rlPhysicalDescription, rlBridgeSecurity=rlBridgeSecurity, rlEee=rlEee, rlLocalization=rlLocalization, rlSysmngMib=rlSysmngMib, rlRtRedist=rlRtRedist, VlanPriority=VlanPriority, rlAutoUpdate=rlAutoUpdate, rlsFlowMib=rlsFlowMib, rlTraceRoute=rlTraceRoute, rlWlanAccessPoint=rlWlanAccessPoint, rlPhy=rlPhy, dlPrivate=dlPrivate, rnd=rnd, rlBrgMcMngr=rlBrgMcMngr, rlAggregateVlan=rlAggregateVlan, rlAAAEap=rlAAAEap, rlJumboFrames=rlJumboFrames, rlMngInf=rlMngInf, rlSmartPorts=rlSmartPorts, vlan=vlan, rlEnv=rlEnv, rlBrgMulticast=rlBrgMulticast, rlCdp=rlCdp, swInterfaces=swInterfaces, rndEndOfMibGroup=rndEndOfMibGroup, rlSctMib=rlSctMib, rlOspf=rlOspf, rndDeviceParams=rndDeviceParams, rlIpMRouteStdMIB=rlIpMRouteStdMIB, rlGlobalIpAddrTable=rlGlobalIpAddrTable, rlMir=rlMir, rndApplications=rndApplications, rlStack=rlStack, rlProtectedPorts=rlProtectedPorts, rlWlanMIB=rlWlanMIB, rlAmap=rlAmap, rlInventoryEnt=rlInventoryEnt, rlPfcMib=rlPfcMib, rlDeleteImg=rlDeleteImg, rlMacMulticast=rlMacMulticast, rlSwPackageVersion=rlSwPackageVersion, rlMultiSessionTerminal=rlMultiSessionTerminal, rsUDP=rsUDP, rlDnsCl=rlDnsCl, rlSyslog=rlSyslog, rlVlanTrunking=rlVlanTrunking, rndBootP=rndBootP, rlOib=rlOib, rlIPmulticast=rlIPmulticast, rlSsh=rlSsh, rlBonjour=rlBonjour, rlActionAcl=rlActionAcl, rlDlf=rlDlf, rldot1x=rldot1x, rlRadius=rlRadius, rlStatistics=rlStatistics, rlpBridgeMIBObjects=rlpBridgeMIBObjects, rlErrdisableRecovery=rlErrdisableRecovery, rlWBA=rlWBA, rlLldp=rlLldp, rlSpecialBpdu=rlSpecialBpdu, rlGalileo=rlGalileo, rlMacBasePrio=rlMacBasePrio, rlIntel=rlIntel, rlUdld=rlUdld, rlArpSpoofing=rlArpSpoofing, rlSafeGuard=rlSafeGuard, rlEventsMib=rlEventsMib)
|
description = 'setup for the cache server'
group = 'special'
devices = dict(
DB=device('nicos.services.cache.server.FlatfileCacheDatabase',
description='On disk storage for Cache Server',
storepath=configdata('config.DATA_PATH') + 'cache',
loglevel='info', ),
Server=device('nicos.services.cache.server.CacheServer',
db='DB',
server='',
loglevel='info',
),
)
|
# 1.5 Find One Missing Number from 1 to 10
def find_missing_number(list_numbers):
list_sum = 0
for number in list_numbers:
list_sum += number
    return 55 - list_sum  # 55 = 1 + 2 + ... + 10
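# Quick check with an example list (chosen here, not part of the original snippet):
print(find_missing_number([1, 2, 3, 4, 5, 6, 7, 8, 10]))  # -> 9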
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2016 Michael Gruener <michael.gruener@chaosmoon.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = r'''
notes:
- "If a new enough version of the C(cryptography) library
is available (see Requirements for details), it will be used
instead of the C(openssl) binary. This can be explicitly disabled
or enabled with the C(select_crypto_backend) option. Note that using
the C(openssl) binary will be slower and less secure, as private key
contents always have to be stored on disk (see
C(account_key_content))."
- "Although the defaults are chosen so that the module can be used with
the L(Let's Encrypt,https://letsencrypt.org/) CA, the module can in
principle be used with any CA providing an ACME endpoint."
requirements:
- python >= 2.6
- either openssl or L(cryptography,https://cryptography.io/) >= 1.5
options:
account_key_src:
description:
- "Path to a file containing the ACME account RSA or Elliptic Curve
key."
- "RSA keys can be created with C(openssl genrsa ...). Elliptic curve keys can
be created with C(openssl ecparam -genkey ...). Any other tool creating
private keys in PEM format can be used as well."
- "Mutually exclusive with C(account_key_content)."
- "Required if C(account_key_content) is not used."
type: path
aliases: [ account_key ]
account_key_content:
description:
- "Content of the ACME account RSA or Elliptic Curve key."
- "Mutually exclusive with C(account_key_src)."
- "Required if C(account_key_src) is not used."
- "I(Warning): the content will be written into a temporary file, which will
be deleted by Ansible when the module completes. Since this is an
important private key — it can be used to change the account key,
or to revoke your certificates without knowing their private keys
—, this might not be acceptable."
- "In case C(cryptography) is used, the content is not written into a
temporary file. It can still happen that it is written to disk by
Ansible in the process of moving the module with its argument to
the node where it is executed."
type: str
version_added: "2.5"
account_uri:
description:
- "If specified, assumes that the account URI is as given. If the
account key does not match this account, or an account with this
URI does not exist, the module fails."
type: str
version_added: "2.7"
acme_version:
description:
- "The ACME version of the endpoint."
- "Must be 1 for the classic Let's Encrypt ACME endpoint, or 2 for the
new standardized ACME v2 endpoint."
type: int
default: 1
choices: [ 1, 2 ]
version_added: "2.5"
acme_directory:
description:
- "The ACME directory to use. This is the entry point URL to access
CA server API."
- "For safety reasons the default is set to the Let's Encrypt staging
server (for the ACME v1 protocol). This will create technically correct,
but untrusted certificates."
- "For Let's Encrypt, all staging endpoints can be found here:
U(https://letsencrypt.org/docs/staging-environment/)"
- "For Let's Encrypt, the production directory URL for ACME v1 is
U(https://acme-v01.api.letsencrypt.org/directory), and the production
directory URL for ACME v2 is U(https://acme-v02.api.letsencrypt.org/directory)."
- "I(Warning): So far, the module has only been tested against Let's Encrypt
(staging and production) and against the
L(Pebble testing server,https://github.com/letsencrypt/Pebble)."
type: str
default: https://acme-staging.api.letsencrypt.org/directory
validate_certs:
description:
- Whether calls to the ACME directory will validate TLS certificates.
- "I(Warning): Should I(only ever) be set to C(no) for testing purposes,
for example when testing against a local Pebble server."
type: bool
default: yes
version_added: "2.5"
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to
C(openssl).
- If set to C(openssl), will try to use the C(openssl) binary.
- If set to C(cryptography), will try to use the
L(cryptography,https://cryptography.io/) library.
type: str
default: auto
choices: [ auto, cryptography, openssl ]
version_added: "2.7"
'''
|
## A prime number is divisible only by 1 and itself
## Display 50 prime numbers in 5 lines, each containing 10 numbers
NUMBER_OF_PRIMES = 50 # Number of primes to display
NUMBER_OF_PRIMES_PER_LINE = 10 # Display 10 per line
count = 0 # Count number of prime numbers
number = 2 # the candidate number to test for primality
while count < NUMBER_OF_PRIMES:
    ## Check whether `number` is prime (isPrime True or False)
    ## As soon as number % divisor == 0, set isPrime = False and break out of the inner loop, then move on to the next number
    ## If no divisor up to number/2 divides number, the inner loop finishes without breaking, isPrime stays True and count is increased
isPrime = True
divisor = 2
while divisor <= number / 2:
if number % divisor == 0:
isPrime = False
break
divisor += 1
if isPrime:
count += 1
print(format(number, "2d"), end = ' ')
if count % NUMBER_OF_PRIMES_PER_LINE == 0:
print()
number += 1
|
"""
C++ FOREVER
"""
def hello():
"""
UNREACHABLE GNU Lesser General Public License v3.0
http://opensource.org/licenses/MIT-LICENSE
"""
print("Haskell TOP")
|
"""Arbre binaire - Application"""
# Fonctions utilitaires
# fonction 'arbre_binaire(r)' : construit un arbre sous forme de liste qui contient un noeud racine avec 2 sous-arbres vides (valeur à None)
def arbre_binaire(r):
return [r, [None], [None]]
# function 'change_racine(arbre, valeur)': changes the value of the root node, replacing it with the element given by the "valeur" parameter
def change_racine(arbre, valeur):
arbre[0] = valeur
return arbre
# function 'valeur_racine(arbre)': returns the value of the root
def valeur_racine(arbre):
return arbre[0]
# function 'sous_arbre_gauche(arbre)': returns the left subtree of the tree passed as a parameter
def sous_arbre_gauche(arbre):
return arbre[1]
# function 'sous_arbre_droit(arbre)': returns the right subtree of the tree passed as a parameter
def sous_arbre_droit(arbre):
return arbre[2]
# function 'ajouter_sous_arbre_gauche(arbre, sous_arbre)': attaches the given left subtree to the tree passed as a parameter
def ajouter_sous_arbre_gauche(arbre, sous_arbre):
arbre[1] = sous_arbre
return arbre
# function 'ajouter_sous_arbre_droit(arbre, sous_arbre)': attaches the given right subtree to the tree passed as a parameter
def ajouter_sous_arbre_droit(arbre, sous_arbre):
arbre[2] = sous_arbre
return arbre
# function 'prefixe': prints the elements of a tree in preorder traversal (node, left, right)
def prefixe(arbre):
if valeur_racine(arbre) is None:
print(end='')
else:
print([valeur_racine(arbre)], end=' ')
prefixe(sous_arbre_gauche(arbre))
prefixe(sous_arbre_droit(arbre))
# function 'infixe': prints the elements of a tree in inorder traversal (left, node, right)
def infixe(arbre):
if valeur_racine(arbre) is None:
print(end='')
else:
infixe(sous_arbre_gauche(arbre))
print([valeur_racine(arbre)], end=' ')
infixe(sous_arbre_droit(arbre))
# main program
arbre = [15, [7, [6, [None], [None]], [9, [None], [None]]], [20, [None], [25, [None], [None]]]]
prefixe(arbre)
input('\n\nTapez "Entree" pour la suite\n')
infixe(arbre)
|
"""
🚧 About
--------
dev is a collection of Python developer tools presented as a
modest alternative to the standard library's offering.
.. warning:: dev is a work in progress
Testing
-------
:py:mod:`dev.libtest` is a protocol-driven testing library. Users need not import libtest
in order to define their tests, only to run them. This means that test modules can be
imported without `dev` being available.
:py:mod:`dev.libtest` attempts to make constructing test runners as simple as possible by
keeping the interface as simple as possible.
`fail_if_*` is the designated prefix::
import something
def test_something(test):
expectation = (2,5)
test.fail_if_not_equal(expectation, something.calculate(1))
if __name__ == '__main__':
import dev.libtest; dev.libtest.execmodule()
Skeletons
---------
The executable module `dev.bin.init` initializes a new package directory
complete with a `setup.py` script. The following is the layout produced
by a `python -m dev.bin.init package` run::
package/
__init__.py
lib.py [empty "primary" module]
test/
test_lib.py
bin/
release/
xdistutils.py [module distutils data]
pypi.py [pypi specific data goes here]
documentation/
usage.rst
project.rst
reference.rst
index.rst
sphinx/
conf.py
build.sh
"""
__pkg_bottom__ = True
|
a =[72,73,75,84,85,87,104,105,107,116,117,119]
b =[97,98,99,100,101,102,103]
c =[97,98,99,100,101,103,106]
d =[65,72,74,75,77,79,90,97,104,106,107,109,111,122]
e =[66,67,78,79,84,85,88,89,98,99,110,111,116,117,120,121]
f =[104,105,106,107,108,109,110,111]
g =[112,113,114,117,118,119]
res = [bytearray([a1, b1, c1, d1, e1, f1, g1]) for a1 in a for b1 in b for c1 in c for d1 in d for e1 in e for f1 in f for g1 in g]
enc_bytes = bytearray([13,23,6,25,0,95,27,13,80,16,14,23,5,69,17,8,13,12,13,7,70,6,81,83,9,58,59,51,28,9,82,24,0,92,20,26])
def xor(data, key):
l = len(key)
decoded = bytearray()
for i in range(0, len(data)):
decoded.append(data[i]^key[i % l])
return decoded.decode("ascii")
for key in res:
ka = xor(enc_bytes,key)
if "n00bCTF"in ka:
print(ka)
break
|
try:
a = int(input('Numerador: '))
b = int(input('Denominador: '))
r = a / b
except Exception as erro:
print(f'Problema encontrado foi {erro}')
else:
print(f'O resultado é {r}')
finally:
print('O comando foi executado!')
|
import torch
import torch.nn as nn
import torch.nn.functional as F

class AttnDecoderRNN(nn.Module):
def __init__(self, dec_hidden_size, enc_hidden_size, dec_output_size, dropout_p=0):
super(AttnDecoderRNN, self).__init__()
self.hidden_size = dec_hidden_size
self.output_size = dec_output_size
self.dropout_p = dropout_p
self.lin_decoder = nn.Linear(dec_hidden_size, dec_hidden_size) # to convert input
self.lin_encoder = nn.Linear(enc_hidden_size, dec_hidden_size) # to convert encoder_outputs
self.linear_out = nn.Linear(dec_hidden_size + enc_hidden_size , dec_hidden_size, bias=False)
def forward(self, input, hidden, encoder_outputs):
# hidden not a tuple (htx, ctx) -> batch X dec_hid
# encoder_outputs -> batch X seq X enc_hid
# input -> batch x 1 X dec_hid
projected_context = F.tanh(self.lin_encoder(encoder_outputs)) # # batch X seq X enc_hid -> batch X seq X dec_hid
projected_input = F.tanh(self.lin_decoder(input)).unsqueeze(2) # batch X dec_hid X 1
# RAW ATTENTION
attn_dot = torch.bmm(projected_context, projected_input).squeeze(2) # batch X seq X 1 -> batch X seq
attn = F.softmax(attn_dot, dim=1) # NORMALIZED ATTENTION
reshaped_attn = attn.unsqueeze(1) # batch X 1 X seq
weighted_context = torch.bmm(reshaped_attn, encoder_outputs).squeeze(1) # batch X 1 X seq * batch X seq X enc_hid
# -> batch X 1 x enc_hid -> batch X enc_hid
h_tilde = torch.cat((weighted_context, hidden), 1) # -> batch X dec_hid+enc_hid
h_tilde = F.tanh(self.linear_out(h_tilde))
return h_tilde, attn
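# A minimal forward-pass sketch (the sizes below are illustrative assumptions):
if __name__ == "__main__":
    batch, seq, enc_hid, dec_hid = 4, 12, 256, 128
    decoder = AttnDecoderRNN(dec_hidden_size=dec_hid, enc_hidden_size=enc_hid, dec_output_size=10)
    encoder_outputs = torch.randn(batch, seq, enc_hid)
    hidden = torch.randn(batch, dec_hid)
    dec_input = torch.randn(batch, dec_hid)
    h_tilde, attn = decoder(dec_input, hidden, encoder_outputs)
    print(h_tilde.shape, attn.shape)  # torch.Size([4, 128]) torch.Size([4, 12])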
|
class FailureMessageAccessor(object,IDisposable):
""" Restricted accessor for FailureMessage. """
def CloneFailureMessage(self):
"""
CloneFailureMessage(self: FailureMessageAccessor) -> FailureMessage
Creates a copy of the FailureMessage.
        Returns: Copy of the FailureMessage.
"""
pass
def Dispose(self):
""" Dispose(self: FailureMessageAccessor) """
pass
def GetAdditionalElementIds(self):
"""
GetAdditionalElementIds(self: FailureMessageAccessor) -> ICollection[ElementId]
        Retrieves Ids of Elements that have not caused the failure but are related to it.
        Returns: Ids of Elements related to the failure
"""
pass
def GetCurrentResolutionType(self):
"""
GetCurrentResolutionType(self: FailureMessageAccessor) -> FailureResolutionType
Retrieves the type of resolution to be used to resolve the failure.
Returns: The type of failure resolution to be used to resolve the failure.
"""
pass
def GetDefaultResolutionCaption(self):
"""
GetDefaultResolutionCaption(self: FailureMessageAccessor) -> str
Retrieves the caption of default resolution of the failure.
Returns: The caption of default resolution of the failure.
"""
pass
def GetDescriptionText(self):
"""
GetDescriptionText(self: FailureMessageAccessor) -> str
Retrieves the description of the failure.
Returns: The description text.
"""
pass
def GetFailingElementIds(self):
"""
GetFailingElementIds(self: FailureMessageAccessor) -> ICollection[ElementId]
Retrieves Ids of Elements that have caused the failure.
Returns: Ids of Elements that have caused the failure.
"""
pass
def GetFailureDefinitionId(self):
"""
GetFailureDefinitionId(self: FailureMessageAccessor) -> FailureDefinitionId
Retrieves the Id of the FailureDefinition of the failure.
Returns: The Id of the FailureDefinition of the failure.
"""
pass
def GetNumberOfResolutions(self):
"""
GetNumberOfResolutions(self: FailureMessageAccessor) -> int
Retrieves number of resolutions that can be used to resolve failure.
Returns: Number of resolutions that can be used to resolve failure
"""
pass
def GetSeverity(self):
"""
GetSeverity(self: FailureMessageAccessor) -> FailureSeverity
Retrieves the severity of the failure.
Returns: The severity of the failure.
"""
pass
def HasResolutionOfType(self,type):
"""
HasResolutionOfType(self: FailureMessageAccessor,type: FailureResolutionType) -> bool
Checks if failure has a resolution of a given type.
type: The type of resolution.
Returns: True if failure has a resolution of a given type,false otherwise.
"""
pass
def HasResolutions(self):
"""
HasResolutions(self: FailureMessageAccessor) -> bool
Checks if the failure has any resolutions.
Returns: True if the failure has any resolutions,false otherwise.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: FailureMessageAccessor,disposing: bool) """
pass
def SetCurrentResolutionType(self,resolutionType):
"""
SetCurrentResolutionType(self: FailureMessageAccessor,resolutionType: FailureResolutionType)
Sets the type of a resolution to be used to resolve the failure.
resolutionType: The type of failure resolution to be used to resolve the failure.
"""
pass
def ShouldMergeWithMessage(self,messageToMergeWith):
"""
ShouldMergeWithMessage(self: FailureMessageAccessor,messageToMergeWith: FailureMessageAccessor) -> bool
Checks if the FailureMessage should be merged with the other FailureMessage for
better user experience.
Returns: True if messages should be merged
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
 def __init__(self,*args):
  """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: FailureMessageAccessor) -> bool
"""
|
"""
This module provides the Status class, which encapsulates
a status code for Icinga.
"""
class Status(object):
"""
Encapsulates an Icinga status, which holds a name and
an exit code.
"""
def __init__(self, name, exit_code):
"""
Creates a new status object for Icinga with the given name and
exit code.
**Note**: In general, this should never be called since the standard
statuses are exported from ``pycinga``.
"""
if not isinstance(exit_code, int):
raise ValueError("exit_code must be an int, not %s" % type(exit_code))
if not isinstance(name, str):
   raise ValueError("name must be a str, not %s" % type(name))
self.name = name
self.exit_code = exit_code
def __repr__(self):
return "Status(name=%s, exit_code=%d)" % (repr(self.name), self.exit_code)
def __lt__(self, other):
return (self.exit_code < other.exit_code)
def __eq__(self, other):
return (self.exit_code == other.exit_code)
def __ne__(self, other):
return (self.exit_code != other.exit_code)
def __gt__(self, other):
return (self.exit_code > other.exit_code)
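# Minimal usage sketch; the constant names below mirror the standard Icinga/Nagios
# plugin exit codes (0=OK, 1=WARNING, 2=CRITICAL, 3=UNKNOWN) and are illustrative,
# not necessarily the exact objects exported by pycinga.
OK = Status("OK", 0)
WARNING = Status("WARNING", 1)
CRITICAL = Status("CRITICAL", 2)
UNKNOWN = Status("UNKNOWN", 3)

assert CRITICAL > WARNING  # comparisons are based on exit_code
print(OK)                  # Status(name='OK', exit_code=0)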
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='FasterRCNN',
backbone=dict(
type='SwinTransformer',
embed_dims=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=True,
init_cfg=dict(type='Pretrained', checkpoint='https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window7_224_22kto1k-f967f799.pth')),
neck=dict(
type='FPN',
in_channels=[128, 256, 512, 1024],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='GARPNHead',
in_channels=256,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=8,
scales_per_octave=3,
   ratios=[0.5, 1.0, 2.0, 3.0, 4.0, 5.0],
strides=[4, 8, 16, 32, 64]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[8],
strides=[4, 8, 16, 32, 64]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.14, 0.14]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.11, 0.11]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=34,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
reg_decoded_bbox=True,
loss_bbox=dict(type='CIoULoss', loss_weight=12.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
center_ratio=0.2,
ignore_ratio=0.5,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
nms_post=1000,
max_per_img=300,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
nms_post=1000,
max_per_img=300,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)))
# data setting
dataset_type = 'CocoDataset'
data_root = '/content/data/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
albu_train_transforms = [
dict(type='ShiftScaleRotate', shift_limit=0.0625, scale_limit=0, rotate_limit=0, interpolation=1, p=0.5, border_mode = 0),
dict(type='RandomBrightnessContrast', brightness_limit=0.1, contrast_limit=0.1),
dict(type='RGBShift', r_shift_limit=10, g_shift_limit=10, b_shift_limit=10),
dict(type='HueSaturationValue', hue_shift_limit=20, sat_shift_limit=30, val_shift_limit=20),
dict(type='ChannelShuffle'),
dict(
type='OneOf',
transforms=[
dict(type='Blur', blur_limit=3, p=1.0),
dict(type='MedianBlur', blur_limit=3, p=1.0)
],
p=0.1),
]
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomCrop',
crop_type='relative_range',
crop_size=(0.9, 0.9)),
dict(
type='Resize',
img_scale=[(640, 640), (800, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(
type='CutOut',
n_holes=(5, 10),
cutout_shape=[(4, 4), (4, 8), (8, 4), (8, 8),
(16, 8), (8, 16), (16, 16), (16, 32), (32, 16), (32, 32),
(32, 48), (48, 32), (48, 48)]),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='Albu',
transforms=albu_train_transforms,
bbox_params=dict(
type='BboxParams',
format='pascal_voc',
label_fields=['gt_labels'],
min_visibility=0.0,
filter_lost_elements=True),
keymap={
'img': 'image',
'gt_bboxes': 'bboxes'
},
update_pad_shape=False,
skip_img_without_anno=True),
dict(type='Pad', size_divisor=800),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(800, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=12,
workers_per_gpu=4,
train=dict(type = dataset_type,
ann_file = data_root + '/annotations/instances_train2017.json',
img_prefix = 'train_images/',
pipeline=train_pipeline),
val=dict(type = dataset_type,
ann_file = data_root + '/annotations/instances_val2017.json',
img_prefix = 'val_images/',
pipeline=test_pipeline,
samples_per_gpu = 24),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
_delete_ = True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)}))
optimizer_config = dict(grad_clip=None)
log_config = dict(interval = 10)
# learning policy
lr_config = dict(
_delete_ = True,
policy='CosineAnnealing',
min_lr_ratio = 0.12,
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
)
runner = dict(type='IterBasedRunner', max_iters=10000, max_epochs = None)
checkpoint_config = dict(interval = 100)
evaluation = dict(interval = 100, metric = 'bbox')
fp16 = dict(loss_scale = 512.)
# runtime
load_from = None
resume_from = None
workflow = [('train', 1)]
|
#!/usr/bin/env python3
# '+=' is an augmented assignment operator: it adds a value to a
# variable and then rebinds the variable name to the new value.
# The same shortcut exists for every arithmetic operator
# (-=, *=, /=, and so on).
# string example
y = 'one'
y += 'two'  # concatenation: same as y = y + 'two', so y is now 'onetwo'
print(y)
# int example
x = 1 # x has value of one
x += 2 # same as x = x+2, now x = 3
print(x)
|
class SimpleButton(object):
"""Represents a single button that is connected to the ESP32"""
def __init__(self, pin):
self.pin = pin
def read_pressed(self):
print("Reading button on pin {}".format(self.pin))
# TODO: Actually call MicroPython code to get the value
return False
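# A possible MicroPython-backed refinement of the TODO above. This is only a sketch:
# it assumes an active-low push button wired between the GPIO pin and GND, and the
# standard MicroPython 'machine' module, so it runs on the board rather than CPython.
class PinBackedButton(SimpleButton):
    """SimpleButton variant that actually samples the GPIO via machine.Pin."""
    def __init__(self, pin):
        super().__init__(pin)
        from machine import Pin  # imported lazily so the base class stays usable off-board
        self._pin = Pin(pin, Pin.IN, Pin.PULL_UP)  # internal pull-up; the button pulls the pin to GND

    def read_pressed(self):
        # value() reads 0 while the active-low button is held down
        return self._pin.value() == 0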
|
def execute(fn, *args):
return fn(*args)
def say_hello(name, my_name):
print(f"Hello, {name}, I am {my_name}")
def say_bye(name):
print(f"Bye, {name}")
execute(say_hello, "Peter", "George")
execute(say_bye, "Peter")
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 25 11:35:11 2021
@author: shangfr
"""
def ui_prediction():
pass
if __name__ == "__main__":
ui_prediction()
|
# Write a function that checks whether a given string argument is a palindrome.
# A palindrome string is equal to its reverse, i.e. it reads the same backward as forward.
# For example: efe, hannah, ava, anna are palindromes.
# Test your function with the above examples and with at least 3 different
# non-palindrome examples: nixon, example, xxxzz.
# You may use string functions in this function.
def check_palindrome(str_to_test):
reverse_str = str_to_test[::-1]
if reverse_str == str_to_test:
print(f"{str_to_test} is a palindrome")
else:
print(f"{str_to_test} is NOT a palindrome")
check_palindrome("efe")
check_palindrome("hannah")
check_palindrome("ava")
check_palindrome("anna")
check_palindrome("nixon")
check_palindrome("example")
check_palindrome("xxxzz")
check_palindrome("xxxzzxxx")
|
CATEGORIES = [
('SOLUTIONS', 'SOLUTIONS'),
('PRODUITS', 'PRODUITS'),
('PARTENAIRES', 'PARTENAIRES'),
('INVESTISSEURS', 'INVESTISSEURS'),
]
SECTEUR_ENTREPRISES = [
('Agroalimentaire', 'Agroalimentaire'),
('Banque / Assurance', 'Banque / Assurance'),
('Bois / Papier / Carton / Imprimerie', 'Bois / Papier / Carton / Imprimerie'),
('BTP / Matériaux de construction', 'BTP / Matériaux de construction'),
('Chimie / Parachimie', 'Chimie / Parachimie'),
('Commerce / Négoce / Distribution', 'Commerce / Négoce / Distribution'),
('Édition / Communication / Multimédia', 'Édition / Communication / Multimédia'),
('Électronique / Électricité', 'Électronique / Électricité'),
('Études et conseils', 'Études et conseils'),
('Industrie pharmaceutique', 'Industrie pharmaceutique'),
('Informatique / Télécoms', 'Informatique / Télécoms'),
('Machines et équipements / Automobile', 'Machines et équipements / Automobile'),
('Métallurgie / Travail du métal', 'Métallurgie / Travail du métal'),
('Plastique / Caoutchouc', 'Plastique / Caoutchouc'),
('Services aux entreprises', 'Services aux entreprises'),
('Textile / Habillement / Chaussure', 'Textile / Habillement / Chaussure'),
('Transports / Logistique', 'Transports / Logistique'),
]
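# The pairs above follow the (stored value, human-readable label) convention used by
# Django choice fields. A hypothetical model wiring them up could look like the sketch
# below (the model and field names are illustrative assumptions, not from this project):
#
#   from django.db import models
#
#   class Entreprise(models.Model):
#       categorie = models.CharField(max_length=50, choices=CATEGORIES)
#       secteur = models.CharField(max_length=100, choices=SECTEUR_ENTREPRISES)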
|
c_keyword_set = {
'auto',
'break',
'case',
'char',
'const',
'continue',
'default',
'define',
'do',
'double',
'elif',
'else',
'endif',
'enum',
'error',
'extern',
'float',
'for',
'goto',
'if',
'ifdef',
'ifndef',
'include',
'inline',
'int',
'line',
'long',
'noalias',
'pragma',
'register',
'restrict',
'return',
'short',
'signed',
'sizeof',
'static',
'struct',
'switch',
'typedef',
'undef',
'union',
'unsigned',
'void',
'volatile',
'while'
}
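# Tiny illustrative check (not from the original source): flag identifiers that
# collide with a C keyword or preprocessor directive from the set above.
if __name__ == "__main__":
    for name in ("counter", "register", "total", "switch"):
        if name in c_keyword_set:
            print(f"'{name}' is a reserved C keyword/preprocessor directive")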
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fake groups data.
TODO: consolidate with other fake group test data.
"""
FAKE_GROUPS_DB_ROWS = [
{
'group_id': '1111aaaa1',
'member_role': 'OWNER',
'member_type': 'USER',
'member_email': 'owneruser@foo.xyz'
},
{
'group_id': '2222bbbb2',
'member_role': 'MEMBER',
'member_type': 'GROUP',
'member_email': 'group2@foo.xyz'
},
{
'group_id': '2222bbbb2',
'member_role': 'OWNER',
'member_type': 'GROUP',
'member_email': 'ownergroup@foo.xyz'
},
{
'group_id': '1111aaaa1',
'member_role': 'MEMBER',
'member_type': 'USER',
'member_email': 'user4@foo.xyz'
},
{
'group_id': '1111aaaa1',
'member_role': 'MEMBER',
'member_type': 'USER',
'member_email': 'user5@foo.xyz'
},
]
|
__version_tuple__ = (0, 6, 0, 'alpha.5')
__version__ = '0.6.0-alpha.5'
__version_tuple_js__ = (0, 6, 0, 'alpha.4')
__version_js__ = '0.6.0-alpha.4'
# kept for embedding in offline mode; the patch version doesn't matter since it should be compatible
__version_threejs__ = '0.97'
|
def read():
for dx in [1, 3, 5, 7]:
x = 0
tree = 0
with open('input.txt') as fh:
first_line = True
for line in fh.readlines():
if first_line:
first_line = False
continue
x = (x + dx) % len(line.strip())
if line[x] == '#':
tree += 1
print(dx, tree)
def read_dy2():
for dx in [1]:
x = 0
tree = 0
with open('input.txt') as fh:
first_line = True
i = 0
for line in fh.readlines():
if first_line:
first_line = False
continue
i += 1
x = (x + dx) % len(line.strip())
if i % 2 == 1:
# "going down 2"
continue
if line[x] == '#':
tree += 1
print("for dy=2")
print(dx, tree)
if __name__ == '__main__':
read()
read_dy2()
print(62*184*80*74*36)
|
""" dict functions """
def flatten_dict(nested: dict) -> dict:
"""Take a nested dictionary and flatten it. For example:
 {'a': {'b': 'c'}} will be flattened to {'a_b': 'c'}
Args:
nested: a dictionary to be flattened
Returns:
Dict. flattened version of the original dictionary
"""
ans = {}
for key, val in nested.items():
  # if val is a dict, flatten it recursively
if isinstance(val, dict):
flattened = flatten_dict(val)
for subkey, subval in flattened.items():
flattened_key = f"{key}_{subkey}"
ans[flattened_key] = subval
else:
ans[key] = val
return ans
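# Quick illustrative check (not part of the original module):
if __name__ == "__main__":
    nested = {"a": {"b": "c", "d": {"e": 1}}, "f": 2}
    print(flatten_dict(nested))  # {'a_b': 'c', 'a_d_e': 1, 'f': 2}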
|
from typing import List

class Solution:
def dfs(self, s):
  if s == len(self.graph) - 1:
   self.path.append(len(self.graph) - 1)
   self.res.append(self.path[::])
   self.path.pop()
   return
self.path.append(s)
for i in range(len(self.graph[s])):
self.dfs(self.graph[s][i])
self.path.pop()
def allPathsSourceTarget(self, graph: List[List[int]]) -> List[List[int]]:
self.res = []
self.graph = graph
self.path = []
self.dfs(0)
return self.res
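# Illustrative run on the classic example DAG (0 -> 1 -> 3 and 0 -> 2 -> 3):
if __name__ == "__main__":
    print(Solution().allPathsSourceTarget([[1, 2], [3], [3], []]))  # [[0, 1, 3], [0, 2, 3]]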
|
"""
# REPEATED DNA SEQUENCES
All DNA is composed of a series of nucleotides abbreviated as 'A', 'C', 'G', and 'T', for example: "ACGAATTCCG". When studying DNA, it is sometimes useful to identify repeated sequences within the DNA.
Write a function to find all the 10-letter-long sequences (substrings) that occur more than once in a DNA molecule.
Example 1:
Input: s = "AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT"
Output: ["AAAAACCCCC","CCCCCAAAAA"]
Example 2:
Input: s = "AAAAAAAAAAAAA"
Output: ["AAAAAAAAAA"]
Constraints:
0 <= s.length <= 10^5
s[i] is 'A', 'C', 'G', or 'T'.
"""
class Solution:
def findRepeatedDnaSequences(self, s: str):
res = {}
result = []
i = 0
while i <= len(s) - 10:
st = s[i:i+10]
i += 1
if st in res:
res[st] += 1
else:
res[st] = 1
for x in res:
if res[x] > 1:
result.append(x)
return result
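# Quick check against the two examples from the docstring above:
if __name__ == "__main__":
    solver = Solution()
    print(solver.findRepeatedDnaSequences("AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT"))  # ['AAAAACCCCC', 'CCCCCAAAAA']
    print(solver.findRepeatedDnaSequences("AAAAAAAAAAAAA"))  # ['AAAAAAAAAA']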
|