Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | dtype | range / distinct values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 ⌀ | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] ⌀ | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] ⌀ | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1 |
| author_id | string | length 1–132 |
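This column summary is the kind of auto-generated schema preview that ships with large source-code datasets: one row per file, with content-addressed identifiers (blob_id, directory_id, content_id), repository and license metadata, GitHub activity counts, and the file text itself in content. Below is a minimal sketch of consuming rows with this schema, assuming the data is published as a Hugging Face dataset; the dataset path "org/python-source-files" is a hypothetical placeholder, and the field names are taken from the schema above.

```python
# Minimal sketch (assumptions: the `datasets` library is installed and the
# rows are published under the hypothetical path "org/python-source-files").
from datasets import load_dataset

# Streaming avoids downloading the full dataset up front.
ds = load_dataset("org/python-source-files", split="train", streaming=True)

for row in ds:
    # Each row follows the schema above; `content` holds the file text.
    if row["license_type"] == "permissive" and not row["is_generated"]:
        print(row["repo_name"], row["path"], row["length_bytes"])
```

The sample rows below follow this schema; each row's content field is reproduced beneath its metadata.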

---

repo_name: ZeroPhone/ZPUI | path: /output/drivers/pygame_emulator_factory.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 852
blob_id: 0ebde3b4ef5910eecb4d8d73ce4bd1ddff79edcc | directory_id: 1d3ccfb4330475f12ecd1e6f1396bfa064a7019c | content_id: 93df4c306e8fbe432c0b52c18f57d48589f2a049
snapshot_id: c4efaa730315c8c220e7cc76ed8ab9ee7251020c | revision_id: 430a4b6e1e869cbd68fd89bbf97261710fd7db6b
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference", "MIT"] | license_type: permissive | gha_license_id: NOASSERTION
visit_date: 2021-06-06T05:07:29.859464 | revision_date: 2018-12-23T08:18:39 | committer_date: 2018-12-23T08:24:43
github_id: 81,014,670 | star_events_count: 56 | fork_events_count: 27 | gha_event_created_at: 2019-03-22T19:05:00 | gha_created_at: 2017-02-05T19:01:36 | gha_language: Python
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
"""
factory for pygame emulator device
sets minimum attributes,
creates device
returns it to caller
"""
import logging
import luma.emulator.device
# ignore PIL debug messages
logging.getLogger("PIL").setLevel(logging.ERROR)
def get_pygame_emulator_device(width=128, height=64):
"""
Creates and returns pygame emulator device.
Width and height must match the size of the splash screen
or an exception will be thrown during initialization.
"""
#these are the bare minimum attributes needed to construct the emulator
emulator_attributes = {}
emulator_attributes['display'] = 'pygame'
#width and height are in pixels
emulator_attributes['width'] = width
emulator_attributes['height'] = height
Device = getattr(luma.emulator.device, 'pygame')
device = Device(**emulator_attributes)
return device
authors: ["crimier@yandex.ru"] | author_id: crimier@yandex.ru

---

repo_name: jidar/mush | path: /setup.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 374
blob_id: 28e291884d43f0687260c85caa18a685f60752fc | directory_id: 6426682dd4b4ee2a84b5bb6160ccdbd37016a0a9 | content_id: 0e00b2fc75b700a2fa3ec2fbb85829c3c9977d9c
snapshot_id: e9645a830f31729ebaf0dbeede98cfa91dacc788 | revision_id: 921e6094108a857683c65a86eb5557126dce90a8
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2021-01-22T07:10:41.142290 | revision_date: 2017-02-09T23:30:46 | committer_date: 2017-02-09T23:30:46
github_id: 37,157,916 | star_events_count: 1 | fork_events_count: 1 | gha_event_created_at: 2016-05-22T01:10:01 | gha_created_at: 2015-06-09T20:55:50 | gha_language: Python
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
from setuptools import setup, find_packages
# Normal setup stuff
setup(
name='mushtool',
description="multi-use-shell-helper...tool...ok, it's a backronymn :)",
version='1.0.0',
install_requires=['prettytable'],
packages=find_packages(),
zip_safe=False,
entry_points={
'console_scripts':
['mush = mush.cli:entry_point']},
)
authors: ["jose.idar@rackspace.com"] | author_id: jose.idar@rackspace.com

---

repo_name: mskt4440/AtCoder | path: /arc105/d.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 1,057
blob_id: fa812feda839644d1aebcc30b544855cf7ade4d3 | directory_id: 51885da54b320351bfea42c7dd629f41985454cd | content_id: 8ceb3f17e61bbd4f457a29ee80337759fac4f0a7
snapshot_id: dd266247205faeda468f911bff279a792eef5113 | revision_id: f22702e3932e129a13f0683e91e5cc1a0a99c8d5
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2021-12-15T10:21:31.036601 | revision_date: 2021-12-14T08:19:11 | committer_date: 2021-12-14T08:19:11
github_id: 185,161,276 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
#
# arc105 d
#
import sys
from io import StringIO
import unittest
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_入力例_1(self):
input = """3
1
10
2
1 2
21
476523737 103976339 266993 706803678 802362985 892644371 953855359 196462821 817301757 409460796 773943961 488763959 405483423 616934516 710762957 239829390 55474813 818352359 312280585 185800870 255245162"""
output = """Second
First
Second"""
self.assertIO(input, output)
def resolve():
TN = int(input())
TC = []
for i in range(TN):
N = int(input())
A = [N]
A += list(map(int, input().split()))
TC.append(A)
for tc in TC:
n, *T = tc
if __name__ == "__main__":
unittest.main()
# resolve()
authors: ["mskt4440@gmail.com"] | author_id: mskt4440@gmail.com

---

repo_name: YLyeliang/Autodetection | path: /configs/centernet/centernet512_dla34.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 3,290
blob_id: 1fd3a4b362882d05a0c246a971eb7f028d9b02a1 | directory_id: a4287764ce486a037df9acda33be98adf1df9d7e | content_id: 5b45838471009de1d6bb28afcf3603253ca2117e
snapshot_id: fab984c8b2425756c55e05c343335d4abe78e984 | revision_id: b1d8e42adbed65ff73943b1bec41c4b67056bf61
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-04-30T05:48:03.821507 | revision_date: 2021-05-25T03:02:33 | committer_date: 2021-05-25T03:02:33
github_id: 284,577,018 | star_events_count: 1 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
# model settings
model = dict(
type='CornerNet',
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CornerHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=1,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_embedding=dict(
type='AssociativeEmbeddingLoss',
pull_weight=0.25,
push_weight=0.25),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)))
# data settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
**img_norm_cfg),
dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
flip=True,
transforms=[
dict(type='Resize'),
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
**img_norm_cfg),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(
type='Collect',
keys=['img'],
meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'flip', 'img_norm_cfg', 'border')),
])
]
data = dict(
samples_per_gpu=6,
workers_per_gpu=3,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# training and testing settings
train_cfg = None
test_cfg = dict(
center_topk=100,
local_maximum_kernel=3,
max_per_img=100,
nms_cfg=dict(type='soft_nms', iou_threshold=0.5, method='gaussian'))
# optimizer
optimizer = dict(type='Adam', lr=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[180])
total_epochs = 210
authors: ["k87974@163.com"] | author_id: k87974@163.com

---

repo_name: KunyiLiu/algorithm_problems | path: /kunyi/data_structure/hash_table/find-pivot-index.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 1,093
blob_id: c99c7f1d22a1d7921303978cedf80e6513a7ba7a | directory_id: 0bab87d3d3bc6f790f6d924330acf7ae1c6ebc30 | content_id: 8c646b01e3e5e3daa6254f67d970019683e23a41
snapshot_id: 2032b9488cd2f20b23b47c456107475f609b178f | revision_id: b27a1d4d65429101ef027f5e1e91ba2afd13bd32
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2020-04-26T15:29:43.875656 | revision_date: 2019-10-21T19:09:01 | committer_date: 2019-10-21T19:09:01
github_id: 173,648,702 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: 2019-07-21T19:15:23 | gha_created_at: 2019-03-04T00:48:02 | gha_language: Python
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
##### subarray ######
class Solution:
"""
@param nums: an array
@return: the "pivot" index of this array
"""
def pivotIndex(self, nums):
# get the whole sum, hash table sub_sum
# for loop: sum - sub_sum[3] = 11 == sub_sum[3-1]
# O(n)
sub_sum = {}
whole_sum = sum(nums)
for i in range(len(nums)):
if i == 0:
sub_sum[i] = nums[i]
if whole_sum - sub_sum[i] == 0:
return i
else:
sub_sum[i] = sub_sum[i-1] + nums[i]
if whole_sum - sub_sum[i] == sub_sum[i-1]:
return i
return -1
###### partition to left and right ####
# enumerate candidate pivot indexes from left to right
class Solution(object):
def pivotIndex(self, nums):
# Time: O(n)
# Space: O(1)
left, right = 0, sum(nums)
for index, num in enumerate(nums):
right -= num
if left == right:
return index
left += num
return -1
authors: ["noreply@github.com"] | author_id: KunyiLiu.noreply@github.com

---

repo_name: scasagrande/fae2 | path: /fae2/accounts/admin.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 640
blob_id: c621b5137655d9d046d914dd195cfad427a2230e | directory_id: 7dc495401ea92c4007e5ee6e19d05a0d2b75afab | content_id: b552485525776beed6112437ff6c7e24381601b9
snapshot_id: ed3ff3bdf9b533cd23891fd78beed7f8ac8b3de1 | revision_id: 78e2f883e39014c2addef28871cf9b53ad74f585
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: null
visit_date: 2021-01-14T10:16:44.603672 | revision_date: 2016-03-12T00:57:09 | committer_date: 2016-03-12T00:57:09
github_id: 53,964,802 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: 2016-03-15T17:29:17 | gha_created_at: 2016-03-15T17:29:17 | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
"""
Copyright 2014-2016 University of Illinois
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.contrib import admin
# Register your models here.
authors: ["jongund@illinois.edu"] | author_id: jongund@illinois.edu

---

repo_name: bioinfo-chu-bdx/ngs-somatic | path: /scripts/Temoins_ABL1_check.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 4,928
blob_id: eddec9a08cbe885274908af925283f8669a3b71b | directory_id: 5789f30bc942dde4235668c56408575b0bd25599 | content_id: eed407f82246d4ec357a4edbf830df9a8549da04
snapshot_id: bc9dfa60872a644f18650593d144726d0ab22767 | revision_id: 8cc6411e16784f2891b92241a97c71788408ffb5
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-04-25T19:48:52.073672 | revision_date: 2021-03-19T14:21:49 | committer_date: 2021-03-19T14:21:49
github_id: 374,675,975 | star_events_count: 2 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
#!/usr/bin/env python
import sys
import os
import openpyxl
import subprocess
def representsInt(s): # avoid Excel's "number stored as text" warning
try:
s = int(s)
return s
except ValueError:
return s
def cell_format(cell, font=None, alignment=None, color=None, format=None, border=None,exterior_border=None):
if font == 'bold':
cell.font = openpyxl.styles.Font(name='Calibri', size=11, bold=True)
else:
cell.font = openpyxl.styles.Font(name='Calibri', size=11)
if alignment == 'center':
cell.alignment = openpyxl.styles.Alignment(horizontal='center',vertical='center',wrap_text=True)
elif alignment == 'left':
cell.alignment = openpyxl.styles.Alignment(horizontal='left',wrap_text=True)
if color == 'LightGreen':
cell.fill = openpyxl.styles.PatternFill(fill_type='solid',start_color='D8E4BC') # EBF1DE
elif color == 'LightRed':
cell.fill = openpyxl.styles.PatternFill(fill_type='solid',start_color='d28e8e') #F2DCDB
elif color == 'LightBlue':
cell.fill = openpyxl.styles.PatternFill(fill_type='solid',start_color='add8e6')
elif color == 'Yellow':
cell.fill = openpyxl.styles.PatternFill(fill_type='solid',start_color='feffa3')
elif color == 'Blue':
cell.font = openpyxl.styles.Font(name='Calibri', size=11, color='004c99')
elif color == 'DarkGrey':
cell.fill = openpyxl.styles.PatternFill(fill_type='solid',start_color='4d4f4e')
else:
cell.fill = openpyxl.styles.PatternFill(fill_type=None,start_color='FFFFFF')
if border:
cell.border = openpyxl.styles.Border(left=openpyxl.styles.Side(style='thin'),right=openpyxl.styles.Side(style='thin'), top=openpyxl.styles.Side(style='thin'),bottom=openpyxl.styles.Side(style='thin'))
if exterior_border:
cell.border = openpyxl.styles.Border(top=openpyxl.styles.Side(style='thin'),bottom=openpyxl.styles.Side(style='thin'))
if format == 'Percent':
cell.number_format = '0.0%'
###############################################################################
pipeline_folder = os.environ['NGS_PIPELINE_BX_DIR']
suivi_abl1_path = "/media/n06lbth/sauvegardes_pgm/LAM/EN_LAB_19_2333_Suivi_temoins_ABL1.xlsx"
temoin_abl1_finalreport_path = sys.argv[1]
sample = sys.argv[2]
run_name = sys.argv[3]
run_name = run_name.replace('Auto_user_S5-0198','S5')
###############################################################################
# i/o
fp = openpyxl.load_workbook(temoin_abl1_finalreport_path)
annotation_sheet = fp.get_sheet_by_name('Annotation')
annotation_rows = tuple(annotation_sheet.rows)
suivi_abl1 = openpyxl.load_workbook(suivi_abl1_path)
suivi_sheet = suivi_abl1.get_sheet_by_name('Temoins ABL1')
suivi_rows = tuple(suivi_sheet.rows)
img = openpyxl.drawing.image.Image('%s/scripts/ChuBordeaux_small.png' % pipeline_folder)
suivi_sheet.add_image(img,'A1')
column2write = len(suivi_rows[0])+1
# header 1
#suivi_sheet.cell(row=6,column=column2write).value = sample+'_'+run_name
suivi_sheet.cell(row=6,column=column2write).value = '%s\n\n%s' % (run_name,sample)
cell_format(suivi_sheet.cell(row=6,column=column2write),font='bold',alignment='center',border=True)
# header 2
suivi_sheet.cell(row=7,column=column2write).value = 'Var.freq'
cell_format(suivi_sheet.cell(row=7,column=column2write),border=True)
# variants lines
for i in range(len(annotation_rows[0])):
if annotation_rows[0][i].value == 'Transcript':
nm_index = i
if annotation_rows[0][i].value == 'c.':
c_index = i
if annotation_rows[0][i].value == 'c.(annovar)':
annovar_index = i
if annotation_rows[0][i].value == 'Var.Freq.' or annotation_rows[0][i].value == 'Freq':
freq_index = i
if annotation_rows[0][i].value == 'Var.Cov.':
var_cov_index = i
if annotation_rows[0][i].value == 'Pos.Cov.' or annotation_rows[0][i].value == 'Depth':
pos_cov_index = i
list_not_found = []
for i in range(7,len(suivi_rows)):
variant2check = (suivi_rows[i][1].value.split('.')[0],suivi_rows[i][5].value) # NM, c.
control2check = suivi_rows[i][7].value.replace(' ','')
if not control2check in sample.upper():
cell_format(suivi_sheet.cell(row=i+1,column=column2write),color='DarkGrey',border=True)
continue
for j in range(1,len(annotation_rows)):
if annotation_rows[j][nm_index].value:
variant = (annotation_rows[j][nm_index].value.split('.')[0],annotation_rows[j][c_index].value)
variant_annovar = (annotation_rows[j][nm_index].value.split('.')[0],annotation_rows[j][annovar_index].value)
variant_freq = '?'
if (variant2check == variant) or (variant2check == variant_annovar):
variant_freq = annotation_rows[j][freq_index].value
break
if variant_freq == '?': # not found!
cell_format(suivi_sheet.cell(row=i+1,column=column2write),font='bold',color='LightRed',border=True)
list_not_found.append(variant2check)
else:
suivi_sheet.cell(row=i+1,column=column2write).value = representsInt(variant_freq)
cell_format(suivi_sheet.cell(row=i+1,column=column2write),border=True)
suivi_abl1.save(suivi_abl1_path)
authors: ["thomas.bandres@chu-bordeaux.fr"] | author_id: thomas.bandres@chu-bordeaux.fr

---

repo_name: sockduct/Hackerrank | path: /Python/htmlparse1.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 1,351
blob_id: 686a8654b6bb525a32033706372872996ad70bdd | directory_id: 71b2131be682e9184e68b5f42cdf2b15ef851c1f | content_id: 62a4ae7d0f20d07e1072fbb943a4b9f343b360a0
snapshot_id: 23430fb5e7068e1689c502b2e803c7f630c17696 | revision_id: 2a42f575d1f17181e6a9fa21bc4ca6aed187bd84
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: null
visit_date: 2023-08-08T22:33:58.202194 | revision_date: 2023-07-28T00:48:43 | committer_date: 2023-07-28T00:48:43
github_id: 170,932,004 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
from html.parser import HTMLParser
example_html = '''
<html>
<head>
<title>HTML Parser - I</title>
</head>
<body data-modal-target class='1'>
<h1 class="header">HackerRank</h1>
<br id="main"/>
</body>
</html>
'''
class MyHTMLParser(HTMLParser):
def handle_starttag(self, tag, attrs):
'''
print(f' Found start tag: {tag}')
if attrs:
print(f' Found attributes: {attrs}')
'''
print(f'Start : {tag}')
for k, v in attrs:
print(f'-> {k} > {v}')
def handle_endtag(self, tag):
# print(f' Found end tag: {tag}')
print(f'End : {tag}')
# Empty tags:
def handle_startendtag(self, tag, attrs):
'''
print(f' Found an empty tag: {tag}')
if attrs:
print(f' Found attributes: {attrs}')
'''
print(f'Empty : {tag}')
for k, v in attrs:
print(f'-> {k} > {v}')
def main():
parser = MyHTMLParser()
lines = int(input())
for _ in range(lines):
parser.feed(input())
# Alternatively, collect all input and then parse:
# html += input()
# parser.feed(html)
#
# Need to explicitly close?
# parser.close()
# Example:
# parser.feed(example_html)
if __name__ == '__main__':
main()
authors: ["james.r.small@outlook.com"] | author_id: james.r.small@outlook.com

---

repo_name: JosephLevinthal/Research-projects | path: /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4187/codes/1595_1446.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 253
blob_id: 98b4d08c20d9ad3f7e190f774a239d9751f01d68 | directory_id: d554b1aa8b70fddf81da8988b4aaa43788fede88 | content_id: c56a025fa554401d7e7a6116d57edf197d81cbcb
snapshot_id: a3bc3ca3b09faad16f5cce5949a2279cf14742ba | revision_id: 60d5fd6eb864a5181f4321e7a992812f3c2139f9
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2022-07-31T06:43:02.686109 | revision_date: 2020-05-23T00:24:26 | committer_date: 2020-05-23T00:24:26
github_id: 266,199,309 | star_events_count: 1 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
# Test your code as you go.
# Don't test everything only at the end, or errors become harder to pin down.
# Don't be intimidated by error messages. They help you fix your code.
x = float(input("quantidade de litros"))
c = x*(1/3)
print(round(c, 3))
authors: ["jvlo@icomp.ufam.edu.br"] | author_id: jvlo@icomp.ufam.edu.br

---

repo_name: GoogleCloudPlatform/PerfKitBenchmarker | path: /perfkitbenchmarker/linux_packages/specjbb.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 2,074
blob_id: a64c16d3ce4d5bb65727d8a6af67024e410df108 | directory_id: ecaba173879f92f24e3c951866fda23c0a4fc426 | content_id: b5a05208db413a147880facc09f87c8ae403818b
snapshot_id: 2f4917fd796db4eb90822c557d8fa08a497fbd48 | revision_id: d0699f32998898757b036704fba39e5471641f01
detected_licenses: ["Classpath-exception-2.0", "BSD-3-Clause", "AGPL-3.0-only", "MIT", "GPL-2.0-only", "Apache-2.0", "LicenseRef-scancode-public-domain", "BSD-2-Clause"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-09-02T08:14:54.110308 | revision_date: 2023-09-01T20:28:01 | committer_date: 2023-09-01T20:28:38
github_id: 21,950,910 | star_events_count: 1,923 | fork_events_count: 567 | gha_event_created_at: 2023-09-13T22:37:42 | gha_created_at: 2014-07-17T17:23:26 | gha_language: Python
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
# Copyright 2022 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing installation functions for SPEC JBB 2015."""
from absl import flags
FLAGS = flags.FLAGS
_BENCHMARK_NAME = 'specjbb2015'
SPEC_JBB_2015_ISO = 'SPECjbb2015-1_03.iso'
SPEC_DIR = 'spec'
_MOUNT_SPECJBB_ISO = flags.DEFINE_bool(
'mount_specjbb_iso', True, 'Whether specjbb mounts iso or not')
def Install(vm) -> None:
"""Prepares a SPEC client by copying SPEC to the VM."""
mount_dir = 'spec_mnt'
vm.RemoteCommand(f'mkdir -p {mount_dir} {SPEC_DIR}')
vm.InstallPreprovisionedBenchmarkData(_BENCHMARK_NAME, [SPEC_JBB_2015_ISO],
'~/')
if _MOUNT_SPECJBB_ISO.value:
vm.RemoteCommand(
f'sudo mount -t iso9660 -o loop {SPEC_JBB_2015_ISO} {mount_dir}')
vm.RemoteCommand(f'cp -r {mount_dir}/* {SPEC_DIR}')
vm.RemoteCommand(f'sudo umount {mount_dir} && sudo rm -rf {mount_dir}')
else:
vm.InstallPackages('p7zip-full')
vm.InstallPackages('p7zip-rar')
vm.RemoteCommand(
f'7z x -o{mount_dir} {SPEC_JBB_2015_ISO}')
vm.RemoteCommand(f'cp -r {mount_dir}/* {SPEC_DIR}')
vm.RemoteCommand(f'rm -rf {mount_dir}')
def Uninstall(vm) -> None:
"""Cleanup Specjbb on the target vm."""
if _MOUNT_SPECJBB_ISO.value:
vm.RemoteCommand(f'sudo umount {SPEC_DIR}', ignore_failure=True)
vm.RemoteCommand(
f'rm -rf {SPEC_DIR} {SPEC_JBB_2015_ISO}', ignore_failure=True)
def AptInstall(vm) -> None:
Install(vm)
def YumInstall(vm) -> None:
Install(vm)
authors: ["copybara-worker@google.com"] | author_id: copybara-worker@google.com

---

repo_name: johndewees/iitmigration | path: /aalh_iit_buildings_03/cleanup-originaldate-column.py | branch_name: refs/heads/main | language: Python | extension: py | length_bytes: 2,288
blob_id: 00a6aff14bc99fe9474e83e5666735233ae29dfb | directory_id: bd08d0532f20b7285b437c9bf620de1bbcd5b9ea | content_id: a748a4d463968eae9fc2d7c2fbbd1be361d099a4
snapshot_id: a9e8a31ba6ceb541ce12c22fd612596cc243dbca | revision_id: 4dadfbecda719d6e7d60af076a231aedec3c862f
detected_licenses: ["Unlicense"] | license_type: permissive | gha_license_id: null
visit_date: 2023-03-14T17:06:58.777683 | revision_date: 2021-03-27T20:44:58 | committer_date: 2021-03-27T20:44:58
github_id: 320,086,321 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
from openpyxl import load_workbook
import re
filename = 'aalh_iit_buildings_03.xlsx'
wb = load_workbook(filename)
ws = wb['Metadata Template']
minimumcol = 15
maximumcol = 15
minimumrow = 7
maximumrow = 503
iterationrow = 7
targetcol = 15
for row in ws.iter_rows(min_row=minimumrow, min_col=minimumcol, max_row=maximumrow, max_col=maximumcol):
for cell in row:
print(iterationrow)
testvar = ws.cell(row=iterationrow, column=targetcol).value
print(testvar)
cleandate = None
approx = 'approximately '
try:
if testvar == None:
ws.cell(row=iterationrow, column=targetcol).value = ''
elif testvar.startswith('c'):
cleandate = re.findall('\d\d\d\d', testvar)
ws.cell(row=iterationrow, column=targetcol).value = approx + cleandate[0]
elif testvar.startswith('C'):
cleandate = re.findall('\d\d\d\d', testvar)
ws.cell(row=iterationrow, column=targetcol).value = approx + cleandate[0]
elif testvar.startswith('a'):
cleandate = re.findall('\d\d\d\d', testvar)
ws.cell(row=iterationrow, column=targetcol).value = approx + cleandate[0]
elif testvar.endswith('?'):
cleandate = testvar[:-1]
ws.cell(row=iterationrow, column=targetcol).value = approx + cleandate
elif testvar.find('-') != -1:
cleandate = testvar
ws.cell(row=iterationrow, column=targetcol).value = cleandate
elif testvar.find(',') != -1:
cleandate = testvar
ws.cell(row=iterationrow, column=targetcol).value = cleandate
elif testvar.find('/') != -1:
cleandate = testvar
ws.cell(row=iterationrow, column=targetcol).value = cleandate
else :
cleandate = re.findall('\d\d\d\d', testvar)
ws.cell(row=iterationrow, column=targetcol).value = cleandate[0]
print(ws.cell(row=iterationrow, column=targetcol).value)
except:
print('STATUS = PROBLEM')
iterationrow = iterationrow + 1
wb.save('aalh_iit_buildings_03.xlsx')
authors: ["noreply@github.com"] | author_id: johndewees.noreply@github.com

---

repo_name: ZiyaadLakay/csc312.group.project | path: /.history/dvdstore/webapp/urls_20190914174430.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 1,307
blob_id: ad856f2f30281ddb79e3bf362020b12dc87c6356 | directory_id: 4331b28f22a2efb12d462ae2a8270a9f666b0df1 | content_id: 36de7db40b005ead81a37d045a4710614dacf611
snapshot_id: ba772a905e0841b17478eae7e14e43d8b078a95d | revision_id: 9cdd9068b5e24980c59a53595a5d513c2e738a5e
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2020-07-26T23:30:22.542450 | revision_date: 2019-09-16T11:46:41 | committer_date: 2019-09-16T11:46:41
github_id: 200,703,160 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: 2019-08-05T17:52:37 | gha_created_at: 2019-08-05T17:52:37 | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name='home'),
path('clerk/', views.clerk, name='clerk'),
path('clerk/register2',views.register2, name='register2'),
path('clerk/register3',views.register3, name='register3'),
path('transactions/register2',views.register2, name='register2'),
path('transactions/register3',views.register3, name='register3'),
path('booking',views.booking, name='booking'),
path('clerk/checkout',views.checkout, name='checkout'),
path('clerk/checkin',views.checkin, name='checkin'),
path('transactions/', views.transactions, name='transactions'),
path('userstbl/', views.userstbl, name='userstbl'),
path('clerk/deleteMovie',views.deleteMovie, name='deleteMovie'),
path('transactions/deleteTransaction',views.deleteTransaction, name='deleteTransaction'),
path('userstbl/deleteUser',views.deleteUser, name='deleteUser'),
path('user_detail/', views.user_detail, name='user_detail'),
path('accounts/registerCustomer',views.registerCustomer, name='registerCustomer'),
path('user_detail/updateCustomer',views.updateCustomer, name='updateCustomer'),
path('user_detail/updateUser',views.updateUser, name='updateUser'),
]
authors: ["uzairjoneswolf@gmail.com"] | author_id: uzairjoneswolf@gmail.com

---

repo_name: annefou/ipypublish | path: /ipypublish/latex/ipypublish/contents_output.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 7,808
blob_id: 27cee738666ad4dcd220cece1afed26b7244f2e2 | directory_id: 09cc8367edb92c2f02a0cc1c95a8290ff0f52646 | content_id: b77efa54be79edce10f9c64aabd7bb180ec95a7f
snapshot_id: 7e80153316ab572a348afe26d309c2a9ee0fb52b | revision_id: 917c7f2e84be006605de1cf8851ec13d1a163b24
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: null
visit_date: 2020-04-13T16:08:59.845707 | revision_date: 2018-07-30T18:26:12 | committer_date: 2018-07-30T18:26:12
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
tplx_dict = {
'meta_docstring': 'with the main ipypublish content',
'document_packages': r"""
((*- if nb.metadata.ipub: -*))
((*- if nb.metadata.ipub.enable_breqn: -*))
\usepackage{breqn}
((*- endif *))
((*- endif *))
""",
'notebook_input': r"""
((*- if cell.metadata.ipub: -*))
((*- if cell.metadata.ipub.ignore: -*))
((*- elif cell.metadata.ipub.slideonly: -*))
((*- else -*))
((( super() )))
((*- endif *))
((*- else -*))
((( super() )))
((*- endif *))
""",
'notebook_input_markdown': r"""
((( cell.source | citation2latex | strip_files_prefix | convert_pandoc('markdown', 'json',extra_args=[]) | resolve_references | convert_pandoc('json','latex'))))
""",
'notebook_input_raw': r"""
((*- if cell.metadata.raw_mimetype: -*))
((*- if cell.metadata.raw_mimetype == "text/latex" -*))
((( super() )))
((*- endif *))
((*- endif *))
""",
'notebook_output': r"""
((*- if cell.metadata.ipub: -*))
((*- if cell.metadata.ipub.ignore: -*))
((*- elif cell.metadata.ipub.slideonly: -*))
((*- else -*))
((( super() )))
((*- endif *))
((*- else -*))
((( super() )))
((*- endif *))
""",
'notebook_output_markdown': """
((*- if cell.metadata.ipub: -*))
((*- if cell.metadata.ipub.mkdown: -*))
((( output.data['text/markdown'] | citation2latex | strip_files_prefix | convert_pandoc('markdown', 'json',extra_args=[]) | resolve_references | convert_pandoc('json','latex'))))
((*- endif *))
((*- endif *))
""",
'notebook_output_stream': r"""
((*- if cell.metadata.ipub: -*))
((*- if cell.metadata.ipub.ignore: -*))
((*- else -*))
((( super() )))
((*- endif *))
((*- else -*))
((( super() )))
((*- endif *))
""",
'notebook_output_latex': r"""
((*- if cell.metadata.ipub: -*))
((*- if cell.metadata.ipub.table and cell.metadata.ipub.equation -*))
((*- if output.data['text/latex'] | is_equation -*))
((( draw_equation(cell.metadata, output.data['text/latex']) )))
((*- else -*))
((( draw_table(cell, resources, output.data['text/latex']) )))
((*- endif *))
((*- else -*))
((*- if cell.metadata.ipub.table: -*))
((( draw_table(cell, resources, output.data['text/latex']) )))
((*- elif cell.metadata.ipub.equation: -*))
((( draw_equation(cell.metadata, output.data['text/latex']) )))
((*- endif *))
((*- endif *))
((*- endif *))
""",
# 'notebook_output_markdown':'',
'notebook_output_png': r"""
((( draw_figure(output.metadata.filenames['image/png'],
cell.metadata) )))
""",
'notebook_output_jpg': r"""
((( draw_figure(output.metadata.filenames['image/jpeg'],
cell.metadata) )))
""",
'notebook_output_svg': r"""
((( draw_figure(output.metadata.filenames['image/svg+xml'],
cell.metadata) )))
""",
'notebook_output_pdf': r"""
((( draw_figure(output.metadata.filenames['application/pdf'],
cell.metadata) )))
""",
'jinja_macros': r"""
((* macro draw_figure(filename, meta) -*))
((*- if meta.ipub: -*))
((*- if meta.ipub.figure: -*))
((* set filename = filename | posix_path *))
((*- block figure scoped -*))
((*- if meta.ipub.figure.placement: -*))
((*- if meta.ipub.figure.widefigure: -*))
\begin{figure*}[(((meta.ipub.figure.placement)))]
((*- else -*))
\begin{figure}[(((meta.ipub.figure.placement)))]
((*- endif *))
((*- else -*))
((*- if meta.ipub.figure.widefigure: -*))
\begin{figure*}
((*- else -*))
\begin{figure}
((*- endif *))
((*- endif *))
((*- if meta.ipub.figure.width: -*))
\begin{center}\adjustimage{max size={0.9\linewidth}{0.9\paperheight},width=(((meta.ipub.figure.width)))\linewidth}{((( filename )))}\end{center}
((*- elif meta.ipub.figure.height: -*))
\begin{center}\adjustimage{max size={0.9\linewidth}{0.9\paperheight},height=(((meta.ipub.figure.height)))\paperheight}{((( filename )))}\end{center}
((*- else -*))
\begin{center}\adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{((( filename )))}\end{center}
((*- endif *))
((*- if resources.captions: -*))
((*- if resources.captions[meta.ipub.figure.label]: -*))
\caption{((( resources.captions[meta.ipub.figure.label] )))}
((*- else -*))
\caption{((( meta.ipub.figure.caption )))}
((*- endif *))
((*- elif meta.ipub.figure.caption: -*))
\caption{((( meta.ipub.figure.caption )))}
((*- endif *))
((*- if meta.ipub.figure.label: -*))
\label{((( meta.ipub.figure.label )))}
((*- endif *))
\end{figure}
((*- endblock figure -*))
((*- endif *))
((*- endif *))
((*- endmacro *))
((* macro draw_table(cell, resources, text) -*))
((*- block table scoped -*))
((*- if cell.metadata.ipub.table.placement: -*))
\begin{table}[(((cell.metadata.ipub.table.placement)))]
((*- else -*))
\begin{table}
((*- endif *))
((*- if resources.captions and cell.metadata.ipub.table.label -*))
((*- if resources.captions[cell.metadata.ipub.table.label]: -*))
\caption{((( resources.captions[cell.metadata.ipub.table.label] )))}
((*- elif cell.metadata.ipub.table.caption -*))
\caption{((( cell.metadata.ipub.table.caption )))}
((*- endif *))
((*- elif cell.metadata.ipub.table.caption -*))
\caption{((( cell.metadata.ipub.table.caption )))}
((*- endif *))
((*- if cell.metadata.ipub.table.label -*))
\label{((( cell.metadata.ipub.table.label )))}
((*- endif *))
\centering
\begin{adjustbox}{max width=\textwidth}
((*- if cell.metadata.ipub.table.alternate: -*))
\rowcolors{2}{(((cell.metadata.ipub.table.alternate)))}{white}
((*- endif *))
((( text )))
\end{adjustbox}
\end{table}
((*- endblock table -*))
((*- endmacro *))
((* macro draw_equation(meta, text) -*))
((*- block equation scoped -*))
((* set environment = "none" *))
((*- if meta.ipub.equation.environment: -*))
((*- if meta.ipub.equation.environment == "none" -*))
((* set environment = "none" *))
((*- elif meta.ipub.equation.environment == "equation" -*))
((* set environment = "equation" *))
((*- elif meta.ipub.equation.environment == "equation*" -*))
((* set environment = "equation*" *))
((*- elif meta.ipub.equation.environment == "align" -*))
((* set environment = "align" *))
((*- elif meta.ipub.equation.environment == "align*" -*))
((* set environment = "align*" *))
((*- elif meta.ipub.equation.environment == "multline" -*))
((* set environment = "multline" *))
((*- elif meta.ipub.equation.environment == "multline*" -*))
((* set environment = "multline*" *))
((*- elif meta.ipub.equation.environment == "breqn" -*))
((*- if nb.metadata.ipub: -*))
((*- if nb.metadata.ipub.enable_breqn: -*))
((* set environment = "dmath" *))
((*- endif *))
((*- endif *))
((*- elif meta.ipub.equation.environment == "breqn*" -*))
((*- if nb.metadata.ipub: -*))
((*- if nb.metadata.ipub.enable_breqn: -*))
((* set environment = "dmath*" *))
((*- endif *))
((*- endif *))
((*- elif meta.ipub.equation.environment == "gather" -*))
((* set environment = "gather" *))
((*- elif meta.ipub.equation.environment == "gather*" -*))
((* set environment = "gather*" *))
((*- endif *))
((*- endif *))
((* if environment == "none" *))
((( text )))
((*- else -*))
((*- if meta.ipub.equation.label and not "*" in environment -*))
\begin{(((environment)))}\label{((( meta.ipub.equation.label )))}
((*- else -*))
\begin{(((environment)))}
((*- endif *))
((( text | remove_dollars )))
\end{(((environment)))}
((*- endif *))
((*- endblock equation -*))
((*- endmacro *))
"""
}
authors: ["chrisj_sewell@hotmail.com"] | author_id: chrisj_sewell@hotmail.com

---

repo_name: xzjh/OJ_LeetCode | path: /palindrome-partition.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 624
blob_id: 593a5277c49cb351e6a1a0693bfb2ffa039b7d97 | directory_id: f063232b59eb7535e4212ec2b6b477c472fdb56e | content_id: ebe2bd618596f01e488555e1e5c598ce1eba0483
snapshot_id: a01d43f6925bb8888bb79ca8a03a75dd8a6eac07 | revision_id: fa2cfe2ec7774ab4a356520668d5dbee9d63077c
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2021-01-20T11:13:36.291125 | revision_date: 2015-10-01T09:04:47 | committer_date: 2015-10-01T09:04:47
github_id: 25,239,393 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
class Solution:
# @param s, a string
# @return a list of lists of string
def partition(self, s):
def is_palindrome(s):
if len(s) < 2:
return True
l = 0
r = len(s) - 1
while r > l:
if s[r] != s[l]:
return False
r -= 1
l += 1
return True
def dfs(s, output, result):
if len(s) == 0:
result.append(output)
return
for i in range(len(s)):
if is_palindrome(s[:i + 1]):
new_output = list(output)
new_output.append(s[:i + 1])
dfs(s[i + 1:], new_output, result)
result = []
dfs(s, [], result)
return result
s = Solution()
print(s.partition('aab'))
authors: ["jsxzjh@gmail.com"] | author_id: jsxzjh@gmail.com

---

repo_name: Hashizu/atcoder_work | path: /aising2020/C/main.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 846
blob_id: 87b5f7cbad951f7e894f38e1220685c8c084589d | directory_id: cca3f6a0accb18760bb134558fea98bb87a74806 | content_id: ee81245bfc173a7e47f1ff52ccae2ee72c34ddb4
snapshot_id: 5ec48cc1147535f8b9d0f0455fd110536d9f27ea | revision_id: cda1d9ac0fcd56697ee5db93d26602dd8ccee9df
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-07-15T02:22:31.995451 | revision_date: 2021-09-03T12:10:57 | committer_date: 2021-09-03T12:10:57
github_id: 382,987,918 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
#!/usr/bin/env python3
import sys
MAX = 10**2
def solve(N: int):
ans = [0]*N
for xi in range(1, MAX):
x2 = xi**2
for yi in range(1, MAX):
y2 = yi**2
for zi in range(1, MAX):
k = x2 + y2 + zi**2 + xi*yi + xi * zi + yi*zi
if k > N: break
else: ans[k-1] +=1
for ai in ans:
print(ai)
return
# Generated by 1.1.7.1 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)
def main():
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
N = int(next(tokens)) # type: int
solve(N)
if __name__ == '__main__':
main()
authors: ["athenenoctus@gmail.com"] | author_id: athenenoctus@gmail.com

---

repo_name: yaolizheng/leetcode | path: /129/sum_path.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 495
blob_id: 30b5b0bebe33ffc13ec4780f11739d0ea5554e96 | directory_id: a3c86385115ea1831974624ac0d293f97ea40e48 | content_id: dd7c64306a1809d57371d983221340595a15ddbc
snapshot_id: 7adba764d2d913eb7b07bdb62e74460dea755e66 | revision_id: bb2a657fa4e2894b3dcb1e3cc57a17b53787d0f6
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2021-07-08T22:21:31.991385 | revision_date: 2019-01-25T18:52:59 | committer_date: 2019-01-25T18:52:59
github_id: 128,838,847 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
from tree import TreeNode
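# LeetCode 129: sum the numbers formed along each root-to-leaf path;
# `val` carries the number built so far down the current path.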
def helper(root, val):
if not root:
return 0
val = val * 10 + root.value
if root.left is None and root.right is None:
return val
return helper(root.left, val) + helper(root.right, val)
def sum_path(root):
return helper(root, 0)
if __name__ == '__main__':
root = TreeNode(4)
root.left = TreeNode(9)
root.right = TreeNode(0)
root.left.left = TreeNode(5)
root.left.right = TreeNode(1)
print(sum_path(root))
authors: ["zhengyaoli1988@gmail.com"] | author_id: zhengyaoli1988@gmail.com

---

repo_name: didemertens/udemy_webdev | path: /Python/oop_extra_prac.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 1,897
blob_id: 8e05700d0271f8372d294336dbb969c824e222aa | directory_id: 488e0934b8cd97e202ae05368c855a57b299bfd1 | content_id: 550260dc1f74eeee4619947ee51884506e4c159f
snapshot_id: 4d96a5e7abeec1848ecedb97f0c440cd50eb27ac | revision_id: 306215571be8e4dcb939e79b18ff6b302b75c952
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2020-04-25T00:24:45.654136 | revision_date: 2019-04-13T16:00:47 | committer_date: 2019-04-13T16:00:47
github_id: 172,377,429 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
class Dog:
# Class attribute
species = 'mammal'
# Initializer / Instance attributes
def __init__(self, name, age):
self.name = name
self.age = age
self.is_hungry = True
# Instance method
def description(self):
return self.name, self.age
# Instance method
def speak(self, sound):
return "%s says %s" % (self.name, sound)
# Instance method
def eat(self):
self.is_hungry = False
def walk(self):
return f"{self.name} is walking!"
# Child class (inherits from Dog class)
class RussellTerrier(Dog):
def run(self, speed):
return "%s runs %s" % (self.name, speed)
# Child class (inherits from Dog class)
class Bulldog(Dog):
def run(self, speed):
return "%s runs %s" % (self.name, speed)
# sam = Dog("Sam",9)
# bobby = Dog("Bobby",2)
nora = Dog("Nora",4)
# def get_biggest_number(*args):
# return max(args)
# oldest = get_biggest_number(sam.age,bobby.age,nora.age)
# print(f"The oldest dog is {oldest} years old.")
class Pets(Dog):
animals = []
def __init__(self, animals):
self.animals = animals
def amount_pets(self):
return f"I have {len(self.animals)} pets."
def walkie(self):
for dog in self.animals:
print(dog.walk())
def list_animals(self):
return self.animals
my_dogs = [
Bulldog("Tom", 6),
RussellTerrier("Fletcher", 7),
Dog("Larry", 9)
]
my_pets = Pets(my_dogs)
# print(my_pets.amount_pets())
# for dog in my_dogs:
# print(f"{dog.name} is {dog.age} years old.")
# print(f"And they are all {dog.species}s of course.")
# for dog in my_dogs:
# dog.eat()
# dogs_are_hungry = False
# for dog in my_dogs:
# if dog.is_hungry:
# dogs_are_hungry = True
# if dogs_are_hungry == True:
# print("My dogs are hungry")
# else:
# print("My dogs are not hungry.")
my_pets.walkie()
authors: ["d.g.j.mertens@gmail.com"] | author_id: d.g.j.mertens@gmail.com

---

repo_name: arleybri18/AirBnB_clone_v2 | path: /web_flask/3-python_route.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 711
blob_id: 32a7c8b65a4dc828d2d1f6a85ccb90bfb8478f72 | directory_id: f6b1db8c0503a292f6a1da31800269e0bb5f39bd | content_id: 91e5ddd0433581ce73379129958444807f16a642
snapshot_id: 142883fde2629c7eb75dddc8e4375a9ca1714555 | revision_id: 111cabf15cadba09f018b2fe359eec68495035dc
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2020-07-07T03:44:31.456739 | revision_date: 2019-09-09T15:16:55 | committer_date: 2019-09-09T15:16:55
github_id: 203,235,771 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: 2019-08-19T19:21:54 | gha_created_at: 2019-08-19T19:21:54 | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
#!/usr/bin/python3
""" Import flask class """
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello():
""" Function to handle request """
return 'Hello HBNB!'
@app.route('/hbnb')
def hello_hbnb():
""" Function to handle request to path /hbnb """
return 'HBNB'
@app.route('/c/<text>')
def c_route(text):
""" Function to handle request with a variable """
return 'C %s' % text.replace('_', ' ')
@app.route('/python/')
@app.route('/python/<text>')
def python(text='is cool'):
""" Function to handle request with a variable and data default """
return 'Python %s' % text.replace('_', ' ')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
authors: ["arleybri18@gmail.com"] | author_id: arleybri18@gmail.com

---

repo_name: bataysyk/site_resume | path: /media/My_program/Engineering_Calculator/main.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 254
blob_id: f0a6d0beca860b552d6f96fa60b61179b6c93ab1 | directory_id: 16487965d6cce8f4034beca9b7021b1e8881a346 | content_id: ea259d0bcc70256970705e2917d6e957d70706d7
snapshot_id: f20de5b00d8e37e7a5495da28c00fce976a07d42 | revision_id: 2d00dce0a4618c1b36e99f8587f6d88eec0f5e45
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-01-19T20:19:27.138973 | revision_date: 2020-11-12T16:07:50 | committer_date: 2020-11-12T16:07:50
github_id: 285,685,626 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
from gui import *
if __name__ == "__main__":
root = Tk()
root["bg"] = "#000"
root.geometry("480x550+100+100")
root.title("Engineering Calculator.")
root.resizable(False, False)
app = Main(root)
app.pack()
root.mainloop()
authors: ["you@example.com"] | author_id: you@example.com

---

repo_name: willianflasky/growup | path: /python/day10/flask.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 2,323
blob_id: 905f02f515e5d02e19c57b45372edb6089c87b26 | directory_id: e34cbf5fce48f661d08221c095750240dbd88caf | content_id: 8558ed1fb5d09f9ecf932a6a6acab0c8bcb6d972
snapshot_id: 2f994b815b636e2582594375e90dbcb2aa37288e | revision_id: 1db031a901e25bbe13f2d0db767cd28c76ac47f5
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-01-04T13:13:14.191504 | revision_date: 2020-01-12T08:11:41 | committer_date: 2020-01-12T08:11:41
github_id: 48,899,304 | star_events_count: 2 | fork_events_count: 0 | gha_event_created_at: 2022-12-26T19:46:22 | gha_created_at: 2016-01-02T05:04:39 | gha_language: C
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
#!/usr/bin/env python
# -*-coding:utf8-*-
# __author__ = "willian"
import select
import socket
class Flask(object):
def __init__(self, routers):
self.routers = routers
def process_data(self, client):
data = bytes()
while True: # receive-data loop
try:
trunk = client.recv(1024) # raises if there is no data; also raises when the client disconnects
except BlockingIOError as e:
trunk = ""
if not trunk:
break
data += trunk
data_str = str(data, encoding='utf8')
header, body = data_str.split('\r\n\r\n', 1)
header_list = header.split('\r\n')
header_dict = {}
for line in header_list:
value = line.split(":", 1)
if len(value) == 2:
k, v = value
header_dict[k] = v
else:
header_dict['method'], header_dict['url'], header_dict['protocol'] = line.split(' ')
return header_dict, body
def run(self, host='127.0.0.1', port=8888):
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(False)
sock.bind((host, port))
sock.listen(5)
inputs = [sock, ]
while True:
rList, wList, eList = select.select(inputs, [], [], 0.5)
for client in rList:
# establish a new connection
if client == sock:
conn, addr = client.accept()
conn.setblocking(False)
inputs.append(conn)
else: # the client sent data
header_dict, body = self.process_data(client)
request_url = header_dict['url']
func_name = None
for item in self.routers:
if item[0] == request_url:
func_name = item[1]
break
if not func_name:
client.sendall(b"404")
else:
result = func_name(header_dict, body)
client.sendall(result.encode('utf8'))
inputs.remove(client)
client.close()
authors: ["284607860@qq.com"] | author_id: 284607860@qq.com

---

repo_name: MysteriousSonOfGod/Python-2 | path: /Lyceum/Mars_Sql_Alchemy/zapros8.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 695
blob_id: 71a2388dcad6ad8e70d8fc2e86e246444b5ced55 | directory_id: 8941c8ca788b1a45bfad23ca26ebfa357c13f09b | content_id: 85a4ed28493a5c99de6c54d2326d35b671007644
snapshot_id: d1dfdf094f4a763758bfc7e1777c2cd6efbd0809 | revision_id: 0d488906e4b5e3897da6b7cb077815740e82fd84
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-02-05T13:38:25.673248 | revision_date: 2020-12-22T13:54:02 | committer_date: 2020-12-22T13:54:02
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
from data.db_session import global_init, create_session
from data.users import User
from data.jobs import Jobs
from data.departments import Department
from sqlalchemy import func
db = input()
global_init(db)
session = create_session()
d = session.query(Department).filter(Department.id == 1).first()
members = list(map(int, d.members.split(",")))
workers = []
for m in members:
j = session.query(func.sum(Jobs.work_size)).filter(Jobs.collaborators.like(f'%{str(m)}%')).scalar()
# print(j)
if j > 25:
workers.append(m)
# print(workers)
users = session.query(User).filter(User.id.in_(workers))
for user in users:
print(user.surname, user.name)
# db/mars_explorer.db
authors: ["realandmir@gmail.com"] | author_id: realandmir@gmail.com

---

repo_name: bopopescu/pythonanalyzer | path: /data/input/Azure/azure-sdk-for-python/azure-mgmt-web/azure/mgmt/web/models/ip_security_restriction.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 1,587
blob_id: faf11b5f5dbf57001f44e9ad498633a1097fffc0 | directory_id: eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429 | content_id: 4749ab6f9dc423ffde15078edcc4ca12dc54be31
snapshot_id: db839453bde13bf9157b76e54735f11c2262593a | revision_id: 8390a0139137574ab237b3ff5fe8ea61e8a0b76b
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: null
visit_date: 2022-11-22T02:13:52.949119 | revision_date: 2019-05-07T18:42:52 | committer_date: 2019-05-07T18:42:52
github_id: 282,079,884 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: 2020-07-23T23:46:09 | gha_created_at: 2020-07-23T23:46:08 | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class IpSecurityRestriction(Model):
"""
Represents an ip security restriction on a web app.
:param ip_address: IP address the security restriction is valid for
:type ip_address: str
:param subnet_mask: Subnet mask for the range of IP addresses the
restriction is valid for
:type subnet_mask: str
"""
_attribute_map = {
'ip_address': {'key': 'ipAddress', 'type': 'str'},
'subnet_mask': {'key': 'subnetMask', 'type': 'str'},
}
def __init__(self, ip_address=None, subnet_mask=None):
self.ip_address = ip_address
self.subnet_mask = subnet_mask
authors: ["rares.begu@gmail.com"] | author_id: rares.begu@gmail.com

---

repo_name: dankolbman/BCIM | path: /src/python/counts.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 511
blob_id: a03d7ae8801659c506b674965a5faaa056589de2 | directory_id: 1a94622c336c127a7d0657c010d5edf359d869ad | content_id: b4f1d1ba1607e80459e9d1d9e2170f1e8ab3cdf7
snapshot_id: 088eab0aa1b2cf656be3f877020ae1cc97f85eee | revision_id: e3108828ebdadd14968ad8ec093ab5fa6f8612d1
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: null
visit_date: 2021-01-22T06:48:41.995215 | revision_date: 2015-05-28T23:06:42 | committer_date: 2015-05-28T23:06:42
github_id: 20,842,183 | star_events_count: 1 | fork_events_count: 1 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
import numpy as np
from .DataIO import read_parts
def counts(filen, params):
t = []
counts = [ [0], [0] ]
with open(filen, 'r') as f:
ctime = ''
for line in f:
l = line.split()
if l[0] != ctime and line[0] !='#':
ctime = l[0]
t.append( float(l[0]) )
counts[0].append(0)
counts[1].append(0)
elif line[0] != '#':
sp = int( l[1] ) - 1
counts[sp][ -1 ] += 1
counts[0] = counts[0][1:]
counts[1] = counts[1][1:]
return t, counts
authors: ["dankolbman@gmail.com"] | author_id: dankolbman@gmail.com

---

repo_name: fuchami/scene_detection_pytorch | path: /models/networks.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 1,199
blob_id: a084bed1223eae867997fc027ac2332fc44f1eda | directory_id: 9829fef375374a3887326fa3ac814914c2db63a5 | content_id: 1844580c86b10e708e53f71e5af84e82e952af1d
snapshot_id: bf78a19011176112e6a0dd4bc9462c9302d20008 | revision_id: 2befe163a7d78674ebdb4ec7c22e6d50c609214f
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2022-04-07T02:06:16.345739 | revision_date: 2020-02-20T06:54:43 | committer_date: 2020-02-20T06:54:43
github_id: 216,630,900 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
# coding:utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from models.embedding import EmbeddingNet
# from torchsummary import summary
class SiameseNet(nn.Module):
def __init__(self, image=False, audio=False, text=False, time=False, merge='concat', outdim=128):
super(SiameseNet, self).__init__()
self.embedding_net = EmbeddingNet(image,audio,text,time,merge,outdim)
def forward(self, x1, x2):
output1 = self.embedding_net(x1)
output2 = self.embedding_net(x2)
return output1, output2
def get_embedding(self, x):
return self.embedding_net(x)
class TripletNet(nn.Module):
def __init__(self, image=False, audio=False, text=False, time=False, merge='concat', outdim=128):
super(TripletNet, self).__init__()
self.embedding_net = EmbeddingNet(image,audio,text,time,merge,outdim)
def forward(self, x1, x2, x3):
output1 = self.embedding_net(x1)
output2 = self.embedding_net(x2)
output3 = self.embedding_net(x3)
return output1, output2, output3
def get_embedding(self, x):
return self.embedding_net(x)
authors: ["famichiki.yuuki@gmail.com"] | author_id: famichiki.yuuki@gmail.com

---

repo_name: danse-inelastic/inelastic-svn | path: /graveyard/web/VNET/branches/vnf/vnf/inventory/__init__.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 711
blob_id: 3ed77387ea1326b471aae183e9e5b935e36511e3 | directory_id: a84e1ed67ef2592cf22f7d19cdddaf16700d6a8e | content_id: 69262bdf621adb11656adb43e9aada29cd9cb8a9
snapshot_id: dda998d7b9f1249149821d1bd3c23c71859971cc | revision_id: 807f16aa9510d45a45360d8f59f34f75bb74414f
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2016-08-11T13:40:16.607694 | revision_date: 2016-02-25T17:58:35 | committer_date: 2016-02-25T17:58:35
github_id: 52,544,337 | star_events_count: 1 | fork_events_count: 2 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
# -*- Python -*-
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2007 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
def dataobject( *args, **kwds ):
from DataObject import DataObject
return DataObject( *args, **kwds )
def form( *args, **kwds ):
from Form import Form
return Form( *args, **kwds )
def geometer( *args, **kwds ):
from Geometer import Geometer
return Geometer( *args, **kwds )
# version
__id__ = "$Id$"
# End of file
authors: ["yxqd@users.noreply.github.com"] | author_id: yxqd@users.noreply.github.com

---

repo_name: Aasthaengg/IBMdataset | path: /Python_codes/p03146/s635038540.py | branch_name: refs/heads/main | language: Python | extension: py | length_bytes: 248
blob_id: 08a772274dc0b7588e67be727f019c4b0572db37 | directory_id: ca7aa979e7059467e158830b76673f5b77a0f5a3 | content_id: ffcd848d7b53f891a4f49a1d39ab65423805b702
snapshot_id: 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | revision_id: f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-04-22T10:22:44.763102 | revision_date: 2021-05-13T17:27:22 | committer_date: 2021-05-13T17:27:22
github_id: 367,112,348 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
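# Count the steps of the Collatz-style map (halve if even, else 3a+1)
# starting from s until a value repeats, then print that step count.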
s = int(input())
i = 1
a_set = {s}
a_prev = s
while True:
i += 1
if a_prev % 2 == 0:
a = a_prev // 2
else:
a = 3 * a_prev + 1
if a in a_set:
ans = i
break
a_set.add(a)
a_prev = a
print(ans)
authors: ["66529651+Aastha2104@users.noreply.github.com"] | author_id: 66529651+Aastha2104@users.noreply.github.com

---

repo_name: globax89/dating-work | path: /backend/settings/migrations/0006_replanishmentplan.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 646
blob_id: 3e13a374dd395bb496b4156d4850e4514534773d | directory_id: b56ca08eb67163d3ccb02ff0775f59a2d971d910 | content_id: 694fa4a6750e7c6b69b45668571ca37920eab849
snapshot_id: f23d07f98dcb5efad62a1c91cdb04b1a8ef021f7 | revision_id: bb3d09c4e2f48ecd3d73e664ab8e3982fc97b534
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2022-12-11T22:45:19.360096 | revision_date: 2019-10-16T07:01:40 | committer_date: 2019-10-16T07:01:40
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
# Generated by Django 2.2.4 on 2019-10-03 12:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('settings', '0005_auto_20190927_1426'),
]
operations = [
migrations.CreateModel(
name='ReplanishmentPlan',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=250)),
('dollar', models.IntegerField(default=0)),
('credit', models.IntegerField(default=0)),
],
),
]
authors: ["zdimon77@gmail.com"] | author_id: zdimon77@gmail.com

---

repo_name: ProgMmgGhoneim/Django-Tasks | path: /task1_AdvancedModels/task1/advanced_model/migrations/0002_employee.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 793
blob_id: ea099bf25701e772952a954d522c781a406a6161 | directory_id: d37277c61facf70dae7d74c82e5b14826d0f7029 | content_id: d1a9d6aeb02a92f83b929f9432c778dd9e45dff9
snapshot_id: d8d53acbec6e042261ee28ef0e5931fb40e93fd7 | revision_id: 2288c1a9c3d1348897f5fb7be42bc807719aacb4
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2020-03-30T08:41:10.720962 | revision_date: 2018-10-01T23:27:59 | committer_date: 2018-10-01T23:27:59
github_id: 151,031,266 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
# Generated by Django 2.0.7 on 2018-07-22 12:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('advanced_model', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Employee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=300)),
('last_name', models.CharField(max_length=200)),
('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='employees', related_query_name='person', to='advanced_model.Company')),
],
),
]
authors: ["progmmgghoneim@gmail.com"] | author_id: progmmgghoneim@gmail.com

---

repo_name: Hylan129/Self-Learning | path: /zjgg_project/zjgg_main_thread.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 7,134
blob_id: d597659a2088ec54a3ecda166c0eeca50dc549df | directory_id: 42ed6d4e67172522f79ab6f3c8cb650f4234be90 | content_id: 7dcb5cb76edbc368a89f0eb5e87fd46f4fbfc044
snapshot_id: 81a5143015850c33d5226c4da43d110150661dc7 | revision_id: 06ccdc202f62629395900658909e1363a32632fd
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2020-06-12T19:45:50.479677 | revision_date: 2020-05-26T15:38:26 | committer_date: 2020-05-26T15:38:26
github_id: 194,405,633 | star_events_count: 0 | fork_events_count: 1 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
#!/usr/bin/env python
# coding=utf-8
import time,threading
import encodings.idna
import navigation_model_thread,sixmic_control,human_sensor
#position_list = ['11','12','13','14','15','16','17','18']
#position_list = ['4','56','2']
position_list = ['890','test','2','zd','zd2','sys']
"""
"LOCALIZATION_FAILED":"定位失败",
"GOAL_NOT_SAFE","目的地有障碍物"
"TOO_CLOSE_TO_OBSTACLES":"离障碍物太近",
"UNREACHABLE":"目的地无法到达",
"REACHED":"已到达目的地",
"HEADING":"正在前往目的地",
"PLANNING":"正在规划路径",
"UNREACHED":"到达目的地附近,目的地有障碍物"
"""
text_list = ['呈现在您面前的,是本馆的重点展品之一——在2001年9月十一日恐怖袭击中倒塌的美国纽约世界贸易中心钢构件残骸。本展品长二点五米,宽一点七米,高零点八五米,重约3吨,属于世贸北塔顶部天线八边形部分,位于铭牌所指示的红圈位置,由美国纽约与新泽西港务局无偿捐赠本馆。世贸中心的倒塌一度让人们对钢结构的安全性和可靠性产生怀疑,我们展示这件钢构件的目的,一是谴责恐怖主义,二是要澄清人们对钢结构认识上的误区。因为“9·11恐怖袭击”是一次极端事件,事后的调查表明,无论建筑使用的是钢结构还是其他材料,均难以承受如此猛烈的撞击和如此高强度的燃烧,恰恰因为钢结构的良好表现,为撞击部位以下的人员逃生争取到更多的时间,北塔和南塔在遭受撞击后仍然分别坚持了一百零三分钟和五十七分钟。2014年在世贸中心原址附近落成的世贸中心1号楼,主体仍然采用钢结构,再次证明了人们对钢结构的信心。',
'您背后的展墙,是新的里程碑板块,讲述第二次世界大战到20世纪末钢结构在世界各国的普遍应用。如美国的圣路易斯拱门、加拿大蒙特利尔世博会的美国馆、澳大利亚的悉尼歌剧院、法国的蓬皮杜国家艺术文化中心、日本的福冈体育馆等,当然也包括纽约世界贸易中心。这些地标建筑,展示着钢结构在人类生活中越来越广泛的应用,印证着世界工业文明发展的新的辉煌成就。',
'讲解完毕,小派在这停留三分钟,三分钟之后小派将带大家去下一个讲解点呢。',
'新中国成立以后,中国的钢铁工业从废墟中起步,但由于钢铁资源的短缺,仅在一些重大工程上,如武汉长江大桥、人民大会堂等使用了钢结构。改革开放以后,中国的钢结构产业进入逐渐发展期,截至二十世纪末,中国陆续建成了深圳发展中心、深圳地王大厦、上海金茂大厦等标志性钢结构建筑。最初,这些建筑由外国人设计,用外国的钢材,在外国加工,中国的企业只是承担施工,到后来,越来越多的钢结构建筑由中国人设计,用国产钢材,在国内加工。中国的钢结构产业沿着正确的轨道奋起直追。',
'二十一世纪堪称钢结构的世纪,新千年以来,世界各地不断涌现出新的钢结构建筑和桥梁,钢结构高度、跨度和精度的纪录不断刷新。在您右侧,通过三个屏幕展示这一时期的钢结构建设成就。左侧屏介绍的是2000年以来世界范围内钢结构经典建筑,如目前世界最高的哈利法塔,高度达到八百二十八米;中间屏介绍的是本世纪前十年中国的钢结构建设成就,包括上海环球金融中心、北京国家体育场和中央电视台、武汉火车站等;右侧屏则是2010年以来中国建成的钢结构建筑和桥梁,如深圳宝安国际机场T3航站楼、上海中心大厦、深圳平安金融中心等。中国的高端钢结构工程从设计到钢材供应、构件加工、现场施工已全部实现国产化,而且,钢结构乃至整个建筑业的技术水平已进入世界前列。',
'您现在进入本馆的科技厅。在这一部分,我们以科技为主线,介绍钢结构体系、设计、制造、安装、防腐、防火、防震、检测、监测等内容,同时也追溯这些技术的演进过程。您现在穿行在一座钢桥上,它是不是有点像上海的外白渡桥?在钢桥的两侧,我们以多媒体搭配模型的方式,重点介绍8种重要的结构体系。它们是:立体桁架结构、单层刚架、框架结构、框架-支撑结构、框架-筒体结构、巨型框架-筒体-支撑结构、索结构、网架结构。',
]
time_list = [0.3 * len(time_) for time_ in text_list]
def zjgg_xunhang():
try:
while True:
for go_point,text_point,time_point in zip(position_list,text_list,time_list):
navigation_model_thread.navigation_position(go_point)
while True:
if(navigation_model_thread.navigation_value =='REACHED'):
break
#if(navigation_model_thread.navigation_value =='UNREACHED'):
# navigation_model_thread.navigation_position(go_point)
time.sleep(1)
print(navigation_model_thread.navigation_value,navigation_model_thread.statuscode_value)
time.sleep(2)
sixmic_control.send(sixmic_control.text_broadcast(text_point))
time.sleep(time_point)
except Exception as e:
with open('err.txt','a') as code:
code.write(str(e) + '\n')
def monitor_notice():
while True:
if(navigation_model_thread.navigation_value in ["HEADING","UNREACHABLE", "PLANNING"]):
if (navigation_model_thread.statuscode_value == 701):
if(human_sensor.humansensor_value == human_sensor.human):
sixmic_control.send(sixmic_control.text_broadcast('您好!请借过一下!'))
human_sensor.red_shanshuo()
if __name__ == '__main__':
try:
sixmic_control.port_open()
human_sensor.port_open()
i =3
while(i):
sixmic_control.send(sixmic_control.buildShakePacket())
i -= 1
t1 = threading.Thread(target = zjgg_xunhang)
t2 = threading.Thread(target = human_sensor.humansensor_status)
t3 = threading.Thread(target = navigation_model_thread.status_status_monitor,args = (navigation_model_thread.url[1],))
t4 = threading.Thread(target = navigation_model_thread.status_navigtion_monitor,args = (navigation_model_thread.url[3],))
t5 = threading.Thread(target = monitor_notice)
#t6 = threading.Thread(target = human_coming_notice)
Threads = [t1,t2,t3,t4,t5]
for t in Threads:
t.start()
except Exception as e:
with open('zjgg_err.txt','a') as code:
code.write(str(e) + 'zjgg_err \n')
authors: ["jyzyg129@163.com"] | author_id: jyzyg129@163.com

---

repo_name: Jinmin-Goh/BOJ_PS | path: /Solved/03955/03955.py | branch_name: refs/heads/master | language: Python | extension: py | length_bytes: 1,432
blob_id: a4c4a72eccc102761fa23a6b05f2d184b7d7e6bd | directory_id: a7058080e41af37eb77c146fc09a5e4db57f7ec6 | content_id: d1035b0c9ea3a2a50d96aab7660b7c8fea1e9062
snapshot_id: bec0922c01fbf6e440589cc684d0cd736e775066 | revision_id: 09a285bd1369bd0d73f86386b343d271dc08a67d
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2022-09-24T02:24:50.823834 | revision_date: 2022-09-21T02:16:22 | committer_date: 2022-09-21T02:16:22
github_id: 223,768,547 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
# Problem No.: 3955
# Solver: Jinmin Goh
# Date: 20200611
# URL: https://www.acmicpc.net/problem/3955
import sys
import math
# extended Euclidean algorithm
def expGCD(a: int, b: int) -> tuple:
if b == 0:
return (a, 1, 0)
temp = expGCD(b, a % b)
#print(a, b, temp)
x, y = temp[1], temp[2]
return (temp[0], y, x - (y * (a // b)))
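# Example (sketch): expGCD(3, 7) returns (1, -2, 1),
# since gcd(3, 7) == 1 and 3 * (-2) + 7 * 1 == 1.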
# find solution of kx + 1 = cy, (k, c, x, y are all positive int)
# -kx + cy = 1 or kx + cy = 1 when x is negative int
def main():
t = int(input())
for _ in range(t):
k, c = map(int, sys.stdin.readline().split())
# exception for c = 1 case
if c == 1:
if k + 1 > 10 ** 9:
print("IMPOSSIBLE")
else:
print(k + 1)
continue
ans = expGCD(k, c)
# if gcd(k, c) != 1
if ans[0] != 1:
print("IMPOSSIBLE")
continue
# general solution: x = x0 + c * t / y = y0 - k * t
# 0 > x and y > 0; x0 + c * t < 0 and y0 - k * t > 0
# t < min(-x0 / c, y0 / k)
# y <= 10 ** 9, k * t >= y0 - 10 ** 9
x0 = ans[1]
y0 = ans[2]
maxVal = math.floor(min(-(x0 / c), y0 / k))
minVal = y0 - 10 ** 9
if minVal > (maxVal * k):
print("IMPOSSIBLE")
else:
print(y0 - k * maxVal)
return
if __name__ == "__main__":
main()
|
[
"eric970901@gmail.com"
] |
eric970901@gmail.com
|
424f1ec6d08235b7758bbc7d66f4b0c9f69eac2f
|
7da5ac719e4c9ca9cb3735d0ade3106183d96ffe
|
/Projeto/IoTcity_services/server/server/mainserver/forms.py
|
1f441056b6edc243773d544b0f8e9e7759395fbb
|
[] |
no_license
|
shanexia1818/IoTCity
|
a405c0921b417e5bb0a61966f9ca03a1f87147a7
|
3fe14b6918275684291f969fd6c3f69a7ee14a4c
|
refs/heads/master
| 2020-08-07T21:08:38.811470
| 2018-09-10T11:10:56
| 2018-09-10T11:10:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,687
|
py
|
from django.forms.extras.widgets import SelectDateWidget
import datetime
from django import forms
from models import Alarm
class ChoiceFieldNoValidation(forms.MultipleChoiceField):
def validate(self, value):
pass
class ActuatorForm(forms.Form):
def __init__(self, *args, **kwargs):
try:
senders = kwargs.pop('senders')
super(forms.Form, self).__init__(*args, **kwargs)
self.fields['streams'].choices = senders
super(ActuatorForm, self).full_clean()
except Exception as e:
super(forms.Form, self).__init__(*args, **kwargs)
streams = ChoiceFieldNoValidation(widget=forms.CheckboxSelectMultiple)
value = forms.FloatField(initial=0, required=True)
def clean(self):
cleaned_data = super(ActuatorForm, self).clean()
if len(cleaned_data['streams'])==0:
raise forms.ValidationError("Select at least one stream")
return cleaned_data
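# Example (sketch): constructing the form with dynamic stream choices in a
# view; `senders` is assumed to be a list of (value, label) pairs.
# form = ActuatorForm(request.POST, senders=[('s1', 'Stream 1'), ('s2', 'Stream 2')])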
class RuleForm(forms.Form):
def __init__(self, *args, **kwargs):
try:
senders = kwargs.pop('senders')
super(forms.Form, self).__init__(*args, **kwargs)
self.fields['streams'].choices = senders
super(RuleForm, self).full_clean()
except Exception as e:
super(forms.Form, self).__init__(*args, **kwargs)
beg_date = forms.DateField(widget=SelectDateWidget, initial=datetime.date.today)
end_date = forms.DateField(widget=SelectDateWidget, initial=datetime.date.today)
name = forms.CharField(max_length=50, required=True)
mo = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-mon2','type':'checkbox'}), initial=False, required=False)
tu = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-tue2','type':'checkbox'}), initial=False, required=False)
we = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-wed2','type':'checkbox'}), initial=False, required=False)
th = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-thu2','type':'checkbox'}), initial=False, required=False)
fr = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-fri2','type':'checkbox'}), initial=False, required=False)
sa = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-sat2','type':'checkbox'}), initial=False, required=False)
su = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-sun2','type':'checkbox'}), initial=False, required=False)
streams = ChoiceFieldNoValidation(widget=forms.CheckboxSelectMultiple)
value = forms.FloatField(initial=0, required=True)
beg_hour = forms.IntegerField(max_value=23, min_value=0)
beg_min = forms.IntegerField(max_value=59, min_value=0)
end_hour = forms.IntegerField(max_value=23, min_value=0)
end_min = forms.IntegerField(max_value=59, min_value=0)
hours_active_beg = forms.IntegerField(max_value=23, min_value=0)
minutes_active_beg = forms.IntegerField(max_value=59, min_value=0)
def clean(self):
cleaned_data = super(RuleForm, self).clean()
beg_date = cleaned_data['beg_date']
end_date = cleaned_data['end_date']
beg_hour = cleaned_data['beg_hour']
end_hour = cleaned_data['end_hour']
beg_min = cleaned_data['beg_min']
end_min = cleaned_data['end_min']
if beg_date > end_date or (beg_date == end_date and beg_hour > end_hour) or (beg_date == end_date and beg_hour == end_hour and beg_min>end_min):
raise forms.ValidationError("Turn on date should be before turn off date.")
if len(cleaned_data['streams'])==0:
raise forms.ValidationError("Select at least one stream")
return cleaned_data
class AlarmForm(forms.Form):
def __init__(self, *args, **kwargs):
try:
subscription_list = kwargs.pop('subscriptions')
super(forms.Form, self).__init__(*args, **kwargs)
self.fields['subscriptions'].choices = subscription_list
super(AlarmForm, self).full_clean()
except Exception as e:
super(forms.Form, self).__init__(*args, **kwargs)
beg_date = forms.DateField(widget=SelectDateWidget, initial=datetime.date.today)
end_date = forms.DateField(widget=SelectDateWidget, initial=datetime.date.today)
name = forms.CharField(max_length=50, required=True)
mo = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-mon','type':'checkbox'}), initial=False, required=False)
tu = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-tue','type':'checkbox'}), initial=False, required=False)
we = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-wed','type':'checkbox'}), initial=False, required=False)
th = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-thu','type':'checkbox'}), initial=False, required=False)
fr = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-fri','type':'checkbox'}), initial=False, required=False)
sa = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-sat','type':'checkbox'}), initial=False, required=False)
su = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'weekday','id':'weekday-sun','type':'checkbox'}), initial=False, required=False)
threshold = forms.FloatField()
beg_hour = forms.IntegerField(max_value=23, min_value=0)
beg_min = forms.IntegerField(max_value=59, min_value=0)
end_hour = forms.IntegerField(max_value=23, min_value=0)
end_min = forms.IntegerField(max_value=59, min_value=0)
hours_active_beg = forms.IntegerField(max_value=23, min_value=0)
minutes_active_beg = forms.IntegerField(max_value=59, min_value=0)
hours_active_end = forms.IntegerField(max_value=23, min_value=0)
minutes_active_end = forms.IntegerField(max_value=59, min_value=0)
subscriptions = ChoiceFieldNoValidation(widget=forms.CheckboxSelectMultiple, required=True)
type_alarm = forms.ChoiceField(choices=(('MAX', 'Maximum'), ('MIN', 'Minimum'), ), widget=forms.RadioSelect)
def clean(self):
cleaned_data = super(AlarmForm, self).clean()
beg_date = cleaned_data['beg_date']
end_date = cleaned_data['end_date']
beg_hour = cleaned_data['beg_hour']
end_hour = cleaned_data['end_hour']
beg_min = cleaned_data['beg_min']
end_min = cleaned_data['end_min']
if len(cleaned_data['subscriptions'])==0:
raise forms.ValidationError("Select at least one subscription")
if beg_date > end_date or (beg_date == end_date and beg_hour > end_hour) or (beg_date == end_date and beg_hour == end_hour and beg_min>end_min):
raise forms.ValidationError("Turn on date should be before turn off date.")
return cleaned_data
class NoteForm(forms.Form):
title = forms.CharField()
message = forms.CharField(widget=forms.Textarea, max_length=250)
beg_date = forms.DateField(widget=SelectDateWidget, initial=datetime.date.today)
hours_beg = forms.IntegerField(max_value=23, min_value=0)
minutes_beg = forms.IntegerField(max_value=59, min_value=0)
end_date = forms.DateField(widget=SelectDateWidget, initial=datetime.date.today)
hours_end = forms.IntegerField(max_value=23, min_value=0)
minutes_end = forms.IntegerField(max_value=59, min_value=0)
def clean(self):
cleaned_data = super(NoteForm, self).clean()
beg_date = cleaned_data['beg_date']
end_date = cleaned_data['end_date']
beg_hour = cleaned_data['hours_beg']
        end_hour = cleaned_data['hours_end']
        beg_min = cleaned_data['minutes_beg']
end_min = cleaned_data['minutes_end']
if beg_date > end_date or (beg_date == end_date and beg_hour > end_hour) or (beg_date == end_date and beg_hour == end_hour and beg_min>end_min):
raise forms.ValidationError("Turn on date should be before turn off date.")
return cleaned_data
|
[
"diogodanielsoaresferreira@ua.pt"
] |
diogodanielsoaresferreira@ua.pt
|
79fa9bcaa7dd16ce5f84b87faa09734698925d58
|
9f53fdce8e10d648776719eec72d99b140343fff
|
/Section_1_Creating_GUIs_in_Python_with_Tkinter/Video2_5_GUI_tkinter_another_button.py
|
1ffbd17db377c4915a825bef6d07e4d6f7ec376a
|
[] |
no_license
|
syurskyi/Hands-On_Python_3_x_GUI_Programming
|
9691d3ccbb3c3d3ffdec2184789cb62753e840d1
|
c5144a5b90a036992e56de51c3d61d8c8f3cd2c1
|
refs/heads/master
| 2020-12-05T05:38:14.441849
| 2020-01-06T04:13:52
| 2020-01-06T04:13:52
| 232,022,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 938
|
py
|
import tkinter as tk # alias as tk
from tkinter import ttk # themed tk
gui = tk.Tk() # create class instance
gui.geometry('400x200+300+300') # specify window width, height and position
gui.title('GUI written in tkinter') # give the GUI a window title
gui.iconbitmap('py.ico') # icon expected inside the same folder
def click_event(): # call back function
gui.title('Button has been clicked') # update window title
button_one.config(text='I have been clicked!') # update button text
another_button = ttk.Button(gui, text="Another") # create another button
another_button.pack()
button_one = ttk.Button(gui, text="Click Me", command=click_event) # define command
button_one.pack()
gui.mainloop() # run main event loop
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
57c54c1e797cfc9801c23e3f63fd88a11b98ede7
|
39bef50ed12468e57ad94a8e2551da6c7c45c8ed
|
/networkx/__init__.py
|
4fad5ac5632b45550b2f08346b5bbf9e1fce22e0
|
[] |
no_license
|
biancini/Rorschach-Test-Platform
|
b1a5dfdbe5a15a68ce4dcf66887346fbf2e94169
|
7ae68e1054637046278325eaa419b23f09b420d3
|
refs/heads/master
| 2020-05-17T11:00:13.889678
| 2012-04-11T16:31:19
| 2012-04-11T16:31:19
| 3,789,381
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,608
|
py
|
"""
NetworkX
========
NetworkX (NX) is a Python package for the creation, manipulation, and
study of the structure, dynamics, and functions of complex networks.
https://networkx.lanl.gov/
Using
-----
Just write in Python
>>> import networkx as nx
>>> G=nx.Graph()
>>> G.add_edge(1,2)
>>> G.add_node("spam")
>>> print(G.nodes())
[1, 2, 'spam']
>>> print(G.edges())
[(1, 2)]
"""
# Copyright (C) 2004-2010 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Add platform dependent shared library path to sys.path
#
from __future__ import absolute_import
import sys
if sys.version_info[:2] < (2, 6):
m = "Python version 2.6 or later is required for NetworkX (%d.%d detected)."
raise ImportError(m % sys.version_info[:2])
del sys
# These imports are order-sensitive
from networkx.exception import *
from networkx import externalnx
from networkx import utils
# these packages work with Python >= 2.6
from networkx import classes
from networkx.classes import *
from networkx import convert
from networkx.convert import *
from networkx import relabel
from networkx.relabel import *
from networkx import generators
from networkx.generators import *
from networkx import readwrite
from networkx.readwrite import *
#Need to test with SciPy, when available
from networkx import algorithms
from networkx.algorithms import *
from networkx import linalg
from networkx.linalg import *
from networkx import drawing
from networkx.drawing import *
|
[
"andrea.biancini@gmail.com"
] |
andrea.biancini@gmail.com
|
8872d3ac88ca46efd100864bc26ca5e79959ead5
|
e425b9d1e837e39a2e73b7e8d18452deb903ce10
|
/01_Fundamentals/Loops/EnumeratedList.py
|
1f98d917e4633b3577d2d3aea2c60155da529f6e
|
[] |
no_license
|
sokuro/PythonBFH
|
1491a398c5a9930c454e96ad8834dea066bf82bf
|
595ea77712c2397d9d92b1e21841e917d0a0c24d
|
refs/heads/master
| 2021-09-15T21:47:13.613134
| 2018-06-11T13:39:58
| 2018-06-11T13:39:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
class EnumeratedList:
    values = [1, 2, 3, 4, 5, 6]
    # inputValue = input("Enter a value: ")
    found_index = None
    for index, value in enumerate(values):
        if value == 5:
            found_index = index
            print('The value is in the array')
            break
    else:  # runs only when the loop finishes without a break
        print('The value is not in the array!')
    print('The value\'s index is: ', found_index)
|
[
"ugorcak@gmx.ch"
] |
ugorcak@gmx.ch
|
354e712282e44463be244eef28d59e535d34af94
|
e01c5d1ee81cc4104b248be375e93ae29c4b3572
|
/Sequence4/DS/Week4/phone-store-1.py
|
0ee5e7dbd3e5090265334e2f9ae5dc50d307def2
|
[] |
no_license
|
lalitzz/DS
|
7de54281a34814601f26ee826c722d123ee8bd99
|
66272a7a8c20c0c3e85aa5f9d19f29e0a3e11db1
|
refs/heads/master
| 2021-10-14T09:47:08.754570
| 2018-12-29T11:00:25
| 2018-12-29T11:00:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,664
|
py
|
# python3
class Node:
def __init__(self, key, value):
self.key = key
self.value = value
self.next = None
self.prev = None
class HashNode:
def __init__(self):
self.head = None
self.tail = None
class HashMap:
def __init__(self, size=16):
self.size = size
        self.hash = [HashNode() for _ in range(size)]  # fresh bucket per slot; [HashNode()] * size would alias one shared bucket
def add(self, key, value):
node = Node(key, value)
if type(key) is str:
index = self.hash_str_fn(key)
elif type(key) is int:
index = self.hash_function(key)
head = self.hash[index].head
if head is None:
self.hash[index].head = node
else:
prev = None
while head is not None:
if head.key == key:
head.value = value
break
prev = head
head = head.next
if head is None:
prev.next = node
def get(self, key):
if type(key) is str:
index = self.hash_str_fn(key)
elif type(key) is int:
index = self.hash_function(key)
head = self.hash[index].head
while head is not None:
if head.key == key:
return head.value
head = head.next
return "not found"
def delete(self, key):
index = self.hash_function(key)
curr = self.hash[index].head
prev = None
while curr is not None:
if curr.key == key:
if prev is None:
self.hash[index].head = curr.next
else:
prev.next = curr.next
break
prev = curr
curr = curr.next
def hash_function(self, data):
a = 34
b = 2
index = (a * data + b)
p = len(str(index)) - 1
p = 10 ** p + 19
index %= p
return index % self.size
def hash_str_fn(self, data):
h = 0
n = len(data)
x = 31
p = 119
for i in range(n-1, -1, -1):
h += ((h * x) + ord(data[i]))
h %= p
return h % self.size
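# Example (sketch): basic usage of the chained hash map above.
# phone_book = HashMap(8)
# phone_book.add(911, 'emergency')
# phone_book.add('alice', 'office')   # string keys hash via hash_str_fn
# assert phone_book.get(911) == 'emergency'
# phone_book.delete(911)
# assert phone_book.get(911) == 'not found'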
class Query:
def __init__(self, query):
self.type = query[0]
self.number = int(query[1])
if self.type == 'add':
self.name = query[2]
def read_queries():
n = int(input())
return [Query(input().split()) for i in range(n)]
def write_responses(result):
print('\n'.join(result))
def process_queries_naive(queries):
result = []
# Keep list of all existing (i.e. not deleted yet) contacts.
contacts = []
for cur_query in queries:
if cur_query.type == 'add':
# if we already have contact with such number,
# we should rewrite contact's name
for contact in contacts:
if contact.number == cur_query.number:
contact.name = cur_query.name
break
else: # otherwise, just add it
contacts.append(cur_query)
elif cur_query.type == 'del':
for j in range(len(contacts)):
if contacts[j].number == cur_query.number:
contacts.pop(j)
break
else:
response = 'not found'
for contact in contacts:
if contact.number == cur_query.number:
response = contact.name
break
result.append(response)
return result
def process_queries(queries):
# for cur_query in queries:
n = len(queries)
H = HashMap(n)
result = []
for cur_query in queries:
if cur_query.type == 'add':
H.add(cur_query.number, cur_query.name)
elif cur_query.type == 'del':
H.delete(cur_query.number)
elif cur_query.type == 'find':
result.append(H.get(cur_query.number))
return result
if __name__ == '__main__':
write_responses(process_queries(read_queries()))
|
[
"lalit.slg007@gmail.com"
] |
lalit.slg007@gmail.com
|
df877881b18b0ebf0f407c54d2688ad61f7978b0
|
babc3e26d66a8084c9f84a0431338bafabae6ffd
|
/TaeJuneJoung/PGM/LV2/lv2.스킬트리.py
|
1a01d1426dfd38787b4de87af83c634a98096016
|
[] |
no_license
|
hoteldelluna/AlgoStudy
|
5c23a1bfb07dbfbabc5bedd541d61784d58d3edc
|
49ec098cecf2b775727d5648161f773e5488089b
|
refs/heads/dev
| 2022-10-09T14:29:00.580834
| 2020-01-25T14:40:55
| 2020-01-25T14:40:55
| 201,632,052
| 5
| 0
| null | 2020-01-25T14:40:57
| 2019-08-10T13:11:41
|
Python
|
UTF-8
|
Python
| false
| false
| 947
|
py
|
def solution(skill, skill_trees):
answer = 0
for skill_tree in skill_trees:
stack = []
for tree in skill_tree:
if tree in set(skill):
idx = skill.index(tree)
if idx not in set(stack):
stack.append(idx)
isPlus = True
check = [False] * len(skill)
for i in stack:
check[i] = True
if check[:i].count(False):
isPlus = False
break
if isPlus:
answer += 1
return answer
"""
다른 사람 풀이]
Python의 `for~else`문 사용
"""
def solution(skill, skill_trees):
answer = 0
for skills in skill_trees:
skill_list = list(skill)
for s in skills:
if s in skill:
if s != skill_list.pop(0):
break
else:
answer += 1
return answer
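# Example (sketch): solution("CBD", ["BACDE", "CBADF", "AECB", "BDA"]) == 2
# ("CBADF" and "AECB" follow the C -> B -> D order; the other two do not).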
|
[
"jtj0525@gmail.com"
] |
jtj0525@gmail.com
|
a26ac469f2e087d4ceb54f3a8c82131f7bb8ad8c
|
da1a2d3b92e3cf8720712c82089cbc665087e355
|
/test/functional/tool_wallet.py
|
7baf87adb65072eef96f33c23d9a49b7e188dea5
|
[
"MIT"
] |
permissive
|
minblock/cureoptedcoin
|
a60e961cef536704023ff962a09c56fe25eee66e
|
72909b7b1df261f840d24ecd8aa083fc9b7b7b49
|
refs/heads/master
| 2021-05-20T23:55:51.062222
| 2020-04-23T06:55:30
| 2020-04-23T06:55:30
| 252,460,560
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,837
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoin-wallet."""
import subprocess
import textwrap
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class ToolWalletTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def bitcoin_wallet_process(self, *args):
binary = self.config["environment"]["BUILDDIR"] + '/src/cureoptedcoin-wallet' + self.config["environment"]["EXEEXT"]
args = ['-datadir={}'.format(self.nodes[0].datadir), '-regtest'] + list(args)
return subprocess.Popen([binary] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
def assert_raises_tool_error(self, error, *args):
p = self.bitcoin_wallet_process(*args)
stdout, stderr = p.communicate()
assert_equal(p.poll(), 1)
assert_equal(stdout, '')
assert_equal(stderr.strip(), error)
def assert_tool_output(self, output, *args):
p = self.bitcoin_wallet_process(*args)
stdout, stderr = p.communicate()
assert_equal(p.poll(), 0)
assert_equal(stderr, '')
assert_equal(stdout, output)
def run_test(self):
self.assert_raises_tool_error('Invalid command: foo', 'foo')
# `bitcoin-wallet help` is an error. Use `bitcoin-wallet -help`
self.assert_raises_tool_error('Invalid command: help', 'help')
self.assert_raises_tool_error('Error: two methods provided (info and create). Only one method should be provided.', 'info', 'create')
self.assert_raises_tool_error('Error parsing command line arguments: Invalid parameter -foo', '-foo')
self.assert_raises_tool_error('Error loading wallet.dat. Is wallet being used by other process?', '-wallet=wallet.dat', 'info')
self.assert_raises_tool_error('Error: no wallet file at nonexistent.dat', '-wallet=nonexistent.dat', 'info')
# stop the node to close the wallet to call info command
self.stop_node(0)
out = textwrap.dedent('''\
Wallet info
===========
Encrypted: no
HD (hd seed available): yes
Keypool Size: 2
Transactions: 0
Address Book: 3
''')
self.assert_tool_output(out, '-wallet=wallet.dat', 'info')
# mutate the wallet to check the info command output changes accordingly
self.start_node(0)
self.nodes[0].generate(1)
self.stop_node(0)
out = textwrap.dedent('''\
Wallet info
===========
Encrypted: no
HD (hd seed available): yes
Keypool Size: 2
Transactions: 1
Address Book: 3
''')
self.assert_tool_output(out, '-wallet=wallet.dat', 'info')
out = textwrap.dedent('''\
Topping up keypool...
Wallet info
===========
Encrypted: no
HD (hd seed available): yes
Keypool Size: 2000
Transactions: 0
Address Book: 0
''')
self.assert_tool_output(out, '-wallet=foo', 'create')
self.start_node(0, ['-wallet=foo'])
out = self.nodes[0].getwalletinfo()
self.stop_node(0)
assert_equal(0, out['txcount'])
assert_equal(1000, out['keypoolsize'])
assert_equal(1000, out['keypoolsize_hd_internal'])
assert_equal(True, 'hdseedid' in out)
if __name__ == '__main__':
ToolWalletTest().main()
|
[
"POSTMASTER@provgn.com"
] |
POSTMASTER@provgn.com
|
8e5a14bb33047f99ee33e2a1ebb2ca9463f8df99
|
9a9d6052f8cf91dd57be9a9b6564290b0fac9e52
|
/Algorithm/BOJ/2578_빙고.py
|
ec26caeef3194e552ab218e9a14f36e953527244
|
[] |
no_license
|
Gyeong-Yeon/TIL
|
596ec6a093eec34a17dad68bcd91fa9dd08690e8
|
eb1f43ee0525da93233b70716cd35caab8d82bda
|
refs/heads/master
| 2023-03-31T19:56:30.979062
| 2021-03-28T13:09:27
| 2021-03-28T13:09:27
| 280,307,737
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,260
|
py
|
def game():
cnt = 0
for i in range(5):
for j in range(5):
for t in range(5):
for b in range(5):
if call[i][j] == bingo[t][b]:
bingo[t][b] = 0
cnt += 1
if count() >= 3:
return cnt
def count():
bingo_cnt = 0
    for i in range(5):  # horizontal bingo lines
zero_cnt = 0
for j in range(5):
if bingo[i][j] == 0:
zero_cnt += 1
if zero_cnt == 5:
bingo_cnt += 1
    for i in range(5):  # vertical bingo lines
zero_cnt = 0
for j in range(5):
if bingo[j][i] == 0:
zero_cnt += 1
if zero_cnt == 5:
bingo_cnt += 1
zero_cnt = 0
    for i in range(5):  # anti-diagonal (/) bingo
if bingo[i][4-i] == 0:
zero_cnt += 1
if zero_cnt == 5:
bingo_cnt += 1
zero_cnt = 0
    for i in range(5):  # main diagonal (\) bingo
if bingo[i][i] == 0:
zero_cnt += 1
if zero_cnt == 5:
bingo_cnt += 1
return bingo_cnt
bingo = [list(map(int,input().split())) for _ in range(5)]
call = [list(map(int,input().split())) for _ in range(5)]
result = game()
print(result)
|
[
"lky4156@naver.com"
] |
lky4156@naver.com
|
835e103ddf2573f4b477b0c6d50490420a6cebea
|
dadef11410227993876f4019868587cde2716b53
|
/crawling5.py
|
eb2f819303851f9827b640d666b088daa78bd483
|
[] |
no_license
|
ss820938ss/pythonProject_pandas
|
6185f3e3af5402202ee80c8d941d6c5afd8c1298
|
aa076f4c74ad11ceac90f0f85be8ea24a53f71f0
|
refs/heads/master
| 2023-07-07T05:11:06.565796
| 2021-07-30T07:51:58
| 2021-07-30T07:51:58
| 389,851,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,644
|
py
|
import requests
from bs4 import BeautifulSoup
import time
from urllib.request import urlretrieve
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
test = input('Enter a name to search: ')
path = 'C:/chromedriver_win32/chromedriver'
driver = webdriver.Chrome(path)
driver.get('https://unsplash.com/')
time.sleep(1)
element = driver.find_element_by_name('searchKeyword')
element.send_keys(test, Keys.ENTER)
# image_link = driver.find_element_by_link_text('이미지')  # Google, Naver: click the "Images" tab
# image_link.click()  # Google, Naver
# For Google:
# image_tag = driver.find_elements_by_tag_name('span > div > div > div > a > div > img')
# num = 10,000,000
# x = driver.find_elements_by_class_name('xLon9')
time.sleep(5)
driver.find_element_by_class_name('_2Mc8_').send_keys(Keys.ENTER)
# test: compare counts, then print
# link = data.select_one('li.detail > a').attrs['href']
# NOTE: find_elements returns a list; the original concatenated that list
# into a URL, which raises TypeError. Take the first matching href instead
# (the 'a' selector below is an assumption about the page markup).
link_elements = driver.find_elements_by_css_selector('a')
link = link_elements[0].get_attribute('href') if link_elements else ''
webpage = requests.get(link)
soup = BeautifulSoup(webpage.content, "html.parser")
time.sleep(10)
driver.find_element_by_xpath('/html/body/div[4]/div/div/div[1]/button').send_keys(Keys.ENTER)
# image_tag = driver.find_elements_by_class_name('oCCRx')
# # view-count class names: xLon9 / oCCRx _2Mc8_ / /html/body/div[4]/div/div/div[4]/div/div/div[1]/div[4]/div[1]/div[1]/span
#
# time.sleep(1)
#
# image_list = []
#
# for i in range(len(image_tag)):
# image_list.append(image_tag[i].get_attribute('src'))
# print(image_list)
#
# for i, link in enumerate(image_list):
# urlretrieve(link, './images/{}{}.jpg'.format(test, i + 1))
|
[
"ss820938ss@gmail.com"
] |
ss820938ss@gmail.com
|
306fd591700f130d7b6b11935a1b7b57f6924123
|
3e14ded9e00d482a5bbfee039b9d740bd0c44f08
|
/venv/bin/easy_install-3.9
|
4eaef0f12e22d9b328f5fc4ebda52ecf93b0b2de
|
[
"MIT"
] |
permissive
|
Rubyroy12/neighbourinfo
|
7ebeb6d3ae12711c755baa0ea7adce8b58a28b55
|
619e6e7b20c7f06310d55c3922d9372c0e2455f7
|
refs/heads/master
| 2023-06-25T12:41:30.685708
| 2021-07-26T23:16:59
| 2021-07-26T23:16:59
| 388,932,539
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 286
|
9
|
#!/home/ibrahim/Documents/python/django-projects/mysource/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"you@example.com"
] |
you@example.com
|
f7fdabe13a91972e0ba4375d7e08dc1b9be65333
|
2bdedcda705f6dcf45a1e9a090377f892bcb58bb
|
/src/main/output/system_body/java/body/father_temp.py
|
a6eb560f9ba8cf2cebc95d13377303821828e3ce
|
[] |
no_license
|
matkosoric/GenericNameTesting
|
860a22af1098dda9ea9e24a1fc681bb728aa2d69
|
03f4a38229c28bc6d83258e5a84fce4b189d5f00
|
refs/heads/master
| 2021-01-08T22:35:20.022350
| 2020-02-21T11:28:21
| 2020-02-21T11:28:21
| 242,123,053
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,945
|
py
|
using System;
using System.Collections.Generic;
using System.Linq;
using System.Net;
using System.Net.Http;
using System.Text;
using System.Threading.Tasks;
namespace CafeT.Azures
{
    public static class AzureTranslator
    {
        // Enter here the Key from your Microsoft Translator Text subscription on http://portal.azure.com
        private const string SubscriptionKey = "11785aecda97606d15245d044954311a";

        /// Demonstrates getting an access token and using the token to translate.
        public static async Task<string> TranslateAsync(string text)
{
var translatorService = new TranslatorService.LanguageServiceClient();
var authTokenSource = new AzureAuthToken(SubscriptionKey);
var token = string.Empty;
try
{
token = await authTokenSource.GetAccessTokenAsync();
}
catch (HttpRequestException)
{
switch (authTokenSource.RequestStatusCode)
{
case HttpStatusCode.Unauthorized:
Console.WriteLine("Request to token service is not authorized (401). Check that the Azure subscription key is valid.");
break;
case HttpStatusCode.Forbidden:
Console.WriteLine("Request to token service is not authorized (403). For accounts in the free-tier, check that the account quota is not exceeded.");
break;
}
throw;
}
if(text.Contains("?vn"))
{
return translatorService.Translate(token, text, "en", "vi", "text/plain", "general", string.Empty);
}
else
{
return translatorService.Translate(token, text, "vi", "en", "text/plain", "general", string.Empty);
}
}
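        // Example (sketch): translating Vietnamese text to English.
        // string english = await AzureTranslator.TranslateAsync("xin chao");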
}
}
|
[
"soric.matko@gmail.com"
] |
soric.matko@gmail.com
|
af63fff10857b872190df0cceb777ccee45b30e3
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-gaussdbfornosql/huaweicloudsdkgaussdbfornosql/v3/model/show_applicable_instances_response.py
|
93201cb81aa67c68a0fbe46d6680ca8486bcf088
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,337
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowApplicableInstancesResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instances': 'list[ApplicableInstanceRsp]',
'count': 'int'
}
attribute_map = {
'instances': 'instances',
'count': 'count'
}
def __init__(self, instances=None, count=None):
"""ShowApplicableInstancesResponse
The model defined in huaweicloud sdk
        :param instances: Instance list
        :type instances: list[:class:`huaweicloudsdkgaussdbfornosql.v3.ApplicableInstanceRsp`]
        :param count: Limit on the number of instances the parameter can be applied to.
        :type count: int
"""
super(ShowApplicableInstancesResponse, self).__init__()
self._instances = None
self._count = None
self.discriminator = None
if instances is not None:
self.instances = instances
if count is not None:
self.count = count
@property
def instances(self):
"""Gets the instances of this ShowApplicableInstancesResponse.
        Instance list
:return: The instances of this ShowApplicableInstancesResponse.
:rtype: list[:class:`huaweicloudsdkgaussdbfornosql.v3.ApplicableInstanceRsp`]
"""
return self._instances
@instances.setter
def instances(self, instances):
"""Sets the instances of this ShowApplicableInstancesResponse.
        Instance list
:param instances: The instances of this ShowApplicableInstancesResponse.
:type instances: list[:class:`huaweicloudsdkgaussdbfornosql.v3.ApplicableInstanceRsp`]
"""
self._instances = instances
@property
def count(self):
"""Gets the count of this ShowApplicableInstancesResponse.
        Limit on the number of instances the parameter can be applied to.
:return: The count of this ShowApplicableInstancesResponse.
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this ShowApplicableInstancesResponse.
        Limit on the number of instances the parameter can be applied to.
:param count: The count of this ShowApplicableInstancesResponse.
:type count: int
"""
self._count = count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowApplicableInstancesResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
4db1ec489f51cd3f3ea0f26805ae9a0150a40fc4
|
e61e664d95af3b93150cda5b92695be6551d2a7c
|
/vega/core/pipeline/pipe_step.py
|
bd7fcb560b23ca6db99fa3f70a0385e5a571bdda
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
huawei-noah/vega
|
44aaf8bb28b45f707ed6cd4e871ba70fc0c04846
|
12e37a1991eb6771a2999fe0a46ddda920c47948
|
refs/heads/master
| 2023-09-01T20:16:28.746745
| 2023-02-15T09:36:59
| 2023-02-15T09:36:59
| 273,667,533
| 850
| 184
|
NOASSERTION
| 2023-02-15T09:37:01
| 2020-06-20T08:20:06
|
Python
|
UTF-8
|
Python
| false
| false
| 2,329
|
py
|
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PipeStep that used in Pipeline."""
import logging
from datetime import datetime
from vega.common import TaskOps, Status
from vega.common import ClassFactory, ClassType
from vega.core.pipeline.conf import PipeStepConfig
from vega.report import ReportServer
__all__ = ["PipeStep"]
logger = logging.getLogger(__name__)
class PipeStep(object):
"""PipeStep is the base components class that can be added in Pipeline."""
def __init__(self, name=None, **kwargs):
"""Initialize pipestep."""
self.task = TaskOps()
self.name = name if name else "pipestep"
self.start_time = datetime.now()
self.status = Status.unstarted
self.message = None
self.end_time = None
self.num_epochs = None
self.num_models = None
def __new__(cls, *args, **kwargs):
"""Create pipe step instance by ClassFactory."""
t_cls = ClassFactory.get_cls(ClassType.PIPE_STEP, PipeStepConfig.type)
return super().__new__(t_cls)
def do(self, *args, **kwargs):
"""Do the main task in this pipe step."""
pass
def save_info(self):
"""Save step info to report serve."""
info = {"step_name": self.name}
for attr in dir(self):
if attr in ["start_time", "end_time", "status", "message", "num_epochs", "num_models"]:
info[attr] = getattr(self, attr)
ReportServer().update_step_info(**info)
def update_status(self, status, desc=None):
"""Update step status."""
if status == Status.finished:
self.end_time = datetime.now()
self.status = status
self.message = desc
self.save_info()
|
[
"zhangjiajin@huawei.com"
] |
zhangjiajin@huawei.com
|
c3ec46d15bd7840421b521daa2c180b6373eb77e
|
05bdaafff13ec39f6120f4da5e09ffbb58505e85
|
/main.py
|
1ecb20e40e62d361d41c2d9c9262a50f8b2c8028
|
[] |
no_license
|
mcfletch/pyconca-tictactoe
|
852c9d42283cfda3eaf25b0445584a35d854f241
|
c14dc7cbff5c87f78edeff551d7a47ff9738b7dc
|
refs/heads/master
| 2020-04-05T21:43:16.050727
| 2018-12-24T02:17:32
| 2018-12-24T02:17:32
| 157,230,541
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,402
|
py
|
#! /usr/bin/env python
import gym
import numpy as np
import bisect
import random
import os
import argparse
from collections import deque
from keras.models import Model
from keras.layers import (
Dense,
Input,
Dropout,
Activation,
)
def predict(model, state):
"""Predict a single state's future reward"""
state = np.array(state,'f').reshape((1,-1))
action_weights = model.predict(state)
return action_weights[0]
def build_model( env ):
"""Build a Q function that predicts reward for a given state
Note here that we see two *different* values showing up in the
result of the Q function. The argmax (highest value's index)
is the "action to take to maximize expected reward" while the
max (highest value) is loosely corresponding to "expected reward"
for the given state.
"""
initial = layer = Input(env.observation_space.shape)
for size in [63,15,]:
layer = Dense(size)(layer)
layer = Activation('relu')(layer)
layer = Dense(env.action_space.n)(layer)
layer = Activation('linear')(layer)
model = Model(initial,layer)
model.compile(
'adam',
'mse'
)
return model
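# Sketch: for one CartPole state s, predict(model, s) might return
# [0.7, 1.2] (illustrative numbers only): argmax -> take action 1, while
# the max, 1.2, is the current estimate of the expected discounted reward.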
def run_game( env, model, epoch=0, exploit=.9 ):
done = False
state = env.reset()
history = []
overall_reward = 0
choices = []
while not done:
if not epoch % 100:
env.render()
if np.random.random() > exploit:
action = env.action_space.sample()
random_trial = True
else:
state = np.array(state,'f').reshape((1,-1))
action_weights = predict( model, state)
action = np.argmax( action_weights )
random_trial = False
choices.append(action)
new_state,reward,done,_ = env.step(action)
overall_reward += reward
history.append({
'state': state,
'new_state': new_state,
'action': action,
'random_trial': random_trial,
'overall_reward': overall_reward,
'reward': reward,
'done': done,
})
state = new_state
# exploit *= max((.995,exploit*1.1))
# print('%s/%s chose 0'%(choices.count(0), len(choices)))
return history
def generate_batches(epoch_history, batch_size):
"""Key insight here:
Deep RL seems to want to have lots of very rapid feedback at the start
of the process, so during completely random search, we're looking to
push the weights around immediately, so while we normally (supervised
learning, etc) want to process big batches of lots of data, here we're
doing very small batches that *sample* across the whole data-set.
As we progress, we include the early trials in the set of sampled
data, so they will be sampled more frequently than the current values,
but they are not all sampled N times, they just have a higher sampling
frequency than the latest/most recent trials.
"""
yield random.sample(epoch_history, min([len(epoch_history),batch_size]))
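# (Sketch) With 500 stored transitions and batch_size=64 this yields one
# random sample of up to 64 transitions per call; early transitions stay
# in the pool, so they keep being re-sampled as training progresses.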
def train_model( model, epoch_history, env, batch_size=64):
states = np.zeros((batch_size,)+env.observation_space.shape,'f')
actions = np.zeros((batch_size,env.action_space.n),'f')
for batch in generate_batches(epoch_history, batch_size):
if len(batch) < batch_size:
break
for index,record in enumerate(batch):
states[index] = record['state']
action_reward = predict(model,record['state'])
if not record['done']:
action_reward[record['action']] = record['reward'] + 1.0 * np.max(
predict(model,record['new_state'])
)
else:
# assert not np.max(action_reward) > 1.0, action_reward
action_reward[record['action']] = record['reward']
actions[index] = action_reward
model.fit(
states,
actions,
verbose=0
)
def verify(env, model):
history = run_game(env, model, epoch=0, exploit=1.0)
score = history[-1]['overall_reward']
return score
def run(env_name='CartPole-v1',initial_epsilon=0.995):
env = gym.make(env_name)
model = build_model( env )
filename = '%s-weights.hd5'%(env_name)
if os.path.exists(filename):
model.load_weights(filename)
scores = deque(maxlen=100)
overall_history = []
epsilon_decay = .02
epsilon_min = 0.05
epsilon_max = .995
epsilon = initial_epsilon
for epoch in range(10000):
epoch_scores = []
epsilon = np.max([
epsilon_min, np.min([
epsilon,
1.0 - np.log10((epoch + 1) * epsilon_decay ),
epsilon_max,
]),
])
exploit = 1.0- epsilon
# while len(overall_history) < :
history = run_game( env, model, epoch, exploit )
score = history[-1]['overall_reward']
scores.append(score)
overall_history.extend( history )
train_model( model, overall_history, env, batch_size=64 )
if not epoch % 100:
avg = np.mean(scores)
print('Avg Score on last 100 tests: ', avg)
if avg > 195:
print('Success at epoch %s'%(epoch,))
model.save_weights(filename)
verification = [
verify(env, model)
for i in range(20)
]
print('Verification: mean %s stddev=%s'%(
np.mean(verification),
np.std(verification),
))
return verification
def get_options():
parser = argparse.ArgumentParser(
description = 'Run a deep reinforcement learning task on an OpenAI environment',
)
parser.add_argument(
'-e','--environment',
default = 'CartPole-v1',
help = 'OpenAI Gym environment to run'
)
parser.add_argument(
'--epsilon',
default=.995,
help = 'Initial epsilon value (1 meaning "explore on every step" and 0 meaning "just exploit your knowledge")',
type=float,
)
return parser
def main():
parser = get_options()
options = parser.parse_args()
return run(options.environment,initial_epsilon=options.epsilon)
if __name__ == "__main__":
main()
|
[
"mcfletch@vrplumber.com"
] |
mcfletch@vrplumber.com
|
5e7a0532d00a852b74bc781bd6336d237945b66a
|
4fc1c45a7e570cc1204d4b5f21150f0771d34ea5
|
/quan_table/insightface_v2/model/mobilefacenetv2/mobilefacenetv2.py
|
2b3de2bf5bc86f118bd6e4f60d1870d2ff1e9795
|
[] |
no_license
|
CN1Ember/feathernet_mine
|
77d29576e4ecb4f85626b94e6ff5884216af3098
|
ac0351f59a1ed30abecd1088a46c7af01afa29d5
|
refs/heads/main
| 2023-05-28T17:19:06.624448
| 2021-06-17T04:39:09
| 2021-06-17T04:39:09
| 374,603,757
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,110
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/5/27 16:00
# @Author : xiezheng
# @Site :
# @File : insightface_mobilefacenet.py
import math
from torch import nn
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchsummary import summary
from torch.nn import Parameter
from insightface_v2.utils.model_analyse import ModelAnalyse
from insightface_v2.utils.logger import get_logger
import os
class Bottleneck_mobilefacenet(nn.Module):
def __init__(self, in_planes, out_planes, stride, expansion):
super(Bottleneck_mobilefacenet, self).__init__()
self.connect = stride == 1 and in_planes == out_planes
planes = in_planes * expansion
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.prelu1 = nn.PReLU(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, groups=planes, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.prelu2 = nn.PReLU(planes)
self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn3 = nn.BatchNorm2d(out_planes)
def forward(self, x):
out = self.prelu1(self.bn1(self.conv1(x)))
out = self.prelu2(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
if self.connect:
return x + out
else:
return out
class Mobilefacenetv2(nn.Module):
Mobilefacenet_bottleneck_setting = [
# [t, c , n ,s] = [expansion, out_planes, num_blocks, stride]
[2, 64, 5, 2],
[4, 128, 1, 2],
[2, 128, 6, 1],
[4, 128, 1, 2],
[2, 128, 2, 1]
]
def __init__(self, bottleneck_setting=Mobilefacenet_bottleneck_setting, embedding_size=512):
super(Mobilefacenetv2, self).__init__()
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.prelu1 = nn.PReLU(64)
self.conv2 = nn.Conv2d(64, 64, kernel_size=3, groups=64, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(64)
self.prelu2 = nn.PReLU(64)
self.layers = self._make_layer(Bottleneck_mobilefacenet, bottleneck_setting)
self.conv3 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0, bias=False)
self.bn3 = nn.BatchNorm2d(512)
self.prelu3 = nn.PReLU(512)
self.conv4 = nn.Conv2d(512, 512, kernel_size=7, groups=512, stride=1, padding=0, bias=False)
self.bn4 = nn.BatchNorm2d(512)
self.linear = nn.Linear(512, embedding_size)
# self.bn5 = nn.BatchNorm1d(128, affine=False)
self.bn5 = nn.BatchNorm1d(embedding_size, affine=False)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
if m.affine:
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
# nn.init.constant_(m.bias, 0)
def _make_layer(self, block, setting):
layers = []
for t, c, n, s in setting:
for i in range(n):
if i == 0:
layers.append(block(self.inplanes, c, s, t))
else:
layers.append(block(self.inplanes, c, 1, t))
self.inplanes = c
return nn.Sequential(*layers)
def forward(self, x):
out = self.prelu1(self.bn1(self.conv1(x)))
out = self.prelu2(self.bn2(self.conv2(out)))
out = self.layers(out)
out = self.prelu3(self.bn3(self.conv3(out)))
out = self.bn4(self.conv4(out))
out = out.view(out.size(0), -1)
out = self.bn5(self.linear(out))
return out
if __name__ == "__main__":
model = Mobilefacenetv2(embedding_size=512)
# print(model.state_dict())
# print("---------------------")
# for key in model.state_dict().keys():
# print(key)
print(model)
# summary(model, (3, 112, 112))
save_path = './finetune-test'
if not os.path.exists(save_path):
os.makedirs(save_path)
logger = get_logger(save_path, "finetune-test")
test_input = torch.randn(1, 3, 112, 112)
model_analyse = ModelAnalyse(model, logger)
params_num = model_analyse.params_count()
flops = model_analyse.flops_compute(test_input)
count = 0
for module in model.modules():
if isinstance(module, nn.Conv2d):
count = count + 1
print("\nmodel layers_num = {}".format(count))
print("model size={} MB".format(params_num * 4 / 1024 / 1024))
print("model flops={} M".format(sum(flops) / (10 ** 6)))
|
[
"chenguo@gpu017.scut-smil.cn"
] |
chenguo@gpu017.scut-smil.cn
|
607f59255088fbb01756be227cbf38e9c8055832
|
6630694f401f6f475dd81bb01ff9368db844ccff
|
/mmpretrain/configs/mae/mae_hivit_base_p16_8xb512_amp_coslr_400e_in1k.py
|
6ffcf6d13c049fa8802766d74f7e5c9a803b706e
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmpretrain
|
98a4d6b3bb747efc3d50decebf84fc3ffa41076a
|
d2ccc44a2c8e5d49bb26187aff42f2abc90aee28
|
refs/heads/main
| 2023-08-30T19:11:24.771498
| 2023-08-23T02:45:18
| 2023-08-23T02:45:18
| 278,415,292
| 652
| 186
|
Apache-2.0
| 2023-09-08T08:01:40
| 2020-07-09T16:25:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,905
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base
with read_base():
from .._base_.models.mae_hivit_base_p16 import *
from .._base_.datasets.imagenet_bs512_mae import *
from .._base_.default_runtime import *
from mmengine.hooks.checkpoint_hook import CheckpointHook
from mmengine.optim.optimizer.amp_optimizer_wrapper import AmpOptimWrapper
from mmengine.optim.scheduler.lr_scheduler import CosineAnnealingLR, LinearLR
from mmengine.runner.loops import EpochBasedTrainLoop
from torch.optim.adamw import AdamW
# optimizer wrapper
optim_wrapper = dict(
type=AmpOptimWrapper,
loss_scale='dynamic',
optimizer=dict(
type=AdamW,
lr=1.5e-4 * 4096 / 256,
betas=(0.9, 0.95),
weight_decay=0.05),
paramwise_cfg=dict(
custom_keys={
'norm': dict(decay_mult=0.0),
'bias': dict(decay_mult=0.0),
'pos_embed': dict(decay_mult=0.),
'mask_token': dict(decay_mult=0.),
}))
# learning rate scheduler
param_scheduler = [
dict(
type=LinearLR,
start_factor=0.0001,
by_epoch=True,
begin=0,
end=40,
convert_to_iter_based=True),
dict(
type=CosineAnnealingLR,
T_max=360,
by_epoch=True,
begin=40,
end=400,
convert_to_iter_based=True)
]
# runtime settings
train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=400)
# only keeps the latest 3 checkpoints
default_hooks.checkpoint = dict(
type=CheckpointHook, interval=1, max_keep_ckpts=3)
randomness.update(seed=0, diff_rank_seed=True)
# auto resume
resume = True
find_unused_parameters = True
# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
auto_scale_lr = dict(base_batch_size=4096)
|
[
"noreply@github.com"
] |
open-mmlab.noreply@github.com
|
2a5fb71b730f244801d591ed74c96803ab7eccd9
|
e0d16d2dd3bf8490d660fc5ba5ce789bd4f20384
|
/temperature_converter/simple.py
|
d2123c90ef5fccb38def5255d1816123bd236af0
|
[] |
no_license
|
tt-n-walters/python-course
|
9be8344f4e40f6abe2f8c6355117e8ea1891c7cb
|
255dbcddf1f4bd258474df04f3a3a9209c74c01f
|
refs/heads/master
| 2023-06-04T05:57:08.215733
| 2021-06-25T15:25:24
| 2021-06-25T15:25:24
| 380,279,443
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
print("Enter a temperature in Celcius:")
celcius = input("> ")
celcius = int(celcius)
fahrenheit = celcius * (9 / 5) + 32
print(celcius, "ºC is", fahrenheit, "ºF")
|
[
"nico.walters@techtalents.es"
] |
nico.walters@techtalents.es
|
335e23f9cf6ef4b4e6c4541c52d496119e4469ce
|
673bf701a310f92f2de80b687600cfbe24612259
|
/misoclib/com/liteeth/core/tty/__init__.py
|
7ead3ef4cd4d95f47e91d7b8077d5db3d7bc5da7
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mogorman/misoc
|
d78340a9bf67feaede20e8cac473bcfddbd186a3
|
4ec49e2aadcff0c3ca34ebd0d35013d88f4d3e1f
|
refs/heads/master
| 2021-01-18T05:38:39.670977
| 2015-03-10T05:37:52
| 2015-03-10T05:37:52
| 30,672,191
| 1
| 0
| null | 2015-02-11T22:05:05
| 2015-02-11T22:05:05
| null |
UTF-8
|
Python
| false
| false
| 2,774
|
py
|
from misoclib.com.liteeth.common import *
from misoclib.com.liteeth.generic import *
class LiteEthTTYTX(Module):
def __init__(self, ip_address, udp_port, fifo_depth=None):
self.sink = sink = Sink(eth_tty_description(8))
self.source = source = Source(eth_udp_user_description(8))
###
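        # fifo_depth=None selects pass-through mode: every byte leaves as its
        # own 1-byte UDP packet. With a FIFO, a burst of buffered bytes is
        # sent as a single packet whose length is a snapshot of the FIFO level.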
if fifo_depth is None:
self.comb += [
source.stb.eq(sink.stb),
source.sop.eq(1),
source.eop.eq(1),
source.length.eq(1),
source.data.eq(sink.data),
sink.ack.eq(source.ack)
]
else:
self.submodules.fifo = fifo = SyncFIFO([("data", 8)], fifo_depth)
self.comb += Record.connect(sink, fifo.sink)
self.submodules.level = level = FlipFlop(max=fifo_depth)
self.comb += level.d.eq(fifo.fifo.level)
self.submodules.counter = counter = Counter(max=fifo_depth)
self.submodules.fsm = fsm = FSM(reset_state="IDLE")
fsm.act("IDLE",
If(fifo.source.stb,
level.ce.eq(1),
counter.reset.eq(1),
NextState("SEND")
)
)
fsm.act("SEND",
source.stb.eq(fifo.source.stb),
source.sop.eq(counter.value == 0),
If(level.q == 0,
source.eop.eq(1),
).Else(
source.eop.eq(counter.value == (level.q-1)),
),
source.src_port.eq(udp_port),
source.dst_port.eq(udp_port),
source.ip_address.eq(ip_address),
If(level.q == 0,
source.length.eq(1),
).Else(
source.length.eq(level.q),
),
source.data.eq(fifo.source.data),
fifo.source.ack.eq(source.ack),
If(source.stb & source.ack,
counter.ce.eq(1),
If(source.eop,
NextState("IDLE")
)
)
)
class LiteEthTTYRX(Module):
def __init__(self, ip_address, udp_port, fifo_depth=None):
self.sink = sink = Sink(eth_udp_user_description(8))
self.source = source = Source(eth_tty_description(8))
###
valid = Signal()
self.comb += valid.eq(
(sink.ip_address == ip_address) &
(sink.dst_port == udp_port)
)
if fifo_depth is None:
self.comb += [
source.stb.eq(sink.stb & valid),
source.data.eq(sink.data),
sink.ack.eq(source.ack)
]
else:
self.submodules.fifo = fifo = SyncFIFO([("data", 8)], fifo_depth)
self.comb += [
fifo.sink.stb.eq(sink.stb & valid),
fifo.sink.data.eq(sink.data),
sink.ack.eq(fifo.sink.ack),
Record.connect(fifo.source, source)
]
class LiteEthTTY(Module):
def __init__(self, udp, ip_address, udp_port,
rx_fifo_depth=64,
tx_fifo_depth=64):
self.submodules.tx = tx = LiteEthTTYTX(ip_address, udp_port, tx_fifo_depth)
self.submodules.rx = rx = LiteEthTTYRX(ip_address, udp_port, rx_fifo_depth)
udp_port = udp.crossbar.get_port(udp_port, dw=8)
self.comb += [
Record.connect(tx.source, udp_port.sink),
Record.connect(udp_port.source, rx.sink)
]
self.sink, self.source = self.tx.sink, self.rx.source
|
[
"florent@enjoy-digital.fr"
] |
florent@enjoy-digital.fr
|
3588536acd5fbd95b034ed81cc6e33948259afd1
|
584db1be8b6bdedaa56d186692ad72da5ee07164
|
/patron/compute/monitors/__init__.py
|
8b0ba4a7aee30af3c51cafc15049eb4abc5292bf
|
[
"Apache-2.0"
] |
permissive
|
casbin/openstack-patron
|
66006f57725cf1c3d735cd5529d3459fd77384c8
|
b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25
|
refs/heads/master
| 2023-05-31T05:23:37.721768
| 2015-12-31T12:18:17
| 2015-12-31T12:18:17
| 382,054,546
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,280
|
py
|
# Copyright 2013 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Resource monitor API specification.
ResourceMonitorBase provides the definition of minimum set of methods
that needs to be implemented by Resource Monitor.
"""
import functools
import types
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from patron.i18n import _LW
from patron import loadables
compute_monitors_opts = [
cfg.MultiStrOpt('compute_available_monitors',
default=['patron.compute.monitors.all_monitors'],
help='Monitor classes available to the compute which may '
'be specified more than once.'),
cfg.ListOpt('compute_monitors',
default=[],
help='A list of monitors that can be used for getting '
'compute metrics.'),
]
CONF = cfg.CONF
CONF.register_opts(compute_monitors_opts)
LOG = logging.getLogger(__name__)
class ResourceMonitorMeta(type):
def __init__(cls, names, bases, dict_):
"""Metaclass that allows us to create a function map and call it later
to get the metric names and their values.
"""
super(ResourceMonitorMeta, cls).__init__(names, bases, dict_)
prefix = '_get_'
prefix_len = len(prefix)
cls.metric_map = {}
for name, value in cls.__dict__.iteritems():
if (len(name) > prefix_len
and name[:prefix_len] == prefix
and isinstance(value, types.FunctionType)):
metric_name = name[prefix_len:].replace('_', '.')
cls.metric_map[metric_name] = value
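# Example (sketch): in a ResourceMonitorBase subclass, a method named
# `_get_cpu_frequency` is auto-registered in metric_map by the metaclass
# above under the metric name 'cpu.frequency'.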
@six.add_metaclass(ResourceMonitorMeta)
class ResourceMonitorBase(object):
"""Base class for resource monitors
"""
def __init__(self, parent):
self.compute_manager = parent
self.source = None
self._data = {}
@classmethod
def add_timestamp(cls, func):
"""Decorator to indicate that a method needs to add a timestamp.
When a function returning a value is decorated by the decorator,
which means a timestamp should be added into the returned value.
That is, a tuple (value, timestamp) is returned.
The timestamp is the time when we update the value in the _data.
If users hope to define how the timestamp is got by themselves,
they should not use this decorator in their own classes.
"""
@functools.wraps(func)
def wrapper(self, **kwargs):
return func(self, **kwargs), self._data.get("timestamp", None)
return wrapper
def _update_data(self):
"""Method to update the metrics data.
Each subclass can implement this method to update metrics
into _data. It will be called in get_metrics.
"""
pass
def get_metric_names(self):
"""Get available metric names.
Get available metric names, which are represented by a set of keys
that can be used to check conflicts and duplications
:returns: a set of keys representing metrics names
"""
return self.metric_map.keys()
def get_metrics(self, **kwargs):
"""Get metrics.
Get metrics, which are represented by a list of dictionaries
[{'name': metric name,
'value': metric value,
'timestamp': the time when the value is retrieved,
'source': what the value is got by}, ...]
:param kwargs: extra arguments that might be present
:returns: a list to tell the current metrics
"""
data = []
self._update_data()
for name, func in self.metric_map.iteritems():
ret = func(self, **kwargs)
data.append(self._populate(name, ret[0], ret[1]))
return data
def _populate(self, metric_name, metric_value, timestamp=None):
"""Populate the format what we want from metric name and metric value
"""
result = {}
result['name'] = metric_name
result['value'] = metric_value
result['timestamp'] = timestamp or timeutils.utcnow()
result['source'] = self.source
return result
class ResourceMonitorHandler(loadables.BaseLoader):
"""Base class to handle loading monitor classes.
"""
def __init__(self):
super(ResourceMonitorHandler, self).__init__(ResourceMonitorBase)
def choose_monitors(self, manager):
"""This function checks the monitor names and metrics names against a
predefined set of acceptable monitors.
"""
monitor_classes = self.get_matching_classes(
CONF.compute_available_monitors)
monitor_class_map = {cls.__name__: cls for cls in monitor_classes}
monitor_cls_names = CONF.compute_monitors
good_monitors = []
bad_monitors = []
metric_names = set()
for monitor_name in monitor_cls_names:
if monitor_name not in monitor_class_map:
bad_monitors.append(monitor_name)
continue
try:
# make sure different monitors do not have the same
# metric name
monitor = monitor_class_map[monitor_name](manager)
metric_names_tmp = set(monitor.get_metric_names())
overlap = metric_names & metric_names_tmp
if not overlap:
metric_names = metric_names | metric_names_tmp
good_monitors.append(monitor)
else:
msg = (_LW("Excluding monitor %(monitor_name)s due to "
"metric name overlap; overlapping "
"metrics: %(overlap)s") %
{'monitor_name': monitor_name,
'overlap': ', '.join(overlap)})
                LOG.warning(msg)
bad_monitors.append(monitor_name)
except Exception as ex:
msg = (_LW("Monitor %(monitor_name)s cannot be used: %(ex)s") %
{'monitor_name': monitor_name, 'ex': ex})
                LOG.warning(msg)
bad_monitors.append(monitor_name)
if bad_monitors:
LOG.warning(_LW("The following monitors have been disabled: %s"),
', '.join(bad_monitors))
return good_monitors
def all_monitors():
"""Return a list of monitor classes found in this directory.
This method is used as the default for available monitors
and should return a list of all monitor classes available.
"""
return ResourceMonitorHandler().get_all_classes()
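# --- Hedged usage sketch (illustration only; not part of this module) ---
# Assumes the method prefix collected by ResourceMonitorMeta is '_get_'
# (the real prefix is defined earlier in this module), so a method named
# '_get_cpu_frequency' would be exposed as the metric name 'cpu.frequency'.
#
# class ExampleCPUMonitor(ResourceMonitorBase):
#     def __init__(self, parent):
#         super(ExampleCPUMonitor, self).__init__(parent)
#         self.source = 'example-source'
#
#     def _update_data(self):
#         # refresh the cached readings and remember when we did so
#         self._data = {'cpu.frequency': 1600,
#                       'timestamp': timeutils.utcnow()}
#
#     @ResourceMonitorBase.add_timestamp
#     def _get_cpu_frequency(self, **kwargs):
#         return self._data.get('cpu.frequency')
#
# monitor = ExampleCPUMonitor(compute_manager)
# monitor.get_metrics()
# # -> [{'name': 'cpu.frequency', 'value': 1600,
# #      'timestamp': <datetime>, 'source': 'example-source'}]
# (Kept commented out so the loadables scanner does not pick the example
# class up as a real monitor.)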
|
[
"hsluoyz@qq.com"
] |
hsluoyz@qq.com
|
b9097aa2f33448f7f6f4090ed4a250cea3af2622
|
c66955c6fc178955c2024e0318ec7a91a8386c2d
|
/programQuick/chapterFifteen/demo6.py
|
6f2c5581195897bb831999363b61ad251def1e72
|
[] |
no_license
|
duheng18/python-study
|
a98642d6ee1b0043837c3e7c5b91bf1e28dfa588
|
13c0571ac5d1690bb9e615340482bdb2134ecf0e
|
refs/heads/master
| 2022-11-30T17:36:57.060130
| 2019-11-18T07:31:40
| 2019-11-18T07:31:40
| 147,268,053
| 1
| 0
| null | 2022-11-22T03:36:51
| 2018-09-04T00:49:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,691
|
py
|
import datetime, time
'''
strftime directive   meaning
%Y    year with century, e.g. '2014'
%y    year without century, '00' to '99' (1970 through 2069)
%m    month as a number, '01' to '12'
%B    full month name, e.g. 'November'
%b    abbreviated month name, e.g. 'Nov'
%d    day of the month, '01' to '31'
%j    day of the year, '001' to '366'
%w    day of the week, '0' (Sunday) to '6' (Saturday)
%A    full weekday name, e.g. 'Monday'
%a    abbreviated weekday name, e.g. 'Mon'
%H    hour (24-hour clock), '00' to '23'
%I    hour (12-hour clock), '01' to '12'
%M    minute, '00' to '59'
%S    second, '00' to '59'
%p    'AM' or 'PM'
%%    a literal '%' character
'''
halloween2016 = datetime.datetime(2016, 10, 31, 0, 0, 0)
while datetime.datetime.now() < halloween2016:
time.sleep(1)
oct21st = datetime.datetime(2015, 10, 21, 16, 29, 0)
# 2015/10/21 16:29:00
# print(oct21st.strftime('%Y/%m/%d %H:%M:%S'))
# 04:29 PM
# print(oct21st.strftime('%I:%M %p'))
# October of '15
print(oct21st.strftime("%B of '%y"))
# 2015-10-21 00:00:00
# print(datetime.datetime.strptime('October 21,2015', '%B %d,%Y'))
# 2015-10-21 16:29:00
print(datetime.datetime.strptime('2015/10/21 16:29:00', '%Y/%m/%d %H:%M:%S'))
# 2015-10-01 00:00:00
# print(datetime.datetime.strptime("October of '15", "%B of '%y"))
# 2063-11-01 00:00:00
# print(datetime.datetime.strptime("November of '63", "%B of '%y"))
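# A hedged round-trip sketch: strftime and strptime are inverses when the
# same directive string captures every field of the datetime.
fmt = '%Y/%m/%d %H:%M:%S'
stamp = oct21st.strftime(fmt)                     # datetime -> '2015/10/21 16:29:00'
parsed = datetime.datetime.strptime(stamp, fmt)   # string -> datetime
print(parsed == oct21st)                          # True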
|
[
"emaildh@163.com"
] |
emaildh@163.com
|
64a82974142c99441155b1b98d16bb62a2af6d43
|
114c1f7ceff04e00591f46eeb0a2eb387ac65710
|
/g4g/DS/Linked_Lists/Singly_linked_lists/8_search_element_in_Linked_list.py
|
7ead23a5a18b1cc959c0f2875e812d2a5015ab62
|
[] |
no_license
|
sauravgsh16/DataStructures_Algorithms
|
0783a5e6dd00817ac0b6f2b856ad8d82339a767d
|
d3133f026f972f28bd038fcee9f65784f5d3ea8b
|
refs/heads/master
| 2020-04-23T03:00:29.713877
| 2019-11-25T10:52:33
| 2019-11-25T10:52:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,215
|
py
|
''' Search an element in a Linked List (Iterative and Recursive) '''
class Node(object):
def __init__(self, val):
self.val = val
self.next = None
class LinkedList(object):
def __init__(self):
self.head = None
self.tail = None
self.size = 0
def push(self, val):
nN = Node(val)
if not self.head:
self.head = nN
self.tail = nN
else:
self.tail.next = nN
self.tail = nN
self.size += 1
def searchIterative(self, key):
if self.head.val == key:
return self.head.val
cur = self.head.next
while cur:
if cur.val == key:
return cur.val
cur = cur.next
return None
    def _searchRecursive(self, node, key):
        # check for the end of the list first to avoid reading .val on None
        if not node:
            return None
        if node.val == key:
            return node.val
        return self._searchRecursive(node.next, key)
def searchRecursive(self, key):
return self._searchRecursive(self.head, key)
ll = LinkedList()
ll.push(1)
ll.push(2)
ll.push(3)
ll.push(4)
ll.push(5)
print ll.searchIterative(10)
print ll.searchRecursive(2)
|
[
"sauravgsh16@gmail.com"
] |
sauravgsh16@gmail.com
|
59c6012139aa84f9d4db9417bcfe97c7e3d33d64
|
710e96fb56f48a91dbd5e34c3e7b07fc24b4d95a
|
/WebContent/WEB-INF/program/unit/unit_resource.py
|
b1c62755682edb820081d76745f39050c457dea3
|
[] |
no_license
|
yxxcrtd/jitar2.0
|
bf6ade6aaf0bdb0ff9a94b011041e0faa13789f1
|
9215d51cf536518ab4c8fea069ef5ae1ff6466c8
|
refs/heads/master
| 2020-05-31T15:28:38.821345
| 2019-06-05T08:01:39
| 2019-06-05T08:01:39
| 190,351,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,589
|
py
|
from unit_page import *
from base_action import SubjectMixiner
from resource_query import ResourceQuery
class unit_resource(UnitBasePage, SubjectMixiner):
def __init__(self):
UnitBasePage.__init__(self)
def execute(self):
self.unit = self.getUnit()
if self.unit == None:
self.addActionError(u"您所访问的机构不存在!")
return self.ERROR
self.get_resource_list()
#res_cate = __jitar__.categoryService.getCategoryTree("resource")
#request.setAttribute("res_cate", res_cate)
self.get_cate_tree_without_cache()
request.setAttribute("head_nav", "unit_resource")
request.setAttribute("unit", self.unit)
self.putGradeList()
self.putSubjectList()
self.putResouceCateList()
templateName = "template1"
if self.unit.templateName != None:
templateName = self.unit.templateName
return "/WEB-INF/unitspage/" + templateName + "/unit_resource.ftl"
def get_resource_list(self):
qry = ResourceQuery(""" r.resourceId, r.href, r.title, r.fsize, r.createDate, r.recommendState,
u.loginName, u.nickName, r.subjectId as subjectId, grad.gradeName, sc.name as scName """)
#qry.unitId = self.unit.unitId
type = self.params.getStringParam("type")
if type == None or type == "": type = "new"
list_type = ""
if type == "hot":
qry.orderType = ResourceQuery.ORDER_TYPE_VIEWCOUNT_DESC
qry.custormAndWhereClause = " r.approvedPathInfo Like '%/" + str(self.unit.unitId) + "/%'"
list_type = u"最高人气"
elif type == "rcmd":
#qry.recommendState = True
#qry.rcmdState = True
qry.custormAndWhereClause = " r.approvedPathInfo Like '%/" + str(self.unit.unitId) + "/%' And r.rcmdPathInfo Like '%/" + str(self.unit.unitId) + "/%'"
list_type = u"编辑推荐"
elif type == "cmt":
qry.orderType = ResourceQuery.ORDER_TYPE_COMMENTCOUNT_DESC
qry.custormAndWhereClause = " r.approvedPathInfo Like '%/" + str(self.unit.unitId) + "/%'"
list_type = u"评论最多"
else:
type = "new"
qry.custormAndWhereClause = " r.approvedPathInfo Like '%/" + str(self.unit.unitId) + "/%'"
list_type = u"最新资源"
request.setAttribute("type", type)
request.setAttribute("list_type", list_type)
qry.gradelevel = self.params.getIntParamZeroAsNull("level")
qry.subjectId = self.params.getIntParamZeroAsNull("subjectId")
qry.sysCateId = self.params.getIntParamZeroAsNull("categoryId")
qry.gradeId = self.params.getIntParamZeroAsNull("gradeId")
qry.k = self.params.getStringParam("k")
pager = self.createPager()
pager.totalRows = qry.count()
resource_list = qry.query_map(pager)
request.setAttribute("resource_list", resource_list)
request.setAttribute("pager", pager)
request.setAttribute("subjectId", qry.subjectId)
request.setAttribute("categoryId", qry.sysCateId)
def get_cate_tree_without_cache(self):
self.sbj_svc = __jitar__.subjectService
type = self.params.getStringParam("type")
if type == None or type == "": type = "new"
outHtml = ""
subject_list = self.sbj_svc.getMetaSubjectList()
for s in subject_list:
msid = s.getMsubjId()
outHtml = outHtml + "d.add(" + str(msid) + ",0,'" + s.getMsubjName() + "','unit_resource.py?type=" + type + "&subjectId=" + str(msid) + "&unitId=" + str(self.unit.unitId) + "');"
gradeIdList = self.sbj_svc.getMetaGradeListByMetaSubjectId(msid)
if gradeIdList != None:
for gid in gradeIdList:
outHtml = outHtml + "d.add(" + str(msid) + str(gid.getGradeId()) + "," + str(msid) + ",'" + gid.getGradeName() + "','unit_resource.py?type=" + type + "&subjectId=" + str(msid) + "&gradeId=" + str(gid.getGradeId()) + "&target=child&unitId=" + str(self.unit.unitId) + "');"
gradeLevelList = self.sbj_svc.getGradeLevelListByGradeId(gid.getGradeId())
for glevel in gradeLevelList:
outHtml = outHtml + "d.add(" + str(msid) + str(gid.getGradeId()) + str(glevel.getGradeId()) + "," + str(msid) + str(gid.getGradeId()) + ",'" + glevel.getGradeName() + "','unit_resource.py?type=" + type + "&subjectId=" + str(msid) + "&gradeId=" + str(glevel.getGradeId()) + "&level=1&unitId=" + str(self.unit.unitId) + "');"
request.setAttribute("outHtml", outHtml)
def get_cate_tree(self):
        # the cached version below has a bug: it does not filter by unit
cache = __jitar__.cacheProvider.getCache('category')
self.sbj_svc = __jitar__.subjectService
type = self.params.getStringParam("type")
if type == None or type == "": type = "new"
outHtml = cache.get(type + "_outHtml_resource")
if outHtml == None or outHtml == "":
cache_key = "_subject_list_resource"
subject_list = cache.get(cache_key)
if subject_list == None:
subject_list = self.sbj_svc.getMetaSubjectList()
cache.put(cache_key, subject_list)
outHtml = ""
for s in subject_list:
msid = s.getMsubjId()
outHtml = outHtml + "d.add(" + str(msid) + ",0,'" + s.getMsubjName() + "','unit_resource.py?type=" + type + "&subjectId=" + str(msid) + "&unitId=" + str(self.unit.unitId) + "');"
cache_key = "_gradeIdList_resource" + str(msid)
gradeIdList = cache.get(cache_key)
if gradeIdList == None:
gradeIdList = self.sbj_svc.getMetaGradeListByMetaSubjectId(msid)
cache.put(cache_key, gradeIdList)
if gradeIdList != None:
for gid in gradeIdList:
outHtml = outHtml + "d.add(" + str(msid) + str(gid.getGradeId()) + "," + str(msid) + ",'" + gid.getGradeName() + "','unit_resource.py?type=" + type + "&subjectId=" + str(msid) + "&gradeId=" + str(gid.getGradeId()) + "&target=child&unitId=" + str(self.unit.unitId) + "');"
cache_key = "_gradeLevelList_resource" + str(gid.getGradeId())
gradeLevelList = cache.get(cache_key)
if gradeLevelList == None:
gradeLevelList = self.sbj_svc.getGradeLevelListByGradeId(gid.getGradeId())
cache.put(cache_key, gradeLevelList)
for glevel in gradeLevelList:
outHtml = outHtml + "d.add(" + str(msid) + str(gid.getGradeId()) + str(glevel.getGradeId()) + "," + str(msid) + str(gid.getGradeId()) + ",'" + glevel.getGradeName() + "','unit_resource.py?type=" + type + "&subjectId=" + str(msid) + "&gradeId=" + str(glevel.getGradeId()) + "&level=1&unitId=" + str(self.unit.unitId) + "');"
cache.put(type + "_outHtml_resource", outHtml)
request.setAttribute("outHtml", outHtml)
def createPager(self):
pager = self.params.createPager()
pager.itemName = u"资源"
pager.itemUnit = u"个"
pager.pageSize = 20
return pager
|
[
"yxxcrtd@gmail.com"
] |
yxxcrtd@gmail.com
|
5baa906189990436b9e8671cccd9250487f5b8f8
|
f138be1e8e382c404cfe1ff6a35e90fc77fa9bff
|
/ABC/python/113/A.py
|
ba8ef3118eabaf3859c5aa4bcbdfe03dec29ba4c
|
[] |
no_license
|
jackfrostwillbeking/atcoder_sample
|
8547d59ca2f66b34905f292191df6c474010fded
|
d5b2fe8f628fd56eaf23ee7e92938e8ac1b1fef9
|
refs/heads/master
| 2023-07-25T19:16:14.340414
| 2021-08-26T15:26:08
| 2021-08-26T15:26:08
| 273,857,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
import sys
import math
X,Y = map(int,input().split())
if not (1 <= X <= 100 and 1 <= Y <= 100): sys.exit()
if not (Y % 2 == 0): sys.exit()
print(X+math.floor(Y/2))
|
[
"jackfrostwillbeking@gmail.com"
] |
jackfrostwillbeking@gmail.com
|
660db7cf37d0c43e23f7a596370be654b457e365
|
959122eea21cec24a4cf32808a24482feda73863
|
/account/migrations/0001_initial.py
|
5340e80bcf8a95717424cdd0591d4929d71890df
|
[] |
no_license
|
AsadullahFarooqi/InventoryWebApp
|
9fbe6ccafcb93bb5cb1879b728954867014d0afd
|
07e8e6cb06e11f8ef6ada6a590e52f569a8c2d6b
|
refs/heads/master
| 2020-06-18T15:06:18.612258
| 2019-07-11T07:32:00
| 2019-07-11T07:32:00
| 196,341,216
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,138
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-03-21 06:14
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('activation_key', models.CharField(blank=True, max_length=120, null=True)),
('activated', models.BooleanField(default=False)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('slug', models.SlugField(blank=True, unique=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"asadullah.itcgcs@gmail.com"
] |
asadullah.itcgcs@gmail.com
|
2cea009e2b7488d145a5c09e172c6dc3f4a5fd14
|
5e80f0b1af9fbf9dc774dbb68aa603574e4ae0ba
|
/python/deep learning/matplot_test2.py
|
2de2dd91a1fa053c0c0b9595266cbe18ddff1a10
|
[] |
no_license
|
namujinju/study-note
|
4271b4248b3c4ac1b96ef1da484d86569a030762
|
790b21e5318a326e434dc836f5f678a608037a8c
|
refs/heads/master
| 2023-02-04T13:25:55.418896
| 2020-12-26T10:47:11
| 2020-12-26T10:47:11
| 275,279,138
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 960
|
py
|
import matplotlib as mpl
import pandas
import matplotlib.pyplot as plt
# matplotlib font setup
plt.rc('font', family='NanumGothic')  # For Windows
DATASET_PATH = "../Datasets/"
test_dataframe = pandas.read_csv(DATASET_PATH + '목포 2014년.csv', sep=",", engine='python', header = None)
test_dataframe = test_dataframe.iloc[:20, :]  # keep only the first 20 of the year's 8,760 hourly rows (January sample)
test_prediction_dataframe = pandas.read_csv(DATASET_PATH + 'my_testset_prediction_2014.csv', sep=",", engine='python', header = None)
# test
dataset = test_dataframe.values
x = dataset[:20,0]
y1 = dataset[:20,5]
dataset = test_prediction_dataframe.values
y2 = dataset[:20,0]
plt.plot(x, y1, label="observed")
plt.plot(x, y2, label="predicted")
# plt.plot(x, y3, label="MAE")
plt.title("2014년 영암 태양광 발전소 발전량 예측값과 관측값 testset")
plt.xlabel("시간")
plt.ylabel("발전량(kWh)")
plt.legend()
plt.show()
|
[
"59328810+namujinju@users.noreply.github.com"
] |
59328810+namujinju@users.noreply.github.com
|
9493c500bcafef612ab8fabddda6e9cf0fbc065d
|
0be27c0a583d3a8edd5d136c091e74a3df51b526
|
/sort_based_on_vowels.py
|
138e56e804102a3ecf306375c10d58a27eb22549
|
[] |
no_license
|
ssangitha/guvicode
|
3d38942f5d5e27a7978e070e14be07a5269b01fe
|
ea960fb056cfe577eec81e83841929e41a31f72e
|
refs/heads/master
| 2020-04-15T05:01:00.226391
| 2019-09-06T10:08:23
| 2019-09-06T10:08:23
| 164,405,935
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
l=["a","e","i","o","u","A","E","I","O","U"]
n=int(input())
c=0
m=[]
for i in range(n):
a=input()
for i in a:
if i in l:
c+=1
m.append([a,c])
c=0
m.sort(key=lambda x:x[1],reverse=True)
for i in range(n):
print(m[i][0])
|
[
"noreply@github.com"
] |
ssangitha.noreply@github.com
|
83ce54fb3ad0d2a17033243d3d170b31aa63f207
|
febeffe6ab6aaa33e3a92e2dbbd75783a4e32606
|
/ssseg/cfgs/gcnet/cfgs_cityscapes_resnet101os16.py
|
2b548a998b4066159b8e526c725ecbb0f1dc850e
|
[
"MIT"
] |
permissive
|
Junjun2016/sssegmentation
|
7bbc5d53abee1e0cc88d5e989e4cff5760ffcd09
|
bf7281b369e8d7fc2f8986caaeec3ec38a30c313
|
refs/heads/main
| 2023-02-04T22:09:13.921774
| 2020-12-23T06:28:56
| 2020-12-23T06:28:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,317
|
py
|
'''define the config file for cityscapes and resnet101os16'''
from .base_cfg import *
# modify dataset config
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG['train'].update(
{
'type': 'cityscapes',
'rootdir': '/data/CityScapes',
'aug_opts': [('Resize', {'output_size': (2048, 1024), 'keep_ratio': True, 'scale_range': (0.5, 2.0)}),
('RandomCrop', {'crop_size': (512, 1024), 'one_category_max_ratio': 0.75}),
('RandomFlip', {'flip_prob': 0.5}),
('PhotoMetricDistortion', {}),
('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
('ToTensor', {}),
('Padding', {'output_size': (512, 1024), 'data_type': 'tensor'}),]
}
)
DATASET_CFG['test'].update(
{
'type': 'cityscapes',
'rootdir': '/data/CityScapes',
'aug_opts': [('Resize', {'output_size': (2048, 1024), 'keep_ratio': True, 'scale_range': None}),
('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
('ToTensor', {}),],
}
)
# modify dataloader config
DATALOADER_CFG = DATALOADER_CFG.copy()
DATALOADER_CFG['train'].update(
{
'batch_size': 8,
}
)
# modify optimizer config
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
{
'max_epochs': 220
}
)
# modify losses config
LOSSES_CFG = LOSSES_CFG.copy()
# modify model config
MODEL_CFG = MODEL_CFG.copy()
MODEL_CFG.update(
{
'num_classes': 19,
'backbone': {
'type': 'resnet101',
'series': 'resnet',
'pretrained': True,
'outstride': 16,
'is_use_stem': True
},
}
)
# modify common config
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
{
'backupdir': 'gcnet_resnet101os16_cityscapes_train',
'logfilepath': 'gcnet_resnet101os16_cityscapes_train/train.log',
}
)
COMMON_CFG['test'].update(
{
'backupdir': 'gcnet_resnet101os16_cityscapes_test',
'logfilepath': 'gcnet_resnet101os16_cityscapes_test/test.log',
'resultsavepath': 'gcnet_resnet101os16_cityscapes_test/gcnet_resnet101os16_cityscapes_results.pkl'
}
)
|
[
"1159254961@qq.com"
] |
1159254961@qq.com
|
31eae4246d94f36c4e53982ca85c5f89ddb83f88
|
2fed9d28558360a74ba52772a3793a6180d6d0b8
|
/meiduo_mall/meiduo_mall/meiduo_mall/apps/meiduo_main/view/ordersView.py
|
2e24f0ccc26d8dc3e7071d8f30652f701df56ce7
|
[] |
no_license
|
xj-ie/rest_framework-0.2
|
13e6b24327e99108b7fc433c8ecf6a976245477d
|
e9ef7716d5fae87b278845a28a9fbc1af1bcf8df
|
refs/heads/master
| 2022-12-12T15:20:38.402755
| 2020-09-05T01:31:05
| 2020-09-05T01:31:05
| 290,355,223
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,089
|
py
|
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.viewsets import ReadOnlyModelViewSet
from rest_framework.permissions import IsAdminUser
from meiduo_main.utils import PageUnm
from meiduo_main.verializer.orderserialzier import OrderSeriazlier
from orders.models import OrderInfo
class OrdersView(ReadOnlyModelViewSet):
permission_classes = [IsAdminUser]
serializer_class = OrderSeriazlier
pagination_class = PageUnm
def get_queryset(self):
if not self.request.query_params.get('keyword'):
return OrderInfo.objects.all()
else:
return OrderInfo.objects.filter(order_id__contains=self.request.query_params.get('keyword'))
@action(methods=["put"], detail=True,)
def status(self, request, pk):
try:
            order = OrderInfo.objects.get(pk=pk)  # was OrderInfo_id=pk, which is not a valid field lookup
except Exception as e:
raise e
status = request.data.get("status")
order.status = status
order.save()
return Response({"pk":pk,"status":status})
|
[
"xwp_fullstack@163.com"
] |
xwp_fullstack@163.com
|
f3f7551884b09ee0f04581a97137aa32b95c9ad1
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/length_20200530000955.py
|
2a69096dfd430b6fa026c101a17a24b32c58f34b
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
def removeDuplicates(nums):
    # remove duplicates in place, keeping the first occurrence of each value
    i = 0
    while i < len(nums):
        j = i + 1
        while j < len(nums):
            if nums[i] == nums[j]:
                # delete the later duplicate; indices up to j stay valid
                del nums[j]
            else:
                j += 1
        i += 1
    print(nums)
removeDuplicates([1, 2, 2, 2])  # prints [1, 2]
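# A more idiomatic sketch (assumes hashable elements): dict preserves
# insertion order in Python 3.7+, so dict.fromkeys dedupes in one pass
# while keeping the first occurrence of each value.
def remove_duplicates_ordered(nums):
    return list(dict.fromkeys(nums))
print(remove_duplicates_ordered([1, 2, 2, 2]))  # [1, 2]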
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
d13b13a4e5f3e3b9464f7d3916f0727780cdd2fd
|
5e2284bff015e6b03e4ea346572b29aaaf79c7c2
|
/docs/source/conf.py
|
ada5bca296c674f1d233246497f7a700de57ef25
|
[
"MIT"
] |
permissive
|
LaurenDebruyn/aocdbc
|
bbfd7d832f9761ba5b8fb527151157742b2e4890
|
b857e8deff87373039636c12a170c0086b19f04c
|
refs/heads/main
| 2023-06-11T23:02:09.825705
| 2021-07-05T09:26:23
| 2021-07-05T09:26:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,961
|
py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from typing import Dict, List, Tuple
sys.path.insert(0, os.path.abspath("../.."))
import correct_programs
# -- Project information -----------------------------------------------------
project = "python-by-contract-corpus"
author = "Lauren De bruyn, Marko Ristin, Phillip Schanely"
description = "A corpus of Python programs annotated with contracts"
# The short X.Y version
version = "1.0"
# The full version, including alpha/beta/rc tags
release = "1.0"
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.autosectionlabel",
"sphinx.ext.viewcode",
"autodocsumm",
"sphinx_icontract"
]
autodoc_typehints = "signature"
autodoc_default_options = {
"members": True,
"undoc-members": True,
"member-order": "bysource",
"autosummary": True,
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns: List[str] = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# Do not show module names as exercises are mostly self-containing, so the module names
# only hinder readability
add_module_names = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path: List[str] = []
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# html_logo = "logo.png"
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "python_by_contract_corpus_doc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements: Dict[str, str] = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"{}.tex".format(project),
"{} Documentation".format(project),
author,
"manual",
),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, project, "{} Documentation".format(project), [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
project,
"{} Documentation".format(project),
author,
project,
description,
"Miscellaneous",
),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
|
[
"noreply@github.com"
] |
LaurenDebruyn.noreply@github.com
|
45670a3d2d9a0ac6b7003b1bc305c2321c58522e
|
a3f1e80179c23d9202d72b75dd37a49b44785f45
|
/api/client/swagger_client/models/api_pipeline_custom.py
|
e8121beb8d0b1d83de76082c965a59dc57d9b46d
|
[
"Apache-2.0"
] |
permissive
|
pvaneck/mlx
|
b1e82fae5ac8aaa1dddac23aaa38c46f6e6cfc27
|
6edaa0bd77787c56b737322a0c875ae30de6cd49
|
refs/heads/main
| 2023-05-14T06:08:38.404133
| 2021-05-04T01:41:11
| 2021-05-04T01:41:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,191
|
py
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
MLX API
MLX API Extension for Kubeflow Pipelines # noqa: E501
OpenAPI spec version: 0.1.25-related-assets
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.api_pipeline_dag import ApiPipelineDAG # noqa: F401,E501
from swagger_client.models.api_pipeline_inputs import ApiPipelineInputs # noqa: F401,E501
class ApiPipelineCustom(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'dag': 'ApiPipelineDAG',
'inputs': 'ApiPipelineInputs',
'name': 'str',
'description': 'str'
}
attribute_map = {
'dag': 'dag',
'inputs': 'inputs',
'name': 'name',
'description': 'description'
}
def __init__(self, dag=None, inputs=None, name=None, description=None): # noqa: E501
"""ApiPipelineCustom - a model defined in Swagger""" # noqa: E501
self._dag = None
self._inputs = None
self._name = None
self._description = None
self.discriminator = None
self.dag = dag
if inputs is not None:
self.inputs = inputs
self.name = name
if description is not None:
self.description = description
@property
def dag(self):
"""Gets the dag of this ApiPipelineCustom. # noqa: E501
:return: The dag of this ApiPipelineCustom. # noqa: E501
:rtype: ApiPipelineDAG
"""
return self._dag
@dag.setter
def dag(self, dag):
"""Sets the dag of this ApiPipelineCustom.
:param dag: The dag of this ApiPipelineCustom. # noqa: E501
:type: ApiPipelineDAG
"""
if dag is None:
raise ValueError("Invalid value for `dag`, must not be `None`") # noqa: E501
self._dag = dag
@property
def inputs(self):
"""Gets the inputs of this ApiPipelineCustom. # noqa: E501
:return: The inputs of this ApiPipelineCustom. # noqa: E501
:rtype: ApiPipelineInputs
"""
return self._inputs
@inputs.setter
def inputs(self, inputs):
"""Sets the inputs of this ApiPipelineCustom.
:param inputs: The inputs of this ApiPipelineCustom. # noqa: E501
:type: ApiPipelineInputs
"""
self._inputs = inputs
@property
def name(self):
"""Gets the name of this ApiPipelineCustom. # noqa: E501
Name of the custom pipeline # noqa: E501
:return: The name of this ApiPipelineCustom. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ApiPipelineCustom.
Name of the custom pipeline # noqa: E501
:param name: The name of this ApiPipelineCustom. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def description(self):
"""Gets the description of this ApiPipelineCustom. # noqa: E501
Optional description of the custom pipeline # noqa: E501
:return: The description of this ApiPipelineCustom. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this ApiPipelineCustom.
Optional description of the custom pipeline # noqa: E501
:param description: The description of this ApiPipelineCustom. # noqa: E501
:type: str
"""
self._description = description
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ApiPipelineCustom, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApiPipelineCustom):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"82406273+mlx-bot@users.noreply.github.com"
] |
82406273+mlx-bot@users.noreply.github.com
|
b42cf163d4f8bdc75575b3da040f41dc5fc3b13b
|
0e478f3d8b6c323c093455428c9094c45de13bac
|
/src/OTLMOW/OTLModel/Datatypes/KlVerlichtingstoestelMerk.py
|
8c7856a23aba0c4fd7334f2b6b1eb8d3dc506f0a
|
[
"MIT"
] |
permissive
|
davidvlaminck/OTLMOW
|
c6eae90b2cab8a741271002cde454427ca8b75ba
|
48f8c357c475da1d2a1bc7820556843d4b37838d
|
refs/heads/main
| 2023-01-12T05:08:40.442734
| 2023-01-10T15:26:39
| 2023-01-10T15:26:39
| 432,681,113
| 3
| 1
|
MIT
| 2022-06-20T20:36:00
| 2021-11-28T10:28:24
|
Python
|
UTF-8
|
Python
| false
| false
| 3,302
|
py
|
# coding=utf-8
import random
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlVerlichtingstoestelMerk(KeuzelijstField):
"""Het merk van het verlichtingstoestel."""
naam = 'KlVerlichtingstoestelMerk'
label = 'Verlichtingstoestel merk'
objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#KlVerlichtingstoestelMerk'
definition = 'Het merk van het verlichtingstoestel.'
status = 'ingebruik'
codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlVerlichtingstoestelMerk'
options = {
'ARC': KeuzelijstWaarde(invulwaarde='ARC',
label='ARC',
status='uitgebruik',
definitie='ARC',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelMerk/ARC'),
'HCI-TS': KeuzelijstWaarde(invulwaarde='HCI-TS',
label='HCI-TS',
status='uitgebruik',
definitie='HCI-TS',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelMerk/HCI-TS'),
'Philips': KeuzelijstWaarde(invulwaarde='Philips',
label='Philips',
status='ingebruik',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelMerk/Philips'),
'Rombalux': KeuzelijstWaarde(invulwaarde='Rombalux',
label='Rombalux',
status='uitgebruik',
definitie='Rombalux',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelMerk/Rombalux'),
'Schreder': KeuzelijstWaarde(invulwaarde='Schreder',
label='Schreder',
status='ingebruik',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelMerk/Schreder'),
'andere': KeuzelijstWaarde(invulwaarde='andere',
label='andere',
status='ingebruik',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelMerk/andere'),
'lightwell': KeuzelijstWaarde(invulwaarde='lightwell',
label='Lightwell',
status='ingebruik',
definitie='Lightwell',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelMerk/lightwell')
}
@classmethod
def create_dummy_data(cls):
return random.choice(list(map(lambda x: x.invulwaarde,
filter(lambda option: option.status == 'ingebruik', cls.options.values()))))
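# Hedged usage sketch (illustration only): create_dummy_data draws a random
# 'invulwaarde' from the options whose status is 'ingebruik', so here it can
# only return 'Philips', 'Schreder', 'andere' or 'lightwell'.
# Example: KlVerlichtingstoestelMerk.create_dummy_data()  # -> e.g. 'Philips'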
|
[
"david.vlaminck@mow.vlaanderen.be"
] |
david.vlaminck@mow.vlaanderen.be
|
9a61cf0c3df886f81438338b21a69343804579e7
|
b9e0381bed124e6f4aaf09378a91d6f7d99ca9ae
|
/setup.py
|
eab11340a28c2b7281962b205451fa0a5cf4b9cd
|
[] |
no_license
|
fanstatic/js.jquery_elastic
|
b9d944921d4ff92fac69b2154b0c477835645c56
|
8a25992b188ce3c6dbf2103b6e8863c9286858b0
|
refs/heads/master
| 2020-08-06T02:26:16.335518
| 2011-11-02T14:35:04
| 2011-11-02T14:35:04
| 212,799,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,276
|
py
|
from setuptools import setup, find_packages
import os
# The version of the wrapped library is the starting point for the
# version number of the python package.
# In bugfix releases of the python package, add a '-' suffix and an
# incrementing integer.
# For example, a packaging bugfix release version 1.4.4 of the
# js.jquery package would be version 1.4.4-1 .
version = '1.6.12dev'
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
long_description = (
read('README.txt')
+ '\n' +
read('js', 'jquery_elastic', 'test_jquery_elastic.txt')
+ '\n' +
read('CHANGES.txt'))
setup(
name='js.jquery_elastic',
version=version,
description="Fanstatic packaging of jQuery Elastic",
long_description=long_description,
classifiers=[],
keywords='',
author='Fanstatic Developers',
author_email='fanstatic@googlegroups.com',
license='BSD',
packages=find_packages(),namespace_packages=['js'],
include_package_data=True,
zip_safe=False,
install_requires=[
'fanstatic',
'js.jquery',
'setuptools',
],
entry_points={
'fanstatic.libraries': [
'jquery_elastic = js.jquery_elastic:library',
],
},
)
|
[
"devnull@localhost"
] |
devnull@localhost
|
b0c53adb2d8f14b353316719929f4dd0bd405eae
|
ed381eac1c805be20af9c28e0a1a319c9a71352d
|
/client/tests/utils/formatting_test.py
|
838e65f0cf1944dd082ae1cfc45af15b80c97fe7
|
[
"Apache-2.0"
] |
permissive
|
hpec/ok
|
2bde333bd47e2b457fbb6da020c82d4bb99d9455
|
492a077a06a36644177092f26c3a003fd86c2595
|
refs/heads/master
| 2020-12-29T18:48:03.884930
| 2014-11-26T10:39:28
| 2014-11-26T10:39:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,321
|
py
|
from client.utils import formatting
import unittest
class PrettyJsonTest(unittest.TestCase):
def assertFormat(self, expect, json):
self.assertEqual(formatting.dedent(expect),
formatting.prettyjson(json))
def testInt(self):
self.assertFormat('42', 42)
def testFloat(self):
self.assertFormat('3.14', 3.14)
def testString_singleLine(self):
self.assertFormat("'hello world'", 'hello world')
def testString_multipleLines(self):
self.assertFormat("""
r\"\"\"
hello
world
\"\"\"
""", "hello\nworld")
def testString_multipleLinesSurroundingNewlines(self):
self.assertFormat("""
r\"\"\"
hello
world
\"\"\"
""", "\nhello\nworld\n")
def testString_rawStringSingleLine(self):
self.assertFormat(r"""
'hello \\ there'
""", r"hello \ there")
def testString_rawStringMultiLine(self):
self.assertFormat("""
r\"\"\"
hello \\
there
\"\"\"
""", r"""
hello \
there
""")
def testList_onlyPrimitives(self):
self.assertFormat("""
[
42,
3.14,
'hello world',
r\"\"\"
hello
world
\"\"\"
]
""", [
42,
3.14,
'hello world',
'hello\nworld'
])
def testList_nestedLists(self):
self.assertFormat("""
[
42,
[
3.14
]
]
""", [
42,
[3.14]
])
def testDict_onlyPrimitives(self):
self.assertFormat("""
{
'answer': 'hello world',
'multi': r\"\"\"
answer
here
\"\"\",
'secret': 42
}
""", {
'answer': 'hello world',
'multi': 'answer\nhere',
'secret': 42,
})
def testDict_nestedDicts(self):
self.assertFormat("""
{
'answer': {
'test': 42
},
'solution': 3.14
}
""", {
'answer': {
'test': 42
},
'solution': 3.14,
})
|
[
"albert12132@gmail.com"
] |
albert12132@gmail.com
|
db58d82a549ed036189d914f201f0a265b1fdc64
|
6f00947901426274cb895a21ad5f83c0197b50ba
|
/e2e/scripts/st_arrow_add_rows.py
|
e53ab48bc7665ab1864f15dc0b6e83ebde996bb3
|
[
"Apache-2.0"
] |
permissive
|
g147/streamlit
|
f19a796e1bbf83f939f1bcbd3e8f3fd6e64155bb
|
d54fab097caa3e6dc101eb930cddc0832e05dea9
|
refs/heads/develop
| 2023-06-18T02:21:22.112118
| 2021-07-10T09:22:27
| 2021-07-10T09:22:27
| 363,459,800
| 1
| 0
|
Apache-2.0
| 2021-07-10T09:22:43
| 2021-05-01T16:45:08
|
Python
|
UTF-8
|
Python
| false
| false
| 2,580
|
py
|
# Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import altair as alt
import numpy as np
import pandas as pd
import streamlit as st
df = pd.DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
table_element = st._arrow_table(df)
dataframe_element = st._arrow_dataframe(df)
chart_element_1 = st._arrow_line_chart()
chart_element_2 = st._arrow_line_chart(df)
# 4 identical charts, built in different ways.
vega_element_1 = st._arrow_vega_lite_chart(
df,
{
"mark": {"type": "line", "point": True},
"encoding": {
"x": {"field": "a", "type": "quantitative"},
"y": {"field": "b", "type": "quantitative"},
},
},
use_container_width=True,
)
vega_element_2 = st._arrow_vega_lite_chart(
{
"datasets": {"foo": df},
"data": {"name": "foo"},
"mark": {"type": "line", "point": True},
"encoding": {
"x": {"field": "a", "type": "quantitative"},
"y": {"field": "b", "type": "quantitative"},
},
},
use_container_width=True,
)
vega_element_3 = st._arrow_vega_lite_chart(
{
"datasets": {"foo": df},
"data": {"name": "foo"},
"mark": {"type": "line", "point": True},
"encoding": {
"x": {"field": "a", "type": "quantitative"},
"y": {"field": "b", "type": "quantitative"},
},
},
use_container_width=True,
)
altair_element = st._arrow_altair_chart(
alt.Chart(df).mark_line(point=True).encode(x="a", y="b").interactive(),
use_container_width=True,
)
table_element.arrow_add_rows(df)
dataframe_element.arrow_add_rows(df)
chart_element_1.arrow_add_rows(df)
chart_element_2.arrow_add_rows(df)
vega_element_1.arrow_add_rows(df)
vega_element_2.arrow_add_rows(df)
vega_element_3.arrow_add_rows(foo=df)
altair_element.arrow_add_rows(df)
# Test that `add_rows` errors out when the dataframe dimensions don't match.
# This should show an error!
dataframe_element = st._arrow_dataframe(df)
dataframe_element.arrow_add_rows(np.abs(np.random.randn(1, 6)))
|
[
"noreply@github.com"
] |
g147.noreply@github.com
|
ac00e754fe6dc5a4a50fc219b4646135d18ffb8d
|
f93d36a8884a69b9f5189fee91af308bb60e2418
|
/review/amagnuss/coffee_points.py
|
de3e925855da41aba2ddb1f550e9cc5fda7d27d5
|
[] |
no_license
|
adamlubitz/pratt-savi-810-2018-10
|
1aa1d2c5032f3d68a70d85245da4bc287594e5c1
|
b66a699c7c1b37059f9d8cce4d81c0f89508424a
|
refs/heads/master
| 2022-01-08T09:43:10.909984
| 2018-12-08T20:30:21
| 2018-12-08T20:30:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,362
|
py
|
import arcpy
import pandas as pd
import json
arcpy.env.overwriteOutput = True
wd = 'Z:/Windows/GitHub/pratt-savi-810-2018-10/projects_scratch/amagnuss'
# Set the local variables
in_table = '{}/CoffeePoints_2.csv'.format(wd)
List_of_Countries = ['Costa Rica', 'Guatemala', 'Mexico', 'Nicaragua', 'Honduras', 'El Salvador']
# Set dataframe, save separate new csvs for CAM countries
df = pd.read_csv(in_table, encoding='latin-1')
df_CAM = df[df['Country'].isin(List_of_Countries)]
for i in df_CAM['Country'].unique():
    dfc = df_CAM[df_CAM['Country'] == i]  # mask built on df_CAM itself, not the full df
out_table = in_table.replace('.csv', '_{}.csv'.format(i))
dfc.to_csv(out_table, index=False, encoding='utf-8')
print(df_CAM.head(500))
# variables for xy plotting
for s in List_of_Countries:
in_table_xy = '{}/CoffeePoints_2_'.format(wd) + s + '.csv'
out_feature_class = 'coffee_points'
x_coords = "Longitude"
y_coords = "Latitude"
out_layer = 'coffeePoints' + s + '_CAM_lyr'
saved_layer = '{}/CoffeePoints_{}.csv'.format(wd, s)
# Set Arc workspace
arcpy.env.workspace = '{}/CoffeeData.gdb'.format(wd) # probably want to add create file geodatabase
arcpy.MakeXYEventLayer_management(
in_table_xy,
x_coords,
y_coords,
out_layer,
)
# reprojecting into NA Albers Equal Area
# first shapefiles
# Replace a layer/table view name with a path to a dataset (which can be a layer file)
# or create the layer/table view within the script
coffee_cfg = '{}/coffee_cfg.json'.format(wd)
def read_config(config_json):
with open(config_json) as f:
data = json.load(f)
return data
d = read_config(coffee_cfg)
print(
d['data']['out_cs']
)
proj_filename = '{}/projected/'.format(wd) + s + '_proj'
out_cs = d['data']['out_cs']
transform = ''
in_cs = d['data']['in_cs']
shp_preserve = 'NO_PRESERVE_SHAPE'
max_dev = ''
vertical = 'NO_VERTICAL'
arcpy.Project_management(
out_layer,
proj_filename,
out_cs,
transform,
in_cs,
shp_preserve,
max_dev,
vertical,
)
arcpy.CopyFeatures_management(out_layer, saved_layer) # One of these last few things may be unnecessary
# Save layer to file
arcpy.SaveToLayerFile_management(out_layer, saved_layer)
|
[
"daniel.martin.sheehan@gmail.com"
] |
daniel.martin.sheehan@gmail.com
|
1df8f7c1d3cab02ad4cbac3c7f8197322180a3e9
|
bd553d50a233c043878e4209e0376eab1eb7176f
|
/Examples/11SampleModules/Threading/threadingEx2.py
|
84b4b8614b152a038ab3a74216f7b1e9f774e443
|
[] |
no_license
|
jainendrak/python-training
|
3610434926774bca9c711a5e6c675d3ce28893ef
|
336221aab9aaa9624ad0f5ad046c44195105249e
|
refs/heads/master
| 2020-05-27T13:59:17.656236
| 2019-05-27T06:21:46
| 2019-05-27T06:21:46
| 188,648,975
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,200
|
py
|
import threading, time
exitFlag = 0
class myThread(threading.Thread):
def __init__(self, threadID, name, counter):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.counter = counter
def run(self):
print("Starting " + self.name)
print_time(self.name, self.counter, 5)
print("Exiting " + self.name)
def print_time(threadName, delay, counter):
while counter:
        if exitFlag:
            raise SystemExit()  # ends this thread; the original thread.exit() relied on an un-imported 'thread' module
time.sleep(delay)
print("%s: %s" % (threadName, time.ctime(time.time())))
counter -= 1
start=time.time()
# Create new threads
thread1 = myThread(1, "Thread-1", 1)
thread2 = myThread(2, "Thread-2", 2)
thread3 = myThread(3, "Thread-3", 1)
#setDaemon()
#thread1.setDaemon(True)
#thread2.setDaemon(True)
#thread3.setDaemon(True)
# Start new Threads
thread1.start()
thread2.start()
thread3.start()
while(threading.activeCount()>1):
#print("Number of active threads are: ",threading.activeCount())
time.sleep(1)
end=time.time()-start
print("Time taken: ",end)
print("Exiting ",threading.currentThread().getName())
|
[
"jainendra.kumar@oracle.com"
] |
jainendra.kumar@oracle.com
|
d7dfbc16b17661a8706b27969577d5d8b7cf03e0
|
1b16a85abcac93fdab96cb3d952505f39dcb98b0
|
/tests/testproject/multidb/tests/test_models.py
|
1c0c2da1a0b053684244c0c1498789dfdeb883f8
|
[
"BSD-3-Clause"
] |
permissive
|
playpauseandstop/tddspry
|
6bb4ef3d3c1fed100acd6b1cff0f632f264ce6dd
|
0a0f9242238d2d81a375dcb4b8c32b25612edf42
|
refs/heads/master
| 2016-09-05T18:50:47.687939
| 2011-03-01T15:01:10
| 2011-03-01T15:01:10
| 136,525
| 12
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,349
|
py
|
from tddspry.django import TestCase
from tddspry.django.helpers import EMAIL, PASSWORD, USERNAME
from django.contrib.auth.models import User
class TestModels(TestCase):
multidb = True
def setup(self):
self.model = User
self.manager = self.model.objects.using('legacy')
self.kwargs = {'username': USERNAME,
'password': PASSWORD,
'email': EMAIL}
self.sgrawk = {'username': USERNAME[::-1],
'password': PASSWORD[::-1],
'email': EMAIL[::-1]}
def test_using_keyword(self):
self.assert_count(self.model, 0)
self.assert_count(self.model, 0, using='legacy')
self.assert_create(self.model, using='legacy', **self.kwargs)
self.assert_count(self.model, 0)
self.assert_not_count(self.model, 0, using='legacy')
self.assert_count(self.model, 1, using='legacy')
for key, value in self.kwargs.items():
self.assert_not_read(self.model, **{key: value})
for key, value in self.kwargs.items():
self.assert_read(self.model, using='legacy', **{key: value})
try:
self.assert_update(self.model, **self.sgrawk)
except AssertionError:
pass
else:
assert False, 'Any %r model should be exist in default ' \
'database.' % self.model
self.assert_update(self.model, using='legacy', **self.sgrawk)
self.assert_not_read(self.model, **self.kwargs)
self.assert_not_read(self.model, using='legacy', **self.kwargs)
self.assert_delete(self.model)
self.assert_delete(self.model, using='legacy')
self.assert_count(self.model, 0)
self.assert_count(self.model, 0, using='legacy')
def test_using_manager(self):
self.assert_count(self.manager, 0)
self.assert_create(self.manager, **self.kwargs)
self.assert_not_count(self.manager, 0)
self.assert_count(self.manager, 1)
for key, value in self.kwargs.items():
self.assert_read(self.manager, **{key: value})
self.assert_update(self.manager, **self.sgrawk)
self.assert_not_read(self.manager, **self.kwargs)
self.assert_delete(self.manager)
self.assert_count(self.manager, 0)
|
[
"playpauseandstop@gmail.com"
] |
playpauseandstop@gmail.com
|
0853dbf53c7d4fc01a324eb039cd10c345befae6
|
bdb0b0ee38078bc7c19ca9d953f9dfd222ea8479
|
/ansible/roles/hotspot/templates/boot.py
|
547f3a63a397f6a9406d2f256350432ecbaffb40
|
[] |
no_license
|
mrc-toader/setup
|
73b2bc69a69f5ba8adc51b2a85fac411bc21bb02
|
0ed8037f74a78f15c6ee138eb2e399e8771b5298
|
refs/heads/master
| 2021-01-21T07:30:04.181983
| 2017-08-29T11:38:04
| 2017-08-29T11:38:04
| 91,614,785
| 0
| 0
| null | 2017-05-17T19:54:52
| 2017-05-17T19:54:52
| null |
UTF-8
|
Python
| false
| false
| 3,524
|
py
|
#!/usr/bin/env python3
import sys
import re
from pathlib import Path
import subprocess
RUN_DIR = Path('/var/local/hotspot/run')
SUPERVISOR_DIR = Path('/etc/supervisor/conf.d')
HOSTAPD_CONFIG_TEMPLATE = """\
interface={interface}
ieee80211d=1
country_code={country_code}
ieee80211n=1
wmm_enabled=1
driver=nl80211
hw_mode=g
ssid={ssid}
channel={channel}
auth_algs=1
wpa=2
wpa_key_mgmt=WPA-PSK
rsn_pairwise=CCMP
wpa_passphrase={passphrase}
"""
HOSTAPD_SUPERVISOR_TEMPLATE = """\
[program:{program_name}]
command = /usr/sbin/hostapd -d {hostapd_conf_path}
autostart = false
startsecs = 3
"""
DNSMASQ_CONFIG_TEMPLATE = """\
interface=br0
bind-interfaces
domain-needed
bogus-priv
no-resolv
no-poll
no-hosts
server=8.8.8.8
address=/liquid/{liquid_address}
dhcp-range={dhcp_range},12h
"""
DNSMASQ_SUPERVISOR_TEMPLATE = """\
[program:dnsmasq]
command = /usr/sbin/dnsmasq --keep-in-foreground -C {dnsmasq_conf_path}
autostart = false
startsecs = 3
"""
def find_wireless_interfaces():
iwconfig = (
subprocess
.check_output(['iwconfig'], stderr=subprocess.DEVNULL)
.decode('latin1')
)
for line in iwconfig.splitlines():
        m = re.match(r'(\w+)', line)
if m:
yield m.group(1)
def boot_hostapd(interface):
hostapd_conf_path = RUN_DIR / 'hostapd-{}.conf'.format(interface)
supervisor_conf_path = SUPERVISOR_DIR / 'hostapd-{}.conf'.format(interface)
program_name = 'hostapd-' + interface
hostapd_conf = HOSTAPD_CONFIG_TEMPLATE.format(
interface=interface,
country_code='RO',
channel='6',
ssid='liquid',
passphrase='chocolate',
)
with hostapd_conf_path.open('wt', encoding='utf8') as f:
f.write(hostapd_conf)
supervisor_conf = HOSTAPD_SUPERVISOR_TEMPLATE.format(
program_name=program_name,
hostapd_conf_path=hostapd_conf_path,
)
with supervisor_conf_path.open('wt', encoding='utf8') as f:
f.write(supervisor_conf)
subprocess.check_call(['supervisorctl', 'update'])
subprocess.check_call(['supervisorctl', 'start', program_name])
subprocess.check_call(['brctl', 'addif', 'br0', interface])
def boot_dnsmasq(liquid_address, dhcp_range):
dnsmasq_conf_path = RUN_DIR / 'dnsmasq.conf'
supervisor_conf_path = SUPERVISOR_DIR / 'dnsmasq.conf'
dnsmasq_conf = DNSMASQ_CONFIG_TEMPLATE.format(
dhcp_range=dhcp_range,
liquid_address=liquid_address,
)
with dnsmasq_conf_path.open('wt', encoding='utf8') as f:
f.write(dnsmasq_conf)
supervisor_conf = DNSMASQ_SUPERVISOR_TEMPLATE.format(
dnsmasq_conf_path=dnsmasq_conf_path,
)
with supervisor_conf_path.open('wt', encoding='utf8') as f:
f.write(supervisor_conf)
subprocess.check_call(['supervisorctl', 'update'])
subprocess.check_call(['supervisorctl', 'start', 'dnsmasq'])
def main():
subprocess.check_call(['rm', '-rf', str(RUN_DIR)])
    # subprocess does not expand shell globs, so remove matching files explicitly
    for conf in SUPERVISOR_DIR.glob('hostapd-*.conf'):
        conf.unlink()
subprocess.check_call(['mkdir', '-p', str(RUN_DIR)])
subprocess.check_call(['brctl', 'addbr', 'br0'])
subprocess.check_call([
'ifconfig', 'br0',
'10.102.0.1', 'netmask', '255.255.255.0',
])
interfaces = list(find_wireless_interfaces())
if not interfaces:
print("no wireless interfaces found", file=sys.stderr)
return
boot_hostapd(interfaces[0])
boot_dnsmasq('10.102.0.1', '10.102.0.100,10.102.0.200')
if __name__ == '__main__':
main()
|
[
"alex@grep.ro"
] |
alex@grep.ro
|
5620b8d8fc82aca9f6796b032371851cc93269f0
|
96bfcec63be5e8f3a2edcd8f8395d309ff61aeaa
|
/ex20 functions and files/ex20.py
|
60953e106f5f1a11cb71c8c67f4e7fc2741462f9
|
[] |
no_license
|
KlausQIU/learnpythonhardway
|
cbc3ba64c4a70fafe1b0234196023b44eff2a24a
|
614e09370c654f79cbc408d09eaa8a47d1b4e637
|
refs/heads/master
| 2021-01-17T06:07:15.735307
| 2016-07-03T12:13:26
| 2016-07-03T12:13:26
| 52,967,154
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 597
|
py
|
# -*- coding: utf-8 -*-
from sys import argv
script, input_file = argv
def print_all(f):
print f.read()
def rewind(f):
f.seek(0)
def print_a_line(line_count,f):
print line_count,f.readline()
current_file = open(input_file)
print "First,let's print the whole file:\n"
print_all(current_file)
print "Now let's rewind,kind of like a tape."
rewind(current_file)
print "let's print three lines"
current_line = 1
print_a_line(current_line,current_file)
current_line = current_line + 1
print_a_line(current_line,current_file)
current_line = current_line + 1
print_a_line(current_line,current_file)
|
[
"moon.qiu198909@hotmail.com"
] |
moon.qiu198909@hotmail.com
|
e78b4647681a75c4ad6ad69ed99793457147a7a0
|
08cfc4fb5f0d2f11e4e226f12520a17c5160f0a2
|
/kubernetes/client/models/v1_namespace_list.py
|
38055a9cd702e920afd279ea4003a19544378baf
|
[
"Apache-2.0"
] |
permissive
|
ex3cv/client-python
|
5c6ee93dff2424828d064b5a2cdbed3f80b74868
|
2c0bed9c4f653472289324914a8f0ad4cbb3a1cb
|
refs/heads/master
| 2021-07-12T13:37:26.049372
| 2017-10-16T20:19:01
| 2017-10-16T20:19:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,689
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1NamespaceList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'items': 'list[V1Namespace]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None):
"""
V1NamespaceList - a model defined in Swagger
"""
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""
Gets the api_version of this V1NamespaceList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1NamespaceList.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1NamespaceList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1NamespaceList.
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""
Gets the items of this V1NamespaceList.
Items is the list of Namespace objects in the list. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
:return: The items of this V1NamespaceList.
:rtype: list[V1Namespace]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this V1NamespaceList.
Items is the list of Namespace objects in the list. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
:param items: The items of this V1NamespaceList.
:type: list[V1Namespace]
"""
if items is None:
raise ValueError("Invalid value for `items`, must not be `None`")
self._items = items
@property
def kind(self):
"""
Gets the kind of this V1NamespaceList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1NamespaceList.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1NamespaceList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1NamespaceList.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1NamespaceList.
Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The metadata of this V1NamespaceList.
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1NamespaceList.
Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param metadata: The metadata of this V1NamespaceList.
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1NamespaceList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
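# A minimal usage sketch, assuming the six/pprint imports at the top of this
# generated module:
if __name__ == '__main__':
    demo = V1NamespaceList(api_version='v1', kind='NamespaceList', items=[])
    print(demo.to_str())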
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
56f0a368b55894c38b8c997a2f1e18310ad58685
|
f048d9a7a7464461ab81086e2f0dd0e4568364ad
|
/scripts/python/decompress-all-strokes.py
|
a3c4630986b073876f4ce9cf37326dfdfab5a5f6
|
[] |
no_license
|
JasonGross/characters
|
66eb547606a6d3d110e7a8270ebeb8c563d2877c
|
b3213fa743789531c103134e087f3110cca005bc
|
refs/heads/master
| 2021-06-03T02:10:32.981640
| 2020-09-04T17:25:14
| 2020-09-04T17:25:14
| 912,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
#!/usr/bin/python
# Filename: decompress-all-strokes.py
from stroke_compression import decompress_all_strokes_in_current_directory
if __name__ == '__main__':
decompress_all_strokes_in_current_directory()
|
[
"jgross@mit.edu"
] |
jgross@mit.edu
|
492b95d732ad5a91cd8271b3111eee8585f24ded
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/binaryTree_20200623143454.py
|
3cd8c4c57c7767dc31c557b3ff860968148374d2
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,213
|
py
|
# define node class
class Node(object):
# constructor
def __init__(self,value):
self.value = value
self.left = None
self.right = None
# define binary tree class
class BinaryTree(object):
def __init__(self,root):
# converting data
self.root = Node(root)
    def print_tree(self, traversal_type):
        if traversal_type == "preorder":
            return self.preorder_print(self.root, "")
        elif traversal_type == "inorder":
            return self.inorder_print(self.root, "")
        elif traversal_type == "postorder":
            return self.postorder_print(self.root, "")
        else:
            print("Traversal type " + str(traversal_type) + " is not supported.")
            return False
# root -->left--->right(preorder)
def preorder_print(self,start,traversal):
if start:
traversal += (str(start.value) + "-")
# calling the function recursively
traversal = self.preorder_print(start.left,traversal)
traversal = self.preorder_print(start.right,traversal)
return traversal
    # left -> root -> right
    def inorder_print(self, start, traversal):
        if start:
            traversal = self.inorder_print(start.left, traversal)
            traversal += (str(start.value) + "-")
            traversal = self.inorder_print(start.right, traversal)
        return traversal
# left ->right -> root
def postorder_print(self,start,traversal):
if start:
traversal = self.postorder_print(start.left,traversal)
traversal = self.postorder_print(start.right,traversal)
traversal +=(str(start.value) + "-" )
return traversal
# 1 is root
# creating left child
'''
        1
       / \
      2   3
     / \ / \
    4  5 6  7
'''
tree = BinaryTree(1)
tree.root.left = Node(2)
tree.root.right = Node(3)
tree.root.left.left = Node(4)
tree.root.left.right = Node(5)
tree.root.right.right = Node(7)
tree.root.right.left = Node(6)
print(tree.print_tree("preorder"))
print(tree.print_tree("inorder"))
print(tree.print_tree("postorder"))
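# Expected output:
#   1-2-4-5-3-6-7-   (preorder)
#   4-2-5-1-6-3-7-   (inorder)
#   4-5-2-6-7-3-1-   (postorder)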
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
afc8f105c4cee0a4c52c266499b55b066ba80353
|
1092d65f98c695756d8d1697da3f8c5253c8922f
|
/navx/_impl/imuregisters.py
|
ef123235461f623c7fdf8ab90caf7f264437da9e
|
[
"MIT"
] |
permissive
|
M-Shadow/robotpy-navx
|
28830536a7a3c497ce75bd765d76d79de9f1ba5d
|
b59a1d0c38c73541c139023a4ac347cd69ed2bad
|
refs/heads/master
| 2020-08-20T11:54:53.221611
| 2019-10-18T13:36:16
| 2019-10-18T13:36:16
| 216,019,630
| 0
| 0
|
MIT
| 2019-10-18T12:31:23
| 2019-10-18T12:31:23
| null |
UTF-8
|
Python
| false
| false
| 7,949
|
py
|
# validated: 2017-02-19 DS fed66235acf0 java/navx/src/com/kauailabs/navx/IMURegisters.java
# ----------------------------------------------------------------------------
# Copyright (c) Kauai Labs 2015. All Rights Reserved.
#
# Created in support of Team 2465 (Kauaibots). Go Purple Wave!
#
# Open Source Software - may be modified and shared by FRC teams. Any
# modifications to this code must be accompanied by the \License.txt file
# in the root directory of the project
# ----------------------------------------------------------------------------
class IMURegisters:
# ********************************************
# Device Identification Registers
# ********************************************
NAVX_REG_WHOAMI = 0x00 # NAVX_MODEL_XXX
NAVX_REG_HW_REV = 0x01
NAVX_REG_FW_VER_MAJOR = 0x02
NAVX_REG_FW_VER_MINOR = 0x03
# Model types
NAVX_MODEL_NAVX_MXP = 0x32
@classmethod
def model_type(cls, whoami):
if whoami == cls.NAVX_MODEL_NAVX_MXP:
return "NavX MXP"
else:
return "unknown"
# ********************************************
# Status and Control Registers
# ********************************************
# Read-write
NAVX_REG_UPDATE_RATE_HZ = 0x04 # Range: 4 - 50 [unsigned byte]
# Read-only
# Accelerometer Full-Scale Range: in units of G [unsigned byte]
NAVX_REG_ACCEL_FSR_G = 0x05
# Gyro Full-Scale Range (Degrees/Sec): Range: 250, 500, 1000 or 2000 [unsigned short]
NAVX_REG_GYRO_FSR_DPS_L = 0x06 # Lower 8-bits of Gyro Full-Scale Range
NAVX_REG_GYRO_FSR_DPS_H = 0x07 # Upper 8-bits of Gyro Full-Scale Range
NAVX_REG_OP_STATUS = 0x08 # NAVX_OP_STATUS_XXX
NAVX_REG_CAL_STATUS = 0x09 # NAVX_CAL_STATUS_XXX
NAVX_REG_SELFTEST_STATUS = 0x0A # NAVX_SELFTEST_STATUS_XXX
NAVX_REG_CAPABILITY_FLAGS_L = 0x0B
NAVX_REG_CAPABILITY_FLAGS_H = 0x0C
# ********************************************
# Processed Data Registers
# ********************************************
NAVX_REG_SENSOR_STATUS_L = 0x10 # NAVX_SENSOR_STATUS_XXX
NAVX_REG_SENSOR_STATUS_H = 0x11
# Timestamp: [unsigned long]
NAVX_REG_TIMESTAMP_L_L = 0x12
NAVX_REG_TIMESTAMP_L_H = 0x13
NAVX_REG_TIMESTAMP_H_L = 0x14
NAVX_REG_TIMESTAMP_H_H = 0x15
# Yaw, Pitch, Roll: Range: -180.00 to 180.00 [signed hundredths]
# Compass Heading: Range: 0.00 to 360.00 [unsigned hundredths]
# Altitude in Meters: In units of meters [16:16]
NAVX_REG_YAW_L = 0x16 # Lower 8 bits of Yaw
NAVX_REG_YAW_H = 0x17 # Upper 8 bits of Yaw
NAVX_REG_ROLL_L = 0x18 # Lower 8 bits of Roll
NAVX_REG_ROLL_H = 0x19 # Upper 8 bits of Roll
NAVX_REG_PITCH_L = 0x1A # Lower 8 bits of Pitch
NAVX_REG_PITCH_H = 0x1B # Upper 8 bits of Pitch
NAVX_REG_HEADING_L = 0x1C # Lower 8 bits of Heading
NAVX_REG_HEADING_H = 0x1D # Upper 8 bits of Heading
    NAVX_REG_FUSED_HEADING_L = 0x1E # Lower 8 bits of Fused Heading
NAVX_REG_FUSED_HEADING_H = 0x1F # Upper 8 bits of Fused Heading
NAVX_REG_ALTITUDE_I_L = 0x20
NAVX_REG_ALTITUDE_I_H = 0x21
NAVX_REG_ALTITUDE_D_L = 0x22
NAVX_REG_ALTITUDE_D_H = 0x23
# World-frame Linear Acceleration: In units of +/- G * 1000 [signed thousandths]
NAVX_REG_LINEAR_ACC_X_L = 0x24 # Lower 8 bits of Linear Acceleration X
NAVX_REG_LINEAR_ACC_X_H = 0x25 # Upper 8 bits of Linear Acceleration X
NAVX_REG_LINEAR_ACC_Y_L = 0x26 # Lower 8 bits of Linear Acceleration Y
NAVX_REG_LINEAR_ACC_Y_H = 0x27 # Upper 8 bits of Linear Acceleration Y
NAVX_REG_LINEAR_ACC_Z_L = 0x28 # Lower 8 bits of Linear Acceleration Z
NAVX_REG_LINEAR_ACC_Z_H = 0x29 # Upper 8 bits of Linear Acceleration Z
# Quaternion: Range -1 to 1 [signed short ratio]
NAVX_REG_QUAT_W_L = 0x2A # Lower 8 bits of Quaternion W
NAVX_REG_QUAT_W_H = 0x2B # Upper 8 bits of Quaternion W
NAVX_REG_QUAT_X_L = 0x2C # Lower 8 bits of Quaternion X
NAVX_REG_QUAT_X_H = 0x2D # Upper 8 bits of Quaternion X
NAVX_REG_QUAT_Y_L = 0x2E # Lower 8 bits of Quaternion Y
NAVX_REG_QUAT_Y_H = 0x2F # Upper 8 bits of Quaternion Y
NAVX_REG_QUAT_Z_L = 0x30 # Lower 8 bits of Quaternion Z
NAVX_REG_QUAT_Z_H = 0x31 # Upper 8 bits of Quaternion Z
# ********************************************
# Raw Data Registers
# ********************************************
    # Sensor Die Temperature: Range +/- 150, In units of Centigrade * 100 [signed hundredths]
NAVX_REG_MPU_TEMP_C_L = 0x32 # Lower 8 bits of Temperature
NAVX_REG_MPU_TEMP_C_H = 0x33 # Upper 8 bits of Temperature
# Raw, Calibrated Angular Rotation, in device units. Value in DPS = units / GYRO_FSR_DPS [signed short]
NAVX_REG_GYRO_X_L = 0x34
NAVX_REG_GYRO_X_H = 0x35
NAVX_REG_GYRO_Y_L = 0x36
NAVX_REG_GYRO_Y_H = 0x37
NAVX_REG_GYRO_Z_L = 0x38
NAVX_REG_GYRO_Z_H = 0x39
# Raw, Calibrated, Acceleration Data, in device units. Value in G = units / ACCEL_FSR_G [signed short]
NAVX_REG_ACC_X_L = 0x3A
NAVX_REG_ACC_X_H = 0x3B
NAVX_REG_ACC_Y_L = 0x3C
NAVX_REG_ACC_Y_H = 0x3D
NAVX_REG_ACC_Z_L = 0x3E
NAVX_REG_ACC_Z_H = 0x3F
# Raw, Calibrated, Un-tilt corrected Magnetometer Data, in device units. 1 unit = 0.15 uTesla [signed short]
NAVX_REG_MAG_X_L = 0x40
NAVX_REG_MAG_X_H = 0x41
NAVX_REG_MAG_Y_L = 0x42
NAVX_REG_MAG_Y_H = 0x43
NAVX_REG_MAG_Z_L = 0x44
NAVX_REG_MAG_Z_H = 0x45
# Calibrated Pressure in millibars Valid Range: 10.00 Max: 1200.00 [16:16 float]
NAVX_REG_PRESSURE_IL = 0x46
NAVX_REG_PRESSURE_IH = 0x47
NAVX_REG_PRESSURE_DL = 0x48
NAVX_REG_PRESSURE_DH = 0x49
# Pressure Sensor Die Temperature: Range +/- 150.00C [signed hundredths]
NAVX_REG_PRESSURE_TEMP_L = 0x4A
NAVX_REG_PRESSURE_TEMP_H = 0x4B
# ********************************************
# Calibration Registers
# ********************************************
# Yaw Offset: Range -180.00 to 180.00 [signed hundredths]
NAVX_REG_YAW_OFFSET_L = 0x4C # Lower 8 bits of Yaw Offset
NAVX_REG_YAW_OFFSET_H = 0x4D # Upper 8 bits of Yaw Offset
# Quaternion Offset: Range: -1 to 1 [signed short ratio]
NAVX_REG_QUAT_OFFSET_W_L = 0x4E # Lower 8 bits of Quaternion W
NAVX_REG_QUAT_OFFSET_W_H = 0x4F # Upper 8 bits of Quaternion W
NAVX_REG_QUAT_OFFSET_X_L = 0x50 # Lower 8 bits of Quaternion X
NAVX_REG_QUAT_OFFSET_X_H = 0x51 # Upper 8 bits of Quaternion X
NAVX_REG_QUAT_OFFSET_Y_L = 0x52 # Lower 8 bits of Quaternion Y
NAVX_REG_QUAT_OFFSET_Y_H = 0x53 # Upper 8 bits of Quaternion Y
NAVX_REG_QUAT_OFFSET_Z_L = 0x54 # Lower 8 bits of Quaternion Z
NAVX_REG_QUAT_OFFSET_Z_H = 0x55 # Upper 8 bits of Quaternion Z
# ********************************************
# Integrated Data Registers
# ********************************************
# Integration Control (Write-Only)
NAVX_REG_INTEGRATION_CTL = 0x56
NAVX_REG_PAD_UNUSED = 0x57
# Velocity: Range -32768.9999 - 32767.9999 in units of Meters/Sec
NAVX_REG_VEL_X_I_L = 0x58
NAVX_REG_VEL_X_I_H = 0x59
NAVX_REG_VEL_X_D_L = 0x5A
NAVX_REG_VEL_X_D_H = 0x5B
NAVX_REG_VEL_Y_I_L = 0x5C
NAVX_REG_VEL_Y_I_H = 0x5D
NAVX_REG_VEL_Y_D_L = 0x5E
NAVX_REG_VEL_Y_D_H = 0x5F
NAVX_REG_VEL_Z_I_L = 0x60
NAVX_REG_VEL_Z_I_H = 0x61
NAVX_REG_VEL_Z_D_L = 0x62
NAVX_REG_VEL_Z_D_H = 0x63
# Displacement: Range -32768.9999 - 32767.9999 in units of Meters
NAVX_REG_DISP_X_I_L = 0x64
NAVX_REG_DISP_X_I_H = 0x65
NAVX_REG_DISP_X_D_L = 0x66
NAVX_REG_DISP_X_D_H = 0x67
NAVX_REG_DISP_Y_I_L = 0x68
NAVX_REG_DISP_Y_I_H = 0x69
NAVX_REG_DISP_Y_D_L = 0x6A
NAVX_REG_DISP_Y_D_H = 0x6B
NAVX_REG_DISP_Z_I_L = 0x6C
NAVX_REG_DISP_Z_I_H = 0x6D
NAVX_REG_DISP_Z_D_L = 0x6E
NAVX_REG_DISP_Z_D_H = 0x6F
NAVX_REG_LAST = NAVX_REG_DISP_Z_D_H
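# A minimal decoding sketch (not part of the register map itself): combine a
# low/high register pair into a signed 16-bit value and scale signed
# hundredths to a float. `data` is assumed to be a byte buffer indexed by
# register address, read starting at register 0x00.
def decode_signed_hundredths(data, low_reg, high_reg):
    raw = data[low_reg] | (data[high_reg] << 8)
    if raw & 0x8000:  # sign-extend the 16-bit two's-complement value
        raw -= 0x10000
    return raw / 100.0

# Example:
#   yaw_degrees = decode_signed_hundredths(data, IMURegisters.NAVX_REG_YAW_L,
#                                          IMURegisters.NAVX_REG_YAW_H)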
|
[
"dustin@virtualroadside.com"
] |
dustin@virtualroadside.com
|
e55393168675923e3d56012a13d15a16bc186733
|
9e99ae1258b6f8c6fe057bcd3671147419b37c19
|
/ocean_server/ocean/tests/test_tests.py
|
ef939506a6285ba376b3bd5884e7d9b5da61a696
|
[
"MIT"
] |
permissive
|
mplanchard/ocean
|
5a632eec23d62a2fcf6b7895e156ab5c1f97d9e4
|
1b5d3c59dcb5f3abb1b68a2fd24e7f8b4326dbae
|
refs/heads/master
| 2021-01-09T20:08:30.556620
| 2016-09-16T02:54:28
| 2016-09-16T02:54:28
| 59,874,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
"""
Tests to ensure the test-suite is behaving as expected
"""
# Standard library imports
import logging
# Third party imports
import pytest
# Local imports
log = logging.getLogger(__name__)
def test_instantiation():
"""Pass just to ensure conftest ran successfully"""
pass
|
[
"msplanchard@gmail.com"
] |
msplanchard@gmail.com
|
f43c916f1c236200da791d470826f8dc4c2e4ac4
|
82074ba616918ede605dec64b038546a7b07bd7d
|
/api/v1/quickbooks/filters.py
|
c0a52a34acd3a6b6d4266cee0564f1e6e3c12e95
|
[] |
no_license
|
chetankhopade/EmpowerRM
|
b7ab639eafdfa57c054a0cf9da15c3d4b90bbd66
|
8d968592f5e0d160c56f31a4870e79c30240b514
|
refs/heads/main
| 2023-07-05T03:20:13.820049
| 2021-08-26T11:56:28
| 2021-08-26T11:56:28
| 399,354,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
from rest_framework import filters
class QuickbooksConfigurationsFilterBackend(filters.BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
try:
return queryset.filter(token=request.META['HTTP_TOKEN'])
except Exception as ex:
print(ex.__str__())
return []
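# A minimal wiring sketch (the viewset and model names below are
# assumptions): attaching the backend restricts each request to rows whose
# token matches the request's HTTP "Token" header.
#
#   from rest_framework import viewsets
#
#   class QuickbooksConfigurationViewSet(viewsets.ModelViewSet):
#       queryset = QuickbooksConfiguration.objects.all()
#       serializer_class = QuickbooksConfigurationSerializer
#       filter_backends = [QuickbooksConfigurationsFilterBackend]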
|
[
"noreply@github.com"
] |
chetankhopade.noreply@github.com
|
beb5941d877b39696d1dcdf1f2dfead43fe319d0
|
43e3ef1149931691b47d7cabae52855cf03e470c
|
/Noah_WuKong/configs/wukong_vit_b/wukong_vit_b_f.py
|
62d5d65fb0a7ff6c1d812f2e16e8bccc620fcd62
|
[
"Apache-2.0"
] |
permissive
|
huawei-noah/Pretrained-Language-Model
|
eb40f66af2cbb55bee3bbf6576c4c00a8558f4df
|
099102cd332565aeeadc22cafad1d3b6cbe47626
|
refs/heads/master
| 2023-08-28T13:33:31.674801
| 2023-05-21T13:37:45
| 2023-05-21T13:37:45
| 225,393,289
| 2,994
| 671
| null | 2023-02-16T04:23:06
| 2019-12-02T14:26:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,060
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2022, Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
model = dict(
type='Wukong',
pretrained='',
embed_dim=256,
visual=dict(
type='VisionTransformer',
input_resolution=224,
layers=12,
width=768,
patch_size=32),
text=dict(
type='TextTransformer',
context_length=32,
vocab_size=21128,
width=512,
heads=8,
layers=12),
is_token_wise=True
)
|
[
"noreply@github.com"
] |
huawei-noah.noreply@github.com
|
c012c1e6ce81788da6fef188ab66828c1dd58901
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/dev/perf/DLRM/dlrm/data/factories.py
|
c75b9ea66aa251f7807e036f23db98977004eeeb
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 7,287
|
py
|
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from typing import Tuple, Optional, Callable, Dict
import torch
from torch.utils.data import Dataset, Sampler, RandomSampler
from dlrm.data.datasets import SyntheticDataset, ParametricDataset
from dlrm.data.defaults import TEST_MAPPING, TRAIN_MAPPING
from dlrm.data.feature_spec import FeatureSpec
from dlrm.data.samplers import RandomDistributedSampler
from dlrm.data.utils import collate_split_tensors
from dlrm.utils.distributed import is_distributed, get_rank
class DatasetFactory:
def __init__(self, flags, device_mapping: Optional[Dict] = None):
self._flags = flags
self._device_mapping = device_mapping
def create_collate_fn(self) -> Optional[Callable]:
raise NotImplementedError()
def create_datasets(self) -> Tuple[Dataset, Dataset]:
raise NotImplementedError()
def create_sampler(self, dataset: Dataset) -> Optional[Sampler]:
return RandomDistributedSampler(dataset) if is_distributed() else RandomSampler(dataset)
def create_data_loader(
self,
dataset,
collate_fn: Optional[Callable] = None,
sampler: Optional[Sampler] = None):
return torch.utils.data.DataLoader(
dataset, collate_fn=collate_fn, sampler=sampler, batch_size=None,
num_workers=0, pin_memory=False
)
class SyntheticGpuDatasetFactory(DatasetFactory):
def __init__(self, flags, local_numerical_features_num, local_categorical_feature_sizes):
self.local_numerical_features = local_numerical_features_num
self.local_categorical_features = local_categorical_feature_sizes
super().__init__(flags)
def create_collate_fn(self) -> Optional[Callable]:
return None
def create_sampler(self, dataset) -> Optional[Sampler]:
return None
def create_datasets(self) -> Tuple[Dataset, Dataset]:
flags = self._flags
dataset_train = SyntheticDataset(num_entries=flags.synthetic_dataset_num_entries,
batch_size=flags.batch_size,
numerical_features=self.local_numerical_features,
categorical_feature_sizes=self.local_categorical_features)
dataset_test = SyntheticDataset(num_entries=flags.synthetic_dataset_num_entries,
batch_size=flags.test_batch_size,
numerical_features=self.local_numerical_features,
categorical_feature_sizes=self.local_categorical_features)
return dataset_train, dataset_test
class ParametricDatasetFactory(DatasetFactory):
def __init__(self, flags, feature_spec: FeatureSpec, numerical_features_enabled, categorical_features_to_read):
super().__init__(flags)
self._base_device = flags.base_device
self._train_batch_size = flags.batch_size
self._test_batch_size = flags.test_batch_size
self._feature_spec = feature_spec
self._numerical_features_enabled = numerical_features_enabled
self._categorical_features_to_read = categorical_features_to_read
def create_collate_fn(self):
orig_stream = torch.cuda.current_stream() if self._base_device == 'cuda' else None
return functools.partial(
collate_split_tensors,
device=self._base_device,
orig_stream=orig_stream,
numerical_type=torch.float32
)
def create_datasets(self) -> Tuple[Dataset, Dataset]:
# prefetching is currently unsupported if using the batch-wise shuffle
prefetch_depth = 0 if self._flags.shuffle_batch_order else 10
dataset_train = ParametricDataset(
feature_spec=self._feature_spec,
mapping=TRAIN_MAPPING,
batch_size=self._train_batch_size,
numerical_features_enabled=self._numerical_features_enabled,
categorical_features_to_read=self._categorical_features_to_read,
prefetch_depth=prefetch_depth
)
dataset_test = ParametricDataset(
feature_spec=self._feature_spec,
mapping=TEST_MAPPING,
batch_size=self._test_batch_size,
numerical_features_enabled=self._numerical_features_enabled,
categorical_features_to_read=self._categorical_features_to_read,
prefetch_depth=prefetch_depth
)
return dataset_train, dataset_test
def create_dataset_factory(flags, feature_spec: FeatureSpec, device_mapping: Optional[dict] = None) -> DatasetFactory:
"""
By default each dataset can be used in single GPU or distributed setting - please keep that in mind when adding
new datasets. Distributed case requires selection of categorical features provided in `device_mapping`
(see `DatasetFactory#create_collate_fn`).
:param flags:
:param device_mapping: dict, information about model bottom mlp and embeddings devices assignment
:return:
"""
dataset_type = flags.dataset_type
num_numerical_features = feature_spec.get_number_of_numerical_features()
if is_distributed() or device_mapping:
assert device_mapping is not None, "Distributed dataset requires information about model device mapping."
rank = get_rank()
local_categorical_positions = device_mapping["embedding"][rank]
numerical_features_enabled = device_mapping["bottom_mlp"] == rank
else:
local_categorical_positions = list(range(len(feature_spec.get_categorical_feature_names())))
numerical_features_enabled = True
if dataset_type == "parametric":
local_categorical_names = feature_spec.cat_positions_to_names(local_categorical_positions)
return ParametricDatasetFactory(flags=flags, feature_spec=feature_spec,
numerical_features_enabled=numerical_features_enabled,
categorical_features_to_read=local_categorical_names
)
if dataset_type == "synthetic_gpu":
local_numerical_features = num_numerical_features if numerical_features_enabled else 0
world_categorical_sizes = feature_spec.get_categorical_sizes()
local_categorical_sizes = [world_categorical_sizes[i] for i in local_categorical_positions]
return SyntheticGpuDatasetFactory(flags, local_numerical_features_num=local_numerical_features,
local_categorical_feature_sizes=local_categorical_sizes)
raise NotImplementedError(f"unknown dataset type: {dataset_type}")
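# A minimal usage sketch (illustrative): `flags` is assumed to carry the
# attributes referenced above (dataset_type, base_device, batch_size,
# test_batch_size, ...).
#
#   factory = create_dataset_factory(flags, feature_spec, device_mapping)
#   train_ds, test_ds = factory.create_datasets()
#   train_loader = factory.create_data_loader(
#       train_ds,
#       collate_fn=factory.create_collate_fn(),
#       sampler=factory.create_sampler(train_ds))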
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
736fa545317132a04ed6e1c9b1c2549f0667c5f9
|
8c174ac4f94c17a3646912877af7670022591c4c
|
/caps.py
|
a4e233b341178d4b6a9ccce9f4ef5ed322ab15ca
|
[
"BSD-2-Clause"
] |
permissive
|
riceissa/ssg-riceissa.com
|
7903efd6aa30acb557085f9e46dff665712fe571
|
ffbf722750808c1e441e1622aae9ab4d9b648e65
|
refs/heads/master
| 2020-06-09T05:26:04.921677
| 2014-12-16T11:55:25
| 2014-12-16T11:55:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
#!/usr/bin/env python
"""
Pandoc filter to convert all regular text to uppercase.
Code, link URLs, etc. are not affected.
"""
from pandocfilters import toJSONFilter, Str
def caps(key, value, format, meta):
if key == 'Str':
return Str(value.upper())
if __name__ == "__main__":
toJSONFilter(caps)
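# Typical invocation: pandoc applies the filter to its JSON AST, e.g.
#   pandoc --filter ./caps.py input.md -o output.md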
|
[
"riceissa@gmail.com"
] |
riceissa@gmail.com
|
569610a840842a02f0c9eac499fd2e69cc9cab28
|
3a28b1a12d0710c06f6360381ad8be6cf3707907
|
/modular_model/triHPC/triHPCThermo/HPCAllTrays23CstmVapO2_px_N2.py
|
b873bdb22078a9d202a69bfd8d90b544817bd2cb
|
[] |
no_license
|
WheatZhang/DynamicModelling
|
6ce1d71d3b55176fd4d77a6aedbaf87e25ce4d02
|
ea099245135fe73e8c9590502b9c8b87768cb165
|
refs/heads/master
| 2020-06-15T14:12:50.373047
| 2019-07-05T01:37:06
| 2019-07-05T01:37:06
| 195,319,788
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
def VapO2_px_N2(P,T,x_N2):
    # normalized inputs; unused below because the truncated fit keeps only the constant term
    x = (P-5.50184878e+02)/3.71707400e-01
    y = (T-(-1.77763832e+02))/1.81029000e-02
    z = (x_N2-9.82420040e-01)/2.44481265e-03
    output = \
        1*1.57413228e+00
    y_O2 = output*1.00000000e+00+0.00000000e+00
    return y_O2
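# Example: the surrogate is constant for any inputs, e.g.
#   VapO2_px_N2(550.2, -177.76, 0.9824)  # -> 1.57413228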
|
[
"1052632241@qq.com"
] |
1052632241@qq.com
|
02e0a67931da1f4e65d1601fb492abf8a4ef6923
|
43193e52d33fd4d6f7288331a1690416186355c3
|
/7688.py
|
8a3d5528db7a863eff9fab2eadd31cc5057ce23a
|
[] |
no_license
|
pp724181/iot
|
4cb755e83d2f153c9c043c6f5123921fac67bffc
|
6e3caf198c05e2c89d8fb74e8ed4c3965bcc7344
|
refs/heads/master
| 2020-12-02T22:43:39.108708
| 2017-07-04T04:37:33
| 2017-07-04T04:37:33
| 96,173,377
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,641
|
py
|
import socket
import time
import requests
import os
import httplib, urllib
def post_to_thingspeak(payload):
headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain"}
not_connected = 1
while (not_connected):
try:
conn = httplib.HTTPConnection("api.thingspeak.com:80")
conn.connect()
not_connected = 0
except (httplib.HTTPException, socket.error) as ex:
print "Error: %s" % ex
time.sleep(10) # sleep 10 seconds
conn.request("POST", "/update", payload, headers)
response = conn.getresponse()
print( response.status, response.reason, payload, time.strftime("%c"))
data = response.read()
conn.close()
def wifi():
connect_state = 0
while connect_state==0 :
try:
r = requests.get("https://tw.yahoo.com/")
break
        except requests.ConnectionError, e:
            print "No wifi"
            time.sleep(5)  # avoid a busy retry loop while offline
def connect() :
while True :
error = 0
os.system("fswebcam -i 0 -d v4l2:/dev/video0 -r 1280x720 --no-banner -p YUYV --jpeg 95 --save /tmp/test.jpg")
print 'Start send image'
imgFile = open('/tmp/test.jpg','rb')
while True :
imgData = imgFile.readline(1024)
if not imgData:
break
try :
sockobj.send(imgData)
except :
error = 1
break
try :
sockobj.send("theend")
except :
print "Connect break!"
error = 1
sockobj.close()
time.sleep(5)
break
imgFile.close()
params = urllib.urlencode({'field1': data, 'key': thinkSpeakApiKey})
post_to_thingspeak(params)
if error == 0 :
print 'Transmit End'
time.sleep(0.1)
else :
print "Connect success"
print "wifi connecting....."
wifi()
print "wifi connecting success"
thinkSpeakApiKey = "JHXYQDR48WTQZUT0"
while True :
#host = '192.168.8.6'
host = '54.186.197.36'
port = 8080
address = (host, port)
sockobj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while True :
try :
sockobj.connect(address)
break
except :
print "Connect to server fail........"
print "Reconnect after 5 second"
time.sleep(5)
print "Connect to server success........"
connect()
|
[
"you@example.com"
] |
you@example.com
|
c686709aa4d71b4cdea22f5e27001c4f70956a2a
|
d95d910e7934c08e983cdb9e9dfa7e99e028e7b3
|
/tests/pypline/test_response_object.py
|
1c9eb28266723b2f5d963a179c853397430ebdef
|
[] |
no_license
|
lgiordani/wgp_demo
|
8f413e8df86db4e77b163f9255cb005e2b5971ca
|
c9fc9f2ac70c9d69c3824cdbe572d14a2d5ffbcc
|
refs/heads/master
| 2021-01-16T21:54:23.516420
| 2016-06-12T21:25:05
| 2016-06-12T21:25:05
| 60,966,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,937
|
py
|
import pytest
from wgp_demo.shared import request_object as plreq
from wgp_demo.shared import response_object as plres
@pytest.fixture
def response_value():
return {'key': ['value1', 'value2']}
@pytest.fixture
def response_type():
return 'ResponseError'
@pytest.fixture
def response_message():
return 'This is a response error'
def test_response_success_is_true(response_value):
assert bool(plres.ResponseSuccess(response_value)) is True
def test_response_failure_is_false(response_type, response_message):
assert bool(plres.ResponseFailure(response_type, response_message)) is False
def test_response_success_contains_value(response_value):
response = plres.ResponseSuccess(response_value)
assert response.value == response_value
def test_response_failure_has_type_and_message(response_type, response_message):
response = plres.ResponseFailure(response_type, response_message)
assert response.type == response_type
assert response.message == response_message
def test_response_failure_contains_value(response_type, response_message):
response = plres.ResponseFailure(response_type, response_message)
assert response.value == {'type': response_type, 'message': response_message}
def test_response_failure_from_invalid_request_object():
response = plres.ResponseFailure.build_from_invalid_request_object(plreq.InvalidRequestObject())
assert bool(response) is False
def test_response_failure_from_invalid_request_object_with_errors():
request_object = plreq.InvalidRequestObject()
request_object.add_error('path', 'Is mandatory')
request_object.add_error('path', "can't be blank")
response = plres.ResponseFailure.build_from_invalid_request_object(request_object)
assert bool(response) is False
assert response.type == plres.ResponseFailure.PARAMETERS_ERROR
assert response.message == "path: Is mandatory\npath: can't be blank"
|
[
"giordani.leonardo@gmail.com"
] |
giordani.leonardo@gmail.com
|
d333363f4726bb9a96ca411b48d69050bd460462
|
c96901e702b0c5f84170f95ed28263528a590e99
|
/trials/trial_18_plot_posteriors.py
|
9363e64045e55ea2d86fef043826de631626d51a
|
[] |
no_license
|
CovertLab/SingleCellSequencing
|
d31c1898f07707e524bff24e02448f3b9798476d
|
244dbe0757ffde813d683fa2f0fa68d125735685
|
refs/heads/master
| 2021-06-07T04:27:25.723887
| 2016-09-27T22:17:36
| 2016-09-27T22:17:36
| 27,788,049
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,059
|
py
|
"""
Analysis!
Cluster the time traces and then compare the gene expression for each cluster
"""
"""
Import python packages
"""
import HTSeq
import time
import collections
import itertools
import os
import subprocess
import datetime
import yaml
import fnmatch
import shlex
import numpy
import scipy
import scipy.io as sio
import pyensembl
import h5py
import pandas as pd
import numpy as np
import matplotlib as mpl
mpl.use("Agg")  # select a non-interactive backend before pyplot is imported
import cPickle as pickle
import matplotlib.pyplot as plt
import scipy.cluster.hierarchy as sch
import rpy2
import rpy2.robjects.numpy2ri
from rpy2.robjects.packages import importr
rpy2.robjects.numpy2ri.activate()
import rpy2.robjects as ro
from rpy2.robjects import pandas2ri
from rpy2.robjects import r
from dba import dba
from dba import align_to
from rpy2.robjects.vectors import DataFrame as RDataFrame
from rpy2 import rinterface
from rpy2.robjects import conversion
@conversion.py2ro.register(pd.DataFrame)
def py2ro_pandasdataframe(obj):
ri_dataf = conversion.py2ri(obj)
# cast down to an R list (goes through a different code path
# in the DataFrame constructor, avoiding `str(k)`)
ri_list = rinterface.SexpVector(ri_dataf)
return RDataFrame(ri_list)
mpl.rcParams['pdf.fonttype'] = 42
# mpl.style.use('ggplot')
R = rpy2.robjects.r
DTW = importr('dtw')
DTWclust = importr('dtwclust')
# Load data sets in R
r("""load("/scratch/PI/mcovert/dvanva/sequencing/all_cells_scde_fit_linear.RData")""")
r("""load("/scratch/PI/mcovert/dvanva/sequencing/counts_data.RData")""")
# Load pickle file with cell objects
direc = '/scratch/PI/mcovert/dvanva/sequencing/'
all_cell_file = 'all_cells_qc_complete.pkl'
all_cells_total = pickle.load(open(os.path.join(direc,all_cell_file)))
# Determine which genes to look at
inflammatory_genes = ["Cxcl3", "Cxcl2", "Lif", "Ccl4", "Csf3", "Il1f9", "Ccl3", "Ccl5", "Tnf", "Il1a", "Il1b", "Tnfsf9", "Ccl20", "Il1f6", "Il27", "Il6"]
regulatory_genes = ["Nlrp3", "Nfkbiz", "Tnfaip2", "Nfkbia", "Tnfaip3", "Nfatc1"]
metabolic_genes = ["Hmox", "Prdx1", "Hdc", "Ptgs2", "Irg1"]
other_genes = ["Plaur", "Sqstm1", "Clec4e", "Sdc4", "Procr", "Slpi", "Plk2", "Saa3", "Slc7a11", "Cish", "Gp49a", "Hcar2", "Gpr84", "Malt1"]
inflammatory_genes = ["Ccl3", "Ccl5"]  # override the longer list: only these two genes are analyzed below
"""
Analyze all the time points
"""
cluster_list = {}
cluster_name_dict = {'0':{}, '75':{}, '150':{}, '300':{}}
times_to_analyze = [0, 300]
for time_point in times_to_analyze:
print "Analyzing " + str(time_point) + " minute time point"
all_cells = []
cell_names = []
longest_time = 0
number_of_cells = 0
for cell in all_cells_total:
if cell.time_point == time_point and cell.condition == 'Stim':
number_of_cells += 1
longest_time = np.amax([longest_time, cell.NFkB_dynamics.shape[0]])
all_cells += [cell]
cell_names += [cell.id]
dynamics_matrix = np.zeros((number_of_cells,longest_time), dtype = 'float32')
"""
Fill up the dynamics heat map matrix
"""
cell_counter = 0
for cell in all_cells:
dynam = cell.NFkB_dynamics
dynamics_matrix[cell_counter,0:dynam.shape[0]] = dynam
cell_counter += 1
"""
Perform hierarchical clustering of the dynamics
"""
distance_matrix_dynamics = np.zeros((number_of_cells, number_of_cells))
if time_point != 0:
dynamics_load = np.load('/home/dvanva/SingleCellSequencing/' + str(time_point)+'_dynamics_distance_matrix_kshape.npz')
distance_matrix_dynamics = dynamics_load["distance_matrix"]
Y_dynamics = sch.linkage(distance_matrix_dynamics, method = 'ward')
ind_dynamics = sch.fcluster(Y_dynamics,0.5*np.amax(Y_dynamics[:,2]),'distance')
if time_point == 0:
cluster_list[str(time_point)] = np.arange(1,2)
else:
cluster_list[str(time_point)] = np.arange(np.amin(ind_dynamics), np.amax(ind_dynamics)+1)
if time_point == 0:
for j in xrange(number_of_cells):
all_cells[j].clusterID = 1
else:
for j in xrange(number_of_cells):
all_cells[j].clusterID = ind_dynamics[j]
cluster_dict = {}
for cell in all_cells:
cluster_dict[cell.id] = str(cell.clusterID)
for cluster in cluster_list[str(time_point)]:
cluster_name_dict[str(time_point)][str(cluster)] = []
for cell in all_cells:
if cell.clusterID == cluster:
cluster_name_dict[str(time_point)][str(cluster)] += [cell.id]
"""
Compute posterior FPM distribution for a given gene
"""
for gene in inflammatory_genes:
gene_name = """'""" + gene + """'"""
scde = importr("scde")
r("o.prior = scde.expression.prior(models = o.ifm, counts = counts_data_int, length.out = 400, max.value = 10, show.plot = FALSE )")
r("""gene_counts = counts_data_int[c(""" + gene_name + ""","mt-Atp8"),]""")
fpm_list = []
jp_list = []
for cluster in cluster_list[str(time_point)]:
list_of_cells_r = ro.vectors.StrVector(cluster_name_dict[str(time_point)][str(cluster)])
r("list_of_cells = " + list_of_cells_r.r_repr())
r("""joint_posterior = scde.posteriors(models = o.ifm[list_of_cells,], gene_counts, o.prior, n.cores = 4)""")
r("jp = joint_posterior[" + gene_name + ",]")
fpms = ro.r("colnames(joint_posterior)")
fpms = np.float32(pandas2ri.ri2py(fpms))
jp = ro.r("jp")
jp = np.float32(pandas2ri.ri2py(jp))
fpm_list += [fpms]
jp_list += [jp]
"""
Plot posteriors
"""
colors = ['g', 'r', 'b', 'k']
plt.clf()
max_jp = np.amax(jp_list[0])
for j in xrange(len(fpm_list)):
fpm = fpm_list[j]
fpm_log2 = np.log2(fpm + 1e-50)
jp = jp_list[j]
max_jp = np.maximum(max_jp, np.amax(jp))
plt.plot(fpm_log2, jp, color = colors[j], linewidth = 2, label = 'Cluster ' + str(j+1))
plt.xlabel('log2(FPM)', fontsize = 16)
plt.ylabel('Probability density', fontsize = 16)
plt.title(gene + " " + str(time_point) + " minutes", fontsize = 16)
plt.xlim([0,30])
plt.xticks([0,10,20,30], fontsize = 16)
plt.ylim([0, 1.05*max_jp])
plt.yticks([0, 1.05*max_jp], fontsize = 16)
plt.tight_layout()
file_name = "trial_18_" + gene + "_" + str(time_point) + "min" + ".pdf"
plt.savefig("plots/" + file_name)
|
[
"vanvalen@gmail.com"
] |
vanvalen@gmail.com
|
810069b675c6a009e15639630c4742d213ce1a27
|
df1e54249446ba2327442e2dbb77df9931f4d039
|
/library/s2t/s2t_rule_loader.py
|
741236a71afedafd3d06f200926495393d1cf6b6
|
[
"Apache-2.0"
] |
permissive
|
tarsqi/ttk
|
8c90ee840606fb4c59b9652bd87a0995286f1c3d
|
085007047ab591426d5c08b123906c070deb6627
|
refs/heads/master
| 2021-07-12T06:56:19.924195
| 2021-03-02T22:05:39
| 2021-03-02T22:05:39
| 35,170,093
| 26
| 12
|
Apache-2.0
| 2021-03-02T22:05:39
| 2015-05-06T16:24:38
|
Python
|
UTF-8
|
Python
| false
| false
| 2,687
|
py
|
from __future__ import absolute_import
from __future__ import print_function
import os
import re
from io import open
TTK_ROOT = os.environ['TTK_ROOT']
S2T_RULES = os.path.join(TTK_ROOT, 'library', 's2t', 's2t_rules.txt')
re_ruleNum = re.compile(r'ruleNum=(\d+)')
re_event = re.compile(r'(event*)=(.*)')
re_subevent = re.compile(r'(subevent*)=(.*)')
re_reltype = re.compile(r'(.*)=(.*)')
re_attribute = re.compile(r'(.*)=(.*)')
class S2TRule(object):
"""Implements the S2T rule object. An S2T rule consists of an ID
number and a set of conditions including:
-- Optional Conditions: tense, aspect, reltype.
-- Mandatory Condition: relation (the reltype for the new TLINK)."""
# TODO: that is NOT a mandatory condition, change this so we talk about lhs
# and rhs or something like that
def __init__(self, ruleNum):
self.id = "%s" % (ruleNum)
self.attrs = {}
def set_attribute(self, attr, val):
self.attrs[attr] = val
    def get_attribute(self, attr):
        return self.attrs.get(attr)
def __str__(self):
return '<S2TRule ' + self.id + '>'
def pp(self):
print("<S2TRule %s>" % self.id)
for attr, val in self.attrs.items():
print(" %s=\"%s\"" % (attr, val))
def read_rules():
"""Read and return a list of all the rules in S2T_RULES."""
rules = []
current_rule = None
    rules_file = open(S2T_RULES, 'r')
    for line in rules_file.readlines():
# skip comments and empty lines
line = line.strip()
if line.startswith('#') or line == '':
continue
# find rule header
match = re_ruleNum.search(line)
if match:
if current_rule:
# store previous rule and reset it
rules.append(current_rule)
current_rule = None
            ruleNum = match.group(1)
current_rule = S2TRule(ruleNum)
continue
# find attributes
match = re_event.search(line)
if match:
(att, val) = match.group(1,2)
current_rule.set_attribute(att.strip(), val.strip())
continue
match = re_subevent.search(line)
if match:
(att, val) = match.group(1,2)
current_rule.set_attribute(att.strip(), val.strip())
continue
match = re_reltype.search(line)
if match:
(att, val) = match.group(1,2)
current_rule.set_attribute(att.strip(), val.strip())
continue
    rules_file.close()
    # do not forget the very last rule
    if current_rule:
        rules.append(current_rule)
#for rule in rules: rule.pp()
return rules
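# A minimal usage sketch, assuming TTK_ROOT points at a checkout that
# contains library/s2t/s2t_rules.txt:
#   rules = read_rules()
#   for rule in rules:
#       rule.pp()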
|
[
"marc@cs.brandeis.edu"
] |
marc@cs.brandeis.edu
|
99e8df6a45fa962e2aaf3878878bb8a0bd9af586
|
e97ba50f592186eae5976a5b7a5fef80866c3e75
|
/course materials/all_R_code/MVA-master/QID-928-MVAdisfbank/MVAdisfbank.py
|
7592d42a070ad2f3f34e78861b61842ebb9d451e
|
[] |
no_license
|
WenRu-Chen/Multivariate-Analysis
|
ed1abcf580dcbd7dfa23403b3fc2e1fe2c49fe9d
|
e64e803bd0e06e25d40333000f8010a053adc852
|
refs/heads/main
| 2023-04-19T22:07:20.699967
| 2021-04-26T04:00:37
| 2021-04-26T04:00:37
| 360,000,354
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,064
|
py
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from KDEpy import FFTKDE
x = pd.read_csv("bank2.dat", sep=r"\s+", header=None)
xg = x[:100]
xf = x[100:200]
mg = xg.mean(axis = 0)
mf = xf.mean(axis = 0)
m = (mf + mg)/2
w = 100 * (xg.cov() + xf.cov())
d = mg - mf
a = np.linalg.inv(w) @ d
yg = (xg - np.array([m]*100)) @ a
yf = (xf - np.array([m]*100)) @ a
xgtest = yg
sg = sum(xgtest < 0) # Number of misclassified genuine notes
xftest = yf # Number of misclassified forged notes
sf = sum(xftest > 0)
fg_x, fg_y = FFTKDE(bw="silverman", kernel='gaussian').fit(np.array(yg)).evaluate()
ff_x, ff_y = FFTKDE(bw="silverman", kernel='gaussian').fit(np.array(yf)).evaluate()
fig, ax = plt.subplots()
ax.plot(fg_x, fg_y, linestyle = "dashed")
ax.plot(ff_x, ff_y, c = "r")
ax.text(yf.mean()-0.03, 3.72, "Forged", color = "r")
ax.text(yg.mean()-0.03, 2.72, "Genuine", color = "blue")
plt.title("Densities of Projections of Swiss bank notes")
plt.ylabel("Densities of Projections")
plt.show()
|
[
"you@example.com"
] |
you@example.com
|
caf3407a0261bbbf378b0bca9df44c87ab857e50
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2232/60627/275376.py
|
71a6eb880690800f18b7520d0c68fb6597cabd11
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
n = int(input())
l = []
for i in range(n):
l.append(input())
if n == 5:
print('1\n2')
elif n == 33:
print('1\n1')
elif n == 13:
print('13\n13')
elif n == 10:
if l[0] == '2 3 4 5 6 7 8 9 10 0':
print('1\n0')
elif l[0] == '2 3 0':
print('1\n5')
elif l[0] == '2 3 4 5 0':
print('2\n2')
else:
print(l[0])
elif n == 50:
print('9\n9')
elif n == 99:
print('89\n89')
else:
print(n)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
5fe2c86cbe178e10f1070e2b10fd6af0f69ff26f
|
7dc05dc9ba548cc97ebe96ed1f0dab8dfe8d8b81
|
/tags/pida-0.3-beta/pida/core/boss.py
|
7002a7e7d91ed1cc0c2825d846067d8b12f0ebaa
|
[] |
no_license
|
BackupTheBerlios/pida-svn
|
b68da6689fa482a42f5dee93e2bcffb167a83b83
|
739147ed21a23cab23c2bba98f1c54108f8c2516
|
refs/heads/master
| 2020-05-31T17:28:47.927074
| 2006-05-18T21:42:32
| 2006-05-18T21:42:32
| 40,817,392
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,992
|
py
|
# -*- coding: utf-8 -*-
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
#Copyright (c) 2005 Ali Afshar aafshar@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import base
# Core components
import services
class ServiceNotFoundError(KeyError):
"""
This service is thrown when you try to get_service() and
the service does not exists.
"""
class boss(base.pidacomponent):
""" The object in charge of everything """
def __init__(self, application, env):
# Set the pidaobject base
base.pidaobject.boss = self
base.pidacomponent.__init__(self)
self.__application = application
self.__env = env
def start(self):
"""Start Pida."""
self.__services = services.service_manager()
self.__services.load_all()
self.__editor = self.get_service('editormanager')
self.__window = self.get_service('window')
self.__services.bind()
self.__services.reset()
        try:
            self.call_command('editormanager', 'start')
        except:
            self.log.warn('editor failed to start')
try:
self.call_command('terminal', 'execute_shell')
except:
self.log.warn('terminal emulator not configured correctly')
def reset(self):
"""Reset live configuration options."""
self.__services.reset()
def stop(self):
self.__services.stop()
self.__application.stop()
def call_command(self, servicename, commandname, **kw):
"""Call the named command with the keyword arguments."""
group = self.get_service(servicename)
if group:
return group.call(commandname=commandname, **kw)
else:
self.log.warn('Command not found: (%s, %s)' %
(servicename, commandname))
return
def option_value(self, groupname, name):
"""Get the option value for the grouped named option."""
return self.__config.get_value(groupname, name)
def get_service(self, name):
"""Get the named service."""
service = self.__services.get(name)
if service is None:
raise ServiceNotFoundError(name)
return service
def get_editor(self):
return self.__editor
def get_services(self):
return self.__services.__iter__()
services = property(get_services)
def get_service_displayname(self, servicename):
return self.__services.get_display_name(servicename)
def get_main_window(self):
return self.__window.view
def get_pida_home(self):
return self.__env.home_dir
pida_home = property(get_pida_home)
def get_version(self):
return self.__env.version
version = property(get_version)
def get_positional_args(self):
return self.__env.positional_args
positional_args = property(get_positional_args)
ServiceNotFoundError = ServiceNotFoundError
|
[
"aafshar@ef0b12da-61f9-0310-ba38-b2629ec279a7"
] |
aafshar@ef0b12da-61f9-0310-ba38-b2629ec279a7
|
032c8d0abf4ee63df3fb00a0259ae195fb0f375b
|
38c2f88ec0b3386b8156eee8dc6f92da6233f40a
|
/research/2019_05_04_resources_needed/src/main.py
|
b4b4143deed84748462c44c29e65bb3b0473135d
|
[
"Apache-2.0"
] |
permissive
|
mstechly/vqf
|
2516e576c90a6bd0d36bbc86c457dcafcaf17d10
|
01698abd2861e401488c7d8afd5848a7d1560201
|
refs/heads/master
| 2023-06-23T06:43:41.585715
| 2022-08-17T18:55:46
| 2022-08-17T18:55:46
| 181,274,943
| 47
| 14
|
Apache-2.0
| 2023-06-17T22:36:21
| 2019-04-14T07:34:24
|
Python
|
UTF-8
|
Python
| false
| false
| 3,335
|
py
|
import pdb
import matplotlib.pyplot as plt
import numpy as np
import time
import inspect, os, sys
# Uncomment if you want to import preprocessing from src directory
# You need to delete "preprocessing.py" file from this directory to make it work, though.
# file_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) # script directory
# project_dir = os.path.join(file_dir.split('vqf')[0], 'vqf')
# src_dir = os.path.join(project_dir, 'src')
# sys.path.append(src_dir)
from preprocessing import create_clauses, assess_number_of_unknowns
def main():
threshold = 1e5
primes = get_primes_lower_than_n(int(np.sqrt(threshold)))
primes = primes[1:]
qubits_required_no_preprocessing = []
qubits_required_with_preprocessing = []
initial_time = time.time()
# file_name = "preprocessing_full_results.csv"
# plot_name = "reprocessing_full_plot.png"
file_name = "preprocessing_no_z2_results.csv"
plot_name = "reprocessing_no_z2_plot.png"
for p in primes:
for q in primes:
if p < q:
continue
m = p * q
if m > threshold:
continue
start_time = time.time()
# p_dict, q_dict, z_dict, _ = create_clauses(m, p, q, apply_preprocessing=False, verbose=False)
# x, z = assess_number_of_unknowns(p_dict, q_dict, z_dict)
# qubits_required_no_preprocessing.append([m, x, z])
p_dict, q_dict, z_dict, _ = create_clauses(m, p, q, apply_preprocessing=True, verbose=False)
x, z = assess_number_of_unknowns(p_dict, q_dict, z_dict)
qubits_required_with_preprocessing.append([m, x, z])
end_time = time.time()
t = np.round(end_time - start_time, 3)
print(p, q, m, x, z, t, " ")#, end="\r")
np.savetxt(file_name, np.array(qubits_required_with_preprocessing), delimiter=",", fmt='%.d', header='m,unknowns,carry_bits', comments='')
qubits_required_no_preprocessing = np.genfromtxt('no_preprocessing', skip_header=1, delimiter=',')
# qubits_required_with_preprocessing = np.genfromtxt('preprocessing_no_z2_results', skip_header=1, delimiter=',')
print("Total time:", np.round((end_time - initial_time) / 60, 3), '[min]')
data_1 = np.array(qubits_required_no_preprocessing)
data_2 = np.array(qubits_required_with_preprocessing)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(data_1[:, 0], data_1[:, 1], label="No classical preprocessing", s=10)
ax.scatter(data_2[:, 0], data_2[:, 1], label="Classical preprocessing", s=10)
ax.set_xlabel("Biprime to be factored")
ax.set_ylabel("Number of qubit required")
ax.set_xscale('log')
plt.legend()
plt.savefig(plot_name)
plt.show()
def get_primes_lower_than_n(n):
# Source: https://hackernoon.com/prime-numbers-using-python-824ff4b3ea19
primes = []
for possiblePrime in range(2, n):
# Assume number is prime until shown it is not.
isPrime = True
for num in range(2, int(possiblePrime ** 0.5) + 1):
if possiblePrime % num == 0:
isPrime = False
break
if isPrime:
primes.append(possiblePrime)
return primes
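# Example: get_primes_lower_than_n(10) == [2, 3, 5, 7]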
if __name__ == '__main__':
main()
|
[
"michal.stechly@gmail.com"
] |
michal.stechly@gmail.com
|
98a44f3bcec55e72151ae205c176f96e0e5629d0
|
9ff696839d88998451f2cb2725a0051ef8642dc0
|
/home/migrations/0003_customtext_dsc.py
|
3d1aef55635a033098dd9cf67f0ed19cc6a9660e
|
[] |
no_license
|
crowdbotics-apps/karen-test-16760
|
f67aacf4d07d10c70c4edf77a428dd8e12b4acf7
|
02492b4531be9561f1a046176918560e248764df
|
refs/heads/master
| 2023-05-19T10:52:20.454231
| 2020-05-08T17:14:02
| 2020-05-08T17:14:02
| 262,379,844
| 0
| 0
| null | 2021-06-11T07:19:46
| 2020-05-08T16:58:28
|
Python
|
UTF-8
|
Python
| false
| false
| 390
|
py
|
# Generated by Django 2.2.12 on 2020-05-08 17:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0002_load_initial_data'),
]
operations = [
migrations.AddField(
model_name='customtext',
name='dsc',
field=models.TextField(blank=True, null=True),
),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
7bbb32f702829f2d59b344eaa78c8b4d0ca39bee
|
077e5ab67f2936b0aa531b8ee177ecf83a0a2e18
|
/学习/6、第六部分 - 类和OOP/27、更多实例/person.py
|
6e017f2de2ffafbd4cd188f7d42e79cd03844474
|
[] |
no_license
|
MKDevil/Python
|
43ef6ebcc6a800b09b4fb570ef1401add410c51a
|
17b8c0bdd2e5a534b89cdec0eb51bfcc17c91839
|
refs/heads/master
| 2022-02-02T08:34:06.622336
| 2022-01-28T08:52:25
| 2022-01-28T08:52:25
| 163,807,236
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,921
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from classtools import attrDisplay
class Person(attrDisplay):
"""普通员工"""
def __init__(self, name, job=None, pay=2500):
self.name = name
self.job = job
self.pay = pay
def lastName(self):
"""获取姓氏"""
return self.name.split()[-1]
def giveRaise(self, percent):
self.pay = round(self.pay * (1 + percent), 2)
# def __str__(self):
    #     return '[name:%+10s, job:%+10s, pay:%+10s]' % (self.name, self.job, self.pay)
class Manager(Person):
"""经理"""
def __init__(self, name, pay=4000):
# return super().__init__(name, 'mgr', pay)
Person.__init__(self, name, 'mgr', pay)
def giveRaise(self, percent, bonus=0.1):
# self.pay = round(self.pay * (1 + percent + bonus), 2)
        Person.giveRaise(self, percent + bonus)  # call the superclass method explicitly so changes there are picked up here too
'''
class Manager(object):
"""经理,使用复合对象"""
def __init__(self, name, pay):
self.person = Person(name, 'mgr', pay)
def giveRaise(self, percent, bonus=0.1):
self.person.giveRaise(percent + bonus)
def __getattr__(self, attr):
return getattr(self.person, attr)
def __str__(self):
return str(self.person)
'''
class Department(object):
"""部门,使用复合对象"""
def __init__(self, *args):
self.members = list(args)
def addMember(self, person):
self.members.append(person)
def giveRaises(self, percent):
for person in self.members:
person.giveRaise(percent)
def showAll(self):
for person in self.members:
print(person)
if __name__ == '__main__':
'''
    # Test the Person class
bob = Person('Bob Smith')
sue = Person('Sue Jones', 'dev', 3000)
print(bob)
print(sue)
sue.giveRaise(1 / 7)
print(sue)
print(sue.lastName())
    # Test the Manager class
tom = Manager('Tom Jones', 5000)
print(tom.lastName())
tom.giveRaise(0.1)
print(tom)
    # Polymorphism
for object in (bob, sue, tom):
object.giveRaise(0.1)
print(object)
    # Object composition
development = Department(sue, bob)
development.addMember(tom)
development.giveRaises(0.1)
development.showAll()
    # Introspection tools
print(bob.__class__)
print(bob.__class__.__name__)
print(list(bob.__dict__.keys()))
for key in bob.__dict__:
print(key, ' => ', bob.__dict__[key])
for key in bob.__dict__:
print(key, ' => ', getattr(bob, key))
'''
bob = Person('Bob Smith')
sue = Person('Sue Jones', job='dev', pay=3000)
print(bob)
print(sue)
print(bob.lastName(), sue.lastName())
bob.giveRaise(0.1)
print(bob)
tom = Manager('Tom Jones', 4500)
tom.giveRaise(0.1)
print(tom.lastName())
print(tom)
|
[
"MK_Devil@163.com"
] |
MK_Devil@163.com
|
ff3fa03a5a823bcdfbfd596ca244e41b606b7c82
|
32ba2c5e29ef79de1a2fc86102971e58f9530d9e
|
/baiduNLP.py
|
fe63b9695ad90f41eeee04163500ff79a52b7246
|
[] |
no_license
|
xiaogaogaoxiao/newspaper_sentiment_analysis
|
bacdbcc6b30ed55a9c14b06db90646ca82df7529
|
e045bec753355be604c78fb7a5909583a79641f0
|
refs/heads/master
| 2020-05-01T09:59:55.621236
| 2018-03-24T08:30:44
| 2018-03-24T08:30:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 889
|
py
|
# coding:gbk
import urllib, urllib2, sys
import requests
import ssl
import json
import time
def baidu(data):
url = 'https://aip.baidubce.com/rpc/2.0/nlp/v1/sentiment_classify?access_token=24.24caa83c34e5d01f2e5fb80752adae8c.2592000.1522201224.282335-10469707'
post_data = "{\"text\":\"" + data + "\"}"
request = urllib2.Request(url, post_data)
request.add_header('Content-Type', 'application/json')
try:
response = urllib2.urlopen(request)
content = response.read()
content2=json.loads(content.decode('gbk'))
if (content.find('\"error_msg\"') == -1):
content2=content2['items'][0]
return content2['positive_prob'],content2['negative_prob'],content2['confidence'],content2['sentiment']
else:
print content2
return 0,0,0,0
except:
return 0,0,0,0
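# A minimal call sketch (Python 2; assumes the access token embedded in the
# URL above is still valid):
#   pos, neg, confidence, sentiment = baidu("text to score")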
|
[
"noreply@github.com"
] |
xiaogaogaoxiao.noreply@github.com
|
b5092c07a1d16199df50b3411d1fa9fb343aa9d6
|
08a329d07172a384be41eb58a0586032b18787d2
|
/property5.py
|
6c70c6b519c8e644b2374747126a94e17fef837f
|
[] |
no_license
|
atsuhisa-i/Python_study1
|
9bc39d058fe8bdd00adb35324758ad8fa08f4ca1
|
439a654f09e81208658355d99c8ce1c3cd4bcc4e
|
refs/heads/main
| 2023-04-06T12:44:12.099067
| 2021-04-14T13:24:56
| 2021-04-14T13:24:56
| 348,309,405
| 0
| 0
| null | 2021-04-14T13:24:57
| 2021-03-16T10:45:48
|
Python
|
UTF-8
|
Python
| false
| false
| 443
|
py
|
class Item:
def __init__(self, name, price):
self.__name = name
self.__price = price
@property
def name(self):
return self.__name
@property
def price(self):
return self.__price
@price.setter
def price(self, value):
self.__price = max(value, 0)
x = Item('burger', 100)
print(x.name, x.price)
x.price = 110
print(x.name, x.price)
x.price = -100
print(x.name, x.price)
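# Expected output:
#   burger 100
#   burger 110
#   burger 0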
|
[
"atsuhisa.1124@gmail.com"
] |
atsuhisa.1124@gmail.com
|
2965894bc962e938d889e44a35a394f2047b74ea
|
ba56c31788b3fed66bedb7489f21277f37999fa2
|
/apphosting/sandbox/main.py
|
290ff8fc0739fd29e8c7a3c42f6ee93ee0c5b5ca
|
[] |
no_license
|
tokibito/wsgi-apphosting
|
7f3df53fc1a9b979ff27f0eb1bbe3bc6b06fc55b
|
d21b53295f12a6b90960a7841ec3b4d1057d0162
|
refs/heads/master
| 2020-07-25T07:38:40.059563
| 2010-06-14T14:42:52
| 2010-06-14T14:42:52
| 208,217,512
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,615
|
py
|
#coding:utf-8
# Application runner
import sys
from datetime import datetime
from apphosting import const
from apphosting.sandbox import utils
class Runner(object):
"""
    The runner is held in the process pool.
    One runner per process.
    TODO: the same runner may be started by multiple processes.
    `provider` is the module that supplies the application.
"""
def __init__(self, name, provider, server_config, pool_conn, runner_conn):
self.provider = utils.import_module(provider)
self._server_config = server_config
self._application = self.provider.get_application(name, self._server_config)
self._pool_conn = pool_conn
self._runner_conn = runner_conn
self.proc = None
self.suspended = False
self.ctime = datetime.now()
self.utime = datetime.now()
self.processed = 0
def __call__(self):
while not self.suspended:
            # wait for the next environ payload
environ = self._runner_conn.recv()
signal = environ.get('RUNNER_SIGNAL')
            # stop signal
if signal == const.RUNNER_SIGNAL_KILL:
self.suspended = True
continue
            # info request
elif signal == const.RUNNER_SIGNAL_INFO:
self._runner_conn.send({
'ctime': self.ctime,
'utime': self.utime,
'processed': self.processed
})
continue
            # run the application
status, headers, resp = self.main(environ)
            # send back the start_response result
self._runner_conn.send([status, headers])
self._runner_conn.send(resp)
            # count processed requests
self.processed += 1
            # record the time of the last run
self.utime = datetime.now()
        # close the pipe once suspended
self._runner_conn.close()
sys.exit(0)
def main(self, environ):
"""
        Main handler.
        Returns the result only after the application has finished running completely.
"""
start_info = {
'status': '',
'headers': ()
}
def _start_response(status, headers, exc_info=None):
start_info['status'] = status
start_info['headers'] = headers
response = self._application(environ, _start_response)
return start_info['status'], start_info['headers'], ''.join(response)
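# A minimal wiring sketch (illustrative): the connection pair is assumed to
# come from multiprocessing.Pipe, with the pool keeping `pool_conn` and the
# runner process looping over `runner_conn`.
#
#   import multiprocessing
#   pool_conn, runner_conn = multiprocessing.Pipe()
#   runner = Runner('myapp', 'myapp.provider', server_config,
#                   pool_conn, runner_conn)
#   proc = multiprocessing.Process(target=runner)
#   proc.start()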
|
[
"xxshss@yahoo.co.jp"
] |
xxshss@yahoo.co.jp
|
ffb15c04da0ece2afe9237cfc2c8992137f4acfe
|
08f5dd97433ce84868dbd95020e49f795e8e3f42
|
/website/migrations/0017_auto_20151101_1225.py
|
d4d3154f0a3b3d8fd27409cc3b2843b8a8424e58
|
[] |
no_license
|
katur/forthebirds
|
f76e9d78f8b71f5cb13f22f3c417e737f6048896
|
2118fabebd8780cd3151f5ddd88245de402590e9
|
refs/heads/master
| 2023-08-08T18:57:55.722516
| 2023-03-28T03:04:19
| 2023-03-28T03:04:19
| 22,771,365
| 2
| 1
| null | 2023-07-25T21:23:49
| 2014-08-08T20:56:20
|
Python
|
UTF-8
|
Python
| false
| false
| 563
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('website', '0016_auto_20151018_1630'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='main_photo',
field=models.ForeignKey(related_name='main_uploaded_photo', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='website.UploadedImage', null=True),
),
]
|
[
"katherine.erickson@gmail.com"
] |
katherine.erickson@gmail.com
|
9b2f77944f2f030b061978095c6b458d55c249c3
|
185cbe28ae1950866905ddb3b53a3ca325f8fc98
|
/Project4_FortressMachine/KindFortressMachine/web/migrations/0006_auto_20180325_1621.py
|
866a5ee225ce6da1a7c0d0d9356e0cb8d7598c54
|
[] |
no_license
|
phully/PythonHomeWork
|
2997cb2017621116d2959a183fcb50c4d4ea3289
|
4d497a6261de17cc2fc058cea50e127e885e5095
|
refs/heads/master
| 2020-03-21T04:51:51.700197
| 2018-06-06T02:35:40
| 2018-06-06T02:35:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,395
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-03-25 16:21
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('web', '0005_auto_20180310_1956'),
]
operations = [
migrations.CreateModel(
name='MultiTask',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('task_type', models.SmallIntegerField(choices=[(0, 'cmd'), (1, 'file_transfer')], verbose_name='batch task type')),
                ('content', models.TextField(verbose_name='batch task content')),
                ('createtime', models.DateTimeField(auto_now_add=True, verbose_name='batch task creation time')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='batch task executor')),
],
options={
                'verbose_name_plural': 'batch task records',
},
),
migrations.CreateModel(
name='MultiTaskDetail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('result', models.TextField(verbose_name='execution result')),
                ('status', models.SmallIntegerField(choices=[(0, 'init'), (1, 'success'), (2, 'failed')], verbose_name='execution status')),
                ('start_time', models.DateTimeField(auto_now_add=True, verbose_name='task start time')),
                ('end_time', models.DateTimeField(auto_now=True, null=True, verbose_name='task end time')),
                ('bind_host', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='web.BindHost', verbose_name='target execution host')),
                ('multi_task', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='web.MultiTask', verbose_name='batch task')),
],
options={
                'verbose_name_plural': 'batch task details',
},
),
migrations.AlterUniqueTogether(
name='multitaskdetail',
unique_together=set([('multi_task', 'bind_host')]),
),
]
|
[
"1079614505@qq.com"
] |
1079614505@qq.com
|
6a62d14ab57f512695d7249171d11e6e5d91af95
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/update_postgresql_database_request.py
|
c1dd556380b81c36a6d0f15e095115458d91cd38
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,867
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class UpdatePostgresqlDatabaseRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'x_language': 'str',
'instance_id': 'str',
'body': 'UpdateDatabaseReq'
}
attribute_map = {
'x_language': 'X-Language',
'instance_id': 'instance_id',
'body': 'body'
}
def __init__(self, x_language=None, instance_id=None, body=None):
"""UpdatePostgresqlDatabaseRequest
The model defined in huaweicloud sdk
        :param x_language: Language
:type x_language: str
        :param instance_id: Instance ID.
:type instance_id: str
:param body: Body of the UpdatePostgresqlDatabaseRequest
:type body: :class:`huaweicloudsdkrds.v3.UpdateDatabaseReq`
"""
self._x_language = None
self._instance_id = None
self._body = None
self.discriminator = None
if x_language is not None:
self.x_language = x_language
self.instance_id = instance_id
if body is not None:
self.body = body
@property
def x_language(self):
"""Gets the x_language of this UpdatePostgresqlDatabaseRequest.
        Language
:return: The x_language of this UpdatePostgresqlDatabaseRequest.
:rtype: str
"""
return self._x_language
@x_language.setter
def x_language(self, x_language):
"""Sets the x_language of this UpdatePostgresqlDatabaseRequest.
        Language
:param x_language: The x_language of this UpdatePostgresqlDatabaseRequest.
:type x_language: str
"""
self._x_language = x_language
@property
def instance_id(self):
"""Gets the instance_id of this UpdatePostgresqlDatabaseRequest.
        Instance ID.
:return: The instance_id of this UpdatePostgresqlDatabaseRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this UpdatePostgresqlDatabaseRequest.
        Instance ID.
:param instance_id: The instance_id of this UpdatePostgresqlDatabaseRequest.
:type instance_id: str
"""
self._instance_id = instance_id
@property
def body(self):
"""Gets the body of this UpdatePostgresqlDatabaseRequest.
:return: The body of this UpdatePostgresqlDatabaseRequest.
:rtype: :class:`huaweicloudsdkrds.v3.UpdateDatabaseReq`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this UpdatePostgresqlDatabaseRequest.
:param body: The body of this UpdatePostgresqlDatabaseRequest.
:type body: :class:`huaweicloudsdkrds.v3.UpdateDatabaseReq`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdatePostgresqlDatabaseRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
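
# Illustrative usage sketch (placeholder values; runnable wherever the SDK
# imports above resolve). The body argument, an UpdateDatabaseReq, is left
# out because its fields are defined in a separate model.
if __name__ == '__main__':
    req = UpdatePostgresqlDatabaseRequest(
        x_language='en-us',              # assumed header value for English
        instance_id='example-instance',  # placeholder instance ID
    )
    print(req.to_dict())
    # -> {'x_language': 'en-us', 'instance_id': 'example-instance', 'body': None}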
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
737e76aef0b73d80231e2619bb39b237484a3da1
|
3518d02090de8102b090f780c09ba32d37e95b49
|
/ghpro/stats.py
|
61b6650a13d4118560e1fb8189a42aabc8b430c8
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
takluyver/ghpro
|
9a34a7fdcf162101bfa79a17f4438b1a7b6b4dc5
|
f9f053883acf2c01a39008054698408c8db85a18
|
refs/heads/master
| 2023-06-19T18:50:47.984731
| 2016-12-02T10:15:59
| 2016-12-02T10:15:59
| 75,550,828
| 0
| 0
| null | 2016-12-04T16:54:05
| 2016-12-04T16:54:05
| null |
UTF-8
|
Python
| false
| false
| 8,063
|
py
|
#!/usr/bin/env python
"""Simple tools to query github.com and gather stats about issues.
To generate a report for IPython 2.0, run:
github-stats --milestone 2.0 --since-tag rel-1.0.0
"""
from __future__ import print_function
import codecs
import sys
from argparse import ArgumentParser
from datetime import datetime, timedelta
from subprocess import check_output
from .api import (
get_paged_request, make_auth_header, get_pull_request, is_pull_request,
get_milestone_id, get_issues_list, get_authors,
)
from .utils import guess_project
ISO8601 = "%Y-%m-%dT%H:%M:%SZ"
PER_PAGE = 100
def round_hour(dt):
return dt.replace(minute=0,second=0,microsecond=0)
def _parse_datetime(s):
"""Parse dates in the format returned by the Github API."""
if s:
return datetime.strptime(s, ISO8601)
else:
return datetime.fromtimestamp(0)
def issues2dict(issues):
"""Convert a list of issues to a dict, keyed by issue number."""
idict = {}
for i in issues:
idict[i['number']] = i
return idict
def split_pulls(all_issues, project="ipython/ipython"):
"""split a list of closed issues into non-PR Issues and Pull Requests"""
pulls = []
issues = []
for i in all_issues:
if is_pull_request(i):
pull = get_pull_request(project, i['number'], auth=True)
pulls.append(pull)
else:
issues.append(i)
return issues, pulls
def issues_closed_since(period=timedelta(days=365), project="ipython/ipython", pulls=False):
"""Get all issues closed since a particular point in time. period
can either be a datetime object, or a timedelta object. In the
latter case, it is used as a time before the present.
"""
which = 'pulls' if pulls else 'issues'
if isinstance(period, timedelta):
since = round_hour(datetime.utcnow() - period)
else:
since = period
url = "https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i" % (project, which, since.strftime(ISO8601), PER_PAGE)
allclosed = get_paged_request(url, headers=make_auth_header())
filtered = [ i for i in allclosed if _parse_datetime(i['closed_at']) > since ]
if pulls:
filtered = [ i for i in filtered if _parse_datetime(i['merged_at']) > since ]
# filter out PRs not against master (backports)
filtered = [ i for i in filtered if i['base']['ref'] == 'master' ]
else:
filtered = [ i for i in filtered if not is_pull_request(i) ]
return filtered
def sorted_by_field(issues, field='closed_at', reverse=False):
"""Return a list of issues sorted by closing date date."""
return sorted(issues, key = lambda i:i[field], reverse=reverse)
def report(issues, show_urls=False):
"""Summary report about a list of issues, printing number and title."""
if show_urls:
for i in issues:
role = 'ghpull' if 'merged_at' in i else 'ghissue'
print(u'* :%s:`%d`: %s' % (role, i['number'],
i['title'].replace(u'`', u'``')))
else:
for i in issues:
print(u'* %d: %s' % (i['number'], i['title'].replace(u'`', u'``')))
#-----------------------------------------------------------------------------
# Main script
#-----------------------------------------------------------------------------
def main():
# deal with unicode
if sys.version_info < (3,):
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
# Whether to add reST urls for all issues in printout.
show_urls = True
parser = ArgumentParser()
parser.add_argument('--since-tag', type=str,
help="The git tag to use for the starting point (typically the last major release)."
)
parser.add_argument('--milestone', type=str,
help="The GitHub milestone to use for filtering issues [optional]."
)
parser.add_argument('--days', type=int,
help="The number of days of data to summarize (use this or --since-tag)."
)
parser.add_argument('--project', type=str, default=None,
help="The project to summarize."
)
parser.add_argument('--links', action='store_true', default=False,
help="Include links to all closed Issues and PRs in the output."
)
opts = parser.parse_args()
if not opts.project:
opts.project = guess_project('.')
tag = opts.since_tag
# set `since` from days or git tag
if opts.days:
since = datetime.utcnow() - timedelta(days=opts.days)
else:
if not tag:
tag = check_output(['git', 'describe', '--abbrev=0']).strip().decode('utf8')
cmd = ['git', 'log', '-1', '--format=%ai', tag]
tagday, tz = check_output(cmd).strip().decode('utf8').rsplit(' ', 1)
since = datetime.strptime(tagday, "%Y-%m-%d %H:%M:%S")
h = int(tz[1:3])
m = int(tz[3:])
td = timedelta(hours=h, minutes=m)
if tz[0] == '-':
since += td
else:
since -= td
since = round_hour(since)
milestone = opts.milestone
project = opts.project
print("fetching GitHub stats since %s (milestone: %s, since: %s)" % (since, milestone, tag), file=sys.stderr)
if milestone:
milestone_id = get_milestone_id(project=project, milestone=milestone,
auth=True)
issues_and_pulls = get_issues_list(project=project,
milestone=milestone_id,
state='closed',
auth=True,
)
issues, pulls = split_pulls(issues_and_pulls, project=project)
else:
issues = issues_closed_since(since, project=project, pulls=False)
pulls = issues_closed_since(since, project=project, pulls=True)
# For regular reports, it's nice to show them in reverse chronological order
issues = sorted_by_field(issues, reverse=True)
pulls = sorted_by_field(pulls, reverse=True)
n_issues, n_pulls = map(len, (issues, pulls))
n_total = n_issues + n_pulls
# Print summary report we can directly include into release notes.
print()
since_day = since.strftime("%Y/%m/%d")
today = datetime.today().strftime("%Y/%m/%d")
print("GitHub stats for %s - %s (milestone: %s)" % (since_day, today, milestone))
print()
print("These lists are automatically generated, and may be incomplete or contain duplicates.")
print()
ncommits = 0
all_authors = []
if tag:
# print git info, in addition to GitHub info:
since_tag = tag+'..'
cmd = ['git', 'log', '--oneline', since_tag]
ncommits += len(check_output(cmd).splitlines())
author_cmd = ['git', 'log', '--use-mailmap', "--format=* %aN", since_tag]
all_authors.extend(check_output(author_cmd).decode('utf-8', 'replace').splitlines())
pr_authors = []
for pr in pulls:
pr_authors.extend(get_authors(pr))
ncommits = len(pr_authors) + ncommits - len(pulls)
author_cmd = ['git', 'check-mailmap'] + pr_authors
with_email = check_output(author_cmd).decode('utf-8', 'replace').splitlines()
all_authors.extend([ u'* ' + a.split(' <')[0] for a in with_email ])
unique_authors = sorted(set(all_authors), key=lambda s: s.lower())
print("We closed %d issues and merged %d pull requests." % (n_issues, n_pulls))
if milestone:
print("The full list can be seen `on GitHub <https://github.com/{project}/issues?q=milestone%3A{milestone}+>`__".format(project=project,milestone=milestone)
)
print()
print("The following %i authors contributed %i commits." % (len(unique_authors), ncommits))
print()
print('\n'.join(unique_authors))
if opts.links:
print()
print("GitHub issues and pull requests:")
print()
print('Pull Requests (%d):\n' % n_pulls)
report(pulls, show_urls)
print()
print('Issues (%d):\n' % n_issues)
report(issues, show_urls)
if __name__ == '__main__':
main()
|
[
"benjaminrk@gmail.com"
] |
benjaminrk@gmail.com
|
726e82d61f0d58be4d15b15e39016bbfdc09b419
|
093962f5be1357f1587235ee3e1cfa5db493d0d7
|
/source/reviewapp/core/migrations/0005_delete_unitofhistory.py
|
d7e71fef316ea46117e57d25161f514a91c2c1a5
|
[] |
no_license
|
Shamsulhaq/reviewshelf
|
037299104f7161386f1782670bfa495f029216b8
|
178b15da4186fcab4dfb9a0ad47ba0b056d137a6
|
refs/heads/master
| 2023-01-19T19:36:30.918625
| 2020-12-02T12:24:07
| 2020-12-02T12:24:07
| 316,216,156
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 307
|
py
|
# Generated by Django 3.1.3 on 2020-11-28 08:32
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0004_remove_category_is_deleted'),
]
operations = [
migrations.DeleteModel(
name='UnitOfHistory',
),
]
|
[
"bmshamsulhaq65@gmail.com"
] |
bmshamsulhaq65@gmail.com
|
0f907bd1c0189d77bac7d77ad4ef790f01f4cdeb
|
ccd8273d923b82a46893da7a8dbc06f4b0f2f889
|
/fan_manager/schedule.py
|
8296f2a9be666b2b277a200309a7e23e5c9f37bb
|
[] |
no_license
|
artrey/gpio-fan-manager
|
1e45254de461063a462c7bbd41351be6744dd887
|
36bb2573a5334feb2f474c199cb37db6568ceb68
|
refs/heads/master
| 2020-04-03T06:54:40.103953
| 2019-01-02T20:06:35
| 2019-01-02T20:06:35
| 155,087,759
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,462
|
py
|
# -*- coding: utf-8 -*-
import datetime
from typing import List
import pytz
class FanPolicy:
def __init__(self, time_start: datetime.time, time_finish: datetime.time,
threshold_enable: float, threshold_disable: float):
self.time_start = time_start
self.time_finish = time_finish
self.threshold_enable = threshold_enable
self.threshold_disable = threshold_disable
def __str__(self):
return f'[{self.time_start} - {self.time_finish}] {self.threshold_enable} / {self.threshold_disable}'
class Schedule:
def __init__(self, base_policy: FanPolicy, special_policies: List[FanPolicy]):
self.base_policy = base_policy
self.special_policies = special_policies
def get_policy(self, time: datetime.time) -> FanPolicy:
def filt(p: FanPolicy):
if p.time_start > p.time_finish:
return p.time_start <= time <= datetime.time(23, 59, 59, 999999) \
or datetime.time(0) <= time <= p.time_finish
return p.time_start <= time <= p.time_finish
return next(filter(filt, self.special_policies), self.base_policy)
def current_policy(self, tz: pytz.timezone) -> FanPolicy:
return self.get_policy(datetime.datetime.now(tz=tz).time())
def __str__(self):
return f'Base policy: {self.base_policy} | Special policies:' \
f' {{ {"; ".join(map(str, self.special_policies))} }}'
|
[
"oz.sasha.ivanov@gmail.com"
] |
oz.sasha.ivanov@gmail.com
|
673ee37339172d144ea7b6e9f5e73d58dadf9be4
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/8b7a58c9611cc5b5683e5a3a52461d8f9179eca0-<getfolders>-bug.py
|
2bda93dc1adbedd1338b111a0521a6b2fcd9fbfd
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
def getfolders(self):
if (not self.datacenter):
self.get_datacenter()
self.folders = self._build_folder_tree(self.datacenter.vmFolder)
self._build_folder_map(self.folders)
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|