blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4afc9e26c651892b4c66a8e40b134a2277fdb425
|
be4759201435054c55ca76d4a973aee8c549e1a6
|
/sockets/mn_edge_indices_list_socket.py
|
82fca744d5684176868484ad02929b8ee962b360
|
[] |
no_license
|
vvFiCKvv/animation-nodes
|
75f94549f82702b3ac5f548f009dd2202c694240
|
6988606b8c3601d428fa3fe32c77c7b440eb7c38
|
refs/heads/master
| 2021-01-17T00:29:13.299665
| 2015-04-25T16:46:20
| 2015-04-25T16:46:20
| 27,539,581
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
import bpy
from animation_nodes.mn_execution import nodePropertyChanged
from animation_nodes.mn_node_base import *
class mn_EdgeIndicesListSocket(mn_BaseSocket, mn_SocketProperties):
    """Animation Nodes socket type carrying a list of edge index pairs.

    The socket holds no persistent data of its own: edge index lists are
    produced by upstream nodes at execution time, so storeable-value
    handling is a no-op and the default value is an empty list.
    """
    bl_idname = "mn_EdgeIndicesListSocket"
    bl_label = "Edge Indices List Socket"
    dataType = "Edge Indices List"
    # Only sockets of the same data type may be connected to this input.
    allowedInputTypes = ["Edge Indices List"]
    drawColor = (0, 0.55, 0.23, 1)  # RGBA color shown in the node editor

    def drawInput(self, layout, node, text):
        # Unconnected inputs show only their label; there is no editable value.
        layout.label(text)

    def getValue(self):
        # Fresh empty list each call, so callers can't share mutable state.
        return []

    def setStoreableValue(self, data):
        # Edge index lists are not persisted in the .blend file.
        pass

    def getStoreableValue(self):
        # Nothing persisted; implicitly returns None.
        pass

    def getCopyValueFunctionString(self):
        # Source snippet compiled by the execution engine to deep-copy the
        # value: each edge pair is copied via slicing.
        return "return [edgeIndices[:] for edgeIndices in value]"
|
[
"mail@jlucke.com"
] |
mail@jlucke.com
|
7887f18e6d71f9eaf61d02aa2d134eb927a02aec
|
d3638776a2ce455eb42f29c9c06e267392b6815a
|
/reading/book/migrations/0007_auto_20180703_2156.py
|
d188bede58a1d3c4590c803e1b5a26ae3aa7e460
|
[
"MIT"
] |
permissive
|
Family-TreeSY/reading
|
abce1b5e6047c72867839303ab0181c7a4997913
|
a35d1242ce3a7303fe125c11de8125bd9afbdb3c
|
refs/heads/master
| 2020-03-20T04:53:18.089247
| 2018-07-09T08:51:32
| 2018-07-09T08:51:32
| 137,197,886
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 749
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-07-03 13:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.3: adds the rendered-HTML field and the
    # markdown flag to book.Story.  The verbose_name / help_text values are
    # escaped Chinese text and are part of the schema state — do not edit.

    dependencies = [
        ('book', '0006_auto_20180619_2116'),
    ]

    operations = [
        migrations.AddField(
            model_name='story',
            name='html',
            field=models.TextField(default='', help_text='\u6b63\u6587\u53ef\u4ee5\u4f7f\u7528markdown', verbose_name='html\u6e32\u67d3\u540e\u7684\u9875\u9762'),
        ),
        migrations.AddField(
            model_name='story',
            name='is_markdown',
            field=models.BooleanField(default=True, verbose_name='\u4f7f\u7528markdown'),
        ),
    ]
|
[
"286210002@qq.com"
] |
286210002@qq.com
|
bb89c0558e9830a7ba414e9cea296ffb578f8509
|
e49b654d3db99773390c5b9686df9c99fbf92b2a
|
/linked_lists/linked_list.py
|
f018e58590e973d2a1aac0516779018498713c0c
|
[] |
no_license
|
hao89/diary_of_programming_puzzles
|
467e8264d0ad38768ba5ac3cfb45301293d79943
|
0e05d3716f28075f99bbd7b433d16a383209e57c
|
refs/heads/master
| 2021-01-16T00:49:38.956102
| 2015-08-25T13:44:53
| 2015-08-25T13:44:53
| 41,692,587
| 1
| 0
| null | 2015-08-31T18:20:38
| 2015-08-31T18:20:36
|
Python
|
UTF-8
|
Python
| false
| false
| 455
|
py
|
class LinkedListNode:
    """Node of a singly linked list.

    str() renders the chain starting at this node as a comma-separated
    sequence, e.g. "1, 2, 3".
    """

    def __init__(self, data):
        self.data = data
        self.next = None  # successor node, or None at the tail

    def __str__(self):
        # Collect each node's data, then join once — same output as the
        # original concatenation loop, without quadratic string building.
        parts = []
        node = self
        while node is not None:
            parts.append(str(node.data))
            node = node.next
        return ", ".join(parts)
|
[
"me@davidadamojr.com"
] |
me@davidadamojr.com
|
7153a4c17d679b6a69da201b4c53f56cfe0c5619
|
517a904955033092aec11288151d725548226abc
|
/pandas_tutorial/data_advance/df_column_order.py
|
bd0d1a7f8f2a20eab540746de7dffb1501d42be3
|
[] |
no_license
|
MinSu-Kim/python_tutorial
|
ae0a4e3570aa4cb411626cefbc031777364764d5
|
ed0c08892822d7054161c9e8f98841370868e82d
|
refs/heads/master
| 2021-06-16T16:15:30.349719
| 2021-05-26T04:59:47
| 2021-05-26T04:59:47
| 207,266,202
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,010
|
py
|
import seaborn as sns

# Tutorial script: demonstrates reordering DataFrame columns.
# (Printed headings are Korean and are runtime output — left unchanged.)
print("# titanic 데이터셋의 부분을 선택하여 데이터프레임 만들기")
titanic = sns.load_dataset('titanic')
df = titanic.loc[0:4, 'survived':'age']  # rows 0..4, columns survived..age
print(df, '\n')

print("# 열 이름의 리스트 만들기")
columns = list(df.columns.values)  # existing column names
print("ssss", sorted(columns, reverse=True), type(sorted(columns, reverse=True)))

print("# 열 이름을 알파벳 순으로 정렬하기")
columns_sorted = sorted(columns)  # alphabetical order
df_sorted = df[columns_sorted]
print(df_sorted, '\n')
print(columns_sorted, '\n')

print("# 열 이름을 기존 순서의 정반대 역순으로 정렬하기")
# NOTE(review): this is reverse-alphabetical order, not the reverse of the
# original column order as the heading suggests.
columns_reversed = list(sorted(columns, reverse=True))
df_reversed = df[columns_reversed]
print(df_reversed, '\n')
print(columns_reversed, '\n')

print("# 열 이름을 사용자가 정의한 임의의 순서로 재배치하기")
columns_customed = ['pclass', 'sex', 'age', 'survived']  # user-chosen order
df_customed = df[columns_customed]
print(df_customed)
|
[
"net94.teacher@gmail.com"
] |
net94.teacher@gmail.com
|
fd69e5c0ad13bddd3665e157cdd85e17f6da1920
|
d25003d4e1a1cd3b5eca1525c0119da47579f294
|
/scripts/sort_double.py
|
51093694d8a595573520419157b7d218af437429
|
[] |
no_license
|
rd37/GooglePracticeProjects
|
ceabcb838bd4bd50397b8fdf775e810db320dbb1
|
b3543ada39b8c24f688a41cf0b745482013a93d9
|
refs/heads/master
| 2016-09-06T16:50:41.303580
| 2014-12-12T03:23:23
| 2014-12-12T03:23:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
'''
Created on Dec 10, 2014

@author: ronaldjosephdesmarais
'''
# Python 2 script: sorts a list of numbers two ways — via built-in sorted(),
# and by expanding a value->count dict back into a list.
ints = [5,8.2,1,7,4.1,13,12,4.1,8.2]

print "------use python sorted------"
print sorted(ints)

print "------use dictionary ------"
srt_dict = {}  # value -> number of occurrences
srt_arr = []
for i in ints:
    if i not in srt_dict:
        srt_dict[i]=1
    else:
        srt_dict[i]=srt_dict[i]+1
# NOTE(review): dict iteration order is not guaranteed to be sorted, so the
# second "sort" only appears ordered for small numeric keys in CPython.
for i_key in srt_dict:
    for i in range(0,srt_dict[i_key]):
        srt_arr.append(i_key)
print srt_arr
|
[
"ron.desmarais@gmail.com"
] |
ron.desmarais@gmail.com
|
17b011426ea5dd281920f3b73b76457056e5bd1b
|
4ce6fb5c49ee6ec4b5df9e056040382812a8a591
|
/product/migrations/0029_auto_20191001_0528.py
|
2120012f6b7045350592076be1c5027236969a78
|
[] |
no_license
|
yantrashalait/Multronics
|
198c807a0bb2b8c1ae7bcc2325436467ee8a90b3
|
c85b5a263fe1507c994236bba26ad12d93157622
|
refs/heads/master
| 2021-02-14T18:28:25.984830
| 2021-01-18T09:19:21
| 2021-01-18T09:19:21
| 244,825,522
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 636
|
py
|
# Generated by Django 2.2.4 on 2019-10-01 05:28
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.4: adds a visibility flag to Product and
    # updates the AboutITeam logo field's help_text and upload path.

    dependencies = [
        ('product', '0028_aboutiteam'),
    ]

    operations = [
        migrations.AddField(
            model_name='product',
            name='visibility',
            # NOTE(review): 'visibile' typo ships in the applied migration;
            # fixing it would require a new migration, not an edit here.
            field=models.BooleanField(default=True, verbose_name='Make this product visibile?'),
        ),
        migrations.AlterField(
            model_name='aboutiteam',
            name='logo',
            field=models.ImageField(help_text='Image size: width=192px height=31px', upload_to='logo/'),
        ),
    ]
|
[
"saneprijal@gmail.com"
] |
saneprijal@gmail.com
|
24567018d6cc56c197cd0f52a9cf7d6b9311506f
|
349d6ff272a4a113cee5b0ab7849f46305ebfb13
|
/sc2/game_data.py
|
2e69241ddf4f3f9265fd5ee0cf9aa760d4ddda4e
|
[
"MIT"
] |
permissive
|
raimohanska/python-sc2
|
dafec03d73b905b092c92aefd5ee9d896e8df5e1
|
fb936be1618b4c8b8bf453d76d3f9894780a0f21
|
refs/heads/master
| 2021-09-03T04:04:46.630550
| 2018-01-05T12:50:11
| 2018-01-05T12:50:11
| 116,264,519
| 0
| 0
| null | 2018-01-04T13:41:56
| 2018-01-04T13:41:55
| null |
UTF-8
|
Python
| false
| false
| 2,856
|
py
|
from functools import lru_cache
from .data import Attribute
from .ids.unit_typeid import UnitTypeId
from .ids.ability_id import AbilityId
class GameData(object):
    """Lookup tables built from the SC2 game-data proto: abilities, units
    (only those flagged available) and upgrades, keyed by their ids."""

    def __init__(self, data):
        self.abilities = {a.ability_id: AbilityData(self, a) for a in data.abilities}
        self.units = {u.unit_id: UnitTypeData(self, u) for u in data.units if u.available}
        self.upgrades = {u.upgrade_id: UpgradeData(self, u) for u in data.upgrades}

    # NOTE(review): lru_cache on an instance method keys the cache on self,
    # keeping every GameData instance alive for the cache lifetime (bugbear
    # B019).  Acceptable if few GameData objects exist — confirm.
    @lru_cache(maxsize=256)
    def calculate_ability_cost(self, ability):
        """Return the Cost of whatever *ability* creates (unit) or researches
        (upgrade); Cost(0, 0) when the ability matches neither."""
        for unit in self.units.values():
            if unit.creation_ability == ability:
                return unit.cost
        for upgrade in self.upgrades.values():
            if upgrade.research_ability == ability:
                return upgrade.cost
        return Cost(0, 0)
class AbilityData(object):
    """Thin wrapper around one ability entry of the game-data proto."""

    def __init__(self, game_data, proto):
        self._game_data = game_data
        self._proto = proto

    @property
    def id(self):
        """AbilityId of this ability, following the remap when one is set."""
        remap = self._proto.remaps_to_ability_id
        return AbilityId(remap) if remap else AbilityId(self._proto.ability_id)

    @property
    def cost(self):
        """Cost of the unit/upgrade this ability creates or researches."""
        return self._game_data.calculate_ability_cost(self.id)

    def __repr__(self):
        return f"AbilityData(name={self._proto.button_name})"
class UnitTypeData(object):
    """Thin wrapper around one unit-type entry of the game-data proto."""

    def __init__(self, game_data, proto):
        self._game_data = game_data
        self._proto = proto

    @property
    def name(self):
        return self._proto.name

    @property
    def creation_ability(self):
        """AbilityData of the ability that creates this unit."""
        return self._game_data.abilities[self._proto.ability_id]

    @property
    def attributes(self):
        return self._proto.attributes

    # BUG FIX: this method was decorated with @property although it takes an
    # argument; a property getter is invoked with self only, so every access
    # to .has_attribute raised TypeError.  It must be a plain method called
    # as unit.has_attribute(attr).
    def has_attribute(self, attr):
        """Return True if this unit type carries the given Attribute."""
        assert isinstance(attr, Attribute)
        return attr in self.attributes

    @property
    def has_minerals(self):
        return self._proto.has_minerals

    @property
    def has_vespene(self):
        return self._proto.has_vespene

    @property
    def cost(self):
        """Cost(minerals, vespene) to build this unit."""
        return Cost(
            self._proto.mineral_cost,
            self._proto.vespene_cost
        )
class UpgradeData(object):
    """Thin wrapper around one upgrade entry of the game-data proto."""

    def __init__(self, game_data, proto):
        self._game_data = game_data
        self._proto = proto

    @property
    def name(self):
        return self._proto.name

    @property
    def research_ability(self):
        """AbilityData of the ability that researches this upgrade."""
        return self._game_data.abilities[self._proto.ability_id]

    @property
    def cost(self):
        """Cost(minerals, vespene) to research this upgrade."""
        return Cost(self._proto.mineral_cost, self._proto.vespene_cost)
class Cost(object):
    """Resource cost of a unit or upgrade: minerals, vespene gas, and an
    optional build/research time (None when unknown)."""

    def __init__(self, minerals, vespene, time=None):
        self.minerals = minerals
        self.vespene = vespene
        self.time = time

    def __repr__(self):
        # Time is deliberately omitted from the repr, matching call sites
        # that only compare resource amounts.
        return f"Cost({self.minerals}, {self.vespene})"
|
[
"hannes.karppila@gmail.com"
] |
hannes.karppila@gmail.com
|
7a286bf190f3a7ccafa0b6a2278c68f4aebdc583
|
40280c446e21c07ac3ffd20c5eda064a05093698
|
/easy_module_attribute_getter/custom_transforms.py
|
4eb9cbf5ed62de0c0da0fdd380f0b4112685e08a
|
[
"MIT"
] |
permissive
|
KevinMusgrave/easy-module-attribute-getter
|
884fdee1960b792db49e09edc5de0d268fd6ac8a
|
e0a733c02f2e6a969191a75c79159f45440c969f
|
refs/heads/master
| 2021-07-04T02:18:17.113242
| 2021-01-21T03:32:22
| 2021-01-21T03:32:22
| 218,787,854
| 9
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
import torchvision.transforms.functional as F
from PIL import Image
class ConvertToBGR(object):
    """
    Converts a PIL image from RGB to BGR
    """

    def __init__(self):
        pass

    def __call__(self, img):
        # Split into channels and re-merge them in reversed order.
        red, green, blue = img.split()
        return Image.merge("RGB", (blue, green, red))

    def __repr__(self):
        return "{}()".format(self.__class__.__name__)
class Multiplier(object):
    """Callable transform that scales its input by a fixed factor."""

    def __init__(self, multiple):
        self.multiple = multiple  # scale factor applied on every call

    def __call__(self, img):
        return img * self.multiple

    def __repr__(self):
        return "{}(multiple={})".format(self.__class__.__name__, self.multiple)
|
[
"tkm45@cornell.edu"
] |
tkm45@cornell.edu
|
e9aaecada9a17d7d8b636210f8d990f11a900e07
|
16631cf7cd4a70f2cd2750851649d3eff5e17724
|
/2022/day15/part2.py
|
00daf3f196e692462e068c11a24b226c3febf106
|
[] |
no_license
|
kynax/AdventOfCode
|
1dd609a3308d733f2dd7d4ea00508d2da73180b9
|
36a339241dd7a31ebe08a73e5efa599e5faeea1a
|
refs/heads/master
| 2022-12-21T13:32:52.591068
| 2022-12-16T22:41:30
| 2022-12-16T22:41:30
| 48,439,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,241
|
py
|
import sys

# Advent of Code 2022 day 15 part 2: find the single uncovered cell in a
# 4,000,000-square search space and print its tuning frequency.
grid = {}
sensitivity = []  # (sensor_x, sensor_y, manhattan_radius) per sensor
sensors = []
beacons = []

# Parse lines of the form "Sensor at x=.., y=..: closest beacon is at x=.., y=.."
for l in sys.stdin:
    l = l.strip().split(' ')
    sx, sy, bx, by = int(l[2][2:-1]), int(l[3][2:-1]), int(l[8][2:-1]), int(l[9][2:])
    grid[(sx,sy)] = 'S'
    grid[(bx,by)] = 'B'
    dx,dy = abs(sx-bx), abs(sy-by)
    md = dx+dy  # Manhattan distance sensor -> nearest beacon
    sensitivity.append((sx,sy,md))
    sensors.append((sx,sy))
    if (bx,by) not in beacons: beacons.append((bx,by))

# Overall x-extent of coverage (computed but unused below).
minx = min([i[0]-i[2] for i in sensitivity])-1
maxx = max([i[0]+i[2] for i in sensitivity])+1

for row in range(4000000):
    # Collect the [begin, end] x-interval each sensor covers on this row.
    intervals = []
    for s in sensitivity:
        d = abs(s[1] - row)
        if d > s[2]:
            continue  # sensor's diamond does not reach this row
        w = s[2] - d  # half-width of coverage on this row
        b,e = s[0] - abs(w), s[0] + abs(w)
        if e < b: b,e = e,b
        intervals.append((b,e))
    # Merge intervals that touch or overlap (gap of 1 still merges).
    ints = sorted(intervals)
    nints = [ints[0]]
    for i in range(1, len(ints)):
        if ints[i][0] <= nints[-1][1]+1:
            if ints[i][1] <= nints[-1][1]:
                pass # fully included
            else:
                nints[-1] = (nints[-1][0], ints[i][1])
        else:
            nints.append(ints[i])
    # More than one merged interval means a one-cell gap: the beacon spot.
    if len(nints) > 1:
        print(nints, nints[0][1] + 1, row)
        print(4000000 * (nints[0][1]+1) + row)
        break
|
[
"guilemay@gmail.com"
] |
guilemay@gmail.com
|
5a9660779063959ecef329d1b58ac42c1dc13e5e
|
0da3ebae606295ee3c1613004c6f21650e914841
|
/codestreak/extensions.py
|
07761de1820e73e03a2ea21169597925d9435969
|
[] |
no_license
|
mfwarren/codestreak.io
|
38bac87f2ddc6e7cff56a4bc95b6b1ca4a41ef1a
|
bd37dd7ad55c9926e7a4752afca5986c08145d34
|
refs/heads/master
| 2020-06-11T06:21:27.012529
| 2019-03-03T15:43:32
| 2019-03-03T15:43:32
| 75,747,414
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 448
|
py
|
# -*- coding: utf-8 -*-
"""Extensions module. Each extension is initialized in the app factory located in app.py."""
from flask_debugtoolbar import DebugToolbarExtension
from flask_migrate import Migrate
from raven.contrib.flask import Sentry
from flask_sqlalchemy import SQLAlchemy
from flask_wtf.csrf import CsrfProtect
# Each extension is instantiated once at import time; the application
# factory in app.py binds them to the Flask app via ext.init_app(app).
csrf_protect = CsrfProtect()
db = SQLAlchemy()
migrate = Migrate()
debug_toolbar = DebugToolbarExtension()
sentry = Sentry()
|
[
"matt.warren@gmail.com"
] |
matt.warren@gmail.com
|
d911a1de75e301eed643472356197ac68faf3647
|
b0fab024e9b7e7bd51c18c5578f0f45314808592
|
/sine_competition_url/competition_url.py
|
692142fbbe0f49ea5ac2e79373fac3914a120a1b
|
[] |
no_license
|
dhecar/SINERGIA
|
a34d98fda84ce8ca8d2f67b89680bbf19c15fe1b
|
678cfd41df8045645be130d2f3d51399908b15fd
|
refs/heads/master
| 2021-01-23T11:07:48.724177
| 2017-04-20T10:51:07
| 2017-04-20T10:51:07
| 33,182,317
| 1
| 7
| null | 2015-08-31T20:59:43
| 2015-03-31T11:45:11
|
Python
|
UTF-8
|
Python
| false
| false
| 471
|
py
|
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
from openerp.osv import fields, osv
import urllib
import re
class competition_url(osv.osv):
    """OpenERP (old osv API) model storing a competitor's URL together with
    the regular expression used to extract data from that page."""
    _name = 'competition.url'
    _description = 'URL for competition'
    _table = 'competition_url'
    _rec_name = 'url_competition'  # field shown as the record's display name
    _columns = {
        'url_competition': fields.char('Url ', size=150),
        'regex': fields.char('Expression', size=300),
    }

# Old-API OpenERP models register themselves by being instantiated once.
competition_url()
|
[
"dhecar@gmail.com"
] |
dhecar@gmail.com
|
752e9f1a6a208543137c36cda179ddf64539f177
|
b4a0380acd79a21c5596bfa5fac6eb337ef5359a
|
/build/lib.linux-x86_64-3.8/maskrcnn_benchmark/data/datasets/evaluation/kitchen/__init__.py
|
262e29603168969af9a493dd19f6620fd1abb4d8
|
[] |
no_license
|
xiaofeng-c/Morphable-Detector
|
781104d8a7221eb03c55a67f51f696e46ded4003
|
3e50bb20493c3e0b99d37971e51487124aa08b5b
|
refs/heads/master
| 2023-08-27T20:53:21.606442
| 2021-10-18T22:28:38
| 2021-10-18T22:28:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 529
|
py
|
import logging
from .kitchen_eval import do_kitchen_evaluation
def kitchen_evaluation(dataset, predictions, output_folder, box_only, **_):
    """Dispatch kitchen-dataset evaluation, ignoring unsupported options.

    Extra keyword arguments (e.g. iou_types) are accepted and discarded so
    this matches the generic evaluation-dispatch signature.
    """
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    if box_only:
        # box_only has no kitchen implementation; warn rather than fail.
        logger.warning("kitchen evaluation doesn't support box_only, ignored.")
    logger.info("performing kitchen evaluation, ignored iou_types.")
    return do_kitchen_evaluation(
        dataset=dataset,
        predictions=predictions,
        output_folder=output_folder,
        logger=logger,
    )
|
[
"zhaoxiangyun915@gmail.com"
] |
zhaoxiangyun915@gmail.com
|
52fe5c0ff1bb7e21c43186b82e52b142647c0566
|
83ed1e2f176133c03a5f6dfa504b8df15ae71efb
|
/python/secondary/jnet/jnet.py
|
d29d5ce53337781076ea7ea61b55aca71ca18040
|
[] |
no_license
|
jmborr/code
|
319db14f28e1dea27f9fc703be629f171e6bd95f
|
32720b57699bf01803367566cdc5fff2b6bce810
|
refs/heads/master
| 2022-03-09T16:11:07.455402
| 2019-10-28T15:03:01
| 2019-10-28T15:03:01
| 23,627,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,652
|
py
|
#!/usr/bin/python
import os,sys,re
from jobs.job import pastry
from inputArgs.inputArgs import inpHand
from utilities.small_utilities import Bye,junkName
from utilities.codedir import codedir
from seq.blastManager import blastRun
from seq.fastaManager import importFastaEntry
from seq.msa import gen_msa
from seq.letters import one2three
#create frequency file
def createFreqFile(seq,msa):
    '''
    seq: query sequence
    msa: multiple sequence alignment in a list, in the style of 3590
    and e10. msa does not contain the query sequence, and each
    template sequence is a list item

    Returns the name of a temporary *.jnetfreq file containing, for every
    residue of seq, 20 integers (one per amino acid, jnet order) scaled so
    each row sums to ~10.
    '''
    aas='ARNDCQEGHILKMFPSTWYV' #amino acid order to output, assumed by jnet
    freq=[] #freq[i][j] 0<i<len(seq), 0<j<19
    N=len(seq)
    for i in range(N): freq.append([0]*20) #initialize
    for i in range(N): #add amino acids of query sequence to the frequency table
        m=aas.find(seq[i]) #return position in aas corresponding to amino acid seq[i]
        if m<0:continue #seq[i] character not found in aas (like "X" or "-")
        freq[i][m]+=1
    for seq2 in msa: #do same for sequences in the alignment
        # assumes every msa entry is gap-projected to the same length as seq
        # — TODO confirm with gen_msa output
        for i in range(N):
            m=aas.find(seq2[i]) #return position in aas corresponding to amino acid seq[i]
            if m<0:continue #seq[i] character not found in aas
            freq[i][m]+=1
    blastffreq=junkName()+'.jnetfreq'
    out=open(blastffreq,'w')
    for i in range(N):
        line='' ; m=0
        for j in range(20):m+=freq[i][j] #total number of counts for amino acid at position "i"
        for j in range(20):
            freq[i][j]=round( (10.0*freq[i][j])/m ) #rounded to nearest integer
            line+='%3d'%(freq[i][j])
        out.write(line+'\n') #write amino acid frequencies for amino acid at position "i"
    out.close()
    return blastffreq
#########################################################
# SCRIPT EXECUTION STARTS HERE
#########################################################
# inpHand parses sys.argv and injects fastaf / blasttarf / outf into locals().
inpHand('Usage: inprosp.py [options]',
        ' -a _RA_fastaf sequence file in fasta format',
        ' -b _A_blasttarf tarred blast output (contains xxxxx.blast,xxxxx.pssm). If not provided, jnet.py will do a psiblast run',
        ' -c _A_outf output file name (def: ./seq.dat)',
        ).parse(locals(),sys.argv)

currd=os.getcwd() #current directory
if not outf: outf=currd+'/seq.dat' #output file
workd=currd+'/'+junkName() ;os.system('/bin/mkdir -p '+workd) ; os.chdir(workd) #temp directory

header,seq=importFastaEntry(open(fastaf,'r'))
pastry('/bin/cp '+fastaf+' .') ;fastaf=os.path.basename(fastaf)

#Retrieve/create psiblast outputs
if blasttarf:#we passed a *.tar file containing psiblast report and pssm file
    blastOuts={'outf':'', 'blast':'', 'chk':'', 'fasta':fastaf, 'pssm':''}
    os.system('tar xf '+blasttarf)
    blastOuts['blast']=os.popen('ls -1 *.blast').readline().strip() #get name of blast report
    # NOTE(review): the glob '*pssm.' looks like a typo for '*.pssm' — confirm
    # against the contents of the tarred blast output.
    blastOuts['pssm']=os.popen('ls -1 *pssm.').readline().strip() #get name of pssm file
else: blastOuts=blastRun(fastaf) #run blast with default options

#create multiple sequence alignment projected to the query sequence (3590 or e10 style)
msa=gen_msa(seq,header,Eco=0.0001,maxId=0.85,minId=0.10,red=0.75,blastOuts=blastOuts)['alignments']

#find frequency table from msa, and output to "freq" file. I did this
#function because PSIBLAST has evolved and the perl script "getfreq"
#provided in the jnet distro does not work. I could use newer script
#"parse_psi -freq" but it requires a psiblast report obtained with
#blastpgp -m 6, instead of blastpgp -m 0. I don't want to run PSIBLAST
#twice and running with -m 6 gives some humongously-sized reports
blastffreq=createFreqFile(seq,msa)

#remove too short sequences, thenk keep only first M sequences
msa2=[] ; N=len(seq) ; Lhalf=N/2
for seqgapped in msa:
    sequngapped=seqgapped.replace('-','')
    if len(sequngapped) > Lhalf: msa2.append(sequngapped)
M=int(1000*200/N) #maximum number of sequences to use
msa=msa2[0:M] #reclaim memory space by liberating the gapped sequences list

#output alignment as suitable use for clustalw
rootname=junkName()
fastasf=rootname+'.aln' ; fpt=open(fastasf,'w')
fpt.write('>00000\n'+seq+'\n')  # query goes first, named 00000
for i in range(len(msa)): fpt.write('>'+'%05d\n'%(i+1)+msa[i]+'\n')
fpt.close()

#run clustalw
os.system('clustalw -OUTORDER=INPUT -INFILE='+fastasf+' -OUTPUT=GCG >/dev/null')
msf=rootname+'.msf'
if not os.path.exists(msf): Bye('ERROR: not msf file generated in jnet.py')

#run perl scripts to create the various inputs required by jnet
pastry('/bin/cp -r '+codedir+'/bin/jnet/perl .')
pastry('/bin/cp -r '+codedir+'/bin/jnet/bin .')
os.system('./perl/msf2jnet '+msf) ; msffa=msf+'.fa' #multiple sequence alignment
os.system('./perl/gethmm '+msf+' >/dev/null') ; msfhm=msf+'.hmmprof' #hidden-Markov model
pssmf=blastOuts['pssm']
os.system('./perl/getpssm '+pssmf+' > '+pssmf+'.jnetpssm') ; pssmf=pssmf+'.jnetpssm'

#run jnet and parse to generate seq.dat
jnetout=junkName()
os.system('./bin/jnet -p '+msffa+' '+msfhm+' '+pssmf+' '+blastffreq+' > '+jnetout)
pattern=re.compile(':\s(\S+)\n') ; final='' ; conf=''
ss2nn={'-':1, 'H':2, 'E':4}  # secondary-structure symbol -> seq.dat code
for line in os.popen('grep -P "\sFINAL\t" '+jnetout).readlines():
    final+=pattern.search(line).group(1)
for line in os.popen('grep -P "\sCONF\t" '+jnetout).readlines():
    conf+=pattern.search(line).group(1)
out=open(outf,'w')
for i in range(N):
    out.write( '%5d%6s%5d%5d\n'%( i+1, one2three[seq[i]], ss2nn[final[i]], int(conf[i]) ) )
out.close()

#clean-up working directory
os.chdir(currd)
#os.system('/bin/rm -rf '+workd)
sys.exit(0)
|
[
"borreguero@gmail.com"
] |
borreguero@gmail.com
|
8873e1b784b24057a8e64655dca5dc3c4d1f3d87
|
5603625e865a7cfe415c1aae4035a890aeb23864
|
/bin/mnu.py
|
a178061c01bb7c57e4e3d48aa0bfeed54f50e963
|
[] |
no_license
|
msyriac/peakaboo
|
aa3ac1396c2af0862f9c5891a20a08dddd97068b
|
8bb8a50262695733b086984f7d89ff4f04187278
|
refs/heads/master
| 2021-01-21T13:30:31.434801
| 2018-05-16T18:53:34
| 2018-05-16T18:53:34
| 102,130,912
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,997
|
py
|
import numpy as np
from peakaboo import liuSims as ls
import orphics.tools.io as io
import os,sys
import orphics.analysis.flatMaps as fmaps
from mpi4py import MPI
from orphics.analysis.pipeline import mpi_distribute, MPIStats
import orphics.analysis.flatMaps as fmaps
from enlib import enmap, resample
# Get MPI comm
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
numcores = comm.Get_size()

out_dir = os.environ['WWW']+"peakaboo/"
#file_root = "/gpfs01/astro/workarea/msyriac/data/sims/jia/output/jia_recon_"
# Path builder for simulation products: mass tag ("massive"/"massless"),
# file type, zero-padded sim index, and extension.
file_root = lambda mass,ftype,x,ext: "/gpfs01/astro/workarea/msyriac/data/sims/jia/output/"+mass+"_"+ftype+"_experiment_simple_"+str(x).zfill(9)+"."+ext

# Distribute Ntot sims over the MPI ranks; mpibox accumulates statistics.
Ntot = 500
num_each,each_tasks = mpi_distribute(Ntot,numcores)
mpibox = MPIStats(comm,num_each,tag_start=333)
my_tasks = each_tasks[rank]

LCmassless = ls.LiuConvergence(root_dir="/gpfs01/astro/workarea/msyriac/data/sims/jia/cmb/massless/",zstr="1100.00")
LCmassive = ls.LiuConvergence(root_dir="/gpfs01/astro/workarea/msyriac/data/sims/jia/cmb/massive/",zstr="1100.00")

lbin_edges = np.arange(200,3000,100)

for k,i in enumerate(my_tasks):
    #massive = enmap.read_map(file_root+"massive_"+str(i).zfill(9)+".fits")
    #massless = enmap.read_map(file_root+"massless_"+str(i).zfill(9)+".fits")
    massive = enmap.read_map(file_root("massive","kappa_recon",i,"fits"))
    massless = enmap.read_map(file_root("massless","kappa_recon",i,"fits"))
    if k==0:
        # Power-spectrum binner built once from the first map's mode layout.
        qpower = fmaps.QuickPower(massive.modlmap(),lbin_edges)
    # Input (true) convergence maps, resampled to match the reconstruction.
    massive_input = LCmassive.get_kappa(i+1)
    massive_input = enmap.ndmap(resample.resample_fft(massive_input,massive.shape),massive.wcs)
    massless_input = LCmassless.get_kappa(i+1)
    massless_input = enmap.ndmap(resample.resample_fft(massless_input,massless.shape),massless.wcs)
    print massive.shape
    print massive_input.shape
    # Auto spectra of reconstructions, cross with inputs, auto of inputs.
    cents, pauto_massive = qpower.calc(massive)
    cents, pauto_massless = qpower.calc(massless)
    cents, pcross_massive = qpower.calc(massive,massive_input)
    cents, pcross_massless = qpower.calc(massless,massless_input)
    cents, pauto_massive_input = qpower.calc(massive_input)
    cents, pauto_massless_input = qpower.calc(massless_input)
    # Pre-computed N0-subtracted auto spectra (stored as text despite .fits name).
    lcents,massive_rkk = np.loadtxt(file_root("massive","auto_n0_subbed",i,"fits"),unpack=True)
    lcents,massless_rkk = np.loadtxt(file_root("massless","auto_n0_subbed",i,"fits"),unpack=True)
    mpibox.add_to_stats("massiveAutoN0",massive_rkk)
    mpibox.add_to_stats("masslessAutoN0",massless_rkk)
    mpibox.add_to_stats("massiveAuto",pauto_massive)
    mpibox.add_to_stats("masslessAuto",pauto_massless)
    mpibox.add_to_stats("masslessCross",pcross_massless)
    mpibox.add_to_stats("massiveCross",pcross_massive)
    mpibox.add_to_stats("massiveInput",pauto_massive_input)
    mpibox.add_to_stats("masslessInput",pauto_massless_input)
    print rank,i

# Reduce the accumulated statistics across ranks.
mpibox.get_stats()
if rank==0:
    rm = mpibox.stats["massiveAutoN0"]
    rm0 = mpibox.stats["masslessAutoN0"]
    mauto = mpibox.stats["massiveAuto"]
    m0auto = mpibox.stats["masslessAuto"]
    m0cross = mpibox.stats["masslessCross"]
    mcross = mpibox.stats["massiveCross"]
    mauto_input = mpibox.stats["massiveInput"]
    m0auto_input = mpibox.stats["masslessInput"]

    def camb_pred(nu):
        # Theory C_l^kk prediction from pre-computed CAMB outputs for the
        # given neutrino case ("massless"/"massive").
        import orphics.tools.cmb as cmb
        # camb_cl_prediction
        cambRoot = "data/jia_"+nu
        theory = cmb.loadTheorySpectraFromCAMB(cambRoot,unlensedEqualsLensed=False,useTotal=False,TCMB = 2.7255e6,lpad=9000)
        ellrange = np.arange(2,3000,1)
        clkk_camb = theory.gCl("kk",ellrange)
        return ellrange,clkk_camb

    # Massless case: N0-subtracted auto, cross, input auto vs. CAMB theory.
    ellrange,clkk_camb0 = camb_pred("massless")
    pl = io.Plotter(scaleY='log',labelX="$\\ell$",labelY="$C_{\ell}$")
    pl.addErr(lcents,rm0['mean'],yerr=rm0['errmean'],marker="o")
    pl.addErr(cents,m0cross['mean'],yerr=m0cross['errmean'],marker="^")
    pl.add(cents,m0auto_input['mean'],marker="x",ls="none")
    pl.add(ellrange,clkk_camb0,label="cl camb",color="k")
    pl._ax.set_ylim(1.e-9,1.e-6)
    pl.done(out_dir+"massless.png")

    # Massive case: same plot.
    ellrange,clkk_camb = camb_pred("massive")
    pl = io.Plotter(scaleY='log',labelX="$\\ell$",labelY="$C_{\ell}$")
    pl.addErr(lcents,rm['mean'],yerr=rm['errmean'],marker="o")
    pl.addErr(cents,mcross['mean'],yerr=mcross['errmean'],marker="^")
    pl.add(cents,mauto_input['mean'],marker="x",ls="none")
    pl.add(ellrange,clkk_camb,label="cl camb",color="k")
    pl._ax.set_ylim(1.e-9,1.e-6)
    pl.done(out_dir+"massive.png")

    # Fractional difference massive vs. massless, measured and theory.
    pdiff = (clkk_camb-clkk_camb0)*100./clkk_camb0
    pl = io.Plotter(labelX="$\\ell$",labelY="$100\\Delta C_{\ell}/C_{\ell}$")
    pl.add(lcents,(rm['mean']-rm0['mean'])*100./rm0['mean'],marker="o",ls="none")
    pl.add(cents,(mauto_input['mean']-m0auto_input['mean'])*100./m0auto_input['mean'],marker="x",ls="none")
    pl.add(ellrange,pdiff,label="cl camb",color="k")
    pl.hline()
    #pl._ax.set_ylim(-2,1)
    pl._ax.set_xlim(500,3000)
    pl.done(out_dir+"mnudiff.png")
|
[
"mathewsyriac@gmail.com"
] |
mathewsyriac@gmail.com
|
50499ed278f1c769e6003b5e965f70ca46dd96e2
|
8972658ca2c64703e8281db89d7a6ac47cbabbf7
|
/backend/tests/models.py
|
db9754be03118136304be7ed51dc6c7b912ed427
|
[
"MIT"
] |
permissive
|
denisorehovsky/linkanywhere
|
15721824719cc8a959cdddb4178cfe754eb4862d
|
e21d6725fbe0e74a7301e40f9d9bdbac17c68e68
|
refs/heads/master
| 2022-07-21T16:16:17.412930
| 2017-08-24T06:32:37
| 2017-08-24T06:32:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
import uuid
from django.db import models
from linkanywhere.apps.base.behaviors import Published
class TestModel(models.Model):
    """
    Base for test models that sets app_label, so they play nicely.
    """
    # UUID primary key generated client-side, never editable in forms.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)

    class Meta:
        app_label = 'tests'
        abstract = True


class BasicModel(TestModel):
    # Minimal concrete model with a single text field.
    text = models.CharField(max_length=100)


class LikeModel(TestModel):
    # Same shape as BasicModel; separate table for like-related tests.
    text = models.CharField(max_length=100)


class PublishedModel(Published, TestModel):
    # Exercises the Published behavior mixin on the common test base.
    pass
|
[
"denis.orehovsky@gmail.com"
] |
denis.orehovsky@gmail.com
|
9ab61ddea3a8f45f1f40b9490b41e4da6d9a6544
|
786de89be635eb21295070a6a3452f3a7fe6712c
|
/SConsTools/tags/V00-00-16/src/standardExternalPackage.py
|
f433403d52307a3f120d028b2d80755d194bb0c7
|
[] |
no_license
|
connectthefuture/psdmrepo
|
85267cfe8d54564f99e17035efe931077c8f7a37
|
f32870a987a7493e7bf0f0a5c1712a5a030ef199
|
refs/heads/master
| 2021-01-13T03:26:35.494026
| 2015-09-03T22:22:11
| 2015-09-03T22:22:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,349
|
py
|
#===============================================================================
#
# SConscript fuction for standard external package
#
# $Id$
#
#===============================================================================
import os
import sys
from os.path import join as pjoin
from fnmatch import fnmatch
from SCons.Defaults import *
from SCons.Script import *
from SConsTools.trace import *
from SConsTools.dependencies import *
#
# This is an interface package for the external package. We wan to make
# symlinks to the include files, libs and binaries
#
# build package name from prefix and directory
def _absdir ( prefix, dir ):
if not dir :
return None
if prefix and not os.path.isabs( dir ) :
dir = pjoin( prefix, dir )
if not os.path.isdir( dir ) :
dir = None
return dir
def _glob ( dir, patterns ):
if patterns is None :
return os.listdir(dir)
# patterns could be space-separated string of patterns
if isinstance(patterns,(str,unicode)) :
patterns = patterns.split()
if not patterns : return []
result = []
for l in os.listdir(dir) :
for p in patterns :
if fnmatch ( l, p ) : result.append(l)
return result
#
# Define all builders for the external package
#
def standardExternalPackage ( package, **kw ) :
    """ Understands following keywords (all are optional):
        PREFIX - top directory of the external package
        INCDIR - include directory, absolute or relative to PREFIX
        PYDIR - Python src directory, absolute or relative to PREFIX
        PYDIRSEP - if present and evaluates to True installs python code to a
                separate directory arch/$LUSI_ARCH/python/<package>
        LIBDIR - libraries directory, absolute or relative to PREFIX
        LINKLIBS - library names to link, or all libs if not present
        BINDIR - binaries directory, absolute or relative to PREFIX
        LINKBINS - binary names to link, or all libs if not present
        PKGLIBS - names of libraries that have to be linked for this package
        DEPS - names of other packages that we depend upon

    Side effects: creates symlinks for headers/python/libs/binaries under
    the build arch directories and records them in env['ALL_TARGETS'];
    registers this package's libraries and dependencies with SConsTools.
    """
    pkg = os.path.basename(os.getcwd())
    trace ( "Standard SConscript for external package `"+package+"'", "SConscript", 1 )

    env = DefaultEnvironment()

    prefix = kw.get('PREFIX',None)
    trace ( "prefix: %s" % prefix, "standardExternalPackage", 3 )

    # link include directory
    inc_dir = _absdir ( prefix, kw.get('INCDIR',None) )
    if inc_dir :
        trace ( "include_dir: %s" % inc_dir, "standardExternalPackage", 5 )
        # make 'geninc' directory if not there yet
        archinc = Dir(env.subst("$ARCHINCDIR"))
        archinc = str(archinc)
        if not os.path.isdir( archinc ) : os.makedirs( archinc )
        # one symlink for the whole include tree; skipped if already present
        target = pjoin(archinc,package)
        if not os.path.lexists(target) : os.symlink ( inc_dir, target )

    # link python directory
    py_dir = _absdir ( prefix, kw.get('PYDIR',None) )
    if py_dir :
        trace ( "py_dir: %s" % py_dir, "standardExternalPackage", 5 )
        if kw.get('PYDIRSEP',False) :
            # make a link to the whole dir
            targ = env.Symlink ( Dir(pjoin(env.subst("$PYDIR"),package)), Dir(py_dir) )
            env['ALL_TARGETS']['LIBS'].extend ( targ )
        else :
            # make links for every file in the directory
            files = os.listdir(py_dir)
            for f in files :
                loc = pjoin(py_dir,f)
                if not os.path.isdir(loc) :
                    targ = env.Symlink ( pjoin(env.subst("$PYDIR"),f), loc )
                    env['ALL_TARGETS']['LIBS'].extend( targ )

    # link all libraries
    lib_dir = _absdir ( prefix, kw.get('LIBDIR',None) )
    if lib_dir :
        trace ( "lib_dir: %s" % lib_dir, "standardExternalPackage", 5 )
        # make a list of libs to link (LINKLIBS patterns, or everything)
        libraries = kw.get('LINKLIBS',None)
        trace ( "libraries: %s" % libraries, "standardExternalPackage", 5 )
        libraries = _glob ( lib_dir, libraries )
        trace ( "libraries: %s" % libraries, "standardExternalPackage", 5 )
        for f in libraries :
            loc = pjoin(lib_dir,f)
            if os.path.isfile(loc) :
                #targ = env.Install( "$LIBDIR", loc )
                targ = env.Symlink ( pjoin(env.subst("$LIBDIR"),f), loc )
                trace ( "linklib: %s -> %s" % (str(targ[0]),loc), "standardExternalPackage", 5 )
                env['ALL_TARGETS']['LIBS'].extend ( targ )

    # link all executables
    bin_dir = _absdir ( prefix, kw.get('BINDIR',None) )
    if bin_dir :
        trace ( "bin_dir: %s" % bin_dir, "standardExternalPackage", 5 )
        # make list of binaries to link (LINKBINS patterns, or everything)
        binaries = kw.get('LINKBINS',None)
        binaries = _glob ( bin_dir, binaries )
        for f in binaries :
            loc = pjoin(bin_dir,f)
            if os.path.isfile(loc) :
                targ = env.Symlink ( pjoin(env.subst("$BINDIR"),f), loc )
                env['ALL_TARGETS']['BINS'].extend ( targ )

    # add my libs to a package tree
    setPkgLibs ( env, package, kw.get('PKGLIBS',[]) )

    # add packages that I depend on
    setPkgDeps ( env, package, kw.get('DEPS',[]) )
|
[
"salnikov@b967ad99-d558-0410-b138-e0f6c56caec7"
] |
salnikov@b967ad99-d558-0410-b138-e0f6c56caec7
|
80137af34837964be8bf789dbbcf21a7a1f05a3a
|
3d386ef093427c227f0ba6637eedfbce044a2e9e
|
/tfbert/optimization/create_optimizer.py
|
ac6c1abc5c6bf2aebe07f51b89fe61f37dbec2ae
|
[] |
no_license
|
HaierAI/tfbert
|
c3eeb77af70e79e925e72c393a3e8229feaf1a4a
|
3779e59a4ebe7458ae732fef547f1168badbba2b
|
refs/heads/master
| 2023-07-09T05:25:19.015760
| 2021-08-16T12:27:37
| 2021-08-16T12:27:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,278
|
py
|
# -*- coding:utf-8 -*-
# @FileName :create_optimizer.py
# @Time :2021/1/31 19:58
# @Author :huanghui
import tensorflow.compat.v1 as tf
from .adamw import AdamWeightDecayOptimizer
from .lamb import LAMBOptimizer
from .schedule import lr_schedule
def create_optimizer(
learning_rate,
num_train_steps=None,
num_warmup_steps=None,
optimizer_type='adamw',
epsilon=1e-6,
momentum=0.,
weight_decay=0.01,
decay_method='poly',
mixed_precision=False,
init_loss_scale=2 ** 32
):
if decay_method is not None and num_train_steps is not None and num_warmup_steps is not None:
num_train_steps = int(num_train_steps)
num_warmup_steps = int(num_warmup_steps)
learning_rate = lr_schedule(
learning_rate, num_train_steps, num_warmup_steps,
decay_method=decay_method, optimizer_type=optimizer_type
)
if optimizer_type == 'adamw':
optimizer = AdamWeightDecayOptimizer(
learning_rate=learning_rate,
weight_decay_rate=weight_decay,
beta_1=0.9,
beta_2=0.999,
epsilon=epsilon,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"]
)
elif optimizer_type == 'lamb':
optimizer = LAMBOptimizer(
learning_rate,
weight_decay_rate=weight_decay,
beta_1=0.9,
beta_2=0.999,
epsilon=epsilon,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"]
)
elif optimizer_type == 'adam':
optimizer = tf.train.AdamOptimizer(
learning_rate=learning_rate,
beta1=0.9,
beta2=0.999,
epsilon=epsilon)
elif optimizer_type == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(
learning_rate=learning_rate
)
elif optimizer_type == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer(
learning_rate=learning_rate,
rho=0.95,
epsilon=epsilon,
)
elif optimizer_type == 'adagrad':
optimizer = tf.train.AdagradOptimizer(
learning_rate=learning_rate,
initial_accumulator_value=0.1
)
elif optimizer_type == 'rmsp':
optimizer = tf.train.RMSPropOptimizer(
learning_rate=learning_rate,
decay=0.9,
momentum=momentum,
epsilon=epsilon,
)
else:
raise ValueError('Unsupported optimizer option: %s' % optimizer_type)
if mixed_precision:
loss_scaler = tf.train.experimental.DynamicLossScale(
initial_loss_scale=init_loss_scale, increment_period=1000,
multiplier=2.0)
optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(optimizer, loss_scaler)
loss_scale_value = tf.identity(loss_scaler(), name="loss_scale")
return optimizer
def create_train_op(
optimizer,
grads_and_vars,
max_grad=1.0,
mixed_precision=False,
gradient_accumulation_steps=1):
global_step = tf.train.get_or_create_global_step()
if gradient_accumulation_steps > 1:
local_step = tf.get_variable(name="local_step", shape=[], dtype=tf.int32, trainable=False,
initializer=tf.zeros_initializer)
batch_finite = tf.get_variable(name="batch_finite", shape=[], dtype=tf.bool, trainable=False,
initializer=tf.ones_initializer)
accum_vars = [tf.get_variable(
name=tvar.name.split(":")[0] + "/accum",
shape=tvar.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer()) for tvar in tf.trainable_variables()]
reset_step = tf.cast(tf.math.equal(local_step % gradient_accumulation_steps, 0), dtype=tf.bool)
local_step = tf.cond(reset_step, lambda: local_step.assign(tf.ones_like(local_step)),
lambda: local_step.assign_add(1))
grads_and_vars_and_accums = [(gv[0], gv[1], accum_vars[i]) for i, gv in enumerate(grads_and_vars) if
gv[0] is not None]
grads, tvars, accum_vars = list(zip(*grads_and_vars_and_accums))
all_are_finite = tf.reduce_all(
[tf.reduce_all(tf.is_finite(g)) for g in grads]) if mixed_precision else tf.constant(
True,
dtype=tf.bool)
batch_finite = tf.cond(reset_step,
lambda: batch_finite.assign(
tf.math.logical_and(tf.constant(True, dtype=tf.bool), all_are_finite)),
lambda: batch_finite.assign(tf.math.logical_and(batch_finite, all_are_finite)))
# This is how the model was pre-trained.
# ensure global norm is a finite number
# to prevent clip_by_global_norm from having a hizzy fit.
(clipped_grads, _) = tf.clip_by_global_norm(
grads, clip_norm=max_grad)
accum_vars = tf.cond(reset_step,
lambda: [accum_vars[i].assign(grad) for i, grad in enumerate(clipped_grads)],
lambda: [accum_vars[i].assign_add(grad) for i, grad in enumerate(clipped_grads)])
def update(accum_vars):
return optimizer.apply_gradients(list(zip(accum_vars, tvars)))
update_step = tf.identity(
tf.cast(tf.math.equal(local_step % gradient_accumulation_steps, 0), dtype=tf.bool),
name="update_step")
update_op = tf.cond(update_step,
lambda: update(accum_vars), lambda: tf.no_op())
new_global_step = tf.cond(tf.math.logical_and(update_step, batch_finite),
lambda: global_step + 1,
lambda: global_step)
new_global_step = tf.identity(new_global_step, name='step_update')
train_op = tf.group(update_op, [global_step.assign(new_global_step)])
else:
grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None]
grads, tvars = list(zip(*grads_and_vars))
all_are_finite = tf.reduce_all(
[tf.reduce_all(tf.is_finite(g)) for g in grads]) if mixed_precision else tf.constant(True,
dtype=tf.bool)
# This is how the model was pre-trained.
# ensure global norm is a finite number
# to prevent clip_by_global_norm from having a hizzy fit.
(clipped_grads, _) = tf.clip_by_global_norm(
grads, clip_norm=max_grad)
# 这里不要传入global step,adam内部没有对global step累加
# 而原本adam等tf内置优化器会累加,这样就会造成global step重复增加
train_op = optimizer.apply_gradients(
list(zip(clipped_grads, tvars)))
new_global_step = tf.cond(all_are_finite, lambda: global_step + 1, lambda: global_step)
new_global_step = tf.identity(new_global_step, name='step_update')
train_op = tf.group(train_op, [global_step.assign(new_global_step)])
return train_op
|
[
"m13021933043@163.com"
] |
m13021933043@163.com
|
84f66a0bf9e3af5d28f84b3115109b132927b474
|
d66818f4b951943553826a5f64413e90120e1fae
|
/hackerearth/Algorithms/Chandu and chandni's secret chat/solution.py
|
5347d87bca861f96880c7fd9b656c67c7b40092f
|
[
"MIT"
] |
permissive
|
HBinhCT/Q-project
|
0f80cd15c9945c43e2e17072416ddb6e4745e7fa
|
19923cbaa3c83c670527899ece5c3ad31bcebe65
|
refs/heads/master
| 2023-08-30T08:59:16.006567
| 2023-08-29T15:30:21
| 2023-08-29T15:30:21
| 247,630,603
| 8
| 1
|
MIT
| 2020-07-22T01:20:23
| 2020-03-16T06:48:02
|
Python
|
UTF-8
|
Python
| false
| false
| 605
|
py
|
"""
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
t = int(input())
for _ in range(t):
s, k = input().strip().split()
k = int(k)
idxes = list(range(len(s)))
idxes.sort(key=lambda i: s[i], reverse=True)
idx = idxes[k - 1]
word = ''
for _ in range(len(s)):
word += s[idx]
idx = idxes[idx]
word = word[-1] + word[:-1]
print(word)
|
[
"hbinhct@gmail.com"
] |
hbinhct@gmail.com
|
b2b80179d0f8ddc7a76ed005cbc3670219bb8091
|
28f0dc2b48ed019dfef08d84e842c5d75e116dfc
|
/Versions/Release.2.x.x/py/OBSOLETE/BibleTable.py
|
cced9d9f98a8cb768a4c415715efb06a18c3eb2b
|
[
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
garygriswold/SafeBible
|
9da0e8d89cb08888b8cf48773b4b3860086c49f7
|
2d378e84cbd6b81641bcccd6ba66699d24208548
|
refs/heads/master
| 2022-02-25T19:41:10.367183
| 2019-08-22T03:35:02
| 2019-08-22T03:35:02
| 34,028,119
| 0
| 0
|
MIT
| 2019-10-30T07:11:44
| 2015-04-16T01:40:19
|
TSQL
|
UTF-8
|
Python
| false
| false
| 3,942
|
py
|
#
# This program generates SQL statements to create and populate the Bible table
# This was previously called the Version table
#
import io
import os
import json
out = io.open("sql/bible.sql", mode="w", encoding="utf-8")
out.write(u"DROP TABLE IF EXISTS Bible;\n")
out.write(u"CREATE TABLE Bible (\n")
out.write(u" bibleId TEXT NOT NULL PRIMARY KEY,\n") # info.json filename[5:18]
out.write(u" code TEXT NOT NULL,\n") # info.json abbr
out.write(u" abbr TEXT NOT NULL,\n") # info.json abbr char 4-6
out.write(u" iso3 TEXT NOT NULL REFERENCES Language(iso3),\n") # info.json lang
out.write(u" name TEXT NOT NULL,\n") # info.json name
out.write(u" englishName TEXT NULL,\n") # info.json nameEnglish
out.write(u" localizedName TEXT NULL,\n") # Google Translate API
out.write(u" direction TEXT CHECK (direction IN('ltr','rtl')) default('ltr'),\n") # info.json dir
out.write(u" script TEXT NULL,\n") # info.json script
out.write(u" country TEXT NULL REFERENCES Country(code),\n") # info.json countryCode
out.write(u" s3Bucket TEXT NOT NULL,\n") # this program
out.write(u" s3KeyPrefix TEXT NOT NULL,\n") # info.json filename
out.write(u" s3Key TEXT NULL,\n") # %I_%O_%B_%C.html
# I cannot find program, which generated this template: s3KeyTemplate.py
out.write(u" s3CredentialId TEXT NULL,\n") # TBD
out.write(u" otDamId TEXT NULL,\n") # BibleUpdateDamId.py
out.write(u" ntDamId TEXT NULL,\n") # BibleUpdateDamId.py
out.write(u" stylesheet TEXT NOT NULL);\n") # constant stylesheet
prefix2 = "INSERT INTO Bible (bibleId, code, abbr, iso3, name, englishName, direction, script, country, s3Bucket, s3KeyPrefix, s3Key, stylesheet) VALUES"
stylesheet = "BibleApp2.css"
# read and process all info.json files
source = "/Users/garygriswold/ShortSands/DBL/FCBH_info/"
filelist = sorted(os.listdir(source))
for filename in filelist:
#if len(filename) != 28:
#print(len(filename), filename)
#else:
if len(filename) == 28:
#print(filename)
input2 = io.open(source + filename, mode="r", encoding="utf-8")
data = input2.read()
bible = json.loads(data)
bibleId = filename[5:18]
# check type to see if == bible
bType = bible['type']
if bType != 'bible':
print "?? Type = ", bType
# check abbr to see if different from bibleId
code = bible['abbr']
# remove lang code from abbr
abbr = code[3:]
# check that lang == first 3 letters of bibleId
iso3 = bible['lang']
if iso3.upper() != code[0:3]:
print "?? abbr=", code, " iso3=", iso3
iso3 = iso3.lower()
name = bible['name'].replace("'", "''")
englishName = bible['nameEnglish'].replace("'", "''")
direction = bible['dir']
# convert script to iso 15924 code
script = bible.get('script')
validScripts = [None, 'Arab', 'Beng', 'Bugi', 'Cans', 'Cyrl', 'Deva', 'Ethi', 'Geor',
'Hans', 'Hant', 'Java', 'Kore', 'Latn', 'Orya', 'Syrc', 'Taml', 'Thai' ]
#if validScripts.index(script) < 0:
if script in validScripts:
a = 1
else:
if script == 'Latin':
script = 'Latn'
elif script == 'Cyrillic':
script = 'Cyrl'
elif script == 'Arabic':
script = 'Arab'
elif script == 'Devangari':
script = 'Deva'
elif script == 'Devanagari (Nagari)':
script = 'Deva'
elif script == 'CJK':
script = None
else:
print "ERROR: unknown script code", script, filename
script = "'" + script + "'" if script != None else 'null'
country = bible.get('countryCode')
country = "'" + country.upper() + "'" if len(country) > 0 else 'null'
bucket = "dbp-prod"
keyPrefix = filename.replace("info.json", "").replace(":", "/")
s3Key = '%I_%O_%B_%C.html'
out.write("%s ('%s', '%s', '%s', '%s', '%s', '%s', '%s', %s, %s, '%s', '%s', '%s', '%s');\n" %
(prefix2, bibleId, code, abbr, iso3, name, englishName, direction, script, country, bucket, keyPrefix, s3Key, stylesheet))
out.close()
|
[
"gary@shortsands.com"
] |
gary@shortsands.com
|
1f33447947159a11ecf117ebfd09d4a0232c26ed
|
890a6921b9dbc3d849ee51366c76a791761d35d2
|
/.qt_for_python/uic/PlacefieldVisualSelectionWidgetBase.py
|
e5bfd84f65b40625dd5b625a786686f1a3fc1927
|
[] |
no_license
|
CommanderPho/Spike3D
|
87e1ea17a76080e18e835e9d015e7fe7bb3426e4
|
63e5e78c3bcb28f3dbab02d6354e6eb83cbccc2a
|
refs/heads/master
| 2023-08-17T10:40:44.389682
| 2023-08-16T10:57:12
| 2023-08-16T10:57:12
| 413,545,455
| 2
| 0
| null | 2022-10-22T05:54:57
| 2021-10-04T18:48:06
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,107
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'c:\Users\pho\repos\pyPhoPlaceCellAnalysis\src\pyphoplacecellanalysis\GUI\Qt\PlacefieldVisualSelectionControls\PlacefieldVisualSelectionWidgetBase.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_rootForm(object):
def setupUi(self, rootForm):
rootForm.setObjectName("rootForm")
rootForm.resize(94, 126)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(rootForm.sizePolicy().hasHeightForWidth())
rootForm.setSizePolicy(sizePolicy)
rootForm.setMinimumSize(QtCore.QSize(50, 0))
rootForm.setBaseSize(QtCore.QSize(50, 126))
rootForm.setStyleSheet("background-color: rgb(71, 58, 46);\n"
"border-color: rgb(207, 207, 207);\n"
"background-color: rgba(71, 65, 60, 180);\n"
"color: rgb(244, 244, 244);\n"
"border-color: rgb(0, 0, 0);")
self.gridLayout = QtWidgets.QGridLayout(rootForm)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.groupBox = QtWidgets.QGroupBox(rootForm)
self.groupBox.setMinimumSize(QtCore.QSize(50, 0))
self.groupBox.setMaximumSize(QtCore.QSize(160, 160))
self.groupBox.setBaseSize(QtCore.QSize(50, 0))
self.groupBox.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
self.groupBox.setFlat(False)
self.groupBox.setObjectName("groupBox")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.groupBox)
self.verticalLayout_2.setContentsMargins(2, 0, 2, 4)
self.verticalLayout_2.setSpacing(2)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.btnTitle = QtWidgets.QPushButton(self.groupBox)
self.btnTitle.setObjectName("btnTitle")
self.verticalLayout_2.addWidget(self.btnTitle)
self.btnColorButton = ColorButton(self.groupBox)
self.btnColorButton.setEnabled(False)
self.btnColorButton.setMinimumSize(QtCore.QSize(24, 24))
self.btnColorButton.setText("")
self.btnColorButton.setObjectName("btnColorButton")
self.verticalLayout_2.addWidget(self.btnColorButton)
self.chkbtnPlacefield = QtWidgets.QToolButton(self.groupBox)
self.chkbtnPlacefield.setCheckable(True)
self.chkbtnPlacefield.setChecked(False)
self.chkbtnPlacefield.setPopupMode(QtWidgets.QToolButton.DelayedPopup)
self.chkbtnPlacefield.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.chkbtnPlacefield.setObjectName("chkbtnPlacefield")
self.verticalLayout_2.addWidget(self.chkbtnPlacefield)
self.chkbtnSpikes = QtWidgets.QToolButton(self.groupBox)
self.chkbtnSpikes.setCheckable(True)
self.chkbtnSpikes.setChecked(False)
self.chkbtnSpikes.setPopupMode(QtWidgets.QToolButton.DelayedPopup)
self.chkbtnSpikes.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.chkbtnSpikes.setObjectName("chkbtnSpikes")
self.verticalLayout_2.addWidget(self.chkbtnSpikes)
self.gridLayout.addWidget(self.groupBox, 0, 0, 1, 1)
self.retranslateUi(rootForm)
QtCore.QMetaObject.connectSlotsByName(rootForm)
def retranslateUi(self, rootForm):
_translate = QtCore.QCoreApplication.translate
rootForm.setWindowTitle(_translate("rootForm", "Pf"))
self.groupBox.setTitle(_translate("rootForm", "pf[i]"))
self.btnTitle.setText(_translate("rootForm", "pf[i]"))
self.chkbtnPlacefield.setText(_translate("rootForm", "pf"))
self.chkbtnSpikes.setText(_translate("rootForm", "spikes"))
from pyphoplacecellanalysis.External.pyqtgraph.widgets.ColorButton import ColorButton
|
[
"CommanderPho@users.noreply.github.com"
] |
CommanderPho@users.noreply.github.com
|
db8551c3b00fdaa9cea83beff7f976a27482b764
|
0486b6ccf883e9cd7a24bbd89b5420e7de2172b9
|
/DRF Study Material/Django REST Code/gs23/manage.py
|
0fd63ec5bb04d9d85aeb9039d1fb86e9be16bd10
|
[] |
no_license
|
ajitexl/restfrmaework
|
2980203d7faa6c8364288283758d32c8f2a37817
|
9ab203748e623516365d9924dcc68acc786a66e1
|
refs/heads/main
| 2023-02-03T08:52:00.672047
| 2020-12-10T09:50:51
| 2020-12-10T09:50:51
| 320,222,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 682
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gs23.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"you@example.com"
] |
you@example.com
|
a9d0bc7546f7ca3723a17a3f5fd7ba086d51f28c
|
21bf726bf895569a41a8b8d2db6772dc51f46cfd
|
/OTHERS/Interviews/Akuna.py
|
abba9d6a6b4e54cda0048899107ec10a4fa00cc0
|
[] |
no_license
|
jeffsnguyen/Python-1
|
dd924d25337cd6ac21e321d7b2c5ac17c065d94b
|
463d32a61a760d076656c73c9f8c9fadf262438d
|
refs/heads/master
| 2022-03-23T09:50:04.476094
| 2019-12-23T12:32:49
| 2019-12-23T12:32:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,844
|
py
|
def if_none_trivial(x):
if x==0:
return 0
else:
return 1
def violet_search_icecream_shop(stock, max_capacity,demands,n_days,overnight_fee,price,deliver_fee,total_expense=[],expense=0):
delivery_min = max(0,demands[0]-stock)
delivery_max = max(0,sum(demands) - stock)
for delivery in range(delivery_min,delivery_max+1,1):
expense_today = expense + if_none_trivial(delivery)*deliver_fee + delivery*price
expense_today = expense_today + max(0,(stock+delivery-max_capacity))*overnight_fee
stock_next = stock+delivery-demands[0]
print("***********************")
print("expense until yesterday: ",expense)
print("expense until today: ", expense_today)
print(n_days, "remains")
if n_days>1:
violet_search_icecream_shop(stock_next, max_capacity,demands[1:],n_days-1,overnight_fee,price,deliver_fee,total_expense,expense_today)
else:
print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print("total expense",expense_today)
total_expense.append(expense_today)
# yield(expense_today)
total_expense=[]
violet_search_icecream_shop(0,10,[1,2,1,4],4,1,3,4,total_expense=total_expense)
print(total_expense)
print("the optimum cost:", min(total_expense))
from collections import defaultdict
def code_preprocessing(delivery_code):
code_dic = defaultdict(list)
i = 0
for code in delivery_code:
crude = code.split('-',1)
code_dic[crude[0]].append((crude[1],i))
i = i+1
print(code_dic)
code_dict = code_preprocessing(["123-2","2345-1","123-3","123-5","2345-5"])
def swarm_delivery(code_dict):
bee = []
for key,value in code_dict:
bee.append(value)
print(bee)
swarm_delivery(code_dict)
|
[
"jerryxyx@163.com"
] |
jerryxyx@163.com
|
5b771ee4fa02ac609d1a9cff17e724f9d74cdcdc
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/all-gists/5041434/snippet.py
|
a978469c85746091bf61c1a14c4ddfde95ab6244
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398
| 2023-02-11T18:20:10
| 2023-02-11T18:20:10
| 119,861,038
| 76
| 19
| null | 2020-07-26T03:14:55
| 2018-02-01T16:19:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,470
|
py
|
import requests
from bs4 import BeautifulSoup, NavigableString
def get_review_text(block):
"""Get just the text of a review from it's DIV"""
strings = []
for possible_text in block.children:
if isinstance(possible_text, NavigableString):
stripped_text = possible_text.strip()
if len(stripped_text) > 0:
strings.append(stripped_text)
return "\n".join(strings)
def get_review_texts(review_html):
"""Get all the reviews on a review page"""
soup = BeautifulSoup(review_html)
table = soup.find(id="productReviews").tr.td
review_blocks = table.find_all("div", recursive=False)
return [get_review_text(block) for block in review_blocks]
def get_review_page_count(review_html):
"""Get the number of review pages"""
soup = BeautifulSoup(review_html)
try:
return int(soup.find("span", class_="paging").find_all("a")[-2].text)
except:
return 1
def get_all_reviews(review_url):
"""Get all the reviews, given a review page URL"""
# sanitize the url
review_url = "/".join(review_url.split("/")[:-1])
first_review_page = requests.get(review_url).text
review_page_count = get_review_page_count(first_review_page)
reviews = []
for i in range(1, review_page_count + 1):
url = review_url + "?pageNumber=%d" % i
review_html = requests.get(url).text
reviews.extend(get_review_texts(review_html))
return reviews
|
[
"gistshub@gmail.com"
] |
gistshub@gmail.com
|
dac74fe07f41bad595f3daece43a0047c4795112
|
c105570f12f1d56087ffb831f5d34cd763d6c90b
|
/top/api/rest/HotelRoomImgDeleteRequest.py
|
55f04fe4e6cf46864d486cf6bd4e5cf9dc7d3abd
|
[] |
no_license
|
wjianwei126/Alinone
|
01607423833d7736b2fd3c77e9e21f63c69b4e4c
|
80144d4657cb049d651c09647eb245405240f12f
|
refs/heads/master
| 2020-12-07T05:14:58.746777
| 2015-05-06T12:48:33
| 2015-05-06T12:48:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 337
|
py
|
'''
Created by auto_sdk on 2014-11-09 14:51:18
'''
from top.api.base import RestApi
class HotelRoomImgDeleteRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.gid = None
self.position = None
def getapiname(self):
return 'taobao.hotel.room.img.delete'
|
[
"rapospectre@0163.com"
] |
rapospectre@0163.com
|
38989ea3cc2d9323d7df74726b4cbe4770c237d1
|
1d0895269d1d93bab6a0b595c806418b1eeda735
|
/qiskit/providers/ibmq/api/rest/experiment.py
|
21cea49b8c27b3ce319c61a25cd6e4314e06b812
|
[
"Apache-2.0"
] |
permissive
|
Qiskit/qiskit-ibmq-provider
|
3921bf5f77a9621013ada7ea5e18fa199470650c
|
590f68d9ddb42a45c4ac8a8626ea60da85575b21
|
refs/heads/master
| 2023-06-08T03:17:52.745052
| 2023-06-05T14:20:16
| 2023-06-05T14:20:16
| 163,192,893
| 240
| 182
|
Apache-2.0
| 2023-06-05T14:20:18
| 2018-12-26T15:22:11
|
Python
|
UTF-8
|
Python
| false
| false
| 5,218
|
py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Experiment REST adapter."""
import logging
from typing import Dict, Union
from .base import RestAdapterBase
from ..session import RetrySession
logger = logging.getLogger(__name__)
class Experiment(RestAdapterBase):
"""Rest adapter for experiment related endpoints."""
URL_MAP = {
'self': '',
'upload_plots': '/plots'
}
def __init__(self, session: RetrySession, experiment_uuid: str, url_prefix: str = '') -> None:
"""Experiment constructor.
Args:
session: Session to be used in the adaptor.
experiment_uuid: UUID of the experiment.
url_prefix: URL prefix.
"""
super().__init__(session, '{}/experiments/{}'.format(url_prefix, experiment_uuid))
def retrieve(self) -> str:
"""Retrieve the specific experiment.
Returns:
Experiment data.
"""
url = self.get_url('self')
return self.session.get(url).text
def update(self, experiment: str) -> Dict:
"""Update the experiment.
Args:
experiment: Experiment to update.
Returns:
JSON response.
"""
url = self.get_url('self')
return self.session.put(url, data=experiment, headers=self._HEADER_JSON_CONTENT).json()
def delete(self) -> Dict:
"""Delete the experiment.
Returns:
JSON response.
"""
url = self.get_url('self')
return self.session.delete(url).json()
def upload_plot(
self,
plot: Union[bytes, str],
plot_name: str,
sync_upload: bool = True
) -> Dict:
"""Upload a plot for the experiment.
Args:
plot: Plot file name or data to upload.
plot_name: Name of the plot.
sync_upload: By default the server will upload the plot file
to backend storage asynchronously. Set this to False to use
that behavior and not block the upload.
Returns:
JSON response.
"""
url = self.get_url('upload_plots')
headers = {
'x-sync-upload': str(sync_upload)
}
if isinstance(plot, str):
with open(plot, 'rb') as file:
data = {'plot': (plot_name, file)}
response = self.session.post(url, files=data, headers=headers).json()
else:
data = {'plot': (plot_name, plot)} # type: ignore[dict-item]
response = self.session.post(url, files=data, headers=headers).json()
return response
class ExperimentPlot(RestAdapterBase):
"""Rest adapter for experiment plot related endpoints."""
URL_MAP = {
'self': ''
}
def __init__(
self,
session: RetrySession,
experiment_uuid: str,
plot_name: str,
url_prefix: str = '') -> None:
"""Experiment constructor.
Args:
session: Session to be used in the adaptor.
experiment_uuid: UUID of the experiment.
plot_name: Name of the plot.
url_prefix: URL prefix.
"""
super().__init__(session, '{}/experiments/{}/plots/{}'.format(
url_prefix, experiment_uuid, plot_name))
self.plot_name = plot_name
def retrieve(self) -> bytes:
"""Retrieve the specific experiment plot.
Returns:
Plot content.
"""
url = self.get_url('self')
response = self.session.get(url)
return response.content
def delete(self) -> None:
"""Delete this experiment plot."""
url = self.get_url('self')
self.session.delete(url)
def update(
self,
plot: Union[bytes, str],
sync_upload: bool = True
) -> Dict:
"""Update an experiment plot.
Args:
plot: Plot file name or data to upload.
sync_upload: By default the server will upload the plot file
to backend storage asynchronously. Set this to False to use
that behavior and not block the upload.
Returns:
JSON response.
"""
url = self.get_url('self')
headers = {
'x-sync-upload': str(sync_upload)
}
if isinstance(plot, str):
with open(plot, 'rb') as file:
data = {'plot': (self.plot_name, file)}
response = self.session.put(url, files=data, headers=headers).json()
else:
data = {'plot': (self.plot_name, plot)} # type: ignore[dict-item]
response = self.session.put(url, files=data, headers=headers).json()
return response
|
[
"noreply@github.com"
] |
Qiskit.noreply@github.com
|
be022fe3fefde1e08b0c0cafbf8646767f2ba51d
|
a65103e2f33192d9e6fcf8c8852f263369190175
|
/core/models.py
|
c653f88b69de6db591ec935f5bf162047d706249
|
[] |
no_license
|
dhilipsiva/ircman
|
e23153572d5f8cf09d4ed7d47c47b90050762489
|
767b42f321598b155f2fd74729947ed92f8da160
|
refs/heads/master
| 2023-07-10T06:42:45.855788
| 2015-07-22T04:17:00
| 2015-07-22T04:17:00
| 35,310,806
| 6
| 0
| null | 2023-09-05T05:15:21
| 2015-05-09T01:58:04
|
Python
|
UTF-8
|
Python
| false
| false
| 7,308
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# vim: fenc=utf-8
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
#
"""
File name: models.py
Version: 0.1
Author: dhilipsiva <dhilipsiva@gmail.com>
Date created: 2015-05-09
"""
__author__ = "dhilipsiva"
__status__ = "development"
"""
"""
# Python imports
import datetime
from uuid import uuid4
# Django imports
from django.utils.timezone import utc
from django.contrib.auth.models import AbstractUser
from django.db.models import Model, ForeignKey, DateTimeField, UUIDField, \
CharField, TextField, PositiveIntegerField, BooleanField
def utc_now():
"""
`now` with UTC
"""
return datetime.datetime.utcnow().replace(tzinfo=utc)
class User(AbstractUser):
"""
A custom user so that we can add permissions easily
"""
id = UUIDField(primary_key=True, default=uuid4, editable=False)
socket = UUIDField(default=uuid4, editable=False)
class Meta(AbstractUser.Meta):
abstract = False
def save(self, *args, **kwargs):
if 'pbkdf2_sha256' not in self.password:
self.set_password(self.password)
super(User, self).save(*args, **kwargs)
def to_dict(self, with_sensitive_data=False):
"""
Dictify user
"""
d = {
'id': str(self.id),
'username': self.username,
'firstName': self.first_name,
'lastName': self.last_name,
}
if with_sensitive_data:
d.update({
'socket': str(self.socket),
'email': self.email,
})
return d
def __str__(self):
return self.username
def __repr__(self):
return "<User: %s>" % self.__str__()
class Server(Model):
id = UUIDField(primary_key=True, default=uuid4, editable=False)
host = CharField(max_length=256)
port = PositiveIntegerField(default=6667, blank=True)
is_ssl = BooleanField(default=False)
is_sasl = BooleanField(default=False)
def to_dict(self):
"""
Dictify user
"""
return {
'id': str(self.id),
'host': self.host,
'port': self.port,
'isSsl': self.is_ssl,
'isSasl': self.is_sasl,
}
def __str__(self):
return self.host
def __repr__(self):
return "<Server: %s>" % self.__str__()
class UserServer(Model):
id = UUIDField(primary_key=True, default=uuid4, editable=False)
user = ForeignKey(User, related_name="user_servers")
server = ForeignKey(Server, related_name="user_servers")
label = CharField(max_length=256, default="My IRC Server")
username = CharField(max_length=256)
password = CharField(max_length=256, null=True, blank=True)
nickname = CharField(max_length=256)
realname = CharField(max_length=256, null=True, blank=True)
def to_dict(self):
"""
Dictify user
"""
return {
'id': str(self.id),
'user': str(self.user_id),
'server': str(self.server_id),
'label': self.label,
'username': self.username,
'password': self.password,
'nickname': self.nickname,
'realname': self.realname,
}
def __str__(self):
return "%s - %s" % (self.user, self.server)
def __repr__(self):
return "<UserServer: %s>" % self.__str__()
class Channel(Model):
id = UUIDField(primary_key=True, default=uuid4, editable=False)
server = ForeignKey(Server, related_name="channels")
name = CharField(max_length=256)
def to_dict(self):
"""
Dictify user
"""
return {
'id': str(self.id),
'server': str(self.server_id),
'name': self.name,
}
def __str__(self):
return "%s - %s" % (self.server, self.name)
def __repr__(self):
return "<Channel: %s>" % self.__str__()
class UserChannel(Model):
id = UUIDField(primary_key=True, default=uuid4, editable=False)
user_server = ForeignKey(
UserServer, related_name="user_channels", null=True)
channel = ForeignKey(Channel, related_name="user_channels")
nickname = CharField(max_length=256)
password = CharField(max_length=256, null=True, blank=True)
mode = CharField(max_length=16, null=True, blank=True)
def to_dict(self):
"""
Dictify user
"""
return {
"id": str(self.id),
"userServer": str(self.user_server_id),
"channel": str(self.channel_id),
"nickname": self.nickname,
"password": self.password,
"mode": self.mode,
}
def to_dict_deep(self):
"""
Deep `to_dict`
"""
d = self.to_dict()
d['userServer'] = self.user_server.to_dict()
d['channel'] = self.channel.to_dict()
return d
def __str__(self):
return "%s - %s" % (self.channel, self.nickname)
def __repr__(self):
return "<UserChannel: %s>" % self.__str__()
class BaseMessage(Model):
id = UUIDField(primary_key=True, default=uuid4, editable=False)
text = TextField()
created_on = DateTimeField(auto_now_add=True)
def to_dict(self):
"""
Dictify user
"""
return {
'id': str(self.id),
'text': self.text,
'createdOn': self.created_on,
}
class Meta:
abstract = True
class Message(BaseMessage):
channel = ForeignKey(Channel, related_name="messages")
user_channel = ForeignKey(UserChannel, related_name="messages")
def to_dict(self):
"""
Dictify user
"""
d = super(Message, self).to_dict()
d.update({
'channel': str(self.channel_id),
'userChannel': str(self.user_channel_id),
})
return d
def __str__(self):
return "%s" % self.text
def __repr__(self):
return "<Message: %s>" % self.__str__()
class Conversation(Model):
id = UUIDField(primary_key=True, default=uuid4, editable=False)
user_channel_1 = ForeignKey(UserChannel, related_name='+')
user_channel_2 = ForeignKey(UserChannel, related_name='+')
def to_dict(self):
"""
Dictify Conversation
"""
return {
'id': str(self.id),
'userChannel1': str(self.user_channel_1_id),
'userChannel2': str(self.user_channel_2_id),
}
def __str__(self):
return "%s - %s" % (self.user_channel_1_id, self.user_channel_2_id)
def __repr__(self):
return "<Conversation: %s>" % self.__str__()
class PrivateMessage(BaseMessage):
conversation = ForeignKey(Conversation, related_name='private_messages')
user_channel = ForeignKey(UserChannel, related_name='private_messages')
read = BooleanField(default=False)
def to_dict(self):
"""
Dictify user
"""
d = super(PrivateMessage, self).to_dict()
d.update({
'conversation': str(self.conversation_id),
'userChannel': str(self.user_channel_id),
'read': self.read,
})
return d
def __repr__(self):
return "<PrivateMessage: %s>" % self.__str__()
|
[
"dhilipsiva@gmail.com"
] |
dhilipsiva@gmail.com
|
86c9b86af42f911790b3c9a9171e90b2a3a6d5ab
|
6b8c3974d3ce5f7841e51dcb406666c0c5d92155
|
/heat/heat/tests/mistral/test_mistral_cron_trigger.py
|
f675a7962563de8a3c4633e373b00471eb251b3b
|
[
"Apache-2.0"
] |
permissive
|
swjang/cloudexchange
|
bbbf78a2e7444c1070a55378092c17e8ecb27059
|
c06ed54f38daeff23166fb0940b27df74c70fc3e
|
refs/heads/master
| 2020-12-29T03:18:43.076887
| 2015-09-21T07:13:22
| 2015-09-21T07:13:22
| 42,845,532
| 1
| 1
| null | 2015-09-21T07:13:22
| 2015-09-21T05:19:35
|
C++
|
UTF-8
|
Python
| false
| false
| 4,428
|
py
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from heat.common import template_format
from heat.engine import resources
from heat.engine.resources.openstack.mistral import cron_trigger
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
stack_template = '''
heat_template_version: 2013-05-23
resources:
cron_trigger:
type: OS::Mistral::CronTrigger
properties:
name: my_cron_trigger
pattern: "* * 0 * *"
workflow: {'name': 'get_first_glance_image', 'input': {} }
count: 3
first_time: "2015-04-08 06:20"
'''
class FakeCronTrigger(object):
def __init__(self, name):
self.name = name
self.next_execution_time = '2015-03-01 00:00:00'
self.remaining_executions = 3
self._data = {'trigger': 'info'}
class MistralCronTriggerTestResource(cron_trigger.CronTrigger):
@classmethod
def is_service_available(cls, context):
return True
class MistralCronTriggerTest(common.HeatTestCase):
def setUp(self):
super(MistralCronTriggerTest, self).setUp()
resources.initialise()
utils.setup_dummy_db()
self.ctx = utils.dummy_context()
t = template_format.parse(stack_template)
self.stack = utils.parse_stack(t)
resource_defns = self.stack.t.resource_definitions(self.stack)
self.rsrc_defn = resource_defns['cron_trigger']
self.client = mock.Mock()
self.patchobject(MistralCronTriggerTestResource, 'client',
return_value=self.client)
def _create_resource(self, name, snippet, stack):
ct = MistralCronTriggerTestResource(name, snippet, stack)
self.client.cron_triggers.create.return_value = FakeCronTrigger(
'my_cron_trigger')
self.client.cron_triggers.get.return_value = FakeCronTrigger(
'my_cron_trigger')
scheduler.TaskRunner(ct.create)()
args = self.client.cron_triggers.create.call_args[1]
self.assertEqual('* * 0 * *', args['pattern'])
self.assertEqual('get_first_glance_image', args['workflow_name'])
self.assertEqual({}, args['workflow_input'])
self.assertEqual('2015-04-08 06:20', args['first_time'])
self.assertEqual(3, args['count'])
self.assertEqual('my_cron_trigger', ct.resource_id)
return ct
def test_create(self):
ct = self._create_resource('trigger', self.rsrc_defn, self.stack)
expected_state = (ct.CREATE, ct.COMPLETE)
self.assertEqual(expected_state, ct.state)
def test_resource_mapping(self):
mapping = cron_trigger.resource_mapping()
self.assertEqual(1, len(mapping))
self.assertEqual(cron_trigger.CronTrigger,
mapping['OS::Mistral::CronTrigger'])
def test_attributes(self):
ct = self._create_resource('trigger', self.rsrc_defn, self.stack)
self.assertEqual('2015-03-01 00:00:00',
ct.FnGetAtt('next_execution_time'))
self.assertEqual(3, ct.FnGetAtt('remaining_executions'))
self.assertEqual({'trigger': 'info'}, ct.FnGetAtt('show'))
def test_delete(self):
ct = self._create_resource('trigger', self.rsrc_defn, self.stack)
scheduler.TaskRunner(ct.delete)()
self.assertEqual((ct.DELETE, ct.COMPLETE), ct.state)
self.client.cron_triggers.delete.assert_called_once_with(
ct.resource_id)
def test_delete_not_found(self):
ct = self._create_resource('trigger', self.rsrc_defn, self.stack)
self.client.cron_triggers.delete.side_effect = (
self.client.mistral_base.APIException(error_code=404))
scheduler.TaskRunner(ct.delete)()
self.assertEqual((ct.DELETE, ct.COMPLETE), ct.state)
self.client.cron_triggers.delete.assert_called_once_with(
ct.resource_id)
|
[
"kiku4@kinx.net"
] |
kiku4@kinx.net
|
aba17ae60493775a1cbdd3dc41b31bb2ee9afbcd
|
669e9241b02bdaa303fbc2fd4023b90d4d179a59
|
/Cash Register/base.py
|
272ceeded4ef34a761806bab212fe9e20d79a550
|
[] |
no_license
|
benjaminpotter/HatchProjects
|
0854cf46ae7c3781468116a5d63b703dd54ae68c
|
7f6a948d3474c755d071751b725c059e6c7f3553
|
refs/heads/master
| 2022-01-28T16:58:03.449073
| 2019-08-16T13:47:30
| 2019-08-16T13:47:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,146
|
py
|
def setup():
size(400, 400)
bg_color = color(120, 120, 120)
bills_and_coins = [100, 50, 20, 10, 5, 2, 1, 0.25, 0.10, 0.5]
def draw_cash_registerfunction():
noStroke()
fill(50, 50, 50)
rect(50, 50, 300, 220, 0)
fill(225, 225, 225)
rect(87, 130, 225, 35, 0)
fill(225, 225, 225)
rect(87, 210, 225, 35, 0)
fill(225, 225, 225)
textSize(20)
text("Cash Register", 135, 85)
textSize(14)
text("Cost", 90, 120)
text("Tendered", 90, 200)
def draw():
background(bg_color)
draw_cash_register()
noLoop()
cost = prompt("Input cost", "")
tendered = prompt("Input tendered amount", "")
change = Math.round((tendered - cost) / 0.05) * 0.05
fill(0, 0, 0)
text(cost, 95, 152)
text(tendered, 95, 232)
res = []
for i in range(0, 10):
count = 0;
while change >= bills_and_coins[i]:
count++
change -= bills_and_coins[i]
res.append(count)
answer = ""
for i in range (0, 10):
if res[i] > 0:
answer += res[i] + "x $" + bills_and_coins[i] + "\n"
text(answer, 70, 325)
|
[
"noreply@github.com"
] |
benjaminpotter.noreply@github.com
|
4077f2c69b513dfbb09a6279cfbd85f564d84ab5
|
9f9ec8bebfe8b7ac8e60dcaa23153abe976585e6
|
/dataCommons/postingAPI/tasks.py
|
1ade94032b03b28ce48f6ba157446b40d942de40
|
[] |
no_license
|
erikwestra/data-commons
|
bbf32cd9b4b64ace28bcb049190d8272a23ed891
|
e3ed33fad104157ff505bb02bc7ae981f8ba3b11
|
refs/heads/master
| 2020-04-11T12:03:19.996644
| 2013-02-14T17:08:24
| 2013-02-14T17:08:24
| 8,188,655
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,043
|
py
|
""" dataCommons.postingAPI.tasks
This module implements the background tasks run by the Celery task queuing
system.
"""
import logging
from celery import task
from dataCommons.shared.lib.decorators import print_exceptions_to_stdout
from dataCommons.postingAPI import postingProcessor
#############################################################################
logger = logging.getLogger(__name__)
#############################################################################
@task()
@print_exceptions_to_stdout
def process_postings(parsed_postings):
""" Process the given set of postings.
Note that we simply call the posting processor to do all the work, but
wrap it up in a Huey command so that the work is queued, and use the
'print_exceptions_to_stdout' decorator so that any exceptions will be
logged to stdout rather than written to the Huey log file (which won't
exist when the system is deployed to Heroku).
"""
postingProcessor.process_postings(parsed_postings)
|
[
"ewestra@gmail.com"
] |
ewestra@gmail.com
|
06c1ae158672d9a651a94f2a70faf79cce3232d5
|
fff54b01b46cef0bbc70a6469c88c01c82af5a57
|
/network/analyzer/libpcap/actions.py
|
33f3310aad9a49bdb10846b36e483c86e64304b9
|
[] |
no_license
|
LimeLinux/Packages
|
e51deae6c0d1406e31f06caa5aaa7749466bef0b
|
d492e075d8b051df68b98c315ad0628e33a8fac4
|
refs/heads/master
| 2021-01-11T12:37:22.150638
| 2018-08-30T18:24:32
| 2018-08-30T18:24:32
| 77,054,292
| 5
| 19
| null | 2018-02-02T17:24:06
| 2016-12-21T13:33:45
|
Python
|
UTF-8
|
Python
| false
| false
| 853
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def setup():
shelltools.export("CFLAGS", "%s -fPIC" % get.CFLAGS())
autotools.autoreconf("-vfi")
autotools.configure("--prefix=/usr \
--enable-ipv6")
def build():
autotools.make("all")
autotools.make("shared")
def install():
autotools.rawInstall('DESTDIR="%s"' % get.installDIR())
# No static libs
pisitools.remove("/usr/lib/*.a")
# it is needed for ppd etc.
pisitools.insinto("/usr/include", "pcap-int.h")
pisitools.dodoc("CHANGES", "CREDITS", "README*", "VERSION", "TODO")
|
[
"zirkovandersen@gmail.com"
] |
zirkovandersen@gmail.com
|
0c3ba85209268e4419995bf3b0e59c8dc4ee5a21
|
1a4bc1a11fdb3f714f22f5e0e826b47aa0569de2
|
/projects/project02/tests/q1_1.py
|
61dd4039d3bf8affab16b12c4665cbd175e3a540
|
[] |
no_license
|
taylorgibson/ma4110-fa21
|
201af7a044fd7d99140c68c48817306c18479610
|
a306e1b6e7516def7de968781f6c8c21deebeaf5
|
refs/heads/main
| 2023-09-05T21:31:44.259079
| 2021-11-18T17:42:15
| 2021-11-18T17:42:15
| 395,439,687
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
py
|
test = { 'name': 'q1_1',
'points': None,
'suites': [{'cases': [{'code': '>>> type(all_unique_causes) in [np.ndarray, list]\nTrue', 'hidden': False, 'locked': False}], 'scored': True, 'setup': '', 'teardown': '', 'type': 'doctest'}]}
|
[
"taylorgibson@gmail.com"
] |
taylorgibson@gmail.com
|
872fea6e32fd13b181d5aee64e9711014a9df0d1
|
cbc5e26bb47ae69e80a3649c90275becf25ce404
|
/xlsxwriter/test/styles/test_write_num_fmts.py
|
1445d94a1fdf72bcc2de972b5c46a5085b48cd0d
|
[
"BSD-2-Clause-Views",
"BSD-3-Clause",
"MIT"
] |
permissive
|
mst-solar-car/kicad-bom-generator
|
c3549409c3139f787ad28391372b5cb03791694a
|
2aae905056d06f3d25343a8d784049c141d05640
|
refs/heads/master
| 2021-09-07T14:00:40.759486
| 2018-02-23T23:21:13
| 2018-02-23T23:21:13
| 107,868,801
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,007
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2017, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ...styles import Styles
from ...format import Format
class TestWriteNumFmts(unittest.TestCase):
"""
Test the Styles _write_num_fmts() method.
"""
def setUp(self):
self.fh = StringIO()
self.styles = Styles()
self.styles._set_filehandle(self.fh)
def test_write_num_fmts(self):
"""Test the _write_num_fmts() method"""
xf_format = Format()
xf_format.num_format_index = 164
xf_format.set_num_format('#,##0.0')
self.styles._set_style_properties([[xf_format], None, 0, 1, 0, 0, [], []])
self.styles._write_num_fmts()
exp = """<numFmts count="1"><numFmt numFmtId="164" formatCode="#,##0.0"/></numFmts>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
|
[
"mwrb7d@mst.edu"
] |
mwrb7d@mst.edu
|
84a8d7587333beacb530ca0dc5bd8c795e393d3a
|
5cb29431ecbba7b61463c67749794b54201907e1
|
/pelicide/runner.py
|
688ba9e582ad94aeb3dcda92e73a6b397e49a1ed
|
[] |
no_license
|
iksteen/pelicide
|
6a9a88a1fe2df6acb271c465942820ab76ccfa82
|
5b8a6a919257840fafdcab5c886c81a72b18a6c0
|
refs/heads/master
| 2021-05-16T02:39:52.910803
| 2016-01-06T11:40:51
| 2016-01-06T11:40:51
| 34,100,676
| 16
| 2
| null | 2019-10-22T23:49:10
| 2015-04-17T06:35:34
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,958
|
py
|
import json
import os
import sys
from twisted.internet import defer, protocol
class RunnerProtocol(protocol.ProcessProtocol):
def __init__(self, callback):
self.callback = callback
self.seq = 0
self.buffer = ''
self.pending = set()
def sendCommand(self, command, args=None):
self.seq += 1
self.pending.add(self.seq)
self.transport.write('%d %s %s\n' % (self.seq, command, json.dumps(args)))
return self.seq
def outReceived(self, data):
self.buffer += data
while '\n' in self.buffer:
response, self.buffer = self.buffer.split('\n', 1)
self.process_response(response)
def process_response(self, response):
seq, result, args = response.split(' ', 2)
seq = int(seq)
if seq in self.pending:
self.pending.remove(seq)
args = json.loads(args)
if self.callback is not None:
self.callback(seq, result == '+', args)
def processExited(self, reason):
pending, self.pending = self.pending, set()
while pending:
self.callback(pending.pop(), False, reason)
class Runner(object):
def __init__(self, python, config_path, settings, **kwargs):
reactor = kwargs.get('reactor')
if reactor is None:
from twisted.internet import reactor
self.reactor = reactor
self.python = python
self.config_path = config_path
self.init_settings = settings
self.settings = None
self.d = None
self.pending = {}
def start(self):
self.d = defer.Deferred()
runner = os.path.join(os.path.dirname(__file__), 'pelican-runner.py')
protocol = RunnerProtocol(self.responseReceived)
self.transport = self.reactor.spawnProcess(
protocol,
self.python,
[
self.python,
runner,
self.config_path,
json.dumps(self.init_settings),
],
env=None,
childFDs={
0: 'w',
1: 'r',
2: sys.stderr.fileno(),
},
)
return self.d
def restart(self):
return self.command('quit').addCallback(lambda _: self.start())
def command(self, command, args=None):
if self.transport.proto is None:
self.start()
command_id = self.transport.proto.sendCommand(command, args)
d = defer.Deferred()
self.pending[command_id] = d
return d
def responseReceived(self, command_seq, success, args):
if command_seq == 0:
self.settings = args
if self.d:
self.d.callback(args)
self.d = None
return
d = self.pending.pop(command_seq)
if success:
d.callback(args)
else:
d.errback(RuntimeError(args))
|
[
"iksteen@gmail.com"
] |
iksteen@gmail.com
|
842861fff402dd09ab5a9f2cfa8e490d1b842ff7
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03696/s669152949.py
|
6ffe317a8dbd760aeaf50530708cdf2ac5bd88ad
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 162
|
py
|
N=int(input())
S=input()
A=0 #(
B=0 #)
for s in S:
if s=='(':
A+=1
elif s==')' and A>0:
A-=1
else:
B+=1
print('('*B+S+')'*A)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
1ee7dc2b9ca208d6002aaa8adfe393e5b25d084f
|
88be4d5657d19462eb1d74d2d4d98180b423a889
|
/robolearn/torch/policies/weighted_multi_policy_selector.py
|
e0cb8481457f36ea0e1a6161526cff851f74721d
|
[
"BSD-3-Clause"
] |
permissive
|
domingoesteban/robolearn
|
bc58278fe38894f4ca9ec9e657ee13a479a368b7
|
0d20125425c352b80ef2eeed1c0b11ab6497b11a
|
refs/heads/master
| 2020-04-15T22:38:25.343229
| 2019-01-29T17:01:42
| 2019-01-29T17:01:42
| 165,080,647
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,021
|
py
|
from robolearn.torch.core import PyTorchModule
from robolearn.models.policies import ExplorationPolicy
class WeightedMultiPolicySelector(PyTorchModule, ExplorationPolicy):
def __init__(self, multipolicy, idx):
self.save_init_params(locals())
super(WeightedMultiPolicySelector, self).__init__()
ExplorationPolicy.__init__(self, multipolicy.action_dim)
self._multipolicy = multipolicy
self.idx = idx
def get_action(self, *args, **kwargs):
kwargs['pol_idx'] = self.idx
action, policy_info = self._multipolicy.get_action(*args, **kwargs)
return action, policy_info
def get_actions(self, *args, **kwargs):
kwargs['pol_idx'] = self.idx
action, policy_info = self._multipolicy.get_actions(*args, **kwargs)
return action, policy_info
def forward(self, *nn_input, **kwargs):
kwargs['pol_idx'] = self.idx
action, policy_info = self._multipolicy(*nn_input, **kwargs)
return action, policy_info
|
[
"domingo.esteban@iit.it"
] |
domingo.esteban@iit.it
|
8ae9fb9ae54b014300cf7675e7bfdbabcd0e5011
|
836d5f7190f6b4503e758c87c71598f18fdfce14
|
/12-Veri-Tabanı/sqlite-database-2/database.py
|
d5adc5a68ad53c2351607d67672c1ff0cbb2b0b7
|
[] |
no_license
|
S-Oktay-Bicici/PYTHON-PROGRAMMING
|
cf452723fd3e7e8ec2aadc7980208d747c502e9a
|
22e864f89544249d6309d6f4570a4104bf47346b
|
refs/heads/main
| 2021-11-30T00:19:21.158084
| 2021-11-16T15:44:29
| 2021-11-16T15:44:29
| 316,716,147
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
import sqlite3
veriler = [
("Ahmet Ümit","İstanbul Hatırası"),
("Yaşar Kemal","İnce Memed"),
("Paulo Coelho","Simyacı"),
("Paulo Coelho","Aldatmak")]
db = sqlite3.connect("kitaplar.db")
imlec = db.cursor()
imlec.execute("CREATE TABLE IF NOT EXISTS 'kitaplık tablosu' (yazar,kitap)")
for veri in veriler:
imlec.execute("INSERT INTO 'kitaplık tablosu' VALUES (?,?)",veri)
db.commit()
db.close()
|
[
"noreply@github.com"
] |
S-Oktay-Bicici.noreply@github.com
|
263e5accf9c46da5bf018e6fe716b80de9ee55da
|
4e0ff785b993b6bae70745434e61f27ca82e88f0
|
/229-Majority-Element-II/solution.py
|
1b9494257f4bd6ea4a55db58f2ad57d67a4ef1ec
|
[] |
no_license
|
NobodyWHU/Leetcode
|
2ee557dd77c65c5fa8ca938efb6de3793b4de261
|
d284fa3daab02531e5300867463b293d44737e32
|
refs/heads/master
| 2021-01-23T14:05:28.161062
| 2016-09-23T11:51:51
| 2016-09-23T11:51:51
| 58,898,114
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 621
|
py
|
class Solution(object):
def majorityElement(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
n1 = n2 = None
c1 = c2 = 0
for num in nums:
if n1 == num:
c1 += 1
elif n2 == num:
c2 += 1
elif c1 == 0:
n1, c1 = num, 1
elif c2 == 0:
n2, c2 = num, 1
else:
c1, c2 = c1 - 1, c2 - 1
size = len(nums)
return [n for n in (n1, n2)
if n is not None and nums.count(n) > size / 3]
|
[
"haohaoranran@126.com"
] |
haohaoranran@126.com
|
4490d298cb083a520e91f8cd046242f7439b10be
|
60cf5de97160c0c104b447879edd0ea1ca9724e8
|
/q29.py
|
34fb2528f462f89c7b3226061a2fd7f1d74bc2cd
|
[] |
no_license
|
VinayHaryan/String
|
6f6b7924ab87ac8ea5509edefaa3aeda795b0de0
|
089dcf02a8d26afcae0ac2b23c640be5a6079095
|
refs/heads/main
| 2023-05-27T22:15:31.792837
| 2021-06-17T08:39:42
| 2021-06-17T08:39:42
| 377,736,749
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,557
|
py
|
'''
RUN LENGTH ENCODING IN PYTHON
given an input string, write a function that returns
string for the input string
For example, if the input string is ‘wwwwaaadexxxxxx’,
then the function should return ‘w4a3d1e1x6’.
Examples:
Input : str = 'wwwwaaadexxxxxx'
Output : 'w4a3d1e1x6'
'''
# python code for run length encoding
# from collections import OrderedDict
# def runlength(input):
# # generate ordered dictionary of all lower
# # case alphabets, its output will be
# # dict = {'w':0, 'a':0, 'd':0, 'e':0, 'x':0}
# dict = OrderedDict.fromkeys(input,0)
# # now iterate through input string to calculate
# # frquency of each character, its output will be
# # dict = {'w':4,'a':3,'d':1,'e':1,'x':6}
# for ch in input:
# dict[ch] += 1
# # now iterate through dictionary to make
# # output string from (key,value) pairs
# output = ''
# for key,value in dict.items():
# output = output + key + str(value)
# return output
# # Driver function
# if __name__ == '__main__':
# input="wwwwaaadexxxxxx"
# print (runlength(input))
from collections import OrderedDict
def runlengthencoding(input):
dict = OrderedDict.fromkeys(input,0)
for ch in input:
dict[ch] += 1
output = ''
for key, value in dict.items():
output = output + key + str(value)
return output
# Driver function
if __name__ == '__main__':
input = 'wwwwaaadexxxxxx'
print(runlengthencoding(input))
|
[
"noreply@github.com"
] |
VinayHaryan.noreply@github.com
|
9d5e381b6742e606d841d20ce2e6480a9029a65d
|
e3af1769d017fa5b20677b1228fd3ab42afc8927
|
/projet/IBPackage/operations.py
|
481a6a5755012665fb4d06ee741c5fdef5d684ad
|
[] |
no_license
|
komi24/IB201116
|
08e8692a72badb82eecc79af753e1cf5c4021380
|
924c6540978b0308686eac867c16a3f6d1725f65
|
refs/heads/master
| 2023-01-19T11:34:32.627342
| 2020-11-20T15:24:31
| 2020-11-20T15:24:31
| 313,242,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
# -*- coding: utf-8 -*-
def ajoute_2(a):
return a + 2
def addition(a, b):
return a + b
def produit(a, b):
return a * b
# def moyenne(liste, operation, init):
# somme = init
# for i in liste:
# somme = operation(i, somme)
|
[
"mickael.bolnet@gmail.com"
] |
mickael.bolnet@gmail.com
|
77b73a4c62c781666aa6f58703e8ed6973d129db
|
c61145e8771724575f67ae5738dd6cbb9626a706
|
/blog/models.py
|
b885ebd7fe878c0266a464c1da35eb04d96169e5
|
[] |
no_license
|
Seredyak1/test_task
|
1399dd082f4281ca6f72d036f4df4c1c6945dafe
|
a5d433b827df46ffa95dd6dd91245b204884674f
|
refs/heads/master
| 2020-04-16T08:03:04.521740
| 2019-01-16T09:33:47
| 2019-01-16T09:33:47
| 165,409,648
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,118
|
py
|
from django.contrib.auth.models import User
from django.db import models
class Post(models.Model):
class Meta:
verbose_name_plural = 'Posts'
ordering = ('-updated_at',)
user = models.ForeignKey(User, on_delete=models.CASCADE)
title = models.CharField(max_length=128, blank=True, null=True)
body = models.TextField(blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.title
@property
def like_count(self):
"""Show the number of likes in the Post"""
return self.like_set.count()
def add_like(self, user):
"""Add like to this Post"""
Like.objects.get_or_create(user=user, post=self)
def unlike(self, user):
"""Delete like to this Post"""
Like.objects.filter(user=user, post=self).delete()
class Like(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
post = models.ForeignKey(Post, on_delete=models.CASCADE)
created = models.DateTimeField(auto_now_add=True)
|
[
"sanya.seredyak@gmail.com"
] |
sanya.seredyak@gmail.com
|
8ae1215e7351323fa30e296d34e9cf8d769a78c1
|
6c50175e82974fdb0ccabd544a40e013e6672cb0
|
/LoginReg_Bootstrap/settings.py
|
fbcb01abea32951e3eba4b4872bb9626432a3aa4
|
[] |
no_license
|
Jallnutt1/LoginReg_Bootstrap
|
9515878688ac6a16efaba18345b90b389a6c6213
|
60532872f1e04a5809f65745665e2f16df0a913e
|
refs/heads/main
| 2023-05-26T05:02:59.124607
| 2021-06-04T01:50:46
| 2021-06-04T01:50:46
| 373,688,654
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,151
|
py
|
"""
Django settings for LoginReg_Bootstrap project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!lgvb3n$coe(bg@64j#p)&r^u6+o&y!vjmh=1c&iph=j%%&ylu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'LoginReg',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'LoginReg_Bootstrap.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'LoginReg_Bootstrap.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"{ID}+{username}@users.noreply.github.com"
] |
{ID}+{username}@users.noreply.github.com
|
c006ca77594cce7b285e3cb5b9081c678b8e1f01
|
668dad44beb30cadb170e32a8a7f0a57c42e653c
|
/denormalize_to_csv.py
|
d2910eec545e9f6d62c1a4e254eb4424ae66ed54
|
[] |
no_license
|
SEL-Columbia/ss_data_analysis
|
22b72540732b03836423e18462495b2252a2cca8
|
dfb8c2670cddbddbb693e5a3243bc829bccf5ae0
|
refs/heads/master
| 2016-09-05T12:31:00.546458
| 2013-10-21T23:01:34
| 2013-10-21T23:01:34
| 13,162,453
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,354
|
py
|
import os
import datetime
import csv
import sys
"""
denormalize_to_csv.py
usage: python denormalize_to_csv.py logs_dir
description: Script to take a directory of sharedsolar log files
in csv format and denormalizes them such that they
can be concatenated together into "one big table"
of the same structure without losing any information
(while duplicating some...hence "denormalize")
"""
FIELD_MAP = {
'Time Stamp': 'time_stamp',
'Watts': 'watts',
'Volts': 'volts',
'Amps': 'amps',
'Watt Hours SC20': 'watt_hours_sc20',
'Watt Hours Today': 'watt_hours_today',
'Max Watts': 'max_watts',
'Max Volts': 'max_volts',
'Max Amps': 'max_amps',
'Min Watts': 'min_watts',
'Min Volts': 'min_volts',
'Min Amps': 'min_amps',
'Power Factor': 'power_factor',
'Power Cycle': 'power_cycle',
'Frequency': 'frequency',
'Volt Amps': 'volt_amps',
'Relay Not Closed': 'relay_not_closed',
'Send Rate': 'send_rate',
'Machine ID': 'machine_id',
'Type': 'circuit_type',
'Credit': 'credit'
}
def write_denormalized_csv(logfile, site, ip):
outfile = logfile.replace(".log", ".csv")
with open(logfile,'r') as csvinput:
with open(outfile, 'w') as csvoutput:
first_line = csvinput.readline()
# Simple check for properly formatted file (NOTE: MAINS files will not have a credit field at the end)
if (first_line.startswith("Time Stamp,Watts,Volts,Amps,Watt Hours SC20,Watt Hours Today,Max Watts,Max Volts,Max Amps,Min Watts,Min Volts,Min Amps,Power Factor,Power Cycle,Frequency,Volt Amps,Relay Not Closed,Send Rate,Machine ID,Type")):
# reset read ptr
csvinput.seek(0)
reader = csv.reader(csvinput)
writer = csv.writer(csvoutput, lineterminator='\n')
all = []
has_credit = True
# handle the header row
existing_header_row = next(reader)
new_header_row = []
# add the new denormalized fields to the new header
new_header_row.append('line_num')
new_header_row.append('site_id')
new_header_row.append('ip_addr')
# If the header row doesn't contain the Credit field, add it
if existing_header_row[-1] != 'Credit':
existing_header_row.append('Credit')
has_credit = False
# convert field names
for field in existing_header_row:
if field not in FIELD_MAP:
sys.stderr.write("Erroneous field: %s in file: %s skipping..." % (field, logfile))
else:
new_header_row.append(FIELD_MAP[field])
all.append(new_header_row)
line_num = 0
for row in reader:
row.insert(0, line_num)
row.insert(1, site)
row.insert(2, ip)
# in case there was no credit field in the input file, make it 0
if not has_credit:
row.append("0")
all.append(row)
line_num = line_num + 1
writer.writerows(all)
line_num = 0
else:
sys.stderr.write("Empty or corrupted file: %s\n" % logfile)
def denormalize_to_csv(logs_dir):
for (dirpath,dirnames,filenames) in os.walk(logs_dir):
for f in filenames:
if f.endswith(".log"):
dir_info = dirpath.split("/")
# Note: dir_info contents are blah/Site/YYYY/MM/DD/HH
site = dir_info[-5] # get the site from the dir (site is always 5 dirs up in the path)
ip = f[0:f.find(".")] # get the ip from the filename
full_filename = os.path.join(dirpath, f)
write_denormalized_csv(full_filename, site, ip)
if __name__=="__main__":
import sys
assert len(sys.argv) == 2, \
"Usage: python denormalize_to_csv.py logs_dir"
logs_dir = sys.argv[1]
denormalize_to_csv(logs_dir)
|
[
"chris.natali@gmail.com"
] |
chris.natali@gmail.com
|
5d2c18d1ea37c56236232061cf2a19e8e6d11fac
|
ca75f7099b93d8083d5b2e9c6db2e8821e63f83b
|
/z2/part2/interactive/jm/random_normal_1/561870518.py
|
e07b9173fe09044ec19d2a4fbff66e5550b7c929
|
[
"MIT"
] |
permissive
|
kozakusek/ipp-2020-testy
|
210ed201eaea3c86933266bd57ee284c9fbc1b96
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
refs/heads/master
| 2022-10-04T18:55:37.875713
| 2020-06-09T21:15:37
| 2020-06-09T21:15:37
| 262,290,632
| 0
| 0
|
MIT
| 2020-06-09T21:15:38
| 2020-05-08T10:10:47
|
C
|
UTF-8
|
Python
| false
| false
| 7,951
|
py
|
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 561870518
"""
"""
random actions, total chaos
"""
board = gamma_new(6, 8, 4, 15)
assert board is not None
assert gamma_move(board, 1, 0, 2) == 1
assert gamma_move(board, 1, 4, 2) == 1
assert gamma_move(board, 2, 3, 7) == 1
assert gamma_move(board, 2, 2, 7) == 1
assert gamma_busy_fields(board, 2) == 2
assert gamma_move(board, 3, 3, 4) == 1
assert gamma_free_fields(board, 3) == 43
board700952916 = gamma_board(board)
assert board700952916 is not None
assert board700952916 == ("..22..\n"
"......\n"
"......\n"
"...3..\n"
"......\n"
"1...1.\n"
"......\n"
"......\n")
del board700952916
board700952916 = None
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_move(board, 2, 2, 4) == 1
assert gamma_free_fields(board, 2) == 41
assert gamma_move(board, 3, 1, 7) == 1
assert gamma_move(board, 3, 0, 5) == 1
assert gamma_move(board, 4, 3, 3) == 1
assert gamma_move(board, 4, 0, 5) == 0
assert gamma_move(board, 1, 5, 5) == 1
assert gamma_move(board, 1, 4, 3) == 1
assert gamma_move(board, 2, 4, 7) == 1
assert gamma_move(board, 3, 3, 5) == 1
assert gamma_move(board, 3, 2, 0) == 1
assert gamma_move(board, 4, 4, 4) == 1
assert gamma_move(board, 4, 1, 1) == 1
assert gamma_move(board, 1, 3, 5) == 0
assert gamma_free_fields(board, 1) == 31
assert gamma_move(board, 2, 5, 1) == 1
assert gamma_busy_fields(board, 3) == 5
assert gamma_move(board, 4, 4, 1) == 1
assert gamma_move(board, 1, 6, 2) == 0
assert gamma_busy_fields(board, 1) == 5
assert gamma_move(board, 2, 1, 3) == 1
assert gamma_move(board, 3, 3, 2) == 1
assert gamma_move(board, 3, 1, 5) == 1
assert gamma_move(board, 4, 3, 0) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 4, 1) == 0
assert gamma_move(board, 2, 1, 7) == 0
assert gamma_free_fields(board, 2) == 25
assert gamma_move(board, 3, 2, 2) == 1
assert gamma_move(board, 3, 2, 0) == 0
assert gamma_free_fields(board, 3) == 24
assert gamma_move(board, 4, 6, 1) == 0
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 1, 7, 0) == 0
assert gamma_move(board, 2, 6, 3) == 0
assert gamma_move(board, 3, 1, 0) == 1
assert gamma_move(board, 4, 6, 5) == 0
board137299271 = gamma_board(board)
assert board137299271 is not None
assert board137299271 == (".3222.\n"
"......\n"
"33.3.1\n"
"..234.\n"
".2.41.\n"
"1.331.\n"
".4..42\n"
"1334..\n")
del board137299271
board137299271 = None
assert gamma_move(board, 1, 2, 1) == 1
assert gamma_move(board, 1, 3, 5) == 0
assert gamma_move(board, 2, 0, 4) == 1
assert gamma_move(board, 2, 5, 4) == 1
assert gamma_busy_fields(board, 2) == 8
assert gamma_move(board, 3, 2, 5) == 1
assert gamma_move(board, 4, 0, 5) == 0
assert gamma_move(board, 4, 1, 2) == 1
assert gamma_move(board, 1, 5, 4) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 6, 5) == 0
assert gamma_move(board, 2, 5, 6) == 1
assert gamma_move(board, 3, 1, 4) == 1
assert gamma_move(board, 3, 5, 7) == 1
assert gamma_move(board, 4, 4, 6) == 1
assert gamma_move(board, 1, 6, 3) == 0
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_move(board, 2, 2, 5) == 0
assert gamma_move(board, 2, 4, 4) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_golden_move(board, 2, 1, 1) == 1
assert gamma_move(board, 3, 7, 0) == 0
assert gamma_move(board, 3, 0, 7) == 1
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 1, 1, 3) == 0
assert gamma_move(board, 1, 5, 5) == 0
assert gamma_move(board, 2, 6, 0) == 0
assert gamma_move(board, 4, 1, 3) == 0
assert gamma_move(board, 4, 2, 3) == 1
assert gamma_move(board, 1, 1, 7) == 0
assert gamma_move(board, 1, 1, 5) == 0
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_free_fields(board, 2) == 12
assert gamma_move(board, 3, 1, 0) == 0
assert gamma_move(board, 4, 1, 3) == 0
assert gamma_move(board, 4, 0, 6) == 1
assert gamma_move(board, 1, 6, 1) == 0
assert gamma_move(board, 1, 1, 3) == 0
assert gamma_move(board, 2, 6, 1) == 0
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_move(board, 3, 6, 1) == 0
assert gamma_move(board, 3, 2, 4) == 0
assert gamma_move(board, 4, 2, 5) == 0
assert gamma_move(board, 1, 4, 1) == 0
assert gamma_move(board, 1, 4, 5) == 1
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_busy_fields(board, 2) == 10
assert gamma_move(board, 3, 1, 0) == 0
assert gamma_move(board, 3, 2, 3) == 0
assert gamma_busy_fields(board, 3) == 13
assert gamma_move(board, 4, 0, 4) == 0
assert gamma_busy_fields(board, 1) == 7
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_free_fields(board, 2) == 10
assert gamma_move(board, 3, 2, 0) == 0
assert gamma_move(board, 3, 0, 6) == 0
assert gamma_busy_fields(board, 3) == 13
assert gamma_move(board, 4, 1, 3) == 0
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_move(board, 1, 5, 4) == 0
assert gamma_move(board, 2, 2, 7) == 0
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 4, 7) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 4, 3, 5) == 0
assert gamma_move(board, 4, 1, 5) == 0
assert gamma_busy_fields(board, 4) == 8
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 2, 0, 5) == 0
assert gamma_move(board, 2, 2, 7) == 0
assert gamma_busy_fields(board, 2) == 10
assert gamma_move(board, 3, 3, 5) == 0
assert gamma_busy_fields(board, 3) == 13
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 1, 1, 4) == 0
assert gamma_move(board, 2, 4, 1) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 4, 0, 3) == 1
assert gamma_golden_move(board, 4, 6, 5) == 0
assert gamma_move(board, 1, 1, 0) == 0
board301797101 = gamma_board(board)
assert board301797101 is not None
assert board301797101 == ("332223\n"
"4...42\n"
"333311\n"
"232342\n"
"42441.\n"
"14331.\n"
".21.42\n"
"1334..\n")
del board301797101
board301797101 = None
assert gamma_move(board, 2, 1, 3) == 0
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 3, 5) == 0
assert gamma_move(board, 3, 0, 0) == 0
assert gamma_free_fields(board, 3) == 9
assert gamma_move(board, 1, 4, 7) == 0
assert gamma_move(board, 2, 6, 1) == 0
assert gamma_free_fields(board, 2) == 9
assert gamma_move(board, 3, 0, 1) == 1
assert gamma_move(board, 4, 0, 5) == 0
assert gamma_move(board, 1, 3, 5) == 0
assert gamma_move(board, 2, 0, 5) == 0
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 3, 2) == 0
assert gamma_move(board, 3, 1, 7) == 0
assert gamma_move(board, 4, 3, 5) == 0
assert gamma_move(board, 4, 2, 7) == 0
assert gamma_move(board, 1, 0, 0) == 0
assert gamma_move(board, 2, 4, 6) == 0
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 6, 2) == 0
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 1, 3) == 0
assert gamma_move(board, 4, 4, 5) == 0
assert gamma_move(board, 1, 5, 0) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 6, 1) == 0
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 3, 6, 1) == 0
assert gamma_busy_fields(board, 3) == 14
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 6, 3) == 0
assert gamma_busy_fields(board, 4) == 9
assert gamma_move(board, 1, 6, 1) == 0
assert gamma_move(board, 1, 1, 4) == 0
board493510436 = gamma_board(board)
assert board493510436 is not None
assert board493510436 == ("332223\n"
"4...42\n"
"333311\n"
"232342\n"
"42441.\n"
"14331.\n"
"321.42\n"
"1334.1\n")
del board493510436
board493510436 = None
assert gamma_move(board, 2, 6, 3) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 4, 2, 5) == 0
gamma_delete(board)
|
[
"jakub@molinski.dev"
] |
jakub@molinski.dev
|
4660ea0d2890f4a7ae7e8f48cbe1f776c8393822
|
de428c011b56db862f05ec0ceab17b85f83f94b1
|
/pythongame/scenes_game/player_environment_interactions.py
|
bfdfdb4eea733e83b0f61229bf7c9e6e1f382640
|
[] |
no_license
|
risooonho/python-2d-game
|
c6d1fceaf09c72a6f7573230a4a899bf79164b7f
|
24b02646ed56f9017069b243b774e0ee46951aea
|
refs/heads/master
| 2021-05-17T06:02:13.538699
| 2020-02-15T23:59:54
| 2020-02-15T23:59:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,646
|
py
|
import sys
from typing import Optional, Any, List, Tuple
from pythongame.core.game_data import CONSUMABLES, PORTALS
from pythongame.core.game_state import GameState, NonPlayerCharacter, LootableOnGround, Portal, WarpPoint, \
ConsumableOnGround, ItemOnGround, Chest, Shrine
from pythongame.core.game_state import WorldEntity
from pythongame.core.item_data import build_item_name, create_item_description, get_item_data
from pythongame.core.math import boxes_intersect, is_x_and_y_within_distance, \
get_manhattan_distance_between_rects
from pythongame.core.npc_behaviors import has_npc_dialog
from pythongame.core.view.game_world_view import EntityActionText, EntityActionTextStyle
from pythongame.scenes_game.game_engine import GameEngine
class PlayerInteractionsState:
def __init__(self):
self.entity_to_interact_with: Any = None
def handle_nearby_entities(self, player_entity: WorldEntity, game_state: GameState, game_engine: GameEngine):
self.entity_to_interact_with = None
player_position = player_entity.get_position()
distance_to_closest_entity = sys.maxsize
for npc in game_state.non_player_characters:
if has_npc_dialog(npc.npc_type):
close_to_player = is_x_and_y_within_distance(player_position, npc.world_entity.get_position(), 75)
distance = get_manhattan_distance_between_rects(player_entity.rect(), npc.world_entity.rect())
if close_to_player and distance < distance_to_closest_entity:
self.entity_to_interact_with = npc
distance_to_closest_entity = distance
lootables_on_ground: List[LootableOnGround] = list(game_state.items_on_ground)
lootables_on_ground += game_state.consumables_on_ground
for lootable in lootables_on_ground:
if boxes_intersect(player_entity.rect(), lootable.world_entity.rect()):
self.entity_to_interact_with = lootable
distance_to_closest_entity = 0
for portal in game_state.portals:
close_to_player = is_x_and_y_within_distance(player_position, portal.world_entity.get_position(), 75)
distance = get_manhattan_distance_between_rects(player_entity.rect(), portal.world_entity.rect())
if close_to_player:
game_engine.handle_being_close_to_portal(portal)
if close_to_player and distance < distance_to_closest_entity:
self.entity_to_interact_with = portal
distance_to_closest_entity = distance
for warp_point in game_state.warp_points:
close_to_player = is_x_and_y_within_distance(player_position, warp_point.world_entity.get_position(), 75)
distance = get_manhattan_distance_between_rects(player_entity.rect(), warp_point.world_entity.rect())
if close_to_player and distance < distance_to_closest_entity:
self.entity_to_interact_with = warp_point
distance_to_closest_entity = distance
for chest in game_state.chests:
close_to_player = is_x_and_y_within_distance(player_position, chest.world_entity.get_position(), 75)
distance = get_manhattan_distance_between_rects(player_entity.rect(), chest.world_entity.rect())
if close_to_player and distance < distance_to_closest_entity:
self.entity_to_interact_with = chest
distance_to_closest_entity = distance
for shrine in game_state.shrines:
close_to_player = is_x_and_y_within_distance(player_position, shrine.world_entity.get_position(), 75)
distance = get_manhattan_distance_between_rects(player_entity.rect(), shrine.world_entity.rect())
if close_to_player and distance < distance_to_closest_entity:
self.entity_to_interact_with = shrine
distance_to_closest_entity = distance
def get_entity_to_interact_with(self):
return self.entity_to_interact_with
def get_entity_action_text(self, is_shift_key_held_down: bool) -> Optional[EntityActionText]:
if self.entity_to_interact_with is None:
return None
return _get_entity_action_text(self.entity_to_interact_with, is_shift_key_held_down)
def _get_entity_action_text(ready_entity: Any, is_shift_key_held_down: bool) -> Optional[EntityActionText]:
if isinstance(ready_entity, NonPlayerCharacter):
return EntityActionText(ready_entity.world_entity, "...", [])
elif isinstance(ready_entity, LootableOnGround):
name, style = _get_loot_name(ready_entity)
if is_shift_key_held_down:
loot_details = _get_loot_details(ready_entity)
else:
loot_details = []
return EntityActionText(ready_entity.world_entity, name, loot_details, style=style)
elif isinstance(ready_entity, Portal):
if ready_entity.is_enabled:
data = PORTALS[ready_entity.portal_id]
return EntityActionText(ready_entity.world_entity, data.destination_name, [])
else:
return EntityActionText(ready_entity.world_entity, "???", [])
elif isinstance(ready_entity, WarpPoint):
return EntityActionText(ready_entity.world_entity, "Warp", [])
elif isinstance(ready_entity, Chest):
return EntityActionText(ready_entity.world_entity, "Open", [])
elif isinstance(ready_entity, Shrine):
if ready_entity.has_been_used:
return None
else:
return EntityActionText(ready_entity.world_entity, "Touch", [])
else:
raise Exception("Unhandled entity: " + str(ready_entity))
def _get_loot_name(lootable: LootableOnGround) -> Tuple[str, EntityActionTextStyle]:
if isinstance(lootable, ConsumableOnGround):
name = CONSUMABLES[lootable.consumable_type].name
return name, EntityActionTextStyle.PLAIN
if isinstance(lootable, ItemOnGround):
name = build_item_name(lootable.item_id)
if lootable.item_id.suffix_id is not None:
style = EntityActionTextStyle.LOOT_RARE
elif get_item_data(lootable.item_id).is_unique:
style = EntityActionTextStyle.LOOT_UNIQUE
else:
style = EntityActionTextStyle.PLAIN
return name, style
def _get_loot_details(lootable: LootableOnGround) -> List[str]:
if isinstance(lootable, ConsumableOnGround):
return [CONSUMABLES[lootable.consumable_type].description]
if isinstance(lootable, ItemOnGround):
# TODO Render suffix lines differently?
return [line.text for line in create_item_description(lootable.item_id)]
|
[
"jonte.murray@gmail.com"
] |
jonte.murray@gmail.com
|
f4018757458a86a63df44d42374c69ea3d612194
|
de4d3fed2b538587124ad855c8ba2f30933e7edf
|
/backend/sparepart_main/sparepart_main/asgi.py
|
597f5430d993ea910e06c11dd1b1488e41205dd3
|
[] |
no_license
|
zahydakhan/project_spare
|
aaea130edefa95630f73b3026de6c32800b0bc7f
|
850374c270fd5ad2897bf9b6f0afb93b9e171059
|
refs/heads/master
| 2023-03-11T17:13:13.103574
| 2021-02-23T06:40:52
| 2021-02-23T06:40:52
| 339,530,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
"""
ASGI config for sparepart_main project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sparepart_main.settings')
application = get_asgi_application()
|
[
"zahydakhan@gmail.com"
] |
zahydakhan@gmail.com
|
4913c2722dadc4eab70e690b9fb6b88e0097a781
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2022_06_15/aio/_application_insights_management_client.py
|
8b411d38bef302c4b99c3ad09e97a57e5267f2e1
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 4,161
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models as _models
from ..._serialization import Deserializer, Serializer
from ._configuration import ApplicationInsightsManagementClientConfiguration
from .operations import WebTestsOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ApplicationInsightsManagementClient: # pylint: disable=client-accepts-api-version-keyword
"""Composite Swagger for Application Insights Management Client.
:ivar web_tests: WebTestsOperations operations
:vartype web_tests:
azure.mgmt.applicationinsights.v2022_06_15.aio.operations.WebTestsOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2022-06-15". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = ApplicationInsightsManagementClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
self._client: AsyncARMPipelineClient = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.web_tests = WebTestsOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "ApplicationInsightsManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details: Any) -> None:
await self._client.__aexit__(*exc_details)
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
58917f325490ea31f0266642594353b9a3a355ea
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_135/2992.py
|
b11dac90d43eb4458792e75c04fcfe2c80575061
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 874
|
py
|
def read_case(file):
answer = int(file.readline()) - 1
for skip in range(4):
line = file.readline()
if skip is answer:
result = set(line.split())
return result
def read_input(filename):
with open(filename, "r") as in_file:
n_cases = int(in_file.readline().split()[0])
for case in range(n_cases):
yield case + 1, read_case(in_file), read_case(in_file)
def solve_case(first, second):
answer = first & second
if len(answer) == 1:
return "".join(answer)
elif answer:
return "Bad magician!"
else:
return "Volunteer cheated!"
cases = read_input("A-small-attempt0.in")
outfile = open("output.txt", "w+")
for case, first, second in cases:
result = solve_case(first, second)
outfile.write("Case #{}: {}\n".format(case, result))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
70ec64d86ed5c0c271e847db24f5640fba8b206c
|
d6f95f4347c2bd934393603819787acf70aaf4eb
|
/2018年11月15日福建省/gg.py
|
af58924762c19caec1070c8e9830667202198a39
|
[] |
no_license
|
moto-faith/work
|
531804bca7b6ecb6d9776ed2086bbf9952e2043b
|
e77e40dbbb7dbb80bd2bc2584a6d1d020f92d2b4
|
refs/heads/master
| 2020-04-08T11:20:37.533419
| 2019-03-18T08:09:27
| 2019-03-18T08:09:27
| 159,302,505
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,942
|
py
|
#!/usr/bin/env python
#coding=utf-8
import time
import datetime
import re
import json
import requests
import time
import redis
import sys
from urlparse import urljoin
from db import DB
reload (sys)
import copy
import MySQLdb
sys.setdefaultencoding ("utf-8")
import htmlparser
from PIL import Image
def handle_post(post):
post = copy.deepcopy(post)
for k,v in post.iteritems():
print k,v
if isinstance(v, unicode):
v = v.encode("utf8")
if not isinstance(v,str) and not isinstance(v, int) and not isinstance(v, float):
v = json.dumps(v)
try:v = MySQLdb.escape_string(v)
except:pass
post.update({k:v})
return post
db = DB ().create ('mysql://zhxg:ZHxg2017!@192.168.1.19:3306/sjk')
table = "list_info"
result1 = "list_model_filter"
urls = db.table(table).where('''siteName = "福建省公共资源交易中心"''').find()
dict_page_info = [url for url in urls if url is not None]
print "********-->", len (dict_page_info)
for str_urls in dict_page_info:
dict_post = str_urls
# print isinstance(dict_post,dict)
# dict_post = json.loads(dict_post)
# for k,v in dict_post.items():
# print k,v
# dd = dict_post.get("detailUrl")
dict_post["tf"]="1"
dict_post["irepeat"]="1"
dict_post["service"]="勘察设计"
dict_post["industry"]="industry"
dic = handle_post (dict_post)
try:
db.table (result1).add (dic)
except Exception as e:
print e
# for k,v in dict_post.items():
# print k,v
# detailUrl = dict_post.get ("detailUrl")
if __name__ == "__main__":
pass
|
[
"noreply@github.com"
] |
moto-faith.noreply@github.com
|
ab6333b26ca5c5e92c98730f02f2f883ba820907
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_frizzing.py
|
1ed7c522d5ae15fc66c9c2b646ba67fb89ea4cfa
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
from xai.brain.wordbase.verbs._frizz import _FRIZZ
#calss header
class _FRIZZING(_FRIZZ, ):
def __init__(self,):
_FRIZZ.__init__(self)
self.name = "FRIZZING"
self.specie = 'verbs'
self.basic = "frizz"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
b6f490deab8b0d16a1adff8b3c97ecf942ab4482
|
9908dc07233b4025425dc212b5e4acb3b087971e
|
/Medium/findRedundantConnection.py
|
c3fd9af33cb44dbda9d4c81e96ae23b61cd0a8ad
|
[] |
no_license
|
Abdelhamid-bouzid/problem-Sovling-
|
15769da71d19186947607574860462ad81f34e40
|
fa0eecab8a94d1ad20b5aa129973f59eddd5678d
|
refs/heads/main
| 2023-08-27T21:49:32.337979
| 2021-10-23T21:57:55
| 2021-10-23T21:57:55
| 317,097,388
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 646
|
py
|
class Solution:
def findRedundantConnection(self, edges: List[List[int]]) -> List[int]:
self.g = collections.defaultdict(list)
for u,v in edges:
self.g[u].append(v)
self.g[v].append(u)
for u,v in edges[::-1]:
self.vis=set()
self.dfs(1,u,v)
if len(self.vis)==len(self.g):
return [u,v]
def dfs(self,node,u,v):
if node in self.vis:
return True
self.vis.add(node)
for adj in self.g[node]:
if [node,adj]!=[u,v] and [adj,node]!=[u,v]:
self.dfs(adj,u,v)
|
[
"noreply@github.com"
] |
Abdelhamid-bouzid.noreply@github.com
|
a9fa1f05a49145676d8d384b3c7e7cc8f4b16897
|
33836016ea99776d31f7ad8f2140c39f7b43b5fe
|
/fip_collab/2016_09_26_polycrystal_FIP_allpoint/plot_evd.py
|
d523d88b853904fc3267a94e0c6fc19be735c236
|
[] |
no_license
|
earthexploration/MKS-Experimentation
|
92a2aea83e041bfe741048d662d28ff593077551
|
9b9ff3b468767b235e7c4884b0ed56c127328a5f
|
refs/heads/master
| 2023-03-17T23:11:11.313693
| 2017-04-24T19:24:35
| 2017-04-24T19:24:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,689
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from constants import const
import h5py
import sys
def pltevd(H):
C = const()
"""define the colors of interest"""
n_col = len(C['sid'])
colormat = cm.rainbow(np.linspace(0, 1, n_col))
f_reg = h5py.File("regression_results_L%s.hdf5" % H, 'r')
fig = plt.figure(figsize=[5.5, 4])
f = h5py.File("responses.hdf5", 'r')
for ii in xrange(n_col):
sid = C['sid'][ii]
"""get the x, y data for plotting the evd"""
x = f.get('evd_%s' % sid)[...]
if ii == 0:
xmin = np.log(x).min()
xmax = np.log(x).max()
else:
xmin = np.min([xmin, np.log(x).min()])
xmax = np.max([xmax, np.log(x).max()])
y = (np.arange(x.size)+1)/np.float32(x.size)
"""plot the original data and the fits"""
# plt.plot(np.log(x), y, '.', markersize=2, color=colormat[ii, :],
# label=sid)
plt.plot(np.log(x), y, '-', color=colormat[ii, :],
label=sid)
f.close()
f_reg.close()
plt.xlabel("ln(FIP)")
plt.ylabel("CDF")
plt.legend(loc='lower right', shadow=True, fontsize='small')
rng = np.abs(xmax - xmin)
xmin += -0.01*rng
xmax += 0.01*rng
plt.xlim((xmin, xmax))
ymin = y.min()
ymax = y.max()
rng = ymax - ymin
ymin = 0
ymax += 0.01*rng
plt.ylim((ymin, ymax))
plt.tight_layout()
fig_name = 'evd_orig_L%s.png' % H
fig.canvas.set_window_title(fig_name)
plt.savefig(fig_name)
if __name__ == '__main__':
sid = sys.argv[1]
pltevd(sid)
plt.show()
|
[
"noahhpaulson@gmail.com"
] |
noahhpaulson@gmail.com
|
168a1a3ba4c092e59778ca8c0e121f8af2cbdb0f
|
b6af5ed67f758dace85c9cec2091c36d769e5668
|
/build/handsnet_time/catkin_generated/installspace/tactile_image_publisher_5.py
|
34897b9675238a073264ba477e450f23797c3517
|
[
"MIT"
] |
permissive
|
robertokcanale/ros_workspace_handsnet
|
897920d6ef30554556449085816d2e8ffb096721
|
09672bf2b4c54d0064f339005dc5eb3ac4f9d80d
|
refs/heads/main
| 2023-04-10T06:50:12.652997
| 2021-04-12T12:08:41
| 2021-04-12T12:08:41
| 353,714,029
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,782
|
py
|
#!/usr/bin/env python3
import rospy
#import tensorflow as tf
from PIL import Image
from sensor_msgs.msg import Image as TactileImage
from handsnet_time.msg import Image_array
import numpy as np
#I can make a message of this type
#sensor_msgs/Image[] data
if __name__ == '__main__':
pub = rospy.Publisher('tactile_image_array', Image_array, queue_size=10)
rospy.init_node('tactile_image_publisher5')
rate = rospy.Rate(1000) # 1hz
contacts = Image_array()
#print(contacts.tactile_image[1])
while not rospy.is_shutdown():
for i in range(0, 4):
im_name='src/handsnet_time/data/'+str(i+1)+'.png'
#PIL image
im = Image.open(im_name)
im = im.convert('RGB')
im = im.resize((68,100), Image.ANTIALIAS)
#sensor_msgs.msg.Image
contacts.tactile_image[i] = TactileImage()
contacts.tactile_image[i].header.stamp = rospy.Time.now()
contacts.tactile_image[i].height = im.height
contacts.tactile_image[i].width = im.width
contacts.tactile_image[i].encoding = "rgb8"
contacts.tactile_image[i].is_bigendian = False
contacts.tactile_image[i].step = 3 * im.width # Full row length in bytes
contacts.tactile_image[i].data = np.array(im).tobytes()
pub.publish(contacts)
rate.sleep()
#also, I need something of the kind PIL.Image.Image
#tested it, and it wants a PIL image, don't forget to place the GPU stuff
#model = tf.keras.models.load_model('src/handsnet/data/HandsNet_2_97.h5')
#input_arr= tf.keras.preprocessing.image.img_to_array(im)
#input_arr = np.array([input_arr])
#predictions = model.predict(input_arr)
#print(predictions)
|
[
"robyrugby95@gmail.com"
] |
robyrugby95@gmail.com
|
e6ff765e39660197728176631c129a6e521196c7
|
ce4f7f8e9336b8bbf9cbfe147d922e37034ab6c3
|
/old/past1_E.py
|
e538f70ef5be64f085af47fec1d7b8236ac36a41
|
[] |
no_license
|
kussy-tessy/atcoder
|
5604919747242ee9740b9131bb6e168e96af0151
|
ee917fa5a5218d4a9e72f710d0d844e7c203f13b
|
refs/heads/master
| 2023-07-21T09:25:15.464881
| 2021-09-04T14:06:02
| 2021-09-04T14:06:02
| 311,221,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 960
|
py
|
# print('input >>')
N, Q = map(int,(input().split()))
follows = [[0] * N for _ in range(N)]
logs = []
for _ in range(Q):
logs.append(input())
for log in logs:
log_info = log.split()
person = int(log_info[1])-1
if log_info[0] == '1':
follows[person][int(log_info[2])-1] = 1
elif log_info[0] == '2':
for i in range(N):
if follows[i][person] == 1:
follows[person][i] = 1
elif log_info[0] == '3':
xs = []
for i in range(N):
if follows[person][i] == 1:
xs.append(i)
for x in xs:
for j in range(N):
if follows[x][j] == 1:
follows[person][j] = 1
# print('-----output-----')
for i, fs in enumerate(follows):
for j, f in enumerate(fs):
if f == 1 and i != j:
print('Y', end='')
else:
print('N', end='')
print()
|
[
"teppei04285000@gmail.com"
] |
teppei04285000@gmail.com
|
080b3808ad65aeadf62c0f3a420f7f9a286b309d
|
7a4da5ec2196bf975a9e6115846244788b36b952
|
/3.7.0/lldb-3.7.0.src/test/python_api/frame/inlines/TestInlinedFrame.py
|
217b87e892696f5a9f974f714461cd306ed5a293
|
[
"NCSA",
"MIT"
] |
permissive
|
androm3da/clang_sles
|
ca4ada2ec85d625c65818ca9b60dcf1bc27f0756
|
2ba6d0711546ad681883c42dfb8661b842806695
|
refs/heads/master
| 2021-01-10T13:50:25.353394
| 2016-03-31T21:38:29
| 2016-03-31T21:38:29
| 44,787,977
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,812
|
py
|
"""
Testlldb Python SBFrame APIs IsInlined() and GetFunctionName().
"""
import os, time
import re
import unittest2
import lldb, lldbutil
from lldbtest import *
class InlinedFrameAPITestCase(TestBase):
    """Exercises SBFrame.IsInlined() and SBFrame.GetFunctionName() by
    stopping on a breakpoint set inside an inlined function."""
    mydir = TestBase.compute_mydir(__file__)
    @skipUnlessDarwin
    @python_api_test
    @dsym_test
    def test_stop_at_outer_inline_with_dsym(self):
        """Exercise SBFrame.IsInlined() and SBFrame.GetFunctionName()."""
        # dSYM variant: debug info in a separate .dSYM bundle (Darwin only).
        self.buildDsym()
        self.do_stop_at_outer_inline()
    @python_api_test
    @dwarf_test
    def test_stop_at_outer_inline_with_dwarf(self):
        """Exercise SBFrame.IsInlined() and SBFrame.GetFunctionName()."""
        # DWARF variant: debug info embedded in the object files.
        self.buildDwarf()
        self.do_stop_at_outer_inline()
    def setUp(self):
        # Call super's setUp().
        TestBase.setUp(self)
        # Find the line number to of function 'c'.
        self.source = 'inlines.c'
        # Stop locations are found by searching for literal marker comments in
        # the C source, so the test does not hard-code line numbers.
        self.first_stop = line_number(self.source, '// This should correspond to the first break stop.')
        self.second_stop = line_number(self.source, '// This should correspond to the second break stop.')
    def do_stop_at_outer_inline(self):
        """Exercise SBFrame.IsInlined() and SBFrame.GetFunctionName()."""
        exe = os.path.join(os.getcwd(), "a.out")
        # Create a target by the debugger.
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, VALID_TARGET)
        # Now create a breakpoint on main.c by the name of 'inner_inline'.
        breakpoint = target.BreakpointCreateByName('inner_inline', 'a.out')
        #print "breakpoint:", breakpoint
        # An inlined function gets one breakpoint location per inlining site,
        # hence the expectation of more than one location.
        self.assertTrue(breakpoint and
                        breakpoint.GetNumLocations() > 1,
                        VALID_BREAKPOINT)
        # Now launch the process, and do not stop at the entry point.
        process = target.LaunchSimple (None, None, self.get_process_working_directory())
        process = target.GetProcess()
        self.assertTrue(process.GetState() == lldb.eStateStopped,
                        PROCESS_STOPPED)
        import lldbutil
        stack_traces1 = lldbutil.print_stacktraces(process, string_buffer=True)
        if self.TraceOn():
            print "Full stack traces when first stopped on the breakpoint 'inner_inline':"
            print stack_traces1
        # The first breakpoint should correspond to an inlined call frame.
        # If it's an inlined call frame, expect to find, in the stack trace,
        # that there is a frame which corresponds to the following call site:
        #
        #     outer_inline (argc);
        #
        frame0 = process.GetThreadAtIndex(0).GetFrameAtIndex(0)
        if frame0.IsInlined():
            filename = frame0.GetLineEntry().GetFileSpec().GetFilename()
            self.assertTrue(filename == self.source)
            self.expect(stack_traces1, "First stop at %s:%d" % (self.source, self.first_stop), exe=False,
                        substrs = ['%s:%d' % (self.source, self.first_stop)])
        # Expect to break again for the second time.
        process.Continue()
        self.assertTrue(process.GetState() == lldb.eStateStopped,
                        PROCESS_STOPPED)
        stack_traces2 = lldbutil.print_stacktraces(process, string_buffer=True)
        if self.TraceOn():
            print "Full stack traces when stopped on the breakpoint 'inner_inline' for the second time:"
            print stack_traces2
        self.expect(stack_traces2, "Second stop at %s:%d" % (self.source, self.second_stop), exe=False,
                    substrs = ['%s:%d' % (self.source, self.second_stop)])
if __name__ == '__main__':
    import atexit
    lldb.SBDebugger.Initialize()
    # Make sure the debugger library is torn down when the interpreter exits.
    atexit.register(lambda: lldb.SBDebugger.Terminate())
    unittest2.main()
|
[
"brian.cain@gmail.com"
] |
brian.cain@gmail.com
|
e489b80b813521f9e69b3f3c43c39d02cdba43cf
|
c526d2f3e457b1b25d5f2cb5bda914236e6c265b
|
/candidates/urls.py
|
5c8fb469cc8b566cb76f30f605fdaab774b81d78
|
[
"CC0-1.0"
] |
permissive
|
yhsiang/twly-voter-guide
|
99e2269da57a21b5779ec3defd9c7e23c7668f64
|
ae87c9f9b9f053f79a12f04afe0d60f227dc68c1
|
refs/heads/master
| 2020-12-31T02:42:01.568168
| 2015-03-21T13:24:55
| 2015-03-21T13:24:55
| 33,117,399
| 0
| 0
| null | 2015-03-30T10:42:56
| 2015-03-30T10:42:56
| null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
from candidates import views
# URL routes for the candidates app; `ad` is the legislative term number.
urlpatterns = patterns('',
    # Default landing page: county list, pinned to the 8th term.
    url(r'^$', views.counties, {"ad": 8}),
    # One electoral district (constituency) within a county.
    url(r'^(?P<ad>\d+)/(?P<county>\S+)/(?P<constituency>\d+)/$', views.district, name='district'),
    # All districts of a county for a given term.
    url(r'^(?P<ad>\d+)/(?P<county>\S+)/$', views.districts, name='districts'),
    # County list for a given term.
    url(r'^(?P<ad>\d+)/$', views.counties, name='counties'),
    # Political contribution records for one candidate (uid) in one term.
    url(r'^political_contributions/(?P<uid>\S+)/(?P<ad>\d+)/$', views.political_contributions, name='political_contributions'),
)
|
[
"twly.tw@gmail.com"
] |
twly.tw@gmail.com
|
c76e0a8da89cff2174d9900d2b2d795ccf522914
|
be50b4dd0b5b8c3813b8c3158332b1154fe8fe62
|
/Math/Python/SortedPermutationRank.py
|
28c0906d6624cf1570338c7b46d6235b336d4950
|
[] |
no_license
|
Zimmermann25/InterviewBit
|
a8d89e090068d9644e28085625963c8ce75d3dff
|
6d2138e740bd5ba8eab992d9bf090977e077bfc5
|
refs/heads/main
| 2023-03-24T18:12:48.244950
| 2021-03-24T14:36:48
| 2021-03-24T14:36:48
| 350,835,917
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,274
|
py
|
class Solution:
    # @param A : string
    # @return an integer
    def findRank(self, A):
        """Return the 1-based rank of A among the sorted permutations of its
        characters, modulo 1000003.

        Assumes the characters of A are distinct (guaranteed by the problem
        statement); with duplicates the count would be inflated.
        Returns 0 for an empty string and 1 for a single character.
        """
        # Bug fix: the original did `import math` in the class body.  Names
        # bound in a class body are *not* visible as bare names inside
        # methods, so `math.factorial` below raised NameError at call time.
        # Importing at function scope keeps the interface unchanged.
        import math
        if len(A) < 1:
            return 0
        if len(A) == 1:
            return 1
        counter = 0
        # Second slot False means this character has not been consumed yet.
        char_arr = [[A[i], False] for i in range(len(A))]
        char_arr.sort()
        for i in range(len(A)):
            cur_char = A[i]
            small_counter = 0  # unused characters strictly smaller than cur_char
            j = 0
            while j < len(char_arr):
                if char_arr[j][0] >= cur_char:
                    char_arr[j][1] = True  # mark this character as used
                    break
                if char_arr[j][1] is False:
                    small_counter += 1
                j += 1
            # Each smaller, still-unused character could start
            # (len(A)-i-1)! permutations that precede A in sorted order.
            counter += small_counter * math.factorial(len(A) - i - 1)
        return (counter + 1) % 1000003
|
[
"noreply@github.com"
] |
Zimmermann25.noreply@github.com
|
45c93dfe5019d6bc09fc9cd7499e5990d2691491
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02821/s486815367.py
|
c6e104ff5932a2b1ed2561cc6e8d0125a064d8c4
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 983
|
py
|
import sys
from bisect import bisect_left,bisect_right
sys.setrecursionlimit(10**9)
INF=10**18
def input():
    """Read one line from stdin, stripped of the trailing newline.

    Deliberately shadows the builtin input() so the rest of the script
    gets the faster sys.stdin path transparently.
    """
    line = sys.stdin.readline()
    return line.rstrip()
def main():
    """Choose the M largest pair sums A[i]+A[j] over all ordered pairs and
    print their total.

    NOTE(review): this looks like the AtCoder "handshake happiness" problem
    (maximize total from M handshakes) — confirm against the original judge
    if reusing.
    """
    N,M=map(int,input().split())
    A=sorted(list(map(int,input().split())))
    # S[i] = sum of the i smallest values of A (prefix sums for range sums).
    S=[0]*(N+1)
    for i in range(N):
        S[i+1]=S[i]+A[i]
    # Binary search ("nibutan"); invariant: solve(ok) is True, solve(ng) False.
    def nibutan(ok,ng):
        while abs(ok-ng) > 1:
            mid = (ok + ng) // 2
            if solve(mid):
                ok = mid
            else:
                ng = mid
        return ok
    # solve(mid): are there at least M ordered pairs with sum >= mid?
    def solve(mid):
        c=0
        for i in range(N):
            c+=N-bisect_left(A,mid-A[i])
        if c>=M:
            return True
        else:
            return False
    # Largest threshold x such that at least M pairs have sum >= x.
    x=nibutan(0,10**11)
    ans=0
    count=0
    # Sum every pair with total >= x, counting how many pairs were taken.
    for i in range(N):
        b_l=bisect_left(A,x-A[i])
        count+=(N-b_l)
        ans+=S[N]-S[b_l]+A[i]*(N-b_l)
    if count==M:
        print(ans)
    else:
        # We over-counted pairs whose sum is exactly x; remove the surplus.
        print(ans+(M-count)*x)
# Standard script entry point.
if __name__ == '__main__':
    main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
0d07714134ac6449e78e4d248375b431f66f16e0
|
16047f965a69893a8cd2c8d18fbd7b9c86a07eb3
|
/src/kubernetes/client/models/v1_quobyte_volume_source.py
|
52fef80de7b4ac6de368f6c4785f8c2a3414d71f
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MIT"
] |
permissive
|
guctum/aws-kube-codesuite
|
9ce2cc02fe5fa15c2e175fb697138014fb162f1e
|
5d62beaadc13bec745ac7d2fc18f07805e91cef3
|
refs/heads/master
| 2021-05-24T10:08:00.651840
| 2020-04-23T20:21:46
| 2020-04-23T20:21:46
| 253,511,083
| 0
| 0
|
Apache-2.0
| 2020-04-06T13:48:14
| 2020-04-06T13:48:13
| null |
UTF-8
|
Python
| false
| false
| 6,587
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1QuobyteVolumeSource(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, group=None, read_only=None, registry=None, user=None, volume=None):
        """
        V1QuobyteVolumeSource - a model defined in Swagger
        :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
        :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
        """
        # Swagger type of each model attribute, used by to_dict()/serializers.
        self.swagger_types = {
            'group': 'str',
            'read_only': 'bool',
            'registry': 'str',
            'user': 'str',
            'volume': 'str'
        }
        # Python attribute name -> JSON key in the REST payload.
        self.attribute_map = {
            'group': 'group',
            'read_only': 'readOnly',
            'registry': 'registry',
            'user': 'user',
            'volume': 'volume'
        }
        self._group = group
        self._read_only = read_only
        self._registry = registry
        self._user = user
        self._volume = volume
    @property
    def group(self):
        """
        Gets the group of this V1QuobyteVolumeSource.
        Group to map volume access to Default is no group
        :return: The group of this V1QuobyteVolumeSource.
        :rtype: str
        """
        return self._group
    @group.setter
    def group(self, group):
        """
        Sets the group of this V1QuobyteVolumeSource.
        Group to map volume access to Default is no group
        :param group: The group of this V1QuobyteVolumeSource.
        :type: str
        """
        self._group = group
    @property
    def read_only(self):
        """
        Gets the read_only of this V1QuobyteVolumeSource.
        ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.
        :return: The read_only of this V1QuobyteVolumeSource.
        :rtype: bool
        """
        return self._read_only
    @read_only.setter
    def read_only(self, read_only):
        """
        Sets the read_only of this V1QuobyteVolumeSource.
        ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.
        :param read_only: The read_only of this V1QuobyteVolumeSource.
        :type: bool
        """
        self._read_only = read_only
    @property
    def registry(self):
        """
        Gets the registry of this V1QuobyteVolumeSource.
        Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes
        :return: The registry of this V1QuobyteVolumeSource.
        :rtype: str
        """
        return self._registry
    @registry.setter
    def registry(self, registry):
        """
        Sets the registry of this V1QuobyteVolumeSource.
        Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes
        :param registry: The registry of this V1QuobyteVolumeSource.
        :type: str
        """
        # registry is a required field in the Swagger spec: reject None.
        if registry is None:
            raise ValueError("Invalid value for `registry`, must not be `None`")
        self._registry = registry
    @property
    def user(self):
        """
        Gets the user of this V1QuobyteVolumeSource.
        User to map volume access to Defaults to serivceaccount user
        :return: The user of this V1QuobyteVolumeSource.
        :rtype: str
        """
        return self._user
    @user.setter
    def user(self, user):
        """
        Sets the user of this V1QuobyteVolumeSource.
        User to map volume access to Defaults to serivceaccount user
        :param user: The user of this V1QuobyteVolumeSource.
        :type: str
        """
        self._user = user
    @property
    def volume(self):
        """
        Gets the volume of this V1QuobyteVolumeSource.
        Volume is a string that references an already created Quobyte volume by name.
        :return: The volume of this V1QuobyteVolumeSource.
        :rtype: str
        """
        return self._volume
    @volume.setter
    def volume(self, volume):
        """
        Sets the volume of this V1QuobyteVolumeSource.
        Volume is a string that references an already created Quobyte volume by name.
        :param volume: The volume of this V1QuobyteVolumeSource.
        :type: str
        """
        # volume is a required field in the Swagger spec: reject None.
        if volume is None:
            raise ValueError("Invalid value for `volume`, must not be `None`")
        self._volume = volume
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # Recursively convert nested models (anything exposing to_dict()).
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1QuobyteVolumeSource):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
[
"olari@784f435df7a4.ant.amazon.com"
] |
olari@784f435df7a4.ant.amazon.com
|
a7f5239914d25e60fde6bf4ad74825ca1a302698
|
360ae1188ad79e71ccc72da0b9ae709bda678f91
|
/ryu/lib/xflow/netflow.py
|
f41a9f57341ddbaf6c3a1e32928888653be34be0
|
[
"Apache-2.0"
] |
permissive
|
faucetsdn/ryu
|
47b3523e7ccb381f3bdf2877a3f9f01cb1876054
|
d6cda4f427ff8de82b94c58aa826824a106014c2
|
refs/heads/master
| 2023-09-05T06:37:21.991029
| 2022-06-09T23:09:40
| 2022-06-09T23:09:40
| 2,945,007
| 385
| 215
|
Apache-2.0
| 2022-11-13T10:50:25
| 2011-12-09T03:43:50
|
Python
|
UTF-8
|
Python
| false
| false
| 4,009
|
py
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
NETFLOW_V1 = 0x01
NETFLOW_V5 = 0x05
NETFLOW_V6 = 0x06
NETFLOW_V7 = 0x07
NETFLOW_V8 = 0x08
NETFLOW_V9 = 0x09
class NetFlow(object):
    """Dispatching parser for NetFlow export packets.

    Per-version parser classes register themselves through
    ``register_netflow_version`` and are selected by the 16-bit version
    field at the start of every packet.
    """
    _PACK_STR = '!H'
    _NETFLOW_VERSIONS = {}
    @staticmethod
    def register_netflow_version(version):
        """Class decorator factory: map *version* to the decorated class."""
        def _register_netflow_version(cls):
            NetFlow._NETFLOW_VERSIONS[version] = cls
            return cls
        return _register_netflow_version
    def __init__(self):
        super(NetFlow, self).__init__()
    @classmethod
    def parser(cls, buf):
        """Parse *buf* with the registered class; None if version unknown."""
        (version,) = struct.unpack_from(cls._PACK_STR, buf)
        handler = cls._NETFLOW_VERSIONS.get(version)
        return handler.parser(buf) if handler else None
@NetFlow.register_netflow_version(NETFLOW_V5)
class NetFlowV5(object):
    """NetFlow version 5 export packet: header plus trailing flow records."""
    _PACK_STR = '!HHIIIIBBH'
    _MIN_LEN = struct.calcsize(_PACK_STR)
    def __init__(self, version, count, sys_uptime, unix_secs,
                 unix_nsecs, flow_sequence, engine_type, engine_id,
                 sampling_interval, flows=None):
        # Header fields straight from the packet.  `flows` is accepted for
        # signature compatibility but not stored here; parser() attaches the
        # decoded record list to the instance afterwards.
        self.version = version
        self.count = count
        self.sys_uptime = sys_uptime
        self.unix_secs = unix_secs
        self.unix_nsecs = unix_nsecs
        self.flow_sequence = flow_sequence
        self.engine_type = engine_type
        self.engine_id = engine_id
        self.sampling_interval = sampling_interval
    @classmethod
    def parser(cls, buf):
        """Decode the header and every trailing flow record from *buf*."""
        header = struct.unpack_from(cls._PACK_STR, buf)
        msg = cls(*header)
        msg.flows = []
        offset = cls._MIN_LEN
        while offset < len(buf):
            msg.flows.append(NetFlowV5Flow.parser(buf, offset))
            offset += NetFlowV5Flow._MIN_LEN
        return msg
class NetFlowV5Flow(object):
    """One fixed-size flow record of a NetFlow v5 export packet."""
    _PACK_STR = '!IIIHHIIIIHHxBBBHHBB2x'
    _MIN_LEN = struct.calcsize(_PACK_STR)
    def __init__(self, srcaddr, dstaddr, nexthop, input_, output,
                 dpkts, doctets, first, last, srcport, dstport,
                 tcp_flags, prot, tos, src_as, dst_as, src_mask,
                 dst_mask):
        # Source/destination/next-hop addresses as raw 32-bit integers.
        self.srcaddr = srcaddr
        self.dstaddr = dstaddr
        self.nexthop = nexthop
        # Interface indices; the parameter's trailing underscore avoids
        # shadowing the builtin input().
        self.input = input_
        self.output = output
        # Packet/byte counters and first/last timestamps of the flow.
        self.dpkts = dpkts
        self.doctets = doctets
        self.first = first
        self.last = last
        # Transport-level ports, flags, protocol number and TOS byte.
        self.srcport = srcport
        self.dstport = dstport
        self.tcp_flags = tcp_flags
        self.prot = prot
        self.tos = tos
        # AS numbers and prefix mask lengths.
        self.src_as = src_as
        self.dst_as = dst_as
        self.src_mask = src_mask
        self.dst_mask = dst_mask
    @classmethod
    def parser(cls, buf, offset):
        """Unpack one record starting at *offset* and build the instance."""
        fields = struct.unpack_from(cls._PACK_STR, buf, offset)
        return cls(*fields)
|
[
"fujita.tomonori@lab.ntt.co.jp"
] |
fujita.tomonori@lab.ntt.co.jp
|
22c15be1586d632b333fa96826a4638948b75d8e
|
8a102033a266d39128e4b64aa0780cf67055e196
|
/15552.py
|
0bfab577f1d44f67f2be860eabace0e46000ab0d
|
[] |
no_license
|
yuseungwoo/baekjoon
|
4dec0798b8689b9378121b9d178713c9cf14a53f
|
099031e2c4401e27edcdc05bd6c9e6a558b09bb9
|
refs/heads/master
| 2020-09-03T15:25:40.764723
| 2018-10-08T02:35:27
| 2018-10-08T02:35:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
# coding-utf-8
import sys
# First line: number of test cases that follow.
count = sys.stdin.readline().rstrip()
count = int(count)
for _ in range(count):
    # Each case is one line of integers; print their sum.
    numbers = sys.stdin.readline().rstrip().split()
    number = sum(list(map(int, numbers)))
    number = str(number) + '\n'
    # sys.stdout.write instead of print for speed over many output lines.
    sys.stdout.write(number)
|
[
"blacksangi14@naver.com"
] |
blacksangi14@naver.com
|
92f1518267d637703c7a7e2205d182907358658a
|
a35d07b11f013a26901942f730d4b720f4e27355
|
/warmup1/near_hundred.py
|
5cb06a22dcabd67a5cd1a9ba4cf2f360c62fd633
|
[] |
no_license
|
PMiskew/codingbat_solutions_python
|
7cbbf293fb6b230e274a8cee373a2222a5a27e8d
|
6e62fd0080c2a9bcd59fd4f803cc7966a2cb88d1
|
refs/heads/master
| 2022-11-13T13:24:53.078833
| 2020-07-14T18:38:06
| 2020-07-14T18:38:06
| 255,197,455
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 839
|
py
|
'''
QUESTION:
Given an int n, return True if it is within 10 of 100 or 200. Note: abs(num) computes the absolute value of a number.
near_hundred(93) → True
near_hundred(90) → True
near_hundred(89) → False
'''
def near_hundred(n):
    """Return True if n is within 10 (inclusive) of 100 or of 200.

    >>> near_hundred(93)
    True
    >>> near_hundred(90)
    True
    >>> near_hundred(89)
    False
    """
    # Bug fix: the original left every candidate implementation inside
    # triple-quoted strings, so the function body was a no-op and always
    # returned None.  abs() handles both sides of each target, so no
    # case-by-case branching is needed (the file's "Approach 2").
    return abs(n - 100) <= 10 or abs(n - 200) <= 10
|
[
"paul.miskew@gmail.com"
] |
paul.miskew@gmail.com
|
37cb5f11bdcd8f63dd000d2f706336c3c37ee0ec
|
64bf39b96a014b5d3f69b3311430185c64a7ff0e
|
/intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/sensu/sensu_go/tests/unit/modules/test_role_binding_info.py
|
6dc780d18f9e93a04704512448a963a692889b1f
|
[
"GPL-3.0-only",
"MIT"
] |
permissive
|
SimonFangCisco/dne-dna-code
|
7072eba7da0389e37507b7a2aa5f7d0c0735a220
|
2ea7d4f00212f502bc684ac257371ada73da1ca9
|
refs/heads/master
| 2023-03-10T23:10:31.392558
| 2021-02-25T15:04:36
| 2021-02-25T15:04:36
| 342,274,373
| 0
| 0
|
MIT
| 2021-02-25T14:39:22
| 2021-02-25T14:39:22
| null |
UTF-8
|
Python
| false
| false
| 1,977
|
py
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible_collections.sensu.sensu_go.plugins.module_utils import (
errors, utils,
)
from ansible_collections.sensu.sensu_go.plugins.modules import role_binding_info
from .common.utils import (
AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args,
)
class TestRoleBindingInfo(ModuleTestCase):
    """Unit tests for the sensu.sensu_go role_binding_info Ansible module."""
    def test_get_all_role_bindings(self, mocker):
        # Without a `name` argument the module lists every role binding in
        # the requested namespace.
        get_mock = mocker.patch.object(utils, "get")
        get_mock.return_value = [1, 2, 3]
        set_module_args(namespace="my")
        # Ansible modules exit via exit_json; the test harness turns that
        # into AnsibleExitJson so the payload can be inspected.
        with pytest.raises(AnsibleExitJson) as context:
            role_binding_info.main()
        _client, path = get_mock.call_args[0]
        assert path == "/api/core/v2/namespaces/my/rolebindings"
        assert context.value.args[0]["objects"] == [1, 2, 3]
    def test_get_single_role_binding(self, mocker):
        # With a `name`, the module fetches one binding (default namespace)
        # and still wraps the result in a list.
        get_mock = mocker.patch.object(utils, "get")
        get_mock.return_value = 1
        set_module_args(name="test-role-binding")
        with pytest.raises(AnsibleExitJson) as context:
            role_binding_info.main()
        _client, path = get_mock.call_args[0]
        assert path == "/api/core/v2/namespaces/default/rolebindings/test-role-binding"
        assert context.value.args[0]["objects"] == [1]
    def test_missing_single_role_binding(self, mocker):
        # A missing named binding yields an empty objects list, not an error.
        get_mock = mocker.patch.object(utils, "get")
        get_mock.return_value = None
        set_module_args(name="sample-role-binding")
        with pytest.raises(AnsibleExitJson) as context:
            role_binding_info.main()
        assert context.value.args[0]["objects"] == []
    def test_failure(self, mocker):
        # A backend error must surface as a module failure (fail_json).
        get_mock = mocker.patch.object(utils, "get")
        get_mock.side_effect = errors.Error("Bad error")
        set_module_args(name="sample-role-binding")
        with pytest.raises(AnsibleFailJson):
            role_binding_info.main()
|
[
"sifang@cisco.com"
] |
sifang@cisco.com
|
8c0b7a66053ff8a78c350d3e918291d75673b78a
|
b11d97bf5731bf6faeef14814292d1aff6866e3a
|
/seq2annotation/server/tensorflow_inference.py
|
9d6ce6042c4c74192440f99d7b707623aee40e82
|
[
"Apache-2.0"
] |
permissive
|
tyc1922/seq2annotation
|
2e2193aff1281242c2b66da8cbe27571e2c7f3fc
|
c161099570be544881c14105f4392d764d6d8247
|
refs/heads/master
| 2022-04-21T19:14:03.117606
| 2020-04-25T09:24:02
| 2020-04-25T09:24:02
| 259,069,353
| 1
| 0
|
Apache-2.0
| 2020-04-26T15:46:48
| 2020-04-26T15:46:48
| null |
UTF-8
|
Python
| false
| false
| 2,370
|
py
|
from typing import List
import keras
from tokenizer_tools.tagset.NER.BILUO import BILUOSequenceEncoderDecoder
from tokenizer_tools.tagset.offset.sequence import Sequence
from tensorflow.contrib import predictor
from tokenizer_tools.tagset.exceptions import TagSetDecodeError
decoder = BILUOSequenceEncoderDecoder()
class Inference(object):
    """Wrapper around an exported TensorFlow SavedModel for sequence tagging.

    Loads the model once and converts raw text into
    (original_text, Sequence, tag_sequence, failed) tuples via the BILUO
    tag-set decoder.
    """
    def __init__(self, model_path):
        # load model
        self.model_dir = model_path
        self.predict_fn = predictor.from_saved_model(model_path)
    def infer(self, input_text: str):
        # Single-string convenience wrapper: return the first (only) result.
        infer_result = self._infer(input_text)
        return infer_result[0]
    def batch_infer(self, input_text: List[str]):
        # Batch entry point: one result tuple per input string.
        return self._infer(input_text)
    def _infer(self, input_text):
        # Accept either one string or a list of strings.
        if isinstance(input_text, str):
            input_list = [input_text]
        else:
            input_list = input_text
        # Split each text into a list of single characters.
        raw_sequences = [[i for i in text] for text in input_list]
        # Pad all character sequences to the batch maximum with '<pad>'.
        sentence = keras.preprocessing.sequence.pad_sequences(
            raw_sequences, dtype='object',
            padding='post', truncating='post', value=['<pad>']
        ).tolist()
        # TODO: batch infer will cause padding, which will maybe cause decoder to offset bug.
        # TODO: feature translate should out of this main program for better compatible with keras and estimator model
        input_feature = {
            'words': [[i for i in text] for text in sentence],
            'words_len': [len(text) for text in raw_sequences],
        }
        # print(input_feature)
        predictions = self.predict_fn(input_feature)
        tags_list = predictions['tags']
        infer_result = []
        for raw_input_text, raw_text, normalized_text, tags in zip(input_list, raw_sequences, sentence, tags_list):
            # decode Unicode (predictions come back as bytes)
            tags_seq = [i.decode() for i in tags]
            # print(tags_seq)
            # BILUO to offset
            failed = False
            try:
                seq = decoder.to_offset(tags_seq, raw_text)
            except TagSetDecodeError as e:
                print(e)
                # invalid tag sequence will raise exception
                # so return a empty result
                seq = Sequence(input_text)
                failed = True
            infer_result.append((raw_input_text, seq, tags_seq, failed))
        return infer_result
|
[
"u1mail2me@gmail.com"
] |
u1mail2me@gmail.com
|
90060297c37f8438877900ed28743d74da252c12
|
70e77b4e49fa1be07a89aa9370aa8069f4dd17cc
|
/imb_manager/asgi.py
|
d88f76b423017178dd9fba08e2652cdcf0103e46
|
[] |
no_license
|
rosoba/imb_manager
|
7a542da0fb032839dcabd3a7d9073f69616cfaeb
|
13f277cb5170ef17deebb2e4305c99f73421e2a2
|
refs/heads/master
| 2023-01-31T04:16:56.526477
| 2020-12-12T14:52:03
| 2020-12-12T14:52:03
| 320,850,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
ASGI config for imb_manager project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'imb_manager.settings')
application = get_asgi_application()
|
[
"rostislav.chudoba@rwth-aachen.de"
] |
rostislav.chudoba@rwth-aachen.de
|
b1ad6df8c1fd9c67554b4f8f8f19ab2cc90e7283
|
5dd190725aaaeb7287d935b3c99c20480b208816
|
/official/vision/keras_cv/metrics/iou.py
|
b6391a61c7d3cbf38407a26b17b068ba77b3fb66
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
DemonDamon/mask-detection-based-on-tf2odapi
|
32d947164fb54395b9e45368c0d4bcf3a6ea1c28
|
192ae544169c1230c21141c033800aa1bd94e9b6
|
refs/heads/main
| 2023-05-13T05:05:44.534885
| 2021-06-08T05:56:09
| 2021-06-08T05:56:09
| 369,463,131
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,723
|
py
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IOU Metrics used for semantic segmentation models."""
import numpy as np
import tensorflow as tf
class PerClassIoU(tf.keras.metrics.Metric):
  """Computes the per-class Intersection-Over-Union metric.

  Mean Intersection-Over-Union is a common evaluation metric for semantic image
  segmentation, which first computes the IOU for each semantic class.
  IOU is defined as follows:
    IOU = true_positive / (true_positive + false_positive + false_negative).
  The predictions are accumulated in a confusion matrix, weighted by
  `sample_weight` and the metric is then calculated from it.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Example:

  >>> # cm = [[1, 1],
  >>> #        [1, 1]]
  >>> # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
  >>> # iou = true_positives / (sum_row + sum_col - true_positives))
  >>> # result = [(1 / (2 + 2 - 1), 1 / (2 + 2 - 1)] = 0.33
  >>> m = tf.keras.metrics.MeanIoU(num_classes=2)
  >>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1])
  >>> m.result().numpy()
  [0.33333334, 0.33333334]
  """

  def __init__(self, num_classes, name=None, dtype=None):
    """Initializes `PerClassIoU`.

    Args:
      num_classes: The possible number of labels the prediction task can have.
        This value must be provided, since a confusion matrix of dimension =
        [num_classes, num_classes] will be allocated.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(PerClassIoU, self).__init__(name=name, dtype=dtype)
    self.num_classes = num_classes
    # Variable to accumulate the predictions in the confusion matrix.
    self.total_cm = self.add_weight(
        'total_confusion_matrix',
        shape=(num_classes, num_classes),
        initializer=tf.compat.v1.zeros_initializer)

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates the confusion matrix statistics.

    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be a
        `Tensor` whose rank is either 0, or the same rank as `y_true`, and must
        be broadcastable to `y_true`.

    Returns:
      IOU per class.
    """
    y_true = tf.cast(y_true, self._dtype)
    y_pred = tf.cast(y_pred, self._dtype)
    # Flatten the input if its rank > 1.
    if y_pred.shape.ndims > 1:
      y_pred = tf.reshape(y_pred, [-1])
    if y_true.shape.ndims > 1:
      y_true = tf.reshape(y_true, [-1])
    if sample_weight is not None:
      sample_weight = tf.cast(sample_weight, self._dtype)
      if sample_weight.shape.ndims > 1:
        sample_weight = tf.reshape(sample_weight, [-1])
    # Accumulate the prediction to current confusion matrix.
    current_cm = tf.math.confusion_matrix(
        y_true,
        y_pred,
        self.num_classes,
        weights=sample_weight,
        dtype=self._dtype)
    return self.total_cm.assign_add(current_cm)

  def result(self):
    """Compute the mean intersection-over-union via the confusion matrix."""
    sum_over_row = tf.cast(
        tf.reduce_sum(self.total_cm, axis=0), dtype=self._dtype)
    sum_over_col = tf.cast(
        tf.reduce_sum(self.total_cm, axis=1), dtype=self._dtype)
    true_positives = tf.cast(
        tf.linalg.tensor_diag_part(self.total_cm), dtype=self._dtype)
    # sum_over_row + sum_over_col =
    #     2 * true_positives + false_positives + false_negatives.
    denominator = sum_over_row + sum_over_col - true_positives
    # divide_no_nan yields 0 for classes that never appear (denominator 0).
    return tf.math.divide_no_nan(true_positives, denominator)

  def reset_states(self):
    # Zero the accumulated confusion matrix between evaluation runs.
    tf.keras.backend.set_value(
        self.total_cm, np.zeros((self.num_classes, self.num_classes)))

  def get_config(self):
    # num_classes is the only extra constructor argument to serialize.
    config = {'num_classes': self.num_classes}
    base_config = super(PerClassIoU, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
|
[
"noreply@github.com"
] |
DemonDamon.noreply@github.com
|
3262a9aae0eca54f26f482a58a5c1b4c27d466ef
|
f39439548beba34b26f2e0cb40d9bcdfc5c85c71
|
/runtag/bootcamp.py
|
ee077f8e2a9e0ac5ddee6ac10f77e65dfb76a5a6
|
[] |
no_license
|
willook/ape-x2
|
0a7b813c59efc572b3a5b0c3b63d738bbec2a8e1
|
b299e75d20746f4d83ee7227fad9d8d3ef21a192
|
refs/heads/master
| 2023-02-21T15:41:27.241782
| 2021-01-21T06:37:01
| 2021-01-21T06:37:01
| 331,537,600
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
from .entities import Commander, Subordinate, Squad
class Bootcamp:
    """Factory that assembles squads (a commander plus subordinates) on a grid."""
    def __init__(self, grid):
        # Shared grid that every recruited entity is created on.
        self.grid = grid
    def recruit(self, name, number_of_subordinates=1):
        """Create a named squad led by a commander with N indexed subordinates."""
        new_squad = Squad(self.grid, name)
        new_squad.assign(Commander(self.grid))
        members = (Subordinate(self.grid, identifier=i)
                   for i in range(number_of_subordinates))
        for member in members:
            new_squad.assign(member)
        return new_squad
|
[
"you@example.com"
] |
you@example.com
|
045edd1c218f527ab6ff454da5507798a547fdd8
|
32cb84dd41e4be24c065bb205f226f9b121a6db2
|
/cconf/migrations/0001_initial.py
|
f89cbce98d36e32d7722b4cebcb2e3294a10711b
|
[] |
no_license
|
InformatykaNaStart/staszic-sio2
|
b38fda84bd8908472edb2097774838ceed08fcfa
|
60a127e687ef8216d2ba53f9f03cfaa201c59e26
|
refs/heads/master
| 2022-06-29T11:09:28.765166
| 2022-06-13T21:56:19
| 2022-06-13T21:56:19
| 115,637,960
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,221
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2019-10-05 08:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # First migration of the cconf app: per-contest C/C++ compiler settings.
    initial = True
    dependencies = [
        ('contests', '0005_submission_auto_rejudges'),
    ]
    operations = [
        migrations.CreateModel(
            name='CConfiguration',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Choices are (human-readable label, internal compiler id) pairs.
                ('compiler', models.CharField(choices=[(b'GCC 4.6 (32 bits)', b'compiler-gcc.4_6_3'), (b'GCC 4.8 (32 bits)', b'compiler-gcc.4_8_2'), (b'GCC 8.3 (32 bits)', b'compiler-gcc.8_3_0-i386'), (b'GCC 8.3 (64 bits)', b'compiler-gcc.8_3_0-amd64')], default=b'compiler-gcc.4_8_2', max_length=128)),
                ('cflags', models.CharField(default=b'-std=gnuc99 -static -O2 -s -lm', max_length=256)),
                ('cxxflags', models.CharField(default=b'-std=c++11 -static -O2 -s -lm', max_length=256)),
                # One configuration per contest.
                ('contest', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='contests.Contest')),
            ],
        ),
    ]
|
[
"hugo@staszic.waw.pl"
] |
hugo@staszic.waw.pl
|
de0b0b059a80c07749a16ea129918524290a5f28
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_3/David.Liu/q3.py
|
a26e4881ffe0696d7c67a6a0d0631ed73764fc4d
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 866
|
py
|
import math
# Digit count of each generated 0/1 string; f() fixes the first/last digit to 1.
n=32
# Countdown of results still to write before the program exits.
x=500
outf=open("q3large.out","w")
def factor(num):
    """Return the smallest divisor of num in [2, 500), or -1 if none exists."""
    for candidate in range(2, 500):
        if num % candidate == 0:
            return candidate
    return -1
def makelist(n):
    """Interpret the 0/1 string *n* in every base 2..10 and collect one small
    factor per base value.

    Stops early (returning a short list) as soon as some base's value has no
    factor below 500; a usable result therefore has exactly 9 entries.
    """
    factors = []
    width = len(n)
    for base in range(2, 11):
        value = sum(int(n[pos]) * base ** (width - 1 - pos)
                    for pos in range(width))
        divisor = factor(value)
        if divisor == -1:
            break
        factors.append(divisor)
    return factors
def f(n, k):
    """Recursively enumerate 0/1 suffixes of length n appended to prefix k;
    at each leaf, test k+'1' as a candidate and write it with its 9 factors.

    Side effects: writes to the global file `outf`, decrements the global
    countdown `x`, and terminates the whole program via exit() once enough
    results have been emitted.
    """
    if n==0:
        l=makelist(k+"1")
        # Only candidates composite in every base 2..10 (9 factors) qualify.
        if len(l)==9:
            outf.write(k+"1")
            for p in l:
                outf.write(" "+str(p))
            outf.write("\n")
            global x
            x=x-1
            print(x)
            if x==0:
                outf.close()
                exit()
    else:
        # Branch on the next digit being 0 or 1.
        f(n-1, k+"0")
        f(n-1, k+"1")
f(n-2, "1")
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
4a1758e7ca32cd345aa0c5b376f92a5dc0a0b52f
|
7996d7fefe2d3e5b4d53df4376d6fd8908407a1a
|
/authentication/urls.py
|
139def747c67ed7664c5e93050e1419ada49d7e8
|
[] |
no_license
|
Imraj423/twitterclone
|
2aa1446ef6e5dec6548f26c6254d478a696970ec
|
0c3dfab5436de9095248305d3994dc77549e0b1e
|
refs/heads/master
| 2021-01-15T02:07:06.684002
| 2020-04-04T23:47:39
| 2020-04-04T23:47:39
| 242,843,822
| 0
| 0
| null | 2020-03-07T04:02:21
| 2020-02-24T21:08:23
|
Python
|
UTF-8
|
Python
| false
| false
| 229
|
py
|
from django.urls import path
from . import views
# Route table for the authentication app: each path maps onto its view
# callable in this app's views module.
urlpatterns = [
    path('signup/', views.signup, name='signup'),
    path('login/', views.login_view, name='login'),
    path('logout/', views.logoutUser, name='logout'),
]
|
[
"dahqniss@gmail.com"
] |
dahqniss@gmail.com
|
5ed6ab127cba5918dd12490bf579baafac9dc250
|
1fa262359f91768f1b98c45944fd4a63645f4567
|
/variable_examples.py
|
03bd0c75fcb7f7afb46ffb09f440a337d5d26ae4
|
[] |
no_license
|
feleHaile/20190225KAPL
|
d1a95dd6632ba83b6cd3380d92e2a2a18a5a4942
|
3957c1d738cc3e42d5dac0fb4a6f6071a1bb391a
|
refs/heads/master
| 2020-05-15T22:31:51.881632
| 2019-02-28T20:11:29
| 2019-02-28T20:11:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 278
|
py
|
#!/usr/bin/env python
# Demo script: name binding, aliasing of mutable objects, and dynamic typing.
# Integers are immutable; y gets its own binding to the same int object.
x = 5
print(x)
y = x
# Lists are mutable: t and things are two names for the SAME list object.
things = [1, 2, 3]
t = things
print(t)
t.append(42)
print(things)  # the append is visible through both names
print(t is things)  # True: identity (same object), not just equality
print(id(t), id(things))  # identical ids confirm it is one object
print(type(x), type(t), type(type), type('spam'))
# Rebinding a name to a new object changes its type freely.
t = 42
print(type(t))
t = "amazon"
print(type(t))
# None is the "no value" singleton.
m = None
print(m)
|
[
"jstrickler@gmail.com"
] |
jstrickler@gmail.com
|
a8cfde36a731a0cfeb460159e2cc73d43db7c46e
|
101d866f8e2f84dc8f76181341180c13b38e0ecf
|
/case/Demo/test_global_init.py
|
96c3320be20e782b490bbf14bbb0cf12cef8b2c5
|
[] |
no_license
|
cming091/autotest
|
1d9a6f5f750c04b043a6bc45efa423f2e730b3aa
|
0f6fe31a27de9bcf0697c28574b97555fe36d1e1
|
refs/heads/master
| 2023-06-02T18:22:24.971786
| 2021-06-21T08:52:47
| 2021-06-21T08:52:47
| 378,858,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
# coding=utf-8
import pytest
import allure
from case.base import TestBase
@allure.feature('测试初始化全局数据')
@allure.link(url="https://pages/viewpage.action?pageId=84191585", name="测试用例")
@pytest.mark.usefixtures("init_module_data")
@pytest.mark.usefixtures("init_global_data")
class TestGlobalDataInit(TestBase):
    """Smoke tests for the global/module data-initialisation fixtures.

    The class is wired to the ``init_global_data`` and ``init_module_data``
    fixtures via usefixtures, so each step runs against initialised data.
    """
    @allure.story("1.第一步")
    def test_step_one(self, request):
        print('test step one...done')
    def test_step_two(self, request):
        print('test step two...done')
        assert 1 == 2  # NOTE(review): always fails -- looks deliberate (failure-reporting demo); confirm
    def test_step_three(self, request):
        print('test step three... done')
|
[
"349152234@qq.com"
] |
349152234@qq.com
|
eb5b33dc0fc012d521bf71c982068b71534887b6
|
a6e4a6f0a73d24a6ba957277899adbd9b84bd594
|
/sdk/python/pulumi_azure_native/sql/get_database_vulnerability_assessment.py
|
a6aa84ede40a8f73706d9941a81575ccf9b1a66e
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
MisinformedDNA/pulumi-azure-native
|
9cbd75306e9c8f92abc25be3f73c113cb93865e9
|
de974fd984f7e98649951dbe80b4fc0603d03356
|
refs/heads/master
| 2023-03-24T22:02:03.842935
| 2021-03-08T21:16:19
| 2021-03-08T21:16:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,722
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'GetDatabaseVulnerabilityAssessmentResult',
'AwaitableGetDatabaseVulnerabilityAssessmentResult',
'get_database_vulnerability_assessment',
]
# NOTE: generated by the Pulumi SDK Generator (see the file header);
# prefer regenerating over hand-editing.
@pulumi.output_type
class GetDatabaseVulnerabilityAssessmentResult:
    """
    A database vulnerability assessment.
    """
    def __init__(__self__, id=None, name=None, recurring_scans=None, storage_account_access_key=None, storage_container_path=None, storage_container_sas_key=None, type=None):
        # Validate each argument's runtime type, then store it with
        # pulumi.set so the @pulumi.output_type property getters can read it.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if recurring_scans and not isinstance(recurring_scans, dict):
            raise TypeError("Expected argument 'recurring_scans' to be a dict")
        pulumi.set(__self__, "recurring_scans", recurring_scans)
        if storage_account_access_key and not isinstance(storage_account_access_key, str):
            raise TypeError("Expected argument 'storage_account_access_key' to be a str")
        pulumi.set(__self__, "storage_account_access_key", storage_account_access_key)
        if storage_container_path and not isinstance(storage_container_path, str):
            raise TypeError("Expected argument 'storage_container_path' to be a str")
        pulumi.set(__self__, "storage_container_path", storage_container_path)
        if storage_container_sas_key and not isinstance(storage_container_sas_key, str):
            raise TypeError("Expected argument 'storage_container_sas_key' to be a str")
        pulumi.set(__self__, "storage_container_sas_key", storage_container_sas_key)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="recurringScans")
    def recurring_scans(self) -> Optional['outputs.VulnerabilityAssessmentRecurringScansPropertiesResponse']:
        """
        The recurring scans settings
        """
        return pulumi.get(self, "recurring_scans")
    @property
    @pulumi.getter(name="storageAccountAccessKey")
    def storage_account_access_key(self) -> Optional[str]:
        """
        Specifies the identifier key of the storage account for vulnerability assessment scan results. If 'StorageContainerSasKey' isn't specified, storageAccountAccessKey is required.
        """
        return pulumi.get(self, "storage_account_access_key")
    @property
    @pulumi.getter(name="storageContainerPath")
    def storage_container_path(self) -> Optional[str]:
        """
        A blob storage container path to hold the scan results (e.g. https://myStorage.blob.core.windows.net/VaScans/).  It is required if server level vulnerability assessment policy doesn't set
        """
        return pulumi.get(self, "storage_container_path")
    @property
    @pulumi.getter(name="storageContainerSasKey")
    def storage_container_sas_key(self) -> Optional[str]:
        """
        A shared access signature (SAS Key) that has write access to the blob container specified in 'storageContainerPath' parameter. If 'storageAccountAccessKey' isn't specified, StorageContainerSasKey is required.
        """
        return pulumi.get(self, "storage_container_sas_key")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetDatabaseVulnerabilityAssessmentResult(GetDatabaseVulnerabilityAssessmentResult):
    # Wrapper that makes the (already resolved) result usable with ``await``.
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` marks this as a generator function, which
        # the awaitable protocol requires; awaiting resolves immediately with
        # a plain result object.
        if False:
            yield self
        return GetDatabaseVulnerabilityAssessmentResult(
            id=self.id,
            name=self.name,
            recurring_scans=self.recurring_scans,
            storage_account_access_key=self.storage_account_access_key,
            storage_container_path=self.storage_container_path,
            storage_container_sas_key=self.storage_container_sas_key,
            type=self.type)
def get_database_vulnerability_assessment(database_name: Optional[str] = None,
                                          resource_group_name: Optional[str] = None,
                                          server_name: Optional[str] = None,
                                          vulnerability_assessment_name: Optional[str] = None,
                                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatabaseVulnerabilityAssessmentResult:
    """
    A database vulnerability assessment.
    API Version: 2020-08-01-preview.


    :param str database_name: The name of the database for which the vulnerability assessment is defined.
    :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
    :param str server_name: The name of the server.
    :param str vulnerability_assessment_name: The name of the vulnerability assessment.
    :param pulumi.InvokeOptions opts: Options controlling the provider invoke.
    :return: An awaitable wrapper around the typed assessment result.
    """
    # Marshal the arguments into the camelCase names the provider expects.
    __args__ = dict()
    __args__['databaseName'] = database_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['serverName'] = server_name
    __args__['vulnerabilityAssessmentName'] = vulnerability_assessment_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's pinned provider version.
        opts.version = _utilities.get_version()
    # Invoke the provider function and re-wrap the typed result so callers
    # may either use it directly or ``await`` it.
    __ret__ = pulumi.runtime.invoke('azure-native:sql:getDatabaseVulnerabilityAssessment', __args__, opts=opts, typ=GetDatabaseVulnerabilityAssessmentResult).value

    return AwaitableGetDatabaseVulnerabilityAssessmentResult(
        id=__ret__.id,
        name=__ret__.name,
        recurring_scans=__ret__.recurring_scans,
        storage_account_access_key=__ret__.storage_account_access_key,
        storage_container_path=__ret__.storage_container_path,
        storage_container_sas_key=__ret__.storage_container_sas_key,
        type=__ret__.type)
|
[
"noreply@github.com"
] |
MisinformedDNA.noreply@github.com
|
79db5f1c36777c88d7fa3bc39575c57b377af1e3
|
81d2815060bdf51e59f40366df72954ad28b2398
|
/4th_hw/fourth_homework/settings.py
|
fd3cb961f456cae77e2fd2c6099a1d6763910875
|
[] |
no_license
|
ningpop/LikeLion_7th_HW
|
6016604427e335250f2e3daeec27f17731612b47
|
b2c65a0b7a9a928a45cf07b67cd9ed18fb86d799
|
refs/heads/master
| 2020-06-30T18:08:54.024617
| 2019-12-30T16:17:03
| 2019-12-30T16:17:03
| 200,902,655
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,613
|
py
|
"""
Django settings for fourth_homework project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool( os.environ.get('DJANGO_DEBUG', False))
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'learning.apps.LearningConfig',
'accounts.apps.AccountsConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'fourth_homework.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['fourth_homework/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'fourth_homework.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'learning', 'static')
]
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
|
[
"craft1933@naver.com"
] |
craft1933@naver.com
|
048c333f5f321f508763e1bc3d96c4ec5a465231
|
3bddb2814881bb5e4679de3d31ac0bde57b86148
|
/trax/data/tokenizer.py
|
64081f4da0735026efb1c20851a2a900e708ad02
|
[
"Apache-2.0"
] |
permissive
|
google/trax
|
7a2b1a83eb8848136a5f5e07988efcef2f0b704f
|
1bb3b89427f669f2f0ec84633952e21b68964a23
|
refs/heads/master
| 2023-08-30T22:36:09.651644
| 2023-03-29T01:14:20
| 2023-03-29T01:15:47
| 213,020,264
| 8,180
| 917
|
Apache-2.0
| 2023-08-29T14:30:03
| 2019-10-05T15:09:14
|
Python
|
UTF-8
|
Python
| false
| false
| 5,810
|
py
|
# coding=utf-8
# Copyright 2022 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple invertible tokenizer.
Converts from a unicode string to a list of tokens
(represented as Unicode strings).
This tokenizer has the following desirable properties:
- It is invertible.
- Alphanumeric characters are broken away from non-alphanumeric characters.
- A single space between words does not produce an extra token.
- The full Unicode punctuation and separator set is recognized.
The tokenization algorithm is as follows:
1. Split the text into a list of tokens, splitting at every boundary of an
alphanumeric character and a non-alphanumeric character. This produces
a list which alternates between "alphanumeric tokens"
(strings of alphanumeric characters) and "non-alphanumeric tokens"
(strings of non-alphanumeric characters).
2. Remove every token consisting of a single space, unless it is
the very first or very last token in the list. These tokens are now
implied by the fact that there are two adjacent alphanumeric tokens.
e.g. u"Dude - that's so cool."
-> [u"Dude", u" - ", u"that", u"'", u"s", u"so", u"cool", u"."]
"""
import collections
import sys
import unicodedata
from absl import logging
import six
import tensorflow as tf
# This set contains all letter and number characters.
_ALPHANUMERIC_CHAR_SET = set(
six.unichr(i) for i in range(sys.maxunicode)
if (unicodedata.category(six.unichr(i)).startswith("L") or
unicodedata.category(six.unichr(i)).startswith("N")))
def encode(text):
"""Encode a unicode string as a list of tokens.
Args:
text: a unicode string
Returns:
a list of tokens as Unicode strings
"""
if not text:
return []
ret = []
token_start = 0
# Classify each character in the input string
is_alnum = [c in _ALPHANUMERIC_CHAR_SET for c in text]
for pos in range(1, len(text)):
if is_alnum[pos] != is_alnum[pos - 1]:
token = text[token_start:pos]
if token != u" " or token_start == 0:
ret.append(token)
token_start = pos
final_token = text[token_start:]
ret.append(final_token)
return ret
def decode(tokens):
"""Decode a list of tokens to a unicode string.
Args:
tokens: a list of Unicode strings
Returns:
a unicode string
"""
token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens]
ret = []
for i, token in enumerate(tokens):
if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]:
ret.append(u" ")
ret.append(token)
return "".join(ret)
def _read_filepattern(filepattern, max_lines=None, split_on_newlines=True):
"""Reads files matching a wildcard pattern, yielding the contents.
Args:
filepattern: A wildcard pattern matching one or more files.
max_lines: If set, stop reading after reading this many lines.
split_on_newlines: A boolean. If true, then split files by lines and strip
leading and trailing whitespace from each line. Otherwise, treat each
file as a single string.
Yields:
The contents of the files as lines, if split_on_newlines is True, or
the entire contents of each file if False.
"""
filenames = sorted(tf.io.gfile.glob(filepattern))
lines_read = 0
for filename in filenames:
with tf.io.gfile.GFile(filename) as f:
if split_on_newlines:
for line in f:
yield line.strip()
lines_read += 1
if max_lines and lines_read >= max_lines:
return
else:
if max_lines:
doc = []
for line in f:
doc.append(line)
lines_read += 1
if max_lines and lines_read >= max_lines:
yield "".join(doc)
return
yield "".join(doc)
else:
yield f.read()
def corpus_token_counts(
text_filepattern, corpus_max_lines, split_on_newlines=True):
"""Read the corpus and compute a dictionary of token counts.
Args:
text_filepattern: A pattern matching one or more files.
corpus_max_lines: An integer; maximum total lines to read.
split_on_newlines: A boolean. If true, then split files by lines and strip
leading and trailing whitespace from each line. Otherwise, treat each
file as a single string.
Returns:
a dictionary mapping token to count.
"""
counts = collections.Counter()
for doc in _read_filepattern(
text_filepattern,
max_lines=corpus_max_lines,
split_on_newlines=split_on_newlines):
counts.update(encode(doc))
return counts
def vocab_token_counts(text_filepattern, max_lines):
"""Read a vocab file and return a dictionary of token counts.
Reads a two-column CSV file of tokens and their frequency in a dataset. The
tokens are presumed to be generated by encode() or the equivalent.
Args:
text_filepattern: A pattern matching one or more files.
max_lines: An integer; maximum total lines to read.
Returns:
a dictionary mapping token to count.
"""
ret = {}
for i, line in enumerate(
_read_filepattern(text_filepattern, max_lines=max_lines)):
if "," not in line:
logging.warning("Malformed vocab line #%d '%s'", i, line)
continue
token, count = line.rsplit(",", 1)
ret[token] = int(count)
return ret
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
b64ec8ccaf0a47dd9f85266b92faf3122e5e57ff
|
6896fce8ee082f9730c056436e49ef0d16a6ea03
|
/exception/exceptions.py
|
cbec08fae3c703e147a7daef31cd584579c057d3
|
[] |
no_license
|
Sugeei/python-practice
|
5022ae7c34bc04972edebc15936248cb9869ec54
|
048df40500a059e4380f3ecc2581de96c9a1fc9b
|
refs/heads/master
| 2022-12-07T06:34:40.740379
| 2022-11-13T11:48:29
| 2022-11-13T11:48:29
| 121,074,877
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 854
|
py
|
from bs4 import BeautifulSoup
# Numeric error codes -> human-readable descriptions; consumed by
# UranusError to build its message prefix.
ERROR_MAP = {
    "200000": "invalid input parameter",
    "500000": "load data error",
    "600000": "dump data error",
    "700000": "data verify error",
    "800000": "algorithm error"
}
class UranusError(Exception):
    """Domain exception carrying a numeric error code.

    The code is translated to a human-readable prefix via ERROR_MAP; any
    free-form ``message`` text is appended verbatim.
    """

    def __init__(self, error_code=None, message=''):
        known = ERROR_MAP.get(error_code)
        # Unknown or absent codes contribute an empty prefix.
        prefix = known if known is not None else ''
        super(UranusError, self).__init__('%s%s' % (prefix, message))
        self.error_code = error_code
# assertion
# https://realpython.com/python-exceptions/
def divide(a, b):
    """Divide ``a`` by ``b`` and print the quotient.

    Raises:
        ValueError: if the division fails (e.g. ``b == 0`` or incompatible
            operand types); the original exception is chained as the cause
            so debugging information is not lost.
    """
    try:
        r = a / b
    except Exception as exc:
        # Narrowed from a bare ``except:`` (which also swallowed
        # SystemExit/KeyboardInterrupt) and chained for traceability.
        raise ValueError from exc
    else:  # success path only
        print('divide result is %s' % r)
    finally:  # always runs, exception or not
        print("done")
# divide(4,0)
# Demo of the success path: prints the quotient, then "done" from finally.
print('--------')
divide(4,1)
|
[
"215626824@qq.com"
] |
215626824@qq.com
|
b6ecbef1faf3aab95571a56f1eaf1dece622f4c0
|
5ec06dab1409d790496ce082dacb321392b32fe9
|
/clients/python/generated/test/test_org_apache_sling_distribution_monitor_distribution_queue_health_check_properties.py
|
78c34d346fd02e2be860bd78e70e6726077ba3fc
|
[
"Apache-2.0"
] |
permissive
|
shinesolutions/swagger-aem-osgi
|
e9d2385f44bee70e5bbdc0d577e99a9f2525266f
|
c2f6e076971d2592c1cbd3f70695c679e807396b
|
refs/heads/master
| 2022-10-29T13:07:40.422092
| 2021-04-09T07:46:03
| 2021-04-09T07:46:03
| 190,217,155
| 3
| 3
|
Apache-2.0
| 2022-10-05T03:26:20
| 2019-06-04T14:23:28
| null |
UTF-8
|
Python
| false
| false
| 1,475
|
py
|
# coding: utf-8
"""
Adobe Experience Manager OSGI config (AEM) API
Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: opensource@shinesolutions.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import swaggeraemosgi
from swaggeraemosgi.models.org_apache_sling_distribution_monitor_distribution_queue_health_check_properties import OrgApacheSlingDistributionMonitorDistributionQueueHealthCheckProperties # noqa: E501
from swaggeraemosgi.rest import ApiException
class TestOrgApacheSlingDistributionMonitorDistributionQueueHealthCheckProperties(unittest.TestCase):
"""OrgApacheSlingDistributionMonitorDistributionQueueHealthCheckProperties unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testOrgApacheSlingDistributionMonitorDistributionQueueHealthCheckProperties(self):
"""Test OrgApacheSlingDistributionMonitorDistributionQueueHealthCheckProperties"""
# FIXME: construct object with mandatory attributes with example values
# model = swaggeraemosgi.models.org_apache_sling_distribution_monitor_distribution_queue_health_check_properties.OrgApacheSlingDistributionMonitorDistributionQueueHealthCheckProperties() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"michael.bloch@shinesolutions.com"
] |
michael.bloch@shinesolutions.com
|
7b4ea0f4ff1d23cb5acb9a1696155e58d41a06ed
|
deb3c16ef887b6c496b8c920809d79b9f73aa2fe
|
/libs/telewizjaonline.py
|
3ff44fd184c1e68bbc06dccfa2babf9394c94358
|
[] |
no_license
|
Yaser7440/cmdline_iptvplayer
|
1ea35f4fd36c708176a43d402a49342c4cf723a5
|
4e287021d86cab8d6525262b647d144c6141d6b1
|
refs/heads/master
| 2021-01-24T10:49:29.278730
| 2016-09-21T09:24:26
| 2016-09-21T09:24:26
| null | 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 4,018
|
py
|
# -*- coding: utf-8 -*-
###################################################
# LOCAL import
###################################################
from Plugins.Extensions.IPTVPlayer.dToolsSet.iptvplayerinit import TranslateTXT as _
from Plugins.Extensions.IPTVPlayer.dToolsSet.iptvtools import printDBG, printExc, GetCookieDir
from Plugins.Extensions.IPTVPlayer.tools.iptvtypes import strwithmeta
from Plugins.Extensions.IPTVPlayer.libs.pCommon import common
from Plugins.Extensions.IPTVPlayer.libs.urlparser import urlparser
###################################################
###################################################
# FOREIGN import
###################################################
from Components.config import config, ConfigSelection, ConfigYesNo, ConfigText, getConfigListEntry
import re
try: import simplejson as json
except: import json
from os import path as os_path
############################################
###################################################
# E2 GUI COMMPONENTS
###################################################
from Plugins.Extensions.IPTVPlayer.components.asynccall import MainSessionWrapper
###################################################
###################################################
# Config options for HOST
###################################################
# Host-level setting: sort order used when listing channels (appended to the
# site's ?orderby= query parameter by TelewizjaOnline.getChannelsList).
config.plugins.iptvplayer.telewizjaonline_sort = ConfigSelection(default = "date", choices = [("date", "Date"), ("ostatnio-ogladane", "ostatnio oglądane"), ("title", "Title"), ("view", "Views"), ("like", "Likes"), ("comment", "Comments")])

def GetConfigList():
    # Build the configuration-screen entries for this host (just the sort order).
    optionList = []
    optionList.append(getConfigListEntry("Sortuj kanały według:", config.plugins.iptvplayer.telewizjaonline_sort))
    return optionList
###################################################
class TelewizjaOnline:
    """Scraper for telewizja-online.pl: category menu, channel lists, and
    per-channel stream links. All parsing depends on the site's current
    HTML markup (markers and regexes below)."""
    MAINURL = 'http://telewizja-online.pl/'
    HTTP_HEADER = { 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0', 'Referer': MAINURL }
    def __init__(self):
        self.cm = common()     # HTTP fetch + HTML-snippet helpers (project lib)
        self.up = urlparser()  # resolves embedded players to stream URLs
    def getCategoriesList(self):
        """Return [{'url', 'title'}, ...] scraped from the category menu."""
        printDBG("TelewizjaOnline.getCategoriesList")
        catsList = []
        sts,data = self.cm.getPage(TelewizjaOnline.MAINURL)
        if not sts: return catsList  # network/parse failure: empty list
        # Isolate the category <ul> that follows the "Kategorie Stacji TV" header.
        data = self.cm.ph.getDataBeetwenMarkers(data, 'Kategorie Stacji TV', '</ul>', False)[1]
        data = re.compile('<a[^>]+?href="([^"]+?)"[^>]*?>([^<]+?)<').findall(data)
        for item in data:
            catsList.append({'url':item[0], 'title':item[1]})
        return catsList
    def getChannelsList(self, baseUrl):
        """Return [{'title', 'url', 'icon'}, ...] for one category page,
        sorted per the user's telewizjaonline_sort setting."""
        printDBG("TelewizjaOnline.getChannelsList baseUrl[%s]" % baseUrl )
        channelsList = []
        url = baseUrl + '?orderby=' + config.plugins.iptvplayer.telewizjaonline_sort.value
        sts,data = self.cm.getPage(url)
        if not sts: return channelsList
        # Each channel tile lives in a col-md-3 div; split on the opening tag.
        data = self.cm.ph.getDataBeetwenMarkers(data, '<div class="col-md-3', '<center>', False)[1]
        data = data.split('<div class="col-md-3')
        for item in data:
            title = self.cm.ph.getSearchGroups(item, 'title="([^"]+?)"')[0]
            url = self.cm.ph.getSearchGroups(item, 'href="([^"]+?)"')[0]
            icon = self.cm.ph.getSearchGroups(item, 'src="(http[^"]+?)"')[0]
            channelsList.append({'title':title, 'url':url, 'icon':icon})
        return channelsList
    def getVideoLink(self, baseUrl):
        """Resolve a channel page to playable stream links via the embedded
        player iframe; returns whatever urlparser auto-detection yields."""
        printDBG("TelewizjaOnline.getVideoLink url[%s]" % baseUrl)
        def _url_path_join(a, b):
            # NOTE(review): defined but unused in this method -- confirm
            # whether it is dead code before removing.
            from urlparse import urljoin
            return urljoin(a, b)
        sts,data = self.cm.getPage(baseUrl)
        if not sts: return []
        data = self.cm.ph.getDataBeetwenMarkers(data, '<div id="player-embed">', '<div class="player-button">', False)[1]
        url = self.cm.ph.getSearchGroups(data, '<iframe[^>]+?src="([^"]+?)"')[0]
        if '' != url:
            # An iframe src was found: let auto-detection fetch it itself.
            data = None
        return self.up.getAutoDetectedStreamLink(url, data)
|
[
"zdzislaw22@windowslive.com"
] |
zdzislaw22@windowslive.com
|
85276507b54d3f216c070a9c8873c8ff120d8120
|
72a8181e5502128fec62b132fbe19cd9d50dab4c
|
/rules/plot.smk
|
28df3be0b7d220ab200f373be5f95348d4b02f2c
|
[] |
no_license
|
EthanHolleman/DRIP-AGS-ENCODE
|
1fd3b7065ec7f47e783674df14955a7b655edc08
|
e3bb63b6d1cae82ddc6fe8857a1e66e2f41b2781
|
refs/heads/main
| 2023-04-03T21:07:34.651467
| 2021-04-23T02:42:18
| 2021-04-23T02:42:18
| 360,375,206
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
smk
|
# Render the feature-intersection summary plot (PNG) from the concatenated
# per-sample intersection BED file; plotting is delegated to an R script
# run inside the R conda environment.
rule make_feature_intersection_plot:
    conda:
        '../envs/R.yml'
    input:
        'output/intersect/all_samples_concat.intersection.bed'
    output:
        'output/plots/feature_intersection_plot.png'
    shell:'''
    mkdir -p output/plots
    Rscript scripts/plot_encode_intersections.R {input} {output}
    '''
|
[
"etholleman@ucdavis.edu"
] |
etholleman@ucdavis.edu
|
0b2bc07bfe47ebc246deec181f61d7fa55a65b8f
|
e8d5471bd4a47794d66162060343f740e0febca4
|
/server/src/uds/auths/RegexLdap/__init__.py
|
4065f8086cf40c30d7e64bfeaa397d4232fd9e6e
|
[] |
no_license
|
git38438/openuds
|
ef939c2196d6877e00e92416609335d57dd1bd55
|
7d66d92f85f01ad1ffd549304672dd31008ecc12
|
refs/heads/master
| 2020-06-22T14:07:33.227703
| 2019-07-18T11:03:56
| 2019-07-18T11:03:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,687
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012 Virtual Cable S.L.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Virtual Cable S.L. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
@author: Adolfo Gómez, dkmaster at dkmon dot com
"""
from .Authenticator import RegexLdap
|
[
"dkmaster@dkmon.com"
] |
dkmaster@dkmon.com
|
1af643695b4192619ffcd424991f063d051f610c
|
6cac02f4df495f1acec3fde64335aa4881230cba
|
/tutorials/foo-tutorial/foo/foo.py
|
c828d610c07b3232e3f034ebfbced761d19fd565
|
[] |
no_license
|
ivannz/pkg_deploy_repo_renamed
|
96610728c097f0bb77a047b09681bb1d5fe6ffc3
|
9ce24ffcc5db6235dd3946f8a63123c3955ea957
|
refs/heads/master
| 2020-07-16T17:28:59.668633
| 2019-09-03T07:08:29
| 2019-09-03T07:08:29
| 205,832,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
def this():
    """Return the Zen of Python, decoded from the rot13 text in ``this.s``."""
    from this import s
    rot13 = {}
    for base in (65, 97):  # ord('A') and ord('a'): both letter cases
        for offset in range(26):
            rot13[chr(base + offset)] = chr((offset + 13) % 26 + base)
    # Non-letter characters pass through unchanged.
    return "".join(rot13.get(ch, ch) for ch in s)
|
[
"ivannz@yandex.ru"
] |
ivannz@yandex.ru
|
2a21ac1ec7913bc31720e0eb686e858987acfe58
|
75117becf9f75122e60cd806599ae24c16065689
|
/python_models8/neuron/builds/IF_curr_exp_i.py
|
307db7fb5aee6fefae5f3e8176e659b1466f3901
|
[] |
no_license
|
chanokin/sPyNNaker8NewModelTemplate
|
d911443fa650a4016828341fd252ddb2d7bad313
|
2d64f34ed5a8f5312a3176792bee57339785c5ea
|
refs/heads/master
| 2020-11-27T01:10:50.593741
| 2020-01-07T15:56:54
| 2020-01-07T15:56:54
| 229,252,692
| 0
| 0
| null | 2019-12-20T11:28:48
| 2019-12-20T11:28:48
| null |
UTF-8
|
Python
| false
| false
| 2,945
|
py
|
# A PyNN Model for standard neurons built from components
from spynnaker.pyNN.models.neuron import AbstractPyNNNeuronModelStandard
# Components from main tools
from spynnaker.pyNN.models.neuron.input_types import InputTypeCurrent
from spynnaker.pyNN.models.neuron.synapse_types import SynapseTypeExponential
from spynnaker.pyNN.models.defaults import default_initial_values
from spynnaker.pyNN.models.neuron.neuron_models.neuron_model_leaky_integrate_and_fire import NeuronModelLeakyIntegrateAndFire
from python_models8.neuron.threshold_types.AdaptiveThreshold import AdaptiveThreshold
class IF_curr_exp_i(AbstractPyNNNeuronModelStandard):
    """Current-based leaky integrate-and-fire neuron with exponential
    synaptic decay and an adaptive firing threshold (AdaptiveThreshold).

    Executes on SpiNNaker as the ``IF_curr_exp_i.aplx`` binary.
    """
    @default_initial_values({"v_init", "isyn_exc", "isyn_inh"})
    def __init__(self,
                 # neuron model parameters and state variables
                 i_offset=0.0,
                 v_init=-70.0,
                 v_rest=-70.0,
                 v_reset=-100.0,
                 tau_m=10.0,
                 cm=2.0,
                 tau_refrac=3.0,
                 # threshold type parameters
                 v_threshold=-10.0,
                 tau_threshold=120,
                 w_threshold=1.8,
                 # synapse type parameters
                 tau_syn_E=5.0,
                 tau_syn_I=5.0,
                 isyn_exc=0.0,
                 isyn_inh=0.0
                 ):
        if v_init is None:
            # v_init can presumably arrive as None via default_initial_values;
            # fall back to the resting potential. TODO confirm.
            v_init = v_rest
        self.__v_init = v_init
        self.__v_threshold = v_threshold
        self.__tau_threshold = tau_threshold
        self.__w_threshold = w_threshold
        # Assemble the four standard-model components and hand them to the
        # base class together with the on-chip binary name.
        neuron_model = NeuronModelLeakyIntegrateAndFire(v_init, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac)
        synapse_type = SynapseTypeExponential(tau_syn_E, tau_syn_I, isyn_exc, isyn_inh)
        input_type = InputTypeCurrent()
        threshold_type = AdaptiveThreshold(v_threshold, w_threshold, tau_threshold, v_rest)
        super(IF_curr_exp_i, self).__init__(
            model_name="IF_curr_exp_i",
            binary="IF_curr_exp_i.aplx",
            neuron_model=neuron_model,
            input_type=input_type,
            synapse_type=synapse_type,
            threshold_type=threshold_type
        )
    # Read/write accessors for the parameters cached on this wrapper.
    @property
    def v_init(self):
        return self.__v_init
    @v_init.setter
    def v_init(self, v_init):
        self.__v_init = v_init
    @property
    def v_threshold(self):
        return self.__v_threshold
    @v_threshold.setter
    def v_threshold(self, v_threshold):
        self.__v_threshold = v_threshold
    @property
    def tau_threshold(self):
        return self.__tau_threshold
    @tau_threshold.setter
    def tau_threshold(self, tau_threshold):
        self.__tau_threshold = tau_threshold
    @property
    def w_threshold(self):
        return self.__w_threshold
    @w_threshold.setter
    def w_threshold(self, w_threshold):
        self.__w_threshold = w_threshold
|
[
"chanokin@gmail.com"
] |
chanokin@gmail.com
|
5f69045f7aa47cdf638b20fe0213be9eee7ea9cf
|
37c38b97d0a4b8098ec3c35b7122afb1fbb9eac9
|
/newke/py36/py36/class_biao.py
|
ffce719d491f697100ed5daab2206f4b953fd2aa
|
[] |
no_license
|
lionheartStark/sword_towards_offer
|
8c2f9015a427317375d53eee982d630ffd4fa9c0
|
cb3587242195bb3f2626231af2da13b90945a4d5
|
refs/heads/master
| 2022-12-02T20:50:18.789828
| 2020-08-23T02:00:48
| 2020-08-23T02:00:48
| 266,257,109
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
from typing import List
from collections import defaultdict,deque
class Solution:
    def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> List[int]:
        """Return a valid order in which to take all courses, or [] if a
        dependency cycle makes it impossible.

        Kahn's topological sort. NOTE: despite the LeetCode-207-style name,
        this returns an ordering (problem 210), not a bool — the original
        ``-> bool`` annotation was wrong and is fixed here. The original also
        rescanned the whole dependency map for every finished course (O(V*E));
        this uses indegree counts + a dependents adjacency list (O(V+E)).

        :param numCourses: number of courses, labelled 0..numCourses-1
        :param prerequisites: pairs [course, prereq] meaning prereq before course
        """
        # indegree[c] = number of unmet prerequisites of course c.
        indegree = [0] * numCourses
        # dependents[p] = courses that get one step closer once p is done.
        dependents = defaultdict(list)
        for course, prereq in prerequisites:
            indegree[course] += 1
            dependents[prereq].append(course)

        # Seed with every course that has no prerequisites.
        queue = deque(c for c in range(numCourses) if indegree[c] == 0)
        order = []
        while queue:
            done = queue.popleft()
            order.append(done)
            for nxt in dependents[done]:
                indegree[nxt] -= 1
                if indegree[nxt] == 0:
                    queue.append(nxt)

        # Every course was scheduled iff the graph is acyclic.
        return order if len(order) == numCourses else []
|
[
"1003146780@qq.com"
] |
1003146780@qq.com
|
5a520bbe602829e4a1a651efc846844f07970208
|
bcfa02c21a73798872bbb28303233d1f0039cf00
|
/server/www/teleport/webroot/app/controller/dashboard.py
|
65a5f2eea63c35642406ac5a3c52e530667224cb
|
[
"Apache-2.0"
] |
permissive
|
zhoulhb/teleport
|
6301cd50c951bcbac21cbe24017eb8421ff57adc
|
54da194697898ef77537cfe7032d774555dc1335
|
refs/heads/master
| 2021-11-10T17:10:59.661130
| 2021-11-09T11:16:19
| 2021-11-09T11:16:19
| 192,643,069
| 0
| 0
|
Apache-2.0
| 2019-06-19T02:20:53
| 2019-06-19T02:20:52
| null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
# -*- coding: utf-8 -*-
from app.const import *
from app.base.controller import TPBaseHandler, TPBaseJsonHandler
from app.model import stats
class IndexHandler(TPBaseHandler):
    """Dashboard landing page; requires the web-login privilege."""

    def get(self):
        # Bail out unless the current session is allowed to use the web UI
        # (check_privilege is expected to have produced the error response).
        if self.check_privilege(TP_PRIVILEGE_LOGIN_WEB) != TPE_OK:
            return
        self.render('dashboard/index.mako')
|
[
"apex.liu@qq.com"
] |
apex.liu@qq.com
|
0f2ac223d96f5a6d71a7a54cad6006c3bc48733c
|
b6f8b2f023004fc0ea185b5e1ef2cbccce9ef513
|
/misc/figures_thesis.py
|
05bcf26cc2a72e5051b3bd7f7406d3d6a1d50359
|
[
"BSD-3-Clause"
] |
permissive
|
tenglongcong/petibm-examples
|
a73a6cdba864269fe9402d0a8b44582f2bcbcd9f
|
3817d50b0b26df5901701c0cfe82a2d57c964e89
|
refs/heads/master
| 2020-11-27T17:12:28.335357
| 2019-12-04T23:51:54
| 2019-12-04T23:51:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 979
|
py
|
"""Gather figures (to be included in thesis)."""
import os
import pathlib
import shutil
rootdir = pathlib.Path(__file__).absolute().parents[1]
n_parts = len(rootdir.parts)
# Create the output directory.
figdir = rootdir / 'figures_thesis'
figdir.mkdir(parents=True, exist_ok=True)
# Load paths of figures to gather.
inpath = rootdir / 'misc' / 'figures_thesis.txt'
filepaths = []
with open(inpath, 'r') as infile:
filepaths = [rootdir / line.strip() for line in infile.readlines()
if not line.startswith('#')]
# Define new names of the output figures.
filenames = []
for filepath in filepaths:
filename, filedir = filepath.name, filepath.parent
prefix = '_'.join([e for e in filedir.parts[n_parts + 2:]
if e != 'figures'])
filenames.append('_'.join([prefix, filename]).lstrip('_'))
# Copy figures to output directory.
for filepath, filename in zip(filepaths, filenames):
shutil.copy(filepath, figdir / filename)
|
[
"mesnardo@gwu.edu"
] |
mesnardo@gwu.edu
|
862e1582b1eea05a10d17fec0afe45b0ba83391c
|
17e08f795273d6f4233ab440c2706130f6520b58
|
/fannypack/utils/_deprecation.py
|
9174fd10a6bbb73f059b87105a1183e6c2716f63
|
[
"MIT"
] |
permissive
|
HaoWen470/fannypack
|
db5e6bb670004e470254e1e632899aeec38ee041
|
7e2c949de0e0cac69a95a5a777f8a4b1fa0fc17a
|
refs/heads/master
| 2023-01-03T20:35:35.248848
| 2020-10-31T09:01:01
| 2020-10-31T09:01:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,373
|
py
|
import functools
import warnings
from typing import Callable
def deprecation_wrapper(message: str, function_or_class: Callable) -> Callable:
    """Create a wrapper for a deprecated function or class.

    The wrapper emits *message* as a ``DeprecationWarning`` the first time
    the wrapped callable is invoked; subsequent calls are silent.

    Args:
        message (str): Warning message.
        function_or_class (Callable): Function or class to wrap.

    Returns:
        Callable: Wrapped function/class.
    """
    warned = False

    # functools.wraps preserves __name__/__doc__ etc. of the wrapped target
    # (the original wrapper hid them behind "curried").
    @functools.wraps(function_or_class)
    def curried(*args, **kwargs):  # pragma: no cover
        nonlocal warned
        if not warned:
            # stacklevel=2 attributes the warning to the caller, not this wrapper.
            warnings.warn(message, DeprecationWarning, stacklevel=2)
            warned = True
        return function_or_class(*args, **kwargs)

    return curried
def new_name_wrapper(
    old_name: str, new_name: str, function_or_class: Callable
) -> Callable:
    """Create a wrapper for a renamed function or class.

    The wrapper warns the first time the callable is used under its old
    name, pointing users at the new name.

    Args:
        old_name (str): Old name of function or class. Printed in warning.
        new_name (str): New name of function or class. Printed in warning.
        function_or_class (Callable): Function or class to wrap.

    Returns:
        Callable: Wrapped function/class.
    """
    message = f"{old_name} is deprecated! Use {new_name} instead."
    return deprecation_wrapper(message, function_or_class)
|
[
"yibrenth@gmail.com"
] |
yibrenth@gmail.com
|
91e61e3f950f46f177a4001f65690a53add7f6f1
|
1765ebc1c393ab4720c5fc5f9397516f5d66cfce
|
/setup.py
|
de4fc91885a05f03e2c2bf89d2af47d2323230c1
|
[
"MIT"
] |
permissive
|
Lukasa/rproxy
|
d1db08aa99470c3649258254ead291c6dbd2d202
|
c15f9f56608a53db19d4f3737f05dfd02d66bc60
|
refs/heads/master
| 2020-12-28T22:46:48.225095
| 2016-06-30T08:27:23
| 2016-06-30T08:27:23
| 67,593,070
| 0
| 0
| null | 2016-09-07T09:35:22
| 2016-09-07T09:35:22
| null |
UTF-8
|
Python
| false
| false
| 502
|
py
|
from setuptools import setup

# Read the long description up front inside a context manager so the file
# handle is closed deterministically (the original open(...).read() leaked it).
with open("README.rst") as readme:
    long_description = readme.read()

setup(
    name='rproxy',
    description='A super simple reverse proxy.',
    long_description=long_description,
    author='Amber Brown',
    author_email='hawkowl@atleastfornow.net',
    packages=['rproxy', 'twisted.plugins'],
    package_dir={"": "src"},
    install_requires=[
        'twisted >= 15.5.0',
        'pyopenssl',
        'txsni',
        'incremental',
    ],
    zip_safe=False,
    # Versioning is managed by the `incremental` plugin.
    setup_requires=["incremental"],
    use_incremental=True,
)
|
[
"hawkowl@atleastfornow.net"
] |
hawkowl@atleastfornow.net
|
3040eece0cb8864c9e7d39ddab4a66343a0f3988
|
2112e4cfd9568128573098f8e209962002f66a23
|
/app.py
|
23284a6ccf1befdf6ba398a9fa834d9e7048b7e3
|
[] |
no_license
|
amazingguni/stock-trader
|
0bd39cce6f6462c9648e3c8b2893b3e8379e37ca
|
252c9230885200cfde845f2a03677140564cfc62
|
refs/heads/main
| 2023-05-05T18:26:04.124690
| 2021-05-30T13:12:58
| 2021-05-30T13:12:58
| 362,616,293
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,296
|
py
|
from flask import Flask, Response
from flask_login import LoginManager
from flask_cors import CORS
from config import get_config_by_env
from container import Container
from web.admin import admin
from mongodb import db
login_manager = LoginManager()
def create_app():
    """Application factory: build and wire the Flask app.

    Registers a health-check route, loads env-specific config, enables CORS
    and login handling, wires the DI container into the admin view modules,
    and initialises the admin UI and MongoDB.
    """
    app = Flask(__name__, template_folder='./web/templates')
    @app.route('/')
    # pylint: disable=unused-variable
    def index():
        # Health-check endpoint: empty 200 response.
        return Response(status=200)
    app.config.from_object(get_config_by_env())
    CORS(app)
    login_manager.init_app(app)
    # Dependency-injection container, exposed on the app for later access.
    container = Container()
    app.container = container
    # Imported here rather than at module top — presumably to avoid import
    # cycles with this module; verify before moving them.
    from web.admin.views import sync as admin_sync_views
    from web.admin.views import stock as admin_stock_views
    from web.admin.views import portfolio as admin_portfolio_views
    admin_views = [admin_sync_views,
                   admin_stock_views, admin_portfolio_views, ]
    with app.app_context():
        container.wire(modules=admin_views)
    admin.init_app(app)
    db.init_app(app)
    return app
def register_blueprints(app, views):
    """Attach each view module's blueprint (``.bp``) to the given app."""
    for view_module in views:
        blueprint = view_module.bp
        app.register_blueprint(blueprint)
@login_manager.user_loader
def load_user(user_id):
    """flask-login callback: look up the session user by id, or None."""
    # Imported lazily; User lives in the core domain package.
    from core.user.domain.user import User
    # NOTE(review): assumes User.query is a SQLAlchemy-style query object and
    # that user_id's type matches User.id — confirm against the model.
    return User.query.filter(User.id == user_id).first()
# Module-level WSGI entry point (e.g. for a server importing `app:app`).
app = create_app()
|
[
"amazingguni@gmail.com"
] |
amazingguni@gmail.com
|
60c586549370e3fbb1ebd8bbe3f0cd9caba71e15
|
3f29503e6d776ef0914217b1c922f4bc78af4fdd
|
/13.HASH MAP/1338_Reduce Array Size to The Half_MED/solution.py
|
8a43eec458a5c93f42b5aa20c4251801a04035a9
|
[] |
no_license
|
kimmyoo/python_leetcode
|
cd4ff3c4f6d190840bbf5fb9acdca2b92554a6fa
|
813235789ce422a3bab198317aafc46fbc61625e
|
refs/heads/master
| 2023-08-16T07:36:38.688871
| 2023-08-15T22:38:00
| 2023-08-15T22:38:00
| 132,544,297
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
class Solution(object):
    def minSetSize(self, arr):
        """Return the minimum number of distinct values whose removal deletes
        at least half of ``arr`` (LeetCode 1338).

        Greedy: always remove the most frequent remaining value.

        Fixes vs. the original: ``collections`` was never imported;
        ``dict_values`` has no ``.sort()`` on Python 3; the accumulator
        shadowed the ``sum`` builtin; empty input crashed on ``max([])``.

        :type arr: List[int]
        :rtype: int
        """
        # Local import: the original relied on an implicit global `collections`.
        from collections import Counter

        half = len(arr) / 2
        # Frequencies of each distinct value, most common first.
        counts = sorted(Counter(arr).values(), reverse=True)
        removed = 0
        for picked, count in enumerate(counts, start=1):
            removed += count
            if removed >= half:
                return picked
        # Reached only for empty input (removing every value removes all).
        return 0
|
[
"kimmyoo@gmail.com"
] |
kimmyoo@gmail.com
|
3dcb6a3fba7b2c2c8998314caf270f7dc4b3d69c
|
824f19d20cdfa26c607db1ff3cdc91f69509e590
|
/TopInterviewQuestions/LinkedList/01-Delete-Node.py
|
e8cc4b8ff1bfc6f65dfa58aa9f76058738818e2a
|
[] |
no_license
|
almamuncsit/LeetCode
|
01d7e32300eebf92ab54c983de6e183242b3c985
|
17aa340649574c37067ec170ceea8d9326be2d6a
|
refs/heads/master
| 2021-07-07T09:48:18.069020
| 2021-03-28T11:26:47
| 2021-03-28T11:26:47
| 230,956,634
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def deleteNode(self, node):
        """Delete *node* from its singly linked list, given access only to
        that node (not the head).

        Copies the successor's value into *node* and unlinks the successor —
        O(1), replacing the original O(n) shift of every remaining value.
        The resulting value sequence is identical. *node* must not be the
        tail (guaranteed by the problem statement).
        """
        node.val = node.next.val
        node.next = node.next.next
|
[
"msarkar.cse@gmail.com"
] |
msarkar.cse@gmail.com
|
dfca92b9a02a0b34ddb02223c46fc05f0ac34303
|
e35fd52fe4367320024a26f2ee357755b5d5f4bd
|
/leetcode/problems/434.number-of-segments-in-a-string.py
|
229c119643f381afef999ff54714d595c048b7dc
|
[] |
no_license
|
liseyko/CtCI
|
a451967b0a0ce108c491d30b81e88d20ad84d2cd
|
c27f19fac14b4acef8c631ad5569e1a5c29e9e1f
|
refs/heads/master
| 2020-03-21T14:28:47.621481
| 2019-11-12T22:59:07
| 2019-11-12T22:59:07
| 138,658,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 642
|
py
|
#
# @lc app=leetcode id=434 lang=python3
#
# [434] Number of Segments in a String
#
# https://leetcode.com/problems/number-of-segments-in-a-string/description/
#
# algorithms
# Easy (37.34%)
# Total Accepted: 64.1K
# Total Submissions: 171.8K
# Testcase Example: '"Hello, my name is John"'
#
# Count the number of segments in a string, where a segment is defined to be a
# contiguous sequence of non-space characters.
#
# Please note that the string does not contain any non-printable characters.
#
# Example:
#
# Input: "Hello, my name is John"
# Output: 5
#
#
#
class Solution:
    def countSegments(self, s: str) -> int:
        """Count contiguous runs of non-space characters in *s* (LC 434).

        The original file ended with an empty method body (a SyntaxError);
        this supplies the implementation. ``str.split()`` with no argument
        drops empty fields, so each element is exactly one segment.
        """
        return len(s.split())
[
"liseyko@gmail.com"
] |
liseyko@gmail.com
|
1ac603767f5fde5c05e1576e3f1e35df16a53af1
|
63e0bc889563192a602463e662121058a4da30b5
|
/Smart.py
|
4c26367cb520a58c7477ccfe077c736086992b6b
|
[] |
no_license
|
adaptiveUK/rhinopythonscripts
|
11accd3048caad7b69024db55e5847acb3c7feb6
|
bdd260e7f7257de54a4fac25a465dcdafff3b68c
|
refs/heads/master
| 2021-01-17T08:38:26.863981
| 2011-07-25T19:07:29
| 2011-07-25T19:07:29
| 3,888,627
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,736
|
py
|
'''A module for wrapping geometry with UserString and Attribute Dictionaries'''
import Rhino
class SmartFeature(object):
    """Pairs a piece of Rhino geometry with a dict of user-string attributes."""

    def __init__(self, rhinoObjectOrTuple):
        self._parseConstructor(rhinoObjectOrTuple)

    def _parseConstructor(self, rhinoObjectOrTuple):
        """Accept either a (geometry, attribute-dict) pair or a RhinoObject."""
        kind = type(rhinoObjectOrTuple)
        if kind == tuple or kind == list:
            # build from geom, user string pair
            pair = rhinoObjectOrTuple
            # BUG FIX: originally called self._filterGeometry, which does not
            # exist (the method is _filterGeom) — AttributeError on this path.
            self.geom = self._filterGeom(pair[0])  # geometry
            self.attributes = pair[1]  # properties (as dictionary)
        else:  # assume RhinoObject
            rhObj = rhinoObjectOrTuple
            self.geom = self._filterGeom(rhObj.Geometry)
            self.attributes = {}
            rawAtts = rhObj.Attributes.GetUserStrings()
            for key in rawAtts.AllKeys:
                self.attributes[key] = rhObj.Attributes.GetUserString(key)

    def _filterGeom(self, geometry):
        """Unwrap Rhino Point objects to their Location; pass others through."""
        if type(geometry) == Rhino.Geometry.Point:
            return geometry.Location
        else:
            return geometry

    def objAttributes(self, objectAttributes):
        """Copy this feature's attributes onto *objectAttributes* as user strings."""
        for key in self.attributes:
            objectAttributes.SetUserString(key, self.attributes[key])
        return objectAttributes
def RhinoObjectsToSmartFeatures(RhinoObjectList):
    """Wrap each RhinoObject in the list as a SmartFeature."""
    features = []
    for rhino_object in RhinoObjectList:
        features.append(SmartFeature(rhino_object))
    return features
def replaceGeometries(smartFeatures, geometries):
    """Overwrite each feature's geometry with the one at the same index.

    Mutates the features in place and also returns them as a new list.
    Indexing (rather than zip) is deliberate: a shorter *geometries* list
    raises IndexError, exactly as the original did.
    """
    updated = []
    for index, feature in enumerate(smartFeatures):
        feature.geom = geometries[index]
        updated.append(feature)
    return updated
|
[
"benjamin.j.golder@gmail.com"
] |
benjamin.j.golder@gmail.com
|
c8aa00a8afba3954be9744854afed97a99745d3f
|
75dcb56e318688499bdab789262839e7f58bd4f6
|
/_algorithms_challenges/codewar/_Codewars-Solu-Python-master/src/kyu7_Linked_Lists-Move_Node.py
|
4367bf1aeaa9de6050ecb664223c5ff2f974bf3a
|
[] |
no_license
|
syurskyi/Algorithms_and_Data_Structure
|
9a1f358577e51e89c862d0f93f373b7f20ddd261
|
929dde1723fb2f54870c8a9badc80fc23e8400d3
|
refs/heads/master
| 2023-02-22T17:55:55.453535
| 2022-12-23T03:15:00
| 2022-12-23T03:15:00
| 226,243,987
| 4
| 1
| null | 2023-02-07T21:01:45
| 2019-12-06T04:14:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,573
|
py
|
class Node(object):
    """Singly linked list node: payload ``data`` plus a ``next`` pointer."""
    def __init__(self, data):
        self.data = data
        self.next = None
class Context(object):
    """Return-value holder bundling the two mutated lists (source, dest)."""
    def __init__(self, source, dest):
        self.source = source
        self.dest = dest
class Solution():
    """
    https://www.codewars.com/kata/linked-lists-move-node
    Linked Lists - Move Node
    Write a MoveNode() function which takes the node from the front of the source list and
    moves it to the front of the destintation list. You should throw an error when the source list is empty.
    For simplicity, we use a Context object to store and return the state of the two linked lists.
    A Context object containing the two mutated lists should be returned by moveNode.
    MoveNode() is a handy utility function to have for later problems.
    JavaScript
    var source = 1 -> 2 -> 3 -> null
    var dest = 4 -> 5 -> 6 -> null
    moveNode(source, dest).source === 2 -> 3 -> null
    moveNode(source, dest).dest === 1 -> 4 -> 5 -> 6 -> null
    Python
    source = 1 -> 2 -> 3 -> None
    dest = 4 -> 5 -> 6 -> None
    move_node(source, dest).source == 2 -> 3 -> None
    move_node(source, dest).dest == 1 -> 4 -> 5 -> 6 -> None
    Ruby
    source = 1 -> 2 -> 3 -> nil
    dest = 4 -> 5 -> 6 -> nil
    move_node(source, dest).source == 2 -> 3 -> nil
    move_node(source, dest).dest == 1 -> 4 -> 5 -> 6 -> nil
    The push() and buildOneTwoThree() functions need not be redefined.
    There is another kata called Linked Lists - Move Node In-place that is related but more difficult.
    Related Kata in order of expected completion (increasing difficulty):
    Linked Lists - Push & BuildOneTwoThree
    Linked Lists - Length & Count
    Linked Lists - Get Nth Node
    Linked Lists - Insert Nth Node
    Linked Lists - Sorted Insert
    Linked Lists - Insert Sort
    Linked Lists - Append
    Linked Lists - Remove Duplicates
    Linked Lists - Move Node
    Linked Lists - Move Node In-place
    Linked Lists - Alternating Split
    Linked Lists - Front Back Split
    Linked Lists - Shuffle Merge
    Linked Lists - Sorted Merge
    Linked Lists - Merge Sort
    Linked Lists - Sorted Intersect
    Linked Lists - Iterative Reverse
    Linked Lists - Recursive Reverse
    Inspired by Stanford Professor Nick Parlante's excellent Linked List teachings.
    """
    def __init__(self):
        pass

    def move_node_01(self, source, dest):
        """Detach the head node of *source* and prepend it to *dest*.

        Returns Context(new_source, new_dest). Raises ValueError on an empty
        source — now with a message (the original raised bare ValueError).
        """
        if not source:
            raise ValueError("source list is empty")
        node = source
        source = source.next
        node.next = dest
        return Context(source, node)
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
c3e539c4bf3ff081920dd8d7384b3aab42f9f2aa
|
c65d512975feed7dfe74f1117cdd1337293d9d60
|
/python/my_py_notes_万物皆对象/db_and_数据持久化/Mysql/mysql与python交互/py_mySQL.py
|
457399daaab72146341157a493068ef91fee16ba
|
[] |
no_license
|
Rockyzsu/StudyRepo
|
e5c6420e325917c2df7dc51d606be5fa3c2ee1b8
|
385785c09bebb56df156fd149a088043f38d0aab
|
refs/heads/master
| 2022-12-09T13:45:38.332899
| 2020-09-15T09:56:09
| 2020-09-15T09:56:09
| 295,388,871
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 799
|
py
|
# -*- coding:utf-8 -*-
# file: PyMySQL.py
#
# Demo: basic MySQL insert/delete/select via the MySQLdb driver.
# (Inline comments translated from Chinese.)
import MySQLdb  # import the MySQLdb module
db = MySQLdb.connect(host='localhost',  # connect to the database; server is this machine
                     user='root',  # user is root
                     passwd='root654321',  # password is root654321
                     db='python')  # the database name is python
cur = db.cursor()  # obtain a database cursor
cur.execute('insert into people (name,age,sex) values (\'Jee\',21,\'F\')')  # execute SQL statement
r = cur.execute('delete from people where age=20')  # execute SQL statement
r = cur.execute('select * from people')  # execute SQL statement
db.commit()  # commit the transaction
r = cur.fetchall()  # fetch the result rows
print(r)  # print the data
cur.close()  # close the cursor
db.close()  # close the database connection
|
[
"jinweizsu@gmail.com"
] |
jinweizsu@gmail.com
|
43fddc8268d67792feed4cbae6473c1f9b58a178
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_210/72.py
|
8b87687f01a56f0ea2dfa277b816f22c349a561c
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,170
|
py
|
#!/usr/bin/env python
# Google Code Jam submission (Python 2: raw_input/xrange/print statement).
# NOTE(review): appears to be a day-scheduling problem — two people (C, J)
# share a 24h (1440 min) circular day, each must be on duty 720 min, and we
# minimise hand-overs between their fixed activities; confirm problem id.
import sys
import numpy as np
T = int(raw_input())
for t in xrange(T):
    # solve the input
    Ac, Aj = raw_input().strip().split()
    Ac = int(Ac)
    Aj = int(Aj)
    activities = [] # start, end, duration, who
    ctotal = 0
    jtotal = 0
    C = np.zeros(Ac)
    D = np.zeros(Ac)
    for c in xrange(Ac):
        cc, dc = raw_input().strip().split()
        C[c] = int(cc)
        D[c] = int(dc)
        activities.append((C[c], D[c], D[c]-C[c], "C"))
        ctotal += D[c] - C[c]
    J = np.zeros(Aj)
    K = np.zeros(Aj)
    for j in xrange(Aj):
        jj, kj = raw_input().strip().split()
        J[j] = int(jj)
        K[j] = int(kj)
        activities.append((J[j], K[j], K[j]-J[j], "J"))
        jtotal += K[j] - J[j]
    activities.sort()
    # Free minutes each person must still fill outside fixed activities.
    cremain = 720 - ctotal
    jremain = 720 - jtotal
    result = 0
    if Aj + Ac == 1:
        # only one activity, so only two changes
        print "Case #{0}: {1}".format(t+1, 2)
        continue
    # at least two activities, so at least two proper gaps, and first != last
    ### FIND ALL GAPS AND THEIR TYPES, FIRST AND LAST
    first = activities[0][3]
    last = activities[-1][3]
    gaps = []
    prev = activities[0]
    for i in xrange(1, len(activities)):
        curr = activities[i]
        gaps.append((prev[3] + curr[3], curr[0] - prev[1])) # type of gap, length of gap
        prev = curr
    # add the last gap (wraps around midnight, hence the 1440)
    gaps.append((last + first, (1440 - activities[-1][1]) + activities[0][0]))
    # sort the gaps, then deal with them separately
    # (lexicographic sort groups 'CC' < 'CJ' < 'JC' < 'JJ', shortest first)
    gaps.sort()
    # start with CC gaps
    i = 0
    while i < len(gaps) and gaps[i][0] == 'CC':
        gap = gaps[i]
        if gap[1] <= cremain:
            # gap is filled in
            cremain -= gap[1]
        else:
            # cannot fill in the gap, so need 2 changes
            result += 2
        i += 1
    # deal with CJ and JC gaps: always exactly one hand-over each
    while i < len(gaps) and (gaps[i][0] == 'CJ' or gaps[i][0] == 'JC'):
        result += 1
        i += 1
    # deal with JJ gaps
    while i < len(gaps) and gaps[i][0] == 'JJ':
        gap = gaps[i]
        if gap[1] <= jremain:
            # gap is filled in
            jremain -= gap[1]
        else:
            # cannot fill in the gap, so need 2 changes
            result += 2
        i += 1
    # this is hopefully all
    print "Case #{0}: {1}".format(t+1, result)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
d335c1953908c703540fee6892011ac539fd127d
|
7b12eb45c1ea76ad9c186b858b5dfebf2c5b862a
|
/.history/DEBER_20210831114343.py
|
1bb3ad9fa1d068428284e79f3e43f05e552a0685
|
[
"MIT"
] |
permissive
|
Alopezm5/PROYECTO-PARTE-1
|
a1dce04009b24852c1c60e69bdf602ad3af0574b
|
bd7a8594edf08d41c6ca544cf6bac01ea4fcb684
|
refs/heads/main
| 2023-07-25T11:22:17.994770
| 2021-09-07T03:27:34
| 2021-09-07T03:27:34
| 403,670,226
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,536
|
py
|
class Nomina:
    """Base payroll record holding the shared person/company fields."""

    def __init__(self, nom="", ruc=0, dire="", tele=0, ciud="", tipEmpr="",
                 email="", estado="", profe="", dep=""):
        # Identity / contact fields.
        self.nombre = nom
        self.ruc = ruc
        self.direccion = dire
        self.telefono = tele
        self.correo = email
        # Location / classification fields.
        self.ciudad = ciud
        self.tipoEmpresa = tipEmpr
        self.estadocivil = estado
        self.profesion = profe
        self.departamento = dep
class Empresa(Nomina):
    """Company record: interactive capture and display."""

    def datosEmpresa(self):
        """Prompt on stdin for each company field and store it."""
        self.nombre = input("Ingresar nombre de la empresa: ")
        self.ruc = int(input("Ingresar ruc de la empresa: "))
        self.direccion = input("Ingresar la direccion de la empresa: ")
        self.telefono = int(input("Ingresar el numero de telefono de la empresa: "))
        self.ciudad = input("Ingresar ciudad donde esta la empresa: ")
        self.tipoEmpresa = input("Ingresar tipo de empresa publica o privada: ")

    def mostrarEmpresa(self):
        """Print the captured company record.

        BUG FIX: the original format string embedded unescaped double quotes
        inside a double-quoted literal, which is a SyntaxError; the intended
        text is preserved using a single-quoted literal.
        """
        print("Datos de la Empresa")
        print('La empresa "{}"\n De RUC #{} \n Está ubicada en {}\n '
              'Se puede comunicar al #{}\n Está empresa esta en la ciudad de "{}"\n '
              'Es una entidad "{}"'.format(self.nombre, self.ruc, self.direccion,
                                           self.telefono, self.ciudad, self.tipoEmpresa))
class Departamento(Empleado):
    """Adds the department field on top of Empleado.

    NOTE(review): Empleado is defined *below* this class in the original
    file, so the module raises NameError at import time — this class should
    be moved after Empleado (not changed here to keep the edit local).
    """

    def departa(self):
        """Prompt for and store the employee's department."""
        self.departamento = input("Ingresar el departamento al que pertenece el empleado: ")

    def mostrarDeparta(self):
        # BUG FIX: the original literal had unescaped inner double quotes
        # (SyntaxError); same text, single-quoted literal.
        print('El empleado pertenece al departamento de: "{}"'.format(self.departamento))
class Empleado(Nomina):
    """Employee capture/display (obrero = blue-collar, oficina = office)."""
    def empleado(self):
        # Fields shared by every kind of employee.
        self.nombre=input("Ingresar nombre del empleado: ")
        self.cedula=int(input("Ingresar numero de cedula: "))
        self.direccion=input("Ingresar la direccion del empleado: ")
        self.telefono=int(input("Ingresar numero de contacto del empleado: "))
        self.correo=input("Ingresar correo personal del empleado: ")
    def empleadoObrero(self):
        self.estadocivil=input("Ingresar estado civil del empleado: ")
    def empleadoOficina(self):  # TODO: two attributes of the office definition are missing
        self.profesion=input("Ingresar profesion del empleado: ")
    def mostrarempleado(self):
        # NOTE(review): unfinished format string — the {} is never filled in.
        print("El empleado: {} con # de ")
        # NOTE(review): `eleccion` is a module-level global set only by the
        # (currently commented-out) driver code; calling this without it
        # raises NameError — consider passing it as a parameter.
        if eleccion==1:
            print(self.estadocivil)
        elif eleccion==2:
            print(self.profesion)
# class Pagos():
# def __init__(self):
# pass
# def pagoNormal(self, valhora,hoesti,hotraba, desc, desper):
# self.valorhora=valhora
# self.horaestimada=hoesti
# self.horastrabajadas=hotraba
# self.descuentos=desc
# self.permisos=desper
# def pagoExtra(self, valhora,hoesti,hotraba,incentivos):
# self.valorhora=valhora
# self.horaestimada=hoesti
# self.horastrabajadas=hotraba
# self.bono=incentivos
# def Nomina(self, nom, valhora,hoesti,hotraba, desc, desper,incentivos):#faltan 8 atributos incluir cosas del empleado y sobretiempo
# self.nombre= nom
# self.valorhora=valhora
# self.horaestimada=hoesti
# self.horastrabajadas=hotraba
# self.descuentos=desc
# self.permisos=desper
# self.bono=incentivos
# --- Interactive demo: capture and display a company record ---------------
nom=Nomina()
emp=Empresa()
emp.datosEmpresa()
emp.mostrarEmpresa()
# emple=Empleado()
# emple.empleado()
# eleccion=int(input("Va a ingresar un empleado tipo 1. Obreo o 2.Oficina: "))
# if eleccion==1:
# emple.empleadoObrero()
# elif eleccion==2:
# emple.empleadoOficina()
# else:
# print("No selecciono el tipo de empleado")
# emple.mostrarempleado()
|
[
"85761855+Alopezm5@users.noreply.github.com"
] |
85761855+Alopezm5@users.noreply.github.com
|
2af5c8223fc344d1baaffd129038607c4fdce3a2
|
9d4c84a3ddee3c53bfb85b7e576be97f727caca0
|
/iso_two_param/optimization_blue_cv04.py
|
a998f3794d796a338806c045f2957cc9bb25c077
|
[
"MIT"
] |
permissive
|
cjekel/inv_bubble_opt
|
386cca2c009bf1c97007c14bcbf9cd5df482afe3
|
3ecd57ee91ee09ac38319d151adbd8e9c3b9a1bb
|
refs/heads/master
| 2023-08-15T04:26:09.702395
| 2021-08-14T01:10:49
| 2021-08-14T01:10:49
| 166,838,347
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,325
|
py
|
# MIT License
# Copyright (c) 2019 Charles Jekel
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import invbubble
import os
from scipy.optimize import fmin_l_bfgs_b
if __name__ == "__main__":
invbubble.delete_files()
# load the test data
homeuser = os.path.expanduser('~')
blue00 = np.load(os.path.join(homeuser, 'blue00.npy'),
allow_pickle=True)
blue01 = np.load(os.path.join(homeuser, 'blue01_rotated_90.npy'),
allow_pickle=True)
blue02 = np.load(os.path.join(homeuser, 'blue02_rotated_90.npy'),
allow_pickle=True)
blue03 = np.load(os.path.join(homeuser, 'blue03.npy'),
allow_pickle=True)
test_data = [blue00, blue01, blue02]
# initialize a maximum objective value
max_obj = 30.0 # mm
opt_hist_file = 'iso04r00.csv'
header = ['E', 'G', 'OBJ', 'Success']
my_opt = invbubble.BubbleOpt(opt_hist_file, header, max_obj,
None, None,
test_data=test_data,
weights=[1.0, 1.0, 1.0],
mat_model='iso-two')
np.random.seed(121)
my_bounds = np.zeros((2, 2))
my_bounds[0, 0] = 0.12
my_bounds[0, 1] = 0.25
my_bounds[1, 0] = 0.2
my_bounds[1, 1] = 0.9
X = np.array([[0.166, 0.60],
[0.155, 0.52],
[0.193, 0.67],
[0.167, 0.56],
[0.198, 0.7]])
xres = np.zeros_like(X)
fres = np.zeros(5)
for i, x0 in enumerate(X):
res = fmin_l_bfgs_b(my_opt.calc_obj_function_test_data, x0,
approx_grad=True, bounds=my_bounds, factr=1e12,
pgtol=1e-06, epsilon=1e-3, iprint=1, m=10000,
maxfun=200, maxiter=10, maxls=20)
xres[i] = res[0]
fres[i] = res[1]
# find the best result
best_ind = np.argmin(fres)
message = '\nBest result: \n' + str(fres[best_ind]) + """\n
Best values: \n""" + str(xres[best_ind]) + """\n
The full result: \n""" + str(fres) + """\n
Full values: \n""" + str(xres)
print(message)
invbubble.send_email('cjekel@ufl.edu', 'iso blue cv 04 done', message)
|
[
"cjekel@gmail.com"
] |
cjekel@gmail.com
|
fee00f670adab1b0c03e332059c2a4409748e8a6
|
9b483d42da47237d28a9f80c378aba412b89f5b1
|
/special/cookie.py
|
59a4301e436c65e01d0d8954723bc8de92163246
|
[] |
no_license
|
smartree/Tentacle
|
b0c83b671c1abe26338125d672d77b277abd2b28
|
a53e046f3434bf1ac4b606ba7dfe951d9b7f5464
|
refs/heads/master
| 2020-05-23T01:25:57.576494
| 2019-05-09T15:33:00
| 2019-05-09T15:33:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,776
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Check header' cookies secure, e.g. httponly, secure and so on.
'''
from re import findall
from re import search
from re import I
def info(data=None):
    """Describe this plugin: name, summary, severity level and category."""
    return {
        "name": "cookie",
        "info": "cookie",
        "level": "low",
        "type": "info",
    }
def _plus(data, info,key = "cookie"):
data['flag'] = 1
data['res'].append({"info": info, "key": key})
return data
def prove(data):
    """Inspect the target's HTTP response headers for missing cookie flags
    and security headers, recording findings on *data* (best-effort: any
    network/parse error leaves partial findings in place).

    Fixes vs. the original: the trailing comma made ``cookies`` a 1-tuple
    (every regex call then raised TypeError); the ``findall`` calls were run
    on the headers dict instead of the cookie string; the bare ``except:``
    is narrowed to ``Exception``.
    """
    data = init(data, 'web')
    if data['url']:
        try:
            headers = curl('get', data['url']).headers
            if 'cookies' in headers.keys():
                # NOTE(review): response cookies normally arrive as
                # 'Set-Cookie'; confirm curl() really exposes 'cookies'.
                cookies = headers['cookies']
                if not search(r'secure;', cookies, I):
                    data = _plus(data, 'Cookie without Secure flag set')
                if not search(r'httponly;', cookies, I):
                    data = _plus(data, 'Cookie without HttpOnly flag set')
                if search(r'domain\=\S*', cookies, I):
                    domain = findall(r'domain\=(.+?);', cookies, I)
                    if domain:
                        data = _plus(data, 'Session Cookie are valid only at Sub/Domain: %s' % domain[0])
                if search(r'path\=\S*', cookies, I):
                    path = findall(r'path\=(.+?);', cookies, I)
                    if path:
                        data = _plus(data, 'Session Cookie are valid only on that Path: %s' % path[0])
                if search(r'(.+?)\=\S*;', cookies, I):
                    cookie_sessions = findall(r'(.+?)\=\S*;', cookies, I)
                    for cs in cookie_sessions:
                        if cs not in ['domain', 'path', 'expires']:
                            data = _plus(data, 'Cookie Header contains multiple cookies')
                            break
            if 'x-xss-protection' not in headers.keys():
                data = _plus(data, 'X-XSS-Protection header missing', 'x-xss-protection')
            if 'x-frame-options' not in headers:
                data = _plus(data, 'Clickjacking: X-Frame-Options header missing', 'x-frame-options')
            if 'content-type' not in headers:
                data = _plus(data, 'Content-Type header missing', 'content-type')
            if 'strict-transport-security' not in headers:
                data = _plus(data, 'Strict-Transport-Security header missing', 'strict-transport-security')
            if 'x-content-type-options' not in headers:
                data = _plus(data, 'X-Content-Type-Options header missing', 'x-content-type-options')
        except Exception:
            # Best-effort scan: ignore network/parse failures.
            pass
    return data
if __name__=='__main__':
    # Imported here (not at module top) because 'script' is only importable
    # when run inside the framework; quick smoke test against a public host.
    from script import init, curl
    print(prove({'target_host':'www.baidu.com','target_port': 22,'flag':-1,'data':[],'res':[]}))
|
[
"546577246@qq.com"
] |
546577246@qq.com
|
48c70164518cd8c13035be54a8fd4613798d6f4b
|
574a23f57daec3d462967e30ff808779127dc839
|
/herle_inventarios/inventarios/serializers.py
|
42469033e71751845727f0149bc8dc594c449395
|
[] |
no_license
|
RedGranatum/herleBackEnd
|
18d3aecf75eb0d349470747f3fca4dbfd1581e80
|
8f21a7f7d0c2d3fdf3ae52eab6b31cbea7d3da97
|
refs/heads/master
| 2023-01-07T07:32:17.725947
| 2020-12-15T00:23:38
| 2020-12-15T00:23:38
| 57,180,381
| 0
| 0
| null | 2022-12-26T20:22:35
| 2016-04-27T03:25:14
|
Python
|
UTF-8
|
Python
| false
| false
| 618
|
py
|
from rest_framework import serializers
from .models import Inventario
class InventarioSerializer(serializers.ModelSerializer):
    """DRF serializer for Inventario: explicit whitelist of the purchase,
    material, size/weight and price-conversion fields."""
    class Meta:
        model = Inventario
        fields = ("id","compra_detalle","invoice_compra","material","calibre","ancho","largo",
		  "codigo_producto","num_rollo","peso_kg","peso_lb","transporte","pais",
		  "precio_libra","factor","precio_dolar","factor_impuesto","con_comercializadora",
		  "porc_comercializadora", "factor_kilos","valor_kilo_dolar","valor_tonelada_dolar","valor_kilo_pesos",
		  "valor_final_kilo_pesos","descripcion","comentarios","precio_tonelada_dolar")
|
[
"raultr@gmail.com"
] |
raultr@gmail.com
|
59ac6c1ca8c6b389889458634600394990b5dc69
|
1f62195fb1960c6bddb38343adbe41c0497e40bc
|
/torchrl/utils/gym_wrappers/atari_wrappers.py
|
14d18b1a90860b41d033edaf180e5bd29d3c4bef
|
[
"MIT"
] |
permissive
|
alyssonmazoni/torchrl
|
800ec186893607adac14c38c39c1d36f3488d3d8
|
75e82f073b7234432b491a21e5083bc55e3e985a
|
refs/heads/master
| 2020-03-16T06:12:43.733257
| 2018-05-05T02:37:50
| 2018-05-05T02:37:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,291
|
py
|
'''
Copied from:
https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
'''
import numpy as np
import gym
from gym import spaces
import cv2
def atari_wrap(env, frame_skip=4, noop_max=30):
    """Apply the DeepMind-style Atari preprocessing stack to *env*.

    Requires a ``NoFrameskip`` variant so frame skipping is controlled by
    MaxAndSkipEnv here rather than by the underlying ALE environment.
    """
    assert 'NoFrameskip' in env.spec.id
    env = EpisodicLifeEnv(env)
    env = NoopResetEnv(env, noop_max=noop_max)
    env = MaxAndSkipEnv(env, skip=frame_skip)
    # Some games wait for FIRE to be pressed before actually starting.
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = WarpFrame(env)
    env = ClipRewardEnv(env)
    return env
class NoopResetEnv(gym.Wrapper):
    # NOTE: overrides `_reset` — the pre-0.9 gym wrapper API.
    def __init__(self, env, noop_max=30):
        """Sample initial states by taking random number of no-ops on reset.
        No-op is assumed to be action 0.
        """
        gym.Wrapper.__init__(self, env)
        self.noop_max = noop_max
        # Test hook: force an exact no-op count instead of sampling one.
        self.override_num_noops = None
        self.noop_action = 0
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
    def _reset(self, **kwargs):
        """ Do no-op action for a number of steps in [1, noop_max]."""
        self.env.reset(**kwargs)
        if self.override_num_noops is not None:
            noops = self.override_num_noops
        else:
            noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
        assert noops > 0
        obs = None
        for _ in range(noops):
            obs, _, done, _ = self.env.step(self.noop_action)
            if done:
                # Episode ended during the no-ops: start over from reset.
                obs = self.env.reset(**kwargs)
        return obs
class FireResetEnv(gym.Wrapper):
    def __init__(self, env):
        """Take action on reset for environments that are fixed until firing."""
        gym.Wrapper.__init__(self, env)
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3
    def _reset(self, **kwargs):
        # Press FIRE (action 1) then action 2 to unstick games that wait for
        # a button press; re-reset if either step ends the episode.
        self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(1)
        if done:
            self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(2)
        if done:
            self.env.reset(**kwargs)
        return obs
class EpisodicLifeEnv(gym.Wrapper):
    def __init__(self, env):
        """Make end-of-life == end-of-episode, but only reset on true game over.
        Done by DeepMind for the DQN and co. since it helps value estimation.
        """
        gym.Wrapper.__init__(self, env)
        # Lives remaining after the last step; 0 until the first step is taken.
        self.lives = 0
        # True when the underlying env really ended the episode (game over),
        # as opposed to a synthetic done emitted on life loss below.
        self.was_real_done = True
    def _step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        # check current lives, make loss of life terminal,
        # then update lives to handle bonus lives
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in lives == 0 condition for a few frames
            # so it's important to keep lives > 0, so that we only reset once
            # the environment advertises done.
            done = True
        self.lives = lives
        return obs, reward, done, info
    def _reset(self, **kwargs):
        """Reset only when lives are exhausted.
        This way all states are still reachable even though lives are episodic,
        and the learner need not know about any of this behind-the-scenes.
        """
        if self.was_real_done:
            obs = self.env.reset(**kwargs)
        else:
            # no-op step to advance from terminal/lost life state
            obs, _, _, _ = self.env.step(0)
        self.lives = self.env.unwrapped.ale.lives()
        return obs
class MaxAndSkipEnv(gym.Wrapper):
    """Repeat each action for `skip` frames and emit one max-pooled frame.

    Max-pooling the last two raw frames removes the sprite flicker that
    Atari hardware produces by drawing some objects only on alternate frames.
    """

    def __init__(self, env, skip=4):
        gym.Wrapper.__init__(self, env)
        # Two-slot buffer holding the most recent raw observations.
        self._obs_buffer = np.zeros((2, ) + env.observation_space.shape, dtype='uint8')
        self._skip = skip

    def _step(self, action):
        """Repeat `action`, sum the rewards, and max over the last two frames."""
        total_reward = 0.0
        done = None
        second_last, last = self._skip - 2, self._skip - 1
        for i in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            if i == second_last:
                self._obs_buffer[0] = obs
            elif i == last:
                self._obs_buffer[1] = obs
            total_reward += reward
            if done:
                break
        # The observation on the done=True frame doesn't matter.
        max_frame = self._obs_buffer.max(axis=0)
        return max_frame, total_reward, done, info
class ClipRewardEnv(gym.RewardWrapper):
    """Clip every reward to its sign, i.e. one of {-1, 0, +1}."""

    def _reward(self, reward):
        # np.sign maps positive -> 1, zero -> 0, negative -> -1.
        return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
    """Convert frames to 84x84 grayscale, as in the Nature DQN paper."""

    def __init__(self, env):
        gym.ObservationWrapper.__init__(self, env)
        self.width = 84
        self.height = 84
        # Channel-first layout: (1, H, W).
        self.observation_space = spaces.Box(
            low=0, high=255, shape=(1, self.height, self.width))

    def _observation(self, frame):
        # assumes the raw frame is RGB — TODO confirm against the env's layout
        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        resized = cv2.resize(gray, (self.width, self.height),
                             interpolation=cv2.INTER_AREA)
        # Add a leading channel axis: (H, W) -> (1, H, W).
        return resized[None, :, :]
|
[
"lucasgouvaz@gmail.com"
] |
lucasgouvaz@gmail.com
|
b5935e769053443d0cf189014e51f82faab401ff
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02785/s130693103.py
|
ccbb8dc59483b523e40685ea0fb13bcd61a7bf65
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
import heapq

# N monsters with the given healths; K special attacks, each of which
# destroys one monster outright. Spending the attacks on the K largest
# healths minimizes the remaining total, which is the answer.
(N, K) = map(int, input().split())
# Negate healths so heapq's min-heap pops the largest health first.
h = [int(x) * -1 for x in input().split()]
ans = 0
heapq.heapify(h)
# Bug fix: the original guarded the pop loop with `if K <= N`, so for
# K > N nothing was removed and the full health sum was printed instead
# of 0. Popping min(K, N) elements handles both cases correctly.
for _ in range(min(K, N)):
    heapq.heappop(h)
while h != []:
    ans -= heapq.heappop(h)
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.