blob_id stringlengths 40–40
| directory_id stringlengths 40–40
| path stringlengths 3–288
| content_id stringlengths 40–40
| detected_licenses listlengths 0–112
| license_type stringclasses 2 values
| repo_name stringlengths 5–115
| snapshot_id stringlengths 40–40
| revision_id stringlengths 40–40
| branch_name stringclasses 684 values
| visit_date timestamp[us] 2015-08-06 10:31:46 – 2023-09-06 10:44:38
| revision_date timestamp[us] 1970-01-01 02:38:32 – 2037-05-03 13:00:00
| committer_date timestamp[us] 1970-01-01 02:38:32 – 2023-09-06 01:08:06
| github_id int64 4.92k – 681M ⌀
| star_events_count int64 0 – 209k
| fork_events_count int64 0 – 110k
| gha_license_id stringclasses 22 values
| gha_event_created_at timestamp[us] 2012-06-04 01:52:49 – 2023-09-14 21:59:50 ⌀
| gha_created_at timestamp[us] 2008-05-22 07:58:19 – 2023-08-21 12:35:19 ⌀
| gha_language stringclasses 147 values
| src_encoding stringclasses 25 values
| language stringclasses 1 value
| is_vendor bool 2 classes
| is_generated bool 2 classes
| length_bytes int64 128 – 12.7k
| extension stringclasses 142 values
| content stringlengths 128 – 8.19k
| authors listlengths 1–1
| author_id stringlengths 1–132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d29e7055408af7ae7da5c6ab906e176235637629
|
67fbacb9af9185d2c32968f51ab642d8cc87a505
|
/backend/venv/bin/pip
|
412bd607e118b840a47da2dcb55830ebdc70dda7
|
[] |
no_license
|
HoldenGs/tutorial-repo
|
1013d4f237a328b6480d7fcf286e09a4b2f49475
|
4735f6327a706ad1fa5648ee36873a1846b2f339
|
refs/heads/main
| 2023-03-23T06:18:42.027761
| 2021-03-11T23:34:47
| 2021-03-11T23:34:47
| 344,987,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
#!/Users/holden/tutorial-repo/backend/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"holdeng96@gmail.com"
] |
holdeng96@gmail.com
|
|
5c07099b4dfaf777ccdfddb969e1d0d9ca5712b6
|
bff3053fcc40995bbd8c8bf251042ada99d15427
|
/Logging Advanced - Loggers, Handlers, and Formatters.py
|
ee7c509f9e76af6a1653d9d0ff50104949bceaab
|
[] |
no_license
|
iampaavan/Pure_Python
|
e67213eb42229614517c249b4f5b0a01c71c8ce9
|
e488b05ea526ab104ebc76a8e5e621301bed8827
|
refs/heads/master
| 2020-04-27T23:10:48.482213
| 2019-06-30T19:32:08
| 2019-06-30T19:32:08
| 174,765,589
| 1
| 0
| null | 2019-04-21T01:39:53
| 2019-03-10T01:39:13
|
Python
|
UTF-8
|
Python
| false
| false
| 2,891
|
py
|
import logging
import employeelogs
# DEBUG: Detailed information, typically of interest only when diagnosing problems.
# INFO: Confirmation that things are working as expected.
# WARNING: An indication that something unexpected happened, or indicative of some problem in the near future (e.g. ‘disk space low’). The software is still working as expected.
# ERROR: Due to a more serious problem, the software has not been able to perform some function.
# CRITICAL: A serious error, indicating that the program itself may be unable to continue running.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s: %(name)s: %(message)s')
# file_handler = logging.FileHandler('sample.log')
file_handler = logging.FileHandler('sample_1.log')
file_handler.setLevel(logging.ERROR)
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
# logging.basicConfig(filename='sample.log', level=logging.DEBUG, format='%(asctime)s: %(name)s: %(message)s')
def add_function(x, y):
"""Addition Function"""
return x + y
def sub_function(x, y):
"""Subtraction Function"""
return x - y
def mul_function(x, y):
"""Multiplication Function"""
return x * y
def div_function(x, y):
"""Division Function"""
try:
result = x / y
except ZeroDivisionError:
# logger.error('Tried to divide the number by 0. ')
logger.exception('Tried to divide the number by 0. ')
else:
return result
# return x / y
num_1 = 20
# num_2 = 10
num_2 = 0
addition_result = add_function(num_1, num_2)
print(f'Add: {num_1} + {num_2} = {addition_result}')
# logging.debug(f'Add: {num_1} + {num_2} = {addition_result}')
# logging.info(f'Add: {num_1} + {num_2} = {addition_result}')
logger.debug(f'Add: {num_1} + {num_2} = {addition_result}')
subtraction_result = sub_function(num_1, num_2)
print(f'Sub: {num_1} - {num_2} = {subtraction_result}')
# logging.debug(f'Sub: {num_1} - {num_2} = {subtraction_result}')
# logging.info(f'Sub: {num_1} - {num_2} = {subtraction_result}')
logger.debug(f'Sub: {num_1} - {num_2} = {subtraction_result}')
multiplication_result = mul_function(num_1, num_2)
print(f'Mul: {num_1} * {num_2} = {multiplication_result}')
# logging.debug(f'Mul: {num_1} * {num_2} = {multiplication_result}')
# logging.info(f'Mul: {num_1} * {num_2} = {multiplication_result}')
logger.debug(f'Mul: {num_1} * {num_2} = {multiplication_result}')
division_result = div_function(num_1, num_2)
print(f'Div: {num_1} / {num_2} = {division_result}')
# logging.debug(f'Div: {num_1} / {num_2} = {division_result}')
# logging.info(f'Div: {num_1} / {num_2} = {division_result}')
logger.debug(f'Div: {num_1} / {num_2} = {division_result}')
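# Descriptive note (added): with the configuration above, sample_1.log only
# receives records at ERROR level and above -- here, the ZeroDivisionError
# traceback written by logger.exception() in div_function() -- while the
# StreamHandler (no level set) echoes every record the DEBUG-level logger
# emits, both using the same '%(asctime)s: %(name)s: %(message)s' formatter.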
|
[
"noreply@github.com"
] |
iampaavan.noreply@github.com
|
43a68df31cc43515a301af182ef1a918be3f2f14
|
fe91e76bfd936cb7062fed96897933f4ed2c7fc2
|
/py/api.py
|
3f7e593a66546ef48c0b59585fef00203723ee12
|
[] |
no_license
|
shixing/CDS
|
9ed64940787afe11e8521ec7ea17676f77651bd4
|
a4d4c013a3189e9c092d2c3dff9e81c9219cefa8
|
refs/heads/master
| 2016-09-06T16:45:18.442503
| 2015-08-15T14:54:53
| 2015-08-15T14:54:53
| 21,624,047
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,828
|
py
|
from flask import Flask
from flask.ext.restful import reqparse, abort, Api, Resource
from flask import request,make_response
import sys
import configparser
import urllib
import json
import hashlib
import time
from utils.config import get_config
import numpy as np
from vector.bruteForce import BruteForceSearch
from utils.config import get_config
import vector.LSH
import vector.LSH2gram
import logging
import exp.analysis
from nearpy.distances.angular import AngularDistance
# log
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# get config
config_fn = sys.argv[1]
config = get_config(config_fn)
# lsh2gram
logging.info('H1')
lsh2gram = vector.LSH2gram.LSH2gram()
logging.info('H2')
lsh2gram.load_from_config_light(config)
logging.info('H3')
lsh2gram.engine_2gram.build_permute_index(200,10,500)
# app
logging.info('H4')
app = Flask(__name__)
api = Api(app)
logging.info('H5')
# decompose
class Decompose(Resource):
def get(self,t):
results = None
print request.args
print t
if t == 'q1':
qw = request.args['w']
k = int(request.args['k'])
naive = False
if 'naive' in request.args:
naive = True
print qw,k,naive
results = lsh2gram.query_1_2(qw,k,naive)
if t == 'q2':
qw1 = request.args['w1']
qw2 = request.args['w2']
k = int(request.args['k'])
naive = False
if 'naive' in request.args:
naive = True
results = lsh2gram.query_2_2(qw1,qw2,k,naive)
return make_response(repr(results))
logging.info('H6')
api.add_resource(Decompose,'/api/decompose/<string:t>')
if __name__ == '__main__':
logging.info('H7')
app.run()
logging.info('H8')
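# Minimal client sketch for the API above (assumptions: the service is running
# locally on Flask's default port 5000, it was started with a valid config file
# as sys.argv[1], and the word values below are placeholders).
import requests
# 'q1': decompose a single word, returning the top-k results; include 'naive'
# in the query string to force the brute-force path.
resp = requests.get("http://127.0.0.1:5000/api/decompose/q1",
                    params={"w": "example", "k": 5})
print(resp.text)
# 'q2': decompose a pair of words.
resp = requests.get("http://127.0.0.1:5000/api/decompose/q2",
                    params={"w1": "foo", "w2": "bar", "k": 5})
print(resp.text)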
|
[
"shixing19910105@gmail.com"
] |
shixing19910105@gmail.com
|
5b7761b8cced9a76746413cdbbdc596b9522710b
|
dd6c21308e1cba24658c8ca7a49e2499cd167da6
|
/venv/Lib/site-packages/openpyxl/descriptors/nested.py
|
490f672e9c9242caa052d95f2a59ffaa5c90ad3a
|
[
"MIT"
] |
permissive
|
ansonsry/Freshshop
|
3a53db4d6d0bf1d6705498869a13a3aa7db6ab8c
|
79ab8beb1aa993f6365182c8d3bb478ee4e028f8
|
refs/heads/master
| 2021-06-20T18:54:08.009409
| 2019-07-26T02:56:55
| 2019-07-26T03:02:27
| 198,931,513
| 0
| 0
|
MIT
| 2021-03-19T22:33:14
| 2019-07-26T02:23:49
|
Python
|
UTF-8
|
Python
| false
| false
| 2,656
|
py
|
from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyxl
"""
Generic serialisable classes
"""
from .base import (
Convertible,
Bool,
Descriptor,
NoneSet,
MinMax,
Set,
Float,
Integer,
String,
Text,
)
from .sequence import Sequence
from openpyxl.compat import safe_string
from openpyxl.xml.functions import Element, localname
class Nested(Descriptor):
nested = True
attribute = "val"
def __set__(self, instance, value):
if hasattr(value, "tag"):
tag = localname(value)
if tag != self.name:
raise ValueError("Tag does not match attribute")
value = self.from_tree(value)
super(Nested, self).__set__(instance, value)
def from_tree(self, node):
return node.get(self.attribute)
def to_tree(self, tagname=None, value=None, namespace=None):
namespace = getattr(self, "namespace", namespace)
if value is not None:
if namespace is not None:
tagname = "{%s}%s" % (namespace, tagname)
value = safe_string(value)
return Element(tagname, {self.attribute:value})
class NestedValue(Nested, Convertible):
"""
Nested tag storing the value on the 'val' attribute
"""
pass
class NestedText(NestedValue):
"""
Represents any nested tag with the value as the contents of the tag
"""
def from_tree(self, node):
return node.text
def to_tree(self, tagname=None, value=None, namespace=None):
namespace = getattr(self, "namespace", namespace)
if value is not None:
if namespace is not None:
tagname = "{%s}%s" % (namespace, tagname)
el = Element(tagname)
el.text = safe_string(value)
return el
class NestedFloat(NestedValue, Float):
pass
class NestedInteger(NestedValue, Integer):
pass
class NestedString(NestedValue, String):
pass
class NestedBool(NestedValue, Bool):
def from_tree(self, node):
return node.get("val", True)
class NestedNoneSet(Nested, NoneSet):
pass
class NestedSet(Nested, Set):
pass
class NestedMinMax(Nested, MinMax):
pass
class EmptyTag(Nested, Bool):
"""
Boolean if a tag exists or not.
"""
def from_tree(self, node):
return True
def to_tree(self, tagname=None, value=None, namespace=None):
if value:
namespace = getattr(self, "namespace", namespace)
if namespace is not None:
tagname = "{%s}%s" % (namespace, tagname)
return Element(tagname)
|
[
"ansonsry@sina.com"
] |
ansonsry@sina.com
|
ebbddea20b6e452b551538b9bb64078fb607829f
|
13ef33cb9067419fae743be1edb46471374c3a64
|
/hrm/cron_job.py
|
5ce271862e4a2b39de34798546facb57e0d9b768
|
[] |
no_license
|
andrewidya/littleerp
|
8c33ad0ee4dac2a85bea4e540b748a47d61f3886
|
0cf8fb1be8ac3c27304807ed7aac7eb0032c2cb6
|
refs/heads/master
| 2021-01-24T00:42:26.962248
| 2019-07-22T01:53:58
| 2019-07-22T01:53:58
| 68,295,804
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,054
|
py
|
from django_cron import CronJobBase, Schedule
from django.utils import timezone
from django.conf import settings
import datetime
from hrm.models import EmployeeContract
class EmployeeContractCronJob(CronJobBase):
RUN_EVERY_MINS = 1
schedule = Schedule(run_every_mins=RUN_EVERY_MINS)
code = 'hrm.employee_contract_cron_job'
def do(self):
print("Checking contract")
print(timezone.now())
print("===========================================================")
warning = timezone.now() + datetime.timedelta(days=settings.MINIERP_SETTINGS['HRM']['recontract_warning'])
contract_list = EmployeeContract.objects.all().filter(end_date__lte=warning.date())
for contract in contract_list:
contract.contract_status = contract.check_contract_status()
contract.save(update_fields=['contract_status'])
print("===========================================================")
print("DONE")
print("===========================================================")
|
[
"andrywidyaputra@gmail.com"
] |
andrywidyaputra@gmail.com
|
32638c42c6165833ce4f701f734dedd12a335cf7
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/28aPKtEcWJPMwb9mm_4.py
|
a6c9ac7ad1ca5e7b223c9eb9f8311b6376d58e43
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,058
|
py
|
"""
**Mubashir** needs your help to learn Python Programming. Help him by
modifying a given string `txt` as follows:
* Reverse the string given.
* Replace each letter to its position in the alphabet for example (a = 1, b = 2, c = 3, ...).
* Join the array and convert it to a number.
* Convert the number to binary.
* Convert the string back to a number.
See below example for more understanding :
**modify("hello") ➞ 111001101011101101101010**
"hello" = "olleh"
"olleh" = ['15', '12', '12', '5', '8']
['15', '12', '12', '5', '8'] = 15121258
15121258 = "111001101011101101101010"
"111001101011101101101010" = 111001101011101101101010
### Examples
modify("hello") ➞ 111001101011101101101010
modify("mubashir") ➞ 10110000110010000110011111000111000001
modify("edabit") ➞ 111111110110001110001
### Notes
There are no spaces and the string is lowercase.
"""
def modify(txt):
return int(bin(int(''.join([str(ord(x)-ord('a')+1) for x in txt[::-1]])))[2:])
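# The same transformation written out step by step (illustration only; the
# helper name modify_steps is not part of the original exercise):
def modify_steps(txt):
    reversed_txt = txt[::-1]                                            # "hello" -> "olleh"
    positions = [str(ord(ch) - ord('a') + 1) for ch in reversed_txt]    # ['15', '12', '12', '5', '8']
    joined = int(''.join(positions))                                    # 15121258
    binary = bin(joined)[2:]                                            # "111001101011101101101010"
    return int(binary)                                                  # 111001101011101101101010
print(modify("hello"))        # 111001101011101101101010
print(modify_steps("hello"))  # same result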
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
5a6e8cc5612def0640aceaad72b3d46b018163fc
|
197ccfb1b9a2713155efd2b994dc4fda8d38751f
|
/src/contracts/Test.py
|
e4c0fd86f4b6631be0f91b05e0a34b8fa06d0a7e
|
[
"MIT"
] |
permissive
|
stonecoldpat/sprites-python
|
39bf8d436e625eabf4511f6e2d7528dc01d4e0e7
|
398abb86f27dcbd5a91b6aad648a06529c029d26
|
refs/heads/master
| 2020-03-20T09:40:27.468535
| 2018-06-14T10:44:34
| 2018-06-14T10:44:34
| 137,344,692
| 0
| 1
| null | 2018-06-14T10:42:30
| 2018-06-14T10:42:29
| null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
class Test:
def __init__(self, contract):
self._contract = contract
def get(self):
return self._contract.functions.get()
def getArrays(self):
return self._contract.functions.getArrays()
def getStruct(self):
return self._contract.functions.getStruct()
def incr(self):
return self._contract.functions.incr()
|
[
"sveitser@gmail.com"
] |
sveitser@gmail.com
|
9f0fd0bb22ffa5ad4f70cdc78e5c4a4fede87dfd
|
ce6fc44470dcb5fca78cdd3349a7be70d75f2e3a
|
/AtCoder/Beginner 146/F.py
|
97ed9139e54db4abaa0a8e3ada55a064fb3ad8c3
|
[] |
no_license
|
cormackikkert/competitive-programming
|
f3fa287fcb74248ba218ecd763f8f6df31d57424
|
3a1200b8ff9b6941c422371961a127d7be8f2e00
|
refs/heads/master
| 2022-12-17T02:02:40.892608
| 2020-09-20T11:47:15
| 2020-09-20T11:47:15
| 266,775,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,838
|
py
|
N, M = map(int, input().split())
s = input()
N += 1
# Max size of tree
tree = [0] * (2 * N);
n = N
# function to build the tree
def build() :
# insert leaf nodes in tree
for i in range(N) :
tree[n + i] = (float('inf'), float('inf'));
# build the tree by calculating parents
for i in range(n - 1, 0, -1) :
tree[i] = min(tree[i << 1], tree[i << 1 | 1])
# function to update a tree node
def updateTreeNode(p, value) :
# set value at position p
tree[p + n] = (value, p);
p = p + n;
# move upward and update parents
i = p;
while i > 1 :
tree[i >> 1] = min(tree[i], tree[i ^ 1]);
i >>= 1;
# function to get the minimum on the interval [l, r)
def query(l, r) :
res = (float('inf'), float('inf'));
    # loop to find the minimum in the range
l += n;
r += n;
while l < r :
if (l & 1) :
res = min(res, tree[l]);
l += 1
if (r & 1) :
r -= 1;
res = min(res, tree[r]);
l >>= 1;
r >>= 1
return res;
par = [None for i in range(N)]
build()
updateTreeNode(0, 0)
for i in range(1, N):
if s[i] == "1": continue
r = query(max(0, i - M), i)
par[i] = r[1]
updateTreeNode(i, r[0]+1)
# updateTreeNode(i, query(max(0, i - M), i))
# for k in range(1, M+1):
# if i - k < 0: break
# dp[i] = min(dp[i], (dp[i-k][0], (i - k)))
moves = []
cur = N - 1
if par[cur] == float('inf'):
print(-1)
quit()
try:
while par[cur] != None:
new = par[cur]
moves.append(cur - new)
cur = new
except:
print(-1)
quit()
moves = list(reversed(moves))
print(" ".join(map(str, moves)))
|
[
"u6427001@anu.edu.au"
] |
u6427001@anu.edu.au
|
aeeae4d63fce0632d0226ebc30ce1218b88f42cd
|
9769c74a7e69a134657ef73dbe3c251bf171b33c
|
/CoreBuild/ServiceItems/ComCommand/ComCommandItem.py
|
bbdbca8ae13f37e0e995ff531291766ee8fa4f63
|
[] |
no_license
|
caojiaju-2017/SimpleCode
|
4bdf506837ebdee905373b87198853a84afebc45
|
9f2aa7fea296d0acaf91b75e03bdaa185d93fe89
|
refs/heads/master
| 2020-03-19T05:06:50.028584
| 2018-06-25T13:05:22
| 2018-06-25T13:05:22
| 135,901,570
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,011
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from Platform.ItemBase import *
import json
class ComCommandItem(ItemBase):
def __init__(self):
super(ComCommandItem, self).__init__()
self.itemtype = ItemType.Module
self.shapetype = ImageShape.Image100002
        self.itemname = "串口操作"  # "Serial port operation"
        self.iteminfo = "模块提供串口的读写能力"  # "This module provides serial-port read/write capability"
self.buildConfig()
pass
def getCfgJson(self):
return self.self_to_json()
def checkResult(self):
print("sub class")
def buildConfig(self):
'''
        Interface initialization function
:return:
'''
inputS1 = InputBase()
inputS1.type = InputType.String
self.setInputDefine("param1", inputS1)
inputS2 = InputBase()
inputS2.type = InputType.String
inputS2.inputIndex = 2
self.setInputDefine("param2", inputS2)
outputS = OutputBase()
outputS.type = InputType.Boolean
self.setOutput(outputS)
|
[
"jiaju_cao@hotmail.com"
] |
jiaju_cao@hotmail.com
|
45c5e3cd5bc85b023d0ec30c5297be2edce48931
|
0fac73e70eeb8e3b8635de8a4eaba1197cd42641
|
/shop/migrations/0013_auto_20161218_1533.py
|
696a3cbe43f4ecd4a798b9baa098e4109d16141b
|
[] |
no_license
|
gauraviit1/myshop_aws
|
0e6c9d822cbbc6505eb7c7a71654d34591e7b168
|
261b296d79cfdf8fa4cb9105b4e2fe70e864f6a6
|
refs/heads/master
| 2021-01-19T13:44:12.977253
| 2017-03-03T07:52:58
| 2017-03-03T07:52:58
| 82,444,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-12-18 10:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('shop', '0012_auto_20161218_1516'),
]
operations = [
migrations.AlterModelOptions(
name='attribute',
options={'ordering': ('weight', 'waist_size', 'size')},
),
]
|
[
"mcjail.shi.hp@gmail.com"
] |
mcjail.shi.hp@gmail.com
|
f0632da5f1733d878b113b1a94d7c12fad6b81a5
|
149e9e52304a970ffb256f290fce5f614c9e20c4
|
/Python Programming language/DataCampPractice/Personal_programs/MyProjects/Projects1_mean_median_std_correlation.py
|
6f838cdb179693d4985c0ae00a5f4d1b9b360ade
|
[] |
no_license
|
Pasquale-Silv/Improving_Python
|
7451e0c423d73a91fa572d44d3e4133b0b4f5c98
|
96b605879810a9ab6c6459913bd366b936e603e4
|
refs/heads/master
| 2023-06-03T15:00:21.554783
| 2021-06-22T15:26:28
| 2021-06-22T15:26:28
| 351,806,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,555
|
py
|
import numpy as np
list1 = [3, 6, 9, 15]
list2 = [5, 9, 8, 22]
np_list1 = np.array(list1)
np_list2 = np.array(list2)
np_mean_list1 = np.mean(np_list1)
np_mean_list2 = np.mean(np_list2)
print(np_mean_list1)
print("Prova media lista 1: " + str((3+6+9+15) / 4))
print(np_mean_list2)
print("Calcolo mediane:")
np_median_lista1 = np.median(np_list1)
np_median_lista2 = np.median(np_list2)
print(np_median_lista1)
print(np_median_lista2)
print("Calcolo deviazione standard:")
np_std_lista1 = np.std(np_list1)
np_std_lista2 = np.std(np_list2)
print(np_std_lista1)
print(np_std_lista2)
if(np_std_lista1 > np_std_lista2):
print("La prima lista presenta una variabilità maggiore")
elif(np_std_lista1 < np_std_lista2):
print("La seconda lista presenta una variabilità maggiore")
elif(np_std_lista1 == np_std_lista2):
print("Le liste presentano la medesima variabilità")
rho = np.corrcoef(np_list1 , np_list2)
print(rho)
rhoSingolo = rho[0,1]
print("\nCorrelazione tra le due liste: " + str(rhoSingolo))
if(rhoSingolo > 0.85):
    print("La correlazione è molto forte ed è positiva.")
elif(rhoSingolo > 0.6):
    print("La correlazione è abbastanza forte ed è positiva.")
elif(rhoSingolo == 0):
    print("Non c'è correlazione lineare tra le liste oppure sussiste un altro tipo di relazione.")
elif(rhoSingolo < -0.85):
    print("La correlazione è molto forte ed è negativa.")
elif(rhoSingolo < -0.6):
    print("La correlazione è abbastanza forte ed è negativa.")
else:
    print("Sussiste una debole correlazione.")
|
[
"55320885+Pasquale-Silv@users.noreply.github.com"
] |
55320885+Pasquale-Silv@users.noreply.github.com
|
edf3569da3983da953bb7449f015668772156b34
|
a2f6e449e6ec6bf54dda5e4bef82ba75e7af262c
|
/venv/Lib/site-packages/nltk/lm/__init__.py
|
5911f5b8057f85608708dfbffc1ab6ca16d731ee
|
[] |
no_license
|
mylonabusiness28/Final-Year-Project-
|
e4b79ccce6c19a371cac63c7a4ff431d6e26e38f
|
68455795be7902b4032ee1f145258232212cc639
|
refs/heads/main
| 2023-07-08T21:43:49.300370
| 2021-06-05T12:34:16
| 2021-06-05T12:34:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:9d7a198b8f9b3a257beb02a331dbe48e28ed7deef6b7c199766958dba6bf362f
size 7695
|
[
"chuksajeh1@gmail.com"
] |
chuksajeh1@gmail.com
|
d46665a79cfdfa452ad3b320f43b4685085f7948
|
226c99d29ac089f9a9581983dd6020a267599221
|
/chap07/beatiful_soup_weather.py
|
d1e53dafc5f28d6d51930fd50b51ea2ce1ac1111
|
[] |
no_license
|
azegun/python_study
|
c1b265abb41172609d144d4ba331c920ac8b9312
|
520f1f7bc83771f74f59304c66223f2c06fcf285
|
refs/heads/master
| 2023-05-11T16:32:23.740742
| 2021-06-10T01:03:33
| 2021-06-10T01:03:33
| 368,451,889
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 656
|
py
|
# Import the modules.
from urllib import request
from bs4 import BeautifulSoup
# Read the nationwide weather from the KMA (Korea Meteorological Administration) with the urlopen() function.
target = request.urlopen("http://www.kma.go.kr/weather/forecast/mid-term-rss3.jsp?stnId=108")
# Parse the web page with BeautifulSoup.
soup = BeautifulSoup(target, "html.parser")
for location in soup.select("location"):
print("도시 : ", location.select_one("city").string)
print("날씨 : ", location.select_one("wf").string)
print("최저기온 : ", location.select_one("tmn").string)
print("최고기온 : ", location.select_one("tmx").string)
print()
|
[
"tkdrjs7@naver.com"
] |
tkdrjs7@naver.com
|
05a724d4fdcff756091a51829c32237081499db4
|
23c4f6d8a2a6b97077628c2a012b2b402c816d91
|
/LeetCode算法题/LCP_01_猜数字/猜数字.py
|
80a344b4bcf26b2aeff3e905478392845ce83c5b
|
[] |
no_license
|
exueyuanAlgorithm/AlgorithmDemo
|
7ef6ff8104e8da5a81037795184115fb0ac8ca9a
|
d34d4b592d05e9e0e724d8834eaf9587a64c5034
|
refs/heads/master
| 2023-07-16T19:00:05.664780
| 2021-09-04T11:31:07
| 2021-09-04T11:31:07
| 277,327,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
class Solution(object):
def game(self, guess, answer):
"""
:type guess: List[int]
:type answer: List[int]
:rtype: int
"""
dui = 0
for i,b in enumerate(guess):
a = answer[i]
if a == b:
dui += 1
return dui
|
[
"1079240024@qq.com"
] |
1079240024@qq.com
|
c98de0cf945e97e117c47bac2d33c22d71a8e9ff
|
0e1813197ae5e4d9ca0709089002a48249c2cc1f
|
/UnitTest/test_phonebook.py
|
4b368e4de6f6c6566c8635f9225740f98bf4d31a
|
[] |
no_license
|
sockduct/general
|
55c1ac766d61b66463ae89c7f6fd0a748fdd79ad
|
f885352dc5b402cbc2488e66d37b421d5a4f82f0
|
refs/heads/master
| 2021-01-17T16:45:06.968332
| 2017-06-26T14:52:11
| 2017-06-26T14:52:11
| 95,455,423
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,845
|
py
|
####################################################################################################
# Unit Testing with Python - Module 1
####################################################################################################
from phonebook import Phonebook
import unittest
class PhonebookTest(unittest.TestCase):
# Test Fixture
# Setup (half of fixture) - run before each test method
def setUp(self):
self.phonebook = Phonebook()
def test_lookup_entry_by_name(self):
self.phonebook.add('Bob', '12345')
self.assertEqual('12345',self.phonebook.lookup('Bob'))
def test_missing_entry_raises_KeyError(self):
with self.assertRaises(KeyError):
self.phonebook.lookup('missing')
def test_empty_phonebook_is_consistent(self):
self.assertTrue(self.phonebook.is_consistent())
# Example of what not to do - split up into workable tests following this function
@unittest.skip('poor example')
def test_is_consistent(self):
self.assertTrue(self.phonebook.is_consistent())
self.phonebook.add('Bob', '12345')
self.assertTrue(self.phonebook.is_consistent())
self.phonebook.add('Mary', '012345')
self.assertTrue(self.phonebook.is_consistent())
# Not a good way to write test cases
    # Once an assertion fails, the rest of the test case is abandoned
self.phonebook.add('Sue', '12345') # identical to Bob
self.assertFalse(self.phonebook.is_consistent())
self.phonebook.add('Sue', '123') # prefix of Bob
self.assertFalse(self.phonebook.is_consistent())
# These test cases have much better names - each name is descriptive of the test
    # Each of these test cases is structured - arrange, act, assert
# Arrange - put entries into phonebook
# Act - call is_consistent()
# Assert - assertTrue|assertFalse about results
    def test_phonebook_with_normal_entries_is_consistent(self):
self.phonebook.add('Bob', '12345')
self.phonebook.add('Mary', '012345')
self.assertTrue(self.phonebook.is_consistent())
def test_phonebook_with_duplicate_entries_is_inconsistent(self):
self.phonebook.add('Bob', '12345')
self.phonebook.add('Mary', '12345')
self.assertFalse(self.phonebook.is_consistent())
def test_phonebook_with_numbers_that_prefix_one_another_is_inconsistent(self):
self.phonebook.add('Bob', '12345')
self.phonebook.add('Mary', '123')
self.assertFalse(self.phonebook.is_consistent())
def test_phonebook_adds_names_and_numbers(self):
self.phonebook.add('Sue', '12345')
self.assertIn('Sue', self.phonebook.get_names())
self.assertIn('12345', self.phonebook.get_numbers())
if __name__ == '__main__':
unittest.main()
|
[
"james.r.small@outlook.com"
] |
james.r.small@outlook.com
|
6fc62e33dedb16842586b86715afb347e7ce39aa
|
9ba2b89dbdeefa54c6b6935d772ce36be7b05292
|
/devilry/devilry_gradingsystem/tests/views/test_download_feedbackdraftfile.py
|
c840023a5866328f619fed3370b7ae85504fe7d5
|
[] |
no_license
|
kristtuv/devilry-django
|
0ffcd9d2005cad5e51f6377484a83d778d65050f
|
dd2a4e5a887b28268f3a45cc3b25a40c0e313fd3
|
refs/heads/master
| 2020-04-27T06:02:45.518765
| 2019-02-15T13:28:20
| 2019-02-15T13:28:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,700
|
py
|
import unittest
from django.core.files.base import ContentFile
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils.http import urlencode
from devilry.project.develop.testhelpers.corebuilder import AssignmentGroupBuilder
from devilry.project.develop.testhelpers.corebuilder import UserBuilder
from devilry.devilry_gradingsystem.models import FeedbackDraftFile
@unittest.skip('devilry_gradingsystem will most likely be replaced in 3.0')
class TestDownloadFeedbackDraftFileView(TestCase):
def setUp(self):
self.testexaminer = UserBuilder('testexaminer').user
self.deliverybuilder = AssignmentGroupBuilder\
.quickadd_ducku_duck1010_active_assignment1_group()\
.add_examiners(self.testexaminer)\
.add_deadline_in_x_weeks(weeks=1)\
.add_delivery_x_hours_before_deadline(hours=1)
self.draftfile = FeedbackDraftFile(
delivery=self.deliverybuilder.delivery,
saved_by=self.testexaminer,
filename='test.txt')
self.draftfile.file.save('test.txt', ContentFile('A testfile'))
def _login(self, user):
self.client.login(username=user.shortname, password='test')
def _get_as(self, user, pk, **querystring):
self._login(user)
url = reverse('devilry_gradingsystem_feedbackdraftfile', kwargs={
'pk': pk
})
if querystring:
url = '{}?{}'.format(url, urlencode(querystring))
return self.client.get(url)
def test_403_not_owner_or_superuser(self):
response = self._get_as(UserBuilder('otheruser').user, self.draftfile.id)
self.assertEquals(response.status_code, 403)
def test_404_not_found(self):
response = self._get_as(self.testexaminer, 10001)
self.assertEquals(response.status_code, 404)
def _test_as(self, user):
response = self._get_as(user, self.draftfile.id)
self.assertEquals(response.status_code, 200)
self.assertEquals(response['content-type'], 'text/plain')
self.assertEquals(response.content, 'A testfile')
self.assertNotIn('content-disposition', response)
def test_ok_as_owner(self):
self._test_as(self.testexaminer)
def test_ok_as_superuser(self):
self._test_as(UserBuilder('superuser', is_superuser=True).user)
def test_download_content_disposition(self):
response = self._get_as(self.testexaminer, self.draftfile.id, download='yes')
self.assertEquals(response.status_code, 200)
self.assertIn('content-disposition', response)
self.assertEquals(response['content-disposition'], 'attachment; filename=test.txt')
|
[
"post@espenak.net"
] |
post@espenak.net
|
f160a2a7c5cef3e792945a605faf4f8c5bb13c06
|
ce3964c7195de67e07818b08a43286f7ec9fec3e
|
/calculate_tetragonal_from_rs.py
|
6cd6a88ad6a1d08ee5476959f8675ef36a8160ad
|
[] |
no_license
|
zhuligs/physics
|
82b601c856f12817c0cfedb17394b7b6ce6b843c
|
7cbac1be7904612fd65b66b34edef453aac77973
|
refs/heads/master
| 2021-05-28T07:39:19.822692
| 2013-06-05T04:53:08
| 2013-06-05T04:53:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
#!/usr/bin/env python
# backlib.py
import math
def main():
# constants
a0 = 5.291772108E-09 # Bohr radius in cm
pi = math.pi
# inputs
Rs = input('For what value of Rs would you like the box? ')
N = input('How many particles are there in your box? ')
ratio = input('What is the c/a ratio? ')
volume = (1.0/3)*(4*N*pi)*(Rs*a0)**3
a = (volume*6.7483346e+24/ratio)**(1.0/3)
c = a*ratio
print 'rs:',Rs
print 'ratio:',ratio
print 'a:',a,'bohr'
print 'c:',c,'bohr'
print 'volume:',volume*6.7483346e+24,'bohr^3'
print 'density:',N/(volume*6.7483346e+24),'particles/bohr^3'
if __name__ == '__main__':
main()
|
[
"boates@gmail.com"
] |
boates@gmail.com
|
2e7796b0f384d360c9aa18cc0cfdb8abd7d1932b
|
9eae66764d420fa4872baf5a10a5c66cc7fca580
|
/contenidos/semana-09/EjemplosClase/main_3.py
|
fbcc6a865bd35e2e2c35fa636a297972b496d9a3
|
[] |
no_license
|
isidonoso/Prueba
|
f89ad14d5ffb61f0df6d53fdccddf2395e49ab19
|
29a4ef531d69ed79f56cc5fa1879375b743972a8
|
refs/heads/master
| 2022-12-26T07:03:57.881036
| 2020-10-02T01:11:29
| 2020-10-02T01:11:29
| 291,497,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,363
|
py
|
import sys
from PyQt5.QtWidgets import (QApplication, QWidget, QLabel, QGridLayout, QPushButton, QVBoxLayout)
class Ventana(QWidget):
def __init__(self):
super().__init__()
self.etiqueta1 = QLabel('Texto a modificar', self)
self.grilla = QGridLayout()
valores = ['0', '1']
posiciones = [(0,0),(0,1)]
for i in range(2):
boton = QPushButton(valores[i])
            # Connect the clicked signal to the handler function
boton.clicked.connect(self.boton_clickeado)
self.grilla.addWidget(boton, *posiciones[i])
vbox = QVBoxLayout()
vbox.addWidget(self.etiqueta1)
vbox.addLayout(self.grilla)
self.setLayout(vbox)
self.setWindowTitle('Emit signal')
self.show()
def boton_clickeado(self):
        # sender() returns the object that was clicked.
boton = self.sender()
        # Get the index of the element in the grid
idx = self.grilla.indexOf(boton)
        # Use the index to get the item's position in the grid
posicion = self.grilla.getItemPosition(idx)
        # Update etiqueta1
self.etiqueta1.setText(f'Presionado boton {idx}, en fila/columna: {posicion[:2]}.')
app = QApplication(sys.argv)
ex = Ventana()
sys.exit(app.exec_())
|
[
"you@example.com"
] |
you@example.com
|
2b83c2886cd1db915273df2a5786015b27537710
|
22d84f804271a629a395cec785b7eb4b47f72f36
|
/examples/statistics/ANOVA_rutherford_2.py
|
5d75201ea451cc523a716af8176b98a77449362d
|
[] |
no_license
|
imclab/Eelbrain
|
bb787294218a2ba00f90f447af0e629abadeac88
|
e52eb3a5bd8bf8fc9aece2fb4413e0286e080c46
|
refs/heads/master
| 2021-01-21T23:34:15.673295
| 2014-05-02T19:17:04
| 2014-05-02T19:17:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,028
|
py
|
# Rutherford (2001) Examples, cross-checked results:
#
# factorial anova
#
#Independent Measures (p. 53):
## SS df MS F p
##_____________________________________________________________
##A 432.0000 1 432.0000 47.7474*** .0000
##B 672.0000 2 336.0000 37.1368*** .0000
##A x B 224 2 112 12.3789*** .0001
##subject(A x B) 380.0000 42 9.0476
##_____________________________________________________________
##Total 1708 47
#
#
#Repeated Measure (p. 86):
## SS df MS F p
##_________________________________________________________________
##A 432.0000 1 432.0000 40.1416*** .0004
##B 672.0000 2 336.0000 29.2174*** .0000
##A x B 224.0000 2 112.0000 17.1055*** .0002
##subject 52.0000 7 7.4286 0.7927 .5984
##A x subject 75.3333 7 10.7619 1.6436 .2029
##B x subject 161.0000 14 11.5000 1.7564 .1519
##A x B x subject 91.6667 14 6.5476
##_________________________________________________________________
##Total 1708 47
import numpy as np
from eelbrain.lab import *
Y = np.array([ 7, 3, 6, 6, 5, 8, 6, 7,
7,11, 9,11,10,10,11,11,
8,14,10,11,12,10,11,12,
16, 7,11, 9,10,11, 8, 8,
16,10,13,10,10,14,11,12,
24,29,10,22,25,28,22,24])
A = Factor([1,0], rep=3*8, name='A')
B = Factor(range(3), tile=2, rep=8, name='B')
# Independent Measures:
subject = Factor(range(8*6), name='subject', random=True)
print test.anova(Y, A*B+subject(A%B), title="Independent Measures:")
# Repeated Measure:
subject = Factor(range(8), tile=6, name='subject', random=True)
print test.anova(Y, A * B * subject, title="Repeated Measure:")
|
[
"christianmbrodbeck@gmail.com"
] |
christianmbrodbeck@gmail.com
|
67e71bb59332794c1489ec2409abb7ee23400fb6
|
faecebfb2aba45bc3dbb85d55f491365ff578344
|
/game/ui_enemy_health_bar.py
|
4e22450855f2381304d2be227478cdec3dfe8a97
|
[] |
no_license
|
MyreMylar/vania
|
c4658d16ae394cf8adecebd16470e82e773f98da
|
e813587db4d3a83e60188903238d8f0a3f124012
|
refs/heads/master
| 2020-08-11T01:29:48.462223
| 2019-10-11T15:05:38
| 2019-10-11T15:05:38
| 214,462,694
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,559
|
py
|
import pygame
class UIEnemyHealthBar(pygame.sprite.Sprite):
"""
A UI that will display the enemy's health capacity and their current health.
"""
def __init__(self, enemy, *groups):
super().__init__(*groups)
self.enemy = enemy
self.position = enemy.screen_position[:]
self.width = int(enemy.rect.width*0.75)
self.height = 10
self.rect = pygame.Rect(self.position, (self.width, self.height))
self.background_colour = pygame.Color("#000000")
self.background_surface = pygame.Surface((self.rect.w, self.rect.h)).convert()
self.background_surface.fill(self.background_colour)
self.image = pygame.Surface((self.rect.w, self.rect.h)).convert()
self.hover_height = 10
self.horiz_padding = 2
self.vert_padding = 2
self.capacity_width = self.width - (self.horiz_padding * 2)
self.capacity_height = self.height - (self.vert_padding * 2)
self.health_capacity_rect = pygame.Rect([self.horiz_padding,
self.vert_padding],
[self.capacity_width, self.capacity_height])
self.health_empty_colour = pygame.Color("#CCCCCC")
self.health_colour = pygame.Color("#f4251b")
self.current_health = 50
self.health_capacity = 100
self.health_percentage = self.current_health / self.health_capacity
self.current_health_rect = pygame.Rect([self.horiz_padding,
self.vert_padding],
[int(self.capacity_width*self.health_percentage),
self.capacity_height])
def update(self):
self.position = [self.enemy.screen_position[0] - self.enemy.rect.width/2,
self.enemy.screen_position[1] - (self.enemy.rect.height/2) - self.hover_height]
self.current_health = self.enemy.current_health
self.health_capacity = self.enemy.base_health
self.health_percentage = self.current_health / self.health_capacity
self.current_health_rect.width = int(self.capacity_width * self.health_percentage)
self.image.blit(self.background_surface, (0, 0))
pygame.draw.rect(self.image, self.health_empty_colour, self.health_capacity_rect)
pygame.draw.rect(self.image, self.health_colour, self.current_health_rect)
self.rect.x = self.position[0]
self.rect.y = self.position[1]
|
[
"dan@myrespace.com"
] |
dan@myrespace.com
|
de26a67822e84a5591ccfa7820b8be8780320d27
|
277d4ee56616bb5930c57a57c68a202bf5085501
|
/stubs/thinc/neural/_classes/maxout.pyi
|
bf42de303504a6c926134e3e0fd43fcc450d1488
|
[
"MIT"
] |
permissive
|
miskolc/spacy-pytorch-transformers
|
fc502523644eb25cb293e0796b46535ba581a169
|
ab132b674c5a91510eb8cc472cdbdf5877d24145
|
refs/heads/master
| 2020-07-22T09:47:17.905850
| 2019-09-04T15:12:09
| 2019-09-04T15:12:09
| 207,156,566
| 1
| 0
|
MIT
| 2019-09-08T18:37:55
| 2019-09-08T18:37:55
| null |
UTF-8
|
Python
| false
| false
| 735
|
pyi
|
# Stubs for thinc.neural._classes.maxout (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from ...describe import Biases, Dimension, Gradient, Synapses
from ..util import get_array_module
from .model import Model
from typing import Any, Optional
def xavier_uniform_init(W: Any, ops: Any) -> None: ...
def normal_init(W: Any, ops: Any) -> None: ...
class Maxout(Model):
name: str = ...
nO: Any = ...
nI: Any = ...
nP: Any = ...
drop_factor: Any = ...
def __init__(self, nO: Optional[Any] = ..., nI: Optional[Any] = ..., pieces: int = ..., **kwargs: Any) -> None: ...
def predict(self, X__BI: Any): ...
def begin_update(self, X__bi: Any, drop: float = ...): ...
|
[
"honnibal+gh@gmail.com"
] |
honnibal+gh@gmail.com
|
f0bd60edb571b6ddd80c330a4471bfd278cc8c71
|
a74b980fd95d5d810315f181449fc9d1710e6923
|
/savecode/threeyears/idownserver/config_dispatch.py
|
a9332f00f371526603ca0cd4873e8a2e22388314
|
[
"Apache-2.0"
] |
permissive
|
cbbbbbbbb/sspywork
|
b70f5539203b47b21eec2f0514ddca155affc2b8
|
8f05a6b91fc205960edd57f9076facec04f49a1a
|
refs/heads/master
| 2023-03-22T19:45:13.024076
| 2021-03-08T01:24:21
| 2021-03-08T01:24:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,509
|
py
|
"""配置命令数据分配器"""
# -*- coding:utf-8 -*-
from datacontract.datamatcher import ExtMatcher
from .taskdispatcher import (
AutoTaskDispatcher,
CmdDispatcher,
DispatchConfig,
IScanDispatcher,
IScoutDispatcher,
TaskDispatcher,
)
# Dispatcher configuration
dispatchconfig = DispatchConfig(
taskdispatchers={
"idowntask": TaskDispatcher(
uniquename="idowntask",
datamatcher=ExtMatcher(["idown_task", "an_cookie"]),
maxwaitcount=1,
maxwaittime=3,
relation_inputer_src=None,
),
"cmd": CmdDispatcher(
uniquename="cmd",
datamatcher=ExtMatcher(["idown_cmd"]),
maxwaitcount=1,
maxwaittime=3,
relation_inputer_src=None,
),
"iscantask": IScanDispatcher(
uniquename="iscantask",
datamatcher=ExtMatcher(["iscan_task"]),
maxwaitcount=1,
maxwaittime=3,
relation_inputer_src=None,
),
"iscouttask": IScoutDispatcher(
uniquename="iscouttask",
datamatcher=ExtMatcher(["iscout_task"]),
maxwaitcount=1,
maxwaittime=3,
relation_inputer_src=None,
),
"autotask": AutoTaskDispatcher(
uniquename="autotask",
datamatcher=ExtMatcher(["automated_task"]),
maxwaitcount=1,
maxwaittime=3,
relation_inputer_src=None,
),
}
)
|
[
"shiyuegege@qq.com"
] |
shiyuegege@qq.com
|
3a7848c94352123a109254d2864aa07d7452327f
|
1d60c5a7b8ce6277bff514e376f79848f706344c
|
/Machine Learning Scientist with Python/14. Introduction to TensorFlow in Python/02. Linear models/04. Modifying the loss function.py
|
bc7863271f780922fc8c310bb47f666f7fa4ed4a
|
[] |
no_license
|
DidiMilikina/DataCamp
|
338c6e6d3b4f5b6c541c1aba155a36e9ee24949d
|
3bf2cf3c1430190a7f8e54efda7d50a5fd66f244
|
refs/heads/master
| 2020-12-15T13:16:54.178967
| 2020-05-06T17:30:54
| 2020-05-06T17:30:54
| 235,113,616
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,503
|
py
|
'''
Modifying the loss function
In the previous exercise, you defined a tensorflow loss function and then evaluated it once for a set of actual and predicted values. In this exercise, you will compute the loss within another function called loss_function(), which first generates predicted values from the data and variables. The purpose of this is to construct a function of the trainable model variables that returns the loss. You can then repeatedly evaluate this function for different variable values until you find the minimum. In practice, you will pass this function to an optimizer in tensorflow. Note that features and targets have been defined and are available. Additionally, Variable, float32, and keras are available.
Instructions
100 XP
Define a variable, scalar, with an initial value of 1.0 and a type of float32.
Define a function called loss_function(), which takes scalar, features, and targets as arguments in that order.
Use a mean absolute error loss function.
'''
# SOLUTION
# Initialize a variable named scalar
scalar = Variable(1.0, float32)
# Define the model
def model(scalar, features = features):
return scalar * features
# Define a loss function
def loss_function(scalar=scalar, features = features, targets = targets):
# Compute the predicted values
predictions = model(scalar, features)
# Return the mean absolute error loss
return keras.losses.mae(targets, predictions)
# Evaluate the loss function and print the loss
print(loss_function(scalar).numpy())
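# Self-contained sketch of the same exercise (assumptions: TensorFlow 2.x; the
# course pre-defines features, targets, Variable, float32 and keras, so dummy
# data and the TensorFlow equivalents are used explicitly here).
import numpy as np
import tensorflow as tf
features = tf.constant(np.array([1.0, 2.0, 3.0, 4.0]), dtype=tf.float32)
targets = tf.constant(np.array([2.0, 4.0, 6.0, 8.0]), dtype=tf.float32)
# Initialize a variable named scalar
scalar = tf.Variable(1.0, dtype=tf.float32)
# Define the model
def model(scalar, features=features):
    return scalar * features
# Define a loss function returning the mean absolute error
def loss_function(scalar=scalar, features=features, targets=targets):
    predictions = model(scalar, features)
    return tf.keras.losses.mae(targets, predictions)
# With scalar = 1.0 the predictions are [1, 2, 3, 4] against targets
# [2, 4, 6, 8], so the loss is (1 + 2 + 3 + 4) / 4 = 2.5
print(loss_function(scalar).numpy())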
|
[
"didimilikina8@gmail.com"
] |
didimilikina8@gmail.com
|
147f6932afc8b302279c9890d43d0cc9046e3413
|
b21822a35da6cda8d7b7c89a4ada9a5651aed7b2
|
/Problem-2.py
|
e5131635a1999b7147b237d1275810e7337eda37
|
[] |
no_license
|
s4git21/Backtracking-3
|
ba39bf9a7f303adf3d6eb13baae5d14f2b6bb4bc
|
0412b355f0c6f24cdcf82648eeb22de8ae50f078
|
refs/heads/master
| 2023-06-14T05:27:11.218350
| 2021-07-06T00:50:25
| 2021-07-06T00:50:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,359
|
py
|
"""
Approach: 1) use DFS traversal and move in all 4 directions to look for the string in the board.
2) note that the traversal can start again from the current cell so you have to mark it visited
    a) instead of maintaining another board for keeping track of visited cells, we'll change the char value to indicate that
it is visited
TC: O(n*3^L) n = number of cells, L = len of string
SC: O(L) the recursion stack can have all the chars in the string
"""
from typing import List
class Solution:
def exist(self, board: List[List[str]], word: str) -> bool:
for r, row in enumerate(board):
for c, cell in enumerate(row):
if self.backtrack(board, word, 0, r, c):
return True
return False
def backtrack(self, board, word, index, r, c):
# base
if index == len(word):
return True
if r < 0 or r >= len(board) or c < 0 or c >= len(board[0]) or board[r][c] != word[index]:
return False
# action
temp = board[r][c]
board[r][c] = '#'
# logic
dirs = [[0, 1], [0, -1], [-1, 0], [1, 0]]
for roff, coff in dirs:
new_row = r + roff
new_col = c + coff
if self.backtrack(board, word, index + 1, new_row, new_col):
return True
# backtrack
        board[r][c] = temp
        return False
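# Quick check of the approach described above (hypothetical example board):
board = [["A", "B", "C", "E"],
         ["S", "F", "C", "S"],
         ["A", "D", "E", "E"]]
print(Solution().exist(board, "ABCCED"))  # expected: True
print(Solution().exist(board, "ABCB"))    # expected: False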
|
[
"syed.hope@gmail.com"
] |
syed.hope@gmail.com
|
36fcc9c738f1be919faac0c727dbe29e7658f835
|
df7f13ec34591fe1ce2d9aeebd5fd183e012711a
|
/hata/discord/scheduled_event/scheduled_event_entity_metadata/tests/test__parse_speaker_ids.py
|
f554c1e962233b4b01ee1c67dfa68b0f357bebc9
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
HuyaneMatsu/hata
|
63e2f6a2d7a7539fd8f18498852d9d3fe5c41d2e
|
53f24fdb38459dc5a4fd04f11bdbfee8295b76a4
|
refs/heads/master
| 2023-08-20T15:58:09.343044
| 2023-08-20T13:09:03
| 2023-08-20T13:09:03
| 163,677,173
| 3
| 3
|
Apache-2.0
| 2019-12-18T03:46:12
| 2018-12-31T14:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 672
|
py
|
import vampytest
from ..fields import parse_speaker_ids
def test__parse_speaker_ids():
"""
Tests whether ``parse_speaker_ids`` works as intended.
"""
speaker_id_1 = 202303120072
speaker_id_2 = 202303120073
for input_data, expected_output in (
({}, None),
({'speaker_ids': None}, None),
({'speaker_ids': []}, None),
({'speaker_ids': [str(speaker_id_1), str(speaker_id_2)]}, (speaker_id_1, speaker_id_2)),
({'speaker_ids': [str(speaker_id_2), str(speaker_id_1)]}, (speaker_id_1, speaker_id_2)),
):
output = parse_speaker_ids(input_data)
vampytest.assert_eq(output, expected_output)
|
[
"re.ism.tm@gmail.com"
] |
re.ism.tm@gmail.com
|
4b82ce3e1aab7b779c48a30fcf2274b21180ea7c
|
084e35c598426b1137f9cd502e1b5e7f09cdf034
|
/每日一题/problem1049_最后一块石头的重量II.py
|
da7c8a54ef837b1d51c1e29591562d625bc8f668
|
[] |
no_license
|
sakurasakura1996/Leetcode
|
3a941dadd198ee2f54b69057ae3bbed99941974c
|
78f239959af98dd3bd987fb17a3544010e54ae34
|
refs/heads/master
| 2021-09-11T05:07:44.987616
| 2021-09-07T05:39:34
| 2021-09-07T05:39:34
| 240,848,992
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 631
|
py
|
from typing import List
class Solution:
def lastStoneWeightII(self, stones: List[int]) -> int:
total = sum(stones)
n, m = len(stones), total // 2
dp = [[False] * (m + 1) for _ in range(n + 1)]
dp[0][0] = True
for i in range(n):
for j in range(m + 1):
if j < stones[i]:
dp[i + 1][j] = dp[i][j]
else:
dp[i + 1][j] = dp[i][j] or dp[i][j - stones[i]]
ans = None
for j in range(m, -1, -1):
if dp[n][j]:
ans = total - 2 * j
break
return ans
|
[
"2470375551@qq.com"
] |
2470375551@qq.com
|
a2784b7f3609ce114701b71b1c5d856b349dcc2c
|
ba157236151a65e3e1fde2db78b0c7db81b5d3f6
|
/String/reverse_only_letters.py
|
99750af83a43b0a37f1d087de1d7be02d80b9b6f
|
[] |
no_license
|
JaberKhanjk/LeetCode
|
152488ccf385b449d2a97d20b33728483029f85b
|
78368ea4c8dd8efc92e3db775b249a2f8758dd55
|
refs/heads/master
| 2023-02-08T20:03:34.704602
| 2020-12-26T06:24:33
| 2020-12-26T06:24:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
class Solution(object):
def reverseOnlyLetters(self, S):
stack = []
hash_map = {}
for i,each in enumerate(S):
if (each >= 'a' and each <= 'z') or (each >= 'A' and each <= 'Z'):
stack.append(each)
else:
hash_map[i] = each
n = len(S)
final = ""
for i in range(n):
if i in hash_map:
final += hash_map[i]
else:
final += stack.pop()
return final
"""
:type S: str
:rtype: str
"""
|
[
"spondoncsebuet@gmail.com"
] |
spondoncsebuet@gmail.com
|
168e1a720ae61eeab4b60a352e20ce73f9ca790a
|
61aa319732d3fa7912e28f5ff7768498f8dda005
|
/src/arch/x86/isa/insts/simd128/floating_point/arithmetic/multiplication.py
|
d4d99381ec3931f0f03fb84ea1f7192fe1c20f0a
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] |
permissive
|
TeCSAR-UNCC/gem5-SALAM
|
37f2f7198c93b4c18452550df48c1a2ab14b14fb
|
c14c39235f4e376e64dc68b81bd2447e8a47ff65
|
refs/heads/main
| 2023-06-08T22:16:25.260792
| 2023-05-31T16:43:46
| 2023-05-31T16:43:46
| 154,335,724
| 62
| 22
|
BSD-3-Clause
| 2023-05-31T16:43:48
| 2018-10-23T13:45:44
|
C++
|
UTF-8
|
Python
| false
| false
| 3,943
|
py
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
microcode = '''
def macroop MULSS_XMM_XMM {
mmulf xmml, xmml, xmmlm, size=4, ext=Scalar
};
def macroop MULSS_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
mmulf xmml, xmml, ufp1, size=4, ext=Scalar
};
def macroop MULSS_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
mmulf xmml, xmml, ufp1, size=4, ext=Scalar
};
def macroop MULSD_XMM_XMM {
mmulf xmml, xmml, xmmlm, size=8, ext=Scalar
};
def macroop MULSD_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
mmulf xmml, xmml, ufp1, size=8, ext=Scalar
};
def macroop MULSD_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
mmulf xmml, xmml, ufp1, size=8, ext=Scalar
};
def macroop MULPS_XMM_XMM {
mmulf xmml, xmml, xmmlm, size=4, ext=0
mmulf xmmh, xmmh, xmmhm, size=4, ext=0
};
def macroop MULPS_XMM_M {
ldfp ufp1, seg, sib, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, sib, "DISPLACEMENT + 8", dataSize=8
mmulf xmml, xmml, ufp1, size=4, ext=0
mmulf xmmh, xmmh, ufp2, size=4, ext=0
};
def macroop MULPS_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, riprel, "DISPLACEMENT + 8", dataSize=8
mmulf xmml, xmml, ufp1, size=4, ext=0
mmulf xmmh, xmmh, ufp2, size=4, ext=0
};
def macroop MULPD_XMM_XMM {
mmulf xmml, xmml, xmmlm, size=8, ext=0
mmulf xmmh, xmmh, xmmhm, size=8, ext=0
};
def macroop MULPD_XMM_M {
ldfp ufp1, seg, sib, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, sib, "DISPLACEMENT + 8", dataSize=8
mmulf xmml, xmml, ufp1, size=8, ext=0
mmulf xmmh, xmmh, ufp2, size=8, ext=0
};
def macroop MULPD_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, riprel, "DISPLACEMENT + 8", dataSize=8
mmulf xmml, xmml, ufp1, size=8, ext=0
mmulf xmmh, xmmh, ufp2, size=8, ext=0
};
'''
|
[
"sroger48@uncc.edu"
] |
sroger48@uncc.edu
|
ce7c741bc80b28875478e6d0bd778a104eaeb01d
|
ebe5167148cfff43d24b6c66e44634bb55513b72
|
/solutions/graph/133.Clone.Graph.py
|
8d6ec848c07c4a432abacd4871ee8f9ad1532cbd
|
[] |
no_license
|
ljia2/leetcode.py
|
c90ac38a25331d61d3ff77fd135b82372da3a09f
|
08c6d27498e35f636045fed05a6f94b760ab69ca
|
refs/heads/master
| 2020-03-25T03:37:13.318582
| 2019-07-18T23:14:41
| 2019-07-18T23:14:41
| 143,351,386
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,532
|
py
|
# Definition for a Node.
class Node(object):
def __init__(self, val, neighbors):
self.val = val
self.neighbors = neighbors
class DFSSolution(object):
def cloneGraph(self, node):
"""
Given a reference of a node in a connected undirected graph, return a deep copy (clone) of the graph.
Each node in the graph contains a val (int) and a list (List[Node]) of its neighbors.
Example:
Input:
{"$id":"1","neighbors":[{"$id":"2","neighbors":[{"$ref":"1"},{"$id":"3","neighbors":[{"$ref":"2"},{"$id":"4","neighbors":[{"$ref":"3"},{"$ref":"1"}],"val":4}],"val":3}],"val":2},{"$ref":"4"}],"val":1}
Explanation:
Node 1's value is 1, and it has two neighbors: Node 2 and 4.
Node 2's value is 2, and it has two neighbors: Node 1 and 3.
Node 3's value is 3, and it has two neighbors: Node 2 and 4.
Node 4's value is 4, and it has two neighbors: Node 1 and 3.
Note:
The number of nodes will be between 1 and 100.
The undirected graph is a simple graph, which means no repeated edges and no self-loops in the graph.
Since the graph is undirected, if node p has node q as neighbor, then node q must have node p as neighbor too.
You must return the copy of the given node as a reference to the cloned graph.
:type node: Node
:rtype: Node
DFS + dict key = original node, val = copy node.
"""
if not node:
return None
node2copy = dict()
self.dfs(node, set(), node2copy)
return node2copy[node]
def dfs(self, node, visited, node2copy):
if node in visited:
return
visited.add(node)
if node not in node2copy.keys():
node2copy[node] = Node(node.val, [])
for neighbor in node.neighbors:
            if neighbor in node2copy.keys():
                neighborcopy = node2copy[neighbor]
            else:
                neighborcopy = Node(neighbor.val, [])
                node2copy[neighbor] = neighborcopy
node2copy[node].neighbors.append(neighborcopy)
self.dfs(neighbor, visited, node2copy)
return
class BFSSolution(object):
def cloneGraph(self, node):
"""
Given a reference of a node in a connected undirected graph, return a deep copy (clone) of the graph.
Each node in the graph contains a val (int) and a list (List[Node]) of its neighbors.
Example:
Input:
{"$id":"1","neighbors":[{"$id":"2","neighbors":[{"$ref":"1"},{"$id":"3","neighbors":[{"$ref":"2"},{"$id":"4","neighbors":[{"$ref":"3"},{"$ref":"1"}],"val":4}],"val":3}],"val":2},{"$ref":"4"}],"val":1}
Explanation:
Node 1's value is 1, and it has two neighbors: Node 2 and 4.
Node 2's value is 2, and it has two neighbors: Node 1 and 3.
Node 3's value is 3, and it has two neighbors: Node 2 and 4.
Node 4's value is 4, and it has two neighbors: Node 1 and 3.
Note:
The number of nodes will be between 1 and 100.
The undirected graph is a simple graph, which means no repeated edges and no self-loops in the graph.
Since the graph is undirected, if node p has node q as neighbor, then node q must have node p as neighbor too.
You must return the copy of the given node as a reference to the cloned graph.
:type node: Node
:rtype: Node
BFS + dict key = original node, val = copy node.
"""
if not node:
return None
node2copy = dict()
self.bfs(node, node2copy)
return node2copy[node]
def bfs(self, node, node2copy):
qe = [node]
visited = set()
while qe:
size = len(qe)
while size > 0:
n = qe.pop(0)
                size -= 1
if n in visited:
continue
# get or generate the copy of node
node2copy[n] = Node(n.val, [])
visited.add(n)
                # enqueue the neighbors of n so their copies get created too
for neighbor in n.neighbors:
qe.append(neighbor)
        # every original node now has its copy in node2copy;
        # set up the copied edges by cloning each adjacency list
for node in node2copy.keys():
for neighbor in node.neighbors:
node2copy[node].neighbors.append(node2copy[neighbor])
return
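# Small sanity check (hypothetical 4-node cycle as in the problem statement:
# 1-2, 2-3, 3-4, 4-1):
n1, n2, n3, n4 = Node(1, []), Node(2, []), Node(3, []), Node(4, [])
n1.neighbors = [n2, n4]
n2.neighbors = [n1, n3]
n3.neighbors = [n2, n4]
n4.neighbors = [n1, n3]
dfs_copy = DFSSolution().cloneGraph(n1)
print(dfs_copy.val, [nb.val for nb in dfs_copy.neighbors])  # 1 [2, 4]
print(dfs_copy is n1)                                       # False -> a true deep copy
bfs_copy = BFSSolution().cloneGraph(n1)
print(bfs_copy.val, [nb.val for nb in bfs_copy.neighbors])  # 1 [2, 4]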
|
[
"ljia@conversantmedia.com"
] |
ljia@conversantmedia.com
|
25d1d944d7e4f8f2d8859c6fad01ba0ade0590b9
|
bfe345ba31673553ce156e4dca1ba5b6317ca4c2
|
/core/models/cms/interface.py
|
f3476948da56841820fdffaf4026b4a2080662e6
|
[] |
no_license
|
phillipmalboeuf/dauphin
|
215c6432fbcaf574ea0bd987238a0d2309d699af
|
101d8ffc113f36c8d1754077eebae2a6f18d4a3d
|
refs/heads/master
| 2023-01-09T01:55:36.905732
| 2021-06-20T18:35:35
| 2021-06-20T18:35:35
| 84,462,808
| 0
| 0
| null | 2023-01-05T06:11:24
| 2017-03-09T16:13:59
|
Python
|
UTF-8
|
Python
| false
| false
| 405
|
py
|
from core import app
from flask import request, abort
from core.models.core.content import Content
from core.models.core.has_routes import HasRoutes
from core.helpers.validation_rules import validation_rules
from core.helpers.json import to_json
with app.app_context():
class Interface(Content):
collection_name = 'interface'
@classmethod
def get(cls, lang=None):
return cls.list(lang)[0]
|
[
"phil@boeuf.coffee"
] |
phil@boeuf.coffee
|
1498fee5084ad1aba990b5613482f15303cbf55b
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-workspaceapp/huaweicloudsdkworkspaceapp/v1/model/create_share_folder_request.py
|
100f5ccf04f31234da8ec88802583bc9e72f6215
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,965
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateShareFolderRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'storage_id': 'str',
'body': 'CreateShareFolderReq'
}
attribute_map = {
'storage_id': 'storage_id',
'body': 'body'
}
def __init__(self, storage_id=None, body=None):
"""CreateShareFolderRequest
The model defined in huaweicloud sdk
        :param storage_id: WKS storage ID
:type storage_id: str
:param body: Body of the CreateShareFolderRequest
:type body: :class:`huaweicloudsdkworkspaceapp.v1.CreateShareFolderReq`
"""
self._storage_id = None
self._body = None
self.discriminator = None
self.storage_id = storage_id
if body is not None:
self.body = body
@property
def storage_id(self):
"""Gets the storage_id of this CreateShareFolderRequest.
        WKS storage ID
:return: The storage_id of this CreateShareFolderRequest.
:rtype: str
"""
return self._storage_id
@storage_id.setter
def storage_id(self, storage_id):
"""Sets the storage_id of this CreateShareFolderRequest.
        WKS storage ID
:param storage_id: The storage_id of this CreateShareFolderRequest.
:type storage_id: str
"""
self._storage_id = storage_id
@property
def body(self):
"""Gets the body of this CreateShareFolderRequest.
:return: The body of this CreateShareFolderRequest.
:rtype: :class:`huaweicloudsdkworkspaceapp.v1.CreateShareFolderReq`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this CreateShareFolderRequest.
:param body: The body of this CreateShareFolderRequest.
:type body: :class:`huaweicloudsdkworkspaceapp.v1.CreateShareFolderReq`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateShareFolderRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
27bc677940517f8b690617146b98d68d33e73b91
|
4bcc9806152542ab43fc2cf47c499424f200896c
|
/tensorflow/examples/custom_ops_doc/multiplex_4/multiplex_4_load_use.py
|
3716b63836e08b403abcc2ab5c1ec98035281c2f
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
tensorflow/tensorflow
|
906276dbafcc70a941026aa5dc50425ef71ee282
|
a7f3934a67900720af3d3b15389551483bee50b8
|
refs/heads/master
| 2023-08-25T04:24:41.611870
| 2023-08-25T04:06:24
| 2023-08-25T04:14:08
| 45,717,250
| 208,740
| 109,943
|
Apache-2.0
| 2023-09-14T20:55:50
| 2015-11-07T01:19:20
|
C++
|
UTF-8
|
Python
| false
| false
| 1,358
|
py
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Binary for showing C++ backward compatibility.
This loads a previously created SavedModel (esp. a model created by
multiplex_2_save.py which uses the "old" op and C++ kernel from multiplex_2)
and runs the model using the "new" multiplex_4 C++ kernel.
https://www.tensorflow.org/guide/saved_model
https://www.tensorflow.org/api_docs/python/tf/saved_model/save
"""
from absl import app
from tensorflow.examples.custom_ops_doc.multiplex_4 import model_using_multiplex
def main(argv):
del argv # not used
path = 'model_using_multiplex'
result = model_using_multiplex.load_and_use(path)
print('Result:', result)
if __name__ == '__main__':
app.run(main)
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
78b768e2735891cfd4e3f013fde8b38428d1d0c3
|
af97a81a69d100e684b8e9f02bc1b117069b9895
|
/Testcases_InCoder/CWE-117/sonar_1.py
|
75ba3e45cab4af79ddf4d72b0685e4b46958d8fb
|
[] |
no_license
|
s2e-lab/SecurityEval
|
8de3e2f78fffa7058d79cce84fbcd8ff29c8468d
|
f637f67b8e8d52d641ab15903e46a41ead7e30a5
|
refs/heads/main
| 2023-06-17T22:55:08.047769
| 2023-06-16T16:54:54
| 2023-06-16T16:54:54
| 509,664,226
| 28
| 7
| null | 2023-05-27T04:55:05
| 2022-07-02T05:10:17
|
Python
|
UTF-8
|
Python
| false
| false
| 458
|
py
|
from flask import request, current_app, Flask
app = Flask(__name__)
@app.route('/log')
def log():
'''
get the input from the request and log it as an error
'''
try:
ip = request.remote_addr
except:
ip = '127.0.0.1'
error_log = current_app.config['ERROR_LOG']
with open(error_log, 'a') as f:
f.write('{0}\t{1}\t{2}\n'.format(ip, request.method, request.path))
return '', 200
|
[
"lsiddiqsunny@gmail.com"
] |
lsiddiqsunny@gmail.com
|
7a54088b69601dc6582e0d6ff16f9dbd3318156d
|
c06002a780c0a1f33cc7bc1a84b4e54d1edf91f8
|
/Payload_Type/apfell/mythic/agent_functions/download.py
|
912e89946fef3f8441438de4ca6de6aaf320927e
|
[] |
no_license
|
apple0012/apfell
|
98c5331383a09c420f7fde128d65882a87945337
|
8d0c83b6482a9724bcc0754384f423973c9c5dfe
|
refs/heads/master
| 2023-07-17T19:58:49.909027
| 2021-09-03T20:19:52
| 2021-09-03T20:19:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,863
|
py
|
from mythic_payloadtype_container.MythicCommandBase import *
import json
from mythic_payloadtype_container.MythicRPC import *
class DownloadArguments(TaskArguments):
def __init__(self, command_line):
super().__init__(command_line)
self.args = {}
async def parse_arguments(self):
if len(self.command_line) > 0:
if self.command_line[0] == "{":
temp_json = json.loads(self.command_line)
if "host" in temp_json:
# this means we have tasking from the file browser rather than the popup UI
# the apfell agent doesn't currently have the ability to do _remote_ listings, so we ignore it
self.command_line = temp_json["path"] + "/" + temp_json["file"]
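                        # e.g. file-browser tasking {"host": "", "path": "/tmp", "file": "notes.txt"}
                        # (illustrative values only) is normalized to the plain path "/tmp/notes.txt",
                        # the same form the popup UI would submit directly.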
else:
raise Exception("Unsupported JSON")
else:
raise Exception("Must provide a path to download")
class DownloadCommand(CommandBase):
cmd = "download"
needs_admin = False
help_cmd = "download {path to remote file}"
description = "Download a file from the victim machine to the Mythic server in chunks (no need for quotes in the path)."
version = 1
supported_ui_features = ["file_browser:download"]
author = "@its_a_feature_"
parameters = []
attackmapping = ["T1020", "T1030", "T1041"]
argument_class = DownloadArguments
browser_script = BrowserScript(script_name="download", author="@its_a_feature_")
async def create_tasking(self, task: MythicTask) -> MythicTask:
resp = await MythicRPC().execute("create_artifact", task_id=task.id,
artifact="$.NSFileHandle.fileHandleForReadingAtPath, readDataOfLength",
artifact_type="API Called",
)
return task
async def process_response(self, response: AgentResponse):
pass
|
[
"codybthomas@gmail.com"
] |
codybthomas@gmail.com
|
571e0708ec0d32fd46c632db1f7e914a8d5f89e9
|
2d8da5cacd21dd425688d67e1a92faa50aefc6bc
|
/bulb-switcher.py
|
9b83a07e1278919276ff86b2dbc70c2d92431363
|
[] |
no_license
|
stella-shen/Leetcode
|
970857edb74ae3ccf4bcce0c40e972ab8bcc5348
|
16ad99a6511543f0286559c483206c43ed655ddd
|
refs/heads/master
| 2021-01-19T02:48:49.918054
| 2018-11-29T10:36:43
| 2018-11-29T10:36:43
| 47,523,042
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
import math
class Solution(object):
def bulbSwitch(self, n):
"""
:type n: int
:rtype: int
"""
return int(math.sqrt(n))
|
[
"szsxt818@gmail.com"
] |
szsxt818@gmail.com
|
22e182483efad689fb6e85baeebdc8221a228639
|
a2b6bc9bdd2bdbe5871edb613065dd2397175cb3
|
/Cookbook/Array/移动零.py
|
69b79cb17710f98a8a696166a87d241bba491561
|
[] |
no_license
|
Asunqingwen/LeetCode
|
ed8d2043a31f86e9e256123439388d7d223269be
|
b7c59c826bcd17cb1333571eb9f13f5c2b89b4ee
|
refs/heads/master
| 2022-09-26T01:46:59.790316
| 2022-09-01T08:20:37
| 2022-09-01T08:20:37
| 95,668,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 798
|
py
|
'''
Given an array nums, write a function that moves all 0's to the end of the array
while maintaining the relative order of the non-zero elements.
Example:
Input: [0,1,0,3,12]
Output: [1,3,12,0,0]
Notes:
You must do this in-place, without making a copy of the array.
Minimize the total number of operations.
'''
from typing import List
class Solution:
def moveZeroes(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
first = second = 0
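        # Invariant: nums[:first] holds every non-zero value seen so far, in order;
        # `second` scans ahead and swaps each non-zero it finds back to index `first`.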
while second < len(nums):
if nums[second] != 0:
nums[first], nums[second] = nums[second], nums[first]
first += 1
second += 1
if __name__ == '__main__':
nums = [0, 1, 0, 3, 12]
sol = Solution()
sol.moveZeroes(nums)
print(nums)
|
[
"sqw123az@sina.com"
] |
sqw123az@sina.com
|
023384e4cc57c97689bf7abe06d61f93b5d0695f
|
eee10264c0e24b488110ca089816b291b05ed8c7
|
/ingestors/documents/html.py
|
5538f17b1a40258591783812725fe650655ebe0e
|
[
"MIT"
] |
permissive
|
pombredanne/ingestors
|
def03d1b9a71640a1987a05e26385240a3650eb8
|
7f7ec82a6757743bc00e84b6b3d62b1c3cf7630a
|
refs/heads/master
| 2021-08-22T23:34:56.808521
| 2017-12-01T17:42:49
| 2017-12-01T17:42:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 521
|
py
|
from ingestors.base import Ingestor
from ingestors.support.html import HTMLSupport
from ingestors.support.encoding import EncodingSupport
class HTMLIngestor(Ingestor, EncodingSupport, HTMLSupport):
"HTML file ingestor class. Extracts the text from the web page."
MIME_TYPES = ['text/html']
def ingest(self, file_path):
"""Ingestor implementation."""
html_body = self.read_file_decoded(file_path)
self.result.flag(self.result.FLAG_HTML)
self.extract_html_content(html_body)
|
[
"friedrich@pudo.org"
] |
friedrich@pudo.org
|
24fdaeb60ef7bfaa6b0959fad30390a8df853cb9
|
6b9084d234c87d7597f97ec95808e13f599bf9a1
|
/training/transt/logger/_wandb.py
|
7f869c9b3cf935c338a8f9e823b7f4cac86f5ad6
|
[] |
no_license
|
LitingLin/ubiquitous-happiness
|
4b46234ce0cb29c4d27b00ec5a60d3eeb52c26fc
|
aae2d764e136ca4a36c054212b361dd7e8b22cba
|
refs/heads/main
| 2023-07-13T19:51:32.227633
| 2021-08-03T16:02:03
| 2021-08-03T16:02:03
| 316,664,903
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,211
|
py
|
try:
import wandb
has_wandb = True
except ImportError:
has_wandb = False
from miscellanies.flatten_dict import flatten_dict
from miscellanies.git_status import get_git_status
from miscellanies.torch.distributed import is_main_process, is_dist_available_and_initialized
class WandbLogger:
def __init__(self, id_, project_name: str, config: dict,
tags: list, step_times: int,
initial_step: int, log_freq: int,
only_log_on_main_process: bool,
watch_model_freq: int,
watch_model_parameters=False, watch_model_gradients=False,
tensorboard_root_path=None
):
if not has_wandb:
print('Install wandb to enable remote logging')
return
if tensorboard_root_path is not None:
wandb.tensorboard.patch(pytorch=True, tensorboardX=False, root_logdir=tensorboard_root_path)
self.id = id_
self.project_name = project_name
config = flatten_dict(config)
config['git_version'] = get_git_status()
self.tags = tags
self.config = config
self.step = initial_step
self.log_freq = log_freq
self.only_log_on_main_process = only_log_on_main_process
self.step_times = step_times
if watch_model_parameters and watch_model_gradients:
watch_model = 'all'
elif watch_model_parameters:
watch_model = 'parameters'
elif watch_model_gradients:
watch_model = 'gradients'
else:
watch_model = None
self.watch_model = watch_model
self.watch_model_freq = watch_model_freq
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def _is_disabled(self):
return self.only_log_on_main_process and not is_main_process()
def start(self):
if self._is_disabled():
return
configs = {'project': self.project_name, 'entity': 'llt', 'tags': self.tags, 'config': flatten_dict(self.config),
'force': True, 'job_type': 'train', 'id': self.id}
if not self.only_log_on_main_process and is_dist_available_and_initialized():
configs['group'] = 'ddp'
wandb.init(**configs)
def log_train(self, epoch, forward_stats, backward_stats):
if self._is_disabled():
return
if self.step % self.log_freq == 0:
step = self.step * self.step_times
log = {'epoch': epoch, 'batch': step, **forward_stats, **backward_stats}
wandb.log(log, step=step)
self.step += 1
def log_test(self, epoch, summary):
if self._is_disabled():
return
step = self.step * self.step_times
summary = {'test_' + k: v for k, v in summary.items()}
summary['epoch'] = epoch
wandb.log(summary, step=step)
def watch(self, model):
if self._is_disabled():
return
wandb.watch(model, log=self.watch_model, log_freq=self.watch_model_freq)
def stop(self):
if self._is_disabled():
return
wandb.finish()
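# A hypothetical usage sketch; the constructor arguments below are illustrative
# placeholders, not values taken from this repository's training configuration:
#   logger = WandbLogger(id_='run-001', project_name='transt', config={'lr': 1e-4},
#                        tags=['debug'], step_times=1, initial_step=0, log_freq=10,
#                        only_log_on_main_process=True, watch_model_freq=100)
#   with logger:
#       logger.watch(model)
#       logger.log_train(epoch, forward_stats, backward_stats)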
|
[
"linliting06@live.com"
] |
linliting06@live.com
|
51773aa0f9023d2645abc5899ed45f0596fa157c
|
473ae3b2ea92549d18adc1b33c473edda0abd6cb
|
/back-end/news/newsapi/serializer.py
|
5192fc39bfd70dcb7d0b3c57dcac1c22199fbdd6
|
[] |
no_license
|
Stelmaszv/news-api
|
0e7aabc8425f8b4105ce278261034b2ea2ad3aee
|
86bb933b882fda2c14ecabf352bfa50c875cf91f
|
refs/heads/master
| 2023-01-09T17:27:05.918639
| 2020-03-23T10:47:56
| 2020-03-23T10:47:56
| 244,368,280
| 0
| 1
| null | 2023-01-07T15:29:37
| 2020-03-02T12:41:55
|
Python
|
UTF-8
|
Python
| false
| false
| 184
|
py
|
from .models import news
from rest_framework.serializers import ModelSerializer
class NewsSerializer(ModelSerializer):
    class Meta:
        model = news
        fields = '__all__'
|
[
"stelmaszv@gmail.com"
] |
stelmaszv@gmail.com
|
6aa14a16c6fa5277c86e7faedb75368cd0993321
|
deaf60a5ba012e68f8509c0df0d35a5228419b71
|
/慧聪网系列/慧聪网抓取22/慧聪网诚信/hui_cong_gong/hui_cong_gong/pipelines.py
|
01a5437673b488171028c31edfad2a8828267ed3
|
[] |
no_license
|
kokohui/con_spider
|
7162d8e58725d9334db5f1da34649cd1d1ef29ea
|
da1181b53e5cbca546d1bb749f9efc2f48e698f8
|
refs/heads/master
| 2022-03-03T19:37:33.721533
| 2019-08-22T10:05:32
| 2019-08-22T10:05:32
| 193,631,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,658
|
py
|
import pymysql
class HuiCongGongPipeline(object):
    cursor = None  # MySQL cursor object declaration
    cur = None  # holds a database cursor
    def open_spider(self, spider):
print('爬虫开始》》》》')
self.conn = pymysql.Connect(host='192.168.1.210', user='root', passwd='zhangxing888', db='ktcx_buschance',
port=3306,
charset='utf8')
        self.cur = self.conn.cursor()  # get a cursor
def process_item(self, item, spider):
print('process_item>>>>>>>>>>>>>>>>>>>>>>>')
        # query the current max id in the database
res_num = 0
try:
sql_1 = 'select max(id) from bus_user'
self.cur.execute(sql_1)
res_num = int(self.cur.fetchone()[0])
print('res.......................', res_num)
except:
print('查询错误.')
        # count existing records for this company; only insert it if none are found
sql_count = "select count(0) from bus_user where company_name='{}'".format(item['com_name'])
self.cur.execute(sql_count)
result = self.cur.fetchall()
result_count = int(result[0][0])
print('result_count........................', result_count)
if result_count == 0:
            # next id based on the max id queried from the database above
res_num = res_num + 1
            # perform the insert
try:
                # company record
sql = 'insert into `bus_user`(' \
'`id`,`name`, `logo`, `phone`, `password`, `source`, `type`, `state`, `plate_visit_num`, `plate_visit_pnum`,' \
' `product_visit_num`, `balance`, `growth`, `status`, `company_name`, `linkman`, `mobile`, `number`, `url`, `submit_date`,' \
' `by_date`, `domain_name`, `is_del`, `create_by`, `create_date`, `province_id`, `province_name`, `city_id`, `city_name`, `county_id`,' \
' `county_name`, `address`, `sub_summary`, `summary`, `sub_scopes`, `scopes`, `minglu_img`, `company_img`, `mapx`, `mapy`,' \
' `zip_code`, `email`, `qq`, `tel`, `website`, `total_fee`, `send_num`, `refresh_num`, `supply_inquiry_num`, `purchase_inquiry_num`,' \
' `ad_price`, `openid`, `provider_id`, `provider_name`, `channel_duty_id`, `channel_open_id`, `service_id`, `keywords`, `is_cx`) ' \
'VALUE' \
'(%s,%s,%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'
self.cur.execute(sql, (
res_num, item['com_name'], item['list_img'], item['mobile'], '123456', 'pc', 'supply', '1', 0, 0,
0, 0, 0, '0', item['com_name'], item["linkman"], item['mobile'], '', '', item['create_date'],
item['create_date'], '', '0', '5fc530f6b8574e03b6f13794ec64c1f8', item['create_date'], '', '', '',
'', '',
'', item['address'], item['summary'], item['summary'], item['summary'], item['scopes'], '', '', '',
'', '', '', 123456, item['mobile'], '', 0, 0, 0, 0, 0,
0, '', '75cebe2e19434dcd9c4586f4621e6f9c', '', '', '', '', item['com_keyword'], 1))
                # third table: user-industry mapping
sql_in_2 = "insert into `bus_user_industry` (`create_by`, `one_level_id`, `two_level_id`, `three_level_id`, `sort`, `is_del`) values(%s,%s,%s,%s,%s,%s)"
self.cur.execute(sql_in_2, (
res_num, item['one_level_id'], item['two_level_id'], item['three_level_id'], '1', '0'))
except Exception as e:
                self.conn.rollback()  # roll back the transaction
print('事务处理失败')
raise e
else:
                self.conn.commit()  # commit the transaction
print('数据添加成功')
        # store product information
try:
sql_in = "INSERT INTO `bus_product` (`create_by`, `create_date`, `is_del`, `list_img`, `price`, `title`,`way`,`one_level_id`, `two_level_id`, `three_level_id`, `custom_id`, `keywords`,`models`,`standards`, `imgs`, `sort`, `update_time`, `state`, `is_verify`, `verify_remark`,`verify_time`, `verify_by`, `detail`, `types`, `start_time`, `end_time`, `num`, `units`,`money_units`, `province_id`, `province_name`, `city_id`, `city_name`, `view_count`,`inquiry_count`,`provider_id`, `provider_name`, `is_import`, `com_name`, `linkman`,`mobile`, `add_by`,`one_class_name`, `one_class_id`, `two_class_name`, `two_class_id`, `tree_class_name`, `tree_class_id`)" \
"VALUE " \
"(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
data = self.cur.execute(sql_in, (
res_num, item['create_date'], '0', item['list_img'], item['price'], item['title'],
item['way'],
item['one_level_id'], item['two_level_id'], item['three_level_id'], 0, item['keywords'], '',
'', item['imgs'], '1', item['create_date'], '1', '1', 0,
item['create_date'], '', item['detail'], '0', item['create_date'], item['create_date'], 1,
item['units'],
'元', '', '', '', '', '0', '0',
'1ec40ecd3cf64908941b5f7679f19d2b', '', '0', item['com_name'], item['linkman'], item['mobile'],
'43e9737882af413095f612ef34412a8f', item['one_class_name'], '',
item['two_class_name'], '', item['tree_class_name'],
                item['tree_class_id']))  # single-row insert
print('.......................................')
print('data', data)
            self.conn.commit()  # commit
print('添加成功')
except Exception as e:
raise e
return item
def close_spider(self, spider):
# sql_id = "SELECT id FROM bus_spider_data WHERE source='慧聪网'AND TYPE = 'gongying' AND is_del = '0' AND isuse = '0' ORDER BY create_date LIMIT 1 "
# self.cur.execute(sql_id)
# res_all_list = self.cur.fetchall()
# id = res_all_list[0][0]
# sql_insert = "UPDATE ktcx_buschance.bus_spider_data SET isuse='1' WHERE id={}".format(id)
# print(sql_insert)
# self.cur.execute(sql_insert)
# self.conn.commit()
print('爬虫结束>>>>>>>>')
self.cur.close()
self.conn.close()
|
[
"2686162923@qq.com"
] |
2686162923@qq.com
|
48874e51e27fff452749f385f2d2110852fbf097
|
b1bf615bfa1ee2065e3adfe90310814c3b27c61d
|
/2021-3-5/linked-list-random-node.py
|
ca1fb15ecae5681d7901a7c15f9a0a1db9a706f2
|
[] |
no_license
|
Huajiecheng/leetcode
|
73b09a88e61ea3b16ca3bf440fadd1470652ccf2
|
4becf814a2a06611ee909ec700380ab83ac8ab99
|
refs/heads/main
| 2023-03-19T21:54:20.952909
| 2021-03-06T03:34:52
| 2021-03-06T03:34:52
| 320,959,720
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 976
|
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
import random
class Solution:
def __init__(self, head):
"""
@param head The linked list's head.
Note that the head is guaranteed to be not null, so it contains at least one node.
:type head: ListNode
"""
self.head = head
self.vals = self.return_vals(self.head)
def return_vals(self, head):
vals = list()
while head:
vals.append(head.val)
head = head.next
return vals
def getRandom(self):
"""
Returns a random node's value.
:rtype: int
"""
rand = random.randint(0,len(self.vals)-1)
return self.vals[rand]
# Your Solution object will be instantiated and called as such:
# obj = Solution(head)
# param_1 = obj.getRandom()
|
[
"chenghuajie1998@gmail.com"
] |
chenghuajie1998@gmail.com
|
7abe0c4b9e89b36e941c342c156fa86726cb9d19
|
50025b693f437cd43e27282daadef67d7b77c5e0
|
/models/transformer_model.py
|
d9e945f449b61d42cfaa7e03d2b04dc6c9913a57
|
[] |
no_license
|
MatthijsBiondina/Memory-Augmented-Neural-Networks
|
b29e99a2b167ce94e03b1eea63afd90049ddcb47
|
510e6f6703775db9ade6b8599744623b9b58f074
|
refs/heads/master
| 2023-01-11T00:33:41.024739
| 2020-11-06T16:32:09
| 2020-11-06T16:32:09
| 301,348,077
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 925
|
py
|
import torch
from torch import nn, Tensor
from torch.nn import functional as F
import utils.config as cfg
BATCH_DIM, CHANNEL_DIM, LENGTH_DIM = 0, 1, 2
class TransformerModel(nn.Module):
def __init__(self):
super(TransformerModel, self).__init__()
self.line_prep = nn.Linear(cfg.input_size, cfg.num_units)
self.transformer = nn.Transformer(d_model=cfg.num_units, nhead=10, num_encoder_layers=12)
self.line_post = nn.Linear(cfg.num_units, cfg.output_size)
def forward(self, src: Tensor, mask=None, return_sequence=False):
src = src.transpose(1, 2).transpose(0, 1)
src = self.line_prep(src)
tgt = torch.zeros_like(src)
tgt = self.transformer(src, tgt)
out = cfg.output_func(self.line_post(tgt))
out = out.transpose(0, 1).transpose(1, 2)
return out
@property
def device(self):
return self.line_prep.bias.device
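# A hypothetical smoke test; it assumes cfg provides input_size, output_size,
# num_units and output_func as used above (the concrete values are not shown here):
#   model = TransformerModel()
#   x = torch.randn(8, cfg.input_size, 20)   # (batch, channels, sequence length)
#   y = model(x)                             # -> (batch, cfg.output_size, 20)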
|
[
"biondina.matthijs@gmail.com"
] |
biondina.matthijs@gmail.com
|
306c04c7f95a6a4e5a99170f0e7f803142db8232
|
cf7d96bdd34205ede987f0985dfc9e3ab415ee06
|
/ad_covering_doc/wizard/covering_doc_onshipping.py
|
e959848948a72a36f9ae05490d5e15731633b6aa
|
[] |
no_license
|
hendrasaputra0501/btxjalan
|
afc93467d54a6f20ef6ac46f7359e964ad5d42a0
|
d02bc085ad03efc982460d77f7af1eb5641db729
|
refs/heads/master
| 2020-12-30T11:02:05.416120
| 2017-07-31T01:34:08
| 2017-07-31T01:34:08
| 98,836,234
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,274
|
py
|
from osv import osv, fields
from openerp.osv import fields, osv
from openerp.tools.translate import _
import time
class covering_doc_onshipping(osv.osv_memory):
_name = "covering.doc.onshipping"
_description = "Covering Document Onshipping"
_columns = {
'group': fields.boolean("Group by Schedule Date"),
'date': fields.date('Creation date'),
}
_defaults = {
'group' : True,
}
def open_covering_doc(self, cr, uid, ids, context=None):
if context is None:
context = {}
covering_ids = []
data_pool = self.pool.get('ir.model.data')
active_ids = context.get('active_ids', [])
do_pool=self.pool.get('stock.picking')
pickings=do_pool.browse(cr,uid,active_ids)
invoice_ids=[x.invoice_id.id for x in pickings if x.invoice_id]
if len(invoice_ids)< len(active_ids):
raise osv.except_osv(_('Error!'), _('salah satu do belum ada invoice'))
applicant_ids=[x.invoice_id.partner_id.id for x in pickings if x.invoice_id.partner_id.id]
if len(set(applicant_ids))>1:
raise osv.except_osv(_('Error!'), _('applicant harus sama'))
res = self.create_covering_doc(cr, uid, ids, context=context)
covering_ids += [res]
action_model = False
action = {}
# if not covering_ids:
# raise osv.except_osv(_('Error!'), _('Please create Cover.'))
action_model,action_id = data_pool.get_object_reference(cr, uid, 'ad_covering_doc', "action_covering_doc")
if action_model:
action_pool = self.pool.get(action_model)
action = action_pool.read(cr, uid, action_id, context=context)
action['domain'] = "[('id','in', ["+','.join(map(str,covering_ids))+"])]"
return action
def create_covering_doc(self, cr, uid, ids, context=None):
if context is None:
context = {}
covering_pool = self.pool.get('covering.doc')
active_ids = context.get('active_ids', [])
do_pool=self.pool.get('stock.picking')
invoice_ids=[]
for lines in active_ids:
do=do_pool.browse(cr, uid,lines)
invoice_id=do.invoice_id.id
invoice_ids.append(invoice_id)
consignee_id=do.invoice_id.partner_id.id
res=covering_pool.create(cr,uid,{
'date' : time.strftime("%Y-%m-%d"),
'consignee_id' : consignee_id,
'invoice_ids' : [(6,0,list(set(invoice_ids)))],
},context=context)
return res
covering_doc_onshipping()
|
[
"hendrasaputra0501@gmail.com"
] |
hendrasaputra0501@gmail.com
|
46a12423a8d6c6b20f8ab09c075dec14e3b86ed4
|
b6b30fb06124883b074144c419b43d9182efcdff
|
/GA/knights.py
|
04d7d422c42d92594d18da47964b6a7db8d1ff86
|
[] |
no_license
|
JohnnySunkel/BlueSky
|
da9f5107034289bfbdd3ba40458f9b9bd8d01a13
|
5a20eba9ef7509a5a7b7af86e7be848242e1a72f
|
refs/heads/master
| 2021-07-07T09:57:37.256950
| 2020-09-02T23:06:46
| 2020-09-02T23:06:46
| 166,883,639
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,575
|
py
|
import random
import datetime
import unittest
import genetic
def get_fitness(genes, boardWidth, boardHeight):
attacked = set(pos
for kn in genes
for pos in get_attacks(kn, boardWidth, boardHeight))
return len(attacked)
def display(candidate, startTime, boardWidth, boardHeight):
timeDiff = datetime.datetime.now() - startTime
board = Board(candidate.Genes, boardWidth, boardHeight)
board.print()
print("{}\n\t{}\t{}".format(
' '.join(map(str, candidate.Genes)),
candidate.Fitness,
timeDiff))
def mutate(genes, boardWidth, boardHeight, allPositions, nonEdgePositions):
count = 2 if random.randint(0, 10) == 0 else 1
while count > 0:
count -= 1
positionToKnightIndexes = dict((p, []) for p in allPositions)
for i, knight in enumerate(genes):
for position in get_attacks(knight, boardWidth, boardHeight):
positionToKnightIndexes[position].append(i)
knightIndexes = set(i for i in range(len(genes)))
unattacked = []
for kvp in positionToKnightIndexes.items():
if len(kvp[1]) > 1:
continue
if len(kvp[1]) == 0:
unattacked.append(kvp[0])
continue
for p in kvp[1]: # len == 1
if p in knightIndexes:
knightIndexes.remove(p)
potentialKnightPositions = \
[p for positions in
map(lambda x: get_attacks(x, boardWidth, boardHeight),
unattacked)
for p in positions if p in nonEdgePositions] \
if len(unattacked) > 0 else nonEdgePositions
geneIndex = random.randrange(0, len(genes)) \
if len(knightIndexes) == 0 \
else random.choice([i for i in knightIndexes])
position = random.choice(potentialKnightPositions)
genes[geneIndex] = position
def create(fnGetRandomPosition, expectedKnights):
genes = [fnGetRandomPosition() for _ in range(expectedKnights)]
return genes
def get_attacks(location, boardWidth, boardHeight):
return [i for i in set(
Position(x + location.X, y + location.Y)
for x in [-2, -1, 1, 2] if 0 <= x + location.X < boardWidth
for y in [-2, -1, 1, 2] if 0 <= y + location.Y < boardHeight
and abs(y) != abs(x))]
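# For example, get_attacks(Position(0, 0), 4, 3) returns the only two on-board
# knight moves from that corner of a 4x3 board: Position(1, 2) and Position(2, 1).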
class KnightsTests(unittest.TestCase):
def test_3x4(self):
width = 4
height = 3
# 1,0 2,0 3,0
# 0,2 1,2 2,0
# 2 N N N .
# 1 . . . .
# 0 . N N N
# 0 1 2 3
self.find_knight_positions(width, height, 6)
def test_8x8(self):
width = 8
height = 8
self.find_knight_positions(width, height, 14)
def test_10x10(self):
width = 10
height = 10
self.find_knight_positions(width, height, 22)
def test_benchmark(self):
genetic.Benchmark.run(lambda: self.test_10x10())
def find_knight_positions(self, boardWidth, boardHeight,
expectedKnights):
startTime = datetime.datetime.now()
def fnDisplay(candidate):
display(candidate, startTime, boardWidth, boardHeight)
def fnGetFitness(genes):
return get_fitness(genes, boardWidth, boardHeight)
allPositions = [Position(x, y)
for y in range(boardHeight)
for x in range(boardWidth)]
if boardWidth < 6 or boardHeight < 6:
nonEdgePositions = allPositions
else:
nonEdgePositions = [i for i in allPositions
if 0 < i.X < boardWidth - 1 and
0 < i.Y < boardHeight - 1]
def fnGetRandomPosition():
return random.choice(nonEdgePositions)
def fnMutate(genes):
mutate(genes, boardWidth, boardHeight, allPositions,
nonEdgePositions)
def fnCreate():
return create(fnGetRandomPosition, expectedKnights)
optimalFitness = boardWidth * boardHeight
best = genetic.get_best(fnGetFitness, None, optimalFitness,
None, fnDisplay, fnMutate, fnCreate)
self.assertTrue(not optimalFitness > best.Fitness)
class Position:
def __init__(self, x, y):
self.X = x
self.Y = y
def __str__(self):
return "{},{}".format(self.X, self.Y)
def __eq__(self, other):
return self.X == other.X and self.Y == other.Y
def __hash__(self):
return self.X * 1000 + self.Y
class Board:
def __init__(self, positions, width, height):
board = [['.'] * width for _ in range(height)]
for index in range(len(positions)):
knightPosition = positions[index]
board[knightPosition.Y][knightPosition.X] = 'N'
self._board = board
self._width = width
self._height = height
def print(self):
# 0,0 prints in bottom left corner
for i in reversed(range(self._height)):
print(i, "\t", ' '.join(self._board[i]))
print(" \t", ' '.join(map(str, range(self._width))))
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
JohnnySunkel.noreply@github.com
|
2ae877bd29510457c3407d98e4d00cfde6085d75
|
07e8eaeaa6f3493546ba6b499be1593252f3c773
|
/tests/opytimizer/optimizers/social/test_bso.py
|
1b7810493c269d228b55c0c519a4f952dd8f4efa
|
[
"Apache-2.0"
] |
permissive
|
himanshuRepo/opytimizer
|
91dd848fffbe85736d8074169d515e46a8b54d74
|
09e5485b9e30eca622ad404e85c22de0c42c8abd
|
refs/heads/master
| 2023-07-20T18:16:00.565759
| 2021-09-02T19:40:43
| 2021-09-02T19:40:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,984
|
py
|
from types import new_class
import numpy as np
from opytimizer.optimizers.social import bso
from opytimizer.spaces import search
def test_bso_params():
params = {
'm': 5,
'p_replacement_cluster': 0.2,
'p_single_cluster': 0.8,
'p_single_best': 0.4,
'p_double_best': 0.5,
'k': 20
}
new_bso = bso.BSO(params=params)
assert new_bso.m == 5
assert new_bso.p_replacement_cluster == 0.2
assert new_bso.p_single_cluster == 0.8
assert new_bso.p_single_best == 0.4
assert new_bso.p_double_best == 0.5
assert new_bso.k == 20
def test_bso_params_setter():
new_bso = bso.BSO()
try:
new_bso.m = 'a'
except:
new_bso.m = 5
assert new_bso.m == 5
try:
new_bso.m = -1
except:
new_bso.m = 5
assert new_bso.m == 5
try:
new_bso.p_replacement_cluster = 'b'
except:
new_bso.p_replacement_cluster = 0.2
assert new_bso.p_replacement_cluster == 0.2
try:
new_bso.p_replacement_cluster = -1
except:
new_bso.p_replacement_cluster = 0.2
assert new_bso.p_replacement_cluster == 0.2
try:
new_bso.p_single_cluster = 'c'
except:
new_bso.p_single_cluster = 0.8
assert new_bso.p_single_cluster == 0.8
try:
new_bso.p_single_cluster = -1
except:
new_bso.p_single_cluster = 0.8
assert new_bso.p_single_cluster == 0.8
try:
new_bso.p_single_best = 'd'
except:
new_bso.p_single_best = 0.4
assert new_bso.p_single_best == 0.4
try:
new_bso.p_single_best = -1
except:
new_bso.p_single_best = 0.4
assert new_bso.p_single_best == 0.4
try:
new_bso.p_double_best = 'e'
except:
new_bso.p_double_best = 0.5
assert new_bso.p_double_best == 0.5
try:
new_bso.p_double_best = -1
except:
new_bso.p_double_best = 0.5
assert new_bso.p_double_best == 0.5
try:
new_bso.k = 'f'
except:
new_bso.k = 20
assert new_bso.k == 20
try:
new_bso.k = -1
except:
new_bso.k = 20
assert new_bso.k == 20
def test_bso_clusterize():
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[0, 0], upper_bound=[10, 10])
new_bso = bso.BSO()
new_bso._clusterize(search_space.agents)
def test_bso_sigmoid():
new_bso = bso.BSO()
x = 0.5
y = new_bso._sigmoid(x)
assert y == 0.6224593312018546
def test_bso_update():
def square(x):
return np.sum(x**2)
search_space = search.SearchSpace(n_agents=50, n_variables=2,
lower_bound=[0, 0], upper_bound=[10, 10])
new_bso = bso.BSO()
new_bso.evaluate(search_space, square)
new_bso.update(search_space, square, 1, 10)
new_bso.p_replacement_cluster = 1
new_bso.update(search_space, square, 1, 10)
|
[
"gth.rosa@uol.com.br"
] |
gth.rosa@uol.com.br
|
6e24ab11cd0ec830788f36d1221a31b0ac317caf
|
1391218903f06d62735d973829205a6b32754d64
|
/lib/kb_hisat2/util.py
|
49f9a18d9f86f26a395628eccd7293ab313faff1
|
[
"MIT"
] |
permissive
|
JamesJeffryes/kb_hisat2
|
9ff28b19c9f8fcaea8291a79ed05c8a2636a9438
|
9179a9d44d8ae3b6e149d27b64052004eef17fd2
|
refs/heads/master
| 2020-03-28T11:12:29.078124
| 2018-08-20T18:28:20
| 2018-08-20T18:28:20
| 148,188,592
| 0
| 0
| null | 2018-09-10T16:51:25
| 2018-09-10T16:51:25
| null |
UTF-8
|
Python
| false
| false
| 6,051
|
py
|
"""
Some utility functions for the HISAT2 module.
These mainly deal with manipulating files from Workspace objects.
There's also some parameter checking and munging functions.
"""
from __future__ import print_function
import re
from pprint import pprint
from Workspace.WorkspaceClient import Workspace
from DataFileUtil.DataFileUtilClient import DataFileUtil
def check_hisat2_parameters(params, ws_url):
"""
Checks to ensure that the hisat2 parameter set is correct and has the right
mash of options.
Returns a list of error strings if there's a problem, or just an empty list otherwise.
"""
errors = list()
# parameter keys and rules:
# -------------------------
# ws_name - workspace name, string, required
# alignmentset_name - output object name, string, required
# string sampleset_ref - input reads object ref, string, required
# string genome_ref - input genome object ref, string, required
# num_threads - int, >= 1, optional
# quality_score - string, one of phred33 or phred64, optional (default phred33)
# skip - int, >= 0, optional
# trim3 - int, >= 0, optional
# trim5 - int, >= 0, optional
# np - int,
# minins - int,
# maxins - int,
# orientation - string, one of fr, rr, rf, ff, optional (default fr)
# min_intron_length, int, >= 0, required
# int max_intron_length - int, >= 0, required
# bool no_spliced_alignment - 0 or 1, optional (default 0)
# string tailor_alignments - string ...?
print("Checking input parameters")
pprint(params)
if "ws_name" not in params or not valid_string(params["ws_name"]):
errors.append("Parameter ws_name must be a valid workspace "
"name, not {}".format(params.get("ws_name", None)))
if "alignment_suffix" not in params or not valid_string(params["alignment_suffix"]):
errors.append("Parameter alignment_suffix must be a valid Workspace object string, "
"not {}".format(params.get("alignment_suffix", None)))
if "sampleset_ref" not in params or not valid_string(params["sampleset_ref"], is_ref=True):
errors.append("Parameter sampleset_ref must be a valid Workspace object reference, "
"not {}".format(params.get("sampleset_ref", None)))
elif check_ref_type(params["sampleset_ref"], ["PairedEndLibary", "SingleEndLibrary"], ws_url):
if "condition" not in params or not valid_string(params["condition"]):
errors.append("Parameter condition is required for a single "
"PairedEndLibrary or SingleEndLibrary")
if "genome_ref" not in params or not valid_string(params["genome_ref"], is_ref=True):
errors.append("Parameter genome_ref must be a valid Workspace object reference, "
"not {}".format(params.get("genome_ref", None)))
return errors
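# A hypothetical params dict with the shape this validator expects (the refs are
# made up, and the ref-type checks still need a reachable Workspace service):
#   params = {"ws_name": "my_workspace", "alignment_suffix": "_alignment",
#             "sampleset_ref": "11/22/33", "genome_ref": "11/44/2", "condition": "wt"}
#   errors = check_hisat2_parameters(params, ws_url)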
def valid_string(s, is_ref=False):
is_valid = isinstance(s, basestring) and len(s.strip()) > 0
if is_valid and is_ref:
is_valid = check_reference(s)
return is_valid
def check_reference(ref):
"""
Tests the given ref string to make sure it conforms to the expected
object reference format. Returns True if it passes, False otherwise.
"""
obj_ref_regex = re.compile("^(?P<wsid>\d+)\/(?P<objid>\d+)(\/(?P<ver>\d+))?$")
ref_path = ref.strip().split(";")
for step in ref_path:
if not obj_ref_regex.match(step):
return False
return True
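# e.g. check_reference("11/22/33") and check_reference("1/2;3/4/5") return True,
# while check_reference("my_ws/my_obj") returns False (components must be numeric).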
def is_set(ref, ws_url):
return check_ref_type(ref, ["sampleset", "readsset"], ws_url)
def check_ref_type(ref, allowed_types, ws_url):
"""
Validates the object type of ref against the list of allowed types. If it passes, this
returns True, otherwise False.
Really, all this does is verify that at least one of the strings in allowed_types is
a substring of the ref object type name.
Ex1:
ref = 11/22/33, which is a "KBaseGenomes.Genome-4.0"
allowed_types = ["assembly", "KBaseFile.Assembly"]
returns False
Ex2:
ref = 44/55/66, which is a "KBaseGenomes.Genome-4.0"
allowed_types = ["assembly", "genome"]
returns True
"""
obj_type = get_object_type(ref, ws_url).lower()
for t in allowed_types:
if t.lower() in obj_type:
return True
return False
def get_object_type(ref, ws_url):
"""
Fetches and returns the typed object name of ref from the given workspace url.
If that object doesn't exist, or there's another Workspace error, this raises a
RuntimeError exception.
"""
ws = Workspace(ws_url)
info = ws.get_object_info3({"objects": [{"ref": ref}]})
obj_info = info.get("infos", [[]])[0]
if len(obj_info) == 0:
raise RuntimeError("An error occurred while fetching type info from the Workspace. "
"No information returned for reference {}".format(ref))
return obj_info[2]
def get_object_names(ref_list, ws_url):
"""
From a list of workspace references, returns a mapping from ref -> name of the object.
"""
ws = Workspace(ws_url)
obj_ids = list()
for ref in ref_list:
obj_ids.append({"ref": ref})
info = ws.get_object_info3({"objects": obj_ids})
name_map = dict()
# might be in a data palette, so we can't just use the ref.
# we already have the refs as passed previously, so use those for mapping, as they're in
# the same order as what's returned.
for i in range(len(info["infos"])):
name_map[ref_list[i]] = info["infos"][i][1]
return name_map
def package_directory(callback_url, dir_path, zip_file_name, zip_file_description):
''' Simple utility for packaging a folder and saving to shock '''
dfu = DataFileUtil(callback_url)
output = dfu.file_to_shock({'file_path': dir_path,
'make_handle': 0,
'pack': 'zip'})
return {'shock_id': output['shock_id'],
'name': zip_file_name,
'description': zip_file_description}
|
[
"wjriehl@lbl.gov"
] |
wjriehl@lbl.gov
|
88a0541b16ace96c3908ebeeb1ad3d953465b14c
|
fc64ba451a7a78238d28400a013ca366a96beb05
|
/pic_analyzer_python/finalwork..py
|
86cf3dac6af7d3b8e4fc8282bbd4a2b5ca2f8ffe
|
[
"MIT"
] |
permissive
|
610yilingliu/simi_pic_detection
|
e11204147d4098e488babb4a7119d867815683e7
|
b523a388831e1968fadd065bb8e77099f9f80567
|
refs/heads/master
| 2021-02-20T09:13:20.405716
| 2020-03-09T05:45:12
| 2020-03-09T05:45:12
| 245,332,652
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,183
|
py
|
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
pic1 = Image.open('../pics/edited_colorlevel_2.jpg')
pic2 = Image.open('../pics/edited_colorlevel_1.jpg')
w = 65
h = 64
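# count_dhash below builds a difference-hash style fingerprint: the image is shrunk
# to (w x h), converted to grayscale, and adjacent pixels are compared to form a bit
# string; hamming_dist then counts differing positions between two such fingerprints,
# so a smaller distance means more similar pictures.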
def count_dhash(image_obj, width, height):
image = image_obj.resize((width, height), Image.ANTIALIAS)
gray_img = image.convert("L")
image_np = np.array(gray_img)
showimg(image_np)
binary=[0] * ((width - 1) * height)
    for i in range(width - 1):
        for j in range(height):
            # numpy indexes as (row, col) = (y, x)
            if image_np[j, i] == image_np[j, i + 1]:
binary[(len(binary) -1) - (j * (width - 1) + i)] = 1
binary_str = ''
for i in range(len(binary)):
binary_str += str(binary[i])
octal = oct(int(binary_str, 2))
final = str(octal).zfill(len(binary))
return final
def hamming_dist(otcal1, octal2):
oct1 = otcal1
oct2 = octal2
diff = 0
for i in range(len(otcal1)):
if otcal1[i] != octal2[i]:
diff += 1
return diff
def showimg(image):
plt.imshow(image)
plt.show()
if __name__ == '__main__':
d1 = count_dhash(pic1, w, h)
d2 = count_dhash(pic2, w, h)
print(hamming_dist(d1,d2))
|
[
"yilingliu1994@gmail.com"
] |
yilingliu1994@gmail.com
|
075a9d195c8d0b7d80ec26a4fe420d20d8bfd829
|
f4f6148303cfa9e0123ef73bc3c10f25604bad16
|
/92. Reverse Linked List II.py
|
8e57cef53a36950fa67b1f0ceaa99283065a6a3a
|
[] |
no_license
|
newfull5/LeetCode
|
b1d45345603ed0bfdf45976c4b6fc21fbf10538d
|
97ada948a3a4ca8ca14ab6e2f1c020e12e862dce
|
refs/heads/main
| 2023-07-20T09:00:39.434876
| 2021-08-18T04:02:13
| 2021-08-18T04:02:13
| 325,930,197
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def reverseBetween(self, head: ListNode, left: int, right: int) -> ListNode:
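        # Approach: copy the values into a Python list, reverse the [left-1:right]
        # slice, then rebuild the linked list from the tail with ListNode(val, next).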
arr = []
while head is not None:
arr.append(head.val)
head = head.next
arr = arr[:left-1] + arr[left-1:right][::-1] + arr[right:]
answer = ListNode(arr.pop())
while arr:
answer = ListNode(arr.pop(), answer)
return answer
|
[
"noreply@github.com"
] |
newfull5.noreply@github.com
|
21b4966a8b048a8668344f0e63eed7dee5ea7c38
|
a8062308fb3bf6c8952257504a50c3e97d801294
|
/test/test_subsets.py
|
31cbb496ef446cc84619af46c77e190494a4cc15
|
[] |
no_license
|
wan-catherine/Leetcode
|
650d697a873ad23c0b64d08ad525bf9fcdb62b1b
|
238995bd23c8a6c40c6035890e94baa2473d4bbc
|
refs/heads/master
| 2023-09-01T00:56:27.677230
| 2023-08-31T00:49:31
| 2023-08-31T00:49:31
| 143,770,000
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 340
|
py
|
from unittest import TestCase
from problems.Subsets import Solution
class TestSolution(TestCase):
def test_subsets(self):
solution = Solution()
res = solution.subsets([1,2,3])
expected = [
[3],
[1],
[2],
[1,2,3],
[1,3],
[2,3],
[1,2],
[]
]
self.assertEqual(sorted(res), sorted(expected))
|
[
"rarry2012@gmail.com"
] |
rarry2012@gmail.com
|
87cbe13b16a4e002ab1e5145656ab99cb2412621
|
015106a1a964305ef8ceb478cc56fd7d4fbd86d5
|
/495.py
|
1523a6d84353326e0f42d942bf3794c7b5599774
|
[] |
no_license
|
zenmeder/leetcode
|
51a0fa4dc6a82aca4c67b5f4e0ee8916d26f976a
|
0fddcc61923d760faa5fc60311861cbe89a54ba9
|
refs/heads/master
| 2020-12-02T18:16:10.825121
| 2018-10-30T11:47:53
| 2018-10-30T11:47:53
| 96,505,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 594
|
py
|
#!/usr/local/bin/python3
# -*- coding:utf-8 -*-
# __author__ = "zenmeder"
class Solution(object):
def findPoisonedDuration(self, timeSeries, duration):
"""
:type timeSeries: List[int]
:type duration: int
:rtype: int
"""
if not timeSeries:
return 0
l = []
for time in timeSeries:
if not l:
l = [time, time + duration]
res = duration
continue
if time >= l[1]:
l = [time, time + duration]
res += duration
else:
res += time + duration - l[1]
l[1] = time + duration
return res
print(Solution().findPoisonedDuration([1, 2, 3, 4, 5], 5))
|
[
"zenmeder@gmail.com"
] |
zenmeder@gmail.com
|
7f773cfe5a7dd4e7d6b2a836e151328b9978e51b
|
814e4ad96172a76d9b72ac35697553980d0db5f1
|
/pyalp/chosen/fields.py
|
442207f9e1d55eaa01973595a36a4c382c74f889
|
[
"MIT"
] |
permissive
|
Mause/pyalp
|
29785037d3b4ebc2822e6ec74621aa65f621bb8e
|
fb0f723070e11f8c9ed57e2475eb963599f442a6
|
refs/heads/master
| 2022-12-05T12:05:01.804305
| 2014-07-02T13:04:21
| 2014-07-02T13:04:21
| 15,419,434
| 0
| 0
|
MIT
| 2022-11-22T00:24:05
| 2013-12-24T14:00:26
|
PHP
|
UTF-8
|
Python
| false
| false
| 2,105
|
py
|
from django import forms
from chosen.widgets import (
ChosenSelect,
ChosenSelectMultiple,
ChosenGroupSelect
)
__all__ = [
'ChosenFieldMixin', 'ChosenChoiceField', 'ChosenMultipleChoiceField',
'ChosenModelChoiceField', 'ChosenModelMultipleChoiceField',
'ChosenGroupChoiceField',
]
class ChosenFieldMixin(object):
def __init__(self, *args, **kwargs):
        widget_kwargs = (
            {"overlay": kwargs.pop('overlay')}
            if "overlay" in kwargs
            else {}
        )
kwargs['widget'] = self.widget(**widget_kwargs)
super(ChosenFieldMixin, self).__init__(*args, **kwargs)
class ChosenChoiceField(ChosenFieldMixin, forms.ChoiceField):
widget = ChosenSelect
class ChosenMultipleChoiceField(ChosenFieldMixin, forms.MultipleChoiceField):
widget = ChosenSelectMultiple
class ChosenModelChoiceField(ChosenFieldMixin, forms.ModelChoiceField):
widget = ChosenSelect
class ChosenModelMultipleChoiceField(ChosenFieldMixin,
forms.ModelMultipleChoiceField):
widget = ChosenSelectMultiple
class ChosenGroupChoiceField(ChosenFieldMixin, forms.ChoiceField):
"""
This field generate a Single Select with Groups (optgroup support).
To render it correctly, you need to give a choice with the group title and
the list of (id, value) for the subtitles
A good way to do that is to add a Manager, eg::
class MyModelManager(models.Manager):
"Add get_group_choices to MyModel"
def get_group_choices(self):
'''
Will filter the model per name and return tuples (obj.id,
obj.rule)
'''
choices = []
for name in MyModel.objects.values_list("name").distinct():
name = name[0]
name_choices = tuple((obj.id, obj.rule) for obj in
MyModel.objects.filter(name=name))
choices.append((name, name_choices))
return choices
"""
widget = ChosenGroupSelect
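    # A hypothetical form using this field; MyModel and its manager are the
    # illustrative names from the docstring above, not models shipped with this app:
    #   class MyForm(forms.Form):
    #       rule = ChosenGroupChoiceField(choices=MyModel.objects.get_group_choices())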
|
[
"jack.thatch@gmail.com"
] |
jack.thatch@gmail.com
|
b996caa2550c772ef6e349a1776a0104a8b45f12
|
8f3336bbf7cd12485a4c52daa831b5d39749cf9b
|
/Python/card-flipping-game.py
|
b755a10e0db7dfd461f122b6fbfbcc3341cd5b83
|
[] |
no_license
|
black-shadows/LeetCode-Topicwise-Solutions
|
9487de1f9a1da79558287b2bc2c6b28d3d27db07
|
b1692583f7b710943ffb19b392b8bf64845b5d7a
|
refs/heads/master
| 2022-05-30T22:16:38.536678
| 2022-05-18T09:18:32
| 2022-05-18T09:18:32
| 188,701,704
| 240
| 110
| null | 2020-05-08T13:04:36
| 2019-05-26T15:41:03
|
C++
|
UTF-8
|
Python
| false
| false
| 508
|
py
|
# Time: O(n)
# Space: O(n)
import itertools
class Solution(object):
def flipgame(self, fronts, backs):
"""
:type fronts: List[int]
:type backs: List[int]
:rtype: int
"""
same = {n for i, n in enumerate(fronts) if n == backs[i]}
result = float("inf")
for n in itertools.chain(fronts, backs):
if n not in same:
result = min(result, n)
return result if result < float("inf") else 0
|
[
"noreply@github.com"
] |
black-shadows.noreply@github.com
|
b8adc8f00940fea61f6707c605d5d1bf01b846dc
|
703312a73790e17473cccc577a208c3bec2b457f
|
/migrations/versions/d614011d4982_.py
|
0b7cc107ca9a06fe5215029f191ea3be4abc0ae4
|
[] |
no_license
|
t8116189520/flasky
|
0bd1be65d2b1ff0f79f3979f0f3c71dea86253bc
|
c9be14a1182878c6789eccdd0b82e6c9a07484ef
|
refs/heads/master
| 2021-01-21T23:47:51.459924
| 2017-09-02T07:52:29
| 2017-09-02T07:52:29
| 102,181,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,390
|
py
|
"""empty message
Revision ID: d614011d4982
Revises:
Create Date: 2017-08-10 11:14:25.087757
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd614011d4982'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=64), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email')
)
op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_users_username'), table_name='users')
op.drop_table('users')
op.drop_table('roles')
# ### end Alembic commands ###
|
[
"lsxxxxxx@126.com"
] |
lsxxxxxx@126.com
|
decbd9eefc318e9f340928501dd77dc2fd3feae5
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/GoogleChronicleBackstory/Scripts/ChronicleDBotScoreWidgetScript/ChronicleDBotScoreWidgetScript_test.py
|
233fec01a3ba223ad7e138ca621f62f0ad7167e3
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 2,273
|
py
|
from unittest.mock import patch
import demistomock as demisto
import ChronicleDBotScoreWidgetScript
DBOT_SCORE = [{'CustomFields': {'chronicledbotscore': 2}}]
def test_main_success(mocker):
"""
When main function is called, get_html_representation should be called.
"""
mocker.patch.object(demisto, 'incidents', return_value=DBOT_SCORE)
mocker.patch.object(ChronicleDBotScoreWidgetScript, 'get_html_representation',
return_value='')
ChronicleDBotScoreWidgetScript.main()
assert ChronicleDBotScoreWidgetScript.get_html_representation.called
@patch('ChronicleDBotScoreWidgetScript.return_error')
def test_main_failure(mock_return_error, capfd, mocker):
"""
When main function gets some exception then valid message should be printed.
"""
mocker.patch.object(demisto, 'incidents', return_value=DBOT_SCORE)
mocker.patch.object(ChronicleDBotScoreWidgetScript, 'get_html_representation', side_effect=Exception)
with capfd.disabled():
ChronicleDBotScoreWidgetScript.main()
mock_return_error.assert_called_once_with('Could not load widget:\n')
def test_get_html_representation_when_dbotscore_is_1(mocker):
"""
When DBotscore is 1, get_html_representation should return html representation accordingly.
"""
html_representation = ChronicleDBotScoreWidgetScript.get_html_representation(1)
assert "<div style='color:green; text-align:center;'><h1>1<br/>Good</h1></div>" == html_representation
def test_get_html_representation_when_dbotscore_is_2(mocker):
"""
When DBotscore is 2, get_html_representation should return html representation accordingly.
"""
html_representation = ChronicleDBotScoreWidgetScript.get_html_representation(2)
assert "<div style='color:orange; text-align:center;'><h1>2<br/>Suspicious</h1></div>"\
== html_representation
def test_get_html_representation_when_dbotscore_is_3(mocker):
"""
When DBotscore is 3, get_html_representation should return html representation accordingly.
"""
html_representation = ChronicleDBotScoreWidgetScript.get_html_representation(3)
assert "<div style='color:red; text-align:center;'><h1>3<br/>Bad</h1></div>" == html_representation
|
[
"noreply@github.com"
] |
demisto.noreply@github.com
|
eaadf08598bc9363904e1e138f7be007406303dd
|
e547f7a92e7a1c1d79f8631f9e8ee8a93879a4eb
|
/src/tests/parsing_test_12.py
|
9cd550d8a61b41538e0f4f4749160a0deb26a210
|
[] |
no_license
|
gsakkas/seq2parse
|
3c33ec7bc6cc6e4abd9e4981e53efdc173b7a7b9
|
7ae0681f1139cb873868727f035c1b7a369c3eb9
|
refs/heads/main
| 2023-04-09T12:29:37.902066
| 2023-01-18T21:32:12
| 2023-01-18T21:32:12
| 417,597,310
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,037
|
py
|
def distributeCandies(candies):
"""Given an integer array with even length, where different numbers in this array represent different kinds of candies. Each number means one candy of the corresponding kind. You need to distribute these candies equally in number to brother and sister. Return the maximum number of kinds of candies the sister could gain."""
brother, sister = [], []
c0, c1 = 0, 1
while c1 < len(candies):
        if candies[c0] not in sister:
            sister.append(candies[c0])
            brother.append(candies[c1])
        else:
            sister.append(candies[c1])
            brother.append(candies[c0])
c0 += 2
c1 += 2
uniqueCandies, i = [], 0
while i < len(sister):
if sister[i] not in uniqueCandies:
            uniqueCandies.append(sister[i])
i += 1
return len(uniqueCandies)
distributeCandies([1,2,1,2,3,4,3,2,5,6,7,4,3,6])
|
[
"george.p.sakkas@gmail.com"
] |
george.p.sakkas@gmail.com
|
8497472c087b3e8dfcbaddcb08b0b577a736e7f8
|
76dfedcfdcc686a7a0c5604309512ee20466ae63
|
/tater/core/visitors.py
|
2a3fa78fc1e2758fe5d68f0efb6687aa00f4352a
|
[
"BSD-3-Clause"
] |
permissive
|
pombredanne/tater
|
1d485e72232b75677ce743a1a84b10a27cbdc50c
|
8df0f686f295fb2fdfa1d0387268105677d70dc0
|
refs/heads/master
| 2021-01-24T14:45:59.917067
| 2013-11-08T22:41:27
| 2013-11-08T22:41:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,922
|
py
|
from tater.base.visitor import Visitor
class Transformer(Visitor):
'''A visitor that replaces the visited node with the
output of the visitor function.
'''
def visit_nodes(self, node):
'''If the visitor function returns a new node, replace
the current node with it, then stop.
Otherwise, continue on down the tree.
'''
try:
new_node = self.visit_node(node)
except self.Continue:
# Skip visiting the child nodes.
return
if new_node is not None:
if node in node.parent.children:
node.replace(new_node)
return
visit_nodes = self.visit_nodes
for child in node.children[:]:
visit_nodes(child)
class Renderer(Visitor):
'''The visitor functions on this visitor are context managers.
They perform some action initially, then delegate to the node's
child functions all the way down the tree, then perform a final,
closing action, like closing at html tag.
from contextlib import contextmanager
from StringIO import StringIO
form tater.visitor import Renderer
class MyRenderer(Render):
def __init__(self):
self.buf = StringIO()
@contextmanager
def visit_div(self, node):
self.buf.write('<div>')
self.buf.write(node.first_text())
yield
self.buf.write('</div>')
'''
def visit_nodes(self, node):
'''If the visitor function is a context manager, invoke it,
otherwise just run the function.
'''
method = self._methods[node]
# If no function is defined, run the generic visit function.
if method is None:
generic_visit = getattr(self, 'generic_visit', None)
if generic_visit is None:
return
method = generic_visit
self._run_visitor_method(method, node)
def _run_visitor_method(self, method, node):
# Test if the function is a context manager. If so, invoke it.
try:
with method(node):
visit_nodes = self.visit_nodes
for child in self.get_children(node):
try:
visit_nodes(child)
except self.Continue:
continue
except:
return method(node)
class _Orderer(Visitor):
def __init__(self):
self.nodes = []
def visit_node(self, node):
self.nodes.append(node)
def _sortfunc(self, node):
if node.items:
for pos, token, text in node.items:
return pos
def finalize(self):
return sorted(self.nodes, key=self._sortfunc)
class OrderedRenderer(Visitor):
'''In sort nodes, method, chooses the order in which
to visit children based on their index vals. Probz doesn't
need a helper class to do that. ACTUALLY YES IT DOES.
'''
def visit(self, node):
self.ordered = _Orderer().visit(node)
super(OrderedRenderer, self).visit(node)
def visit_nodes(self, node):
'''If the visitor function is a context manager, invoke it,
otherwise just run the function.
'''
func = self._methods[node]
# If no function is defined, run the generic visit function.
if func is None:
generic_visit = getattr(self, 'generic_visit', None)
if generic_visit is None:
return
return generic_visit(node)
# Test if the function is a context manager. If so, invoke it.
else:
with func(node):
visit_nodes = self.visit_nodes
for child in node.children[:]: # sorted(node.children, key=self.ordered.index):
visit_nodes(child)
class DiGraphVisitor(Visitor):
def __init__(self, G):
self.G = G
def get_children(self, node):
return self.G[node]
def finalize(self):
'''Final steps the visitor needs to take, plus the
return value or .visit, if any.
'''
return self
class EtreeVisitor(Visitor):
def visit(self, node):
self.node = node
self.visit_nodes(node)
return self.finalize()
def get_children(self, node):
return tuple(node)
def visit_HtmlComment(self, node):
'''Skip comments.
'''
raise self.Continue()
# ---------------------------------------------------------------------------
# Helpers for figuring out the start/end indexes of a parse tree.
# ---------------------------------------------------------------------------
class IndexVisitor(Visitor):
'''Base for visitors that aggregate information about
string indices of modeled text.
'''
def __init__(self):
self.indices = []
class StartIndexVisitor(IndexVisitor):
'''This visitor finds the starting index of the left-most string
modeled by the ast.
'''
def get_index(self):
if self.indices:
return min(self.indices)
def generic_visit(self, node):
for pos, token, text in node.items:
self.indices.append(pos)
class EndIndexVisitor(IndexVisitor):
'''This visitor finds the ending index of the right-most string
modeled by the ast.
'''
def get_index(self):
if self.indices:
return max(self.indices)
def generic_visit(self, node):
'''The end index will be the `pos` obtained from
the lexer, plus the length of the associated text.
'''
for pos, token, text in node.items:
self.indices.append(pos + len(text))
def get_start(tree):
return StartIndexVisitor().visit(tree).get_index()
def get_end(tree):
return EndIndexVisitor().visit(tree).get_index()
def get_span(tree):
return (get_start(tree), get_end(tree))
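# For instance, for a tree whose nodes carry the items (0, Token.Word, 'hello') and
# (6, Token.Word, 'world') (assuming node.items yields (pos, token, text) tuples as
# the visitors above expect), get_start gives 0, get_end gives 6 + len('world') = 11,
# and get_span returns (0, 11).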
|
[
"twneale@gmail.com"
] |
twneale@gmail.com
|
fabe5682cf9ca2d1969332761e1b6359bb59f68b
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/469/usersdata/304/111875/submittedfiles/Av2_Parte4.py
|
83fdc5c98c3ffc1bd4ce32042c996c4e2531ca26
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 807
|
py
|
# -*- coding: utf-8 -*-
m = int(input('Coluna: '))
n = int(input('Linha: '))
matriz = []
for i in range (0,m,1):
linha=[]
for j in range (0,n,1):
linha.append(int(input('Linha: ')))
matriz.append(linha)
soma1 = 0
soma2 = 0
soma3 = 0
soma4 = 0
for j in range (0,m,1):
soma1 = soma1 + matriz[i][0]+matriz[i][0]
for j in range (0,m,1):
soma2 = soma2 + matriz[i][1]+matriz[i][0]
for j in range (0,m,1):
soma3 = soma3 + matriz[i][2]+matriz[i][0]
for j in range (0,m,1):
soma4 = soma4 + matriz[i][3]+matriz[i][0]
if soma1<soma2 and soma1<soma3 and soma1<soma4:
print(soma1)
if soma2<soma1 and soma2<soma3 and soma2<soma4:
print(soma2)
if soma3<soma1 and soma3<soma2 and soma3<soma4:
    print(soma3)
if soma4<soma1 and soma4<soma2 and soma4<soma3:
print(soma4)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
d57ca73d1db71a5c04110ccef3b6f4aa3826a8b2
|
1285703d35b5a37734e40121cd660e9c1a73b076
|
/at_coder/abc/old/138/c.py
|
192446c41cea29241ee36a2e3850f0be6b17c42a
|
[] |
no_license
|
takin6/algorithm-practice
|
21826c711f57131108168775f08e4e13d07a3b38
|
f4098bea2085a77d11c29e1593b3cc3f579c24aa
|
refs/heads/master
| 2022-11-30T09:40:58.083766
| 2020-08-07T22:07:46
| 2020-08-07T22:07:46
| 283,609,862
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
import heapq
N = int(input())
A = [ i for i in list(map(int,input().split()))]
heapq.heapify(A)
for i in range(N-1):
a = heapq.heappop(A)
b = heapq.heappop(A)
heapq.heappush(A, ((a+b)/2))
print(heapq.heappop(A))
|
[
"takayukiinoue116@gmail.com"
] |
takayukiinoue116@gmail.com
|
9bf6f107258718c52bb3048142ac5f1f53abf79c
|
a8a5772674e62beaa4f5b1f115d280103fd03749
|
/boyle_coello_model.py
|
ee18846957fefb1f7050aebdba1dc53f9a62af0e
|
[] |
no_license
|
tahentx/pv_workbook
|
c6fb3309d9acde5302dd3ea06a34ad2aee0de4b7
|
08912b0ef36a5226d23fa0430216a3f277aca33b
|
refs/heads/master
| 2022-12-12T20:39:35.688510
| 2021-03-30T03:20:54
| 2021-03-30T03:20:54
| 172,827,250
| 0
| 1
| null | 2022-12-08T16:47:39
| 2019-02-27T02:25:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,454
|
py
|
def boyle_coello_model(self, time, rain=0, rainthreshold=0, tilt=0, pm2_5=0, pm_10=0, **kwargs):
"""
Use the :py:func:`boyle_coello_model` function to determine the impact of dirt accumulation on performance.
Parameters
----------
time : numeric
Importing the datetime module would be suitable here
    rain : array-like
        Hourly rain accumulation values covering the same duration defined in the time parameter. Units in millimeters.
    rainthreshold : float
        A scalar for the amount of rain in an accumulation period needed to clear the modules. In periods where the accumulated rain meets or exceeds the threshold, the panels are assumed to be cleaned immediately after the accumulation period.
tilt : int
A scalar or vector for the tilt of the PV panels.
    PM2_5 : float
        The concentration of particulate matter with diameter less than 2.5 microns, in g/m^3.
    PM10 : float
        The concentration of particulate matter with diameter less than 10 microns, in g/m^3.
ModelType : int
Optional input to determine the model type to be used in the soiling model. A value of "1"
RainAccPeriod :
        Optional input that specifies the period, in hours,
        over which to accumulate rainfall totals before checking against the
        rain cleaning threshold.
Returns
-------
See pvsystem.calcparams_cec for details
"""
kwargs = _build_kwargs([''])
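# Note: the body above is an unfinished stub (_build_kwargs is not defined in
# this file and nothing is returned yet). A hypothetical call, once the model
# is implemented, might look like the sketch below, with hourly series of
# equal length for time, rain and particulate matter:
#
#   losses = system.boyle_coello_model(time=times, rain=rain_mm,
#                                      rainthreshold=1.0, tilt=30,
#                                      pm2_5=pm25_series, pm_10=pm10_series)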
|
[
"hendricks.ta@gmail.com"
] |
hendricks.ta@gmail.com
|
0d0c4847d7dd3f15c7552f308fddb0e884b9708f
|
0f20f3e02aa05b8e690190a96e92a524b211338f
|
/프로그래머스/Level2/전화번호 목록.py
|
ba3a8b6089d7dcfebd4b0157b6e4632aec28a767
|
[] |
no_license
|
HYEONAH-SONG/Algorithms
|
ec744b7e775a52ee0756cd5951185c30b09226d5
|
c74ab3ef21a728dcd03459788aab2859560367e6
|
refs/heads/master
| 2023-07-18T14:41:48.360182
| 2021-09-03T13:41:23
| 2021-09-03T13:41:23
| 336,240,949
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
# ["119", "97674223", "1195524421"]
# make use of zip
# never use a nested for loop
def solution(phone_book):
phone_book.sort()
for p1, p2 in zip(phone_book, phone_book[1:]):
if p1 in p2[:len(p1)]:
return False
return True
phone_book = ["119", "97674223", "1195524421"]
print(solution(phone_book))
#
# def solution2(phone_book):
# phone = {}
# count = 0
# for i in phone_book:
# phone[i] = i
# for i in phone:
# for j in phone_book:
# if i == j[:len(i)] :
# count +=1
# print(i)
# if count ==len(phone_book):
# return True
# return False
|
[
"sha082072@gmail.com"
] |
sha082072@gmail.com
|
9266cc2fa80f7bd6d02259c385446d72e46a40ca
|
8fcc27160f8700be46296568260fa0017a0b3004
|
/client/PaperdollSculptingGhost.py
|
05f89551022a65a105bc5ddbc5ead7c726d90203
|
[] |
no_license
|
connoryang/dec-eve-serenity
|
5d867f4eedfa896a4ef60f92556356cafd632c96
|
b670aec7c8b4514fc47cd52e186d7ccf3aabb69e
|
refs/heads/master
| 2021-01-22T06:33:16.303760
| 2016-03-16T15:15:32
| 2016-03-16T15:15:32
| 56,389,750
| 1
| 0
| null | 2016-04-16T15:05:24
| 2016-04-16T15:05:24
| null |
UTF-8
|
Python
| false
| false
| 244
|
py
|
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\common\modules\nice\client\_nastyspace\PaperdollSculptingGhost.py
from eve.client.script.paperDoll.PaperdollSculptingGhost import PaperdollSculptingGhost
|
[
"masaho.shiro@gmail.com"
] |
masaho.shiro@gmail.com
|
914c501cc5e67519401fa1fe18f0b726b7a83dcf
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/impl/lobby/mode_selector/items/epic_mode_selector_item.py
|
5ec8fbb916cf65c72c3009f55d06c1ec08f8e4ad
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 4,683
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/impl/lobby/mode_selector/items/epic_mode_selector_item.py
import typing
from gui.impl import backport
from gui.impl.gen import R
from gui.impl.gen.view_models.views.lobby.mode_selector.mode_selector_card_types import ModeSelectorCardTypes
from gui.impl.gen.view_models.views.lobby.mode_selector.mode_selector_epic_model import ModeSelectorEpicModel
from gui.impl.lobby.mode_selector.items import setBattlePassState
from gui.impl.lobby.mode_selector.items.base_item import ModeSelectorLegacyItem
from gui.impl.lobby.mode_selector.items.items_constants import ModeSelectorRewardID
from gui.shared.formatters import time_formatters
from gui.shared.formatters.ranges import toRomanRangeString
from helpers import dependency, time_utils
from skeletons.gui.game_control import IEpicBattleMetaGameController
from gui.impl.gen.view_models.views.lobby.mode_selector.mode_selector_normal_card_model import BattlePassState
if typing.TYPE_CHECKING:
from gui.impl.gen.view_models.views.lobby.mode_selector.mode_selector_normal_card_model import ModeSelectorNormalCardModel
class EpicModeSelectorItem(ModeSelectorLegacyItem):
__slots__ = ()
_VIEW_MODEL = ModeSelectorEpicModel
_CARD_VISUAL_TYPE = ModeSelectorCardTypes.EPIC_BATTLE
__epicController = dependency.descriptor(IEpicBattleMetaGameController)
def _getIsDisabled(self):
return not self.__epicController.isEnabled()
def _onInitializing(self):
super(EpicModeSelectorItem, self)._onInitializing()
self.__epicController.onPrimeTimeStatusUpdated += self.__onEpicUpdate
self.__epicController.onUpdated += self.__onEpicUpdate
self.__epicController.onEventEnded += self.__onEventEnded
self.__fillViewModel()
def _onDisposing(self):
self.__epicController.onPrimeTimeStatusUpdated -= self.__onEpicUpdate
self.__epicController.onUpdated -= self.__onEpicUpdate
self.__epicController.onEventEnded -= self.__onEventEnded
super(EpicModeSelectorItem, self)._onDisposing()
def __onEpicUpdate(self, *_):
self.__fillViewModel()
def __onEventEnded(self):
self.onCardChange()
def __fillViewModel(self):
with self.viewModel.transaction() as vm:
self.__resetViewModel(vm)
currentSeason = self.__epicController.getCurrentSeason()
nextSeason = self.__epicController.getNextSeason()
season = currentSeason or nextSeason
currentTime = time_utils.getCurrentLocalServerTimestamp()
vehicleLevels = self.__epicController.getValidVehicleLevels()
localeFolder = R.strings.mode_selector.mode.epicBattle
vm.setConditions(backport.text(localeFolder.conditionSingleLevel() if len(vehicleLevels) == 1 else localeFolder.condition(), levels=toRomanRangeString(vehicleLevels)))
vm.setDescription(backport.text(R.strings.mode_selector.mode.epicBattle.description()))
if season is None:
return
vm.widget.setIsEnabled(True)
if season.hasActiveCycle(currentTime):
self._addReward(ModeSelectorRewardID.CREDITS)
self._addReward(ModeSelectorRewardID.EXPERIENCE)
timeLeftStr = ''
cycleInfo = season.getCycleInfo()
if cycleInfo is not None:
timeLeftStr = time_formatters.getTillTimeByResource(cycleInfo.endDate - currentTime, R.strings.menu.Time.timeLeftShort, removeLeadingZeros=True)
vm.setTimeLeft(timeLeftStr)
currentLevel, _ = self.__epicController.getPlayerLevelInfo()
vm.widget.setLevel(currentLevel)
else:
cycleInfo = season.getNextByTimeCycle(currentTime)
if cycleInfo is not None:
if cycleInfo.announceOnly:
vm.setStatusNotActive(backport.text(R.strings.mode_selector.mode.epicBattle.cycleSoon()))
else:
vm.setStatusNotActive(backport.text(R.strings.mode_selector.mode.epicBattle.cycleNext(), date=backport.getShortDateFormat(cycleInfo.startDate)))
self.viewModel.setBattlePassState(BattlePassState.NONE)
else:
vm.setStatusNotActive(backport.text(R.strings.mode_selector.mode.epicBattle.seasonEnd()))
setBattlePassState(self.viewModel)
return
@staticmethod
def __resetViewModel(vm):
vm.setTimeLeft('')
vm.setStatusActive('')
vm.setStatusNotActive('')
vm.getRewardList().clear()
|
[
"StranikS_Scan@mail.ru"
] |
StranikS_Scan@mail.ru
|
d60c82a2165499d5910f67b4948ff962e472bd62
|
76de4fc4f00a04c8c9acc1e9e4a5fae12cf0c08a
|
/tags/release-0.7.2/pyformex/examples/Clock.py
|
efd5affc5d3cdb0b4bf75aa8c70dd37a5f479b1f
|
[] |
no_license
|
BackupTheBerlios/pyformex-svn
|
ec2361b1b9967918be65e892217a691a6f8b145d
|
f5404809095711334bbb938d9d119a69ad8fc260
|
refs/heads/master
| 2020-12-24T13:20:47.422165
| 2011-11-15T11:52:23
| 2011-11-15T11:52:23
| 40,749,266
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,458
|
py
|
#!/usr/bin/env pyformex --gui
# $Id$
##
## This file is part of pyFormex 0.7.2 Release Tue Sep 23 16:18:43 2008
## pyFormex is a Python implementation of Formex algebra
## Website: http://pyformex.berlios.de/
## Copyright (C) Benedict Verhegghe (benedict.verhegghe@ugent.be)
##
## This program is distributed under the GNU General Public License
## version 2 or later (see file COPYING for details)
##
"""Clock
level = 'advanced'
topics = []
techniques = []
"""
from formex import * # Needed if we want to use this example as a module
from gui.draw import * # Needed if we want to use this example as a module
import simple
from datetime import datetime
from PyQt4 import QtCore
class AnalogClock(object):
"""An analog clock built from Formices"""
def __init__(self,lw=2,mm=0.75,hm=0.85,mh=0.7,hh=0.6, sh=0.9):
"""Create an analog clock."""
self.linewidth = lw
self.circle = simple.circle(a1=2.,a2=2.)
radius = Formex(pattern('2'))
self.mainmark = radius.divide([mm,1.0])
self.hourmark = radius.divide([hm,1.0])
self.mainhand = radius.divide([0.0,mh])
self.hourhand = radius.divide([0.0,hh])
if sh > 0.0:
self.secshand = radius.divide([0.0,sh])
else:
self.secshand = None
self.hands = []
self.timer = None
def draw(self):
"""Draw the clock (without hands)"""
draw(self.circle,color='black',linewidth=self.linewidth)
draw(self.mainmark.rosette(4,90),color='black',linewidth=self.linewidth)
draw(self.hourmark.rot(30).rosette(2,30).rosette(4,90),
color='black',linewidth=0.5*self.linewidth)
def drawTime(self,hrs,min,sec=None):
"""Draw the clock's hands showing the specified time.
If no seconds are specified, no seconds hand is drawn.
"""
hrot = - hrs*30. - min*0.5
mrot = - min*6.
GD.canvas.removeActors(self.hands)
MH = draw(self.mainhand.rot(mrot),bbox=None,color='red',linewidth=self.linewidth)
HH = draw(self.hourhand.rot(hrot),bbox=None,color='red',linewidth=self.linewidth)
self.hands = [MH,HH]
if self.secshand and sec:
srot = - sec*6.
SH = draw(self.secshand.rot(srot),bbox=None,color='orange',linewidth=0.5*self.linewidth)
self.hands.append(SH)
def drawNow(self):
"""Draw the hands showing the current time."""
now = datetime.now()
self.drawTime(now.hour,now.minute,now.second)
def run(self,granularity=1,runtime=100):
"""Run the clock for runtime seconds, updating every granularity."""
if granularity > 0.0:
self.timer = QtCore.QTimer()
self.timer.connect(self.timer,QtCore.SIGNAL("timeout()"),self.drawNow)
self.timer.start(1000*granularity)
if runtime > 0.0:
self.timeout = QtCore.QTimer()
self.timeout.connect(self.timeout,QtCore.SIGNAL("timeout()"),self.stop)
self.timeout.setSingleShot(True)
self.timeout.start(1000*runtime)
def stop(self):
"""Stop a running clock."""
if self.timer:
self.timer.stop()
if __name__ == "draw":
C = AnalogClock()
C.draw()
zoomAll()
C.drawNow()
if ack("Shall I start the clock?"):
C.run()
warning("Please wait until the clock stops running")
|
[
"bverheg@8d6f1305-3bde-0310-9e88-884b4813ce35"
] |
bverheg@8d6f1305-3bde-0310-9e88-884b4813ce35
|
6d66b0273af64eacb5f11ceecc9bd3c0e6ec7cd8
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/CodeJamData/15/43/13.py
|
0d2d8218a97f9192b5a80288431e4d9cacbbae7a
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 612
|
py
|
def solve():
n = int(raw_input())
text = [raw_input().split() for _ in xrange(n)]
base = [set(text[0]), set(text[1])]
base_res = len(base[0] & base[1])
if n == 2: return base_res
res = 1e9
for i in xrange(2 ** (n - 2)):
added = [set(), set()]
for j in xrange(n - 2):
lang = ((i >> j) & 1)
for word in text[j + 2]:
if not word in base[lang]:
added[lang].add(word)
r = len(added[0] & base[1]) + len(added[1] & base[0]) + len(added[0] & added[1])
res = min(res, base_res + r)
return res
for i in xrange(input()):
print "Case #%d: %s" % (i + 1, solve())
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
85b562fb5ad9eb517991e871954a55520b0cf345
|
6d162c19c9f1dc1d03f330cad63d0dcde1df082d
|
/util/test/tests/Vulkan/VK_Synchronization_2.py
|
b4ce34cc6a9e18b8c49152887914de8d9b4884b1
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"CC-BY-3.0",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
baldurk/renderdoc
|
24efbb84446a9d443bb9350013f3bfab9e9c5923
|
a214ffcaf38bf5319b2b23d3d014cf3772cda3c6
|
refs/heads/v1.x
| 2023-08-16T21:20:43.886587
| 2023-07-28T22:34:10
| 2023-08-15T09:09:40
| 17,253,131
| 7,729
| 1,358
|
MIT
| 2023-09-13T09:36:53
| 2014-02-27T15:16:30
|
C++
|
UTF-8
|
Python
| false
| false
| 4,360
|
py
|
import rdtest
import renderdoc as rd
class VK_Synchronization_2(rdtest.TestCase):
demos_test_name = 'VK_Synchronization_2'
def get_capture_options(self):
opts = rd.CaptureOptions()
# Ref all resources to pull in the image with unbound data
opts.refAllResources = True
return opts
def check_capture(self):
self.controller.SetFrameEvent(0, False)
pipe: rd.VKState = self.controller.GetVulkanPipelineState()
# Check that the layout is reported correctly at the start of the frame
for img in pipe.images:
img: rd.VKImageData
res = self.get_resource(img.resourceId)
if res.name == "Image:Preinitialised":
if img.layouts[0].name != "VK_IMAGE_LAYOUT_PREINITIALIZED":
raise rdtest.TestFailureException("Pre-initialised image is in {} layout".format(img.layouts[0].name))
elif res.name == "Image:Undefined":
if img.layouts[0].name != "VK_IMAGE_LAYOUT_UNDEFINED":
raise rdtest.TestFailureException("Undefined image is in {} layout".format(img.layouts[0].name))
elif res.name == "Image:Swapchain":
if img.layouts[0].name != "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR":
raise rdtest.TestFailureException("Swapchain image is in {} layout".format(img.layouts[0].name))
action = self.find_action("Before Transition")
self.check(action is not None)
self.controller.SetFrameEvent(action.eventId, False)
pipe: rd.VKState = self.controller.GetVulkanPipelineState()
pre_init = rd.ResourceId()
undef_img = rd.ResourceId()
# Check that the layout is reported correctly before transitions still
for img in pipe.images:
img: rd.VKImageData
res = self.get_resource(img.resourceId)
if res.name == "Image:Preinitialised":
if img.layouts[0].name != "VK_IMAGE_LAYOUT_PREINITIALIZED":
raise rdtest.TestFailureException("Pre-initialised image is in {} layout".format(img.layouts[0].name))
pre_init = img.resourceId
elif res.name == "Image:Undefined":
if img.layouts[0].name != "VK_IMAGE_LAYOUT_UNDEFINED":
raise rdtest.TestFailureException("Undefined image is in {} layout".format(img.layouts[0].name))
undef_img = img.resourceId
elif res.name == "Image:Swapchain":
if img.layouts[0].name != "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR":
raise rdtest.TestFailureException("Swapchain image is in {} layout".format(img.layouts[0].name))
action = self.find_action("vkCmdDraw")
self.check(action is not None)
self.controller.SetFrameEvent(action.eventId, False)
# Check that the backbuffer didn't get discarded
self.check_triangle(out=action.outputs[0])
col = [float(0x40) / 255.0] * 4
# The pre-initialised image should have the correct data still also
self.check_triangle(out=pre_init, back=col, fore=col)
# we copied its contents into the undefined image so it should also have the right colour
self.check_triangle(out=undef_img, back=col, fore=col)
pipe: rd.VKState = self.controller.GetVulkanPipelineState()
# Check that after transitions, the images are in the right state
for img in pipe.images:
img: rd.VKImageData
res = self.get_resource(img.resourceId)
if res.name == "Image:Preinitialised":
if img.layouts[0].name != "VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL":
raise rdtest.TestFailureException("Pre-initialised image is in {} layout".format(img.layouts[0].name))
elif res.name == "Image:Undefined":
if img.layouts[0].name != "VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL":
raise rdtest.TestFailureException("Undefined image is in {} layout".format(img.layouts[0].name))
elif img.resourceId == pipe.currentPass.framebuffer.attachments[0].imageResourceId:
if img.layouts[0].name != "VK_IMAGE_LAYOUT_GENERAL":
raise rdtest.TestFailureException("Rendered swapchain image is in {} layout".format(img.layouts[0].name))
|
[
"baldurk@baldurk.org"
] |
baldurk@baldurk.org
|
7d65874d66433649931e827dc23a00315d029607
|
49cc32d5859e9002cb4b94ade25d72f5f4fe1612
|
/CLASE4_PYTHON/CODIGOS/serial_python.py
|
43e9522e950f183e31d94dd1531efffa2455861a
|
[] |
no_license
|
jorgepdsML/DIGITAL-IMAGE-PROCESSING-PYTHON
|
c8441215b4cf9e912dad1885a82058c1b0bbb872
|
781c8c6d583aebda6381a301cdc33ad4d09f20c5
|
refs/heads/master
| 2021-06-26T00:06:44.344201
| 2021-01-21T17:41:36
| 2021-01-21T17:41:36
| 194,336,928
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 345
|
py
|
# import the serial module
import serial
# use the Serial class
# create a Serial object
objeto_serial=serial.Serial()
# CONFIGURE THE COMMUNICATION SPEED (BAUD RATE) OF THE SERIAL OBJECT
objeto_serial.baudrate=9600
objeto_serial.port="COM16"
objeto_serial.open()
objeto_serial.write("sfadg")
dato=objeto_serial.read()
objeto_serial.close()
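# Note: with pyserial under Python 3, write() expects a bytes-like object
# (e.g. objeto_serial.write(b"sfadg")) and read() returns bytes; the str
# argument above assumes Python 2.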
|
[
"noreply@github.com"
] |
jorgepdsML.noreply@github.com
|
b3f94c94e05413730db58e91ad49543f9373f995
|
f259a50a663a035c7c79bffbe4f7e08fc1ca5ce2
|
/pepysdiary/annotations/__init__.py
|
2e85a18199d3ff74ff2e7cbb8681c9b6532c3a40
|
[] |
no_license
|
eskadah/pepysdiary
|
a22aa5d4c82c9a92410940e5e9562eb232873258
|
7fd706fc862e216216d8d80238516328404f5786
|
refs/heads/master
| 2022-09-13T12:21:19.237047
| 2020-06-03T10:34:54
| 2020-06-03T10:34:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
default_app_config = "pepysdiary.annotations.apps.AnnotationsConfig"
def get_model():
from pepysdiary.annotations.models import Annotation
return Annotation
def get_form():
from pepysdiary.annotations.forms import AnnotationForm
return AnnotationForm
|
[
"phil@gyford.com"
] |
phil@gyford.com
|
d33581d9970083928f2bd10daedf11d919f2aeee
|
8d29fd856250e746f19e086975e83d2dea2cf6a3
|
/ResourceStatusSystem/Agent/test/Test_SSInspectorAgent/fixtures.py
|
5e164fec36b40673dd1bb2d867db9da3fd471a7f
|
[] |
no_license
|
hanyl/DIRAC
|
048c749154192e3940e17b24396afe0e667444b2
|
82eb56888fc039f94ba1033ea4b6d3ad503bf96e
|
refs/heads/master
| 2021-01-16T00:23:00.344192
| 2013-01-19T00:01:05
| 2013-01-19T00:02:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,193
|
py
|
import unittest, sys
from DIRAC.ResourceStatusSystem.PolicySystem.mock.PEP import PEP
from DIRAC.ResourceStatusSystem.Client.mock.ResourceStatusClient import ResourceStatusClient
from DIRAC.ResourceStatusSystem.mock import CheckingFreqs
from DIRAC.ResourceStatusSystem.Utilities.mock import CS
from DIRAC.ResourceStatusSystem.Agent.mock.AgentModule import AgentModule
from DIRAC.ResourceStatusSystem.Command.mock import knownAPIs
class UnitFixture( unittest.TestCase ):
def setUp( self ):
import DIRAC.ResourceStatusSystem.Agent.SSInspectorAgent as mockedModule
mockedModule.PEP = PEP
mockedModule.ResourceStatusClient = ResourceStatusClient
mockedModule.CheckingFreqs = CheckingFreqs
mockedModule.CS = CS
mockedModule.knownAPIs = knownAPIs
mockedModule.SSInspectorAgent.__bases__ = ( AgentModule, )
self.agent = mockedModule.SSInspectorAgent( '', '')
def tearDown( self ):
#sys.modules = self._modulesBkup
del sys.modules[ 'DIRAC.ResourceStatusSystem.Agent.SSInspectorAgent' ]
|
[
"mario.ubeda.garcia@cern.ch"
] |
mario.ubeda.garcia@cern.ch
|
3c53bda6c1dda03b00707e591ebe42019e901d8d
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/K5277r6RmsJRSz27t_1.py
|
8f51eb7e5f7e14c77fa0f9009fe9c23f6d01a7d5
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 634
|
py
|
"""
The challenge is to recreate the functionality of the `title()` method into a
function called `emphasise()`. The `title()` method capitalises the first
letter of _every word_ and lowercases all of the other letters in the word.
### Examples
emphasise("hello world") ➞ "Hello World"
emphasise("GOOD MORNING") ➞ "Good Morning"
emphasise("99 red balloons!") ➞ "99 Red Balloons!"
### Notes
* You won't run into any issues when dealing with numbers in strings.
* Please don't use the `title()` method directly :(
"""
def emphasise(txt):
return ' '.join(w.capitalize() for w in txt.split())
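# Quick checks mirroring the examples in the docstring above.
assert emphasise("hello world") == "Hello World"
assert emphasise("GOOD MORNING") == "Good Morning"
assert emphasise("99 red balloons!") == "99 Red Balloons!"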
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
929885ae6922340d428fa9858beeb3cfdd43d863
|
e7b312b4cc3355f4ca98313ef2ac9f3b0d81f245
|
/abc/100/d/d.py
|
6802afb2d5e0137ddc0db2a12ffb1d70809ab5bc
|
[] |
no_license
|
minus9d/programming_contest_archive
|
75466ab820e45ee0fcd829e6fac8ebc2accbbcff
|
0cb9e709f40460305635ae4d46c8ddec1e86455e
|
refs/heads/master
| 2023-02-16T18:08:42.579335
| 2023-02-11T14:10:49
| 2023-02-11T14:10:49
| 21,788,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import array
from bisect import *
from collections import *
import fractions
import heapq
from itertools import *
import math
import random
import re
import string
import sys
N, M = map(int, input().split())
XYZs = []
for n in range(N):
x, y, z = map(int, input().split())
XYZs.append((x,y,z))
ans = 0
for i in (1,-1):
for j in (1,-1):
for k in (1,-1):
scores = []
for x, y, z in XYZs:
val = x * i + y * j + z * k
scores.append(val)
scores.sort()
scores.reverse()
ans = max(ans, sum(scores[:M]))
print(ans)
|
[
"minus9d@gmail.com"
] |
minus9d@gmail.com
|
3fbb25875d5ed9d4a1c9f86aeb665817922af9a8
|
262311e60529868e38c2c57ee3db573f8e11c458
|
/mysite/books/models.py
|
d197441088c1c45f016b482eeb1d336bedac587b
|
[] |
no_license
|
huileizhan227/untitled
|
1c5604736d9ffcce6f7cb7e308cdc0ebd07e116a
|
07df74c89291b1664a28e3c8dcba51a917f1835f
|
refs/heads/master
| 2023-01-27T11:51:37.609210
| 2020-04-16T11:49:59
| 2020-04-16T11:49:59
| 150,606,504
| 1
| 0
| null | 2023-01-09T12:00:12
| 2018-09-27T15:12:18
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,468
|
py
|
from django.db import models
# Create your models here.
class Publisher(models.Model):
'''
    Publisher - name, address, city, state/province, country, and website
'''
name = models.CharField(max_length=30)
address = models.CharField(max_length=100)
city = models.CharField(max_length=60)
state_province = models.CharField(max_length=30)
country = models.CharField(max_length=50)
website = models.URLField()
def __unicode__(self):
        # The __unicode__() method tells Python how to display the object as a unicode string
return self.name
class Author(models.Model):
'''
    Author - surname, given name, and email address
'''
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
email = models.EmailField()
def __unicode__(self):
return u"{} {}".format(self.first_name, self.last_name)
class Book(models.Model):
'''
    Book - title, authors (one or more authors, a many-to-many relationship with Author),
    publisher (a one-to-many relationship with Publisher, also called a foreign key),
    and publication date
'''
title = models.CharField(max_length=100)
author = models.ManyToManyField(Author)
    publisher = models.ForeignKey(Publisher, on_delete=models.CASCADE)  # ForeignKey requires the on_delete=models.CASCADE argument
publication_date = models.DateField()
def __unicode__(self):
return self.title
|
[
"374826581@qq.com"
] |
374826581@qq.com
|
689b6003d2c9cf04e858dc49e68af06d8e75d941
|
500daf0ee45c943293920e0ce3fd9b539f05fcb6
|
/tensorloaders/redisdataset.py
|
6fafa8c00b901549697cbeec03254ad91eb51bb2
|
[
"BSD-3-Clause"
] |
permissive
|
deeptechlabs/tensorloaders
|
6a8cd5cd97d217d0f6a352a99b68da3729085257
|
f3db29828d5e0fdc28c2089726420fa5fbdf1ff2
|
refs/heads/master
| 2020-04-23T23:48:28.338670
| 2019-03-12T11:41:28
| 2019-03-12T11:41:28
| 171,546,662
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 951
|
py
|
"""
Shows how to store and load data from redis using a PyTorch
Dataset and DataLoader (with multiple workers).
@author: ptrblck
"""
import redis
import torch
from torch.utils.data import Dataset
import numpy as np
# Create RedisDataset
class RedisDataset(Dataset):
def __init__(self,
redis_host='localhost',
redis_port=6379,
redis_db=0,
length=0,
transform=None):
self.db = redis.Redis(host=redis_host, port=redis_port, db=redis_db)
self.length = length
self.transform = transform
def __getitem__(self, index):
data = self.db.get(index)
data = np.frombuffer(data, dtype=np.long)
x = data[:-1].reshape(3, 24, 24).astype(np.uint8)
y = torch.tensor(data[-1]).long()
if self.transform:
x = self.transform(x)
return x, y
def __len__(self):
return self.length
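# Hedged usage sketch (illustrative only): it assumes a redis server reachable
# on localhost:6379 and that every value stored under keys 0..length-1 is the
# raw bytes of an int64 array holding a flattened 3x24x24 image followed by
# its label, which is the layout __getitem__ above decodes.
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    db = redis.Redis(host="localhost", port=6379, db=0)
    for index in range(8):
        image = np.random.randint(0, 256, size=(3, 24, 24), dtype=np.int64)
        label = np.array([index % 2], dtype=np.int64)
        # store image and label as one contiguous int64 buffer
        db.set(index, np.concatenate([image.ravel(), label]).tobytes())

    dataset = RedisDataset(length=8)
    loader = DataLoader(dataset, batch_size=4, num_workers=0)
    for x, y in loader:
        print(x.shape, y.shape)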
|
[
"suhubdyd@iro.umontreal.ca"
] |
suhubdyd@iro.umontreal.ca
|
8adc6546de6c9c690bd02fbceb11bb4e0269169d
|
1b47e0fb58651a224ca7269c5365c174f9e6197b
|
/django2/cve/venv/bin/static
|
ac9f5aae41fb626d4856b268b1d74de823547d47
|
[] |
no_license
|
ggrecco/python
|
a3f0f6f6f99e6aeb49cacabf11c2e986eada4479
|
d5895a1b8067358e4336e6273b1b493010dab2ce
|
refs/heads/master
| 2022-12-13T19:53:51.706179
| 2020-02-20T19:32:21
| 2020-02-20T19:32:21
| 107,701,916
| 0
| 0
| null | 2022-12-08T02:05:15
| 2017-10-20T16:35:16
|
Python
|
UTF-8
|
Python
| false
| false
| 344
|
#!/home/ggrecco/Documentos/python/django2/cve/venv/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'static3==0.7.0','console_scripts','static'
__requires__ = 'static3==0.7.0'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('static3==0.7.0', 'console_scripts', 'static')()
)
|
[
"gustavo.grecco@hotmail.com"
] |
gustavo.grecco@hotmail.com
|
|
d1ae8ef5423ff53c51eb1efd77f1100c2598de28
|
8d7262650584eb6d66b5874d2e4651963918ad1d
|
/sp/migrations/0004_auto_20200401_1328.py
|
40a4fd3e76205868fb5342f378d749ed3612589d
|
[
"BSD-3-Clause"
] |
permissive
|
imsweb/django-saml-sp
|
dee5757ac9807526849a7382550815c02057d1ae
|
d8c3bdb91e4a5988e282690d79a8069af77a0c7a
|
refs/heads/main
| 2023-03-07T08:41:28.291208
| 2023-02-24T16:32:45
| 2023-02-24T16:32:45
| 220,322,548
| 14
| 11
|
BSD-3-Clause
| 2023-02-24T14:16:00
| 2019-11-07T20:23:22
|
Python
|
UTF-8
|
Python
| false
| false
| 930
|
py
|
# Generated by Django 3.0.4 on 2020-04-01 13:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("sp", "0003_auto_20200331_1934"),
]
operations = [
migrations.AlterField(
model_name="idp",
name="authenticate_method",
field=models.CharField(blank=True, max_length=200),
),
migrations.AlterField(
model_name="idp",
name="base_url",
field=models.CharField(
help_text=(
"Root URL for the site, including http/https, no trailing slash."
),
max_length=200,
verbose_name="Base URL",
),
),
migrations.AlterField(
model_name="idp",
name="login_method",
field=models.CharField(blank=True, max_length=200),
),
]
|
[
"dcwatson@gmail.com"
] |
dcwatson@gmail.com
|
b43c00902b34c537eae4a4a8cb803bbe7ca2daec
|
102a1e4885a39c9af588ca4215bcf625f9dce312
|
/deploy/deploy12.py
|
f9de7a832c6e399e18e51b127aa742e24617ae8f
|
[] |
no_license
|
lingxiao/good-great-ngrams
|
9c51136b5e7af5d522193d159e8e5e74596140a2
|
ac189b638a34b410a2f7fe84406ad4baf111792c
|
refs/heads/master
| 2021-01-11T21:15:29.648113
| 2017-02-21T01:42:54
| 2017-02-21T01:42:54
| 79,279,190
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 799
|
py
|
############################################################
# Module : Applicaton Main
# Date : November 14th
# Author : Xiao Ling
############################################################
from app import *
from prelude import *
from utils import *
from server import *
from client import *
############################################################
# Initialize application
############################################################
# root = '/home1/l/lingxiao/xiao/good-great-ngrams/'
root = "/Users/lingxiao/Documents/research/code/good-great-ngrams"
data = os.path.join(root, 'ngrams/')
a12 = App(root
,data
,'outputs-2'
,'one-sided-patterns'
,'two-sided-patterns'
,'moh-graph/testset-12')
a12.refresh(2)
|
[
"lingxiao@seas.upenn.edu"
] |
lingxiao@seas.upenn.edu
|
58bd14d240242ed58dcff35fe91cebeae4899478
|
18136ff686211c8a1c1938c369c2bacd03f10133
|
/leet/matrix/numberOfIslands.py
|
01201be8ad186c6f9d51c14c688c155234eacb23
|
[
"Apache-2.0"
] |
permissive
|
KshitijSrivastava/python-cp-cheatsheet
|
e13ef9f7260ce18b77a1171c8c8d13e74d8292c5
|
a5514b08816959de1198156f7764c54a7a585f20
|
refs/heads/master
| 2023-07-08T10:24:28.165854
| 2021-08-09T15:33:46
| 2021-08-09T15:33:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,362
|
py
|
"""
time: X * Y
space: worst case X * Y
"""
class Solution:
def numIslands(self, grid: List[List[str]]) -> int:
if not grid:
return 0
Y = len(grid)
X = len(grid[0])
def dfs(y, x):
if y < 0 or x < 0 or y > Y-1 or x > X-1:
return
if grid[y][x] == "1":
grid[y][x] = "0"
dfs(y, x-1)
dfs(y, x+1)
dfs(y-1, x)
dfs(y+1, x)
ans = 0
for y in range(Y):
for x in range(X):
if grid[y][x] == "1":
dfs(y, x)
ans += 1
return ans
def numIslands(self, grid : List[List[str]]) -> int:
R = len(grid)
C = len(grid[0])
def dfs(r, c):
if r < 0 or c < 0 or r >= R or c >= C:
return
if grid[r][c] == '1':
grid[r][c] = '0'
dfs(r-1,c)
dfs(r+1,c)
dfs(r,c-1)
dfs(r,c+1)
rtn = 0
for r in range(R):
for c in range(C):
if grid[r][c] == '1':
rtn += 1
dfs(r,c)
return rtn
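# Illustrative example (the grid is modified in place by the DFS):
#
#   grid = [["1", "1", "0"],
#           ["0", "1", "0"],
#           ["0", "0", "1"]]
#   Solution().numIslands(grid)   # -> 2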
|
[
"peterrlamar@gmail.com"
] |
peterrlamar@gmail.com
|
b83707b7b3c7b8d985fb452b2237fc9ca45e35ce
|
feef30d93f43b9f5e08c050f0c97237b4fb4ba9c
|
/Login/UserRegistration/admin.py
|
5fa7543a9d186355023c55b6b4fdb2d7ee13fb95
|
[] |
no_license
|
cmrajib/django_resale_market_place
|
3926d051a7a47b6a77fe0a06a570bd8d4fddc98c
|
a8b6557fb50c34ce91a96f2e795837851b3aaf2c
|
refs/heads/main
| 2023-02-11T22:54:41.022680
| 2021-01-10T02:28:08
| 2021-01-10T02:28:08
| 328,287,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
from django.contrib import admin
from django.utils.html import format_html
# Register your models here.
from UserRegistration.models import User, Profile
class CarAdmin(admin.ModelAdmin):
def thumbnail(self, object):
return format_html('<img src="{}" width="40" style="border-radius: 50px;"/>'.format(object.image.url))
thumbnail.short_description = 'Photo'
list_display = ('id', 'thumbnail','full_name', 'phone','city', 'zipcode', 'country')
list_display_links = ('id','thumbnail', 'full_name')
list_filter = ('full_name','city')
# list_editable = ('is_featured',)
search_fields =('full_name', 'city','phone')
list_per_page = 10
admin.site.register(User)
admin.site.register(Profile, CarAdmin)
|
[
"cmrajib@gmail.com"
] |
cmrajib@gmail.com
|
1276f3aadbfeca2b05571f2c36083d8ce4f88638
|
b0f1acbe5cd30c2ade801465924c12403ab7e585
|
/Corda_Api_Library/test/test_net_corda_core_context_auth_service_id.py
|
62b0ee61a3b07529252bc9c118598e42cffe31e6
|
[] |
no_license
|
TanzimAzadNishan/Blockchain-Based-Online-Ticketing-Platform
|
94ea0f06a7761f9033f7a1dc61548ade6f6ff499
|
d04a2696cab4c41743c7c5999c623002d0e57f80
|
refs/heads/main
| 2023-03-09T14:34:27.148340
| 2021-02-24T11:49:26
| 2021-02-24T11:49:26
| 338,845,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 899
|
py
|
"""
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import openapi_client
from openapi_client.model.net_corda_core_context_auth_service_id import NetCordaCoreContextAuthServiceId
class TestNetCordaCoreContextAuthServiceId(unittest.TestCase):
"""NetCordaCoreContextAuthServiceId unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testNetCordaCoreContextAuthServiceId(self):
"""Test NetCordaCoreContextAuthServiceId"""
# FIXME: construct object with mandatory attributes with example values
# model = NetCordaCoreContextAuthServiceId() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"64527153+TanzimAzadNishan@users.noreply.github.com"
] |
64527153+TanzimAzadNishan@users.noreply.github.com
|
1ba5eb3665cadc23d5d6eb759ee9015d804c800d
|
b35cf6b82290ef5f3f6d2e3d12d3321aeaba099f
|
/backend/home/migrations/0002_load_initial_data.py
|
80b25ac73b80bc50b0c43a86ef6d30f4aee74408
|
[] |
no_license
|
crowdbotics-apps/check-plannss-dev-1905
|
26339d7cb82bcc623a7006a2a25b8159dd557e3c
|
cb1355e7a58f58671dd71c93d47e9858be60d041
|
refs/heads/master
| 2022-04-08T01:23:31.526664
| 2020-03-10T16:26:04
| 2020-03-10T16:26:04
| 246,351,512
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,309
|
py
|
from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "check plannss"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">check plannss</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "check-plannss-dev-1905.botics.co"
site_params = {
"name": "check plannss",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
70c7c0082ae0fd69750b6dffa4ecbc4c018fc7b9
|
85de8e224f9bf8202b25a0aee939f4949f69471c
|
/day_9/Rotting_Oranges.py
|
2f48c21c4b8f2aee0e2c08bcc52ae57ad88e79fd
|
[] |
no_license
|
ahmedmeshref/August-LeetCoding-Challenge
|
78d17ad3bbcaa240ebe53841832ff1f835cae729
|
68b69d30e9d0a32cf6cd1a5f8c05cb2eb572946b
|
refs/heads/master
| 2022-12-05T08:19:55.659127
| 2020-09-01T02:06:56
| 2020-09-01T02:06:56
| 284,369,702
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,308
|
py
|
from collections import deque
class Solution:
def orangesRotting(self, grid) -> int:
rotten_q = deque()
self.tot_fresh = 0
def rotten(i, j):
grid[i][j] = 2
self.tot_fresh -= 1
self.rotten = True
return [i, j]
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] == 2:
rotten_q.append([i, j])
elif grid[i][j] == 1:
self.tot_fresh += 1
min_mins = 0
while rotten_q:
ln = len(rotten_q)
self.rotten = False
for _ in range(ln):
i, j = rotten_q.popleft()
if i - 1 >= 0 and grid[i - 1][j] == 1:
rotten_q.append(rotten(i - 1, j))
if i + 1 < len(grid) and grid[i + 1][j] == 1:
rotten_q.append(rotten(i + 1, j))
if j - 1 >= 0 and grid[i][j - 1] == 1:
rotten_q.append(rotten(i, j - 1))
if j + 1 < len(grid[0]) and grid[i][j + 1] == 1:
rotten_q.append(rotten(i, j + 1))
if self.rotten:
min_mins += 1
return min_mins if not self.tot_fresh else -1
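# Illustrative check using the classic example: every fresh orange can be
# reached, and the last one rots after 4 minutes.
if __name__ == "__main__":
    print(Solution().orangesRotting([[2, 1, 1], [1, 1, 0], [0, 1, 1]]))  # 4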
|
[
"a.meshref@alustudent.com"
] |
a.meshref@alustudent.com
|
d7d8d16a859496673b797120662e9289bd77dcbd
|
cb13037cdbd3e0ab6108670108e9497cc1e2a5a7
|
/0.leetcode/3.刷题/1.数据结构系列/1.线性结构/3.栈/1.单调栈/2.最小栈/456.mid_132模式.py
|
d52e63ba018f3ae19666a84483eb475d55592f48
|
[] |
no_license
|
GMwang550146647/network
|
390fe0d1c72dcaca8b6d6dd1307adca0d56b55ce
|
576de9b993f7763789d25a995702b40c9bc6fa57
|
refs/heads/master
| 2023-06-15T04:42:54.306077
| 2021-07-12T06:06:02
| 2021-07-12T06:06:02
| 315,488,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,709
|
py
|
from fundamentals.test_time import test_time
from math import ceil
class Solution():
def __init__(self):
pass
# @test_time
# def find132pattern_ans(self, nums):
# if len(nums) < 3:
# return False
#
# pre_mins = [nums[0]]
# for i in range(1, len(nums)):
# pre_mins.append(min(pre_mins[-1], nums[i]))
# stack_k = []
# for j in range(len(nums) - 1, -1, -1):
# if nums[j] > pre_mins[j]:
# while stack_k and pre_mins[j] >= stack_k[-1]:
# stack_k.pop()
#
# if stack_k and stack_k[-1] < nums[j]:
# return True
#
# stack_k.append(nums[j])
#
# return False
@test_time
def find132pattern(self, nums):
if len(nums) < 3:
return False
pre_mins = [nums[0]]
for i in range(1, len(nums)):
pre_mins.append(min(pre_mins[-1], nums[i]))
stack = []
for i in range(len(nums) - 1, -1, -1):
if nums[i] > pre_mins[i]:
while stack and pre_mins[i] >= stack[-1]:
stack.pop(-1)
if stack and stack[-1] < nums[i]:
return True
stack.append(nums[i])
return False
def main(self):
nums = [3, 5, 0, 3, 4]
nums = [1, 2, 3, 4, 5]
nums = [-1, 3, 2, 0]
# nums = [1, 0, 1, -4, -3]
nums = [-2, 1, 2, -2, 1, 2]
# nums = [3,1,4,2]
# nums = [-1,3,2,0]
print(self.find132pattern(nums))
        # print(self.find132pattern_ans(nums))  # find132pattern_ans is commented out above
if __name__ == '__main__':
SL = Solution()
SL.main()
|
[
"gmwang_global@qq.com"
] |
gmwang_global@qq.com
|
ecc18e3af497e0ca2abb1553abd591c27b215a95
|
d54e1b89dbd0ec5baa6a018464a419e718c1beac
|
/Python from start to practice/Chapters/Chatper11_20200408_test_class/employee.py
|
f1860aa47e1c95ce3745cf45e926609001f81631
|
[] |
no_license
|
cjx1996/vscode_Pythoncode
|
eda438279b7318e6cb73211e26107c7e1587fdfb
|
f269ebf7ed80091b22334c48839af2a205a15549
|
refs/heads/master
| 2021-01-03T19:16:18.103858
| 2020-05-07T13:51:31
| 2020-05-07T13:51:31
| 240,205,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
class Employee():
    '''Store information about an employee.'''
def __init__(self, first, last, wage):
        '''
        Define the employee's basic information: last name, first name, and annual salary.
        '''
self.first = first
self.last = last
self.wage = wage
def give_raise(self, number=5000):
        '''Raise the employee's annual salary, by 5000 by default or by any other given amount.'''
self.wage = self.wage + number
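# Minimal illustrative usage (values are arbitrary):
if __name__ == '__main__':
    employee = Employee('john', 'doe', 60000)
    employee.give_raise()          # default raise of 5000
    employee.give_raise(10000)     # custom raise
    print(employee.first, employee.last, employee.wage)   # john doe 75000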
|
[
"1121287904@qq.com"
] |
1121287904@qq.com
|
51e9cb3c0299e3467570be9639804c4196211c3b
|
740af83394e19b1e68c08dc0fdbec8c44ac6216e
|
/jesse/indicators/atr.py
|
8116f5d70f6fa5ed96f1da2f0f44026824644b07
|
[
"MIT"
] |
permissive
|
dinet/jesse
|
9a6b60c48b4f51a09e7092bf66df446763dd7b21
|
e6f59fba3f56b01b12bf6c1adf9829afe8dcd2df
|
refs/heads/master
| 2022-06-22T21:06:42.348993
| 2020-05-09T19:45:35
| 2020-05-09T19:45:35
| 262,654,825
| 1
| 0
|
MIT
| 2020-05-09T20:38:11
| 2020-05-09T20:38:10
| null |
UTF-8
|
Python
| false
| false
| 604
|
py
|
import numpy as np
import talib
from typing import Union
def atr(candles: np.ndarray, period=14, sequential=False) -> Union[float, np.ndarray]:
"""
ATR - Average True Range
:param candles: np.ndarray
:param period: int - default=14
:param sequential: bool - default=False
:return: float | np.ndarray
"""
if not sequential and len(candles) > 240:
candles = candles[-240:]
res = talib.ATR(candles[:, 3], candles[:, 4], candles[:, 2], timeperiod=period)
if sequential:
return res
else:
return None if np.isnan(res[-1]) else res[-1]
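# Hedged usage sketch (illustrative only): `candles` is assumed to be a 2-D
# numpy array with one row per candle where, as indexed above, column 2 is
# the close, column 3 the high and column 4 the low price.
#
#   last_atr = atr(candles)                                  # latest value
#   atr_series = atr(candles, period=20, sequential=True)    # full ndarray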
|
[
"fischersully@gmail.com"
] |
fischersully@gmail.com
|
8b63183a6ac2ad40af1c8b51e501977274c5f4b4
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/tests/pytests/unit/test_crypt.py
|
e3c98ab63666cc6a76c83babef91ec87ead7bdfa
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 7,298
|
py
|
"""
tests.pytests.unit.test_crypt
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Unit tests for salt's crypt module
"""
import uuid
import pytest
import salt.crypt
import salt.master
import salt.utils.files
PRIV_KEY = """
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAoAsMPt+4kuIG6vKyw9r3+OuZrVBee/2vDdVetW+Js5dTlgrJ
aghWWn3doGmKlEjqh7E4UTa+t2Jd6w8RSLnyHNJ/HpVhMG0M07MF6FMfILtDrrt8
ZX7eDVt8sx5gCEpYI+XG8Y07Ga9i3Hiczt+fu6HYwu96HggmG2pqkOrn3iGfqBvV
YVFJzSZYe7e4c1PeEs0xYcrA4k+apyGsMtpef8vRUrNicRLc7dAcvfhtgt2DXEZ2
d72t/CR4ygtUvPXzisaTPW0G7OWAheCloqvTIIPQIjR8htFxGTz02STVXfnhnJ0Z
k8KhqKF2v1SQvIYxsZU7jaDgl5i3zpeh58cYOwIDAQABAoIBABZUJEO7Y91+UnfC
H6XKrZEZkcnH7j6/UIaOD9YhdyVKxhsnax1zh1S9vceNIgv5NltzIsfV6vrb6v2K
Dx/F7Z0O0zR5o+MlO8ZncjoNKskex10gBEWG00Uqz/WPlddiQ/TSMJTv3uCBAzp+
S2Zjdb4wYPUlgzSgb2ygxrhsRahMcSMG9PoX6klxMXFKMD1JxiY8QfAHahPzQXy9
F7COZ0fCVo6BE+MqNuQ8tZeIxu8mOULQCCkLFwXmkz1FpfK/kNRmhIyhxwvCS+z4
JuErW3uXfE64RLERiLp1bSxlDdpvRO2R41HAoNELTsKXJOEt4JANRHm/CeyA5wsh
NpscufUCgYEAxhgPfcMDy2v3nL6KtkgYjdcOyRvsAF50QRbEa8ldO+87IoMDD/Oe
osFERJ5hhyyEO78QnaLVegnykiw5DWEF02RKMhD/4XU+1UYVhY0wJjKQIBadsufB
2dnaKjvwzUhPh5BrBqNHl/FXwNCRDiYqXa79eWCPC9OFbZcUWWq70s8CgYEAztOI
61zRfmXJ7f70GgYbHg+GA7IrsAcsGRITsFR82Ho0lqdFFCxz7oK8QfL6bwMCGKyk
nzk+twh6hhj5UNp18KN8wktlo02zTgzgemHwaLa2cd6xKgmAyuPiTgcgnzt5LVNG
FOjIWkLwSlpkDTl7ZzY2QSy7t+mq5d750fpIrtUCgYBWXZUbcpPL88WgDB7z/Bjg
dlvW6JqLSqMK4b8/cyp4AARbNp12LfQC55o5BIhm48y/M70tzRmfvIiKnEc/gwaE
NJx4mZrGFFURrR2i/Xx5mt/lbZbRsmN89JM+iKWjCpzJ8PgIi9Wh9DIbOZOUhKVB
9RJEAgo70LvCnPTdS0CaVwKBgDJW3BllAvw/rBFIH4OB/vGnF5gosmdqp3oGo1Ik
jipmPAx6895AH4tquIVYrUl9svHsezjhxvjnkGK5C115foEuWXw0u60uiTiy+6Pt
2IS0C93VNMulenpnUrppE7CN2iWFAiaura0CY9fE/lsVpYpucHAWgi32Kok+ZxGL
WEttAoGAN9Ehsz4LeQxEj3x8wVeEMHF6OsznpwYsI2oVh6VxpS4AjgKYqeLVcnNi
TlZFsuQcqgod8OgzA91tdB+Rp86NygmWD5WzeKXpCOg9uA+y/YL+0sgZZHsuvbK6
PllUgXdYxqClk/hdBFB7v9AQoaj7K9Ga22v32msftYDQRJ94xOI=
-----END RSA PRIVATE KEY-----
"""
PUB_KEY = """
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoAsMPt+4kuIG6vKyw9r3
+OuZrVBee/2vDdVetW+Js5dTlgrJaghWWn3doGmKlEjqh7E4UTa+t2Jd6w8RSLny
HNJ/HpVhMG0M07MF6FMfILtDrrt8ZX7eDVt8sx5gCEpYI+XG8Y07Ga9i3Hiczt+f
u6HYwu96HggmG2pqkOrn3iGfqBvVYVFJzSZYe7e4c1PeEs0xYcrA4k+apyGsMtpe
f8vRUrNicRLc7dAcvfhtgt2DXEZ2d72t/CR4ygtUvPXzisaTPW0G7OWAheCloqvT
IIPQIjR8htFxGTz02STVXfnhnJ0Zk8KhqKF2v1SQvIYxsZU7jaDgl5i3zpeh58cY
OwIDAQAB
-----END PUBLIC KEY-----
"""
PRIV_KEY2 = """
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAp+8cTxguO6Vg+YO92VfHgNld3Zy8aM3JbZvpJcjTnis+YFJ7
Zlkcc647yPRRwY9nYBNywahnt5kIeuT1rTvTsMBZWvmUoEVUj1Xg8XXQkBvb9Ozy
Gqy/G/p8KDDpzMP/U+XCnUeHiXTZrgnqgBIc2cKeCVvWFqDi0GRFGzyaXLaX3PPm
M7DJ0MIPL1qgmcDq6+7Ze0gJ9SrDYFAeLmbuT1OqDfufXWQl/82JXeiwU2cOpqWq
7n5fvPOWim7l1tzQ+dSiMRRm0xa6uNexCJww3oJSwvMbAmgzvOhqqhlqv+K7u0u7
FrFFojESsL36Gq4GBrISnvu2tk7u4GGNTYYQbQIDAQABAoIBAADrqWDQnd5DVZEA
lR+WINiWuHJAy/KaIC7K4kAMBgbxrz2ZbiY9Ok/zBk5fcnxIZDVtXd1sZicmPlro
GuWodIxdPZAnWpZ3UtOXUayZK/vCP1YsH1agmEqXuKsCu6Fc+K8VzReOHxLUkmXn
FYM+tixGahXcjEOi/aNNTWitEB6OemRM1UeLJFzRcfyXiqzHpHCIZwBpTUAsmzcG
QiVDkMTKubwo/m+PVXburX2CGibUydctgbrYIc7EJvyx/cpRiPZXo1PhHQWdu4Y1
SOaC66WLsP/wqvtHo58JQ6EN/gjSsbAgGGVkZ1xMo66nR+pLpR27coS7o03xCks6
DY/0mukCgYEAuLIGgBnqoh7YsOBLd/Bc1UTfDMxJhNseo+hZemtkSXz2Jn51322F
Zw/FVN4ArXgluH+XsOhvG/MFFpojwZSrb0Qq5b1MRdo9qycq8lGqNtlN1WHqosDQ
zW29kpL0tlRrSDpww3wRESsN9rH5XIrJ1b3ZXuO7asR+KBVQMy/+NcUCgYEA6MSC
c+fywltKPgmPl5j0DPoDe5SXE/6JQy7w/vVGrGfWGf/zEJmhzS2R+CcfTTEqaT0T
Yw8+XbFgKAqsxwtE9MUXLTVLI3sSUyE4g7blCYscOqhZ8ItCUKDXWkSpt++rG0Um
1+cEJP/0oCazG6MWqvBC4NpQ1nzh46QpjWqMwokCgYAKDLXJ1p8rvx3vUeUJW6zR
dfPlEGCXuAyMwqHLxXgpf4EtSwhC5gSyPOtx2LqUtcrnpRmt6JfTH4ARYMW9TMef
QEhNQ+WYj213mKP/l235mg1gJPnNbUxvQR9lkFV8bk+AGJ32JRQQqRUTbU+yN2MQ
HEptnVqfTp3GtJIultfwOQKBgG+RyYmu8wBP650izg33BXu21raEeYne5oIqXN+I
R5DZ0JjzwtkBGroTDrVoYyuH1nFNEh7YLqeQHqvyufBKKYo9cid8NQDTu+vWr5UK
tGvHnwdKrJmM1oN5JOAiq0r7+QMAOWchVy449VNSWWV03aeftB685iR5BXkstbIQ
EVopAoGAfcGBTAhmceK/4Q83H/FXBWy0PAa1kZGg/q8+Z0KY76AqyxOVl0/CU/rB
3tO3sKhaMTHPME/MiQjQQGoaK1JgPY6JHYvly2KomrJ8QTugqNGyMzdVJkXAK2AM
GAwC8ivAkHf8CHrHa1W7l8t2IqBjW1aRt7mOW92nfG88Hck0Mbo=
-----END RSA PRIVATE KEY-----
"""
PUB_KEY2 = """
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAp+8cTxguO6Vg+YO92VfH
gNld3Zy8aM3JbZvpJcjTnis+YFJ7Zlkcc647yPRRwY9nYBNywahnt5kIeuT1rTvT
sMBZWvmUoEVUj1Xg8XXQkBvb9OzyGqy/G/p8KDDpzMP/U+XCnUeHiXTZrgnqgBIc
2cKeCVvWFqDi0GRFGzyaXLaX3PPmM7DJ0MIPL1qgmcDq6+7Ze0gJ9SrDYFAeLmbu
T1OqDfufXWQl/82JXeiwU2cOpqWq7n5fvPOWim7l1tzQ+dSiMRRm0xa6uNexCJww
3oJSwvMbAmgzvOhqqhlqv+K7u0u7FrFFojESsL36Gq4GBrISnvu2tk7u4GGNTYYQ
bQIDAQAB
-----END PUBLIC KEY-----
"""
def test_get_rsa_pub_key_bad_key(tmp_path):
"""
get_rsa_pub_key raises InvalidKeyError when encoutering a bad key
"""
key_path = str(tmp_path / "key")
with salt.utils.files.fopen(key_path, "w") as fp:
fp.write("")
with pytest.raises(salt.crypt.InvalidKeyError):
salt.crypt.get_rsa_pub_key(key_path)
def test_cryptical_dumps_no_nonce():
master_crypt = salt.crypt.Crypticle({}, salt.crypt.Crypticle.generate_key_string())
data = {"foo": "bar"}
ret = master_crypt.dumps(data)
# Validate message structure
assert isinstance(ret, bytes)
une = master_crypt.decrypt(ret)
    assert une.startswith(master_crypt.PICKLE_PAD)
assert salt.payload.loads(une[len(master_crypt.PICKLE_PAD) :]) == data
# Validate load back to orig data
assert master_crypt.loads(ret) == data
def test_cryptical_dumps_valid_nonce():
nonce = uuid.uuid4().hex
master_crypt = salt.crypt.Crypticle({}, salt.crypt.Crypticle.generate_key_string())
data = {"foo": "bar"}
ret = master_crypt.dumps(data, nonce=nonce)
assert isinstance(ret, bytes)
une = master_crypt.decrypt(ret)
    assert une.startswith(master_crypt.PICKLE_PAD)
nonce_and_data = une[len(master_crypt.PICKLE_PAD) :]
assert nonce_and_data.startswith(nonce.encode())
assert salt.payload.loads(nonce_and_data[len(nonce) :]) == data
assert master_crypt.loads(ret, nonce=nonce) == data
def test_cryptical_dumps_invalid_nonce():
nonce = uuid.uuid4().hex
master_crypt = salt.crypt.Crypticle({}, salt.crypt.Crypticle.generate_key_string())
data = {"foo": "bar"}
ret = master_crypt.dumps(data, nonce=nonce)
assert isinstance(ret, bytes)
with pytest.raises(salt.crypt.SaltClientError, match="Nonce verification error"):
assert master_crypt.loads(ret, nonce="abcde")
def test_verify_signature(tmp_path):
tmp_path.joinpath("foo.pem").write_text(PRIV_KEY.strip())
tmp_path.joinpath("foo.pub").write_text(PUB_KEY.strip())
tmp_path.joinpath("bar.pem").write_text(PRIV_KEY2.strip())
tmp_path.joinpath("bar.pub").write_text(PUB_KEY2.strip())
msg = b"foo bar"
sig = salt.crypt.sign_message(str(tmp_path.joinpath("foo.pem")), msg)
assert salt.crypt.verify_signature(str(tmp_path.joinpath("foo.pub")), msg, sig)
def test_verify_signature_bad_sig(tmp_path):
tmp_path.joinpath("foo.pem").write_text(PRIV_KEY.strip())
tmp_path.joinpath("foo.pub").write_text(PUB_KEY.strip())
tmp_path.joinpath("bar.pem").write_text(PRIV_KEY2.strip())
tmp_path.joinpath("bar.pub").write_text(PUB_KEY2.strip())
msg = b"foo bar"
sig = salt.crypt.sign_message(str(tmp_path.joinpath("foo.pem")), msg)
assert not salt.crypt.verify_signature(str(tmp_path.joinpath("bar.pub")), msg, sig)
|
[
"pedro@algarvio.me"
] |
pedro@algarvio.me
|
59272245968fd3f11a85901ea07d6c1fedcbbda5
|
0a33cc0ebb67c51cc38750f0f04c3e6c088e3b1a
|
/tests/components/sleepiq/test_init.py
|
e468734e06321cc5513d41f33c58d9775346af86
|
[
"Apache-2.0"
] |
permissive
|
robert-alfaro/home-assistant
|
e9bb08ad22a167ed226fb3de8f5b36acfc393548
|
4a53121b58b77a318f08c64ad2c5372a16b800e0
|
refs/heads/dev
| 2023-02-28T06:46:23.217246
| 2022-04-26T17:30:08
| 2022-04-26T17:30:08
| 115,894,662
| 4
| 0
|
Apache-2.0
| 2023-02-22T06:21:08
| 2018-01-01T02:00:35
|
Python
|
UTF-8
|
Python
| false
| false
| 4,870
|
py
|
"""Tests for the SleepIQ integration."""
from asyncsleepiq import (
SleepIQAPIException,
SleepIQLoginException,
SleepIQTimeoutException,
)
from homeassistant.components.sleepiq.const import (
DOMAIN,
IS_IN_BED,
PRESSURE,
SLEEP_NUMBER,
)
from homeassistant.components.sleepiq.coordinator import UPDATE_INTERVAL
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from tests.common import MockConfigEntry, async_fire_time_changed, mock_registry
from tests.components.sleepiq.conftest import (
BED_ID,
SLEEPER_L_ID,
SLEEPER_L_NAME,
SLEEPER_L_NAME_LOWER,
SLEEPIQ_CONFIG,
setup_platform,
)
ENTITY_IS_IN_BED = f"sensor.sleepnumber_{BED_ID}_{SLEEPER_L_NAME_LOWER}_{IS_IN_BED}"
ENTITY_PRESSURE = f"sensor.sleepnumber_{BED_ID}_{SLEEPER_L_NAME_LOWER}_{PRESSURE}"
ENTITY_SLEEP_NUMBER = (
f"sensor.sleepnumber_{BED_ID}_{SLEEPER_L_NAME_LOWER}_{SLEEP_NUMBER}"
)
async def test_unload_entry(hass: HomeAssistant, mock_asyncsleepiq) -> None:
"""Test unloading the SleepIQ entry."""
entry = await setup_platform(hass, "sensor")
assert await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert entry.state is ConfigEntryState.NOT_LOADED
assert not hass.data.get(DOMAIN)
async def test_entry_setup_login_error(hass: HomeAssistant, mock_asyncsleepiq) -> None:
"""Test when sleepiq client is unable to login."""
mock_asyncsleepiq.login.side_effect = SleepIQLoginException
entry = await setup_platform(hass, None)
assert not await hass.config_entries.async_setup(entry.entry_id)
async def test_entry_setup_timeout_error(
hass: HomeAssistant, mock_asyncsleepiq
) -> None:
"""Test when sleepiq client timeout."""
mock_asyncsleepiq.login.side_effect = SleepIQTimeoutException
entry = await setup_platform(hass, None)
assert not await hass.config_entries.async_setup(entry.entry_id)
async def test_update_interval(hass: HomeAssistant, mock_asyncsleepiq) -> None:
"""Test update interval."""
await setup_platform(hass, "sensor")
assert mock_asyncsleepiq.fetch_bed_statuses.call_count == 1
async_fire_time_changed(hass, utcnow() + UPDATE_INTERVAL)
await hass.async_block_till_done()
assert mock_asyncsleepiq.fetch_bed_statuses.call_count == 2
async def test_api_error(hass: HomeAssistant, mock_asyncsleepiq) -> None:
"""Test when sleepiq client is unable to login."""
mock_asyncsleepiq.init_beds.side_effect = SleepIQAPIException
entry = await setup_platform(hass, None)
assert not await hass.config_entries.async_setup(entry.entry_id)
async def test_api_timeout(hass: HomeAssistant, mock_asyncsleepiq) -> None:
"""Test when sleepiq client timeout."""
mock_asyncsleepiq.init_beds.side_effect = SleepIQTimeoutException
entry = await setup_platform(hass, None)
assert not await hass.config_entries.async_setup(entry.entry_id)
async def test_unique_id_migration(hass: HomeAssistant, mock_asyncsleepiq) -> None:
"""Test migration of sensor unique IDs."""
mock_entry = MockConfigEntry(
domain=DOMAIN,
data=SLEEPIQ_CONFIG,
unique_id=SLEEPIQ_CONFIG[CONF_USERNAME].lower(),
)
mock_entry.add_to_hass(hass)
mock_registry(
hass,
{
ENTITY_IS_IN_BED: er.RegistryEntry(
entity_id=ENTITY_IS_IN_BED,
unique_id=f"{BED_ID}_{SLEEPER_L_NAME}_{IS_IN_BED}",
platform=DOMAIN,
config_entry_id=mock_entry.entry_id,
),
ENTITY_PRESSURE: er.RegistryEntry(
entity_id=ENTITY_PRESSURE,
unique_id=f"{BED_ID}_{SLEEPER_L_NAME}_{PRESSURE}",
platform=DOMAIN,
config_entry_id=mock_entry.entry_id,
),
ENTITY_SLEEP_NUMBER: er.RegistryEntry(
entity_id=ENTITY_SLEEP_NUMBER,
unique_id=f"{BED_ID}_{SLEEPER_L_NAME}_{SLEEP_NUMBER}",
platform=DOMAIN,
config_entry_id=mock_entry.entry_id,
),
},
)
assert await async_setup_component(hass, DOMAIN, {})
await hass.async_block_till_done()
ent_reg = er.async_get(hass)
sensor_is_in_bed = ent_reg.async_get(ENTITY_IS_IN_BED)
assert sensor_is_in_bed.unique_id == f"{SLEEPER_L_ID}_{IS_IN_BED}"
sensor_pressure = ent_reg.async_get(ENTITY_PRESSURE)
assert sensor_pressure.unique_id == f"{SLEEPER_L_ID}_{PRESSURE}"
sensor_sleep_number = ent_reg.async_get(ENTITY_SLEEP_NUMBER)
assert sensor_sleep_number.unique_id == f"{SLEEPER_L_ID}_{SLEEP_NUMBER}"
|
[
"noreply@github.com"
] |
robert-alfaro.noreply@github.com
|
505ffc46bb8c74cd11dac8ac80d0d705f1fd86f2
|
8cbeff7328c5e315733ca4f76307be407045f178
|
/backend/comments_app/forms.py
|
825467daedaee00183729d306f0a37e73ab3ff85
|
[] |
no_license
|
eflipe/Blog-App-Adv
|
2586ce2817d61e8c16492e8d03bef720f55b8a58
|
b398cb96bf93757af084fc33fb6055380be2029e
|
refs/heads/master
| 2022-11-20T07:40:21.985089
| 2020-07-17T21:34:24
| 2020-07-17T21:34:24
| 277,913,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
from django import forms
class CommentForm(forms.Form):
content_type = forms.CharField(widget=forms.HiddenInput)
object_id = forms.IntegerField(widget=forms.HiddenInput)
# parent_id = forms.IntegerField(widget=forms.HiddenInput, required=False)
content = forms.CharField(widget=forms.Textarea)
|
[
"felipecabaleiro@gmail.com"
] |
felipecabaleiro@gmail.com
|
95a80a4842d0bca8dc17195fdb00391489b67b5b
|
3fda3ff2e9334433554b6cf923506f428d9e9366
|
/hipeac/api/views/awards.py
|
6e5e44adc2596d98b2f46e8d59e69f82d93a5a05
|
[
"MIT"
] |
permissive
|
CreativeOthman/hipeac
|
12adb61099886a6719dfccfa5ce26fdec8951bf9
|
2ce98da17cac2c6a87ec88df1b7676db4c200607
|
refs/heads/master
| 2022-07-20T10:06:58.771811
| 2020-05-07T11:39:13
| 2020-05-07T11:44:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,892
|
py
|
from django.views.decorators.cache import never_cache
from rest_framework.decorators import action
from rest_framework.exceptions import PermissionDenied
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
from rest_framework.viewsets import GenericViewSet
from hipeac.models import Publication, PublicationConference, TechTransferCall, TechTransferApplication
from ..serializers import (
PublicationListSerializer,
PublicationConferenceListSerializer,
TechTransferCallSerializer,
TechTransferApplicationSerializer,
)
class PublicationConferenceViewSet(ListModelMixin, GenericViewSet):
queryset = PublicationConference.objects.all()
pagination_class = None
serializer_class = PublicationConferenceListSerializer
class PaperAwardViewSet(ListModelMixin, GenericViewSet):
pagination_class = None
serializer_class = PublicationListSerializer
def list(self, request, *args, **kwargs):
year = request.query_params.get("year", False)
if not year:
raise PermissionDenied("Please include a `year` query parameter in your request.")
self.queryset = Publication.objects.awarded(year=int(year))
return super().list(request, *args, **kwargs)
class TechTransferViewSet(RetrieveModelMixin, ListModelMixin, GenericViewSet):
pagination_class = None
serializer_class = TechTransferApplicationSerializer
def get_object(self):
return TechTransferCall.objects.active()
@action(detail=False, serializer_class=TechTransferCallSerializer)
@never_cache
def call(self, request, *args, **kwargs):
return RetrieveModelMixin.retrieve(self, request, *args, **kwargs)
def list(self, request, *args, **kwargs):
self.queryset = TechTransferApplication.objects.filter(awarded=True).prefetch_related("call")
return super().list(request, *args, **kwargs)
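A hedged sketch of how these viewsets might be wired into a DRF router; the URL prefixes and basenames are assumptions (the project's actual urls module is not shown here), and the basename keyword assumes DRF 3.9 or newer.
from rest_framework.routers import DefaultRouter

router = DefaultRouter()
router.register(r"publication-conferences", PublicationConferenceViewSet, basename="publication-conference")
router.register(r"paper-awards", PaperAwardViewSet, basename="paper-award")
router.register(r"tech-transfer", TechTransferViewSet, basename="tech-transfer")
urlpatterns = router.urls  # e.g. GET /paper-awards/?year=2020 reaches PaperAwardViewSet.list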
|
[
"eneko.illarramendi@ugent.be"
] |
eneko.illarramendi@ugent.be
|
0f2723868f232a4c15d651578491256f668f1061
|
87d5b21265c381104de8f45aa67842a4adc880eb
|
/402.2.py
|
54dab3ccdb05d73e26403a3f64631462212ab2f8
|
[] |
no_license
|
MYMSSENDOG/leetcodes
|
ac047fe0d951e0946740cb75103fc94aae967166
|
8a52a417a903a0742034161471a084bc1e494d68
|
refs/heads/master
| 2020-09-23T16:55:08.579319
| 2020-09-03T19:44:26
| 2020-09-03T19:44:26
| 225,543,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 664
|
py
|
class Solution:
def removeKdigits(self, num: str, k: int) -> str:
while k:
z = num.find("0")
if z != -1 and z + 1 < k:
k -= z
num = num[z:]
num = num.lstrip("0")
else:
for i in range(len(num) - 1):
if num[i] > num[i + 1]:
num = (num[:i] + num[i+1:]).lstrip("0")
break
else:
num = num[:-1]
k-=1
if not num:
return "0"
return num
sol = Solution()
num = "1020304050"
k = 3
print(sol.removeKdigits(num,k))
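A few extra sanity checks against the standard cases for this problem; these assertions are added for illustration and are not part of the original snippet.
assert sol.removeKdigits("1432219", 3) == "1219"  # classic example for this problem
assert sol.removeKdigits("10200", 1) == "200"     # leading zeros are stripped after removal
assert sol.removeKdigits("10", 2) == "0"          # removing every digit yields "0"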
|
[
"fhqmtkfkd@naver.com"
] |
fhqmtkfkd@naver.com
|
75e5080dc01075d32c626fad54cd5ce394d32f88
|
7e8c72c099b231078a763ea7da6bba4bd6bac77b
|
/python/misc_ipata/src/ipa4django/views/json.py
|
033b9896dd602912ef01717ff11f20a7a689ccbe
|
[] |
no_license
|
github188/demodemo
|
fd910a340d5c5fbf4c8755580db8ab871759290b
|
96ed049eb398c4c188a688e9c1bc2fe8cd2dc80b
|
refs/heads/master
| 2021-01-12T17:16:36.199708
| 2012-08-15T14:20:51
| 2012-08-15T14:20:51
| 71,537,068
| 1
| 2
| null | 2016-10-21T06:38:22
| 2016-10-21T06:38:22
| null |
UTF-8
|
Python
| false
| false
| 5,005
|
py
|
from django.core.serializers.json import Serializer as BuitlInJsonSerializer
from django.utils.encoding import smart_unicode
import simplejson
from django.db.models.query import QuerySet
from django.db import models
from StringIO import StringIO
import types
from ipa4django.db.raw_sql import SQLRow
class DjangoSerializer(BuitlInJsonSerializer):
def end_object( self, obj ):
if(self.selected_fields ):
for field_name in self.selected_fields:
if self._current.has_key(field_name):continue
try:
o = obj
for attr in field_name.split("."):
o = getattr(o, attr)
if callable(o): o = o()
field_name = field_name.replace(".", "_")
if type(o) not in [types.ListType, types.DictType]:
self._current[field_name] = smart_unicode(o, strings_only=True)
else:
self._current[field_name] = o
except:
field_name = field_name.replace(".", "_")
self._current[field_name] = None
BuitlInJsonSerializer.end_object(self, obj)
def end_serialization(self):
pass
def getvalue(self):
return self.objects
class MixedSerializer(object):
#set django base.Serializer
internal_use_only = False
def __init__(self):
pass
def serialize(self, object, **options):
self.stream = options.get("stream", StringIO())
self.selected_fields = options.get("fields")
obj = self.object_to_serializable(object)
from django.core.serializers.json import DjangoJSONEncoder
simplejson.dump(obj, self.stream, cls=DjangoJSONEncoder, **options)
return self.getvalue()
def getvalue(self):
if callable(getattr(self.stream, 'getvalue', None)):
return self.stream.getvalue()
def dict_to_serializable(self, o):
for k, v in o.items():
o[k] = self.object_to_serializable(v)
return o
def list_to_serializable(self, obj):
r = []
for o in obj:
r.append(self.object_to_serializable(o))
return r
def sql_row_to_serializable(self, obj):
o = {}
if not hasattr(obj, '__json__'):
for attr in obj.field_names:
o[attr] = getattr(obj, attr)
else:
o = obj.__json__()
return o
def object_to_serializable(self, o):
if isinstance(o, types.DictType):
return self.dict_to_serializable(o)
elif isinstance(o, types.TupleType):
if len(o) <= 0:
return []
elif isinstance(o[0], QuerySet):
return self.queryset_to_serializable(*o)
elif isinstance(o[0], models.Model):
return self.django_model_to_serializable(*o)
else:
return self.list_to_serializable(o)
elif isinstance(o, QuerySet):
return self.queryset_to_serializable(o)
elif isinstance(o, models.Model):
return self.django_model_to_serializable(o)
elif isinstance(o, types.ListType):
return self.list_to_serializable(o)
elif isinstance(o, SQLRow):
return self.sql_row_to_serializable(o)
elif hasattr(o, '__json__') and callable(o.__json__):
return o.__json__()
return o
def queryset_to_serializable(self, o, args={}, *dummy):
def pre(r, param):
if param.has_key("pre"):
for i in r:
for p in param["pre"]:
o = getattr(i, p)
if callable(o): o()
del param["pre"]
return param
def merge_ext_fields(r, param):
ext_fields = []
if param.has_key("ext_fields"):
ext_fields = param['ext_fields']
del param['ext_fields']
if len(o) <= 0: return param
r = o[0]
fields = not param.has_key("fields") and \
[ f.attname for f in r._meta.local_fields ] or \
list(param['fields'])
fields.extend(ext_fields)
param['fields'] = fields
return param
django_ser = DjangoSerializer()
if args: args = pre(o, args)
if args: args = merge_ext_fields(o, args)
return django_ser.serialize(o, **args)
def django_model_to_serializable(self, o, args={}, *dummy):
r = self.queryset_to_serializable([o, ], args)
return r[0]
Serializer = MixedSerializer
|
[
"DeonWu@b18a5524-d64a-0410-9f42-ad3cd61580fb"
] |
DeonWu@b18a5524-d64a-0410-9f42-ad3cd61580fb
|
52b0ba351f610b254e933851391b0bb6f48a7e7e
|
94b603a26fd0942181d4a3da2f9f830034c74ce0
|
/adventofcode/2021/11/2.py
|
c5b6f2302004f3bb44d647548985c08755f1f847
|
[
"Unlicense"
] |
permissive
|
jan25/code_sorted
|
55336e10bb9ee74610ce1ba62617db979ddf8e26
|
2a5af4f8c95a66ccca3223a261362a17be05728f
|
refs/heads/master
| 2023-01-25T00:50:56.846461
| 2022-12-21T17:40:30
| 2022-12-21T17:40:30
| 34,155,896
| 3
| 0
|
Unlicense
| 2019-02-07T23:07:10
| 2015-04-18T06:54:30
|
C++
|
UTF-8
|
Python
| false
| false
| 1,095
|
py
|
import fileinput
from collections import deque
grid = [list(map(int, l.strip())) for l in fileinput.input()]
n, m = len(grid), len(grid[0])
def neighs(i, j):
for di in range(-1, 2):
for dj in range(-1, 2):
if di == 0 and dj == 0:
continue
ni, nj = i + di, j + dj
if ni >= 0 and nj >= 0 and ni < n and nj < m:
yield ni, nj
def fill(i, j):
q = deque()
q.append((i, j))
grid[i][j] = 0
while q:
i, j = q.popleft()
for ni, nj in neighs(i, j):
if grid[ni][nj] == 0:
continue
grid[ni][nj] += 1
if grid[ni][nj] > 9:
grid[ni][nj] = 0
q.append((ni, nj))
def step():
for row in grid:
for i in range(len(row)):
row[i] += 1
for i, row in enumerate(grid):
for j, val in enumerate(row):
if val > 9:
fill(i, j)
return sum(row.count(0) for row in grid) == n * m
for s in range(1000):
if step():
print(s + 1)
exit(0)
|
[
"abhilashgnan@gmail.com"
] |
abhilashgnan@gmail.com
|
5a1709cca4ed8430186d843c77dd3b355da9a2e5
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/lllcho_CAPTCHA-breaking/CAPTCHA-breaking-master/test_type4.py
|
cdfa7e2c847d4dde613763837b52778d127047c8
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 4,087
|
py
|
# coding:utf-8
# __author__ = 'lllcho'
# __date__ = '2015/8/4'
import cv2
import cPickle
import numpy as np
import codecs
import h5py
import theano
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
def word_simialr_score(s1, s2):
score = 0
for j in range(len(s1)):
if s1[j] == s2[j]:
score += 1
return score
def words_simmilar_score(word, words):
word_score = {}
for Word in words:
ws = word_simialr_score(word, Word)
if ws not in word_score.keys():
word_score[ws] = [Word]
else:
word_score[ws].append(Word)
return word_score
np.random.seed(123)
model_path = './model/type4_model.d5'
chars = cPickle.load(open('model/chars_type4.pkl', 'rb'))
words = cPickle.load(open('model/words_type4.pkl', 'rb'))
chars.append('A')
f = h5py.File('./model/type4_train_mean_std.h5', 'r')
x_mean = f['x_mean'][:]
x_std = f['x_std'][:][0]
f.close()
model = Sequential()
model.add(Convolution2D(32, 3, 4, 4, border_mode='full', activation='relu'))
model.add(Convolution2D(32, 32, 4, 4, activation='relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 32, 4, 4, border_mode='full', activation='relu'))
model.add(Convolution2D(64, 64, 4, 4, activation='relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64 * 8 * 8, 512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(512, 1250, activation='softmax'))
model.load_weights(model_path)
model.compile(loss='categorical_crossentropy', optimizer='adagrad')
get_predict_score = theano.function([model.layers[0].input],
model.layers[-1].get_output(train=False),
allow_input_downcast=True)
comp = 'type4_test1'
img_dir = './image/' + comp + '/'
fcsv = codecs.open("result/" + comp + '.csv', 'w', 'utf-8')
# for nb_img in range(1, 20001):
# name=comp+'_'+str(nb_img)+'.png'
import os
names = os.listdir(img_dir)
for name in names:
print name
imgname = img_dir + name
img = cv2.imread(imgname, cv2.IMREAD_COLOR)
im = 255 - img[4:-4, :, :]
t = im.shape[1] / 4.0
dd = 5
bb = np.zeros((im.shape[0], dd, 3), dtype=np.uint8)
im1 = im[:, 0:np.floor(t) + dd]
im2 = im[:, np.floor(t) - dd:np.floor(2 * t) + dd]
im3 = im[:, np.floor(2 * t) - dd:np.floor(3 * t) + dd]
im4 = im[:, np.floor(3 * t) - dd:]
imgs = np.zeros((4, 3, 32, 32))
imgs[0, :] = cv2.resize(np.concatenate((bb, im1), axis=1), (32, 32)).transpose()
imgs[1, :] = cv2.resize(im2, (32, 32)).transpose()
imgs[2, :] = cv2.resize(im3, (32, 32)).transpose()
imgs[3, :] = cv2.resize(np.concatenate((im4, bb), axis=1), (32, 32)).transpose()
imgs.astype(np.float32)
imgs = imgs - x_mean
imgs = imgs / x_std
classes = model.predict_classes(imgs, verbose=0)
model_predict_score = get_predict_score(imgs)
result = []
for c in classes:
result.append(chars[c])
word = ''.join(result)
old_word = word
if word not in words:
word_score = words_simmilar_score(word, words)
max_score = max(word_score.keys())
if max_score > 0:
candidate_words = word_score[max_score]
predict_similar_score = {}
for candidate_word in candidate_words:
diff_chars = {}
for j in range(len(candidate_word)):
if old_word[j] != candidate_word[j]:
diff_chars[j] = candidate_word[j]
diff_chars_similar_score = 0
for key, iterm in diff_chars.items():
diff_chars_similar_score += model_predict_score[key, chars.index(iterm)]
predict_similar_score[candidate_word] = diff_chars_similar_score
word = max(predict_similar_score.items(), key=lambda x: x[1])[0]
print word
fcsv.write(name + ',' + word + '\n')
fcsv.close()
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
1d0c897dba3bb6a7161f8c54cbce22b16c320822
|
8b6cd902deb20812fba07f1bd51a4460d22adc03
|
/back-end/.history/djreact/djreact/settings_20191221114946.py
|
d73de7bc2359fc8e027509c0dad39f0a615e9d53
|
[] |
no_license
|
vishaldenzil/Django-react-
|
f3a49d141e0b6882685b7eaa4dc43c84857f335a
|
35b6d41f6dacb3bddcf7858aa4dc0d2fe039ff98
|
refs/heads/master
| 2022-11-08T09:27:02.938053
| 2020-05-29T04:53:52
| 2020-05-29T04:53:52
| 267,768,028
| 0
| 1
| null | 2022-10-15T14:08:30
| 2020-05-29T04:52:20
|
Python
|
UTF-8
|
Python
| false
| false
| 3,999
|
py
|
"""
Django settings for djreact project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'n=0@g2ye2)0n0igcw(-h$b^+4g5_bby2s1q!%dnyr*y*r7@5aq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'allauth',
'allauth.account',
'corsheaders',
'rest_auth.registration',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'articles',
'users'
]
SITE_ID = 1
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
]
ROOT_URLCONF = 'djreact.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
CORS_ORIGIN_ALLOW_ALL = True
WSGI_APPLICATION = 'djreact.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': "django.db.backends.postgresql_psycopg2",
'NAME': "testData",
'USER': "postgres",
'PASSWORD': "root",
'HOST': "localhost",
}
}
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_USERNAME_REQUIRED = False
AUTH_USER_MODEL = 'users.User'
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
# 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
'rest_framework.permissions.AllowAny'
]
}
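A hardening sketch, assuming the common practice of reading secrets from the environment rather than hard-coding them (see the SECURITY WARNING comments above); the variable names DJANGO_SECRET_KEY and DJANGO_DEBUG are assumptions, not settings used by this project.
import os  # already imported at the top of this settings module

# Hypothetical override: pull the secret key and debug flag from the environment.
SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY", "dev-only-insecure-key")
DEBUG = os.environ.get("DJANGO_DEBUG", "false").lower() == "true"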
|
[
"vishal.denzil@ezedox.com"
] |
vishal.denzil@ezedox.com
|
077af677258eb500890b365e35a67a1aac4d0416
|
6e964d46b8fab9bccbd199ea7ade41297282b0a7
|
/test/PySrc/tests/test_code_tracer_loops.py
|
b466d5a43d643b812be0eb2fb5fa47d94acf4e24
|
[
"MIT"
] |
permissive
|
donkirkby/live-py-plugin
|
1a4cb87a796983245094d7c97c3e72f3cea0c540
|
165b447cc1288c94f24f1e660e0c45a6ef476826
|
refs/heads/master
| 2023-08-29T15:14:37.585327
| 2023-07-23T21:12:19
| 2023-07-23T21:12:19
| 4,332,096
| 257
| 59
|
MIT
| 2023-09-09T18:18:40
| 2012-05-15T04:41:18
|
Python
|
UTF-8
|
Python
| false
| false
| 4,499
|
py
|
from space_tracer.main import TraceRunner
def test_loop():
code = """\
i = 1
for j in range(3):
i += j
"""
expected_report = """\
i = 1
j = 0 | j = 1 | j = 2
i = 1 | i = 2 | i = 4"""
report = TraceRunner().trace_code(code)
assert report == expected_report
def test_loop_target_list():
code = """\
for a,b in [(1,2)]:
c = a + b
"""
expected_report = """\
a = 1 | b = 2
c = 3"""
tracer = TraceRunner()
report = tracer.trace_code(code)
assert report == expected_report
def test_loop_starred_target_list():
code = """\
words = ['foo', 'bar']
for (head, *tail) in words:
print(head, tail)
"""
expected_report = """\
words = ['foo', 'bar']
head = 'f' | tail = ['o', 'o'] | head = 'b' | tail = ['a', 'r']
print("f ['o', 'o']") | print("b ['a', 'r']")"""
tracer = TraceRunner()
report = tracer.trace_code(code)
assert report == expected_report
def test_loop_target_list_attribute():
code = """\
class Foo:
def do_foo(self, x):
for self.i in range(x):
print(self.i)
foo = Foo()
foo.do_foo(3)
"""
expected_report = """\
x = 3
self.i = 0 | self.i = 1 | self.i = 2
print('0') | print('1') | print('2')
"""
tracer = TraceRunner()
report = tracer.trace_code(code)
assert report == expected_report
def test_loop_target_list_attribute_complex():
code = """\
class Foo:
def do_foo(self, x):
self.state = [None]
for self.state[0] in range(x):
print(self.state)
foo = Foo()
foo.do_foo(3)
"""
expected_report = """\
x = 3
self.state = [None]
| |
print('[0]') | print('[1]') | print('[2]')
"""
tracer = TraceRunner()
report = tracer.trace_code(code)
assert report == expected_report
def test_nested_loop():
code = """\
n = 0
for i in range(2):
n += i
for j in range(3):
n += j
"""
expected_report = """\
n = 0
i = 0 | i = 1
n = 0 | n = 4
j = 0 | j = 1 | j = 2 | j = 0 | j = 1 | j = 2
n = 0 | n = 1 | n = 3 | n = 4 | n = 5 | n = 7"""
report = TraceRunner().trace_code(code)
assert report == expected_report
def test_for_else():
code = """\
i = 1
for j in range(3):
i += j
else:
i *= 10
"""
expected_report = """\
i = 1
j = 0 | j = 1 | j = 2
i = 1 | i = 2 | i = 4
i = 40"""
report = TraceRunner().trace_code(code)
assert report == expected_report
def test_while_else():
code = """\
i = 0
while i < 2:
i += 1
else:
i *= 10
"""
expected_report = """\
i = 0
|
i = 1 | i = 2
i = 20"""
report = TraceRunner().trace_code(code)
assert report == expected_report
def test_loop_conditional():
code = """\
for i in range(3):
if i == 1:
c = 5
c = 2
"""
expected_report = """\
i = 0 | i = 1 | i = 2
| |
| c = 5 |
c = 2"""
tracer = TraceRunner()
report = tracer.trace_code(code)
assert report == expected_report
def test_infinite_loop_by_count():
code = """\
n = 0
while True:
n += 1
"""
expected_report = """\
n = 0
| |
n = 1 | n = 2 | RuntimeError: live coding message limit exceeded"""
tracer = TraceRunner()
tracer.message_limit = 8
report = tracer.trace_code(code)
assert report == expected_report
def test_infinite_loop_by_width():
code = """\
n = 0
while True:
n += 1
"""
expected_report = """\
n = 0
| |
n = 1 | n = 2 | RuntimeError: live coding message limit exceeded"""
tracer = TraceRunner()
tracer.max_width = 20
report = tracer.trace_code(code)
assert report == expected_report
def test_infinite_loop_pass():
code = """\
while True:
pass
"""
expected_report = """\
RuntimeError: live coding message limit exceeded"""
tracer = TraceRunner()
tracer.message_limit = 3
report = tracer.trace_code(code)
assert report in (expected_report + '\n', '\n' + expected_report)
def test_infinite_loop_pass_in_function():
code = """\
def foo():
while True:
pass
foo()
"""
expected_report1 = """\
RuntimeError: live coding message limit exceeded
RuntimeError: live coding message limit exceeded"""
expected_report2 = """\
RuntimeError: live coding message limit exceeded
RuntimeError: live coding message limit exceeded"""
tracer = TraceRunner()
tracer.message_limit = 3
report = tracer.trace_code(code)
assert report in (expected_report1, expected_report2)
|
[
"donkirkby@gmail.com"
] |
donkirkby@gmail.com
|
ef3c7f577c74802ed75c1fe43a00148d5752b617
|
27acd9eeb0d2b9b6326cc0477e7dbb84341e265c
|
/test/vraag4/src/isbn/211.py
|
f12f4415f953aa0120695d76f38edd57b5552c69
|
[] |
no_license
|
VerstraeteBert/algos-ds
|
e0fe35bc3c5b7d8276c07250f56d3719ecc617de
|
d9215f11cdfa1a12a3b19ade3b95fa73848a636c
|
refs/heads/master
| 2021-07-15T13:46:58.790446
| 2021-02-28T23:28:36
| 2021-02-28T23:28:36
| 240,883,220
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,209
|
py
|
def som_reeks(reeks):
cijfers = [int(teken) for teken in reeks]
return sum(cijfers)
def isISBN_13(isbn):
if not type(isbn) is str:
return False
if len(isbn) != 13:
return False
if not isbn.isdigit():
return False
if isbn.find('978') != 0 and isbn.find('979') != 0:
return False
som_oneven = som_reeks(isbn[:12:2])
som_even = som_reeks(isbn[1::2])
controle = (10 - (som_oneven + 3 * som_even)%10) %10
return int(isbn[12]) == controle
def overzicht(codes):
land_codes = {"0" : "Engelstalige landen", "1" : "Engelstalige landen", "2" : "Franstalige landen", \
"3": "Duitstalige landen", "4": "Japan", "5": "Russischtalige landen", "7": "China", "6": "Overige landen", \
"8": "Overige landen", "9": "Overige landen"}
    foutief = "Fouten"  # "Fouten" = "errors"; the land_codes labels above are Dutch for English-, French-, German- and Russian-speaking countries, Japan, China, and "other countries"
overzicht = {}
for landtype in land_codes.values():
overzicht[landtype] = 0
overzicht[foutief] = 0
for code in codes:
if isISBN_13(code):
overzicht[land_codes[code[3]]] += 1
else:
overzicht[foutief] += 1
for landtype, aantal in overzicht.items():
print("{}: {}".format(landtype, aantal))
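A quick, hypothetical check of the two functions above (not part of the original exercise): the first code passes the ISBN-13 checksum implemented here, the second is deliberately malformed.
print(isISBN_13("9789027439642"))      # True: computed check digit 2 matches the last digit
overzicht(["9789027439642", "12345"])  # counts one "Overige landen" and one "Fouten"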
|
[
"bertverstraete22@gmail.com"
] |
bertverstraete22@gmail.com
|
c636f690bc9e4514b26c33b7ed2bd921baee0ad3
|
aa28417be8935d6fa369fcb526174f9e1e30479a
|
/爬虫实战/动态渲染/14.选项卡.py
|
f4109af394e44c7c95bd96a034c9f7244d153be4
|
[] |
no_license
|
YYN117/Demo
|
d6fca95ed8a1a433ef06f1f3fc2e768414e863cb
|
40690040a7422fd5d8f03a0d68f20f1be5d4a836
|
refs/heads/master
| 2020-04-14T12:29:30.129709
| 2019-01-02T13:31:10
| 2019-01-02T13:31:10
| 163,841,708
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
from selenium import webdriver
import time
browser = webdriver.Chrome()
browser.get('https://www.baidu.com')
browser.execute_script('window.open()')
print(browser.window_handles)
browser.switch_to_window(browser.window_handles[1])
browser.get('https://www.taobao.com')
time.sleep(2)
browser.switch_to_window(browser.window_handles[0])
browser.get('https://python.org')
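The same tab switching written with the non-deprecated spelling switch_to.window (the switch_to_window method used above was deprecated and later removed in Selenium 4); this sketch reuses the browser object created above.
# Non-deprecated equivalent of the two switch_to_window calls above.
browser.switch_to.window(browser.window_handles[1])
browser.get('https://www.taobao.com')
time.sleep(2)
browser.switch_to.window(browser.window_handles[0])
browser.get('https://python.org')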
|
[
"41251061+YYN117@users.noreply.github.com"
] |
41251061+YYN117@users.noreply.github.com
|
5d4c1a6a2946425a8bb1b574e9a067700ddd6bac
|
b8ec95cba7c239d8a72e0ce8a88ddb37bb938770
|
/learning/MB_lambda_IAMB_Lner.py
|
0b4858fda13a0da8311cd1324519eba9af101a04
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
artiste-qb-net/quantum-fog
|
a7b95a5177594138aaf909c23b69053bc685672a
|
5b4a3055ea14c2ee9c80c339f759fe2b9c8c51e2
|
refs/heads/master
| 2023-02-19T04:41:35.908393
| 2023-02-13T21:37:05
| 2023-02-13T21:37:05
| 47,056,346
| 95
| 34
|
NOASSERTION
| 2021-03-10T00:52:36
| 2015-11-29T08:49:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,542
|
py
|
from learning.MB_BasedLner import *
class MB_lambda_IAMB_Lner(MB_BasedLner):
"""
The MB_lambda_IAMB_Lner (lambda Incremental Association Markov Blanket
Learner) is a subclass of MB_BasedLner. See docstring for MB_BasedLner
for more info about this type of algo.
    lambda refers to the fact that it uses an extra parameter lambda between
zero and one.
See Ref. 1 below for pseudo code on which this class is based.
References
----------
1. An Improved IAMB Algorithm for Markov Blanket Discovery, by Yishi
Zhang, Zigang Zhang, Kaijun Liu, and Gangyi Qian (JCP 2010 Vol.5(11))
Attributes
----------
lam : float
extra parameter between 0 and 1. The closer it is to 1, the fewer
elements are added to MB
vtx_to_MB : dict[str, list[str]]
A dictionary mapping each vertex to a list of the vertices in its
Markov Blanket. (The MB of a node consists of its parents, children
and children's parents, aka spouses).
"""
def __init__(self, states_df, alpha, verbose=False,
vtx_to_states=None, lam=.5, learn_later=False):
"""
Constructor
Parameters
----------
states_df : pandas.DataFrame
alpha : float
verbose : bool
vtx_to_states : dict[str, list[str]]
A dictionary mapping each node name to a list of its state names.
This information will be stored in self.bnet. If
vtx_to_states=None, constructor will learn vtx_to_states
from states_df
lam : float
learn_later : bool
False if you want to call the function learn_struc() inside the
constructor. True if not.
Returns
-------
"""
self.lam = lam
MB_BasedLner.__init__(self, states_df, alpha, verbose,
vtx_to_states, learn_later)
def find_MB(self, vtx=None):
"""
This function finds the MB of vtx and stores it inside vtx_to_MB[
vtx]. If vtx=None, then it will find the MB of all the vertices of
the graph.
Parameters
----------
vtx : str
Returns
-------
None
"""
if self.verbose:
print('alpha=', self.alpha)
print('lambda=', self.lam)
vertices = self.states_df.columns
if vtx is None:
tar_list = vertices
else:
tar_list = [vtx]
self.vtx_to_MB = {}
def ci__(a, b): # H(a | b)
return DataEntropy.cond_info(self.states_df, a, b)
def MB(a):
return self.vtx_to_MB[a]
for tar in tar_list:
self.vtx_to_MB[tar] = []
# growing phase
if self.verbose:
print('\n****begin growing phase')
growing = True
while growing:
growing = False
ht_y1_min, hty1_min, y1_min = None, None, None
ht_y2_min, hty2_min, y2_min = None, None, None
ht = ci__([tar], MB(tar)) # H(tar | MB(tar))
y1_set = (set(vertices) - {tar}) - set(MB(tar))
for y1 in y1_set:
# H(tar | MB(tar), y1)
ht_y1 = ci__([tar], list(set(MB(tar)) | {y1}))
# H( tar: y1 |MB(tar))
hty1 = ht - ht_y1
if ht_y1_min is None or ht_y1 < ht_y1_min:
ht_y1_min = ht_y1
hty1_min = hty1
y1_min = y1
y2_set = y1_set - {y1_min}
for y2 in y2_set:
# H(tar | MB(tar), y2)
ht_y2 = ci__([tar], list(set(MB(tar)) | {y2}))
# H( tar: y2 |MB(tar))
hty2 = ht - ht_y2
if ht_y2_min is None or ht_y2 < ht_y2_min:
ht_y2_min = ht_y2
hty2_min = hty2
y2_min = y2
if y1_min is not None and hty1_min > self.alpha:
if y2_min is not None and hty2_min > self.alpha and\
ht_y2_min - self.lam*ht_y1_min < (1-self.lam)*ht:
self.vtx_to_MB[tar].append(y1_min)
self.vtx_to_MB[tar].append(y2_min)
growing = True
elif y1_min is not None:
self.vtx_to_MB[tar].append(y1_min)
growing = True
if self.verbose:
print('target, MB(tar) aft-growing, bef-shrinking:')
print(tar, self.vtx_to_MB[tar])
print('end growing phase')
print('****begin shrinking phase')
# shrinking phase
shrinking = True
while shrinking:
shrinking = False
for y in MB(tar):
cmi = DataEntropy.cond_mut_info(self.states_df,
[y], [tar], list(set(MB(tar)) - {y}))
if cmi < self.alpha:
self.vtx_to_MB[tar].remove(y)
shrinking = True
if self.verbose:
print('target, MB(tar) aft-shrinking:')
print(tar, self.vtx_to_MB[tar])
print('end shrinking phase')
if __name__ == "__main__":
def main():
MB_BasedLner.MB_lner_test(MB_lambda_IAMB_Lner, verbose=True)
main()
|
[
"tucci@ar-tiste.com"
] |
tucci@ar-tiste.com
|
33858cfb2091be346894cf7ae45113989b2a2dd4
|
da29f1f5b4459fbfec968bb694bedb9586f87b14
|
/new_algs/Sequence+algorithms/Selection+algorithm/pull_results2.py
|
1e464fe8c9b22f81a07ab0908c52d802e137226d
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
coolsnake/JupyterNotebook
|
547806a45a663f090f313dc3e70f779ad9b213c0
|
20d8df6172906337f81583dabb841d66b8f31857
|
refs/heads/master
| 2023-01-13T18:55:38.615312
| 2020-11-17T22:55:12
| 2020-11-17T22:55:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,501
|
py
|
"""
Pull results for genomic selection
"""
import os, sys
import pandas as pd
import numpy as np
import re
wkdir = os.path.abspath(sys.argv[0])[:-16]  # drop the 16-character script name ("pull_results2.py") to get the run directory
print(wkdir)
use = ['02_PC', '03_rrBLUP', '04_BayesB','05_BayesA','06_BRR',"07_BL"]
index = []
accuracy = []
stdev = []
sterr = []
notes = []
for j in os.listdir(wkdir):
if j.startswith("."):
pass
#elif re.search('[a-z].*_.*_.*', j): ## Format of GS dataset directories
elif re.search('rice_.*_.*', j):
print("Pulling scores for %s" % j)
for i in os.listdir(wkdir + '/' + j):
if i in use:
for k in os.listdir(wkdir + '/' + j +'/' + i):
if k.startswith('trait_'):
wkdir2 = wkdir + j +'/' + i + '/' + k + '/'
method = i[3:]
# Make 3 level index: ID, Trait, Method
index.append((j,k[6:],method))
yhat_all = pd.read_csv(wkdir2 + 'output/cv_1.csv', header=0, names = ['y', 'cv_1'])
for m in os.listdir(wkdir2 + 'output/'):
if m == 'cv_1.csv':
pass
elif m.startswith('cv_'):
number = m.split('.')[0][3:]
temp = pd.read_csv(wkdir2 + 'output/' + m, header=0, names = ['y', 'cv_' + number])
yhat_all = pd.concat([yhat_all, temp['cv_' + number]], axis = 1)
print(yhat_all)
yhat_all['yhat_mean'] = (yhat_all.filter(like='cv_')).mean(axis=1)
yhat_all['yhat_sd'] = (yhat_all.filter(like='cv_')).std(axis=1)
yhat_all.to_csv(wkdir2 + 'output/yhat_all.csv', sep=',', index=False)
quit()
elif i == '08_ML':
wkdir2 = wkdir + j +'/' + i + '/'
for l in open(wkdir2 + 'RESULTS_reg.txt').readlines():
if l.startswith('DateTime'):
pass
else:
line = l.strip().split('\t')
k = line[3]
method = line[4]
# Make 3 level index: ID, Trait, Method
index.append((j,k,method))
# Calculate acc and stdev from each run (100 cv mixes)
accuracy = np.append(accuracy, line[18])
stdev = np.append(stdev, line[19])
sterr = np.append(sterr, line[20])
notes = np.append(notes, 'na')
elif i == '09_MLP':
method = 'MLP'
wkdir3 = wkdir + j +'/' + i + '/'
mlp = pd.read_table(wkdir3 + 'RESULTS.txt', sep='\t', header=0)
mlp_mean = mlp.groupby(['Trait','Archit','ActFun','LearnRate','Beta']).agg({'Accuracy': ['mean','std']}).reset_index()
mlp_mean.columns = list(map(''.join, mlp_mean.columns.values))
mlp_mean = mlp_mean.sort_values('Accuracymean', ascending=False).drop_duplicates(['Trait'])
for i, row in mlp_mean.iterrows():
index.append((j,row['Trait'],method))
accuracy = np.append(accuracy, row['Accuracymean'])
stdev = np.append(stdev, row['Accuracystd'])
sterr = np.append(sterr, 'na')
notes = np.append(notes, row['ActFun'] + '_' + row['Archit'] + '_' + str(row['LearnRate']) + '_' + str(row['Beta']))
pd_index = pd.MultiIndex.from_tuples(index, names = ['ID','Trait','Method'])
data_array = np.column_stack((np.array(accuracy), np.array(stdev), np.array(sterr), np.array(notes)))
df_acc = pd.DataFrame(data_array, index = pd_index, columns = ('Ac_mean', 'Ac_sd', 'Ac_se', 'Notes'))
print(df_acc.head(20))
df_acc.to_csv('RESULTS.csv', sep=',')
|
[
"chenqh@uci.edu"
] |
chenqh@uci.edu
|
b3d7e17241a103dbbfcbecd1e99ab102a74ebe22
|
3001520af0cb70fb658b081d87181ad62a8354c6
|
/crawler/dedupdata.py
|
940356bed8c2639e25b146b45fe048a7d7a24f9f
|
[] |
no_license
|
praveen97uma/MalwareWebMap
|
508a5c7a09052e74b79b4e7668cade869131c6ef
|
d0e00b52e8f71e23efd4bc262b1065ca0460c922
|
refs/heads/master
| 2020-05-17T00:13:12.186252
| 2014-01-27T14:04:38
| 2014-01-27T14:04:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,257
|
py
|
import utils
import db_utils
EXPORT_EDGES_FILE = "graph.edges.csv"
EXPORT_NODES_FILE = "graph.nodes.csv"
def export_graph_data():
data_file = open(EXPORT_EDGES_FILE, "w")
db = db_utils.getDBInstance()
nodes = set([])
data_file.write("Source;Target\n")
edges = set([])
pages = db.webpages.find()
for page in pages:
ilinks = page["incoming_links"]
for link in ilinks:
if link.startswith("javascript"):
continue
if page["url"].startswith("javascript"):
continue
link = link.replace(",", "")
nodes.add(utils.domainOf(link))
page["url"] = page["url"].replace(",", "")
nodes.add(utils.domainOf(page["url"]))
edges.add((utils.domainOf(link), utils.domainOf(page["url"])))
#data_file.write(("%s\t%s\n"%(link, page["url"])))
nodes_file = open(EXPORT_NODES_FILE, "w")
nodes_file.write("Id;Label\n")
for node in nodes:
nodes_file.write("%s;%s\n"%(node, utils.domainOf(node)))
nodes_file.close()
for edge in edges:
data_file.write("%s;%s\n"%(edge[0], edge[1]))
data_file.close()
if __name__ == '__main__':
export_graph_data()
|
[
"praveen97uma@gmail.com"
] |
praveen97uma@gmail.com
|
d2e01a8372d6b5b3369a05a5af23b34c574bac70
|
b96ed10d6247e22d4fa1d28bc3314bc319d3109c
|
/LessonSample/mysite/polls/views.py
|
8520924cec34b22ae4e3758c28bd575f78e22b7f
|
[] |
no_license
|
13555785106/PythonPPT-01
|
ac1b22b9b1851f2b3ea6e4ab0a100e5f6896ee8c
|
40e5883f248cb342f3a7fc7ad12ba02ebde4c619
|
refs/heads/master
| 2020-04-26T16:49:59.675964
| 2019-03-04T07:16:21
| 2019-03-04T07:16:21
| 157,095,747
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,810
|
py
|
# -*- coding: UTF-8 -*-
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.utils import timezone
from django.views import generic
from .models import Choice, Question
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
"""
Return the last five published questions (not including those set to be
published in the future).
"""
return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
def get_queryset(self):
"""
Excludes any questions that aren't published yet.
"""
return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# Redisplay the question voting form.
return render(request, 'polls/detail.html', {
'question': question,
            'error_message': "你还没有做出一个选择。",  # i.e. "You have not made a choice yet."
})
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
|
[
"312655424@qq.com"
] |
312655424@qq.com
|
0e4fa25e30758201ddcc318774962240fd3ed97a
|
84abce44bd0278fa99e9556168290675f399834c
|
/EcalAlCaRecoProducers/config/reRecoTags/Cal_Nov2015_newGT_0T_v5.py
|
92aba913315372ce6b1b26f8e76f775b6c3957e6
|
[] |
no_license
|
ECALELFS/ECALELF
|
7c304c6b544b0f22a4b62cf942f47fa8b58abef0
|
62a046cdf59badfcb6281a72923a0f38fd55e183
|
refs/heads/master
| 2021-01-23T13:36:31.574985
| 2017-06-22T12:26:28
| 2017-06-22T12:26:28
| 10,385,620
| 1
| 9
| null | 2017-06-30T12:59:05
| 2013-05-30T15:18:55
|
C++
|
UTF-8
|
Python
| false
| false
| 1,551
|
py
|
import FWCore.ParameterSet.Config as cms
from CondCore.DBCommon.CondDBSetup_cfi import *
#candidate used for 0T rereco without updating ECAL conditions
# + scaled IC with Bon/Boff corrections and scaled ADCtoGeV
RerecoGlobalTag = cms.ESSource("PoolDBESSource",
CondDBSetup,
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
globaltag = cms.string('74X_dataRun2_2015EOY_Candidate_2016_01_17_21_50_54'),
toGet = cms.VPSet(
cms.PSet(record = cms.string("EcalIntercalibConstantsRcd"),
tag = cms.string("EcalIntercalibConstants_Run1_Run2_V03_offline"),
#connect = cms.untracked.string("sqlite_file:/afs/cern.ch/cms/CAF/CMSALCA/ALCA_ECALCALIB/RunII-IC/Cal_Nov2015/combinations/tags/db/EcalIntercalibConstants_2015_Boff_EEonly.db"),
),
cms.PSet(record = cms.string("EcalADCToGeVConstantRcd"),
tag = cms.string("EcalADCToGeVConstant_Run1_Run2_V03_offline"),
#connect = cms.untracked.string("sqlite_file:/afs/cern.ch/cms/CAF/CMSALCA/ALCA_ECALCALIB/RunII-IC/Cal_Nov2015/ADCtoGeV/tags/db/EcalADCToGeVConstant_2015_Boff_EEonly.db"),
),
)
)
|
[
"shervin.nourbakhsh@cern.ch"
] |
shervin.nourbakhsh@cern.ch
|