| column | dtype | values |
|---|---|---|
| blob_id | string | lengths 40 to 40 |
| directory_id | string | lengths 40 to 40 |
| path | string | lengths 3 to 288 |
| content_id | string | lengths 40 to 40 |
| detected_licenses | list | lengths 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5 to 115 |
| snapshot_id | string | lengths 40 to 40 |
| revision_id | string | lengths 40 to 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string | 142 classes |
| content | string | lengths 128 to 8.19k |
| authors | list | lengths 1 to 1 |
| author_id | string | lengths 1 to 132 |
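The records below are one per source file, with each cell printed on its own run of lines and separated by `|`. A minimal sketch of how such a split could be inspected programmatically, assuming it is published on the Hugging Face Hub and that a `train` split exists; the dataset path `org/dataset` is a placeholder, and only the column names come from the schema above:

```python
# Hypothetical loading sketch: stream a few rows and print per-file metadata.
# "org/dataset" stands in for the real dataset path.
from datasets import load_dataset

ds = load_dataset("org/dataset", split="train", streaming=True)
for i, row in enumerate(ds):
    print(row["repo_name"], row["path"], row["license_type"],
          row["length_bytes"], row["star_events_count"])
    if i == 4:  # the first five records are enough for a spot check
        break
```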
6eeced6d1506a1def659d8582180f495fff68a7f
|
50402cc4388dfee3a9dbe9e121ef217759ebdba8
|
/etc/MOPSO-ZDT2/ZDT2-1.py
|
d0f2faf6d992bb8b09ed659299c095a99a98486a
|
[] |
no_license
|
dqyi11/SVNBackup
|
bd46a69ec55e3a4f981a9bca4c8340944d8d5886
|
9ad38e38453ef8539011cf4d9a9c0a363e668759
|
refs/heads/master
| 2020-03-26T12:15:01.155873
| 2015-12-10T01:11:36
| 2015-12-10T01:11:36
| 144,883,382
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,413
|
py
|
'''
Created on Jan 26, 2014
@author: daqing_yi
'''
if __name__ == '__main__':
    from PerformanceAnalyzer import *
    import sys
    trial_time = 30
    figFolder = sys.path[0] + "\\zdt2"
    caseName = "ZDT2"
    fileList1 = []
    fileList2 = []
    fileList3 = []
    fileList4 = []
    for tt in range(trial_time):
        filename1 = "ZDT2-" + str(tt) + "--Div.txt"
        filename2 = "ZDT2-" + str(tt) + "--AD.txt"
        filename3 = "ZDT2-" + str(tt) + "--Spread.txt"
        filename4 = "ZDT2-" + str(tt) + "--Efficiency.txt"
        fileList1.append(filename1)
        fileList2.append(filename2)
        fileList3.append(filename3)
        fileList4.append(filename4)
    analyzer1 = PerformanceAnalyzer(fileList1, figFolder, "Diversity", 10)
    analyzer1.genData()
    analyzer1.plot(caseName)
    analyzer1.dump(caseName)
    analyzer2 = PerformanceAnalyzer(fileList2, figFolder, "Distance", 10)
    analyzer2.genData()
    analyzer2.plot(caseName)
    analyzer2.dump(caseName)
    analyzer3 = PerformanceAnalyzer(fileList3, figFolder, "Spread", 10)
    analyzer3.genData()
    analyzer3.plot(caseName)
    analyzer3.dump(caseName)
    analyzer4 = PerformanceAnalyzer(fileList4, figFolder, "Efficiency", 10)
    analyzer4.genData()
    analyzer4.plot(caseName)
    analyzer4.dump(caseName)
|
[
"walter@e224401c-0ce2-47f2-81f6-2da1fe30fd39"
] |
walter@e224401c-0ce2-47f2-81f6-2da1fe30fd39
|
1511968638f2441910615d9b97b2c2629ea64078
|
eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7
|
/google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/resources/types/product_bidding_category_constant.py
|
6aacc16b169b40875e5f6b751c1c07d2a833a97f
|
[
"Apache-2.0"
] |
permissive
|
Tryweirder/googleapis-gen
|
2e5daf46574c3af3d448f1177eaebe809100c346
|
45d8e9377379f9d1d4e166e80415a8c1737f284d
|
refs/heads/master
| 2023-04-05T06:30:04.726589
| 2021-04-13T23:35:20
| 2021-04-13T23:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,334
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v6.enums.types import product_bidding_category_level
from google.ads.googleads.v6.enums.types import product_bidding_category_status
__protobuf__ = proto.module(
package='google.ads.googleads.v6.resources',
marshal='google.ads.googleads.v6',
manifest={
'ProductBiddingCategoryConstant',
},
)
class ProductBiddingCategoryConstant(proto.Message):
r"""A Product Bidding Category.
Attributes:
resource_name (str):
Output only. The resource name of the product bidding
category. Product bidding category resource names have the
form:
``productBiddingCategoryConstants/{country_code}~{level}~{id}``
id (int):
Output only. ID of the product bidding category.
This ID is equivalent to the google_product_category ID as
described in this article:
https://support.google.com/merchants/answer/6324436.
country_code (str):
Output only. Two-letter upper-case country
code of the product bidding category.
product_bidding_category_constant_parent (str):
Output only. Resource name of the parent
product bidding category.
level (google.ads.googleads.v6.enums.types.ProductBiddingCategoryLevelEnum.ProductBiddingCategoryLevel):
Output only. Level of the product bidding
category.
status (google.ads.googleads.v6.enums.types.ProductBiddingCategoryStatusEnum.ProductBiddingCategoryStatus):
Output only. Status of the product bidding
category.
language_code (str):
Output only. Language code of the product
bidding category.
localized_name (str):
Output only. Display value of the product bidding category
localized according to language_code.
"""
resource_name = proto.Field(proto.STRING, number=1)
id = proto.Field(proto.INT64, number=10, optional=True)
country_code = proto.Field(proto.STRING, number=11, optional=True)
product_bidding_category_constant_parent = proto.Field(proto.STRING, number=12, optional=True)
level = proto.Field(proto.ENUM, number=5,
enum=product_bidding_category_level.ProductBiddingCategoryLevelEnum.ProductBiddingCategoryLevel,
)
status = proto.Field(proto.ENUM, number=6,
enum=product_bidding_category_status.ProductBiddingCategoryStatusEnum.ProductBiddingCategoryStatus,
)
language_code = proto.Field(proto.STRING, number=13, optional=True)
localized_name = proto.Field(proto.STRING, number=14, optional=True)
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
37a4bed3bf5ad368c0622bb623e70c8852cd6ba3
|
c0239d75a8199ec84ad683f945c21785c1b59386
|
/dingtalk/api/rest/CorpDingTaskCreateRequest.py
|
ebe77db44bea52c850f1888fb9ce57aede6aae7f
|
[] |
no_license
|
luss613/oauth_dingtalk
|
9f253a75ce914c577dbabfb84e97fd883e80e04b
|
1e2554642d2b16c642a031670d08efa4a74e8252
|
refs/heads/master
| 2023-04-23T01:16:33.450821
| 2020-06-18T08:22:57
| 2020-06-18T08:22:57
| 264,966,287
| 1
| 1
| null | 2020-06-18T08:31:24
| 2020-05-18T14:33:25
|
Python
|
UTF-8
|
Python
| false
| false
| 332
|
py
|
'''
Created by auto_sdk on 2018.07.25
'''
from dingtalk.api.base import RestApi
class CorpDingTaskCreateRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.task_send_v_o = None
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.corp.ding.task.create'
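# Added usage sketch (not part of the generated SDK file): constructing the
# request and reading its routing metadata; actually sending it would need
# the surrounding dingtalk SDK's client plumbing.
if __name__ == '__main__':
    req = CorpDingTaskCreateRequest()
    req.task_send_v_o = {}  # payload object; its structure depends on the API
    print(req.getHttpMethod(), req.getapiname())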
|
[
"paul.lu@belstar.com.cn"
] |
paul.lu@belstar.com.cn
|
2151cceac149e0509db788b0da44d68c4d1cd4cb
|
3e24611b7315b5ad588b2128570f1341b9c968e8
|
/Pseudo_Finder.py
|
2d5054ccbc1b1928f339f8fd026680b8d0102af6
|
[
"BSD-2-Clause"
] |
permissive
|
bioCKO/lpp_Script
|
dc327be88c7d12243e25557f7da68d963917aa90
|
0cb2eedb48d4afa25abc2ed7231eb1fdd9baecc2
|
refs/heads/master
| 2022-02-27T12:35:05.979231
| 2019-08-27T05:56:33
| 2019-08-27T05:56:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,854
|
py
|
#!/usr/bin/env python
#coding:utf-8
"""
Author: --<>
Purpose:
Created: 2015/10/19
"""
from lpp import *
import os
from optparse import OptionParser
def check_path( path ):
if not os.path.exists(path):
os.makedirs( path )
return os.path.abspath(path)+'/'
def GBLASTA( protein,assemblyresult,output ):
#os.system("""makeblastdb -in %s -title Assem -parse_seqids -out Assem -dbtype nucl"""%(assemblyresult))
COMMAND = open("gblasta_run.bat",'w')
RAW = fasta_check(open(protein,'rU'))
i=0
for t,s in RAW:
i+=1
COMMAND.write("""
genblast -P blast -q $input -t %s -o $output
"""%(assemblyresult))
os.system("""
Genblast_Run.py -i %s -s %s -c %s -o %s
"""%(
protein,COMMAND.name, i,output
)
)
def ParseGblasta(gbaresult,genewiseruncommand):
COMMAND = open(genewiseruncommand,'w')
cache_path = check_path("CACHE/")
i=0
data_cache_hash = {}
GBA = block_reading(open(gbaresult,'rU'), re.escape("//******************END*******************//") )
i=0
for e_b in GBA:
i+=1
k=0
gb_block = re.split("\n\n+", e_b)
if "for query:" not in e_b:
continue
proteinid = re.search("for query\:\s+(\S+)", e_b).group(1)
for align in gb_block[1:]:
if "gene cover" not in align:
continue
aligndata = re.search("cover\:\d+\((\S+)\%\)\|score:([^\|]+)", align)
perc = float(aligndata.group(1))
score = float(aligndata.group(2))
if perc >=80:
i+=1
if i not in data_cache_hash:
PRO= open(cache_path+'%s.pep'%(i),'w')
PRO.write(proteinseqHash[proteinid])
data_cache_hash[i] = [PRO.name]
k+=1
NUC = open(cache_path+'%s_%s.nuc'%(i,k),'w')
align_detail = align.split("\n")[0]
align_detail_list = align_detail.split("|")
subject_detail = align_detail_list[1]
scaffold_name = subject_detail.split(":")[0]
direct = align_detail_list[2]
scaffoldStart,scaffoldEND = subject_detail.split(":")[1].split("..")
scaffoldStart=int(scaffoldStart)
scaffoldEND = int(scaffoldEND)
if scaffoldStart<10000:
scaffoldStart = 0
else:
scaffoldStart =scaffoldStart -10000
scaffoldEND = scaffoldEND+10000
NUC.write(">"+scaffold_name+"__%s\n"%(scaffoldStart)+assemblyseqHash[scaffold_name][scaffoldStart:scaffoldEND]+'\n')
commandline = """Genewise_Psuedeo.py -p %s -n %s -o %s.result.gff"""%(PRO.name,NUC.name,i)
if direct =="-":
commandline += " -d"
COMMAND.write(commandline+'\n')
COMMAND.close()
os.system( "cat %s | parallel -j 64"%(COMMAND.name) )
os.system( "cat *.result.gff > %s"%(output) )
os.system(" rm *.result.gff")
#os.system("cat %s| parallel -j %s >genewise.out")
if __name__=='__main__':
usage = '''usage: python2.7 %prog [options] Kmer
Kmer is a list of the K values you want, e.g. [1, 2, 3, 4]'''
parser = OptionParser(usage =usage )
parser.add_option("-c", "--CPU", action="store",
dest="cpu",
type='int',
default = 60,
help="CPU number for each thread")
parser.add_option("-p", "--pro", action="store",
dest="protein",
help="protein sequence!!")
parser.add_option("-a", "--assembly", action="store",
dest="assembly",
help="Assemblied Genome!!")
parser.add_option("-o", "--out", action="store",
dest="output",
default = 'genewise.out',
help="The output file you want!!")
(options, args) = parser.parse_args()
cpu = options.cpu
protein = options.protein
assembly = options.assembly
output = options.output
assemblyseqHash = {}
for t,s in fasta_check(open(assembly,'rU')):
t = t.split()[0][1:]
s = re.sub("\s+",'',s)
assemblyseqHash[t]=s
proteinseqHash = {}
for t,s in fasta_check(open(protein,'rU')):
proteinseqHash[t.split()[0][1:]] = t+s
GBLASTA(protein, assembly,"geneblasta.out")
ParseGblasta("geneblasta.out", "genewise.command")
os.remove("genewise.command")
os.system("rm CACHE -rf")
os.system("rm cache -rf")
os.system( "rm *.xml")
|
[
"409511038@qq.com"
] |
409511038@qq.com
|
207c707157fd441286ecf9952084a3c11def6be1
|
9c8fdfa389eaaf2df4c8ba0e3072d94671b5a622
|
/0163. Missing Ranges.py
|
dbf13be4a24913568795bb380bbbac50fd487f69
|
[] |
no_license
|
aidardarmesh/leetcode2
|
41b64695afa850f9cc7847158abb6f2e8dc9abcd
|
4cf03307c5caeccaa87ccce249322bd02397f489
|
refs/heads/master
| 2023-02-27T11:22:09.803298
| 2021-02-07T06:47:35
| 2021-02-07T06:47:35
| 264,491,702
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
from typing import List
class Solution:
def findMissingRanges(self, nums: List[int], lower: int, upper: int) -> List[str]:
res = []
nums = [lower-1] + nums + [upper+1]
for i in range(len(nums)-1):
delta = nums[i+1] - nums[i]
if delta == 2:
res.append(str(nums[i]+1))
elif delta > 2:
res.append(str(nums[i]+1) + '->' + str(nums[i+1]-1))
return res
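# Added usage sketch (not part of the original file): the lower-1/upper+1
# sentinels make every missing range fall strictly between two consecutive
# known values, so one pass over adjacent pairs finds them all.
if __name__ == '__main__':
    # Classic example for this problem; expected: ['2', '4->49', '51->74', '76->99']
    print(Solution().findMissingRanges([0, 1, 3, 50, 75], 0, 99))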
|
[
"darmesh.aidar@gmail.com"
] |
darmesh.aidar@gmail.com
|
06a768b10284ec7d0ca364d50ef7abfd9a2060ff
|
358aaf68f3c60ebbbd86b3bc66d4e6c098bcb39e
|
/fonts/wonder16_8x16.py
|
ff96b7c5170caead9f8c94e725a350e50d913b60
|
[
"MIT"
] |
permissive
|
ccccmagicboy/st7735_mpy
|
d2de0046abd81978d5176dace45a40758377af82
|
b15f1bde69fbe6e0eb4931c57e71c136d8e7f024
|
refs/heads/master
| 2022-08-28T23:18:04.353733
| 2020-05-28T04:19:21
| 2020-05-28T04:19:21
| 254,869,035
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,756
|
py
|
"""converted from ..\fonts\WONDER16__8x16.bin """
WIDTH = 8
HEIGHT = 16
FIRST = 0x20
LAST = 0x7f
_FONT =\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x18\x3c\x3c\x3c\x3c\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x36\x36\x36\x36\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x6c\x6c\x6c\xfe\x6c\x6c\xfe\x6c\x6c\x6c\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x7c\xc6\xc0\x78\x3c\x06\xc6\x7c\x18\x18\x00\x00'\
b'\x00\x00\x00\x00\x00\x62\x66\x0c\x18\x30\x66\xc6\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\x38\x30\x76\x7e\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x0c\x0c\x0c\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0c\x18\x30\x30\x30\x30\x30\x30\x18\x0c\x00\x00\x00\x00'\
b'\x00\x00\x30\x18\x0c\x0c\x0c\x0c\x0c\x0c\x18\x30\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x6c\x38\xfe\x38\x6c\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x18\x18\x7e\x18\x18\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x0c\x0c\x18\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x02\x06\x0c\x18\x30\x60\xc0\x80\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xd6\xd6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x18\x78\x18\x18\x18\x18\x18\x18\x18\x7e\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x06\x0c\x18\x30\x60\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\x06\x06\x3c\x06\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x0c\x1c\x3c\x6c\xcc\xcc\xfe\x0c\x0c\x1e\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc0\xc0\xc0\xfc\x06\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc0\xc0\xfc\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x06\x0c\x18\x30\x30\x30\x30\x30\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\x7c\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\x7e\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x0c\x0c\x00\x00\x0c\x0c\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x0c\x0c\x00\x00\x0c\x0c\x0c\x18\x00\x00\x00'\
b'\x00\x00\x00\x0c\x18\x30\x60\xc0\x60\x30\x18\x0c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\xfe\x00\xfe\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x60\x30\x18\x0c\x06\x0c\x18\x30\x60\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x0c\x18\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xde\xde\xde\xdc\xc0\x7e\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\xc6\xc6\xc6\xfe\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x66\x66\x66\x66\xfc\x00\x00\x00\x00'\
b'\x00\x00\x3c\x66\xc2\xc0\xc0\xc0\xc0\xc2\x66\x3c\x00\x00\x00\x00'\
b'\x00\x00\xf8\x6c\x66\x66\x66\x66\x66\x66\x6c\xf8\x00\x00\x00\x00'\
b'\x00\x00\xfe\x66\x60\x60\x7c\x60\x60\x60\x66\xfe\x00\x00\x00\x00'\
b'\x00\x00\xfe\x66\x60\x60\x7c\x60\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc0\xc0\xc0\xce\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xfe\xc6\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x3c\x18\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x3c\x18\x18\x18\x18\x18\x18\xd8\xd8\x70\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xcc\xd8\xf0\xf0\xd8\xcc\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xf0\x60\x60\x60\x60\x60\x60\x62\x66\xfe\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xee\xee\xfe\xd6\xd6\xd6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xe6\xe6\xf6\xde\xce\xce\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x66\x7c\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\xc6\xd6\xd6\x7c\x06\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x78\x6c\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc0\xc0\x70\x1c\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7e\x5a\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x6c\x38\x10\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xd6\xd6\xd6\xfe\xee\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\x6c\x38\x38\x6c\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x66\x66\x66\x66\x66\x3c\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x86\x0c\x18\x30\x60\xc2\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x7c\x60\x60\x60\x60\x60\x60\x60\x60\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x80\xc0\x60\x30\x18\x0c\x06\x02\x00\x00\x00\x00'\
b'\x00\x00\x7c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x7c\x00\x00\x00\x00'\
b'\x00\x10\x38\x6c\xc6\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff'\
b'\x00\x18\x18\x18\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x78\x0c\x7c\xcc\xcc\xdc\x76\x00\x00\x00\x00'\
b'\x00\x00\xe0\x60\x60\x7c\x66\x66\x66\x66\x66\xfc\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc0\xc0\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x1c\x0c\x0c\x7c\xcc\xcc\xcc\xcc\xcc\x7e\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc6\xfe\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x1c\x36\x30\x30\xfc\x30\x30\x30\x30\x78\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x76\xce\xc6\xc6\xce\x76\x06\xc6\x7c\x00\x00'\
b'\x00\x00\xe0\x60\x60\x7c\x66\x66\x66\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x00\x38\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x0c\x0c\x00\x1c\x0c\x0c\x0c\x0c\x0c\xcc\xcc\x78\x00\x00'\
b'\x00\x00\xe0\x60\x60\x66\x66\x6c\x78\x6c\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x38\x18\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x6c\xfe\xd6\xd6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x66\x66\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x7c\x60\x60\xf0\x00\x00'\
b'\x00\x00\x00\x00\x00\x76\xcc\xcc\xcc\xcc\x7c\x0c\x0c\x1e\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x60\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc0\x7c\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x30\x30\x30\xfc\x30\x30\x30\x30\x36\x1c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xcc\xcc\xcc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xc6\xc6\x6c\x38\x10\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xd6\xd6\xd6\xfe\x6c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\x6c\x38\x6c\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xc6\xc6\xce\x76\x06\xc6\x7c\x00\x00'\
b'\x00\x00\x00\x00\x00\xfe\x86\x0c\x18\x30\x62\xfe\x00\x00\x00\x00'\
b'\x00\x00\x0e\x18\x18\x18\x70\x18\x18\x18\x18\x0e\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x18\x18\x00\x18\x18\x18\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x70\x18\x18\x18\x0e\x18\x18\x18\x18\x70\x00\x00\x00\x00'\
b'\x00\x00\x76\xdc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x10\x38\x38\x6c\x6c\xfe\x00\x00\x00\x00\x00'\
FONT = memoryview(_FONT)
|
[
"cuiwei_cv@163.com"
] |
cuiwei_cv@163.com
|
4a84f62d878637adbdc7231f34f39011cb2eb011
|
5563fc38a479bf31b158e22ad381bcc1ef6677df
|
/triangles.py
|
cac783538a7e501568406903122530725b621395
|
[] |
no_license
|
MonRes/tester_school_day5
|
e6a1d84bc32342e0e03061208458581ac4357f59
|
985fdb344bf7009c4ba3cd50910ba6b9b9fa172e
|
refs/heads/master
| 2020-03-19T05:13:38.891646
| 2018-06-03T14:30:07
| 2018-06-03T14:30:07
| 135,911,381
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 585
|
py
|
a = 2
b = 4
c = 4
if a > 0 and b > 0 and c > 0:
    if a + b > c and a + c > b and b + c > a:
        print("a triangle can be formed")
    else:
        print("it cannot be formed")
else:
    print("it cannot be formed")
# or, the preferred version
if a <= 0 or b <= 0 or c <= 0:
    print('a triangle cannot be formed - one of the lengths is negative')
elif a + b > c and a + c > b and b + c > a:
    print('A triangle can be formed')
else:
    print('a triangle cannot be formed')
# the repeated condition could be extracted into a variable, e.g. length_negative = a <= 0 or b <= 0 or c <= 0
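# Added sketch (not in the original exercise): the same checks condensed into
# a reusable predicate.
def is_triangle(a, b, c):
    # all sides must be positive and each pair must sum to more than the third
    return a > 0 and b > 0 and c > 0 and a + b > c and a + c > b and b + c > a

print(is_triangle(2, 4, 4))   # True
print(is_triangle(1, 2, 3))   # False: 1 + 2 == 3 is degenerate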
|
[
"Restek87@gmail.com"
] |
Restek87@gmail.com
|
38bca89d76a9af6298b42dea1ea91f8d1a32682f
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_starriest.py
|
b761053999a8675654b8264719f4395358c732c9
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
# class header
class _STARRIEST():
def __init__(self,):
self.name = "STARRIEST"
        self.definitions = ['starry']  # fixed: the bare name starry raises NameError; a quoted list matches self.basic below
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['starry']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
2a2b3521345749ce428ed48884a780c98dae6414
|
eb19175c18053e5d414b4f6442bdfd0f9f97e24d
|
/graphene/contrib/django/fields.py
|
ba47047e1fdf7326bacd6da7cfc98592cf5da2b6
|
[
"MIT"
] |
permissive
|
jhgg/graphene
|
6c4c5a64b7b0f39c8f6b32d17f62e1c31ca03825
|
67904e8329de3d69fec8c82ba8c3b4fe598afa8e
|
refs/heads/master
| 2020-12-25T21:23:22.556227
| 2015-10-15T19:56:40
| 2015-10-15T19:56:40
| 43,073,008
| 1
| 0
| null | 2015-09-24T14:47:19
| 2015-09-24T14:47:19
| null |
UTF-8
|
Python
| false
| false
| 3,071
|
py
|
from graphene.core.fields import (
ListField
)
from graphene import relay
from graphene.core.fields import Field, LazyField
from graphene.utils import cached_property, memoize, LazyMap
from graphene.relay.types import BaseNode
from django.db.models.query import QuerySet
from django.db.models.manager import Manager
@memoize
def get_type_for_model(schema, model):
types = schema.types.values()
for _type in types:
type_model = hasattr(_type, '_meta') and getattr(
_type._meta, 'model', None)
if model == type_model:
return _type
def lazy_map(value, func):
if isinstance(value, Manager):
value = value.get_queryset()
if isinstance(value, QuerySet):
return LazyMap(value, func)
return value
class DjangoConnectionField(relay.ConnectionField):
def wrap_resolved(self, value, instance, args, info):
schema = info.schema.graphene_schema
return lazy_map(value, self.get_object_type(schema))
class LazyListField(ListField):
def resolve(self, instance, args, info):
schema = info.schema.graphene_schema
resolved = super(LazyListField, self).resolve(instance, args, info)
return lazy_map(resolved, self.get_object_type(schema))
class ConnectionOrListField(LazyField):
@memoize
def get_field(self, schema):
model_field = self.field_type
field_object_type = model_field.get_object_type(schema)
if field_object_type and issubclass(field_object_type, BaseNode):
field = DjangoConnectionField(model_field)
else:
field = LazyListField(model_field)
field.contribute_to_class(self.object_type, self.name)
return field
class DjangoModelField(Field):
def __init__(self, model, *args, **kwargs):
super(DjangoModelField, self).__init__(None, *args, **kwargs)
self.model = model
def resolve(self, instance, args, info):
resolved = super(DjangoModelField, self).resolve(instance, args, info)
schema = info.schema.graphene_schema
_type = self.get_object_type(schema)
assert _type, ("Field %s cannot be retrieved as the "
"ObjectType is not registered by the schema" % (
self.field_name
))
return _type(resolved)
@memoize
def internal_type(self, schema):
_type = self.get_object_type(schema)
if not _type and self.object_type._meta.only_fields:
raise Exception(
"Model %r is not accessible by the schema. "
"You can either register the type manually "
"using @schema.register. "
"Or disable the field %s in %s" % (
self.model,
self.field_name,
self.object_type
)
)
return _type and _type.internal_type(schema) or Field.SKIP
def get_object_type(self, schema):
return get_type_for_model(schema, self.model)
|
[
"me@syrusakbary.com"
] |
me@syrusakbary.com
|
8278b2891590710961bc86a4918e67d99a0fd397
|
7dc4413967a57c95bda3037154d151190a9309a3
|
/django/mysite/mysite/ilib.py
|
a6d101b8121a4f3fce0e90b946e21d9a56f0aac0
|
[] |
no_license
|
connectthefuture/PythonCode
|
de0e74d81ef46ab34144172588455964d75d6648
|
01bb8c8052c2d89f0aed881f3ae886c8d04f1655
|
refs/heads/master
| 2021-05-14T23:31:26.334953
| 2016-05-21T13:04:34
| 2016-05-21T13:04:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,458
|
py
|
import re
import cStringIO
from django.http import HttpRequest
class DynamicFormException(Exception):
pass
class DynamicForm:
def __init__(self,):
self.fielddesc = []
# property, max_length, pattern, enum
def add(self, name, ** kwargs):
self.fielddesc.append((name, kwargs))
def as_table(self):
tmp = u'<tr><th><label for="id_%s">%s:</label></th><td><input id="id_%s" name="%s" type="text" /></td></tr>'
cio = cStringIO.StringIO()
for key, value in self.fielddesc:
lower_key = key.lower()
cio.write(tmp % (lower_key, key, lower_key, key))
return cio.getvalue()
def valid(self, request):
# fields = [x for x, y in self.fielddesc]
tmp = dict(self.fielddesc)
        for key, value in request.POST.items():
            if key in tmp:
                if 'max_length' in tmp[key] and len(value) > tmp[key]['max_length']:
                    raise DynamicFormException('field length too long')
                # re.search expects the pattern first; the original call had the arguments swapped
                if 'pattern' in tmp[key] and not re.search(tmp[key]['pattern'], value):
                    raise DynamicFormException("value doesn't match pattern")
# def NeedLogin():
# if not request.user.is_authenticated():
# return HttpResponseRedirect('/accounts/login')
if __name__ == '__main__':
df = DynamicForm()
df.add('A')
print(df.as_table())
|
[
"hizhouhan@gmail.com"
] |
hizhouhan@gmail.com
|
3e013ccefdef52f15ef3f49e35457dfbaad52bc4
|
be0898ceaee2a7758ffe0365b976f597b2ad26dd
|
/rls/common/recorder.py
|
15420a8f27c34b97cd49f7aeb8b188faf7054628
|
[
"Apache-2.0"
] |
permissive
|
violet712/RLs
|
1edaa6427108e3e36d513cb6038be771837ecca4
|
25cc97c96cbb19fe859c9387b7547cbada2c89f2
|
refs/heads/master
| 2023-08-25T12:04:24.174034
| 2021-10-03T15:37:32
| 2021-10-03T15:37:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,179
|
py
|
from abc import ABC, abstractmethod
from collections import defaultdict
from copy import deepcopy
from typing import Dict
import numpy as np
from rls.utils.np_utils import arrprint
class Recoder(ABC):
def __init__(self):
pass
@abstractmethod
def episode_reset(self):
pass
@abstractmethod
def episode_step(self, rewards, dones):
pass
@abstractmethod
def episode_end(self):
pass
class SimpleMovingAverageRecoder(Recoder):
def __init__(self,
n_copies,
agent_ids,
gamma=0.99,
verbose=False,
length=10):
super().__init__()
self.n_copies = n_copies
self.agent_ids = agent_ids
self.gamma = gamma
self.verbose = verbose
self.length = length
self.now = 0
self.r_list = []
self.max = defaultdict(int)
self.min = defaultdict(int)
self.mean = defaultdict(int)
self.total_step = 0
self.episode = 0
self.steps = None
self.total_returns = None
self.discounted_returns = None
self.already_dones = None
def episode_reset(self):
self.steps = defaultdict(lambda: np.zeros((self.n_copies,), dtype=int))
self.total_returns = defaultdict(lambda: np.zeros((self.n_copies,), dtype=float))
self.discounted_returns = defaultdict(lambda: np.zeros((self.n_copies,), dtype=float))
self.already_dones = defaultdict(lambda: np.zeros((self.n_copies,), dtype=bool))
def episode_step(self, rewards: Dict[str, np.ndarray], dones: Dict[str, np.ndarray]):
for id in self.agent_ids:
self.total_step += 1
self.discounted_returns[id] += (self.gamma ** self.steps[id]) * (1 - self.already_dones[id]) * rewards[id]
self.steps[id] += (1 - self.already_dones[id]).astype(int)
self.total_returns[id] += (1 - self.already_dones[id]) * rewards[id]
self.already_dones[id] = np.logical_or(self.already_dones[id], dones[id])
def episode_end(self):
# TODO: optimize
self.episode += 1
self.r_list.append(deepcopy(self.total_returns))
if self.now >= self.length:
r_old = self.r_list.pop(0)
for id in self.agent_ids:
self.max[id] += (self.total_returns[id].max() - r_old[id].max()) / self.length
self.min[id] += (self.total_returns[id].min() - r_old[id].min()) / self.length
self.mean[id] += (self.total_returns[id].mean() - r_old[id].mean()) / self.length
else:
self.now = min(self.now + 1, self.length)
for id in self.agent_ids:
self.max[id] += (self.total_returns[id].max() - self.max[id]) / self.now
self.min[id] += (self.total_returns[id].min() - self.min[id]) / self.now
self.mean[id] += (self.total_returns[id].mean() - self.mean[id]) / self.now
@property
def is_all_done(self): # TODO:
if len(self.agent_ids) > 1:
return np.logical_or(*self.already_dones.values()).all()
else:
return self.already_dones[self.agent_ids[0]].all()
@property
def has_done(self): # TODO:
if len(self.agent_ids) > 1:
return np.logical_or(*self.already_dones.values()).any()
else:
return self.already_dones[self.agent_ids[0]].any()
def summary_dict(self, title='Agent'):
_dicts = {}
for id in self.agent_ids:
_dicts[id] = {
f'{title}/total_rt_mean': self.total_returns[id].mean(),
f'{title}/total_rt_min': self.total_returns[id].min(),
f'{title}/total_rt_max': self.total_returns[id].max(),
f'{title}/discounted_rt_mean': self.discounted_returns[id].mean(),
f'{title}/discounted_rt_min': self.discounted_returns[id].min(),
f'{title}/discounted_rt_max': self.discounted_returns[id].max(),
f'{title}/sma_max': self.max[id],
f'{title}/sma_min': self.min[id],
f'{title}/sma_mean': self.mean[id]
}
if self.verbose:
_dicts[id].update({
f'{title}/first_done_step': self.steps[id][
self.already_dones[id] > 0].min() if self.has_done else -1,
f'{title}/last_done_step': self.steps[id][
self.already_dones[id] > 0].max() if self.has_done else -1
})
return _dicts
def __str__(self):
_str = f'Eps: {self.episode:3d}'
for id in self.agent_ids:
_str += f'\n Agent: {id.ljust(10)} | S: {self.steps[id].max():4d} | R: {arrprint(self.total_returns[id], 2)}'
if self.verbose:
first_done_step = self.steps[id][self.already_dones[id] > 0].min() if self.has_done else -1
last_done_step = self.steps[id][self.already_dones[id] > 0].max() if self.has_done else -1
_str += f' | FDS {first_done_step:4d} | LDS {last_done_step:4d}'
return _str
|
[
"keavnn.wjs@gmail.com"
] |
keavnn.wjs@gmail.com
|
5932b28ef3e56a2c7b55c65e689ac09cb368b2aa
|
72a03df85a6b1b06148338b9119b0b25d4fca164
|
/goods/migrations/0008_auto_20191022_0228.py
|
0a43eb86b65337692f50444b6527fb7210f08651
|
[] |
no_license
|
zeetec20/django-EComerce
|
f60bcc73ebb8d88ca06d5c8a77331681abc958ff
|
5cf8e2aed3f9babe76043337a39f1dfbd0967916
|
refs/heads/master
| 2022-12-12T03:45:47.710718
| 2019-12-06T10:31:18
| 2019-12-06T10:31:18
| 216,199,678
| 1
| 0
| null | 2022-12-08T06:55:45
| 2019-10-19T12:02:33
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 337
|
py
|
# Generated by Django 2.2.5 on 2019-10-22 02:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('goods', '0007_auto_20191018_2140'),
]
operations = [
migrations.RenameModel(
old_name='SemuaBarang',
new_name='SemuaBrand',
),
]
|
[
"jusles363@gmail.com"
] |
jusles363@gmail.com
|
3362db548136e579197bb364e3296c92ff316937
|
7aa9f79ce2dc379e1139ee5cdf545a1d8aba8f39
|
/pygame_menu/examples/other/dynamic_widget_update.py
|
5f12d964b99e455d1adc88bf769d1109ae870a2e
|
[
"MIT"
] |
permissive
|
arpruss/pygame-menu
|
7a755cad7bd36bda8750b6e820146a1037e5d73f
|
25cefb5cfc60383544d704b83a32d43dfc621c23
|
refs/heads/master
| 2021-07-23T17:51:24.536494
| 2021-05-08T17:27:47
| 2021-05-08T17:27:47
| 248,988,541
| 0
| 0
|
MIT
| 2020-04-03T17:24:25
| 2020-03-21T14:05:48
|
Python
|
UTF-8
|
Python
| false
| false
| 7,566
|
py
|
"""
pygame-menu
https://github.com/ppizarror/pygame-menu
EXAMPLE - DYNAMIC WIDGET UPDATE
Dynamically updates the widgets based on user events.
License:
-------------------------------------------------------------------------------
The MIT License (MIT)
Copyright 2017-2021 Pablo Pizarro R. @ppizarror
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-------------------------------------------------------------------------------
"""
__all__ = ['main']
import pygame
import pygame_menu
from pygame_menu.examples import create_example_window
import math
from typing import Dict, Any
class App(object):
"""
The following object creates the whole app.
"""
image_widget: 'pygame_menu.widgets.Image'
item_description_widget: 'pygame_menu.widgets.Label'
menu: 'pygame_menu.Menu'
modes: Dict[int, Dict[str, Any]]
quit_button: 'pygame_menu.widgets.Button'
quit_button_fake: 'pygame_menu.widgets.Button'
selector_widget: 'pygame_menu.widgets.Selector'
surface: 'pygame.Surface'
def __init__(self) -> None:
"""
Constructor.
"""
self.surface = create_example_window('Example - Dynamic Widget Update',
(640, 480), flags=pygame.NOFRAME)
# Load image
default_image = pygame_menu.BaseImage(
image_path=pygame_menu.baseimage.IMAGE_EXAMPLE_PYGAME_MENU
).scale(0.2, 0.2)
# Set theme
theme = pygame_menu.themes.THEME_DEFAULT.copy()
theme.title_bar_style = pygame_menu.widgets.MENUBAR_STYLE_UNDERLINE_TITLE
theme.title_close_button_cursor = pygame_menu.locals.CURSOR_HAND
theme.title_font_color = (35, 35, 35)
# This dict stores the values of the widgets to be changed dynamically
self.modes = {
1: {
'image': default_image.copy(),
'label': {
'color': theme.widget_font_color,
'size': theme.widget_font_size,
'text': 'The first one is very epic'
}
},
2: {
'image': default_image.copy().to_bw(),
'label': {
'color': (0, 0, 0),
'size': 20,
'text': 'This other one is also epic, but fancy'
}
},
3: {
'image': default_image.copy().flip(False, True).pick_channels('r'),
'label': {
'color': (255, 0, 0),
'size': 45,
'text': 'YOU D I E D'
}
}
}
# Create menus
self.menu = pygame_menu.Menu(
height=480,
onclose=pygame_menu.events.CLOSE,
theme=theme,
title='Everything is dynamic now',
width=640
)
self.selector_widget = self.menu.add.selector(
title='Pick one option: ',
items=[('The first', 1),
('The second', 2),
('The final mode', 3)],
onchange=self._on_selector_change
)
self.image_widget = self.menu.add.image(
image_path=self.modes[1]['image'],
padding=(25, 0, 0, 0) # top, right, bottom, left
)
self.item_description_widget = self.menu.add.label(title='')
self.quit_button = self.menu.add.button('Quit', pygame_menu.events.EXIT)
self.quit_button_fake = self.menu.add.button('You cannot quit', self.fake_quit,
font_color=(255, 255, 255))
self.quit_button_fake.add_draw_callback(self.animate_quit_button)
# Update the widgets based on selected value from selector get_value
# returns selected item tuple and index, so [0][1] means the second object
# from ('The first', 1) tuple
self._update_from_selection(int(self.selector_widget.get_value()[0][1]))
def animate_quit_button(
self,
widget: 'pygame_menu.widgets.Widget',
menu: 'pygame_menu.Menu'
) -> None:
"""
Animate widgets if the last option is selected.
:param widget: Widget to be updated
:param menu: Menu
:return: None
"""
if self.current == 3:
t = widget.get_counter_attribute('t', menu.get_clock().get_time() * 0.0075, math.pi)
widget.set_padding(10 * (1 + math.sin(t))) # Oscillating padding
widget.set_background_color((int(125 * (1 + math.sin(t))), 0, 0), None)
c = int(127 * (1 + math.cos(t)))
widget.update_font({'color': (c, c, c)}) # Widget font now is in grayscale
# widget.translate(10 * math.cos(t), 10 * math.sin(t))
widget.rotate(5 * t)
@staticmethod
def fake_quit() -> None:
"""
Function executed by fake quit button.
:return: None
"""
print('I said that you cannot quit')
def _update_from_selection(self, index: int) -> None:
"""
Change widgets depending on index.
:param index: Index
:return: None
"""
self.current = index
self.image_widget.set_image(self.modes[index]['image'])
self.item_description_widget.set_title(self.modes[index]['label']['text'])
self.item_description_widget.update_font(
{'color': self.modes[index]['label']['color'],
'size': self.modes[index]['label']['size']}
)
# Swap buttons using hide/show
if index == 3:
self.quit_button.hide()
self.quit_button_fake.show()
else:
self.quit_button.show()
self.quit_button_fake.hide()
def _on_selector_change(self, selected: Any, value: int) -> None:
"""
Function executed if selector changes.
:param selected: Selector data containing text and index
:param value: Value from the selected option
:return: None
"""
print('Selected data:', selected)
self._update_from_selection(value)
def mainloop(self, test: bool) -> None:
"""
App mainloop.
:param test: Test status
"""
self.menu.mainloop(self.surface, disable_loop=test)
def main(test: bool = False) -> 'App':
"""
Main function.
:param test: Indicate function is being tested
:return: App object
"""
app = App()
app.mainloop(test)
return app
if __name__ == '__main__':
main()
|
[
"pablo.pizarro@ing.uchile.cl"
] |
pablo.pizarro@ing.uchile.cl
|
89280ef30b0eb48a4d06dff7f8128783ab05c9f9
|
ce15a162d71254d86207b60ec6c1c75117f4fe7c
|
/NiaPy/algorithms/other/ts.py
|
dd8050570c98318c15e3f7ab10433f47ffb341f5
|
[
"MIT"
] |
permissive
|
sowmya-debug/NiaPy
|
eadfceabe939f08acdda87d0879abf72952d4cd1
|
1b8fa9949d238a01523a9822977e32dec4d86aa5
|
refs/heads/master
| 2022-04-18T05:20:05.140735
| 2020-04-18T16:35:30
| 2020-04-18T16:35:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,789
|
py
|
# encoding=utf8
import logging
from numpy import random as rand
from NiaPy.algorithms.algorithm import Algorithm
logging.basicConfig()
logger = logging.getLogger('NiaPy.algorithms.other')
logger.setLevel('INFO')
__all__ = ['TabuSearch']
# TODO implement algorithm
def TabuSearchF(task, SR=None, TL_size=25, rnd=rand):
	if SR is None: SR = task.bRange
	x = rnd.uniform(task.Lower, task.Upper)
	x_f = task.eval(x)
	# while not task.stopCondI():
	#   generate the neighbourhood of x
	#   evaluate each candidate not in the tabu list
	#   take the best of the evaluated candidates
	#   compare the new best with the current best
	return x, x_f
class TabuSearch(Algorithm):
r"""Implementation of Tabu Search Algorithm.
Algorithm:
Tabu Search Algorithm
Date:
2018
Authors:
Klemen Berkovič
License:
MIT
Reference URL:
http://www.cleveralgorithms.com/nature-inspired/stochastic/tabu_search.html
Reference paper:
Attributes:
Name (List[str]): List of strings representing algorithm name.
"""
Name = ['TabuSearch', 'TS']
@staticmethod
def typeParameters(): return {
'NP': lambda x: isinstance(x, int) and x > 0
}
def setParameters(self, **ukwargs):
r"""Set the algorithm parameters/arguments."""
Algorithm.setParameters(self, **ukwargs)
def move(self): return list()
def runIteration(self, task, pop, fpop, xb, fxb, **dparams):
r"""Core function of the algorithm.
Args:
task (Task): Optimization task.
pop (numpy.ndarray): Current population.
fpop (numpy.ndarray): Individuals fitness/objective values.
xb (numpy.ndarray): Global best solution.
fxb (float): Global best solutions fitness/objective value.
**dparams (dict):
Returns:
Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, dict]:
"""
return pop, fpop, xb, fxb, dparams
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
|
[
"roxor1992@gmail.com"
] |
roxor1992@gmail.com
|
56085b3164c256eb63983021c193772e29f849b1
|
de413f085b8c185ac4314a3c875bb2725ae1783a
|
/python/ThirteenTeV/Hadronizer/Hadronizer_TuneCP5_13TeV_SUSYGluGluToBBHToTauTau_M-90-amcatnlo-pythia8_cff.py
|
df7a7c19e1e37e8b2d8549bb7b5010c22c6b5bf8
|
[] |
no_license
|
good-soul/genproductions
|
17b14eade1501207c0c4f389a2d3270239acf8a7
|
12bf6275067b332930e5fc7d65f1a05575d8d549
|
refs/heads/master
| 2021-04-18T18:48:18.575337
| 2018-03-24T13:29:56
| 2018-03-24T13:29:56
| 126,669,480
| 1
| 0
| null | 2018-03-25T06:04:53
| 2018-03-25T06:04:53
| null |
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP5Settings_cfi import *
from Configuration.Generator.Pythia8aMCatNLOSettings_cfi import *
generator = cms.EDFilter("Pythia8HadronizerFilter",
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CP5SettingsBlock,
pythia8aMCatNLOSettingsBlock,
processParameters = cms.vstring(
'TimeShower:nPartonsInBorn = 2', #number of coloured particles (before resonance decays) in born matrix element
'SLHA:useDecayTable = off',
'25:onMode = off', # turn OFF all H decays
'25:onIfAny = 15', # turn ON H->tautau
'25:m0 = 90' # mass of H
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CP5Settings',
'pythia8aMCatNLOSettings',
'processParameters'
)
)
)
|
[
"perrozzi@cern.ch"
] |
perrozzi@cern.ch
|
b76ff91f6f8b759a8badf1e850fa18b4717619a1
|
7d122748fb075ffe16e82e3616cf5e5b60dee5bb
|
/custom/plm_date_bom-11.0.1.1/plm_date_bom/extended_class/mrp_bom_extension.py
|
ca28d1564b38428586c80c1d1071c319df543794
|
[] |
no_license
|
kulius/odoo11_uw
|
95cd3b9cfdb18676e61d3565901f8ded0ee537d3
|
a6f950a4c05c90ac5f53c1602ac2cda33faf41ee
|
refs/heads/master
| 2021-08-07T07:53:15.585825
| 2018-07-23T03:33:20
| 2018-07-23T03:33:20
| 131,130,935
| 1
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,842
|
py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OmniaSolutions, Your own solutions
# Copyright (C) 2010 OmniaSolutions (<http://omniasolutions.eu>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
'''
Created on 18 Jul 2016
@author: Daniel Smerghetto
'''
import logging
from odoo import models
from odoo import fields
from odoo import api
from odoo import _
from odoo.exceptions import UserError
class mrp_bom_extension_data(models.Model):
_name = 'mrp.bom'
_inherit = 'mrp.bom'
@api.multi
def _obsolete_compute(self):
'''
Verify if obsolete lines are present in current bom
'''
for bomObj in self:
obsoleteFlag = False
for bomLine in bomObj.bom_line_ids:
if bomLine.product_id.state == 'obsoleted':
obsoleteFlag = True
break
bomObj.sudo().obsolete_presents = obsoleteFlag
bomObj.sudo().write({'obsolete_presents': obsoleteFlag}) # don't remove this force write or when form is opened the value is not updated
# If store = True is set you need to provide @api.depends because odoo has to know when to compute that field.
# If you decide to compute that field each time without store you have always to put it in the view or the field will not be computed
obsolete_presents_computed = fields.Boolean(string=_("Obsolete presents computed"), compute='_obsolete_compute')
obsolete_presents = fields.Boolean(_("Obsolete presents"))
@api.onchange('bom_line_ids')
def onchangeBomLine(self):
self._obsolete_compute()
@api.multi
def action_wizard_compute_bom(self):
return {
'domain': [],
'name': _('Bom Computation Type'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'plm.temporary_date_compute',
'type': 'ir.actions.act_window',
'target': 'new',
}
@api.multi
def showAllBomsToCompute(self):
outLines = []
def recursion(bomBrwsList):
for bomBrws in bomBrwsList:
for bomLineBrws in bomBrws.bom_line_ids:
templateBrws = bomLineBrws.product_id.product_tmpl_id
bomIds = self.getBomFromTemplate(templateBrws, 'normal')
recursion(bomIds)
if not templateBrws:
logging.warning('Product %s is not related to a product template.' % (bomLineBrws.product_id.id))
continue
if templateBrws.state == 'obsoleted':
outLines.append(bomBrws.id)
recursion(self)
outLines = list(set(outLines))
return {
'type': 'ir.actions.act_window',
'name': _('Product Engineering'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'mrp.bom',
'domain': [('id', 'in', outLines)],
}
def getBomFromTemplate(self, prodTmplBrws, bomType):
'''
Return bom object from product template and bom type
'''
return self.search([('product_tmpl_id', '=', prodTmplBrws.id), ('type', '=', bomType)])
class mrp_bom_data_compute(models.Model):
_name = 'plm.temporary_date_compute'
compute_type = fields.Selection([
('update', _('Update Bom replacing obsoleted bom lines with components at the latest revision.')),
('new_bom', _('Create new bom using last revision of all components.'))
],
_('Compute Type'),
required=True)
@api.multi
def action_compute_bom(self):
'''
        Dispatch according to the chosen operation
'''
        bomIds = self.env.context.get('active_ids', [])  # only one record at a time arrives here, since the call comes from XML
if self.compute_type == 'update':
self.updateObsoleteBom(bomIds)
elif self.compute_type == 'new_bom':
self.copyObsoleteBom(bomIds)
else:
            raise UserError(_('You must select at least one option!'))
def updateObsoleteBom(self, bomIds=[], recursive=False):
'''
Update all obsoleted bom lines with last released product
'''
bomObj = self.env['mrp.bom']
prodProdObj = self.env['product.product']
for bomBrws in bomObj.browse(bomIds):
if bomBrws.type != 'normal':
                raise UserError(_('This functionality is available only for normal BOMs.'))
for bomLineBrws in bomBrws.bom_line_ids:
templateBrws = bomLineBrws.product_id.product_tmpl_id
if recursive:
bomIds = bomObj.getBomFromTemplate(templateBrws, 'normal').ids
self.updateObsoleteBom(bomIds)
if not templateBrws:
logging.warning('Product %s is not related to a product template.' % (bomLineBrws.product_id.id))
continue
if templateBrws.state == 'obsoleted':
eng_code = templateBrws.engineering_code
prodProdBrws = prodProdObj.search([('engineering_code', '=', eng_code)], order='engineering_revision DESC', limit=1)
for prodBrws in prodProdBrws:
bomLineBrws.product_id = prodBrws
if recursive:
# Check if new added product has boms
self.updateObsoleteBom(prodBrws.product_tmpl_id.bom_ids.ids)
bomBrws._obsolete_compute()
return {
'type': 'ir.actions.act_window',
'name': _('Product Engineering'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mrp.bom',
'domain': [('id', 'in', bomIds)],
}
def copyObsoleteBom(self, bomIds=[]):
'''
Copy current bom containing obsoleted components and update the copy with the last product revisions
'''
bomObject = self.env['mrp.bom']
for bomId in bomIds:
newBomBrws = bomObject.browse(bomId).copy()
self.updateObsoleteBom(newBomBrws.ids)
bomObject.browse(bomIds).write({'active': False})
return {
'type': 'ir.actions.act_window',
'name': _('Product Engineering'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mrp.bom',
'domain': [('id', 'in', newBomBrws.id)],
}
class bom_line_obsoleted_extension(models.Model):
_name = 'mrp.bom.line'
_inherit = 'mrp.bom.line'
@api.onchange('state')
def onchange_line_state(self):
'''
Force update flag every time bom line state changes
'''
for bomLineObj in self:
bomBrws = bomLineObj.bom_id
bomBrws._obsolete_compute()
|
[
"kulius@gmail.com"
] |
kulius@gmail.com
|
2ffa97dd0cdc7445f2d8b22a44b850c4c88178f4
|
c8ed3e3997475ffb27c82ee5902def1b7b6753d0
|
/src/mdscripts/insertprotein.py
|
012bb07cc6c6db5f117ba6f5578effb0c4a616b2
|
[
"BSD-3-Clause"
] |
permissive
|
awacha/mdscripts
|
b6174c0791ad50bfae1abacdae1f5865560bb889
|
831bda06557fa2d5f0899fc2f6552c9e49146cef
|
refs/heads/master
| 2020-03-22T10:05:49.910710
| 2019-09-27T07:30:52
| 2019-09-27T07:30:52
| 74,377,374
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,762
|
py
|
#!/usr/bin/env python
import argparse
import os
import re
import subprocess
import sys
def get_areaperlipid(areafile):
with open(areafile, 'rt', encoding='utf-8') as f:
total, upperleaflet, lowerleaflet = f.readline().split()
return float(total), float(upperleaflet), float(lowerleaflet)
def shrink(inputfile, shrinkfactor, lipidname, searchcutoff, shrunkfile, gridspacing, areafile):
result = subprocess.run(
['perl', 'inflategro.pl', inputfile, str(shrinkfactor), lipidname, str(searchcutoff), shrunkfile,
str(gridspacing), areafile], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def inflate(inputfile, inflatedfile, inflategro, inflationfactor, lipidname, searchcutoff, gridspacing,
areafile='areaperlipid.dat'):
result = subprocess.run(
['perl', inflategro, inputfile, str(inflationfactor), lipidname, str(searchcutoff), inflatedfile,
str(gridspacing), areafile], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
result.check_returncode()
except subprocess.CalledProcessError:
print(result.stdout)
print(result.stderr)
raise
return get_areaperlipid(areafile)
def find_lipid_indices(inputfile, lipidname):
"""Find the lipid indices in the .gro file"""
with open(inputfile, 'rt', encoding='utf-8') as f:
        matches = [re.match(r'\s*(?P<index>\d+)%s\s+' % lipidname, l)
                   for l in f]
indices = {int(m.group('index')) for m in matches if m is not None}
return indices
def adjust_topology(topology, newtopology, moleculename, number):
"""Adjust the topology to have the correct number of the given molecules"""
with open(topology, 'rt', encoding='utf-8') as topin:
with open(newtopology, 'wt', encoding='utf-8') as topout:
molecules_seen = False
while topin:
l = topin.readline()
if not l:
break
if re.match('\s*\[\s*molecules\s*\]', l):
molecules_seen = True
elif re.match('\s*\[\s*', l):
molecules_seen = False
if re.match('\s*%s\s+' % moleculename, l) and molecules_seen:
topout.write('{} {:d}\n'.format(moleculename, number))
else:
topout.write(l)
def run():
parser = argparse.ArgumentParser(description="Insert a protein by using InflateGRO")
parser.add_argument('-i', action='store', dest='inflationfactor', type=float, help='Inflation factor', default=4)
parser.add_argument('-d', action='store', dest='shrinkfactor', type=float, help='Shrinking factor', default=0.95)
parser.add_argument('-l', action='store', dest='lipidname', type=str, help='Lipid name')
parser.add_argument('-f', action='store', dest='inputfile', type=str, help='Input .gro file')
parser.add_argument('-c', action='store', dest='searchcutoff', type=float, help='Search cutoff (Ångström)',
default=14)
parser.add_argument('-g', action='store', dest='gridspacing', type=float, help='Grid spacing (Ångström)', default=5)
parser.add_argument('-t', action='store', dest='topology', type=str, help='Topology file (.top)',
default='topol.top')
parser.add_argument('-m', action='store', dest='mdpfile', type=str, help='.mdp file for energy minimization',
default='minim.mdp')
parser.add_argument('-o', action='store', dest='finalgro', type=str, help='The output .gro file',
default='confout.gro')
parser.add_argument('--inflategro', action='store', dest='inflategro', type=str,
help='path to the inflategro.pl script',
default='inflategro.pl')
# parser.add_help()
args = vars(parser.parse_args())
print(args)
if (args['lipidname'] is None) or (args['inputfile'] is None):
parser.print_help()
sys.exit(1)
# inflate the lipids
indices_pre = find_lipid_indices(args['inputfile'], args['lipidname'])
inflatedfile = os.path.splitext(args['inputfile'])[0] + '_inflated.gro'
# do a dummy inflation just to calculate the area per lipid
areaperlipid = []
areaperlipid.append(inflate(args['inputfile'], os.devnull, args['inflategro'], 1.0, args['lipidname'],
args['searchcutoff'], args['gridspacing']))
# now inflate for real.
areaperlipid.append(
inflate(args['inputfile'], inflatedfile, args['inflategro'], args['inflationfactor'], args['lipidname'],
args['searchcutoff'], args['gridspacing']))
indices = find_lipid_indices(inflatedfile, args['lipidname'])
indices_removed = [i for i in indices_pre if i not in indices]
print('{:d} lipids removed during inflation:'.format(len(indices_removed)),
', '.join(str(i) for i in indices_removed))
# update the topology
topology = os.path.splitext(args['topology'])[0] + '_shrink0.top'
adjust_topology(args['topology'], topology, args['lipidname'], len(indices))
    # do the energy minimization
minimize(inflatedfile, args['mdpfile'], topology) # -> confout.gro
i = 0
while areaperlipid[-1][0] > areaperlipid[0][0]:
i += 1
print('Shrinking step #{:d}'.format(i))
# shrink the structure
indices_pre = indices
        shrunkfile = os.path.splitext(args['inputfile'])[0] + '_shrunk{:d}.gro'.format(i)  # assumed fix: the original format string had no {:d} placeholder
areaperlipid.append(
inflate('confout.gro', shrunkfile, args['inflategro'], args['shrinkfactor'], args['lipidname'],
0, args['gridspacing'])
)
print('Area per lipid: {:f}'.format(areaperlipid[-1][0]))
indices = find_lipid_indices(shrunkfile, args['lipidname'])
        indices_removed = [j for j in indices_pre if j not in indices]
print('{:d} lipids removed: {}'.format(len(indices_removed), ', '.join(str(x) for x in indices_removed)))
        topology = os.path.splitext(args['topology'])[0] + '_shrink{:d}.top'.format(i)  # assumed fix: same missing {:d}, matching the '_shrink0.top' used before the loop
adjust_topology(args['topology'], topology, args['lipidname'], len(indices))
minimize(shrunkfile, args['mdpfile'], topology)
print('Shrinking done. Area per lipid history:')
for apl in areaperlipid:
print('{}\t{}\t{}'.format(*apl))
finaltop = os.path.splitext(args['topology'])[0] + '_insertprotein.top'
if args['finalgro'] != 'confout.gro':
os.rename('confout.gro', args['finalgro'])
adjust_topology(args['topology'], finaltop, args['lipidname'], len(indices))
os.rename(finaltop, args['topology'])
print('You can find the final structure in {}. The topology file {} has been adjusted'.format(args['finalgro'],
args['topology']))
def minimize(grofile, mdpfile, topology, tprfile='shrinking.tpr'):
print('Minimizing...')
result = subprocess.run(['gmx', 'grompp', '-c', grofile, '-f', mdpfile, '-p', topology, '-o', tprfile],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
result.check_returncode()
except subprocess.CalledProcessError:
# print(result.stdout.decode('utf-8'))
print(result.stderr.decode('utf-8'))
# print(*(result.stdout.split('\n')))
# print(*(result.stderr.split('\n')))
raise
result = subprocess.run(['gmx', 'mdrun', '-s', tprfile], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
result.check_returncode()
except subprocess.CalledProcessError:
# print(result.stdout.decode('utf-8'))
print(result.stderr.decode('utf-8'))
raise
|
[
"awacha@gmail.com"
] |
awacha@gmail.com
|
f271b8ae35a2d87f5a6edfd3a2164f29bfca683e
|
5781bda84c1af759e7b0284f0489d50e68044c89
|
/app/model/network.py
|
0fd902e34350e3e8251d9ad86c8abc47d54292d6
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
Stanford-PERTS/triton
|
43306a582630ac6ef8d2d14c8b2a56279335a7fb
|
5a4f401fc7019d59ce4c41eafa6c5bda822fae0a
|
refs/heads/master
| 2022-10-17T11:51:10.220048
| 2020-06-14T17:37:54
| 2020-06-14T17:37:54
| 272,251,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,984
|
py
|
"""
Network
===========
Network, one-to-one with its team, with all classroom's students participating,
comprised of growth conditions.
"""
import logging
import string
from model import SqlModel, SqlField as Field
import mysql_connection
import os_random
class InvalidNetworkAssociation(Exception):
"""Provided id(s) are circular, of the wrong kind, or otherwise invalid."""
pass
class Network(SqlModel):
table = 'network'
py_table_definition = {
'table_name': table,
'fields': [
# name, type, length, unsigned, null, default, on_update
Field('uid', 'varchar', 50, None, False, None, None),
Field('short_uid', 'varchar', 50, None, False, None, None),
            Field('created', 'datetime', None, None, False, SqlModel.sql_current_timestamp, None),
            Field('modified', 'datetime', None, None, False, SqlModel.sql_current_timestamp, SqlModel.sql_current_timestamp),
Field('name', 'varchar', 200, None, False, None, None),
Field('program_id', 'varchar', 50, None, False, None, None),
            Field('association_ids', 'varchar', 3500, None, False, '[]', None),
Field('code', 'varchar', 50, None, False, None, None),
],
'primary_key': ['uid'],
'indices': [
{
'unique': True,
'name': 'code',
'fields': ['code'],
},
],
'engine': 'InnoDB',
'charset': 'utf8mb4',
'collate': 'utf8mb4_unicode_ci',
}
json_props = ['association_ids']
@classmethod
def create(klass, **kwargs):
if 'code' not in kwargs:
kwargs['code'] = klass.generate_unique_code()
# else the code is specified, and if it's a duplicate, MySQL will raise
# an exception b/c there's a unique index on that field.
return super(klass, klass).create(**kwargs)
@classmethod
def generate_unique_code(klass):
chars = string.ascii_uppercase + string.digits
for x in range(5):
code = ''.join(os_random.choice(chars) for x in range(6))
matches = klass.get(code=code)
if len(matches) == 0:
break
if len(matches) > 0:
raise Exception("After five tries, could not generate a unique"
"network invitation code.")
return code
@classmethod
def query_by_user(klass, user, program_id=None):
if len(user.owned_networks) == 0:
return []
query = '''
SELECT *
FROM `{table}`
WHERE `uid` IN ({ids}) {program_clause}
ORDER BY `name`
'''.format(
table=klass.table,
ids=','.join('%s' for uid in user.owned_networks),
program_clause='AND `program_id` = %s' if program_id else ''
)
params = tuple(user.owned_networks +
([program_id] if program_id else []))
with mysql_connection.connect() as sql:
row_dicts = sql.select_query(query, params)
return [klass.row_dict_to_obj(d) for d in row_dicts]
def before_put(self, init_kwargs, *args, **kwargs):
# Allow this to raise an exception to prevent bad associations from
# being saved.
self.associated_organization_ids(pending_network=self)
if self.uid in self.association_ids:
raise InvalidNetworkAssociation(
"Networks can't reference themselves: {}".format(self.uid)
)
def associated_organization_ids(self, depth=0, pending_network=None):
"""Traverse all network-to-network relationships to associated orgs.
Returns a flat and unique list of org ids.
"""
# While we support network-to-network, this recursive function could
# generate many inefficient db calls if we get carried away.
if depth >= 4:
raise InvalidNetworkAssociation(
"Too much depth in network associations: {}"
.format(self.uid)
)
org_ids = set()
for assc_id in self.association_ids:
kind = SqlModel.get_kind(assc_id)
if kind == 'Network':
# Note! This function is often run as a before_put check that
# the associations are valid. This means we have to consider
# the as-of-yet-unsaved "root" network (the `pending_network`)
# and not any version of it we might fetch from the db in order
# to catch the introduction of circular references.
if pending_network and assc_id == pending_network.uid:
child_network = pending_network
else:
child_network = Network.get_by_id(assc_id)
if child_network:
child_org_ids = child_network.associated_organization_ids(
depth=depth + 1,
pending_network=pending_network,
)
org_ids.update(child_org_ids)
else:
# No exception here because we don't want Networks to
# become unusable if an associated thing gets deleted.
# @todo: consider having this actually remove the
# association ids from the list.
logging.warning(
"Bad reference in {}: association {} doesn't exist."
.format(self.uid, assc_id)
)
elif kind == 'Organization':
org_ids.add(assc_id)
else:
raise InvalidNetworkAssociation(
"Invalid association kind: {}".format(kind))
return org_ids
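# A hypothetical usage sketch (assumes a configured MySQL connection; the
# names below are illustrative, not from this codebase):
#
#     net = Network.create(name='District A', program_id='Program_X')
#     net.association_ids = [org.uid]  # Organization and/or Network uids
#     net.before_put(None)  # raises InvalidNetworkAssociation on bad references
#     org_ids = net.associated_organization_ids()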
|
[
"chris@perts.net"
] |
chris@perts.net
|
46134b5c30ca0b532262f67addad92fdbd03a9eb
|
1a1b7f607c5e0783fd1c98c8bcff6460e933f09a
|
/core/lib/password_lib.py
|
6cb7556dd99b92dd6678be4ca31f740a93006b5b
|
[] |
no_license
|
smrmohammadi/freeIBS
|
14fb736fcadfaea24f0acdafeafd2425de893a2d
|
7f612a559141622d5042614a62a2580a72a9479b
|
refs/heads/master
| 2021-01-17T21:05:19.200916
| 2014-03-17T03:07:15
| 2014-03-17T03:07:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,452
|
py
|
import crypt
import random
import re
import types
def getPasswords(_count,_type,_len):
"""
generate _count password of _type, and return a list of Password instances
_type(integer): password contains 1: alphabets only, 2: digits only, 3:alphabets + digits
"""
if _type==1:
chars="abcdefghijklmnopqrstuvwxyz"
elif _type==2:
chars="1234567890"
else:
chars="abcdefghijkmnpqrstuvwxyz23456789" #don't include 1&l , 0&o they are hard to distinguish
return map(lambda x:Password(generateRandomPassword(chars,_len)),range(_count))
def generateRandomPassword(chars,_len):
"""
generate a random password from characters in "chars" and length of "_len"
"""
return "".join(map(lambda x:chars[random.randint(0,len(chars)-1)],range(_len)))
class Password:
pass_chars_match=re.compile("[^A-Za-z0-9_\-]")
def __init__(self,password):
self.password=password
def __eq__(self,password_obj):
if type(password_obj)==types.StringType:
password_obj=Password(password_obj)
if self.isMd5Hash():
enc_pass=self.getMd5Crypt()
return enc_pass==password_obj.getMd5Crypt(enc_pass)
elif password_obj.isMd5Hash():
enc_pass=password_obj.getMd5Crypt()
return enc_pass==self.getMd5Crypt(enc_pass)
else:
return self.getPassword()==password_obj.getPassword()
def checkPasswordChars(self):
"""
        Check password characters.
        Return 1 if they are OK and 0 if they are not.
"""
if not len(self.password):
return 0
if self.pass_chars_match.search(self.password) != None:
return 0
return 1
def getMd5Crypt(self,salt=None):
"""
md5crypt "self.password" with "salt",
If "salt" is None,a new salt will be randomly generated and used
If "text" is already md5crypted, return it, else return crypted pass
"""
if self.isMd5Hash():
return self.password
else:
return self.__md5Crypt(salt)
def getPassword(self):
return self.password
def __md5Crypt(self,salt):
if salt==None:
salt=self.__generateRandomSalt()
return crypt.crypt(self.password,salt)
def __generateRandomSalt(self):
salt='$1$'
for i in range(8):
rand=random.randint(0,61)
if rand<10:
salt+=str(rand)
elif rand<36:
salt+=chr(rand-10+65)
else:
salt+=chr(rand-36+97)
salt += '$'
return salt
def isMd5Hash(self):
if self.password[0:3]=='$1$':
return 1
return 0
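# A hypothetical usage sketch (Python 2):
#
#     passwords = getPasswords(3, 3, 8)  # three 8-char alphanumeric passwords
#     print passwords[0].getPassword()
#     print passwords[0].getMd5Crypt()   # e.g. '$1$3f9a2c71$...'
#     print Password('secret') == Password('secret')  # True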
|
[
"farshad_kh"
] |
farshad_kh
|
7292c8b2f5ac0b7e96916f04b5a65237836d49e9
|
766ca0a00ad1df5163306d2d5a6f722bc67002d3
|
/mailviews/tests/manage.py
|
1549d37fb3ba441106c14033ab25cfa33112d0f1
|
[
"Apache-2.0"
] |
permissive
|
agroptima/django-mailviews
|
8999746eff926661635160eee7b743331737f0bc
|
b75fabadad66a697592abb98a417f6efec55a88d
|
refs/heads/master
| 2021-01-24T12:03:52.787509
| 2019-11-13T13:49:15
| 2019-11-13T13:49:15
| 123,114,820
| 1
| 0
|
Apache-2.0
| 2019-11-13T13:49:17
| 2018-02-27T10:43:48
|
Python
|
UTF-8
|
Python
| false
| false
| 396
|
py
|
#!/usr/bin/env python
import logging
import sys
from mailviews.tests import settings
logging.basicConfig(level=logging.DEBUG)
if __name__ == "__main__":
try:
from django.core.management import execute_manager
execute_manager(settings)
except ImportError:
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"ted@kaemming.com"
] |
ted@kaemming.com
|
17c8fd8389e918362c50a26cc24b9369815a1a80
|
2dd26e031162e75f37ecb1f7dd7f675eeb634c63
|
/examples/asr/asr_hybrid_transducer_ctc/speech_to_text_hybrid_rnnt_ctc_bpe.py
|
2de150c7132853121bcc899167c134fc7ffb54d0
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/NeMo
|
1b001fa2ae5d14defbfd02f3fe750c5a09e89dd1
|
c20a16ea8aa2a9d8e31a98eb22178ddb9d5935e7
|
refs/heads/main
| 2023-08-21T15:28:04.447838
| 2023-08-21T00:49:36
| 2023-08-21T00:49:36
| 200,722,670
| 7,957
| 1,986
|
Apache-2.0
| 2023-09-14T18:49:54
| 2019-08-05T20:16:42
|
Python
|
UTF-8
|
Python
| false
| false
| 3,432
|
py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Preparing the Tokenizer for the dataset
Use the `process_asr_text_tokenizer.py` script under <NEMO_ROOT>/scripts/tokenizers/ in order to prepare the tokenizer.
```sh
python <NEMO_ROOT>/scripts/tokenizers/process_asr_text_tokenizer.py \
    --manifest=<path to train manifest files, separated by commas>
OR
    --data_file=<path to text data, separated by commas> \
--data_root="<output directory>" \
--vocab_size=<number of tokens in vocabulary> \
--tokenizer=<"spe" or "wpe"> \
--no_lower_case \
--spe_type=<"unigram", "bpe", "char" or "word"> \
--spe_character_coverage=1.0 \
--log
```
# Training the model
```sh
python speech_to_text_hybrid_rnnt_ctc_bpe.py \
# (Optional: --config-path=<path to dir of configs> --config-name=<name of config without .yaml>) \
model.train_ds.manifest_filepath=<path to train manifest> \
model.validation_ds.manifest_filepath=<path to val/test manifest> \
model.tokenizer.dir=<path to directory of tokenizer (not full path to the vocab file!)> \
model.tokenizer.type=<either bpe or wpe> \
model.aux_ctc.ctc_loss_weight=0.3 \
trainer.devices=-1 \
trainer.max_epochs=100 \
model.optim.name="adamw" \
model.optim.lr=0.001 \
model.optim.betas=[0.9,0.999] \
model.optim.weight_decay=0.0001 \
    model.optim.sched.warmup_steps=2000 \
exp_manager.create_wandb_logger=True \
exp_manager.wandb_logger_kwargs.name="<Name of experiment>" \
exp_manager.wandb_logger_kwargs.project="<Name of project>"
```
# Fine-tune a model
For documentation on fine-tuning this model, please visit -
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/configs.html#fine-tuning-configurations
"""
import pytorch_lightning as pl
from omegaconf import OmegaConf
from nemo.collections.asr.models import EncDecHybridRNNTCTCBPEModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(
config_path="../conf/conformer/hybrid_transducer_ctc/", config_name="conformer_hybrid_transducer_ctc_bpe"
)
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
asr_model = EncDecHybridRNNTCTCBPEModel(cfg=cfg.model, trainer=trainer)
# Initialize the weights of the model from another model, if provided via config
asr_model.maybe_init_from_pretrained_checkpoint(cfg)
trainer.fit(asr_model)
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:
if asr_model.prepare_test(trainer):
trainer.test(asr_model)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
|
[
"noreply@github.com"
] |
NVIDIA.noreply@github.com
|
bece9a5234650c40fc71e3cf6f5df1b6f1412b8e
|
dcba6985b2b0d4743c2eefa44ecd0ff6dfb0e280
|
/day7/note/demo_窗口滚动.py
|
ea934a97838f987855545fff7c0c918b1893d2a4
|
[] |
no_license
|
liyaozr/web_auto
|
3c16da295ff5d6c33303f0c6176acf53f8a8cbd6
|
5a33365bfac3fc6afe07a93f9ef7935c30bc3f56
|
refs/heads/master
| 2021-04-13T21:49:29.677090
| 2020-04-11T07:22:17
| 2020-04-11T07:22:17
| 249,192,474
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
driver = webdriver.Chrome(executable_path=r"d:\chromedriver.exe")
# add an implicit wait
driver.implicitly_wait(30)
driver.get("https://www.12306.cn/index/")
# e = driver.find_element_by_class_name('mr')
# scroll the element into the visible area (for clicking or other follow-up actions)
# e.location_once_scrolled_into_view
driver.execute_script("window.scrollTo(0, document.body.scrollHeight)")
time.sleep(2)
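# A hedged alternative: scroll one specific element into view with JavaScript
# (reuses the class name from the commented-out lookup above):
# e = driver.find_element_by_class_name('mr')
# driver.execute_script("arguments[0].scrollIntoView();", e)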
|
[
"lyz_fordream@163.com"
] |
lyz_fordream@163.com
|
e000bcf1bfe5e0f03b0cc8a584f325a2051a6376
|
b7b2f80ab5e1ee0ea028576e3014b62b8d3a8d7e
|
/neural/neural-005/neunet.py
|
14c865c2367af10d1782c0e97d545ba6a6697690
|
[] |
no_license
|
pglen/pgpygtk
|
4d1405478a714f003984cf3e3db04ff1f767470b
|
33f58010e304f1a312f2356de453ecedb7aa21ef
|
refs/heads/master
| 2021-01-22T01:18:52.238415
| 2019-01-01T01:37:24
| 2019-01-01T01:37:24
| 102,215,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,780
|
py
|
#!/usr/bin/env python
# ------------------------------------------------------------------------
# Neural network
import sys
from neulev import *
# ------------------------------------------------------------------------
# Globals
verbose = 0
pgdebug = 0
def pn(num):
return "%+0.3f" % num
# ------------------------------------------------------------------------
# The whole net:
# __ __
# --| | /------| |
# | |---x | |-----
# --|__| \ /----|__|
# __ / __
# --| | / \----| |
# | |---x | |-----
# --|__| \------|__|
#
class neunet():
# --------------------------------------------------------------------
# neumap = Spec of the network to create. Layer description in
# tuple in the form of inputs, neurons, outputs
    # Generally the number of outputs and neurons match, as a neuron is
    # defined here as a unit with a single output
def __init__(self, neumap):
# Undo related
self.last_neuron = None
self.last_bias = self.last_bias2 = None
self.last_weight = None
        self.last_post = None
        self.last_multi = None
# Store a copy of the parameters
self.neumap = neumap[:]
self.curr = 0 # Current Neuron in creation progress
# Create neurons
self.levarr = []
for ins, neus, outs in neumap:
if verbose:
print "creating level", self.curr
lev = neulev(self, ins, neus, outs)
self.levarr.append(lev)
self.curr += 1
# Diagnostic dump
def dump(self):
#print self
        for lev_idx, bb in enumerate(self.levarr):
            print "Level ", lev_idx
            for cc in bb.membarr:
                print " Neu:", lev_idx, cc.num
for dd in cc.tentarr:
print " Tent:",
print " [ in:", pn(dd.input), "w:", pn(dd.weight), "m:", pn(dd.multi), \
"b:", pn(dd.bias), "b2:", pn(dd.bias2), "p:", pn(dd.post), "]"
print
print " ",
print "%+0.3f " % cc.output,
print
# Reverse the last poke
def undo(self):
if self.last_neuron != None:
self.last_neuron.bias = self.last_bias
self.last_neuron.parent.bias = self.last_bias2
self.last_neuron.weight = self.last_weight
self.last_neuron.post = self.last_post
self.last_neuron.multi = self.last_multi
self.last_neuron = None
else:
print "duplicate undo"
# Recalculate whole net
def fire(self):
xlen = len(self.levarr)
for bb in range(xlen-1, -1, -1):
if verbose:
print "firing level", bb
self.levarr[bb].fire()
if bb > 0:
self._transfer(self.levarr[bb], self.levarr[bb - 1])
#print
# Propagate down the net
def _transfer(self, src, targ):
if verbose:
print "transfer src", src.curr, "targ", targ.curr
nlen = len(src.membarr); tlen = len(targ.membarr[0].tentarr)
        for aa in range(tlen): # tentacle loop
for bb in range(nlen): # neuron loop
if pgdebug > 3:
print " transfer ", "tent", aa, "neu", bb, "src", bb, src.membarr[bb].output
try:
targ.membarr[bb].tentarr[aa].input = src.membarr[aa].output
except:
print sys.exc_info()
def showin(self):
#print "NeuNet input:",
arr = self.levarr[len(self.levarr) - 1]
for aa in arr.membarr:
for bb in aa.tentarr:
print "%+0.3f" % bb.input,
print
def showout(self):
#print "NeuNet output:",
arr = self.levarr[0]
for aa in arr.membarr:
print "%+0.3f" % aa.output,
print
def getout(self):
ret = []; arr = self.levarr[0]
for aa in arr.membarr:
ret.append(aa.output)
return ret
def sum(self):
xsum = 0.
arr = self.levarr[len(self.levarr) - 1]
for aa in arr.membarr:
xsum += aa.output
return xsum
def randtip(self):
randmemb(self.levarr).randtip()
# --------------------------------------------------------------------
# Set input value on the basis of the data coming in
def setinputbits(self, val):
#print "setinput", val, type(val)
inparr = self.levarr[len(self.levarr)-1];
xlen = len(inparr.membarr);
xshift = 1; xx = 0.
#print "xlen", xlen
for aa in range(xlen):
if val & xshift != 0: xx = 1.
else: xx = 0.
print "bit", aa, ":", xx, " xshift ", xshift
for bb in range(xlen):
inparr.membarr[aa].tentarr[bb].input = xx
xshift <<= 1
print
def setinput(self, val, ignore = True):
#print "setinput", val, type(val)
inparr = self.levarr[len(self.levarr)-1];
xlen = len(inparr.membarr)
ylen = len(inparr.membarr[0].tentarr)
#print xlen, ylen, len(val)
if not ignore:
if xlen * ylen != len(val):
msg = "Input size must match network size of %d " % (xlen * ylen)
raise ValueError(msg)
cnt = 0
for aa in range(xlen):
for bb in range(ylen):
inparr.membarr[aa].tentarr[bb].input = val[cnt]
cnt += 1
# Compare outputs with expected data
def cmp(self, val):
diff = 0; outarr = self.levarr[0].membarr
xlen = len(outarr)
for aa in range(xlen):
diff += abs(val[aa] - outarr[aa].output)
return diff / xlen
# Train this particular input to expected output
def trainone(self, val, passes = 1000):
#print "origin:", ; neu.showout()
cnt = 0; cnt2 = 0
diff = 0.; old_sum = -100.
for aa in range(passes):
self.randtip()
self.fire()
diff = self.cmp(val)
if abs(diff) >= abs(old_sum):
#print sum
self.undo()
#self.fire()
#print "undone:",
else:
print " ", "%+0.3f " % diff,
cnt += 1
#neu.showout()
old_sum = diff
#if diff < 0.01:
# break
cnt2 += 1
print
return cnt
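# A hypothetical usage sketch (depends on the neulev module; the layer map and
# training target below are illustrative only):
#
#     net = neunet([(4, 4, 4), (4, 4, 4)])  # (inputs, neurons, outputs) per level
#     net.setinput([0.5] * 16)              # one value per input tentacle
#     net.fire()
#     net.trainone([1., 0., 0., 0.], passes=200)
#     net.showout()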
|
[
"peterglen99@gmail.com"
] |
peterglen99@gmail.com
|
003afde634b2dbdf9963104880cecb922fe56bfa
|
c6818c06aacb1eca1fffa8bbc51b6f3aac25c177
|
/acre/settings.py
|
039fa2a786d7f1bc584f1052a125472bea4cb0ef
|
[] |
no_license
|
Acon94/ACRE
|
2d0769780c9f81eba05085ffd8b0af225666d6de
|
73622a6dc4ba0f30e8d3e90b02d23c8efd14a5e1
|
refs/heads/master
| 2022-08-02T02:07:53.004308
| 2020-05-29T15:25:50
| 2020-05-29T15:25:50
| 267,840,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,898
|
py
|
"""
Django settings for acre project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
from django.contrib.messages import constants as messages
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '14tmc3zctdr=(@n&nwwoq#ms9v#)x-3*!#!5pl&%gi=v!0uh-k'
GOOGLE_MAPS_API_KEY = 'AIzaSyCXKJ3T-HIJwFLuS4aBq15Lg6tsiPcAXJ0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'pages.apps.PagesConfig',
'listings.apps.ListingsConfig',
'realtors.apps.RealtorsConfig',
'accounts.apps.AccountsConfig',
'contacts.apps.ContactsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'multiselectfield',
'django_google_maps',
'django.contrib.humanize',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'acre.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'acre.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'acredb',
        'USER': 'postgres',
        'PASSWORD': 'Oldhead@12',
        'HOST': 'localhost'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'acre/static')
]
# Media folder settings
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Messages
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
try:
from .local_settings import *
except ImportError:
pass
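# A hypothetical local_settings.py that the import above would pick up
# (values are illustrative only, not part of this project):
#
#     DEBUG = False
#     ALLOWED_HOSTS = ['example.com']
#     SECRET_KEY = 'replace-with-a-real-secret'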
|
[
"andrew@Andrews-MacBook-Pro.local"
] |
andrew@Andrews-MacBook-Pro.local
|
944ba56ff7aca83e2eb127f4da13c740715ee035
|
f57e34d0a708ea1139f80f8e5b968c55f6fd2621
|
/dassl/utils/logger.py
|
9b37774ef48a52e330761d229098b3e3627aa44b
|
[
"MIT"
] |
permissive
|
MohammadJavadD/Dassl.pytorch
|
bfdac8f28781af5f198eb7a1318043e04dc544d3
|
5e83fdce6fb51d8d4fbe0441a016eade2ebda423
|
refs/heads/master
| 2022-07-06T06:33:53.655489
| 2020-05-11T20:55:24
| 2020-05-11T20:55:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,723
|
py
|
import os
import sys
import time
import os.path as osp
from .tools import mkdir_if_missing
__all__ = ['Logger', 'setup_logger']
class Logger(object):
"""Write console output to external text file.
Imported from `<https://github.com/Cysu/open-reid/blob/master/reid/utils/logging.py>`_
Args:
        fpath (str): path of the file the log is written to.
Examples::
>>> import sys
>>> import os.path as osp
>>> save_dir = 'output/experiment-1'
>>> log_name = 'train.log'
>>> sys.stdout = Logger(osp.join(save_dir, log_name))
"""
def __init__(self, fpath=None):
self.console = sys.stdout
self.file = None
if fpath is not None:
mkdir_if_missing(osp.dirname(fpath))
self.file = open(fpath, 'w')
def __del__(self):
self.close()
    def __enter__(self):
        return self
def __exit__(self, *args):
self.close()
def write(self, msg):
self.console.write(msg)
if self.file is not None:
self.file.write(msg)
def flush(self):
self.console.flush()
if self.file is not None:
self.file.flush()
os.fsync(self.file.fileno())
def close(self):
self.console.close()
if self.file is not None:
self.file.close()
def setup_logger(output=None):
if output is None:
return
if output.endswith('.txt') or output.endswith('.log'):
fpath = output
else:
fpath = osp.join(output, 'log.txt')
if osp.exists(fpath):
# make sure the existing log file is not over-written
fpath += time.strftime('-%Y-%m-%d-%H-%M-%S')
sys.stdout = Logger(fpath)
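# A minimal usage sketch (path below is illustrative): after this call,
# everything printed to stdout is also appended to output/experiment-1/log.txt.
#
#     setup_logger('output/experiment-1')
#     print('recorded on the console and in the log file')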
|
[
"k.zhou@surrey.ac.uk"
] |
k.zhou@surrey.ac.uk
|
3b973ffb45eaa591cd1b658a60bc480604c2573e
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2227/60668/288820.py
|
84a8ef21d2e2f35a0dcb5b7d7fa5bc722b3f800e
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 448
|
py
|
def nums_6_CodeSec(n,k):
seen = set()
ans = []
def dfs(node):
for x in map(str, range(k)):
nei = node + x
if nei not in seen:
seen.add(nei)
dfs(nei[1:])
ans.append(x)
dfs("0" * (n - 1))
if n == 1 and k == 2:
print("01")
else:
print(n,k)
if __name__=='__main__':
n = int(input())
k = int(input())
nums_6_CodeSec(n,k)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
b37a8243749b1cbb1fb274960fb8cc5a20a84f1b
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r10p1/Gen/DecFiles/options/14195637.py
|
19a1f73398d726879f251757b9c3658f6d49a240
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 843
|
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/14195637.py generated: Wed, 25 Jan 2017 15:25:33
#
# Event Type: 14195637
#
# ASCII decay Descriptor: [B_c+ => (D*_s+ => (D_s+ -> K- K+ pi+) gamma) (D*(2007)~0 => (D~0 -> K+ pi-) pi0) ]CC
#
from Configurables import Generation
Generation().EventType = 14195637
Generation().SampleGenerationTool = "Special"
from Configurables import Special
Generation().addTool( Special )
Generation().Special.ProductionTool = "BcVegPyProduction"
Generation().PileUpTool = "FixedLuminosityForRareProcess"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bc_DsstDst0,Dsgamma,KKpi,D0pi0,Kpi=BcVegPy,DecProdCut,HELAMP010.dec"
Generation().Special.CutTool = "BcDaughtersInLHCb"
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
d78dae8aa293992ac876084340178bc18620f645
|
9b617418cfadc6b6deb10c675723485ae49fb221
|
/code/resources/users.py
|
86659c752c29c1bed0415d2aab2b25db6338b7ac
|
[
"MIT"
] |
permissive
|
borko81/flask_udemy
|
455a555b3892da6d9fee04ba53ea2408dfe23f2b
|
e8f9192feda1458d1ea44b62d2485e911f16acef
|
refs/heads/main
| 2023-08-07T14:33:47.697962
| 2021-10-08T14:01:13
| 2021-10-08T14:01:13
| 411,242,125
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,374
|
py
|
import sqlite3
from flask_restful import Resource, reqparse
from models.user import UserModel
class UserRegister(Resource):
"""
    Register resource, using reqparse from flask_restful.
    Validates that the username is unique, both in the form and in the db.
"""
parser = reqparse.RequestParser()
parser.add_argument("username", type=str, required=True, help="Insert username")
parser.add_argument("password", type=str, required=True, help="Insert password")
def post(self):
data = UserRegister.parser.parse_args()
        # validate that the username is unique
if UserModel.find_by_username(data['username']):
return {"message": "This username not allowed!"}, 400
try:
connection = sqlite3.connect("data.db")
cursor = connection.cursor()
except sqlite3.Error as er:
raise ValueError(er)
else:
query = "INSERT INTO users VALUES (NULL, ?, ?)"
try:
cursor.execute(query, (data['username'], data['password']))
except sqlite3.Error as er:
raise ValueError(er)
else:
connection.commit()
finally:
connection.close()
return {"message": "User created successfully"}, 201
if __name__ == '__main__':
u = UserModel.find_by_username("borko")
print(u)
|
[
"bstoilov81@gmail.com"
] |
bstoilov81@gmail.com
|
64183ac4cc465a42829ec69748f9176d1f426207
|
d66818f4b951943553826a5f64413e90120e1fae
|
/hackerearth/Algorithms/Match makers/test.py
|
e518e3812e5879cbd0b0cddd6bd1e2e5c19ffda8
|
[
"MIT"
] |
permissive
|
HBinhCT/Q-project
|
0f80cd15c9945c43e2e17072416ddb6e4745e7fa
|
19923cbaa3c83c670527899ece5c3ad31bcebe65
|
refs/heads/master
| 2023-08-30T08:59:16.006567
| 2023-08-29T15:30:21
| 2023-08-29T15:30:21
| 247,630,603
| 8
| 1
|
MIT
| 2020-07-22T01:20:23
| 2020-03-16T06:48:02
|
Python
|
UTF-8
|
Python
| false
| false
| 606
|
py
|
import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
@patch('builtins.input', side_effect=[
'2',
'4',
'1 6 9 12',
'4 12 3 9',
'4',
'2 2 2 2',
'2 2 2 2',
])
def test_case_0(self, input_mock=None):
text_trap = io.StringIO()
with redirect_stdout(text_trap):
import solution
self.assertEqual(text_trap.getvalue(),
'2\n' +
'4\n')
if __name__ == '__main__':
unittest.main()
|
[
"hbinhct@gmail.com"
] |
hbinhct@gmail.com
|
2590c023d108e24d8b87283bf38c9ad7246bd708
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_testifying.py
|
54d585fa8f0db28a02a10621604b7a87579812f2
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
py
|
from xai.brain.wordbase.verbs._testify import _TESTIFY
# class header
class _TESTIFYING(_TESTIFY, ):
def __init__(self,):
_TESTIFY.__init__(self)
self.name = "TESTIFYING"
self.specie = 'verbs'
self.basic = "testify"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
072e40a242d378c1a17f9f2a3f62a08178177a55
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_202/64.py
|
65c7985126860c68e994a7482a5134f0be8da6ab
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,998
|
py
|
q = int(input())
for case in range(1,q+1):
n,m = [int(x) for x in input().split()]
cols = [0]*n
rows = [0]*n
firstrow_plus = [0]*n
orgmatrix = [[0]*n for _ in range(n)]
matrix = [[0]*n for _ in range(n)]
backwards = [0]*(2*n-1)
forwards = [0]*(2*n-1)
points = 0
for _ in range(m):
c,b,a = input().split()
a = int(a)-1
b = int(b)-1
if c == 'x' or c == 'o':
cols[a] += 1
rows[b] += 1
points += 1
orgmatrix[b][a] += 1
if c == '+' or c == 'o':
c1,c2 = a+b,a-b
backwards[c2]+=1
forwards[c1]+=1
firstrow_plus[a] += 1
points += 1
orgmatrix[b][a] += 2
numbackwards = [0]*(2*n-1)
numforwards = [0]*(2*n-1)
for i in range(n):
for j in range(n):
c1,c2 = i+j,i-j
numbackwards[c2]+=1
numforwards[c1]+=1
def cover(pos):
i,j = pos
c1,c2 = i+j,i-j
return numbackwards[c2] + numforwards[c1]
poi = [(i,j) for i in range(n) for j in range(n)]
poi.sort(key = lambda x: cover(x))
for pos in poi:
i,j = pos
c1,c2 = i+j,i-j
if backwards[c2]== 0 and forwards[c1] == 0:
matrix[j][i] += 2
points += 1
backwards[c2]+=1
forwards[c1]+=1
i = 0
j = 0
while i < n and j < n:
while i < n and rows[i]>0:
i+=1
while j<n and cols[j]>0:
j+=1
if i >= n or j >= n:
continue
rows[i] += 1
cols[j] += 1
matrix[i][j] += 1
points += 1
#for j in range(n):
# if firstrow_plus[j] == 0:
# matrix[0][j] += 2
# points += 1
#for j in range(1,n-1):
# matrix[n-1][j] += 2
# points += 1
changes = 0
for i in range(n):
for j in range(n):
if matrix[i][j]>0:
changes += 1
print('Case #%d: %d %d' %(case,points,changes))
for i in range(n):
for j in range(n):
if matrix[i][j]==1:
if orgmatrix[i][j]>0:
print('o %d %d' %(i+1,j+1))
else:
print('x %d %d' %(i+1,j+1))
elif matrix[i][j]==2:
if orgmatrix[i][j]>0:
print('o %d %d' %(i+1,j+1))
else:
print('+ %d %d' %(i+1,j+1))
elif matrix[i][j]>2:
print('o %d %d' %(i+1,j+1))
#prmat = [['.']*n for _ in range(n)]
#for i in range(n):
# for j in range(n):
# dumhet = matrix[i][j] + orgmatrix[i][j]
# if dumhet == 1:
# prmat[i][j] = 'x'
# elif dumhet == 2:
# prmat[i][j] = '+'
# elif dumhet == 3:
# prmat[i][j] = 'o'
#for i in range(n):
# print(*prmat[i])
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
1cc1d640e21ab0f100c76bc623beac7409d3eb4f
|
2b01f8f7ee7e841f64629f88896478d3b4221480
|
/flatpages_i18n/middleware.py
|
70654537b556fffc508c62e7155e802524587996
|
[
"BSD-3-Clause"
] |
permissive
|
PragmaticMates/django-flatpages-i18n
|
fde6821774419c1a3cbafe317c5c3c91730339a5
|
434903e60518e0c1a54f0aea24e1d3e8eb0449bd
|
refs/heads/master
| 2023-06-09T18:13:11.192887
| 2023-06-05T07:32:10
| 2023-06-05T07:32:10
| 6,462,420
| 16
| 19
|
BSD-3-Clause
| 2019-07-12T07:03:15
| 2012-10-30T18:40:25
|
Python
|
UTF-8
|
Python
| false
| false
| 817
|
py
|
# from django.conf import settings
# from django.http import Http404
# from django.utils.deprecation import MiddlewareMixin
#
# from flatpages_i18n.views import flatpage
#
#
# class FlatpageFallbackMiddleware(MiddlewareMixin):
# def process_response(self, request, response):
# if response.status_code != 404:
# # No need to check for a flatpage for non-404 responses.
# return response
# try:
# return flatpage(request, request.path_info)
# # Return the original response if any errors happened. Because this
# # is a middleware, we can't assume the errors will be caught elsewhere.
# except Http404:
# return response
# except:
# if settings.DEBUG:
# raise
# return response
|
[
"erik.telepovsky@gmail.com"
] |
erik.telepovsky@gmail.com
|
0feb26db0b3e1ad462a9a055b1f25937d285fe82
|
3f327d2654b85b922909925b9f475315d78f4652
|
/Backend/newsapi/lib/python2.7/site-packages/newsapi/sources.py
|
a2865f6348bc04ca28a13159efcf5462a1d5167c
|
[
"MIT"
] |
permissive
|
brianwang1217/SelfImprovementWebApp
|
8db45914027537aee9614f9d218c93cc08dc90f8
|
7892fc4ee5434307b74b14257b29a5f05a0a0dd7
|
refs/heads/master
| 2022-12-13T15:01:08.595735
| 2018-06-23T04:46:06
| 2018-06-23T04:46:06
| 137,548,289
| 1
| 1
|
MIT
| 2022-05-25T01:28:29
| 2018-06-16T02:48:52
|
Python
|
UTF-8
|
Python
| false
| false
| 4,265
|
py
|
from newsapi.base_news import BaseNews
class Sources(BaseNews):
def __init__(self, API_KEY):
super(Sources, self).__init__(API_KEY)
self.endpoint = "https://newsapi.org/v1/sources"
self.sources = []
self.sources_base_info = {}
self.sources_id_info = {}
self.categories = {}
self.languages = {}
self.countries = {}
def get(self, category="", language="", country="", attributes_format=True):
self.payload['category'] = category
self.payload['language'] = language
self.payload['country'] = country
r = self.requests.get(self.endpoint, params=self.payload)
        if r.status_code != 200:
            raise Exception("Either the server didn't respond or the query returned zero results.")
try:
content = r.json()
except ValueError:
raise ValueError("No json data could be retrieved.")
if attributes_format:
return self.AttrDict(content)
return content
def all(self):
return self.get()
def get_by_category(self, category):
return self.get(category=category)
def get_by_language(self, language):
return self.get(language=language)
def get_by_country(self, country):
return self.get(country=country)
def information(self):
content = self.get()
self.sources = content.sources
        for source in self.sources:
            category_name = source['category']
            language_name = source['language']
            country_name = source['country']
            identifier = source['id']
            name = source['name']
            description = source['description']
            url = source['url']
            urls_to_logos = source['urlsToLogos']
            sort_bys_available = source['sortBysAvailable']
            self.sources_base_info[name] = url
            self.sources_id_info[name] = identifier
            temp_dict = {
                "id": identifier, "name": name,
                "description": description, "url": url,
                "urls_to_logos": urls_to_logos,
                "sort_bys_available": sort_bys_available
            }
            if category_name in self.categories:
                self.categories[category_name].append(temp_dict)
            else:
                self.categories[category_name] = [temp_dict]
            if language_name in self.languages:
                self.languages[language_name].append(temp_dict)
            else:
                self.languages[language_name] = [temp_dict]
            if country_name in self.countries:
                self.countries[country_name].append(temp_dict)
            else:
                self.countries[country_name] = [temp_dict]
return self
def all_sorted_information(self):
return self.sources
def all_categories(self, detailed=False):
if detailed:
return self.categories
return self.categories.keys()
def all_languages(self, detailed=False):
if detailed:
return self.languages
return self.languages.keys()
def all_countries(self, detailed=False):
if detailed:
return self.countries
return self.countries.keys()
def all_base_information(self):
return self.sources_base_info
def all_ids(self, detailed=False):
if detailed:
return self.sources_id_info
return self.sources_id_info.values()
def all_names(self, detailed=False):
if detailed:
return self.sources_base_info
return self.sources_base_info.keys()
def all_urls(self, detailed=False):
if detailed:
return self.sources_base_info
return self.sources_base_info.values()
def search(self, name):
matches = []
if not self.sources:
self.information()
for source in self.sources:
if name.lower() in source['name'].lower():
matches.append(source)
if not matches:
return "No match found!"
return matches
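# A hypothetical usage sketch (requires a valid NewsAPI key):
#
#     sources = Sources(API_KEY='...')
#     sources.information()            # fetch and index all sources
#     print(sources.all_categories())  # e.g. ['business', 'technology', ...]
#     print(sources.search('bbc'))     # sources whose name contains 'bbc'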
|
[
"talk2shreyas@gmail.com"
] |
talk2shreyas@gmail.com
|
c8742c451b8f04a368affdf326423791bc8e12b1
|
92237641f61e9b35ff6af6294153a75074757bec
|
/Algorithm/programmers/lv1_약수의 합.py
|
efcd4bdb217f84c5119f9de34b91c09be9bd7a7f
|
[] |
no_license
|
taepd/study
|
8ded115765c4f804813e255d9272b727bf41ec80
|
846d3f2a5a4100225b750f00f992a640e9287d9c
|
refs/heads/master
| 2023-03-08T13:56:57.366577
| 2022-05-08T15:24:35
| 2022-05-08T15:24:35
| 245,838,600
| 0
| 1
| null | 2023-03-05T23:54:41
| 2020-03-08T15:25:15
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 513
|
py
|
"""
문제 설명
정수 n을 입력받아 n의 약수를 모두 더한 값을 리턴하는 함수, solution을 완성해주세요.
제한 사항
n은 0 이상 3000이하인 정수입니다.
입출력 예
n return
12 28
5 6
입출력 예 설명
입출력 예 #1
12의 약수는 1, 2, 3, 4, 6, 12입니다. 이를 모두 더하면 28입니다.
입출력 예 #2
5의 약수는 1, 5입니다. 이를 모두 더하면 6입니다.
"""
def solution(n):
return sum([i for i in range(1, n//2+1) if n % i == 0]) + n
|
[
"taepd1@gmail.com"
] |
taepd1@gmail.com
|
79a1d1b99544c0df6ff3fa556be040c933b22cd8
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/4AtqpqKdXAFofa566_16.py
|
ea18541665d34bf4ba2b25fbe302826b6e09da0e
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,005
|
py
|
"""
Create a function that takes in a _number as a string_ `n` and returns the
number **without trailing and leading zeros**.
* **Trailing Zeros** are the zeros _after_ a decimal point which _don't affect the value_ (e.g. the _last three_ zeros in `3.4000` and `3.04000`).
* **Leading Zeros** are the zeros _before_ a whole number which _don't affect the value_ (e.g. the _first three_ zeros in `000234` and `000230`).
### Examples
remove_leading_trailing("230.000") ➞ "230"
remove_leading_trailing("00402") ➞ "402"
remove_leading_trailing("03.1400") ➞ "3.14"
remove_leading_trailing("30") ➞ "30"
### Notes
* Return a **string**.
* If you get a number with `.0` on the end, return the _integer value_ (e.g. return `"4"` rather than `"4.0"`).
* If the number is `0`, `0.0`, `000`, `00.00`, etc... return `"0"`.
"""
def remove_leading_trailing(n):
f = (float(n))
i = int(f)
if (f == float(i)):
return str(i)
else:
return str(f)
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
f36ffffea1f4374ec233376ec27a22b0aaeb5bf5
|
43c24c890221d6c98e4a45cd63dba4f1aa859f55
|
/test/tests/thread_contention_test.py
|
518e5dcd40cd8122d86907338a77f8d5d156ebea
|
[
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
jmgc/pyston
|
c8e4df03c33c6b81d20b7d51a781d9e10148238e
|
9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f
|
refs/heads/master
| 2020-12-11T07:51:58.968440
| 2020-09-11T14:38:38
| 2020-09-11T14:38:38
| 39,242,644
| 0
| 0
|
NOASSERTION
| 2020-09-11T14:38:39
| 2015-07-17T08:09:31
|
Python
|
UTF-8
|
Python
| false
| false
| 416
|
py
|
from thread import start_new_thread
import time
work = []
done = []
def run(num):
for i in xrange(num):
t = work.pop()
work.append(t - 1)
done.append(num)
print "starting!"
nthreads = 2
N = 100000
for i in xrange(nthreads):
work.append(N)
for i in xrange(nthreads):
t = start_new_thread(run, (N,))
while len(done) < nthreads:
time.sleep(0)
# print work
assert sum(work) == 0
|
[
"kmod@dropbox.com"
] |
kmod@dropbox.com
|
227c8e7d7c7faf708582ddde5050af8f34a85ecd
|
7bededcada9271d92f34da6dae7088f3faf61c02
|
/pypureclient/flashblade/FB_2_4/models/smtp_server_get_response.py
|
b9d7c1f33db6e8adf4bc96d6abeba9d4958fb2b7
|
[
"BSD-2-Clause"
] |
permissive
|
PureStorage-OpenConnect/py-pure-client
|
a5348c6a153f8c809d6e3cf734d95d6946c5f659
|
7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e
|
refs/heads/master
| 2023-09-04T10:59:03.009972
| 2023-08-25T07:40:41
| 2023-08-25T07:40:41
| 160,391,444
| 18
| 29
|
BSD-2-Clause
| 2023-09-08T09:08:30
| 2018-12-04T17:02:51
|
Python
|
UTF-8
|
Python
| false
| false
| 4,221
|
py
|
# coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.4, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_4 import models
class SmtpServerGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'continuation_token': 'str',
'total_item_count': 'int',
'items': 'list[SmtpServer]'
}
attribute_map = {
'continuation_token': 'continuation_token',
'total_item_count': 'total_item_count',
'items': 'items'
}
required_args = {
}
def __init__(
self,
continuation_token=None, # type: str
total_item_count=None, # type: int
items=None, # type: List[models.SmtpServer]
):
"""
Keyword args:
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the `continuation_token` to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The `continuation_token` is generated if the `limit` is less than the remaining number of items, and the default sort is used (no sort is specified).
total_item_count (int): Total number of items after applying `filter` params.
items (list[SmtpServer])
"""
if continuation_token is not None:
self.continuation_token = continuation_token
if total_item_count is not None:
self.total_item_count = total_item_count
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `SmtpServerGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SmtpServerGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SmtpServerGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"tlewis@purestorage.com"
] |
tlewis@purestorage.com
|
dbe872211a4755d36304647f012be4e14b572c81
|
8da76aabcf9cfea3478f56037edbb5fa1513140b
|
/maisemapaikka/dev_jt_01/maisemapaikka/apps/geomaps/widgets.py
|
c1ab884dc936bd34d6cf3c3c038d530f9564201b
|
[] |
no_license
|
mikanyman/.virtualenvs-legacy
|
039479f31f2ca9f9a3d3544d8837429ddd0a7492
|
5486128b5b3b7ddb9ec81d43e3bb601a23b4025a
|
refs/heads/master
| 2020-12-31T07:10:07.018881
| 2017-02-01T02:16:55
| 2017-02-01T02:16:55
| 80,566,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,118
|
py
|
from django import forms
from django.db import models
from django.conf import settings
class LocationPickerWidget(forms.TextInput):
class Media:
css = {
'all': (
settings.ADMIN_MEDIA_PREFIX + 'css/location_picker.css',
)
}
js = (
'http://ajax.googleapis.com/ajax/libs/jquery/1.3.2/jquery.min.js',
'http://www.google.com/jsapi?key=ABQIAAAA4NIx2jg3c_um-4n9lSUsUBQpzvvHaH8wLU269kY3vQUW6nVQBRTnCoPQWn83MqmlDy6i0XFj9TqLxw',
settings.ADMIN_MEDIA_PREFIX + 'js/jquery.location_picker.js',
)
def __init__(self, attrs=None):
super(LocationPickerWidget, self).__init__(attrs=attrs)
def render(self, name, value, attrs=None):
        if attrs is None:
attrs = {}
attrs['class'] = 'location_picker'
return super(LocationPickerWidget, self).render(name, value, attrs)
class LocationField(models.CharField):
def formfield(self, **kwargs):
kwargs['widget'] = LocationPickerWidget
return super(LocationField, self).formfield(**kwargs)
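# A hypothetical model using LocationField (illustrative only):
#
#     class Place(models.Model):
#         name = models.CharField(max_length=100)
#         location = LocationField(max_length=100)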
|
[
"mika.nyman@synapse-computing.com"
] |
mika.nyman@synapse-computing.com
|
46b43aae14e1edbb429984d0ea5f710a308d5c7d
|
7433bb9a3e3a1ea89314c05a0ca22f52323eb33a
|
/task_LSTM_inbuild/step3_evaluate_line.py
|
a40d65169d18b78db2ed6db0a1619992e589b2ea
|
[
"Apache-2.0"
] |
permissive
|
tianyunzqs/text_classifier_tasks
|
685a70c70216865c28204d48bdbf9a1239edea86
|
444ac6676547f4e3ee0ccd5fb36439e8e02f56a9
|
refs/heads/master
| 2022-11-05T00:31:09.070088
| 2022-10-17T02:46:38
| 2022-10-17T02:46:38
| 175,388,072
| 10
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,879
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2019/7/26 10:18
# @Author : tianyunzqs
# @Description :
import os
import sys
import numpy as np
import tensorflow as tf
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from task_LSTM_inbuild.data_helper import Vocab, CategoryDict
from task_LSTM_inbuild.text_lstm import LSTM_Model
# hyperparameters required by the LSTM
def get_default_params():
return tf.contrib.training.HParams(
        num_embedding_size = 16, # length of each word's embedding vector
        # number of LSTM time steps, i.e. how many words make up one sentence;
        # minibatches are used during training, so samples within a batch must be aligned,
        # while at test time the length may vary
        num_timesteps = 50, # 50 words per sentence
        num_lstm_nodes = [32, 32], # size of each layer
        num_lstm_layers = 2, # consistent with the length of the list above
        # two layers of LSTM units, 32 units per layer
        num_fc_nodes = 32, # number of fully-connected nodes
        batch_size = 100,
        clip_lstm_grads = 1.0,
        # clip the LSTM gradients, since LSTMs are prone to exploding or vanishing gradients;
        # this sets an upper bound: any gradient above it is clamped to this value
        learning_rate = 0.001,
        num_word_threshold = 10, # words that occur too rarely do not help training, so set a frequency threshold
)
hps = get_default_params()  # build the hyperparameter object
def load_model():
vocab_file = 'D:/alg_file/data/cnews/cnews.vocab.txt'
category_file = 'D:/alg_file/data/cnews/cnews.category.txt'
vocab = Vocab(vocab_file, hps.num_word_threshold)
category = CategoryDict(category_file)
    graph = tf.Graph()  # create a separate graph for each instance
    sess = tf.Session(graph=graph)  # create a new session
with sess.as_default():
with graph.as_default():
lstm = LSTM_Model(hps, vocab.size(), category.size())
saver = tf.train.Saver() # defaults to saving all variables - in this case w and b
# Initialize all variables
sess.run(tf.global_variables_initializer())
checkpoint_dir = os.path.abspath(os.path.join(os.path.curdir, "checkpoints"))
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
saver.restore(sess, ckpt.model_checkpoint_path)
return vocab, category, lstm, sess
vocab, category, lstm, sess = load_model()
def evaluate_line(text):
id_words = vocab.sentence_to_id(text)
id_words = id_words[0: hps.num_timesteps]
padding_num = hps.num_timesteps - len(id_words)
id_words = id_words + [vocab.unk for _ in range(padding_num)]
batch_x = [id_words] * hps.batch_size
    # only run the prediction op here: fetching train_op as well would update
    # the model weights during evaluation
    predict_label = sess.run(
        lstm.y_pred,
        feed_dict={
            lstm.inputs: np.array(batch_x),
            lstm.outputs: np.array([0] * hps.batch_size),
            lstm.keep_prob: 1.0
        }
    )
return category.id_to_category.get(predict_label[0])
if __name__ == '__main__':
import time
    # sample text from the finance ("财经") category
text = """交银货币清明假期前两日暂停申购和转换入全景网3月30日讯 交银施罗德基金周一公告称,公司旗下的交银施罗德货币市场证券投资基金将于2009年"清明"假期前两日暂停申购和转换入业务。公告表示,交银施罗德货币将于2009年4月2日、3日两天暂停办理基金的申购和转换入业务。转换出、赎回等其他业务以及公司管理的其他开放式基金的各项交易业务仍照常办理。自2009年4月7日起,所有销售网点恢复办理基金的正常申购和转换入业务。(全景网/雷鸣)"""
t1 = time.time()
label = evaluate_line(text=text)
t2 = time.time()
print(label)
print('cost time: {0}ms'.format(t2 - t1))
|
[
"qszhu@fiberhome.com"
] |
qszhu@fiberhome.com
|
f9c790eb2cc47ba5039ad06c28c4aece60bbd206
|
8e0149f00f82f57216584b53180ec02870dee7e8
|
/python/linked_list/heap/lc23.py
|
80140fd3c24babfcf1832732796fa94aec1ba01e
|
[] |
no_license
|
chao-ji/LeetCode
|
5880e0fa91d84ad70b5abd47e24ac75646fdcdf9
|
69a960dd8f39e9c8435a3678852071e1085fcb72
|
refs/heads/master
| 2020-12-13T03:35:42.009204
| 2019-06-15T04:45:38
| 2019-06-15T04:45:38
| 51,043,575
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,748
|
py
|
"""23. Merge k Sorted Lists
Merge k sorted linked lists and return it as one sorted list. Analyze and describe its complexity.
Example:
Input:
[
1->4->5,
1->3->4,
2->6
]
Output: 1->1->2->3->4->4->5->6
"""
import heapq
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
# Solution1, use min-heap
class Solution(object):
def mergeKLists(self, lists):
"""
:type lists: List[ListNode]
:rtype: ListNode
"""
if not lists:
return None
# We have `k` linked lists
# a11 < a12 < a13 < ... a1n1
# a21 < a22 < a21 < ... a2n2
# ...
# ak1 < ak2 < ak3 < ... aknk
# The smallest number must be the minimum of a11, a21, ..., ak1
# Say it's a11.
# What will be the next smallest number?
# It can't be `a13`, ..., `a1n1`, because we have `a12` smaller than them.
# It can't be `a22`, ..., `a2nk`, because we have `a21` smaller than them.
# ...
# It can't be `ak2`, ..., `aknk`, because we have `ak1` smaller than them.
# So again, the next smallest number must be the minimum of a12, a21, ..., ak1
# We know how to merge two sorted lists: LC 21 Merge Two Sorted Lists
# We can use the same approach:
# 1. scan the first node of all remainling non-empty linked lists,
# and find which one is the smallest
# 2. remove that from the original linked list and add to the new,
# growing linked list
# 3. repeat step 1 and 2
# However, this would require us to scan all linked list over and over
# again.
# KEY Idea:
# When we scan the first node of all linked lists, if we put them in
# min-heap (keyed on the node's value), we can easily extract the node
# with minimum value in time O(log k), and insert its successor in the original
# linked list that it came from
# Initialize the growing linked list
# `dummy.next` always points to the start of the growing list, initially empty
        # `curr` always points to the last node of the growing list, initially empty
curr = dummy = ListNode(0)
# initialize `heap` with 2-tuples (node's key, node) using the first
# node (i.e. curr) of all linked lists
heap = [(node.val, node) for node in lists if node]
heapq.heapify(heap)
while heap:
# Extract the node with minimum value from heap
_, node = heapq.heappop(heap)
# take note of the successor of `node`
next = node.next
# disconnect it from the rest of the linked list it's from
node.next = None
# add to the growing linked list
curr.next = node
curr = curr.next
# insert the successor of the popped node, if it's non-empty
if next:
heapq.heappush(heap, (next.val, next))
return dummy.next
# Solution2, divide & conquer
# time: O(N*logk), N = total number of nodes, k = number of lists
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def mergeKLists(self, lists):
"""
:type lists: List[ListNode]
:rtype: ListNode
"""
# KEY Insight: divide and conquer
# If the number of lists is 2, we already know how to merge two
# sorted lists. See LC 21 Merge Two Sorted Lists.
# We can split the `lists in two halves:
# list_1, list_2, ... list_m
#
# list_m+1, list_m+2, ..., list_k
# And recursively merge the lists in the first half and second half.
#
# Let `left` and `right` be the outcomes of the two recursion.
#
# Since they are already sorted, we can simply merge them into a single
# sorted list.
# Time complexity:
#
# The height of the recursion is O(log k), and in each level
# of recursion, the number of nodes to be visited is at most O(N) over
# all merges, where `N` is the total number of nodes
# In total: we have O(N*logk)
# Base case:
# when the number of lists <= 1
if len(lists) == 0:
return None
elif len(lists) == 1:
return lists[0]
size = len(lists)
left = self.mergeKLists(lists[:size//2])
right = self.mergeKLists(lists[size//2:])
merged = node = ListNode(0)
l1 = left
l2 = right
while l1 and l2:
if l1.val < l2.val:
node.next = l1
l1 = l1.next
else:
node.next = l2
l2 = l2.next
node = node.next
if l1:
node.next = l1
if l2:
node.next = l2
return merged.next
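# A small hedged test harness (ListNode mirrors the commented-out definition
# above; the input reproduces the example from the problem statement):
class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None


def build_list(vals):
    # build a singly-linked list from a python list and return its head
    dummy = curr = ListNode(0)
    for v in vals:
        curr.next = ListNode(v)
        curr = curr.next
    return dummy.next


if __name__ == '__main__':
    lists = [build_list(v) for v in ([1, 4, 5], [1, 3, 4], [2, 6])]
    node = Solution().mergeKLists(lists)
    merged = []
    while node:
        merged.append(node.val)
        node = node.next
    print(merged)  # expected: [1, 1, 2, 3, 4, 4, 5, 6]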
|
[
"chocobo1985@gmail.com"
] |
chocobo1985@gmail.com
|
e6cdba46aeece3e020f759b8414108b144310136
|
255dc7ff8fb676027021a674bd624fb6587fa2f7
|
/compiler/tests/22_sram_func_test.py
|
3a7ff5a3e9521545a1be95c25171bfa119148a16
|
[
"BSD-3-Clause"
] |
permissive
|
orbe7947/OpenRAM
|
80b40462fb7c1044fdacf34908432820b71f6092
|
29c5ab48f0a82972337f4b17ee90695ff1f8f825
|
refs/heads/master
| 2021-08-16T11:35:10.528368
| 2017-11-14T21:24:14
| 2017-11-14T21:24:14
| 110,760,794
| 0
| 0
| null | 2017-11-15T00:14:12
| 2017-11-15T00:14:12
| null |
UTF-8
|
Python
| false
| false
| 1,788
|
py
|
#!/usr/bin/env python2.7
"""
Run a regresion test on various srams
"""
import unittest
from testutils import header
import sys,os
sys.path.append(os.path.join(sys.path[0],".."))
import globals
import debug
import calibre
OPTS = globals.get_opts()
#@unittest.skip("SKIPPING 21_timing_sram_test")
class sram_func_test(unittest.TestCase):
def runTest(self):
OPTS.analytical_delay = False
globals.init_openram("config_20_{0}".format(OPTS.tech_name))
# we will manually run lvs/drc
OPTS.check_lvsdrc = False
import sram
debug.info(1, "Testing timing for sample 1bit, 16words SRAM with 1 bank")
s = sram.sram(word_size=OPTS.config.word_size,
num_words=OPTS.config.num_words,
num_banks=OPTS.config.num_banks,
name="sram_func_test")
OPTS.check_lvsdrc = True
import delay
tempspice = OPTS.openram_temp + "temp.sp"
s.sp_write(tempspice)
probe_address = "1" * s.addr_size
probe_data = s.word_size - 1
debug.info(1, "Probe address {0} probe data {1}".format(probe_address, probe_data))
d = delay.delay(s,tempspice)
d.set_probe(probe_address,probe_data)
# This will exit if it doesn't find a feasible period
import tech
load = tech.spice["FF_in_cap"]*4
slew = tech.spice["rise_time"]*2
feasible_period = d.find_feasible_period(load,slew)
os.remove(tempspice)
OPTS.analytical_delay = True
globals.end_openram()
# instantiate a copy of the class to actually run the test
if __name__ == "__main__":
(OPTS, args) = globals.parse_args()
del sys.argv[1:]
header(__file__, OPTS.tech_name)
unittest.main()
|
[
"mrg@ucsc.edu"
] |
mrg@ucsc.edu
|
1da2e624120594b03a58fd0253262dd9d3ce45bb
|
cb836bde47c790c7ad990d44d86d60c13f43a2a0
|
/markdown_it/token.py
|
0c03ac1fc8f64c41339533f0ce672a0994fe315f
|
[
"MIT"
] |
permissive
|
iooxa/markdown-it-py
|
133028a981af715ce244554e26b92b16fc4443ac
|
21837dfa0ce9be249de372bb10733a534f8e0a50
|
refs/heads/master
| 2022-11-19T14:23:23.618106
| 2020-07-20T15:57:16
| 2020-07-20T15:57:16
| 281,160,226
| 0
| 0
|
MIT
| 2020-07-20T15:57:17
| 2020-07-20T15:50:02
| null |
UTF-8
|
Python
| false
| false
| 5,690
|
py
|
from typing import List, Optional, Tuple, Union
import attr
@attr.s(slots=True)
class Token:
# Type of the token (string, e.g. "paragraph_open")
type: str = attr.ib()
# html tag name, e.g. "p"
tag: str = attr.ib()
# Level change (number in {-1, 0, 1} set), where:
# - `1` means the tag is opening
# - `0` means the tag is self-closing
# - `-1` means the tag is closing
nesting: int = attr.ib()
# Html attributes. Format: `[ [ name1, value1 ], [ name2, value2 ] ]`
attrs: Optional[list] = attr.ib(default=None)
# Source map info. Format: `[ line_begin, line_end ]`
map: Optional[Tuple[int, int]] = attr.ib(default=None)
# nesting level, the same as `state.level`
level: int = attr.ib(default=0)
# An array of child nodes (inline and img tokens)
children: Optional[List["Token"]] = attr.ib(default=None)
# In a case of self-closing tag (code, html, fence, etc.),
# it has contents of this tag.
content: str = attr.ib(default="")
# '*' or '_' for emphasis, fence string for fence, etc.
markup: str = attr.ib(default="")
# fence infostring
info: str = attr.ib(default="")
    # A place for plugins to store arbitrary data
meta: dict = attr.ib(factory=dict)
# True for block-level tokens, false for inline tokens.
# Used in renderer to calculate line breaks
block: bool = attr.ib(default=False)
# If it's true, ignore this element when rendering.
# Used for tight lists to hide paragraphs.
hidden: bool = attr.ib(default=False)
def attrIndex(self, name: str) -> int:
if not self.attrs:
return -1
for i, at in enumerate(self.attrs):
if at[0] == name:
return i
return -1
def attrPush(self, attrData: Tuple[str, str]):
"""Add `[ name, value ]` attribute to list. Init attrs if necessary."""
if self.attrs:
self.attrs.append(attrData)
else:
self.attrs = [attrData]
def attrSet(self, name: str, value: str):
"""Set `name` attribute to `value`. Override old value if exists."""
idx = self.attrIndex(name)
if idx < 0:
self.attrPush([name, value])
else:
self.attrs[idx] = [name, value]
def attrGet(self, name: str) -> str:
""" Get the value of attribute `name`, or null if it does not exist."""
idx = self.attrIndex(name)
if idx >= 0:
return self.attrs[idx][1]
return None
def attrJoin(self, name, value):
"""Join value to existing attribute via space.
Or create new attribute if not exists.
Useful to operate with token classes.
"""
idx = self.attrIndex(name)
if idx < 0:
self.attrPush([name, value])
else:
self.attrs[idx][1] = self.attrs[idx][1] + " " + value
def copy(self):
"""Return a shallow copy of the instance."""
return attr.evolve(self)
def as_dict(self, children=True, filter=None, dict_factory=dict):
"""Return the token as a dict.
:param bool children: Also convert children to dicts
:param callable filter: A callable whose return code determines whether an
attribute or element is included (``True``) or dropped (``False``). Is
called with the `attr.Attribute` as the first argument and the
value as the second argument.
:param callable dict_factory: A callable to produce dictionaries from. For
example, to produce ordered dictionaries instead of normal Python
dictionaries, pass in ``collections.OrderedDict``.
"""
return attr.asdict(
self, recurse=children, filter=filter, dict_factory=dict_factory
)
@classmethod
def from_dict(cls, dct):
token = cls(**dct)
if token.children:
token.children = [cls.from_dict(c) for c in token.children]
return token
@attr.s(slots=True)
class NestedTokens:
"""A class that closely resembles a Token,
    but for an opening/closing Token pair, and the children they contain.
"""
opening: Token = attr.ib()
closing: Optional[Token] = attr.ib()
children: List[Union[Token, "NestedTokens"]] = attr.ib(factory=list)
def __getattr__(self, name):
return getattr(self.opening, name)
def attrGet(self, name: str) -> str:
""" Get the value of attribute `name`, or null if it does not exist."""
return self.opening.attrGet(name)
def nest_tokens(tokens: List[Token]) -> List[Union[Token, NestedTokens]]:
"""Convert the token stream to a list of tokens and nested tokens.
``NestedTokens`` contain the open and close tokens and a list of children
of all tokens in between (recursively nested)
"""
output = []
tokens = list(reversed(tokens))
while tokens:
token = tokens.pop()
if token.nesting == 0:
token = token.copy()
output.append(token)
if token.children:
token.children = nest_tokens(token.children)
continue
assert token.nesting == 1, token.nesting
nested_tokens = [token]
nesting = 1
while tokens and nesting != 0:
token = tokens.pop()
nested_tokens.append(token)
nesting += token.nesting
if nesting != 0:
raise ValueError(f"unclosed tokens starting {nested_tokens[0]}")
child = NestedTokens(nested_tokens[0], nested_tokens[-1])
output.append(child)
child.children = nest_tokens(nested_tokens[1:-1])
return output
|
[
"chrisj_sewell@hotmail.com"
] |
chrisj_sewell@hotmail.com
|
b6723850310e650934ca18886791a71dee495084
|
2fd8f1cafdabfdf9507a1a7e232e13ac7756767f
|
/data/data_models.py
|
ca393ea6ffc156bcc2ffa70f40a7fab6b96bb7a7
|
[] |
no_license
|
dewmal/fx_agent_sma
|
34f3571fe37bfc18c72b8f9ec101dbbe5610a0bb
|
7ecec6ab432d8d43daa6d9cb4a838b1ade1e0c13
|
refs/heads/master
| 2020-06-16T10:16:20.359791
| 2019-07-17T06:09:52
| 2019-07-17T06:09:52
| 195,536,550
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,508
|
py
|
import datetime
from typing import List
from utils import round_time
class TickStream:
__type__ = "tick_stream"
symbol: str
ask: float
bid: float
quote: float
epoch: int
value: float
def __init__(self, tickId, symbol, ask, bid, quote, epoch, version="1.0") -> None:
super().__init__()
self.tickId = tickId
self.bid = bid
self.ask = ask
self.symbol = symbol
self.quote = quote
self.epoch = epoch
self.version = version
self.value = (self.ask + self.bid) / 2
def as_dict(self):
return {
"tickId": self.tickId,
"symbol": self.symbol,
"ask": self.ask,
"bid": self.bid,
"quote": self.quote,
"epoch": self.epoch,
}
@classmethod
def from_dict(cls, tick_data):
# print(tick_data['pair'])
return TickStream(
tick_data['tickId'],
tick_data['symbol'],
tick_data['ask'],
tick_data['bid'],
tick_data['quote'],
tick_data['epoch']
)
def __str__(self) -> str:
return f"{self.as_dict()}"
class TickWindow:
__type__ = "window_stream"
open = 0
high = 0
low = 0
close = 0
epoch = 0
symbol: str
last_epoch_time = 0
    def __init__(self, open, high, low, close, epoch, symbol, last_epoch_time=0, tick_list=None, id=None) -> None:
self.id = id
self.last_epoch_time = last_epoch_time
self.open = open
self.high = high
self.low = low
self.close = close
self.epoch = epoch
self.symbol = symbol
        self.tick_list = tick_list if tick_list is not None else []  # avoid a shared mutable default argument
def as_dict(self):
return {
"open": self.open,
"high": self.high,
"low": self.low,
"close": self.close,
"epoch": self.epoch,
"symbol": self.symbol,
"last_epoch_time": self.last_epoch_time,
}
@classmethod
def from_dict(cls, _data):
return TickWindow(
_data['open'],
_data['high'],
_data['low'],
_data['close'],
_data['epoch'],
_data['symbol'],
_data['last_epoch_time'],
[],
_data['_id'],
)
@staticmethod
    def from_tick_list(tick_list: List[TickStream]):
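        # Aggregate a list of ticks into one OHLC candle, using each tick's mid price.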
if len(tick_list) > 0:
open_tick = tick_list[0]
high_tick = max(tick_list, key=lambda tick: tick.value)
low_tick = min(tick_list, key=lambda tick: tick.value)
close_tick = tick_list[-1]
return TickWindow(open_tick.value, high_tick.value, low_tick.value, close_tick.value,
round_time(datetime.datetime.fromtimestamp(open_tick.epoch)).timestamp(),
open_tick.symbol,
tick_list)
else:
return None
def __str__(self) -> str:
return f"{self.symbol} OLHC - {self.open},{self.high},{self.low},{self.close},{self.epoch},{self.last_epoch_time},{self.id}"
class TIData:
def __init__(self, name, time_interval, epoch, data, symbol) -> None:
super().__init__()
self.time_interval = time_interval
self.symbol = symbol
self.data = data
self.epoch = epoch
self.name = name
def __str__(self):
return f"{self.name}-{self.time_interval},{self.data},{self.epoch},{self.symbol}"
|
[
"dewmalnilanka@gmail.com"
] |
dewmalnilanka@gmail.com
|
3d817787469b94efb5701656e528c260991baace
|
d5751e2f2b2128079d3473cf14b02c67515dba72
|
/flask_fundamentals/2.form_test/server.py
|
e5183bece53a77565141c9f57f8e5f49966d5057
|
[] |
no_license
|
seymakara/dojo_python
|
814ed49b561703e3a993a1ade0f084c234e82b13
|
ff8a56020d9ab337d930ec4ce4039f0bca2cfead
|
refs/heads/master
| 2021-05-13T20:44:27.327035
| 2018-01-10T06:32:27
| 2018-01-10T06:32:27
| 116,917,393
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 655
|
py
|
from flask import Flask, render_template, request, redirect
app = Flask(__name__)
# our index route will handle rendering our form
@app.route('/')
def index():
return render_template("index.html")
# this route will handle our form submission
# notice how we defined which HTTP methods are allowed by this route
@app.route('/users', methods=['POST'])
def create_user():
print "Got Post Info"
# we'll talk about the following two lines after we learn a little more
# about forms
name = request.form['name']
email = request.form['email']
# redirects back to the '/' route
return redirect('/')
app.run(debug=True) # run our server
|
[
"seymakara88@gmail.com"
] |
seymakara88@gmail.com
|
6aaab26b75b53923e8a74876b16f34d30fbe0c44
|
3c5657492c401994eaaebcf16c2b13a5ebc0efd8
|
/cresthh/tools/reduceSWW.py
|
768a6bf1a89c6bb4c6c7d17e1ebe895143fd9d7a
|
[] |
no_license
|
peggypan0411/CREST-iMAP
|
0b01d1123f7be7806971ead4835ea2e7f61f81a9
|
b2d80e2c3eb3fb575c678915fd89a96bdb30dbde
|
refs/heads/master
| 2023-07-16T08:11:26.736833
| 2021-04-14T21:44:02
| 2021-04-14T21:44:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,949
|
py
|
#!/home/ZhiLi/CRESTHH/python2/bin/python2
'''
A command line tool to get reduced geotiffs quickly
__author__: Zhi Li
__Date__: 2021/02/07
'''
import argparse
import numpy as np
import sys
sys.path.append('/home/ZhiLi/CRESTHH')
from cresthh.anuga.file_conversion.sww2dem import sww2dem
from cresthh.anuga import SWW_plotter
from netCDF4 import Dataset
from osgeo import gdal
import os
import numpy as np
import matplotlib.tri as mtri
from pyproj import CRS, transform
def export_tif(dst, lons, lats, arr, sample):
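    # Write `arr` as a single-band Float32 GeoTIFF, copying the projection from the `sample` dataset.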
# print arr.shape, lons.shape, lats.shape
rows, cols= arr.shape
driver = gdal.GetDriverByName("GTiff")
outdata = driver.Create(dst, cols, rows, 1, gdal.GDT_Float32)
outdata.SetGeoTransform([lons[0], np.diff(lons)[0],0,
lats[0], 0, np.diff(lats)[0]])##sets same geotransform as input
outdata.SetProjection(sample.GetProjection())##sets same projection as input
outdata.GetRasterBand(1).WriteArray(arr)
outdata.GetRasterBand(1).SetNoDataValue(-9999)##if you want these values transparent
outdata.FlushCache() ##saves to disk!!
outdata = None
band=None
ds=None
parser= argparse.ArgumentParser(description='Quick retrieval of flood depth\nAuthor: Allen Zhi Li\nDate: 2021/02/07')
parser.add_argument('--sww', type=str, metavar='sww file', required=True,
help='SWW file to be retrieved from')
parser.add_argument('--dst', type=str, metavar='destination', required=True,
help='File path to store transformed file')
parser.add_argument('--tif', type=bool, metavar='output GeoTiff', required=False,
default=True, help='Whether output tif format, default True')
parser.add_argument('--quantity', type=str, metavar='output quantity', required=False,
default='depth', help= 'which quantity to output, default depth')
parser.add_argument('--reduce', type=str, metavar='reduction', required=False,
default='max', help= 'choose a method to reduce time dimension, default max.')
parser.add_argument('--tr', type=float, metavar='resolution', required=False,
default=None, help= 'choose whether to rescale image, default 10m; method: bilinear interpolation')
parser.add_argument('--s_srs', type=str, required=False, default="EPSG:32215", help= 'source projection system')
parser.add_argument('--t_srs', type=str, required=False, default="EPSG:4326", help= 'target projection system')
parser.add_argument('--interp', type=str, required=False, default="square", help= 'interpolation method')
parser.add_argument('--DSM', type=str, required=False, default=None, help="surface elevation model to use")
parser.add_argument('--flood_fill', type=bool, required=False, default=False, help="whether to use flood fill")
if __name__=='__main__':
args= parser.parse_args()
sww_file= args.sww
dst= args.dst
isTiff= args.tif
toReduce= args.reduce
res= args.tr
quantity= args.quantity
s_srs= args.s_srs
t_srs= args.t_srs
interp= args.interp
dsm= args.DSM
ifFloodFill= args.flood_fill
    base_name = os.path.splitext(dst)[0]  # strip the extension; the original list slice would break the string concatenations below
if quantity not in ['depth', 'xmomentum', 'elevation', 'ymomentum', 'excRain']:
raise ValueError('expected quantity in ["depth", "xmomentum", "elevation", "ymomentum", "excRain"]')
    if toReduce=='max':
        reduce=max
    elif toReduce=='mean':
        reduce=np.mean
    else:
        reduce= int(toReduce) #choose a single time step
if res is None:
res=10
if interp=='square':
#use inherent 2nd order extrapolation
sww2dem(sww_file, base_name+'.asc', quantity=quantity, verbose=True, reduction=reduce, cellsize=res)
if isTiff:
os.system('gdalwarp -co COMPRESS=LZW -ot Float32 -s_srs %s -t_srs %s %s %s'%(s_srs, t_srs, base_name+'.asc', base_name+'.tif'))
os.system('rm %s'%(base_name+'.asc'))
os.system('rm %s'%(base_name+'.prj'))
elif interp in ['linear', 'cubic']:
# use Triangulation interpolation and refined with digital surface model
if dsm is None:
msg= "you have to provide a surface elevation model"
raise ValueError(msg)
dsm= gdal.Open(dsm)
dsm_arr= dsm.ReadAsArray()
geo= dsm.GetGeoTransform()
lons= np.linspace(geo[0], geo[1]*(dsm.RasterXSize)+geo[0], dsm.RasterXSize)
lats= np.linspace(geo[3], geo[-1]*dsm.RasterYSize+geo[3], dsm.RasterYSize)
lons2d, lats2d= np.meshgrid(lons, lats)
from cresthh.anuga.file.netcdf import NetCDFFile
p = NetCDFFile(sww_file)
z= np.array(p.variables['stage'])
x = np.array(p.variables['x']) + p.xllcorner
y = np.array(p.variables['y']) + p.yllcorner
_y, _x= transform(s_srs, t_srs, x, y)
triangles = np.array(p.variables['volumes'])
triang = mtri.Triangulation(_x, _y, triangles)
        if isinstance(reduce, int):
            _z= z[reduce]
else:
_z= z.max(axis=0)
if interp=='linear':
interpolator= mtri.LinearTriInterpolator(triang, _z)
elif interp=='cubic':
interpolator= mtri.CubicTriInterpolator(triang, _z, kind='geom')
zi_interp= interpolator(lons2d,lats2d)
if ifFloodFill:
from skimage.morphology import reconstruction
zi_interp[zi_interp<dsm_arr]= dsm_arr[zi_interp<dsm_arr]
filled = reconstruction(zi_interp, dsm_arr, method='erosion')
export_tif(base_name+'.tif', lons, lats, filled-dsm_arr, dsm)
else:
zi_interp[zi_interp<dsm_arr]= dsm_arr[zi_interp<dsm_arr]
export_tif(base_name+'.tif', lons, lats, zi_interp-dsm_arr, dsm)
else:
        raise ValueError('invalid argument, only supports square, linear and cubic')
# os.system('rm %s && mv %s %s'%(dst, dst+'.temp',dst))
print('Completed! output file name: %s'%dst)
|
[
"chrimerss@gmail.com"
] |
chrimerss@gmail.com
|
3622942b7c93de7b6819004d190c5034570c3137
|
eb33957e7b140c762fb77e5c83e5bba14aaeb8d3
|
/jam/server/api/v1/namespace.py
|
67e81b1ea025c04385aaeeced47b442bdb4acd19
|
[] |
no_license
|
AndrewSallans/jamdb
|
8a4a9d5ec03ca77bd0ad45404f8031b558898270
|
6eb4c0b465034e7ef5a648873be2353c4093c863
|
refs/heads/develop
| 2021-01-15T11:19:59.679368
| 2016-03-04T23:37:02
| 2016-03-04T23:37:02
| 53,345,931
| 0
| 0
| null | 2016-03-07T17:46:52
| 2016-03-07T17:46:49
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,996
|
py
|
import operator
import functools
from jam import Q
from jam import NamespaceManager
from jam.auth import Permissions
from jam.server.api.v1.base import View
from jam.server.api.v1.base import Serializer
from jam.server.api.v1.base import Relationship
class NamespaceView(View):
name = 'namespace'
plural = 'namespaces'
MANAGER = NamespaceManager()
@classmethod
def load(self, id):
return self.MANAGER.get_namespace(id)
def __init__(self, resource=None):
super().__init__(resource=resource)
self._namespace = resource
def get_permissions(self, request):
if request.method == 'GET' and self.resource is None:
return Permissions.NONE
return super().get_permissions(request)
def read(self, user):
return self.MANAGER.read(self.resource.name)
def update(self, patch, user):
return self.MANAGER.update(self._namespace.name, patch, user.uid)
def list(self, filter, sort, page, page_size, user):
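        # OR together permission selectors, from the global wildcard down to this exact user, each requiring READ.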
query = functools.reduce(operator.or_, [
Q('data.permissions.*', 'and', Permissions.READ),
Q('data.permissions.{0.type}-*'.format(user), 'and', Permissions.READ),
Q('data.permissions.{0.type}-{0.provider}-*'.format(user), 'and', Permissions.READ),
Q('data.permissions.{0.type}-{0.provider}-{0.id}'.format(user), 'and', Permissions.READ),
])
if filter:
filter &= query
else:
filter = query
return self.MANAGER.select().where(filter).page(page, page_size).order_by(sort)
class CollectionRelationship(Relationship):
@classmethod
def view(cls, namespace):
from jam.server.api.v1.collection import CollectionView
return CollectionView(namespace)
@classmethod
def serializer(cls):
from jam.server.api.v1.collection import CollectionSerializer
return CollectionSerializer
@classmethod
def self_link(cls, request, namespace):
if request.path.startswith('/v1/id'):
return '{}://{}/v1/id/namespaces/{}/collections'.format(request.protocol, request.host, namespace.ref)
return '{}://{}/v1/namespaces/{}/collections'.format(request.protocol, request.host, namespace.ref)
@classmethod
def related_link(cls, request, namespace):
if request.path.startswith('/v1/id'):
return '{}://{}/v1/id/namespaces/{}/collections'.format(request.protocol, request.host, namespace.ref)
return '{}://{}/v1/namespaces/{}/collections'.format(request.protocol, request.host, namespace.ref)
class NamespaceSerializer(Serializer):
type = 'namespaces'
relations = {
'collections': CollectionRelationship
}
@classmethod
def attributes(cls, inst):
return {
'name': inst.ref,
'permissions': {
sel: Permissions(perm).name
for sel, perm in inst.data['permissions'].items()
}
}
|
[
"chriskseto@gmail.com"
] |
chriskseto@gmail.com
|
cd94742c9c7694054d5b7b202660c0becf1c5052
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_136/1700.py
|
4c068771cbd023f47f2d94b37f052921066dddfa
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 603
|
py
|
def data(filename):
fi = open(filename,'r')
o = open(filename+".out",'w')
tests = fi.readline().strip()
a = fi.readlines()
for i in range(0,int(tests)):
c,f,x = map(float,a[i].strip().split())
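        # c = cost of one farm, f = extra production per farm, x = target total (roles inferred from the formulas below)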
nf = 1
t1 = x/2
t2 = c/2+x/(2+nf*f)
while (t1-t2 > 10**-7):
nf += 1
t1 = t2
t2 = buy(c,f,nf) + x/(2+nf*f)
o.write("Case #" + str(i+1) + ": %.7f\n" % t1)
fi.close()
o.close()
def buy(c,f,nf):
time = 0
for i in range(0,nf):
time += c/(2+i*f)
return time
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
d2c0be537b266e7b741920df715f2b942cf343bb
|
3437f523434b86af02476fc0056030a67feaa9a5
|
/examples/mpc_linear_svm/launcher.py
|
d88a1d33341b60a05cf1faa29f11c82ceea9e982
|
[
"MIT"
] |
permissive
|
facebookresearch/CrypTen
|
481d0bfc94582eedef8b3510d91fd6b3ce253097
|
99c3a046b705c9d69d7a10fcab59a444ffbee39a
|
refs/heads/main
| 2023-09-04T21:10:29.331999
| 2023-08-25T22:11:00
| 2023-08-25T22:11:00
| 202,443,088
| 1,388
| 323
|
MIT
| 2023-09-01T16:34:22
| 2019-08-15T00:00:31
|
Python
|
UTF-8
|
Python
| false
| false
| 2,518
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
To run mpc_linear_svm example in multiprocess mode:
$ python3 examples/mpc_linear_svm/launcher.py --multiprocess
To run mpc_linear_svm example on AWS EC2 instances:
$ python3 scripts/aws_launcher.py \
--ssh_key_file=$HOME/.aws/fair-$USER.pem \
--instances=i-038dd14b9383b9d79,i-08f057b9c03d4a916 \
--aux_files=examples/mpc_linear_svm/mpc_linear_svm.py \
examples/mpc_linear_svm/launcher.py
"""
import argparse
import logging
import os
from examples.multiprocess_launcher import MultiProcessLauncher
parser = argparse.ArgumentParser(description="CrypTen Linear SVM Training")
parser.add_argument(
"--world_size",
type=int,
default=2,
help="The number of parties to launch. Each party acts as its own process",
)
parser.add_argument(
"--epochs", default=50, type=int, metavar="N", help="number of total epochs to run"
)
parser.add_argument(
"--examples", default=50, type=int, metavar="N", help="number of examples per epoch"
)
parser.add_argument(
"--features",
default=100,
type=int,
metavar="N",
help="number of features per example",
)
parser.add_argument(
"--lr", "--learning-rate", default=0.5, type=float, help="initial learning rate"
)
parser.add_argument(
"--skip_plaintext",
default=False,
action="store_true",
help="skip evaluation for plaintext svm",
)
parser.add_argument(
"--multiprocess",
default=False,
action="store_true",
help="Run example in multiprocess mode",
)
def _run_experiment(args):
level = logging.INFO
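    # Only rank 0 logs at INFO; the other parties are silenced to CRITICAL to avoid duplicated output.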
if "RANK" in os.environ and os.environ["RANK"] != "0":
level = logging.CRITICAL
logging.getLogger().setLevel(level)
logging.basicConfig(
level=level,
format="%(asctime)s - %(process)d - %(name)s - %(levelname)s - %(message)s",
)
from mpc_linear_svm import run_mpc_linear_svm
run_mpc_linear_svm(
args.epochs, args.examples, args.features, args.lr, args.skip_plaintext
)
def main(run_experiment):
args = parser.parse_args()
if args.multiprocess:
launcher = MultiProcessLauncher(args.world_size, run_experiment, args)
launcher.start()
launcher.join()
launcher.terminate()
else:
run_experiment(args)
if __name__ == "__main__":
main(_run_experiment)
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
e9dbfe8104201e8d10606f7234b30f1427f85c8c
|
c39f999cae8825afe2cdf1518d93ba31bd4c0e95
|
/PYME/DSView/LUT/__init__.py
|
ad67a04cf4d26dc2ca675547218211dd1692f45c
|
[] |
no_license
|
WilliamRo/CLipPYME
|
0b69860136a9b2533f2f29fc29408d7471cb934d
|
6596167034c727ad7dad0a741dd59e0e48f6852a
|
refs/heads/master
| 2023-05-11T09:50:58.605989
| 2023-05-09T02:17:47
| 2023-05-09T02:17:47
| 60,789,741
| 3
| 1
| null | 2016-06-17T08:52:44
| 2016-06-09T16:30:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,089
|
py
|
#!/usr/bin/python
##################
# __init__.py
#
# Copyright David Baddeley, 2009
# d.baddeley@auckland.ac.nz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################
from lut import *
def applyLUT(seg, gain, offset, lut, ima):
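    # Dispatch on the input dtype to the matching applyLUT* implementation imported from the lut module.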
if seg.dtype == 'uint8':
applyLUTu8(seg, gain, offset, lut, ima)
elif seg.dtype == 'uint16':
#print lut.strides
applyLUTu16(seg, gain, offset, lut, ima)
else:
applyLUTf(seg.astype('f'), gain, offset, lut, ima)
|
[
"willi4m@zju.edu.cn"
] |
willi4m@zju.edu.cn
|
333f6a2ec3a9229e86c9318beb30008c51908041
|
591a1a5b334efc878d890c2492a2f1b6cf475b6c
|
/fixJobAccountant.py
|
41d6c79ffadfea61bdaa333e5338c42660fc5f43
|
[] |
no_license
|
amaltaro/ProductionTools
|
b827fa8a80006443b00004f90658791fdea4fc26
|
df85a4d4ae35e4f4c7523fcba9b22a1300329e06
|
refs/heads/master
| 2023-06-08T22:59:19.377928
| 2023-05-24T18:04:26
| 2023-05-24T18:04:26
| 22,791,862
| 0
| 3
| null | 2021-03-22T11:41:18
| 2014-08-09T17:55:03
|
Python
|
UTF-8
|
Python
| false
| false
| 2,519
|
py
|
"""
__fixJobAccountant.py__
Fixes Report.pkl files when JobAccountant crashes reporting that
TaskName does not exist in the FJR.
Created on Oct 15, 2014.
@author: amaltaro
"""
import sys, os, subprocess
import threading
import logging
import time
from pprint import pprint
from optparse import OptionParser
try:
from collections import defaultdict
from WMCore.WMInit import connectToDB
from WMCore.Database.DBFormatter import DBFormatter
from WMCore.FwkJobReport.Report import Report
except ImportError:
print "You do not have a proper environment, please source the following:"
print "source /data/srv/wmagent/current/apps/wmagent/etc/profile.d/init.sh"
sys.exit(1)
getQuery = """
SELECT wj.fwjr_path, ww.task FROM wmbs_workflow ww
INNER JOIN wmbs_subscription ws ON ws.workflow = ww.id
INNER JOIN wmbs_jobgroup wjg ON wjg.subscription = ws.id
INNER JOIN wmbs_job wj ON wj.jobgroup = wjg.id
WHERE wj.id = """
def main():
"""
_main_
"""
usage = "Usage: %prog -j jobId"
parser = OptionParser(usage = usage)
parser.add_option('-j', '--jobId', help = 'Wmbs jobId reported in the component log', dest = 'jobId')
(options, args) = parser.parse_args()
if not options.jobId:
        parser.error('You must provide at least one jobId')
print 'Example: python fixJobAccountant.py -j "1678 1679"'
sys.exit(1)
if 'WMAGENT_CONFIG' not in os.environ:
os.environ['WMAGENT_CONFIG'] = '/data/srv/wmagent/current/config/wmagent/config.py'
if 'manage' not in os.environ:
os.environ['manage'] = '/data/srv/wmagent/current/config/wmagent/manage'
connectToDB()
myThread = threading.currentThread()
formatter = DBFormatter(logging, myThread.dbi)
for job in options.jobId.split():
myQuery = getQuery + str(job)
output = myThread.transaction.processData(myQuery)
result = formatter.format(output)
reportPath = result[0][0]
taskName = result[0][1]
#print 'Report path: %s' % reportPath
#print 'Task name: %s' % taskName
jr = Report(reportPath)
if jr.getTaskName():
print "Job id %s already has a TaskName %s.\nSkipping .." % (job, jr.getTaskName())
continue
jr.setTaskName(taskName)
jr.save(reportPath)
print "Updated TaskName for fwjr for jobId: %s" % job
print "Done!"
return 0
if __name__ == '__main__':
sys.exit(main())
|
[
"alan.malta@cern.ch"
] |
alan.malta@cern.ch
|
38059ca498d3f9141b84156a522fd5bca676ddfa
|
0afc72deeb8928e6d488a1c3cb762ed0c4bd73fc
|
/scripts/02.transform/02.encode.py
|
da3e9cc96bd537a4a9ab5f8a61a53ebb8d4cb6cf
|
[] |
no_license
|
Asky-M/dscnf-06
|
cb5cd4a402938bcc53723dbc7bbf4e95b548c0f0
|
cbe021d568c94b14929759e905592a11cefc7626
|
refs/heads/master
| 2023-03-27T03:28:05.390087
| 2021-04-03T06:30:51
| 2021-04-03T06:30:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
# encode/decode base64 with codecs
import codecs
s = b"Satria Ady Pradana"
t = codecs.encode(s, "base64")
print(t)
u = codecs.decode(t, "base64")
print(u)
# encode/decode base64 with base64
import base64
s = b"Satria Ady Pradana"
t = base64.b64encode(s)
print(t)
u = base64.b64decode(t)
print(u)
# encode/decode hex
import binascii
key = b"\x17U\r\xda'US8\x99c\x80\x97\x83s\x9f\xd3"
print(key)
h = binascii.hexlify(key)
print(h)
u = binascii.unhexlify(h)
print(u)
|
[
"me@xathrya.id"
] |
me@xathrya.id
|
72abe96e55888b2f816aee1fbf0a969dc95b4989
|
ba602dc67ad7bb50133aeb312f3c6c54627b3dec
|
/data/3919/AC_py/508160.py
|
e46139c365eab6e0455d028613d93c2cc248b297
|
[] |
no_license
|
Dearyyyyy/TCG
|
0d21d89275906157372d775f33309ce337e6bc95
|
7b80de16de2d3f5d95a7c4ed95d45a9e38882e67
|
refs/heads/master
| 2020-12-27T23:19:44.845918
| 2020-02-04T01:59:23
| 2020-02-04T01:59:23
| 238,101,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
# coding=utf-8
import sys
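# Classify each triangle: DB = equilateral, DY = isosceles, ZJ = right-angled, PT = ordinary, ERROR = not a triangle (labels appear to be pinyin initials)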
while True:
a,b,c=map(int,input().split())
if(a+b>c and a+c>b and b+c>a):
if a==b and b==c:
print("DB")
else:
if a==b or b==c:
print('DY')
else:
if b*b+c*c==a*a or a*a+c*c==b*b or a*a+b*b==c*c:
print('ZJ')
else:
print("PT")
else:
print("ERROR")
|
[
"543271544@qq.com"
] |
543271544@qq.com
|
988e4a4ea56c347a5b6641d6283315ce241c7248
|
884a128552b5f7e698194de22e4a8b4fd43e5db6
|
/setup.py
|
f89439a6f5392e8b10d0a0504621730a8a2d2e6d
|
[
"Apache-2.0"
] |
permissive
|
naivenlp/naivenlp-legacy
|
b3057bdeb54bc54b1df3de8fd3eb5a1af909690b
|
dbe0d6ac3b422618fe41a763c256077b27f75347
|
refs/heads/master
| 2022-11-25T11:48:18.411506
| 2020-07-22T08:01:25
| 2020-07-22T08:01:25
| 269,890,191
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,163
|
py
|
import os
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="naivenlp",
version="0.0.9",
description="NLP toolkit, including tokenization, sequence tagging, etc.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/luozhouyang/naivenlp",
author="ZhouYang Luo",
author_email="zhouyang.luo@gmail.com",
packages=setuptools.find_packages(),
# include_package_data=True,
package_data={
},
install_requires=[
"jieba",
"numpy",
"strsimpy",
"fake_useragent",
"requests",
],
dependency_links=[
],
extras_require={
"tf": ["tensorflow>=2.2.0"]
},
license="Apache Software License",
classifiers=(
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
)
)
|
[
"zhouyang.luo@gmail.com"
] |
zhouyang.luo@gmail.com
|
8353cc7bb8452e9ef9ae1467ef3f8ec6c9d9f34e
|
2a24dba82767419cf7d2269875bf0a297f41580c
|
/vispy/app/backends/_pyside.py
|
00949a4951a577f056a9c62b2874f44b3055f725
|
[
"BSD-3-Clause",
"LicenseRef-scancode-public-domain"
] |
permissive
|
shjoshi/vispy
|
58b300d23486b7478b786977b3548dd7225de847
|
2f3d169aa60c738467e766c59096f51570483d6f
|
refs/heads/master
| 2020-12-25T12:40:36.545768
| 2014-08-06T22:59:35
| 2014-08-06T22:59:35
| 22,704,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 940
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
""" PySide proxy backend for the qt backend.
"""
import sys
from .. import backends
try:
# Try importing
from PySide import QtGui, QtCore, QtOpenGL # noqa
except Exception as exp:
# Fail: this backend cannot be used
available, testable, why_not, which = False, False, str(exp), None
else:
# Success
available, testable, why_not = True, True, None
has_uic = False
import PySide
which = ('PySide', PySide.__version__, QtCore.__version__)
# Remove _qt module to force an import even if it was already imported
sys.modules.pop(__name__.replace('_pyside', '_qt'), None)
# Import _qt. Keep a ref to the module object!
backends.qt_lib = 'pyside' # Signal to _qt what it should import
from . import _qt # noqa
from ._qt import * # noqa
|
[
"almar.klein@gmail.com"
] |
almar.klein@gmail.com
|
d33c3d0bc0f509999dd2ca1132e50bf6291c76f8
|
4ebdc7053d9341ce7ad45f1e859ff86ef1455177
|
/52_global.py
|
e2b5ca2d678a6f222101e9a4cc221547301aa89d
|
[] |
no_license
|
daikiante/python
|
1f4d55e1fd04eef22702b364148b8e1a2beea2d3
|
9d604b8dcd9e3cbe8b4db24ef16c5c969f6f894f
|
refs/heads/master
| 2020-09-17T00:14:24.034179
| 2019-12-02T09:03:25
| 2019-12-02T09:03:25
| 223,928,994
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
# global
'''
A global variable cannot be assigned directly inside a def function (local scope).
To modify a global variable, declare it with `global` inside the local scope first.
'''
name = 'lohit'
age = 20
def person():
global age
print('my age is',age)
per = 'lohit badiger'
print('my name is',per)
person()
print('global name are',name)
print('global age is',age)
# Exsample
num1 = 10
num2 = 3000
def sum():
global num1
print(num1 + num2)
sum()
print(num1 + num2)
|
[
"daikiante@gmail.com"
] |
daikiante@gmail.com
|
b113e7e6e71c42480977c18e82a7bf4d3ecbfc8a
|
2e10314f0a6a32cbfdce6b80c7767b84de421741
|
/精品真题/精品-one.py
|
e2135999ef9f92009ca10a79d4df38384cd13fdb
|
[] |
no_license
|
tang1323/Ing_Interview
|
06a9cb19c932b2852dd55655b0d46b814ffa9095
|
a1068d3739d2088a2edcf8314e18659e0e9003f8
|
refs/heads/master
| 2023-04-06T14:17:37.757618
| 2021-04-14T14:14:01
| 2021-04-14T14:14:01
| 357,929,558
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,093
|
py
|
# def add_Run(L=None):
# if L is None:
# L = []
# L.append('Run')
# return L
# add_Run()
# add_Run()
# print(add_Run(['Lying']))
# ds = {'av':2, 'vr':4, 'ls':9, 'path':6}
# print(ds.popitem(), len(ds))
# with open('D:/Py-Project/Ing_Interview/精品真题/txt/a', 'r') as f:
# print(f.read().split(','))
# aaa = [8, 5, 2, 2]
# with open('D:/Py-Project/Ing_Interview/精品真题/txt/output', 'w') as f:
# for aa in aaa:
# f.write(';'.join.str(aa))
# x, y = 1, 2
# while x < 20:
# x, y = y, x + y
# print(x)
# ls = [2, 0, 6]
# x = 100
# try:
# for i in ls:
# y = 100 // i
# print(y)
# except:
# print('error')
# import random as r
# zmb = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz'
# r.seed(1)
# code = ''
# for i in range(4):
# code += r.choice(zmb)
# print(code)
# import turtle as t
#
# color = ['red','pink','green']
# ra = [20, 50, 100]
# for i in range(3):
# t.pu()
# t.goto(0, -ra[i])
# t.pd()
# t.pencolor(color[i])
# t.circle(ra[i])
# t.done()
|
[
"1171242903@qq.com"
] |
1171242903@qq.com
|
737ec987dfe8f44ec60ce95839fb21130c803793
|
2a1a175efc9c482db0e6d96569f92b9583990acc
|
/eventex/subscriptions/tests/test_view_new.py
|
351daeb6ab3b8abda88f2861141510e7c1378d8c
|
[] |
no_license
|
mazulo/wttd_eventex
|
2e97e3724f2b8396b8cc73175d15defd09b4a86b
|
691008562d2143cc57c8b4bb5042aa2c1fdc6602
|
refs/heads/master
| 2021-01-10T07:29:20.343157
| 2016-03-16T18:21:10
| 2016-03-16T18:21:10
| 48,304,195
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,017
|
py
|
from django.core import mail
from django.test import TestCase
from django.shortcuts import resolve_url as r
from eventex.subscriptions.forms import SubscriptionForm
from eventex.subscriptions.models import Subscription
class SubscriptionsNewGet(TestCase):
def setUp(self):
self.resp = self.client.get(r('subscriptions:new'))
def test_get(self):
"""GET /inscricao/ must return status code 200"""
self.assertEqual(200, self.resp.status_code)
def test_template(self):
"""Must use subscriptions/subscription_form.html"""
self.assertTemplateUsed(
self.resp,
'subscriptions/subscription_form.html'
)
def test_html(self):
"""Html must contain input tags"""
tags = (
('<form', 1),
('<input', 6),
('type="text"', 3),
('type="email"', 1),
('type="submit"', 1),
)
for text, count in tags:
with self.subTest():
self.assertContains(self.resp, text, count)
def test_csrf(self):
"""Html must contain csrf"""
self.assertContains(self.resp, 'csrfmiddlewaretoken')
def test_has_form(self):
"""Context must have subscription form"""
form = self.resp.context['form']
self.assertIsInstance(form, SubscriptionForm)
class SubscriptionsNewPost(TestCase):
def setUp(self):
data = dict(name='Patrick Mazulo', cpf='03286218383',
email='pmazulo@gmail.com', phone='86-99988-7848')
self.resp = self.client.post(r('subscriptions:new'), data)
def test_post(self):
"""Valid POST should redirect to /inscricao/1/"""
self.assertRedirects(self.resp, r('subscriptions:detail', 1))
def test_send_subscribe(self):
self.assertEqual(1, len(mail.outbox))
def test_save_subscription(self):
self.assertTrue(Subscription.objects.exists())
class SubscriptionsNewPostInvalid(TestCase):
def setUp(self):
self.resp = self.client.post(r('subscriptions:new'), {})
def test_post(self):
"""Invalid POST should not redirect"""
self.assertEqual(200, self.resp.status_code)
def test_template(self):
self.assertTemplateUsed(self.resp,
'subscriptions/subscription_form.html')
def test_has_form(self):
form = self.resp.context['form']
self.assertIsInstance(form, SubscriptionForm)
def test_form_has_errors(self):
form = self.resp.context['form']
self.assertTrue(form.errors)
def test_dont_save_subscription(self):
self.assertFalse(Subscription.objects.exists())
class TestTemplateRegressionTest(TestCase):
def test_template_has_non_field_errors(self):
invalid_data = dict(name='Patrick Mazulo', cpf='03286218383')
response = self.client.post(r('subscriptions:new'), invalid_data)
self.assertContains(response, '<ul class="errorlist nonfield">')
|
[
"pmazulo@gmail.com"
] |
pmazulo@gmail.com
|
327bf3ff951ee285a77e0a2dfa30a0a852ac1426
|
cceb97ce3d74ac17090786bc65f7ed30e37ad929
|
/server/newfirst/migrations/0005_auto_20201024_0316.py
|
baaa7f017786874e8c0a9b6e7a9c50db448d3ef2
|
[] |
no_license
|
Catxiaobai/project
|
b47310efe498421cde794e289b4e753d843c8e40
|
76e346f69261433ccd146a3cbfa92b4e3864d916
|
refs/heads/master
| 2023-01-08T04:37:59.232492
| 2020-11-10T12:00:34
| 2020-11-10T12:00:34
| 291,014,545
| 1
| 4
| null | 2020-11-09T01:22:11
| 2020-08-28T10:08:16
|
Python
|
UTF-8
|
Python
| false
| false
| 424
|
py
|
# Generated by Django 3.1.1 on 2020-10-23 19:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('newfirst', '0004_scenes'),
]
operations = [
migrations.RemoveField(
model_name='item',
name='item_date',
),
migrations.RemoveField(
model_name='item',
name='item_leader',
),
]
|
[
"2378960008@qq.com"
] |
2378960008@qq.com
|
91db8116494945ac4447f2c14fec8b83a4d5f470
|
66d184a2b36ab1db564305ea36be891aaf0e236b
|
/py/Python_Crash_Course/project2/two_d8.py
|
52743e7fbb2329663e1615be5f979d1fb0082ff0
|
[] |
no_license
|
joyDDT/python_code
|
bef57936a1167fa65e28b6c52ab7857b34dc74a8
|
3aae56c51660579a4eaaa087ac2459c9bf2f2e23
|
refs/heads/master
| 2021-10-30T10:22:21.328633
| 2019-04-26T04:45:01
| 2019-04-26T04:45:01
| 112,004,435
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
import pygal
from die import Die
# Create two D8 dice
die_1 = Die(8)
die_2 = Die(8)
# Roll the dice many times and store the results in a list
results = [ ]
for roll_num in range(1000):
result = die_1.roll( ) + die_2.roll( )
results.append(result)
# Analyze the results
frequencies = [ ]
max_num = die_1.num_sides + die_2.num_sides
for value in range(2, max_num+1):
frequency = results.count(value)
frequencies.append(frequency)
# Visualize the results
hist = pygal.Bar( )
hist.title = 'Results of rolling two D8 1000 times.'
hist.x_labels = [x for x in range(2, max_num+1)]
hist.x_title = 'Result'
hist.y_title = 'Frequency of Result'
hist.add('D8+D8', frequencies)
hist.render_to_file('two_d8.svg')
|
[
"15894500833@163.com"
] |
15894500833@163.com
|
05f3cbf560213a1004237cd81daf92637628f3b9
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/panpanpandas_ultrafinance/ultrafinance-master/deprecated/example/usTradingStrategy.py
|
ed1e6d309c7080a44956603b6c80661f73986aa1
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 216
|
py
|
'''
Created on July 17, 2010
@author: ppa
'''
from ultrafinance.processChain.processChain import runProcessChain
if __name__ == '__main__':
configFile1 = 'usTradingStrategy.ini'
runProcessChain(configFile1)
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
819c11fb2ff6e9bbda0cb03380c26525458095b7
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-gsn-edf/gsn-edf_ut=2.5_rd=0.8_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=35/sched.py
|
a3b742d5b6b87242902b200cb99b1c50add5a6e7
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
-X FMLP -Q 0 -L 3 120 400
-X FMLP -Q 0 -L 3 93 400
-X FMLP -Q 0 -L 3 80 400
-X FMLP -Q 1 -L 2 73 400
-X FMLP -Q 1 -L 2 63 250
-X FMLP -Q 2 -L 1 55 200
-X FMLP -Q 2 -L 1 45 400
-X FMLP -Q 3 -L 1 35 125
-X FMLP -Q 3 -L 1 35 150
22 100
21 100
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
e330b2c4ec0d709e389aa70c7b230a248e40cdff
|
500b03fa6cb776c1d51db4a3a3aa252ddf5a50e6
|
/book_exercise/py_intro/basics/Chapter 3: Numbers/name_random.py
|
f71fc4d460bda5325b87858fc9109a256951f46c
|
[] |
no_license
|
carloslvm/learning-python
|
b3796a0a5b751baae8c551a9f6fe262f98980691
|
07f885454cf21b7d215a58da7fcb907715e546bd
|
refs/heads/master
| 2022-07-27T21:39:11.937801
| 2022-07-09T17:47:56
| 2022-07-09T17:47:56
| 163,447,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
#!/usr/bin/python3
# Print a name a random number of times (between 1 and 10).
from random import randint
for _ in range(randint(1, 10)):
    name = 'David'
    print(name)
|
[
"cvaldez553@gmail.com"
] |
cvaldez553@gmail.com
|
2c3b054d93f45003c87f98cb0129da2c90a07b02
|
5551361c02ee4a78036e2452fea615fc912f406b
|
/tut4.py
|
465358ee61083a2301be6d3e8df1a5bc8be26084
|
[
"MIT"
] |
permissive
|
Demfier/cs116-tut-solns
|
3d93752e4ca2d3debbb36c901a13e7201e5bf0fe
|
8277dae848ebf66522245fe15492ab91e48dac93
|
refs/heads/master
| 2020-12-13T13:32:52.551815
| 2020-02-14T05:25:45
| 2020-02-14T05:25:45
| 234,433,306
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,503
|
py
|
# This file contains solutions to CS116, Tutorial 4
import math
import check
# CQ1: E)
def create_cards(values, suits):
"""
Purpose, Contracts & Requirements, and Examples go here
"""
return list(map(lambda x, y: [x, y], values, suits))
def create_cards_alt(values, suits):
"""
Purpose, Contracts & Requirements, and Examples go here
"""
    return list(map(lambda i: [values[i], suits[i]], range(len(values))))
# Tests for create_cards go here
def choose_by_color(loC, color): # Abs. list impl. (really unoptimized!!)
"""
Purpose, Contracts & Requirements, and Examples go here
"""
if color == 'red':
lookup_list = ['diamonds', 'hearts']
else:
lookup_list = ['spades', 'clubs']
return list(map(lambda x: x[0], filter(lambda x: x[1] in lookup_list, loC)))
def filter_and_convert(loC, lookup_list, val_list):
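    # Recursively collect the values of cards whose suit is in lookup_list, accumulating into val_list.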
if loC == []:
return val_list
if loC[0][1] in lookup_list:
val_list.append(loC[0][0])
return filter_and_convert(loC[1:], lookup_list, val_list)
def choose_by_color(loC, color): # recursive impl.
"""
Purpose, Contracts & Requirements, and Examples go here
"""
if color == 'red':
lookup_list = ['diamonds', 'hearts']
elif color == 'black':
lookup_list = ['spades', 'clubs']
return filter_and_convert(loC, lookup_list, [])
# Tests for choose_by_color go here
def flip_color(c): # fancy impl.
"""
Purpose, Contracts & Requirements, and Examples go here
"""
flip_list_1 = ['hearts', 'spades']
flip_list_2 = ['diamonds', 'clubs']
# new_index = len(flip_list) - index of curr suit in flip_list - 1
if c[1] in flip_list_1:
new_index = 1-flip_list_1.index(c[1])
c[1] = flip_list_1[new_index]
else:
new_index = 1-flip_list_2.index(c[1])
c[1] = flip_list_2[new_index]
def flip_color(c): # bland impl.
"""
Purpose, Contracts & Requirements, and Examples go here
"""
if c[1] == 'spades':
c[1] = 'hearts'
elif c[1] == 'hearts':
c[1] = 'spades'
elif c[1] == 'diamonds':
c[1] = 'clubs'
else:
c[1] = 'diamonds'
# Tests for flip_color go here
def flip_hand_helper(loC, pos):
if pos == len(loC) or loC == []:
return loC
flip_color(loC[pos])
return flip_hand_helper(loC, pos+1)
def flip_hand(loC):
return flip_hand_helper(loC, 0)
# Tests for flip_hand go here
def last_occ_index(list_of_vals, val, pos):
if pos < 0:
return -1
if list_of_vals[pos] == val:
return pos
return last_occ_index(list_of_vals, val, pos-1)
def modify_list(nums, n):
"""
Purpose, Contracts & Requirements, and Examples go here
"""
if n not in nums:
nums.append(n)
elif nums.count(n) == 1:
nums.remove(n)
elif nums.count(n) >= 2:
nums.remove(n)
nums.pop(last_occ_index(nums, n, len(nums) - 1))
# Tests for modify_list go here
def sanitize(s):
"""
Purpose, Contracts & Requirements, and Examples go here
"""
return ''.join(list(filter(lambda c: c.isalnum(), s)))
# Tests for sanitize go here
def reversed_list(L):
"""
Purpose, Contracts & Requirements, and Examples go here
"""
return list(map(lambda i: L[-(i+1)], range(len(L))))
def reversed_list_alt(L):
"""
Purpose, Contracts & Requirements, and Examples go here
"""
return list(map(L.pop, [-1]*len(L)))
# Tests for reversed_list go here
|
[
"sahu.gaurav719@gmail.com"
] |
sahu.gaurav719@gmail.com
|
2cc437f24c473125f7825b073b35dbc910657b40
|
963cac9e78c4b742f7e7800200de8d1582799955
|
/lib/veetou/pzh/pzhmodel_.py
|
fe393d78cc1da6d7aec46d2741a126f14b156e44
|
[] |
no_license
|
ptomulik/veetou
|
c79ceb3ca3d7ef7b261b2219489b6f0a7a83e1fa
|
b30be2a604f4426f832ec9805547ecd6cc9083fe
|
refs/heads/master
| 2021-01-22T17:28:57.271251
| 2019-01-05T01:46:43
| 2020-05-04T16:23:44
| 85,016,513
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,677
|
py
|
# -*- coding: utf8 -*-
"""`veetou.pzh.pzmodel_`
Defines data model for pzh (Protokół Zaliczeń - HTML)
"""
from veetou.model import *
__all__ = ( 'PzHReport',
'PzHPreamble',
'PzHTr',
'PzHSummary',
'PzHDataModel' )
##def strn(s=None):
## if s is None:
## return None
## else:
## return str(s)
PzHReport = declare( DataType, 'PzhReport',
('source', 'datetime'),
# 5 * (strn,),
plural = 'PzhReports'
)
PzHPreamble = declare( DataType, 'PzhPreamble',
( 'title', 'sheet_id', 'semester_code', 'sheet_serie',
'sheet_number', 'sheet_type', 'sheet_state', 'subj_name',
'subj_department', 'subj_code', 'subj_grade_type', 'subj_tutor',
'return_date', 'approved_by', 'modified_datetime', 'modified_date',
'modified_time', 'return_deadline'),
## 17 * (strn,),
plural = 'PzhPreambles'
)
PzHTr = declare( DataType, 'PzhTr',
( 'tr_ord_no', 'student_name', 'student_index', 'subj_grade',
'subj_grade_final', 'subj_grade_project', 'subj_grade_lecture',
'subj_grade_class', 'subj_grade_lab', 'subj_grade_seminar',
'subj_grade_p', 'subj_grade_n', 'edited_by', 'edited_datetime',
'edited_date', 'edited_time' ),
## 16 * (strn,),
plural = 'PzhTrs'
)
PzHSummary = declare( DataType, 'PzhSummary',
( 'caption', 'th', 'content' ),
## 3 * (strn,),
plural = 'PzhSummaries'
)
class PzHDataModel(DataModel):
_datatypes = ( PzHReport,
PzHPreamble,
PzHTr,
PzHSummary )
def _mk_initial_tables(self):
tables = map( lambda t: (tablename(t), t), map(lambda dt : tableclass(dt)(), self._datatypes))
self.tables.update(tables)
def _mk_initial_relations(self):
strings = ( ( 'pzh_report_preamble', ('pzh_reports', 'pzh_preambles'), ('pzh_preamble', 'pzh_report') ),
( 'pzh_report_trs', ('pzh_reports', 'pzh_trs'), ('pzh_trs', 'pzh_report') ) )#,
#( 'report_summary', ('reports', 'summaries'), ('summary', 'report') ) )
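        # Build named Junction relations from (relation name, (table names), (attribute names)) triples.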
relations = map( lambda x : (x[0],Junction(map(self.tables.__getitem__,x[1]),x[2])), strings )
self.relations.update(relations)
def __init__(self):
super().__init__()
self._mk_initial_tables()
self._mk_initial_relations()
@property
def prefix(self):
return 'pzh_'
# Local Variables:
# # tab-width:4
# # indent-tabs-mode:nil
# # End:
# vim: set syntax=python expandtab tabstop=4 shiftwidth=4:
|
[
"ptomulik@meil.pw.edu.pl"
] |
ptomulik@meil.pw.edu.pl
|
02afa1f3ac1912f2e42968b1a9f8c171135d839e
|
fa795af74cda4d92604fa3332179ba939460a9b5
|
/JUBioactivities/QSARDB/Papa_Property_pkNO3_Degradation_by_NO3_radicals_as_logkNO3/__init__.py
|
ed8bd3df2d2321a9d042e6cc65b02e98c183d8a1
|
[] |
no_license
|
JenniferHemmerich/JUBioactivities
|
7329a89db0e2790aff9bcfe153ab4dcd2c19a489
|
87054ac135d91e034dcfb6028562b4a7930a3433
|
refs/heads/master
| 2020-04-26T03:56:36.177955
| 2019-03-07T13:08:08
| 2019-03-07T13:08:08
| 173,284,341
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,003
|
py
|
import os.path
import glob
import pandas as pd
import numpy as np
from ... import utils
__data_src__ = list(sorted(glob.glob(os.path.join(__path__[0], "compounds/0*.mol"))))
__data_src__ += [os.path.join(__path__[0], "properties/pkNO3.txt")]
def read_data(raw=False):
df = pd.DataFrame({'pkNO3_Index_Papa': np.loadtxt(__data_src__[-1], usecols=1, skiprows=1, delimiter='\t')},
index=__data_src__[:-1])
inchi_index = utils.convert_index(df.index, filenames=True)
df.index = inchi_index
if raw:
return df
df = utils.drop_rows(df)
df = utils.handle_duplicates(df, type='cont')
return df
def read_structures(raw=False):
df = pd.DataFrame(index=__data_src__[:-1])
df = utils.get_smiles_from_index(df, filenames=True)
inchi_index = utils.convert_index(df.index, filenames=True)
df.index = inchi_index
if raw:
return df
df = utils.drop_rows(df)
df = utils.handle_duplicates(df, type='str')
return df
|
[
"jennifer.hemmerich@univie.ac.at"
] |
jennifer.hemmerich@univie.ac.at
|
c0d9310f0cd5790e4e0888b620c63cf325dc4d58
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-aom/huaweicloudsdkaom/v2/model/list_log_items_response.py
|
4e3d6377a22a133cbef0e053c955f2b4d0817543
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 5,115
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListLogItemsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'error_code': 'str',
'error_message': 'str',
'result': 'str'
}
attribute_map = {
'error_code': 'errorCode',
'error_message': 'errorMessage',
'result': 'result'
}
def __init__(self, error_code=None, error_message=None, result=None):
"""ListLogItemsResponse
The model defined in huaweicloud sdk
        :param error_code: Response code; SVCSTG_AMS_2000000 indicates a normal response.
:type error_code: str
        :param error_message: Description of the response message.
:type error_message: str
        :param result: Metadata of the query result, including the total count and the result set.
:type result: str
"""
super(ListLogItemsResponse, self).__init__()
self._error_code = None
self._error_message = None
self._result = None
self.discriminator = None
if error_code is not None:
self.error_code = error_code
if error_message is not None:
self.error_message = error_message
if result is not None:
self.result = result
@property
def error_code(self):
"""Gets the error_code of this ListLogItemsResponse.
        Response code; SVCSTG_AMS_2000000 indicates a normal response.
:return: The error_code of this ListLogItemsResponse.
:rtype: str
"""
return self._error_code
@error_code.setter
def error_code(self, error_code):
"""Sets the error_code of this ListLogItemsResponse.
        Response code; SVCSTG_AMS_2000000 indicates a normal response.
:param error_code: The error_code of this ListLogItemsResponse.
:type error_code: str
"""
self._error_code = error_code
@property
def error_message(self):
"""Gets the error_message of this ListLogItemsResponse.
        Description of the response message.
:return: The error_message of this ListLogItemsResponse.
:rtype: str
"""
return self._error_message
@error_message.setter
def error_message(self, error_message):
"""Sets the error_message of this ListLogItemsResponse.
        Description of the response message.
:param error_message: The error_message of this ListLogItemsResponse.
:type error_message: str
"""
self._error_message = error_message
@property
def result(self):
"""Gets the result of this ListLogItemsResponse.
        Metadata of the query result, including the total count and the result set.
:return: The result of this ListLogItemsResponse.
:rtype: str
"""
return self._result
@result.setter
def result(self, result):
"""Sets the result of this ListLogItemsResponse.
        Metadata of the query result, including the total count and the result set.
:param result: The result of this ListLogItemsResponse.
:type result: str
"""
self._result = result
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListLogItemsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
fc80dafb1d1d6e219b60357d8bd2a5f407c26ca4
|
228ebc9fb20f25dd3ed2a6959aac41fd31314e64
|
/google/cloud/aiplatform_v1beta1/types/index.py
|
289ef763b8977f8503af013acbc9cfaa2abd7f63
|
[
"Apache-2.0"
] |
permissive
|
orionnye/python-aiplatform
|
746e3df0c75025582af38223829faeb2656dc653
|
e3ea683bf754832340853a15bdb0a0662500a70f
|
refs/heads/main
| 2023-08-03T06:14:50.689185
| 2021-09-24T03:24:14
| 2021-09-24T03:24:14
| 410,091,957
| 1
| 0
|
Apache-2.0
| 2021-09-24T20:21:01
| 2021-09-24T20:21:00
| null |
UTF-8
|
Python
| false
| false
| 5,038
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1beta1.types import deployed_index_ref
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1beta1", manifest={"Index",},
)
class Index(proto.Message):
r"""A representation of a collection of database items organized
    in a way that allows for approximate nearest neighbor (ANN)
    search algorithms.
Attributes:
name (str):
Output only. The resource name of the Index.
display_name (str):
Required. The display name of the Index.
The name can be up to 128 characters long and
            can consist of any UTF-8 characters.
description (str):
The description of the Index.
metadata_schema_uri (str):
Immutable. Points to a YAML file stored on Google Cloud
Storage describing additional information about the Index,
that is specific to it. Unset if the Index does not have any
additional information. The schema is defined as an OpenAPI
3.0.2 `Schema
Object <https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject>`__.
Note: The URI given on output will be immutable and probably
different, including the URI scheme, than the one given on
input. The output URI will point to a location where the
user only has a read access.
metadata (google.protobuf.struct_pb2.Value):
An additional information about the Index; the schema of the
metadata can be found in
[metadata_schema][google.cloud.aiplatform.v1beta1.Index.metadata_schema_uri].
deployed_indexes (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedIndexRef]):
Output only. The pointers to DeployedIndexes
created from this Index. An Index can be only
deleted if all its DeployedIndexes had been
undeployed first.
etag (str):
Used to perform consistent read-modify-write
updates. If not set, a blind "overwrite" update
happens.
labels (Sequence[google.cloud.aiplatform_v1beta1.types.Index.LabelsEntry]):
The labels with user-defined metadata to
organize your Indexes.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this Index was
created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this Index was most recently
updated. This also includes any update to the contents of
the Index. Note that Operations working on this Index may
have their
[Operations.metadata.generic_metadata.update_time]
[google.cloud.aiplatform.v1beta1.GenericOperationMetadata.update_time]
a little after the value of this timestamp, yet that does
not mean their results are not already reflected in the
Index. Result of any successfully completed Operation on the
Index is reflected in it.
"""
name = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
description = proto.Field(proto.STRING, number=3,)
metadata_schema_uri = proto.Field(proto.STRING, number=4,)
metadata = proto.Field(proto.MESSAGE, number=6, message=struct_pb2.Value,)
deployed_indexes = proto.RepeatedField(
proto.MESSAGE, number=7, message=deployed_index_ref.DeployedIndexRef,
)
etag = proto.Field(proto.STRING, number=8,)
labels = proto.MapField(proto.STRING, proto.STRING, number=9,)
create_time = proto.Field(
proto.MESSAGE, number=10, message=timestamp_pb2.Timestamp,
)
update_time = proto.Field(
proto.MESSAGE, number=11, message=timestamp_pb2.Timestamp,
)
__all__ = tuple(sorted(__protobuf__.manifest))
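# Illustrative usage sketch (not part of the generated file; the resource name
# below is a made-up example). proto-plus messages accept keyword arguments:
#
#   idx = Index(
#       name="projects/my-project/locations/us-central1/indexes/123",
#       display_name="demo-index",
#   )
#   print(idx.display_name)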
|
[
"noreply@github.com"
] |
orionnye.noreply@github.com
|
a0b4d2b6558019c0e406f1ef097a97fcefb6b50f
|
e5f49057eac43349a7fa999d90cb951e49617440
|
/filter/docclass.py
|
e6a113f9f13d3049b9b891fe7adaa77184535832
|
[] |
no_license
|
Hsingmin/CI_py2.7
|
2ae9464c687a1ecfadc7928c6e4915d828ffc10e
|
ef2906755d498a054beec20a99c4784351816cce
|
refs/heads/master
| 2021-08-30T06:23:09.630058
| 2017-12-16T13:01:19
| 2017-12-16T13:01:19
| 110,184,772
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,701
|
py
|
# docclass.py
import re
import math
def sampletrain(c1):
c1.train('Nobody owns the water.', 'good')
c1.train('the quick rabbit jumps fences', 'good')
c1.train('buy pharmaceuticals now', 'bad')
c1.train('make quick money at the online casino', 'bad')
c1.train('the quick brown fox jumps', 'good')
def getwords(doc):
    # Split on runs of non-word characters; '\W+' avoids the empty matches
    # that '\W*' produces on newer Python versions.
    splitter = re.compile('\\W+')
    words = [s.lower() for s in splitter.split(doc) if len(s) > 2 and len(s) < 20]
    # Map each unique word to 1: presence matters here, not frequency.
    return dict([w, 1] for w in words)
class classifier:
def __init__(self, getfeatures, filename = None):
self.fc = {}
self.cc = {}
self.getfeatures = getfeatures
self.thresholds = {}
def incf(self, f, cat):
self.fc.setdefault(f, {})
self.fc[f].setdefault(cat, 0)
self.fc[f][cat] += 1
def incc(self, cat):
self.cc.setdefault(cat, 0)
self.cc[cat] += 1
def fcount(self, f, cat):
if f in self.fc and cat in self.fc[f]:
return float(self.fc[f][cat])
return 0.0
def catcount(self, cat):
if cat in self.cc:
return float(self.cc[cat])
return 0
def totalcount(self):
return sum(self.cc.values())
def categories(self):
return self.cc.keys()
def train(self, item, cat):
features = self.getfeatures(item)
for f in features:
self.incf(f, cat)
self.incc(cat)
def fprob(self, f, cat):
if self.catcount(cat) == 0:
return 0
return self.fcount(f, cat)/self.catcount(cat)
def weightedprob(self, f, cat, prf, weight=1.0, ap=0.5):
basicprob = prf(f, cat)
totals = sum([self.fcount(f,c) for c in self.categories()])
bp = ((weight*ap) + (totals*basicprob))/(weight+totals)
return bp
def setthresholds(self, cat, t):
self.thresholds[cat] = t
def getthresholds(self, cat):
if cat not in self.thresholds:
return 1.0
return self.thresholds[cat]
class naivebayes(classifier):
def docprob(self, item, cat):
features = self.getfeatures(item)
p = 1
for f in features:
p *= self.weightedprob(f, cat, self.fprob)
return p
def prob(self, item, cat):
catprob = self.catcount(cat)/self.totalcount()
docprob = self.docprob(item, cat)
return catprob*docprob
def classify(self, item, default = None):
probs = {}
max = 0.0
for cat in self.categories():
probs[cat] = self.prob(item, cat)
if probs[cat] > max:
max = probs[cat]
best = cat
for cat in probs:
if cat == best:
continue
if probs[cat]*self.getthresholds(best)>probs[best]:
return default
return best
class fisherclassifier(classifier):
def __init__(self, getfeatures):
classifier.__init__(self, getfeatures)
self.minimums = {}
def setminimum(self, cat, min):
self.minimums[cat] = min
def getminimum(self, cat):
if cat not in self.minimums:
return 0
return self.minimums[cat]
def classify(self, item, default = None):
best = default
max = 0.0
for c in self.categories():
p = self.fisherprob(item, c)
if p > self.getminimum(c) and p > max:
best = c
max = p
return best
def cprob(self, f, cat):
clf = self.fprob(f, cat)
if clf == 0:
return 0
freqsum = sum([self.fprob(f, c) for c in self.categories()])
p = clf/freqsum
return p
def fisherprob(self, item, cat):
p = 1
features = self.getfeatures(item)
for f in features:
p *= (self.weightedprob(f, cat, self.cprob))
fscores = -2*math.log(p)
return self.invchi2(fscores, len(features)*2)
def invchi2(self, chi, df):
m = chi / 2.0
sum = term = math.exp(-m)
for i in range(1, df//2):
term *= m/i
sum += term
return min(sum, 1.0)
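# Illustrative usage sketch (added for clarity, not in the original file):
# train the naive Bayes classifier on the toy corpus and classify two phrases.
def _demo():
    c1 = naivebayes(getwords)
    sampletrain(c1)
    print(c1.classify('quick rabbit', default='unknown'))   # expected: 'good'
    print(c1.classify('quick money', default='unknown'))    # expected: 'bad'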
|
[
"alfred_bit@sina.cn"
] |
alfred_bit@sina.cn
|
86aa8e4a31017d6d63b19ac4cd3b040d922f3902
|
353def93fa77384ee3a5e3de98cfed318c480634
|
/.history/week01/homework02/maoyanspiders/maoyanspiders/spiders/movies_20200628181659.py
|
ef1424ea237252dfb40fa01bde4bf24ab2c06ba7
|
[] |
no_license
|
ydbB/Python001-class01
|
d680abc3ea1ccaeb610751e3488421417d381156
|
ad80037ccfc68d39125fa94d2747ab7394ac1be8
|
refs/heads/master
| 2022-11-25T11:27:45.077139
| 2020-07-19T12:35:12
| 2020-07-19T12:35:12
| 272,783,233
| 0
| 0
| null | 2020-06-16T18:28:15
| 2020-06-16T18:28:15
| null |
UTF-8
|
Python
| false
| false
| 2,603
|
py
|
# -*- coding: utf-8 -*-
import scrapy
from maoyanspiders.items import MaoyanspidersItem
import lxml.etree
from bs4 import BeautifulSoup as bs
class MoviesSpider(scrapy.Spider):
name = 'movies'
allowed_domains = ['maoyan.com']
start_urls = ['http://maoyan.com/board/4']
header = {
'Content-Type': 'text/plain; charset=UTF-8',
'Cookie' : '__mta=251934006.1593072991075.1593305918113.1593310282256.42; uuid_n_v=v1; uuid=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; _csrf=8557626db9b655cf9050ae7e5b2aab69278c8061c21eca95e1c3cf2130b0b64c; _lxsdk_cuid=172ea8cb247c8-0a73066b1c0a8b-4353760-100200-172ea8cb248c8; _lxsdk=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; mojo-uuid=c457eacb7c1eb59d3d2f6c1f8d75b9c9; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1593072989,1593073002; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; __mta=251934006.1593072991075.1593140975947.1593145813576.21; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1593310282; _lxsdk_s=172f8db8281-bbf-e4f-981%7C%7C1',
# 'Host' : 'http://www.baidu.com',
'Origin': 'https://maoyan.com',
'Referer': 'https://maoyan.com/board/4',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
}
# def parse(self, response):
# pass
def start_requests(self):
url = f'https://maoyan.com/board/4'
yield scrapy.Request(url=url,headers=self.header,callback=self.parse)
    def parse(self, response):
        soup = bs(response.text, 'html.parser')
for i in soup.find_all('div',attrs={'class' : 'movie-item-info'}):
item = MaoyanspidersItem()
title = i.find('p',attrs={'class':'name'}).find('a')
name = title.get('title')
link = 'https://maoyan.com'+ title.get('href')
time = i.find('p',attrs={'class' : 'releasetime'}).text
item['films_name'] = name
item['release_time'] = time
print(link)
yield scrapy.Request(url=link, headers = self.header, meta={'item':item},callback=self.parse1)
def parse1(self, response):
item = response.meta['item']
        # Parse the detail page itself, not a local file path string.
        soup = bs(response.text, 'html.parser')
        films_type = soup.find('div', attrs={'class': 'banner'}).find_all('li')[0].text.replace('\n', ' ')
        item['films_type'] = films_type
print(item)
yield item
|
[
"31039587+ydbB@users.noreply.github.com"
] |
31039587+ydbB@users.noreply.github.com
|
42c55265daabb2470cae40ea23ca66ff4211931f
|
94567834d7ca69fba178a9d2d0ae89a73f813741
|
/analyses/monthly_time_series/China_prmsl/plot_ts.py
|
630af662e02fee6a55708d2f481b49b4f71496c9
|
[] |
no_license
|
philip-brohan/Yangtze_floods
|
41b6d655fd4f06f8129c4e5c10d51d5e74d6cec4
|
8ad376328f5b7866d82dd3613e6157cfa31abea1
|
refs/heads/master
| 2021-09-07T23:56:51.250070
| 2021-08-23T16:43:58
| 2021-08-23T16:43:58
| 243,798,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,964
|
py
|
#!/usr/bin/env python
# 20CRv3 time-series: Monthly average, regional average.
# Each ensemble member as a separate line.
# Uses pre-calculated time-series.
import os
import iris
import numpy
import datetime
import pickle
import matplotlib
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.patches import Rectangle
from matplotlib.lines import Line2D
start=datetime.datetime(1926,1,1,0,0)
end=datetime.datetime(1935,12,31,23,59)
ylim = (-300,250)
dts=[]
ndata=None
for year in range(start.year,end.year+1,1):
sfile="%s/20CR/version_3/analyses/Yangtze_ts/PRMSL_v3/%04d.pkl" % \
(os.getenv('SCRATCH'),year)
with open(sfile, "rb") as f:
(ndyr,dtyr) = pickle.load(f)
    dts.extend(dtyr[0:11])  # one datetime per retained month, matching ndata's row count
if ndata is None:
ndata = ndyr[0:11,:]
else:
ndata = numpy.ma.concatenate((ndata,ndyr[0:11,:]))
# Plot the resulting array as a set of line graphs
fig=Figure(figsize=(19.2,6), # Width, Height (inches)
dpi=300,
facecolor=(0.5,0.5,0.5,1),
edgecolor=None,
linewidth=0.0,
frameon=False,
subplotpars=None,
tight_layout=None)
canvas=FigureCanvas(fig)
font = {'family' : 'sans-serif',
'sans-serif' : 'Arial',
'weight' : 'normal',
'size' : 16}
matplotlib.rc('font', **font)
# Plot the lines
ax = fig.add_axes([0.05,0.05,0.93,0.93],
xlim=((start-datetime.timedelta(days=1)),
(end+datetime.timedelta(days=1))),
ylim=ylim)
ax.set_ylabel('PRMSL anomaly')
for m in range(80):
ax.add_line(Line2D(dts,
ndata[:,m],
linewidth=0.5,
color=(0,0,1,1),
alpha=0.1,
zorder=200))
fig.savefig('PRMSL_ts.png')
|
[
"philip@brohan.org"
] |
philip@brohan.org
|
a7a89e0b98c823da3182800cda0c3e9b0acfaecc
|
09a1d8a920ddb9193dfcc9b05ddd842b83b18e0d
|
/aerosandbox_legacy_v0/examples_legacy_v0/vlm4_conventional.py
|
6d4ece711f7bb8d718aaa5d3f6e9995720f1a915
|
[
"MIT"
] |
permissive
|
aqreed/AeroSandbox
|
8564b6adb1f297e94aec96872b55f59171ae8ac1
|
a0c5f3b2760fcddee28cff2715eeddcb8bcbe655
|
refs/heads/master
| 2021-03-24T21:02:14.881986
| 2020-03-15T22:43:55
| 2020-03-15T22:43:55
| 247,564,677
| 1
| 0
|
MIT
| 2020-03-15T22:46:25
| 2020-03-15T22:46:24
| null |
UTF-8
|
Python
| false
| false
| 3,281
|
py
|
from aerosandbox import *
glider = Airplane(
name="Conventional",
xyz_ref=[0, 0, 0], # CG location
wings=[
Wing(
name="Main Wing",
xyz_le=[0, 0, 0], # Coordinates of the wing's leading edge
symmetric=True,
xsecs=[ # The wing's cross ("X") sections
WingXSec( # Root
xyz_le=[0, 0, 0], # Coordinates of the XSec's leading edge, relative to the wing's leading edge.
chord=0.18,
twist=2, # degrees
airfoil=Airfoil(name="naca4412"),
control_surface_type='symmetric', # Flap # Control surfaces are applied between a given XSec and the next one.
control_surface_deflection=0, # degrees
control_surface_hinge_point=0.75 # as chord fraction
),
WingXSec( # Mid
xyz_le=[0.01, 0.5, 0],
chord=0.16,
twist=0,
airfoil=Airfoil(name="naca4412"),
control_surface_type='asymmetric', # Aileron
control_surface_deflection=30,
control_surface_hinge_point=0.75
),
WingXSec( # Tip
xyz_le=[0.08, 1, 0.1],
chord=0.08,
twist=-2,
airfoil=Airfoil(name="naca4412"),
)
]
),
Wing(
name="Horizontal Stabilizer",
xyz_le=[0.6, 0, 0.1],
symmetric=True,
xsecs=[
WingXSec( # root
xyz_le=[0, 0, 0],
chord=0.1,
twist=-10,
airfoil=Airfoil(name="naca0012"),
control_surface_type='symmetric', # Elevator
control_surface_deflection=0,
control_surface_hinge_point=0.75
),
WingXSec( # tip
xyz_le=[0.02, 0.17, 0],
chord=0.08,
twist=-10,
airfoil=Airfoil(name="naca0012")
)
]
),
Wing(
name="Vertical Stabilizer",
xyz_le=[0.6, 0, 0.15],
symmetric=False,
xsecs=[
WingXSec(
xyz_le=[0, 0, 0],
chord=0.1,
twist=0,
airfoil=Airfoil(name="naca0012"),
control_surface_type='symmetric', # Rudder
control_surface_deflection=0,
control_surface_hinge_point=0.75
),
WingXSec(
xyz_le=[0.04, 0, 0.15],
chord=0.06,
twist=0,
airfoil=Airfoil(name="naca0012")
)
]
)
]
)
# glider.set_paneling_everywhere(20, 20)
ap = vlm4(
airplane=glider,
op_point=OperatingPoint(
velocity=10,
alpha=5,
beta=0,
p=0,
q=0,
r=0,
),
)
ap.run()
ap.draw()
# Answer you should get: (XFLR5)
# CL = 0.797
# CDi = 0.017
# CL/CDi = 47.211
|
[
"peterdsharpe@gmail.com"
] |
peterdsharpe@gmail.com
|
920bde8494004fccb4a049249d10f17b7726fe68
|
f0181afd2eea9b086ce9487fb8d7fd949282140a
|
/bin/countgenbank.py
|
173a4ff2ea62bc564b9bd89f321a8135b513e0b3
|
[
"MIT"
] |
permissive
|
linsalrob/EdwardsLab
|
4a571676859c8b7238e733a0d3ad98ceb2e83c63
|
3c466acc07f1a56b575860ad26c92f900b272a53
|
refs/heads/master
| 2023-08-20T17:13:35.466103
| 2023-08-17T09:17:36
| 2023-08-17T09:17:36
| 25,702,093
| 36
| 25
|
MIT
| 2020-09-23T12:44:44
| 2014-10-24T18:27:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,679
|
py
|
"""
Count features in a genbank file or directory of files
"""
import os
import sys
import argparse
from roblib import message, genbank_seqio
__author__ = 'Rob Edwards'
__copyright__ = 'Copyright 2020, Rob Edwards'
__credits__ = ['Rob Edwards']
__license__ = 'MIT'
__maintainer__ = 'Rob Edwards'
__email__ = 'raedwards@gmail.com'
def count_feats(gbkf, verbose=False):
if verbose:
message(f"Reading {gbkf}", "BLUE")
count = {}
for seq in genbank_seqio(gbkf):
for feat in seq.features:
count[feat.type] = count.get(feat.type, 0) + 1
return count
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=" ")
parser.add_argument('-f', help='genbank file')
parser.add_argument('-d', help='directory of genbank files')
parser.add_argument('-t', help='feature type(s) (at least one must be provided)', nargs="+")
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
files = []
if args.f:
files.append(args.f)
if args.d:
for f in os.listdir(args.d):
files.append(os.path.join(args.d, f))
    if len(files) == 0:
        message("Fatal. Either -d or -f is required", "RED")
        sys.exit(1)
    if not args.t:
        message("Fatal. Please provide at least one feature type to count", "RED")
        sys.exit(1)
print("File", end="")
for t in args.t:
print(f"\t{t}", end="")
print()
for f in files:
c = count_feats(f, args.v)
print(f, end="")
for t in args.t:
if t in c:
print(f"\t{c[t]}", end="")
            else:
                print("\t0", end="")
print()
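# Example invocation (illustrative; the file name and feature types are
# placeholders):
#   python countgenbank.py -f example.gbk -t CDS tRNA -v
# This prints one tab-separated row of counts per input file.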
|
[
"raedwards@gmail.com"
] |
raedwards@gmail.com
|
4079d5185261835ffa9df17e29142566cf46c3bd
|
dece3eb22be792aeac65ea12a1f183dd73498add
|
/coding/Mysql/1.py
|
10119b94c419e57e3114923e1eb5292e80410ffd
|
[] |
no_license
|
santanu5670/Python
|
352515ad82f94157e7f74467c5e7dedd6c9069c8
|
48c2779ccf934588f2bfce7cd322088acec04080
|
refs/heads/master
| 2023-06-24T09:22:49.925654
| 2021-07-22T15:17:34
| 2021-07-22T15:17:34
| 387,683,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
import mysql.connector
mydb=mysql.connector.connect(host='localhost',user='nsec',password='nsec',database='mysql')
print(mydb)
if(mydb):
print('Connection successful')
else:
print('Connection Unsuccessful')
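# Illustrative follow-up sketch (assumes a table named `users` exists; the
# table name is hypothetical). mysql.connector uses %s parameter placeholders:
#
#   cursor = mydb.cursor()
#   cursor.execute("SELECT * FROM users WHERE id = %s", (1,))
#   print(cursor.fetchall())
#   cursor.close()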
|
[
"santanu2539@gmail.com"
] |
santanu2539@gmail.com
|
0ae5aa472863f78daed685a05bb3eafc6c6f559c
|
fb6e7922df3da2e9cdc37a00150d6d7663e907ff
|
/environment/rtfm/dynamics/item/weapon/tridents.py
|
7f513dd0c963025c039f762d29b6e88477da154d
|
[
"Apache-2.0"
] |
permissive
|
Spiph/GTG
|
c54a587002c42a032c89e8eceb5ec638f6c8c05f
|
4a45032290d0c1364e4398684582c51094b245f5
|
refs/heads/main
| 2023-09-02T14:44:14.946624
| 2021-10-27T12:29:05
| 2021-10-27T12:29:05
| 393,086,007
| 0
| 0
|
Apache-2.0
| 2021-08-05T15:09:07
| 2021-08-05T15:09:07
| null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .base_weapon import Weapon
from ... import dice as D, material as M
class BaseTrident(Weapon):
pass
class Trident(BaseTrident):
def __init__(self):
super().__init__('trident', weight=25, damage=D.Dice.from_str('3d4'), material=M.Iron, hit=0)
|
[
"jzyjiangzhengyao@gmail.com"
] |
jzyjiangzhengyao@gmail.com
|
259e794cad0040bcd4708de22d5d229d14681030
|
c085b06c9eb220eb40b5ada840886c09a152f053
|
/Libros de Python/web/web/ejer/tema-01/compose1.py.txt
|
d8040cca2291387aa8843870ff3af3f23cb0674a
|
[] |
no_license
|
thanggc/libros
|
7d3bf564c5a227f08390fbcc6721a0aed160e3e0
|
430c03fe97544d263b5c3a665327b527d9c223a8
|
refs/heads/master
| 2020-12-25T13:23:38.527089
| 2013-04-29T23:14:08
| 2013-04-29T23:14:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
txt
|
def compose(*funcs):
    def composed(x):
        for f in reversed(funcs):
            x = f(x)
        return x
    return composed
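# Example (illustrative): compose applies the functions right-to-left, so
# compose(str, abs)(-3) computes abs(-3) == 3 first, then str(3) == '3'.
assert compose(str, abs)(-3) == '3'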
|
[
"mini.guero@hotmail.com"
] |
mini.guero@hotmail.com
|
1f9f4decc5db879cfc598fe5c9b819fbed4f43a3
|
b79bce0cf363d2b6dd11371d378d78d48e973270
|
/kashgari/tasks/classification/base_model.py
|
7000ff01695ba716d958546221c048d0d0394381
|
[
"Apache-2.0"
] |
permissive
|
CharlotteSean/Kashgari
|
2d9338761b16d9804fb81ff92ce2ab1d256c80a7
|
ab9970ecf6c0164416bfbbec1378c690b0f00d76
|
refs/heads/master
| 2022-01-22T03:52:12.284458
| 2019-07-17T03:48:04
| 2019-07-17T03:48:04
| 197,900,673
| 2
| 0
|
Apache-2.0
| 2019-07-20T08:15:03
| 2019-07-20T08:15:03
| null |
UTF-8
|
Python
| false
| false
| 8,165
|
py
|
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_classification_model.py
# time: 2019-05-22 11:23
import random
import logging
import kashgari
from typing import Dict, Any, Tuple, Optional, List
from kashgari.tasks.base_model import BaseModel, BareEmbedding
from kashgari.embeddings.base_embedding import Embedding
from sklearn import metrics
class BaseClassificationModel(BaseModel):
__task__ = 'classification'
def __init__(self,
embedding: Optional[Embedding] = None,
hyper_parameters: Optional[Dict[str, Dict[str, Any]]] = None):
super(BaseClassificationModel, self).__init__(embedding, hyper_parameters)
if hyper_parameters is None and \
self.embedding.processor.__getattribute__('multi_label') is True:
last_layer_name = list(self.hyper_parameters.keys())[-1]
self.hyper_parameters[last_layer_name]['activation'] = 'sigmoid'
logging.warning("Activation Layer's activate function changed to sigmoid for"
" multi-label classification question")
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
raise NotImplementedError
def build_model_arc(self):
raise NotImplementedError
def compile_model(self, **kwargs):
if kwargs.get('loss') is None and self.embedding.processor.multi_label:
kwargs['loss'] = 'binary_crossentropy'
super(BaseClassificationModel, self).compile_model(**kwargs)
def predict(self,
x_data,
batch_size=32,
multi_label_threshold: float = 0.5,
debug_info=False,
predict_kwargs: Dict = None):
"""
Generates output predictions for the input samples.
Computation is done in batches.
Args:
x_data: The input data, as a Numpy array (or list of Numpy arrays if the model has multiple inputs).
batch_size: Integer. If unspecified, it will default to 32.
multi_label_threshold:
debug_info: Bool, Should print out the logging info.
predict_kwargs: arguments passed to ``predict()`` function of ``tf.keras.Model``
Returns:
array(s) of predictions.
"""
        if predict_kwargs is None:
            predict_kwargs = {}
        with kashgari.utils.custom_object_scope():
            tensor = self.embedding.process_x_dataset(x_data)
            pred = self.tf_model.predict(tensor, batch_size=batch_size, **predict_kwargs)
if self.embedding.processor.multi_label:
if debug_info:
logging.info('raw output: {}'.format(pred))
pred[pred >= multi_label_threshold] = 1
pred[pred < multi_label_threshold] = 0
else:
pred = pred.argmax(-1)
res = self.embedding.reverse_numerize_label_sequences(pred)
if debug_info:
logging.info('input: {}'.format(tensor))
logging.info('output: {}'.format(pred))
logging.info('output argmax: {}'.format(pred.argmax(-1)))
return res
def predict_top_k_class(self,
x_data,
top_k=5,
batch_size=32,
debug_info=False,
predict_kwargs: Dict = None) -> List[Dict]:
"""
Generates output predictions with confidence for the input samples.
Computation is done in batches.
Args:
x_data: The input data, as a Numpy array (or list of Numpy arrays if the model has multiple inputs).
top_k: int
batch_size: Integer. If unspecified, it will default to 32.
debug_info: Bool, Should print out the logging info.
predict_kwargs: arguments passed to ``predict()`` function of ``tf.keras.Model``
Returns:
array(s) of predictions.
single-label classification:
[
{
"label": "chat",
"confidence": 0.5801531,
"candidates": [
{ "label": "cookbook", "confidence": 0.1886314 },
{ "label": "video", "confidence": 0.13805099 },
{ "label": "health", "confidence": 0.013852648 },
{ "label": "translation", "confidence": 0.012913573 }
]
}
]
multi-label classification:
[
{
"candidates": [
{ "confidence": 0.9959336, "label": "toxic" },
{ "confidence": 0.9358089, "label": "obscene" },
{ "confidence": 0.6882098, "label": "insult" },
{ "confidence": 0.13540423, "label": "severe_toxic" },
{ "confidence": 0.017219543, "label": "identity_hate" }
]
}
]
"""
if predict_kwargs is None:
predict_kwargs = {}
with kashgari.utils.custom_object_scope():
tensor = self.embedding.process_x_dataset(x_data)
pred = self.tf_model.predict(tensor, batch_size=batch_size, **predict_kwargs)
new_results = []
for sample_prob in pred:
sample_res = zip(self.label2idx.keys(), sample_prob)
sample_res = sorted(sample_res, key=lambda k: k[1], reverse=True)
data = {}
for label, confidence in sample_res[:top_k]:
if 'candidates' not in data:
if self.embedding.processor.multi_label:
data['candidates'] = []
else:
data['label'] = label
data['confidence'] = confidence
data['candidates'] = []
continue
data['candidates'].append({
'label': label,
'confidence': confidence
})
new_results.append(data)
if debug_info:
logging.info('input: {}'.format(tensor))
logging.info('output: {}'.format(pred))
logging.info('output argmax: {}'.format(pred.argmax(-1)))
return new_results
def evaluate(self,
x_data,
y_data,
batch_size=None,
digits=4,
output_dict=False,
debug_info=False) -> Optional[Tuple[float, float, Dict]]:
y_pred = self.predict(x_data, batch_size=batch_size)
if debug_info:
for index in random.sample(list(range(len(x_data))), 5):
logging.debug('------ sample {} ------'.format(index))
logging.debug('x : {}'.format(x_data[index]))
logging.debug('y : {}'.format(y_data[index]))
logging.debug('y_pred : {}'.format(y_pred[index]))
if self.pre_processor.multi_label:
y_pred_b = self.pre_processor.multi_label_binarizer.fit_transform(y_pred)
y_true_b = self.pre_processor.multi_label_binarizer.fit_transform(y_data)
            # sklearn expects (y_true, y_pred) in this order
            report = metrics.classification_report(y_true_b,
                                                   y_pred_b,
                                                   target_names=self.pre_processor.multi_label_binarizer.classes_,
                                                   output_dict=output_dict,
                                                   digits=digits)
else:
report = metrics.classification_report(y_data,
y_pred,
output_dict=output_dict,
digits=digits)
if not output_dict:
print(report)
else:
return report
if __name__ == "__main__":
print("Hello world")
|
[
"eliyar917@gmail.com"
] |
eliyar917@gmail.com
|
4887675c21970c73fbb8d10f2891370c490380cb
|
387587c753e76d98a6a0401327766c45561d5109
|
/ros_catkin_ws/devel_isolated/roslaunch/lib/python2.7/dist-packages/roslaunch/__init__.py
|
8752f22becaf4ebc75be508c7fbdbba3736db545
|
[
"MIT"
] |
permissive
|
letrend/neopixel_fpga
|
7a4819a566fab02bd602c3338b8aaa0ddf4bee85
|
d9247417a9d311eceebad5898571846c6e33a44a
|
refs/heads/master
| 2021-01-23T01:00:55.290431
| 2017-05-30T20:15:38
| 2017-05-30T20:15:38
| 92,855,363
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,028
|
py
|
# -*- coding: utf-8 -*-
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols
from os import path as os_path
from sys import path as sys_path
from pkgutil import extend_path
__extended_path = "/root/ros_catkin_ws/src/ros_comm/roslaunch/src".split(";")
for p in reversed(__extended_path):
sys_path.insert(0, p)
del p
del sys_path
__path__ = extend_path(__path__, __name__)
del extend_path
__execfiles = []
for p in __extended_path:
src_init_file = os_path.join(p, __name__ + '.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
else:
src_init_file = os_path.join(p, __name__, '__init__.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
del src_init_file
del p
del os_path
del __extended_path
for __execfile in __execfiles:
with open(__execfile, 'r') as __fh:
exec(__fh.read())
del __fh
del __execfile
del __execfiles
|
[
"simon.trendel@tum.de"
] |
simon.trendel@tum.de
|
a63d008f4a88eae9d409a21dec51ccc26b7b1055
|
0946fc233478fec9eac9eb247d45667c3b3989e8
|
/reassignment.py
|
ab002343bf6d67cf8ea6059c7d08a196ae10471d
|
[] |
no_license
|
hoinx/music-processing-experiments
|
0da954e268d6a7120729cb3b012f53d836cd1dc7
|
bdd76fa8a8e1b90c8d4e610dcd3a6beadc2e7b1d
|
refs/heads/master
| 2020-04-30T13:00:35.283119
| 2015-11-02T19:54:46
| 2015-11-02T19:54:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,545
|
py
|
import os
import matplotlib.pyplot as plt
import numpy as np
import scipy
from files import load_wav
from spectrogram import real_half, create_window
from analysis import split_to_blocks
from tuning import pitch_to_freq, freq_to_pitch, pitch_bin_range, quantize_freqs_to_pitch_bins
def cross_spectrum(spectrumA, spectrumB):
'''
Returns a cross-spectrum, ie. spectrum of cross-correlation of two signals.
This result does not depend on the order of the arguments.
Since we already have the spectra of signals A and B and and want the
spectrum of their cross-correlation, we can replace convolution in time
domain with multiplication in frequency domain.
'''
return spectrumA * spectrumB.conj()
def shift_right(values):
'''
Shifts the array to the right by one place, filling the empty values with
zeros.
TODO: use np.roll()
'''
return np.hstack([np.zeros((values.shape[0],1)), values[..., :-1]])
def shifted_amplitude_pair(amplitudes):
'''
Fakes looking at the previous frame shifted by one sample.
In order to work only with one frame of size N and not N + 1, we fill the
missing value with zero. This should not introduce a large error, since the
borders of the amplitude frame will go to zero anyway due to applying a
window function in the STFT tranform.
Returns: (previous amplitudes, current amplitudes)
'''
prevAmplitudes = shift_right(amplitudes)
return prevAmplitudes, amplitudes
def arg(crossSpectrum):
return np.mod(np.angle(crossSpectrum) / (2 * np.pi), 1.0)
def estimate_instant_freqs(crossTimeSpectrum):
'''
Channelized instantaneous frequency - the vector of simultaneous
instantaneous frequencies computed over a single frame of the digital
short-time Fourier transform.
Instantaneous frequency - derivative of phase by time.
cif = angle(crossSpectrumTime) * sampleRate / (2 * pi)
In this case the return value is normalized (not multiplied by sampleRate).
Basically it is phase normalized to the [0.0; 1.0] interval,
instead of absolute [0.0; sampleRate].
'''
return arg(crossTimeSpectrum)
def estimate_group_delays(crossFreqSpectrum):
return 0.5 - arg(crossFreqSpectrum)
def open_file(filename, block_size, hop_size):
song, fs = load_wav(filename)
x, times = split_to_blocks(song, block_size, hop_size=hop_size)
return x, times, fs
def compute_spectra(x, w):
X = np.fft.fft(x * w)
X_prev_time = np.fft.fft(shift_right(x) * w)
X_prev_freq = shift_right(X)
X_cross_time = cross_spectrum(X, X_prev_time)
X_cross_freq = cross_spectrum(X, X_prev_freq)
X_inst_freqs = estimate_instant_freqs(X_cross_time)
X_group_delays = estimate_group_delays(X_cross_freq)
return X, X_cross_time, X_cross_freq, X_inst_freqs, X_group_delays
def db_scale(magnitude_spectrum):
# min_amplitude = 1e-6
# threshold = -np.log10(min_amplitude)
# return ((threshold + np.log10(np.maximum(min_amplitude, magnitude_spectrum))) / threshold)
return 20 * np.log10(np.maximum(1e-6, magnitude_spectrum))
def requantize_f_spectrogram(X_cross, X_instfreqs, to_log=True):
'''Only requantize by frequency'''
X_reassigned = np.empty(X_cross.shape)
N = X_cross.shape[1]
magnitude_spectrum = abs(X_cross) / N
weights = magnitude_spectrum
for i in range(X_cross.shape[0]):
X_reassigned[i, :] = np.histogram(X_instfreqs[i], N, range=(0,1), weights=weights[i])[0]
X_reassigned = X_reassigned ** 2
if to_log:
X_reassigned = db_scale(X_reassigned)
return X_reassigned
def requantize_tf_spectrogram(X_group_delays, X_inst_freqs, times, block_size, fs, weights=None):
block_duration = block_size / fs
block_center_time = block_duration / 2
X_time = np.tile(times + block_center_time, (X_group_delays.shape[1], 1)).T \
+ X_group_delays * block_duration
time_range = (times[0], times[-1] + block_duration)
freq_range = (0, 1)
bins = X_inst_freqs.shape
# time_range = (0, 2)
# freq_range = (0, 0.4)
counts, x_edges, y_edges = np.histogram2d(
X_time.flatten(), X_inst_freqs.flatten(),
weights=weights.flatten(),
range=(time_range, freq_range),
bins=bins)
return counts, x_edges, y_edges
def process_spectrogram(filename, block_size, hop_size):
x, times, fs = open_file(filename, block_size, hop_size)
w = create_window(block_size)
X, X_cross_time, X_cross_freq, X_inst_freqs, X_group_delays = compute_spectra(x, w)
X_reassigned_f = requantize_f_spectrogram(X_cross_time, X_inst_freqs)
# N = X_cross.shape[1]
# magnitude_spectrum = abs(X_cross_time) / N
# weights = db_scale(magnitude_spectrum)
X_magnitudes = abs(X_cross_time) / X.shape[1]
weights = X_magnitudes
X_reassigned_tf = requantize_tf_spectrogram(X_group_delays, X_inst_freqs, times, block_size, fs, weights)[0]
X_reassigned_tf = db_scale(X_reassigned_tf ** 2)
image_filename = os.path.basename(filename).replace('.wav', '.png')
scipy.misc.imsave('reassigned_f_' + image_filename, real_half(X_reassigned_f).T[::-1])
scipy.misc.imsave('reassigned_tf_' + image_filename, real_half(X_reassigned_tf).T[::-1])
scipy.misc.imsave('normal_' + image_filename, real_half(X_magnitudes).T[::-1])
# X_time = X_group_delays + np.tile(np.arange(X.shape[0]).reshape(-1, 1), X.shape[1])
# idx = (abs(X).flatten() > 10) & (X_inst_freqs.flatten() < 0.5)
# plt.scatter(X_time.flatten()[idx], X_inst_freqs.flatten()[idx], alpha=0.1)
# plt.savefig('scatter_' + image_filename)
def reassigned_spectrogram(x, w, to_log=True):
X, X_cross_time, X_cross_freq, X_inst_freqs, X_group_delays = compute_spectra(x, w)
X_reassigned_f = requantize_f_spectrogram(X_cross_time, X_inst_freqs, to_log)
return real_half(X_reassigned_f)
def chromagram(x, w, fs, bin_range=(-48, 67), bin_division=1, to_log=True):
"complete reassigned spectrogram with requantization to pitch bins"
# TODO: better give frequency range
X, X_cross_time, X_cross_freq, X_inst_freqs, X_group_delays = compute_spectra(x, w)
n_blocks, n_freqs = X_cross_time.shape
X_mag = abs(X_cross_time) / n_freqs
weights = real_half(X_mag).flatten()
eps = np.finfo(np.float32).eps
pitch_bins = quantize_freqs_to_pitch_bins(np.maximum(fs * real_half(X_inst_freqs), eps), bin_division=bin_division).flatten()
nonzero_ix = abs(weights) > eps
X_chromagram = np.histogram2d(
        np.repeat(np.arange(n_blocks), n_freqs // 2),  # integer repeat count (Python 3)
pitch_bins,
bins=(np.arange(n_blocks + 1),
np.arange(bin_range[0], bin_range[1] + 1, 1 / bin_division)),
weights=weights
)[0]
X_chromagram = X_chromagram ** 2
if to_log:
X_chromagram = db_scale(X_chromagram)
return X_chromagram
def tf_scatter(X, X_time, X_inst_freqs):
    # Debug helper: scatter-plot the time/frequency reassignment of strong bins.
    idx = (abs(X).flatten() > 10) & (X_inst_freqs.flatten() < 0.5)
    plt.scatter(X_time.flatten()[idx], X_inst_freqs.flatten()[idx], alpha=0.1)
def test_cross_spectrum():
a = np.array([1j, 1+3j])
b = np.array([2, 4j])
c = np.array([-2j, 12+4j])
assert_array_equals(cross_spectrum(a, b), c)
def test_shifted_amplitude_pair():
actual = shifted_amplitude_pair(np.array([1,2,3]))
assert_array_equals(actual[0], np.array([0, 1, 2]))
assert_array_equals(actual[1], np.array([1, 2, 3]))
def assert_array_equals(a, b):
assert (a == b).all()
if __name__ == '__main__':
import sys
process_spectrogram(filename=sys.argv[1], block_size=2048, hop_size=512)
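# Illustrative usage sketch (synthetic input, using the helpers imported above):
#
#   fs = 8000
#   t = np.arange(fs) / fs
#   signal = np.sin(2 * np.pi * (200 + 300 * t) * t)     # simple chirp
#   blocks, times = split_to_blocks(signal, 1024, hop_size=256)
#   w = create_window(1024)
#   S = reassigned_spectrogram(blocks, w)                # (n_blocks, 512), in dB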
|
[
"bohumir.zamecnik@gmail.com"
] |
bohumir.zamecnik@gmail.com
|
aa58c72a61686c3fcbfc652c3ea34db79cf29d43
|
ec5c35ac5163c4e81262a81a6a6c46667c01733d
|
/server/auth.py
|
b279dc4faf0008a333cb253cdb50ed329e219f6b
|
[] |
no_license
|
kotawiw/bytedance-exercise-2
|
27b32d81aa7e8040c1c8448acbe9c4ff20ff5b26
|
8db190487a6490ec852d8418d93ba62251a5437f
|
refs/heads/master
| 2022-12-24T00:04:53.047395
| 2020-09-23T11:48:13
| 2020-09-23T11:48:13
| 297,948,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,925
|
py
|
import functools
from flask import Blueprint
from flask import g
from flask import request
from flask import session
from flask import abort
from flask import jsonify
from server.models.users import User
bp = Blueprint("auth", __name__, url_prefix="/api/auth")
def login_required(view):
@functools.wraps(view)
def wrapped_view(**kwargs):
if 'user' not in g:
return abort(401, "Login required")
return view(**kwargs)
return wrapped_view
@bp.before_app_request
def load_logged_in_user():
user_id = session.get("user_id")
if user_id:
g.user = User.query.get(user_id)
@bp.route("/status")
def get_status():
if 'user' not in g:
return jsonify({"loggedIn": False})
return jsonify({
"loggedIn": True,
"email": g.user.email
})
@bp.route("/register", methods=("POST",))
def register():
autologin = request.args.get("autologin", default="True").lower() == "true"
email = request.json.get('email')
password = request.json.get('password')
user = User.try_register(email, password)
    if not user:
        return abort(400, "Invalid email or password")
if autologin:
session["user_id"] = user.id
return jsonify({'email': user.email, 'loggedIn': autologin})
@bp.route("/login", methods=("POST",))
def login():
email = request.json['email']
password = request.json['password']
user = User.by_email_password(email, password)
if not user:
return abort(401, "Incorrect email or password")
session["user_id"] = user.id
return jsonify({'email': user.email, 'loggedIn': True})
@bp.route("/logout")
def logout():
user = None
if 'user' in g:
user = g.user
session.clear()
if not user:
return jsonify({"loggedIn": False})
return jsonify({
"loggedIn": True,
"email": user.email
})
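# Illustrative client-side sketch (assumes the app is served on localhost:5000):
#
#   import requests
#   s = requests.Session()
#   s.post("http://localhost:5000/api/auth/register",
#          json={"email": "a@example.com", "password": "secret"})
#   print(s.get("http://localhost:5000/api/auth/status").json())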
|
[
"you@example.com"
] |
you@example.com
|
0053a7079faf9fa027992cb3f4a7f048e6e8bee4
|
0e2768a4a21367c4c2f89976f2dadd5c94fec09b
|
/StarGAN/age_test.py
|
b6cf116b145149480910e3303ec1f6fc45ca770a
|
[] |
no_license
|
xogus1107/Capstone
|
1dcea5d7fd1d603c8b31133d7a64325fee8b72ed
|
e84fd5a2e5419da969a8013a4ee0b98b4c1d22cf
|
refs/heads/master
| 2020-03-30T03:47:23.028610
| 2018-12-11T02:53:33
| 2018-12-11T02:53:33
| 150,708,066
| 1
| 1
| null | 2018-12-11T02:53:34
| 2018-09-28T08:16:27
|
Python
|
UTF-8
|
Python
| false
| false
| 3,984
|
py
|
import os
import argparse
from solver import Solver
from data_loader import get_loader
from torch.backends import cudnn
def str2bool(v):
    # ('true') is just a string, so `in` would do a substring test; use a tuple.
    return v.lower() in ('true',)
def main(config):
# For fast training.
cudnn.benchmark = True
# Create directories if not exist.
if not os.path.exists(config.log_dir):
os.makedirs(config.log_dir)
if not os.path.exists(config.model_save_dir):
os.makedirs(config.model_save_dir)
if not os.path.exists(config.sample_dir):
os.makedirs(config.sample_dir)
if not os.path.exists(config.result_dir):
os.makedirs(config.result_dir)
# Data loader.
data_loader = get_loader(config.image_dir, config.crop_size, config.image_size, config.batch_size,
'test', config.num_workers)
# Solver for training and testing StarGAN.
solver = Solver(data_loader, config)
solver.test()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Model configuration.
parser.add_argument('--c_dim', type=int, default=10, help='dimension of domain labels')
parser.add_argument('--crop_size', type=int, default=128, help='crop size for the dataset')
parser.add_argument('--image_size', type=int, default=128, help='image resolution')
parser.add_argument('--g_conv_dim', type=int, default=64, help='number of conv filters in the first layer of G')
parser.add_argument('--d_conv_dim', type=int, default=64, help='number of conv filters in the first layer of D')
parser.add_argument('--g_repeat_num', type=int, default=6, help='number of residual blocks in G')
parser.add_argument('--d_repeat_num', type=int, default=6, help='number of strided conv layers in D')
parser.add_argument('--lambda_cls', type=float, default=1, help='weight for domain classification loss')
parser.add_argument('--lambda_rec', type=float, default=10, help='weight for reconstruction loss')
parser.add_argument('--lambda_gp', type=float, default=10, help='weight for gradient penalty')
# Training configuration.
parser.add_argument('--batch_size', type=int, default=16, help='mini-batch size')
parser.add_argument('--num_iters', type=int, default=1000000, help='number of total iterations for training D')
parser.add_argument('--num_iters_decay', type=int, default=100000, help='number of iterations for decaying lr')
parser.add_argument('--g_lr', type=float, default=0.0001, help='learning rate for G')
parser.add_argument('--d_lr', type=float, default=0.0001, help='learning rate for D')
parser.add_argument('--n_critic', type=int, default=5, help='number of D updates per each G update')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for Adam optimizer')
parser.add_argument('--beta2', type=float, default=0.999, help='beta2 for Adam optimizer')
parser.add_argument('--resume_iters', type=int, default=None, help='resume training from this step')
# Test configuration.
parser.add_argument('--test_iters', type=int, default=1000000, help='test model from this step')
# Miscellaneous.
parser.add_argument('--num_workers', type=int, default=1)
parser.add_argument('--use_tensorboard', type=str2bool, default=True)
# Directories.
parser.add_argument('--image_dir', type=str, default='age/test')
parser.add_argument('--log_dir', type=str, default='age/logs')
parser.add_argument('--model_save_dir', type=str, default='age/models')
parser.add_argument('--sample_dir', type=str, default='age/samples')
parser.add_argument('--result_dir', type=str, default='age/results')
# Step size.
parser.add_argument('--log_step', type=int, default=10)
parser.add_argument('--sample_step', type=int, default=1000)
parser.add_argument('--model_save_step', type=int, default=10000)
parser.add_argument('--lr_update_step', type=int, default=1000)
config = parser.parse_args()
print(config)
main(config)
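# Example invocation (illustrative; directories follow the defaults above):
#   python age_test.py --image_dir age/test --model_save_dir age/models --test_iters 1000000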
|
[
"lkjim0757@naver.com"
] |
lkjim0757@naver.com
|
b3aa4d9fb003f4ac0049040ec5cd7a6d3f657b93
|
2c4a2790457a2c16c9c92bc2e6feeb6cc9271994
|
/laptop/catkin_ws/build/turtlebot3/turtlebot3_pointop/catkin_generated/pkg.installspace.context.pc.py
|
2bf9e1ccc9e204260bada0f003da4a1983ed2144
|
[] |
no_license
|
Zoltan3057/skalman
|
df37e6f376665363b6ea73c6c16fe8312e473e07
|
0e4cead39f6328c74622dd11688837b77152ff17
|
refs/heads/master
| 2021-09-22T22:26:07.265321
| 2018-09-17T20:47:05
| 2018-09-17T20:47:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "rospy;geometry_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot3_pointop"
PROJECT_SPACE_DIR = "/home/fregu856/skalman/laptop/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
|
[
"fregu856@student.liu.se"
] |
fregu856@student.liu.se
|
1ccb569c8cd950c90ba8af01abc664229472ddcc
|
172c5da69ed8914dc899f65be8716e0fac55249b
|
/surveys/views.py
|
d8838fa92b0dc94e0cbbf96fee6d71ae1036af1a
|
[] |
no_license
|
TheProrok29/django_questionnaires
|
cd75f0271e3b1012351c94b0fde4cb87f795ef5c
|
2e62bf2e96fa58f96b80c84c04d08825dfb9ac37
|
refs/heads/master
| 2020-05-01T05:11:52.268230
| 2019-04-07T19:44:30
| 2019-04-07T19:44:30
| 177,295,076
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,222
|
py
|
from django.shortcuts import render, HttpResponseRedirect
from .models import Survey, Question, Answer
from django.contrib.auth.decorators import login_required
from . import forms
from django.urls import reverse
from django.contrib import messages
@login_required
def surveys(request):
kwargs = {}
kwargs['surveys'] = Survey.objects.filter(user=request.user)
return render(request, 'surveys.html', kwargs)
@login_required
def create(request):
if request.method == 'GET':
kwargs = {}
kwargs['survey_creation_form'] = forms.SurveyCreationForm(
prefix='survey_creation_form')
return render(request, 'create.html', kwargs)
elif request.method == 'POST':
form = forms.SurveyCreationForm(data=request.POST,
prefix='survey_creation_form')
if form.is_valid():
new_survey = form.save(commit=False)
new_survey.user = request.user
new_survey.save()
messages.success(
request, 'Ankieta została utworzona, możesz przystąpić do tworzenia pytań.')
return HttpResponseRedirect(reverse('survey',
kwargs={
'survey_id': new_survey.id
}))
messages.error(request, 'Niepoprawne wywołanie trasy.')
return HttpResponseRedirect(reverse('surveys'))
@login_required
def survey(request, survey_id):
try:
user_survey = Survey.objects.get(id=survey_id)
if request.method == 'GET':
kwargs = {}
kwargs['survey'] = user_survey
kwargs['questions'] = Question.objects.filter(survey=user_survey)
kwargs['survey_edit_form'] = forms.SurveyCreationForm(prefix='survey_creation_form',
instance=user_survey)
return render(request, 'survey.html', kwargs)
elif request.method == 'POST':
form = forms.SurveyCreationForm(data=request.POST,
prefix='survey_creation_form',
instance=user_survey)
if form.is_valid():
form.save()
messages.success(request, 'Dane ankiety zostały zmienione.')
return HttpResponseRedirect(reverse('survey',
kwargs={
'survey_id': survey_id
}))
except Survey.DoesNotExist:
messages.error(request, 'Wybrana ankieta nie istnieje.')
return HttpResponseRedirect(reverse('surveys'))
@login_required
def delete(request, survey_id):
try:
user_survey = Survey.objects.get(id=survey_id)
user_survey.delete()
messages.success(request, 'Wybrana ankieta została usunięta.')
return HttpResponseRedirect(reverse('surveys'))
except Survey.DoesNotExist:
messages.error(request, 'Wybrana ankieta nie istnieje.')
return HttpResponseRedirect(reverse('surveys'))
@login_required
def create_question(request, survey_id):
try:
user_survey = Survey.objects.get(id=survey_id)
if request.method == 'GET':
kwargs = {}
kwargs['survey'] = user_survey
kwargs['question_creation_form'] = forms.QuestionCreationForm(
prefix='question_creation_form')
return render(request, 'question_create.html', kwargs)
elif request.method == 'POST':
form = forms.QuestionCreationForm(data=request.POST,
prefix='question_creation_form')
if form.is_valid():
new_question = form.save(commit=False)
new_question.survey = user_survey
new_question.save()
messages.success(request, 'Pytanie zostało utworzone.')
return HttpResponseRedirect(reverse('survey',
kwargs={
'survey_id': user_survey.id
}))
except Survey.DoesNotExist:
messages.error(request, 'Wybrana ankieta nie istnieje.')
return HttpResponseRedirect(reverse('surveys'))
@login_required
def delete_question(request, survey_id, question_id):
try:
survey_question = Question.objects.get(id=question_id)
survey_question.delete()
messages.success(request, 'Wybrane pytanie zostało usunięte.')
except Question.DoesNotExist:
messages.error(request, 'Wybrane pytanie nie istnieje.')
return HttpResponseRedirect(reverse('survey', kwargs={
'survey_id': survey_id
}))
def share(request, survey_id):
try:
kwargs = {}
user_survey = Survey.objects.get(id=survey_id)
survey_questions = Question.objects.filter(survey_id=survey_id)
if request.method == 'GET':
kwargs['survey'] = user_survey
kwargs['questions'] = survey_questions
return render(request, 'share.html', kwargs)
elif request.method == 'POST':
first_name = request.POST['first-name']
answers = "<p>"
for question in survey_questions:
answers += 'Pytanie: %s <br /> Odpowiedź: <em>%s' % (question.name,
request.POST.get(str(question.id), 'Brak'))
answers += '</em><br /><br />'
answers += '</p>'
new_answer = Answer()
new_answer.user = user_survey.user
new_answer.survey = user_survey
new_answer.first_name = first_name
new_answer.answers = answers
new_answer.save()
messages.success(
request, 'Dziękujemy, Twoje odpowiedzi zostały przesłane.')
return HttpResponseRedirect(reverse('share-survey',
kwargs={
'survey_id': user_survey.id
}))
except Survey.DoesNotExist:
messages.error(request, 'Wybrana ankieta nie istnieje.')
return HttpResponseRedirect(reverse('home'))
@login_required
def answers(request, survey_id):
kwargs = {}
kwargs['answers'] = Answer.objects.filter(survey_id=survey_id).order_by('-created')
return render(request, 'answers.html', kwargs)
@login_required
def delete_answer(request, survey_id, answer_id):
try:
survey_answer = Answer.objects.get(id=answer_id)
survey_answer.delete()
messages.success(request, 'Wybrana odpowiedź została usunięta.')
except Answer.DoesNotExist:
messages.error(request, 'Wybrane pytanie nie istnieje.')
return HttpResponseRedirect(reverse('answers', kwargs={
'survey_id': survey_id
}))
|
[
"tomaszdbogacki@gmail.com"
] |
tomaszdbogacki@gmail.com
|
73e4c6bd217f7949adbe8e395dd344f590caaa88
|
57e37e1df9596cdaf86d3dc81cfbe15c5303157f
|
/SocialMediaenv/social_media/login_app/migrations/0007_auto_20200625_0059.py
|
b85ab48b079026324aa416a8e4f4f1f51ed52538
|
[] |
no_license
|
Tanzin-Ul-Islam/Django_Social_Media
|
45d9240b25456768b4cc1976f4971c7921a6ceae
|
7578aeb98702bbbb6ae820de11c2c5a4c237243a
|
refs/heads/master
| 2022-11-29T11:37:07.949118
| 2020-08-10T20:15:45
| 2020-08-10T20:15:45
| 286,519,157
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 798
|
py
|
# Generated by Django 3.0.7 on 2020-06-24 18:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('login_app', '0006_auto_20200625_0056'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='profilepic',
field=models.ImageField(blank=True, upload_to='userpics'),
),
migrations.AlterField(
model_name='userprofile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='user_profile', to=settings.AUTH_USER_MODEL),
),
]
|
[
"tanzin.cse@gmail.com"
] |
tanzin.cse@gmail.com
|
90593ff711ea932ec1a2f7a1ec6b49f285920849
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_75/1267.py
|
24da43f932ff6c1352b5a0a632af37122acf1eab
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,444
|
py
|
import unittest
def cut(s, size):
for i in range(0, len(s), size):
yield s[i:i + size]
def parse_combining(s):
d = {}
for mom, dad, kiddo in cut(s, 3):
d[frozenset((mom, dad))] = kiddo
return d
def parse_opposing(s):
l = []
for mom, dad in cut(s, 2):
l.append(frozenset((mom, dad)))
return l
def invoke(s, combining, opposing):
combining_pairs = frozenset(combining.keys())
opposing_pairs = frozenset(opposing)
result = ""
for c in s:
result += c
# print result
while len(result) >= 2:
pair = frozenset((result[-1], result[-2]))
if pair in combining_pairs:
# print "*", pair
result = result[:-2] + combining[pair]
continue
fs = frozenset(result)
for pair in opposing_pairs:
if fs >= pair:
# print "**", pair
result = ""
continue
break
return result
class TestMagicka(unittest.TestCase):
def assertInvoke(self, output, combining, opposing, input):
c = parse_combining(combining)
o = parse_opposing(opposing)
self.assertEqual(output, invoke(input, c, o))
def test_cut(self):
self.assertEqual(["12", "34", "56"], [s for s in cut("123456", 2)])
def test_parse(self):
self.assertEqual({frozenset('QR'):'I'}, parse_combining("QRI"))
def test_all(self):
self.assertInvoke("EA", "", "", "EA")
self.assertInvoke("RIR", "QRI", "", "RRQR")
self.assertInvoke("FDT", "QFT", "QF", "FAQFDFQ")
self.assertInvoke("ZERA", "EEZ", "QE", "QEEEERA")
self.assertInvoke("", "", "QW", "QW")
self.assertInvoke("", "", "QW", "WQ")
self.assertInvoke("CF", "ABCDEF", "XY", "XYABDE")
self.assertInvoke("G", "ABCDEFCFG", "XY", "XYABDE")
if __name__ == '__main__':
# unittest.main()
count = int(raw_input())
for i in range(count):
it = iter(raw_input().split(" "))
combining = parse_combining(it.next() if int(it.next()) > 0 else "")
opposing = parse_opposing(it.next() if int(it.next()) > 0 else "")
s = it.next() if int(it.next()) > 0 else ""
print 'Case #%d: [%s]' % (i + 1, ", ".join(invoke(s, combining, opposing)))
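# Worked example (mirrors the tests above): with combining list "QRI" and no
# opposing pairs, invoking "RRQR" reduces to "RIR" -> "Case #1: [R, I, R]".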
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
a3e3b87d073f41bae0250078bf35cd961afc03ef
|
49f61714a6f78d984fd2194d6064d84e891bc5b7
|
/2019-1/230/users/3356/codes/1599_842.py
|
15af2a9064e98315942e60e03b8d7ff9b43b637d
|
[] |
no_license
|
psbarros/Variaveis3
|
b5c4e1517e7d94a846ee03791d25d5821a1c651c
|
3dcf6f810709ce03c78335acf9533e008a2ae125
|
refs/heads/master
| 2023-06-13T07:05:00.878430
| 2021-07-06T17:51:37
| 2021-07-06T17:51:37
| 383,549,597
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
# Test your code little by little.
# Don't test everything at the end, as that makes errors harder to identify.
# Don't be intimidated by the error messages. They help you fix your code.
n = int(input())
soma = 0
while (n > 0):
resto = n % 10
n = n // 10
soma = soma + resto
print(soma)
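# Worked example: n = 345 peels off 5, then 4, then 3 via n % 10,
# so soma = 5 + 4 + 3 = 12.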
|
[
"psb@icomp.ufam.edu.br"
] |
psb@icomp.ufam.edu.br
|
a23d0a0133f0e15711d6f9797955758dc75ae16e
|
4148260054c2cf4605dacb8bdef3605c82eca470
|
/temboo/Library/Wordnik/Words/RandomWords.py
|
682599bdf169426d581dbe717544f4ca5ebb9bb6
|
[] |
no_license
|
wimsy/actuarize-web
|
0f23d5f00afe3d36d430621cdb497d2e64998416
|
5f43af3019da6fb08cafeec9ff0a89df5196b864
|
refs/heads/master
| 2021-03-12T19:38:21.887681
| 2012-12-19T01:13:50
| 2012-12-19T01:13:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,155
|
py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# RandomWords
# Retrieves a list of random words.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
class RandomWords(Choreography):
"""
Create a new instance of the RandomWords Choreography. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
def __init__(self, temboo_session):
Choreography.__init__(self, temboo_session, '/Library/Wordnik/Words/RandomWords')
def new_input_set(self):
return RandomWordsInputSet()
def _make_result_set(self, result, path):
return RandomWordsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return RandomWordsChoreographyExecution(session, exec_id, path)
"""
An InputSet with methods appropriate for specifying the inputs to the RandomWords
choreography. The InputSet object is used to specify input parameters when executing this choreo.
"""
class RandomWordsInputSet(InputSet):
"""
Set the value of the APIKey input for this choreography. ((required, string) The API Key from Wordnik.)
"""
def set_APIKey(self, value):
InputSet._set_input(self, 'APIKey', value)
"""
Set the value of the ExcludePartOfSpeech input for this choreography. ((optional, string) Excludes the specified comma-delimited parts of speech from the results returned. Acceptable values include: adjective, noun, etc. See docs for full list.)
"""
def set_ExcludePartOfSpeech(self, value):
InputSet._set_input(self, 'ExcludePartOfSpeech', value)
"""
Set the value of the HasDefinition input for this choreography. ((optional, string) Only returns words that have dictionary definitions when true. Otherwise false. Defaults to true.)
"""
def set_HasDefinition(self, value):
InputSet._set_input(self, 'HasDefinition', value)
"""
Set the value of the IncludePartOfSpeech input for this choreography. ((optional, string) Only includes the specified comma-delimited parts of speech. Acceptable values include: adjective, noun, etc. See docs for full list.)
"""
def set_IncludePartOfSpeech(self, value):
InputSet._set_input(self, 'IncludePartOfSpeech', value)
"""
Set the value of the Limit input for this choreography. ((optional, integer) Maximum number of results to return. Defaults to 10.)
"""
def set_Limit(self, value):
InputSet._set_input(self, 'Limit', value)
"""
Set the value of the MaxCorpus input for this choreography. ((optional, integer) Results include a corpus frequency count for each word returned. When this input is specified, results are limited to words with a corpus frequency count below the given number.)
"""
def set_MaxCorpus(self, value):
InputSet._set_input(self, 'MaxCorpus', value)
"""
Set the value of the MaxDictionaries input for this choreography. ((optional, integer) Maximum number of dictionaries in which the words appear.)
"""
def set_MaxDictionaries(self, value):
InputSet._set_input(self, 'MaxDictionaries', value)
"""
Set the value of the MaxLength input for this choreography. ((optional, integer) Maximum word length.)
"""
def set_MaxLength(self, value):
InputSet._set_input(self, 'MaxLength', value)
"""
Set the value of the MinCorpus input for this choreography. ((optional, integer) Results include a corpus frequency count for each word returned. When this input is specified, results are limited to words with a corpus frequency count above the given number.)
"""
def set_MinCorpus(self, value):
InputSet._set_input(self, 'MinCorpus', value)
"""
Set the value of the MinDictionaries input for this choreography. ((optional, integer) Minimum number of dictionaries in which the words appear.)
"""
def set_MinDictionaries(self, value):
InputSet._set_input(self, 'MinDictionaries', value)
"""
Set the value of the MinLength input for this choreography. ((optional, integer) Minimum word length.)
"""
def set_MinLength(self, value):
InputSet._set_input(self, 'MinLength', value)
"""
Set the value of the ResponseType input for this choreography. ((optional, string) Response can be either JSON or XML. Defaults to JSON.)
"""
def set_ResponseType(self, value):
InputSet._set_input(self, 'ResponseType', value)
"""
Set the value of the SortBy input for this choreography. ((optional, string) Results can be sorted by: alpha, count, or length.)
"""
def set_SortBy(self, value):
InputSet._set_input(self, 'SortBy', value)
"""
Set the value of the SortOrder input for this choreography. ((optional, string) Indicate the order to sort, either asc (ascending) or desc (descending).)
"""
def set_SortOrder(self, value):
InputSet._set_input(self, 'SortOrder', value)
"""
A ResultSet with methods tailored to the values returned by the RandomWords choreography.
The ResultSet object is used to retrieve the results of a choreography execution.
"""
class RandomWordsResultSet(ResultSet):
"""
Retrieve the value for the "Response" output from this choreography execution. (The response from Wordnik.)
"""
def get_Response(self):
return self._output.get('Response', None)
class RandomWordsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return RandomWordsResultSet(response, path)
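# Illustrative usage sketch (the execution call follows the usual Temboo SDK
# pattern; treat the method name and session construction as assumptions):
#
#   choreo = RandomWords(session)          # session: a valid TembooSession
#   inputs = choreo.new_input_set()
#   inputs.set_APIKey("YOUR_WORDNIK_API_KEY")
#   inputs.set_Limit(5)
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())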
|
[
"mike.wimsatt@gmail.com"
] |
mike.wimsatt@gmail.com
|
2fec48c9bc4689497d616c4fab84161b86faed78
|
ef61c5f177ee44ac08325335fc28a12f3fccbb58
|
/resource_management/tests/interactors/test_create_resource.py
|
2c7bedc0881b90bbee655ebca9a3137cfff9d3c2
|
[] |
no_license
|
bammidichandini/resource_management-chandini
|
3c11c7b2eb5e2f8d3df5b55e4d3ee86a27ed5c3a
|
aa4ec50f0b36a818bebc2033cb39ee928e5be13c
|
refs/heads/master
| 2022-12-01T19:59:25.366843
| 2020-07-23T09:10:42
| 2020-07-23T09:10:42
| 269,610,045
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,920
|
py
|
import pytest
from unittest.mock import create_autospec
#from resource_management.exceptions.exceptions import UserCannotManipulateException
from django_swagger_utils.drf_server.exceptions import Forbidden
from resource_management.interactors.storages.resources_storage_interface import StorageInterface
from resource_management.interactors.presenters.presenter_interface import PresenterInterface
from resource_management.interactors.create_resources_interactor import CreateResourceInteractor
@pytest.mark.django_db()
def test_create_resource(resource_dtos):
#arrange
user_id = 1
storage = create_autospec(StorageInterface)
presenter = create_autospec(PresenterInterface)
storage.is_admin.return_value = True
interactor = CreateResourceInteractor(
storage=storage,
presenter=presenter
)
#act
interactor.create_resource_interactor(
resource_dtos,
user_id=user_id
)
#assert
storage.create_resource.assert_called_once_with(
resource_dtos,
user_id=user_id
)
storage.is_admin.assert_called_once_with(user_id)
def test_create_resource_with_user(resource_dtos):
#arrange
user_id = 1
storage = create_autospec(StorageInterface)
presenter = create_autospec(PresenterInterface)
    storage.is_admin.return_value = False
presenter.raise_user_cannot_manipulate_exception.side_effect = \
Forbidden
interactor = CreateResourceInteractor(
storage=storage,
presenter=presenter
)
#act
with pytest.raises(Forbidden):
interactor.create_resource_interactor(
resource_dtos,
user_id=user_id
)
# #assert
# storage.create_resource.assert_called_once_with(
# resource_dtos,
# user_id=user_id
# )
# presenter.raise_user_cannot_manipulate_exception.assert_called_once()
|
[
"chandini.bammidi123@gmail.com"
] |
chandini.bammidi123@gmail.com
|
a3078ed764f5a832bc8be4f0ed37f7616cbcaff7
|
a52b63889017a7f099a2575abf251bfadc672349
|
/main.py
|
40f0b0bd54fb73bd86e5354c476a419e1fd0ed16
|
[] |
no_license
|
602p/siscalc
|
cda58454a10780cceb018d0405377dbd49444911
|
0c82d1e1acdb0c1c99a09a7d02f47367ef036989
|
HEAD
| 2016-09-01T15:45:22.996359
| 2016-03-19T06:18:09
| 2016-03-19T06:18:09
| 54,244,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,419
|
py
|
import browser
class Category(object):
def __init__(self, *a, **k):
self.container=browser.html.DIV()
self.assignments_container=browser.html.DIV()
self.percent=browser.html.INPUT(readonly=True)
self.weight=browser.html.INPUT(type="number")
        self.add=browser.html.BUTTON("Add Assignment")
self.delete_this=browser.html.BUTTON("X")
self.container<=browser.html.INPUT(value="Category")
self.container<=" Weight:"
self.container<=self.weight
self.container<=" "
self.container<=self.percent
self.container<=self.add
self.container<=" "
self.container<=self.delete_this
self.container<=browser.html.BR()
self.container<=self.assignments_container
browser.document.getElementById("categories_container")<=self.container
self.add.bind("click", self.add_assignment)
self.delete_this.bind("click", self.remove_this)
self.weight.bind("input", self.update)
categories.append(self)
self.assignments=[]
def register_assignement(self, assignment):
self.assignments.append(assignment)
def update(self, *a, **k):
sum_score=sum([float(a.score.value) for a in self.assignments])
sum_max=sum([float(a.max.value) for a in self.assignments])
self._sum_max=sum_max
self._percent=(sum_score/sum_max)*100
self.percent.value=str(self._percent)+"%"
_update_class()
def _delete(self, a):
self.assignments.remove(a)
self.update()
def remove_this(self, *a, **k):
self.container.clear()
categories.remove(self)
def add_assignment(self, *a, **k):
Assignment(self)
class Assignment(object):
def __init__(self, parent):
self.container=browser.html.DIV("-->")
self.score=browser.html.INPUT(type="number")
self.max=browser.html.INPUT(type="number")
self.percent=browser.html.INPUT(readonly=True)
self.remove=browser.html.BUTTON("X")
self.as_pct=browser.html.BUTTON("%")
self.container<=browser.html.INPUT(value="Assignment")
self.container<=":"
self.container<=self.score
self.container<="/"
self.container<=self.max
self.container<=self.percent
self.container<=self.remove
self.container<=self.as_pct
self.container<=browser.html.BR()
self.parent=parent
self.parent.assignments_container<=self.container
self.score.bind("input", self.update)
self.max.bind("input", self.update)
self.remove.bind("click", self.delete)
self.as_pct.bind("click", self.alert_as_pct)
self.parent.register_assignement(self)
def alert_as_pct(self, *a, **k):
        browser.alert("This assignment is "+str((float(self.max.value)/self.parent._sum_max)*100*float(self.parent.weight.value))+"% of your overall grade")
def update(self, *a, **k):
self.percent.value=str((float(self.score.value)/float(self.max.value))*100)+"%"
self.parent.update()
def delete(self, *a, **k):
self.container.clear()
self.parent._delete(self)
categories=[]
browser.document["add_category"].bind("click", Category)
def _update_class():
browser.document["class_pct"].value=str(sum([float(c.weight.value)*c._percent for c in categories]))+"%"
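# Worked example of the weighted sum above (added for illustration, not part
# of the original script): with two categories weighted 0.4 and 0.6 scoring
# 90% and 80%, the class grade is 0.4*90 + 0.6*80 = 84, shown as "84.0%".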
|
[
"louis@goessling.com"
] |
louis@goessling.com
|
9006a9071155da1ddc1db98dd44bc61448f755f0
|
7374204324f6326663d12b3dd1fecc5bebb6854e
|
/algorithm-pattern/binary_op/136.py
|
8443de71e568342720ea162d84e376931fc2706a
|
[] |
no_license
|
KevinChen1994/leetcode-algorithm
|
c18b58df398027078b0c0f468c4c873e9419433b
|
1bcf3206cd3acc428ec690cb883c612aaf708aac
|
refs/heads/master
| 2023-02-07T11:35:47.747207
| 2023-01-29T11:08:49
| 2023-01-29T11:08:49
| 230,386,123
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 546
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 _*-
# author:chenmeng
# datetime:2020/7/17 13:51
'''
solution: Use the XOR operation: XOR-ing two equal numbers gives 0, and XOR-ing any number with 0 leaves it unchanged. Because XOR is commutative and associative, e.g. 1^2^2 = 2^2^1 = 0^1 = 1, XOR-ing every element cancels the pairs and leaves only the single number.
'''
class Solution:
def singleNumber(self, nums):
result = nums[0]
for num in nums[1:]:
result = result ^ num
return result
if __name__ == '__main__':
solution = Solution()
nums = [3, 2, 2]
print(solution.singleNumber(nums))
|
[
"346521888@qq.com"
] |
346521888@qq.com
|
cf253b4a79d908574d1f8ec1b551a5b8cb92373c
|
1825283527f5a479204708feeaf55f4ab6d1290b
|
/leetcode/python/546/original/546.remove-boxes.0.py
|
4414c0fc7ebe8b59905309e520e365b774e2b427
|
[] |
no_license
|
frankieliu/problems
|
b82c61d3328ffcc1da2cbc95712563355f5d44b5
|
911c6622448a4be041834bcab25051dd0f9209b2
|
refs/heads/master
| 2023-01-06T14:41:58.044871
| 2019-11-24T03:47:22
| 2019-11-24T03:47:22
| 115,065,956
| 1
| 0
| null | 2023-01-04T07:25:52
| 2017-12-22T02:06:57
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,077
|
py
|
#
# @lc app=leetcode id=546 lang=python3
#
# [546] Remove Boxes
#
# https://leetcode.com/problems/remove-boxes/description/
#
# algorithms
# Hard (37.29%)
# Total Accepted: 8.4K
# Total Submissions: 22.4K
# Testcase Example: '[1,3,2,2,2,3,4,3,1]'
#
# Given several boxes with different colors represented by different positive
# numbers.
# You may experience several rounds to remove boxes until there is no box left.
# Each time you can choose some continuous boxes with the same color (composed
# of k boxes, k >= 1), remove them and get k*k points.
# Find the maximum points you can get.
#
#
# Example 1:
# Input:
#
# [1, 3, 2, 2, 2, 3, 4, 3, 1]
#
# Output:
#
# 23
#
# Explanation:
#
# [1, 3, 2, 2, 2, 3, 4, 3, 1]
# ----> [1, 3, 3, 4, 3, 1] (3*3=9 points)
# ----> [1, 3, 3, 3, 1] (1*1=1 points)
# ----> [1, 1] (3*3=9 points)
# ----> [] (2*2=4 points)
#
#
#
# Note:
# The number of boxes n would not exceed 100.
#
#
#
class Solution:
    def removeBoxes(self, boxes):
        """
        :type boxes: List[int]
        :rtype: int
        """
        # The stub was empty; below is one standard solution sketch (an
        # editorial completion, not the original author's code): interval DP
        # with memoization. dp(l, r, k) is the best score obtainable from
        # boxes[l..r] when k extra boxes equal to boxes[l] are already
        # attached to its left.
        from functools import lru_cache

        @lru_cache(maxsize=None)
        def dp(l, r, k):
            if l > r:
                return 0
            # Absorb the run of boxes equal to boxes[l] at the left end.
            while l < r and boxes[l + 1] == boxes[l]:
                l += 1
                k += 1
            # Option 1: remove the left run (k + 1 boxes) right away.
            best = (k + 1) * (k + 1) + dp(l + 1, r, 0)
            # Option 2: clear boxes[l+1..m-1] first, then merge the left run
            # with a later box of the same color at position m.
            for m in range(l + 1, r + 1):
                if boxes[m] == boxes[l]:
                    best = max(best, dp(l + 1, m - 1, 0) + dp(m, r, k + 1))
            return best

        return dp(0, len(boxes) - 1, 0) if boxes else 0
|
[
"frankie.y.liu@gmail.com"
] |
frankie.y.liu@gmail.com
|
cf3ff688eca559e72b7a03c95a74663b58dfad04
|
83a506a501561602ad3b259341225ddfbddab160
|
/GameServer/socket_server/server/factorys/tornadoFactory.py
|
287f7c09b26205ca9792a3d77952117fda7e9526
|
[] |
no_license
|
daxingyou/SouYouJi_Game
|
9dc5f02eb28b910efb229653a8d0bffe425a7911
|
7311a994c9aba15b7234331709975ebc37e8453d
|
refs/heads/master
| 2023-03-28T01:36:48.955107
| 2020-04-05T01:24:17
| 2020-04-05T01:24:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,412
|
py
|
# -*- coding:utf-8 -*-
# !/bin/python
"""
Author: Winslen
Date: 2019/10/15
Revision: 1.0.0
Description: Description
"""
import traceback
import tornado
import tornado.httpserver
import tornado.ioloop
import tornado.log
import tornado.options
import tornado.web
from typing import *
from define.define_redis_key import *
from define.define_consts import *
from public.public_logger import *
class baseFactory(object):
def __init__(self, address='127.0.0.1', port=9797, debug=False, *args, **kwargs):
self.address = address
self.port = port
self.serverTag = '%s:%s' % (self.address, self.port)
self.debug = debug
self.runtime = int(time.time())
self.OrderJob = {}
self._logger_ = None
self.curServerStage = ServerStage.none
self.setServerOrderJob()
def getLogger(self):
logger = getHandlerLogger(fileLabel='%s_%s_server' % (self.address, self.port), loggerLabel='server',
level=logging.DEBUG, handler_types=[Handler_Class.HourFile], when='H')
logger.setLevel(logging.DEBUG)
return logger
def log(self, msg='', level='info'):
if not self._logger_:
self._logger_ = self.getLogger()
try:
if level in ['warn', 'warning']:
self._logger_.warning(msg)
elif level == 'error':
self._logger_.error(msg)
else:
self._logger_.info(msg)
except:
traceback.print_exc()
print(msg)
def setServerOrderJob(self):
self.OrderJob['closeServer'] = self.closeServer
def closeServer(self, waitSecond: int = 60, *args, **kwargs):
waitSecond = int(waitSecond)
if self.curServerStage == ServerStage.readyClose:
return
self.curServerStage = ServerStage.readyClose
        self.log('Server [%s] is shutting down and will close in [%s] seconds' % (self.serverTag, waitSecond))
self.add_timeLater_callFunc(delay=waitSecond, callback=self.doCloseServer)
def doCloseServer(self, *args, **kwargs):
self.curServerStage = ServerStage.Closed
tornado.ioloop.IOLoop.current().stop()
        self.log('Server [%s] has shut down' % (self.serverTag))
def onHeartbeat(self):
timeStamp = int(time.time() * 1000)
try:
self.onTick(timeStamp)
except:
traceback.print_exc()
def onTick(self, timeStamp):
self.checkOrderJobs()
def checkOrderJobs(self):
orderServices = self.getOrderServices()
for _order in orderServices:
_orderArgs = _order.split('|')
jobKey = _orderArgs.pop(0)
jobFunc = self.OrderJob.get(jobKey, None)
if jobFunc:
self.doOrderJobs_before(jobFunc, _orderArgs, _order)
doResult, err = self.doOrderJobs_doing(jobFunc, _orderArgs, _order)
if doResult:
self.doOrderJobs_afterSuc(jobFunc, _orderArgs, _order)
else:
self.doOrderJobs_afterFaild(jobFunc, _orderArgs, _order, err)
def getOrderServices(self):
return []
def notFoundOrderJob(self, jobKey, orderArgs):
        self.log('[notFoundOrderJob] unknown job [%s] => %s' % (jobKey, orderArgs))
def doOrderJobs_before(self, jobFunc, orderArgs, order):
pass
def doOrderJobs_doing(self, jobFunc, orderArgs, order):
        self.log('About to execute [%s]' % (order))
try:
jobFunc(*orderArgs)
except Exception as err:
traceback.print_exc()
            self.log('[ERROR][doOrderJobs_doing] executing [%s] failed' % (order), level='error')
return False, err
else:
return True, ''
def doOrderJobs_afterSuc(self, job, _orderArgs, _order):
pass
def doOrderJobs_afterFaild(self, job, _orderArgs, _order, err=''):
pass
def add_timeLater_callFunc(self, delay: float = 0, callback=None, **kwargs):
if not callback:
return
tornado.ioloop.IOLoop.current().call_later(delay=delay, callback=callback, **kwargs)
def add_callAt_callFunc(self, when: float, callback=None, **kwargs):
if not callback:
return
return tornado.ioloop.IOLoop.current().call_at(when=when, callback=callback, **kwargs)
def add_PeriodicCallback(self, callback: Callable, callback_time: float, rightAwayDo: bool = False,
jitter: float = 0):
if rightAwayDo:
callback()
periodicClass = tornado.ioloop.PeriodicCallback(callback, callback_time, jitter=jitter)
periodicClass.start()
return periodicClass
class TornadoFactory(baseFactory):
def __init__(self, *args, **kwargs):
super(TornadoFactory, self).__init__(*args, **kwargs)
self.httpServer = None
def getAppRouterHandler(self):
return []
def getApplicationConfigs(self):
return dict(
static_path=os.path.join(os.path.dirname(__file__), "..\\static"),
template_path=os.path.join(os.path.dirname(__file__), "..\\template"),
debug=self.debug,
compiled_template_cache=False
)
def initApplication(self):
app = tornado.web.Application(self.getAppRouterHandler(), **self.getApplicationConfigs())
app.factory = self
return app
def doBeforeServerStart(self):
self.curServerStage = ServerStage.readyStart
def doAfterServerStart(self):
self.curServerStage = ServerStage.doing
def run_server(self):
        self.log('Server [%s] is starting' % (self.serverTag))
self.doBeforeServerStart()
app = self.initApplication()
self.httpServer = tornado.httpserver.HTTPServer(app)
self.httpServer.listen(self.port, '0.0.0.0')
self.httpServer.address = self.address
self.httpServer.port = self.port
self.httpServer.factory = self
tornado.ioloop.PeriodicCallback(self.onHeartbeat, 3000).start()
        self.log('Server [%s] has started' % (self.serverTag))
self.doAfterServerStart()
tornado.ioloop.IOLoop.current().start()
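# Minimal launch sketch (hypothetical values, kept commented out so the module
# behaves exactly as before when imported):
#
#     if __name__ == '__main__':
#         factory = TornadoFactory(address='127.0.0.1', port=9797, debug=True)
#         factory.run_server()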
|
[
"ronnyzh@yeah.net"
] |
ronnyzh@yeah.net
|
ac7db187efbe323fedbbcc02c5b41ba4416264b0
|
57ec8eff01275cdae43243900a422b87836b807b
|
/arithmetic.py
|
bc77393ea397e581d0238d878fc231f7fe59759a
|
[] |
no_license
|
Ihyatt/calculator-1
|
15624b997c1c19a6bf30ac422f55669838ea7115
|
43f5d63eb81398aff3cf10e3d2d77c4296bdddc8
|
refs/heads/master
| 2021-01-10T16:29:56.063120
| 2016-04-06T19:54:49
| 2016-04-06T19:54:49
| 55,635,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 791
|
py
|
def add(num1, num2):
""" This is function will add two numbers """
return num1 + num2
def subtract(num1, num2):
""" This function will subtract two numbers """
return num1 - num2
def multiply(num1, num2):
""" This function will multiply two numbers """
return num1 * num2
def divide(num1, num2):
""" This funtion will divide two numbers """
return float(num1) / num2
def square(num1):
"""This function will square a number"""
return num1 ** 2
def cube(num1):
""" This function will cube a number """
return num1 ** 3
def power(num1, num2):
""" This function will return the power of one number by the second number """
return num1 ** num2
def mod(num1, num2):
""" This number will return remainder of two numbers when divided """
return num1 % num2
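# A few illustrative calls (examples added for clarity, not in the original):
#   divide(7, 2) == 3.5   # the float() cast forces true division on Python 2
#   power(2, 10) == 1024
#   mod(10, 3) == 1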
|
[
"info@hackbrightacademy.com"
] |
info@hackbrightacademy.com
|
2cf45b3d2a4735668005b263d62b610abb28794a
|
d897c2bc4ba9a84e7e8a2fe3e998d78cd116f920
|
/max_seq/gen/generatore
|
64aabc5dc9d2141313929567a4aedb167fe56ad7
|
[] |
no_license
|
romeorizzi/problemsCMS_for_LaboProg
|
8907622744bc89752391024f24025a7e9706501b
|
027b1b204efe602461e93d8b1c194a09eb6526cd
|
refs/heads/master
| 2020-04-03T17:33:52.384915
| 2020-03-25T07:10:17
| 2020-03-25T07:10:17
| 155,449,755
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
#!/usr/bin/env python2
from limiti import *
from varie import *
from sys import argv, exit, stderr
import os
from numpy.random import random, randint, seed as nseed
from random import choice, sample, shuffle, seed as rseed
usage="""Generatore di "prova".
Parametri:
* N (numero)
* S (seed)
Constraint:
* 1 <= N <= %d
""" % (MAXN)
def run(N):
print N
print " ".join(map(str, [randint(-70, 99) for i in xrange(0, N)]))
if __name__ == "__main__":
if len(argv) != 3:
print usage
exit(1)
N, S = map(int, argv[1:])
assert (1 <= N <= MAXN)
    # on a non-positive seed, copy a sample input from the .tex file
if S <= 0:
print extract_input()[-S],
exit(0)
nseed(S)
rseed(S)
run(N)
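# Example invocation (assumes the script is executable as ./generatore):
#   ./generatore 100 42   -> prints N=100, then 100 random values in [-70, 99)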
|
[
"romeo.rizzi@univr.it"
] |
romeo.rizzi@univr.it
|
|
f2f2a7316f41e31019494e9057e0c5e91b6b7285
|
117f066c80f3863ebef74463292bca6444f9758a
|
/api/4min/search/python-flask-server/swagger_server/test/test_search_controller.py
|
81b88eb6a8b94cfdb820b62714ac0fb1f63005a0
|
[] |
no_license
|
cottrell/notebooks
|
c6de3842cbaeb71457d270cbe6fabc8695a6ee1b
|
9eaf3d0500067fccb294d064ab78d7aaa03e8b4d
|
refs/heads/master
| 2023-08-09T22:41:01.996938
| 2023-08-04T22:41:51
| 2023-08-04T22:41:51
| 26,830,272
| 3
| 1
| null | 2023-03-04T03:58:03
| 2014-11-18T21:14:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,402
|
py
|
# coding: utf-8
from __future__ import absolute_import
from swagger_server.models.search_request import SearchRequest
from swagger_server.models.search_response import SearchResponse
from . import BaseTestCase
from six import BytesIO
from flask import json
class TestSearchController(BaseTestCase):
""" SearchController integration test stubs """
def test_search_items_to_client(self):
"""
Test case for search_items_to_client
todo
"""
body = SearchResponse()
response = self.client.open('/v0/search/items_to_client',
method='GET',
data=json.dumps(body),
content_type='application/json')
self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
def test_search_items_to_items(self):
"""
Test case for search_items_to_items
todo
"""
body = SearchRequest()
response = self.client.open('/v0/search/items_to_items',
method='GET',
data=json.dumps(body),
content_type='application/json')
self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
if __name__ == '__main__':
import unittest
unittest.main()
|
[
"cottrell@users.noreply.github.com"
] |
cottrell@users.noreply.github.com
|
2d590385ddba87310af206021f891c91aea028ef
|
df7b40e95718ac0f6071a0ba571b42efc81cf6de
|
/configs/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes.py
|
de4a8a5e9f030f1e8a8802596885186163f23eed
|
[
"Apache-2.0"
] |
permissive
|
shinianzhihou/ChangeDetection
|
87fa2c498248e6124aeefb8f0ee8154bda36deee
|
354e71234bef38b6e142b6ba02f23db958582844
|
refs/heads/master
| 2023-01-23T20:42:31.017006
| 2023-01-09T11:37:24
| 2023-01-09T11:37:24
| 218,001,748
| 162
| 29
|
Apache-2.0
| 2022-11-03T04:11:00
| 2019-10-28T08:41:54
|
Python
|
UTF-8
|
Python
| false
| false
| 368
|
py
|
_base_ = './deeplabv3_r50-d8_512x1024_80k_cityscapes.py'
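# Keys defined below override the matching keys inherited from the `_base_`
# config above (standard mm-series config inheritance).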
model = dict(
pretrained='open-mmlab://resnet101_v1c',
backbone=dict(
depth=101,
dilations=(1, 1, 1, 2),
strides=(1, 2, 2, 1),
multi_grid=(1, 2, 4)),
decode_head=dict(
dilations=(1, 6, 12, 18),
sampler=dict(type='OHEMPixelSampler', min_kept=100000)))
|
[
"1178396201@qq.com"
] |
1178396201@qq.com
|
6a2271851da9a4bd341bde931f2a28406cfaf4b2
|
741333ced9ea1b326997dc031e5de27529bad04a
|
/glue_vispy_viewers/extern/vispy/scene/cameras/_base.py
|
f3829d981ac94a9eacfd344537214be1e3f3a7af
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
jzuhone/glue-vispy-viewers
|
f1b7f506d3263c4b0c2f4032d4940b931b2c1ada
|
d940705f4ba95f8d7a9a74d37fb68c71080b490a
|
refs/heads/master
| 2020-06-20T19:10:02.866527
| 2019-06-24T11:40:39
| 2019-06-24T11:40:39
| 197,217,964
| 0
| 0
|
BSD-2-Clause
| 2019-07-16T15:14:53
| 2019-07-16T15:14:52
| null |
UTF-8
|
Python
| false
| false
| 1,263
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from .base_camera import BaseCamera
from .perspective import PerspectiveCamera
from .panzoom import PanZoomCamera
from .arcball import ArcballCamera
from .turntable import TurntableCamera
from .fly import FlyCamera
def make_camera(cam_type, *args, **kwargs):
""" Factory function for creating new cameras using a string name.
Parameters
----------
cam_type : str
        May be one of:
            * 'panzoom' : Creates :class:`PanZoomCamera`
            * 'perspective' : Creates :class:`PerspectiveCamera`
            * 'turntable' : Creates :class:`TurntableCamera`
            * 'fly' : Creates :class:`FlyCamera`
            * 'arcball' : Creates :class:`ArcballCamera`
            * None : Creates :class:`BaseCamera`
Notes
-----
All extra arguments are passed to the __init__ method of the selected
Camera class.
"""
cam_types = {None: BaseCamera}
for camType in (BaseCamera, PanZoomCamera, PerspectiveCamera,
TurntableCamera, FlyCamera, ArcballCamera):
cam_types[camType.__name__[:-6].lower()] = camType
try:
return cam_types[cam_type](*args, **kwargs)
except KeyError:
raise KeyError('Unknown camera type "%s". Options are: %s' %
(cam_type, cam_types.keys()))
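# Usage sketch (illustrative, not part of the original module): the lookup key
# is the camera class name minus the trailing "Camera", lower-cased, exactly
# as built in the loop above.
#
#     cam = make_camera('turntable')   # -> TurntableCamera()
#     cam = make_camera('arcball')     # -> ArcballCamera()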
|
[
"thomas.robitaille@gmail.com"
] |
thomas.robitaille@gmail.com
|
a55df25f9f74de9e0ec69b926948719fa010268d
|
6518c74441a68fc99b2b08423b5ea11480806499
|
/tests/resources/mlflow-test-plugin/mlflow_test_plugin/dummy_evaluator.py
|
c88bd21d093219c4d2d59ada023cd3f754142dc2
|
[
"Apache-2.0"
] |
permissive
|
criteo-forks/mlflow
|
da58e64d09700623810da63999a1aca81b435b90
|
499284d8dc9e9ec79d8d9dbd03c58d162a2b7eaa
|
refs/heads/master
| 2023-04-14T17:59:29.997458
| 2022-01-11T09:50:26
| 2022-01-11T09:50:26
| 191,391,769
| 5
| 4
|
Apache-2.0
| 2023-04-07T15:16:20
| 2019-06-11T14:44:00
|
Python
|
UTF-8
|
Python
| false
| false
| 3,149
|
py
|
import mlflow
from mlflow.models.evaluation import (
ModelEvaluator,
EvaluationMetrics,
EvaluationArtifact,
EvaluationResult,
)
from mlflow.tracking.artifact_utils import get_artifact_uri
from mlflow.entities import Metric
from sklearn import metrics as sk_metrics
import time
import pandas as pd
import io
class Array2DEvaluationArtifact(EvaluationArtifact):
def save(self, output_artifact_path):
pd.DataFrame(self._content).to_csv(output_artifact_path, index=False)
def _load_content_from_file(self, local_artifact_path):
pdf = pd.read_csv(local_artifact_path)
return pdf.to_numpy()
class DummyEvaluator(ModelEvaluator):
# pylint: disable=unused-argument
def can_evaluate(self, *, model_type, evaluator_config, **kwargs):
return model_type in ["classifier", "regressor"]
def _log_metrics(self, run_id, metrics, dataset_name):
"""
Helper method to log metrics into specified run.
"""
client = mlflow.tracking.MlflowClient()
timestamp = int(time.time() * 1000)
client.log_batch(
run_id,
metrics=[
Metric(key=f"{key}_on_{dataset_name}", value=value, timestamp=timestamp, step=0)
for key, value in metrics.items()
],
)
# pylint: disable=unused-argument
def evaluate(
self, *, model, model_type, dataset, run_id, evaluator_config, **kwargs
) -> EvaluationResult:
client = mlflow.tracking.MlflowClient()
X = dataset.features_data
y = dataset.labels_data
y_pred = model.predict(X)
if model_type == "classifier":
accuracy_score = sk_metrics.accuracy_score(y, y_pred)
metrics = EvaluationMetrics(accuracy_score=accuracy_score)
self._log_metrics(run_id, metrics, dataset.name)
confusion_matrix = sk_metrics.confusion_matrix(y, y_pred)
confusion_matrix_artifact_name = f"confusion_matrix_on_{dataset.name}.csv"
confusion_matrix_artifact = Array2DEvaluationArtifact(
uri=get_artifact_uri(run_id, confusion_matrix_artifact_name),
content=confusion_matrix,
)
confusion_matrix_csv_buff = io.StringIO()
confusion_matrix_artifact.save(confusion_matrix_csv_buff)
client.log_text(
run_id, confusion_matrix_csv_buff.getvalue(), confusion_matrix_artifact_name
)
artifacts = {confusion_matrix_artifact_name: confusion_matrix_artifact}
elif model_type == "regressor":
mean_absolute_error = sk_metrics.mean_absolute_error(y, y_pred)
mean_squared_error = sk_metrics.mean_squared_error(y, y_pred)
metrics = EvaluationMetrics(
mean_absolute_error=mean_absolute_error, mean_squared_error=mean_squared_error
)
self._log_metrics(run_id, metrics, dataset.name)
artifacts = {}
else:
raise ValueError(f"Unsupported model type {model_type}")
return EvaluationResult(metrics=metrics, artifacts=artifacts)
|
[
"noreply@github.com"
] |
criteo-forks.noreply@github.com
|
097f90f0262110b4a39fec200710fa8f135f45a9
|
db61f1b3db18ef698f08e456c60e2162a5479807
|
/experiments/timit/data/test_load_dataset_joint_ctc_attention.py
|
5825b2d4d7443ffef8d010fe251f20028c20fa14
|
[
"MIT"
] |
permissive
|
fresty/tensorflow_end2end_speech_recognition
|
00aa1d827aa9b04862389ff1a8169259adcd6db9
|
a95ba6a29208e70d6ea102bbca2b03ea492c708c
|
refs/heads/master
| 2021-01-01T06:14:25.378685
| 2017-07-10T11:35:05
| 2017-07-10T11:35:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,269
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
import unittest
import tensorflow as tf
sys.path.append('../../../')
from experiments.timit.data.load_dataset_joint_ctc_attention import Dataset
from experiments.utils.labels.character import num2char
from experiments.utils.labels.phone import num2phone
from experiments.utils.sparsetensor import sparsetensor2list
from experiments.utils.measure_time_func import measure_time
class TestLoadDatasetJointCTCAttention(unittest.TestCase):
def test(self):
self.check_loading(label_type='character', num_gpu=1, is_sorted=True)
self.check_loading(label_type='character', num_gpu=1, is_sorted=False)
self.check_loading(label_type='phone61', num_gpu=1, is_sorted=True)
self.check_loading(label_type='phone61', num_gpu=1, is_sorted=False)
# self.check_loading(label_type='character', num_gpu=2, is_sorted=True)
# self.check_loading(label_type='character', num_gpu=2, is_sorted=False)
# self.check_loading(label_type='phone61', num_gpu=2, is_sorted=True)
# self.check_loading(label_type='phone61', num_gpu=2, is_sorted=False)
# For many GPUs
# self.check_loading(label_type='character', num_gpu=7, is_sorted=True)
def check_loading(self, label_type, num_gpu, is_sorted):
print('----- label_type: ' + label_type + ', num_gpu: ' +
str(num_gpu) + ', is_sorted: ' + str(is_sorted) + ' -----')
batch_size = 64
eos_index = 2 if label_type == 'character' else 1
dataset = Dataset(data_type='train', label_type=label_type,
batch_size=batch_size, eos_index=eos_index,
is_sorted=is_sorted, is_progressbar=True,
num_gpu=num_gpu)
tf.reset_default_graph()
with tf.Session().as_default() as sess:
print('=> Loading mini-batch...')
if label_type == 'character':
att_map_file_path = '../metrics/mapping_files/attention/char2num.txt'
ctc_map_file_path = '../metrics/mapping_files/ctc/char2num.txt'
map_fn = num2char
else:
att_map_file_path = '../metrics/mapping_files/attention/phone2num_' + \
label_type[5:7] + '.txt'
ctc_map_file_path = '../metrics/mapping_files/ctc/phone2num_' + \
label_type[5:7] + '.txt'
map_fn = num2phone
mini_batch = dataset.next_batch(session=sess)
iter_per_epoch = int(dataset.data_num /
(batch_size * num_gpu)) + 1
for i in range(iter_per_epoch + 1):
return_tuple = mini_batch.__next__()
inputs = return_tuple[0]
att_labels = return_tuple[1]
ctc_labels_st = return_tuple[2]
att_labels_seq_len = return_tuple[4]
if num_gpu > 1:
for inputs_gpu in inputs:
print(inputs_gpu.shape)
inputs = inputs[0]
att_labels = att_labels[0]
ctc_labels_st = ctc_labels_st[0]
att_labels_seq_len = att_labels_seq_len[0]
ctc_labels = sparsetensor2list(
ctc_labels_st, batch_size=len(inputs))
if num_gpu == 1:
for inputs_i, labels_i in zip(inputs, ctc_labels):
if len(inputs_i) < len(labels_i):
print(len(inputs_i))
print(len(labels_i))
raise ValueError
att_str_true = map_fn(
att_labels[0][0: att_labels_seq_len[0]], att_map_file_path)
ctc_str_true = map_fn(ctc_labels[0], ctc_map_file_path)
att_str_true = re.sub(r'_', ' ', att_str_true)
ctc_str_true = re.sub(r'_', ' ', ctc_str_true)
# print(att_str_true)
# print(ctc_str_true)
# print('-----')
if __name__ == '__main__':
unittest.main()
|
[
"hiro.mhbc@gmail.com"
] |
hiro.mhbc@gmail.com
|
1204cb5de1ac2f1d3306c831671aeefb0c4286c2
|
1b9075ffea7d4b846d42981b41be44238c371202
|
/tags/2007-EOL/applications/editors/qt_xhtmledit/actions.py
|
e00311a209fc6a3d1204dd6195e524b1ba17a2e5
|
[] |
no_license
|
pars-linux/contrib
|
bf630d4be77f4e484b8c6c8b0698a5b34b3371f4
|
908210110796ef9461a1f9b080b6171fa022e56a
|
refs/heads/master
| 2020-05-26T20:35:58.697670
| 2011-07-11T11:16:38
| 2011-07-11T11:16:38
| 82,484,996
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 846
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import shelltools
from pisi.actionsapi import pisitools
WorkDir = "htmledit"
def setup():
shelltools.system("lrelease-qt4 src/locale/edit_tr.ts -qm src/locale/edit_tr.qm")
shelltools.system("qmake-qt4 htmledit.pro")
def build():
autotools.make()
def install():
pisitools.dobin("edithtml")
pisitools.insinto("/usr/share/qt_xhtmledit/locale", "src/locale/*.qm")
pisitools.insinto("/usr/share/qt_xhtmledit","src/img")
pisitools.insinto("/usr/share/pixmaps/", "wp.png", "qt_xhtmledit.png")
pisitools.remove("/usr/share/qt_xhtmledit/img/Thumbs.db")
pisitools.dodoc("LICENSE")
|
[
"turkay.eren@gmail.com"
] |
turkay.eren@gmail.com
|
0bbcecd2761b1b9dbb8ba24462c8f20153c402a6
|
41d9b92ef2a74a4ba05d27ffbe3beb87884c4ce7
|
/supervised_learning/0x06-keras/0-sequential.py
|
e0175875bc17bcbc0416647a5a71d94f1109cf53
|
[] |
no_license
|
JosephK89/holbertonschool-machine_learning
|
3f96d886c61d8de99a23e4348fb045b9c930740e
|
aa5c500f7d8ebeec951f9ab5ec017cae64007c25
|
refs/heads/main
| 2023-08-14T18:42:53.481354
| 2021-10-10T19:53:40
| 2021-10-10T19:53:40
| 386,248,140
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 921
|
py
|
#!/usr/bin/env python3
"""build nn with keras"""
import tensorflow.keras as K
def build_model(nx, layers, activations, lambtha, keep_prob):
"""function that build nn with keras library"""
sequential = []
shape = (nx,)
reg_l2 = K.regularizers.l2(lambtha)
for i in range(len(layers)):
        if i == 0:
sequential.append(K.layers.Dense(layers[i],
activation=activations[i],
kernel_regularizer=reg_l2,
input_shape=shape))
else:
sequential.append(K.layers.Dropout(1 - keep_prob))
sequential.append(K.layers.Dense(layers[i],
activation=activations[i],
kernel_regularizer=reg_l2))
model = K.Sequential(sequential)
return model
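# Usage sketch (hypothetical sizes, e.g. a 784-feature input classified into
# 10 classes; not part of the original task file):
#
#     model = build_model(784, [256, 256, 10], ['relu', 'relu', 'softmax'],
#                         lambtha=0.01, keep_prob=0.95)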
|
[
"josephkamel262@gmail.com"
] |
josephkamel262@gmail.com
|