blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cc76df2f50876491c6194483bb5da74af6344ea2
|
7771130ea6eb1f076a7d18e672d3d82d5996e957
|
/contrib/testgen/base58.py
|
70fccf5b3202c12cb969d604d6ee3cb3e8055d00
|
[
"MIT"
] |
permissive
|
gdrcoin/gdrcoin
|
49707508dfc1b14ace3817854416355a925539df
|
f9f2137b3d9069bfc8e3c69c90a684a061dfb6aa
|
refs/heads/master
| 2020-03-10T18:01:49.563615
| 2018-04-14T12:36:52
| 2018-04-14T12:36:52
| 129,511,260
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,978
|
py
|
# Copyright (c) 2012-2016 The Gdrcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Gdrcoin base58 encoding and decoding.
Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
    # Thin alias so code written against a PyCrypto-style `SHA256.new(...)`
    # API is backed by hashlib.
    new = hashlib.sha256

if str != bytes:
    # Python 3.x: iterating a bytes object yields ints, not 1-char strings,
    # so shadow ord/chr at module level to keep the byte arithmetic below
    # working identically on Python 2 and 3.
    def ord(c):
        # c is already an int byte value on Python 3
        return c
    def chr(n):
        # single-byte bytes object for the int n
        return bytes( (n,) )

# Base58 alphabet: digits/letters minus 0, O, I, l (visually ambiguous).
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
    """ encode v, which is a string of bytes, to base58.
    """
    # Interpret v as a big-endian integer.
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        long_value += (256**i) * ord(c)

    # Repeatedly divide by 58, emitting digits most-significant first.
    result = ''
    while long_value >= __b58base:
        div, mod = divmod(long_value, __b58base)
        result = __b58chars[mod] + result
        long_value = div
    result = __b58chars[long_value] + result

    # Gdrcoin does a little leading-zero-compression:
    # leading 0-bytes in the input become leading-1s
    nPad = 0
    for c in v:
        # BUG FIX: on Python 3 iterating bytes yields ints, so the old
        # test `c == '\0'` never matched and leading zero bytes were
        # silently dropped from the encoding.  The module-level ord shim
        # normalizes both versions (ord('\x00') == 0 on Py2, identity on Py3).
        if ord(c) == 0:
            nPad += 1
        else:
            break
    return (__b58chars[0]*nPad) + result
def b58decode(v, length = None):
    """ decode v into a string of len bytes
    """
    # Interpret the base58 digits as a big integer, most-significant first.
    # NOTE(review): characters outside the alphabet make find() return -1
    # and silently corrupt the value; callers rely on the checksum in
    # b58decode_chk to catch such corruption.
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        long_value += __b58chars.find(c) * (__b58base**i)

    # Emit the integer back out base-256.  `chr` is the module-level shim,
    # so result is bytes on Python 3 and str on Python 2.
    result = bytes()
    while long_value >= 256:
        div, mod = divmod(long_value, 256)
        result = chr(mod) + result
        long_value = div
    result = chr(long_value) + result

    # Leading '1' digits encode leading zero bytes.
    nPad = 0
    for c in v:
        if c == __b58chars[0]: nPad += 1
        else: break
    result = chr(0)*nPad + result

    # Enforce the caller's expected length, if given.
    if length is not None and len(result) != length:
        return None
    return result
def checksum(v):
    """Return the 4-byte checksum of v: the first four bytes of SHA256(SHA256(v))."""
    inner = SHA256.new(v).digest()
    return SHA256.new(inner).digest()[:4]
def b58encode_chk(v):
    """Base58-encode v with its 4-byte double-SHA256 checksum appended."""
    payload = v + checksum(v)
    return b58encode(payload)
def b58decode_chk(v):
    """Decode a base58check string; return the payload, or None if the checksum fails."""
    decoded = b58decode(v)
    if decoded is None:
        return None
    payload, check = decoded[:-4], decoded[-4:]
    if check != checksum(payload):
        return None
    return payload
def get_bcaddress_version(strAddress):
    """ Returns None if strAddress is invalid. Otherwise returns integer version of address. """
    decoded = b58decode_chk(strAddress)
    # A valid address payload is exactly 21 bytes: 1 version byte + 20-byte hash.
    if decoded is None or len(decoded) != 21:
        return None
    # ord is the module-level shim: works whether decoded[0] is an int (Py3) or str (Py2).
    return ord(decoded[0])
if __name__ == '__main__':
    # Test case (from http://gitorious.org/gdrcoin/python-base58.git)
    # BUG FIX: the original used `is 0`, comparing object identity rather
    # than value — it only worked via CPython's small-int cache and raises
    # a SyntaxWarning on modern Pythons.  Use == for value equality.
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
    _ohai = 'o hai'.encode('ascii')
    _tmp = b58encode(_ohai)
    assert _tmp == 'DYB3oMS'
    assert b58decode(_tmp, 5) == _ohai
    print("Tests passed")
|
[
"37983255+spineinhalb@users.noreply.github.com"
] |
37983255+spineinhalb@users.noreply.github.com
|
ad2f495082c2cab1b514035b13c0660751bebe7f
|
d0e379bfe75597bb7f20bd7cd5d62ea9c59478a4
|
/appengine/clients/models.py
|
dfeccfd8287c5e9ccfeaa17dc95eaa3fcdd2e64a
|
[] |
no_license
|
slee124565/flh-homecenter-appeng
|
3064224dc8e7b1ce2b2e381242f26da823b89c5c
|
92b508c3d39d173e250d221019cd0914ff65e5cd
|
refs/heads/master
| 2021-04-30T11:10:02.808188
| 2018-02-27T06:40:28
| 2018-02-27T06:40:28
| 121,348,566
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,167
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# from django.db import models
from firebasedb.models import FirebaseDB
import logging
logger = logging.getLogger(__name__)
def get_dev_db_obj(pi_serial):
    """Fetch the device record for `pi_serial` from the Firebase device DB."""
    return FirebaseDB().get_dev_info(pi_serial)
class ClientDev(object):
    """A client device record looked up by Pi serial in the Firebase DB."""

    _pi_serial = None

    def __init__(self, pi_serial):
        self._pi_serial = pi_serial
        self._db_obj = get_dev_db_obj(self._pi_serial)
        if self._db_obj is None:
            raise ValueError('device ({obj._pi_serial} not exist in db)'.format(obj=self))

    def __str__(self):
        return '{obj.__class__.__name__}({obj._pi_serial})'.format(obj=self)

    def get_dev_http_url(self):
        """Return the last registered http:// tunnel for this device, or None."""
        http_tunnel = None
        # Look the tunnel list up once instead of twice.
        dev_tunnels = self._db_obj.get('info', {}).get('tunnels', None)
        if dev_tunnels:
            for tunnel in dev_tunnels:
                if 'http://' in tunnel:
                    # No break on purpose: the last http tunnel wins,
                    # matching the original behavior.
                    http_tunnel = tunnel
        logger.debug('{obj} get_dev_http_url: {tunnel}'.format(obj=self, tunnel=http_tunnel))
        return http_tunnel
|
[
"lee.shiueh@gmail.com"
] |
lee.shiueh@gmail.com
|
6e7b9d64a9343c09209781e9882c730170067fb8
|
66fbb969c8844f6a0db7cf8939c3412516bf54ca
|
/binary_tree_maximum_path_sum.py
|
ac6419d3ffe9ca2ad49c162cc8be4b6fa92bc5c3
|
[] |
no_license
|
chunweiliu/leetcode2
|
67a86d5a0d8c3ffe41f53a46b0e5960edc64c56d
|
086b7c9b3651a0e70c5794f6c264eb975cc90363
|
refs/heads/master
| 2021-01-19T08:13:12.667448
| 2017-04-08T06:02:35
| 2017-04-08T06:02:35
| 87,612,004
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,434
|
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def maxPathSum(self, root):
        """
        Return the maximum path sum in a binary tree.

        A path is any parent-child connected sequence of nodes; it may
        start and end anywhere, including bending across a subtree root.
        :type root: TreeNode
        :rtype: int
        """
        def max_path_to_root(node):
            # Best downward path sum that starts at `node`; negative
            # branch sums are clipped to 0 (simply not taken).
            if not node:
                return 0
            left_gain = max(0, max_path_to_root(node.left))
            right_gain = max(0, max_path_to_root(node.right))
            # Path bending at `node` — it cannot extend to the parent,
            # so record it as a candidate answer here.
            self.max_path_value = max(self.max_path_value,
                                      left_gain + node.val + right_gain)
            # Value usable by the parent: node plus one branch only.
            return node.val + max(left_gain, right_gain)

        # BUG FIX: the original initialized to None (max(None, int) raises
        # TypeError on Python 3) and then called `max_path_value(root)` —
        # a non-existent name (the helper is max_path_to_root).  Start at
        # -inf so all-negative trees are handled correctly.
        self.max_path_value = float('-inf')
        max_path_to_root(root)
        return self.max_path_value
|
[
"gtoniliu@gmail.com"
] |
gtoniliu@gmail.com
|
e0a2821d8090488a95f78871663fc5e00b3fc88c
|
08e2c58c06ec587cc20c2bcd084efb9308d86043
|
/milp/gen_data.py
|
b84d5e6d592b0fcc941730fa9ea3dcece023aef9
|
[] |
no_license
|
horacepan/qap_scratch
|
f866ac6c62715b779039488bb0e9f2b13d859e5b
|
437ff78226173d9cfb465198c6e7183d86e948c6
|
refs/heads/master
| 2023-03-04T12:01:39.259432
| 2021-02-09T05:50:53
| 2021-02-09T05:50:53
| 287,471,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 688
|
py
|
import numpy as np
def generate_max_cut(n, e):
    """Build a random max-cut MILP instance with n nodes and e edges.

    Variables 0..n-1 are node indicators, n..n+e-1 are edge indicators.
    Returns (A, b, c): inequality constraints A x <= b and objective c
    (edge variables get cost -1, so minimizing c.x maximizes the cut).
    """
    cost = np.zeros(n + e)
    cost[n:] = -1

    # All undirected node pairs (i > j), then sample e of them.
    candidates = [(i, j) for i in range(n) for j in range(i)]
    chosen = np.random.choice(len(candidates), size=e, replace=False)

    A = np.zeros((3*e, n + e))
    b = np.zeros(3*e)
    for idx, pick in enumerate(chosen):
        u, v = candidates[pick]
        e_col = n + idx
        r = 3 * idx
        # edge_var - x_u - x_v <= 0
        A[r, e_col] = 1
        A[r, u] = -1
        A[r, v] = -1
        b[r] = 0
        # x_u + x_v + edge_var <= 2
        A[r + 1, u] = 1
        A[r + 1, v] = 1
        A[r + 1, e_col] = 1
        b[r + 1] = 2
        # edge_var <= 1
        A[r + 2, e_col] = 1
        b[r + 2] = 1
    return A, b, cost
|
[
"hopan@uchicago.edu"
] |
hopan@uchicago.edu
|
5ebe93bba8504216fbee8a7b1e51360b0fb5b8ed
|
cb657cd7c2260af788f9e03c7b0f3d9139c222fa
|
/Bath/Bath/settings.py
|
6851b51e0c2223a8217166d31986589b34311507
|
[] |
no_license
|
west789/Bath-Of-University
|
f1b8cdcbb55795a04e3cf4ff869a59aa6576dc29
|
c850e57b9e23be906c83883ce478aeb4cb618986
|
refs/heads/master
| 2020-03-08T04:36:45.474999
| 2018-04-03T15:16:40
| 2018-04-03T15:16:40
| 127,927,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,328
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for Bath project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# Scrapy project identity and spider discovery modules.
BOT_NAME = 'Bath'

SPIDER_MODULES = ['Bath.spiders']
NEWSPIDER_MODULE = 'Bath.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'Bath (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Send a desktop Chrome 63 User-Agent with every request.
DEFAULT_REQUEST_HEADERS = {
    "User-Agent": " Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36",
}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'Bath.middlewares.BathSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'Bath.middlewares.BathDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'Bath.pipelines.BathPipeline': 300,
#}
# Route scraped items through the project pipeline (priority 1 = runs first).
ITEM_PIPELINES = {
    # 'Australia_2.pipelines.Australia2Pipeline': 300,
    "Bath.pipelines.MyprojectPipeline": 1
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"738758058@qq.com"
] |
738758058@qq.com
|
0df5c1a67bb3e0337a8ec0bcb8c2f0998e0366e9
|
ee79e734486c0ca550bb8238ef54c78c7727384a
|
/BisulfiteSeq Bismark/samMethylationExtractor.py
|
c2b8bae5b238ace1d7cb0b812a37a996efa81c77
|
[] |
no_license
|
neilrobertson/BICRCode
|
212636e5395f0c0e4dfb3ac3c133f01eb07273ee
|
7b3f4da9cdefd7680f07b707339aee59faece1d2
|
refs/heads/master
| 2020-03-30T16:07:22.637571
| 2018-10-03T10:12:57
| 2018-10-03T10:12:57
| 151,394,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,001
|
py
|
'''
Created on 19 Aug 2011
@author: mcbryan
'''
import getopt
import sys
import csv
from sam.SamFormat import SAMFile
from sequence.genome import MemoryGenome
from genemapping.chrmEnds import ChromosomeEnds
if __name__ == '__main__':
    # Parse the single --sam option: path to the input SAM file.
    try:
        opts, args = getopt.getopt(sys.argv[1:], "", ["sam="])
    except getopt.GetoptError, err:
        # print help information and exit:
        print str(err)  # will print something like "option -a not recognized"
        sys.exit(2)

    debug = False
    infile = None
    strand = "+"

    for o, a in opts:
        if o == "--sam":
            infile = a
            print "SAM:", a
    assert infile != None

    # Methylation calls are written next to the input as <infile>.methcalls (TSV).
    with open(infile+".methcalls", "w") as outfile:
        csvout = csv.writer(outfile, delimiter="\t")

        # Reference genome and chromosome lengths (hg18 assembly).
        genome = MemoryGenome("hg18")
        ends = ChromosomeEnds("hg18")
        sam = SAMFile(infile)

        def findGenerator(haystack, needle):
            # Yield every (overlapping) index of needle in haystack.
            index = 0
            while True:
                index = haystack.find(needle, index)
                if index == -1:
                    break
                else:
                    yield index
                    index += 1

        for read in sam:
            # chromosomes we don't know about (chrL is probably lambda)
            if read.chrm not in ends:
                continue

            # check strand of the read (SAM flag 0x10 = reverse strand)
            strand = "-" if read.checkFlag(0x10) else "+"

            # get the genomic sequence (extended by 1 base on each side)
            # check the genomic sequence for CG dinucleotides
            # for each CG, extract the base from the sequence read corresponding to the C (make sure we do this right for both strands)
            # if it is a C then it is methylated
            # if it is a T then it is unmethylated
            # only valid for Lister et al. style sequencing (where complementary sequences to original DNA are not sequenced)

            # we want to get the genomic sequence + 1 base on each side
            # assume we can't go off the beginning and the end of the sequence at the same time
            # ie. our chromosomes are comparatively large compared to our reads
            assert read.start-1 > 0 or read.start+len(read.sequence)+1 <= ends[read.chrm]
            if read.start-1 < 0:
                # we will go off the beginning of the chromosome, pad with one N
                genomeseq = "N" + genome.getSequence(read.chrm, read.start, read.start+len(read.sequence)+1)
            elif read.start+len(read.sequence)+1 > ends[read.chrm]:
                # we will go off the end of the chromosome, pad with one N
                genomeseq = genome.getSequence(read.chrm, read.start-1, read.start+len(read.sequence)) + "N"
            else:
                genomeseq = genome.getSequence(read.chrm, read.start-1, read.start+len(read.sequence)+1)

            # make the two sequences comparable in terms of character set (all uppercase) + start positions
            read.sequence = "N"+read.sequence.upper()+"N"
            genomeseq = genomeseq.upper()

            # do a check to see if there are any CG's in there first (slower than not checking of course)
            # only searches genomic forward strand but this is fine since CG's are the same on both strands
            if "CG" in genomeseq:
                if debug:
                    print
                    print read.chrm, read.start, len(read.sequence)+read.start
                    print read.sequence
                    print genomeseq
                    print "CG".join(["-"*len(seq) for seq in genomeseq.split("CG")])

                # outputs (C,G) locations
                locs = [(C, C+1) for C in findGenerator(genomeseq, "CG")]

                if strand == "+":
                    bases = [(read.start+C, read.sequence[C:C+1]) for C, G in locs]
                else:
                    # we want the G from the CG (which is a C on the opposite strand and which is one base along)
                    # note that the sequence in the SAM file is complemented compared to the genome. i.e. it's the actual
                    # sequence from the sequencer and will still have C or T as the basis for meth / unmeth calls
                    bases = [(read.start+G, read.sequence[G:G+1]) for C, G in locs]

                for pos, base in bases:
                    if base in ["C", "T"]:  # ignore anything that's got an N or a SNP at that position
                        # we can make a meth call
                        # C methylated, T unmethylated (bismark-style z/Z codes)
                        methCall = "z" if base == "T" else "Z"
                        methState = "-" if base == "T" else "+"
                        csvout.writerow([read.key, methState, read.chrm, pos, methCall])
|
[
"neil.alistair.robertson@hotmail.co.uk"
] |
neil.alistair.robertson@hotmail.co.uk
|
6a3d89ee3512651ca1fc08d58c99c8187d13625d
|
4da29a053ecbf0b4f35bbb0d9718436b271df74c
|
/language_skills/migrations/0020_auto_20200723_1017.py
|
daa9961d9ddee5010ddae913ecb51bae80c4ae73
|
[] |
no_license
|
alirezahi/QA-System
|
cd232234aa4310d0d8358b18f41e9156214cc503
|
28adc482ed2563374b70cfbea8ab935a2aa4eb2e
|
refs/heads/master
| 2021-07-10T10:17:12.475850
| 2020-11-07T03:13:53
| 2020-11-07T03:13:53
| 211,319,860
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
# Generated by Django 2.1.1 on 2020-07-23 10:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add optional `part_text` text fields to the blank-question and
    multiple-choice-question models."""

    dependencies = [
        ('language_skills', '0019_auto_20200424_2159'),
    ]

    operations = [
        migrations.AddField(
            model_name='blankquestion',
            name='part_text',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='multiplechoicequestion',
            name='part_text',
            field=models.TextField(blank=True, null=True),
        ),
    ]
|
[
"alireza97hi@gmail.com"
] |
alireza97hi@gmail.com
|
901d85aaa0f2e67d7829c00644e6973faf379c81
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/CDK2_input/L17/17-1Q_wat_20Abox/set_1ns_equi.py
|
8ebca1c961ba017ec56400ce4e7ed9b2321bdc17
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785
| 2020-02-18T16:57:04
| 2020-02-18T16:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 915
|
py
|
import os

# Working directory for this thermodynamic-integration transformation.
base_dir = '/mnt/scratch/songlin3/run/CDK2/L17/wat_20Abox/ti_one-step/17_1Q/'
files_dir = base_dir + 'files/'
temp_equiin = files_dir + 'temp_equi.in'
temp_pbs = files_dir + 'temp_1ns_equi.pbs'

# Lambda windows for the TI run; each gets its own subdirectory.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]

for lam in lambd:
    # Recreate a clean per-lambda working directory and enter it.
    os.system("rm -r %6.5f" % (lam))
    os.system("mkdir %6.5f" % (lam))
    os.chdir("%6.5f" % (lam))
    os.system("rm *")
    workdir = base_dir + "%6.5f" % (lam) + '/'

    # Equilibration input: copy the template and substitute the lambda value.
    eqin = workdir + "%6.5f_equi.in" % (lam)
    os.system("cp %s %s" % (temp_equiin, eqin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" % (lam, eqin))

    # PBS submission script: same copy-and-substitute.
    pbs = workdir + "%6.5f_1ns_equi.pbs" % (lam)
    os.system("cp %s %s" % (temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" % (lam, pbs))

    # Topology and restart coordinates shared by every window.
    os.system("cp ../17-1Q_merged.prmtop .")
    os.system("cp ../0.5_equi_0.rst .")

    # Submit the job and return to the base directory for the next window.
    os.system("qsub %s" % (pbs))
    os.chdir(base_dir)
|
[
"songlin3@msu.edu"
] |
songlin3@msu.edu
|
014b4a3ae3f9aff05afd54c12352843fa94c84d7
|
bfd41fc543f6dbfc821341522cf8e7a9d2e34ce8
|
/venvc/bin/webassets
|
310b3858768f4cb03f0811a3a3fb16df0ce55f40
|
[] |
no_license
|
MaraKovalcik/Flask
|
783243560ead637a381f76d3893da2b212eff898
|
1ff8413f3551b051f8e6c76db6cf402fc7428188
|
refs/heads/master
| 2021-01-22T09:09:16.165734
| 2015-02-24T16:57:14
| 2015-02-24T16:57:14
| 31,268,626
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 351
|
#!/home/student/PycharmProjects/flask-skeleton/venvc/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'webassets==0.9','console_scripts','webassets'
# Auto-generated setuptools console-script wrapper: resolves the
# 'webassets' entry point pinned to webassets==0.9 and exits with
# the value it returns.
__requires__ = 'webassets==0.9'
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.exit(
        load_entry_point('webassets==0.9', 'console_scripts', 'webassets')()
    )
|
[
"mara.kovalcik@gmail.com"
] |
mara.kovalcik@gmail.com
|
|
a888103bae83d2ead7218fd412b06478ce9351a5
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2514/60666/252527.py
|
05e3a91ada6e526e829a7483701080f0f6eb25d1
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 310
|
py
|
def is_subsequence(sub, seq):
    """Return True if `sub` appears in `seq` as a subsequence (in order)."""
    idx = 0
    for ch in seq:
        if idx < len(sub) and ch == sub[idx]:
            idx += 1
    return idx == len(sub)


if __name__ == '__main__':
    # BUG FIX: the original while-loop neither advanced tIndex nor broke
    # once all of s was matched (sIndex == len(s)), so a fully matched s
    # spun forever printing True.  A single pass over t is sufficient.
    s = input()
    t = input()
    if not s:
        print(True)
    else:
        print(is_subsequence(s, t))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
0cfc589f684db4f5aed19ee3dc3ff803135e1f8a
|
f992a5b264b8ba117b5f90f3de942a6ce7a531d1
|
/Code-Generation/v5/obj/Block.py
|
2bb6eb3b7875e83d0022abd34026a7faa588c557
|
[] |
no_license
|
pean1128/UI2CODE-1
|
8f137c092199fb59171c64ccaa853d4df768f6c1
|
39bfe4780f823bc9bdafb933e441d97441e1abed
|
refs/heads/master
| 2023-04-27T03:20:28.171023
| 2021-01-06T09:26:50
| 2021-01-06T09:26:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,123
|
py
|
import pandas as pd
import cv2
from obj.Compo_HTML import CompoHTML
from obj.HTML import HTML
from obj.CSS import CSS
block_id = 0
def slice_blocks(compos_html, direction='v', is_slice_sub_block=True):
    '''
    Vertically or horizontally scan compos
    :param compos_html: CompoHTML objects, including elements and lists
    :param direction: 'v' groups by vertical position (rows), 'h' by horizontal (columns)
    :param is_slice_sub_block: if True each produced Block slices itself horizontally once
    :return blocks: list of [block], block: list of [CompoHTML objects]
    '''
    blocks = []
    block_compos = []    # compos accumulated for the block currently being built
    global block_id      # block ids are globally unique across calls
    dividers = []        # alternating start/end coordinates of the detected bands
    divider = -1         # far edge (bottom or right) of the band being built
    prev_divider = 0     # far edge of the previous band, used for the CSS margin
    if direction == 'v':
        # Sweep top-to-bottom; a compo starting strictly below the current
        # band's bottom opens a new block.
        compos_html.sort(key=lambda x: x.top)
        for compo in compos_html:
            # new block
            if divider < compo.top:
                prev_divider = divider
                dividers.append(compo.top)
                divider = compo.bottom
                dividers.append(divider)
                # flush the compos gathered so far into a finished Block
                if len(block_compos) > 0:
                    block_id += 1
                    css_name = '#block-' + str(block_id)
                    css = CSS(css_name, margin_top=str(int(compo.top - prev_divider)) + 'px', clear='left', border="solid 2px black")
                    blocks.append(Block(id=block_id, compos=block_compos, is_slice_sub_block=is_slice_sub_block,
                                        html_id='block-'+str(block_id), css={css_name: css}))
                    block_compos = []
            # extend block
            elif compo.top < divider < compo.bottom:
                divider = compo.bottom
                dividers[-1] = divider
            block_compos.append(compo)
        # collect left compos
        if len(block_compos) > 0:
            block_id += 1
            css_name = '#block-' + str(block_id)
            css = CSS(css_name, margin_top=str(int(block_compos[0].top - prev_divider)) + 'px', clear='left', border="solid 2px black")
            blocks.append(Block(id=block_id, compos=block_compos, is_slice_sub_block=is_slice_sub_block,
                                html_id='block-' + str(block_id), css={css_name: css}))
    elif direction == 'h':
        # Same sweep, left-to-right along the horizontal axis.
        compos_html.sort(key=lambda x: x.left)
        for compo in compos_html:
            # new block
            if divider < compo.left:
                prev_divider = divider
                dividers.append(compo.left)
                divider = compo.right
                dividers.append(divider)
                if len(block_compos) > 0:
                    block_id += 1
                    css_name = '#block-' + str(block_id)
                    css = CSS(css_name, margin_left=str(int(compo.left - prev_divider)) + 'px', float='left', border="solid 2px black")
                    blocks.append(Block(id=block_id, compos=block_compos, is_slice_sub_block=is_slice_sub_block,
                                        html_id='block-' + str(block_id), css={css_name: css}))
                    block_compos = []
            # extend block
            elif compo.left < divider < compo.right:
                divider = compo.right
                dividers[-1] = divider
            block_compos.append(compo)
        # collect left compos
        if len(block_compos) > 0:
            block_id += 1
            css_name = '#block-' + str(block_id)
            css = CSS(css_name, margin_left=str(int(block_compos[0].left - prev_divider)) + 'px', float='left', border="solid 2px black")
            blocks.append(Block(id=block_id, compos=block_compos, is_slice_sub_block=is_slice_sub_block,
                                html_id='block-' + str(block_id), css={css_name: css}))
    return blocks
def visualize_blocks(blocks, img, img_shape):
    """Draw every block's bounding box on a resized copy of img and show it
    in an OpenCV window (blocks until a key is pressed)."""
    board = cv2.resize(img, img_shape)
    for block in blocks:
        board = block.visualize(board, img_shape, show=False)
    cv2.imshow('compos', board)
    cv2.waitKey()
    cv2.destroyWindow('compos')
class Block:
    """A horizontal or vertical band of the page: a group of CompoHTML
    components rendered together inside a single <div>, with its own CSS."""

    def __init__(self, id, compos,
                 is_slice_sub_block=True, html_tag=None, html_id=None, html_class_name=None, css=None):
        self.block_id = id
        self.compos = compos        # list of CompoHTML objs
        self.block_obj = None       # CompoHTML obj
        self.block_img = None       # clipped screenshot region (set by clip_block_img)
        self.sub_blocks = []        # list of Block objs

        # Bounding box over all compos; filled by init_boundary().
        # NOTE(review): width and height are never assigned anywhere in
        # this class — confirm whether callers expect them.
        self.top = None
        self.left = None
        self.bottom = None
        self.right = None
        self.width = None
        self.height = None

        # html info
        self.html = None            # HTML obj
        self.html_tag = 'div' if html_tag is None else html_tag
        self.html_id = html_id
        self.html_class_name = html_class_name
        self.html_script = ''       # string
        self.css = css              # dict of CSS objs keyed by selector
        self.css_script = ''        # string

        # only slice sub-block once (sub-blocks are created with
        # is_slice_sub_block=False, so recursion stops after one level)
        if is_slice_sub_block:
            self.slice_sub_blocks()
        if css is not None:
            self.init_css()
        self.init_boundary()
        self.init_html()

    def init_boundary(self):
        # Bounding box = extremes over all contained compos.
        self.top = min(self.compos, key=lambda x: x.top).top
        self.bottom = max(self.compos, key=lambda x: x.bottom).bottom
        self.left = min(self.compos, key=lambda x: x.left).left
        self.right = max(self.compos, key=lambda x: x.right).right

    def init_html(self):
        # Build this block's <div>, nesting either the sub-blocks' markup
        # (when the block actually split) or the raw compo markup.
        self.html = HTML(tag=self.html_tag, id=self.html_id, class_name=self.html_class_name)
        if len(self.sub_blocks) > 1:
            # add compos of sub blocks
            for sub_block in self.sub_blocks:
                self.html.add_child(sub_block.html_script)
        else:
            for compo in self.compos:
                self.html.add_child(compo.html_script)
        self.html_script = self.html.html_script

    def init_css(self):
        # Merge children's css rules into this block's css dict.
        if len(self.sub_blocks) > 1:
            for sub_block in self.sub_blocks:
                self.css.update(sub_block.css)
        else:
            for compo in self.compos:
                self.css.update(compo.css)
        self.css_script = self.css  # NOTE(review): immediately overwritten by assembly_css()
        self.assembly_css()

    def assembly_css(self):
        # Concatenate the css text of every rule in this block.
        self.css_script = ''
        for i in self.css:
            self.css_script += self.css[i].css_script
        # self.block_obj.css = self.css

    def slice_sub_blocks(self):
        '''
        Horizontally slice the block into sub-blocks
        '''
        self.sub_blocks = slice_blocks(self.compos, 'h', is_slice_sub_block=False)

    def clip_block_img(self, org, show=False):
        # Crop this block's bounding box out of the full screenshot `org`.
        self.block_img = org[self.top: self.bottom, self.left: self.right]
        if show:
            self.show_block_img()

    def show_block_img(self):
        cv2.imshow('block', self.block_img)
        cv2.waitKey()
        cv2.destroyWindow('block')

    def visualize(self, img=None, img_shape=None, flag='line', show=True):
        # Draw this block's bounding box ('line' = 2px outline,
        # 'block' = filled) on a resized copy of img.
        fill_type = {'line': 2, 'block': -1}
        img_shape = img_shape  # NOTE(review): no-op self-assignment
        board = cv2.resize(img, img_shape)
        board = cv2.rectangle(board, (self.left, self.top), (self.right, self.bottom), (0, 255, 0), fill_type[flag])
        if show:
            cv2.imshow('compo', board)
            cv2.waitKey()
            cv2.destroyWindow('compo')
        return board
|
[
"dsh15325@163.com"
] |
dsh15325@163.com
|
f3fbe02ead08747a6e90915969d89590f9fef352
|
4520f56d4952c788e198ee7eee39911c9a76c60f
|
/01_Jump_to_python/0_example/4_0704_t/3_rhombus_v3.py
|
80617b22c286553148a0003f08aed8440ff5eb4d
|
[] |
no_license
|
SuHyeonJung/iot_python2019
|
bef8877a1cd41981ad2125291f5af44f4fd1701c
|
7860630ae28c53677a3c2761c9e997b28ea55f26
|
refs/heads/master
| 2020-06-14T22:18:27.503781
| 2019-11-08T05:50:41
| 2019-11-08T05:50:41
| 195,142,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,371
|
py
|
# Interactive console program: repeatedly read an odd number and draw a
# bordered diamond (rhombus) of '*' characters of that height/width.
# Entering 0 quits; even numbers are rejected with a retry message.
# (Korean prompt/message strings are runtime output — left untouched.)
odd = 0
while True:
    odd = int(input("홀수를 입력하세요(0 <- 종료): "))  # "enter an odd number (0 to quit)"
    end = int(odd / 2)          # spaces before the first '*' on the top row
    value = int(odd / 2)        # trailing spaces after the stars
    second_end = end + 1        # number of rows in the expanding upper half
    if odd == 0:
        break
    elif odd % 2 == 0:
        print("잘못 입력하셨습니다.다시 입력하세요.")  # "invalid input, try again"
    else:
        count = 1               # stars on the current upper-half row (1, 3, 5, ...)
        i = 1
        # Top border: one leading space then `odd` dashes.
        print(end=' ')
        print(odd*'-')
        print()
        # Upper half (including the middle row): widening star rows.
        while second_end > 0:
            print('|', end = '')
            while end != 0:
                print(end=' ')
                end = end - 1
            end = int(odd /2) - i
            print(count*'*', end = value*' ')
            count = count + 2
            value = value - 1
            second_end = second_end - 1
            i = i + 1
            print('|')
        # Lower half: narrowing star rows, mirrored from the upper half.
        end = int(odd / 2)
        mirror = int(odd / 2)
        star = odd - 2          # stars on the first lower-half row
        k = mirror - 1
        j = 1
        second_value = 1
        while end >= 1:
            print('|', end = '')
            while k < mirror:
                print(end=' ')
                k = k + 1
            print(star*'*', end = second_value*' ')
            star = star - 2
            end = end - 1
            j = j + 1
            k = k - j
            second_value = second_value + 1
            print('|')
        # Bottom border, mirroring the top one.
        print()
        print(end=' ')
        print(odd*'-')
print("프로그램을 종료합니다.")  # "exiting the program"
|
[
"galma94815@naver.com"
] |
galma94815@naver.com
|
bc97a879f29cc25c84cbe0858be8759cafd2f892
|
1ac7f1035545fc9b1cbb1fee84bf5bdd1b70a991
|
/neural/othermodels.py
|
a03cc3b06439630551e05442d7794741b0a44f6c
|
[] |
no_license
|
HussainAther/neuroscience
|
1e1225d811c64c260825540c210a0f7925386085
|
624bf82ce5c610c2ca83a0c4c49d3f4d0b92a1e2
|
refs/heads/master
| 2023-05-25T15:25:36.617994
| 2023-05-13T23:25:18
| 2023-05-13T23:25:18
| 157,743,251
| 9
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,299
|
py
|
import numpy as np
import pylab as plt
from scipy.integrate import odeint
"""
McCulloch-Pitts replaces the involved Hodgkin-Huxley system by a threshold device
with only two states (0 and 1) in which 0 denotes the inactivated, silent condition
and 1 denotes the activiated, firing state. We use the equation:
X(t+1) = Theta(I_i - theta_i)
in which t is the discretized time, theta_i is the activation threshold for unit i,
and I_i = X_i are the data that has been identified. In this case,
Postsynpatic current I_iPSC = - summation of j=1 to n of (w_ij * I_j - I-i)
in which w_ij is the synaptic weight of the connection from unit j to unit i,
dependent on the synpatic gain g_ij that evolves during learning and reflecting the synaptic plasticity
and the intracelular resistances. The capacitance is deliberately neglected and I_iext is the
externally controlled input to neuron i.
"""
def simplemodel(w, I):
    """
    Simple conductance model for some neuron i.

    w is a list of weights of connection from unit j to i for each neuron.
    I is a list of currents from each neuron j.
    Returns the sign-flipped postsynaptic current for neuron i.
    """
    I_ext = 5  # externally controlled input to neuron i
    # Weighted sum of the presynaptic currents.
    I_iPSC = sum(w_j * I_j for w_j, I_j in zip(w, I))
    # BUG FIX: the original subtracted `I_iext`, an undefined name
    # (NameError at runtime); the external input `I_ext` was intended.
    I_iPSC -= I_ext
    return -I_iPSC  # flipped sign: current flows in the opposite direction
def mp(simplemodel, theta_i):
    """
    McCulloch-Pitts threshold unit: map each input current to 1 (firing)
    when it reaches the activation threshold theta_i, else 0 (silent).
    """
    return [1 if current - theta_i >= 0 else 0 for current in simplemodel]
def fn(s):
    """
    FitzHugh-Nagumo right-hand side for two mutually coupled synapses.

    FitzHugh and Nagumo approximated the Hodgkin-Huxley equations using a
    general linear relation (h = a - b*n) combined with coordinate
    transformation and rescaling (Bonhoeffer-Van-der-Pol equations).

    s: state vector [v, w, v2, w2] — voltage and recovery variable for
    each of the two synapses.
    Returns the derivatives [vd, wd, v2d, w2d] in the form odeint expects.
    """
    # BUG FIX: the original unpacked from an undefined name `state` and
    # used indices (0, 1, 3, 4) — index 4 is out of range for a
    # four-element state vector.  Unpack the actual argument instead.
    v, w, v2, w2 = s
    # constants used for fitting to the general linear relation
    x = 0.08
    y = 0.7
    z = 0.8
    theta = 0   # voltage phase shift
    Vs = 2      # applied voltage
    Iapp = 1.2  # applied current
    gsyn = 30   # synaptic conductance in pS
    # Synaptic currents: each synapse is gated by the partner's voltage.
    Isyn = gsyn*((v-Vs))/(1+np.power(np.e,(v2-theta)))
    Isyn2 = gsyn*((v2-Vs))/(1+np.power(np.e,(v-theta)))
    # synapse 1
    vd = v - np.power(v, 3)/3 - w + Iapp + Isyn
    wd = x*(v + y - z*w)
    # synapse 2
    v2d = v2 - np.power(v2, 3)/3 - w2 + Iapp + Isyn2
    w2d = x*(v2 + y - z*w2)
    # return state derivatives that odeint uses
    return [vd, wd, v2d, w2d]
# Initial state for the two coupled FitzHugh-Nagumo synapses: (v, w, v2, w2).
s = ([-1.2, 1.2, -1.2, 1.2])
# 280,000 time points at dt = 0.01 ms.
t = np.arange(0.0, 2800.0, 0.01)
# NOTE(review): this integration runs at import time and, with these very
# tight tolerances, is slow; the result is also discarded (not assigned).
odeint(fn, s, t, rtol=1.49012e-13, atol=1.49012e-13)
"""
Morris-Lecar model described spiking dynamics of potassium- and calcium-controlled muscle fibers.
"""
# Constants
C_m = 1.0 # membrane capacitance, in uF/cm^2
g_Ca = 1.1 # maximum conducances, in mS/cm^2
g_K = 2.0
g_L = 0.5
E_Ca = 100.0 # Nernst reversal potentials, in mV
E_K = -70.0
E_L = -50.0
def m_infty(V):
    """
    Open-state probability of the (Ca) channel as a function of membrane
    voltage: a Boltzmann-style sigmoid written in tanh form, half-open
    at V = -1 mV.
    """
    # BUG FIX: `sp` is never imported in this file (NameError); numpy,
    # imported as np, provides tanh and works on scalars and arrays.
    return (1.0 + np.tanh((V + 1.0) / 15.0)) / 2.0
def w_infty(V):
    """
    Steady-state value of the recovery (K-gate) variable: same tanh
    sigmoid as m_infty, half-open at V = 0 mV.
    """
    # BUG FIX: `sp` is never imported in this file (NameError); use numpy.
    return (1.0 + np.tanh(V / 30.0)) / 2.0
def tau_w(V):
    """
    Voltage-dependent time constant of the recovery variable, in ms
    (maximal, 5 ms, at V = 0).
    """
    # BUG FIX: `sp` is never imported in this file (NameError); use numpy.
    return 5.0 / np.cosh(V / 60.0)  # in ms
def I_ext(t):
    """External input current: a staircase that steps up by 10 every 100 ms."""
    steps = sp.floor(t / 100)
    return 10 * steps
t = sp.arange(0.0, 400.0, 0.1)
I = I_ext(t)
def ml(V, w,t):
    """Morris-Lecar right-hand side for odeint.

    NOTE(review): this function looks broken as written — the parameters
    (V, w) are immediately overwritten by unpacking a global ``X`` that is
    not defined anywhere in view; presumably the intent was a signature
    like ``ml(X, t)`` with ``V, w = X``. Also ``I_Ca``, ``I_K`` and
    ``I_L`` are not defined in this file fragment — confirm they exist
    elsewhere before relying on this.
    """
    V, w = X  # NOTE(review): X is undefined here; should unpack the state argument
    dVdt = (I_ext(t) - I_Ca(V) - I_K(V, w) - I_L(V)) / C_m
    dwdt = (w_infty(V) - w) / tau_w(V)
    # odeint expects the pair of derivatives (dV/dt, dw/dt)
    return dVdt, dwdt
# BUGFIX: the original line `X = odeint(ml(-44, .05) t)` was a syntax error
# (missing commas) and called ml() instead of passing it. odeint takes the
# callable, the initial state vector, and the time grid, in that order.
# NOTE(review): ml's signature must accept (state, t) for this to work —
# see the review note on ml itself.
X = odeint(ml, [-44, .05], t)
|
[
"shussainather@gmail.com"
] |
shussainather@gmail.com
|
5a115ced7ec699776c2e747f924d8bf8722a9b7d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02661/s028332585.py
|
9e89e79c8a1f08abee3a9c1c567320963f0d6e4a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
# Read N, then N pairs (a_i, b_i) into the parallel lists A and B.
N = int(input())
A = []
B = []
for _ in range(N):
    a, b = map(int, input().split())
    A.append(a)
    B.append(b)
def median(arr):
    """Return the median of ``arr`` (sorts ``arr`` in place).

    Odd length: the middle element. Even length: the mean of the two
    middle elements, which is a float.
    """
    arr.sort()
    n = len(arr)
    mid = n // 2
    if n % 2:
        return arr[mid]
    return (arr[mid - 1] + arr[mid]) / 2
med_a = median(A)
med_b = median(B)
# Count the possible medians between med(A) and med(B).  For odd N the
# median takes every integer in [med_a, med_b]; for even N it moves in
# half-integer steps, so count over the doubled (integer) values instead.
if N % 2 == 1:
    ans = int(med_b) - int(med_a) + 1
else:
    ans = med_b * 2 - med_a * 2 + 1
# even-N branch produces a float (median averages two values); normalize
ans = int(ans)
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
21af6fdb705f8c486819d5022b336934d697fd99
|
7f8084cc37db622fb50cfa656ed1d0d4da007fe3
|
/model.py
|
1eb0af7c003f16f41327fe1b5e67595823862521
|
[] |
no_license
|
jessicagamio/testing-py
|
a3454878f1dbe1e2f914d2a126748b70dcbdc701
|
9d1745f1078dd3754dd62a622a94d61d78342e24
|
refs/heads/master
| 2021-06-22T14:25:58.300155
| 2019-08-09T23:34:54
| 2019-08-09T23:34:54
| 201,549,658
| 0
| 0
| null | 2021-03-20T01:28:20
| 2019-08-09T22:41:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,024
|
py
|
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Game(db.Model):
    """Board game."""

    __tablename__ = "games"

    # Surrogate integer primary key.
    game_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # Game title; required and unique (max 20 chars).
    name = db.Column(db.String(20), nullable=False, unique=True)
    # Optional short blurb (max 100 chars).
    description = db.Column(db.String(100))
def connect_to_db(app, db_uri="postgresql:///games"):
    """Bind the module-level SQLAlchemy instance to a Flask app.

    Args:
        app: the Flask application to attach to.
        db_uri: database URI; defaults to the local ``games`` database.
    """
    app.config['SQLALCHEMY_DATABASE_URI'] = db_uri
    db.app = app
    db.init_app(app)
def example_data():
    """Seed the test database with three example games."""
    # Start from an empty table so repeated runs stay deterministic.
    Game.query.delete()
    games = [
        Game(name="Game " + label, description="This is Game " + label)
        for label in ("One", "Two", "Three")
    ]
    db.session.add_all(games)
    db.session.commit()
if __name__ == '__main__':
    # When run directly, attach to the Flask app from party.py so the
    # models can be exercised interactively.
    from party import app
    connect_to_db(app)
    print("Connected to DB.")
|
[
"you@example.com"
] |
you@example.com
|
541da3b572ffefac450ea689dfd561a0cf40fba9
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/HiFaceGAN/src/util.py
|
d803a60fc82b93ac6689d07c104cb16cc4b1c51c
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,673
|
py
|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utils for model"""
import random
import cv2
import mindspore as ms
import mindspore.nn as nn
import numpy as np
def set_global_seed(i):
    """Seed MindSpore, NumPy and the stdlib RNG so runs are reproducible."""
    for seed_fn in (ms.set_seed, np.random.seed, random.seed):
        seed_fn(i)
def image2numpy(image):
    """Convert a CHW tensor with values in [0, 1] to an HWC uint8 array.

    ``image`` must expose ``asnumpy()`` (a MindSpore tensor); values are
    scaled by 255, clipped to [0, 255] and rounded to nearest integer.
    """
    chw = image.asnumpy()
    hwc = np.transpose(chw, (1, 2, 0)) * 255
    return np.rint(np.clip(hwc, a_min=0, a_max=255)).astype(np.uint8)
def make_joined_image(im1, im2, im3):
    """Concatenate three square images side by side into one HWC uint8 array."""
    panels = [image2numpy(im) for im in (im1, im2, im3)]
    # all three panels are assumed square and the same size as the first
    height = panels[0].shape[0]
    joined = np.zeros((height, height * 3, 3), dtype=np.uint8)
    for idx, panel in enumerate(panels):
        joined[:, idx * height:(idx + 1) * height] = panel
    return joined
def save_image(image, image_path):
    """Save image"""
    # cv2.imwrite writes the array as-is; OpenCV's convention is BGR
    # channel order — callers supplying RGB data must convert first.
    cv2.imwrite(image_path, image)
def clip_adam_param(beta):
    """Clamp an Adam beta coefficient into [1e-6, 1 - 1e-6].

    Keeps the optimizer away from the degenerate values 0 and 1.
    """
    lower, upper = 1e-6, 1 - 1e-6
    return max(lower, min(beta, upper))
def get_lr(initial_lr, lr_policy, num_epochs, num_epochs_decay, dataset_size):
    """
    Learning rate generator.
    For 'linear', we keep the same learning rate for the first <num_epochs>
    epochs and linearly decay the rate to zero over the next
    <num_epochs_decay> epochs; the schedule is returned as a per-step
    float32 tensor. For 'constant', the scalar rate is returned as-is.
    """
    if lr_policy == 'constant':
        return initial_lr
    if lr_policy != 'linear':
        raise ValueError(f'Unknown lr_policy {lr_policy}')
    # flat section: one entry per step for the first num_epochs epochs
    schedule = [initial_lr] * (dataset_size * num_epochs)
    # decay section: the rate shrinks linearly to zero epoch by epoch
    for epoch in range(num_epochs_decay):
        decayed = initial_lr * (num_epochs_decay - epoch) / num_epochs_decay
        schedule.extend([decayed] * dataset_size)
    return ms.Tensor(np.array(schedule).astype(np.float32))
def enable_batch_statistics(net):
    """Recursively force every BatchNorm2d cell in ``net`` to use batch statistics."""
    if isinstance(net, nn.BatchNorm2d):
        net.use_batch_statistics = True
        return
    for child in net.cells():
        enable_batch_statistics(child)
|
[
"a.denisov@expasoft.tech"
] |
a.denisov@expasoft.tech
|
b9c3df3f4295b0a7f0bc2813d087a40f6b0aafff
|
0f274618632a0cc96ac0b49e1f784759cfaf3a41
|
/pytorch_models/unet/blocks.py
|
d45af78870e1646648fb4215ecce94f7de54dea5
|
[] |
no_license
|
brookisme/pytorch_models
|
2cd0adf5104606ffd38c90af11fe330ce15fc7c5
|
2bd1c8527d7da7dda414250c28fab2fb02315821
|
refs/heads/master
| 2020-06-01T23:02:00.046636
| 2020-01-30T05:37:53
| 2020-01-30T05:37:53
| 190,959,659
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,059
|
py
|
import torch.nn as nn
import pytorch_models.blocks as blocks
from pytorch_models.deeplab.blocks import ASPP
class RSPP(nn.Module):
    """ Residual Spatial Pyramid Pooling

    Removes the "Atrous" from a modified "Atrous Spatial Pyramid Pooling"
    blocks and added a residual skip connection. By default also
    turns off the pooling in the ASSP.

    Defaults Example: x-> F_5(x)+ F_3(x) + x

    Args:
        in_ch<int>: number of input channels
        out_ch<int|None>: if out_ch is None out_ch=in_ch
        kernel_sizes<list[int]|None>: kernel_size for each conv in stack
            (None means the default [5, 3])
        pooling<bool>: include image_pooling block
        residual<bool>:
            - if False just return the block without residual
            - for use in architectures where the skip connection is optional
        shortcut_method<str>: see blocks.Residual docs
        spp_config<dict|None>: config for underlying aspp block; copied
            internally, so the caller's dict is never mutated
    """
    def __init__(self,
            in_ch,
            out_ch=None,
            kernel_sizes=None,
            pooling=False,
            residual=True,
            shortcut_method=blocks.Residual.AUTO_SHORTCUT,
            spp_config=None):
        super(RSPP, self).__init__()
        # BUGFIX: the old defaults `kernel_sizes=[5,3]` / `spp_config={}`
        # were mutable objects shared across *every* instantiation, and
        # _spp() writes into spp_config — so one instance's settings leaked
        # into the next. None-defaults plus a defensive copy fix that.
        if kernel_sizes is None:
            kernel_sizes = [5, 3]
        spp_config = {} if spp_config is None else dict(spp_config)
        if out_ch is None:
            out_ch = in_ch
        self.in_ch = in_ch
        self.out_ch = out_ch
        spp = self._spp(kernel_sizes, pooling, spp_config)
        self.rspp = blocks.Residual(
            in_ch=self.in_ch,
            out_ch=self.out_ch,
            block=spp,
            is_residual_block=residual,
            shortcut_stride=1,
            shortcut_method=shortcut_method)

    def forward(self, x):
        return self.rspp(x)

    def _spp(self, kernel_sizes, pooling, config):
        """Build the (non-atrous) pyramid-pooling block wrapped by Residual."""
        config['kernel_sizes'] = kernel_sizes
        config['pooling'] = pooling
        # dilations of 1 everywhere = no "atrous" behavior
        config['dilations'] = config.get('dilations', [1] * len(kernel_sizes))
        config['join_method'] = ASPP.ADD
        if config.get('out_conv_config') is None:
            config['out_conv_config'] = False
        return ASPP(self.in_ch, self.out_ch, **config)
|
[
"bguzder-williams@wri.org"
] |
bguzder-williams@wri.org
|
25e771061a927d9aeb0fc3aae302dc795ffb378e
|
5068bc927a7fff73923ce95862ff70120160c491
|
/electrum_axe/gui/qt/request_list.py
|
3be7ce6113404ba464b82efed458c087817fdc27
|
[
"MIT"
] |
permissive
|
AXErunners/electrum-axe
|
cdbce2dbb92e23e32e9f9b733ae9f65f51c0ae9f
|
7ef05088c0edaf0688fb167df353d6da619ebf2f
|
refs/heads/master
| 2021-04-03T09:40:37.109317
| 2020-08-27T16:53:18
| 2020-08-27T16:53:18
| 124,705,752
| 336
| 75
|
MIT
| 2020-10-17T18:30:25
| 2018-03-10T23:00:48
|
Python
|
UTF-8
|
Python
| false
| false
| 7,068
|
py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from enum import IntEnum
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtWidgets import QMenu
from PyQt5.QtCore import Qt
from electrum_axe.i18n import _
from electrum_axe.util import format_time, age
from electrum_axe.plugin import run_hook
from electrum_axe.paymentrequest import PR_UNKNOWN
from electrum_axe.wallet import InternalAddressCorruption
from .util import MyTreeView, pr_tooltips, pr_icons, read_QIcon
class RequestList(MyTreeView):
    """Tree view listing the wallet's stored receive (payment) requests."""

    class Columns(IntEnum):
        DATE = 0
        ADDRESS = 1
        SIGNATURE = 2
        DESCRIPTION = 3
        AMOUNT = 4
        STATUS = 5

    headers = {
        Columns.DATE: _('Date'),
        Columns.ADDRESS: _('Address'),
        Columns.SIGNATURE: '',
        Columns.DESCRIPTION: _('Description'),
        Columns.AMOUNT: _('Amount'),
        Columns.STATUS: _('Status'),
    }
    filter_columns = [Columns.DATE, Columns.ADDRESS, Columns.SIGNATURE, Columns.DESCRIPTION, Columns.AMOUNT]

    def __init__(self, parent):
        super().__init__(parent, self.create_menu,
                         stretch_column=self.Columns.DESCRIPTION,
                         editable_columns=[])
        self.setModel(QStandardItemModel(self))
        self.setSortingEnabled(True)
        self.setColumnWidth(self.Columns.DATE, 180)
        self.update()
        self.selectionModel().currentRowChanged.connect(self.item_changed)

    def item_changed(self, idx):
        """Populate the receive pane from the request selected in the list."""
        # TODO use siblingAtColumn when min Qt version is >=5.11
        addr = self.model().itemFromIndex(idx.sibling(idx.row(), self.Columns.ADDRESS)).text()
        req = self.wallet.receive_requests.get(addr)
        if req is None:
            # request disappeared underneath us; rebuild the list
            self.update()
            return
        expires = age(req['time'] + req['exp']) if req.get('exp') else _('Never')
        amount = req['amount']
        message = req['memo']
        self.parent.receive_address_e.setText(addr)
        self.parent.receive_message_e.setText(message)
        self.parent.receive_amount_e.setAmount(amount)
        self.parent.expires_combo.hide()
        self.parent.expires_label.show()
        self.parent.expires_label.setText(expires)
        self.parent.new_request_button.setEnabled(True)

    def update(self):
        """Rebuild the model from the wallet's sorted receive requests."""
        self.wallet = self.parent.wallet
        # hide receive tab if no receive requests available
        if self.parent.isVisible():
            b = len(self.wallet.receive_requests) > 0
            self.setVisible(b)
            self.parent.receive_requests_label.setVisible(b)
            if not b:
                self.parent.expires_label.hide()
                self.parent.expires_combo.show()
        # update the receive address if necessary
        current_address = self.parent.receive_address_e.text()
        domain = self.wallet.get_receiving_addresses()
        try:
            addr = self.wallet.get_unused_address()
        except InternalAddressCorruption as e:
            self.parent.show_error(str(e))
            addr = ''
        if current_address not in domain and addr:
            self.parent.set_receive_address(addr)
        self.parent.new_request_button.setEnabled(addr != current_address)
        self.parent.update_receive_address_styling()
        self.model().clear()
        self.update_headers(self.__class__.headers)
        self.hideColumn(self.Columns.ADDRESS)
        for req in self.wallet.get_sorted_requests(self.config):
            address = req['address']
            if address not in domain:
                continue
            timestamp = req.get('time', 0)
            amount = req.get('amount')
            expiration = req.get('exp', None)
            message = req['memo']
            date = format_time(timestamp)
            status = req.get('status')
            signature = req.get('sig')
            requestor = req.get('name', '')
            amount_str = self.parent.format_amount(amount) if amount else ""
            labels = [date, address, '', message, amount_str, pr_tooltips.get(status,'')]
            items = [QStandardItem(e) for e in labels]
            self.set_editability(items)
            if signature is not None:
                items[self.Columns.SIGNATURE].setIcon(read_QIcon("seal.png"))
                items[self.Columns.SIGNATURE].setToolTip(f'signed by {requestor}')
            # BUGFIX: was `status is not PR_UNKNOWN` — identity comparison on
            # ints only works by CPython small-int interning; compare by value.
            if status != PR_UNKNOWN:
                items[self.Columns.STATUS].setIcon(read_QIcon(pr_icons.get(status)))
            items[self.Columns.DESCRIPTION].setData(address, Qt.UserRole)
            self.model().insertRow(self.model().rowCount(), items)
        self.filter()

    def create_menu(self, position):
        """Context menu for a request row: copy fields/URI, export, delete."""
        idx = self.indexAt(position)
        item = self.model().itemFromIndex(idx)
        # TODO use siblingAtColumn when min Qt version is >=5.11
        item_addr = self.model().itemFromIndex(idx.sibling(idx.row(), self.Columns.ADDRESS))
        if not item_addr:
            return
        addr = item_addr.text()
        req = self.wallet.receive_requests.get(addr)
        if req is None:
            self.update()
            return
        column = idx.column()
        column_title = self.model().horizontalHeaderItem(column).text()
        column_data = item.text()
        menu = QMenu(self)
        if column != self.Columns.SIGNATURE:
            if column == self.Columns.AMOUNT:
                column_data = column_data.strip()
            menu.addAction(_("Copy {}").format(column_title), lambda: self.parent.app.clipboard().setText(column_data))
        menu.addAction(_("Copy URI"), lambda: self.parent.view_and_paste('URI', '', self.parent.get_request_URI(addr)))
        menu.addAction(_("Save as BIP70 file"), lambda: self.parent.export_payment_request(addr))
        menu.addAction(_("Delete"), lambda: self.parent.delete_payment_request(addr))
        run_hook('receive_list_menu', menu, addr)
        menu.exec_(self.viewport().mapToGlobal(position))
|
[
"slowdive@me.com"
] |
slowdive@me.com
|
70d55931728e32b6b0ada6a287f875e2e79e7fb5
|
ccb9425e4655cee0c06f3759e21e8477eb111d8f
|
/django_eveonline_connector/migrations/0010_auto_20191211_1514.py
|
a0b18ff1beebbef596039a7f2c63880f6b0a313f
|
[
"MIT"
] |
permissive
|
KryptedGaming/django-eveonline-connector
|
38fac86666598e0e9b6f0e330806e54678363a88
|
95fa146f4fcdf6bce84548b5cac1e5bf09cd72a0
|
refs/heads/master
| 2021-06-12T10:02:26.495615
| 2021-05-03T17:24:05
| 2021-05-03T17:24:05
| 201,830,836
| 3
| 2
|
MIT
| 2021-06-10T20:39:05
| 2019-08-12T00:34:21
|
Python
|
UTF-8
|
Python
| false
| false
| 530
|
py
|
# Generated by Django 2.2.8 on 2019-12-11 15:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: make EveCharacter.token a nullable one-to-one link."""

    dependencies = [
        ('django_eveonline_connector', '0009_auto_20191210_1538'),
    ]

    operations = [
        # on_delete=CASCADE: deleting an EveToken also deletes the
        # EveCharacter row holding the reference.
        migrations.AlterField(
            model_name='evecharacter',
            name='token',
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='django_eveonline_connector.EveToken'),
        ),
    ]
|
[
"porowns@gmail.com"
] |
porowns@gmail.com
|
e8e1ea746c2d2d55c761d003d71ad43f43480823
|
94180b918cc2c590c5868113d3e604ade34473b3
|
/jayd3e/models.py
|
44f37beec4764775f20a77652560387d2d785d77
|
[] |
no_license
|
jayd3e-archive/jayd3e
|
beb9daf007915c96645512884387054adb247a51
|
88a0d91b7aaa7907260d802935b198892623934e
|
refs/heads/master
| 2020-06-07T21:51:21.056787
| 2013-04-28T18:43:10
| 2013-04-28T18:43:10
| 1,386,947
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 583
|
py
|
from sqlalchemy import (
Column,
String,
Integer,
Date,
DateTime
)
from sqlalchemy.ext.declarative import declarative_base
class BaseClass(object):
    """Mixin giving every declarative model an integer ``id`` primary key."""
    # surrogate primary key shared by all tables
    id = Column(Integer, primary_key=True)

# all models inherit from Base (and therefore from BaseClass above)
Base = declarative_base(cls=BaseClass)
def initializeDb(engine):
    """Bind the declarative metadata to ``engine`` so tables can be created/queried."""
    Base.metadata.bind = engine
class Post(Base):
    """A blog post row in the ``posts`` table (id comes from BaseClass)."""
    __tablename__ = 'posts'

    title = Column(String(50))
    body = Column(String(2000))
    date = Column(Date)  # presumably the publication date — confirm with callers
    created = Column(DateTime)  # NOTE(review): looks like a creation timestamp; not set automatically here
    change_time = Column(DateTime)  # NOTE(review): looks like last-modified; not set automatically here

    def __repr__(self):
        # only the id is shown; title may be long or unset
        return "<Post('%s')>" % (self.id)
|
[
"jd.dallago@gmail.com"
] |
jd.dallago@gmail.com
|
fd6f2e143779b6958f7dd6418992c04c7fb590d1
|
05cd0bee988d6d02841b23689517304defe7d00f
|
/goaway/emitter.py
|
36188322a55e67ed3f97af2abc659314bd67f188
|
[] |
no_license
|
podhmo/goaway
|
cd41043c6386f58958a99f26009ef6f873716e23
|
f870b4833a3aaf303c7ce161b78096e0b33e3c11
|
refs/heads/master
| 2021-10-11T11:05:11.639598
| 2021-10-06T01:57:56
| 2021-10-06T01:57:56
| 90,339,657
| 0
| 1
| null | 2021-10-06T01:52:23
| 2017-05-05T05:20:58
|
Python
|
UTF-8
|
Python
| false
| false
| 697
|
py
|
import os.path
import logging
logger = logging.getLogger(__name__)
class Emitter:
    """Writes a package's files to disk using the repository's writer."""

    def __init__(self, repository):
        # the repository supplies the writer used to render each file
        self.repository = repository

    def emit_package(self, package, d=None, onemit=None):
        """Render every file in ``package`` under directory ``d``.

        Args:
            package: package whose ``files`` are written out.
            d: output directory; defaults to ``package.filepath``.
            onemit: optional callback ``onemit(file, filepath)`` invoked
                after each file is written.
        """
        return dumptree(self.repository.writer, package, d=d, onemit=onemit)

    # backwards-compatible short alias
    emit = emit_package
def dumptree(writer, package, d=None, onemit=None):
    """Write each file of ``package`` under directory ``d`` via ``writer``.

    ``d`` defaults to ``package.filepath`` and is created if missing.
    ``onemit(file, path)`` is called after each file is written.
    """
    target_dir = d or package.filepath
    os.makedirs(target_dir, exist_ok=True)
    for f in package.files.values():
        fpath = os.path.join(target_dir, f.name)
        with open(fpath, "w") as out:
            logger.info("write: %s", fpath)
            out.write(str(writer.write(f)))
        if onemit is not None:
            onemit(f, fpath)
|
[
"ababjam61+github@gmail.com"
] |
ababjam61+github@gmail.com
|
4fe1ebc7236a6d0e7e5805f2cd720f6a72951b89
|
e1cf0e9941e72d06870baa63c792f1123f325762
|
/classify/english_classify/SST_FlyAI/processor.py
|
c70e6453c71c1d7febd4757ea8ac1d301aa1819b
|
[] |
no_license
|
yphacker/flyai_nlp
|
1ab79be510d82fb0e9bc7d5d823c3fbaf9bf2ce5
|
78a8cd8680190dacc053993fe4a00d2391a62408
|
refs/heads/master
| 2020-07-16T02:22:03.338890
| 2020-01-02T12:52:14
| 2020-01-02T12:52:14
| 205,699,001
| 3
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,263
|
py
|
# -*- coding: utf-8 -*
import os
from flyai.processor.base import Base
import config
import bert.tokenization as tokenization
from bert.run_classifier import convert_single_example_simple
# class Processor(Base):
# def __init__(self):
# self.token = None
#
# def input_x(self, sentence):
# '''
# 参数为csv中作为输入x的一条数据,该方法会被Dataset多次调用
# '''
# if self.token is None:
# bert_vocab_file = os.path.join(DATA_PATH, "model", "uncased_L-12_H-768_A-12", 'vocab.txt')
# self.token = tokenization.CharTokenizer(vocab_file=bert_vocab_file)
# word_ids, word_mask, word_segment_ids = convert_single_example_simple(max_seq_length=256, tokenizer=self.token,
# text_a=sentence)
# return word_ids, word_mask, word_segment_ids
#
# def input_y(self, label):
# '''
# 参数为csv中作为输入y的一条数据,该方法会被Dataset多次调用
# '''
# return [label]
#
# def output_y(self, index):
# '''
# 验证时使用,把模型输出的y转为对应的结果
# '''
#
# if index >= 0.5:
# return 1
# return 0
class Processor(Base):
    """FlyAI data processor: converts raw CSV fields to/from BERT model inputs."""

    def __init__(self):
        # BERT tokenizer, created lazily on the first input_x call
        self.token = None

    def input_x(self, sentence):
        '''
        Convert one input sentence (the x column of the CSV) into BERT
        features. Called by Dataset once per row.
        '''
        if self.token is None:
            bert_vocab_file = os.path.join(config.DATA_PATH, "model", "uncased_L-24_H-1024_A-16", 'vocab.txt')
            self.token = tokenization.FullTokenizer(vocab_file=bert_vocab_file)
        word_ids, word_mask, word_segment_ids = \
            convert_single_example_simple(config.max_seq_length, tokenizer=self.token, text_a=sentence)
        return word_ids, word_mask, word_segment_ids

    def input_y(self, label):
        '''
        Convert one label (the y column of the CSV). Called by Dataset
        once per row; passed through unchanged.
        '''
        return label

    def output_y(self, data):
        '''
        Used during validation: map the model's raw output back to a label.
        '''
        return data[0]
|
[
"yphacker@163.com"
] |
yphacker@163.com
|
15f3f035f2e8a0fe2a55846ba021bfa4732d8946
|
b6d8049568e8068422bc4ac2b957972dc1ee0ab7
|
/ui_extensions/cloudendure/__init__.py
|
01817f14f2dc5e1ff60b1d569706c52ba539c117
|
[
"Apache-2.0"
] |
permissive
|
CloudBoltSoftware/cloudbolt-forge
|
a6dffd52bd074ad48a61527502fcddf8113508ff
|
a5584a84841be49bea69df506a91d18cb3f215d8
|
refs/heads/master
| 2023-08-11T08:08:07.730807
| 2023-08-10T11:40:07
| 2023-08-10T11:40:07
| 39,207,603
| 37
| 44
|
Apache-2.0
| 2023-09-14T20:06:44
| 2015-07-16T16:18:02
|
Python
|
UTF-8
|
Python
| false
| false
| 300
|
py
|
"""
Provides features to support CloudEndure server Migration integration.
This integration helps with creating a cloudendure project, adding a vm
to a project and starting replication. It also enables users to start
migration to AWS. Users can also install agents to servers without an
agent.
"""
|
[
"klaratta@cloudboltsoftware.com"
] |
klaratta@cloudboltsoftware.com
|
6feb5f7e88ae1c948ac192a971e99bcfe1b13bd8
|
2031771d8c226806a0b35c3579af990dd0747e64
|
/pyobjc-framework-CoreServices/PyObjCTest/test_lssharedfilelist.py
|
9624ae2b03efaa607e39d773668c2e0fcb634b77
|
[
"MIT"
] |
permissive
|
GreatFruitOmsk/pyobjc-mirror
|
a146b5363a5e39181f09761087fd854127c07c86
|
4f4cf0e4416ea67240633077e5665f5ed9724140
|
refs/heads/master
| 2018-12-22T12:38:52.382389
| 2018-11-12T09:54:18
| 2018-11-12T09:54:18
| 109,211,701
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,812
|
py
|
from PyObjCTools.TestSupport import *
import CoreServices
import os
class TestLSSharedFileList (TestCase):
    """Bindings tests for the LSSharedFileList API (Python 2 / PyObjC).

    Checks that the CF types, string constants, magic item constants and
    function signatures are exposed correctly by the CoreServices wrapper.
    """

    def testTypes(self):
        # the two opaque CF types must be bridged as CFType subclasses
        self.assertIsCFType(CoreServices.LSSharedFileListRef)
        self.assertIsCFType(CoreServices.LSSharedFileListItemRef)

    @min_os_level('10.5')
    def testConstants10_5(self):
        # all list-name constants are CFStrings (bridged to unicode)
        self.assertIsInstance(CoreServices.kLSSharedFileListFavoriteVolumes, unicode)
        self.assertIsInstance(CoreServices.kLSSharedFileListFavoriteItems, unicode)
        self.assertIsInstance(CoreServices.kLSSharedFileListRecentApplicationItems, unicode)
        self.assertIsInstance(CoreServices.kLSSharedFileListRecentDocumentItems, unicode)
        self.assertIsInstance(CoreServices.kLSSharedFileListRecentServerItems, unicode)
        self.assertIsInstance(CoreServices.kLSSharedFileListSessionLoginItems, unicode)
        self.assertIsInstance(CoreServices.kLSSharedFileListGlobalLoginItems, unicode)
        self.assertIsInstance(CoreServices.kLSSharedFileListRecentItemsMaxAmount, unicode)
        self.assertIsInstance(CoreServices.kLSSharedFileListVolumesComputerVisible, unicode)
        self.assertIsInstance(CoreServices.kLSSharedFileListVolumesIDiskVisible, unicode)
        self.assertIsInstance(CoreServices.kLSSharedFileListVolumesNetworkVisible, unicode)
        self.assertIsInstance(CoreServices.kLSSharedFileListItemHidden, unicode)

    @min_os_level('10.6')
    def testConstants10_6(self):
        self.assertIsInstance(CoreServices.kLSSharedFileListLoginItemHidden, unicode)

    @min_os_level('10.5')
    def testMagicConstants10_5(self):
        # sentinel items used to address list positions
        self.assertIsInstance(CoreServices.kLSSharedFileListItemBeforeFirst, CoreServices.LSSharedFileListItemRef)
        self.assertIsInstance(CoreServices.kLSSharedFileListItemLast, CoreServices.LSSharedFileListItemRef)

    def testConstants(self):
        self.assertEqual(CoreServices.kLSSharedFileListNoUserInteraction, 1)
        self.assertEqual(CoreServices.kLSSharedFileListDoNotMountVolumes, 2)

    def testFunctions(self):
        """End-to-end exercise of list creation, observers, items and properties."""
        self.assertIsInstance(CoreServices.LSSharedFileListGetTypeID(), (int, long))
        self.assertIsInstance(CoreServices.LSSharedFileListItemGetTypeID(), (int, long))
        self.assertResultIsCFRetained(CoreServices.LSSharedFileListCreate)
        lst = CoreServices.LSSharedFileListCreate(None, CoreServices.kLSSharedFileListRecentDocumentItems, None)
        self.assertIsInstance(lst, CoreServices.LSSharedFileListRef)
        rl = CoreServices.CFRunLoopGetCurrent()
        # observer callback takes (list ref, context pointer)
        self.assertArgIsFunction(CoreServices.LSSharedFileListAddObserver, 3, b'v^{OpaqueLSSharedFileListRef=}^v', True)
        self.assertArgHasType(CoreServices.LSSharedFileListAddObserver, 4, b'^v')
        @objc.callbackFor(CoreServices.LSSharedFileListAddObserver)
        def callback(lst, ctxt):
            pass
        CoreServices.LSSharedFileListAddObserver(lst, rl, CoreServices.kCFRunLoopDefaultMode, callback, None)
        CoreServices.LSSharedFileListRemoveObserver(lst, rl, CoreServices.kCFRunLoopDefaultMode, callback, None)
        v = CoreServices.LSSharedFileListGetSeedValue(lst)
        self.assertIsInstance(v, (int, long))
        self.assertResultIsCFRetained(CoreServices.LSSharedFileListCopyProperty)
        self.assertResultHasType(CoreServices.LSSharedFileListCopyProperty, b'@')
        v = CoreServices.LSSharedFileListCopyProperty(lst, b"pyobjc.name".decode('latin1'))
        v = CoreServices.LSSharedFileListSetProperty(lst, b"pyobjc.name".decode('latin1'), b"value".decode('latin1'))
        self.assertIsInstance(v, (int, long))
        v = CoreServices.LSSharedFileListCopyProperty(lst, b"pyobjc.name".decode('latin1'))
        self.assertEqual(v, b"value".decode('latin1'))
        self.assertArgIsOut(CoreServices.LSSharedFileListCopySnapshot, 1)
        v, seed = CoreServices.LSSharedFileListCopySnapshot(lst, None)
        self.assertIsInstance(v, CoreServices.CFArrayRef)
        self.assertIsInstance(seed, (int,long))
        self.assertResultIsCFRetained(CoreServices.LSSharedFileListInsertItemURL)
        url = CoreServices.CFURLCreateWithString(None, "file://" + os.path.expanduser('~'), None)
        title = b"PyObjC.Test".decode("latin1")
        item = CoreServices.LSSharedFileListInsertItemFSRef(lst, CoreServices.kLSSharedFileListItemLast, title, None, objc.FSRef.from_pathname(os.path.expanduser('~')), None, None)
        self.assertIsInstance(item, CoreServices.LSSharedFileListItemRef)
        item = CoreServices.LSSharedFileListInsertItemURL(lst, CoreServices.kLSSharedFileListItemLast, title, None, url, None, None)
        self.assertIsInstance(item, CoreServices.LSSharedFileListItemRef)
        v = CoreServices.LSSharedFileListItemGetID(item)
        self.assertIsInstance(v, (int, long))
        v = CoreServices.LSSharedFileListItemCopyIconRef(item)
        if v is not None:
            self.assertIsInstance(v, CoreServices.IconRef)
        self.assertResultIsCFRetained(CoreServices.LSSharedFileListItemCopyDisplayName)
        v = CoreServices.LSSharedFileListItemCopyDisplayName(item)
        self.assertIsInstance(v, unicode)
        self.assertArgIsOut(CoreServices.LSSharedFileListItemResolve, 2)
        self.assertArgIsOut(CoreServices.LSSharedFileListItemResolve, 3)
        self.assertArgIsCFRetained(CoreServices.LSSharedFileListItemResolve, 2)
        v, url, ref = CoreServices.LSSharedFileListItemResolve(item, 0, None, objc.NULL)
        self.assertIsInstance(v, (int, long))
        if url is not None:
            self.assertIsInstance(url, CoreServices.CFURLRef)
        v = CoreServices.LSSharedFileListItemSetProperty(item, b"pyobjc.name".decode('latin1'), b"pyobjc.test".decode('latin1'))
        self.assertIsInstance(v, (int, long))
        self.assertResultIsCFRetained(CoreServices.LSSharedFileListItemCopyProperty)
        v = CoreServices.LSSharedFileListItemCopyProperty(item, b"pyobjc.name".decode('latin1'))
        if v is not None:
            self.assertEqual(v, "pyobjc.test")
        v = CoreServices.LSSharedFileListItemMove(lst, item, CoreServices.kLSSharedFileListItemBeforeFirst)
        self.assertIsInstance(v, (int, long))
        v = CoreServices.LSSharedFileListItemRemove(lst, item)
        self.assertIsInstance(v, (int, long))
        # referenced (not called) only to assert the symbol exists
        CoreServices.LSSharedFileListRemoveAllItems

    @expectedFailure
    def testMissing(self):
        # Needs more infrastructure
        self.fail('LSSharedFileListSetAuthorization')
        # FSRef suckage
        self.fail('LSSharedFileListItemRef')

    @min_os_level('10.10')
    def testFunctions10_10(self):
        self.assertResultIsCFRetained(CoreServices.LSSharedFileListItemCopyResolvedURL)
        self.assertArgIsOut(CoreServices.LSSharedFileListItemCopyResolvedURL, 2)
if __name__ == "__main__":
main()
|
[
"ronaldoussoren@mac.com"
] |
ronaldoussoren@mac.com
|
041299a89176a6f13777916bbe8296efdd7198f6
|
f16294a2b14754a0a3be754b8f5c5384ccf108c9
|
/EPAD/virtual/bin/gunicorn
|
e4445b255bdfe421b20479d7fe8cafab19cc3145
|
[] |
no_license
|
antomuli/Work_Pad
|
bb24aac8b81cb7d8ae9173d7e099ae46eaf0b09b
|
7cff39d27102398dd4c484933b620e00562fa812
|
refs/heads/master
| 2022-12-10T11:44:10.855797
| 2020-04-05T18:43:59
| 2020-04-05T18:43:59
| 252,779,135
| 2
| 0
| null | 2022-12-08T03:59:54
| 2020-04-03T16:04:19
|
Python
|
UTF-8
|
Python
| false
| false
| 293
|
#!/home/moringaschool/Documents/moringa-schools-projects/Capstone/EPAD/virtual/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
    # Strip a trailing "-script.py(w)" / ".exe" suffix (Windows console-script
    # launcher artifacts) from argv[0], then hand control to gunicorn's CLI.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
|
[
"mulianthony561@gmail.com"
] |
mulianthony561@gmail.com
|
|
e20baf3afedeae9e58e816b7163dd3d484d2c808
|
43aeee48c1f6fc468a43f9bb0d4edae8ee0dbee1
|
/testforothers/testSentence.py
|
568acfc3bc4ca9acd1e2c6968eb8c53ccff96a4c
|
[] |
no_license
|
wiky2/mytestproject
|
f694cf71dd3031e4597086f3bc90d246c4b26298
|
e7b79df6304476d76e87f9e8a262f304b30ca312
|
refs/heads/master
| 2021-09-07T20:54:19.569970
| 2018-02-28T23:39:00
| 2018-02-28T23:39:00
| 100,296,844
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,096
|
py
|
#!/usr/bin/env python
# coding: utf-8
# NOTE: the encoding declaration above must stay within the first two lines
# of the file (PEP 263): # coding=<encoding name>
'''
@author:
@license:
@contact:
@software: Test
@file: testList2.py
@time: 2017/8/27 12:28 AM
@desc: study notes for the "LaoWang Python" course - control-flow statements

Python statement notes
1. the print statement
    1.1 basic output
    1.2 print with a trailing comma
    1.3 redirecting output to a file (>> is the redirect)
2. control-flow statements
    2.1 made up of a condition plus a block of code
        2.1.1 conditions drive decisions, loops and branches
    2.2 format (never forget the colon and the 4 spaces)
    2.3 if, while, for and functions are all control flow
3. boolean values
    3.1 control flow is closely tied to truthiness
        3.1.1 do not confuse truthy/falsy with the bool type
    3.2 the basic boolean operators
        3.2.1 and
        3.2.2 or
        3.2.3 is - checks identity (same object)
        3.2.4 == - checks value
        3.2.5 not
        3.2.6 assorted comparison operators
4. the if statement (control flow)
    4.1 parts of if: if, else, elif, pass
        4.1.1 if + elif replaces switch
        4.1.2 pass
    4.2 tricks: ternary expressions
        4.2.1 x if cond else y
        4.2.2 using a list as [false, true][cond]
        4.2.3 use ternaries sparingly
'''
f=open('printtest.txt','w')
print >>f,'hahahahaha'
print >>f,'hahahahaha'
f.close()
x=3
if x:
    print 4
if x is True:
    print 5# x is an int, True is a bool: different objects, so `is` (identity) is False
print True and False
print True & False
print 4 if True else 3
print [4,3][True]# [value-if-false, value-if-true][condition]
for x in 'i am lilei':
    print x
for x in 'i am lilei'.split(' '):
    print x
for x in 'i am lilei'.split(' '):
    continue
    print x
else:
    print 'end-----'
print True and False and False and True# evaluated left to right; stops at the first falsy operand
a='aAsfASD'
print a.swapcase()# swap upper and lower case
a='aA141safd2afa534234vAUJADAWWEGFDfgiuRHIUOIKNLNey52fA78447SD'
print ''.join([s for s in a if s.isdigit()])
a=a.lower()
print dict([(x,a.count(x)) for x in set(a)])# count occurrences of each character
a_list=list(a)
set_list=list(set(a_list))
set_list.sort(key=a_list.index)# list.index as the sort key keeps first-seen order
print ''.join(set_list)
print a[::-1]
a='aA141safd2afa534234vAUJADAWWEGFDfgiuRHIUOIKNLNey52fA78447SD'
l=sorted(a)
a_upper_list=[]
a_lower_list=[]
for x in l:
    if x.isupper():
        a_upper_list.append(x)
    elif x.islower():
        a_lower_list.append(x)
    else:
        pass
for y in a_upper_list:
    y_lower=y.lower()
    if y_lower in a_lower_list:
        a_lower_list.insert(a_lower_list.index(y_lower),y)
print ''.join(a_lower_list)
a='aA141safd2afa534234vAUJADAWWEGFDfgiuRHIUOIKNLNey52fA78447SD'
search='boy'
u=set(a)
u.update(list(search))
print len(set(a))== len(u)
a='aA141safd2afa534234vAUJADAWWEGFDfgiuRHIUOIKNLNey52fA78447SD'
search=['boy','girl']
u=set(a)
for s in search:
    u.update(list(s))
print len(set(a))== len(u)
a='aA141safd2afa534234vAUJADAWWEGFDfgiuRHIUOIKNLNey52fA78447SD'
l=([(x,a.count(x)) for x in set(a)])# (char, count) pairs
l.sort(key=lambda k:k[1],reverse=True)# k[1] is the count (tuple indices start at 0)
print l[0][0]
print l
import os
m=os.popen('python -m this').read()
m=m.replace('\n','')
l=m.split(' ')
print [(x,l.count(x)) for x in ['be','this','than']]
size=1023147201
print '%s kb' % (size >>10)
print '%s mb' % (size >>20)
a=[1,2,3,6,8,9,10,14,17]
print str(a)#[1, 2, 3, 6, 8, 9, 10, 14, 17]
print ''.join(str(a))
print str(a)[1:-1:3]# slicing breaks once numbers have more than one digit
print str(a)[1:-1].replace(', ','')# strip the brackets first, then drop the comma+space separators
a={'key1':'value1','key2':'value2'}
for i in a.keys():
    print i
a={'key1':'value1','key2':'value2'}
for x,y in a.items():
    print x,y
a={'a':'haha','b':'xixi','d':'haha'}
search_value='haha'
key_list=[]
for x,y in a.items():
    if y==search_value:
        key_list.append(x)
print key_list
import string
a='aA141safd2afa534234vAUJADAWWEGFDfgiuRHIUOIKNLNey52fA78447SD'
a=''.join([x for x in a if not x.isdigit()])
print sorted(a,key=string.upper)
a='i am lilei. We need to go'
c=string.maketrans('i','I')# chars of the first argument map one-to-one to the second
b=a.translate(c,'lei')# the second argument lists chars to delete; translate returns a new string, so assign the result
print b
with open('printtest.txt','a') as g:# the with-statement closes the file automatically
    g.write('xixixi')
|
[
"jerry_136510@aliyun.com"
] |
jerry_136510@aliyun.com
|
53284561112646ecce78b97cfe2b53bf34de1289
|
324764c9dba09bb3c2c5af24db292cd27d9e81c8
|
/2018/09/part1.py
|
113581e23ee454494dc922ff94b6f92099555fe8
|
[] |
no_license
|
thatsokay/advent-of-code
|
cebcde362e7f0f4b0587e7e2c01d874e886dcd5e
|
3781b9da93e3064f28f11f22db374cf896159488
|
refs/heads/master
| 2022-12-24T23:48:43.549678
| 2022-12-21T15:55:39
| 2022-12-22T05:04:45
| 161,293,649
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 610
|
py
|
from operator import itemgetter
from collections import defaultdict, deque
def marbles(players, last):
    """Return the winning score of the marble game (AoC 2018, day 9).

    The circle is modelled as a deque whose LEFT end is the current
    marble.  Ordinary marbles are inserted two positions clockwise of
    the current one; every 23rd marble is kept by a player, who also
    banks the marble seven positions counter-clockwise.
    """
    scores = defaultdict(int)
    circle = deque([0])
    for marble in range(1, last + 1):
        if marble % 23:
            # Ordinary marble: step two clockwise and make it current.
            circle.rotate(-2)
            circle.appendleft(marble)
        else:
            # Scoring marble: player banks it plus the marble 7 CCW of current.
            circle.rotate(7)
            scores[marble % players + 1] += marble + circle.popleft()
    return max(scores.values())
if __name__ == '__main__':
    # Input looks like "<N> players; last marble is worth <M> points":
    # whitespace fields 0 and 6 are the two numbers we need.
    with open('input.txt') as f:
        players, last = map(int, itemgetter(0, 6)(f.readline().split()))
    print(marbles(players, last))
|
[
"andre@andre-wong.com"
] |
andre@andre-wong.com
|
a37487f3fd56ae30f901da031243cd773ef35b7d
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startCirq3223.py
|
e080a241bb5691ff2e6ed02cf7ee28f88d4b9702
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,319
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=45
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Build the hard-coded 4-qubit benchmark circuit.

    :param n: declared qubit count; not used below — the gate list is
        fixed for 4 qubits.  The ``# number=k`` tags look like generator
        bookkeeping (the file header says "total number=45"), so the gate
        order should not be rearranged by hand — TODO confirm with the
        generator.
    :param input_qubit: sequence of cirq qubits the gates act on.
    :return: a cirq.Circuit ending with a joint measurement keyed 'result'.
    """
    c = cirq.Circuit()  # circuit begin
    c.append(cirq.H.on(input_qubit[0])) # number=9
    c.append(cirq.H.on(input_qubit[1])) # number=2
    c.append(cirq.H.on(input_qubit[2])) # number=3
    c.append(cirq.H.on(input_qubit[3])) # number=4
    c.append(cirq.H.on(input_qubit[0])) # number=5
    c.append(cirq.Y.on(input_qubit[3])) # number=36
    c.append(cirq.H.on(input_qubit[3])) # number=16
    c.append(cirq.CZ.on(input_qubit[1],input_qubit[3])) # number=17
    c.append(cirq.H.on(input_qubit[3])) # number=18
    c.append(cirq.H.on(input_qubit[1])) # number=6
    c.append(cirq.H.on(input_qubit[2])) # number=37
    c.append(cirq.Z.on(input_qubit[1])) # number=35
    c.append(cirq.Y.on(input_qubit[3])) # number=38
    c.append(cirq.H.on(input_qubit[2])) # number=7
    c.append(cirq.H.on(input_qubit[3])) # number=8
    c.append(cirq.H.on(input_qubit[3])) # number=32
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=33
    c.append(cirq.H.on(input_qubit[3])) # number=34
    c.append(cirq.H.on(input_qubit[3])) # number=26
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=27
    c.append(cirq.H.on(input_qubit[3])) # number=28
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=42
    c.append(cirq.X.on(input_qubit[3])) # number=43
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=44
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=25
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=12
    c.append(cirq.H.on(input_qubit[2])) # number=29
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=30
    c.append(cirq.H.on(input_qubit[2])) # number=31
    c.append(cirq.X.on(input_qubit[2])) # number=21
    c.append(cirq.H.on(input_qubit[2])) # number=39
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=40
    c.append(cirq.H.on(input_qubit[2])) # number=41
    c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=13
    c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=14
    # circuit end

    c.append(cirq.measure(*input_qubit, key='result'))

    return c
def bitstring(bits):
    """Fold a sequence of measurement bits into a '0'/'1' string."""
    chars = []
    for bit in bits:
        chars.append(str(int(bit)))
    return ''.join(chars)
if __name__ == '__main__':
    qubit_count = 4

    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]  # result=qubit state
    circuit = make_circuit(qubit_count,input_qubits)
    # NOTE(review): cirq.google / optimized_for_sycamore only exist in older
    # cirq releases; pin the dependency if this is ever re-run.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    circuit_sample_count =2000  # repetitions per histogram

    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)

    frequencies = result.histogram(key='result', fold_func=bitstring)
    # Dump histogram, gate count and circuit diagram to the benchmark CSV.
    writefile = open("../data/startCirq3223.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)

    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)

    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
90b6785c7f5223d1dc216cddaf8db8fd5e3b3c37
|
f2bec1dbb86b218fc1b7c9106ff13c15dea8c301
|
/Interactive Tower of Hanoi/column/column.py
|
bd3deef5fada044b704a88cf513f425691695c0d
|
[] |
no_license
|
PuffyShoggoth/hatch
|
59e0f3684f041846084316f5bfafda1601cf5d2e
|
e1b32787cb0571469cd06a469b24890e23b78a58
|
refs/heads/master
| 2021-01-01T16:52:57.758161
| 2017-07-28T18:47:47
| 2017-07-28T18:47:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,325
|
py
|
class Column:
    """One peg of a Tower-of-Hanoi board drawn on a Tk canvas.

    ``disks`` is kept bottom-to-top; ``ids`` holds the matching canvas
    rectangle ids so a disk's drawing can be deleted when it moves.
    """

    def __init__(self, position, disks, C):
        # position: x coordinate of the peg centre on canvas C.
        self.position = position
        self.disks = disks
        self.ids = []
        # Draw the initial stack, remembering each rectangle's canvas id.
        for k in range(len(self.disks)):
            self.ids.append(self.display_disk(self.disks[k], C, k))

    def can_add(self, column):
        # Legal move: this peg is empty, or the disk coming from `column`
        # is smaller than our current top disk.
        # NOTE(review): relies on disks supporting '>' — confirm whether
        # disks are ints or objects with ordering defined.
        return len(self.disks)==0 or (len(column.disks)>0 and self.disks[-1] > column.disks[-1])

    def add(self, disk, C):
        # Draw the disk at the next free slot, then record it.
        k = len(self.disks)
        self.ids.append(self.display_disk(disk, C, k))
        self.disks.append(disk)

    def remove(self, C):
        # Erase the top disk's rectangle and return the disk itself.
        C.delete(self.ids.pop())
        return self.disks.pop()

    def clear_all(self, C):
        # Remove every disk and its drawing from this peg.
        while len(self.ids) > 0:
            C.delete(self.ids.pop())
            self.disks.pop()

    def display_disk(self, disk, C, k, currfill=""):
        # Disk k (0 = bottom) is a 30px-tall rectangle centred on the peg.
        # 600 is a hard-coded canvas baseline (see the commented-out
        # winfo_height() experiments).
        # NOTE(review): uses disk.size here, so disks appear to be objects
        # with a 'size' attribute — verify against the caller.
        sz = disk.size/2
        # height = C.winfo_height()
        # height = 300
        return C.create_rectangle(self.position-sz, 600-k*30, self.position+sz, 600-(k+1)*30, fill=currfill)
#def display(self, C):
#C.create_rectangle(0, 200, 200, 0, fill = "black")
# sz = self.disks[k]/2
# height = C.winfo_height()
# C.create_rectangle(self.position-sz, height-k*10, self.position+sz, height-(k+1)*10, fill="black")
|
[
"shaar@utschools.ca"
] |
shaar@utschools.ca
|
8e839a17907ffc5edd0047756692fc187be920e7
|
1635e722e7ede72f4877671f36bbbc4199abae81
|
/revised-addons/mrp_product_cost_calculation/mrp_production.py
|
7f27de0d02cc1b22907e862f281f35ad313647a3
|
[] |
no_license
|
ecosoft-odoo/sqp
|
7c09617048091ac6de4b25a33ad88127d36de452
|
7a7fc6b88087d98d536dd4ec39f9fb572918090e
|
refs/heads/master
| 2023-08-08T00:07:48.405000
| 2023-08-04T15:47:43
| 2023-08-04T15:47:43
| 40,047,976
| 3
| 9
| null | 2023-08-02T08:38:53
| 2015-08-01T13:48:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,254
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Mentis d.o.o.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class mrp_production(osv.osv):
    """Recompute average-cost valuation when a production order ends.

    After the standard ``action_production_end`` has posted its journal
    entries, this override rebalances the finished product's move lines so
    the valuation carries the summed cost of the consumed components, then
    refreshes the product's weighted-average standard price.
    """
    _inherit = 'mrp.production'

    def action_production_end(self, cr, uid, ids, context=None):
        """Finish the production, then fix up valuation / average cost.

        :param ids: ids of the mrp.production records being closed
        :return: whatever the parent ``action_production_end`` returned
        """
        write_res = super(mrp_production, self).action_production_end(cr, uid, ids, context)
        if write_res:
            _production_ids = self.pool.get('mrp.production').browse(cr, uid, ids, context=None)
            for _production_id in _production_ids:
                _name = _production_id.name
                _product_id = _production_id.product_id.id
                product_obj = self.pool.get('product.product')
                accounts = product_obj.get_product_accounts(cr, uid, _product_id, context)
                # Only average-costed products with both valuation accounts
                # configured need the rebalancing below.
                if _production_id.product_id.cost_method == 'average' and accounts['stock_account_input'] and accounts['property_stock_valuation_account_id']:
                    _debit = 0.00
                    _credit = 0.00
                    # Total the journal items of this production that belong
                    # to the consumed components (any product but the
                    # finished one).
                    _move_line_ids = self.pool.get('account.move.line').search(cr, uid, [('name', '=', _name),
                                                                                         ('product_id', '!=', _product_id)])
                    _move_lines = self.pool.get('account.move.line').browse(cr, uid, _move_line_ids, context=None)
                    for _move_line in _move_lines:
                        _debit += _move_line.debit
                        _credit += _move_line.credit
                    # Rewrite the finished product's lines so they balance
                    # against the component totals computed above.
                    _move_line_ids = self.pool.get('account.move.line').search(cr, uid, [('name', '=', _name),
                                                                                        ('product_id', '=', _product_id)], order='id')
                    _move_lines = self.pool.get('account.move.line').browse(cr, uid, _move_line_ids, context=None)
                    for _move_line in _move_lines:
                        if _move_line.account_id.id == accounts['stock_account_input']:
                            _move_line.write({'credit': _credit}, context)
                        elif _move_line.account_id.id == accounts['property_stock_valuation_account_id']:
                            _move_line.write({'debit': _debit}, context)
                    if _debit and _debit != 0.00:
                        # Weighted-average price over existing stock plus
                        # this batch.
                        _old_inventory_qty = _production_id.product_id.qty_available or 0.00
                        _old_inventory_value = _old_inventory_qty * _production_id.product_id.standard_price
                        _new_inventory_value = _production_id.product_qty * _debit
                        _new_inventory_qty = _old_inventory_qty + _production_id.product_qty
                        if _new_inventory_qty and _new_inventory_qty != 0.00:
                            _new_standard_price = (_old_inventory_value + _new_inventory_value) / _new_inventory_qty
                        elif _production_id.product_qty and _production_id.product_qty != 0.00:
                            # BUGFIX: was `_product_id.product_qty` — _product_id
                            # is a plain id (int) and has no product_qty field,
                            # so this branch always raised AttributeError.
                            _new_standard_price = _debit / _production_id.product_qty
                        else:
                            _new_standard_price = _debit
                        product_obj.write(cr, uid, [_product_id], {'standard_price': _new_standard_price}, context)
        return write_res
mrp_production()
|
[
"kittiu@gmail.com"
] |
kittiu@gmail.com
|
51a1fe32d632ed84c07b28f8a561d7fbd57759b0
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/85/usersdata/224/52688/submittedfiles/funcoes1.py
|
210a3b6367327c545086dc3ad9ee60b36b4fb544
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,327
|
py
|
# -*- coding: utf-8 -*-
def cres(Ca):
    """Return True if ``Ca`` is strictly increasing, else False.

    Fixes an off-by-one: the original iterated i over range(len(Ca)) while
    reading ``Ca[i+1]``, so it always raised IndexError on the last step.
    A list with 0 or 1 elements is trivially increasing.
    """
    for i in range(0, len(Ca) - 1, 1):
        if Ca[i] >= Ca[i + 1]:
            return False
    return True
def decre(lista):
    """Return True if ``lista`` is strictly decreasing, else False.

    Fixes the same off-by-one as ``cres``: the original read
    ``lista[i+1]`` with i running to len(lista)-1 inclusive (IndexError).
    A list with 0 or 1 elements is trivially decreasing.
    """
    for i in range(0, len(lista) - 1, 1):
        if lista[i] <= lista[i + 1]:
            return False
    return True
def consecutivo(b):
    """Return True if ``b`` is a run of consecutive integers (each element
    equals the previous plus one), else False.

    Fixes three defects in the original: it compared ``b[i+1]`` against the
    fixed ``b[1]`` instead of ``b[i]``, indexed one past the end of the
    list, and returned the undefined name ``true`` (returning None — falsy —
    in every other case, so returning False here stays caller-compatible).
    """
    for i in range(0, len(b) - 1, 1):
        if b[i + 1] - b[i] != 1:
            return False
    return True
# Read three integer lists of the declared size, then report for each one
# whether it is increasing, decreasing and consecutive ('S' = yes, 'N' = no),
# in the original output order.
# BUGFIX: the decreasing-case branches called `prin(...)` (NameError); the
# triplicated check code is also collapsed into one loop.
n = int(input('Digite o tamanho da lista: '))
g = []
for i in range(1, n + 1, 1):
    x = int(input('Digite os numeros: '))
    g.append(x)
c = []
for i in range(1, n + 1, 1):
    x = int(input('Digite os numeros: '))
    c.append(x)
w = []
for i in range(1, n + 1, 1):
    x = int(input('Digite os numeros: '))
    w.append(x)
for lista in (g, c, w):
    print('S' if cres(lista) else 'N')
    print('S' if decre(lista) else 'N')
    print('S' if consecutivo(lista) else 'N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
9c7d441f980e1aaef94cc526e2ba03edbd3154fe
|
6a0d42149f8bbe5f7d6cb8103fe557d0d048c832
|
/carts/views.py
|
822a318160da7420c9dc5c3bc34769e0481eb52e
|
[] |
no_license
|
syfqpipe/product-public
|
8f3b2f81d0c9fdc61bb5841db1d4d9d26bb618a1
|
62b918bd9f24b4a47fab04398ca8112268e1e2b1
|
refs/heads/master
| 2023-01-22T05:14:19.132567
| 2020-12-05T03:22:56
| 2020-12-05T03:22:56
| 318,689,598
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,799
|
py
|
import hashlib
import json
import datetime
from django.http import JsonResponse
from django.shortcuts import render,redirect
from django.db.models import Q
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from rest_framework.decorators import action
from rest_framework.filters import SearchFilter, OrderingFilter
from rest_framework import viewsets, status
from rest_framework_extensions.mixins import NestedViewSetMixin
from django_filters.rest_framework import DjangoFilterBackend
from entities.models import Entity
from products.models import Product, ProductSearchCriteria
from services.models import Service, ServiceRequest
from quotas.models import Quota
from .models import (
Cart,
CartItem
)
from .serializers import (
CartSerializer,
CartExtendedSerializer,
CartItemSerializer
)
from transactions.models import Transaction
from users.models import CustomUser
class CartViewSet(NestedViewSetMixin, viewsets.ModelViewSet):
    """CRUD plus custom actions for shopping ``Cart`` records.

    The @action endpoints manage a user's single open cart (status 'CR')
    and its items; an item can be a product (plain, document image, or a
    year-range financial report), a service request, a quota, or a
    product-search-criteria purchase.
    """
    queryset = Cart.objects.all()
    serializer_class = CartSerializer
    filter_backends = (DjangoFilterBackend, SearchFilter, OrderingFilter)
    filterset_fields = ['user', 'cart_status']

    def get_permissions(self):
        """Open access for every action.

        NOTE(review): both branches yield AllowAny, so the if/else is
        redundant — presumably a stricter class was intended for non-list
        actions; confirm before tightening.
        """
        if self.action == 'list':
            permission_classes = [AllowAny]
        else:
            permission_classes = [AllowAny]
        return [permission() for permission in permission_classes]

    def get_queryset(self):
        """Return all carts (no per-user filtering is applied here)."""
        queryset = Cart.objects.all()
        return queryset

    @action(methods=['POST'], detail=False)
    def check_cart(self, request, *args, **kwargs):
        """Return the requesting user's open ('CR') cart, creating one if absent."""
        request_ = json.loads(request.body)
        request_user_id_ = request_['user']
        request_user_ = CustomUser.objects.filter(
            id=request_user_id_
        ).first()
        cart_ = Cart.objects.filter(
            user=request_user_id_,
            cart_status='CR'
        ).first()
        print('hello', cart_)  # NOTE(review): leftover debug prints below
        if cart_:
            print('ada')
            serializer = CartExtendedSerializer(cart_)
        else:
            print('xde')
            new_cart_ = Cart.objects.create(
                user=request_user_
            )
            serializer = CartExtendedSerializer(new_cart_)
        return Response(serializer.data)

    @action(methods=['GET'], detail=True)
    def with_item(self, request, *args, **kwargs):
        """Return this cart serialized together with its items."""
        cart = self.get_object()
        serializer = CartExtendedSerializer(cart)
        return Response(serializer.data)

    @action(methods=['POST'], detail=True)
    def add_item_to_cart(self, request, *args, **kwargs):
        """Add a single item to this cart.

        The request's 'item_type' selects the branch; the cart is returned
        with its items.  NOTE(review): an unrecognized item_type falls into
        the final `else: pass`, after which `cart` is unbound and the
        serializer line raises NameError — confirm callers never send one.
        """
        cart_item_request = json.loads(request.body)
        # print('cit', cart_item_request)
        # Post.objects.filter(user=request.user)
        # product_length = CartItem.objects.filter(cart_item_type = 'PR').count()
        # print("{0:0>6}".format(product_length))
        # Item product
        if cart_item_request['item_type'] == 'product':
            entity_id = cart_item_request['entity']
            product_id = cart_item_request['product']
            image_version_id = cart_item_request['image_version_id']
            image_form_type = cart_item_request['image_form_type']
            year1 = cart_item_request['year1']
            year2 = cart_item_request['year2']
            cart = self.get_object()
            entity = Entity.objects.filter(id=entity_id).first()
            product = Product.objects.filter(id=product_id).first()
            cart_items = CartItem.objects.filter(cart=cart.id)
            print(cart_items)
            # Document and image
            if image_version_id:
                new_cart_item = CartItem.objects.create(
                    entity=entity,
                    product=product,
                    image_form_type=image_form_type,
                    image_version_id=image_version_id,
                    cart=cart,
                    cart_item_type='PR'
                )
                # if aaa is None:
                #     user_id_ = cart_item_request['user']
                #     delta = datetime.timedelta(hours=24)
                #     current_time = datetime.datetime.now(tz=timezone.utc)
                #     date_filter = current_time - delta
                #     transactions_ = Transaction.objects.filter(
                #         created_date__gte=date_filter,
                #         user=user_id_,
                #     ).all()
                #     if transactions_:
                #         product_viewing_fee = Product.objects.filter(slug='document_form_viewing_fee').first()
                #         new_cart_item_viewing_fee = CartItem.object.create(
                #             product=product_viewing_fee,
                #             cart=cart,
                #             cart_item_type='SE'
                #         )
            # Financial historical
            elif year1 and year2:
                new_cart_item = CartItem.objects.create(
                    entity=entity,
                    product=product,
                    year1=year1,
                    year2=year2,
                    cart= cart,
                    cart_item_type='PR'
                )
            # Products
            else:
                new_cart_item = CartItem.objects.create(
                    entity=entity,
                    product=product,
                    cart= cart,
                    cart_item_type='PR'
                )
        # Item service
        elif cart_item_request['item_type'] == 'service':
            service_request_id = str(cart_item_request['service_request_id'])
            service_request = ServiceRequest.objects.filter(id=service_request_id).first()
            cart = self.get_object()
            new_cart_item = CartItem.objects.create(
                service_request=service_request,
                cart= cart,
                cart_item_type='SE'
            )
        # Item quota
        elif cart_item_request['item_type'] == 'quota':
            quota_id = str(cart_item_request['quota_id'])
            quota = Quota.objects.filter(id=quota_id).first()
            cart = self.get_object()
            new_cart_item = CartItem.objects.create(
                quota = quota,
                cart= cart,
                cart_item_type='QU'
            )
        elif cart_item_request['item_type'] == 'product_search_criteria':
            product_search_criteria_id = str(cart_item_request['product_search_criteria_id'])
            product_search_criteria = ProductSearchCriteria.objects.filter(id=product_search_criteria_id).first()
            cart = self.get_object()
            new_cart_item = CartItem.objects.create(
                product_search_criteria=product_search_criteria,
                cart= cart,
                cart_item_type='PS'
            )
        else:
            pass
        serializer = CartExtendedSerializer(cart)
        return Response(serializer.data)

    @action(methods=['POST'], detail=True)
    def add_item_to_cart_bulk(self, request, *args, **kwargs):
        """Stub: intended bulk variant of add_item_to_cart (not implemented)."""
        cart_item_request_ = json.loads(request.body)
        for item in cart_item_request_:
            pass
        print('Hello')

    @action(methods=['POST'], detail=True)
    def remove_item_from_cart(self, request, *args, **kwargs):
        """Detach a CartItem (by id) from this cart and return the cart."""
        cart_item_id = json.loads(request.body)['cart_item_id']
        cart_item = CartItem.objects.filter(id=cart_item_id).first()
        cart = self.get_object()
        cart.cart_item.remove(cart_item)
        cart.save()
        serializer = CartExtendedSerializer(cart)
        return Response(serializer.data)
|
[
"syafiqbasri@pipeline-network.com"
] |
syafiqbasri@pipeline-network.com
|
446ef7b61a2cbc8d7ca4a6da2a5caf36c3e620a7
|
d4fac8ce52d8a058bb12fda402b9d25e24271ae6
|
/examples/twisted/websocket/echo_site_tls/server.py
|
a5a43b7321a46fdf3dc8da70287bd241a662bbc8
|
[
"MIT"
] |
permissive
|
workingwim/AutobahnPython
|
392ce2f11c320fe4b36bc0eefed1971418575394
|
3fce8aca718335db99aba7adbd4426c8a81cb0e0
|
refs/heads/master
| 2020-12-25T04:27:52.868814
| 2015-08-09T15:12:40
| 2015-08-09T15:12:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,782
|
py
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import sys
from twisted.internet import reactor, ssl
from twisted.python import log
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.twisted.websocket import WebSocketServerFactory, \
WebSocketServerProtocol
from autobahn.twisted.resource import WebSocketResource, \
HTTPChannelHixie76Aware
class EchoServerProtocol(WebSocketServerProtocol):
    """WebSocket protocol that echoes every frame back to its sender."""

    def onMessage(self, payload, isBinary):
        # Send the received frame straight back, preserving its mode.
        self.sendMessage(payload, isBinary=isBinary)
if __name__ == '__main__':

    # 'debug' on the command line enables twisted logging to stdout.
    if len(sys.argv) > 1 and sys.argv[1] == 'debug':
        log.startLogging(sys.stdout)
        debug = True
    else:
        debug = False

    # TLS context from the on-disk key/certificate pair.
    contextFactory = ssl.DefaultOpenSSLContextFactory('keys/server.key',
                                                      'keys/server.crt')

    factory = WebSocketServerFactory("wss://localhost:8080",
                                     debug=debug,
                                     debugCodePaths=debug)

    factory.protocol = EchoServerProtocol
    factory.setProtocolOptions(allowHixie76=True) # needed if Hixie76 is to be supported

    resource = WebSocketResource(factory)

    # we server static files under "/" ..
    root = File(".")

    # and our WebSocket server under "/ws"
    root.putChild("ws", resource)

    # both under one Twisted Web Site
    site = Site(root)
    site.protocol = HTTPChannelHixie76Aware # needed if Hixie76 is to be supported

    # Serve HTTPS + WSS on port 8080 until the reactor is stopped.
    reactor.listenSSL(8080, site, contextFactory)

    reactor.run()
|
[
"tobias.oberstein@tavendo.de"
] |
tobias.oberstein@tavendo.de
|
8b14a5f5b37c67b4d3fa56854765efd86eaa81b9
|
9adc810b07f7172a7d0341f0b38088b4f5829cf4
|
/experiments/ashvin/demos/legacy_pusher/bc1.py
|
b253935fcf875c8d3cd2a4123dc8b9009f53c566
|
[
"MIT"
] |
permissive
|
Asap7772/railrl_evalsawyer
|
7ee9358b5277b9ddf2468f0c6d28beb92a5a0879
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
refs/heads/main
| 2023-05-29T10:00:50.126508
| 2021-06-18T03:08:12
| 2021-06-18T03:08:12
| 375,810,557
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,529
|
py
|
"""Example behavior cloning script for pointmass.
If you are trying to run this code, ask Ashvin for the demonstration file:
demos/pointmass_demos_100.npy (which should go in your S3 storage)
"""
import rlkit.misc.hyperparameter as hyp
from multiworld.envs.mujoco.cameras import sawyer_pusher_camera_upright_v2
from multiworld.envs.pygame.point2d import Point2DWallEnv
from multiworld.envs.mujoco.sawyer_xyz.sawyer_push_multienv import SawyerPushAndReachXYEasyEnv
from rlkit.launchers.launcher_util import run_experiment
from rlkit.launchers.arglauncher import run_variants
import numpy as np
def her_td3_experiment(variant):
    """Behavior-clone a goal-conditioned policy from demonstrations (HER + BC).

    Builds the env, HER relabeling replay buffer, TD3-style networks and an
    exploration strategy from the ``variant`` config dict, then trains a
    ``HerBC`` algorithm on the demo file at ``variant['demo_path']``.
    Imports are function-local so the launcher can pickle this function
    without importing mujoco/torch at module load time.
    """
    import gym
    import multiworld.envs.mujoco
    import multiworld.envs.pygame
    import rlkit.samplers.rollout_functions as rf
    import rlkit.torch.pytorch_util as ptu
    from rlkit.exploration_strategies.base import (
        PolicyWrappedWithExplorationStrategy
    )
    from rlkit.exploration_strategies.epsilon_greedy import EpsilonGreedy
    from rlkit.exploration_strategies.gaussian_strategy import GaussianStrategy
    from rlkit.exploration_strategies.ou_strategy import OUStrategy
    from rlkit.torch.grill.launcher import get_video_save_func
    from rlkit.demos.her_bc import HerBC
    from rlkit.torch.networks import ConcatMlp, TanhMlpPolicy
    from rlkit.data_management.obs_dict_replay_buffer import (
        ObsDictRelabelingBuffer
    )

    # Either a registered gym id or an explicit class + kwargs.
    if 'env_id' in variant:
        env = gym.make(variant['env_id'])
    else:
        env = variant['env_class'](**variant['env_kwargs'])

    observation_key = variant['observation_key']
    desired_goal_key = variant['desired_goal_key']
    # Propagate the obs/goal keys into the HER algorithm kwargs (mutates variant).
    variant['algo_kwargs']['her_kwargs']['observation_key'] = observation_key
    variant['algo_kwargs']['her_kwargs']['desired_goal_key'] = desired_goal_key
    if variant.get('normalize', False):
        raise NotImplementedError()

    achieved_goal_key = desired_goal_key.replace("desired", "achieved")
    replay_buffer = ObsDictRelabelingBuffer(
        env=env,
        observation_key=observation_key,
        desired_goal_key=desired_goal_key,
        achieved_goal_key=achieved_goal_key,
        **variant['replay_buffer_kwargs']
    )
    obs_dim = env.observation_space.spaces['observation'].low.size
    action_dim = env.action_space.low.size
    goal_dim = env.observation_space.spaces['desired_goal'].low.size
    # Exploration noise: Ornstein-Uhlenbeck, Gaussian, or epsilon-greedy.
    exploration_type = variant['exploration_type']
    if exploration_type == 'ou':
        es = OUStrategy(
            action_space=env.action_space,
            **variant['es_kwargs']
        )
    elif exploration_type == 'gaussian':
        es = GaussianStrategy(
            action_space=env.action_space,
            **variant['es_kwargs'],
        )
    elif exploration_type == 'epsilon':
        es = EpsilonGreedy(
            action_space=env.action_space,
            **variant['es_kwargs'],
        )
    else:
        raise Exception("Invalid type: " + exploration_type)
    # TD3-style twin Q networks over (obs, action, goal).
    qf1 = ConcatMlp(
        input_size=obs_dim + action_dim + goal_dim,
        output_size=1,
        **variant['qf_kwargs']
    )
    qf2 = ConcatMlp(
        input_size=obs_dim + action_dim + goal_dim,
        output_size=1,
        **variant['qf_kwargs']
    )
    policy = TanhMlpPolicy(
        input_size=obs_dim + goal_dim,
        output_size=action_dim,
        **variant['policy_kwargs']
    )
    exploration_policy = PolicyWrappedWithExplorationStrategy(
        exploration_strategy=es,
        policy=policy,
    )
    # NOTE(review): qf1/qf2 are built but not passed to HerBC — presumably
    # pure behavior cloning ignores them; confirm against HerBC's signature.
    algorithm = HerBC(
        env,
        exploration_policy,
        policy,
        variant["demo_path"],
        replay_buffer=replay_buffer,
        **variant['algo_kwargs']
    )

    if variant.get("save_video", False):
        rollout_function = rf.create_rollout_function(
            rf.multitask_rollout,
            max_path_length=algorithm.max_path_length,
            observation_key=algorithm.observation_key,
            desired_goal_key=algorithm.desired_goal_key,
        )
        video_func = get_video_save_func(
            rollout_function,
            env,
            policy,
            variant,
        )
        algorithm.post_epoch_funcs.append(video_func)

    algorithm.to(ptu.device)
    algorithm.train()
if __name__ == "__main__":
    # noinspection PyTypeChecker
    # Workspace bounds for the pusher goal space (currently unused locals).
    size = 0.1
    low = (-size, 0.4 - size, 0)
    high = (size, 0.4 + size, 0.1)
    # Default hyperparameters; entries in search_space below override these.
    variant = dict(
        algo_kwargs=dict(
            base_kwargs=dict(
                num_epochs=1001,
                num_steps_per_epoch=1,
                num_steps_per_eval=1000,
                max_path_length=100,
                num_updates_per_env_step=1,
                batch_size=128,
                discount=0.99,
                min_num_steps_before_training=0,
                reward_scale=100,
                render=False,
                collection_mode='online',
                parallel_env_params=dict(
                    num_workers=1,
                ),
            ),
            her_kwargs=dict(
                observation_key='state_observation',
                desired_goal_key='state_desired_goal',
            ),
            td3_kwargs=dict(
                weight_decay=0.0,
            ),
        ),
        replay_buffer_kwargs=dict(
            max_size=int(1E6),
            fraction_goals_are_rollout_goals=1.0,
            fraction_resampled_goals_are_env_goals=0.0,
            ob_keys_to_save=['state_observation', 'state_desired_goal'],
        ),
        qf_kwargs=dict(
            hidden_sizes=[64, 64],
        ),
        policy_kwargs=dict(
            hidden_sizes=[64, 64],
        ),
        algorithm='HER-TD3',
        version='normal',
        es_kwargs=dict(
            max_sigma=.8,
        ),
        exploration_type='ou',
        observation_key='state_observation',
        desired_goal_key='state_desired_goal',
        init_camera=sawyer_pusher_camera_upright_v2,
        do_state_exp=True,
        save_video=False,
        imsize=84,
        snapshot_mode='gap_and_last',
        snapshot_gap=50,
        env_class=SawyerPushAndReachXYEasyEnv,
        env_kwargs=dict(
            hide_goal=True,
            reward_info=dict(
                type="state_distance",
            ),
        ),
        demo_path="demos/pusher_demos_100.npy",
        num_exps_per_instance=1,
    )
    # Cartesian-product sweep over these dotted-path overrides.
    search_space = {
        # 'env_id': ['SawyerPushAndReacherXYEnv-v0', ],
        'seedid': range(3),
        'algo_kwargs.base_kwargs.num_updates_per_env_step': [1, ],
        'replay_buffer_kwargs.fraction_goals_are_rollout_goals': [0.0, 0.5, 1.0],
        'algo_kwargs.td3_kwargs.weight_decay': [0.0, 1e-3, 1e-4, 1e-5],
    }
    sweeper = hyp.DeterministicHyperparameterSweeper(
        search_space, default_parameters=variant,
    )

    # n_seeds = 1
    # mode = 'local'
    # exp_prefix = 'test'

    # NOTE(review): n_seeds/mode/exp_prefix are set but unused by the
    # run_variants launcher path below (leftovers from run_experiment).
    n_seeds = 1
    mode = 'ec2'
    exp_prefix = 'sawyer_pusher_state_final'

    variants = []
    for variant in sweeper.iterate_hyperparameters():
        variants.append(variant)

    run_variants(her_td3_experiment, variants, run_id=1)

    # for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
    #     for i in range(n_seeds):
    #         run_experiment(
    #             her_td3_experiment,
    #             exp_prefix=exp_prefix,
    #             mode=mode,
    #             snapshot_mode='gap_and_last',
    #             snapshot_gap=50,
    #             variant=variant,
    #             use_gpu=True,
    #             num_exps_per_instance=5,
    #         )
|
[
"alexanderkhazatsky@gmail.com"
] |
alexanderkhazatsky@gmail.com
|
dfc4d96c5e1e422d9a810b84251872ff50ca2b40
|
cae22103ac9b5d3aa90a6ee48e9e6694474fe64f
|
/recipes.py
|
e994a76f42ccd32316d88a12329c623614cb7446
|
[
"MIT"
] |
permissive
|
lennykioko/Yummy_recipes_2
|
22cf7f47971bc0197bf371696939c2c1a5dcb54b
|
45e75612eff9cf5190014b4b3a7b6366d1479c7f
|
refs/heads/master
| 2021-09-09T02:07:02.043545
| 2018-03-13T09:38:00
| 2018-03-13T09:38:00
| 114,108,725
| 1
| 0
|
MIT
| 2018-01-23T11:48:35
| 2017-12-13T10:44:42
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,379
|
py
|
"""Handle data on recipes
Created: 2018
Author: Lenny
"""
# Module-level store: title -> [title, category, description].
all_recipes = {}


class Recipe(object):
    """Create, update and delete recipes held in the module-level store."""

    def create(self, title='', category='', description=''):
        """Add a new recipe; titles must be unique and all fields non-empty."""
        global all_recipes
        if title == '' or category == '' or description == '':
            return "Please fill in all fields"
        if title in all_recipes:
            return "Title already exists"
        all_recipes[title] = [title, category, description]
        return "Recipe created succesfully"

    def update(self, title='', category='', description=''):
        """Overwrite an existing recipe's fields."""
        global all_recipes
        if title == '' or category == '' or description == '':
            return "Please fill in all fields"
        if title not in all_recipes:
            return "Recipe does not exist"
        all_recipes[title] = [title, category, description]
        return "Sucessfully updated"

    def delete(self, title=''):
        """Remove a recipe by title."""
        global all_recipes
        if title == '':
            return "Please fill in all fields"
        try:
            del all_recipes[title]
        except KeyError:
            return "Recipe does not exist"
        return "Successfully deleted"
|
[
"lennykmutua@gmail.com"
] |
lennykmutua@gmail.com
|
62879a3a29b2b619eb653cd7fb95819aca929e32
|
4adc1d1b8f9badefcd8c25c6e0e87c6545ccde2c
|
/OrcLib/LibProcess.py
|
1b49001ddde66a2937ebcb8166f2bdfb5335b08d
|
[] |
no_license
|
orange21cn/OrcTestToolsKit
|
eb7b67e87a608fb52d7bdcb2b859fa588263c136
|
69b6a3c382a7043872db1282df4be9e413d297d6
|
refs/heads/master
| 2020-04-15T07:30:35.485214
| 2017-09-30T06:16:17
| 2017-09-30T06:16:17
| 68,078,991
| 5
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,107
|
py
|
# coding=utf-8
from OrcLib.LibLog import OrcLog
from OrcLib.LibNet import OrcResource
from OrcLib.LibNet import ResourceCheck
_logger = OrcLog('basic.process')
def get_mark(p_flag, p_id):
    """
    Dispatch to the mark lookup matching ``p_flag``.
    :param p_flag: record kind ("BATCH", "CASE", "STEP", "ITEM",
        "PAGE_DEF", "PAGE_DET", "WIDGET" or "DATA")
    :param p_id: id passed through to the matching lookup
    :return: the display mark, or None for an unknown flag
    """
    # All handlers are module-level functions defined in this file.
    dispatch = {
        "BATCH": get_batch_mark,
        "CASE": get_case_mark,
        "STEP": get_step_mark,
        "ITEM": get_item_mark,
        "PAGE_DEF": get_page_def_mark,
        "PAGE_DET": get_page_det_mark,
        "WIDGET": get_widget_mark,
        "DATA": get_data_mark,
    }
    handler = dispatch.get(p_flag)
    if handler is None:
        return None
    return handler(p_id)
def get_batch_mark(p_batch_id):
    """
    Return the batch number for a batch definition, or None when the
    lookup fails or returns no data.
    :param p_batch_id: batch-definition id
    :return: batch_no string or None
    """
    info = OrcResource('BatchDef').get(path=p_batch_id)

    if not ResourceCheck.result_status(info, u'查询计划数据', _logger):
        return None
    if not info.data:
        return None

    return info.data['batch_no']
def get_case_mark(p_case_id):
    """
    Return the display mark (case path) for a case definition, or None
    when the lookup fails or returns no data.
    :param p_case_id: case-definition id
    :return: case_path string or None
    """
    info = OrcResource('CaseDef').get(path=p_case_id)

    if not ResourceCheck.result_status(info, u'查询用例数据', _logger):
        return None
    if not info.data:
        return None

    return info.data['case_path']
def get_step_mark(p_step_id):
    """
    Return the display mark "case_path:step_no" for a step, or None when
    any lookup in the chain fails.
    :param p_step_id: step id
    :return: "case_path:step_no" string or None
    """
    det = OrcResource('CaseDet').get(parameter=dict(step_id=p_step_id))

    if not ResourceCheck.result_status(det, u'查询用例步骤数据', _logger):
        return None
    if not det.data:
        return None

    # Prefix with the owning case's mark.
    case_mark = get_case_mark(det.data[0]['case_id'])
    if case_mark is None:
        return None

    return "%s:%s" % (case_mark, det.data[0]['step_no'])
def get_item_mark(p_item_id):
"""
获取执行项显示标识
:param p_item_id:
:return:
"""
resource_step_det = OrcResource('StepDet')
# 获取步骤步骤项数据
step_det_info = resource_step_det.get(parameter=dict(item_id=p_item_id))
if not ResourceCheck.result_status(step_det_info, u'查询步骤步骤项数据', _logger):
return None
if not step_det_info.data:
return None
# 获取步骤标识
step_mark = get_step_mark(step_det_info.data[0]['step_id'])
if step_mark is None:
return None
return "%s:%s" % (step_mark, step_det_info.data[0]['item_no'])
def get_page_def_mark(p_page_def_id):
"""
获取页面显示标识
:param p_page_def_id:
:return:
"""
resource_page_def = OrcResource('PageDef')
# 获取 page_def 信息
page_def_info = resource_page_def.get(path=p_page_def_id)
if not ResourceCheck.result_status(page_def_info, u'查询页面数据', _logger):
return None
if not page_def_info.data:
return None
print page_def_info.data
return page_def_info.data['page_flag']
def get_page_det_mark(p_page_det_id):
"""
获取环境.页面显示标识
:param p_page_det_id:
:return:
"""
resource_page_det = OrcResource('PageDet')
resource_dict = OrcResource('Dict')
# 查询环境页面信息
page_det_info = resource_page_det.get(path=p_page_det_id)
if not ResourceCheck.result_status(page_det_info, u'查询环境页面信息', _logger):
return None
if not page_det_info.data:
return None
# 查询页面信息
page_def_info = get_page_def_mark(page_det_info.data['page_id'])
if not ResourceCheck.result_status(page_det_info, u'查询页面信息', _logger):
return None
if not page_det_info.data:
return None
# 查询环境信息
env_info = resource_dict.get(parameter=dict(dict_flag='test_env', dict_value=page_det_info.data['page_env']))
if not env_info:
return None
return "%s:%s" % (env_info.data[0]['dict_text'], page_def_info)
def get_widget_mark(p_widget_id):
"""
获取控件显示标识
:param p_widget_id:
:return:
"""
resource_widget_def = OrcResource("WidgetDef")
# 查询控件信息
widget_def_info = resource_widget_def.get(path=p_widget_id)
if not ResourceCheck.result_status(widget_def_info, u'查询控件信息', _logger):
return None
if not widget_def_info.data:
return None
print "-=-=-=", widget_def_info.data
return widget_def_info.data['widget_path']
def get_data_mark(p_id):
"""
获取数据标识
:param p_id:
:return:
"""
return p_id
|
[
"orange21cn@126.com"
] |
orange21cn@126.com
|
99b05296f13f43de6bc7287bc0cbef34834095d2
|
085f74ad9dcaf192914cf191694f60201d1c271e
|
/mindinsight/datavisual/data_transform/loader_generators/loader_generator.py
|
44e7210efb31379a38366eb7976f163dfe774dfd
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
fapbatista/mindinsight
|
8aeb48c739c505de1a2b32c694b6310a4b7d4e85
|
db5769eb80cbd13a2a9af7682c11f5667d8bf141
|
refs/heads/master
| 2023-02-08T17:25:03.950321
| 2020-12-29T12:38:28
| 2020-12-29T12:38:28
| 325,083,601
| 0
| 0
|
Apache-2.0
| 2020-12-29T12:38:30
| 2020-12-28T18:13:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,710
|
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base loader generator."""
from abc import abstractmethod
MAX_DATA_LOADER_SIZE = 15
class LoaderGenerator:
"""Base loader generator for loader generators."""
@abstractmethod
def generate_loaders(self, loader_pool):
"""
Abstract method for generating loaders.
Args:
loader_pool (dict[str, LoaderStruct]): Current loader pool in data_manager.
Returns:
dict[str, LoaderStruct], a dict of `Loader`.
"""
@abstractmethod
def check_train_job_exist(self, train_id):
"""
Abstract method for checking if train job exists.
Args:
train_id (str): Train ID.
Returns:
bool, if train job exists, return True.
"""
@abstractmethod
def generate_loader_by_train_id(self, train_id):
"""
Abstract method for generating loader by train id.
Args:
train_id (str): Train ID.
Returns:
dict[str, LoaderStruct], a dict of `Loader`.
"""
|
[
"gaocongli@huawei.com"
] |
gaocongli@huawei.com
|
520660869fff6f627576c21a06b8c5e65de775b0
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/leap/acc42607206743369b303b27783bb589.py
|
ef6fcb7229e7a8455b6ddfa3d7f2e026020e8853
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 186
|
py
|
#leap year exercise
def is_leap_year(year):
if year % 400 == 0:
return True
elif year % 100 == 0:
return False
elif year % 4 == 0:
return True
else:
return False
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
d52cdf7164481357e9cc6e6bcf6c8d7b3ad73a73
|
17265d8af472ffd4dc629145f85497ce89dc3139
|
/JaPanda/asgi.py
|
90a3f1cd809c42ed4e1c04758670d9cb3e361280
|
[] |
no_license
|
Tomo-zhiye/JaPanda
|
88eb69329d4432b4dd5d028b006f632bafff136c
|
0391140563b2c324738335b6b82018f08f596dfe
|
refs/heads/master
| 2023-04-21T19:41:35.344682
| 2021-04-29T08:14:20
| 2021-04-29T08:14:20
| 362,740,629
| 0
| 1
| null | 2021-04-30T12:46:48
| 2021-04-29T08:12:57
|
Python
|
UTF-8
|
Python
| false
| false
| 391
|
py
|
"""
ASGI config for JaPanda project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'JaPanda.settings')
application = get_asgi_application()
|
[
"you@example.com"
] |
you@example.com
|
e9b50774cd67f89ff82ec7dfb29f5273a1cbcc3d
|
8e542d1217ba58c5e04866b9a34cc0c0306701bb
|
/backend/users/api/v1/urls.py
|
5554d6d5775e2f23807cf3a0ef413a5c40d7ec57
|
[] |
no_license
|
crowdbotics-apps/mobile-11-aug-dev-8774
|
6e3d2d3faa372dbad3d18924f3c37640715a2184
|
37669c511daa89cb38804a125156531f9b798d84
|
refs/heads/master
| 2023-07-06T02:47:39.279077
| 2020-08-11T11:33:03
| 2020-08-11T11:33:03
| 286,648,832
| 0
| 0
| null | 2021-08-03T20:01:40
| 2020-08-11T04:53:05
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 254
|
py
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .viewsets import HfkhfjfhkViewSet
router = DefaultRouter()
router.register("hfkhfjfhk", HfkhfjfhkViewSet)
urlpatterns = [
path("", include(router.urls)),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
05d05ed538bd5f49ce99838e53ce9c249b735d0d
|
6bdad555fd073e8b1c73e373782249c364a0b5bd
|
/quotes/migrations/0017_auto_20170803_0027.py
|
20fa87cd87f26592cfdceb32a2102aaf0d656be7
|
[] |
no_license
|
h4k1m0u/gistutorials
|
35383f5e1197553c528bc75405094118815e72fd
|
795da31428a469becb948deefe57c725116266be
|
refs/heads/master
| 2023-01-05T20:38:39.552302
| 2019-04-29T01:03:34
| 2019-04-29T01:03:34
| 309,973,786
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 562
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-08-03 00:27
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('quotes', '0016_auto_20170325_0158'),
]
operations = [
migrations.RemoveField(
model_name='member',
name='user',
),
migrations.RemoveField(
model_name='quote',
name='member',
),
migrations.DeleteModel(
name='Member',
),
]
|
[
"h.benoudjit@gmail.com"
] |
h.benoudjit@gmail.com
|
b048c82de8d6c4b233cee4aa77e581fe72753f18
|
f889bc01147869459c0a516382e7b95221295a7b
|
/test/test_quote_data_shipping_assignment_extension_interface.py
|
b82703cfb180e7b13b38325c6450a7fa4026bb1f
|
[] |
no_license
|
wildatheart/magento2-api-client
|
249a86f5c0289743f8df5b0324ccabd76f326512
|
e6a707f85b37c6c3e4ef3ff78507a7deb8f71427
|
refs/heads/master
| 2021-07-14T16:01:17.644472
| 2017-10-18T13:33:08
| 2017-10-18T13:33:08
| 107,412,121
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,191
|
py
|
# coding: utf-8
"""
Magento Community
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.quote_data_shipping_assignment_extension_interface import QuoteDataShippingAssignmentExtensionInterface
class TestQuoteDataShippingAssignmentExtensionInterface(unittest.TestCase):
""" QuoteDataShippingAssignmentExtensionInterface unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testQuoteDataShippingAssignmentExtensionInterface(self):
"""
Test QuoteDataShippingAssignmentExtensionInterface
"""
# FIXME: construct object with mandatory attributes with example values
#model = swagger_client.models.quote_data_shipping_assignment_extension_interface.QuoteDataShippingAssignmentExtensionInterface()
pass
if __name__ == '__main__':
unittest.main()
|
[
"sander@wildatheart.eu"
] |
sander@wildatheart.eu
|
2f571add472e396515f178e0a28ad895ec6c58e3
|
24d33b98fb16cace92989a414a9121e505f47d68
|
/gromacs/helper/create_request.py
|
4813a9e35308d26978a07a3d83ee1c748483fa27
|
[
"Apache-2.0"
] |
permissive
|
michellab/BioSimSpaceCloud
|
eae9b2aff3184f097ffa667e987d7dd9a99c40e0
|
456b146a2131565e354352872d3e75a08c3652d1
|
refs/heads/master
| 2022-11-22T22:58:45.794442
| 2018-11-06T12:30:40
| 2018-11-06T12:30:40
| 137,510,733
| 2
| 1
|
Apache-2.0
| 2022-11-16T01:28:44
| 2018-06-15T16:38:06
|
Python
|
UTF-8
|
Python
| false
| false
| 648
|
py
|
"""
This script helps debugging by creating the login and request
from the .oci/config and hard-coded compartment and bucket names
"""
import oci
import json
import os
import sys
config = oci.config.from_file()
compartment = "ocid1.compartment.oc1..aaaaaaaat33j7w74mdyjenwoinyeawztxe7ri6qkfbm5oihqb5zteamvbpzq"
bucket = "test-gromacs-bucket"
key_lines = open(os.path.expanduser(config["key_file"]), "r").readlines()
del config["key_file"]
config["key_lines"] = key_lines
data = {}
data["login"] = config
data["compartment"] = compartment
data["bucket"] = bucket
try:
data["task"] = sys.argv[1]
except:
pass
print(json.dumps(data))
|
[
"chryswoods@gmail.com"
] |
chryswoods@gmail.com
|
65d06f0ba3e1e4830c7736caf8f8c72d0924672f
|
75dcb56e318688499bdab789262839e7f58bd4f6
|
/_algorithms_challenges/leetcode/LeetcodePythonProject/leetcode_0651_0700/LeetCode661_ImageSmoother.py
|
ea380490539357f477d176685b5386c8a0c01c7b
|
[] |
no_license
|
syurskyi/Algorithms_and_Data_Structure
|
9a1f358577e51e89c862d0f93f373b7f20ddd261
|
929dde1723fb2f54870c8a9badc80fc23e8400d3
|
refs/heads/master
| 2023-02-22T17:55:55.453535
| 2022-12-23T03:15:00
| 2022-12-23T03:15:00
| 226,243,987
| 4
| 1
| null | 2023-02-07T21:01:45
| 2019-12-06T04:14:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,641
|
py
|
'''
Created on Oct 8, 2017
@author: MT
'''
class Solution(object):
def imageSmoother(self, M):
"""
:type M: List[List[int]]
:rtype: List[List[int]]
"""
import math
matrix = M
if not matrix or not matrix[0]:
return []
m, n = len(matrix), len(matrix[0])
res = [[0]*n for _ in range(m)]
for i in range(m):
for j in range(n):
count = float(matrix[i][j])
num = 1.0
for x, y in (i+1, j), (i-1, j), (i, j+1), (i, j-1),\
(i+1, j+1), (i-1, j-1), (i+1, j-1), (i-1, j+1):
if 0 <= x < m and 0 <= y < n:
if matrix[x][y] != 0:
count += float(matrix[x][y])
num += 1
tmp = int(math.floor(count/num))
res[i][j] = tmp
return res
def test(self):
testCases = [
[
[2,3],
],
[
[1,1,1],
[1,0,1],
[1,1,1],
],
[
[2, 3, 4],
[5, 6, 7],
[8, 9, 10],
[11,12,13],
[14,15,16],
]
]
for matrix in testCases:
print('matrix:')
print('\n'.join([str(row) for row in matrix]))
result = self.imageSmoother(matrix)
print('result:')
print('\n'.join([str(row) for row in result]))
print('-='*30+'-')
if __name__ == '__main__':
Solution().test()
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
c1c62be621c780e8e93f7cadb39985bb6590d277
|
15b53554ef4f9418e2aaffe663789c3a86dfc269
|
/ratings/admin.py
|
99a24a50c982cc72615f41ccd703d2258670666f
|
[
"MIT"
] |
permissive
|
felkiriinya/Awards
|
698c94c96f18c2dd19056adf9b94a2aaf6fc0f0e
|
2f9aa725eafde02c648281c97635ac7242b05d2f
|
refs/heads/master
| 2023-01-23T20:59:26.081077
| 2020-11-30T14:00:02
| 2020-11-30T14:00:02
| 316,494,399
| 0
| 0
|
MIT
| 2020-11-27T17:54:14
| 2020-11-27T12:22:54
|
Python
|
UTF-8
|
Python
| false
| false
| 156
|
py
|
from django.contrib import admin
from .models import Project,Profile
# Register your models here.
admin.site.register(Profile)
admin.site.register(Project)
|
[
"felkiriinya@gmail.com"
] |
felkiriinya@gmail.com
|
dcd5f09d699ae8005ad13ed0471cc5fc9cd3ad98
|
7791cae9c097ffbfeefd34dea31586b1963eb675
|
/0x0A-python-inheritance/0-lookup.py
|
569a633018f8263664a1cd264b61d7d4a4a2f514
|
[] |
no_license
|
jicruz96/holbertonschool-higher_level_programming
|
a33b6c326e832868be791cad87ac703cccbabd64
|
84361c552b7ba1cb173d1e4bd2ea077bb3999b0d
|
refs/heads/master
| 2022-12-18T06:07:34.248796
| 2020-09-28T22:33:59
| 2020-09-28T22:33:59
| 259,228,049
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
#!/usr/bin/python3
""" defines lookup """
def lookup(obj):
""" returns list of available attributes and methods of an object """
return list(dir(obj))
|
[
"accounts@jicruz.com"
] |
accounts@jicruz.com
|
c0776c2b71dbc2bdb19e3384ee68199840921c0c
|
b3d552675b36cb88a1388fcfc531e497ad7cbee9
|
/day6/view_method_decorator_demo/front/views.py
|
1ec7acae722b4aed907b645bc8aa11d6e724563a
|
[] |
no_license
|
gaohj/1902_django
|
3cea1f0935fd983f25c6fd832b103ac5165a2e30
|
822af7b42120c6edc699bf97c800887ff84f5621
|
refs/heads/master
| 2022-12-11T10:02:50.233398
| 2019-11-26T08:33:38
| 2019-11-26T08:33:38
| 209,241,390
| 2
| 0
| null | 2022-12-08T07:28:24
| 2019-09-18T07:05:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,026
|
py
|
from django.shortcuts import render
from .models import Article
from django.views.decorators.http import require_http_methods,require_GET,require_POST,require_safe
from django.http import HttpResponse
# Create your views here.
# @require_http_methods(['GET'])
#@require_GET = @require_http_methods(['GET'])
@require_GET
def index(request):
articles = Article.objects.all()
return render(request,'index.html',context={"articles":articles})
@require_http_methods(['GET','POST'])
def add_article(request):
if request.method == 'GET':
return render(request,'add_article.html')
else:
title = request.POST.get('title')
content = request.POST.get('content')
price = request.POST.get('price')
Article.objects.create(title=title,content=content,price=price)
return HttpResponse("success")
@require_safe
def hello(request):
return HttpResponse("我只允许相对安全的请求方式来访问视图")
#get head 这两个都是进行查看 并没有增删改的操作
|
[
"gaohj@126.com"
] |
gaohj@126.com
|
9983fb155c2903b301cad1850061e7032e0161b4
|
8eab8ab725c2132bb8d090cdb2d23a5f71945249
|
/virt/Lib/site-packages/comtypes/test/test_QueryService.py
|
d3c01ad2e505a3952d4b4fe8e118d3bf5894e9af
|
[
"MIT"
] |
permissive
|
JoaoSevergnini/metalpy
|
6c88a413a82bc25edd9308b8490a76fae8dd76ca
|
c2d0098a309b6ce8c756ff840bfb53fb291747b6
|
refs/heads/main
| 2023-04-18T17:25:26.474485
| 2022-09-18T20:44:45
| 2022-09-18T20:44:45
| 474,773,752
| 3
| 1
|
MIT
| 2022-11-03T20:07:50
| 2022-03-27T22:21:01
|
Python
|
UTF-8
|
Python
| false
| false
| 797
|
py
|
import unittest
from ctypes import POINTER
import comtypes
from comtypes.client import CreateObject, GetModule
GetModule('oleacc.dll')
from comtypes.gen.Accessibility import IAccessible
@unittest.skip("This IE test is not working. We need to move it to using some other win32 API.")
class TestCase(unittest.TestCase):
def setUp(self):
self.ie = CreateObject('InternetExplorer.application')
def tearDown(self):
self.ie.Quit()
del self.ie
def test(self):
ie = self.ie
ie.navigate2("about:blank", 0)
sp = ie.Document.Body.QueryInterface(comtypes.IServiceProvider)
pacc = sp.QueryService(IAccessible._iid_, IAccessible)
self.assertEqual(type(pacc), POINTER(IAccessible))
if __name__ == "__main__":
unittest.main()
|
[
"joao.a.severgnini@gmail.com"
] |
joao.a.severgnini@gmail.com
|
a703dd3d46d7f867a7b64c23fdeb6357570abaf4
|
ac8e5e7d52bd54f1b3690aac154920e003dbba2e
|
/DA_Faster_ICR_CCR/lib/model/nms/nms_cpu.py
|
0b6ab667b4a17d2522f767cc5f43477ad70043b4
|
[] |
no_license
|
wanghui-cloud/CCR-ICR
|
16b4704cb1eff031825fb3052080fe76597626fd
|
155ff46f12627c84bde3b9c55ab6a5c8e1d1cd11
|
refs/heads/master
| 2023-04-26T07:30:43.384645
| 2021-05-28T12:21:20
| 2021-05-28T12:21:20
| 371,691,104
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,982
|
py
|
from __future__ import absolute_import
import numpy as np
import torch
def nms_cpu(dets, thresh):
# dets [12000, 5]= proposals_single, scores_single.squeeze(1)
dets = dets.numpy()
# x1、y1、x2、y2、以及score赋值
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
# 每一个检测框的面积
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
# 按照score置信度降序排序
order = scores.argsort()[::-1] # argsort函数返回的是数组值从小到大的索引值
# 保留的结果框集合
keep = []
while order.size > 0:
# 保留得分最高的一个的索引
i = order.item(0)
keep.append(i) # 将其作为保留的框
# 计算置信度最大的框(order[0])与其它所有的框(order[1:],即第二到最后一个)框的IOU,
xx1 = np.maximum(x1[i], x1[order[1:]]) # 逐位比较取其大者
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.maximum(x2[i], x2[order[1:]])
yy2 = np.maximum(y2[i], y2[order[1:]])
# 计算相交的面积,不重叠时面积为0
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h #计算相交框的面积
# 计算IOU:重叠面积/(面积1+面积2-重叠面积)
ovr = inter / (areas[i] + areas[order[1:]] - inter)
# 取出IOU小于阈值的框
# 只有条件 (condition),没有x和y,则输出满足条件 (即非0) 元素的坐标 (等价于numpy.nonzero)
inds = np.where(ovr <= thresh)[0]
# 更新排序序列
order = order[inds + 1]
# 删除IOU大于阈值的框,因为从第二个数开始,当作第一个数,所以需要+1,如[1,2,3,4],将从[2,3,4]开始,
# 若选择第一个数2,下标为0,所以需要+1,才能对应原来数[1,2,3,4],选择为2.
return torch.IntTensor(keep) # 返回索引值
|
[
"wh@hnu.edu.cn"
] |
wh@hnu.edu.cn
|
541f0193507c2dc7add9c3db34f5c64f48609ab5
|
2b6116b967f6b02a6c62392058623ba8824f5ee2
|
/deal/migrations/0037_auto_20190809_0846.py
|
fe6f69d2904c855aac6077b60b714befb45a1654
|
[] |
no_license
|
tayursky/med-crm
|
68a16d771a91a9a5ff3e61acd00c08ad6297c405
|
8e39904968a8217b9cd4593acc3afa27ff4584ba
|
refs/heads/master
| 2023-01-11T08:28:23.762631
| 2020-03-15T20:53:59
| 2020-03-15T20:53:59
| 247,546,343
| 0
| 0
| null | 2023-01-06T02:27:23
| 2020-03-15T20:30:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,283
|
py
|
# Generated by Django 2.2.1 on 2019-08-09 08:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('company', '0005_auto_20190802_0616'),
('deal', '0036_auto_20190801_1443'),
]
operations = [
migrations.AddField(
model_name='historicalservice',
name='default_master',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='company.User', verbose_name='Правщик по умолчанию'),
),
migrations.AddField(
model_name='service',
name='default_master',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='service_default_master', to='company.User', verbose_name='Правщик по умолчанию'),
),
migrations.AlterField(
model_name='service',
name='masters',
field=models.ManyToManyField(blank=True, limit_choices_to={'account__group__in': ['Правщики']}, related_name='service_masters', to='company.User', verbose_name='Правщики'),
),
]
|
[
"tayursky@gmail.com"
] |
tayursky@gmail.com
|
74f9f70c5cab3a26af146ed5bb0ee63971b5fea2
|
5ed2d0e107e4cdcd8129f418fdc40f1f50267514
|
/bnb/PreferenceList/test.py
|
f69a3fdf4ef1e513313bbc0f839736e895ebbe5f
|
[] |
no_license
|
tliu57/Leetcode
|
6cdc3caa460a75c804870f6615653f335fc97de1
|
c480697d174d33219b513a0b670bc82b17c91ce1
|
refs/heads/master
| 2020-05-21T03:14:07.399407
| 2018-07-08T18:50:01
| 2018-07-08T18:50:01
| 31,505,035
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 888
|
py
|
class Solution(object):
def preferenceList(self, preferences):
map = {}
degree = {}
result = []
if not preferences:
return result
for pref in preferences:
for elem in pref:
degree[elem] = 0
for pref in preferences:
for i in range(1, len(pref)):
for j in range(i):
if pref[j] not in map:
post_elem_set = set()
post_elem_set.add(pref[i])
map[pref[j]] = post_elem_set
else:
map[pref[j]].add(pref[i])
for key in map:
for elem in map[key]:
degree[elem] += 1
q = []
for c in degree:
if degree[c] == 0:
q.append(c)
while q:
char = q.pop(0)
result.append(char)
if char in map:
for c in map[char]:
degree[c] -= 1
if degree[c] == 0:
q.append(c)
return result
sol = Solution()
preferences = [
[3, 5, 7, 9],
[2, 3, 8],
[5, 8]
]
print sol.preferenceList(preferences)
|
[
"tliu57@asu.edu"
] |
tliu57@asu.edu
|
9873cc835d18dd5753bb7da09d533cd4103af5ed
|
0682b9249e65c3bf0ee70f3c4fe85196562f1a95
|
/tests/core/parameter/test_parameter_node.py
|
3e1da62026cec97c59dd05aa327a0e469348d528
|
[
"Apache-2.0"
] |
permissive
|
perillaroc/takler
|
7039cc3ba1e53be851993820fe8d684f84615fd2
|
654c2224e529c2f7c5fd600ee9272dcc24fd0287
|
refs/heads/master
| 2023-09-02T21:25:34.347695
| 2023-08-25T02:09:10
| 2023-08-25T02:09:10
| 23,487,487
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,896
|
py
|
import pytest
from takler.core import Parameter
@pytest.fixture
def simple_flow_with_parameter(simple_flow):
flow1 = simple_flow.flow1
flow1.add_parameter("ECF_HOME", "/home/johndoe")
flow1.add_parameter("NODES", 4)
flow1.add_parameter("TIME_INTERVAL", 0.1)
container1 = simple_flow.container1
container1.add_parameter("TASKS", 32)
task1 = simple_flow.task1
task1.add_parameter("FLAG", True)
return simple_flow
def test_add_parameter(simple_flow):
flow1 = simple_flow.flow1
flow1.add_parameter("ECF_HOME", "/home/johndoe")
flow1.add_parameter("NODES", 4)
flow1.add_parameter("TIME_INTERVAL", 0.1)
assert flow1.user_parameters["ECF_HOME"] == Parameter(name="ECF_HOME", value="/home/johndoe")
assert flow1.user_parameters["NODES"] == Parameter(name="NODES", value=4)
assert flow1.user_parameters["TIME_INTERVAL"] == Parameter(name="TIME_INTERVAL", value=0.1)
container1 = simple_flow.container1
container1.add_parameter("TASKS", 32)
assert container1.user_parameters["TASKS"] == Parameter(name="TASKS", value=32)
task1 = simple_flow.task1
task1.add_parameter("FLAG", True)
assert task1.user_parameters["FLAG"] == Parameter(name="FLAG", value=True)
def test_find_parameter(simple_flow_with_parameter):
flow1 = simple_flow_with_parameter.flow1
assert flow1.find_parameter("ECF_HOME") == Parameter("ECF_HOME", "/home/johndoe")
assert flow1.find_parameter("NO_EXIST") is None
container1 = simple_flow_with_parameter.container1
assert container1.find_parameter("TASKS") == Parameter("TASKS", 32)
assert container1.find_parameter("NO_EXIST") is None
assert container1.find_parameter("ECF_HOME") is None
task1 = simple_flow_with_parameter.task1
assert task1.find_parameter("FLAG") == Parameter("FLAG", True)
assert task1.find_parameter("NO_EXIST") is None
assert task1.find_parameter("TASKS") is None
assert task1.find_parameter("ECF_HOME") is None
def test_find_parent_parameter(simple_flow_with_parameter):
flow1 = simple_flow_with_parameter.flow1
assert flow1.find_parent_parameter("ECF_HOME") == Parameter("ECF_HOME", "/home/johndoe")
assert flow1.find_parent_parameter("NO_EXIST") is None
container1 = simple_flow_with_parameter.container1
assert container1.find_parent_parameter("TASKS") == Parameter("TASKS", 32)
assert container1.find_parent_parameter("NO_EXIST") is None
assert container1.find_parent_parameter("ECF_HOME") == Parameter("ECF_HOME", "/home/johndoe")
task1 = simple_flow_with_parameter.task1
assert task1.find_parent_parameter("FLAG") == Parameter("FLAG", True)
assert task1.find_parent_parameter("NO_EXIST") is None
assert task1.find_parent_parameter("TASKS") == Parameter("TASKS", 32)
assert task1.find_parent_parameter("ECF_HOME") == Parameter("ECF_HOME", "/home/johndoe")
|
[
"perillaroc@gmail.com"
] |
perillaroc@gmail.com
|
28dc8edf1052517b8b4c2dd925c7538b887ebf09
|
3958e68814826e7104c4f013ea2aac4e5d77223a
|
/test/test_accuracy_large_files.py
|
c0aa5296d3c686623a79dabbbb057db7662534fd
|
[
"Apache-2.0"
] |
permissive
|
phutares/ocreval
|
2a1c935d36922755e1d89769c45371d56d344c70
|
873a0de5796c0b9ccf07a549afdd30159a9e0b3e
|
refs/heads/master
| 2023-04-19T06:41:15.393399
| 2021-04-16T20:57:09
| 2021-04-16T20:57:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,992
|
py
|
#!/usr/bin/env python
# -*- encoding: UTF-8 -*-
"""
Tests accuracy on LARGE UTF-8 files.
"""
import random
import subprocess
import tempfile
import os.path as p
# Alias range as xrange in Python 3:
try:
xrange
except NameError:
xrange = range
# Create a Python 2/3 Unicode string literal:
try:
unicode
except NameError:
u = str
else:
u = lambda s: s.decode('UTF-8')
# Path to accuracy program
ACCURACY = p.join(p.dirname(p.dirname(p.realpath(__file__))),
'bin', 'accuracy')
assert p.exists(ACCURACY), 'Could not find ' + ACCURACY
# http://www.languagegeek.com/isolate/haidastory.html
corpus = u('''\
Aaniisuu tangaa g̱aging.ang ’wan suuga. ’Ll xidgwangaas, x̱uyaa’aa. Tllgu
ḵ’aawgyaa hllng.aaygi ’laa ḵyaang.aas. Ḵawdiuu gwaay g̱ud gwaa nang ḵadlaa
ḵ’ayg̱udyaas ’laagu ḵ’aawgaay g̱an ’laa g̱á ’laa xidaas. Á tl’l sg̱aana ḵidaads
’yaahlgaagaas g̱iinuus gangaang ’laagu gud gwii x̱iihlt’ahliyaagaas. Ga
sg̱aanag̱waa g̱ax̱aas ’laa t’isda ḵ’a sḵ’agilaang.aas, tll gwii x̱an, hahl gwii’ad
wah gwii’aa. G̱adagaas gyaanuu’asing g̱aalgaagaang ’wan suuga.
Nang kilsdlaas naag̱ag̱a.aw tadll chi’a’aawaagan. Sing ḵ’alg̱ada ’ll ḵaaxuhls
gyaan ’ll kindagaang.aas. Sda ’laa xid k’udahldats’aasii gyaan gagu ’laa
ḵ’aw’aawaasgu x̱an ’laa ḵ’aawgangas.
''')
dictionary = tuple(word for word in corpus.split())
alphabet = [char for char in corpus if char not in ' \n']
def one_in(n):
return random.choice(xrange(n)) == 1
def change_letter(word):
letter_index = random.choice(xrange(len(word)))
mutation = random.choice(alphabet)
return word[:letter_index] + mutation + word[letter_index + 1:]
if __name__ == '__main__':
import sys
amount_of_words = int(sys.argv[1]) if len(sys.argv) > 1 else 32768
# Create temporary files for each...
with tempfile.NamedTemporaryFile('wb') as correct_file,\
tempfile.NamedTemporaryFile('wb') as generated_file:
# Generate A LOT of random words
for _ in xrange(amount_of_words):
end = b'\n' if one_in(10) else b' '
word = random.choice(dictionary)
correct_file.write(word.encode('UTF-8'))
# Occasionally, typo a word in the generated file.
generated_word = change_letter(word) if one_in(1000) else word
generated_file.write(generated_word.encode('UTF-8'))
# Write a space or newline.
correct_file.write(end)
generated_file.write(end)
# Finish off the file with a new line and flush the output.
if end != b'\n':
correct_file.write(b'\n')
generated_file.write(b'\n')
correct_file.flush()
generated_file.flush()
# This will fail if accuracy itself fails.
subprocess.check_call([ACCURACY,
correct_file.name, generated_file.name])
|
[
"easantos@ualberta.ca"
] |
easantos@ualberta.ca
|
63d0f327304779eea8cc5e57fe84a233c6c7a91a
|
ce4f7f8e9336b8bbf9cbfe147d922e37034ab6c3
|
/abc193/b/main.py
|
0e3c7e0f78cd78c0c45811485390821a3ab19d13
|
[] |
no_license
|
kussy-tessy/atcoder
|
5604919747242ee9740b9131bb6e168e96af0151
|
ee917fa5a5218d4a9e72f710d0d844e7c203f13b
|
refs/heads/master
| 2023-07-21T09:25:15.464881
| 2021-09-04T14:06:02
| 2021-09-04T14:06:02
| 311,221,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
#!/usr/bin/env python3
N = int(input())
APXs = []
for _ in range(N):
APXs.append(tuple(map(int,(input().split()))))
ans = float('inf')
for A, P, X in APXs:
if A < X:
ans = min(P, ans)
print(-1 if ans == float('inf') else ans)
|
[
"teppei04285000@gmail.com"
] |
teppei04285000@gmail.com
|
e52d4984fc5ca34ed8bb3b24514f0d20e84ac60e
|
ac235a23f22be0d6f1818bb53902177f9969813a
|
/tests/lib-injection/dd-lib-python-init-test-django-uvicorn/django_app.py
|
dc0e14bad8c09edf3e6da8eadcefc9bd8dde30c3
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
DataDog/dd-trace-py
|
f09d6d48c4c69aea68f999fc8a458ade5c6150cf
|
1e3bd6d4edef5cda5a0831a6a7ec8e4046659d17
|
refs/heads/1.x
| 2023-09-01T20:25:26.746324
| 2023-09-01T18:54:37
| 2023-09-01T18:54:37
| 61,572,326
| 461
| 426
|
NOASSERTION
| 2023-09-14T20:38:57
| 2016-06-20T18:52:23
|
Python
|
UTF-8
|
Python
| false
| false
| 414
|
py
|
import os
from django.core.asgi import get_asgi_application
from django.http import HttpResponse
from django.urls import path
filepath, extension = os.path.splitext(__file__)
ROOT_URLCONF = os.path.basename(filepath)
DEBUG = False
SECRET_KEY = "fdsfdasfa"
ALLOWED_HOSTS = ["*"]
def index(request):
return HttpResponse("test")
urlpatterns = [
path("", index),
]
application = get_asgi_application()
|
[
"noreply@github.com"
] |
DataDog.noreply@github.com
|
e3d5ef7c2bf734988331695db571c4c9fba3f293
|
a7744d27342514682189ff5fac7f198b380d1997
|
/robonova/kinematics/kinelib/dh.py
|
83496565a6147db910fb662d666da5fa958f5cd6
|
[] |
no_license
|
roboshepherd/myro-epuck
|
b6469cb2c30b50d625068eb9e306b8ac2cbe4fad
|
564b7f2a7d262b11d8adc86b5f5efb1b825aef53
|
refs/heads/master
| 2020-05-17T00:21:56.339519
| 2010-03-16T16:36:26
| 2010-03-16T16:36:26
| 565,242
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,065
|
py
|
#DH Matrix representation of manipulator kinematics
#
# The original robot toolbox functions used a DH matrix to describes the
# kinematics of a manipulator in a general way.
#
# For an n-axis manipulator, DH is an nx4 or nx5 matrix, whose rows
# comprise
#
# 1 alpha link twist angle
# 2 A link length
# 3 theta link rotation angle
# 4 D link offset distance
# 5 sigma joint type, 0 for revolute, non-zero for prismatic
#
# If the last column is not given the manipulator is all-revolute.
#
# The first 5 columns of a DYN matrix contain the kinematic parameters
# and maybe used anywhere that a DH kinematic matrix is required -- the
# dynamic data is ignored.
#
# The functionality of the DH matrix has been replaced by the ROBOT object.
#
# See also: ROBOT, DYN.
# MOD.HISTORY
# 1/95 reverse labels on A & D
# $Log: dh.m,v $
# Revision 1.2 2002/04/01 11:47:11 pic
# General cleanup of code: help comments, see also, copyright, remnant dh/dyn
# references, clarification of functions.
#
# $Revision: 1407 $
# Copyright (C) 1993-2002, by Peter I. Corke
|
[
"Mdomarfaruque.Sarker@newport.ac.uk"
] |
Mdomarfaruque.Sarker@newport.ac.uk
|
193ce4e88456d4dcb2d636d9453aa8ed841f6d0e
|
407490cf6e79695276236480bb8c00dd97a6b007
|
/weekly_reports.py
|
2f1432a1d63e51a0d84a222f554eabf428dd6a6d
|
[] |
no_license
|
skreynolds/weekly_reports_dev
|
d17ac47c4642d946d294634152da77defbb2bd85
|
cee840e62f21cdc69f6320f2ec0dc538743a1940
|
refs/heads/master
| 2020-04-06T16:43:45.733017
| 2018-11-16T02:27:27
| 2018-11-16T02:27:27
| 157,631,874
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,769
|
py
|
#!/usr/bin/python
import psycopg2
from utils.config import config
from utils.file_conversion import *
from xref.tables import *
from xref.fixed_widths import *
# Specify the current year the routine is running
DATE = 2018
if __name__ == '__main__':
#############################################################
# RUN VALIDATION TO ENSURE SUCCESSFUL EXECUTION
#############################################################
# SECTION OF CODE SHOULD CHECK FOR ESSENTIAL FILES AND ESSENTIAL
# CONDITIONS TO ENSURE THAT THE SCRIPT WILL SUCCESSFULLY EXECUTE
# Connect to the PostgreSQL database server
conn = None
try:
#############################################################
# PROCESS FIXED WIDTH FILES OUTPUT FROM CALLISTA
#############################################################
# File path for VET_weekly_AHC_YYYY.txt
file = './data/VET_weekly_AHC_' + str(DATE) + '.txt'
# Execute file conversion script
convert_fw_to_csv_AHC(file, weekly_ahc_slices)
# File path for VET_weekly_AHC_[YYYY-1].txt
file = './data/VET_weekly_AHC_' + str(DATE-1) + '.txt'
# Execute file conversion script
convert_fw_to_csv_AHC(file, weekly_ahc_slices)
# File path for VET_Course_Completions_2018.txt
file = './data/VET_Course_Completions_' + str(DATE) + '.txt'
# Execute file conversion script
convert_fw_to_csv_completions(file, completions_slices)
# File path for VET_2018_Apprentice.txt
file = './data/VET_2018_Apprentice.txt'
# Execute file conversion script
convert_fw_to_csv_apprentices(file, apprentices_slices)
#############################################################
# CONNECT TO THE WEEKLY REPORTS DATABASE
#############################################################
# read connection parameters
params = config()
# connect to the PostgreSQL server
print('Connecting to the PostgreSQL database...')
conn = psycopg2.connect(**params)
# create a cursor
cur = conn.cursor()
#############################################################
# BUILD XLOOKUP
#############################################################
# create all of the xlookup tables
for table_name in xlookup_tables.keys():
# create table
cur.execute(xlookup_tables[table_name])
# define the file path for loading the data
csv_path = 'C:\\Users\\sreynolds2\\Documents\\dev\\weekly_reports_dev\\data\\xlookup\\' + table_name + '.txt'
# load the data into the xlookup table
copy_sql = """
COPY """ + table_name + """ FROM stdin WITH DELIMITER as ',' CSV QUOTE as '"'
"""
with open(csv_path, 'r') as f:
cur.copy_expert(sql=copy_sql, file=f)
#############################################################
# BUILD TABLES
#############################################################
# create errors_for_correction_by_vet_teams table
cur.execute(tables['xref_error_for_correction_template'].format('errors_for_correction_by_vet_teams'))
# create errors_for_correction_by_vet_stats_officer table
cur.execute(tables['xref_error_for_correction_template'].format('errors_for_correction_by_vet_stats_officer'))
# create errors_for_correction_by_vet_stats_officer table
cur.execute(tables['xref_error_for_correction_template'].format('errors_for_correction_by_vet_teams_course_intention'))
# create vet_course_completion_YYYY table
cur.execute(tables['xref_vet_course_completions_YYYY_template'].format('vet_course_completions_' + str(DATE)))
# create weekly_current table - (VET_weekly_AHC_YYYY is imported later)
cur.execute(tables['xref_weekly_template'].format('current'))
# create weekly_current table - (VET_weekly_AHC_[YYYY-1] is imported later)
cur.execute(tables['xref_weekly_template'].format(str(DATE-1)))
# create vet_course_completions table - (VET_Course_Completions_YYYY imported later)
cur.execute(tables['xref_course_completions'])
# create vet_apprentices table - (VET_YYYY_Apprentice is imported later)
cur.execute(tables['xref_vet_apprentice_template'])
# create student table
cur.execute(tables['xref_student_template'])
# create student_course_attempt table
cur.execute(tables['xref_student_course_attempt_template'])
# create student_unit_attempt table
cur.execute(tables['xref_student_unit_attempt_template'])
# create activity_pattern_trend table
cur.execute(tables['xref_activity_pattern_trend_template'])
# create unresulted_sua_2017 table
cur.execute(tables['xref_unresulted_sua_template'].format(str(DATE-1)))
# create team_activity table
cur.execute(tables['xref_team_activity_template'])
# create apprentice_sua table
cur.execute(tables['xref_apprentice_sua_template'])
# create apprentice_course table
cur.execute(tables['xref_apprentice_course_template'])
# create xref_vfh_unit_tp table
cur.execute(tables['xref_vfh_unit_tp_template'])
#############################################################
# IMPORT DATA TO TABLES
#############################################################
############################################################
# Import the VET_weekly_AHC_YYYY.csv
csv_path = 'C:\\Users\\sreynolds2\\Documents\\dev\\weekly_reports_dev\\data\\VET_weekly_AHC_' + str(DATE) + '.csv'
copy_sql = """
COPY weekly_current FROM stdin WITH DELIMITER as ',' CSV QUOTE as '"'
"""
with open(csv_path, 'r') as f:
cur.copy_expert(sql=copy_sql, file=f)
############################################################
# Import the VET_weekly_AHC_[YYYY-1].csv
csv_path = 'C:\\Users\\sreynolds2\\Documents\\dev\\weekly_reports_dev\\data\\VET_weekly_AHC_' + str(DATE-1) + '.csv'
copy_sql = """
COPY weekly_""" + str(DATE-1) + """ FROM stdin WITH DELIMITER as ',' CSV QUOTE as '"'
"""
with open(csv_path, 'r') as f:
cur.copy_expert(sql=copy_sql, file=f)
############################################################
# Import the VET_Course_Completions_YYYY.csv
csv_path = 'C:\\Users\\sreynolds2\\Documents\\dev\\weekly_reports_dev\\data\\VET_Course_Completions_' + str(DATE) + '.csv'
copy_sql = """
COPY vet_course_completions FROM stdin WITH DELIMITER as ',' CSV QUOTE as '"'
"""
with open(csv_path, 'r') as f:
cur.copy_expert(sql=copy_sql, file=f)
############################################################
# Import the VET_2018_Apprentice.csv
csv_path = 'C:\\Users\\sreynolds2\\Documents\\dev\\weekly_reports_dev\\data\\VET_' + str(DATE) + '_Apprentice.csv'
copy_sql = """
COPY vet_apprentice FROM stdin WITH DELIMITER as ',' CSV QUOTE as '"'
"""
with open(csv_path, 'r') as f:
cur.copy_expert(sql=copy_sql, file=f)
############################################################
# Import the xref_vfh_unit_tp.csv
csv_path = 'C:\\Users\\sreynolds2\\Documents\\dev\\weekly_reports_dev\\data\\xlookup\\xref_vfh_unit_tp.txt'
copy_sql = """
COPY xref_vfh_unit_tp FROM stdin WITH DELIMITER as ',' CSV QUOTE as '"'
"""
with open(csv_path, 'r') as f:
cur.copy_expert(sql=copy_sql, file=f)
#############################################################
# RUN QUERIES TO BUILD REPORTS AND ERROR TABLES
#############################################################
#############################################################
# EXPORT DATA
#############################################################
#############################################################
# CLOSE THE DATABASE
#############################################################
# close the communication with the PostgreSQL
cur.close()
# commit the changes
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
print('Database connection closed.')
|
[
"shane.k.reynolds@gmail.com"
] |
shane.k.reynolds@gmail.com
|
0b2b34071fffde256b2760d76923b04e0df0a9a3
|
02dc1f70da529c7c2aa45dcfe5e0a3aeeb1f98cc
|
/src/063_unique_paths_ii/063_unique_paths_ii.py
|
d86011baf7cc85f588f4a5609c367411ac6c3ac4
|
[] |
no_license
|
ypliu/leetcode-python
|
2a5a14de6310cae19b9cc42091d81586e697fffb
|
13e61c13c406a73debcfc996937cf16f715d55d1
|
refs/heads/master
| 2020-03-27T06:17:32.303442
| 2019-02-24T14:50:11
| 2019-02-24T14:50:11
| 146,094,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,309
|
py
|
class Solution(object):
def uniquePathsWithObstacles(self, obstacleGrid):
"""
:type obstacleGrid: List[List[int]]
:rtype: int
"""
if not obstacleGrid or 0 == len(obstacleGrid) or 0 == len(obstacleGrid[0]) or 0 != obstacleGrid[0][0]:
return 0
m, n = len(obstacleGrid), len(obstacleGrid[0])
res = [0 for _ in xrange(n)]
for j in xrange(n-1, -1, -1):
if 0 == obstacleGrid[m-1][j]:
res[j] = 1
elif 1 == obstacleGrid[m-1][j]:
break
else:
print 'Ilegal value: %d at (%d, %d).' %(obstacleGrid[m-1][j], (m-1), j)
return
for i in xrange(m-2, -1, -1):
if 1 == obstacleGrid[i][-1]:
res[-1] = 0
for j in xrange(n-2, -1, -1):
val = obstacleGrid[i][j]
if 1 == val:
res[j] = 0
elif 0 == val:
res[j] += res[j+1]
else:
print 'Ilegal value: %d at (%d, %d).' %(val, i, j)
return
return res[0]
# debug
s = Solution()
print s.uniquePathsWithObstacles([ [0,0,0], [0,1,0], [0,0,0] ])
print s.uniquePathsWithObstacles([ [0,0,0], [0,1,0], [0,2,0] ])
|
[
"noreply@github.com"
] |
ypliu.noreply@github.com
|
01555860ba4b5b854365fb301034b8d6369a242b
|
600df3590cce1fe49b9a96e9ca5b5242884a2a70
|
/third_party/webrtc/tools/internal_tools.gyp
|
bee8a8bf0e2186703fd1d761f9891e99b7d53ecf
|
[
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-google-patent-license-webrtc",
"LicenseRef-scancode-takuya-ooura",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"MS-LPL",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown"
] |
permissive
|
metux/chromium-suckless
|
efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a
|
72a05af97787001756bae2511b7985e61498c965
|
refs/heads/orig
| 2022-12-04T23:53:58.681218
| 2017-04-30T10:59:06
| 2017-04-30T23:35:58
| 89,884,931
| 5
| 3
|
BSD-3-Clause
| 2022-11-23T20:52:53
| 2017-05-01T00:09:08
| null |
UTF-8
|
Python
| false
| false
| 842
|
gyp
|
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
# This file is used for internal tools used by the WebRTC code only.
{
'includes': [
'../build/common.gypi',
],
'targets': [
{
'target_name': 'command_line_parser',
'type': 'static_library',
'sources': [
'simple_command_line_parser.h',
'simple_command_line_parser.cc',
],
'dependencies': [
'<(webrtc_root)/base/base.gyp:gtest_prod',
],
}, # command_line_parser
],
}
|
[
"enrico.weigelt@gr13.net"
] |
enrico.weigelt@gr13.net
|
21fdc4b381f624fa34e85401ed931bd31dd672a9
|
1e6871bc3bc87b67b1a18a0c69f17d901e1a1a7f
|
/tests/test_api_v1.py
|
285f2da27d336deb7e49dec7ffd00bb72e7ee108
|
[
"Apache-2.0"
] |
permissive
|
fabric8-analytics/fabric8-analytics-jobs
|
24ada2f21c728840df935be792c744839535e094
|
545b932a0eb4acac5f04753010dca446b0425a6a
|
refs/heads/master
| 2023-04-20T19:53:42.321637
| 2023-03-23T12:14:18
| 2023-03-23T12:14:18
| 89,703,953
| 6
| 29
|
Apache-2.0
| 2023-03-23T12:14:20
| 2017-04-28T12:27:05
|
Python
|
UTF-8
|
Python
| false
| false
| 483
|
py
|
"""Tests for the module 'api_v1'."""
# TODO enable when new test(s) will be added
# from f8a_jobs.api_v1 import *
class TestApiV1Functions(object):
"""Tests for the module 'api_v1'."""
def setup_method(self, method):
"""Set up any state tied to the execution of the given method in a class."""
assert method
def teardown_method(self, method):
"""Teardown any state that was previously setup with a setup_method call."""
assert method
|
[
"ptisnovs@redhat.com"
] |
ptisnovs@redhat.com
|
b56c74b0a911abe2b46939ff7fcbdd05407bb9e3
|
00414b9d72c922b873cc2ebcb4d1ce068de5007f
|
/src/backend/partaj/core/migrations/0016_add_referral_activity.py
|
d7cf100798aec83db3fde67ef48c14dd605cf6a7
|
[
"MIT"
] |
permissive
|
MTES-MCT/partaj
|
1de9691dc6e7615c1d228a0e39c9208b97222dab
|
22e4afa728a851bb4c2479fbb6f5944a75984b9b
|
refs/heads/main
| 2023-08-07T08:22:30.290701
| 2023-08-04T16:57:38
| 2023-08-04T17:22:26
| 237,007,942
| 4
| 3
|
MIT
| 2023-09-14T19:10:26
| 2020-01-29T14:54:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,466
|
py
|
# Generated by Django 3.0.5 on 2020-05-20 13:05
import uuid
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("contenttypes", "0002_remove_content_type_name"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("core", "0015_add_referral_answer"),
]
operations = [
migrations.CreateModel(
name="ReferralActivity",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
help_text="Primary key for the referral activity as UUID",
primary_key=True,
serialize=False,
verbose_name="id",
),
),
(
"created_at",
models.DateTimeField(auto_now_add=True, verbose_name="created at"),
),
(
"verb",
models.CharField(
choices=[
("assigned", "assigned"),
("answered", "answered"),
("created", "created"),
],
help_text="Verb expressing the action this activity represents",
max_length=50,
verbose_name="verb",
),
),
(
"item_object_id",
models.CharField(
blank=True,
help_text="ID of the linked item",
max_length=255,
verbose_name="item object id",
),
),
(
"actor",
models.ForeignKey(
blank=True,
help_text="User who generated this activity",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="referral_activity",
related_query_name="referral_activity",
to=settings.AUTH_USER_MODEL,
verbose_name="actor",
),
),
(
"item_content_type",
models.ForeignKey(
blank=True,
help_text="Model for the linked item",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="contenttypes.ContentType",
verbose_name="item content type",
),
),
(
"referral",
models.ForeignKey(
help_text="Referral on which the activity took place",
on_delete=django.db.models.deletion.CASCADE,
related_name="activity",
related_query_name="activity",
to="core.Referral",
verbose_name="referral",
),
),
],
),
]
|
[
"me@mbenadda.com"
] |
me@mbenadda.com
|
334590c3e97d988d696edd6ac40aef16b17a0f72
|
29a04fe1221acab7b3d799a4c25db8fd8c141995
|
/reagent/training/world_model/compress_model_trainer.py
|
7adc1b03ff2630541e6440d402d5e0bc505b1449
|
[
"BSD-3-Clause"
] |
permissive
|
jaynotleno/ReAgent
|
fb588656890ac9d2b19618528ae21bb750a6eaa6
|
acb98f8de7a5604487cd921545b631fdd2541021
|
refs/heads/master
| 2023-04-18T05:45:24.823728
| 2021-04-22T06:35:01
| 2021-04-22T06:35:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,228
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import reagent.core.types as rlt
import torch
import torch.nn.functional as F
from reagent.core.parameters import Seq2RewardTrainerParameters
from reagent.models.fully_connected_network import FullyConnectedNetwork
from reagent.models.seq2reward_model import Seq2RewardNetwork
from reagent.training.reagent_lightning_module import ReAgentLightningModule
from reagent.training.utils import gen_permutations
from reagent.training.world_model.seq2reward_trainer import get_Q
logger = logging.getLogger(__name__)
class CompressModelTrainer(ReAgentLightningModule):
""" Trainer for fitting Seq2Reward planning outcomes to a neural network-based policy """
def __init__(
self,
compress_model_network: FullyConnectedNetwork,
seq2reward_network: Seq2RewardNetwork,
params: Seq2RewardTrainerParameters,
):
super().__init__()
self.compress_model_network = compress_model_network
self.seq2reward_network = seq2reward_network
self.params = params
# permutations used to do planning
self.all_permut = gen_permutations(
params.multi_steps, len(self.params.action_names)
)
def configure_optimizers(self):
optimizers = []
optimizers.append(
torch.optim.Adam(
self.compress_model_network.parameters(),
lr=self.params.compress_model_learning_rate,
)
)
return optimizers
def train_step_gen(self, training_batch: rlt.MemoryNetworkInput, batch_idx: int):
loss, accuracy = self.get_loss(training_batch)
detached_loss = loss.cpu().detach().item()
accuracy = accuracy.item()
logger.info(
f"Seq2Reward Compress trainer MSE/Accuracy: {detached_loss}, {accuracy}"
)
self.reporter.log(mse_loss=detached_loss, accuracy=accuracy)
yield loss
# pyre-ignore inconsistent override because lightning doesn't use types
def validation_step(self, batch: rlt.MemoryNetworkInput, batch_idx: int):
mse, acc = self.get_loss(batch)
detached_loss = mse.cpu().detach().item()
acc = acc.item()
state_first_step = batch.state.float_features[0]
# shape: batch_size, action_dim
q_values_all_action_all_data = (
self.compress_model_network(state_first_step).cpu().detach()
)
q_values = q_values_all_action_all_data.mean(0).tolist()
action_distribution = torch.bincount(
torch.argmax(q_values_all_action_all_data, dim=1),
minlength=len(self.params.action_names),
)
# normalize
action_distribution = (
action_distribution.float() / torch.sum(action_distribution)
).tolist()
self.reporter.log(
eval_mse_loss=detached_loss,
eval_accuracy=acc,
eval_q_values=[q_values],
eval_action_distribution=[action_distribution],
)
return (detached_loss, q_values, action_distribution, acc)
def get_loss(self, batch: rlt.MemoryNetworkInput):
# shape: batch_size, num_action
compress_model_output = self.compress_model_network(
batch.state.float_features[0]
)
state_first_step = batch.state.float_features[0]
target = get_Q(
self.seq2reward_network,
state_first_step,
self.all_permut,
)
assert (
compress_model_output.size() == target.size()
), f"{compress_model_output.size()}!={target.size()}"
mse = F.mse_loss(compress_model_output, target)
with torch.no_grad():
# pyre-fixme[16]: `Tuple` has no attribute `indices`.
target_action = torch.max(target, dim=1).indices
model_action = torch.max(compress_model_output, dim=1).indices
accuracy = torch.mean((target_action == model_action).float())
return mse, accuracy
def warm_start_components(self):
logger.info("No warm start components yet...")
components = []
return components
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
4c07dcbfb8366a5624a377756e0b8e8daa6b8ed4
|
0018a3b0429ce07f026534b61274e8c5fa25a0ef
|
/tests/test_crawler.py
|
2896f6fe151f9a66353f6aa3029c8bac7b4ce43e
|
[] |
no_license
|
carlosmaniero/cptm_analytics
|
ee184bf44efc0170c11261323f8214041d594d53
|
b1e3d1da081e429a48ce9b8fb4e9d56efd1217d5
|
refs/heads/master
| 2021-01-01T04:33:42.996511
| 2016-05-24T20:43:30
| 2016-05-24T20:43:30
| 59,150,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,542
|
py
|
import pytest
from tornado import gen
from crawler.crawler import Crawler
from crawler.data import CrawlerDataControl
from crawler.tasks import CrawlerTasks
from tests import setup_module # NOQA
def test_download_data():
'''
Run a single test downloading the CPTM data and checking the status_code
using the Crawler.download_data().
'''
crawler = Crawler()
response = crawler.download_data()
assert response['status_code'] == 200
def test_parse_content():
'''
Test the Crawler.parse_content() method. This check if all line from the
Crawler.LINES is in parsed_content function return.
'''
crawler = Crawler()
response = crawler.download_data()
parsed = crawler.parse_content(response['content'])
for line in Crawler.LINES:
assert line in parsed
@pytest.mark.gen_test
def test_download_task():
'''
Test the CrawlerTask.task_download_data() this will run the task, and
check if this work comparing if the total of responses of the database is
increased before 3 seconds.
'''
tasks = CrawlerTasks()
data = CrawlerDataControl()
# Check for downloaded data
total_downloaded = yield data.count_response()
# Running downloads task
tasks.task_download_data()
# Wait for 2 seconds
yield gen.sleep(2)
# Check fot downloaded date
new_total = yield data.count_response()
# Assert if the crawler works
assert new_total > total_downloaded
@pytest.mark.gen_test
def test_process_task():
'''
This test will call the CrawlerTasks.task_process_data() and will compare
if the response collection will be decreased and the processed collection
are increased.
'''
tasks = CrawlerTasks()
data = CrawlerDataControl()
# Check for downloaded data
total_downloaded = yield data.count_response()
# Check if no responses found
if total_downloaded == 0:
# Running downloads task
task_download = tasks.task_download_data()
total = 0
while total_downloaded == 0:
total += 1
total_downloaded = yield data.count_response()
# Wait for 3 seconds to get a response
# If this fails check for CPTM conection
assert total <= 3
yield gen.sleep(1)
# Stop the task_download
task_download.cancel()
total_downloaded = yield data.count_response()
# Check total processed in the database
total_processed = yield data.count_processed()
# Start processing task
task_process = tasks.task_process_data()
total = 3
new_total_downloaded = total_downloaded
while new_total_downloaded == total_downloaded:
# Wait 3 seconds to process the response
yield gen.sleep(1)
new_total_downloaded = yield data.count_response()
if total_downloaded == new_total_downloaded:
total -= 1
assert total > 0
else:
# Stop task_process
task_process.cancel()
# Check the total in downloaded queue
new_total_downloaded = yield data.count_response()
# Check the total processed
new_total_processed = yield data.count_processed()
# Calculate the total removed from the downloaded queue
processed = total_downloaded - new_total_downloaded
# Check if the total processed is increased
# in the processed collection
assert total_processed == new_total_processed - processed
|
[
"carlosmaniero@gmail.com"
] |
carlosmaniero@gmail.com
|
18421b654dce2b71a36d0440ba6ced2729a412e6
|
659a8da3331f50e91578d48d47210abd5e88364b
|
/samples/BulkAQ.py
|
bb86c79f88772cb5139d6a726ba12381ac06b694
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ramboma/python-cx_Oracle
|
e15a073848ca273cf2d36c0be3ed2da30681a8cf
|
3ab21c944705802b7433a5e3916143fe10af2b3f
|
refs/heads/master
| 2020-05-23T23:57:16.166097
| 2019-05-03T21:49:57
| 2019-05-03T21:49:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,280
|
py
|
#------------------------------------------------------------------------------
# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
#
# Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved.
#
# Portions Copyright 2001-2007, Computronix (Canada) Ltd., Edmonton, Alberta,
# Canada. All rights reserved.
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# BulkAQ.py
# This script demonstrates how to use bulk enqueuing and dequeuing of
# messages with advanced queuing using cx_Oracle. It makes use of a RAW queue
# created in the sample setup.
#
# This script requires cx_Oracle 7.2 and higher.
#------------------------------------------------------------------------------
from __future__ import print_function
import cx_Oracle
import SampleEnv
QUEUE_NAME = "DEMORAW"
PAYLOAD_DATA = [
"The first message",
"The second message",
"The third message",
"The fourth message",
"The fifth message",
"The sixth message",
"The seventh message",
"The eighth message",
"The ninth message",
"The tenth message",
"The eleventh message",
"The twelfth and final message"
]
# connect to database
connection = cx_Oracle.connect(SampleEnv.GetMainConnectString())
cursor = connection.cursor()
# create queue
queue = connection.queue(QUEUE_NAME)
queue.deqOptions.wait = cx_Oracle.DEQ_NO_WAIT
queue.deqOptions.navigation = cx_Oracle.DEQ_FIRST_MSG
# dequeue all existing messages to ensure the queue is empty, just so that
# the results are consistent
while queue.deqOne():
pass
# enqueue a few messages
print("Enqueuing messages...")
batchSize = 6
dataToEnq = PAYLOAD_DATA
while dataToEnq:
batchData = dataToEnq[:batchSize]
dataToEnq = dataToEnq[batchSize:]
messages = [connection.msgproperties(payload=d) for d in batchData]
for data in batchData:
print(data)
queue.enqMany(messages)
connection.commit()
# dequeue the messages
print("\nDequeuing messages...")
batchSize = 8
while True:
messages = queue.deqMany(batchSize)
if not messages:
break
for props in messages:
print(props.payload.decode())
connection.commit()
print("\nDone.")
|
[
"anthony.tuininga@oracle.com"
] |
anthony.tuininga@oracle.com
|
7f80ab0319cfb31d50e0c4989e81ffc1f9d987c3
|
5a07e1afa5d172dcd4288f12636edd9c53148073
|
/tests/test_calculator.py
|
97b5fa4e3d9da0bf9e1d033cbced933a20b95265
|
[
"Apache-2.0"
] |
permissive
|
scikit-hep/pyhf
|
3df3f9b12d1b362919629275b8746060833713f3
|
205eecfb0b57591eb6b70e98b01511797340a0c7
|
refs/heads/main
| 2023-09-02T18:50:35.990103
| 2023-08-31T00:10:41
| 2023-08-31T00:10:41
| 118,789,569
| 246
| 82
|
Apache-2.0
| 2023-09-13T21:57:02
| 2018-01-24T16:14:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,471
|
py
|
import pytest
import pyhf
import pyhf.infer.calculators
def test_calc_dist():
asymptotic_dist = pyhf.infer.calculators.AsymptoticTestStatDistribution(0.0)
assert asymptotic_dist.pvalue(-1) == 1 - asymptotic_dist.cdf(-1)
@pytest.mark.parametrize("return_fitted_pars", [False, True])
def test_generate_asimov_can_return_fitted_pars(return_fitted_pars):
model = pyhf.simplemodels.uncorrelated_background([1, 1], [1, 1], [1, 1])
data = [2, 2, 1, 1] # [main x 2, aux x 2]
init_pars = model.config.suggested_init()
par_bounds = model.config.suggested_bounds()
fixed_params = model.config.suggested_fixed()
result = pyhf.infer.calculators.generate_asimov_data(
1.0,
data,
model,
init_pars,
par_bounds,
fixed_params,
return_fitted_pars=return_fitted_pars,
)
if return_fitted_pars:
assert len(result) == 2
result, asimov_pars = result
assert pytest.approx([1.0, 1.0, 1.0]) == pyhf.tensorlib.tolist(asimov_pars)
assert pytest.approx([2.0, 2.0, 1.0, 1.0]) == pyhf.tensorlib.tolist(result)
# test different test stats because those affect the control flow
# in AsymptotiCalculator.teststatistic, where the fit results should be set
# the other kwargs don't impact the logic of that method,
# so leave them at the default so as not to put a burden on future changes
@pytest.mark.parametrize('test_stat', ['qtilde', 'q', 'q0'])
def test_asymptotic_calculator_has_fitted_pars(test_stat):
model = pyhf.simplemodels.uncorrelated_background([1], [1], [1])
data = [2, 1] # [main, aux]
calc = pyhf.infer.calculators.AsymptoticCalculator(data, model, test_stat=test_stat)
calc.teststatistic(0 if test_stat == 'q0' else 1)
assert hasattr(calc, 'fitted_pars')
fitted_pars = calc.fitted_pars
assert hasattr(fitted_pars, 'asimov_pars')
assert hasattr(fitted_pars, 'fixed_poi_fit_to_data')
assert hasattr(fitted_pars, 'fixed_poi_fit_to_asimov')
assert hasattr(fitted_pars, 'free_fit_to_data')
assert hasattr(fitted_pars, 'free_fit_to_asimov')
rtol = 1e-5
if test_stat == 'q0':
assert pytest.approx([1.0, 1.0], rel=rtol) == pyhf.tensorlib.tolist(
fitted_pars.asimov_pars
)
assert pytest.approx([0.0, 1.5], rel=rtol) == pyhf.tensorlib.tolist(
fitted_pars.fixed_poi_fit_to_data
)
assert pytest.approx([0.0, 1.5], rel=rtol) == pyhf.tensorlib.tolist(
fitted_pars.fixed_poi_fit_to_asimov
)
assert pytest.approx([1.0, 1.0], rel=rtol) == pyhf.tensorlib.tolist(
fitted_pars.free_fit_to_data
)
assert pytest.approx([1.0, 1.0], rel=rtol) == pyhf.tensorlib.tolist(
fitted_pars.free_fit_to_asimov
)
else:
assert pytest.approx([0.0, 1.5], rel=rtol) == pyhf.tensorlib.tolist(
fitted_pars.asimov_pars
)
assert pytest.approx([1.0, 1.0], rel=rtol) == pyhf.tensorlib.tolist(
fitted_pars.fixed_poi_fit_to_data
)
assert pytest.approx([1.0, 1.1513553], rel=rtol) == pyhf.tensorlib.tolist(
fitted_pars.fixed_poi_fit_to_asimov
)
assert pytest.approx([1.0, 1.0], rel=rtol) == pyhf.tensorlib.tolist(
fitted_pars.free_fit_to_data
)
assert pytest.approx(
[7.6470499e-05, 1.4997178], rel=rtol
) == pyhf.tensorlib.tolist(fitted_pars.free_fit_to_asimov)
|
[
"noreply@github.com"
] |
scikit-hep.noreply@github.com
|
1210fa97dd02d25bcf6f192141e2e585187fb4d8
|
d31d744f62c09cb298022f42bcaf9de03ad9791c
|
/runtime/mlir_tests/lit.cfg.py
|
d3cc5aa1abc19890ca261746033475c5e3deb1c0
|
[
"Apache-2.0"
] |
permissive
|
yuhuofei/TensorFlow-1
|
b2085cb5c061aefe97e2e8f324b01d7d8e3f04a0
|
36eb6994d36674604973a06159e73187087f51c6
|
refs/heads/master
| 2023-02-22T13:57:28.886086
| 2021-01-26T14:18:18
| 2021-01-26T14:18:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,844
|
py
|
# Copyright 2020 The TensorFlow Runtime Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lit configuration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import lit.formats
from lit.llvm import llvm_config
from lit.llvm.subst import ToolSubst
# pylint: disable=undefined-variable
# name: The name of this test suite.
config.name = 'TFRT'
# test_format: The test format to use to interpret tests.
config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
# suffixes: A list of file extensions to treat as test files.
config.suffixes = ['.mlir']
# test_source_root: The root path where tests are located.
config.test_source_root = config.tfrt_test_dir
# test_exec_root: The root path where tests should be run.
config.test_exec_root = config.runfile_srcdir
llvm_config.use_default_substitutions()
llvm_config.config.substitutions.append(
('%tfrt_bindir', 'tensorflow/compiler/aot'))
tool_dirs = config.tfrt_tools_dirs + [config.llvm_tools_dir]
tool_names = [
'bef_executor', 'bef_name', 'tfrt_translate', 'tfrt_opt',
'tfrt_gpu_translate', 'tfrt_gpu_opt', 'code_size_test_driver'
]
tools = [ToolSubst(s, unresolved='ignore') for s in tool_names]
llvm_config.add_tool_substitutions(tools, tool_dirs)
# pylint: enable=undefined-variable
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
596451015e496db3243616f6d052da8d175442f8
|
752116ef4b69a3049fef0cfe9b3d212548cc81b1
|
/sources/actions/profile/show.py
|
cd5a597e53b9b334781c5f88d5e5e68611ecb4f7
|
[] |
no_license
|
VDOMBoxGroup/runtime2.0
|
e54af4af7a642f34b0e07b5d4096320494fb9ae8
|
cb9932f5f75d5c6d7889f26d58aee079b4127299
|
refs/heads/develop
| 2023-07-07T11:06:10.817093
| 2023-07-03T06:11:55
| 2023-07-03T06:11:55
| 62,622,255
| 0
| 12
| null | 2023-05-23T02:55:00
| 2016-07-05T09:09:48
|
Python
|
UTF-8
|
Python
| false
| false
| 4,565
|
py
|
import pstats
import re
from itertools import izip
import settings
import managers
import file_access
from utils.console import CONSOLE_WIDTH
from utils.tracing import BINARY_ALIAS, SERVER_ALIAS, TYPES_ALIAS, APPLICATIONS_ALIAS, format_source_point
from utils.auxiliary import fit, fill
from ..auxiliary import section, show, warn
LOCATION_WIDTH = 99
CALLS_WIDTH = 9
TIME_WIDTH = 11
COLUMNS = (
(-LOCATION_WIDTH, "name", "%*s"),
(CALLS_WIDTH, "calls", "%*d"),
(TIME_WIDTH, "total", "%*.4f"),
(TIME_WIDTH, "cumulative", "%*.4f")
)
SEPARATOR = " "
FILLER = "-"
SORT_BY_NAME = "SORT BY NAME"
SORT_BY_CALLS = "SORT BY CALLS"
SORT_BY_TOTAL = "SORT BY TOTAL"
SORT_BY_CUMULATIVE = "SORT BY CUMULATIVE"
SORT_VALUES = {
"n": SORT_BY_NAME,
"name": SORT_BY_NAME,
"ca": SORT_BY_CALLS,
"calls": SORT_BY_CALLS,
"t": SORT_BY_TOTAL,
"total": SORT_BY_TOTAL,
"cu": SORT_BY_CUMULATIVE,
"cumulative": SORT_BY_CUMULATIVE
}
ORDER_BY_ASCENDING = "ORDER BY ASCENDING"
ORDER_BY_DESCENDING = "ORDER BY DESCENDING"
ORDER_VALUES = {
"a": ORDER_BY_ASCENDING,
"asc": ORDER_BY_ASCENDING,
"ascending": ORDER_BY_ASCENDING,
"d": ORDER_BY_DESCENDING,
"desc": ORDER_BY_DESCENDING,
"descending": ORDER_BY_DESCENDING
}
SORT_MAPPING = {
SORT_BY_NAME: lambda item: item[0],
SORT_BY_CALLS: lambda item: item[1],
SORT_BY_TOTAL: lambda item: item[2],
SORT_BY_CUMULATIVE: lambda item: item[3]
}
BUILD_IN_PATTERN = re.compile("\<built-in method (?P<name>.+)\>")
METHOD_PATTERN = re.compile("\<method '(?P<name>.+)' of '(?P<class>.+)' objects\>")
def make_name(path, line, function):
    """Render a pstats entry key as a fixed-width display name."""
    # Entries with path "~" are C-level callables that have no source file;
    # everything else gets a regular source-point location string.
    if path != "~":
        return format_source_point(path, line, function, width=LOCATION_WIDTH)
    bound = METHOD_PATTERN.match(function)
    if bound:
        # "<method 'append' of 'list' objects>" -> "list.append"
        return fit("%s.%s" % (bound.group("class"), bound.group("name")), LOCATION_WIDTH)
    builtin = BUILD_IN_PATTERN.match(function)
    if builtin:
        # "<built-in method sum>" -> "sum"
        return fit(builtin.group("name"), LOCATION_WIDTH)
    # Fall back to stripping the surrounding angle brackets.
    return fit(function[1:-1], LOCATION_WIDTH)
def run(name=None, location=None, headers=False, sort=None, order=None, limit=50, nolimit=False, all=False):
    """
    show server last profile statistics: name, calls, total and cumulative times
    :arg name: specifies profile name
    :arg location: input file location with stored profile statistics
    :key switch headers: show columns headers
    :key sort: sort entries by "name", by "calls", by "total" or by "cumulative"
    :key order: sort entries "asc"ending or "desc"ending
    :key switch nolimit: disable output entries limit
    :key int limit: limit output to specified number of entries
    :key switch all: show all entries including from non-server code
    """
    # Derive the statistics file from the profile name unless an explicit
    # location was supplied; the two options are mutually exclusive.
    if location is None:
        location = settings.PROFILE_FILENAME_TEMPLATE % (name or settings.PROFILE_DEFAULT_NAME)
    elif name is not None:
        warn("name and location are mutually exclusive options")
        return
    if not managers.file_manager.exists(file_access.FILE, None, location):
        warn("no profile")
        return
    # Resolve sort mode; sorting by name defaults to ascending order.
    sort = SORT_VALUES.get((sort or "").lower(), SORT_BY_TOTAL)
    if sort is SORT_BY_NAME and order is None:
        order = "asc"
    order = ORDER_VALUES.get((order or "").lower(), ORDER_BY_DESCENDING)
    if nolimit:
        limit = None
    # Flatten pstats entries into (name, calls, total time, cumulative time).
    profile = pstats.Stats(location)
    statistics = tuple((make_name(path, line, function), calls, total, cumulative)
        for (path, line, function), (calls, stack, total, cumulative, more)
        in profile.stats.iteritems())
    key = SORT_MAPPING[sort]
    reverse = order is ORDER_BY_DESCENDING
    entries = sorted(statistics, key=key, reverse=reverse)
    with section("statistics", width=CONSOLE_WIDTH):
        if headers:
            show(SEPARATOR.join("%*s" % (width, label) for width, label, template in COLUMNS))
            show(SEPARATOR.join(fill(FILLER, abs(width)) for width, label, template in COLUMNS))
        index = 0
        for entry in entries:
            # Unless "all" is requested, keep only entries whose rendered
            # name starts with one of the known server path aliases.
            if not (all
                    or entry[0].startswith(BINARY_ALIAS)
                    or entry[0].startswith(TYPES_ALIAS)
                    or entry[0].startswith(APPLICATIONS_ALIAS)
                    or entry[0].startswith(SERVER_ALIAS)):
                continue
            show(SEPARATOR.join(template % (width, value) for value, (width, label, template) in izip(entry, COLUMNS)))
            # NOTE(review): the limit check happens after show(), so up to
            # limit + 1 entries are printed -- confirm whether intended.
            if index == limit:
                break
            index += 1
|
[
"nikolay.grishkov@vdombox.ru"
] |
nikolay.grishkov@vdombox.ru
|
a9661e7b5c5f4b1de005a245508c6ca122738ecc
|
effeae00f945e10e5c5a52f28d813c0b8b76d569
|
/app/Test/tasks-dev.py
|
e6d6732d024a33c32d90de0a4b00bc618df38dc0
|
[] |
no_license
|
maro99/Celery-tutorials
|
3e4833e44dcf9cee69f07303929cbdafd760234c
|
c554e17d210071168dac06c8798a889048014f9f
|
refs/heads/master
| 2020-03-26T16:15:13.393049
| 2018-08-17T07:55:00
| 2018-08-17T07:55:00
| 145,090,088
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
# Create your tasks here
from __future__ import absolute_import, unicode_literals
import datetime
import time
from celery import shared_task
# from Test.models import Task
@shared_task
def add(x, y):
    """Celery task: return the sum of ``x`` and ``y``."""
    result = x + y
    return result
@shared_task
def mul(x, y):
    """Celery task: return the product of ``x`` and ``y``."""
    product = x * y
    return product
@shared_task
def xsum(numbers):
    """Celery task: return the sum of an iterable of numbers."""
    total = 0
    for value in numbers:
        total += value
    return total
# @shared_task
# def long_task():
# start = datetime.datetime.now()
# task = Task.objects.create(
# start_at=datetime.datetime.now(),
# end_at=datetime.datetime.now(),
# )
# time.sleep(10)
# end = datetime.datetime.now()
# task.start_at=start
# task.end_at=end
# task.save()
|
[
"nadcdc4@gmail.com"
] |
nadcdc4@gmail.com
|
56b13cb050bed40abe9d9b579c1da59cc8dc2c0d
|
4d360320e06339a4f7d2a2723cddf02ff02a306e
|
/0x10-python-network_0/6-peak.py
|
574e06c979ee4c5f8dcc9677cbca278c37e8ad95
|
[] |
no_license
|
AmineNeifer/holbertonschool-higher_level_programming
|
fd6ccdb1b5f0dc85e10750e9f2c7824290697e85
|
f5c42bff003b85a7c19702e0233997645fce2fb1
|
refs/heads/master
| 2020-09-29T02:56:52.286548
| 2020-05-15T00:12:50
| 2020-05-15T00:12:50
| 226,933,206
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 592
|
py
|
#!/usr/bin/python3
""" funtion find_peak"""
def find_peak(list_of_integers):
""" find a peak in a list of integers
Arguments:
list_of_integers: list of int
Returns:
int or None if list is empty
"""
if list_of_integers == []:
return None
length = len(list_of_integers)
if length == 1:
return list_of_integers[0]
for i in range(length):
current = list_of_integers[i]
n = list_of_integers[i+1]
p = list_of_integers[i-1]
if current >= n and current >= p:
return current
|
[
"amineneifer2000@gmail.com"
] |
amineneifer2000@gmail.com
|
46d57dcd5a3dd2d3ed7f508c20929ed0f84302ea
|
1bd61847ecbaa8394776ebf1c8ccc866d38cf01d
|
/src/gevent/testing/__init__.py
|
98a66a66d387f0b9959edbab873219288c4c6812
|
[
"MIT",
"Python-2.0"
] |
permissive
|
sergkot2020/gevent
|
f9fcbc9a277b063ec024df24cf1e0486a62e59ab
|
b5e9c638d7e1e6bd9b1e354fff1bffbcefdbcecc
|
refs/heads/master
| 2022-02-01T14:56:06.596018
| 2019-04-25T19:34:15
| 2019-04-25T19:34:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,621
|
py
|
# Copyright (c) 2008-2009 AG Projects
# Copyright 2018 gevent community
# Author: Denis Bilenko
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import unittest
# pylint:disable=unused-import
from .sysinfo import VERBOSE
from .sysinfo import WIN
from .sysinfo import LINUX
from .sysinfo import LIBUV
from .sysinfo import CFFI_BACKEND
from .sysinfo import DEBUG
from .sysinfo import RUN_LEAKCHECKS
from .sysinfo import RUN_COVERAGE
from .sysinfo import PY2
from .sysinfo import PY3
from .sysinfo import PY36
from .sysinfo import PY37
from .sysinfo import PYPY
from .sysinfo import PYPY3
from .sysinfo import CPYTHON
from .sysinfo import PLATFORM_SPECIFIC_SUFFIXES
from .sysinfo import NON_APPLICABLE_SUFFIXES
from .sysinfo import SHARED_OBJECT_EXTENSION
from .sysinfo import RUNNING_ON_TRAVIS
from .sysinfo import RUNNING_ON_APPVEYOR
from .sysinfo import RUNNING_ON_CI
from .sysinfo import RESOLVER_NOT_SYSTEM
from .sysinfo import RESOLVER_DNSPYTHON
from .sysinfo import RESOLVER_ARES
from .sysinfo import EXPECT_POOR_TIMER_RESOLUTION
from .sysinfo import CONN_ABORTED_ERRORS
from .skipping import skipOnWindows
from .skipping import skipOnAppVeyor
from .skipping import skipOnCI
from .skipping import skipOnPyPy3OnCI
from .skipping import skipOnPyPy
from .skipping import skipOnPyPyOnCI
from .skipping import skipOnPyPy3
from .skipping import skipIf
from .skipping import skipUnless
from .skipping import skipOnLibev
from .skipping import skipOnLibuv
from .skipping import skipOnLibuvOnWin
from .skipping import skipOnLibuvOnCI
from .skipping import skipOnLibuvOnCIOnPyPy
from .skipping import skipOnLibuvOnPyPyOnWin
from .skipping import skipOnPurePython
from .skipping import skipWithCExtensions
from .skipping import skipOnLibuvOnTravisOnCPython27
from .skipping import skipOnPy37
from .exception import ExpectedException
from .leakcheck import ignores_leakcheck
from .params import LARGE_TIMEOUT
from .params import DEFAULT_LOCAL_HOST_ADDR
from .params import DEFAULT_LOCAL_HOST_ADDR6
from .params import DEFAULT_BIND_ADDR
from .params import DEFAULT_SOCKET_TIMEOUT
from .params import DEFAULT_XPC_SOCKET_TIMEOUT
main = unittest.main
from .hub import QuietHub
import gevent.hub
gevent.hub.set_default_hub_class(QuietHub)
from .sockets import bind_and_listen
from .sockets import tcp_listener
from .openfiles import get_number_open_files
from .openfiles import get_open_files
from .testcase import TestCase
from .modules import walk_modules
BaseTestCase = unittest.TestCase
from .flaky import reraiseFlakyTestTimeout
from .flaky import reraiseFlakyTestRaceCondition
from .flaky import reraises_flaky_timeout
from .flaky import reraises_flaky_race_condition
def gc_collect_if_needed():
    "Collect garbage if necessary for destructors to run"
    import gc
    # CPython's reference counting runs destructors promptly; only PyPy
    # needs an explicit collection here.
    if PYPY:  # pragma: no cover
        gc.collect()
# Our usage of mock should be limited to '@mock.patch()'
# and other things that are easily...mocked...here on Python 2
# when mock is not installed.
try:
    from unittest import mock  # Python 3: mock is in the stdlib
except ImportError: # Python 2
    try:
        import mock  # third-party backport, if installed
    except ImportError: # pragma: no cover
        # Backport not installed: degrade @mock.patch(reason) into a
        # unittest skip decorator so decorated tests are skipped cleanly.
        class mock(object):
            @staticmethod
            def patch(reason):
                return unittest.skip(reason)
# Re-export so "from gevent.testing import mock" works on every branch.
mock = mock
# zope.interface
try:
    from zope.interface import verify
except ImportError:
    # zope.interface not installed: provide a stand-in whose verifyObject
    # only emits a warning instead of verifying interfaces.
    class verify(object):
        @staticmethod
        def verifyObject(*_):
            import warnings
            warnings.warn("zope.interface is not installed; not verifying")
            return
# Re-export so "from gevent.testing import verify" works on every branch.
verify = verify
|
[
"jamadden@gmail.com"
] |
jamadden@gmail.com
|
9921d3365e871fe5795b68e176ff4fa124e9586a
|
f6290b7b8ffb263b7f0d252a67e2c6320a4c1143
|
/Array/element_with_leftSmaller_rightGreater.py
|
c79aa40176f43b75abb61162f592c37ce7d7de69
|
[] |
no_license
|
datAnir/GeekForGeeks-Problems
|
b45b0ae80053da8a1b47a2af06e688081574ef80
|
c71f11d0349ed3850dfaa9c7a078ee70f67e46a1
|
refs/heads/master
| 2023-05-29T15:21:59.680793
| 2020-12-15T04:55:01
| 2020-12-15T04:55:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,467
|
py
|
'''
https://practice.geeksforgeeks.org/problems/unsorted-array/0
Given an unsorted array of size N. Find the first element in array such that all of its left elements are smaller and all right elements to it are greater than it.
Note: Left and right side elements can be equal to required element. And extreme elements cannot be required element.
Input:
3
4
4 2 5 7
3
11 9 12
6
4 3 2 7 8 9
Output:
5
-1
7
'''
# method - 1
# create left max array and right min storing max/min value before/after current position
# at current position, check (arr[i] >= left_max) and (arr[i] <= right_min)
def getElement(arr, n):
    """Return the first non-extreme element that is >= everything to its
    left and <= everything to its right, or -1 if none exists."""
    # suffix_min[idx] = smallest value strictly to the right of idx
    suffix_min = [float('inf')] * n
    for idx in range(n - 2, -1, -1):
        suffix_min[idx] = min(suffix_min[idx + 1], arr[idx + 1])
    # Scan left to right, carrying the running maximum of the prefix.
    prefix_max = float('-inf')
    for idx in range(1, n - 1):
        prefix_max = max(prefix_max, arr[idx - 1])
        if prefix_max <= arr[idx] <= suffix_min[idx]:
            return arr[idx]
    return -1
# method - 2
def getElement(arr, n):
    """Single-pass variant (this redefinition shadows method 1 above).

    Tracks a candidate ``element`` whose left neighbours are all <= it;
    ``bit`` records whether the candidate came from the "potential value"
    branch and survived the scan of the elements to its right.
    """
    if n <= 2:
        return -1
    max_val = arr[0]  # maximum of everything left of position i
    element = arr[0]  # current candidate answer
    idx = -1          # index of the candidate
    bit = -1          # 1 when candidate was (re)confirmed in the else branch
    #check = 0
    i = 1
    while i < (n-1):
        # if current element is less so it is not potential value
        if arr[i] < max_val and i < (n-1):
            i += 1
            bit = 0
        else:
            # it is potential value
            if arr[i] >= max_val:
                element = arr[i]
                idx = i
                max_val = arr[i]
                #check = 1
            #if check == 1:
            i += 1
            # update bit state that we found potential value
            bit = 1
            # process all elements after current element as they are greater
            while i < (n-1) and arr[i] >= element:
                if arr[i] > max_val:
                    max_val = arr[i]
                i += 1
                #check = 0
    # candidate must not exceed the last (extreme) element, and must have
    # been set by the else branch (bit == 1) rather than invalidated
    if element <= arr[n-1] and bit == 1:
        return arr[idx]
    else:
        return -1
# Driver: read the number of test cases, then one array per case.
t = int(input())
while t:
    n = int(input())
    arr = list(map(int, input().split()))
    ans = getElement(arr, n)
    print(ans)
    t -= 1
|
[
"komalbansal97@gmail.com"
] |
komalbansal97@gmail.com
|
359ae6f1a016aebd6256a4ea3a9b760efbd8ee4a
|
0487c30d3d2a26ee62eb9e82c1b1e6edb7cb8b36
|
/tests/platform_tests/link_flap/test_link_flap.py
|
97062bc66dc6231cbc6a5ba3a328b1db92594d5b
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
gord1306/sonic-mgmt
|
e4047cbcdb600591816215e765c7f30664cc4543
|
05094321ed58270ac06d1a0ef575a4ab9ea3ddd6
|
refs/heads/master
| 2022-12-17T08:05:58.944208
| 2022-06-06T02:34:48
| 2022-06-06T02:34:48
| 195,778,851
| 1
| 0
|
NOASSERTION
| 2019-07-08T09:21:07
| 2019-07-08T09:21:07
| null |
UTF-8
|
Python
| false
| false
| 4,285
|
py
|
"""
Tests the link flap in SONiC.
"""
import logging
import pytest
import random
from tests.platform_tests.link_flap.link_flap_utils import toggle_one_link, check_orch_cpu_utilization
from tests.common.platform.device_utils import fanout_switch_port_lookup
from tests.common.helpers.assertions import pytest_assert
from tests.common.utilities import wait_until
# Module-level logger for this test module.
logger = logging.getLogger(__name__)
# Marks applied to every test here: skip the log analyzer, run on any topology.
pytestmark = [
    pytest.mark.disable_loganalyzer,
    pytest.mark.topology('any'),
]
def get_port_list(duthost, tbinfo):
    """Return the DUT's port names from its extended minigraph facts."""
    minigraph_facts = duthost.get_extended_minigraph_facts(tbinfo)
    return minigraph_facts["minigraph_ports"].keys()
@pytest.mark.usefixtures("bgp_sessions_config")
@pytest.mark.platform('physical')
def test_link_flap(request, duthosts, rand_one_dut_hostname, tbinfo, fanouthosts, get_loop_times):
    """
    Validates that link flap works as expected

    Toggles every DUT<->fanout link for ``get_loop_times`` iterations and
    verifies orchagent CPU stays under --orch_cpu_threshold and that Redis
    memory grows by less than 5% over the run.
    """
    duthost = duthosts[rand_one_dut_hostname]
    orch_cpu_threshold = request.config.getoption("--orch_cpu_threshold")
    # Record memory status at start
    memory_output = duthost.shell("show system-memory")["stdout"]
    logger.info("Memory Status at start: %s", memory_output)
    # Record Redis Memory at start (value reported in megabytes)
    start_time_redis_memory = duthost.shell("redis-cli info memory | grep used_memory_human | sed -e 's/.*:\(.*\)M/\\1/'")["stdout"]
    logger.info("Redis Memory: %s M", start_time_redis_memory)
    # Make Sure Orch CPU < orch_cpu_threshold before starting test.
    logger.info("Make Sure orchagent CPU utilization is less that %d before link flap", orch_cpu_threshold)
    pytest_assert(wait_until(100, 2, 0, check_orch_cpu_utilization, duthost, orch_cpu_threshold),
                  "Orch CPU utilization {} > orch cpu threshold {} before link flap"
                  .format(duthost.shell("show processes cpu | grep orchagent | awk '{print $9}'")["stdout"], orch_cpu_threshold))
    # Resolve every DUT port to its fanout switch/port pair up front.
    loop_times = get_loop_times
    port_lists = get_port_list(duthost, tbinfo)
    candidates = []
    for port in port_lists:
        fanout, fanout_port = fanout_switch_port_lookup(fanouthosts, duthost.hostname, port)
        candidates.append((port, fanout, fanout_port))
    for loop_time in range(0, loop_times):
        # Only watch/verify link status on the first and last iteration to
        # keep the intermediate flaps fast.
        watch = False
        check_status = False
        if loop_time == 0 or loop_time == loop_times - 1:
            watch = True
            check_status = True
        for dut_port, fanout, fanout_port in candidates:
            toggle_one_link(duthost, dut_port, fanout, fanout_port, watch=watch, check_status=check_status)
    # Record memory status at end
    memory_output = duthost.shell("show system-memory")["stdout"]
    logger.info("Memory Status at end: %s", memory_output)
    # Record orchagent CPU utilization at end
    orch_cpu = duthost.shell("show processes cpu | grep orchagent | awk '{print $9}'")["stdout"]
    logger.info("Orchagent CPU Util at end: %s", orch_cpu)
    # Record Redis Memory at end
    end_time_redis_memory = duthost.shell("redis-cli info memory | grep used_memory_human | sed -e 's/.*:\(.*\)M/\\1/'")["stdout"]
    logger.info("Redis Memory at start: %s M", start_time_redis_memory)
    logger.info("Redis Memory at end: %s M", end_time_redis_memory)
    # Calculate diff in Redis memory
    incr_redis_memory = float(end_time_redis_memory) - float(start_time_redis_memory)
    logger.info("Redis absolute difference: %d", incr_redis_memory)
    # Check redis memory only if it is increased else default to pass
    if incr_redis_memory > 0.0:
        percent_incr_redis_memory = (incr_redis_memory / float(start_time_redis_memory)) * 100
        logger.info("Redis Memory percentage Increase: %d", percent_incr_redis_memory)
        pytest_assert(percent_incr_redis_memory < 5, "Redis Memory Increase more than expected: {}".format(percent_incr_redis_memory))
    # Orchagent CPU should consume < orch_cpu_threshold at last.
    logger.info("watch orchagent CPU utilization when it goes below %d", orch_cpu_threshold)
    pytest_assert(wait_until(45, 2, 0, check_orch_cpu_utilization, duthost, orch_cpu_threshold),
                  "Orch CPU utilization {} > orch cpu threshold {} before link flap"
                  .format(duthost.shell("show processes cpu | grep orchagent | awk '{print $9}'")["stdout"], orch_cpu_threshold))
|
[
"noreply@github.com"
] |
gord1306.noreply@github.com
|
dd847827652454aacee8c0f58a0ef8bcb3aff680
|
a8be4698c0a43edc3622837fbe2a98e92680f48a
|
/SSAFY알고리즘정규시간 Problem Solving/10월 Problem Solving/1002/1808지희의고장난계산기.py
|
bfc9012338cbb27387fc9f786b5ac10ed2465b92
|
[] |
no_license
|
blueboy1593/algorithm
|
fa8064241f7738a12b33544413c299e7c1e1a908
|
9d6fdd82b711ba16ad613edcc041cbecadd85e2d
|
refs/heads/master
| 2021-06-23T22:44:06.120932
| 2021-02-21T10:44:16
| 2021-02-21T10:44:16
| 199,543,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,573
|
py
|
import sys
# Redirect stdin so input() reads from the local sample-input file
# (common SWEA/SSAFY local-testing convention).
sys.stdin = open("1808_input.txt", "r")
T = int(input())  # number of test cases
def make_number(num, cnt):
    """Recursively extend ``num`` by appending every working digit.

    Records in the global ``DP_list`` the number of key presses needed to
    type each reachable value <= ``goal`` and collects [value, presses]
    pairs in the global ``number``. Relies on the module-level globals
    ``cal_num``, ``goal``, ``DP_list`` and ``number`` set by the driver.
    """
    cnt += 1
    num_save = num
    for su in cal_num:
        num = 10*num_save + su
        if num <= goal:
            # already reached with this many (or fewer) presses: prune
            if DP_list[num] != 0:
                return
            DP_list[num] = cnt
            number.append([num, cnt])
            make_number(num, cnt)
        else:
            return
# Driver: for each test case, enumerate every number typeable with the
# working buttons, then combine reachable values by multiplication to find
# the fewest key presses that display the goal.
for tc in range(1, T + 1):
    calculator = list(map(int, input().split()))
    cal_num = list()
    for i in range(10):
        if calculator[i] == 1:
            cal_num.append(i)
    goal = int(input())
    DP_list = [0] * (goal + 1)  # DP_list[v] = fewest presses to reach v (0 = unreached)
    number = []                 # [value, presses] pairs typeable with digits alone
    for su in cal_num:
        cnt = 1
        number.append([su, cnt])
        if su <= goal:
            DP_list[su] = 1
        make_number(su, cnt)
    number.sort()
    def jaegui(i):
        # Multiply reachable value i by each typeable number, paying that
        # number's presses plus one extra press (presumably the '*' key --
        # TODO confirm against the problem statement).
        for num in number:
            new_num = i * num[0]
            if new_num > goal:
                return
            else:
                new_cnt = DP_list[i] + num[1] + 1
                if DP_list[new_num] != 0:
                    if new_cnt < DP_list[new_num]:
                        DP_list[new_num] = new_cnt
                        jaegui(new_num)
                else:
                    DP_list[new_num] = new_cnt
                    jaegui(new_num)
    for i in range(len(DP_list)):
        if DP_list[i] != 0:
            jaegui(i)
    result = DP_list[-1] + 1  # +1 for the final key press (presumably '=')
    if result == 1:
        result = -1           # goal not reachable
    print("#%d %d" %(tc,result))
|
[
"snb0303@naver.com"
] |
snb0303@naver.com
|
f373373fa28c72bb281a07d685aaab9ccc377505
|
52a7b1bb65c7044138cdcbd14f9d1e8f04e52c8a
|
/users/migrations/0002_auto_20210502_1502.py
|
32752b25b8d547043fe70bc289f7cf3142d2620f
|
[] |
no_license
|
rds0751/aboota
|
74f8ab6d0cf69dcb65b0f805a516c5f94eb8eb35
|
2bde69c575d3ea9928373085b7fc5e5b02908374
|
refs/heads/master
| 2023-05-03T00:54:36.421952
| 2021-05-22T15:40:48
| 2021-05-22T15:40:48
| 363,398,229
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
# Generated by Django 2.2.13 on 2021-05-02 15:02
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django: changes users.User.mobile to a plain
    # CharField(max_length=255).
    dependencies = [
        ('users', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='mobile',
            field=models.CharField(max_length=255),
        ),
    ]
|
[
"you@example.com"
] |
you@example.com
|
99a3aefd5bdd482ec28d142fc733942e1d628904
|
0ae2bb21d7ca71a691e33cb044a0964d380adda2
|
/uber/uber_algo/LC10RegularExpressionMatching2.py
|
5205e9dc7e8b16a2a360ee64bef12bb586e857a0
|
[] |
no_license
|
xwang322/Coding-Interview
|
5d27ec92d6fcbb7b929dd98bb07c968c1e1b2a04
|
ee5beb79038675ce73c6d147ba9249d9a5ca346a
|
refs/heads/master
| 2020-03-10T08:18:34.980557
| 2018-06-24T03:37:12
| 2018-06-24T03:37:12
| 129,282,263
| 2
| 6
| null | 2018-04-19T19:31:24
| 2018-04-12T16:41:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,904
|
py
|
'''
Interview note (translated from Chinese; original used C-style /* */ comment
markers, which are invalid Python syntax): Phone interview this morning --
about five minutes on the resume, then a single question: LeetCode problem 10,
regular-expression matching. I discussed the approach with the interviewer
before coding and the implementation went smoothly, but since it is a DP
solution the interviewer could not follow it at all; I explained it line by
line for about ten minutes until he said he understood, and all the test cases
passed. At the end he mentioned the answer he actually wanted was recursion.
HR later wrote that the interviewer found the technical part fine but
communication a problem, so the second phone screen will focus on
communication.
'''
class Solution(object):
    def isMatch(self, s, p):
        """Regex matching with '.' (any one char) and '*' (zero or more of
        the preceding element), solved with DP.

        dp[i+1][j+1] is True iff pattern prefix p[:i+1] matches string
        prefix s[:j+1]; rows index the pattern, columns the string.
        """
        dp = [[False for i in range(len(s)+1)] for j in range(len(p)+1)]
        dp[0][0] = True
        # First column: which pattern prefixes match the empty string
        # (those whose trailing elements are all "x*" pairs).
        # NOTE(review): the i == 0 branch writes dp[0][0], which is already
        # True; it only matters for patterns starting with '*', which are
        # invalid input for this problem.
        for i in range(len(p)):
            if p[i] == '*':
                if i == 0:
                    dp[i][0] = True
                elif dp[i-1][0]:
                    dp[i+1][0] = True
        for i in range(len(p)):
            for j in range(len(s)):
                if p[i] == s[j]:
                    # literal match: inherit the diagonal
                    dp[i+1][j+1] = dp[i][j]
                elif p[i] == '.':
                    # '.' matches any single character
                    dp[i+1][j+1] = dp[i][j]
                elif p[i] == '*':
                    if p[i-1] != '.' and p[i-1] != s[j]:
                        # preceding element can't match s[j]: '*' must
                        # stand for zero occurrences
                        dp[i+1][j+1] = dp[i-1][j+1]
                    else:
                        dp[i+1][j+1] = dp[i-1][j+1] or dp[i][j+1] or dp[i+1][j]
                        # dp[i-1][j+1]: '*' consumes zero occurrences
                        # dp[i][j+1]: '*' treated as exactly one occurrence
                        # dp[i+1][j]: '*' absorbs one more matching char of s
        return dp[-1][-1]
|
[
"noreply@github.com"
] |
xwang322.noreply@github.com
|
1a756ce91f5c6230811163082385e3e480af2e06
|
a4a754bb5d2b92707c5b0a7a669246079ab73633
|
/8_kyu/what_is.py
|
1e8e194499803d2fcee8f36249e2f828d1625f87
|
[] |
no_license
|
halfendt/Codewars
|
f6e0d81d9b10eb5bc66615eeae082adb093c09b3
|
8fe4ce76824beece0168eb39776a2f9e078f0785
|
refs/heads/master
| 2023-07-11T13:58:18.069265
| 2021-08-15T18:40:49
| 2021-08-15T18:40:49
| 259,995,259
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 263
|
py
|
def what_is(x):
    """
    How do I compare numbers? Kata
    https://www.codewars.com/kata/55d8618adfda93c89600012e
    """
    # Lookup table replaces the if/elif chain; anything else is 'nothing'.
    answers = {42: 'everything', 42 * 42: 'everything squared'}
    return answers.get(x, 'nothing')
|
[
"36609861+halfendt@users.noreply.github.com"
] |
36609861+halfendt@users.noreply.github.com
|
f6734e044aee84efb95c26ed091e0a9e9d69b96e
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/E/Emil/spacescrape.py
|
aaafcbe2711f3b1f8244f0d8fd4b747f8c29653a
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,526
|
py
|
#Cant seem to figure out what im doing wrong build this scraper off of Zarino Zappia hyperisland student scraper "https://scraperwiki.com/scrapers/hyper_island_student_profiles/"
#
#
import scraperwiki
import requests
import lxml.html
def scrape_ads():
    """Scrape Airbnb Stockholm listing pages 1-9 and save ads to SQLite."""
    for i in range(1,10):
        r = requests.get('http://airbnb.herokuapp.com/s/Stockholm--Sweden?page=%s' % i) # %s is the page number
        if r.status_code==200: # 200 == HTTP OK; only parse successful responses
            dom = lxml.html.fromstring(r.text)
            targetList = dom.cssselect('.search_result')
            if len(targetList):
                # This page contains listings to scrape.
                ads = [] #changed from people = []
                for results in targetList: # one .search_result element per listing
                    ad = {
                        'name': get_element_or_none(results, 'a.name'),
                        'price': get_element_or_none(results, '.price_data'),
                        'url': get_element_or_none(results, 'a.name', 'href')
                    }
                    print ad['name']
                    # add this listing to the batch
                    ads.append(ad)
                # we've done all the listings on this page… let's save,
                # keyed on 'url' so re-runs upsert rather than duplicate.
                print 'saving page %s' % i
                scraperwiki.sqlite.save(['url'], ads)
            else:
                # empty result page: no more listings, stop paging
                break
# A handy function to get text or attributes out of HTML elements
# A handy function to get text or attributes out of HTML elements
def get_element_or_none(context, css, attribute=None):
    """Return the text (or *attribute* value) of the first element in
    *context* matching *css*, or None when nothing matches or lookup fails."""
    try:
        element = context.cssselect(css)[0]
    # Narrowed from a bare "except:" which also swallowed SystemExit and
    # KeyboardInterrupt; Exception still covers IndexError (no match) and
    # selector errors.
    except Exception:
        return None
    if attribute:
        return element.get(attribute)
    return element.text_content()
scrape_ads()
#Cant seem to figure out what im doing wrong build this scraper off of Zarino Zappia hyperisland student scraper "https://scraperwiki.com/scrapers/hyper_island_student_profiles/"
#
#
import scraperwiki
import requests
import lxml.html
def scrape_ads():
    """Scrape Airbnb Stockholm listing pages 1-9 and save ads to SQLite.

    NOTE(review): this is a verbatim duplicate of the definition above
    (the source file repeats its own content).
    """
    for i in range(1,10):
        r = requests.get('http://airbnb.herokuapp.com/s/Stockholm--Sweden?page=%s' % i) # %s is the page number
        if r.status_code==200: # 200 == HTTP OK; only parse successful responses
            dom = lxml.html.fromstring(r.text)
            targetList = dom.cssselect('.search_result')
            if len(targetList):
                # This page contains listings to scrape.
                ads = [] #changed from people = []
                for results in targetList: # one .search_result element per listing
                    ad = {
                        'name': get_element_or_none(results, 'a.name'),
                        'price': get_element_or_none(results, '.price_data'),
                        'url': get_element_or_none(results, 'a.name', 'href')
                    }
                    print ad['name']
                    # add this listing to the batch
                    ads.append(ad)
                # we've done all the listings on this page… let's save.
                print 'saving page %s' % i
                scraperwiki.sqlite.save(['url'], ads)
            else:
                # empty result page: no more listings, stop paging
                break
# A handy function to get text or attributes out of HTML elements
# A handy function to get text or attributes out of HTML elements
def get_element_or_none(context, css, attribute=None):
    """Return the text (or *attribute* value) of the first element in
    *context* matching *css*, or None when nothing matches or lookup fails.

    NOTE(review): duplicate of the definition above (file repeats itself).
    """
    try:
        element = context.cssselect(css)[0]
    # Narrowed from a bare "except:" which also swallowed SystemExit and
    # KeyboardInterrupt.
    except Exception:
        return None
    if attribute:
        return element.get(attribute)
    return element.text_content()
scrape_ads()
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
d8df5fdc4ab28682df8c64911a380763eb8c27e3
|
2656f92d8329bc1b28188802badc7b3a945fa978
|
/src/platform/coldfusion/fingerprints/CF61.py
|
45110b4ed05f857c1ab3ddeced6bc60964d73df5
|
[
"MIT"
] |
permissive
|
koutto/clusterd
|
81828698574bc7301cd4eb0ad87d3115ddf74612
|
93db0a50210dcc6147c3122a539104a36e92f02b
|
refs/heads/master
| 2020-05-03T17:51:55.430955
| 2019-03-31T23:20:22
| 2019-03-31T23:20:22
| 178,751,876
| 2
| 1
|
MIT
| 2019-03-31T23:04:14
| 2019-03-31T23:04:13
| null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
from src.platform.coldfusion.interfaces import AdminInterface
class FPrint(AdminInterface):
    """Fingerprint for the ColdFusion 6.1 administrative interface."""
    def __init__(self):
        super(FPrint, self).__init__()
        # Version string this fingerprint identifies.
        self.version = "6.1"
|
[
"shodivine@gmail.com"
] |
shodivine@gmail.com
|
3ff95127c22e25708b91f49114442b37354fa34b
|
657ed6e579679ba82525f9a4b2021b96e8ea2685
|
/src/domain/issue.py
|
987346d708e1ee87d97961a9fbc318280addb69f
|
[
"MIT"
] |
permissive
|
pgecsenyi/jira-report-generator
|
f2f7c1598ef4e592012f6badab334855383c1a02
|
48d9c7dc8e8bc5e7e9cc69c0c05a644f320c41d2
|
refs/heads/master
| 2022-12-12T18:19:23.829329
| 2019-09-16T18:38:11
| 2019-09-16T18:38:11
| 208,871,013
| 0
| 0
|
MIT
| 2022-12-08T06:11:01
| 2019-09-16T18:34:39
|
Python
|
UTF-8
|
Python
| false
| false
| 440
|
py
|
class Issue:
    """Read-only holder for one issue: key, summary, URL and time data.

    Attributes are exposed through properties only, so instances are
    effectively immutable after construction.
    """

    def __init__(self, key, summary, url, time_data):
        self._key = key
        self._summary = summary
        self._url = url
        self._time_data = time_data

    @property
    def key(self):
        """The issue key (identifier)."""
        return self._key

    @property
    def summary(self):
        """One-line issue summary."""
        return self._summary

    @property
    def url(self):
        """Link to the issue."""
        return self._url

    @property
    def time_data(self):
        """Associated time-tracking data."""
        return self._time_data

    def __repr__(self):
        # Added for debuggability; does not affect existing callers.
        return '{}(key={!r}, summary={!r})'.format(
            type(self).__name__, self._key, self._summary)
|
[
"pgecsenyi@protonmail.com"
] |
pgecsenyi@protonmail.com
|
61acfb88100c03ba2e7bd2dbd695b46df3d4e4ee
|
9cdfe7992090fb91696eec8d0a8ae15ee12efffe
|
/dp/prob85.py
|
c8bd310e57fb75fe8dfd3c382c6c5ae90176db5d
|
[] |
no_license
|
binchen15/leet-python
|
e62aab19f0c48fd2f20858a6a0d0508706ae21cc
|
e00cf94c5b86c8cca27e3bee69ad21e727b7679b
|
refs/heads/master
| 2022-09-01T06:56:38.471879
| 2022-08-28T05:15:42
| 2022-08-28T05:15:42
| 243,564,799
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,283
|
py
|
# 85. Maximal Rectangle
class Solution(object):
    def maximalRectangle(self, matrix):
        """
        :type matrix: List[List[str]]
        :rtype: int

        DP over a '0'/'1' character matrix:
          dp1[i][j] -- length of the run of '1's ending at (i, j) going left
          dp3[i][j] -- length of the run of '1's ending at (i, j) going up
          dp2[i][j] -- one [length, width] candidate rectangle of '1's with
                       bottom-right corner at (i, j)
        NOTE(review): dp2 keeps only a single greedy candidate per cell
        (best of: the two runs, or extending a neighbour's candidate) rather
        than all candidates -- confirm this covers every input before
        relying on it; the standard solution uses a histogram per row.
        """
        m = len(matrix)
        if not m:
            return 0
        n = len(matrix[0])
        if not n:
            return 0
        dp1 = [ [0] * n for _ in range(m)] # 1s to the left of matrix[i][j]
        dp3 = [ [0] * n for _ in range(m)] # 1s above matrix[i][j]
        dp2 = [ [ [0, 0] for _ in range(n)] for _ in range(m)]
        # Seed the horizontal and vertical run counts on the borders.
        for i in range(m):
            dp1[i][0] = int(matrix[i][0])
        for j in range(n):
            dp3[0][j] = int(matrix[0][j])
        for i in range(m):
            for j in range(1, n):
                if matrix[i][j] == '1':
                    dp1[i][j] = dp1[i][j-1] + 1
        for i in range(1, m):
            for j in range(n):
                if matrix[i][j] == '1':
                    dp3[i][j] = dp3[i-1][j] + 1
        # Candidate rectangles along the top row and the left column.
        for j in range(n):
            if matrix[0][j] == '1':
                dp2[0][j] = [dp1[0][j], 1]
        for i in range(1, m):
            if matrix[i][0] == '1':
                dp2[i][0] = [1, dp3[i][0]]
        for i in range(1, m):
            for j in range(1, n):
                if matrix[i][j] == '1':
                    l1 = dp1[i][j]
                    w3 = dp3[i][j]
                    # Start from the better of the pure-horizontal and
                    # pure-vertical runs through (i, j).
                    if l1 >= w3:
                        pair = [l1, 1]
                    else:
                        pair = [1, w3]
                    # Try growing the candidate above downward by one row...
                    if matrix[i-1][j] == '1':
                        l2, w = dp2[i-1][j]
                        tmp = [min(l1, l2), w+1]
                        if tmp[0] * tmp[1] > pair[0]*pair[1]:
                            pair = tmp
                    # ...or the candidate to the left rightward by one column.
                    if matrix[i][j-1] == '1':
                        l2, w = dp2[i][j-1]
                        tmp = [l2+1, min(w, w3)]
                        if tmp[0] * tmp[1] > pair[0]*pair[1]:
                            pair = tmp
                    dp2[i][j] = pair
        # The answer is the largest area among the per-cell candidates.
        ans = 0
        for i in range(m):
            for j in range(n):
                pix = dp2[i][j]
                ans = max(ans, pix[0]*pix[1])
        return ans
|
[
"binchen.devops@gmail.com"
] |
binchen.devops@gmail.com
|
ca423a2221899c04d07db89f0d428d2ed0c60766
|
fc1141aabffe60455898b014fd8b4a2e8307ce85
|
/chapter6_other_flowables/preformatted_paragraph.py
|
66bc96289ad24dfb976a98fd125ed48aac64bf70
|
[] |
no_license
|
Karagul/reportlabbookcode
|
b5bff1609d62fe2bcfb17bfd7b65777121ac175c
|
e271348d5562f4842b9d1628ef917539a8ebcd5d
|
refs/heads/master
| 2020-09-21T14:58:43.427964
| 2018-12-19T17:40:46
| 2018-12-19T17:40:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 826
|
py
|
# preformatted_paragraph.py
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph
from reportlab.platypus import Preformatted
from reportlab.lib.styles import getSampleStyleSheet
def preformatted_paragraph():
    """Write preformatted_paragraph.pdf contrasting Paragraph and Preformatted."""
    document = SimpleDocTemplate("preformatted_paragraph.pdf",
                                 pagesize=letter)
    stylesheet = getSampleStyleSheet()
    # Build the story in one literal instead of appending flowable by flowable.
    story = [
        Paragraph("<para align=center>Hello, I'm a Paragraph</para>",
                  style=stylesheet["Normal"]),
        Preformatted("<para align=center>Hello, I'm a Preformatted Paragraph</para>",
                     style=stylesheet["Code"]),
    ]
    document.build(story)
# Allow running this example directly as a script.
if __name__ == '__main__':
    preformatted_paragraph()
|
[
"mike@pythonlibrary.org"
] |
mike@pythonlibrary.org
|
735f1cdb82cf0021cd92e7edd13b73b2b9aab014
|
48a522b031d45193985ba71e313e8560d9b191f1
|
/programmers/python/최솟값_만들기.py
|
a05410d4330c44680abcdf51a31bd1a2a893f064
|
[] |
no_license
|
dydwnsekd/coding_test
|
beabda0d0aeec3256e513e9e0d23b43debff7fb3
|
4b2b4878408558239bae7146bb4f37888cd5b556
|
refs/heads/master
| 2023-09-04T12:37:03.540461
| 2023-09-03T15:58:33
| 2023-09-03T15:58:33
| 162,253,096
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
# https://programmers.co.kr/learn/courses/30/lessons/12941
def solution(A, B):
    """Return the minimal sum of pairwise products of A's and B's elements.

    Greedy: pair the smallest remaining element of A with the largest
    remaining element of B. Unlike the original, this uses sorted() copies
    so the caller's lists are no longer mutated in place.
    """
    return sum(a * b for a, b in zip(sorted(A), sorted(B, reverse=True)))
|
[
"dydwnsekd123@gmail.com"
] |
dydwnsekd123@gmail.com
|
363abc4b7429936fa87f6e77f0217f5c4ee8c069
|
9d1701a88644663277342f3a12d9795cd55a259c
|
/CSC148/test1 review/past test1/winter2014/swap.py
|
890f0b0ad23947deba362c3189245991f4653e5f
|
[] |
no_license
|
xxcocoymlxx/Study-Notes
|
cb05c0e438b0c47b069d6a4c30dd13ab97e4ee6d
|
c7437d387dc2b9a8039c60d8786373899c2e28bd
|
refs/heads/master
| 2023-01-13T06:09:11.005038
| 2020-05-19T19:37:45
| 2020-05-19T19:37:45
| 252,774,764
| 2
| 0
| null | 2022-12-22T15:29:26
| 2020-04-03T15:44:44
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 542
|
py
|
from stack import Stack
class StackException(Exception):
    """Raised when a stack operation needs more items than are present."""
def swap_top(s: Stack) -> None:
    '''Swap the top two elements of s.

    If there are fewer than two items on the stack,
    the stack is unchanged and a StackException is raised.

    >>> s = Stack()
    >>> s.push(1)
    >>> s.push(2)
    >>> swap_top(s)
    >>> s.pop()
    1
    '''
    if s.is_empty():
        raise StackException
    top = s.pop()
    if s.is_empty():
        # Restore the stack before reporting the error.
        s.push(top)
        raise StackException
    below = s.pop()
    s.push(top)
    s.push(below)
|
[
"coco.yang@mail.utoronto.ca"
] |
coco.yang@mail.utoronto.ca
|
9cc221a21f3e344d6a27b535cc61fff49468ee85
|
77ab53380f74c33bb3aacee8effc0e186b63c3d6
|
/5425_max_area.py
|
60293ac4a351d8aa1d4077b088a3c24c602195f0
|
[] |
no_license
|
tabletenniser/leetcode
|
8e3aa1b4df1b79364eb5ca3a97db57e0371250b6
|
d3ebbfe2e4ab87d5b44bc534984dfa453e34efbd
|
refs/heads/master
| 2023-02-23T18:14:31.577455
| 2023-02-06T07:09:54
| 2023-02-06T07:09:54
| 94,496,986
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,282
|
py
|
'''
5425. Maximum Area of a Piece of Cake After Horizontal and Vertical Cuts
User Accepted:0
User Tried:0
Total Accepted:0
Total Submissions:0
Difficulty:Medium
Given a rectangular cake with height h and width w, and two arrays of integers horizontalCuts and verticalCuts where horizontalCuts[i] is the distance from the top of the rectangular cake to the ith horizontal cut and similarly, verticalCuts[j] is the distance from the left of the rectangular cake to the jth vertical cut.
Return the maximum area of a piece of cake after you cut at each horizontal and vertical position provided in the arrays horizontalCuts and verticalCuts. Since the answer can be a huge number, return this modulo 10^9 + 7.
Input: h = 5, w = 4, horizontalCuts = [1,2,4], verticalCuts = [1,3]
Output: 4
Explanation: The figure above represents the given rectangular cake. Red lines are the horizontal and vertical cuts. After you cut the cake, the green piece of cake has the maximum area.
Example 2:
Input: h = 5, w = 4, horizontalCuts = [3,1], verticalCuts = [1]
Output: 6
Explanation: The figure above represents the given rectangular cake. Red lines are the horizontal and vertical cuts. After you cut the cake, the green and yellow pieces of cake have the maximum area.
Example 3:
Input: h = 5, w = 4, horizontalCuts = [3], verticalCuts = [3]
Output: 9
Constraints:
2 <= h, w <= 10^9
1 <= horizontalCuts.length < min(h, 10^5)
1 <= verticalCuts.length < min(w, 10^5)
1 <= horizontalCuts[i] < h
1 <= verticalCuts[i] < w
It is guaranteed that all elements in horizontalCuts are distinct.
It is guaranteed that all elements in verticalCuts are distinct.
'''
class Solution:
    def maxArea(self, h: int, w: int, horizontalCuts, verticalCuts) -> int:
        """Return the largest piece area after all cuts, modulo 10**9 + 7.

        The largest piece is the widest gap between adjacent horizontal cut
        positions (including the cake edges 0 and h) times the widest gap
        between adjacent vertical cut positions (including 0 and w).

        Note: appends the cake edge (h / w) to the input lists, preserving
        the original implementation's side effect on the caller's lists.
        """
        MOD = 10 ** 9 + 7
        horizontalCuts.append(h)
        verticalCuts.append(w)
        horizontalCuts.sort()
        verticalCuts.sort()

        def _max_gap(cuts):
            # Largest distance between consecutive cut positions, starting
            # from edge 0 (cuts already include the far edge and are sorted).
            best, prev = 0, 0
            for cut in cuts:
                best = max(best, cut - prev)
                prev = cut
            return best

        # Bug fix: the problem requires the answer modulo 10**9 + 7; the
        # original returned the raw product, which is wrong once h and w
        # approach their 10**9 bound.
        return _max_gap(horizontalCuts) * _max_gap(verticalCuts) % MOD
return max_h*max_w
# Ad-hoc driver: exercise example 2 from the problem statement
# (h=5, w=4, horizontal cuts [3,1], vertical cut [1]; expected output: 6).
solver = Solution()
print(solver.maxArea(5, 4, [3, 1], [1]))
|
[
"tabletenniser@gmail.com"
] |
tabletenniser@gmail.com
|
db562c684f9e79cdfd86e2cf62911028da643d8b
|
cd14295215c9ab48012e2756c842be682ae3bd38
|
/med/haodf_doctor/jibing_list_parser.py
|
ff0e88aced030940b3dfadbb5b5bb31959e4fba9
|
[] |
no_license
|
vertigo235/crawlers
|
bbe3667164b4f65474b9e0e14f484c7add9404f9
|
1911a28803b2e8e1c46061c27c980a985eb703e6
|
refs/heads/master
| 2021-09-02T19:57:06.094505
| 2017-01-23T02:42:24
| 2017-01-23T02:42:24
| 115,954,916
| 0
| 0
| null | 2018-01-01T23:12:31
| 2018-01-01T23:12:31
| null |
UTF-8
|
Python
| false
| false
| 1,223
|
py
|
#!/usr/bin/python
# encoding: utf-8
import re
import sys
import requests
from bs4 import BeautifulSoup
from med.haodf_doctor import db
reload(sys)
sys.setdefaultencoding('utf8')
class JibingListParser:
    # Scrapes one Haodf disease ("jibing") listing page and inserts each
    # disease slug into the `jibing` DB table. Python 2 code (print
    # statements); relies on the project-local `db` helper module.
    def __init__(self, url, section):
        # url: listing page to fetch.
        # section: category label -- stored but not read in run();
        # presumably kept for callers. TODO confirm it is still needed.
        self.url = url
        self.section = section
        # Browser-like User-Agent so the site serves the normal page.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36',
        }
    def run(self):
        # Returns True if the page was processed (or already done),
        # False if the page yielded no disease links.
        if db.get_url(self.url) is not None:
            # URL already recorded: this page was crawled before.
            print 'section ' + self.url + ' exists'
            return True
        content = requests.get(self.url, headers=self.headers).text
        soup = BeautifulSoup(content, 'lxml')
        # Disease links live inside the green content boxes.
        results = soup.select('.m_ctt_green a')
        if len(results) == 0:
            return False
        sql = "INSERT INTO jibing (`name`) VALUE (%s)"
        for row in results:
            href = row.attrs['href']
            # Extract the slug from hrefs of the form '<slug>.htm'.
            jibing = re.compile(r'(\w+)\.htm').findall(href)[0]
            print jibing
            try:
                db.execute(sql, [jibing])
            except Exception:
                # Best-effort insert: duplicates/DB errors are skipped.
                continue
        # Mark the page as crawled only after processing all links.
        db.save_url(self.url)
        return True
|
[
"chxj1992@gmail.com"
] |
chxj1992@gmail.com
|
02436cdd33f146350f128afe1498191a9d91a8ce
|
5a45df3d23d5fecc169a8395034cca9e0497fa94
|
/scripts/convert_deps.py
|
3ff157e0a0d7ba1f23d358b9b7aadeac254ffe84
|
[
"BSD-3-Clause"
] |
permissive
|
IHackPy/pandas
|
68fe97d687b2bdbd3ad9fb84872c899593d77997
|
0a0c1b4a76f718599346e4aa5c9b88140efa7b9c
|
refs/heads/master
| 2021-09-24T14:44:01.266388
| 2018-10-10T16:57:36
| 2018-10-10T16:57:36
| 124,025,370
| 2
| 0
|
BSD-3-Clause
| 2018-10-10T16:57:38
| 2018-03-06T05:24:36
|
Python
|
UTF-8
|
Python
| false
| false
| 978
|
py
|
"""
Convert the conda environment.yaml to a pip requirements.txt
"""
import re
import yaml
exclude = {'python=3'}
rename = {'pytables': 'tables'}
with open("ci/environment-dev.yaml") as f:
dev = yaml.load(f)
with open("ci/requirements-optional-conda.txt") as f:
optional = [x.strip() for x in f.readlines()]
required = dev['dependencies']
required = [rename.get(dep, dep) for dep in required if dep not in exclude]
optional = [rename.get(dep, dep) for dep in optional if dep not in exclude]
optional = [re.sub("(?<=[^<>])=", '==', dep) for dep in optional]
with open("ci/requirements_dev.txt", 'wt') as f:
f.write("# This file was autogenerated by scripts/convert_deps.py\n")
f.write("# Do not modify directly\n")
f.write('\n'.join(required))
with open("ci/requirements-optional-pip.txt", 'wt') as f:
f.write("# This file was autogenerated by scripts/convert_deps.py\n")
f.write("# Do not modify directly\n")
f.write("\n".join(optional))
|
[
"jeff@reback.net"
] |
jeff@reback.net
|
1fd9cb664f5134da8db15234569fa205c72d68cd
|
6e43937c521b841595fbe7f59268ffc72dfefa9d
|
/GSP_WEB/models/Rules_Profile.py
|
05a544c4b0aea61d71c641a0c7dd1bae6331d794
|
[] |
no_license
|
MiscCoding/gsp_web
|
a5e50ce7591157510021cae49c6b2994f4eaabbe
|
a24e319974021ba668c5f8b4000ce96d81d1483e
|
refs/heads/master
| 2020-03-28T15:11:30.301700
| 2019-08-12T04:47:42
| 2019-08-12T04:47:42
| 148,565,440
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,491
|
py
|
import datetime
from GSP_WEB import db
from GSP_WEB.common.encoder.alchemyEncoder import Serializer
from GSP_WEB.models.CommonCode import CommonCode
from GSP_WEB.models.Rules_Profile_Group import Rules_Profile_Group
class Rules_Profile(db.Model, Serializer):
    """ORM model for a rule profile: a named pattern with UI, query, and
    operation representations, optionally attached to a profile group."""
    __table_args__ = {"schema": "GSP_WEB"}
    __tablename__ = 'Rules_Profile'

    # region parameter input
    seq = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(500))
    description = db.Column(db.String(2000))
    pattern_ui = db.Column(db.String(2000))
    pattern_query = db.Column(db.String(2000))
    pattern_operation = db.Column(db.String(2000))
    # Bug fix: pass the callable, not its result. `datetime.datetime.now()`
    # was evaluated once at import time, stamping every row with the process
    # start time instead of the actual insert time.
    cre_dt = db.Column(db.DateTime, default=datetime.datetime.now)
    del_yn = db.Column(db.String(1), default='N')  # soft-delete flag ('Y'/'N')
    group_code = db.Column(db.Integer)  # FK-like reference to Rules_Profile_Group.seq

    def __init__(self):
        return

    def __repr__(self):
        return '<Rules_Profile %r>' % (self.seq)

    def serialize(self):
        """Serialize to a dict, formatting cre_dt as YYYY-MM-DD and resolving
        group_code to the group's display name ('' when ungrouped)."""
        d = Serializer.serialize(self)
        d['cre_dt'] = self.cre_dt.strftime("%Y-%m-%d")
        if self.group_code is not None:
            group = Rules_Profile_Group.query.filter_by(seq=self.group_code).first()
            d['group_name'] = group.name
        else:
            d['group_name'] = ''
        #del d['pattern_ui']
        return d

    @property
    def search_tag_list(self):
        # NOTE(review): no `search_tag` column is declared on this model, so
        # this property raises AttributeError unless the attribute is set
        # elsewhere -- confirm against callers before relying on it.
        if (self.search_tag is not None):
            return self.search_tag.split(',')
        else:
            return ''
|
[
"neogeo-s@hanmail.net"
] |
neogeo-s@hanmail.net
|
098a9bcd210e2cd68504ce525cfb2dde7b1c958b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03103/s952732705.py
|
6cc3a3a2c52754509a032f9bba9419110d4a1551
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 608
|
py
|
import sys

sys.setrecursionlimit(10 ** 8)


def read_ints():
    """Read one line from stdin as a stream of ints."""
    return map(int, sys.stdin.readline().split())


# N shops, buy at most M cans total.
N, M = read_ints()
# (price per can, cans available) per shop, cheapest first.
prices = sorted(tuple(read_ints()) for _ in range(N))


def solve():
    """Greedily buy up to M cans from the cheapest shops; return total cost."""
    bought = 0
    total = 0
    for price, stock in prices:
        take = min(stock, M - bought)
        total += take * price
        bought += take
        if bought >= M:
            break
    return total


print(solve())
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
55425031853ed25baa8a4ce17c10bf59652f7cf1
|
1fcdccf5d651b60bfe906f2ddafd6745f4e29860
|
/nufeeb.button/finance/test_paynoteCase.py
|
7b0d9c5876534caf09256eb3d4ed432dffe7a1bb
|
[] |
no_license
|
LimXS/workspace
|
6728d6517a764ef2ac8d47fe784c4dba937a1f1d
|
9669d653f4a7723947da645de526f4c580ddc88b
|
refs/heads/master
| 2021-01-21T06:39:14.126933
| 2017-04-14T03:24:36
| 2017-04-14T03:24:36
| 83,257,374
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,145
|
py
|
#*-* coding:UTF-8 *-*
import time
import re
import datetime
import unittest
import xml.dom.minidom
import traceback
import requests
import json
from common import browserClass
browser=browserClass.browser()
class paynoteTest(unittest.TestCase):
    u'''Finance - payment voucher UI test (Python 2 / Selenium).'''
    def setUp(self):
        # Launch Chrome and log in via the shared browser helper, then
        # capture the session cookies so direct HTTP calls reuse the session.
        self.driver=browser.startBrowser('chrome')
        browser.set_up(self.driver)
        cookie = [item["name"] + "=" + item["value"] for item in self.driver.get_cookies()]
        #print cookie
        self.cookiestr = ';'.join(item for item in cookie)
        time.sleep(2)
        pass
    def tearDown(self):
        print "test over"
        self.driver.close()
        pass
    def test_payNote(self):
        u'''Finance - payment voucher: fill in and save a payment note.'''
        # The retail stock-out order test must run first (original note).
        header={'cookie':self.cookiestr,"Content-Type": "application/json"}
        # Element locators are kept in local XML files per module.
        comdom=xml.dom.minidom.parse(r'C:\workspace\nufeeb.button\data\commonlocation')
        dom = xml.dom.minidom.parse(r'C:\workspace\nufeeb.button\finance\financelocation')
        dom2 = xml.dom.minidom.parse(r'C:\workspace\nufeeb.button\stock\stocklocation')
        module=browser.xmlRead(dom,'module',0)
        moduledetail=browser.xmlRead(dom,'moduledetail',6)
        browser.openModule2(self.driver,module,moduledetail)
        # Page id
        pageurl=browser.xmlRead(dom,"payurl",0)
        pageid=browser.getalertid(pageurl,header)
        #print pageid
        commid=browser.getallcommonid(comdom)
        try:
            # Payment account name
            itemgrid=browser.xmlRead(dom2,"itemgrid",0)
            payxpath=commid["basetype"]+pageid+itemgrid+str(4)+"]"
            browser.findXpath(self.driver,payxpath).click()
            paygridid=pageid+commid["grid_fullname"]
            whichjs="$(\"div[class=GridBodyCellText]:contains('全部银行存款')\").attr(\"id\",\"allbankacc\")"
            browser.nebecompany(self.driver,paygridid,whichjs)
            # Amount
            paymonxpath=commid["basetype"]+pageid+itemgrid+str(5)+"]"
            browser.findXpath(self.driver,paymonxpath).click()
            pamonid=pageid+commid["grid_total"]
            browser.findId(self.driver,pamonid).send_keys("8.88")
            # Payee company
            payid=pageid+browser.xmlRead(dom2,'edBType',0)
            #print payid
            browser.delaytime(3,self.driver)
            browser.nebecompany(self.driver,payid)
            # Handler (responsible person)
            peoid=pageid+browser.xmlRead(dom2,'edEType',0)
            browser.peoplesel(self.driver,peoid,1)
            # Department
            depid=pageid+browser.xmlRead(dom2,'edDept',0)
            browser.passpeople(self.driver,depid)
            # Summary
            summid=pageid+browser.xmlRead(dom2,'edSummary',0)
            browser.findId(self.driver,summid).send_keys(u"finance paynote summary中文蘩軆饕餮!@#¥%……&*()?; 。.")
            # Additional notes
            commentid=pageid+browser.xmlRead(dom2,'edComment',0)
            browser.findId(self.driver,commentid).send_keys(u"中文蘩軆饕餮!@#¥%……&*()?; 。.finance paynote commentid")
            # Configuration >>
            configid=pageid+browser.xmlRead(dom2,'btnMore',0)
            jsentype="$(\"td[class=MenuCaption]:contains('结算方式配置')\").last().click()"
            jsconbil="$(\"td[class=MenuCaption]:contains('录单配置')\").last().click()"
            browser.contype(self.driver,configid,jsentype,jsconbil)
            # Save and exit
            saexid=pageid+commid["selclose"]
            browser.delaytime(1)
            browser.savedraftexit(self.driver,saexid,payxpath,paygridid,1)
            browser.openModule2(self.driver,module,moduledetail)
        except:
            # On any failure, dump the traceback and save a screenshot.
            print traceback.format_exc()
            filename=browser.xmlRead(dom,'filename',0)
            #print filename+u"常用-单据草稿.png"
            #browser.getpicture(self.driver,filename+u"notedraft.png")
            browser.getpicture(self.driver,filename+u"财务-付款单.png")
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
[
"xsx2018@yahoo.com"
] |
xsx2018@yahoo.com
|
084704ff72a4087136cdc26b7ef9452bfde9172a
|
bfaf64c553eb43684970fb3916eedaafbecf0506
|
/Player/set10/odd_sum.py
|
cac3e4392f920356746d1a76fcc65f7a3dfa9430
|
[] |
no_license
|
santhoshbabu4546/GUVI-9
|
879e65df0df6fafcc07166b2eaecf676ba9807a2
|
b9bfa4b0fa768e70c8d3f40b11dd1bcc23692a49
|
refs/heads/master
| 2022-01-24T15:22:34.457564
| 2019-07-21T14:20:35
| 2019-07-21T14:20:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
# Read [start, stop] from one input line; print the sum of odd integers
# in the half-open range [start, stop).
values = [int(token) for token in input().split()]
first_odd = values[0] if values[0] % 2 else values[0] + 1
print(sum(range(first_odd, values[1], 2)))
|
[
"noreply@github.com"
] |
santhoshbabu4546.noreply@github.com
|
34af985ebd1a1b309b782852a6283a23f119881f
|
21fec19cb8f74885cf8b59e7b07d1cd659735f6c
|
/chapter_13/downloadflat_modular.py
|
d503d2092813c06d23edd5a97edb3ee6bca8861d
|
[
"MIT"
] |
permissive
|
bimri/programming_python
|
ec77e875b9393179fdfb6cbc792b3babbdf7efbe
|
ba52ccd18b9b4e6c5387bf4032f381ae816b5e77
|
refs/heads/master
| 2023-09-02T12:21:11.898011
| 2021-10-26T22:32:34
| 2021-10-26T22:32:34
| 394,783,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,428
|
py
|
"Refactoring Uploads and Downloads for Reuse"
'Refactoring with functions'
#!/usr/bin/env python
"""
##############################################################################
use FTP to copy (download) all files from a remote site and directory
to a directory on the local machine; this version works the same, but has
been refactored to wrap up its code in functions that can be reused by the
uploader, and possibly other programs in the future - else code redundancy,
which may make the two diverge over time, and can double maintenance costs.
##############################################################################
"""
import os, sys, ftplib
from getpass import getpass
from mimetypes import guess_type, add_type
defaultSite = 'home.rmi.net'
defaultRdir = '.'
defaultUser = 'lutz'
def configTransfer(site=defaultSite, rdir=defaultRdir, user=defaultUser):
    """
    Collect upload/download parameters interactively.

    A throwaway class is used as a simple namespace because there are too
    many settings to pass around individually.
    """
    class cf: pass
    cf.nonpassive = False                 # passive FTP is the default in 2.1+
    cf.remotesite = site                  # site to transfer to/from
    cf.remotedir = rdir                   # remote directory ('.' = account root)
    cf.remoteuser = user
    # First command-line argument, if any, names the local directory.
    cf.localdir = (sys.argv[1] or '.') if len(sys.argv) > 1 else '.'
    answer = input('Clean target directory first? ')
    cf.cleanall = answer[:1] in ['y', 'Y']
    cf.remotepass = getpass(
        'Password for %s on %s:' % (cf.remoteuser, cf.remotesite))
    return cf
def isTextKind(remotename, trace=True):
    """
    Guess from the filename whether the file should transfer as text.

    'f.html'   -> ('text/html', None)     : text
    'f.jpeg'   -> ('image/jpeg', None)    : binary
    'f.txt.gz' -> ('text/plain', 'gzip')  : binary (compressed)
    unknown    -> (None, None)            : binary
    """
    add_type('text/x-python-win', '.pyw')             # missing from the tables
    mimetype, encoding = guess_type(remotename, strict=False)  # allow extras
    if mimetype is None:
        mimetype = '?/?'                              # type unknown
    maintype = mimetype.partition('/')[0]             # first component
    if trace:
        print(maintype, encoding or '')
    # Text only when the main type is text and there is no compression layer.
    return maintype == 'text' and encoding is None
def connectFtp(cf):
    """Connect and log in to the remote FTP site described by cf.

    Returns an ftplib.FTP object already cd'ed into cf.remotedir.
    """
    print('connecting...')
    ftp = ftplib.FTP(cf.remotesite)          # connect to the FTP site
    ftp.login(cf.remoteuser, cf.remotepass)  # authenticate as user/password
    ftp.cwd(cf.remotedir)                    # move to the transfer directory
    if cf.nonpassive:
        # Most servers default to passive mode; force active when asked.
        ftp.set_pasv(False)
    return ftp
def cleanLocals(cf):
    """
    If cf.cleanall is set, delete every file in cf.localdir first so no
    stale local files survive the download pass.
    """
    if not cf.cleanall:
        return
    for localname in os.listdir(cf.localdir):          # local dir listing
        target = os.path.join(cf.localdir, localname)
        try:
            print('deleting local', localname)
            os.remove(target)
        except OSError:
            # Narrowed from a bare except: only filesystem errors (e.g. the
            # entry is a directory, or a permission problem) are expected
            # here; anything else now propagates instead of being swallowed.
            print('cannot delete local', localname)
def downloadAll(cf, connection):
    """
    download all files from remote site/dir per cf config
    ftp nlst() gives files list, dir() gives full details

    Files whose name looks like text (per isTextKind) transfer line-by-line
    in text mode using the connection's encoding; everything else transfers
    in binary mode. The FTP connection is closed before returning.
    """
    remotefiles = connection.nlst()                    # nlst is remote listing
    for remotename in remotefiles:
        if remotename in ('.', '..'): continue         # skip directory entries
        localpath = os.path.join(cf.localdir, remotename)
        print('downloading', remotename, 'to', localpath, 'as', end=' ')
        if isTextKind(remotename):
            # use text mode xfer; callback closes over the open local file
            localfile = open(localpath, 'w', encoding=connection.encoding)
            def callback(line): localfile.write(line + '\n')
            connection.retrlines('RETR ' + remotename, callback)
        else:
            # use binary mode xfer
            localfile = open(localpath, 'wb')
            connection.retrbinary('RETR ' + remotename, localfile.write)
        localfile.close()
    # NOTE(review): quitting here means the caller cannot reuse the
    # connection after one download pass -- confirm that is intended.
    connection.quit()
    print('Done:', len(remotefiles), 'files downloaded.')
if __name__ == '__main__':
cf = configTransfer()
conn = connectFtp(cf)
cleanLocals(cf) # don't delete if can't connect
downloadAll(cf, conn)
|
[
"bimri@outlook.com"
] |
bimri@outlook.com
|
f36324c40758f9f90f0d236308b012e5b49fca9a
|
2710355e4f7d3373117b9068c720047820d6f83b
|
/toucans/settings/storage_backends.py
|
5f76be51b68a04842864f4dd1a03638691a4fc94
|
[
"MIT"
] |
permissive
|
davidjrichardson/toucans
|
a28901b62dbfc4a2c8c4edc425ded0174909be3e
|
2c6c216a0e8d23a97f55a973bfe01d8f386ed6d1
|
refs/heads/main
| 2023-08-24T01:02:10.439566
| 2023-06-21T19:48:15
| 2023-06-21T19:48:15
| 156,083,027
| 2
| 0
|
MIT
| 2023-07-19T22:59:05
| 2018-11-04T13:06:53
|
Python
|
UTF-8
|
Python
| false
| false
| 487
|
py
|
from storages.backends.s3boto3 import S3Boto3Storage
from storages.utils import setting
class StaticStorage(S3Boto3Storage):
    # Collected static assets, served world-readable from the bucket's
    # 'static/' prefix via the custom CDN domain.
    location = 'static'
    default_acl = 'public-read'
    custom_domain=f'{setting("AWS_S3_CUSTOM_DOMAIN")}/{setting("AWS_STORAGE_BUCKET_NAME")}'
class MediaStorage(S3Boto3Storage):
    # User-uploaded media, served world-readable from the bucket's
    # 'media/' prefix via the custom CDN domain.
    location = 'media'
    default_acl = 'public-read'
    custom_domain=f'{setting("AWS_S3_CUSTOM_DOMAIN")}/{setting("AWS_STORAGE_BUCKET_NAME")}'
    # Keep distinct generated names instead of overwriting on name collision.
    file_overwrite = False
|
[
"david@tankski.co.uk"
] |
david@tankski.co.uk
|
9c6b47bff504e60100554e078d241329bf2401b9
|
a3385f7636ceb232e97ae30badee0ba9145138f8
|
/egs/yomdle_tamil/v1/local/yomdle/normalized_scoring/utils/insert_empty_hyp.py
|
fa9e51e38fc8e17241088333d35bf49e3a27dd14
|
[
"Apache-2.0"
] |
permissive
|
samsucik/prosodic-lid-globalphone
|
b6a6ccdcece11d834fc89abaa51031fc9f9e37e1
|
ca6a8e855441410ab85326d27b0f0076d48d3f33
|
refs/heads/master
| 2022-11-29T09:17:24.753115
| 2021-02-03T18:17:16
| 2021-02-03T18:17:16
| 149,014,872
| 3
| 2
|
Apache-2.0
| 2022-09-23T22:17:01
| 2018-09-16T16:38:53
|
Shell
|
UTF-8
|
Python
| false
| false
| 947
|
py
|
#!/usr/bin/env python3
""" This script adds ids with empty utterance. It is used during scoring
in cases where some of the reference ids are missing in the hypothesis.
 Eg. insert_empty_hyp.py <ids-to-insert> <in-hyp-file> <out-hyp-file>
"""
import sys
# NOTE(review): SnorIter appears unused in this script -- confirm before
# removing the import.
from snor import SnorIter

# Validate the command line before doing any I/O.
if len(sys.argv) != 4:
    print("Usage: insert_empty_hyp.py <ids-to-insert> <in-hyp-file> <out-hyp-file>")
    sys.exit(1)

ids_file = sys.argv[1]      # one utterance id per line, to append as empty hyps
hyp_in_file = sys.argv[2]   # existing hypothesis file (copied verbatim)
hyp_out_file = sys.argv[3]  # output: input hyps + empty entries for the ids

def main():
    # Copy the input hypotheses verbatim, then append one empty-utterance
    # entry "(<uttid>)" per missing id.
    with open(hyp_in_file, 'r', encoding='utf-8') as hyp_in_fh, open(hyp_out_file, 'w', encoding='utf-8') as hyp_out_fh, open(ids_file, 'r') as ids_fh:
        # First just copy input hyp file over
        for line in hyp_in_fh:
            hyp_out_fh.write(line)

        # Now add missing ids
        for line in ids_fh:
            uttid = line.strip()
            hyp_out_fh.write("(%s)\n" % uttid)

if __name__ == "__main__":
    main()
|
[
"s1531206@ed.ac.uk"
] |
s1531206@ed.ac.uk
|
59afd2d55b472e87a9346c8c4fa47a53e678e473
|
bfce2d5ae36e410bfd10fc98c2d6ea594f51cc81
|
/tests/test_backup.py
|
b6f0666dfc29ee49429269d56d9f30388e24d0d5
|
[
"Apache-2.0"
] |
permissive
|
simonw/datasette-backup
|
1ce4ddba3fd87a03c68064637a72851dba468264
|
b69eb953c480a0de189cb532aaf699d7cb831f47
|
refs/heads/main
| 2022-12-11T14:05:26.501414
| 2020-09-07T02:26:42
| 2020-09-07T02:26:42
| 293,164,447
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,493
|
py
|
from datasette.app import Datasette
import pytest
import sqlite_utils
import sqlite3
import textwrap
import httpx
@pytest.fixture(scope="session")
def ds(tmp_path_factory):
db_directory = tmp_path_factory.mktemp("dbs")
db_path = db_directory / "test.db"
db = sqlite_utils.Database(db_path)
db["dogs"].insert_all(
[{"id": 1, "name": "Cleo", "age": 5}, {"id": 2, "name": "Pancakes", "age": 4}],
pk="id",
)
return Datasette([str(db_path)])
@pytest.mark.asyncio
async def test_plugin_is_installed():
    # The plugin should self-register and show up in /-/plugins.json.
    app = Datasette([], memory=True).app()
    async with httpx.AsyncClient(app=app) as client:
        response = await client.get("http://localhost/-/plugins.json")
        assert 200 == response.status_code
        installed_plugins = {p["name"] for p in response.json()}
        assert "datasette-backup" in installed_plugins
@pytest.mark.asyncio
async def test_backup_sql(ds):
    # /-/backup/<db>.sql should 404 for unknown databases and emit a full
    # SQL dump (schema + INSERTs in one transaction) for known ones.
    async with httpx.AsyncClient(app=ds.app()) as client:
        assert (
            await client.get("http://localhost/-/backup/nope.sql")
        ).status_code == 404
        response = await client.get("http://localhost/-/backup/test.sql")
        assert response.status_code == 200
        assert (
            response.text.strip()
            == textwrap.dedent(
                """
    BEGIN TRANSACTION;
    CREATE TABLE IF NOT EXISTS [dogs] (
       [id] INTEGER PRIMARY KEY,
       [name] TEXT,
       [age] INTEGER
    );
    INSERT INTO "dogs" VALUES(1,'Cleo',5);
    INSERT INTO "dogs" VALUES(2,'Pancakes',4);
    COMMIT;
    """
            ).strip()
        )
@pytest.mark.asyncio
async def test_backup_sql_fts(tmpdir):
    # A dump of a database with an FTS index must restore to a database
    # whose FTS virtual table is intact and detectable.
    db_path = str(tmpdir / "fts.db")
    db = sqlite_utils.Database(db_path)
    db["dogs"].insert_all(
        [{"id": 1, "name": "Cleo", "age": 5}, {"id": 2, "name": "Pancakes", "age": 4}],
        pk="id",
    )
    db["dogs"].enable_fts(["name"])
    ds = Datasette([db_path])
    async with httpx.AsyncClient(app=ds.app()) as client:
        response = await client.get("http://localhost/-/backup/fts.sql")
        assert response.status_code == 200
    # Round-trip: replay the dump into a fresh database and verify the index.
    restore_db_path = str(tmpdir / "restore.db")
    sqlite3.connect(restore_db_path).executescript(response.text)
    restore_db = sqlite_utils.Database(restore_db_path)
    assert restore_db["dogs"].detect_fts() == "dogs_fts"
    assert restore_db["dogs_fts"].schema.startswith(
        "CREATE VIRTUAL TABLE [dogs_fts] USING FTS"
    )
|
[
"swillison@gmail.com"
] |
swillison@gmail.com
|
5408a1ccd1e327fe36da7283fa3e809507856721
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-libdispatch/PyObjCTest/test_base.py
|
85cf9ec761ff0b1d92241fae7f27b1101baf4aa2
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 190
|
py
|
import dispatch
from PyObjCTools.TestSupport import TestCase
class TestBase(TestCase):
    """Sanity checks for the libdispatch bindings."""

    def test_constants(self):
        # The Swift-3 overlay flag must not be exposed by the module.
        overlay_present = hasattr(dispatch, "DISPATCH_SWIFT3_OVERLAY")
        self.assertFalse(overlay_present)
|
[
"ronaldoussoren@mac.com"
] |
ronaldoussoren@mac.com
|
d2fd77c15781a7970da80298e40848fdeb23cdb0
|
ba3231b25c60b73ca504cd788efa40d92cf9c037
|
/nitro-python-13.0.36/nssrc/com/citrix/netscaler/nitro/resource/config/urlfiltering/urlfilteringparameter.py
|
e4193fa98c35cacbbe0bd6c2dd0248e9f302d921
|
[
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
zhuweigh/vpx13
|
f6d559ae85341e56472e3592cbc67062dac34b93
|
b36caa3729d3ca5515fa725f2d91aeaabdb2daa9
|
refs/heads/master
| 2020-07-04T22:15:16.595728
| 2019-09-20T00:19:56
| 2019-09-20T00:19:56
| 202,435,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,010
|
py
|
#
# Copyright (c) 2008-2019 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class urlfilteringparameter(base_resource):
    """Configuration for the URLFILTERING parameter resource.

    The first five fields are writable configuration; the remaining
    fields are read-only values reported by the appliance. The redundant
    per-accessor try/except wrappers of the generated original have been
    dropped -- they only re-raised the same exception.
    """

    def __init__(self):
        # Writable configuration.
        self._hoursbetweendbupdates = None
        self._timeofdaytoupdatedb = None
        self._localdatabasethreads = None
        self._cloudhost = None
        self._seeddbpath = None
        # Read-only values reported by the appliance.
        self._maxnumberofcloudthreads = None
        self._cloudkeepalivetimeout = None
        self._cloudserverconnecttimeout = None
        self._clouddblookuptimeout = None
        self._proxyhostip = None
        self._proxyport = None
        self._proxyusername = None
        self._proxypassword = None
        self._seeddbsizelevel = None

    # ------------------------- read/write properties -------------------------

    @property
    def hoursbetweendbupdates(self):
        r"""URL Filtering hours between DB updates.<br/>Maximum length = 720."""
        return self._hoursbetweendbupdates

    @hoursbetweendbupdates.setter
    def hoursbetweendbupdates(self, value):
        self._hoursbetweendbupdates = value

    @property
    def timeofdaytoupdatedb(self):
        r"""URL Filtering time of day to update DB."""
        return self._timeofdaytoupdatedb

    @timeofdaytoupdatedb.setter
    def timeofdaytoupdatedb(self, value):
        self._timeofdaytoupdatedb = value

    @property
    def localdatabasethreads(self):
        r"""URL Filtering Local DB number of threads.<br/>Minimum length = 1<br/>Maximum length = 4."""
        return self._localdatabasethreads

    @localdatabasethreads.setter
    def localdatabasethreads(self, value):
        self._localdatabasethreads = value

    @property
    def cloudhost(self):
        r"""URL Filtering Cloud host."""
        return self._cloudhost

    @cloudhost.setter
    def cloudhost(self, value):
        self._cloudhost = value

    @property
    def seeddbpath(self):
        r"""URL Filtering Seed DB path."""
        return self._seeddbpath

    @seeddbpath.setter
    def seeddbpath(self, value):
        self._seeddbpath = value

    # ------------------------- read-only properties --------------------------

    @property
    def maxnumberofcloudthreads(self):
        r"""URL Filtering hours between DB updates.<br/>Minimum value = 1<br/>Maximum value = 128."""
        return self._maxnumberofcloudthreads

    @property
    def cloudkeepalivetimeout(self):
        r"""URL Filtering Cloud keep alive timeout in msec.<br/>Minimum value = 1000<br/>Maximum value = 600000."""
        return self._cloudkeepalivetimeout

    @property
    def cloudserverconnecttimeout(self):
        r"""URL Filtering Cloud server connect timeout in msec.<br/>Minimum value = 1000<br/>Maximum value = 600000."""
        return self._cloudserverconnecttimeout

    @property
    def clouddblookuptimeout(self):
        r"""URL Filtering CloudDB send/receive timeout in msec.<br/>Minimum value = 1000<br/>Maximum value = 600000."""
        return self._clouddblookuptimeout

    @property
    def proxyhostip(self):
        r"""URL Filtering Cloud Proxy HostIp.<br/>Minimum length = 1."""
        return self._proxyhostip

    @property
    def proxyport(self):
        r"""URL Filtering Cloud Proxy Port."""
        return self._proxyport

    @property
    def proxyusername(self):
        r"""URL Filtering Cloud Proxy Username.<br/>Minimum length = 1."""
        return self._proxyusername

    @property
    def proxypassword(self):
        r"""URL Filtering Cloud Proxy Password.<br/>Minimum length = 1."""
        return self._proxypassword

    @property
    def seeddbsizelevel(self):
        r"""URL Filtering Seed DB Size Level to get downloaded.<br/>Minimum value = 1<br/>Maximum value = 5."""
        return self._seeddbsizelevel

    # --------------------------- nitro plumbing ------------------------------

    def _get_nitro_response(self, service, response):
        r"""Convert a raw nitro response into resource objects, raising
        nitro_exception on error responses."""
        result = service.payload_formatter.string_to_resource(
            urlfilteringparameter_response, response, self.__class__.__name__)
        if result.errorcode != 0:
            if result.errorcode == 444:
                # Session expired: drop it so the next call re-authenticates.
                service.clear_session(self)
            if result.severity:
                if result.severity == "ERROR":
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            else:
                raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
        return result.urlfilteringparameter

    def _get_object_name(self):
        r"""This resource type has no object identifier."""
        return 0

    @classmethod
    def update(cls, client, resource):
        r"""Use this API to update urlfilteringparameter."""
        if type(resource) is not list:
            payload = urlfilteringparameter()
            payload.hoursbetweendbupdates = resource.hoursbetweendbupdates
            payload.timeofdaytoupdatedb = resource.timeofdaytoupdatedb
            payload.localdatabasethreads = resource.localdatabasethreads
            payload.cloudhost = resource.cloudhost
            payload.seeddbpath = resource.seeddbpath
            return payload.update_resource(client)

    @classmethod
    def unset(cls, client, resource, args):
        r"""Use this API to unset the properties of urlfilteringparameter
        resource. Properties that need to be unset are specified in args array.
        """
        if type(resource) is not list:
            return urlfilteringparameter().unset_resource(client, args)

    @classmethod
    def get(cls, client, name="", option_=""):
        r"""Use this API to fetch all the urlfilteringparameter resources
        that are configured on netscaler.
        """
        if not name:
            return urlfilteringparameter().get_resources(client, option_)
class urlfilteringparameter_response(base_response):
    """Nitro response wrapper carrying a list of urlfilteringparameter
    objects plus the standard error/session fields."""

    def __init__(self, length=1):
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate `length` empty resource objects for the formatter.
        self.urlfilteringparameter = [urlfilteringparameter() for _ in range(length)]
|
[
"zhuwei@xsky.com"
] |
zhuwei@xsky.com
|
3dd4a01abcbe1d33242f636f2247ecb542da68d2
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/9YmYQTdPSdr8K8Bnz_16.py
|
5054e98348184561ef5da9cb9fe5f8bc0c129f00
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
def unique_lst(lst):
    """Return the positive numbers from lst, de-duplicated, in first-seen order.

    Uses a set for membership testing so the function runs in O(n) instead
    of the original O(n^2) list scan; output is unchanged.
    """
    seen = set()
    result = []
    for item in lst:
        if item > 0 and item not in seen:
            seen.add(item)
            result.append(item)
    return result
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
7f6ba4c11f65aa75306fdced215dc9745c1da707
|
08a68e32dc80f99a37a30ddbbf943337546cc3d5
|
/.history/count/urls_20200419183106.py
|
04b192775edb18286c91a040ee26cc5d6439701c
|
[] |
no_license
|
Space20001/word-count-project
|
dff1b4b44d2f7230070eef0d95dd968b655d92f7
|
795b5e8ad5c59109e96bf7a8e9192efaefa7770e
|
refs/heads/master
| 2022-04-20T17:54:05.511449
| 2020-04-20T15:25:46
| 2020-04-20T15:25:46
| 257,327,368
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('', include(count.urls),
path('admin/', admin.site.urls),
|
[
"steve.h@blueyonder.com"
] |
steve.h@blueyonder.com
|
4e38b644b6c4d196dd99a26f13133204bb604aa4
|
f2ebedd68cca732fba98d0038695972f917b46f5
|
/ICP6/ICP6_1_LDA_Model.py
|
f92313b44a088693f37a25a79f7da7f3280ed85b
|
[] |
no_license
|
sxb42660/CS5560SivaBuddi
|
15cc5bbea7fef096728c9ca27a8d8163e42950b1
|
0827b0af67e037524987ac8da639f6252bdb4626
|
refs/heads/master
| 2022-07-02T01:55:52.817393
| 2020-05-11T17:57:27
| 2020-05-11T17:57:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,791
|
py
|
#text processing
import re
import string
import nltk
from gensim import corpora, models, similarities
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import pandas as pd
import numpy as np
#read the csv file with amazon reviews
reviews_df=pd.read_csv('/home/sivakumar/Desktop/CS5560SivaBuddi/ICP6/voted-kaggle-dataset.csv',error_bad_lines=False)
reviews_df['Description'] = reviews_df['Description'].astype(str)
print(reviews_df.head(6))
def initial_clean(text):
    """
    Basic text cleanup: strip everything but letters and spaces,
    lowercase, and tokenize. Returns a list of word tokens.
    """
    letters_only = re.sub("[^a-zA-Z ]", "", text)
    return nltk.word_tokenize(letters_only.lower())
stop_words = stopwords.words('english')
stop_words.extend(['news', 'say','use', 'not', 'would', 'say', 'could', '_', 'be', 'know', 'good', 'go', 'get', 'do','took','time','year',
'done', 'try', 'many', 'some','nice', 'thank', 'think', 'see', 'rather', 'easy', 'easily', 'lot', 'lack', 'make', 'want', 'seem', 'run', 'need', 'even', 'right', 'line','even', 'also', 'may', 'take', 'come', 'new','said', 'like','people'])
def remove_stop_words(text):
    """Return the tokens of *text* that are not in the module stop list.

    Args:
        text: iterable of string tokens.

    Returns:
        list[str]: tokens with stopwords removed, original order preserved.
    """
    # Build the set once per call: membership tests drop from
    # O(len(stop_words)) per token (list scan) to O(1), and this function
    # runs once per document over the whole corpus.
    blocked = frozenset(stop_words)
    return [word for word in text if word not in blocked]
# single shared stemmer instance reused by stem_words for every document
stemmer = PorterStemmer()
def stem_words(text):
    """Porter-stem each token, dropping single-letter results.

    If the stemmer raises IndexError on an odd token, the input is
    returned unchanged as a best-effort fallback (same contract as the
    original try/except guard).
    """
    try:
        stems = (stemmer.stem(tok) for tok in text)
        kept = [stem for stem in stems if len(stem) > 1]
    except IndexError:
        return text
    return kept
def apply_all(text):
    """Run the full preprocessing pipeline on one raw text string.

    Steps: clean + tokenize, drop stopwords, then stem the survivors.
    """
    tokens = initial_clean(text)
    content_tokens = remove_stop_words(tokens)
    return stem_words(content_tokens)
# clean reviews and create new column "tokenized"; timed because apply_all
# runs tokenize+stopword+stem over every row and dominates preprocessing cost
import time
t1 = time.time()
reviews_df['Tokenized_Description'] = reviews_df['Description'].apply(apply_all)
t2 = time.time()
print("Time to clean and tokenize", len(reviews_df), "reviews:", (t2-t1)/60, "min") #Time to clean and tokenize 3209 reviews: 0.21254388093948365 min
print('\n')
print("reviews with their respective tokenize version:" )
print(reviews_df.head(5))
#LDA
import gensim
import pyLDAvis.gensim
#Create a Gensim dictionary from the tokenized data
tokenized = reviews_df['Tokenized_Description']
#Creating term dictionary of corpus, where each unique term is assigned an index.
dictionary = corpora.Dictionary(tokenized)
#Filter terms which occurs in less than 1 review and more than 80% of the reviews.
# NOTE(review): no_below=1 keeps every term (nothing occurs in fewer than 1 doc),
# so only the 80% upper cap actually filters anything here.
dictionary.filter_extremes(no_below=1, no_above=0.8)
#convert the dictionary to a bag of words corpus: one (token_id, count) list per document
corpus = [dictionary.doc2bow(tokens) for tokens in tokenized]
print(corpus[:1])
# translate the first document's ids back to words for a human-readable sanity check
# (the loop variable `id` shadows the builtin; preserved as-is)
print([[(dictionary[id], freq) for id, freq in cp] for cp in corpus[:1]])
#LDA: train a 7-topic model, 15 passes over the corpus
ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics = 7, id2word=dictionary, passes=15)
#saving the model to disk so the visualization step below can reload it
ldamodel.save('model_combined_Description.gensim')
topics = ldamodel.print_topics(num_words=4)
print('\n')
print("Now printing the topics and their composition")
print("This output shows the Topic-Words matrix for the 7 topics created and the 4 words within each topic")
for topic in topics:
    print(topic)
#finding the similarity of the first review with topics
print('\n')
print("first review is:")
print(reviews_df.Description[0])
# per-topic membership probabilities for document 0
get_document_topics = ldamodel.get_document_topics(corpus[0])
print('\n')
print("The similarity of this review with the topics and respective similarity score are ")
print(get_document_topics)
#visualizing topics: reload the saved model and open the interactive pyLDAvis view
lda_viz = gensim.models.ldamodel.LdaModel.load('model_combined_Description.gensim')
lda_display = pyLDAvis.gensim.prepare(lda_viz, corpus, dictionary, sort_topics=True)
pyLDAvis.show(lda_display)
|
[
"sivakumar.umkc.fall2019@gmail.com"
] |
sivakumar.umkc.fall2019@gmail.com
|
88dee361384ca062336575c3836ef4710e625363
|
efde08c021b7d90d3470573851c4d4e675afe265
|
/backend/detector/tests/test_views.py
|
942cc839305e7c37d0034148497be589151c5fb2
|
[] |
no_license
|
DragonSavA/Soil-State-Tracker-2
|
64141afb11dd244b5765f45cb067a4b7138815aa
|
8cf05286bb324119d511add2bbced5e3d1ea9e77
|
refs/heads/main
| 2023-02-23T20:33:22.010674
| 2021-01-12T14:28:01
| 2021-01-12T14:28:01
| 329,017,856
| 0
| 0
| null | 2021-01-12T14:48:05
| 2021-01-12T14:48:04
| null |
UTF-8
|
Python
| false
| false
| 1,613
|
py
|
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase
from django.conf import settings
from django.test import override_settings
from client.models import Client
from group.models import Cluster
from detector.models import Detector, DetectorData
from backend.service import get_response
class TestViews(APITestCase):
    """API tests for the detector endpoints.

    Covers the 'free-detectors' listing (auth required) and the
    'detector-data' readings endpoint, using the project's get_response
    helper to issue requests by route name.
    """
    def setUp(self):
        # One registered client and one detector owned by that client.
        # The detector is "free" — presumably meaning not attached to a
        # Cluster (Cluster is imported but never used here) — TODO confirm.
        self.user1 = Client.objects.create_user(
            email='test@case1.test',
            first_name='admin',
            last_name='admin',
            password='very_strong_psw'
        )
        self.free_detector = Detector.objects.create(
            user=self.user1,
            x=1,
            y=2
        )
        # Seed the detector with random readings so data endpoints return rows.
        DetectorData.create_random(self.free_detector)
    # Cache disabled so the freshly created detector is visible immediately.
    @override_settings(CACHEOPS_ENABLED=False)
    def test_free_detectors_unauth(self):
        # Anonymous request must be rejected with 401.
        response = get_response('free-detectors', 'get')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
    def test_free_detectors_auth(self):
        # Authenticated request returns exactly the one free detector.
        response = get_response('free-detectors', 'get', self.user1)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]['id'], self.free_detector.id)
    def test_detector_data_auth(self):
        # Readings endpoint returns one row per stored DetectorData record.
        response = get_response('detector-data', 'get', self.user1, kwargs={'pk': self.free_detector.id})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), self.free_detector.data.count())
|
[
"54814200+reqww@users.noreply.github.com"
] |
54814200+reqww@users.noreply.github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.