blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0a1045f155f346fcd6be4db7820ecffde97a490b
|
eef285b8c4530a7dc1187e08292bf246e3732915
|
/chat_project/chat_backend/chat_backend/user/admin.py
|
24c2b5b4d4948b6624de91964a94edf55e80a494
|
[] |
no_license
|
wlgud0402/making_projects
|
32ba45817e48c3d21b174c823177d96af10d9a20
|
6d86d09c61eb70339423f33d6e42ca0cdff391a6
|
refs/heads/master
| 2023-03-21T09:45:25.600251
| 2021-03-22T13:02:26
| 2021-03-22T13:02:26
| 338,810,583
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
from django.contrib import admin
from .models import User
# Register your models here.
# admin.site.register(User)
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
    """Django admin configuration for the custom User model."""

    # Columns shown on the admin changelist page for users.
    list_display = ('email', 'nickname', 'user_type', 'room_id', 'created_at',)
|
[
"wlgudrlgus@naver.com"
] |
wlgudrlgus@naver.com
|
a81fb81b960fc04f18e1042929aa6c944cfb1007
|
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
|
/ml-flask/Lib/site-packages/networkx/algorithms/flow/capacityscaling.py
|
9a6a5c7270f10e7645babe83c8286a14cb73b66f
|
[
"MIT"
] |
permissive
|
YaminiHP/SimilitudeApp
|
8cbde52caec3c19d5fa73508fc005f38f79b8418
|
005c59894d8788c97be16ec420c0a43aaec99b80
|
refs/heads/master
| 2023-06-27T00:03:00.404080
| 2021-07-25T17:51:27
| 2021-07-25T17:51:27
| 389,390,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:0ba80a661465f023b9da7444cd18a123358f4112509665b5d25721a9fb176ec0
size 14535
|
[
"yamprakash130@gmail.com"
] |
yamprakash130@gmail.com
|
03c5e284066da9d303f45f391c6d39151fb59a4b
|
6be2b9c3a7dcc95ed04ce8a5af912014833b769a
|
/app/main/views.py
|
7bd0437c264b1feb1213cb896371a4b80751cd1f
|
[
"MIT"
] |
permissive
|
MaryMbugua/Newshighlighttwo
|
05219428c9e568122cb59f7a2ea90b758edf8c76
|
143fd75b7c0e36a48e25240ff150d10781c77470
|
refs/heads/master
| 2020-03-08T19:47:28.086076
| 2018-04-23T08:50:25
| 2018-04-23T08:50:25
| 128,363,872
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,157
|
py
|
from flask import render_template,request,redirect,url_for
from . import main
from ..requests import get_sources,get_articles
from ..models import Newsarticle,Newssources

# Shared page title: every view previously repeated this literal; a single
# constant keeps them consistent.
TITLE = 'Home - Welcome to The best News Update Website Online'

# views


@main.route('/')
def index():
    '''
    view root page function that returns the index page and its data
    '''
    return render_template('index.html', title=TITLE)


@main.route('/Business/')
def BusinessSources():
    '''
    view page function that returns business news from various news sources
    '''
    business_sources = get_sources('business')
    return render_template('biz.html', title=TITLE, biznews=business_sources)


@main.route('/Entertainment/')
def EntertainmentSources():
    '''
    view page function that returns entertainment news from various news sources
    '''
    entertainment_sources = get_sources('entertainment')
    return render_template('enta.html', title=TITLE, enta=entertainment_sources)


@main.route('/Health/')
def HealthSources():
    '''
    view page function that returns health news from various news sources
    '''
    health_sources = get_sources('health')
    return render_template('health.html', title=TITLE, healthsource=health_sources)


@main.route('/General/')
def GeneralSources():
    '''
    view page function that returns general news from various news sources
    '''
    general_sources = get_sources('general')
    return render_template('gen.html', title=TITLE, general=general_sources)


@main.route('/Science/')
def ScienceSources():
    '''
    view page function that returns science news from various news sources
    '''
    science_sources = get_sources('science')
    return render_template('science.html', title=TITLE, science=science_sources)


@main.route('/Sports/')
def SportsSources():
    '''
    view page function that returns sports news from various news sources
    '''
    sports_sources = get_sources('sports')
    return render_template('sports.html', title=TITLE, sports=sports_sources)


@main.route('/Technology/')
def TechnologySources():
    '''
    view page function that returns technology news from various news sources
    '''
    technology_sources = get_sources('technology')
    return render_template('tech.html', title=TITLE, tech=technology_sources)


@main.route('/source/<id>/')
def NewsGetArticles(id):
    '''
    view page function that returns the articles published by the selected
    news source (fixed: docstring previously claimed "technology news",
    a copy-paste leftover)
    '''
    news = get_articles(id)
    return render_template('article.html', title=TITLE, news=news)
|
[
"marymbugua.nm@gmail.com"
] |
marymbugua.nm@gmail.com
|
576e761485b9b3fbcdc1ce8d9b9405d34e242c90
|
a1ad2715e306fd4e7eaeda5348e00e1a363e7884
|
/leetcode/concepts.py/houserobber2.py
|
d7ea4c294868b76061a7f6960164e761560f91cc
|
[] |
no_license
|
MayankMaheshwar/DS-and-Algo-solving
|
cef54a800b3e8a070a707f97b4f30fccaa17d5c6
|
ac6ea8f880920242a55d40c747368d68cb6f7534
|
refs/heads/master
| 2022-12-07T07:55:08.380505
| 2022-12-05T09:32:14
| 2022-12-05T09:32:14
| 237,103,468
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
def robberse(self, nums):
    """House Robber II: houses form a circle, so the first and last house
    can never both be robbed.

    Solve the linear problem twice — once excluding the first house,
    once excluding the last — and take the better result.

    :type nums: List[int]
    :rtype: int  (maximum loot without robbing two adjacent houses)
    """
    def line_max(values, start, stop):
        # Classic linear house-robber DP over values[start:stop]:
        # `take` = best if we rob the current house, `skip` = best if not.
        take, skip = 0, 0
        for k in range(start, stop):
            take, skip = skip + values[k], max(take, skip)
        return max(take, skip)

    if not nums:
        return 0
    if len(nums) == 1:
        return nums[0]
    count = len(nums)
    return max(line_max(nums, 1, count), line_max(nums, 0, count - 1))
|
[
"mayank.maheshwari625@gmail.com"
] |
mayank.maheshwari625@gmail.com
|
196a8d28085738fdda38ea2f078b8fb542ec2300
|
189c99816118ac6334fb65f1ef611c71eb756ead
|
/0x08-python-more_classes/6-rectangle.py
|
76a2c927a7745e18c819895c16ccb3f69c5e9f0f
|
[] |
no_license
|
MadmanSilver/holbertonschool-higher_level_programming
|
4d51e308636ccc37271869dd830f700d8201948e
|
09e47b319d9bd674519b13263a74822198d5932c
|
refs/heads/master
| 2022-12-18T23:40:37.862157
| 2020-09-25T02:41:01
| 2020-09-25T02:41:01
| 259,349,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,083
|
py
|
#!/usr/bin/python3
""" Contains the Rectangle class. """
class Rectangle:
    """A width x height rectangle that counts its live instances."""

    # Number of Rectangle objects currently alive: incremented by
    # __init__, decremented by __del__.
    number_of_instances = 0

    def __init__(self, width=0, height=0):
        """Validate and store both dimensions, then bump the counter."""
        self.width = width
        self.height = height
        Rectangle.number_of_instances += 1

    @property
    def width(self):
        """Horizontal size (non-negative int)."""
        return self.__width

    @width.setter
    def width(self, value):
        """Set the width; reject non-int types and negative values."""
        if type(value) is not int:
            raise TypeError("width must be an integer")
        if value < 0:
            raise ValueError("width must be >= 0")
        self.__width = value

    @property
    def height(self):
        """Vertical size (non-negative int)."""
        return self.__height

    @height.setter
    def height(self, value):
        """Set the height; reject non-int types and negative values."""
        if type(value) is not int:
            raise TypeError("height must be an integer")
        if value < 0:
            raise ValueError("height must be >= 0")
        self.__height = value

    def area(self):
        """Surface area of the rectangle."""
        return self.height * self.width

    def perimeter(self):
        """Perimeter; by convention 0 when either dimension is 0."""
        if self.width == 0 or self.height == 0:
            return 0
        return 2 * (self.width + self.height)

    def __str__(self):
        """Draw the rectangle with '#' characters ('' when degenerate)."""
        if self.width == 0 or self.height == 0:
            return ""
        return "\n".join("#" * self.width for _ in range(self.height))

    def __repr__(self):
        """String that eval() can use to recreate an equal rectangle."""
        return f"Rectangle({self.width}, {self.height})"

    def __del__(self):
        """Announce deletion and decrement the live-instance counter."""
        print("Bye rectangle...")
        Rectangle.number_of_instances -= 1
|
[
"silverwolf4350@gmail.com"
] |
silverwolf4350@gmail.com
|
181a25b998b188559a7c17997e8ce525d68a3cf4
|
f4b60f5e49baf60976987946c20a8ebca4880602
|
/lib/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/fv/aeppconfissues.py
|
c23c2662a913b2fdb31f332c39cb53a959687641
|
[] |
no_license
|
cqbomb/qytang_aci
|
12e508d54d9f774b537c33563762e694783d6ba8
|
a7fab9d6cda7fadcc995672e55c0ef7e7187696e
|
refs/heads/master
| 2022-12-21T13:30:05.240231
| 2018-12-04T01:46:53
| 2018-12-04T01:46:53
| 159,911,666
| 0
| 0
| null | 2022-12-07T23:53:02
| 2018-12-01T05:17:50
|
Python
|
UTF-8
|
Python
| false
| false
| 6,453
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class AEpPConfIssues(Mo):
    """Abstract managed object describing EpP configuration issues.

    Generated cobra model code (file header says "do not modify") — only
    review comments are added here; all statements are unchanged.
    """

    meta = ClassMeta("cobra.model.fv.AEpPConfIssues")

    meta.isAbstract = True
    meta.moClassName = "fvAEpPConfIssues"
    # NOTE(review): duplicate assignment straight from the generator; harmless.
    meta.moClassName = "fvAEpPConfIssues"
    meta.rnFormat = ""
    meta.category = MoCategory.REGULAR
    meta.label = "Abstraction of EpP Configuration Issues"
    meta.writeAccessMask = 0x1
    meta.readAccessMask = 0x1
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False

    # Containment: allowed child classes and their RN prefixes.
    meta.childClasses.add("cobra.model.fault.Delegate")
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))

    # Classes under which this MO may appear.
    meta.parentClasses.add("cobra.model.fv.RsStPathAtt")
    meta.parentClasses.add("cobra.model.fv.AEPgCont")
    meta.parentClasses.add("cobra.model.fv.ExtStPathAtt")
    meta.parentClasses.add("cobra.model.fv.RsStGrpAtt")
    meta.parentClasses.add("cobra.model.fv.RsNodePortAtt")
    meta.parentClasses.add("cobra.model.fv.InBEpP")
    meta.parentClasses.add("cobra.model.vz.ToEPgAny")
    meta.parentClasses.add("cobra.model.vz.ToEPg")
    meta.parentClasses.add("cobra.model.fv.RtdEpP")
    meta.parentClasses.add("cobra.model.fv.Locale")
    meta.parentClasses.add("cobra.model.fv.AttEntityPathAtt")
    meta.parentClasses.add("cobra.model.fv.StPathAtt")
    meta.parentClasses.add("cobra.model.fv.DyPathAtt")
    meta.parentClasses.add("cobra.model.fv.ConfigState")
    meta.parentClasses.add("cobra.model.fv.EpP")
    meta.parentClasses.add("cobra.model.fv.OoBEpP")
    meta.parentClasses.add("cobra.model.fv.BrEpP")

    # Inheritance chain and the concrete subclasses of this abstract MO.
    meta.superClasses.add("cobra.model.naming.NamedObject")
    meta.superClasses.add("cobra.model.fv.AConfIssues")
    meta.superClasses.add("cobra.model.pol.Obj")
    meta.superClasses.add("cobra.model.pol.Def")
    meta.superClasses.add("cobra.model.pol.AConfIssues")

    meta.concreteSubClasses.add("cobra.model.fv.NwIssues")
    meta.concreteSubClasses.add("cobra.model.fv.StorageIssues")
    meta.concreteSubClasses.add("cobra.model.fv.CompIssues")

    # Abstract class: no RN prefixes of its own.
    meta.rnPrefixes = [
    ]

    # --- Property metadata -------------------------------------------------

    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    # Deployment state of the configuration (defaults to "not-applied").
    prop = PropMeta("str", "configSt", "configSt", 4993, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "not-applied"
    prop._addConstant("applied", "applied", 2)
    prop._addConstant("applying", "applying", 1)
    prop._addConstant("failed-to-apply", "failed-to-apply", 3)
    prop._addConstant("not-applied", "not-applied", 0)
    prop._addConstant("temp-failed-to-apply", "temp-failed-to-apply", 4)
    meta.props.add("configSt", prop)

    prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
    prop.label = "Description"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("descr", prop)

    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    prop = PropMeta("str", "epgPKey", "epgPKey", 1831, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("epgPKey", prop)

    prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("name", prop)

    prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerKey", prop)

    prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerTag", prop)

    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    prop = PropMeta("str", "temporaryError", "temporaryError", 16106, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = False
    prop.defaultValueStr = "no"
    prop._addConstant("no", None, False)
    prop._addConstant("yes", None, True)
    meta.props.add("temporaryError", prop)

    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        # Abstract MO: rnFormat is empty, so there are no naming values.
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"collinsctk@qytang.com"
] |
collinsctk@qytang.com
|
97208d08e04cecb91c78591585387c2390b581fd
|
90333a3140dc20036ad1ae88aaeec4b72a93914b
|
/tensorflow/example04.py
|
8c309c32db68466a1c8c20d8d572042450f9d5a6
|
[] |
no_license
|
jw0831/AI-Study
|
1f026acbc46a5f133921efc3c07d233ec1e8e284
|
02a2bd7469691facc5b6b283aa5edb8e90841456
|
refs/heads/master
| 2023-05-12T13:05:55.499517
| 2021-06-13T14:32:19
| 2021-06-13T14:32:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,679
|
py
|
#(4) example04 — two-layer softmax classifier on a toy animal dataset
# (TensorFlow 1.x API: placeholders + Session).
#import tensorflow and numpy
import tensorflow as tf
import numpy as np

#[feather, wing]
x_data = np.array(
    [[0, 0], [1, 0], [1, 1], [0, 0], [0, 0], [0, 1]])

#[etc, mammal, bird]
#one-hot encoding(label)
y_data = np.array([
    [1, 0, 0],  #etc
    [0, 1, 0],  #mammal
    [0, 0, 1],  #bird
    [1, 0, 0],
    [1, 0, 0],
    [0, 0, 1]
])

#make simple model
#make placeholder
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

#input size is 2, hidden size is 10, output size is 3
weight1 = tf.Variable(tf.random_uniform([2, 10], -1., 1.))
weight2 = tf.Variable(tf.random_uniform([10, 3], -1., 1.))
bias1 = tf.Variable(tf.zeros([10]))
bias2 = tf.Variable(tf.zeros([3]))

#activation function
layer1 = tf.add(tf.matmul(X, weight1), bias1)
layer2 = tf.nn.relu(layer1)
# BUG FIX: the output layer must consume the activated hidden layer
# (layer2). The original used layer1 here, which left the ReLU dead and
# made the whole network a purely linear model.
model = tf.add(tf.matmul(layer2, weight2), bias2)

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=model))
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(cost)

#training
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for step in range(100):
    sess.run(train_op, feed_dict={X: x_data, Y: y_data})
    # report the loss every 10 steps
    if (step + 1) % 10 == 0:
        print(step + 1, sess.run(cost, feed_dict={X: x_data, Y: y_data}))

# predicted class = argmax over the 3 logits; compare against labels
prediction = tf.argmax(model, 1)
ground_truth = tf.argmax(Y, 1)
print('Prediction:', sess.run(prediction, feed_dict={X: x_data}))
print('Ground Truth:', sess.run(ground_truth, feed_dict={Y: y_data}))
is_correct = tf.equal(prediction, ground_truth)
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
print('Accuracy: %.2f' % sess.run(accuracy * 100, feed_dict={X: x_data, Y: y_data}))
|
[
"wodbs9522@gmail.com"
] |
wodbs9522@gmail.com
|
daf6a6648337c20c1e3b7fc6492df080328c9231
|
32e55bf28b9f22265bcbc1d8c0ebf52a3608187d
|
/303. Range Sum Query - Immutable.py
|
0c4562b35da42903720903add0d94136f25c38aa
|
[] |
no_license
|
Garacc/LeetCode
|
9f843672a18701d032f36769c9025761199d8caf
|
215d12703b2cac4c1ad49d5a0e1060948fbbacd2
|
refs/heads/master
| 2018-10-10T03:37:48.889898
| 2018-09-17T08:38:22
| 2018-09-17T08:38:22
| 120,304,202
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 939
|
py
|
class NumArray:
    """Immutable range-sum query via prefix sums (LeetCode 303).

    After O(n) preprocessing, sumRange(i, j) answers in O(1).
    (A naive per-query loop TLEs on LeetCode, hence the prefix sums.)
    """

    def __init__(self, nums):
        """
        :type nums: List[int]
        """
        self.nums = nums
        # BUG FIX: copy before accumulating. The original did
        # `self.sums = nums`, aliasing the caller's list, so building the
        # prefix sums in place silently corrupted the input (and self.nums).
        self.sums = list(nums)
        for i in range(1, len(self.sums)):
            self.sums[i] += self.sums[i - 1]

    def sumRange(self, i, j):
        """
        :type i: int
        :type j: int
        :rtype: int  (sum of nums[i..j], inclusive)
        """
        if i == 0:
            return self.sums[j]
        return self.sums[j] - self.sums[i - 1]
# Your NumArray object will be instantiated and called as such:
# obj = NumArray(nums)
# param_1 = obj.sumRange(i,j)
|
[
"xgyxmxl@163.com"
] |
xgyxmxl@163.com
|
8c567d804437e17644ed5f3c11c0cd3e47b52c03
|
56b63ee537f872af0fc028016d1508b4c1dd5c60
|
/school/migrations/0284_auto_20210507_1155.py
|
e0f316d5d0463336216b57b0e361ecb8d4b458c7
|
[] |
no_license
|
jacknjillsolutionsrevanth/EMS1
|
01fc571120f765b0fbfe3aa654b15ff578d6e9b9
|
db14d8e6c15669b5938aa9276c5e22006218814a
|
refs/heads/main
| 2023-08-03T19:40:50.073133
| 2021-10-01T07:02:37
| 2021-10-01T07:02:37
| 410,202,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,734
|
py
|
# Generated by Django 3.1.2 on 2021-05-07 06:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.2): adds reporting columns to
    rpt_consolidatedreport and makes two rpt_excel_bankwise floats nullable."""

    dependencies = [
        ('school', '0283_auto_20210430_1342'),
    ]

    operations = [
        # New nullable columns on the consolidated report model.
        migrations.AddField(
            model_name='rpt_consolidatedreport',
            name='branch',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='rpt_consolidatedreport',
            name='centername',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='rpt_consolidatedreport',
            name='net',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AddField(
            model_name='rpt_consolidatedreport',
            name='routename',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        # tsrate is required (non-null) with a 0.0 default.
        migrations.AddField(
            model_name='rpt_consolidatedreport',
            name='tsrate',
            field=models.FloatField(default=0.0),
        ),
        # Existing columns loosened to allow blank/null values.
        migrations.AlterField(
            model_name='rpt_consolidatedreport',
            name='centercode',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='rpt_excel_bankwise',
            name='amount',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='rpt_excel_bankwise',
            name='total',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
    ]
|
[
"jacknjillsolutions.revanth@gmail.com"
] |
jacknjillsolutions.revanth@gmail.com
|
e9d7a998d87c612243828fe66e6007202c86f686
|
0b40232eb2395c27353c892ef4ccb5c604bb75be
|
/Hash Table/Find_the_difference.py
|
7428e82614a50c081c15dbd870c2e3841fab9f12
|
[] |
no_license
|
HareshNasit/LeetCode
|
971ae9dd5e4f0feeafa5bb3bcf5b7fa0a514d54d
|
674728af189aa8951a3fcb355b290f5666b1465c
|
refs/heads/master
| 2021-06-18T07:37:40.121698
| 2021-02-12T12:30:18
| 2021-02-12T12:30:18
| 168,089,751
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
def findTheDifference(self, s, t):
    """Return the one extra character in t (t is s shuffled plus one char).

    :type s: str
    :type t: str
    :rtype: str
    """
    # Tally characters of s, then cancel them out while scanning t;
    # the first character whose tally dips below zero is the extra one.
    tally = {}
    for ch in s:
        tally[ch] = tally.get(ch, 0) + 1
    for ch in t:
        tally[ch] = tally.get(ch, 0) - 1
        if tally[ch] == -1:
            return ch
|
[
"harsh.nasit@mail.utoronto.ca"
] |
harsh.nasit@mail.utoronto.ca
|
b04e5ea723f86e59d1873259177661d9672e62f6
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_examples/_algorithms_challenges/pybites/advanced/050_make_pybites_search_engine_feedparser/save4_nopass.py
|
8dbd7f2c13334bb8bd7ec12ff179a1105f74a635
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 5,865
|
py
|
from datetime import datetime
from collections import namedtuple
from time import mktime
from feedparser import parse
import re
import xml.etree.ElementTree as ET
# FEED = 'https://bites-data.s3.us-east-2.amazonaws.com/all.rss.xml'
Entry = namedtuple('Entry', 'date title link tags')
class AttrDict(dict):
    """A dict whose keys can also be read as attributes.

    Mimics feedparser's attribute-style access so the mocked FEED below
    behaves like a real parse result (idea from
    https://stackoverflow.com/a/14620633). PyBites uses this class for
    parsing.
    """

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Point the attribute namespace at the mapping itself, so
        # d['key'] and d.key are the same storage.
        self.__dict__ = self
# Fixed publish times for the two mocked entries below.
dt1 = datetime(2018, 2, 18, 19, 52, 0).timetuple()
dt2 = datetime(2017, 1, 6, 11, 0, 0).timetuple()

# Canned two-entry feed used instead of a network fetch; mirrors the
# structure feedparser.parse() returns (entries with author, link,
# published, published_parsed, summary, tags, title).
FEED = AttrDict({'entries':
                 [AttrDict({'author': 'PyBites',
                            'link':
                            'https://pybit.es/twitter_digest_201808.html',  # noqa E501
                            'published': 'Sun, 18 Feb 2018 20:52:00 +0100',  # noqa E501
                            'published_parsed': dt1,
                            'summary': 'Every weekend we share ...',
                            'tags': [AttrDict({'term': 'twitter'}),
                                     AttrDict({'term': 'Flask'}),
                                     AttrDict({'term': 'Python'}),
                                     AttrDict({'term': 'Regex'})],
                            'title': 'Twitter Digest 2018 Week 08'}),
                  AttrDict({'author': 'Julian',
                            'link': 'https://pybit.es/pyperclip.html',
                            'published': 'Fri, 06 Jan 2017 12:00:00 +0100',  # noqa E501
                            'published_parsed': dt2,
                            'summary': 'Use the Pyperclip module to ...',
                            'tags': [AttrDict({'term': 'python'}),
                                     AttrDict({'term': 'tips'}),
                                     AttrDict({'term': 'tricks'}),
                                     AttrDict({'term': 'code'}),
                                     AttrDict({'term': 'pybites'})],
                            'title': 'Copy and Paste with Pyperclip'})]})
def _convert_struct_time_to_dt(stime):
"""Convert a time.struct_time as returned by feedparser into a
datetime.date object, so:
time.struct_time(tm_year=2016, tm_mon=12, tm_mday=28, ...)
-> date(2016, 12, 28)
"""
if type(stime) == str:
format = '%a, %d %b %Y %H:%M:%S %z'
dt_object = datetime.strptime(stime, format)
return dt_object.date()
else:
return datetime.fromtimestamp(mktime(stime)).date()
def get_feed_entries(feed=FEED):
    """Use feedparser to parse the PyBites RSS feed.

    Args:
        feed: an already-parsed AttrDict mock (the default FEED) or
              anything feedparser.parse accepts (URL, path, string).

    Returns:
        list of Entry namedtuples; published timestamps are reduced to
        dates (time part dropped) and tags are lowercased.
    """
    # isinstance instead of `type(...) == AttrDict`; local renamed so it
    # no longer shadows the builtin `file`.
    parsed = feed if isinstance(feed, AttrDict) else parse(feed)
    output = []
    for entry in parsed.entries:
        date = _convert_struct_time_to_dt(entry.published)
        tag_list = [tag['term'].lower() for tag in entry.tags]
        output.append(Entry(date, entry.title, entry.link, tag_list))
    return output
def filter_entries_by_tag(search, entry):
    """Check if search matches any tags as stored in the Entry namedtuple
    (case insensitive, only whole, not partial string matches).

    Returns bool: True if match, False if not.

    Supported searches:
    1. If & in search do AND match,
       e.g. flask&api should match entries with both tags
    2. Elif | in search do an OR match,
       e.g. flask|django should match entries with either tag
    3. Else: match if search is in tags
    """
    search = search.lower()
    tags = entry.tags
    # '&' is checked first, matching the documented precedence — the
    # original tested '|' first, contradicting its own docstring for
    # searches containing both operators. The unreachable trailing
    # `return search` and the pointless copy of entry.tags are gone.
    if '&' in search:
        return all(term in tags for term in search.split('&'))
    if '|' in search:
        return any(term in tags for term in search.split('|'))
    return search in tags
def main():
    """Entry point to the program
    1. Call get_feed_entries and store them in entries
    2. Initiate an infinite loop
    3. Ask user for a search term:
    - if enter was hit (empty string), print 'Please provide a search term'
    - if 'q' was entered, print 'Bye' and exit/break the infinite loop
    4. Filter/match the entries (see filter_entries_by_tag docstring)
    5. Print the title of each match ordered by date ascending
    6. Secondly, print the number of matches: 'n entries matched'
    (use entry if only 1 match)
    """
    entries = get_feed_entries()
    while True:
        try:
            search_term = input('Search for (q for exit): ').lower()
        except EOFError:
            # End of piped/closed stdin is treated like quitting.
            break
        if search_term == '':
            print('Please provide a search term')
        # Non-empty, non-quit input: run the search.
        if search_term != '' and search_term != 'q':
            output_list = []
            for entry in entries:
                if filter_entries_by_tag(search_term, entry):
                    output_list.append(entry)
            # Report matches oldest-first.
            output_list = sorted(output_list, key=lambda x: x.date)
            titles = ', '.join([entry.title for entry in output_list])
            output_number = len(output_list)
            # Singular/plural wording depends on the match count.
            if output_number < 1:
                print(f'{output_number} entries matched')
            if output_number == 1:
                print(titles)
                print(f'{output_number} entry matched')
            if output_number > 1:
                print(titles)
                print(f'{output_number} entries matched')
        if search_term == 'q':
            print('Bye')
            break
# Run the CLI only when executed as a script. The original invoked main()
# a second time unconditionally, which also ran the interactive loop on
# plain import (and twice when run directly) — removed.
if __name__ == '__main__':
    main()
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
5516c3e347802f4b350ee2dbcccabaeb477b3a74
|
9447fc5874b2edbc5d50d97d1415459d7c0a9a23
|
/env/bin/rstpep2html.py
|
80e9bd3423e0ff556c539c40519718befa2fd10f
|
[] |
no_license
|
ivan-podorozhnyi-tr/flask_zappa
|
b9d11976a2b5d1a315258984ffde6199b4013576
|
aba4c482d90ceb5161010e4e4edb9b63feb00735
|
refs/heads/master
| 2022-11-02T04:11:20.531599
| 2019-10-10T10:31:03
| 2019-10-10T10:31:03
| 214,153,564
| 0
| 1
| null | 2022-10-10T20:02:44
| 2019-10-10T10:28:49
|
Python
|
UTF-8
|
Python
| false
| false
| 701
|
py
|
#!/home/ivan/projects/flask_zappa/env/bin/python3

# $Id: rstpep2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.

"""
A minimal front end to the Docutils Publisher, producing HTML from PEP
(Python Enhancement Proposal) documents.
"""

try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except Exception:
    # Locale setup is best-effort only; narrowed from a bare `except:` so
    # SystemExit/KeyboardInterrupt are no longer swallowed.
    pass

from docutils.core import publish_cmdline, default_description

description = ('Generates (X)HTML from reStructuredText-format PEP files. '
               + default_description)

publish_cmdline(reader_name='pep', writer_name='pep_html',
                description=description)
|
[
"you@example.com"
] |
you@example.com
|
1ecb741be63c3d4f4cf27a1f9a5077afa206c9e7
|
587bd3458aadb1f06bd576aab46a7d294d6a0ee2
|
/session47.py
|
cba6a34daa4a8c2c7d7d7bcde7ea6d8900069b81
|
[] |
no_license
|
Shimpa11/PythonTutorial
|
913635dbbd1f8ed6e2331614040a4f76d22ebacf
|
5e5dfe003e5ccf84d4e9754d3192e43009c20c56
|
refs/heads/master
| 2020-12-13T16:48:12.093667
| 2020-05-08T12:21:27
| 2020-05-08T12:21:27
| 234,344,530
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,587
|
py
|
"""
overfitting and underfitting->regression line does not fit
# predicted values
# good fit / robust fit
# DT limitations
# computationally expensive to train
# carry big risk if overfitting
ENSEMBLE LEARNING
Supervised learning where no of models are combined for prediction
BOOSTING->grp of algos that utilizes weighted avgs to make weak learners into stronger learners
each model predicts the feature for next model
kind synchronus
BOOTSTRAP AGGREGATION(BAGGING)
Running models independently and aggregates output at the end without pref to other model
Ansync or multuthreading
Random Forest Algorithn
->classification and regression
a bagging technique
moedls running parallel with no interaction
tress in RF
operates by constructing a mutltitude of DT at training time and outputting the class is the
model of classes
1.how many DTtrees to be used
2. Dataset to be divided in n number of instances
eg; dataset with 100 records
choose n as 3
T1=33
T2=33
T3->34
three DTrees
predictions from T1, T2 and T2 will be used for final prediction
"""
# working on covid 19 dataset
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
import statsmodels.api as sm
from sklearn.preprocessing import FunctionTransformer
df=pd.read_csv("covid19-world.csv")
print(df)
indiaDF=df[df['Country']=='India']
print(indiaDF)
X=indiaDF['Date']
Y=indiaDF['Confirmed']
# log=PowerTransformer()
# log.fit(df[['Date']])
# df['log_convertedDF']=log.transform(df[['Date']])
# X=df['log_convertedDF']
print("data for X:")
print(X)
print("Data for Y:")
print(Y)
# plt.plot(X,Y)
# plt.xlabel("Date")
# plt.ylabel("Confirmed cases")
# plt.grid(True)
# plt.show()
# formatting date for our graph
fig,ax=plt.subplots()
ax.plot_date(X,Y, marker='',linestyle="-.")
fig.autofmt_xdate()
plt.show()
# create the model
# 100 DTrees in our model who shall work with bagging technique
model=RandomForestRegressor(n_estimators=100)
# train the model
# transform into 2D array
# X=X[:,np.newaxis]
# date is in string format cannot train model on string
# so we get an error
# So we data Preprocessing-> refining dataset so optimally so that model works perfectly fine
# new transformation
X1=pd.to_datetime(indiaDF['Date'],infer_datetime_format=True)
print(type(X1))
# lg=np.log(indiaDF['Date'])
# X1=pd.to_datetime(indiaDF['Date'],format='%Y-%m-%d')
# converting date type string to datetime which is mathematical
X1=X1[:,np.newaxis]
print(X1)
print()
print(type(X1))
# model.fit(X,Y)-> generates error withX
x_train, x_test, y_train,y_test=train_test_split(X1,Y,test_size=0.2,random_state=1)
# model.fit(X1,Y)
model.fit(x_train,y_train)
# X=sm.add_constant(X)
# model=sm.OLS(y_train,X)
print("Model Trained")
y_pred=model.predict(x_test)
# print(y_pred)
# print(x_test)
#
futurePredictionDates=pd.Series(['2020-02-12','2020-03-12','2020-04-12','2020-05-12'])
futurePredictionDates=pd.to_datetime(futurePredictionDates,infer_datetime_format=True)
print("==========================================")
# 2D array
futurePredictionDates=futurePredictionDates[:,np.newaxis]
futureConfirmedPredictions=model.predict(futurePredictionDates)
print(futurePredictionDates)
print(futureConfirmedPredictions)
# regression model is lagging because predictions are not accurate as data is exponential not linear
# Conclusion : Predictions are not accurate.
# Since as per our dataset, we do have exponential behaviour in our data.
# So we need to do some more of pre-processing
|
[
"er.shimpa@gmail.com"
] |
er.shimpa@gmail.com
|
fd673bb693206262ce291422603e04587290cc7c
|
5686d1a31b87a47a4774270c00cd141c221cf065
|
/axonius_api_client/api/json_api/assets/history_dates_human.py
|
35f27f8e20051c05403c10b6c37ebc27af60b4a1
|
[
"MIT"
] |
permissive
|
Axonius/axonius_api_client
|
e7eec0845eee9e1b314446121551c584655c2631
|
be49566e590834df1b46494c8588651fa029b8c5
|
refs/heads/master
| 2023-08-19T04:43:13.717989
| 2023-08-10T18:49:40
| 2023-08-10T18:49:40
| 194,601,817
| 17
| 22
|
MIT
| 2023-08-30T18:45:15
| 2019-07-01T04:52:21
|
Python
|
UTF-8
|
Python
| false
| false
| 7,266
|
py
|
# -*- coding: utf-8 -*-
"""Models for API requests & responses."""
import dataclasses
import datetime
import logging
import typing as t
from ....exceptions import ApiError
from ....tools import coerce_int, dt_now, dt_parse
from ..base import BaseModel
LOGGER = logging.getLogger(__name__)
@dataclasses.dataclass
class AssetTypeHistoryDate(BaseModel):
    """A single valid history snapshot date for one asset type."""
    date_api: str
    date_api_exact: str
    asset_type: str
    document_meta: t.Optional[dict] = dataclasses.field(default_factory=dict)
    @property
    def date(self) -> datetime.datetime:
        """History date parsed into a datetime (parsed once, then cached on the instance)."""
        try:
            return self._date
        except AttributeError:
            # noinspection PyAttributeOutsideInit
            self._date = dt_parse(obj=self.date_api_exact, default_tz_utc=True)
            return self._date
    @property
    def delta(self) -> datetime.timedelta:
        """Elapsed time between now and this history date."""
        return dt_now() - self.date
    @property
    def days_ago(self) -> int:
        """Whole days elapsed since this history date."""
        return self.delta.days
    def calculate_delta(self, value: datetime.datetime) -> datetime.timedelta:
        """Absolute time difference between this history date and ``value``."""
        return abs(self.date - value)
    def calculate_days_ago(self, value: datetime.datetime) -> int:
        """Absolute difference in whole days between this history date and ``value``."""
        return self.calculate_delta(value=value).days
    def __str__(self) -> str:
        """Short human readable summary."""
        return f"date={self.date}, days_ago={self.days_ago}"
    @staticmethod
    def get_schema_cls() -> t.Any:
        """No marshmallow schema is associated with this model."""
        return None
@dataclasses.dataclass
class AssetTypeHistoryDates(BaseModel):
    """Human exposure of history dates for a specific asset type.

    ``values`` maps date-only strings ("YYYY-MM-DD") to exact timestamp
    strings, as shown by the lookups in :meth:`get_date_by_date`.
    """
    asset_type: str
    values: dict
    document_meta: t.Optional[dict] = dataclasses.field(default_factory=dict)
    # strftime format used to normalize a parsed datetime back to a `values` key
    DATE_ONLY_FMT: t.ClassVar[str] = "%Y-%m-%d"
    # human-facing format names used in error messages only
    DATE_ONLY_VALID_FMTS: t.ClassVar[t.List[str]] = ["YYYY-MM-DD", "YYYYMMDD"]
    @property
    def dates(self) -> t.List[AssetTypeHistoryDate]:
        """Get the valid history dates for this asset type (built once, cached)."""
        if not hasattr(self, "_dates"):
            # noinspection PyAttributeOutsideInit
            self._dates = [
                AssetTypeHistoryDate(date_api=k, date_api_exact=v, asset_type=self.asset_type)
                for k, v in self.values.items()
            ]
        return self._dates
    @property
    def dates_by_days_ago(self) -> t.Dict[int, AssetTypeHistoryDate]:
        """Get the valid history dates for this asset type keyed by days_ago.

        Rebuilt on each access; days_ago values shift as wall-clock time passes.
        """
        return {x.days_ago: x for x in self.dates}
    def get_date_nearest(
        self, value: t.Union[str, bytes, datetime.timedelta, datetime.datetime]
    ) -> t.Optional[AssetTypeHistoryDate]:
        """Get a valid history date that is nearest to the supplied value.

        Returns None when no dates exist at all.
        """
        nearest: t.Optional[AssetTypeHistoryDate] = None
        if self.dates:
            pivot: datetime.datetime = dt_parse(obj=value, default_tz_utc=True)
            # smallest absolute time difference wins
            nearest: AssetTypeHistoryDate = min(self.dates, key=lambda x: x.calculate_delta(pivot))
            LOGGER.info(f"Closest {self.asset_type} history date to {pivot} found: {nearest}")
        return nearest
    def get_date_nearest_days_ago(self, value: int) -> t.Optional[AssetTypeHistoryDate]:
        """Get a valid history date that is nearest to the supplied value (in days ago)."""
        nearest: t.Optional[AssetTypeHistoryDate] = None
        if self.dates:
            pivot: int = coerce_int(value)
            nearest = min(
                self.dates,
                # key is |days_ago - pivot|, spelled out without abs()
                key=lambda x: x.days_ago - pivot if x.days_ago >= pivot else pivot - x.days_ago,
            )
            LOGGER.info(f"Closest {self.asset_type} history days ago to {pivot} found: {nearest}")
        return nearest
    def get_date_by_date(
        self,
        value: t.Optional[t.Union[str, datetime.timedelta, datetime.datetime]] = None,
        exact: bool = True,
    ) -> t.Optional[str]:
        """Get a valid history date.

        Args:
            value: date to get history date for; None returns None
            exact: if True, raise error if date is not valid, else return nearest valid date
        """
        if value:
            try:
                dt: datetime.datetime = dt_parse(obj=value, default_tz_utc=True)
            except Exception:
                valid = " or ".join(self.DATE_ONLY_VALID_FMTS)
                raise ApiError(f"Invalid history date format {value!r}, format must be {valid}")
            # normalize to the date-only key format used by `values`
            date_api: str = dt.strftime(self.DATE_ONLY_FMT)
            if date_api in self.values:
                return self.values[date_api]
            if exact:
                err = f"Invalid exact history date {date_api!r}"
                raise ApiError(f"{err}\n\n{self}\n\n{err}")
            # inexact mode: fall back to the closest known date
            nearest: t.Optional[AssetTypeHistoryDate] = self.get_date_nearest(value=dt)
            if isinstance(nearest, AssetTypeHistoryDate):
                return nearest.date_api_exact
    def get_date_by_days_ago(
        self, value: t.Optional[t.Union[int, str]] = None, exact: bool = True
    ) -> t.Optional[str]:
        """Get date by number of days ago.

        Args:
            value: days ago to get history date for; None returns None
            exact: if True, raise error if days ago is not valid, else return nearest valid date
        """
        if value is not None:
            value: int = coerce_int(value)
            if value in self.dates_by_days_ago:
                return self.dates_by_days_ago[value].date_api_exact
            # 0 days ago (today) never errors even in exact mode; it falls
            # through to the nearest-match lookup below
            if exact and value != 0:
                nums = sorted(list(self.dates_by_days_ago))
                err = f"Invalid exact days ago {value!r} (highest={nums[-1]}, lowest={nums[0]})"
                raise ApiError(f"{err}\n{self}\n\n{err}")
            nearest: t.Optional[AssetTypeHistoryDate] = self.get_date_nearest_days_ago(value=value)
            if isinstance(nearest, AssetTypeHistoryDate):
                return nearest.date_api_exact
    def get_date(
        self,
        date: t.Optional[t.Union[str, datetime.timedelta, datetime.datetime]] = None,
        days_ago: t.Optional[t.Union[int, str]] = None,
        exact: bool = True,
    ) -> t.Optional[str]:
        """Get a valid history date by a specific date or number of days ago.

        Args:
            date: date to get history date for (takes precedence over days_ago)
            days_ago: days ago to get history date for
            exact: if True, raise error if date is not valid, else return nearest valid date
        """
        return self.get_date_by_date(value=date, exact=exact) or self.get_date_by_days_ago(
            value=days_ago, exact=exact
        )
    @staticmethod
    def get_schema_cls() -> t.Any:
        """No marshmallow schema is associated with this model."""
        return None
    def __repr__(self) -> str:
        """Short summary used by containers and debuggers."""
        return f"asset_type={self.asset_type}, count={len(self.dates)}"
    def __str__(self) -> str:
        """Multi-line listing of every valid history date."""
        items = [
            f"Valid history dates for {self.asset_type}:",
            *[f"{x}" for x in self.dates],
        ]
        return "\n".join(items)
|
[
"jim@axonius.com"
] |
jim@axonius.com
|
4d9e7f2b00593771459ae1d452b172e575e963f1
|
4a2c9299dfd009a614934ee82910adaa17ff3186
|
/app/tasks/models.py
|
75bc9c44588e8100eca8370379fc0d70e88d4cfe
|
[
"MIT"
] |
permissive
|
sampathweb/blueprint_app
|
be8ab9c5bd956bc393b61542158c325ad27fffed
|
149225db4291519a6de56d8930e3a36ff9cd7888
|
refs/heads/master
| 2016-09-06T08:30:25.918934
| 2013-10-24T22:48:52
| 2013-10-24T22:48:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
from datetime import datetime
from app import db
class Task(db.Model):
    """A single task row stored in the ``tasks`` table."""
    __tablename__ = 'tasks'
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Short task title; required and must be unique across all tasks.
    title = db.Column(db.String(50), unique=True, nullable=False)
    # Optional longer free-text description.
    description = db.Column(db.String(255))
    # Visibility/soft-delete flag; new tasks default to active.
    active = db.Column(db.Boolean, default=True)
|
[
"ramesh@sampathweb.com"
] |
ramesh@sampathweb.com
|
7cf821bf15a32a637688390b90127482667b71d9
|
fe6eaa2f3656dedcb2c1e937cc1363d19a0d3ec1
|
/leetcode_python/231.power-of-two.py
|
6c402eec05c33366e999760635c0f1d508c79f71
|
[] |
no_license
|
toyijiu/my_code
|
4619ac6bc06c5032e01d5c215dbae516bbc4fe77
|
dd163cc47e2c706504aba1d42322167fb93dd9e9
|
refs/heads/master
| 2020-08-31T14:49:18.188393
| 2019-11-27T08:57:55
| 2019-11-27T08:57:55
| 218,714,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 883
|
py
|
#
# @lc app=leetcode id=231 lang=python3
#
# [231] Power of Two
#
# https://leetcode.com/problems/power-of-two/description/
#
# algorithms
# Easy (41.63%)
# Total Accepted: 213.5K
# Total Submissions: 512.8K
# Testcase Example: '1'
#
# Given an integer, write a function to determine if it is a power of two.
#
# Example 1:
#
#
# Input: 1
# Output: true
# Explanation: 20 = 1
#
#
# Example 2:
#
#
# Input: 16
# Output: true
# Explanation: 24 = 16
#
# Example 3:
#
#
# Input: 218
# Output: false
#
#
class Solution:
    def isPowerOfTwo(self, n: int) -> bool:
        """Return True if ``n`` is a positive integer power of two.

        A power of two has exactly one bit set, so for n > 0 the expression
        ``n & (n - 1)`` clears the lowest set bit and must be 0.  This is O(1)
        and avoids the float drift of the original repeated ``n /= 2`` loop.
        Non-positive values (0 and negatives) are never powers of two.
        """
        return n > 0 and n & (n - 1) == 0
|
[
"309378714@qq.com"
] |
309378714@qq.com
|
7d31cb5286fe189f97049fbbe823e25ef89ce2d5
|
7a596dc0e121054fe5f05fae6c78774a57cf94ac
|
/setup.py
|
c6a683babc0762c4c1be30bfcd7d184061c327bb
|
[
"MIT"
] |
permissive
|
nhoffman/swarmwrapper
|
8fffae5ed5824313f054fd7edb1ed2d3897b9c02
|
b62f955f843c76c4696320e2b2a14ce2b80e1807
|
refs/heads/master
| 2021-01-17T07:20:15.826516
| 2018-03-14T18:21:32
| 2018-03-14T18:21:32
| 42,473,732
| 3
| 1
| null | 2016-06-07T18:33:22
| 2015-09-14T20:00:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,038
|
py
|
import os
import subprocess
from setuptools import setup, find_packages
# Record the current `git describe` output in swarmwrapper/data/ver at build
# time.  The write goes through ver.tmp so that a failing `git describe`
# (e.g. building from a tarball without .git) removes the temp file and
# leaves any existing ver untouched; stderr is discarded to keep it silent.
subprocess.call(
    ('mkdir -p swarmwrapper/data && '
     'git describe --tags --dirty > swarmwrapper/data/ver.tmp'
     '&& mv swarmwrapper/data/ver.tmp swarmwrapper/data/ver '
     '|| rm -f swarmwrapper/data/ver.tmp'),
    shell=True, stderr=open(os.devnull, "w"))
# Imported after the ver file is (re)generated so __version__ reflects it.
from swarmwrapper.swarmwrapper import __version__
setup(
    author='Noah Hoffman',
    author_email='noah.hoffman@gmail.com',
    description='wrapper for using swarm with pplacer',
    url='https://github.com/nhoffman/swarmwrapper',
    name='swarmwrapper',
    packages=find_packages(),
    package_dir={'swarmwrapper': 'swarmwrapper'},
    # Ship the generated version file inside the wheel/sdist.
    package_data={'swarmwrapper': ['data/ver']},
    entry_points={'console_scripts': ['swarmwrapper = swarmwrapper.swarmwrapper:main']},
    version=__version__,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 2.7',
    ],
)
|
[
"noah.hoffman@gmail.com"
] |
noah.hoffman@gmail.com
|
f6b85844ae241476a94fc75c33793cf360b02aa6
|
9b64f0f04707a3a18968fd8f8a3ace718cd597bc
|
/huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/batch_set_objects_response.py
|
1e18f86df848790e44e33d3a24e185439a9fa96c
|
[
"Apache-2.0"
] |
permissive
|
jaminGH/huaweicloud-sdk-python-v3
|
eeecb3fb0f3396a475995df36d17095038615fba
|
83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b
|
refs/heads/master
| 2023-06-18T11:49:13.958677
| 2021-07-16T07:57:47
| 2021-07-16T07:57:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,672
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class BatchSetObjectsResponse(SdkResponse):
    """Generated response model for the DRS batch object-selection API.

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" in to_dict().
    sensitive_list = []
    openapi_types = {
        'all_counts': 'int',
        'results': 'list[DatabaseObjectResp]'
    }
    attribute_map = {
        'all_counts': 'all_counts',
        'results': 'results'
    }
    def __init__(self, all_counts=None, results=None):
        """BatchSetObjectsResponse - a model defined in huaweicloud sdk.

        :param all_counts: total count of objects (optional)
        :param results: list of per-object selection responses (optional)
        """
        super(BatchSetObjectsResponse, self).__init__()
        self._all_counts = None
        self._results = None
        self.discriminator = None
        # Only assign through the property setters when a value was supplied.
        if all_counts is not None:
            self.all_counts = all_counts
        if results is not None:
            self.results = results
    @property
    def all_counts(self):
        """Gets the all_counts of this BatchSetObjectsResponse.

        Total count.

        :return: The all_counts of this BatchSetObjectsResponse.
        :rtype: int
        """
        return self._all_counts
    @all_counts.setter
    def all_counts(self, all_counts):
        """Sets the all_counts of this BatchSetObjectsResponse.

        Total count.

        :param all_counts: The all_counts of this BatchSetObjectsResponse.
        :type: int
        """
        self._all_counts = all_counts
    @property
    def results(self):
        """Gets the results of this BatchSetObjectsResponse.

        List of batch object-selection responses.

        :return: The results of this BatchSetObjectsResponse.
        :rtype: list[DatabaseObjectResp]
        """
        return self._results
    @results.setter
    def results(self, results):
        """Sets the results of this BatchSetObjectsResponse.

        List of batch object-selection responses.

        :param results: The results of this BatchSetObjectsResponse.
        :type: list[DatabaseObjectResp]
        """
        self._results = results
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts; attributes
        # named in sensitive_list are masked instead of exposed.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Return the JSON representation of the model."""
        # NOTE: third-party simplejson is imported lazily (generated code).
        import simplejson as json
        return json.dumps(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, BatchSetObjectsResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
556cc3314b023aaab5a462d23db49fc1d46593c8
|
bfe6c95fa8a2aae3c3998bd59555583fed72900a
|
/MyCircularQueue.py
|
cc36333e54e793ae307e426135a9c5b28f28d8dd
|
[] |
no_license
|
zzz136454872/leetcode
|
f9534016388a1ba010599f4771c08a55748694b2
|
b5ea6c21bff317884bdb3d7e873aa159b8c30215
|
refs/heads/master
| 2023-09-01T17:26:57.624117
| 2023-08-29T03:18:56
| 2023-08-29T03:18:56
| 240,464,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,165
|
py
|
class Node:
    """Singly linked node holding an integer payload."""
    def __init__(self, val):
        # Payload value; the successor link is wired up by the queue builder.
        self.val = val
        self.next = None
class MyCircularQueue:
    """Fixed-capacity FIFO queue over a pre-built ring of linked Nodes.

    The ring (k + 1 nodes) is constructed once; ``front`` and ``rear``
    pointers walk around it and ``c`` counts the occupied slots.
    """
    def __init__(self, k: int):
        self.size = k
        self.front = Node(-1)
        cursor = self.front
        for _ in range(k):
            cursor.next = Node(-1)
            cursor = cursor.next
        cursor.next = self.front  # close the ring back onto the front node
        self.rear = cursor
        self.c = 0
    def enQueue(self, value: int) -> bool:
        """Store ``value`` at the rear; False when the queue is full."""
        if self.isFull():
            return False
        self.c += 1
        self.rear = self.rear.next
        self.rear.val = value
        return True
    def deQueue(self) -> bool:
        """Drop the front element; False when the queue is empty."""
        if self.isEmpty():
            return False
        self.c -= 1
        self.front = self.front.next
        return True
    def Front(self) -> int:
        """Front value, or -1 when empty."""
        return -1 if self.c == 0 else self.front.val
    def Rear(self) -> int:
        """Rear value, or -1 when empty."""
        return -1 if self.c == 0 else self.rear.val
    def isEmpty(self) -> bool:
        return self.c == 0
    def isFull(self) -> bool:
        return self.c == self.size
# Smoke test: fill a capacity-3 queue, attempt one overflow (ignored),
# then print the rear value (expected: 3).
m = MyCircularQueue(3)
m.enQueue(1)
m.enQueue(2)
m.enQueue(3)
m.enQueue(4)
print(m.Rear())
|
[
"zzz136454872@163.com"
] |
zzz136454872@163.com
|
0d9157411e4013eb8d23e1592693f64f4d2340c9
|
282e6905cbcdc7795f5bd145f5310d4eef4d199d
|
/Dog Walking/Python-Solution.py
|
a3e0fd05d8058ce6172de8cfe0c2f836dd633d76
|
[] |
no_license
|
Lizonghang/IEEEX
|
d9e41d7ba00dc73706afe4ae8aca9dae2d10ee37
|
8e5998820f9e0ba600e1b3f0366981f30e391ae1
|
refs/heads/master
| 2021-07-15T00:37:39.099411
| 2017-10-17T09:42:24
| 2017-10-17T09:42:24
| 105,604,511
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 301
|
py
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
# NOTE: Python 2 source (xrange, raw_input, print statement).
T = input()
for t in xrange(T):
    # N weights, to be split into K groups.
    N, K = map(int, raw_input().strip().split())
    w = []
    for i in xrange(N):
        w.append(int(input()))
    w.sort()
    # Gaps between neighbouring weights in sorted order (N-1 of them).
    d = []
    for i in xrange(1, N):
        d.append(w[i] - w[i - 1])
    d.sort()
    # Keep only the N-K smallest gaps: cutting the sorted weights into K
    # groups removes the largest gaps, minimising the summed within-group
    # spread.
    print sum(d[:N-K])
|
[
"870644199@qq.com"
] |
870644199@qq.com
|
862c4269fd260804df4f1319bae80e4d6604e0b0
|
d48ddc1e4c4b1e379ed1c1971c84aa3c104edff2
|
/pymachinetalk/application/constants.py
|
1ab9e927720c09d627c62449d2566163a69b1225
|
[
"MIT"
] |
permissive
|
machinekit/pymachinetalk
|
1b66e472f364c2d3fe6206823d6a50e41effce9e
|
be4bffd011ea76039407f043553552b8a0b69f2d
|
refs/heads/master
| 2021-01-12T19:19:52.051968
| 2020-09-22T19:40:52
| 2020-09-22T19:40:52
| 44,979,021
| 6
| 7
|
MIT
| 2020-09-22T19:40:54
| 2015-10-26T15:47:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,505
|
py
|
# coding=utf-8
# protobuf
import machinetalk.protobuf.types_pb2 as types
import machinetalk.protobuf.motcmds_pb2 as motcmds
# noinspection PyUnresolvedReferences
from machinetalk.protobuf.status_pb2 import *
# Work coordinate system (G5x) origin identifiers.
ORIGIN_G54 = types.ORIGIN_G54
ORIGIN_G55 = types.ORIGIN_G55
ORIGIN_G56 = types.ORIGIN_G56
ORIGIN_G57 = types.ORIGIN_G57
ORIGIN_G58 = types.ORIGIN_G58
ORIGIN_G59 = types.ORIGIN_G59
ORIGIN_G59_1 = types.ORIGIN_G59_1
ORIGIN_G59_2 = types.ORIGIN_G59_2
# Fixed: previously aliased types.ORIGIN_G59_2 (copy-paste bug).
ORIGIN_G59_3 = types.ORIGIN_G59_3
# RCS execution states reported for motion commands.
MOTION_UNINITIALIZED = types.UNINITIALIZED_STATUS
MOTION_DONE = types.RCS_DONE
MOTION_EXEC = types.RCS_EXEC
MOTION_ERROR = types.RCS_ERROR
MOTION_RECEIVED = types.RCS_RECEIVED
# Motion segment types.
MOTION_TYPE_NONE = motcmds._EMC_MOTION_TYPE_NONE
MOTION_TYPE_TRAVERSE = motcmds._EMC_MOTION_TYPE_TRAVERSE
MOTION_TYPE_FEED = motcmds._EMC_MOTION_TYPE_FEED
MOTION_TYPE_ARC = motcmds._EMC_MOTION_TYPE_ARC
# NOTE: name is misspelled (TOOLCHANGEE) but kept for backward compatibility.
MOTION_TYPE_TOOLCHANGEE = motcmds._EMC_MOTION_TYPE_TOOLCHANGE
MOTION_TYPE_PROBING = motcmds._EMC_MOTION_TYPE_PROBING
MOTION_TYPE_INDEXROTARY = motcmds._EMC_MOTION_TYPE_INDEXROTARY
# Spindle brake control.
RELEASE_BRAKE = 0
ENGAGE_BRAKE = 1
# Jog modes.
JOG_STOP = 0
JOG_CONTINUOUS = 1
JOG_INCREMENT = 2
# Spindle commands.
SPINDLE_FORWARD = 0
SPINDLE_REVERSE = 1
SPINDLE_OFF = 2
SPINDLE_DECREASE = 3
SPINDLE_INCREASE = 4
SPINDLE_CONSTANT = 5
# Error / operator message channel types.
NML_ERROR = types.MT_EMC_NML_ERROR
NML_TEXT = types.MT_EMC_NML_TEXT
NML_DISPLAY = types.MT_EMC_NML_DISPLAY
OPERATOR_ERROR = types.MT_EMC_OPERATOR_ERROR
OPERATOR_TEXT = types.MT_EMC_OPERATOR_TEXT
OPERATOR_DISPLAY = types.MT_EMC_OPERATOR_DISPLAY
|
[
"mail@roessler.systems"
] |
mail@roessler.systems
|
ca7dc3b58354ec5f7b8177aa40ae4f2f1c8c1694
|
242086b8c6a39cbc7af3bd7f2fd9b78a66567024
|
/python/PP4E-Examples-1.4/Examples/PP4E/Internet/Web/dev/PyMailCGI_2.1/cgi-bin/onEditPageSend.py
|
aaeda84076959bbddb4003c8c1a90814ed51323b
|
[] |
no_license
|
chuzui/algorithm
|
7537d0aa051ac4cbe9f6a7ca9a3037204803a650
|
c3006b24c4896c1242d3ceab43ace995c94f10c8
|
refs/heads/master
| 2021-01-10T13:05:30.902020
| 2015-09-27T14:39:02
| 2015-09-27T14:39:02
| 8,404,397
| 4
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,772
|
py
|
#!/usr/bin/python
###############################################################
# On submit in edit window--finish a write, reply, or forward;
# in 2.0, we reuse the send tools in mailtools to construct
# and send the message, instead of older manual string scheme;
# we also now inherit attachment composition from that module;
###############################################################
import cgi, sys, commonhtml, os
from externs import mailtools
def saveAttachments(form, maxattach=3, savedir='partsupload'):
    """
    save uploaded attach files in local files on
    server from which mailtools will add to mail
    (Python 2 CGI code: form.has_key and the octal literal 0666.)
    Returns the list of local file paths written under savedir.
    """
    partnames = []
    # Form fields are named attach1..attachN by the edit page.
    for i in range(1, maxattach+1):
        fieldname = 'attach%d' % i
        if form.has_key(fieldname) and form[fieldname].filename:
            fileinfo = form[fieldname]  # sent and filled?
            filedata = fileinfo.value  # read into string
            filename = fileinfo.filename  # client's path name
            # Strip the client-side directory, whichever separator it used.
            if '\\' in filename:
                basename = filename.split('\\')[-1]  # try dos clients
            elif '/' in filename:
                basename = filename.split('/')[-1]  # try unix clients
            else:
                basename = filename  # assume dir stripped
            pathname = os.path.join(savedir, basename)
            open(pathname, 'wb').write(filedata)
            os.chmod(pathname, 0666)  # need for some srvrs
            partnames.append(pathname)  # list of local paths
    return partnames  # gets type from name
#commonhtml.dumpstatepage(0)
form = cgi.FieldStorage()  # parse form input data
attaches = saveAttachments(form)  # cgi.print_form(form) to see
# server name from module or get-style url
smtpservername = commonhtml.getstandardsmtpfields(form)
# parms assumed to be in form or url here
from commonhtml import getfield  # fetch value attributes
From = getfield(form, 'From')  # empty fields may not be sent
To = getfield(form, 'To')
Cc = getfield(form, 'Cc')
Subj = getfield(form, 'Subject')
text = getfield(form, 'text')
# '?' presumably the placeholder shown by the edit form -- confirm
if Cc == '?': Cc = ''
# tools reused from PyMailGUI: ';' separates multiple recipients
Tos = [addr.strip() for addr in To.split(';')]  # multiple recip lists
Ccs = (Cc and [addr.strip() for addr in Cc.split(';')]) or ''
extraHdrs = [('Cc', Ccs), ('X-Mailer', 'PyMailCGI2')]
sender = mailtools.SilentMailSender(smtpservername)
# Any failure (connect, auth, send) yields the generic error page.
try:
    sender.sendMessage(From, Tos, Subj, extraHdrs, text, attaches)
except:
    commonhtml.errorpage('Send mail error')
else:
    commonhtml.confirmationpage('Send mail')
|
[
"zui"
] |
zui
|
cf48693e97cb38f9ccb91a41e55b51a294037776
|
7cf119239091001cbe687f73018dc6a58b5b1333
|
/datashufflepy-zeus/src/branch_scripts2/NEWS/ZX_CJXW_ZYCJ/ZX_CJXW_ZYCJ_BQW_YW.py
|
ba8ecd5014776a7fa8dbfac0a2059a57780b2dab
|
[
"Apache-2.0"
] |
permissive
|
ILKKAI/dataETL
|
0f5b80c3482994f735f092a1e01fa1009bac4109
|
32f7ec3aaaf32b5074536a615cb9cd5c28bd499c
|
refs/heads/master
| 2022-04-04T19:27:05.747852
| 2020-02-28T11:17:48
| 2020-02-28T11:17:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 356
|
py
|
# -*- coding: utf-8 -*-
from database._mongodb import MongoClient
def data_shuffle(data):
    """Identity transform: return ``data`` unchanged."""
    return data
if __name__ == '__main__':
    # Pull every document for this entity from the ZX_CJXW_ZYCJ collection,
    # run each through the (currently pass-through) shuffle, and print it.
    main_mongo = MongoClient(entity_code="ZX_CJXW_ZYCJ_BQW_YW", mongo_collection="ZX_CJXW_ZYCJ")
    data_list = main_mongo.main()
    for data in data_list:
        re_data = data_shuffle(data)
        print(re_data)
|
[
"499413642@qq.com"
] |
499413642@qq.com
|
599b2f6f2bda0dc8ed7f1276ca0b9a3c34c3d5df
|
200a7e17f51f7a2b959e6b0313b76effd9edb2ea
|
/image_classification/valid_resnet152.py
|
2f26314780c2291382dd4996d630e4bf1bb0bec8
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
ZhiangChen/tornado_ML
|
43f243c0e8371830a104afa5b177deebfc14440d
|
d8bded61a6a234ca67e31776bc8576c6c18f5621
|
refs/heads/main
| 2023-04-29T04:40:05.850645
| 2021-05-20T04:50:32
| 2021-05-20T04:50:32
| 358,980,904
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,370
|
py
|
"""
training.py
Zhiang Chen, April 2020
"""
import torch
import torch.utils.data
import torchvision.datasets
import torch.nn as nn
import torchvision.transforms as transforms
from utils import *
import torchvision.models as models
from data import EurekaDataset
import os
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
torch.manual_seed(0)
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
eureka_normalize = transforms.Normalize(mean=[0.44, 0.50, 0.43],
std=[0.26, 0.25, 0.26])
eureka_transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
eureka_normalize,])
train_transform = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,])
test_transform = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.ToTensor(),
normalize,])
def neural_network(architecture, nm_classes, pretrained=True, change_last_layer=True):
    """Build a torchvision model by name, optionally swapping its head.

    ``architecture`` must be one of the lowercase names in ``model_names``.
    When ``change_last_layer`` is True the classifier layer is replaced by a
    fresh ``nn.Linear`` with ``nm_classes`` outputs.
    """
    assert architecture in model_names
    print("=> creating model '{}'".format(architecture))
    model = models.__dict__[architecture](pretrained=pretrained)
    if not change_last_layer:
        return model
    # Densenets expose their head as `classifier`; the rest use `fc`.
    head_attr = 'classifier' if architecture.startswith('densenet') else 'fc'
    old_head = getattr(model, head_attr)
    setattr(model, head_attr,
            nn.Linear(in_features=old_head.in_features, out_features=nm_classes))
    return model
def cifar10(root='./datasets/cifar10/', val=True):
    """Return the (train, test) CIFAR-10 datasets, downloading if absent.

    ``val`` is accepted for interface compatibility but currently unused:
    the train/val split that once lived here was disabled by the author.
    """
    train_split = torchvision.datasets.CIFAR10(root, train=True, download=True, transform=train_transform)
    test_split = torchvision.datasets.CIFAR10(root, train=False, download=True, transform=test_transform)
    return train_split, test_split
def eureka():
    # Train set from the main image dir; test set from the held-out dir with
    # an extra label file merged in via addJson (presumably appends labelled
    # samples -- confirm against EurekaDataset).
    train = EurekaDataset('./datasets/Eureka/images/','./datasets/Eureka/class.json', eureka_transform)
    test = EurekaDataset('./datasets/Eureka/images_test/','./datasets/Eureka/class.json', eureka_transform)
    test.addJson('./datasets/Eureka/label_102.json')
    return train, test
if __name__ == '__main__':
    # Validation-only driver: no training happens here; every checkpoint in
    # trained_param_resnet152 is loaded in turn and evaluated on the test set.
    cuda = 'cuda:0'
    device = torch.device(cuda)
    nm_classes = 3
    train_dataset, test_dataset = eureka()
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset, batch_size=4, shuffle=True, num_workers=8, collate_fn=collate_fn)
    test_dataloader = torch.utils.data.DataLoader(
        test_dataset, batch_size=4, shuffle=True, num_workers=8, collate_fn=collate_fn)
    model = neural_network('resnet152', nm_classes)
    #if you want to load weight
    #model.load_state_dict(torch.load("trained_param_eureka_cls/epoch_0002.param"))
    #model.eval()
    model.to(device)
    criterion = nn.CrossEntropyLoss().to(device)
    # Optimizer/scheduler are built but unused below (leftover from training).
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params, lr=0.001, momentum=0.9, weight_decay=0.00001)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.65)
    #init_epoch = 0
    #num_epochs = 60
    #print_freq = 100
    #save_param = "trained_param3_resnext101/epoch_{:04d}.param".format(init_epoch)
    #torch.save(model.state_dict(), save_param)
    weight_path = "trained_param_resnet152"
    weights = [f for f in os.listdir(weight_path) if f.endswith(".param")]
    # Lexicographic sort matches the zero-padded epoch_NNNN naming.
    weights.sort()
    for w in weights:
        weight_name = os.path.join(weight_path, w)
        #save_param = "trained_param3_resnext101/epoch_{:04d}.param".format(epoch)
        #train(train_dataloader, model, criterion, optimizer, epoch, device, print_freq)
        #lr_scheduler.step()
        print(weight_name)
        model.load_state_dict(torch.load(weight_name))
        validate(test_dataloader, model, criterion, device)
        #acc = test(model, test_dataset, device)
        #print("acc: %f" % acc)
        #torch.save(model.state_dict(), save_param)
|
[
"zxc251@case.edu"
] |
zxc251@case.edu
|
71f6171f7aaed83d059577c3d31fc17bf81f12e2
|
2a4a17a67b9069c19396c0f8eabc8b7c4b6ff703
|
/BGP3D/Chapter10/Examples/InputManagerClass_01.py
|
692c2663e7045b2d047b6e25f1ff8cc495719df4
|
[] |
no_license
|
kaz101/panda-book
|
0fa273cc2df5849507ecc949b4dde626241ffa5e
|
859a759c769d9c2db0d11140b0d04506611c2b7b
|
refs/heads/master
| 2022-12-19T09:36:05.794731
| 2020-09-16T19:04:10
| 2020-09-16T19:04:10
| 295,784,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,187
|
py
|
''' InputManager Class
The purpose of this class is to have an object
that will record user input and retain that
information for use by other classes.
'''
from direct.showbase.DirectObject import DirectObject
from pandac.PandaModules import *
class InputManager(DirectObject):
    """Records keyboard/mouse state and provides mouse-ray aiming.

    Other classes poll ``keyMap`` for input state and call ``getMouseAim``
    for the 3D point under the mouse cursor.
    """
    def __init__(self):
        self.keyMap = {"up" : False,
                       "down" : False,
                       "left" : False,
                       "right" : False,
                       "fire" : False,
                       "mouse1" : False,
                       "mouse3" : False}
        # Creates a key map to store the state of relevant keyboard keys.
        self.accept("w", self.setKey, ["up", True])
        self.accept("s", self.setKey, ["down", True])
        self.accept("a", self.setKey, ["left", True])
        self.accept("d", self.setKey, ["right", True])
        self.accept("enter", self.setKey, ["fire", True])
        self.accept("mouse1", self.setKey, ["mouse1", True])
        self.accept("mouse3", self.setKey, ["mouse3", True])
        # Registers the events for key and mouse presses and
        # connects them to the setKey method.
        self.accept("w-up", self.setKey, ["up", False])
        self.accept("s-up", self.setKey, ["down", False])
        self.accept("a-up", self.setKey, ["left", False])
        self.accept("d-up", self.setKey, ["right", False])
        self.accept("enter-up", self.setKey, ["fire", False])
        self.accept("mouse1-up", self.setKey, ["mouse1", False])
        self.accept("mouse3-up", self.setKey, ["mouse3", False])
        # Registers the events for key and mouse releases and
        # connects them to the setKey method.
        self.setupMouseAim()
        # creates the collision objects used for aiming with the mouse.
    def setKey(self, key, value):
        """Store the given value in the given key within the key map dictionary."""
        self.keyMap[key] = value
        return
    # setKey: stores the given value in the given key within the key map dictionary.
    def setupMouseAim(self):
        """Build the picking ray and aim plane used by getMouseAim."""
        self.CN = CollisionNode("RayCN")
        self.cRay = CollisionRay()
        self.CN.addSolid(self.cRay)
        self.CN.setFromCollideMask(BitMask32.bit(8))
        self.CN.setIntoCollideMask(BitMask32.allOff())
        self.CN = base.camera.attachNewNode(self.CN)
        # This creates new collision ray and puts it into a collision node.
        # It's bitmask is set to 8, and it will be the only collider at bit 8.
        self.aimPlaneCN = CollisionNode("aimPlaneCN")
        self.aimPlane = CollisionPlane(Plane(Vec3(0,-1,0),
                                             Point3(0,30,0)))
        self.aimPlaneCN.addSolid(self.aimPlane)
        self.aimPlaneCN.setFromCollideMask(BitMask32.allOff())
        self.aimPlaneCN.setIntoCollideMask(BitMask32.bit(8))
        self.aimPlaneCNP = base.camera.attachNewNode(self.aimPlaneCN)
        # This creates a collision plane (facing -Y, 30 units in front) and
        # puts it into a collision node. Its bitmask is set to 8, making it
        # the only into-collidable object at bit 8. The node is attached to
        # the camera so that it will move with the camera.
        self.cTrav = CollisionTraverser()
        # Creates a traverser to do collision testing
        self.cHanQ = CollisionHandlerQueue()
        # Creates a queue type handler to receive the collision event info.
        self.cTrav.addCollider(self.CN, self.cHanQ)
        # register the ray as a collider with the traverser,
        # and register the handler queue as the handler to be used for the collisions.
    def getMouseAim(self):
        """Return the 3D point (in render space) under the mouse cursor.

        Shoots the picking ray from the camera through the cursor at the aim
        plane; returns None implicitly when the window has no mouse or when
        nothing was hit.
        """
        if base.mouseWatcherNode.hasMouse():
            # Guard: accessing the mouse when it is outside the window would
            # raise an error.
            mpos = base.mouseWatcherNode.getMouse()
            # get the mouse position in the window
            self.cRay.setFromLens(
                base.camNode, mpos.getX(), mpos.getY())
            # sets the ray's origin at the camera and directs it to shoot through the mouse cursor
            self.cTrav.traverse(self.aimPlaneCNP)
            # performs the collision checking pass
            self.cHanQ.sortEntries()
            # Sort the handler entries from nearest to farthest
            if(self.cHanQ.getNumEntries() > 0):
                entry = self.cHanQ.getEntry(0)
                colPoint = entry.getSurfacePoint(render)
                return(colPoint)
|
[
"kaz101130@gmail.com"
] |
kaz101130@gmail.com
|
c6a4ab92e7015536946f440f0ffb7bc101b5570f
|
214230d0796377be0bfdda286c2c389b92a19555
|
/Codegate/2022 Quals/nft/monitor.py
|
4922b3db2a6b542e3d5b30e586133eea5016c4fd
|
[
"Unlicense"
] |
permissive
|
Qwaz/solved-hacking-problem
|
fa5ebfeb98ec979cf57dac1470a651199f2dc50d
|
cda0db4888322cce759a7362de88fff5cc79f599
|
refs/heads/master
| 2023-08-24T03:45:12.481496
| 2023-07-16T12:38:08
| 2023-07-16T12:38:08
| 49,208,719
| 100
| 28
| null | 2022-03-24T00:51:04
| 2016-01-07T14:18:18
|
HTML
|
UTF-8
|
Python
| false
| false
| 604
|
py
|
import json
import time
from account import *
from web3 import Web3
# Chain endpoint of the challenge's JSON-RPC node.
url = "http://13.124.97.208:8545"
provider = Web3(Web3.HTTPProvider(url))
# Contract ABI saved alongside this script; TARGET_ADDRESS / SENDER_ADDRESS
# come from the star-import of account above.
with open("abi.json") as f:
    nft_abi = json.load(f)
nft = provider.eth.contract(TARGET_ADDRESS, abi=nft_abi)
# Poll and print chain/account/NFT state forever, every 3 seconds.
while True:
    print(
        {
            "Balance": provider.eth.getBalance(SENDER_ADDRESS),
            "Block number": provider.eth.block_number,
            "My transactions": provider.eth.get_transaction_count(SENDER_ADDRESS),
            "NFTs": nft.functions.getIDs().call({"from": SENDER_ADDRESS}),
        }
    )
    time.sleep(3)
|
[
"qwazpia@gmail.com"
] |
qwazpia@gmail.com
|
dceb561fd9b18dfb85b1c5185bbee23385340b30
|
cd9e707df25dd641163c0f89f33bdbcaa4f11a0c
|
/app/launcher.py
|
35b960fefa33b4f16d990cbfd903bb0ea5170691
|
[] |
no_license
|
depixusgenome/libanalysis
|
80e50953d4fad1654091bbaf59f181803671a242
|
3565db8c0e42d62c1adee1d664846227499f1302
|
refs/heads/master
| 2020-09-24T08:40:10.498554
| 2019-11-29T13:03:31
| 2019-11-29T13:03:31
| 225,716,577
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,795
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"Updates app manager so as to deal with controllers"
from contextlib import closing
from typing import Dict, Any
import sys
import asyncio
import socket
import random
from tornado.platform.asyncio import AsyncIOMainLoop
from bokeh.application import Application
from bokeh.application.handlers import FunctionHandler
from bokeh.server.server import Server
from bokeh.settings import settings
from bokeh.resources import DEFAULT_SERVER_PORT
from utils.logconfig import getLogger
from .scripting import orders
from .maincontrol import createview as _creator
LOGS = getLogger(__name__)
CAN_LOAD_JS = "."
class _FunctionHandler(FunctionHandler):
    def __init__(self, view, stop = False):
        """Wrap *view* as a bokeh FunctionHandler.

        stop: when True the server shuts down once the last session closes.
        """
        self.__gotone = False
        self.server = None
        self.stoponnosession = stop
        self.view = view
        # __start is defined elsewhere in this class (outside this chunk).
        super().__init__(self.__start)
def on_session_created(self, session_context):
LOGS.debug('started session')
def on_session_destroyed(self, session_context):
LOGS.debug('destroyed session')
if not self.__gotone:
return
if self.server is not None and self.stoponnosession:
server, self.server = self.server, None
if len(server.get_sessions()) == 0:
LOGS.info('no more sessions -> stopping server')
server.stop()
@classmethod
def serveapplication(cls, view, **kwa):
"Launches a bokeh server"
# monkeypatch the js production: it's been done once & saved during compilation
cls.__monkeypatch_bokeh()
cls.__setport(kwa)
cls.__server_kwargs(kwa)
fcn = cls(view)
server = Server(Application(fcn), **kwa)
fcn.server = server
server.MainView = view
server.appfunction = fcn
return server
@classmethod
def launchflexx(cls, view, **kwa):
"Launches a bokeh server"
from webruntime import launch as _flexxlaunch
port = cls.__setport(kwa)
if isinstance(kwa.get('size', ()), list):
kwa['size'] = tuple(kwa['size'])
if isinstance(view, Server):
server = view
else:
server = cls.serveapplication(view, **kwa.pop('server', {}), port = port)
if kwa.get('runtime', 'app').endswith('app'):
cls.__monkeypatch_flexx(server)
view.MainControl.FLEXXAPP = _flexxlaunch('http://localhost:{}/'.format(port),
**kwa)
elif kwa.get('runtime', '') != 'none':
server.io_loop.add_callback(lambda: server.show("/"))
return server
@staticmethod
def __monkeypatch_flexx(server):
from webruntime._common import StreamReader
def run(self, __old__ = StreamReader.run):
"Stop the stream reader"
__old__(self)
server.stop()
StreamReader.run = run
@staticmethod
def __monkeypatch_bokeh():
# pylint: disable=import-outside-toplevel
from bokeh.core.properties import Seq
def from_json(self, json, models=None, __old__ = Seq.from_json):
"parse docstring"
if isinstance(json, dict):
json = {int(i): j for i, j in json.items()}
keys = sorted(json)
assert keys == list(range(max(json)+1))
json = [json[i] for i in keys]
return __old__(self, json, models = models)
Seq.from_json = from_json
def _stop(self, wait=True, __old__ = Server.stop):
if not getattr(self, '_stopped', False):
__old__(self, wait)
self.io_loop.stop()
Server.stop = _stop
@staticmethod
def __server_kwargs(kwa)-> Dict[str, Any]:
kwa.setdefault('sign_sessions', settings.sign_sessions())
kwa.setdefault('secret_key', settings.secret_key_bytes())
kwa.setdefault('generate_session_ids', True)
kwa.setdefault('use_index', True)
kwa.setdefault('redirect_root', True)
kwa.pop('runtime', None)
if isinstance(kwa.get('size', ()), list):
kwa['size'] = tuple(kwa['size'])
LOGS.debug("dynamic loads: %s", orders().dynloads())
LOGS.info(' http://localhost:%s', kwa['port'])
for mdl in orders().dynloads():
getattr(sys.modules.get(mdl, None), 'server', lambda x: None)(kwa)
return kwa
@staticmethod
def __setport(kwa):
if kwa.get('port', None) == 'random':
while True:
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.settimeout(2)
kwa['port'] = random.randint(2000, 8000)
if sock.connect_ex(("127.0.0.1", kwa['port'])) != 0:
break
else:
kwa['port'] = int(kwa.get('port', DEFAULT_SERVER_PORT))
return kwa['port']
def __onloaded(self):
if self.__gotone is False:
self.__gotone = True
LOGS.debug("GUI loaded")
def __start(self, doc):
doc.title = self.view.launchkwargs()['title']
orders().run(self.view, doc, self.__onloaded)
def setup(locs,  # module namespace (typically locals()) to populate
          creator = _creator,
          defaultcontrols = tuple(),
          defaultviews = tuple(),
          ):
    """
    Populates a module with launch and serve functions for a given app context.
    The context is created as follows, say in module `app.mycontext`:
    ```python
    #!/usr/bin/env python3
    # -*- coding: utf-8 -*-
    "Updates app manager so as to deal with controllers"
    from .launcher import setup
    VIEWS = ('undo.UndoView', 'view.tasksview.TasksView',)
    CONTROLS = ('control.taskcontrol.TaskController',
                'taskstore.control',
                'undo.UndoController')
    setup(locals(), defaultcontrols = CONTROLS, defaultviews = VIEWS)
    ```
    To launch a `webruntime` window displaying `myview.MyView`:
    ```python
    from app.mycontext import launch
    launch("myview.MyView")
    ```
    See `app.toolbar` for an example which sets-up a toolbar above any view provided
    as an argument.
    """
    def _install():
        # every served/launched app runs on a fresh asyncio loop wired into tornado
        asyncio.set_event_loop(asyncio.new_event_loop())
        AsyncIOMainLoop().make_current()
    def application(main,
                    creator = creator,
                    controls = defaultcontrols,
                    views = defaultviews):
        "Creates a main view"
        return creator(main, controls, views)
    def serve(main,
              creator = creator,
              controls = defaultcontrols,
              views = defaultviews,
              apponly = False,
              **kwa):
        "Creates a browser app"
        _install()
        app = application(main, creator, controls, views)
        if apponly:
            # caller only wants the assembled view, not a running server
            return app
        return _FunctionHandler.serveapplication(app, **kwa)
    def launch(main,
               creator = creator,
               controls = defaultcontrols,
               views = defaultviews,
               apponly = False,
               **kwa):
        "Creates a desktop app"
        _install()
        app = application(main, creator, controls, views)
        if apponly:
            return app
        return _FunctionHandler.launchflexx(app, **app.launchkwargs(**kwa))
    # export into the caller's namespace without clobbering pre-existing names
    locs.setdefault('application', application)
    locs.setdefault('serve', serve)
    locs.setdefault('launch', launch)
|
[
"pol.davezac@depixus.com"
] |
pol.davezac@depixus.com
|
150f246e7cffd52c4816f26c2ce92dcb16d63e69
|
bfe6c95fa8a2aae3c3998bd59555583fed72900a
|
/trapRainWater.py
|
90bf3bb91de30cedf3d4da4078594bb04fe33a9b
|
[] |
no_license
|
zzz136454872/leetcode
|
f9534016388a1ba010599f4771c08a55748694b2
|
b5ea6c21bff317884bdb3d7e873aa159b8c30215
|
refs/heads/master
| 2023-09-01T17:26:57.624117
| 2023-08-29T03:18:56
| 2023-08-29T03:18:56
| 240,464,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,026
|
py
|
from heapq import heappop, heappush
from typing import List
class Solution:
    def trapRainWater(self, heightMap: List[List[int]]) -> int:
        """Compute the volume of water trapped on a 2-D elevation map (LeetCode 407).

        Min-heap flood-fill from the border: cells are finalized in order of
        increasing water *level* (water + terrain height).  Each interior cell
        ends up holding ``max(0, level - height)`` where ``level`` is the lowest
        barrier on any path to the border.

        The original had the neighbor-relaxation logic copy-pasted four times
        (one branch per direction); it is folded into a single direction loop.

        Time O(m*n log(m*n)), space O(m*n).
        """
        m, n = len(heightMap), len(heightMap[0])
        UNSET = 12345678  # sentinel: water amount not yet decided for this cell
        water = [[UNSET] * n for _ in range(m)]
        heap = []  # entries: (water level = water + height, row, col)

        # Seed the frontier with every border cell; the border traps no water.
        for i in range(m):
            for j in (0, n - 1):
                heappush(heap, (heightMap[i][j], i, j))
                water[i][j] = 0
        for j in range(1, n - 1):
            for i in (0, m - 1):
                heappush(heap, (heightMap[i][j], i, j))
                water[i][j] = 0

        while heap:
            level, i, j = heappop(heap)
            for di, dj in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                ni, nj = i + di, j + dj
                if not (0 <= ni < m and 0 <= nj < n):
                    continue
                # Relax only cells still above this level; finalized
                # zero-water cells (water == 0) are never revisited.
                if water[ni][nj] > 0 and water[ni][nj] + heightMap[ni][nj] > level:
                    water[ni][nj] = max(0, level - heightMap[ni][nj])
                    heappush(heap, (water[ni][nj] + heightMap[ni][nj], ni, nj))

        return sum(map(sum, water))
# Ad-hoc driver: sample grids for a quick manual check.  Only the *last*
# assignment is evaluated; the earlier one is kept for reference.
heightMap = [[1, 4, 3, 1, 3, 2], [3, 2, 1, 3, 2, 4], [2, 3, 3, 2, 3, 1]]
heightMap = [
    [3, 3, 3, 3, 3],
    [3, 2, 2, 2, 3],
    [3, 2, 1, 2, 3],
    [3, 2, 2, 2, 3],
    [3, 3, 3, 3, 3],
]
# heightMap=[[12,13,1,12],[13,4,13,12],[13,8,10,12],[12,13,12,12],[13,13,13,13]]
solver = Solution()
print(solver.trapRainWater(heightMap))
|
[
"zzz136454872@163.com"
] |
zzz136454872@163.com
|
9c767873c7d94a6b7c04e62f428978616df72b28
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/P/pinakighosh/state18.py
|
8edcffd229b35ac73c3e1e4c249b99569bd7806f
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,650
|
py
|
# ScraperWiki job: scrape the alphabetical village list of the 2001 Indian
# census for state code 18 and save one sqlite row per village.
# NOTE: Python 2 code (print statements, mechanize); network + sqlite side effects.
import scraperwiki
import mechanize # added by Usha
import re # added by Usha
import lxml.html
# listing page for state 18 (the multi-state loop below is disabled)
url="http://censusindia.gov.in/Census_Data_2001/Village_Directory/List_of_Villages/List_of_Villages_Alphabetical.aspx?cki=&State_Code=18"
import string
#create list of upper case alphabets
l=list(string.ascii_uppercase)
#create list 1-35
l1=list(range(1,36))
l2=[]
s_no=0
#convert numbers in l2 to string
for i in l1:
    l2.append(str(i))
#append a 0 for single digit numbers (zero-padded state codes "01".."09")
for i in range(10):
    l2[i]='0'+l2[i]
state_count=0
c=1
data=[]
#run loop for all state and union territories
#while state_count<35:
while state_count<1:
    #add state code to the url
    #url1=url+l2[state_count]+"&SearchKey="
    url1=url+"&SearchKey="
    state_count+=1
    count=0
    l_c=0
    #data=[]
    row=[]
    #run loop for alphabets A..Z (one search per initial letter)
    while count<26:
    #while count<2:
        #add search alphabet to the url
        url2=url1+l[count]
        # code added by Usha Nair
        br = mechanize.Browser()
        br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
        response = br.open(url2)
        VAR1 = response.read() #reads the source file for the web page
        br.select_form(nr=0)
        br.set_all_readonly(False)
        # locate the ASP.NET postback target hidden behind the "Show All" link
        mnext = re.search("""<a id="lnkShowAll" href="javascript:__doPostBack\('(.*?)','(.*?)'\)" style="font-family:Verdana;font-size:Smaller;">Show All""", VAR1)
        if not mnext:
            break
        br["__EVENTTARGET"] = mnext.group(1)
        br["__EVENTARGUMENT"] = mnext.group(2)
        #br.find_control("btnSearch").disabled = True
        response = br.submit()
        VAR2 = response.read() # source code after submitting show all
        print "response"
        print response
        print "VAR2"
        print VAR2
        # Usha Nair till here
        #html = scraperwiki.scrape(url2)
        #root = lxml.html.fromstring(html)
        root = lxml.html.fromstring(VAR2)
        count+=1
        #select div where data exists
        for el in root.cssselect("div#printarea td"):
            #select appropriate table row
            # cells are consumed 5 at a time: row[0]..row[4] are presumably
            # serial no, village name, village code, sub-district, district
            for el2 in el.cssselect("tr.GridAlternativeRows td"):
                if l_c<4:
                    row.append(el2.text_content())
                    l_c+=1
                else:
                    row.append(el2.text_content())
                    l_c=0
                    data.append(row)
                    #save to data base
                    scraperwiki.sqlite.save(unique_keys=["sl_no"],
                        data={"sl_no":s_no,"village_name":row[1],
                        "village_code":row[2],"Sub_district_Name":row[3],"District_Name":row[4]})
                    s_no+=1
                    row=[]
            #select appropriate table row (non-alternating stripe rows)
            for el2 in el.cssselect("tr.GridRows td"):
                if l_c<4:
                    row.append(el2.text_content())
                    l_c+=1
                else:
                    row.append(el2.text_content())
                    l_c=0
                    data.append(row)
                    #save to data base
                    scraperwiki.sqlite.save(unique_keys=["sl_no"],
                        data={"sl_no":s_no,"village_name":row[1],
                        "village_code":row[2],"Sub_district_Name":row[3],"District_Name":row[4]})
                    s_no+=1
                    row=[]
print "completed scrapping"
# NOTE(review): this is a verbatim duplicate of the scraper script above —
# the stored content contains the whole program twice; one copy should go.
import scraperwiki
import mechanize # added by Usha
import re # added by Usha
import lxml.html
url="http://censusindia.gov.in/Census_Data_2001/Village_Directory/List_of_Villages/List_of_Villages_Alphabetical.aspx?cki=&State_Code=18"
import string
#create list of upper case alphabets
l=list(string.ascii_uppercase)
#create list 1-35
l1=list(range(1,36))
l2=[]
s_no=0
#convert numbers in l2 to string
for i in l1:
    l2.append(str(i))
#append a 0 for single digit numbers
for i in range(10):
    l2[i]='0'+l2[i]
state_count=0
c=1
data=[]
#run loop for all state and union territories
#while state_count<35:
while state_count<1:
    #add state code to the url
    #url1=url+l2[state_count]+"&SearchKey="
    url1=url+"&SearchKey="
    state_count+=1
    count=0
    l_c=0
    #data=[]
    row=[]
    #run loop for alphabets
    while count<26:
    #while count<2:
        #add search alphabet to the url
        url2=url1+l[count]
        # code added by Usha Nair
        br = mechanize.Browser()
        br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
        response = br.open(url2)
        VAR1 = response.read() #reads the source file for the web page
        br.select_form(nr=0)
        br.set_all_readonly(False)
        mnext = re.search("""<a id="lnkShowAll" href="javascript:__doPostBack\('(.*?)','(.*?)'\)" style="font-family:Verdana;font-size:Smaller;">Show All""", VAR1)
        if not mnext:
            break
        br["__EVENTTARGET"] = mnext.group(1)
        br["__EVENTARGUMENT"] = mnext.group(2)
        #br.find_control("btnSearch").disabled = True
        response = br.submit()
        VAR2 = response.read() # source code after submitting show all
        print "response"
        print response
        print "VAR2"
        print VAR2
        # Usha Nair till here
        #html = scraperwiki.scrape(url2)
        #root = lxml.html.fromstring(html)
        root = lxml.html.fromstring(VAR2)
        count+=1
        #select div where data exists
        for el in root.cssselect("div#printarea td"):
            #select appropriate table row
            for el2 in el.cssselect("tr.GridAlternativeRows td"):
                if l_c<4:
                    row.append(el2.text_content())
                    l_c+=1
                else:
                    row.append(el2.text_content())
                    l_c=0
                    data.append(row)
                    #save to data base
                    scraperwiki.sqlite.save(unique_keys=["sl_no"],
                        data={"sl_no":s_no,"village_name":row[1],
                        "village_code":row[2],"Sub_district_Name":row[3],"District_Name":row[4]})
                    s_no+=1
                    row=[]
            #select appropriate table row
            for el2 in el.cssselect("tr.GridRows td"):
                if l_c<4:
                    row.append(el2.text_content())
                    l_c+=1
                else:
                    row.append(el2.text_content())
                    l_c=0
                    data.append(row)
                    #save to data base
                    scraperwiki.sqlite.save(unique_keys=["sl_no"],
                        data={"sl_no":s_no,"village_name":row[1],
                        "village_code":row[2],"Sub_district_Name":row[3],"District_Name":row[4]})
                    s_no+=1
                    row=[]
print "completed scrapping"
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
20786461b76ff1f1326b20fb8848a9ae5e46f159
|
007e187c7d91702fc900b75f771a2470e1c091e1
|
/tests/test_docker.py
|
28b67c058bb792d1b640d3f6bcd6e4c2eb60caf8
|
[] |
no_license
|
bibi21000/janitoo_raspberry_i2c_ht16k33
|
bb3d05bdb395a29862c4e6bbb57c5e369aaca1e8
|
3dbb883cdc3439fd164edff21ffc0a0da7ee160f
|
refs/heads/master
| 2021-01-21T04:42:30.553870
| 2018-01-01T23:43:29
| 2018-01-01T23:43:29
| 55,532,142
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,899
|
py
|
# -*- coding: utf-8 -*-
"""Unittests for Janitoo-common.
"""
__license__ = """
This file is part of Janitoo.
Janitoo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Janitoo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Janitoo. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = 'Sébastien GALLET aka bibi21000'
__email__ = 'bibi21000@gmail.com'
__copyright__ = "Copyright © 2013-2014-2015-2016 Sébastien GALLET aka bibi21000"
import warnings
warnings.filterwarnings("ignore")
import sys, os
import time
import unittest
import logging
import threading
import mock
import logging
from janitoo_nosetests import JNTTBase
from janitoo_nosetests.server import JNTTDockerServerCommon, JNTTDockerServer
from janitoo.runner import Runner, jnt_parse_args
from janitoo.server import JNTServer
from janitoo.utils import HADD_SEP, HADD
from janitoo_raspberry.server import PiServer
class TestRaspberryHT16K33Serser(JNTTDockerServer, JNTTDockerServerCommon):
    """Test the server
    (runs only inside the Docker test harness; see onlyDockerTest below)
    """
    # logging / working-directory configuration for the test server
    loglevel = logging.DEBUG
    path = '/tmp/janitoo_test'
    # broker credentials used by the test configuration
    broker_user = 'toto'
    broker_password = 'toto'
    # server under test and its configuration file
    server_class = PiServer
    server_conf = "tests/data/janitoo_raspberry_i2c_ht16k33.conf"
    # HADD addresses for controller 144, nodes 0 and 1
    hadds = [HADD%(144,0), HADD%(144,1)]
    def test_040_server_start_no_error_in_log(self):
        """Start/reload/restart the dockerized server, checking logs stay clean."""
        JNTTDockerServer.onlyDockerTest()
        JNTTDockerServerCommon.minimal_040_server_start_reload_restart(self)
|
[
"bibi21000@gmail.com"
] |
bibi21000@gmail.com
|
ea84847a897152e526e739c1b328a0e72c02ca0e
|
7fdac5209f86de756b9a8123a0911b70738eceeb
|
/pySDC/playgrounds/other/plots_overresolve_iter.py
|
74944e603d1cf9c88e95f665c70286392dffcc72
|
[
"BSD-2-Clause"
] |
permissive
|
Parallel-in-Time/pySDC
|
edc66e399f6066effc5aaa376883e88e06b5332b
|
1a51834bedffd4472e344bed28f4d766614b1537
|
refs/heads/master
| 2023-08-30T23:17:56.017934
| 2023-08-30T05:42:00
| 2023-08-30T05:42:00
| 26,165,004
| 30
| 31
|
BSD-2-Clause
| 2023-09-14T06:40:13
| 2014-11-04T10:56:53
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,099
|
py
|
import pySDC.helpers.plot_helper as plt_helper
def beautify_plot(nprocs, fname):
    """Apply the shared cosmetics (grid, legend, labels, limits, ticks) to the
    current figure and save it under *fname*.

    Args:
        nprocs: list of parallel step counts, used as x-tick positions/labels.
        fname: output file name passed to plt_helper.savefig.
    """
    plt_helper.plt.grid()
    plt_helper.plt.legend(loc=2)
    plt_helper.plt.xlabel('Number of parallel steps')
    plt_helper.plt.ylabel('Theoretical speedup')
    # pad the x-range by 10% around the first/last process count
    plt_helper.plt.xlim(0.9 * nprocs[0], 1.1 * nprocs[-1])
    plt_helper.plt.ylim(0.25, 6.5)
    plt_helper.plt.xticks(nprocs, nprocs)
    plt_helper.plt.minorticks_off()
    # save plot, beautify
    plt_helper.savefig(fname)
def plot_data():
    """Plot theoretical speedups vs. number of parallel steps for an
    over-resolved and two well-resolved cases, saving one figure each.

    Speedup formula: p / (p/k0 * alpha + k/k0 * (1 + alpha)) with iteration
    counts k per processor count p, k0 the serial iteration count, and alpha
    presumably the coarse/fine cost ratio — TODO confirm against the paper.
    """
    nprocs = [1, 2, 4, 8]
    # over-resolved case: iteration counts measured per processor count
    niter_overres = [9, 5, 11, 23]
    alpha_overres = 1.0 / 4.0
    speedup_overres = [
        p / (p / niter_overres[0] * alpha_overres + k / niter_overres[0] * (1 + alpha_overres))
        for p, k in zip(nprocs, niter_overres)
    ]
    plt_helper.setup_mpl()
    plt_helper.newfig(textwidth=238.96, scale=1.0)
    plt_helper.plt.semilogx(
        nprocs,
        speedup_overres,
        color='orange',
        marker='o',
        markersize=6,
        label=r'$Nx_\mathcal{F}=512, \alpha=\frac{1}{4}$',
    )
    beautify_plot(nprocs, 'fool_speedup_overres_iter')
    # well-resolved case, two different alpha values on one figure
    niter_wellres_1 = [9, 11, 16, 28]
    alpha_wellres_1 = 1.0 / 4.0
    speedup_wellres_1 = [
        p / (p / niter_wellres_1[0] * alpha_wellres_1 + k / niter_wellres_1[0] * (1 + alpha_wellres_1))
        for p, k in zip(nprocs, niter_wellres_1)
    ]
    niter_wellres_2 = [9, 11, 16, 29]
    alpha_wellres_2 = 1.0 / 2.0
    speedup_wellres_2 = [
        p / (p / niter_wellres_2[0] * alpha_wellres_2 + k / niter_wellres_2[0] * (1 + alpha_wellres_2))
        for p, k in zip(nprocs, niter_wellres_2)
    ]
    plt_helper.setup_mpl()
    plt_helper.newfig(textwidth=238.96, scale=1.0)
    plt_helper.plt.semilogx(
        nprocs, speedup_wellres_1, color='r', marker='d', markersize=6, label=r'$Nx_\mathcal{F}=32, \alpha=\frac{1}{4}$'
    )
    plt_helper.plt.semilogx(
        nprocs, speedup_wellres_2, color='b', marker='s', markersize=6, label=r'$Nx_\mathcal{F}=32, \alpha=\frac{1}{2}$'
    )
    beautify_plot(nprocs, 'fool_speedup_wellres_iter')
# standard script entry point
if __name__ == '__main__':
    plot_data()
|
[
"r.speck@fz-juelich.de"
] |
r.speck@fz-juelich.de
|
76359312a5bbde79e5804a8ff7620d844d4189e4
|
ebacefb163f31b3dd43f15ebdc91c5b76f6b703b
|
/lib/github/tasks.py
|
c6dd31a374dbbdae389ee01476928217efdc4fde
|
[
"MIT"
] |
permissive
|
xyzlat/django-htk
|
a0180d3104c7e716cb07e075408acc14702abbc2
|
051256698ce7a593a8a9365c36ad9d265c6e0d80
|
refs/heads/master
| 2023-04-29T18:48:23.205203
| 2021-05-22T04:15:13
| 2021-05-22T04:15:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,373
|
py
|
# HTK Imports
from htk.constants.time import (
BUSINESS_HOURS_START,
ISOWEEKDAY_WEEKDAYS,
MORNING_HOURS_END,
)
from htk.tasks import BaseTask
from htk.utils.text.transformers import get_symbols
# isort: off
class GitHubReminderTask(BaseTask):
    """Periodic task that sends users GitHub pull-request reminders over Slack,
    honoring a per-user cooldown."""
    def __init__(self):
        from htk.lib.github.cachekeys import GitHubReminderCooldown
        super(GitHubReminderTask, self).__init__(cooldown_class=GitHubReminderCooldown)
    def has_cooldown(self, user):
        """Return whether *user* is still inside the reminder cooldown window."""
        _has_cooldown = super(GitHubReminderTask, self).has_cooldown(user)
        #_has_cooldown = False
        return _has_cooldown
    def get_users(self):
        """Select users who opted into GitHub reminders and whose local time is
        currently in weekday morning business hours."""
        import htk.apps.accounts.filters as _filters
        from htk.apps.accounts.utils.lookup import get_users_with_attribute_value
        users = get_users_with_attribute_value('github_reminders', True, as_bool=True)
        users = _filters.users_currently_at_local_time(users, BUSINESS_HOURS_START, MORNING_HOURS_END, isoweekdays=ISOWEEKDAY_WEEKDAYS)
        return users
    def execute(self, user):
        """Send reminders for the organizations/repositories configured on the
        user's profile attributes."""
        # NOTE(review): `now` is computed but never used below
        now = user.profile.get_local_time()
        # characters accepted in org/repo symbols — presumably interpreted as a
        # regex character class by get_symbols (note the escaped '-'); confirm
        valid_chars = 'A-Za-z0-9_\-/'
        github_organizations = get_symbols(
            user.profile.get_attribute('github_organizations') or '',
            valid_chars=valid_chars
        )
        github_repositories = get_symbols(
            user.profile.get_attribute('github_repositories') or '',
            valid_chars=valid_chars
        )
        self.send_github_reminders(
            user,
            organizations=github_organizations,
            repositories=github_repositories
        )
    def send_github_reminders(self, user, organizations=None, repositories=None):
        """Build a GitHubReminderSlackBot from the user's profile attributes and
        post the open pull-request reminder to their Slack channel."""
        github_access_token = user.profile.get_attribute('github_access_token')
        slack_webhook_url = user.profile.get_attribute('slack_webhook_url')
        slack_channel = user.profile.get_attribute('github_reminders_slack_channel')
        mention_here = user.profile.get_attribute('github_reminders_slack_mention_here')
        from htk.lib.github.bots import GitHubReminderSlackBot
        bot = GitHubReminderSlackBot(
            slack_webhook_url,
            slack_channel,
            github_access_token,
            organizations=organizations,
            repositories=repositories,
            mention_here=mention_here
        )
        bot.remind_pull_requests()
|
[
"hello@jontsai.com"
] |
hello@jontsai.com
|
fcaccd3bf997e4178ad0a6a92d0e8fd872093ed1
|
e838ea567fe5216bd83b72d5cc549363a666ac3d
|
/registry/serializers/data_category.py
|
1513f6a2ef2eb029d60b448425a25df85bfe014e
|
[] |
no_license
|
iuriramos/swim-registry
|
f7ffee9a57b92021e7066820249092d1558a944d
|
7c71d294b5aa7cb40e01ed559e2fcb81d2e1f43a
|
refs/heads/master
| 2021-09-13T20:22:29.624535
| 2018-05-03T21:30:26
| 2018-05-03T21:30:26
| 85,312,675
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
from registry.models.data_category import DataCategory
from rest_framework import serializers, viewsets
class DataCategorySerializer(serializers.HyperlinkedModelSerializer):
    """DRF serializer exposing a DataCategory's primary key and name."""
    class Meta:
        model = DataCategory
        fields = ('pk', 'name', )
class DataCategoryViewSet(viewsets.ModelViewSet):
    """Full CRUD REST endpoint for DataCategory records."""
    queryset = DataCategory.objects.all()
    serializer_class = DataCategorySerializer
|
[
"iuri.srb@gmail.com"
] |
iuri.srb@gmail.com
|
084e8d38f2a41afdbacc05279be12f974947234b
|
b72f9d9f0769265cdea2b8caff145af9c532ea09
|
/rcl_contest_2020_final/a.py
|
8f28af7d879a8f73eacf82b2b20564e09dc9ba94
|
[] |
no_license
|
ritzcr/AtCoder
|
3335fefa8fb1989a0f9da80fe6d0902b46aa2d1f
|
15097b0c2568ace653e5080d789047531e50edde
|
refs/heads/master
| 2021-02-12T19:16:41.757421
| 2020-07-05T06:30:57
| 2020-07-05T06:30:57
| 244,620,726
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
# Interactive contest baseline: fix two die faces to 6 and echo the same
# layout on every turn, ignoring the per-turn query values.
n, m = map(int, input().split())
faces = [1, 2, 3, 4, 5, 6]
faces[0] = 6
print(" ".join(map(str, faces)))
for _ in range(m):
    _d, _v, _x = map(int, input().split())
    faces[1] = 6
    print(" ".join(map(str, faces)))
|
[
"ritz@freex.ltd"
] |
ritz@freex.ltd
|
019d0099a757ba400b6c6c8ff733026d56b60154
|
79e5a3733b261f11cf13526460c39d3d722744dd
|
/strawberry/types/datetime.py
|
7abbf73d51ab32343b67f6c505b81f80fa1f7cc7
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
yngvarsh/strawberry
|
73cc0e0a862dc4796549f925ac167ec6768d5ade
|
96b8c701caaa0510e7118928e9e3c00d4ef5a05c
|
refs/heads/master
| 2020-12-11T13:13:57.579007
| 2020-01-15T11:50:17
| 2020-01-15T11:50:17
| 233,857,471
| 0
| 0
|
MIT
| 2020-01-14T14:17:58
| 2020-01-14T14:17:57
| null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
import datetime
import aniso8601
from ..custom_scalar import scalar
def _serialize_isoformatted(value):
return value.isoformat()
# GraphQL scalar for datetime.date; parsed from ISO-8601 via aniso8601.
Date = scalar(
    datetime.date,
    name="Date",
    description="Date (isoformat)",
    serialize=_serialize_isoformatted,
    parse_value=aniso8601.parse_date,
)
# GraphQL scalar for datetime.datetime (date + time).
DateTime = scalar(
    datetime.datetime,
    name="DateTime",
    description="Date with time (isoformat)",
    serialize=_serialize_isoformatted,
    parse_value=aniso8601.parse_datetime,
)
# GraphQL scalar for datetime.time.
Time = scalar(
    datetime.time,
    name="Time",
    description="Time (isoformat)",
    serialize=_serialize_isoformatted,
    parse_value=aniso8601.parse_time,
)
|
[
"patrick.arminio@gmail.com"
] |
patrick.arminio@gmail.com
|
bacb702adf0fd1d047d9ffe824ab720ad30d31ad
|
2eaade99a8073faaf68c46eac48d8826b351fe17
|
/main.py
|
0ae913ee6b2c31ee876af199db61f9af08dd795d
|
[] |
no_license
|
StevenMaharaj/traderv1
|
30ebb6f0242d986aace29ebc6e956bd78e68f02b
|
a1edab9722c2735302126d23ad1c9cd107152635
|
refs/heads/main
| 2023-07-22T21:56:37.266832
| 2021-09-08T05:34:50
| 2021-09-08T05:34:50
| 400,120,846
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,381
|
py
|
# Entry point: configure file logging, parse the --isLive flag and start the
# Deribit scalper strategy on BTC-PERPETUAL.
# NOTE(review): several of these imports are unused in this script.
from executionHandlers.execution import ExecutionHandler
from queue import Queue
from threading import Thread
from dataHandlers.deribit import DeribitTOB
import sys
from event import Event, SignalEvent
from accountHandlers.deribit import DeribitOrder
from executionHandlers.deribit import DeribitExecutionHandler
from datetime import datetime
from time import sleep
import argparse
import logging
import os
from Strategies.ScalperDeribit import scalper_deribit
from portfolio import Portfolio
# one log file per run, named after the start timestamp; 'logs/' must exist
log_folder = 'logs'
now: datetime = datetime.now()
now_string = datetime.strftime(now, '%y%m%d%H-%M-%S')
logging.basicConfig(filename=os.path.join(log_folder, f'{now_string}.log'),
                    level=logging.INFO, format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--isLive", help="Live trading or test")
args = parser.parse_args()
# NOTE(review): eval() on a CLI argument is unsafe and crashes on missing
# values — prefer an explicit "True"/"False" comparison.
is_live: bool = eval(args.isLive)
if is_live:
    # NOTE(review): "seesion" typo in this user-facing message
    print("This seesion will run live")
else:
    print("This session is a test")
# queues connecting the strategy's event pipeline
event_queue = Queue()
signal_event_queue = Queue()
portfolio = Portfolio({}, {})
# construct and run the scalper (blocking call)
deribit_scalper = scalper_deribit.Scalper(event_queue, signal_event_queue,
                                          working_orders=5, portfolio=portfolio,
                                          exchange='deribit',symbols=["BTC-PERPETUAL"], is_live=is_live,order_dist=20)
deribit_scalper.run()
|
[
"="
] |
=
|
0c82b151fa1b84f52808f5d9cba3874637a21ab4
|
7e0f0662faee84f49794fb342199a59e570d4d15
|
/env/bin/mako-render
|
480cddd311d240ca506dd3dfdd95adb94f717789
|
[] |
no_license
|
Samkanja/headline
|
03cfed21a21d71e91d9e37edf821c5d3d1a432c4
|
be792d6ac7e23ba04fbcacbfec84ea659ba67e32
|
refs/heads/master
| 2023-06-19T14:01:21.431839
| 2021-07-08T13:28:37
| 2021-07-08T13:28:37
| 380,808,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
#!/home/kskanja/stuff/down/headline/env/bin/python3.9
# -*- coding: utf-8 -*-
# Auto-generated console entry point for the `mako-render` command.
import re
import sys
from mako.cmd import cmdline
if __name__ == '__main__':
    # strip any installer-added "-script.pyw"/".exe" suffix from argv[0]
    # so the program name shown in help/usage is clean
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(cmdline())
|
[
"samuelkanja4@gmail.com"
] |
samuelkanja4@gmail.com
|
|
848cbab84018f841b970a1feb2b51d070c619871
|
2afc3ec1dae42403c2b208a21045e402636ca336
|
/models.py
|
28fa465795448218958b3af0e1a68d65f42181c9
|
[] |
no_license
|
Jagadishbommareddy/myapp
|
6a5227f33ff5093eaf38f93ce9d69341e9ae6024
|
972b245c0fd33a4242ba17d3562f3e30acb20771
|
refs/heads/master
| 2021-01-22T13:52:49.093137
| 2017-08-18T09:14:59
| 2017-08-18T09:14:59
| 100,020,240
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,100
|
py
|
from django.db import models
from .validators import *
class ContactInfo(models.Model):
    """Contact details (mobile, phone, email) shared via model inheritance.

    NOTE(review): not declared abstract, so Django creates a table for it and
    Customer uses multi-table inheritance — confirm this is intended.
    """
    mobile_no = models.CharField(max_length=15,validators=[validate_mobile_no])
    phone_no = models.CharField(max_length=15, validators=[validate_phone_no])
    email_id = models.EmailField(max_length=50)
class Address(models.Model):
    """Postal address; every textual part is validated by a field validator."""
    address1 = models.CharField(max_length=50)
    address2 = models.CharField(max_length=50)
    city = models.CharField(max_length=20, validators=[validate_city])
    state = models.CharField(max_length=20, validators=[validate_state])
    landmark = models.CharField(max_length=50, validators=[validate_landmark])
    pincode = models.CharField(max_length=10, validators=[validate_pincode])
class Customer(ContactInfo):
    """Customer record; inherits the contact fields from ContactInfo."""
    cuid = models.AutoField(primary_key=True)
    first_name = models.CharField(max_length=15, validators=[validate_first_name])
    last_name = models.CharField(max_length=15, validators=[validate_last_name])
    # NOTE(review): age is stored as a 2-character string; an IntegerField
    # would be more natural, but changing it now requires a migration.
    age = models.CharField(max_length=2, validators=[validate_age])
    # NOTE(review): 'Addresses' breaks the lowercase field-name convention,
    # but renaming would change the DB schema and the public API.
    Addresses = models.ManyToManyField("Address")
|
[
"noreply@github.com"
] |
Jagadishbommareddy.noreply@github.com
|
e4eee8eb36da949a57ccfad7303d36932864ac6a
|
496419c3802640fda626241b313d15499747c451
|
/model_gravity_interactions.py
|
de84416634f6bd5a2ac930e9fbbc68dea580a9ac
|
[
"MIT"
] |
permissive
|
simberaj/interactions
|
edf37bdf889f80e49032284621a2f0e38be99af8
|
8c7a29e97bc8f8d49901f6bdc406471f940c494e
|
refs/heads/master
| 2021-01-17T13:13:14.414343
| 2016-05-28T12:13:29
| 2016-05-28T12:13:29
| 42,062,951
| 1
| 0
| null | 2015-09-22T14:24:35
| 2015-09-07T16:16:49
|
Python
|
UTF-8
|
Python
| false
| false
| 2,739
|
py
|
import arcpy, common, modeling, loaders
# ArcGIS script tool: calibrate a gravity model G*m1*m2*d^(-B) on observed
# interactions, write modelled strengths back and optionally emit a report.
# NOTE: Python 2 syntax (``raise ValueError, ...``).
REPORT_TEMPLATE = u'''Interaction gravity modelling analysis
Input interactions: %s
Interaction selection query: %s
Origin mass field (m1): %s
Destination mass field (m2): %s
Interaction real strength field: %s
Interaction length field (d): %s
Output model strength field: %s
Optimization method used: %s
Interactions found: %i
Using gravity model in form G*m1*m2*d^(-B)
MODEL OUTPUT
Calculated parameters calibrated on real interactions
B parameter value: %g
G parameter value: %g
STATISTICAL ANALYSIS
'''
with common.runtool(9) as parameters:
    # the 9 tool parameters, in the order declared in the toolbox
    interactions, selQuery, massFromFld, massToFld, interactFld, lengthFld, optimizationMethod, outputFld, reportFileName = parameters
    ## ASSEMBLE INPUT
    common.progress('counting interactions')
    count = common.count(interactions)
    if count == 0:
        raise ValueError, 'no interactions found'
    common.message('Found ' + str(count) + ' interactions.')
    common.progress('loading interactions')
    # load the selected rows into GravityInteraction objects keyed by the
    # configured strength/distance/mass fields
    modelInters = loaders.BasicReader(interactions, {'strength' : interactFld, 'distance' : lengthFld, 'massFrom' : massFromFld, 'massTo' : massToFld}, targetClass=modeling.GravityInteraction, where=selQuery).read()
    # rows = arcpy.SearchCursor(interactions, selQuery)
    # modelInters = []
    # for row in rows:
    # try:
    # modelInters.append(GravityInteraction(row.getValue(interactFld), row.getValue(lengthFld), row.getValue(massFromFld), row.getValue(massTo-Fld)))
    # except ValueError:
    # pass # invalid interaction
    ## OPTIMALIZE
    common.progress('creating gravity model')
    opt = modeling.GravityOptimizer(modelInters)
    common.progress('optimizing model parameters')
    opt.optimize(optimizationMethod)
    common.message('Model parameters found:')
    common.message('B parameter value: ' + str(opt.getB()))
    common.message('G parameter value: ' + str(opt.getG()))
    common.progress('calculating model interactions')
    modelStrengths = opt.theoreticalInteractions()
    common.progress('calculating residuals')
    report = opt.report(modelStrengths)
    common.message('\nStatistical report\n\n' + report)
    common.progress('saving model interactions')
    # write modelled strengths back to the same rows, in selection order
    loaders.SequentialUpdater(interactions, {'s' : outputFld}, where=selQuery).update([{'s' : st} for st in modelStrengths])
    # rows = arcpy.UpdateCursor(interactions, selQuery)
    # i = 0
    # for row in rows:
    # row.setValue(outputFld, modelStrengths[i])
    # rows.updateRow(row)
    # i += 1
    if reportFileName:
        common.progress('creating report')
        out = (REPORT_TEMPLATE % (interactions, selQuery, massFromFld, massToFld, interactFld, lengthFld, outputFld, optimizationMethod, count, opt.getB(), opt.getG())) + report
        opt.writeReport(out, reportFileName)
|
[
"simbera.jan@gmail.com"
] |
simbera.jan@gmail.com
|
59dc9eb4875328342d35aa350be38f2fd480157f
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02988/s646336284.py
|
b26f4503b3a987d425115b8af58ec10880ae0e19
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
# Count interior elements that sit strictly between their neighbours,
# i.e. the middle of a strictly increasing or strictly decreasing triple.
n = int(input())
heights = [int(tok) for tok in input().split()]
middles = 0
for i in range(1, n - 1):
    prev_h, cur_h, next_h = heights[i - 1], heights[i], heights[i + 1]
    if prev_h < cur_h < next_h or prev_h > cur_h > next_h:
        middles += 1
print(middles)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
3b3ca2bace9cc78bd6f70523ff06faf3a1e3b10e
|
8d1264d9257eba418f92dbbbc8aac6773c4ec715
|
/core/signals/handlers.py
|
fe560bf24bf5e2578f83b7ab830653fd8a1cdd46
|
[
"MIT"
] |
permissive
|
aldwyn/effigia
|
5f3e9e37eb7d169983034b61c7455baedc2d8817
|
eb456656949bf68934530bbec9c15ebc6d0236b8
|
refs/heads/main
| 2023-02-18T00:09:53.905711
| 2021-06-10T22:04:51
| 2021-06-10T22:04:51
| 96,387,903
| 1
| 1
|
MIT
| 2023-02-15T20:04:00
| 2017-07-06T04:21:09
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,259
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import factory
from django.contrib.auth import get_user_model
from django.core.files.base import ContentFile
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.text import slugify
from core.models import Category
from apps.accounts.models import UserProfile
from apps.galleries.models import Gallery
@receiver(post_save, sender=get_user_model())
def user_created(sender, instance, created, **kwargs):
    """On first save of a non-superuser, provision a profile and a default gallery."""
    # Guard clause: only act on freshly created, regular user accounts.
    if not created or instance.is_superuser:
        return
    UserProfile.objects.create(user=instance)
    # Placeholder 1024x768 cover image generated via factory_boy.
    cover = ContentFile(
        factory.django.ImageField()._make_data({'width': 1024, 'height': 768}),
        'default-gallery-cover.jpg',
    )
    Gallery.objects.create(
        name='Default',
        is_default=True,
        slug=slugify('default-gallery-by-%s' % instance),
        created_by=instance,
        description=('This is the gallery intended for your default portfolio storage. '
                     'Upload portfolios here to familiarize how Effigia galleries work.'),
        category=Category.objects.get(name='Uncategorized'),
        cover_image=cover,
    )
|
[
"aldwyn.up@gmail.com"
] |
aldwyn.up@gmail.com
|
69758bba6726950a002212f930181c065e9e2d13
|
175e4e031471e5cdbc9bcaee2df10f5ec44871d3
|
/LESSON2b/.history/test/webapitest/app_20200606195359.py
|
714e72278231784e6b0e68430eaf0ccb9e8fd6b3
|
[] |
no_license
|
hiyacins/uma_study
|
c329d29a9c3899ab4feca21b9c47ef546b69b0bd
|
067e66f258a0c89f7670c645dd7c40feee8536fa
|
refs/heads/master
| 2023-01-23T06:40:12.435047
| 2020-06-17T15:59:34
| 2020-06-17T15:59:34
| 239,077,726
| 0
| 0
| null | 2023-01-06T08:36:26
| 2020-02-08T05:56:52
|
Python
|
UTF-8
|
Python
| false
| false
| 714
|
py
|
from flask import Flask, jsonify, request
import json
app = Flask(__name__)
number = []
# Return the numbers accumulated so far (posted by clients) as JSON.
@app.route('/incomes')
def get_incomes():
    return jsonify(number)
# Append the posted JSON payload to the in-memory list; reply 204 No Content.
@app.route('/incomes', methods=['POST'])
def add_income():
    number.append(request.get_json())
    return '', 204
# Sum the first two values of the posted JSON array and return the total.
@app.route('/')
def calc_income():
    print("きたよ")
    # Bug fix: request.get_json() already returns the parsed Python object;
    # passing it to json.load() (which expects a file-like) raised TypeError.
    x = request.get_json()
    print(x)
    z = int(x[0]) + int(x[1])
    print(z)
    return jsonify(z)
if __name__ == '__main__':
    # Run Flask's built-in development server (not for production use).
    app.run()
|
[
"hiyacins@gmail.com"
] |
hiyacins@gmail.com
|
779cddc46f1d979f1b14262ace1f13380aa72d7e
|
88ae8695987ada722184307301e221e1ba3cc2fa
|
/native_client/tests/unittests/shared/platform/build.scons
|
bfeb276a5229c844b09fb3c0810f6fc5f369a80a
|
[
"BSD-3-Clause",
"Zlib",
"Classpath-exception-2.0",
"BSD-Source-Code",
"LZMA-exception",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-philippe-de-muyter",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-intel-osl-1993",
"HPND-sell-variant",
"ICU",
"LicenseRef-scancode-protobuf",
"bzip2-1.0.6",
"Spencer-94",
"NCSA",
"LicenseRef-scancode-nilsson-historical",
"CC0-1.0",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"LicenseRef-scancode-other-copyleft",
"GPL-2.0-or-later",
"NTP",
"GPL-2.0-only",
"LicenseRef-scancode-other-permissive",
"GPL-3.0-only",
"GFDL-1.1-only",
"W3C",
"LicenseRef-scancode-python-cwi",
"GCC-exception-3.1",
"BSL-1.0",
"Python-2.0",
"GPL-1.0-or-later",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unknown-license-reference",
"CPL-1.0",
"GFDL-1.1-or-later",
"W3C-19980720",
"LGPL-2.0-only",
"LicenseRef-scancode-amd-historical",
"LicenseRef-scancode-ietf",
"SAX-PD",
"LicenseRef-scancode-x11-hanson",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"dtoa",
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"PSF-2.0",
"LicenseRef-scancode-newlib-historical",
"LicenseRef-scancode-generic-exception",
"SMLNJ",
"HP-1986",
"LicenseRef-scancode-free-unknown",
"SunPro",
"MPL-1.1"
] |
permissive
|
iridium-browser/iridium-browser
|
71d9c5ff76e014e6900b825f67389ab0ccd01329
|
5ee297f53dc7f8e70183031cff62f37b0f19d25f
|
refs/heads/master
| 2023-08-03T16:44:16.844552
| 2023-07-20T15:17:00
| 2023-07-23T16:09:30
| 220,016,632
| 341
| 40
|
BSD-3-Clause
| 2021-08-13T13:54:45
| 2019-11-06T14:32:31
| null |
UTF-8
|
Python
| false
| false
| 959
|
scons
|
# -*- python -*-
# Copyright 2008 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
Import('env')
# Unit test: NaCl joinable-thread creation; links against the platform
# and gio component libraries.
nacl_thread_create_joinable_test_exe = env.ComponentProgram(
    'nacl_thread_create_joinable_test',
    'nacl_thread_create_joinable_test.c',
    EXTRA_LIBS=['platform',
                'gio'])
node = env.CommandTest(
    'nacl_thread_create_joinable_test.out',
    command=[nacl_thread_create_joinable_test_exe])
env.AddNodeToTestSuite(node,
                       ['small_tests'],
                       'run_nacl_thread_create_joinable_test')
# Unit test: atomic operations, invoked with '25' (iteration/thread count
# per the test binary's CLI); classed as a medium-sized test.
atomic_ops_test_exe = env.ComponentProgram(
    'atomic_ops_test',
    'atomic_ops_test.c',
    EXTRA_LIBS=['platform', 'gio'])
node = env.CommandTest(
    'atomic_ops_test.out',
    command=[atomic_ops_test_exe, '25'],
    size='medium')
env.AddNodeToTestSuite(node, ['medium_tests'], 'run_atomic_ops_test')
|
[
"jengelh@inai.de"
] |
jengelh@inai.de
|
5d3905192683a5c50a7325311d5ace07612a462d
|
14be8bcd7e66aad90b98d7c76a78fdb94c7c4f65
|
/l06/class6.py
|
7bbaa747c2aab0de8091ab7eb6f5675803a36373
|
[] |
no_license
|
alexbyz/HW070172
|
f6231d7ccd0fb06a88db9bd6b0c718ed70ce62a2
|
e0e4946f82ba71b4d3860c570fadb9cd96a6c9a1
|
refs/heads/main
| 2023-03-03T06:16:17.630740
| 2021-02-09T09:55:04
| 2021-02-09T09:55:04
| 305,626,086
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,304
|
py
|
import regex
# Whitelist of record fields worth keeping when parsing; maps each accepted
# key to itself. NOTE(review): 'recodType' looks like a typo for
# 'recordType', but the key is used verbatim by the parser, so it stays.
recordIdealtype = {
    "recodType": "recordType",
    "citationKey": "citationKey",
    "title": "title",
    "author": "author",
    "date": "date",
    "location": "location"
}
def readFile():
    """Prompt for a file name and return that file's entire contents.

    Bug fixes vs. the original: the read result was assigned to a typo'd
    variable (`date`), so an empty string was always returned, and the
    file handle was never closed. `with` guarantees closure.
    """
    name = input("inputfile: ")
    with open(name, "r", encoding="UTF-8") as infile:
        return infile.read()
def getRecords(inString):
    """Split raw BibTeX-style text into chunks, one per '@'-prefixed record."""
    delimiter = "\n@"
    return inString.split(delimiter)
def main():
    """Parse BibTeX-ish records from the input file and print each as a dict.

    Bug fixes vs. the original: the loop indexed `records[i]` with an
    undefined `i`, and assigned fields into `record`, which was a string.
    Each record now gets its own dict.
    """
    data = readFile()
    records = getRecords(data)
    for raw in records[1:]:  # records[0] is anything before the first '@'
        entry = raw.split('\n')
        record = {}
        # First line looks like 'article{citekey,' -> record type + key.
        firstEntry = entry[0].split('{')
        record["recodType"] = firstEntry[0].strip()
        if len(firstEntry) > 1:
            record["citationKey"] = firstEntry[1].strip()
        # Remaining 'key = value' lines; keep only whitelisted fields.
        for line in entry[1:]:
            if "=" not in line:
                continue  # closing brace or blank line
            key, value = line.split("=", 1)
            if key.strip() in recordIdealtype:
                record[key.strip()] = value.strip()
        print(record)


main()
|
[
"huber.alexander1993@gmail.com"
] |
huber.alexander1993@gmail.com
|
ebd10a0a4af3d11227b8cc8c42118b2079ceeef1
|
31f56a696a0a5ada4aa2d583f8b340201696b3c7
|
/nabu/neuralnetworks/classifiers/__init__.py
|
42fc49fb14a326860f2012974c7b5b9d567b6e93
|
[
"MIT"
] |
permissive
|
DavidKarlas/nabu
|
6d7fcdcd46f97b8886382079d04251b6862203db
|
fb530cf617ff86fe8a249d4582dfe90a303da295
|
refs/heads/master
| 2020-12-30T13:29:38.735236
| 2017-04-28T14:50:57
| 2017-04-28T14:50:57
| 91,229,041
| 1
| 0
| null | 2017-05-14T08:09:03
| 2017-05-14T08:09:03
| null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
'''@package classifiers
Contains the neural net classifiers (e.g. dnn) and their components
(e.g. layers)
'''
from . import activation, classifier, layer, asr, lm
|
[
"vincent.renkens@esat.kuleuven.be"
] |
vincent.renkens@esat.kuleuven.be
|
73e5dbdc975e9d650c27382bce182c16f1722617
|
a8f275638f6bab07644b6b6d2ff4a1eabc4a3b4b
|
/class-Animal.py
|
aba9a9201900ff4e823d33444234c6b42432f825
|
[] |
no_license
|
evamaina/OOP
|
1bfd49706365f3d297f2383ffd995b2159ade283
|
b1623cf76896c21a4ac49526070d8f4ebd3b90a8
|
refs/heads/master
| 2021-08-17T20:44:47.516263
| 2017-11-21T17:22:12
| 2017-11-21T17:22:12
| 107,859,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
"""Inheritance provides a way to share functionality between classes.
This similarity can be expressed by making them all inherit from a superclass Animal,
which contains the shared functionality"""
class Animal:
    """Base type for animals; stores the state shared by all subclasses."""
    def __init__(self, name, color):
        # Expose the construction arguments as plain public attributes.
        self.color = color
        self.name = name
class Cat(Animal):
    """Cat specializes Animal with a purring sound."""
    def purr(self):
        print("Purr...")
class Dog(Animal):
    """Dog specializes Animal with a barking sound."""
    def bark(self):
        print("Woof!")
# Demo: subclass instances inherit Animal's constructor and attributes.
fido = Dog("Fido", "brown")
print(fido.color)
fido.bark()
|
[
"evajohnson714@gmail.com"
] |
evajohnson714@gmail.com
|
f10d7333e8172120f845c6ba2d0052fc407fff29
|
2aa9432798d681a9a21535397bf3414d04bf014e
|
/test/loggingDemo.py
|
ec95bf2285c991a9c4e84eaa52b2932ef7680438
|
[] |
no_license
|
RaunakJalan/Selenium_Automation
|
babd426e9a12b3cfffe28a34af6486fcce57ce23
|
47d4faa275590b8f9c2d6922689275c13d3650c2
|
refs/heads/master
| 2023-03-14T14:42:40.308146
| 2021-03-09T16:29:16
| 2021-03-09T16:29:16
| 346,070,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
import logging

# Route all records to test.log with a timestamped single-line format.
logging.basicConfig(
    filename="test.log",
    format='%(asctime)s: %(levelname)s: %(message)s',
    datefmt="%m/%d/%Y %I:%M:%S %p",
)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)

# Emit one sample record at every standard severity level, lowest first.
for emit, text in (
    (logger.debug, "This is a debug message"),
    (logger.info, "This is an info message"),
    (logger.warning, "This is a warning message"),
    (logger.error, "This is an error message"),
    (logger.critical, "This is a critical message"),
):
    emit(text)
|
[
"ronakjalan98@gmail.com"
] |
ronakjalan98@gmail.com
|
9eee040f0ac6b5d1d6039900ac3d403d046bf926
|
cf7b827958166c8569eb58deb511cc3f07567741
|
/in_Python_v2/1074 Number of Submatrices That Sum to Target.py
|
e3b4d58aa31bd7bded1a018b1d37e2c0dae32354
|
[] |
no_license
|
YangLiyli131/Leetcode2020
|
e4e36eb36b1983f73b0e733455b4a7953dfebe6d
|
20623defecf65cbc35b194d8b60d8b211816ee4f
|
refs/heads/master
| 2023-08-22T06:00:55.924112
| 2021-09-18T19:04:15
| 2021-09-18T19:04:15
| 251,426,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 857
|
py
|
from collections import defaultdict
class Solution(object):
    def numSubmatrixSumTarget(self, matrix, target):
        """
        :type matrix: List[List[int]]
        :type target: int
        :rtype: int

        2-D prefix sums + the classic 1-D "subarray sum equals k" trick:
        fix a pair of rows, then scan columns counting, via a hash map,
        how many earlier column-prefixes differ from the current one by
        exactly `target`.
        """
        m, n = len(matrix), len(matrix[0])
        # prefix[r][c] = sum of matrix[0:r][0:c] (1-based sentinel border).
        prefix = [[0] * (n + 1) for _ in range(m + 1)]
        for r in range(m):
            for c in range(n):
                prefix[r + 1][c + 1] = (prefix[r][c + 1] + prefix[r + 1][c]
                                        - prefix[r][c] + matrix[r][c])
        count = 0
        for top in range(1, m + 1):
            for bottom in range(top, m + 1):
                seen = defaultdict(int)
                seen[0] = 1  # empty prefix
                for c in range(1, n + 1):
                    cur = prefix[bottom][c] - prefix[top - 1][c]
                    count += seen[cur - target]
                    seen[cur] += 1
        return count
|
[
"noreply@github.com"
] |
YangLiyli131.noreply@github.com
|
7601d4d19e420178a28cc601c74ab7d5147f8d3c
|
a6a2997ecc7dd8406f4e190d357cba1d301489c3
|
/users/admin.py
|
531bc47e3dd64703dae1fc4ae821ee05804a0ffb
|
[] |
no_license
|
Shatki/itreactor
|
de306bd0a06d9b498645eeb76e191cfa70cdca04
|
a657ad7fb4a9051f9ab845539a7369fe0da17d26
|
refs/heads/master
| 2023-02-21T15:29:00.840747
| 2021-01-28T05:37:15
| 2021-01-28T05:37:15
| 317,347,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,236
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import Group
from .forms import UserCreationForm, UserChangeForm
from .models import User, Feedback
@admin.register(User)
class UserAdmin(BaseUserAdmin):
    """Admin configuration for the custom email-based User model."""
    model = User
    # The forms to add and change user instances
    form = UserChangeForm
    add_form = UserCreationForm
    # Columns shown in the user list view.
    list_display = (
        'email',
        'first_name',
        'last_name',
        'last_login')
    list_filter = (
        'date_joined',
        'last_login',
    )
    # Timestamps are system-managed; never editable in the admin.
    readonly_fields = (
        'date_joined',
        'date_updated',
        'last_login',
    )
    # Edit-page layout (section labels are user-facing, kept in Russian).
    fieldsets = (
        (None, {
            'fields': (
                'email',
                'password',
            )
        }),
        (u'Персональная информация', {
            'fields': (
                'first_name',
                'last_name',
                'photo',
            )
        }),
        (u'Права доступа', {
            'fields': (
                'groups',
                'user_permissions',
                'is_superuser',
                'is_staff',
                'is_active',
            )
        }),
        (u'Важные даты', {
            'fields': (
                'last_login',
                'date_joined',
                'date_updated',
            )
        }),
    )
    # Layout used on the "add user" page (two password fields for confirm).
    add_fieldsets = (
        (None, {
            'classes':
                ('wide',),
            'fields': (
                'email',
                'password1',
                'password2',
                'is_superuser',
            )
        }),
    )
    search_fields = (
        'email',)
    ordering = (
        'date_joined',)
    # Nicer multi-select widgets for the many-to-many permission fields.
    filter_horizontal = (
        'groups',
        'user_permissions',
    )
# Register your models here.
@admin.register(Feedback)
class FeedbackAdmin(admin.ModelAdmin):
    """Admin listing for user feedback messages, ordered by date."""
    list_display = ('name',
                    'date',
                    'email',
                    'subject',
                    'message',
                    )
    search_fields = ('name',)
    ordering = ('date',)
|
[
"Shatki@mail.ru"
] |
Shatki@mail.ru
|
a3de30095b503bfe0aca308080e3d8f013346b36
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p2DJ/New/program/cirq/startCirq407.py
|
4cd57412338a48432208219d046554ef403b1008
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,268
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=2
# total number=20
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Build the auto-generated benchmark circuit and measure every qubit.

    Note: *n* is not referenced in the body — the gate list is fixed by the
    generator. The '# number=N' comments are the generator's gate sequence
    numbers; several adjacent pairs (X·X, H·CZ·H, CNOT·CNOT) are
    self-cancelling, which appears to be a property of the generated
    benchmark rather than a mistake.
    """
    c = cirq.Circuit()  # circuit begin

    c.append(cirq.H.on(input_qubit[0])) # number=1
    c.append(cirq.Y.on(input_qubit[1])) # number=2
    c.append(cirq.Y.on(input_qubit[1])) # number=4
    c.append(cirq.Y.on(input_qubit[1])) # number=3
    c.append(cirq.rx(2.0860175219836226).on(input_qubit[1])) # number=7
    c.append(cirq.X.on(input_qubit[0])) # number=5
    c.append(cirq.X.on(input_qubit[0])) # number=6
    c.append(cirq.H.on(input_qubit[0])) # number=10
    c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=11
    c.append(cirq.H.on(input_qubit[0])) # number=12
    c.append(cirq.H.on(input_qubit[0])) # number=13
    c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=14
    c.append(cirq.H.on(input_qubit[0])) # number=15
    c.append(cirq.Y.on(input_qubit[0])) # number=16
    c.append(cirq.Y.on(input_qubit[0])) # number=17
    c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=18
    c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=19
    # circuit end

    c.append(cirq.measure(*input_qubit, key='result'))

    return c
def bitstring(bits):
    """Render an iterable of measurement bits as a '0'/'1' string."""
    pieces = [str(int(bit)) for bit in bits]
    return ''.join(pieces)
if __name__ == '__main__':
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile the circuit to Sycamore's native gate set before simulating.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    circuit_sample_count =2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)

    frequencies = result.histogram(key='result', fold_func=bitstring)
    # Dump the histogram, circuit depth and the circuit itself to CSV.
    writefile = open("../data/startCirq407.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)

    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
ddca45160dbe21af63be36d6a989d3787788a108
|
29145db13229d311269f317bf2819af6cba7d356
|
/january circuits/equalSub.py
|
379cf14defb37168832a4605de814de52080f970
|
[] |
no_license
|
rocket3989/hackerEarth2019
|
802d1ca6fd03e80657cbe07a3f123e087679af4d
|
42c0a7005e52c3762496220136cc5c1ee93571bb
|
refs/heads/master
| 2021-07-05T01:32:42.203964
| 2020-12-22T03:40:20
| 2020-12-22T03:40:20
| 211,607,143
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
# Sliding-window scan (competitive-programming input format).
# Window A[l:r] is kept feasible under: sumEl + K >= maxOf * (r - l).
# Presumably K is a budget of unit increments used to raise every window
# element up to the window maximum — TODO confirm against the problem text.
N = int(input())
K = int(input())
A = [int(x) for x in input().split()]
maxOf = A[0]   # maximum value inside the current window A[l:r]
l, r = 0, 1    # half-open window; r may reach N for the final check
sumEl = A[0]   # running sum of the current window
maxLen = 1
while r < N + 1:
    # Shrink from the left while the window fails the feasibility test.
    if sumEl + K < maxOf * (r - l):
        sumEl -= A[l]
        if A[l] == maxOf:
            # Evicted the current maximum: recompute over the remainder.
            # (Safe: the window never shrinks below size 1, so l+1 < r.)
            maxOf = max(A[l + 1:r])
        l += 1
        continue
    maxLen = max(maxLen, r - l)
    if r == N: break
    maxOf = max(maxOf, A[r])
    sumEl += A[r]
    r += 1
print(maxLen)
|
[
"rocket3989@gmail.com"
] |
rocket3989@gmail.com
|
198932798010531a6e5ee431ea85d0f3e5ca76b1
|
c9642233f1de71f1a61ae28c695c2d9228825156
|
/echecs_hall/app/views/mj_hall_api/good.py
|
9ff47539c15f03ae8f000dca757f3e9985f69aac
|
[
"AFL-3.0"
] |
permissive
|
obespoir/echecs
|
d8314cffa85c8dce316d40e3e713615e9b237648
|
e4bb8be1d360b6c568725aee4dfe4c037a855a49
|
refs/heads/master
| 2022-12-11T04:04:40.021535
| 2020-03-29T06:58:25
| 2020-03-29T06:58:25
| 249,185,889
| 16
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,325
|
py
|
# coding=utf-8
from app.views.base_handler import BaseHandler
from app.controller.mj_hall_controller import good_controller
from app.controller.mj_hall_controller import login_hall_controller
import json
from . import mj_hall
from app.share.error_code import *
import time
from app.extensions.common import md5
from tornado.web import authenticated
@mj_hall.route('/setgood')
class SetGood(BaseHandler):
    """Stub handler for /setgood: GET only logs; POST is unimplemented."""
    def get(self):
        print('good')
    def post(self):
        pass
@mj_hall.route('/getgood')
class GetGood(BaseHandler):
    """Return the list of purchasable goods for the hall shop."""
    isLogin = True
    tag = __name__

    @authenticated
    def get(self):
        data = []
        # Bug fixes: the original queried the controller twice, and after
        # sending NOT_GOODS it fell through, iterating a possibly-None list
        # and sending a second (success) response.
        goods_list = good_controller.get_all_good_info()
        if not goods_list:
            self.return_data(NOT_GOODS, data)
            return
        for good in goods_list:
            data.append({'id': good['id'], 'title': good['name'], 'rmb_price': good['rmb_price'],
                         'icon': good['icon'],
                         'selling_price': good['selling_price']})
        self.return_success(data)

    def post(self):
        pass
@mj_hall.route('/buygood')
class BuyGood(BaseHandler):
    """Purchase a good: exchange the player's diamonds for in-game money."""
    isLogin = True
    tag = __name__

    @authenticated
    def get(self):
        param = json.loads(self.get_argument('base'))
        sub_param = param['param']
        good_id = int(self.get_param('id', sub_param))
        # Current player's state.
        user = self.current_user
        uid = int(user['uid'])
        user_money = int(user['money'])
        user_diamond = int(user['diamond'])
        # Look up the good and bail out BEFORE touching its fields.
        # Bug fix: the original indexed good_info before the existence
        # check (crashing on unknown ids) and did not return after the
        # error response.
        good_info = good_controller.get_good_info_by_id(good_id)
        if not good_info:
            self.return_error(PARAM_ERROR)
            return
        selling_price = int(good_info['selling_price'])  # price in diamonds
        quantity = int(good_info['quantity'])            # money granted
        # Verify the player can afford the purchase.
        if user_diamond >= selling_price:
            diamond = user_diamond - selling_price
            money = user_money + quantity
            data = {'diamond': diamond, 'money': money}
            login_hall_controller.update_user_in_cache(uid, data)
            self.return_success(data)
        else:
            self.return_error(NOT_ENOUGH_DIAMOND)

    def post(self):
        pass
|
[
"jamonhe1990@gmail.com"
] |
jamonhe1990@gmail.com
|
a40038fb0b6957d262599096d21a59dd2890bc91
|
e526543920e4974504cb62802c393d5bc46559db
|
/python-repos/python_repos.py
|
cddc2b416e8f6a10ff5f96a8cd45f8641eb00818
|
[] |
no_license
|
mare-astrorum/python-crash-course-practice
|
b843f2067208b749558c4423556498e643c5fa42
|
47423808902b75af9d7888d4f9fa9f083bce88f4
|
refs/heads/master
| 2020-09-06T19:02:09.837740
| 2019-11-08T17:30:52
| 2019-11-08T17:30:52
| 220,516,919
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,580
|
py
|
import requests
import sys
import pygal
from pygal.style import LightColorizedStyle as LCS, LightenStyle as LS
# Make an API call and store the response.
url = 'https://api.github.com/search/repositories?q=language:python&sort=stars'
r = requests.get(url)
print("Status code:", r.status_code)

# Store API response in a variable.
response_dict = r.json()
print("Total repositories:", response_dict['total_count'])

# Explore information about the repositories.
repo_dicts = response_dict['items']
print("Repositories returned:", len(repo_dicts))

names, plot_dicts = [], []
# Pygal tooltips can't render astral-plane characters (emoji etc.), so map
# everything above the Basic Multilingual Plane to U+FFFD. Hoisted out of
# the loop: the table is loop-invariant and moderately expensive to build.
non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)
for repo_dict in repo_dicts:
    names.append(repo_dict['name'])

    description = repo_dict['description']
    if description is None:  # idiom fix: identity check for None
        good_description = 'No description.'
    else:
        good_description = description.translate(non_bmp_map)

    plot_dict = {
        'value': repo_dict['stargazers_count'],
        'label': good_description,
        'xlink': repo_dict['html_url']
    }
    plot_dicts.append(plot_dict)

# Make visualization.
my_style = LS('#333366', base_style=LCS)
my_config = pygal.Config()
my_config.x_label_rotation = 45
my_config.show_legend = False
my_config.title_font_size = 24
my_config.label_font_size = 14
my_config.major_label_font_size = 18
my_config.show_y_guides = False
my_config.width = 1000
chart = pygal.Bar(my_config, style=my_style)
chart.title = 'Python Projects'
chart.x_labels = names
chart.add('', plot_dicts)
chart.render_to_file('example_outcome_python_repos.svg')
|
[
"a@a.com"
] |
a@a.com
|
e017100cb679bf6f1ae3e8f315b984bb2e457cfb
|
6375b7e4dfe11ced7dcd3fad1a7a2de9a504910d
|
/excel/xlutils_demo.py
|
d4506129814db5dc73782bc3726332f7f72f039b
|
[] |
no_license
|
yaowenqiang/lpthw
|
b65e6b8ce576e7caa5cfba5570550e546d1e0549
|
4bbd7ebb4e8c570a39bf9c55df9bd97e4f86e1e5
|
refs/heads/master
| 2020-04-01T10:57:32.959389
| 2019-05-01T09:27:25
| 2019-05-01T09:27:25
| 153,140,026
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 315
|
py
|
import xlwt
import xlrd
import xlutils
# Open an existing workbook and read cell (0, 0).
# NOTE(review): "myexclee.xls" looks like a typo for "myexcel.xls" (the
# file written below) — confirm which file is intended before changing it.
workbook = xlrd.open_workbook("myexclee.xls")
worksheet = workbook.sheet_by_index(0)
data = worksheet.cell_value(0, 0)

# Create a new workbook. Bug fix: the original instantiated
# xlwt.Workbook() twice; the first instance was unused and is removed.
wb = xlwt.Workbook()
sh = wb.add_sheet('Sheet1')
# NOTE(review): writes the literal string 'data', not the value read above
# into the variable `data` — confirm intent.
sh.write(0,0,'data')
wb.save('myexcel.xls')
|
[
"yaowenqiang111@163.com"
] |
yaowenqiang111@163.com
|
0050e58aa4d71e43e495f50afeae7d51b46087dc
|
b457be31ac024f2a80ad553068544779d0680f48
|
/dnawf/templates/clean_template.py
|
5f1b01683a55986af6a84efe60413edbf363fd6e
|
[] |
no_license
|
daxm/dnac-api-demo
|
9ef0782b9d780aad7ece1112814dbe3a794d687c
|
dbf8432c84d9e47255b184310df69af48d1f1fee
|
refs/heads/master
| 2022-11-03T14:42:26.021264
| 2020-06-16T20:00:32
| 2020-06-16T20:00:32
| 271,582,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,340
|
py
|
#!/usr/bin/env python
# -*- coding: utf-
import json
from argparse import ArgumentParser
import glob
import os
import pprint
def clean_template(template):
    """Strip environment-specific fields from a DNAC template dict.

    Removes creation metadata and project linkage so the template can be
    re-imported into another environment; also drops the per-variable
    'id' fields. Mutates and returns *template*.
    """
    pprint.pprint(template)  # bug fix: was pprint.pprint(t) -> NameError
    # template.pop('id')
    template.pop('createTime')
    template.pop('lastUpdateTime')
    template.pop('parentTemplateId')
    template.pop("projectId")
    template.pop("projectName")
    # Variable entries carry their own environment-specific ids.
    for var in template['templateParams']:
        var.pop('id')
    return template
def remove_dict_key(key, var):
    """Recursively remove *key* from dict *var* and all nested dicts/lists.

    Generator — consume it (e.g. ``list(...)``) to perform the removal.
    Bug fix: the original tested for the Python 2-only ``iteritems``
    attribute, so under Python 3 it silently did nothing.
    """
    if isinstance(var, dict):
        var.pop(key, None)
        for v in var.values():
            if isinstance(v, dict):
                for result in remove_dict_key(key, v):
                    yield result
            elif isinstance(v, list):
                for d in v:
                    for result in remove_dict_key(key, d):
                        yield result
def saveTemplate(template, orig_filename):
    """Write *template* as JSON next to *orig_filename* with a '_clean' suffix."""
    dir_name = os.path.dirname(orig_filename)
    filename_extension = os.path.basename(orig_filename)
    (basename, extension) = os.path.splitext(filename_extension)
    # 'with' closes the handle even on write errors (the original leaked
    # the handle). Append mode is kept deliberately: removePreviousVersion()
    # deletes stale *_clean files before this runs.
    with open(dir_name + "/" + basename + "_clean" + extension, "a") as out_f:
        out_f.write(json.dumps(template, indent=4, sort_keys=True))
def printTemplateContent(template):
    """Pretty-print *template* framed by 100-character '#' separator lines."""
    separator = 100 * "#"
    print(separator)
    pprint.pprint(template)
    print(separator)
def removePreviousVersion(dir_name):
    """Delete previously generated '*clean*' files in *dir_name*."""
    file_list = glob.glob(dir_name + "/*clean*")
    # Iterate over the list of filepaths & remove each file.
    for filePath in file_list:
        try:
            print("Deleting file : ", filePath)
            os.remove(filePath)
        except OSError:  # narrowed from a bare 'except' that hid real errors
            print("Error while deleting file : ", filePath)
if __name__ == "__main__":
    # CLI: clean every *.json template in the given directory, writing a
    # *_clean.json copy next to each original (stale copies removed first).
    parser = ArgumentParser(description='Select options.')
    parser.add_argument('dir', help="directory where input json files are ")
    args = parser.parse_args()
    removePreviousVersion(args.dir)
    for file_name in glob.glob(args.dir + "/*.json"):
        print(file_name)
        with open(file_name) as f:
            template = json.load(f)
        c_template = clean_template(template)
        printTemplateContent(c_template)
        saveTemplate(c_template, file_name)
|
[
"dmickels@cisco.com"
] |
dmickels@cisco.com
|
637b6f1e6ec46f1d584c268016837a63e14fff30
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/get_20200810155009.py
|
13e9a436036562052c378984fa398589f08fdc7a
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,589
|
py
|
def produce(num1, num2):
    """Multiply num1 by num2 using only repeated addition (no '*')."""
    magnitude = 0
    for _ in range(abs(num1)):
        magnitude += abs(num2)
    # The result is negative exactly when the operands have opposite signs.
    if (num1 < 0 and num2 > 0) or (num1 > 0 and num2 < 0):
        return int("-" + str(magnitude))
    return magnitude
# print(produce(2,3))
def findProduct(num):
    """Print the product of the decimal digits of num (4513 -> 4*5*1*3 = 60).

    Also returns the product so callers can use the value — backward
    compatible: previously the result was only printed. As before, a
    negative num raises ValueError (the '-' sign is not a digit).
    """
    totalValue = 1
    for digit in str(num):
        totalValue *= int(digit)
    print(totalValue)
    return totalValue
# 4513 = 4 * 5 * 1 * 3
# A similar way
def getProduct(n):
    """Print and return the product of the decimal digits of n.

    Bug fixes: the original looped forever for negative n (Python floor
    division never drives a negative toward 0) and printed 1 for n == 0;
    the sign is now ignored and the digit product of 0 is 0. The value is
    also returned (backward compatible; previously only printed).
    """
    n = abs(n)
    if n == 0:
        print(0)
        return 0
    product = 1
    while n != 0:
        product *= n % 10
        n = n // 10
    print(product)
    return product
def product(num1, num2):
    """Multiply recursively via repeated addition.

    A negative num2 is folded into the sign of the result; num2 == 0 or
    num1 == 0 short-circuits to 0. Leftover debug print() calls from the
    original have been removed.
    """
    if num2 < 0:
        return -product(num1, -num2)
    elif num2 == 0 or num1 == 0:
        return 0
    elif num2 == 1:
        return num1
    elif num1 == 1:
        return num2
    else:
        # Peel one addend off per recursion step.
        return num1 + product(num1, num2 - 1)
# print(product(2,3) )
def product1(x, y):
    """Multiply using only division: x * y == x / (1 / y).

    Float-based, so results may carry rounding error; raises
    ZeroDivisionError when y == 0. Prints and returns the answer
    (the return value is new and backward compatible).
    """
    answer = x / (1 / y)
    print(answer)
    return answer
# product1(2,3)
# using a while loop
def mult(a, b):
    """Multiply via repeated addition of absolute values, fixing the sign.

    Bug fixes: the original decremented b itself (so any negative b looped
    forever), and its sign test used 'or' where 'and' was needed, which
    could mis-sign results. Magnitudes and sign are now handled separately.
    """
    if a == 0 or b == 0:
        return 0
    result = 0
    steps = abs(b)
    while steps > 0:
        result += abs(a)
        steps -= 1
    # Negative exactly when the operands have opposite signs.
    if (a < 0) != (b < 0):
        return -result
    return result
print(mult(2,3))
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
ac725be804118071e93d65cfe070174d556c8388
|
bcc7011cb121e653d831e77206e541675e348337
|
/Global_and_Local_Inversions.py
|
17fc67eb30ac3008140d41569fefa9ea03994983
|
[] |
no_license
|
Built00/Leetcode
|
2115c20bf91e9f9226ce952293132bc7a852fe86
|
ec3c0d4bd368dd1039f0fed2a07bf89e645a89c3
|
refs/heads/master
| 2020-11-24T09:12:08.172973
| 2018-03-27T01:23:08
| 2018-03-27T01:23:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,416
|
py
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# We have some permutation A of [0, 1, ..., N - 1], where N is the length of A.
# The number of (global) inversions is the number of i < j with 0 <= i < j < N and A[i] > A[j].
# The number of local inversions is the number of i with 0 <= i < N and A[i] > A[i+1].
# Return true if and only if the number of global inversions is equal to the number of local inversions.
# Example 1:
# Input: A = [1,0,2]
# Output: true
# Explanation: There is 1 global inversion, and 1 local inversion.
# Example 2:
# Input: A = [1,2,0]
# Output: false
# Explanation: There are 2 global inversions, and 1 local inversion.
# Note:
# A will be a permutation of [0, 1, ..., A.length - 1].
# A will have length in range [1, 5000].
# The time limit for this problem has been reduced.
# Leetcode Weekly Contest 69.
# 208 / 208 test cases passed.
# Status: Accepted
# Runtime: 125 ms
class Solution(object):
    def isIdealPermutation(self, A):
        """
        :type A: List[int]
        :rtype: bool

        Every local inversion is a global one, so the two counts are equal
        iff every global inversion is local — i.e. no element two or more
        positions ahead is smaller than an earlier running maximum.
        """
        highest_so_far = float('-inf')
        for idx in range(len(A) - 2):
            if A[idx] > highest_so_far:
                highest_so_far = A[idx]
            if highest_so_far > A[idx + 2]:
                return False
        return True
if __name__ == '__main__':
    # Smoke tests from the problem statement: expected True, False, False.
    print(Solution().isIdealPermutation([1, 0, 2]))
    print(Solution().isIdealPermutation([1, 2, 0]))
    print(Solution().isIdealPermutation([2, 0, 1]))
|
[
"binwengan@gmail.com"
] |
binwengan@gmail.com
|
c0e0ba14f5b0d6553e0ab8ea2c7ab3c584612b90
|
b65c1f6000af4ddeb7280e7d93bf861fbf1964bc
|
/docs/conf.py
|
f738f1816dbe005da47a911e42eba2c58f773d96
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
EricSchles/calc
|
ef00aaddfec010321867a8287db0a565dbb7985e
|
eaa1ab227a5a07f5f4f7d2c64a278977cd43cb18
|
refs/heads/develop
| 2021-01-25T14:33:58.124300
| 2017-10-11T19:29:20
| 2017-10-11T19:29:20
| 72,668,485
| 1
| 0
| null | 2016-11-02T18:17:57
| 2016-11-02T18:17:57
| null |
UTF-8
|
Python
| false
| false
| 5,864
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CALC documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 10 12:27:23 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import os
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify
import sphinx_rtd_theme
DOCS_DIR = os.path.dirname(__file__)
BASE_DIR = os.path.dirname(DOCS_DIR)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_parsers = {'.md': CommonMarkParser}
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'CALC'
copyright = '2017, 18F'
author = '18F'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
VERSION_PY_PATH = os.path.join(BASE_DIR, 'hourglass', 'version.py')
_globs = {}
# Execute the project's version.py in an isolated namespace to obtain
# __version__ without importing the package (trusted local file, hence
# the bandit 'nosec' marker).
exec(open(VERSION_PY_PATH).read(), _globs)  # nosec
version = _globs['__version__']
del _globs
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'CALCdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CALC.tex', 'CALC Documentation',
'18F', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'calc', 'CALC Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CALC', 'CALC Documentation',
author, 'CALC', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
class PathNotFoundError(Exception):
    """Raised when markdown documentation links to a local path that does not exist."""
    pass
def resolve_md_url(url):
    """Map a relative markdown link to its GitHub URL.

    The link is first checked against the local docs tree (rooted at
    DOCS_DIR); a dangling link raises PathNotFoundError so broken
    references fail the docs build instead of producing dead URLs.
    """
    local_path = os.path.normpath(os.path.join(DOCS_DIR, url))
    if not os.path.exists(local_path):
        message = (
            '"{}" is referenced in markdown documentation but "{}" '
            'does not exist'.format(url, local_path)
        )
        raise PathNotFoundError(message)
    return 'https://github.com/18F/calc/tree/develop/docs/' + url
def setup(app):
    # Sphinx extension entry point: route recommonmark's relative markdown
    # links through resolve_md_url (third arg True => rebuild on change),
    # and enable AutoStructify's extended markdown transforms.
    app.add_config_value('recommonmark_config', {
        'url_resolver': resolve_md_url,
    }, True)
    app.add_transform(AutoStructify)
|
[
"varmaa@gmail.com"
] |
varmaa@gmail.com
|
18b7044e434e0878ff4a74e4acebf367bfd3596e
|
1ea36bc61aed79d9ae198350e221c8d6a7073b08
|
/venv/Lib/site-packages/pip/_internal/resolution/resolvelib/requirements.py
|
813792d12ff37cacd53a4186dfedd722b748684d
|
[] |
no_license
|
RicardoAltamiranoSanchez/Proyecto_Tienda_virtual_API_7-Sunburts
|
f3f0b7f166520d3d91832ac13aa0686d7b5211d8
|
4c95220415277f8561740a8da78ef68ff576f1d6
|
refs/heads/master
| 2023-04-25T23:40:43.804848
| 2021-05-15T16:50:44
| 2021-05-15T16:50:44
| 324,892,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,180
|
py
|
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from .base import Requirement, format_name
if MYPY_CHECK_RUNNING:
from pip._vendor.packaging.specifiers import SpecifierSet
from pip._internal.req.req_install import InstallRequirement
from .base import Candidate, CandidateLookup
class ExplicitRequirement(Requirement):
    """Requirement that is satisfied by exactly one pre-resolved candidate."""

    def __init__(self, candidate):
        # type: (Candidate) -> None
        self.candidate = candidate

    def __str__(self):
        # type: () -> str
        return str(self.candidate)

    def __repr__(self):
        # type: () -> str
        return "{}({!r})".format(self.__class__.__name__, self.candidate)

    @property
    def project_name(self):
        # type: () -> str
        # The candidate already canonicalised its own name.
        return self.candidate.project_name

    @property
    def name(self):
        # type: () -> str
        # The candidate already canonicalised its own name.
        return self.candidate.name

    def format_for_error(self):
        # type: () -> str
        return self.candidate.format_for_error()

    def get_candidate_lookup(self):
        # type: () -> CandidateLookup
        return self.candidate, None

    def is_satisfied_by(self, candidate):
        # type: (Candidate) -> bool
        return candidate == self.candidate
class SpecifierRequirement(Requirement):
    """Requirement backed by an InstallRequirement's version specifier."""

    def __init__(self, ireq):
        # type: (InstallRequirement) -> None
        assert ireq.link is None, "This is a link, not a specifier"
        self._ireq = ireq
        self._extras = frozenset(ireq.extras)

    def __str__(self):
        # type: () -> str
        return str(self._ireq.req)

    def __repr__(self):
        # type: () -> str
        return "{}({!r})".format(self.__class__.__name__, str(self._ireq.req))

    @property
    def project_name(self):
        # type: () -> str
        return canonicalize_name(self._ireq.req.name)

    @property
    def name(self):
        # type: () -> str
        return format_name(self.project_name, self._extras)

    def format_for_error(self):
        # type: () -> str
        # Render the comma-separated specifiers as "A, B, ..., F and G" —
        # slightly more human readable without changing their meaning.
        parts = [chunk.strip() for chunk in str(self).split(",")]
        if not parts:
            return ""
        if len(parts) == 1:
            return parts[0]
        return "{} and {}".format(", ".join(parts[:-1]), parts[-1])

    def get_candidate_lookup(self):
        # type: () -> CandidateLookup
        return None, self._ireq

    def is_satisfied_by(self, candidate):
        # type: (Candidate) -> bool
        assert candidate.name == self.name, \
            "Internal issue: Candidate is not for this requirement " \
            " {} vs {}".format(candidate.name, self.name)
        # Prereleases are always allowed here: PackageFinder has already
        # filtered out prerelease candidates when the user did not want them.
        spec = self._ireq.req.specifier
        return spec.contains(candidate.version, prereleases=True)
class RequiresPythonRequirement(Requirement):
    """A requirement representing Requires-Python metadata."""

    def __init__(self, specifier, match):
        # type: (SpecifierSet, Candidate) -> None
        self.specifier = specifier
        self._candidate = match

    def __str__(self):
        # type: () -> str
        return "Python {}".format(self.specifier)

    def __repr__(self):
        # type: () -> str
        return "{}({!r})".format(self.__class__.__name__, str(self.specifier))

    @property
    def project_name(self):
        # type: () -> str
        return self._candidate.project_name

    @property
    def name(self):
        # type: () -> str
        return self._candidate.name

    def format_for_error(self):
        # type: () -> str
        return str(self)

    def get_candidate_lookup(self):
        # type: () -> CandidateLookup
        # Offer the interpreter candidate only when it satisfies the specifier.
        if self.specifier.contains(self._candidate.version, prereleases=True):
            return self._candidate, None
        return None, None

    def is_satisfied_by(self, candidate):
        # type: (Candidate) -> bool
        assert candidate.name == self._candidate.name, "Not Python candidate"
        # Prereleases are always allowed here: PackageFinder has already
        # implemented the prerelease filtering logic.
        return self.specifier.contains(candidate.version, prereleases=True)
|
[
"hacker@hacker.home"
] |
hacker@hacker.home
|
800e01d96d0377b922a3b2148405d4d37c964fa3
|
531caac957596fc623e534bce734ef6b45be0b07
|
/tests/operators/vector/test_floor_001.py
|
350c04c90cd1257905c6cfa67ca849dde2b97a12
|
[
"Apache-2.0",
"Zlib",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] |
permissive
|
wxyhv/akg
|
02e64d81bbb84472e0bf1c57a691b688ea743d6e
|
fc9b6f5b6fa024da89bf90466a815359ca54015d
|
refs/heads/master
| 2023-03-11T02:59:18.472826
| 2021-02-23T07:44:16
| 2021-02-23T07:44:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,368
|
py
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
################################################
Testcase_PrepareCondition:
Testcase_TestSteps:
Testcase_ExpectedResult:
"""
import datetime
import os
import pytest
from base import TestBase
from nose.plugins.attrib import attr
from test_run.floor_run import floor_run
############################################################
# TestCase= class: put to tests/*/
############################################################
class TestCase(TestBase):
    """akg floor-op test suite: registers CI and full test argument lists."""
    def setup(self):
        # Register this case with the framework (name + working directory).
        case_name = "test_akg_floor_001"
        case_path = os.getcwd()
        self.params_init(case_name, case_path)
        self.caseresult = True
        self._log.info("============= {0} Setup case============".format(self.casename))
        # Quick CI subset: (caseflag, op runner, (shape, dtype, kernel name)).
        self.testarg_ci = [
            #caseflag,opfuncname,testRunArgs, dimArgs
            # Deeplab v3
            # ("004_floor_4_33_33_256", floor_run, ((4, 33, 33, 256), "float16", "cce_floor_fp16")),
            ("005_floor", floor_run, ((128, 1280), "float32", "cce_floor_fp32")),
        ]
        # Full suite: same tuple shape plus explicit tiling dimArgs.
        self.testarg = [
            #caseflag,opfuncname,testRunArgs, dimArgs
            ("001_floor_8192_1024", floor_run, ((8192, 1024), "float16", "cce_floor_fp16"), ((8, 8), (1024, 1024))),
            ("002_floor_64_16_128_128", floor_run, ((64, 16, 128, 128), "float16", "cce_floor_fp16"), ((1, 1), (1, 1), (64, 64), (128, 128))),
            ("003_floor_64_128_1024", floor_run, ((64, 128, 1024), "float16", "cce_floor_fp16"), ((1, 1), (8, 8), (1024, 1024))),
        ]
        return
    def test_run_ci(self):
        # Fast subset executed in continuous integration.
        self.common_run(self.testarg_ci)
    def test_run(self):
        # Full argument list (larger shapes, explicit tiling).
        self.common_run(self.testarg)
    def teardown(self):
        self._log.info("============= {0} Teardown============".format(self.casename))
        return
|
[
"ckey.chengbin@huawei.com"
] |
ckey.chengbin@huawei.com
|
fadc77c7549de3d2eee4a4343e2f9d1d1c73a6b8
|
1bba82345900327ed1c128e8046dc91f90a0ccb5
|
/tax_debts/apps.py
|
0f0af72642fe3c3364064a1e06acab7be13c0dc7
|
[
"MIT"
] |
permissive
|
dchaplinsky/ragoogle
|
40bd093682e41d1ee2a77f446c69d09e82bb3948
|
dccb3d29334c3220ea12c46c725c443c8bd725c0
|
refs/heads/master
| 2021-06-11T10:07:41.142843
| 2020-10-12T10:30:39
| 2020-10-12T10:30:39
| 136,800,715
| 3
| 3
|
MIT
| 2021-03-19T23:20:02
| 2018-06-10T10:51:30
|
CSS
|
UTF-8
|
Python
| false
| false
| 682
|
py
|
from abstract.apps import AbstractConfig
from .loader import TaxDebtsLoader
from .elastic_models import ElasticTaxDebtsModel, tax_debts_idx
class TaxDebtsConfig(AbstractConfig):
    """App config wiring the tax-debts dataset into the shared loader/search stack."""
    name = "tax_debts"
    verbose_name = "Податковий борг"
    short_name = "ДФС"
    loader_class = TaxDebtsLoader
    @property
    def data_model(self):
        # Imported lazily to prevent circular imports of some kind
        from .models import TaxDebtsModel
        return TaxDebtsModel
    @property
    def sitemap(self):
        # Lazy import for the same circular-import reason as data_model.
        from .sitemaps import TaxDebtsSitemap
        return TaxDebtsSitemap
    elastic_model = ElasticTaxDebtsModel
    elastic_index = tax_debts_idx
|
[
"dchaplinsky@conversionscience.co.uk"
] |
dchaplinsky@conversionscience.co.uk
|
84e37ca82405d9435b0a64bdc32f81f785b186e4
|
5f9e0c226c6f99f04446d60cd21282e7e6b05d2c
|
/sequence.py
|
4def0862fac63ace7d5d9b90c17358bee744d122
|
[] |
no_license
|
JONNY-ME/my-kattis-solution
|
867ac267dbb5faa6f7c2af35b435498a22ae269d
|
51c70e0fd25f1f369cdcd2ce49a54d5d0df2358e
|
refs/heads/main
| 2023-06-17T20:04:04.701038
| 2021-07-16T09:35:35
| 2021-07-16T09:35:35
| 386,583,581
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
from math import log, ceil
# Print the smallest set of powers of two whose subset sums cover 1..N,
# i.e. the smallest k with 2**k - 1 >= N.
#
# FIX: the original computed k via log(N, 2), which is float-based and can
# round the wrong way for large inputs (e.g. near exact powers of two).
# int.bit_length() gives the exact same k — for N == 2**m it is m + 1
# (matching the original's int(k) + 1 branch) and ceil(log2(N)) otherwise —
# with no floating-point error.
n = int(input())
k = n.bit_length()
print(k)
for i in range(k):
    print(2**i, end=' ')
print()
|
[
"yohannesmelese4@gmail.com"
] |
yohannesmelese4@gmail.com
|
653ca533eb3f53b29e27b7eb8e5b17df34a8b2eb
|
6203105c774913bbb3dc7e2d9bb99f739d9d24fa
|
/discrete_autoencoder/layers/activation.py
|
249418a2662f3838a89576ab80e8e36e9d2b2c95
|
[] |
no_license
|
bstriner/discrete_autoencoder
|
f5eae31e155b2c4c440c9fe89b060c3be61de888
|
baca5d23964b08ff7e3062c07d74b0ff9a631e98
|
refs/heads/master
| 2021-07-23T02:12:36.405089
| 2017-11-03T09:37:02
| 2017-11-03T09:37:02
| 108,048,313
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
from .layer import Layer
class ActivationLayer(Layer):
    """Parameter-free layer that applies ``activation`` element-wise."""

    def __init__(self, activation=None):
        self.activation = activation
        # This layer holds no trainable state, so both weight lists are empty.
        super(ActivationLayer, self).__init__(params=[], non_trainable_weights=[])

    def call(self, x):
        # Returns (output, extra-updates); there are never extra updates here.
        return self.activation(x), []
|
[
"bstriner@gmail.com"
] |
bstriner@gmail.com
|
c03a75610d9c6359e5b0034a66cbf001dc6401f2
|
2c94c322b5e1f53e131f7d680bcd4413ff11a10b
|
/bubble_s_ascending.py
|
dd23b2b99c326a2660e536c4be43b9aee8db5ab4
|
[] |
no_license
|
Nehanavgurukul/list
|
66996ad6f30183f8d6c758ab824fd5a7840ba4dd
|
e4aa2686c0f007477e147c733ac98708773570cb
|
refs/heads/main
| 2023-01-11T23:38:15.450749
| 2020-10-29T16:10:29
| 2020-10-29T16:10:29
| 308,381,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
# Sort the list into ascending order by repeatedly swapping any pair that
# is out of order, then print the result.
num = [23, 50, 56, 20, 11, 70]
for i in range(len(num)):
    for j in range(len(num)):
        # Swap whenever the element at i is smaller than the one at j.
        if num[i] < num[j]:
            num[i], num[j] = num[j], num[i]
print(num)
|
[
"nehay19@navgurukul.org"
] |
nehay19@navgurukul.org
|
2cb1b08ed78ba1f7af4b2d62d28e84f0291436db
|
b8498a35832f5d14bd15332a3dd2b93d64351448
|
/fluent-python/15-context-mngr/else_block.py
|
adac5f3d0a2da378b4e488ca68369bc802379035
|
[] |
no_license
|
luzzyzhang/my-python-cookbook
|
8b3daf4c4354a98ff305375c8a3a35551eee67e7
|
8d160f6d6d18b7a9801d433f6e3868d054432bde
|
refs/heads/master
| 2021-09-06T20:25:39.026278
| 2018-02-11T04:22:13
| 2018-02-11T04:22:13
| 46,793,714
| 2
| 1
| null | 2017-06-17T13:57:26
| 2015-11-24T13:35:26
|
Python
|
UTF-8
|
Python
| false
| false
| 527
|
py
|
# -*- coding: utf-8 -*-
"""This is just demo code
"""
# NOTE: intentionally not runnable — my_list, dangerous_call, after_call and
# log are undefined; the snippets only illustrate for/else and try/else.
# for ... else ...
# The else clause runs only when the loop finishes without hitting break.
for item in my_list:
    if item.flavor == 'bananas':
        break
else:
    raise ValueError('No banana flavor found')
# Anti-pattern: after_call() is inside the try, so its own OSErrors would be
# caught and misattributed to dangerous_call().
try:
    dangerous_call()
    after_call()
except OSError:
    log('OSError ...')
# VS
# For clarity and correctness, the body of a try block should only have the
# statements that may generate the expected exceptions. This is much better:
try:
    dangerous_call()
except OSError:
    log('OSError ...')
else:
    after_call()
|
[
"luzzyzhang@gmail.com"
] |
luzzyzhang@gmail.com
|
54691aec62e64eee0903528551d1bc0d23f22069
|
1b57d2f689903d9937f77e26be40784af2ff2669
|
/view_helpers/home.py
|
f7682825ddec4cb03015fe1ee82b08065034f3d6
|
[] |
no_license
|
FMularski/passwordkeeper
|
b1ef31c04bcfa7f012f28852fd8ae1f33efeff98
|
5859fbef89a3f80b27d52fd124971180e12e4fef
|
refs/heads/main
| 2023-07-16T21:09:34.938196
| 2021-09-06T21:05:01
| 2021-09-06T21:05:01
| 347,174,484
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
def encode_password(password, pin):
    """Obfuscate ``password`` by shifting every character's code point up by
    the sum of the PIN's character codes (a Caesar-style shift)."""
    shift = sum(ord(digit) for digit in pin)
    return ''.join(chr(ord(char) + shift) for char in password)
def decode_password(encoded, pin):
    """Invert encode_password: shift every character's code point down by
    the sum of the PIN's character codes."""
    shift = sum(ord(digit) for digit in pin)
    return ''.join(chr(ord(char) - shift) for char in encoded)
|
[
"mularskif@gmail.com"
] |
mularskif@gmail.com
|
9459f7c0aa601f412b8a975d03fa86c2914ac96c
|
797761aeb37a8de4288696875f1a3c6c0eff3824
|
/mlfromscratch/unsupervised_learning/partitioning_around_medoids.py
|
6fdab2470e836fd1065b261519fafc06d73dad7f
|
[
"MIT"
] |
permissive
|
kauziishere/ML-From-Scratch
|
27df8a6565e964309d25729fe373746845b57b6b
|
a5040b84102fc2259b925d7337e7ff87080b6e5f
|
refs/heads/master
| 2021-05-14T17:06:48.691808
| 2017-08-30T09:27:19
| 2017-08-30T09:27:19
| 116,040,355
| 1
| 1
| null | 2018-01-02T17:38:06
| 2018-01-02T17:31:24
|
Python
|
UTF-8
|
Python
| false
| false
| 5,357
|
py
|
import sys
import os
import math
import random
from sklearn import datasets
import numpy as np
# Import helper functions
from mlfromscratch.utils.data_manipulation import normalize
from mlfromscratch.utils.data_operation import euclidean_distance
from mlfromscratch.unsupervised_learning import PCA
from mlfromscratch.utils import Plot
class PAM():
    """A simple clustering method that forms k clusters by first assigning
    samples to the closest medoids, and then swapping medoids with non-medoid
    samples if the total distance (cost) between the cluster members and their medoid
    is smaller than prevoisly.
    Parameters:
    -----------
    k: int
        The number of clusters the algorithm will form.
    """
    def __init__(self, k=2):
        self.k = k
    # Initialize the medoids as random samples.
    # NOTE(review): np.random.choice may pick the same sample twice, so two
    # medoids can coincide — confirm whether distinct medoids are intended.
    def _init_random_medoids(self, X):
        n_samples, n_features = np.shape(X)
        medoids = np.zeros((self.k, n_features))
        for i in range(self.k):
            medoid = X[np.random.choice(range(n_samples))]
            medoids[i] = medoid
        return medoids
    # Return the index of the closest medoid to the sample
    def _closest_medoid(self, sample, medoids):
        closest_i = None
        closest_distance = float("inf")
        for i, medoid in enumerate(medoids):
            distance = euclidean_distance(sample, medoid)
            if distance < closest_distance:
                closest_i = i
                closest_distance = distance
        return closest_i
    # Assign the samples to the closest medoids to create clusters
    # (each cluster is a list of sample indices into X).
    def _create_clusters(self, X, medoids):
        clusters = [[] for _ in range(self.k)]
        for sample_i, sample in enumerate(X):
            medoid_i = self._closest_medoid(sample, medoids)
            clusters[medoid_i].append(sample_i)
        return clusters
    # Calculate the cost (total distance between samples and their medoids)
    def _calculate_cost(self, X, clusters, medoids):
        cost = 0
        # For each cluster
        for i, cluster in enumerate(clusters):
            medoid = medoids[i]
            for sample_i in cluster:
                # Add distance between sample and medoid as cost
                cost += euclidean_distance(X[sample_i], medoid)
        return cost
    # Returns a list of all samples that are not currently medoids.
    # NOTE(review): `sample in medoids` on a 2-D ndarray is element-wise
    # (`(medoids == sample).any()`), not row-wise — a non-medoid sample that
    # shares any coordinate value with a medoid may be excluded. Confirm
    # whether a row-equality check was intended.
    def _get_non_medoids(self, X, medoids):
        non_medoids = []
        for sample in X:
            if not sample in medoids:
                non_medoids.append(sample)
        return non_medoids
    # Classify samples as the index of their clusters
    def _get_cluster_labels(self, clusters, X):
        # One prediction for each sample
        y_pred = np.zeros(np.shape(X)[0])
        for cluster_i in range(len(clusters)):
            cluster = clusters[cluster_i]
            for sample_i in cluster:
                y_pred[sample_i] = cluster_i
        return y_pred
    # Do Partitioning Around Medoids and return the cluster labels
    def predict(self, X):
        # Initialize medoids randomly
        medoids = self._init_random_medoids(X)
        # Assign samples to closest medoids
        clusters = self._create_clusters(X, medoids)
        # Calculate the initial cost (total distance between samples and
        # corresponding medoids)
        cost = self._calculate_cost(X, clusters, medoids)
        # Iterate until we no longer have a cheaper cost
        while True:
            best_medoids = medoids
            lowest_cost = cost
            for medoid in medoids:
                # Get all non-medoid samples
                non_medoids = self._get_non_medoids(X, medoids)
                # Calculate the cost when swapping medoid and samples
                for sample in non_medoids:
                    # Swap sample with the medoid.
                    # NOTE(review): `medoids == medoid` is an element-wise
                    # boolean mask, so this assigns `sample`'s values into
                    # every position where a coordinate matches — verify this
                    # replaces exactly the one intended medoid row.
                    new_medoids = medoids.copy()
                    new_medoids[medoids == medoid] = sample
                    # Assign samples to new medoids
                    new_clusters = self._create_clusters(X, new_medoids)
                    # Calculate the cost with the new set of medoids
                    new_cost = self._calculate_cost(
                        X, new_clusters, new_medoids)
                    # If the swap gives us a lower cost we save the medoids and cost
                    if new_cost < lowest_cost:
                        lowest_cost = new_cost
                        best_medoids = new_medoids
            # If there was a swap that resultet in a lower cost we save the
            # resulting medoids from the best swap and the new cost
            if lowest_cost < cost:
                cost = lowest_cost
                medoids = best_medoids
            # Else finished
            else:
                break
        final_clusters = self._create_clusters(X, medoids)
        # Return the samples cluster indices as labels
        return self._get_cluster_labels(final_clusters, X)
def main():
    # Demo: cluster synthetic blobs with PAM (k-medoids) and plot the
    # predicted labels next to the ground-truth labels.
    # Load the dataset
    X, y = datasets.make_blobs()
    # Cluster the data using K-Medoids
    clf = PAM(k=3)
    y_pred = clf.predict(X)
    # Project the data onto the 2 primary principal components
    p = Plot()
    p.plot_in_2d(X, y_pred, title="PAM Clustering")
    p.plot_in_2d(X, y, title="Actual Clustering")
if __name__ == "__main__":
    main()
|
[
"eriklindernoren@live.se"
] |
eriklindernoren@live.se
|
aeedf79574fb645c330f2c9285caa73a7e7cc84a
|
09cead98874a64d55b9e5c84b369d3523c890442
|
/py200703_python1/day18_py200830/if_ex_2_b.py
|
86d87ce269e727558d85bff7d25188b797e7433c
|
[] |
no_license
|
edu-athensoft/stem1401python_student
|
f12b404d749286036a090e941c0268381ce558f8
|
baad017d4cef2994855b008a756758d7b5e119ec
|
refs/heads/master
| 2021-08-29T15:01:45.875136
| 2021-08-24T23:03:51
| 2021-08-24T23:03:51
| 210,029,080
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 591
|
py
|
"""
ex 2.
write a program to find the smallest number among 3 given numbers
"""
a = 24
b = 15
c = 6
d = 78
max = a
# 1st round
if max < a:
max = a
# 2nd round
if max < b:
max = b
# 3rd round
if max < c:
max = c
# 4th round
if max < d:
max = d
print("The max number is {}".format(max))
#
num1 = float(input("Enter first number: "))
num2 = float(input("Enter second number: "))
num3 = float(input("Enter third number: "))
min = num1
if num1<min:
min = num1
if num2<min:
min = num2
if num3<min:
min = num3
print("The smallest number is {}".format(min))
|
[
"lada314@gmail.com"
] |
lada314@gmail.com
|
e5a530fc5cf0c4e945782e013924a8ad746bce15
|
933376c11498a6567da8d7eb7d2675100895c3ba
|
/pyzoo/zoo/chronos/examples/auto_model/autoprophet_nyc_taxi.py
|
db4282b6e4d0bc5f4e012b570aa603848f11ebcc
|
[
"Apache-2.0"
] |
permissive
|
intel-analytics/analytics-zoo
|
320a461765f86d41dd456b598b1cf1d51d57f4c4
|
7cc3e2849057d6429d03b1af0db13caae57960a5
|
refs/heads/master
| 2023-08-13T20:47:58.621714
| 2023-07-06T00:49:11
| 2023-07-06T00:49:11
| 90,328,920
| 3,104
| 996
|
Apache-2.0
| 2023-09-06T01:51:18
| 2017-05-05T02:27:30
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,730
|
py
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pandas as pd
import argparse
from zoo.chronos.forecaster.prophet_forecaster import ProphetForecaster
from zoo.chronos.autots.model.auto_prophet import AutoProphet
from zoo.orca.common import init_orca_context, stop_orca_context
def get_data(args):
    """Load the NYC-taxi dataset as a DataFrame.

    Prefers the local file in ``args.datadir`` over the download
    ``args.url``; the first column is parsed as datetimes.
    """
    source = args.datadir if args.datadir else args.url
    return pd.read_csv(source, parse_dates=[0])
if __name__ == '__main__':
    # Compare a plain ProphetForecaster against AutoProphet's HPO search on
    # the NYC taxi series, reporting the mse improvement.
    # arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_workers', type=int, default=2,
                        help="The number of nodes to be used in the cluster. "
                        "You can change it depending on your own cluster setting.")
    parser.add_argument('--cluster_mode', type=str, default='local',
                        help="The mode for the Spark cluster.")
    parser.add_argument('--cores', type=int, default=4,
                        help="The number of cpu cores you want to use on each node."
                        "You can change it depending on your own cluster setting.")
    parser.add_argument('--memory', type=str, default="10g",
                        help="The memory you want to use on each node."
                        "You can change it depending on your own cluster setting.")
    parser.add_argument('--cpus_per_trial', type=int, default=1,
                        help="Int. Number of cpus for each trial")
    parser.add_argument('--n_sampling', type=int, default=20,
                        help="Number of times to sample from the search_space.")
    parser.add_argument('--datadir', type=str,
                        help="Use local csv file by default.")
    parser.add_argument('--url', type=str, default="https://raw.githubusercontent.com/numenta/NAB"
                        "/v1.0/data/realKnownCause/nyc_taxi.csv",
                        help="Download link of dataset.")
    args = parser.parse_args()
    # data prepare: Prophet expects the columns to be named ds/y.
    df = get_data(args)
    df = df.rename(columns={'timestamp': 'ds', 'value': 'y'})
    # train/test split
    end_date = '2015-1-28'  # split by 1-28, which take the last 3 days as horizon
    df_train = df[df['ds'] <= end_date]
    df_test = df[df['ds'] > end_date]
    # Baseline: plain Prophet fit without hyper-parameter search.
    prophet = ProphetForecaster()
    prophet.fit(df_train, validation_data=df_test)
    # use autoprophet for HPO (runs Ray on Spark via the orca context)
    num_nodes = 1 if args.cluster_mode == "local" else args.num_workers
    init_orca_context(cluster_mode=args.cluster_mode, cores=args.cores,
                      memory=args.memory, num_nodes=num_nodes, init_ray_on_spark=True)
    autoprophet = AutoProphet(cpus_per_trial=args.cpus_per_trial)
    autoprophet.fit(df_train, n_sampling=args.n_sampling)
    stop_orca_context()
    # evaluate both models on the held-out horizon and report improvement
    auto_searched_mse = autoprophet.evaluate(df_test, metrics=['mse'])[0]
    nonauto_searched_mse = prophet.evaluate(df_test, metrics=['mse'])[0]
    print("Autoprophet improve the mse by",
          str(((nonauto_searched_mse - auto_searched_mse)/nonauto_searched_mse)*100), '%')
    print("auto_searched_mse:", auto_searched_mse)
    print("nonauto_searched_mse:", nonauto_searched_mse)
|
[
"noreply@github.com"
] |
intel-analytics.noreply@github.com
|
a0a3b9b675a2be13b66e294e41e133e675115ea0
|
a424742e3e784c33625bf29295483469d2a8962a
|
/eval.py
|
0056f8bcb5514f1b0642a7c179a71a5194665ab7
|
[] |
no_license
|
eeyrw/PyTorch_YOLOv2
|
82b3b8bf6c9562a43367f7cfcbdea8954c685dd5
|
4ccf8fb8a61d484e70af611f021013fe53133178
|
refs/heads/main
| 2023-04-27T03:20:27.680365
| 2021-05-20T12:28:14
| 2021-05-20T12:28:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,541
|
py
|
import torch
import torch.nn as nn
from data import *
import argparse
from utils.vocapi_evaluator import VOCAPIEvaluator
from utils.cocoapi_evaluator import COCOAPIEvaluator
# Command-line interface for YOLOv2 evaluation (model version, dataset,
# checkpoint path, input resolution, CUDA toggle).
parser = argparse.ArgumentParser(description='YOLOv2 Detector Evaluation')
parser.add_argument('-v', '--version', default='yolov2',
                    help='yolov2.')
parser.add_argument('-d', '--dataset', default='voc',
                    help='voc, coco-val, coco-test.')
parser.add_argument('--trained_model', type=str,
                    default='weights/yolov2/',
                    help='Trained state_dict file path to open')
parser.add_argument('-size', '--input_size', default=416, type=int,
                    help='input_size')
parser.add_argument('--cuda', action='store_true', default=False,
                    help='Use cuda')
# Parsed at import time: this module is meant to be run as a script.
args = parser.parse_args()
def voc_test(model, device, input_size):
    # Evaluate `model` on the VOC test set using the project's VOC evaluator
    # (data root, class list and transform come from the `data` module).
    evaluator = VOCAPIEvaluator(data_root=VOC_ROOT,
                                img_size=input_size,
                                device=device,
                                transform=BaseTransform(input_size),
                                labelmap=VOC_CLASSES,
                                display=True
                                )
    # VOC evaluation
    evaluator.evaluate(model)
def coco_test(model, device, input_size, test=False):
    """Evaluate ``model`` with the COCO evaluator.

    When ``test`` is True the evaluation runs against test-dev 2017
    (announced on stdout); otherwise against the validation split. The two
    paths differ only in the evaluator's ``testset`` flag.
    """
    if test:
        print('test on test-dev 2017')
    evaluator = COCOAPIEvaluator(
        data_dir=coco_root,
        img_size=input_size,
        device=device,
        testset=test,
        transform=BaseTransform(input_size)
    )
    # COCO evaluation
    evaluator.evaluate(model)
if __name__ == '__main__':
    # dataset: pick the class count for the chosen benchmark
    if args.dataset == 'voc':
        print('eval on voc ...')
        num_classes = 20
    elif args.dataset == 'coco-val':
        print('eval on coco-val ...')
        num_classes = 80
    elif args.dataset == 'coco-test':
        print('eval on coco-test-dev ...')
        num_classes = 80
    else:
        print('unknow dataset !! we only support voc, coco-val, coco-test !!!')
        exit(0)
    # cuda
    if args.cuda:
        print('use cuda')
        torch.backends.cudnn.benchmark = True
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
    # input size
    input_size = args.input_size
    # load net
    if args.version == 'yolov2':
        from models.yolov2 import YOLOv2
        anchor_size = ANCHOR_SIZE if args.dataset == 'voc' else ANCHOR_SIZE_COCO
        net = YOLOv2(device, input_size=input_size, num_classes=num_classes, anchor_size=anchor_size)
    # load net
    # NOTE(review): map_location is hard-coded to 'cuda', so loading the
    # checkpoint on the CPU path (--cuda not set) will fail — confirm whether
    # map_location should follow `device` instead.
    net.load_state_dict(torch.load(args.trained_model, map_location='cuda'))
    net.eval()
    print('Finished loading model!')
    net = net.to(device)
    # evaluation (no gradients needed for inference)
    with torch.no_grad():
        if args.dataset == 'voc':
            voc_test(net, device, input_size)
        elif args.dataset == 'coco-val':
            coco_test(net, device, input_size, test=False)
        elif args.dataset == 'coco-test':
            coco_test(net, device, input_size, test=True)
|
[
"1394571815@qq.com"
] |
1394571815@qq.com
|
1572e5579606a9957144da853c20f0c8c39c58bf
|
520fcbe076fb1e04187512ddd33802b5c30b2f1a
|
/blender/nodes/converter/seratate_vector.py
|
2fd1c13aae88a4b65d1eb496d93390704478ad49
|
[
"MIT"
] |
permissive
|
LewisOrton/taichi_elements_houdini
|
c604fa85c662369ee3db94224c5e0166482b0512
|
50ef3232f080030213bcb7578a48d03647a9445b
|
refs/heads/master
| 2022-04-24T08:11:42.173539
| 2020-04-25T08:12:29
| 2020-04-25T08:12:29
| 259,212,298
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,762
|
py
|
from .. import base
def get_out_value_x(socket):
    """Cache the X components of the node's input vectors on the scene.

    NOTE(review): this module uses ``bpy`` but the visible file head only
    imports ``base`` — confirm ``import bpy`` exists elsewhere.
    """
    node = socket.node
    components = [vector[0] for vector in node.inputs['Vector'].get_value()]
    out_socket = node.outputs['X']
    cache_key = '{0}.{1}'.format(node.name, out_socket.name)
    bpy.context.scene.elements_sockets[cache_key] = components
def get_out_value_y(socket):
    """Cache the Y components of the node's input vectors on the scene.

    NOTE(review): this module uses ``bpy`` but the visible file head only
    imports ``base`` — confirm ``import bpy`` exists elsewhere.
    """
    node = socket.node
    components = [vector[1] for vector in node.inputs['Vector'].get_value()]
    out_socket = node.outputs['Y']
    cache_key = '{0}.{1}'.format(node.name, out_socket.name)
    bpy.context.scene.elements_sockets[cache_key] = components
def get_out_value_z(socket):
    """Cache the Z components of the node's input vectors on the scene.

    NOTE(review): this module uses ``bpy`` but the visible file head only
    imports ``base`` — confirm ``import bpy`` exists elsewhere.
    """
    node = socket.node
    components = [vector[2] for vector in node.inputs['Vector'].get_value()]
    out_socket = node.outputs['Z']
    cache_key = '{0}.{1}'.format(node.name, out_socket.name)
    bpy.context.scene.elements_sockets[cache_key] = components
class ElementsSeparateVectorNode(base.BaseNode):
    """Node that splits an input vector socket into X/Y/Z float outputs."""
    bl_idname = 'elements_separate_vector_node'
    bl_label = 'Separate Vector'
    category = base.CONVERTER
    # Maps each output socket name to the resolver that computes its value.
    get_value = {
        'X': get_out_value_x,
        'Y': get_out_value_y,
        'Z': get_out_value_z
    }
    def init(self, context):
        # x, y, z outputs (values are computed, so hide the value widgets)
        x = self.outputs.new('elements_float_socket', 'X')
        x.text = 'X'
        x.hide_value = True
        y = self.outputs.new('elements_float_socket', 'Y')
        y.text = 'Y'
        y.hide_value = True
        z = self.outputs.new('elements_float_socket', 'Z')
        z.text = 'Z'
        z.hide_value = True
        # input vector
        vector_in = self.inputs.new('elements_vector_socket', 'Vector')
        vector_in.text = ''
|
[
"stalkermodkytia@yandex.ru"
] |
stalkermodkytia@yandex.ru
|
eed3afbb97e48fae80a14a977c246d46fc89030a
|
0c469c4100fe9d352e83731688e388062a3c55c7
|
/Binary_Search/374. Guess Number Higher or Lower.py
|
48760947821ff471c3bde8720687f3f3cb131280
|
[] |
no_license
|
asperaa/back_to_grind
|
9e055c7e6561384e5b7ae52f01063e4beb34a298
|
5ea1976b9d5c6d04800e296e45e8ff90fdde5001
|
refs/heads/master
| 2022-12-16T18:32:01.443743
| 2020-09-05T13:29:39
| 2020-09-05T13:29:39
| 254,910,528
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
"""We are the captains of our ships, and we stay 'till the end. We see our stories through.
"""
"""374. Guess Number Higher or Lower
"""
def guess(num):
    # Placeholder for the LeetCode judge's predefined API: returns 0 when
    # num equals the picked number, and a nonzero value otherwise (here it
    # returns None, so the solution is only runnable on the judge).
    pass
class Solution:
    def guessNumber(self, n):
        """Binary-search 0..n for the picked number via the judge's guess() API."""
        lo, hi = 0, n
        while lo <= hi:
            mid = lo + (hi - lo) // 2
            verdict = guess(mid)
            if verdict == 0:
                return mid
            if verdict > 0:
                # Picked number is above mid: discard the lower half.
                lo = mid + 1
            else:
                hi = mid - 1
        return -1
|
[
"adityaankur44@gmail.com"
] |
adityaankur44@gmail.com
|
2d8f1205dd887ce96442a59c6b67c95c73400344
|
70b5c70d67dfed37c0317c605821c747516a3b13
|
/browser/sarafi/views.py
|
618372a1c7abb02e788df944d7bf05c7d9389ac6
|
[] |
no_license
|
jaxman020/browser
|
ff153106ba12d0e74daa6cec388e283914e1cd41
|
fff8dff28a6437f8bde739e2bd91f86ad767d1e6
|
refs/heads/master
| 2021-01-10T10:36:12.154569
| 2015-12-15T16:18:09
| 2015-12-15T16:18:09
| 47,606,006
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
from django.shortcuts import render
def sarafi(request):
    """Render the sarafi page with a greeting message in the template context."""
    ctx = {'message': 'Django 很棒'}
    return render(request, 'sarafi/sarafi.html', ctx)
|
[
"m516@m516"
] |
m516@m516
|
32874edb86c1dd5846853da3cc4de69aabcc9f6a
|
8d24ade82e14cca109da4b535ba08e77dc29b6ae
|
/Ex_DSA/ex_cap3a.py
|
c1ff816913f8375be3c5855f59dba0fe857c9953
|
[
"MIT"
] |
permissive
|
LuanGermano/DSAexercicios
|
882f6ece355723d101cd5a7b1a39db58857ffa14
|
2de12be75fda2d113f302e8f1272e00e2d8622af
|
refs/heads/main
| 2023-07-23T12:43:13.898367
| 2021-08-30T01:42:04
| 2021-08-30T01:42:04
| 397,451,420
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,943
|
py
|
# Exercise 1 - Ask the user which day of the week it is. If it is Saturday
# or Sunday, print a rest-day message, otherwise a work-day message.
# (Kept disabled inside a string literal because it needs interactive input.)
"""
dia = str(input("Qual dia da semana é hoje? ")).lower()
if dia == "sabado" or dia == "domingo":
    print('Hoje é dia de descanso')
else:
    print('Voce precisa trabalhar!')"""
# Exercise 2 - Build a list of 5 fruits and check whether 'morango'
# (strawberry) appears in it (case-insensitive scan of lista2).
cont3 = 0
lista1 = ['Abacaxi', 'Laranja', 'Pera', 'maçã', 'goiaba']
lista2 = ['Abacaxi', 'Laranja', 'Pera', 'maçã', 'morango']
for i in lista2:
    if i.lower() == "morango":
        cont3 += 1
if cont3 == 0:
    print('Não tem morango!')
else:
    print('Existe morango na lista')
# Exercise 3 - Create a 4-element tuple, multiply each element by 2 and
# store the results in a list.
tupla = (1,2,3,4)
lista3 = []
for i in tupla:
    lista3.append(i*2)
print(lista3)
# Exercise 4 - Print the even numbers from 100 to 150 inclusive.
for c in range(100,151,2):
    if c == 150:
        print(c)
    else:
        print(c, end=', ')
print()
# Exercise 5 - Start at temperatura = 40 and print the temperatures while
# they stay above 35.
temperatura = 40
while temperatura > 35:
    print(temperatura, end=', ')
    temperatura -= 1
print()
# Exercise 6 - Count from 0 while below 100, stopping once 23 is reached.
contador = 0
while contador < 100:
    print(contador, end=', ')
    contador += 1
    if contador == 23:
        break
print()
# Exercise 7 - Collect the even values from 4 up to 20 into a list.
lista7 = []
var7 = 4
while var7 <= 20:
    if var7 % 2 == 0:
        lista7.append(var7)
    # Single increment replaces the original's duplicated +=1 branches.
    var7 += 1
print(lista7)
# Exercise 8 - Turn range(5, 45, 2) into a list.
nums = range(5, 45, 2)
print(list(nums))
# Exercise 9 - Corrected temperature check (reads user input).
temperatura = float(input('Qual a temperatura? '))
if temperatura > 30:
    print('Vista roupas leves.')
else:
    print('Busque seus casacos.')
# Exercise 10 - Count how many times the letter "r" appears in the phrase.
frase = "É melhor, muito melhor, contentar-se com a realidade; se ela não é tão brilhante como os sonhos, tem pelo menos a vantagem de existir."
# FIX: the original reused `contador`, which exercise 6 left at 23, so the
# reported count was inflated by 23. Reset it before counting.
contador = 0
for letra in frase:
    if letra == 'r':
        contador += 1
print(f'Foram contados {contador} letras "r"')
|
[
"88220713+LuanGermano@users.noreply.github.com"
] |
88220713+LuanGermano@users.noreply.github.com
|
53782ebdaec5105beb96a0ea327db79cdcb20e31
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/MinWindow_20200713232140.py
|
da9aae373aa9bf562d26a1905143fca258c24897
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 850
|
py
|
# pseducode
# "ADOBECODEBANC"
# "ABC"
# if n(string) is less than k then return false
# have a left and right pointer where both start from the begining and right increases the window size and left reduces the window size
# make a dictionart for the char in k and the count of unique char in k
def small(n, k):
    """Return the smallest window (substring) of *n* containing every
    character of *k* with the required multiplicity (LeetCode 76).

    Returns the string 'false' when len(k) > len(n) (kept for backward
    compatibility with the original early exit) and '' when no window
    covers all of *k*.

    Bug fix: the original while-loop advanced neither pointer when the
    current window was not a key of the character dict, so the call below
    hung in an infinite loop.
    """
    if len(k) > len(n):
        return 'false'
    # Required count per character of k (duplicates like "ABCC" count twice).
    need = {}
    for ch in k:
        need[ch] = need.get(ch, 0) + 1
    missing = len(k)   # characters of k still absent from the window
    best = ''          # shortest valid window seen so far
    left = 0
    for right, ch in enumerate(n):
        if need.get(ch, 0) > 0:
            missing -= 1
        if ch in need:
            need[ch] -= 1
        if missing == 0:
            # Shrink: drop chars that are not needed or are in surplus.
            while n[left] not in need or need[n[left]] < 0:
                if n[left] in need:
                    need[n[left]] += 1
                left += 1
            if not best or (right - left + 1) < len(best):
                best = n[left:right + 1]
            # Release the leftmost required char and keep scanning.
            need[n[left]] += 1
            missing += 1
            left += 1
    return best

small("ADOBECODEBANC", "ABCC")
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
81c14dc95fe92cb71fd3ab3323a7d30f3c721564
|
45d6b7739ef7e61779d778b16e2d2cb9b92a08c0
|
/test/run_in_parallel-200PU-grow/submit-49.py
|
dc64b6143eac91b619db28129379d724ba08a4f9
|
[] |
no_license
|
isobelojalvo/phase2L1TauAnalyzer
|
40b545baec97bf287a8d8ab26bea70546bf9f6f8
|
98ef6d31a523698ba0de48763cadee1d5b2ce695
|
refs/heads/master
| 2021-01-22T08:38:17.965156
| 2019-07-25T17:25:51
| 2019-07-25T17:25:51
| 92,623,686
| 0
| 1
| null | 2019-07-23T19:43:55
| 2017-05-27T20:56:25
|
Python
|
UTF-8
|
Python
| false
| false
| 302
|
py
|
# CMSSW configuration fragment: attaches secondary (parent RAW) input files and
# a luminosity-block mask to an existing `process.source`.  `process` and `cms`
# are defined by the parent configuration that includes this fragment.
process.source.secondaryFileNames = cms.untracked.vstring(
    "/store/relval/CMSSW_9_3_7/RelValZTT_14TeV/GEN-SIM-DIGI-RAW/PU25ns_93X_upgrade2023_realistic_v5_2023D17PU200-v1/10000/6E64C932-2E2D-E811-86D5-0242AC130002.root")
process.source.lumisToProcess = cms.untracked.VLuminosityBlockRange("1:24")
|
[
"ojalvo@wisc.edu"
] |
ojalvo@wisc.edu
|
809dbdaf35c9f1d9c579f7c054c3957ee204aa1e
|
4e163aa4aa0f4c4ddc22f74ae21b6fb1c85a7a09
|
/238. 除自身以外数组的乘积.py
|
2e458c88617acdbc6d2816c027b1d9510b858e13
|
[] |
no_license
|
dxc19951001/Everyday_LeetCode
|
72f46a0ec2fc651168129720ad0b1e7b5c372b0b
|
3f7b2ea959308eb80f4c65be35aaeed666570f80
|
refs/heads/master
| 2023-08-03T09:22:08.467100
| 2023-07-23T17:08:27
| 2023-07-23T17:08:27
| 270,723,436
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,634
|
py
|
class Solution(object):
    """Four variants of LeetCode 238 "Product of Array Except Self".

    Each method returns a list where entry i is the product of all
    elements of ``nums`` except ``nums[i]``, without using division.
    """

    def productExceptSelf_0(self, nums):
        """Brute force, O(n^2): slice out index i, multiply the rest.

        :type nums: List[int]
        :rtype: List[int]
        """
        answer = []
        for idx in range(len(nums)):
            others = nums[:idx] + nums[idx + 1:]
            prod = 1
            for value in others:
                prod *= value
            answer.append(prod)
        return answer

    def productExceptSelf_1(self, nums):
        """O(n) time with explicit prefix- and suffix-product arrays.

        :type nums: List[int]
        :rtype: List[int]
        """
        size = len(nums)
        prefix, suffix = [0] * size, [0] * size
        # prefix[i] = product of everything left of i (empty product = 1).
        prefix[0] = 1
        for idx in range(1, size):
            prefix[idx] = prefix[idx - 1] * nums[idx - 1]
        # suffix[i] = product of everything right of i.
        suffix[size - 1] = 1
        for idx in range(size - 2, -1, -1):
            suffix[idx] = suffix[idx + 1] * nums[idx + 1]
        return [prefix[idx] * suffix[idx] for idx in range(size)]

    def productExceptSelf_2(self, nums):
        """O(n) time, O(1) extra space: reuse the answer array for prefixes.

        :type nums: List[int]
        :rtype: List[int]
        """
        size = len(nums)
        answer = [0] * size
        answer[0] = 1
        for idx in range(1, size):
            answer[idx] = answer[idx - 1] * nums[idx - 1]
        running_right = 1
        for idx in range(size - 1, -1, -1):
            answer[idx] *= running_right
            running_right *= nums[idx]
        return answer

    def productExceptSelf(self, nums):
        """Two scalar accumulators, one forward and one backward pass.

        :type nums: List[int]
        :rtype: List[int]
        """
        result = [1] * len(nums)
        running = 1
        for idx, value in enumerate(nums):
            result[idx] *= running
            running *= value
        running = 1
        for idx in range(len(nums) - 1, -1, -1):
            result[idx] *= running
            running *= nums[idx]
        return result
# Smoke test: for [1, 2, 3, 4] the expected output is [24, 12, 8, 6].
nums = [1,2,3,4]
s = Solution()
a = s.productExceptSelf(nums)
print(a)
|
[
"870200615@qq.com"
] |
870200615@qq.com
|
6d48f45d6eb3ac4fe3fe69c45bf0ec4b44276d16
|
176f2533c07323f3eccb13d576092c32c46428fc
|
/game/game.py
|
46f57da139764cbc2db7162a1541d4ddad3ab89f
|
[] |
no_license
|
viciu/pyconpl-2014
|
3fbe382c5376cc54ca448efaca2777e6d242c607
|
4539ab8135c56cfbb2428c456ca182a86a2f46c9
|
refs/heads/master
| 2021-01-17T20:59:36.882412
| 2014-10-18T08:52:16
| 2014-10-18T08:52:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,441
|
py
|
import random
APPLES = ['X', 'O']        # the two player symbols, in move order
ORANGE = '.'               # an empty cell
GRAPEFRUIT = ORANGE * 9    # the empty 3x3 board, stored as a 9-char string
# All eight winning lines (rows, columns, diagonals) as board indices.
RAISINS = [
    [0, 1, 2],
    [3, 4, 5],
    [6, 7, 8],
    [0, 3, 6],
    [1, 4, 7],
    [2, 5, 8],
    [0, 4, 8],
    [2, 4, 6]
]
def play():
    """Play one full random-vs-random game and print each board state.

    NOTE: Python 2 module (`print` statements throughout).
    """
    banana = GRAPEFRUIT    # current board, starts empty
    melon = None           # winning symbol, or None for a draw
    coconut(banana)
    for plum in range(9):
        # Pick a random empty cell; players alternate 'X' then 'O'.
        prune = random.choice(walnut(banana))
        nectarine = APPLES[plum % 2]
        banana = peanut(banana, prune, nectarine)
        coconut(banana)
        if hazelnut(banana, nectarine):
            melon = nectarine
            break
    if melon:
        print 'Player {} wins'.format(melon)
    else:
        print 'It is a draw'
def coconut(lychee):
    # Pretty-print board *lychee* (9-char string) as a 3x3 grid,
    # followed by a blank separator line.
    print '{} | {} | {}'.format(*lychee[:3])
    print '--+---+--'
    print '{} | {} | {}'.format(*lychee[3:6])
    print '--+---+--'
    print '{} | {} | {}'.format(*lychee[6:])
    print
def peanut(pineapple, mango, papaya):
    """Return a new board with symbol *papaya* placed at index *mango*.

    Boards are immutable 9-character strings, so a fresh string is built.
    Raises ValueError for an out-of-range index or an occupied cell.
    """
    if not 0 <= mango < 9:
        raise ValueError('Invalid position: {}'.format(mango))
    if pineapple[mango] != ORANGE:
        # Bug fix: the original formatted the undefined name `position`,
        # turning this error path into a NameError.
        raise ValueError('Position is full: {}'.format(mango))
    return pineapple[:mango] + papaya + pineapple[mango+1:]
def walnut(lemon):
    """Return the indices (0-8) of all empty cells on board *lemon*."""
    open_cells = []
    for idx in range(9):
        if lemon[idx] == ORANGE:
            open_cells.append(idx)
    return open_cells
def hazelnut(lime, peach):
    """Return True when symbol *peach* owns any winning line of board *lime*."""
    for a, b, c in RAISINS:
        if lime[a] == peach and lime[b] == peach and lime[c] == peach:
            return True
    return False
# Play a single random self-game when executed as a script.
if __name__ == '__main__':
    play()
|
[
"peter.inglesby@gmail.com"
] |
peter.inglesby@gmail.com
|
9c49e21e8fccfbd0b43c02b5ef63fdc32eccfdd4
|
9b7291d81a416bde2ec181229601eb2e33c7b8b2
|
/monophoton/spikes/collect.py
|
d0cf880677d760340d04e54dfd7cde2976bd94cd
|
[] |
no_license
|
MiT-HEP/MonoX
|
ab1528e72dad2590a0ae64f1a1d47195139e1749
|
224ee01107a94cedf8563c497edb2f326b99d9b1
|
refs/heads/master
| 2021-01-24T06:04:16.645559
| 2019-11-15T09:18:40
| 2019-11-15T09:18:40
| 41,823,403
| 1
| 9
| null | 2018-07-19T17:05:30
| 2015-09-02T19:33:33
|
Python
|
UTF-8
|
Python
| false
| false
| 3,528
|
py
|
import os
import sys
import re
import math
import array
thisdir = os.path.dirname(os.path.realpath(__file__))
basedir = os.path.dirname(thisdir)
sys.path.append(basedir)
import config
import utils
from datasets import allsamples
import ROOT
# Branch buffers bound to the skim trees below (ROOT fills them in place).
arun = array.array('I', [0])
alumi = array.array('I', [0])
aevent = array.array('I', [0])
aeta = array.array('f', [0.] * 10)
aphi = array.array('f', [0.] * 10)
# positions[sample][(run, lumi, event)] -> (eta, phi) of the leading photon.
positions = {}
#for sname in ['sph-16b-m', 'sph-16c-m', 'sph-16d-m', 'sph-16e-m', 'sph-16f-m', 'sph-16g-m', 'sph-16h-m']:
for sname in ['sph-16b-m', 'sph-16c-m', 'sph-16d-m']:
    positions[sname] = {}
    source = ROOT.TFile.Open(utils.getSkimPath(sname, 'monoph'))
    tree = source.Get('events')
    # Preselect candidate events once with an entry list.
    tree.Draw('>>elist', 'photons.scRawPt[0] > 175. && t1Met.pt > 170. && t1Met.photonDPhi > 0.5 && t1Met.minJetDPhi > 0.5', 'entrylist')
    elist = ROOT.gDirectory.Get('elist')
    tree.SetEntryList(elist)
    tree.SetBranchAddress('runNumber', arun)
    tree.SetBranchAddress('lumiNumber', alumi)
    tree.SetBranchAddress('eventNumber', aevent)
    tree.SetBranchAddress('photons.eta_', aeta)
    tree.SetBranchAddress('photons.phi_', aphi)
    ientry = 0
    while True:
        # Walk the entry list until GetEntryNumber signals exhaustion (< 0).
        ilocal = tree.GetEntryNumber(ientry)
        if ilocal < 0:
            break
        ientry += 1
        tree.GetEntry(ilocal)
        positions[sname][(arun[0], alumi[0], aevent[0])] = (aeta[0], aphi[0])
    print sname, len(positions[sname]), 'photons'
    source.Close()
outTrees = {}
outFiles = []
aieta = array.array('h', [0])
aiphi = array.array('h', [0])
sourcedir = '/mnt/hadoop/scratch/yiiyama/spike_event'
for fname in os.listdir(sourcedir):
    # Map the AOD file name to the corresponding skim sample name.
    if 'Run2016B' in fname:
        sname = 'sph-16b-m'
    elif 'Run2016C' in fname:
        sname = 'sph-16c-m'
    elif 'Run2016D' in fname:
        sname = 'sph-16d-m'
    elif 'Run2016E' in fname:
        sname = 'sph-16e-m'
    elif 'Run2016F' in fname:
        sname = 'sph-16f-m'
    elif 'Run2016G' in fname:
        sname = 'sph-16g-m'
    elif 'Run2016H' in fname:
        sname = 'sph-16h-m'
    # NOTE(review): if none of the patterns match, `sname` keeps its value
    # from the previous iteration.
    if sname not in ['sph-16b-m', 'sph-16c-m', 'sph-16d-m']:
        continue
    # File names encode the (run, lumi, event) triple.
    matches = re.match('.+AOD_([0-9]+)_([0-9]+)_([0-9]+)[.]root', fname)
    event = (int(matches.group(1)), int(matches.group(2)), int(matches.group(3)))
    position = positions[sname][event]
#    print event, position
    source = ROOT.TFile.Open(sourcedir + '/' + fname)
    tree = source.Get('outTree/hits')
    if sname not in outTrees:
        # First file of this sample: create its output file and clone the
        # input tree structure (0 entries) to collect matched hits.
        outFile = ROOT.TFile.Open(config.histDir + '/spikes/hits_' + sname + '.root', 'recreate')
        outFiles.append(outFile)
        outTree = tree.CloneTree(0)
        outTrees[sname] = outTree
    tree.SetBranchAddress('ieta', aieta)
    tree.SetBranchAddress('iphi', aiphi)
    ientry = 0
    while tree.GetEntry(ientry) > 0:
        ientry += 1
        # Convert crystal indices to approximate (eta, phi).
        eta = aieta[0] * 0.0174
        phi = (aiphi[0] - 10) / 180. * math.pi
        deta = position[0] - eta
        dphi = position[1] - phi
        # Wrap dphi into (-pi, pi].
        while dphi > math.pi:
            dphi -= 2. * math.pi
        while dphi < -math.pi:
            dphi += 2. * math.pi
        if deta * deta + dphi * dphi < 0.01:
            # Hit within dR ~ 0.1 of the photon: copy this entry and stop.
            tree.CopyAddresses(outTrees[sname])
            outTrees[sname].Fill()
            break
    else:
        print 'Matching photon not found for event', event
    # Disconnect branch addresses before closing the input file.
    tree.CopyAddresses(outTrees[sname], True)
    source.Close()
for tree in outTrees.itervalues():
    outFile = tree.GetCurrentFile()
    outFile.cd()
    tree.Write()
    outFile.Close()
|
[
"yiiyama@mit.edu"
] |
yiiyama@mit.edu
|
463dcc5b3f6bd9f93ff40fc5eea5cc0d69680a9e
|
83a637ff77108f2582397c4ca4b2e7953ef4e137
|
/categorical_embedder/processors/DiscriminativeWrapper.py
|
09dac41aa0d8882e5556a2fbe6f7a17d8590bdee
|
[
"Apache-2.0"
] |
permissive
|
erelcan/categorical-embedder
|
066e0e279826f27aae0e927744d745bd724ba340
|
376b8779500af2aa459c879f8e525f2ef25d6b31
|
refs/heads/master
| 2023-02-03T01:44:01.896677
| 2020-12-19T10:51:11
| 2020-12-19T10:51:11
| 322,824,753
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,574
|
py
|
from categorical_embedder.processors.ProcessorABC import ProcessorABC
class DiscriminativeWrapper(ProcessorABC):
    """Processor pairing a feature processor with a label processor for
    discriminative training.

    During training, ``data`` must be ``[features, labels]``; the processed
    features (minus their trailing main-embedding target) are returned
    together with a dict holding the main and discriminative targets.  At
    inference time the raw features are simply forwarded to the feature
    processor.
    """

    def __init__(self, feature_processor, label_processor):
        super().__init__()
        self._feature_processor = feature_processor
        self._label_processor = label_processor

    def process(self, data, training=True):
        if not training:
            # data: numpy ndarray of raw features; no labels at inference.
            return self._feature_processor.process(data, training=False)
        if not (isinstance(data, (list, tuple)) and len(data) == 2):
            raise Exception("Data for DiscriminativeWrapper should be a list or tuple with length 2, for training.")
        # Process both halves; the feature processor is expected to yield a
        # sequence whose last element is the main-embedding target.
        features = self._feature_processor.process(data[0])
        labels = self._label_processor.process(data[1])
        if not isinstance(features, (list, tuple)):
            raise Exception("Data for DiscriminativeWrapper should have at least 2 target data: one for main embedding, and one for discriminative.")
        return features[0:-1], {"main": features[-1], "discriminative": labels}

    def get_feature_processor(self):
        return self._feature_processor

    def get_label_processor(self):
        return self._label_processor
|
[
"erelcan89@gmail.com"
] |
erelcan89@gmail.com
|
0ffcd09026f2a8b1327d97c4bfc0ae63dcfbf8bf
|
711756b796d68035dc6a39060515200d1d37a274
|
/output_exocyst_tags/optimized_8006.py
|
92f04b4eb65847eae8c1440af4a4a0c165c44645
|
[] |
no_license
|
batxes/exocyst_scripts
|
8b109c279c93dd68c1d55ed64ad3cca93e3c95ca
|
a6c487d5053b9b67db22c59865e4ef2417e53030
|
refs/heads/master
| 2020-06-16T20:16:24.840725
| 2016-11-30T16:23:16
| 2016-11-30T16:23:16
| 75,075,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,845
|
py
|
# Chimera/VolumePath setup: obtain a `new_marker_set` factory that works on
# both newer (Marker_Set class) and older (volume_path_dialog) VolumePath APIs.
import _surface
import chimera
try:
    import chimera.runCommand
except:
    pass
from VolumePath import markerset as ms
try:
    from VolumePath import Marker_Set, Link
    new_marker_set=Marker_Set
except:
    # Older API: marker sets are created through the volume path dialog.
    from VolumePath import volume_path_dialog
    d= volume_path_dialog(True)
    new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
# One entry per tagged exocyst subunit terminus:
# (marker set name, (x, y, z) position, (r, g, b) colour, radius).
_MARKER_SPECS = [
    ('Sec3_GFPN', (541.596, 512.333, 499.687), (0.15, 0.78, 0.66), 21.9005),
    ('Sec3_GFPC', (568.86, 487.556, 533.851), (0.15, 0.78, 0.66), 31.586),
    ('Sec3_Anch', (443.376, 310.402, 599.636), (0.15, 0.58, 0.66), 26.9335),
    ('Sec5_GFPN', (502.226, 474.639, 462.699), (0.38, 0.24, 0.37), 21.9005),
    ('Sec5_GFPC', (560.207, 476.263, 566.453), (0.38, 0.24, 0.37), 31.586),
    ('Sec6_GFPN', (552.054, 434.475, 481.425), (0.84, 0.98, 0.24), 21.9005),
    ('Sec6_GFPC', (366.021, 454.286, 498.926), (0.84, 0.98, 0.24), 31.586),
    ('Sec6_Anch', (374.226, 646.956, 533.807), (0.84, 0.78, 0.24), 26.9335),
    ('Sec8_GFPC', (348.131, 467.846, 564.892), (0.62, 0.67, 0.45), 31.586),
    ('Sec8_Anch', (502.808, 301.01, 608.5), (0.62, 0.47, 0.45), 26.9335),
    ('Sec10_GFPN', (303.358, 468.232, 544.156), (0, 0.91, 0), 21.9005),
    ('Sec10_GFPC', (556.988, 502.166, 655.685), (0, 0.91, 0), 31.586),
    ('Sec10_Anch', (401.96, 607.326, 428.684), (0, 0.71, 0), 26.9335),
    ('Sec15_GFPN', (463.517, 451.821, 412.144), (0.11, 0.51, 0.86), 21.9005),
    ('Sec15_GFPC', (313.133, 480.912, 604.313), (0.11, 0.51, 0.86), 31.586),
    ('Sec15_Anch', (421.945, 580.637, 681.719), (0.11, 0.31, 0.86), 26.9335),
    ('Exo70_GFPN', (560.517, 511.605, 542.302), (0.89, 0.47, 0.4), 21.9005),
    ('Exo70_GFPC', (327.274, 424.61, 563.459), (0.89, 0.47, 0.4), 31.586),
    ('Exo70_Anch', (480.52, 659.803, 387.92), (0.89, 0.27, 0.4), 26.9335),
    ('Exo84_GFPN', (489.953, 490.909, 468.969), (0.5, 0.7, 0), 31.586),
    ('Exo84_GFPC', (549.547, 470.354, 589.704), (0.5, 0.7, 0), 31.586),
    ('Exo84_Anch', (447.784, 637.856, 623.069), (0.5, 0.5, 0), 26.9335),
]
# Create each marker set on first use, then place its marker.
for _name, _xyz, _rgb, _radius in _MARKER_SPECS:
    if _name not in marker_sets:
        s = new_marker_set(_name)
        marker_sets[_name] = s
    s = marker_sets[_name]
    mark = s.place_marker(_xyz, _rgb, _radius)
for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
|
[
"batxes@gmail.com"
] |
batxes@gmail.com
|
b2dcbad518e02a363f8d5ba7c30bfe5a7c22ce1e
|
2a54e8d6ed124c64abb9e075cc5524bb859ba0fa
|
/.history/1-Python-Basics/30-sets2_20200413211158.py
|
368c37538e1350070bdbcc7b3a0b51dc8b9c681f
|
[] |
no_license
|
CaptainStorm21/Python-Foundation
|
01b5fbaf7a913506518cf22e0339dd948e65cea1
|
a385adeda74f43dd7fb2d99d326b0be23db25024
|
refs/heads/master
| 2021-05-23T01:29:18.885239
| 2020-04-23T19:18:06
| 2020-04-23T19:18:06
| 253,171,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,001
|
py
|
# Demonstrates set methods.  NOTE(review): difference/intersection/union/
# issuperset/isdisjoint all RETURN their result without modifying the
# receiver, and set display order is arbitrary -- the original "#output"
# comments did not match actual behaviour and have been corrected.
my_set = { 342, 23, 1, 2, 3, 9, 10, 9 }   # duplicate 9 collapses to one
your_set = [ 342, 23, 42, 46, 53, 34, 10 ]
print(my_set)
# Elements of my_set that are not in your_set: {1, 2, 3, 9}.
print(my_set.difference(your_set))
# discard(10) removes 10 in place (and would not raise if it were absent).
my_set.discard(10)
print(my_set)
# NOTE(review): the return value is discarded -- this call has no effect.
my_set.intersection(your_set)
print(my_set)
my_set1 = { 3, 3, 4, 10, 14, 23, 1, 2, 3, 9, 10, 9 }
your_set1= [ 342, 23, 42, 46, 53, 34, 10 ]
# NOTE(review): result discarded; isdisjoint never modifies the set anyway.
my_set1.isdisjoint(your_set1)
print(my_set1)
my_set2 = { 342, 23, 1, 2, 3, 9, 10, 9 }
your_set2 = [ 342, 23, 42, 46, 53, 34, 10 ]
# NOTE(review): union returns a new set; the result is discarded here.
my_set2.union(your_set2)
print(my_set2)
my_set3 = { 8888, 23, 1, 2, 3, 9, 10, 9 }
your_set3 = [ 342, 23, 42, 46, 53, 34, 10 ]
# NOTE(review): boolean result discarded.
my_set3.issuperset(your_set3)
print(my_set3)
my_set3 = { 8888, 23, 1, 2, 3, 9, 10, 9 }
your_set3 = [ 342, 23, 42, 46, 53, 34, 10 ]
# False: the two collections share 23 and 10.
print(my_set3.isdisjoint(your_set3))
|
[
"tikana4@yahoo.com"
] |
tikana4@yahoo.com
|
26591cf0c6446d0ec56368b1f6e560280d10c616
|
c01a58ecd6614128e3c29a70e3e768b220a2a4a2
|
/common/xrd-ui-tests-python/helpers/mockrunner.py
|
76c1f651e438d70bb1f13223c5d66af078e33553
|
[
"MIT"
] |
permissive
|
nordic-institute/X-Road-tests
|
772a6d7485606c1f10b61a1260b8fb66111bf0be
|
e030661a0ad8ceab74dd8122b751e88025a3474a
|
refs/heads/develop
| 2021-06-03T01:38:20.542859
| 2019-03-18T12:16:18
| 2019-03-18T12:16:18
| 125,643,677
| 2
| 3
|
MIT
| 2018-06-14T15:09:21
| 2018-03-17T15:36:32
|
Python
|
UTF-8
|
Python
| false
| false
| 4,103
|
py
|
from helpers import ssh_client
import re
import time
class MockRunner:
    '''
    Class that tries to control the mock service script (SoapUI MockRunner) over an SSH connection. Uses
    ssh_helper.SSHClient component.

    Connects to SSH server, sends a one-liner command and then waits until a specified regex matches output or a timeout
    occurs. To stop the service, sends a single keycode (Ctrl-C by default).
    '''
    running = False  # Internal variable - service running or not
    error = None  # Last error
    command = None  # Mock start command
    debug = False

    def __init__(self, host, username, password, command,
                 ready_regex='.*\[SoapUIMockServiceRunner\] Started.*', ready_timeout=60, stop_keycode=3):
        '''
        Initialize the class and open the SSH connection.

        :param host: str - hostname of the server
        :param username: str - username
        :param password: str - password
        :param command: str - mock service start command, one-liner (semicolons can be used for command sequence)
        :param ready_regex: str - regex to wait for until concluding that the service is up and running
        :param ready_timeout: int - service start timeout in seconds; if this passes, starting failed
        :param stop_keycode: int - keycode to send to kill the service; can be Ctrl-C (3) or Enter (13) for SoapUI
        '''
        self.ssh = ssh_client.SSHClient(host=host, username=username, password=password)
        self.command = command
        self.ready_regex = re.compile(ready_regex)
        self.ready_timeout = ready_timeout
        self.stop_keycode = stop_keycode

    def start(self):
        '''
        Tries to start the mock service.
        :return: bool - if the service was started
        '''
        # No errors by default
        self.error = None

        # If the service is already running, set an error and fail start (return False)
        if self.running:
            self.error = 'Already running'
            return False

        # Set running to be true to block other start requests.
        # NOTE(review): `running` stays True even when startup times out below;
        # preserved from the original behaviour -- callers use restart()/stop().
        self.running = True

        # Execute command over SSH, line reading timeout is 1 second
        self.ssh.exec_command(self.command, timeout=1)

        # Get the current time to check for timeout
        start_time = time.time()
        while True:
            try:
                line = self.ssh.readline()
                if not line:
                    # No output this second; treat like a failed read so the
                    # timeout check below runs.
                    raise RuntimeError
                if self.debug:
                    # Print line for logging
                    print(line)
                # If the line matches the specified regex, mock is running, break the loop.
                if self.ready_regex.match(line):
                    break
            except Exception:
                # Bug fix: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.  readline() failures and the
                # RuntimeError raised above both land here.
                # If time limit passed, set an error and return False
                if time.time() > start_time + self.ready_timeout:
                    self.error = 'Mock start timeout'
                    return False
        return True

    def restart(self):
        '''
        Restart mock service.
        :return:
        '''
        # If already running, stop it.
        if self.running:
            self.stop()
        # Start again.
        self.start()

    def stop(self):
        '''
        Stop the mock service.
        :return:
        '''
        if self.running:
            if self.debug:
                print("Mock stopping")
            # Send a stop character and flush it.  Best-effort: a dead
            # connection should not prevent marking the service stopped.
            try:
                self.ssh.write(chr(self.stop_keycode), flush=True)
            except Exception:
                # Bug fix: was a bare `except:` (swallowed SystemExit etc.).
                pass
            # Not running and no error.
            self.running = False
            self.error = None

    def get_error(self):
        '''
        Returns the last error.
        :return: str|None - last error message
        '''
        return self.error
|
[
"mardu@varuosakeskus.ee"
] |
mardu@varuosakeskus.ee
|
1b994a2be95d50152c5761e91816f512f0cd103d
|
1676168244eed1c5610b2c1c38f692f89990b112
|
/part4-ML/from_1021_Django/django_movie_second/movies/urls.py
|
a7004f938653e54f8ea8c49830a0f6a92dc15b22
|
[] |
no_license
|
gtpgg1013/AI_docs
|
351e83f986d66224c82fff2de944753c98336d03
|
43f8eed8b2732314bd40ed65e1d7eb44dd28fc04
|
refs/heads/master
| 2022-12-09T17:32:02.992554
| 2019-11-20T09:03:56
| 2019-11-20T09:03:56
| 182,927,565
| 1
| 0
| null | 2022-12-08T06:50:23
| 2019-04-23T03:54:56
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 533
|
py
|
from django.urls import path
from . import views
# URLconf for the movies app: CRUD views for movies plus nested rating routes.
app_name = 'movies'  # namespace for reversing, e.g. 'movies:detail'
urlpatterns = [
    path('', views.index, name="index"),
    path('new/', views.new, name="new"),
    path('<int:movie_pk>/', views.detail, name="detail"),
    path('<int:movie_pk>/edit/', views.edit, name="edit"),
    path('<int:movie_pk>/delete/', views.delete, name="delete"),
    path('<int:movie_pk>/ratings/new/', views.new_rating, name="new_rating"),
    path('<int:movie_pk>/ratings/<int:rating_pk>/delete/', views.delete_rating, name='delete_rating'),
]
|
[
"gtpgg1013@gmail.com"
] |
gtpgg1013@gmail.com
|
6063ac49e1b928783643b11ac36053dbf051478d
|
2c1429a1bd2d0477fd88119d4d778fc68c82adcf
|
/python/DeepSeaSceneLighting/SceneShadowManagerPrepare.py
|
f17117f268680fdab051407968ae346b70024422
|
[
"Apache-2.0"
] |
permissive
|
akb825/DeepSea
|
d7ac54f6d8243d43d6ea538159f3067ab7e79880
|
5a909b4f51717bc59682e51ad6aa598a25a9b965
|
refs/heads/master
| 2023-08-31T23:45:19.533393
| 2023-08-29T07:30:36
| 2023-08-29T07:30:43
| 142,716,767
| 10
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,601
|
py
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: DeepSeaSceneLighting
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class SceneShadowManagerPrepare(object):
    # NOTE(review): FlatBuffers-generated accessor (see "do not modify" header
    # above) -- hand edits will be lost on regeneration.
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        # Wrap *buf* as a table rooted at the uoffset stored at *offset*.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SceneShadowManagerPrepare()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsSceneShadowManagerPrepare(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)

    # SceneShadowManagerPrepare
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # SceneShadowManagerPrepare
    def ShadowManager(self):
        # Returns the optional string field at vtable slot 4, or None if unset.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None
# Generated builder helpers: construct a SceneShadowManagerPrepare table
# (the short aliases mirror the long names for the flat namespace API).
def SceneShadowManagerPrepareStart(builder):
    builder.StartObject(1)

def Start(builder):
    SceneShadowManagerPrepareStart(builder)

def SceneShadowManagerPrepareAddShadowManager(builder, shadowManager):
    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(shadowManager), 0)

def AddShadowManager(builder, shadowManager):
    SceneShadowManagerPrepareAddShadowManager(builder, shadowManager)

def SceneShadowManagerPrepareEnd(builder):
    return builder.EndObject()

def End(builder):
    return SceneShadowManagerPrepareEnd(builder)
|
[
"akb825@gmail.com"
] |
akb825@gmail.com
|
abdafe161112ab293d182ea37b60e0b2175203e4
|
1eaa6c2500868d0c60b5b2cd552cd671b635de32
|
/Algorithm/sword of offer/2.替换空格.py
|
d830f7aee41e3cd0884252602f7cb3b806b7c3bc
|
[] |
no_license
|
jiangyuwei666/my-study-demo
|
f85f14a599c328addb5af09078d404f1139e0a82
|
9e2baef2f36f071f8903768adb8d5a5a8c1123f6
|
refs/heads/master
| 2022-04-30T16:47:24.715570
| 2022-03-24T09:08:43
| 2022-03-24T09:08:43
| 152,565,041
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 241
|
py
|
def solution_by_py(s):
    """Replace every space in *s* with the URL escape '%20'."""
    return s.replace(' ', '%20')
def solution_by_py1(s):
    """Replace spaces with '%20' by splitting on single spaces and rejoining."""
    return '%20'.join(s.split(' '))
# def solution_by_re(s):
#
# Smoke test: both variants should print 'a%20b%20c'.
print(solution_by_py('a b c'))
print(solution_by_py1('a b c'))
|
[
"739843128@qq.com"
] |
739843128@qq.com
|
382ea429b816b3ca6a1ce86c23964238e697ed53
|
5796cdc0bf59ed09e1493804bd86e982daf73f7f
|
/python/interp.py
|
d5bb30cfd8ef1975b51b3ba3c6504a05101289a1
|
[] |
no_license
|
pgDora56/BrainfxckInterpreter
|
ec67113b4480a8b328fde126932ac1061f2999d0
|
c3544922e4422633d869266b5a9035d87806c92c
|
refs/heads/master
| 2020-09-20T06:36:23.508956
| 2019-12-09T08:13:05
| 2019-12-09T08:13:05
| 224,401,128
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,506
|
py
|
import sys


def run(program):
    """Interpret the Brainfuck *program* string, printing any '.' output.

    Supported commands: > < + - . [ ] (the ',' input command is not
    implemented, matching the original script).  Raises Exception when the
    pointer leaves the allowed range or a '[' has no matching ']'.
    """
    ptr = 0
    memory = [0]
    pc = 0            # program counter
    loop_starts = []  # stack of '[' positions for currently open loops
    while pc < len(program):
        c = program[pc]
        if c == '>':
            # Grow the tape on demand, with the original's sanity cap.
            if ptr >= 1000000:
                raise Exception("ptr is too large")
            ptr += 1
            while len(memory) <= ptr:
                memory.append(0)
        elif c == '<':
            if ptr == 0:
                raise Exception("ptr must be positive value")
            ptr -= 1
        elif c == '+':
            memory[ptr] += 1
        elif c == '-':
            memory[ptr] -= 1
        elif c == '.':
            # Bug fix: the original print() appended a newline after every
            # output byte; Brainfuck '.' emits just the bare character.
            print(chr(memory[ptr]), end='')
        elif c == '[':
            if memory[ptr] == 0:
                # Skip forward to the instruction AFTER the matching ']'.
                # Bug fix: the original advanced one instruction too far
                # here, silently skipping the command that followed the loop.
                depth = 1
                pc += 1
                while depth > 0:
                    if pc >= len(program):
                        raise Exception("] isn't found.")
                    if program[pc] == '[':
                        depth += 1
                    elif program[pc] == ']':
                        depth -= 1
                    pc += 1
                continue
            loop_starts.append(pc)
        elif c == ']':
            if memory[ptr] != 0:
                # Jump back to just after the matching '[' (kept on stack).
                pc = loop_starts[-1] + 1
                continue
            loop_starts.pop()
        pc += 1


if __name__ == '__main__':
    # Bug fix: the original fell through to the interpreter loop (raising a
    # NameError on `program`) when no filename argument was supplied.
    if len(sys.argv) == 2:
        with open(sys.argv[1]) as f:
            run(f.read())
|
[
"doradora.prog@gmail.com"
] |
doradora.prog@gmail.com
|
f94f3789185b61d958643300f7e8cde8600bad3e
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_23037.py
|
3bd2a710d8fe38850715610ce1eaa8fd6bce9240
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141
| 2015-10-22T19:19:40
| 2015-10-22T19:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
# create dictionary based on filter in sqlalchemy
# Builds {group name -> list of its users}, eager-loading Group.users in a
# single joined query to avoid N+1 lazy loads.  Assumes `Group` is a
# Flask-SQLAlchemy model and `db` the SQLAlchemy handle -- defined
# elsewhere (TODO confirm against the surrounding application).
by_name = {g.name: g.users for g in Group.query.options(db.joinedload(Group.users))}
|
[
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
|
658baaf3e894c98deb60ec089ad7d2c063bd3ff8
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/humanfriendly/humanfriendly/tables.pyi
|
0b324b1f32331119ff29d6ec1a91025a38402f4f
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 304
|
pyi
|
from typing import Any

# Type stubs for humanfriendly.tables: render tabular *data* (iterable of
# rows) with the given *column_names* in various plain-text formats.
def format_smart_table(data, column_names): ...
def format_pretty_table(data, column_names: Any | None = ..., horizontal_bar: str = ..., vertical_bar: str = ...): ...
def format_robust_table(data, column_names): ...
def format_rst_table(data, column_names: Any | None = ...): ...
|
[
"intellij-monorepo-bot-no-reply@jetbrains.com"
] |
intellij-monorepo-bot-no-reply@jetbrains.com
|
068979971645c1203a8ee605549bd08a140f44c8
|
2d8a3a9b15e76bacd24e1627a124f0f60a83b253
|
/sfepy/terms/terms_new.py
|
a3b8b1b2718a856c97abc55b196a36c84c25a6e7
|
[
"BSD-3-Clause"
] |
permissive
|
cheon7886/sfepy
|
2722ae15bb52cdf20bac264771c32b1b051bb2ae
|
2e9eb78341f9072ad07424221a64306c95c5ebd1
|
refs/heads/master
| 2021-01-19T19:33:11.938856
| 2015-03-29T08:56:35
| 2015-03-29T08:56:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,892
|
py
|
"""
todo:
- get row variable, col variable (if diff_var)
- determine out shape
- set current group to all variable arguments
- loop over row/col dofs:
- call term
? how to deal with components of (vector) variables?
(*) for given group, Variable has to be able to:
- evaluate value in quadrature points
- evaluate gradient in quadrature points
- ? evaluate divergence in quadrature points
- ? lazy evaluation, cache!
?? base function gradients in space elements stored now in terms - in
geometry, shared dict of geometries belongs to Equations
-> where to cache stuff? - in variables!
"""
import numpy as nm
from sfepy.base.base import output
from sfepy.terms.terms import Term, get_shape_kind
from sfepy.terms.utils import get_range_indices
from sfepy.mechanics.tensors import get_full_indices
from sfepy.linalg import dot_sequences as dot
class NewTerm(Term):
    """
    Base class of the experimental "new" term implementation: a term
    evaluates itself in quadrature points via ``__call__`` on its variable
    arguments, DOF-by-DOF (see the module docstring for the design notes).
    """

    def get_geometry_key(self, variable):
        """
        Build the geometry cache key for *variable* in the term's current
        element group; for trace variables, switch to the mirror region.
        """
        is_trace = self.arg_traces[variable.name]
        geometry_type = self.geometry_types[variable.name]

        region_name, iorder, ig = self.get_current_group()
        if is_trace:
            region, ig_map, ig_map_i = self.region.get_mirror_region()
            region_name = region.name
            ig = ig_map_i[ig]

        ap = variable.get_approximation(ig)
        key = (region_name, iorder, geometry_type, ap.name)

        return key, ig

    def get_geometry(self, variable):
        # Return (reference mapping, geometry cache key, group index).
        key, ig = self.get_geometry_key(variable)

        geo = self.get_mapping(variable)[0]

        return geo, key, ig

    def set_current_group(self, ig):
        """
        Set current group for the term and all variables in its
        arguments.
        """
        self.char_fun.set_current_group(ig)

        shape_kind = get_shape_kind(self.integration)
        for var in self.get_variables():
            geo, geo_key, geo_ig = self.get_geometry(var)
            var.setup_bases(geo_key, geo_ig, geo, self.integral, shape_kind)
            var.set_current_group(geo_key, geo_ig)

    def integrate(self, val_qp, variable):
        """
        Integrate quadrature-point values *val_qp* over elements using the
        geometry associated with *variable*.
        """
        shape_kind = get_shape_kind(self.integration)
        geo, _, _ = self.get_geometry(variable)

        sh = val_qp.shape
        val = nm.zeros((sh[0], 1, sh[2], sh[3]), dtype=val_qp.dtype)
        if shape_kind == 'volume':
            geo.integrate(val, val_qp)

        else:
            # NOTE(review): identical to the 'volume' branch above -- either
            # the surface path was meant to differ, or the branch can be
            # collapsed.
            geo.integrate(val, val_qp)

        return val

    def evaluate(self, mode='eval', diff_var=None, **kwargs):
        """
        Evaluate the term.

        - mode 'eval': sum the integrated values over all groups (scalar).
        - mode 'weak': assemble residual vectors (diff_var is None) or
          tangent-matrix blocks (diff_var names the column variable),
          built DOF-by-DOF via the term's ``__call__``.
        - modes 'el_avg'/'qp' are not implemented.
        """
        shape_kind = get_shape_kind(self.integration)

        if mode == 'eval':
            var = self.get_variables()[0]

            val = 0.0
            for ig in self.iter_groups():
                args = self.get_args(**kwargs)
                val_qp = self(*args, **kwargs)

                _val = self.integrate(val_qp, var)
                val += self.sign * _val.sum()

        elif mode in ('el_avg', 'qp'):
            raise NotImplementedError()

        elif mode == 'weak':
            varr = self.get_virtual_variable()

            vals = []
            iels = []

            if diff_var is None:
                # Residual: one entry per row DOF of the virtual variable.
                for ig in self.iter_groups():
                    args = self.get_args(**kwargs)

                    aux = varr.get_data_shape(ig, self.integral,
                                              shape_kind, self.region.name)
                    n_elr, n_qpr, dim, n_enr, n_cr = aux
                    n_row = n_cr * n_enr

                    shape = (n_elr, 1, n_row, 1)

                    val = nm.zeros(shape, dtype=varr.dtype)
                    for ir in varr.iter_dofs():
                        irs = slice(ir, ir + 1)

                        try:
                            val_qp = self(*args, **kwargs)

                        except ValueError:
                            output('%s term evaluation failed!' % self.name)
                            raise

                        _val = self.integrate(val_qp, varr)
                        val[..., irs, :] = _val

                    vals.append(self.sign * val)
                    iels.append((ig, nm.arange(n_elr, dtype=nm.int32)))

            else:
                # Tangent matrix w.r.t. diff_var: loop row and column DOFs.
                varc = self.get_variables(as_list=False)[diff_var]

                for ig in self.iter_groups():
                    args = self.get_args(**kwargs)

                    aux = varr.get_data_shape(ig, self.integral,
                                              shape_kind, self.region.name)
                    n_elr, n_qpr, dim, n_enr, n_cr = aux
                    n_row = n_cr * n_enr

                    aux = varc.get_data_shape(ig, self.integral,
                                              shape_kind, self.region.name)
                    n_elc, n_qpc, dim, n_enc, n_cc = aux
                    n_col = n_cc * n_enc

                    shape = (n_elr, 1, n_row, n_col)

                    val = nm.zeros(shape, dtype=varr.dtype)
                    for ir in varr.iter_dofs():
                        irs = slice(ir, ir + 1)
                        for ic in varc.iter_dofs():
                            ics = slice(ic, ic + 1)

                            try:
                                val_qp = self(*args, **kwargs)

                            except ValueError:
                                output('%s term evaluation failed!' % self.name)
                                raise

                            _val = self.integrate(val_qp, varr)
                            val[..., irs, ics] = _val

                    vals.append(self.sign * val)
                    iels.append((ig, nm.arange(n_elr, dtype=nm.int32)))

        # Setup return value.
        if mode == 'eval':
            out = (val,)

        else:
            out = (vals, iels)

        # Hack: add zero status.
        out = out + (0,)

        if len(out) == 1:
            out = out[0]

        return out
class NewDiffusionTerm(NewTerm):
    """
    Weak diffusion term using the new-style term interface:
    grad(v)^T . (K . grad(u)) evaluated at quadrature points.
    """
    name = 'dw_new_diffusion'
    arg_types = ('material', 'virtual', 'state')

    def __call__(self, mat, virtual, state, **kwargs):
        # Material flux first, then contract with the test gradient
        # ('ATB' transposes the first argument).
        flux = dot(mat, state.grad())
        return dot(virtual.grad(), flux, 'ATB')
class NewMassScalarTerm(NewTerm):
    """
    Scalar mass term: product of test and state values at quadrature
    points.
    """
    name = 'dw_new_mass_scalar'
    arg_types = ('virtual', 'state')

    def __call__(self, virtual, state, **kwargs):
        # v * u, both sampled in quadrature points.
        product = virtual.val() * state.val()
        return product
class NewMassTerm(NewTerm):
    """
    Works for both scalar and vector variables.
    """
    name = 'dw_new_mass'
    arg_types = ('virtual', 'state')

    def __call__(self, virtual, state, **kwargs):
        # Sum v_i * u_i over matching component pairs only; off-diagonal
        # pairs contribute nothing to a mass term.
        acc = virtual.get_element_zeros()
        for ir, irs in virtual.get_component_indices():
            for ic, ics in state.get_component_indices():
                if ir != ic:
                    continue
                acc += virtual.val(ir) * state.val(ic)
        return acc
class NewLinearElasticTerm(NewTerm):
    """
    Linear elasticity term using the new-style term interface.
    """
    name = 'dw_new_lin_elastic'
    arg_types = ('material', 'virtual', 'state')

    def __call__(self, mat, virtual, state, **kwargs):
        """
        Doubled out-of-diagonal strain entries!
        """
        # Component index pairs of the test and state variables.
        rindx = virtual.get_component_indices()
        cindx = state.get_component_indices()
        # Spatial dimension indices; kindx and lindx range over the same set.
        kindx = lindx = get_range_indices(state.dim)
        # fi maps a (component, dimension) pair to a flat index -
        # presumably the Voigt-style index into mat; TODO confirm against
        # get_full_indices().
        fi = nm.array(get_full_indices(state.dim))
        val = virtual.get_element_zeros()
        for ir, irs in rindx:
            for ik, iks in kindx:
                irk = fi[ir, ik]
                irks = slice(irk, irk + 1)
                # Test-function gradient component d v_ir / d x_ik.
                erk = virtual.grad(ir, ik)
                for ic, ics in cindx:
                    for il, ils in lindx:
                        icl = fi[ic, il]
                        icls = slice(icl, icl + 1)
                        # State gradient component d u_ic / d x_il.
                        ecl = state.grad(ic, il)
                        # Accumulate D[irk, icl] * grad(v) * grad(u).
                        val += mat[..., irks, icls] * erk * ecl
        return val
|
[
"cimrman3@ntc.zcu.cz"
] |
cimrman3@ntc.zcu.cz
|
1ae492172aa438a72336aba09bcd68fe23e03941
|
b90190cd97f1aa2a3168d3f25ce6832a1c22d1b2
|
/Code/models/arena/arena.py
|
0cb906374edd997cd4c0ec9067f3c7282035faab
|
[] |
no_license
|
stjordanis/Decentralized-and-multi-agent-control-of-Franka-Emika-Panda-robot-in-continuous-task-execution
|
7c002cd4dea95b1a1256172c6d8d38c6226199f9
|
c2b27e7f8059e3c29c876b60656f6a20d55e5da2
|
refs/heads/main
| 2023-07-31T20:47:24.476564
| 2021-10-01T08:44:19
| 2021-10-01T08:44:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 499
|
py
|
import numpy as np
from models.base import MujocoXML
from utils.mjcf_utils import array_to_string, string_to_array
class Arena(MujocoXML):
    """Base arena class."""

    def set_origin(self, offset):
        """Applies a constant offset to all objects."""
        shift = np.array(offset)
        # Every direct child of worldbody carrying a "pos" attribute is moved.
        for body in self.worldbody.findall("./*[@pos]"):
            moved = string_to_array(body.get("pos")) + shift
            body.set("pos", array_to_string(moved))
|
[
"noreply@github.com"
] |
stjordanis.noreply@github.com
|
699ff99fdcca12765962e862f59dd227a46dd7b7
|
edf7fc01b731c1d17324a1acd095bac93c3537ef
|
/test/test_sampling.py
|
ea93e3e3d7755398f50c401a7758c01032c10eae
|
[
"BSD-3-Clause"
] |
permissive
|
bernardotorres/profiling
|
f6d3d8a764c75ce6bb3478deb562c8160d9bad04
|
8763a5d11c4ebd06b5a90bdced0a01aaadf02687
|
refs/heads/master
| 2020-04-01T16:45:40.985548
| 2018-10-18T06:53:41
| 2018-10-18T06:53:41
| 153,396,990
| 0
| 0
|
BSD-3-Clause
| 2018-10-17T04:49:31
| 2018-10-17T04:49:31
| null |
UTF-8
|
Python
| false
| false
| 2,752
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import os
import signal
import sys
import pytest
from _utils import find_stats, spin
from profiling.sampling import SamplingProfiler
from profiling.sampling.samplers import ItimerSampler, TracingSampler
def spin_100ms():
    # Busy-spin for 0.1 s; target function sampled by the profiler tests.
    spin(0.1)


def spin_500ms():
    # Busy-spin for 0.5 s; five times spin_100ms, used for the ratio check.
    spin(0.5)
def _test_sampling_profiler(sampler):
    """Profile the two spin helpers and check their hit ratio is ~1:5."""
    profiler = SamplingProfiler(base_frame=sys._getframe(), sampler=sampler)
    with profiler:
        spin_100ms()
        spin_500ms()
    stat1 = find_stats(profiler.stats, 'spin_100ms')
    stat2 = find_stats(profiler.stats, 'spin_500ms')
    ratio = stat1.deep_hits / stat2.deep_hits
    # 1:5 expected, but tolerate (0.8~1.2):5
    assert 0.8 <= ratio * 5 <= 1.2
@pytest.mark.flaky(reruns=10)
def test_itimer_sampler():
    # Precondition: SIGPROF must still have its default disposition.
    assert signal.getsignal(signal.SIGPROF) == signal.SIG_DFL
    try:
        _test_sampling_profiler(ItimerSampler(0.0001))
        # no crash caused by SIGPROF.
        assert signal.getsignal(signal.SIGPROF) == signal.SIG_IGN
        for x in range(10):
            os.kill(os.getpid(), signal.SIGPROF)
        # respect custom handler.
        handler = lambda *x: x
        signal.signal(signal.SIGPROF, handler)
        _test_sampling_profiler(ItimerSampler(0.0001))
        assert signal.getsignal(signal.SIGPROF) == handler
    finally:
        # Always restore the default disposition for subsequent tests.
        signal.signal(signal.SIGPROF, signal.SIG_DFL)
@pytest.mark.flaky(reruns=10)
def test_tracing_sampler():
    # TracingSampler requires the optional yappi dependency.
    pytest.importorskip('yappi')
    _test_sampling_profiler(TracingSampler(0.0001))
@pytest.mark.flaky(reruns=10)
def test_tracing_sampler_does_not_sample_too_often():
    pytest.importorskip('yappi')
    # pytest-cov cannot detect a callback function registered by
    # :func:`sys.setprofile`.
    class fake_profiler(object):
        # Stand-in profiler that merely records frames passed to sample().
        samples = []

        @classmethod
        def sample(cls, frame):
            cls.samples.append(frame)

        @classmethod
        def count_and_clear_samples(cls):
            count = len(cls.samples)
            del cls.samples[:]
            return count

    sampler = TracingSampler(0.1)
    # First profile event triggers a sample...
    sampler._profile(fake_profiler, None, None, None)
    assert fake_profiler.count_and_clear_samples() == 1
    # ...a second event within the 0.1 s interval is skipped...
    sampler._profile(fake_profiler, None, None, None)
    assert fake_profiler.count_and_clear_samples() == 0
    spin(0.5)
    # ...and after the interval has elapsed, sampling resumes.
    sampler._profile(fake_profiler, None, None, None)
    assert fake_profiler.count_and_clear_samples() == 1
def test_not_sampler():
    # SamplingProfiler rejects a sampler argument of the wrong type.
    with pytest.raises(TypeError):
        SamplingProfiler(sampler=123)
def test_sample_1_depth():
    """Sampling the outermost frame (depth 1) must not crash."""
    # Walk up the frame stack until there is no caller left.
    top = sys._getframe()
    while top.f_back is not None:
        top = top.f_back
    assert top.f_back is None
    profiler = SamplingProfiler()
    profiler.sample(top)
|
[
"sub@subl.ee"
] |
sub@subl.ee
|
94a491393853c4e7890846273a17e6a7fd0545c2
|
422ce4dad362cd9a1112965e6c5df17d13fe2287
|
/econom_game/teams/migrations/0012_auto_20180830_1921.py
|
fa4b37689379a6dbbb2ad4f08fb70b61f95351cc
|
[] |
no_license
|
zzaakiirr/econom_game
|
22bfc7f8b009ab4e6366a912df731f5a234da506
|
56f0ca2e29e17b18cc7ec5248e66066bb061bc19
|
refs/heads/master
| 2020-03-16T15:05:21.872872
| 2018-09-06T11:10:08
| 2018-09-06T11:10:08
| 132,727,278
| 3
| 2
| null | 2018-08-30T22:32:43
| 2018-05-09T08:38:26
|
Python
|
UTF-8
|
Python
| false
| false
| 501
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-08-30 19:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter Team.id to an auto-created AutoField primary key."""

    dependencies = [
        ('teams', '0011_auto_20180829_1251'),
    ]

    operations = [
        migrations.AlterField(
            model_name='team',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
|
[
"zzaakiirr@gmail.com"
] |
zzaakiirr@gmail.com
|
de13a9ae414154f6bec5b36554ab9c6650cf7929
|
d93159d0784fc489a5066d3ee592e6c9563b228b
|
/Validation/RecoParticleFlow/Benchmarks/Tools/submit.py
|
87e493ec5ff2edf11475e492be5dbb1190793c81
|
[] |
permissive
|
simonecid/cmssw
|
86396e31d41a003a179690f8c322e82e250e33b2
|
2559fdc9545b2c7e337f5113b231025106dd22ab
|
refs/heads/CAallInOne_81X
| 2021-08-15T23:25:02.901905
| 2016-09-13T08:10:20
| 2016-09-13T08:53:42
| 176,462,898
| 0
| 1
|
Apache-2.0
| 2019-03-19T08:30:28
| 2019-03-19T08:30:24
| null |
UTF-8
|
Python
| false
| false
| 2,014
|
py
|
#!/usr/bin/env python
# to submit a benchmark webpage to the validation website
# author: Colin
import shutil, sys, os, valtools
from optparse import OptionParser
# Command-line options: -e appends an extension to the benchmark name,
# -f forces overwriting an existing benchmark on the website.
parser = OptionParser()
parser.usage = "usage: %prog"
parser.add_option("-e", "--extension", dest="extension",
                  help="adds an extension to the name of this benchmark",
                  default=None)
parser.add_option("-f", "--force", dest="force",action="store_true",
                  help="force the submission. Be careful!",
                  default=False)
(options,args) = parser.parse_args()

# The script takes no positional arguments.
if len(args)!=0:
    parser.print_help()
    sys.exit(1)

website = valtools.website()
# Target benchmark name on the website (optionally with -e extension).
bench = valtools.benchmark( options.extension )
# Local benchmark directory; never carries an extension.
localBench = valtools.benchmark()
print 'submitting from local: ', localBench
print ' to: ', bench

# Existing comparison pages referencing this benchmark become stale once
# it is replaced, so ask for confirmation before removing them.
comparisons = website.listComparisons( bench )
if len(comparisons)>0:
    print 'You are about to make the following list of comparison pages obsolete. These pages will thus be removed:'
    print comparisons
    answer = None
    while answer != 'y' and answer != 'n':
        answer = raw_input('do you agree? [y/n]')
    if answer == 'n':
        sys.exit(0)

# check that the user can write in the website
website.writeAccess()
bench.makeRelease( website )
if bench.exists( website ) == True:
    if options.force == False:
        print 'please use the -e option to choose another extension'
        print ' e.g: submit.py -e Feb10'
        print 'or force it.'
        sys.exit(1)
    else:
        print 'overwriting...'
        shutil.rmtree(bench.benchmarkOnWebSite(website))

# local benchmark. this one does not have an extension!
shutil.copytree(localBench.fullName(), bench.benchmarkOnWebSite(website) )
print 'done. Access your benchmark here:'
print bench.benchmarkUrl( website )

# removing comparisons
# COMPARISONS COULD ALSO BE REDONE.
for comparison in comparisons:
    rm = 'rm -rf '+comparison
    os.system(rm)
|
[
"giulio.eulisse@gmail.com"
] |
giulio.eulisse@gmail.com
|
4b0ad8b0dbc902231fb8f660ff4c04d461fd7e54
|
bb6e80f7deff48a720d04850d9b4fd2bb379e14d
|
/ExpEYES17/UserManual/fr/rst/exp/prettyLaTeX.py
|
ae038f27591a7cd40285480463f4fb97f8fcf025
|
[] |
no_license
|
EduCodeBe/expeyes-programs
|
6a04881c4c2c4a198999baf57802508985ad8a06
|
2464866382155ed4c951962be4313fdcfe73dcec
|
refs/heads/master
| 2020-03-22T15:21:29.519546
| 2018-07-08T17:21:32
| 2018-07-08T17:21:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,241
|
py
|
#!/usr/bin/python3
import os, sys, re
def filterSVG(t, verbose=False):
    """
    Replace every '.svg' extension by '.pdf' (for included graphics).
    """
    if verbose:
        sys.stderr.write("replacing .svg extensions by .pdf for included graphics\n")
    # The pattern is a literal string, so plain replacement suffices.
    return t.replace(".svg", ".pdf")
def filterPNG(t, verbose=False):
    """
    Replace every '.png' extension by '.pdf' (for included graphics).
    """
    if verbose:
        sys.stderr.write("replacing .png extensions by .pdf for included graphics\n")
    # Literal substitution; no regex features needed.
    return t.replace(".png", ".pdf")
def filterJPG(t, verbose=False):
    """
    Replace every '.jpg' extension by '.pdf' (for included graphics).
    """
    if verbose:
        sys.stderr.write("replacing .jpg extensions by .pdf for included graphics\n")
    # Literal substitution; no regex features needed.
    return t.replace(".jpg", ".pdf")
def filterNastyUnicode(t, verbose=False):
    """
    Replace problematic Unicode characters (thin/em spaces, midline
    ellipsis, the Unicode minus sign, vertical arrows) by LaTeX-safe
    equivalents.
    """
    if verbose:
        sys.stderr.write("removing nasty unicode chars\n")
    toReplace = {
        "\u2005": " ",       # four-per-em space
        "\u2003": " ",       # em space
        "\u200a": " ",       # hair space
        "\u22ef": "\\dots",  # midline horizontal ellipsis
        "\u2212": "-",       # Unicode minus sign
        "↑": "",
        "↓": "",
    }
    # Bug fix: the original used re.sub(code, repl, t). The replacement
    # template "\dots" contains the invalid escape \d, which re.sub
    # rejects with re.error ("bad escape \d") on Python >= 3.7, so the
    # filter crashed on any input. All mappings are literal characters,
    # so plain str.replace is both correct and simpler.
    for code, repl in toReplace.items():
        t = t.replace(code, repl)
    return t
def filterSphinxIncludeGraphics(t, verbose=False):
    """
    Drop the empty line between two consecutive
    ``\\noindent\\sphinxincludegraphics`` lines so the images are typeset
    in one paragraph.
    """
    if verbose:
        # Trailing newline added for consistency with the other filters.
        sys.stderr.write("remove empty lines between SphinxIncludeGraphics\n")
    pattern = re.compile(r"\\noindent\\sphinxincludegraphics.*")
    lines = t.split("\n")
    # Robustness fix: the original unconditionally kept "the two first
    # lines" and raised IndexError on single-line input.
    if len(lines) < 2:
        return t
    new = [lines[0], lines[1]]  # always keep the two first lines
    for i in range(2, len(lines)):
        if pattern.match(new[-2]) and new[-1] == "" and pattern.match(lines[i]):
            new[-1] = lines[i]  # this drops the empty line
        else:
            new.append(lines[i])
    return "\n".join(new)
# Filters applied in order to the LaTeX file produced by Sphinx.
filters=(
    filterSVG,
    filterPNG,
    filterJPG,
    filterNastyUnicode,
    filterSphinxIncludeGraphics
)
if __name__=="__main__":
    # Usage: prettyLaTeX.py <buildDir> <texFile>
    buildDir=sys.argv[1]
    texFile=sys.argv[2]
    t=""
    with open(buildDir+"/"+texFile) as infile:
        t=infile.read()
    for f in filters:
        t=f(t, verbose=True)
    # Write to a temporary file first, then replace the original in one
    # rename, so a crash mid-write cannot truncate the .tex file.
    with open(buildDir+"/"+texFile+".tmp","w") as outfile:
        outfile.write(t)
    os.rename(buildDir+"/"+texFile+".tmp", buildDir+"/"+texFile)
|
[
"georgesk@debian.org"
] |
georgesk@debian.org
|
03c83a59017b89259eefe545f24d5e0bce961cf1
|
9e85747a446175575533485593054834971cd372
|
/colegio/educa/migrations/0003_auto_20200620_1553.py
|
d394ddec856dac48584594ac20e8286dd0d25f87
|
[] |
no_license
|
JTorero/Colegios
|
63337038c1b67cc8dcf419a5d35d89b9342ec6b0
|
e0403d0cd3ea8ebfbe8f0d7804270eb398c8e560
|
refs/heads/master
| 2022-11-09T02:12:03.604676
| 2020-06-22T03:58:55
| 2020-06-22T03:58:55
| 273,765,254
| 0
| 2
| null | 2020-06-22T03:58:56
| 2020-06-20T18:43:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,190
|
py
|
# Generated by Django 2.2.7 on 2020-06-20 20:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: add Periodo and the Aula_Periodo join model; tweak Aula meta."""

    dependencies = [
        ('educa', '0002_alumno'),
    ]

    operations = [
        migrations.CreateModel(
            name='Periodo',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('nombre_periodo', models.CharField(max_length=50)),
            ],
        ),
        migrations.AlterModelOptions(
            name='aula',
            options={'ordering': ['id'], 'verbose_name': 'aula', 'verbose_name_plural': 'aulas'},
        ),
        migrations.AlterModelTable(
            name='aula',
            table='educa_aula',
        ),
        # Many-to-many link between Aula and Periodo via an explicit model.
        migrations.CreateModel(
            name='Aula_Periodo',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('aula', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='educa.Aula')),
                ('periodo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='educa.Periodo')),
            ],
        ),
    ]
|
[
"mail@mail.com"
] |
mail@mail.com
|
367414398a374e2ad96f456f195a778d28ec7824
|
40c4b0c31a5870a9201d3d42a63c5547092e5912
|
/frappe/website/doctype/personal_data_download_request/test_personal_data_download_request.py
|
71e269f0700206e2de4862dc22964e0a0d24b531
|
[
"MIT"
] |
permissive
|
ektai/frappe3
|
fab138cdbe15bab8214cf623d9eb461e9b9fb1cd
|
44aa948b4d5a0d729eacfb3dabdc9c8894ae1799
|
refs/heads/master
| 2022-12-25T15:48:36.926197
| 2020-10-07T09:19:20
| 2020-10-07T09:19:20
| 301,951,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,047
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
import json
from frappe.website.doctype.personal_data_download_request.personal_data_download_request import get_user_data
from frappe.contacts.doctype.contact.contact import get_contact_name
class TestRequestPersonalData(unittest.TestCase):
    """Tests for the Personal Data Download Request doctype (user data export)."""

    def setUp(self):
        # Ensure the test user exists before each test.
        create_user_if_not_exists(email='test_privacy@ektai.mail')

    def tearDown(self):
        # Drop any download requests created during the test.
        frappe.db.sql("""DELETE FROM `tabPersonal Data Download Request`""")

    def test_user_data_creation(self):
        # Compare the Contact section of get_user_data() against a direct
        # query; both sides are round-tripped through JSON (default=str)
        # to normalize non-JSON-native values such as dates.
        user_data = json.loads(get_user_data('test_privacy@ektai.mail'))
        contact_name = get_contact_name('test_privacy@ektai.mail')
        expected_data = {'Contact': frappe.get_all('Contact', {"name": contact_name}, ["*"])}
        expected_data = json.loads(json.dumps(expected_data, default=str))
        self.assertEqual({'Contact': user_data['Contact']}, expected_data)

    def test_file_and_email_creation(self):
        # Saving a request as the user should attach exactly one export
        # file and queue a "Download Your Data" notification e-mail.
        frappe.set_user('test_privacy@ektai.mail')
        download_request = frappe.get_doc({
            "doctype": 'Personal Data Download Request',
            'user': 'test_privacy@ektai.mail'
        })
        download_request.save(ignore_permissions=True)

        frappe.set_user('Administrator')
        file_count = frappe.db.count('File', {
            'attached_to_doctype':'Personal Data Download Request',
            'attached_to_name': download_request.name
        })
        self.assertEqual(file_count, 1)

        email_queue = frappe.get_all('Email Queue',
            fields=['message'],
            order_by="creation DESC",
            limit=1)
        self.assertTrue("Subject: Download Your Data" in email_queue[0].message)
        frappe.db.sql("delete from `tabEmail Queue`")
def create_user_if_not_exists(email, first_name = None):
    # Despite the name, this always recreates the user so each test run
    # starts from a clean record.
    frappe.delete_doc_if_exists("User", email)
    frappe.get_doc({
        "doctype": "User",
        "user_type": "Website User",
        "email": email,
        "send_welcome_email": 0,
        "first_name": first_name or email.split("@")[0],
        "birth_date": frappe.utils.now_datetime()
    }).insert(ignore_permissions=True)
|
[
"63931935+ektai@users.noreply.github.com"
] |
63931935+ektai@users.noreply.github.com
|
c5a1c49864b92fb6c4dcd7ee138f89563b247dae
|
24fe1f54fee3a3df952ca26cce839cc18124357a
|
/servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/license/consumedtask.py
|
755a8455d7a73d4dbda910708d94f6333059703c
|
[] |
no_license
|
aperiyed/servicegraph-cloudcenter
|
4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff
|
9eb7975f2f6835e1c0528563a771526896306392
|
refs/heads/master
| 2023-05-10T17:27:18.022381
| 2020-01-20T09:18:28
| 2020-01-20T09:18:28
| 235,065,676
| 0
| 0
| null | 2023-05-01T21:19:14
| 2020-01-20T09:36:37
|
Python
|
UTF-8
|
Python
| false
| false
| 5,767
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class ConsumedTask(Mo):
    """
    Mo doc not defined in techpub!!!
    """
    # NOTE: auto-generated ACI model class (see file header: "written by
    # zen warriors, do not modify!") - edits are lost on regeneration.
    meta = ClassMeta("cobra.model.license.ConsumedTask")

    meta.moClassName = "licenseConsumedTask"
    meta.rnFormat = "consumedTask-%(licenseType)s"
    meta.category = MoCategory.REGULAR
    meta.label = "Entitlement Consumed"
    meta.writeAccessMask = 0x1
    meta.readAccessMask = 0x1
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False

    # Containment: children and the single parent class.
    meta.childClasses.add("cobra.model.license.ConsumedInstDef")
    meta.childClasses.add("cobra.model.license.ConsumedAppInstDef")
    meta.childNamesAndRnPrefix.append(("cobra.model.license.ConsumedInstDef", "instDef-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.license.ConsumedAppInstDef", "appDef-"))
    meta.parentClasses.add("cobra.model.license.Holder")
    meta.rnPrefixes = [
        ('consumedTask-', True),
    ]

    # ---- Property metadata ----
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)

    # licenseType is the naming property (appears in the RN format above).
    prop = PropMeta("str", "licenseType", "licenseType", 36910, PropCategory.REGULAR)
    prop.label = "License Type"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    prop.defaultValue = 4
    prop.defaultValueStr = "leaf-base"
    prop._addConstant("apic-plugin-app", "apic-plugin-app", 101)
    prop._addConstant("ave-inst", "ave-inst", 13)
    prop._addConstant("fex-16-10g", "fex-16-10g", 0)
    prop._addConstant("fex-32-10g", "fex-32-10g", 1)
    prop._addConstant("fex-48-10g", "fex-48-10g", 2)
    prop._addConstant("fex-48-1g", "fex-48-1g", 3)
    prop._addConstant("leaf-24-port", "leaf-24-port", 14)
    prop._addConstant("leaf-48-port", "leaf-48-port", 15)
    prop._addConstant("leaf-adv-fabric-insight-add-on", "leaf-adv-fabric-insight-add-on", 18)
    prop._addConstant("leaf-adv-multi-pod", "leaf-adv-multi-pod", 9)
    prop._addConstant("leaf-adv-multi-site", "leaf-adv-multi-site", 8)
    prop._addConstant("leaf-base", "leaf-base", 4)
    prop._addConstant("leaf-ess-fabric-insight-basic", "leaf-ess-fabric-insight-basic", 17)
    prop._addConstant("leaf-ess-netflow", "leaf-ess-netflow", 5)
    prop._addConstant("leaf-ess-ptp", "leaf-ess-ptp", 7)
    prop._addConstant("leaf-ess-tetration", "leaf-ess-tetration", 6)
    prop._addConstant("leaf-plugin-app", "leaf-plugin-app", 100)
    prop._addConstant("leaf-sec", "leaf-sec", 10)
    prop._addConstant("leaf-storage", "leaf-storage", 12)
    prop._addConstant("spine-lc-sec", "spine-lc-sec", 11)
    prop._addConstant("vpod-ave", "vpod-ave", 19)
    prop._addConstant("vpod-vleaf", "vpod-vleaf", 20)
    prop._addConstant("vpod-vspine", "vpod-vspine", 21)
    meta.props.add("licenseType", prop)

    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)

    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    meta.namingProps.append(getattr(meta.props, "licenseType"))

    def __init__(self, parentMoOrDn, licenseType, markDirty=True, **creationProps):
        # licenseType is the sole naming value of this MO.
        namingVals = [licenseType]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"rrishike@cisco.com"
] |
rrishike@cisco.com
|
6ba1c6858b83940c3599f01f612860955bfeecd0
|
781e2692049e87a4256320c76e82a19be257a05d
|
/assignments/python/wc/src/311.py
|
ad8d0ddb3393ecd74fb6c04b5ba6a331d199648b
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
from collections import Counter
def word_count(words):
    """Return a Counter of words, lowercased and stripped of non-alphanumerics."""
    tally = Counter()
    for raw in words.split():
        # Keep only alphanumeric characters, then fold case.
        cleaned = ''.join(c for c in raw if c.isalnum()).lower()
        if cleaned:
            tally[cleaned] += 1
    return tally
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
fa89c2b5471eefbf222f8142835081003be7ccbb
|
610942849a2fac0b229af569c0c3db001d87eb94
|
/utest/libdoc/test_libdoc_api.py
|
de2ea0957c9f96a0f21a95212f20f403dd24236e
|
[
"Apache-2.0",
"CC-BY-3.0"
] |
permissive
|
jnhyperion/robotframework
|
a6befd1c2d50d08b7c625a73228b43c04769ca3d
|
559eb744c26f6acf11eb2d3a11be8343532c9a90
|
refs/heads/master
| 2023-01-27T12:50:41.962755
| 2022-08-24T08:33:03
| 2022-08-24T08:33:03
| 273,444,398
| 1
| 0
|
Apache-2.0
| 2023-01-13T08:09:17
| 2020-06-19T08:30:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,388
|
py
|
from io import StringIO
import sys
import tempfile
import unittest
from robot import libdoc
from robot.utils.asserts import assert_equal
class TestLibdoc(unittest.TestCase):
    """Smoke tests for the programmatic ``robot.libdoc`` entry point."""

    def setUp(self):
        # Capture whatever libdoc prints (normally the output path).
        sys.stdout = StringIO()

    def tearDown(self):
        sys.stdout = sys.__stdout__

    def _read(self, path):
        # Helper: contents of a generated spec file.
        with open(path) as f:
            return f.read()

    def test_html(self):
        path = tempfile.mkstemp(suffix='.html')[1]
        libdoc.libdoc('String', path)
        assert_equal(sys.stdout.getvalue().strip(), path)
        assert '"name": "String"' in self._read(path)

    def test_xml(self):
        path = tempfile.mkstemp(suffix='.xml')[1]
        libdoc.libdoc('String', path)
        assert_equal(sys.stdout.getvalue().strip(), path)
        assert 'name="String"' in self._read(path)

    def test_format(self):
        # Explicit format overrides the (missing) file extension.
        path = tempfile.mkstemp()[1]
        libdoc.libdoc('String', path, format='xml')
        assert_equal(sys.stdout.getvalue().strip(), path)
        assert 'name="String"' in self._read(path)

    def test_quiet(self):
        # quiet=True suppresses the path printout but still writes the file.
        path = tempfile.mkstemp(suffix='.html')[1]
        libdoc.libdoc('String', path, quiet=True)
        assert_equal(sys.stdout.getvalue().strip(), '')
        assert '"name": "String"' in self._read(path)
assert '"name": "String"' in f.read()
if __name__ == '__main__':
unittest.main()
|
[
"peke@iki.fi"
] |
peke@iki.fi
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.