blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b13046ceab6991b6f1d95e04fbe41b7ce103755b
|
888899f0cb3e6e7b28a9de39001a1fd1c177cd35
|
/COMPLETE PYTHON-3 COURSE/Chapter-05-LIST/summary.py
|
857ad7b8e6e4441e7a901e9c5b8d6851a307ef48
|
[] |
no_license
|
VivakaNand/COMPLETE_PYTHON_3
|
ef162d71d3a44bf661fcc1a8aacce31e7953cd7c
|
b3b835afe7671fdc3d29d912650fd4ccd3bc83f6
|
refs/heads/master
| 2023-02-04T10:13:41.881939
| 2020-12-23T08:30:51
| 2020-12-23T08:30:51
| 323,839,528
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,027
|
py
|
# Chapter summary: Python lists.
# A list is an ordered, mutable container that can hold values of any type.

# Creating a list of strings.
words = ["word1", "word2"]

# Lists may mix types freely: ints, a nested list, a string, a float, None.
mixed = [1, 2, 3, [4, 5, 6], 'seven', 8.0, None]

# Indexing (lists keep insertion order):
#   mixed[0] -> 1
#   mixed[3] -> [4, 5, 6]
#
# Adding items:
#   mixed.append("10")          - appends one element at the end
#   mixed.append([10, 20, 30])  - appends the whole list as ONE element
#   mixed.extend([10, 20, 30])  - appends each element individually
#   l = l1 + l2                 - concatenates two lists into a new list
#   mixed.insert(1, 'inserted') - inserts at a specific position
#
# Removing items:
#   popped = mixed.pop()        - removes and returns the last item
#   popped = mixed.pop(1)       - removes and returns the item at index 1
#   mixed.remove('seven')       - removes the first matching value
#   del mixed[3]                - deletes the item at index 3

# Iterating over a list.
for item in mixed:
    print(item)
|
[
"vivekjetani83@gmail.com"
] |
vivekjetani83@gmail.com
|
6f331f833f6106821b1fbc0630bb3491154a5ed3
|
28a9cc19537f7264421afeb9883962aa480c2616
|
/login/models.py
|
dfc8567340a7277a6b236ffa42c5bf8ad2a3ca0c
|
[] |
no_license
|
ujjwalagrawal17/BrokerAppBackend
|
b33df886b389aabfcfe7278c3e41c99d13d4fbb3
|
1b8ffd18e4c5257d222c17b8aece3351b549b204
|
refs/heads/master
| 2021-01-22T21:23:18.807792
| 2017-03-18T19:06:44
| 2017-03-18T19:06:44
| 85,425,430
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 459
|
py
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class login_user(models.Model):
    """Broker-app user profile captured at registration/login."""

    # Person's display name.
    name = models.CharField(max_length=120, null=False, blank=False)
    # Name of the user's firm/business.
    firm_name = models.CharField(max_length=240, null=False, blank=False)
    # City the firm operates in.
    city = models.CharField(max_length=240, null=False, blank=False)
    # BUG FIX: PositiveSmallIntegerField caps at 32767 and cannot store a
    # phone number. BigIntegerField keeps integer semantics and the default
    # of 0 with sufficient range. Requires a schema migration.
    mobile = models.BigIntegerField(default=0)
    # Role of the user, "buyer" by default. NOTE: the field name keeps the
    # original misspelling ("catigory") because it maps to an existing column.
    catigory = models.CharField(max_length=120, null=False, blank=False, default="buyer")
|
[
"ujjwal.iitism@gmail.com"
] |
ujjwal.iitism@gmail.com
|
e35356aad8d52ce034b950fa1b84a9f27923a533
|
c5759366f8b2cb2e129df0637b62774225a0c41a
|
/code/tensor2tensor/tensor2tensor/data_generators/text_encoder_build_subword.py
|
89c6b9516e982d110e466e5b73735fd4f1e123fe
|
[
"Apache-2.0"
] |
permissive
|
cake-lab/transient-deep-learning
|
f8646a4386528aa147d8d3dcdff8089985870041
|
87c6717e4026801623cf0327e78ad57f51cb1461
|
refs/heads/master
| 2022-11-02T20:02:29.642997
| 2022-02-08T16:51:09
| 2022-02-08T16:51:09
| 227,036,173
| 11
| 1
|
Apache-2.0
| 2022-10-05T13:01:38
| 2019-12-10T05:27:50
|
Python
|
UTF-8
|
Python
| false
| false
| 2,973
|
py
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Program to build a SubwordTextEncoder.
The flags --min_count and --corpus_max_lines will affect the size of the
vocabulary. Try changing these flags until you get a vocabulary
of the size you want.
Example usage:
python data_generators/text_encoder_build_subword.py \
--corpus_filepattern=$DATA_DIR/my_problem-train-* \
--corpus_max_lines=12345 \
--output_filename=$DATA_DIR/my_problem.subword_text_encoder \
--logtostderr
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import tokenizer
import tensorflow as tf
# Command-line flags (legacy TF1 tf.flags API).

# Destination path for the serialized SubwordTextEncoder vocabulary.
tf.flags.DEFINE_string('output_filename', '/tmp/my.subword_text_encoder',
                       'where to store the SubwordTextEncoder')
# Glob of raw text files to count tokens from; mutually exclusive with
# --vocab_filepattern (enforced in main()).
tf.flags.DEFINE_string('corpus_filepattern', '',
                       'Corpus of one or more text files')
# Glob of pre-counted vocabulary files, one "word,count" entry per line.
tf.flags.DEFINE_string('vocab_filepattern', '', 'One or more vocabulary files '
                       '(one word per line as "word,count")')
# Minimum subtoken count; passed to build_from_token_counts in main().
tf.flags.DEFINE_integer('min_count', 5, 'Minimum subtoken count in corpus')
tf.flags.DEFINE_integer('corpus_max_lines', 10000,
                        'How many lines of corpus to read')
# Number of vocabulary-building iterations.
tf.flags.DEFINE_integer('num_iterations', 4, 'Number of iterations')
tf.flags.DEFINE_bool('split_on_newlines', True, 'Break corpus into lines.')
FLAGS = tf.flags.FLAGS
def main(unused_argv):
  """Count tokens from a corpus or vocab file and build a SubwordTextEncoder.

  Exactly one of --corpus_filepattern or --vocab_filepattern must be set;
  the resulting encoder is written to --output_filename.
  """
  has_corpus = bool(FLAGS.corpus_filepattern)
  has_vocab = bool(FLAGS.vocab_filepattern)

  # Reject ambiguous or missing input sources up front.
  if has_corpus and has_vocab:
    raise ValueError(
        'Must only provide one of --corpus_filepattern or --vocab_filepattern')
  if not (has_corpus or has_vocab):
    raise ValueError(
        'Must provide one of --corpus_filepattern or --vocab_filepattern')

  if has_corpus:
    token_counts = tokenizer.corpus_token_counts(
        FLAGS.corpus_filepattern,
        FLAGS.corpus_max_lines,
        split_on_newlines=FLAGS.split_on_newlines)
  else:
    token_counts = tokenizer.vocab_token_counts(FLAGS.vocab_filepattern,
                                                FLAGS.corpus_max_lines)

  encoder = text_encoder.SubwordTextEncoder()
  encoder.build_from_token_counts(token_counts, FLAGS.min_count,
                                  FLAGS.num_iterations)
  encoder.store_to_file(FLAGS.output_filename)


if __name__ == '__main__':
  tf.app.run()
|
[
"ozymandias@OzymandiasdeMacBook-Pro.local"
] |
ozymandias@OzymandiasdeMacBook-Pro.local
|
ff77547cc6d5321804ab90dbd2386f9f3b515921
|
fff54b01b46cef0bbc70a6469c88c01c82af5a57
|
/network/library/glib-networking/actions.py
|
660dea0d9d3cf5ffb5633a05765dd140c9dcdf02
|
[] |
no_license
|
LimeLinux/Packages
|
e51deae6c0d1406e31f06caa5aaa7749466bef0b
|
d492e075d8b051df68b98c315ad0628e33a8fac4
|
refs/heads/master
| 2021-01-11T12:37:22.150638
| 2018-08-30T18:24:32
| 2018-08-30T18:24:32
| 77,054,292
| 5
| 19
| null | 2018-02-02T17:24:06
| 2016-12-21T13:33:45
|
Python
|
UTF-8
|
Python
| false
| false
| 725
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/copyleft/gpl.txt
from pisi.actionsapi import get
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
def setup():
    # Configure glib-networking: static libraries and installed tests
    # disabled, the system CA bundle wired in, GnuTLS and PKCS#11 enabled.
    autotools.configure("--disable-static \
                         --disable-installed-tests \
                         --with-ca-certificates=/etc/ssl/certs/ca-certificates.crt \
                         --with-gnutls \
                         --with-pkcs11")
def build():
    # Run the default make target.
    autotools.make()
def install():
    # Install into the package staging directory (DESTDIR).
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
    # Ship the standard documentation files with the package.
    pisitools.dodoc("AUTHORS", "ChangeLog", "COPYING", "NEWS", "README")
|
[
"zirkovandersen@gmail.com"
] |
zirkovandersen@gmail.com
|
744dd80c6dd301c986dfd766d02f90b8df0c7590
|
2af6a5c2d33e2046a1d25ae9dd66d349d3833940
|
/res/scripts/client/gui/scaleform/daapi/view/lobby/historicalbattles/__init__.py
|
319e96efe3c7f12b469f2b5042230f76267adc3d
|
[] |
no_license
|
webiumsk/WOT-0.9.12-CT
|
e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2
|
2506e34bd6634ad500b6501f4ed4f04af3f43fa0
|
refs/heads/master
| 2021-01-10T01:38:38.080814
| 2015-11-11T00:08:04
| 2015-11-11T00:08:04
| 45,803,240
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 1,685
|
py
|
# 2015.11.10 21:27:13 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/historicalBattles/__init__.py
from gui.Scaleform.framework import GroupedViewSettings, ViewTypes, ScopeTemplates
from gui.Scaleform.framework.package_layout import PackageBusinessHandler
from gui.Scaleform.genConsts.PREBATTLE_ALIASES import PREBATTLE_ALIASES
from gui.app_loader.settings import APP_NAME_SPACE
from gui.shared import EVENT_BUS_SCOPE
def getViewSettings():
    # Imported lazily so the window class is only loaded when the package's
    # view settings are requested.
    from gui.Scaleform.daapi.view.lobby.historicalBattles.HistoricalBattlesListWindow import HistoricalBattlesListWindow
    # One grouped window view keyed by the historical-battles list alias.
    return (GroupedViewSettings(PREBATTLE_ALIASES.HISTORICAL_BATTLES_LIST_WINDOW_PY, HistoricalBattlesListWindow, 'historicalBattlesListWindow.swf', ViewTypes.WINDOW, '', PREBATTLE_ALIASES.HISTORICAL_BATTLES_LIST_WINDOW_PY, ScopeTemplates.DEFAULT_SCOPE, True),)
def getBusinessHandlers():
    # A single handler instance wires this package's aliases to the event bus.
    return (_HistoricalBattlesBusinessHandler(),)
class _HistoricalBattlesBusinessHandler(PackageBusinessHandler):
    """Routes historical-battles window events within the lobby app.

    NOTE: decompiled source (see trailing decompiler banner) — kept verbatim.
    """

    def __init__(self):
        # Map the list-window alias to its show callback, scoped to the lobby.
        listeners = ((PREBATTLE_ALIASES.HISTORICAL_BATTLES_LIST_WINDOW_PY, self.__showHBListWindow),)
        super(_HistoricalBattlesBusinessHandler, self).__init__(listeners, APP_NAME_SPACE.SF_LOBBY, EVENT_BUS_SCOPE.LOBBY)

    def __showHBListWindow(self, _):
        # Alias and view definition name are identical for this window.
        alias = name = PREBATTLE_ALIASES.HISTORICAL_BATTLES_LIST_WINDOW_PY
        self.loadViewWithDefName(alias, name)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\lobby\historicalbattles\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:27:13 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
aec9135ff3f8ea294de2287b9c3fb015c1842ecb
|
9c9abdf101ce10d170de060155d7e96b244112eb
|
/logicmind/tokens/nop.py
|
d17faec06205634f0c31c421b359bde0fbb21eb9
|
[
"MIT"
] |
permissive
|
gridl/Py-Utils
|
b914aef6b527d5e24972c2b2559937ffe14f8f54
|
96e554ef4da7f9f94d405f523bd234db7dca96a7
|
refs/heads/master
| 2020-11-29T08:30:59.015303
| 2019-04-27T13:45:31
| 2019-04-27T13:45:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 299
|
py
|
from tokens.token import Token
class Not(Token):
    """Logical-negation operator token (unary)."""

    # Accepted spellings of this operator in input expressions.
    representations = ['¬', '!']
    # Canonical single-character form.
    single_char_representation = '¬'

    def __init__(self):
        # One operand; precedence 1 (semantics defined by the Token base).
        super().__init__(operands=1, precedence=1)

    def apply(self, right):
        # Boolean negation of the single (right-hand) operand.
        return not right

    def __repr__(self):
        return '¬'
|
[
"totufals@hotmail.com"
] |
totufals@hotmail.com
|
5cd3e967959c0a4211a1c671d6336fbd4c832a7a
|
3288a3e1ac9fe24260e6eb3e54234cf1a9c6e33a
|
/model/rage.py
|
3941517a4a82de7a3cddb1758bb223a744cab090
|
[] |
no_license
|
phamdinhkhanh/alltherage
|
691ea098cb485df84db230af1f0bb376e1a8201f
|
94f253dbc5b830dc9d1b76680c9b41a05a6c3f16
|
refs/heads/master
| 2021-01-23T14:49:57.214474
| 2017-07-30T09:23:38
| 2017-07-30T09:23:38
| 93,261,643
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,282
|
py
|
from mongoengine import *
from flask_restful import Resource, reqparse
import mlab
class Rage(Document):
    """A product ("rage") item persisted in MongoDB via mongoengine."""

    name = StringField()
    url = StringField()
    description = StringField()
    old_price = FloatField()
    new_price = FloatField()
    discount_rate = FloatField()
    is_favorite = BooleanField()
    number_seen = IntField()
    code = StringField()

    def get_json(self):
        """Return the full mlab JSON representation of this document."""
        return mlab.item2json(self)

    def get_oid(self):
        """Return only the Mongo ObjectId wrapper: {"$oid": ...}."""
        # Renamed local (was `str`) — it shadowed the builtin.
        doc = mlab.item2json(self)
        oid = doc["_id"]["$oid"]
        return {
            "$oid": oid
        }

    def get_json_oid(self):
        """Return a flat dict of all item fields plus its ObjectId string."""
        doc = mlab.item2json(self)
        oid = doc["_id"]["$oid"]
        return {
            "oid": oid,
            "name": self.name,
            "url": self.url,
            "description": self.description,
            "old_price": self.old_price,
            "new_price": self.new_price,
            "discount_rate": self.discount_rate,
            "is_favorite": self.is_favorite,
            "number_seen": self.number_seen,
            "code": self.code
        }
class RageInfo(Document):
    """Free-form info text attached to a referenced Rage item."""

    rage = ReferenceField("Rage")
    info = StringField()

    def get_json(self):
        # Embed the referenced item's JSON next to the info text.
        rage_json = self.rage.get_json()
        return {"rage": rage_json, "info": self.info}
|
[
"phamdinhkhanh.tkt53.neu@gmail.com"
] |
phamdinhkhanh.tkt53.neu@gmail.com
|
a05f388b7fed9deac9f7b8e1e5e439e90ec715a9
|
84d2efd222fa190c8b3efcad083dcf2c7ab30047
|
/test.py
|
fc24c2054c5968948fcc906e963e832aa2a418a6
|
[] |
no_license
|
webclinic017/Capstone-2
|
aedfc8692647f2e84114da5b2e32856d0de80586
|
d476723f7893c7c5da14e24f28736a8f0ba7ff55
|
refs/heads/master
| 2023-01-23T06:44:36.868373
| 2020-12-03T19:44:51
| 2020-12-03T19:44:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,642
|
py
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import sklearn.linear_model
import scipy.stats as stats
import pandas_market_calendars as mcal
from alpha_vantage.timeseries import TimeSeries
# NOTE(review): API key hard-coded in source — should live in an environment
# variable, and this key should be rotated.
api_key = '8FIYTT49ZEZT2GV5'
# Alpha Vantage client configured to return pandas DataFrames.
ts = TimeSeries(key=api_key, output_format='pandas')
# Full daily price history for the SPY ETF (network call).
data, meta_data = ts.get_daily(symbol='SPY', outputsize = 'full')
data = data.reset_index()
# Encode dates as YYYYMMDD integers to use as the regression input.
data['date'] = data['date'].dt.strftime('%Y%m%d')
data['date'] = data['date'].values.astype(int)
X = np.c_[data['date']]
Y = np.c_[data['4. close']]
# Flatten the Nx1 column vectors into plain lists.
X = [i[0] for i in X]
Y = [i[0] for i in Y]
X = X[::-1] #REVERSING ORDER (oldest date first)
Y = Y[::-1] #REVERSING ORDER
# 0-based index of the most recent trading day.
last_day = len(X) - 1
th_day = list(range(0,last_day+1))
def YYYY_MM_DD_to_th_day(YMD):
    """Convert a 'YYYY-MM-DD' date string to its 0-based NYSE trading-day
    index, counting from 1999-11-01.

    BUG FIX: the original body referenced an undefined global `nyse` and
    raised NameError whenever called; the calendar is now created locally
    from the module-level `pandas_market_calendars` import (`mcal`).
    """
    nyse = mcal.get_calendar('NYSE')
    early = nyse.schedule(start_date='1999-11-01', end_date=YMD)
    return len(early) - 1
# Fit a 4th-degree polynomial: closing price as a function of the
# YYYYMMDD-encoded date integers.
# NOTE(review): YYYYMMDD integers are not evenly spaced in time — presumably
# intentional given the tuning notes below, but worth confirming.
model = np.polyfit(X, Y, 4)
std = np.std(Y)
# Query the model at a single test date.
testdate = [[20210101]]
testprediction = np.polyval(model, testdate)
# Hypothetical price one standard deviation above the model prediction.
testprice = testprediction[0] + (1 * std)
zscore = float((testprice - testprediction[0]) / std)
probability = 0
cdf = stats.norm.cdf(zscore)
# Tail probability of the observed z-score (whichever side is smaller).
if(cdf <= .5):
    probability = cdf
elif(cdf >= .5):
    probability = 1-cdf
print(testprediction)
#NEW CODE STARTING BELOW
#OK, .01 std tests
#get the best trade per day
#choose...idk
# Disabled draft code kept as an inert string literal by the author.
'''
shortPrices = 0
longPrices = 0
currentPrice = Y[0]
reward = longPrices - currentPrice
risk = currentPrice - shortPrices
print(reward)
print(risk)
'''
#OPTIMIZATION TESTS
#ULTRA SHORT TERM (original: 20211218), TODAY : 360
#1st Degree : 255 in a week, 255 in a month, 263 in 6 months
#2nd Degree : 320 in a week, 320 in a month, 347 in 6 months
#3rd Degree : 325 in a week, 325 in a month, 356 in 6 months
#4th Degree : 323 in a week, 323 in a month, 351 in 6 months
#5th Degree : 323 in a week, 323 in a month, 351 in 6 months
#SHORT TERM
#1st Degree : 264 in 2021, 273 in 2022
#2nd Degree : 349 in 2021, 381 in 2022
#3rd Degree : 359 in 2021, 396 in 2022
#4th Degree : 353 in 2021, 385 in 2022
#5th Degree : 353 in 2021, 385 in 2022
#ULTRA LONG TERM (assuming downturns every 10 years)
#1st Degree : 344 in 2030, 434 in 2040
#2nd Degree : 704 in 2030, 1282 in 2040
#3rd Degree : 804 in 2030, 1652 in 2040
#4th Degree : 649 in 2030, 759 in 2040
#5th Degree : 648 in 2030, 745 in 2040
#COMPARISON TO EXTERNAL PREDICTIONS (11/26/25, $486)
#1st Degree: 300
#2nd Degree: 487
#3rd Degree: 523
#4th Degree: 484
#5th Degree: 484
#BEST RESULT : 4th Degree
#ACCURACY TEST
#Today: 0.5Z
#March Low : -1.5Z
#February High : 0.27Z
#2009 Low : -0.71Z
#2007 High : 0.66Z
|
[
"noreply@github.com"
] |
webclinic017.noreply@github.com
|
fe41e57f2ed88a9306816bc86c1326ed3f15f4a5
|
853d7901c4bdc7db8e655092c9939741b4f86161
|
/886.py
|
be36ff32f774dcec2059ca91e5828d87b1299df8
|
[
"MIT"
] |
permissive
|
wilbertgeng/LeetCode_exercise
|
904d6a3f91d94f451b40f3760131aefaa8584b3b
|
f00c08e0d28ffa88d61d4262c6d1f49f1fa91ebc
|
refs/heads/main
| 2023-03-16T01:25:00.514922
| 2021-03-15T06:12:59
| 2021-03-15T06:12:59
| 347,856,240
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
"""886. Possible Bipartition"""
class Solution(object):
def possibleBipartition(self, N, dislikes):
"""
:type N: int
:type dislikes: List[List[int]]
:rtype: bool
"""
seen = {}
self.graph = collections.defaultdict(list)
for (u, v) in dislikes: # Create graph
self.graph[u].append(v)
self.graph[v].append(u)
for i in range(1, N+1):
if i not in seen:
if self.check(seen, i, self.graph) == False:
return False
return True
def check(self, seen, i, graph):
q = [(i, 1)]
while q:
pos, color = q.pop(0)
if pos in seen:
if seen[pos] != color:
return False
continue
seen[pos] = color
vertices = graph[pos]
for v in vertices:
q.append((v, -color))
return True
|
[
"wilbertgeng@gmail.com"
] |
wilbertgeng@gmail.com
|
748c4782c7cd76f5ae63a10cc29668cecc5cb385
|
70fa4bc22afd3d0527888d382827c7c2e1269b8a
|
/examples/columbia_river_crossing.py
|
263f0b7f942ff977e4ec41aba47835a0ecbf4025
|
[
"BSD-2-Clause"
] |
permissive
|
moorepants/EfficientRoutes
|
20029f19ed5ec79d484660d8f963f4e78d2d899d
|
2705b643b95cb7921dc3216d534aa5bdbff302a1
|
refs/heads/master
| 2020-04-06T06:47:52.698417
| 2012-06-28T09:12:30
| 2012-06-28T09:12:30
| 4,477,900
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,056
|
py
|
#!/usr/bin/env python
# This example compares two routes connecting Portland, Oregon to Vancouver,
# Washington over a bridge across the Columbia River. The planned bicycle route
# sends the bicyclist through various elevation changes and several yield
# signs, stop signs, and traffic signals where as the automobiles get to travel
# across level ground with no stops. These simulations compare the bicyclist's
# energy expenditure, trip distance, and trip time through the two routes.
import numpy as np

from efficientroutes.model import Bicyclist, Route, Trip

# NOTE(review): Python 2 source (print statements); kept verbatim.

# Load a bicyclist (default rider parameters).
bicyclist = Bicyclist()

# Load the planned bicycle route data and collect the distances at which a
# traffic-control device (non-empty 'traffic_control' cell) forces a stop.
bicycleRouteData = np.recfromcsv('../data/columbia_river_crossing_bicycle.csv')
stopLocations = []
for i, device in enumerate(bicycleRouteData['traffic_control']):
    if device != '':
        stopLocations.append(bicycleRouteData['distance'][i])
bicycleRoute = Route(bicycleRouteData['distance'],
        bicycleRouteData['elevation'], bicycleRouteData['speed_limit'],
        stopLocations=np.array(stopLocations))

# Simulate the trip across the planned bicycle route, then report and plot.
bicycleTrip = Trip(bicyclist, bicycleRoute)
bicycleTrip.solve()
print "===================="
print "Bicycle route stats:"
print "===================="
bicycleTrip.stats()
bicycleFig = bicycleTrip.plot()
bicycleFig.suptitle('Bicycle Route')
bicycleFig.set_figheight(8.0)
bicycleFig.savefig('../data/columbia_river_crossing_bicycle.png', dpi=200)
bicycleFig.show()

# Load the automobile path: no stop locations; the speed limit is reduced by
# 17.88 (presumably m/s, i.e. 40 mph, to cap the cyclist's speed — confirm
# against the Route units).
autoRouteData = np.recfromcsv('../data/columbia_river_crossing_auto.csv')
autoRoute = Route(autoRouteData['distance'],
        autoRouteData['elevation'], autoRouteData['speed_limit'] - 17.88)

# Simulate the trip across the automobile route.
autoTrip = Trip(bicyclist, autoRoute)
autoTrip.solve()
print "======================="
print "Automobile route stats:"
print "======================="
autoTrip.stats()
autoFig = autoTrip.plot()
autoFig.suptitle('Automobile Route')
autoFig.set_figheight(8.0)
autoFig.savefig('../data/columbia_river_crossing_auto.png', dpi=200)
autoFig.show()

# Load the "best" route: has traffic controls AND the reduced speed limit.
bestRouteData = np.recfromcsv('../data/columbia_river_crossing_best.csv')
stopLocations = []
for i, device in enumerate(bestRouteData['traffic_control']):
    if device != '':
        stopLocations.append(bestRouteData['distance'][i])
bestRoute = Route(bestRouteData['distance'],
        bestRouteData['elevation'], bestRouteData['speed_limit'] - 17.88,
        stopLocations=np.array(stopLocations))

# Simulate the trip across the best route.
bestTrip = Trip(bicyclist, bestRoute)
bestTrip.solve()
print "================="
print "Best route stats:"
print "================="
bestTrip.stats()
bestFig = bestTrip.plot()
bestFig.suptitle('Best Route')
bestFig.set_figheight(8.0)
bestFig.savefig('../data/columbia_river_crossing_best.png', dpi=200)
bestFig.show()
|
[
"moorepants@gmail.com"
] |
moorepants@gmail.com
|
8aa1667e96ff01d3a43197b476b881ceb027dc8e
|
d3be0d693440c618d211bc3801a29b885041786a
|
/scripts/migrations/label_test.py
|
45e6a66c1d0b77179e304048114bec7d94c2c009
|
[
"Apache-2.0"
] |
permissive
|
jimpallomeni/buck
|
9479b048e59ee1d0a78b3c0c30cb98af61920fe3
|
0d752267ca1ea6f93ac1966bac75e6168df0254c
|
refs/heads/master
| 2021-07-05T08:27:30.295952
| 2017-09-27T22:34:01
| 2017-09-28T00:18:46
| 105,082,899
| 0
| 0
| null | 2017-09-28T00:22:08
| 2017-09-28T00:22:08
| null |
UTF-8
|
Python
| false
| false
| 1,358
|
py
|
import label
import unittest
class LabelTest(unittest.TestCase):
    """Unit tests for label.from_string parsing and build-file resolution.

    Locals renamed from `l` to `lbl` — PEP 8 (E741) flags `l` as ambiguous.
    """

    def test_can_parse_full_label_from_string(self):
        lbl = label.from_string('cell//package:name')
        self.assertEqual(lbl.name, 'name')
        self.assertEqual(lbl.package, 'package')
        self.assertEqual(lbl.cell, 'cell')

    def test_can_parse_label_without_cell(self):
        lbl = label.from_string('//package:name')
        self.assertEqual(lbl.name, 'name')
        self.assertEqual(lbl.package, 'package')
        self.assertIsNone(lbl.cell)

    def test_can_parse_label_with_multilevel_package(self):
        lbl = label.from_string('cell//pkg/subpkg:name')
        self.assertEqual(lbl.name, 'name')
        self.assertEqual(lbl.package, 'pkg/subpkg')
        self.assertEqual(lbl.cell, 'cell')

    def test_cannot_parse_invalid_label(self):
        # A single slash separator (instead of //) must be rejected.
        with self.assertRaisesRegex(AssertionError, "Invalid label 'cell/pkg:name'"):
            label.from_string('cell/pkg:name')

    def test_can_resolve_path_to_build_file(self):
        lbl = label.from_string('cell//pkg:name')
        cell_roots = {
            'cell': '/repo/cell',
        }
        self.assertEqual('/repo/cell/pkg/BUCK', lbl.get_build_file_path(cell_roots, 'BUCK'))

    def test_can_convert_to_import_string(self):
        self.assertEqual('cell//pkg:name', label.from_string('cell//pkg:name').to_import_string())
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
6c7283f79ab27c859cffb7b7d39c93d67372cd59
|
164e0f43ef3ad4cb7f6b28dfdd2bfbaa66d38ce2
|
/Word_Pattern/Word_Pattern.py
|
c8e8770d0ef80e9476f394041e584d25a6bd9e7b
|
[] |
no_license
|
maoxx241/code
|
b217f2d10065d90f52cfa38788c99e238565b892
|
16e97ec5ee7ae9ffa69da2e001d15a86d73d2040
|
refs/heads/master
| 2021-07-11T14:25:35.098241
| 2020-11-25T14:01:56
| 2020-11-25T14:01:56
| 222,544,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 603
|
py
|
class Solution:
    def wordPattern(self, pattern: str, str: str) -> bool:
        """Return True if the words of `str` follow `pattern` under a
        one-to-one letter<->word mapping (e.g. "abba" / "dog cat cat dog").

        NOTE: the second parameter shadows the builtin `str`; the name is
        kept unchanged for interface compatibility.
        """
        lst = str.split()
        if len(lst) != len(pattern):
            return False
        dic = {}
        # Words already mapped to some letter — a set makes the reverse
        # lookup O(1); `lst[i] in dic.values()` was O(k) per character.
        used = set()
        for i, c in enumerate(pattern):
            if c not in dic:
                if lst[i] in used:
                    return False
                dic[c] = lst[i]
                used.add(lst[i])
            else:
                if lst[i] != dic[c]:
                    return False
        return True
|
[
"maomaoyu870@gmail.com"
] |
maomaoyu870@gmail.com
|
9ee85000310e262b188ffd12e9948483f3d681e7
|
7cb626363bbce2f66c09e509e562ff3d371c10c6
|
/multimodel_inference/py3_v1/sc3elsm.py
|
2d50cceedee31d0481ec347f123d4e6d48609f87
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
z0on/AFS-analysis-with-moments
|
76bfd6b0361ab7e9173144dbd21b6fa2c7bf1795
|
eea4735b3b6fbe31c4e396da3d798387884a1500
|
refs/heads/master
| 2023-07-31T20:49:20.865161
| 2023-07-19T06:57:32
| 2023-07-19T06:57:32
| 96,915,117
| 4
| 5
| null | 2020-09-02T17:39:08
| 2017-07-11T16:38:03
|
Python
|
UTF-8
|
Python
| false
| false
| 3,247
|
py
|
#!/usr/bin/env python3
# split, three epochs in each pop, asymmetric migration at same rates in all epochs
# n(para): 11
import matplotlib
matplotlib.use('PDF')
import moments
import pylab
import random
import matplotlib.pyplot as plt
import numpy as np
from numpy import array
from moments import Misc,Spectrum,Numerics,Manips,Integration,Demographics1D,Demographics2D
import sys
# Usage: sc3elsm.py <sfs> <pop1> <pop2> <proj1> <proj2> <mu> <gtime> [params]
infile=sys.argv[1]
pop_ids=[sys.argv[2],sys.argv[3]]
# Projection sizes (number of sampled alleles per population).
projections=[int(sys.argv[4]),int(sys.argv[5])]
# Optional 8th argument: file of space-separated starting parameter values;
# otherwise fall back to generic defaults (11 parameters).
if len(sys.argv)==9:
    params = np.loadtxt(sys.argv[8], delimiter=" ", unpack=False)
else:
    params=[1,1,1,1,1,1,1,1,1,1,0.01]
# mutation rate per sequenced portion of genome per generation: for A.millepora, 0.02
mu=float(sys.argv[6])
# generation time, in thousand years: 0.005 (5 years)
gtime=float(sys.argv[7])
# set Polarized=False below for folded AFS analysis
fs = moments.Spectrum.from_file(infile)
# Project the spectrum down to the requested sample sizes.
data=fs.project(projections)
ns=data.sample_sizes
np.set_printoptions(precision=3)
#-------------------
# split into unequal pop sizes with asymmetrical migration
def sc3ei(params , ns):
    """Three-epoch split model.

    The code applies migration at rate m in epochs 1 and 3 (same rate both
    directions) and complete isolation in epoch 2. params holds per-epoch
    population sizes (nu*), epoch durations (T1-T3), migration rate m, and
    p_misid, the fraction of SNPs with misidentified ancestral state.
    """
    # p_misid: proportion of misidentified ancestral states
    nu1_1, nu2_1, nu1_2,nu2_2,nu1_3,nu2_3,T1, T2, T3,m, p_misid = params
    sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1])
    fs = moments.Spectrum(sts)
    # Split the ancestral population into two.
    fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1])
    # Epoch 1: migration at rate m in both directions.
    fs.integrate([nu1_1, nu2_1], T1, m = np.array([[0, m], [m, 0]]))
    # Epoch 2: no migration.
    fs.integrate([nu1_2, nu2_2], T2, m = np.array([[0, 0], [0, 0]]))
    # Epoch 3: migration at rate m restored.
    fs.integrate([nu1_3, nu2_3], T3, m = np.array([[0, m], [m, 0]]))
    # Blend in the misidentified fraction by reversing the spectrum.
    return (1-p_misid)*fs + p_misid*moments.Numerics.reverse_array(fs)
func=sc3ei
# Optimization bounds for the 11 parameters (sizes, times, m, p_misid).
upper_bound = [100, 100, 100,100,100, 100, 100, 100,100, 200,0.25]
lower_bound = [1e-3,1e-3, 1e-3,1e-3,1e-3,1e-3,1e-3,1e-3,1e-3,1e-5,1e-5]
# Randomly perturb the starting values (up to 2-fold) so replicate runs
# start from different points.
params = moments.Misc.perturb_params(params, fold=2, upper_bound=upper_bound,
                              lower_bound=lower_bound)
poptg = moments.Inference.optimize_log(params, data, func,
                                   lower_bound=lower_bound,
                                   upper_bound=upper_bound,
                                   verbose=False, maxiter=30)
# extracting model predictions, likelihood and theta
model = func(poptg, ns)
ll_model = moments.Inference.ll_multinom(model, data)
theta = moments.Inference.optimal_sfs_scaling(model, data)
# random index for this replicate
ind=str(random.randint(0,999999))
# plotting demographic model
plot_mod = moments.ModelPlot.generate_model(func, poptg, ns)
moments.ModelPlot.plot_model(plot_mod, save_file="sc3elsm_"+ind+".png", pop_labels=pop_ids, nref=theta/(4*mu), draw_scale=False, gen_time=gtime, gen_time_units="KY", reverse_timeline=True)
# bootstrapping for SDs of params and theta
# printing parameters and their SDs
print( "RESULT","sc3elsm",ind,len(params),ll_model,sys.argv[1],sys.argv[2],sys.argv[3],poptg,theta)
# plotting quad-panel figure with AFS, model, residuals:
moments.Plotting.plot_2d_comp_multinom(model, data, vmin=0.1, resid_range=3,
                                    pop_ids =pop_ids)
plt.savefig("sc3elsm_"+ind+"_"+sys.argv[1]+"_"+sys.argv[2]+"_"+sys.argv[3]+"_"+sys.argv[4]+"_"+sys.argv[5]+'.pdf')
|
[
"matz@utexas.edu"
] |
matz@utexas.edu
|
661866a587111cbadec04ee46f867134a8b01025
|
ac192c0d64c31c33d76708b3f5a0062a842d59cf
|
/LearningCode/3_8_aroundTheWorld.py
|
ba68059b790cdb5bd2695a4bd3887c758b429be7
|
[
"Apache-2.0"
] |
permissive
|
jercas/PythonCrashCourse
|
7a73c6af327b653581e9d260431b022a08923fb3
|
464cf1dfa4c33adc73e15e15a37da94da0912e19
|
refs/heads/master
| 2020-12-02T22:11:24.650904
| 2017-07-03T09:37:27
| 2017-07-03T09:37:27
| 96,094,771
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 634
|
py
|
#coding=utf-8
# "Seeing the World", p.41 (2017-04-11) — demonstrates sorted()/sort()/reverse().
myDreamPlace = ['losAngle','houston','newYork']

print('1.this is the oringle list')
print(myDreamPlace)

# sorted() returns a new sorted list; the original is untouched.
print('2.this is the sorted list')
print(sorted(myDreamPlace))
print('now what?')
print(myDreamPlace)

# BUG FIX: this section claims "sorted and reverse" but the original called
# plain sorted() again; reverse=True produces the descending order intended.
print('3.this is the sorted and reverse list')
print(sorted(myDreamPlace, reverse=True))
print('now what?')
print(myDreamPlace)

# reverse() mutates in place; reversing twice restores the original order.
print('4.this is the reverse list')
myDreamPlace.reverse()
print(myDreamPlace)
print("Let's take it reverse again")
myDreamPlace.reverse()
print(myDreamPlace)

# sort() mutates the list in place permanently.
print('5.this is the sort list')
myDreamPlace.sort()
print(myDreamPlace)
print('now what?')
print(myDreamPlace)
|
[
"jercas0618@163.com"
] |
jercas0618@163.com
|
a5dae9eaf99e07f4a4f3bcdd368bb8a6b274af16
|
0857ee93b0a041bb38c635b71e456247982e18f0
|
/app/migrations/0001_initial.py
|
20b17abffc8af2b5cdf3a8f0e2ae4fc224399542
|
[] |
no_license
|
ConnorFieldUser/single_page_secrets
|
932ae5f253c3c4742d3584ecb6a34e0776f5672e
|
e4acdc26e64999e9d351beda98fd4f6af91566b5
|
refs/heads/master
| 2020-07-26T21:48:56.960107
| 2016-11-10T17:15:48
| 2016-11-10T17:15:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 789
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-10 16:26
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Secret model.

    initial = True

    dependencies = [
        # Secret.user references the project's configured (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Secret',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('body', models.TextField()),
                # Deleting a user cascades to their secrets.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"jtaddei@gmail.com"
] |
jtaddei@gmail.com
|
74764b1315255cbdaa416559f8d622f03ccc9269
|
31c310ef2cedb0d7b7327668bdbff4b50b165e74
|
/app/__init__.py
|
2030ab9a162509c54f126c68431fcc06854e893f
|
[
"MIT"
] |
permissive
|
wou-cs/wolfit
|
8ffacc5a4eb235d570f7f2042c4c731b4f145be5
|
cebf6a0676ae86ea9d37ad9e8b2fe1aa1535c498
|
refs/heads/main
| 2023-03-09T22:41:17.418489
| 2023-02-04T13:36:02
| 2023-02-04T13:36:02
| 136,679,479
| 2
| 14
|
MIT
| 2023-02-16T07:12:04
| 2018-06-09T01:05:52
|
Python
|
UTF-8
|
Python
| false
| false
| 580
|
py
|
import os

import config
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_login import LoginManager
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy

from app.commands import sample_data

# Single module-level Flask app (no application factory).
app = Flask(__name__)
# Base settings from the Config object, then overrides from the file named
# by the WOLFIT_SETTINGS environment variable.
app.config.from_object(config.Config)
app.config.from_envvar('WOLFIT_SETTINGS')
# The database URI is computed per-app by the Config class.
app.config['SQLALCHEMY_DATABASE_URI'] = config.Config.DATABASE_URI(app)
# Blueprint providing sample-data commands.
app.register_blueprint(sample_data)

# Extensions bound directly to the app.
db = SQLAlchemy(app)
login = LoginManager(app)
migrate = Migrate(app, db)
bootstrap = Bootstrap(app)

# Imported last so models/routes can import `app` and `db` from this module
# without a circular-import failure.
from app import models, routes
|
[
"chris@chrisbrooks.org"
] |
chris@chrisbrooks.org
|
fb33ad47c4b0d1dbaab994ac4d7707c1c15ad619
|
4ac6808e6153dceebd6271c017f9613818866da5
|
/app/__init__.py
|
7b12aec453ef6cc0483e2eb1f4a1bdf6ff520c4f
|
[
"MIT"
] |
permissive
|
quanpower/xielaoban-server
|
59d9331737c79163f0d4bd352bdcfc900c2e0c0c
|
584eaa6c049a9d664efaf60cd23273147d0a5c6e
|
refs/heads/master
| 2022-12-09T20:59:20.466225
| 2018-02-10T15:30:17
| 2018-02-10T15:30:17
| 120,546,895
| 1
| 0
|
MIT
| 2022-12-08T00:44:41
| 2018-02-07T01:36:25
|
Python
|
UTF-8
|
Python
| false
| false
| 4,018
|
py
|
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_mail import Mail
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_pagedown import PageDown
from config import config
from flask_admin import Admin, BaseView, expose
from flask_admin.contrib.fileadmin import FileAdmin
from flask_babelex import Babel
import os.path as op

# Extensions created unbound here — presumably attached to the app via
# init_app in an application factory elsewhere; confirm against create_app.
bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
db = SQLAlchemy()
pagedown = PageDown()
# NOTE(review): the name keeps the original misspelling ("flas_admin")
# because other modules may import it under this name.
flas_admin = Admin(name='smart-iiot')
babel = Babel()

# flask-login
login_manager = LoginManager()
login_manager.login_view = 'auth.login'

# flask-admin add views
from app.admin import UserAdminView, TestAdminView, UserModelView, LoraGatewayModelView, LoraNodeModelView, NodeMqttTransFuncModelView, PowerIoModelView, RelayCurrentRs485FuncModelView, \
    GrainStorehouseModelView, GrainBarnModelView, GrainTempModelView, AlarmLevelSettingModelView, AlarmStatusModelView, AlarmTypesModelView, AlarmRecordsModelView
#
# flas_admin.add_view(UserAdminView(name='UserAdmin', category='UserAdmin'))
# flas_admin.add_view(TestAdminView(name='test', endpoint='test', category='UserAdmin'))

# Grain-storage admin pages.
flas_admin.add_view(GrainStorehouseModelView(db.session, name='GrainStorehouse', endpoint='grain_storehouse', category='GrainAdmin'))
flas_admin.add_view(GrainBarnModelView(db.session, name='GrainBarn', endpoint='grain_barn', category='GrainAdmin'))
flas_admin.add_view(GrainTempModelView(db.session, name='GrainTemp', endpoint='grain_temps', category='GrainAdmin'))
# LoRa hardware admin pages.
flas_admin.add_view(LoraGatewayModelView(db.session, name='LoraGateway', endpoint='lora_gateway', category='LoraAdmin'))
flas_admin.add_view(LoraNodeModelView(db.session, name='LoraNode', endpoint='lora_node', category='LoraAdmin'))
flas_admin.add_view(NodeMqttTransFuncModelView(db.session, name='NodeMqttTransFunc', endpoint='node_mqtt_trans_func', category='LoraAdmin'))
flas_admin.add_view(PowerIoModelView(db.session, name='PowerIo', endpoint='power_io', category='LoraAdmin'))
flas_admin.add_view(RelayCurrentRs485FuncModelView(db.session, name='RelayCurrentRs485Func', endpoint='relay_current_rs485_func', category='LoraAdmin'))
# Alarm admin pages.
flas_admin.add_view(AlarmStatusModelView(db.session, name='AlarmStatus', endpoint='alarm_status', category='AlarmAdmin'))
flas_admin.add_view(AlarmTypesModelView(db.session, name='AlarmTypes', endpoint='alarm_types', category='AlarmAdmin'))
flas_admin.add_view(AlarmRecordsModelView(db.session, name='AlarmRecords', endpoint='alarm_records', category='AlarmAdmin'))
flas_admin.add_view(AlarmLevelSettingModelView(db.session, name='AlarmLevelSetting', endpoint='alarm_level_setting', category='AlarmAdmin'))
flas_admin.add_view(UserModelView(db.session, name='User', endpoint='user', category='UserAdmin'))
path = op.join(op.dirname(__file__), 'static')
print(path)
flas_admin.add_view(FileAdmin(path, '/static/', name='Static Files'))
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
# bable config for i18n
app.config['BABEL_DEFAULT_LOCALE'] = 'zh_CN'
if app.config['SSL_REDIRECT']:
from flask_sslify import SSLify
sslify = SSLify(app)
configure_extensions(app)
register_blueprints(app)
return app
def configure_extensions(app):
"""configure flask extensions
"""
bootstrap.init_app(app)
mail.init_app(app)
moment.init_app(app)
db.init_app(app)
login_manager.init_app(app)
pagedown.init_app(app)
babel.init_app(app)
flas_admin.init_app(app)
def register_blueprints(app):
"""register all blueprints for application
"""
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
from .api import api as api_blueprint
app.register_blueprint(api_blueprint, url_prefix='/api/v1')
|
[
"quanpower@gmail.com"
] |
quanpower@gmail.com
|
7926b45512fe41390359b55d5dc715655d19920f
|
72b74f66f83239a928bf049c0dd6e47576e57bae
|
/NLP/word2vec/data_processing/__init__.py
|
26e9b2e5b6b5f6c6a8227cbaffee616283614ce3
|
[] |
no_license
|
InsaneLife/DeepLearning
|
7934056682e4fec7f3241dd2d4fbe1b4c5f192d2
|
4b60fe40587b96ba2a351c1b3cb832d03c2071ab
|
refs/heads/master
| 2022-10-08T08:18:19.633449
| 2017-08-30T10:47:05
| 2017-08-30T10:47:05
| 65,697,666
| 2
| 4
| null | 2022-09-30T21:55:05
| 2016-08-15T02:16:34
|
C++
|
UTF-8
|
Python
| false
| false
| 736
|
py
|
# coding=utf8
# author = 'Aaron Chou'
import sys
import zipfile
reload(sys)
sys.setdefaultencoding('utf-8')
# filename = '../../../..//data/NLP/sougou/news_oneline.txt'
# with open(filename) as f:
# data = f.readline().decode('utf-8')
#
#
# filename = '../../../../data/word2vec/text8.zip'
# with zipfile.ZipFile(filename) as f:
# data = f.read(f.namelist()[0])
#
# print 'yes'
#
# # Read the data into a list of strings.
# def read_data(filename):
# """Extract the first file enclosed in a zip file as a list of words"""
# with zipfile.ZipFile(filename) as f:
# data = f.read(f.namelist()[0])
# return data
s = "公安机关销毁10余万非法枪支 跨国武"
s = s.replace(' ','')
print s
|
[
"993001803@qq.com"
] |
993001803@qq.com
|
736f4698de804a541c0980b218de7e032a7725b7
|
6618febe7d31b263acf2006dae748ce25fb03cfc
|
/fileparsers.py
|
3ff51617fc82d199d0a93c62a7b2c5cbccf578a2
|
[] |
no_license
|
breecummins/PatternMatch
|
d8312d95d119ea8e373ed3f1ff5be9350fb543ed
|
061b87fea1ef52825d4dba3af675d1a44af0f20c
|
refs/heads/master
| 2021-01-17T09:42:04.173524
| 2016-06-02T17:04:57
| 2016-06-02T17:04:57
| 31,024,366
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,607
|
py
|
# The MIT License (MIT)
# Copyright (c) 2015 Breschine Cummins
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import json
def parseMorseGraphs(fname="morsegraphs.txt"):
f=open(fname,'r')
morse_graphs_and_sets=[]
for l in f.readlines():
L=l.replace('|',' ').split()
morse_graphs_and_sets.append((L[0],L[1:]))
f.close()
return morse_graphs_and_sets
def parseParameters(fname="concatenatedparams.txt"):
f=open(fname,'r')
morsegraph_morseset_param=[]
for l in f.readlines():
morsegraph_morseset_param.append(tuple(l.split('|')))
f.close()
return morsegraph_morseset_param
def parsePatterns(fname="patterns.txt"):
f=open(fname,'r')
maxmin=[]
varnames=[]
originalpatterns=[]
for l in f:
if l[-1]=='\n':
l=l[:-1]
originalpatterns.append(l)
L=l.replace(',',' ').split()
varnames.append(L[::2])
maxmin.append(L[1::2])
f.close()
return varnames, maxmin, originalpatterns
def parseMorseSet(fname='dsgrn_output.json'):
parsed = json.load(open(fname),strict=False)
varnames = [ x[0] for x in parsed["network"] ]
threshnames = [ [parsed["network"][i][2][j] for j in parsed["parameter"][i][2]] for i in range(len(parsed["network"])) ]
return varnames,threshnames,parsed["graph"],parsed["cells"],parsed["vertices"]
def parseDomainCells(fname='dsgrn_domaincells.json'):
parsed = json.load(open(fname),strict=False)
return parsed["cells"]
def parseDomainGraph(fname="dsgrn_domaingraph.json"):
return json.load(open(fname),strict=False)
|
[
"breecummins@gmail.com"
] |
breecummins@gmail.com
|
884b606858c44db1fa41bb8f88a377326eb04a69
|
27722ac879b3416a0919dce80d4ec4f2a5c93c97
|
/adafruit_pixie.py
|
739cd33384b7309d19e4b59d60c04d7e4daf4f47
|
[
"MIT"
] |
permissive
|
makermelissa/Adafruit_CircuitPython_Pixie
|
ee3d5b5861dcf5283b67053c18ce23eb06b88ee9
|
2bdfcf52d8861befc47f433b660f372c20a23d2d
|
refs/heads/master
| 2020-04-25T06:57:24.144082
| 2019-02-25T22:57:25
| 2019-02-25T22:57:25
| 172,598,614
| 0
| 0
|
MIT
| 2019-02-25T22:54:35
| 2019-02-25T22:54:35
| null |
UTF-8
|
Python
| false
| false
| 4,674
|
py
|
# The MIT License (MIT)
#
# Copyright (c) 2016 Damien P. George (original Neopixel object)
# Copyright (c) 2018 Ladyada
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_pixie` - Pixie LED driver
====================================================
* Author(s): Damien P. George, Limor Fried, Kattni Rembor
"""
import time
import math
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_Pixie.git"
class Pixie:
"""
PIxie LEDs.
:param uart: The UART object.
:param int n: The number of Pixies in the chain.
:param float brightness: Brightness of the pixels between 0.0 and 1.0.
:param bool auto_write: True if the Pixies should immediately change when
set. If False, `show` must be called explicitly.
Example for two Pixie LEDs chained:
.. code_block::python
import time
import board
import busio
import adafruit_pixie
uart = busio.UART(board.TX, rx=None, baudrate=115200)
pixies = adafruit_pixie.Pixie(uart, 2, brightness=0.5)
while True:
pixies.fill((255, 0, 0))
time.sleep(1)
pixies[0] = (0, 255, 0)
pixies[1] = (0, 0, 255)
time.sleep(1)
"""
def __init__(self, uart, n, *, brightness=1.0, auto_write=True):
self._uart = uart
self._n = n
self._buf = bytearray(self._n * 3)
# Set auto_write to False temporarily so brightness setter does _not_
# call show() while in __init__.
self.auto_write = False
self._brightness = brightness
self.auto_write = auto_write
def _set_item(self, index, value):
if index < 0:
index += len(self)
if index >= self._n or index < 0:
raise IndexError
offset = index * 3
r = 0
g = 0
b = 0
if isinstance(value, int):
r = value >> 16
g = (value >> 8) & 0xff
b = value & 0xff
elif len(value) == 3:
r, g, b = value
self._buf[offset + 0] = r
self._buf[offset + 1] = g
self._buf[offset + 2] = b
def __setitem__(self, index, val):
if isinstance(index, slice):
start, stop, step = index.indices(len(self._buf) // 3)
length = stop - start
if step != 0:
length = math.ceil(length / step)
if len(val) != length:
raise ValueError("Slice and input sequence size do not match.")
for val_i, in_i in enumerate(range(start, stop, step)):
self._set_item(in_i, val[val_i])
else:
self._set_item(index, val)
if self.auto_write:
self.show()
def __len__(self):
return len(self._buf) // 3
@property
def brightness(self):
"""Overall brightness of the pixel"""
return self._brightness
@brightness.setter
def brightness(self, brightness):
self._brightness = min(max(brightness, 0.0), 1.0)
if self.auto_write:
self.show()
def fill(self, color):
"""Colors all pixels the given ***color***."""
auto_write = self.auto_write
self.auto_write = False
for i in range(self._n):
self[i] = color
if auto_write:
self.show()
self.auto_write = auto_write
def show(self):
"""
Shows the new colors on the pixels themselves if they haven't already
been autowritten.
"""
self._uart.write(bytes([int(i * self.brightness) for i in self._buf]))
time.sleep(0.005)
|
[
"kattni@adafruit.com"
] |
kattni@adafruit.com
|
ed629e71203af591f84402090e41ad720808065a
|
10e8b0b82c429593449f5b3f0ee6efca6d403870
|
/Old_Pando/HRRR_downloads/old_dwnld_scripts/download_hrrr.py
|
4df7e0c8549423f3e27555f7099459c2528e0af5
|
[] |
no_license
|
janmandel/HorelS3-Archive
|
7bf50ba2e65812857ea857bc2d033c2a661273e4
|
73f765de5358352ea9d87d76275c5cfb67a5cf43
|
refs/heads/master
| 2020-03-28T11:24:45.375321
| 2018-09-06T21:17:15
| 2018-09-06T21:17:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,249
|
py
|
# Brian Blaylock
# February 27, 2017
"""
Downloads the operational HRRR from NCEOP NOMADS server
A re-write of the "get_hrrr.csh" script in python.
"""
import urllib
from datetime import datetime, timedelta
import os
import stat
# ----------------------------------------------------------------------------
# Introductory Stuff
# ----------------------------------------------------------------------------
# download HRRR files from yesterday
yesterday = datetime.today() #-timedelta(days=1)
# Put the downloaded files in the horel-group/archive. Mkdir if it doesn't exist
OUTDIR = '/uufs/chpc.utah.edu/common/home/horel-group/archive/%04d%02d%02d/BB_test/models/hrrr/' \
% (yesterday.year, yesterday.month, yesterday.day)
if not os.path.exists(OUTDIR):
os.makedirs(OUTDIR)
# Change directory permissions
os.chmod(OUTDIR, stat.S_IRWXU | \
stat.S_IRGRP | stat.S_IXGRP | \
stat.S_IROTH | stat.S_IXOTH)
# User can read, write, execute
# Group can read and execute
# Others can read and execute
# ----------------------------------------------------------------------------
def reporthook(a,b,c):
# ',' at the end of the line is important!
print "% 3.1f%% of %d bytes\r" % (min(100, float(a * b) / c * 100), c),
#you can also use sys.stdout.write
#sys.stdout.write("\r% 3.1f%% of %d bytes"
# % (min(100, float(a * b) / c * 100), c)
def download_hrrr(DATE, field,
hour=range(0, 24), forecast=range(0, 19), OUTDIR='./'):
"""
Downloads HRRR grib2 files from the nomads server
http://nomads.ncep.noaa.gov/
Input:
DATE - a datetime object for which you wish to download
fields - the field you want to download
Options are fields ['prs', 'sfc','subh', 'nat']
pressure fields (~350 MB), surface fields (~6 MB),
native fields (~510 MB)!
hour - a list of hours you want to download
Default all hours in the day
forecast - a list of forecast hour you wish to download
Default all forecast hours (0-18)
outpath - the outpath directory you wish to save the files.
"""
# We'll store the URLs we download from and return them for troubleshooting
URL_list = []
# Build the URL string we want to download. One for each field, hour, and forecast
# New URL for downloading HRRRv2+
URL = 'http://nomads.ncep.noaa.gov/pub/data/nccf/com/hrrr/prod/hrrr.%04d%02d%02d/' \
% (DATE.year, DATE.month, DATE.day)
# Create a new array for each field to keep things organized.
for h in hour:
for f in forecast:
FileName = 'hrrr.t%02dz.wrf%sf%02d.grib2' % (h, field, f)
# Download and save the file
print 'Downloading:', URL, FileName
urllib.urlretrieve(URL+FileName, OUTDIR+FileName, reporthook)
print 'Saved:', OUTDIR+FileName
URL_list.append(URL+FileName)
# Return the list of URLs we downloaded from for troubleshooting
return URL_list
def download_hrrr_bufr(DATE,
stations=['725720'],
rename=['kslc'],
hour=range(0,24),
OUTDIR='./'):
"""
Special case for downloading HRRR bufr soundings.
"""
URL_list = []
URL = 'http://nomads.ncep.noaa.gov/pub/data/nccf/com/hrrr/prod/hrrr.%04d%02d%02d/bufrsnd.t%02dz/' \
% (DATE.year, DATE.month, DATE.day, DATE.hour)
for h in hour:
for i in range(len(stations)):
FILE = 'bufr.%s.%04d%02d%02d%02d' \
% (stations[i], DATE.year, DATE.month, DATE.day, h)
NEWNAME = '%s_%04d%02d%02d%02d.buf' \
% (rename[i], DATE.year, DATE.month, DATE.day, h)
urllib.urlretrieve(URL+FILE, OUTDIR+NEWNAME, reporthook)
URL_list.append(URL+FILE)
return URL_list
if __name__ == '__main__':
# Download Surface fields: all hours and all forecast hours
sfc_hxx = range(0, 24)
sfc_fxx = range(0, 19)
sfc_URLs = download_hrrr(yesterday,
field='sfc',
hour=sfc_hxx,
forecast=sfc_fxx,
OUTDIR=OUTDIR)
# Download Pressure fields: all hours, only analysis hours
prs_hxx = range(0, 24)
prs_fxx = range(0, 1)
prs_URLs = download_hrrr(yesterday,
field='prs',
forecast=prs_fxx,
hour=prs_hxx,
OUTDIR=OUTDIR)
# Download bufr soundings: KSLC, KODG, KPVU
stations = ['725720', '725724', '725750']
rename = ['kslc', 'kpvu', 'kodg']
bufr_URLs = download_hrrr_bufr(yesterday,
stations=stations,
rename=rename,
OUTDIR=OUTDIR)
## Download subhourly
#subh_hxx = range(0, 24)
#subh_fxx = range(0, 19)
#subh_URLs = download_hrrr(yesterday, field='subh', hour=sub_hxx, forecast=subh_fxx)
|
[
"blaylockbk@gmail.com"
] |
blaylockbk@gmail.com
|
5b932c5056e7bd1fe88edd01c629c4c593215858
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03026/s629929775.py
|
95360d8a62b73cf93e76bfc9c8e613d053dfdfeb
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 500
|
py
|
N = int(input())
L = [list(map(int,input().split())) for k in range(N-1)]
c = sorted(list(map(int,input().split())))
a = sum(c) - c[-1]
T = [[] for k in range(N)]
for e in L:
T[e[0]-1].append(e[1]-1)
T[e[1]-1].append(e[0]-1)
kyori = [-1 for k in range(N)]
que = [L[0][0]]
kyori[L[0][0]] = c.pop()
while len(que) > 0:
now = que.pop()
for tsugi in T[now]:
if kyori[tsugi] == -1:
kyori[tsugi] = c.pop()
que.append(tsugi)
print(a)
print(*kyori, sep=" ")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
ed09351b57eec7ced5b4c69fadb372f03896c127
|
37fef592f365194c28579f95abd222cc4e1243ae
|
/streamlit/venv/lib/python3.7/site-packages/streamlit/proto/PageInfo_pb2.py
|
c550c50579acc89520cbfcfbccf60c1c2162d8de
|
[] |
no_license
|
edimaudo/Python-projects
|
be61e0d3fff63fb7bd00513dbf1401e2c1822cfb
|
85d54badf82a0b653587a02e99daf389df62e012
|
refs/heads/master
| 2023-04-07T03:26:23.259959
| 2023-03-24T12:03:03
| 2023-03-24T12:03:03
| 72,611,253
| 4
| 3
| null | 2022-10-31T18:10:41
| 2016-11-02T06:37:17
| null |
UTF-8
|
Python
| false
| true
| 1,978
|
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: streamlit/proto/PageInfo.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='streamlit/proto/PageInfo.proto',
package='',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1estreamlit/proto/PageInfo.proto\" \n\x08PageInfo\x12\x14\n\x0cquery_string\x18\x01 \x01(\tb\x06proto3'
)
_PAGEINFO = _descriptor.Descriptor(
name='PageInfo',
full_name='PageInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='query_string', full_name='PageInfo.query_string', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=34,
serialized_end=66,
)
DESCRIPTOR.message_types_by_name['PageInfo'] = _PAGEINFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PageInfo = _reflection.GeneratedProtocolMessageType('PageInfo', (_message.Message,), {
'DESCRIPTOR' : _PAGEINFO,
'__module__' : 'streamlit.proto.PageInfo_pb2'
# @@protoc_insertion_point(class_scope:PageInfo)
})
_sym_db.RegisterMessage(PageInfo)
# @@protoc_insertion_point(module_scope)
|
[
"edimaudo@gmail.com"
] |
edimaudo@gmail.com
|
77fe07d5e52bfe2f60ffef3a17d87c9c4778edbb
|
fa32f7fe4068323b719725558423927ad307cc4b
|
/build_isolated/roslaunch/catkin_generated/pkg.develspace.context.pc.py
|
2b96196666b66142663c46a34c0c8bcc818b81b6
|
[] |
no_license
|
CJohnson5136/ros_catkin_ws
|
d07ee8c20bc1ebe6c05abdea24ef1f5dab14954b
|
05193a7e587ab82e696c66176b151c43d2bcef82
|
refs/heads/master
| 2021-05-09T03:05:12.373334
| 2018-01-28T03:13:33
| 2018-01-28T03:13:33
| 119,227,181
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "roslaunch"
PROJECT_SPACE_DIR = "/home/pi/ros_catkin_ws/devel_isolated/roslaunch"
PROJECT_VERSION = "1.13.5"
|
[
"cody.johnson@ucollege.edu"
] |
cody.johnson@ucollege.edu
|
e2b23a1391b17cde86fbd36afbff414773d9903b
|
a66c079f250c5469e01b5ec5b00a795dbc9fa9a0
|
/blog/admin.py
|
8c315f065ee59a47926fb5507af9892b991febc6
|
[
"MIT"
] |
permissive
|
Cpeters1982/MillGeekV2
|
b925f013ae9b95827bfc304a57b0e0dceabd7544
|
e08a1366bbe732b0d7fc7a7802cd54ff1d1091bf
|
refs/heads/master
| 2021-08-06T14:45:57.616281
| 2017-11-06T06:38:11
| 2017-11-06T06:38:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 489
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Post
class PostAdmin(admin.ModelAdmin):
list_display = ('title', 'slug', 'author', 'publish', 'status')
list_filter = ('status', 'created', 'publish', 'author')
search_fields = ('title', 'body')
prepopulated_fields = {'slug': ('title',)}
raw_id_fields = ('author',)
date_hierarchy = 'publish'
ordering = ['status', 'publish']
admin.site.register(Post, PostAdmin)
|
[
"chiefautoparts@outlook.com"
] |
chiefautoparts@outlook.com
|
8a6970567b9013782b84557a586c6ce62b9f05a7
|
2d23c271ec1a226bb345c23d7b2671ec021e9502
|
/Triangle.py
|
74991744eea783b53c8a6721026897aac3e70a7c
|
[] |
no_license
|
chenlanlan/leetcode
|
2e6aec0846ed951466bcd2c2e4596c998faca8e4
|
d02478853c32c29477f53852286c429c20f1424e
|
refs/heads/master
| 2016-09-08T05:07:46.904441
| 2015-07-12T05:41:15
| 2015-07-12T05:41:15
| 32,845,795
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 790
|
py
|
#!/usr/bin/python
class Solution:
# @param triangle, a list of lists of integers
# @return an integer
def minimumTotal(self, triangle):
sum = triangle
ans = triangle[0][0]
for i in range(1, len(triangle)):
for j in range(0, i + 1):
if j == 0:
sum[i][j] = sum[i - 1][j] + triangle[i][j]
ans = sum[i][j]
elif j == i:
sum[i][j] = sum[i - 1][j - 1] + triangle[i][j]
else:
sum[i][j] = min(sum[i - 1][j - 1], sum[i - 1][j]) + triangle[i][j]
if sum[i][j] < ans:
ans = sum[i][j]
return ans
x = Solution()
print(x.minimumTotal([
[2],
[3,4],
[6,5,7],
[4,1,8,3]
]))
|
[
"silan0318@163.com"
] |
silan0318@163.com
|
44e995678e486ed927b284dd2cac84cdbca80e09
|
a722faf9fb50c794555861bb4858c3ed8a7a25f3
|
/sandbox/peachpy_avx_practice/logic.py
|
c3fc2c739060b29e44d01407d874555685f434b3
|
[] |
no_license
|
ar90n/lab
|
31e5d2c320de5618bc37572011596fee8923255d
|
6d035e12f743e9ba984e79bfe660967b9ca8716b
|
refs/heads/main
| 2023-07-25T17:29:57.960915
| 2023-07-22T12:08:18
| 2023-07-22T12:08:18
| 77,883,405
| 4
| 0
| null | 2023-07-17T08:45:14
| 2017-01-03T04:15:49
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,678
|
py
|
from peachpy import *
from peachpy.x86_64 import *
import numpy as np
import ctypes
def gen_andnot_ps():
x = Argument(ptr(const_float_))
y = Argument(ptr(const_float_))
z = Argument(ptr(float_))
with Function("AndNot", (x, y, z), target=uarch.default + isa.avx2) as asm_function:
reg_x = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_x, x)
reg_y = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_y, y)
reg_z = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_z, z)
ymm = YMMRegister()
VMOVUPD(ymm, [reg_x])
VANDNPS(ymm, ymm, [reg_y])
VMOVUPD([reg_z], ymm)
RETURN()
return asm_function.finalize(abi.detect()).encode().load()
andnot_ps = gen_andnot_ps()
x = np.array(
[
0x59595959,
0x59595959,
0x59595959,
0x59595959,
0x59595959,
0x59595959,
0x59595959,
0x59595959,
],
dtype=np.uint32,
)
y = np.array(
[
0x95959595,
0x95959595,
0x95959595,
0x95959595,
0x95959595,
0x95959595,
0x95959595,
0x95959595,
],
dtype=np.uint32,
)
z = np.empty(8, dtype=np.uint32)
andnot_ps(
x.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
y.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
z.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
)
assert np.allclose(
z,
np.array(
[
0x84848484,
0x84848484,
0x84848484,
0x84848484,
0x84848484,
0x84848484,
0x84848484,
0x84848484,
],
dtype=np.uint32,
),
)
|
[
"argon.argon.argon@gmail.com"
] |
argon.argon.argon@gmail.com
|
f714a0095b6e307d7d0c605551e5062b707f474e
|
644d9ef18713e4cb5d4c3b53301bd7276dcdf477
|
/api/programs/views/shared_files.py
|
16753e871ff23a7486b45fe52c1b5af4a82dca6c
|
[] |
no_license
|
alexhernandez-git/django-classline
|
6cb5bcd268248999e18037f58c4ed30012d51915
|
49fcf0c6d735a56eaebc17d04be52dab91ca4c3a
|
refs/heads/master
| 2023-03-18T07:10:08.770066
| 2021-03-04T22:24:09
| 2021-03-04T22:24:09
| 287,985,028
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,552
|
py
|
"""Program views."""
# Django REST Framework
from rest_framework import mixins, viewsets, status, filters
from rest_framework.response import Response
from rest_framework.decorators import action
from rest_framework.filters import SearchFilter, OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend
# Permissions
from rest_framework.permissions import IsAuthenticated
# Filters
from rest_framework.filters import SearchFilter, OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend
# Models
from api.programs.models import File
# Serializers
from api.programs.serializers import (
FileModelSerializer,
ShareUsersFilesSerializer
)
from api.programs.serializers.subscriptions import(
SubscriptionSignUpSerializer
)
from api.users.serializers import (
ProfileModelSerializer,
UserWithoutTeacherModelSerializer
)
import stripe
# Utils
from api.utils.permissions import AddProgramMixin
class SharedFileViewSet(mixins.CreateModelMixin,
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
AddProgramMixin):
"""Circle view set."""
pagination_class = None
serializer_class = FileModelSerializer
lookup_field = 'pk'
queryset = File.objects.all()
filter_backends = [filters.SearchFilter]
search_fields = ['name']
def get_queryset(self):
"""Restrict list to public-only."""
queryset = File.objects.filter(program=self.program)
return queryset
def get_permissions(self):
"""Assign permissions based on action."""
permissions = []
return [permission() for permission in permissions]
def list(self, request, *args, **kwargs):
if 'top_folder' in request.GET and request.GET['top_folder']:
queryset = self.get_queryset().filter(
top_folder=request.GET['top_folder'])
else:
queryset = self.get_queryset().filter(shared_users=request.user)
queryset = self.filter_queryset(queryset)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
|
[
"vlexhndz@gmail.com"
] |
vlexhndz@gmail.com
|
6e1f6880a6caf5cffabb380e12829f1606c3e98f
|
942ee5e8d54e8ebe9c5c841fbfdd1da652946944
|
/1501-2000/1751.Maximum Number of Events That Can Be Attended II.py
|
9344ddbdab3c4b8f5e076a9e4c18942fe3232650
|
[] |
no_license
|
kaiwensun/leetcode
|
0129c174457f32887fbca078fb448adce46dd89d
|
6b607f4aae3a4603e61f2e2b7480fdfba1d9b947
|
refs/heads/master
| 2023-08-31T07:30:50.459062
| 2023-08-27T07:59:16
| 2023-08-27T07:59:16
| 57,526,914
| 69
| 9
| null | 2023-08-20T06:34:41
| 2016-05-01T05:37:29
|
Python
|
UTF-8
|
Python
| false
| false
| 756
|
py
|
import functools, collections, bisect
class Solution:
def maxValue(self, events: List[List[int]], k: int) -> int:
end2eid = collections.defaultdict(list)
for i in range(len(events)):
end2eid[events[i][1]].append(i)
end_days = list(sorted(end2eid.keys()))
@functools.lru_cache(None)
def dp(end_day_index, count):
if count == 0 or end_day_index == -1:
return 0
res = dp(end_day_index - 1, count)
for eid in end2eid[end_days[end_day_index]]:
start, end, value = events[eid]
res = max(res, dp(bisect.bisect_left(end_days, start) - 1, count - 1) + value)
return res
return dp(len(end_days) - 1, k)
|
[
"skw_kevin@126.com"
] |
skw_kevin@126.com
|
a9ef067613f57b825d3533520dc3ed43f8292ccf
|
c0bf1f7ca6d9d7562f72b4a668e97a2d5ffe7c88
|
/examples/thread_matmul_ipxact/thread_matmul_ipxact.py
|
0ce4d4f6801adc0539c36f4b0974578f5403318c
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
00mjk/veriloggen
|
cee0da16182c3c9bd95340a966d6a3febc0e7ad1
|
9d0af9638470b3b85cbf9cb53f16b853932571c8
|
refs/heads/master
| 2023-06-23T07:10:20.645734
| 2021-07-18T14:53:13
| 2021-07-18T14:53:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,682
|
py
|
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
import numpy as np
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))))
from veriloggen import *
import veriloggen.thread as vthread
import veriloggen.types.axi as axi
import veriloggen.types.ipxact as ipxact
axi_datawidth = 32
datawidth = 32
matrix_size = 16
a_offset = 0
b_offset = 4096
c_offset = 4096 * 2
def mkLed():
m = Module('blinkled')
clk = m.Input('CLK')
rst = m.Input('RST')
addrwidth = 10
ram_a = vthread.RAM(m, 'ram_a', clk, rst, datawidth, addrwidth)
ram_b = vthread.RAM(m, 'ram_b', clk, rst, datawidth, addrwidth)
ram_c = vthread.RAM(m, 'ram_c', clk, rst, datawidth, addrwidth)
maxi = vthread.AXIM(m, 'maxi', clk, rst, datawidth)
saxi = vthread.AXISLiteRegister(m, 'saxi', clk, rst, datawidth, length=8)
def matmul():
while True:
saxi.wait_flag(0, value=1, resetvalue=0)
matrix_size = saxi.read(1)
a_offset = saxi.read(2)
b_offset = saxi.read(3)
c_offset = saxi.read(4)
comp(matrix_size, a_offset, b_offset, c_offset)
saxi.write_flag(5, 1, resetvalue=0)
def comp(matrix_size, a_offset, b_offset, c_offset):
a_addr, c_addr = a_offset, c_offset
for i in range(matrix_size):
maxi.dma_read(ram_a, 0, a_addr, matrix_size)
b_addr = b_offset
for j in range(matrix_size):
maxi.dma_read(ram_b, 0, b_addr, matrix_size)
sum = 0
for k in range(matrix_size):
x = ram_a.read(k)
y = ram_b.read(k)
sum += x * y
ram_c.write(j, sum)
b_addr += matrix_size * (datawidth // 8)
maxi.dma_write(ram_c, 0, c_addr, matrix_size)
a_addr += matrix_size * (datawidth // 8)
c_addr += matrix_size * (datawidth // 8)
th = vthread.Thread(m, 'th_matmul', clk, rst, matmul)
fsm = th.start()
return m
def mkTest(memimg_name=None):
    """Build the simulation testbench module for the matmul accelerator.

    Initializes matrix memories (A diagonal with increasing values, B a
    scaled identity), instantiates the DUT with an AXI memory model and an
    AXI-Lite control port, and drives a control thread that starts the
    computation and verifies the result matrix C.
    """
    a_shape = (matrix_size, matrix_size)
    b_shape = (matrix_size, matrix_size)
    c_shape = (a_shape[0], b_shape[0])
    n_raw_a = axi.shape_to_length(a_shape)
    n_raw_b = axi.shape_to_length(b_shape)
    n_a = axi.shape_to_memory_size(a_shape, datawidth)
    n_b = axi.shape_to_memory_size(b_shape, datawidth)
    a = np.zeros(a_shape, dtype=np.int64)
    b = np.zeros(b_shape, dtype=np.int64)
    # A: diagonal matrix with values 1, 2, 3, ... along the diagonal.
    value = 1
    for y in range(a_shape[0]):
        for x in range(a_shape[1]):
            if x == y:
                a[y][x] = value
                value += 1
            else:
                a[y][x] = 0
    # B: 2 * identity, so C = A @ B has (y+1)*2 on the diagonal, 0 elsewhere.
    for y in range(b_shape[0]):
        for x in range(b_shape[1]):
            if x == y:
                b[y][x] = 2
            else:
                b[y][x] = 0
    a_addr = a_offset
    size_a = n_a * datawidth // 8
    b_addr = b_offset
    size_b = n_b * datawidth // 8
    # Backing memory image for the AXI memory model (8 MB).
    mem = np.zeros([1024 * 1024 * 8 // axi_datawidth], dtype=np.int64)
    axi.set_memory(mem, a, axi_datawidth, datawidth, a_addr)
    axi.set_memory(mem, b, axi_datawidth, datawidth, b_addr)
    led = mkLed()
    m = Module('test')
    params = m.copy_params(led)
    ports = m.copy_sim_ports(led)
    clk = ports['CLK']
    rst = ports['RST']
    memory = axi.AxiMemoryModel(m, 'memory', clk, rst,
                                mem_datawidth=axi_datawidth,
                                memimg=mem, memimg_name=memimg_name)
    memory.connect(ports, 'maxi')
    # AXI-Slave controller
    _saxi = vthread.AXIMLite(m, '_saxi', clk, rst, noio=True)
    _saxi.connect(ports, 'saxi')
    # Timer
    counter = m.Reg('counter', 32, initval=0)
    seq = Seq(m, 'seq', clk, rst)
    seq(
        counter.inc()
    )

    def ctrl():
        # Control thread: program CSRs, start, poll for completion, verify.
        for i in range(100):
            pass
        awaddr = 4
        print('# matrix_size = %d' % matrix_size)
        _saxi.write(awaddr, matrix_size)
        awaddr = 8
        print('# a_offset = %d' % a_offset)
        _saxi.write(awaddr, a_offset)
        awaddr = 12
        print('# b_offset = %d' % b_offset)
        _saxi.write(awaddr, b_offset)
        awaddr = 16
        print('# c_offset = %d' % c_offset)
        _saxi.write(awaddr, c_offset)
        awaddr = 0
        start_time = counter
        print('# start time = %d' % start_time)
        # Writing 1 to register 0 kicks off the computation.
        _saxi.write(awaddr, 1)
        araddr = 20
        # Busy-wait on the done flag (register offset 20).
        v = _saxi.read(araddr)
        while v == 0:
            v = _saxi.read(araddr)
        end_time = counter
        print('# end time = %d' % end_time)
        time = end_time - start_time
        print('# exec time = %d' % time)
        # Expected C: diagonal entries (y+1)*2, zeros off-diagonal.
        all_ok = True
        for y in range(matrix_size):
            for x in range(matrix_size):
                v = memory.read(
                    c_offset + (y * matrix_size + x) * datawidth // 8)
                if y == x and vthread.verilog.NotEql(v, (y + 1) * 2):
                    all_ok = False
                    print("NG [%d,%d] = %d" % (y, x, v))
                if y != x and vthread.verilog.NotEql(v, 0):
                    all_ok = False
                    print("NG [%d,%d] = %d" % (y, x, v))
        if all_ok:
            print('# verify: PASSED')
        else:
            print('# verify: FAILED')
        vthread.finish()

    th = vthread.Thread(m, 'th_ctrl', clk, rst, ctrl)
    fsm = th.start()
    uut = m.Instance(led, 'uut',
                     params=m.connect_params(led),
                     ports=m.connect_ports(led))
    simulation.setup_waveform(m, uut)
    simulation.setup_clock(m, clk, hperiod=5)
    init = simulation.setup_reset(m, rst, m.make_reset(), period=100)
    init.add(
        Delay(1000000),
        Systask('finish'),
    )
    return m
def run(filename='tmp.v', simtype='iverilog', outputfile=None):
    """Build the testbench, optionally dump Verilog, and run the simulator.

    Returns the simulator's textual output; Verilator's trailing summary
    line (which starts with '-') is stripped for parity with iverilog.
    """
    if outputfile is None:
        base = os.path.splitext(os.path.basename(__file__))[0]
        outputfile = base + '.out'
    testbench = mkTest(memimg_name='memimg_' + outputfile)
    if filename is not None:
        testbench.to_verilog(filename)
    simulator = simulation.Simulator(testbench, sim=simtype)
    result = simulator.run(outputfile=outputfile)
    out_lines = result.splitlines()
    if simtype == 'verilator' and out_lines[-1].startswith('-'):
        result = '\n'.join(out_lines[:-1])
    return result
if __name__ == '__main__':
    # Run the simulation and print its console output.
    rslt = run(filename='tmp.v')
    print(rslt)
    # Also export the design as an IP-XACT package.
    m = mkLed()
    ipxact.to_ipxact(m,
                     clk_ports=[('CLK', ('RST',))],
                     rst_ports=[('RST', 'ACTIVE_HIGH')])
|
[
"shta.ky1018@gmail.com"
] |
shta.ky1018@gmail.com
|
694cbb1a9d0f58df9ff2f2a939cac87b4136ff2c
|
2ff7e53d5e512cd762217ca54317982e07a2bb0c
|
/zactionConst.py
|
e0a0d9c1bc4143546dda68c8a1f4581f9655fb66
|
[] |
no_license
|
nanxijw/Clara-Pretty-One-Dick
|
66d3d69426642b79e8fd4cc8e0bec23adeeca6d6
|
50de3488a2140343c364efc2615cf6e67f152be0
|
refs/heads/master
| 2021-01-19T09:25:07.555284
| 2015-02-17T21:49:33
| 2015-02-17T21:49:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,359
|
py
|
#Embedded file name: zactionConst.py
"""
Constants for the Action System
"""
# NOTE: decompiled Python 2 module (see the `L` long-literal suffix below);
# do not run under Python 3 without adjusting that literal.

# --- Database schema and table paths ---------------------------------------
ACTION_SCHEMA = 'zaction'
_ACTION_PREFIX = ACTION_SCHEMA + '.'
_ACTION_TREE_TABLE_NAME = 'trees'
_ACTION_TREE_TABLE_FULL_PATH = _ACTION_PREFIX + _ACTION_TREE_TABLE_NAME
_ACTION_TREE_LINK_TABLE_NAME = 'treeLinks'
_ACTION_TREE_LINK_TABLE_FULL_PATH = _ACTION_PREFIX + _ACTION_TREE_LINK_TABLE_NAME
ACTION_STEP_TABLE_NAME = 'steps'
ACTION_STEP_TABLE_FULL_PATH = _ACTION_PREFIX + ACTION_STEP_TABLE_NAME
ACTION_PROC_TABLE_NAME = 'procs'
ACTION_PROC_TABLE_FULL_PATH = _ACTION_PREFIX + ACTION_PROC_TABLE_NAME
_ACTION_PROPERTY_TABLE_NAME = 'properties'
_ACTION_PROPERTY_TABLE_FULL_PATH = _ACTION_PREFIX + _ACTION_PROPERTY_TABLE_NAME
ACTION_PROC_TYPE_TABLE_NAME = 'procTypes'
ACTION_PROC_TYPE_TABLE_FULL_PATH = _ACTION_PREFIX + ACTION_PROC_TYPE_TABLE_NAME
ACTION_PROC_TYPE_PROPERTY_TABLE_NAME = 'procTypeProperties'
ACTION_PROC_TYPE_PROPERTY_TABLE_FULL_PATH = _ACTION_PREFIX + ACTION_PROC_TYPE_PROPERTY_TABLE_NAME
MAX_PROP_NAME_LEN = 64
# --- Selection-event names --------------------------------------------------
SELECT_ACTION_TREE = 'SelectActionTree'
SELECT_ACTION_STEP = 'SelectActionStep'
SELECT_ACTION_PROC = 'SelectActionProc'
# --- Action-step types (string names and numeric ids) -----------------------
ACTIONSTEP_TYPE_NORMAL = 'Normal'
ACTIONSTEP_TYPE_CONDITIONAL = 'Conditional'
ACTIONSTEP_TYPE_TRY = 'Try Until'
ACTIONSTEP_TYPE_CATCH = 'Catch'
ACTIONSTEP_TYPE_TRYSYNC = 'Try Sync'
ACTIONSTEP_TYPE_WHILE = 'While'
ACTIONSTEP_TYPE_PREREQ = 'Prerequisite'
ACTIONSTEP_TYPEID_NORMAL = 0
ACTIONSTEP_TYPEID_CONDITIONAL = 1
ACTIONSTEP_TYPEID_TRY = 2
ACTIONSTEP_TYPEID_CATCH = 3
ACTIONSTEP_TYPEID_TRYSYNC = 4
ACTIONSTEP_TYPEID_WHILE = 5
ACTIONSTEP_TYPEID_PREREQ = -1
# Indexable by non-negative type id; PREREQ (-1) maps to the last slot.
ACTION_STEP_TYPEID_TO_STRING = (ACTIONSTEP_TYPE_NORMAL,
 ACTIONSTEP_TYPE_CONDITIONAL,
 ACTIONSTEP_TYPE_TRY,
 ACTIONSTEP_TYPE_CATCH,
 ACTIONSTEP_TYPE_TRYSYNC,
 ACTIONSTEP_TYPE_WHILE,
 ACTIONSTEP_TYPE_PREREQ)
ACTION_STEP_TYPE_CONDITIONALS = [ACTIONSTEP_TYPE_CONDITIONAL,
 ACTIONSTEP_TYPE_TRY,
 ACTIONSTEP_TYPE_WHILE,
 ACTIONSTEP_TYPE_PREREQ]
ACTION_STEP_TYPE_NORMALS = [ACTIONSTEP_TYPE_NORMAL, ACTIONSTEP_TYPE_CATCH, ACTIONSTEP_TYPE_TRYSYNC]
# --- Step execution location -------------------------------------------------
ACTIONSTEP_LOC_CLIENTSERVER = 'Client and Server'
ACTIONSTEP_LOC_CLIENTONLY = 'Client Only'
ACTIONSTEP_LOC_SERVERONLY = 'Server Only'
ACTIONSTEP_LOCID_CLIENTSERVER = 0
ACTIONSTEP_LOCID_CLIENTONLY = 1
ACTIONSTEP_LOCID_SERVERONLY = 2
ACTION_STEP_LOCID_TO_STRING = (ACTIONSTEP_LOC_CLIENTSERVER, ACTIONSTEP_LOC_CLIENTONLY, ACTIONSTEP_LOC_SERVERONLY)
# --- Step flags (bit values) -------------------------------------------------
ACTIONSTEP_FLAG_ELSE = 1
ACTIONSTEP_FLAG_CATCH = 2
ACTIONSTEP_FLAGS = {ACTIONSTEP_FLAG_ELSE: 'Else',
 ACTIONSTEP_FLAG_CATCH: 'Catch'}
_ACTION_INTERNAL = 0
_ACTION_EXPOSED = 1
# --- Tree links: a link word packs type (low 16 bits) and exposure (high) ----
ACTION_TREE_LINK_REFERENCE = 1
ACTION_TREE_LINK_BRANCH = 2
ACTION_TREE_LINK_TYPES = [ACTION_TREE_LINK_REFERENCE, ACTION_TREE_LINK_BRANCH]
ACTION_TREE_LINK_NAMES = {'In-Place': ACTION_TREE_LINK_REFERENCE,
 'Branch-To': ACTION_TREE_LINK_BRANCH}
_ACTION_LINK_BIT_ORDER_SPLIT = 16
_ACTION_LINK_TYPE_BIT_FILTER = 65535
_ACTION_LINK_TYPE_MAX_VALUE = 65535
_ACTION_LINK_EXPOSURE_BIT_FILTER = 4294901760L
_ACTION_LINK_EXPOSURE_MAX_VALUE = 65535
# --- Exposure types ----------------------------------------------------------
ACTION_EXPOSURE_EXTERNAL = 0
ACTION_EXPOSURE_INTERNAL = 1
ACTION_EXPOSURE_NEVER = 2
ACTION_EXPOSURE_TYPES = [ACTION_EXPOSURE_EXTERNAL, ACTION_EXPOSURE_INTERNAL, ACTION_EXPOSURE_NEVER]
ACTION_EXPOSURE_TYPE_NAMES = {'External': ACTION_EXPOSURE_EXTERNAL,
 'Internal': ACTION_EXPOSURE_INTERNAL,
 'Never': ACTION_EXPOSURE_NEVER}
ACTIONTREE_RECIPE_DEFAULT_ACTION_NAME = 'defaultAction'
[
"billchang.e@gmail.com"
] |
billchang.e@gmail.com
|
2d8ff7c75391eb1e4653770f6cae51129252f4a2
|
8ff5bd7d22b578678fe7225a84f82cea5eafa25a
|
/Backend/todoapps/todo/apps/forms.py
|
f4b8eed46fe38dbebdb1842a7b754f591f1a3bf1
|
[] |
no_license
|
Jayson7/Mumswhocode-bootcamp-frontend-basics
|
33639eb40b48cba6aca99ca4d60e7119ab9739bf
|
de05098e44c13077a9402074811525d53b580b9c
|
refs/heads/master
| 2023-08-15T03:45:07.848603
| 2021-09-09T22:58:27
| 2021-09-09T22:58:27
| 399,247,043
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
from django.forms import ModelForm
from .models import Todo
class Todoforms(ModelForm):
    """ModelForm exposing every field of the Todo model."""

    class Meta:
        model = Todo
        fields = "__all__"
|
[
"lexxiijoo70@gmail.com"
] |
lexxiijoo70@gmail.com
|
0c8715456aea5a32d0a4f5330b7b558b9fd2d9ca
|
5acc2eb70ed8b755d7f6b62b65a09cc29b661271
|
/Aula 10/atv01.py
|
c376629dd2744f4bba8899ed5d2c8d19113ba60a
|
[] |
no_license
|
Yuri-Santiago/sor-python-ifce-p7
|
07d1a30f2c304a0a11a2a39b40784cc543f4a18c
|
ccd3460ecab580e23fb41921ee7cc284d7212aef
|
refs/heads/master
| 2023-05-28T08:32:13.188126
| 2021-06-06T22:20:14
| 2021-06-06T22:20:14
| 350,816,877
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,329
|
py
|
"""
1 - Criar uma classe que modele retângulos.
1. Atributos: LadoA, LadoB (ou Comprimento e Largura, ou Base e Altura, a escolher)
2. Métodos: Mudar valor dos lados, Retornar valor dos lados, calcular Área e calcular Perímetro;
3. Crie um programa que utilize esta classe. Ele deve pedir ao usuário que informe as medidas de um cômodo.
Depois, deve criar um objeto com as medidas e calcular a quantidade de pisos e de rodapés necessárias para o local.
"""
class Retangulo:
    """Models a rectangle from two side lengths (lado_a, lado_b)."""

    def __init__(self, lado_a, lado_b):
        """Store the two side measurements."""
        self.lado_a = lado_a
        self.lado_b = lado_b

    def mudar_valor_lados(self, a, b):
        """Replace both side measurements at once."""
        self.lado_a, self.lado_b = a, b

    def retornar_valor_lados(self):
        """Return the current sides as a (lado_a, lado_b) tuple."""
        return (self.lado_a, self.lado_b)

    def calcular_area(self):
        """Area = lado_a * lado_b."""
        return self.lado_a * self.lado_b

    def calcular_perimetro(self):
        """Perimeter = twice each side, summed."""
        return self.lado_a * 2 + self.lado_b * 2
# Interactive driver: ask for the room dimensions (in meters), then report
# how much flooring (area) and baseboard (perimeter) is needed.
print("Nesse programa você deverá informar as medidas de um cômodo em metros.")
comprimento = float(input("Digite o valor do comprimento do Cômodo: "))
largura = float(input("Digite o valor do largura do Cômodo: "))
comodo = Retangulo(comprimento, largura)
print(f'Você usará {comodo.calcular_area()} m quadrados de piso.')
# BUGFIX: baseboard (rodapé) runs along the perimeter and is measured in
# linear meters, not square meters — the old message said "m quadrados".
print(f'Você usará {comodo.calcular_perimetro()} m de rodapé.')
|
[
"yurimateussantiago@gmail.com"
] |
yurimateussantiago@gmail.com
|
0afcf429e50ba86c7d155d63ceed8647dcab0bf3
|
3327a87cefa2275bd0ba90a500444f3494b14fdf
|
/bwu/stack/225-implement-stack-using-queues.py
|
4bc2aee9364af8063e2c1b101d5f1e0e53ac5d4d
|
[] |
no_license
|
captainhcg/leetcode-in-py-and-go
|
e1b56f4228e0d60feff8f36eb3d457052a0c8d61
|
88a822c48ef50187507d0f75ce65ecc39e849839
|
refs/heads/master
| 2021-06-09T07:27:20.358074
| 2017-01-07T00:23:10
| 2017-01-07T00:23:10
| 61,697,502
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 761
|
py
|
class Stack(object):
    """LIFO stack implemented on top of two FIFO queues (LeetCode 225).

    Only queue operations are used: append to the back, pop from the front.
    """

    def __init__(self):
        """
        initialize your data structure here.
        """
        self.queue1 = []
        self.queue2 = []

    def push(self, x):
        """
        :type x: int
        :rtype: nothing
        """
        # New elements simply go to the back of the primary queue.
        self.queue1.append(x)

    def pop(self):
        """
        :rtype: nothing
        """
        # Shuttle all but the last element into the spare queue, discard the
        # last one (stack top), then move everything back.
        for _ in range(len(self.queue1) - 1):
            self.queue2.append(self.queue1.pop(0))
        self.queue1.pop(0)
        while self.queue2:
            self.queue1.append(self.queue2.pop(0))

    def top(self):
        """
        :rtype: int
        """
        # The stack top is the most recently appended element.
        return self.queue1[-1]

    def empty(self):
        """
        :rtype: bool
        """
        return not self.queue1
|
[
"noreply@github.com"
] |
captainhcg.noreply@github.com
|
df2e7687f497b9de3efad226520196e5f835d697
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/artificial/transf_Logit/trend_ConstantTrend/cycle_5/ar_12/test_artificial_1024_Logit_ConstantTrend_5_12_0.py
|
5b95e6cd8a0927e6f837267cb4099daf8fe96f20
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219
| 2023-07-28T20:15:53
| 2023-07-28T20:15:53
| 70,790,978
| 457
| 77
|
BSD-3-Clause
| 2023-03-08T21:45:40
| 2016-10-13T09:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 265
|
py
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# Run the shared artificial-dataset benchmark: 1024 daily points, constant
# trend, cycle length 5, Logit transform, no noise, no exogenous variables,
# AR order 12. (Cleaned up: removed stray trailing semicolon, PEP8 spacing.)
art.process_dataset(N=1024, FREQ='D', seed=0, trendtype="ConstantTrend",
                    cycle_length=5, transform="Logit", sigma=0.0,
                    exog_count=0, ar_order=12)
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
4d2e063d60ff9fe1c6e29ab07f13137c1edbbd48
|
5ec06dab1409d790496ce082dacb321392b32fe9
|
/clients/python/generated/swaggeraemosgi/model/org_apache_sling_event_impl_eventing_thread_pool_properties.py
|
8ded9a93c3374c856f6e681fbdcda9df8a966b64
|
[
"Apache-2.0"
] |
permissive
|
shinesolutions/swagger-aem-osgi
|
e9d2385f44bee70e5bbdc0d577e99a9f2525266f
|
c2f6e076971d2592c1cbd3f70695c679e807396b
|
refs/heads/master
| 2022-10-29T13:07:40.422092
| 2021-04-09T07:46:03
| 2021-04-09T07:46:03
| 190,217,155
| 3
| 3
|
Apache-2.0
| 2022-10-05T03:26:20
| 2019-06-04T14:23:28
| null |
UTF-8
|
Python
| false
| false
| 6,961
|
py
|
"""
Adobe Experience Manager OSGI config (AEM) API
Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501
The version of the OpenAPI document: 1.0.0-pre.0
Contact: opensource@shinesolutions.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from swaggeraemosgi.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Deferred import of referenced model classes into module globals.

    Generated-code pattern: presumably deferred so mutually-referencing
    generated models can be imported in any order — standard for
    openapi-generator output.
    """
    from swaggeraemosgi.model.config_node_property_integer import ConfigNodePropertyInteger
    globals()['ConfigNodePropertyInteger'] = ConfigNodePropertyInteger
class OrgApacheSlingEventImplEventingThreadPoolProperties(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # This model declares no enums and no value validations.
    allowed_values = {
    }

    validations = {
    }

    additional_properties_type = None

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'min_pool_size': (ConfigNodePropertyInteger,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        return None

    # Maps pythonic attribute names to the JSON keys in the OpenAPI document.
    attribute_map = {
        'min_pool_size': 'minPoolSize',  # noqa: E501
    }

    _composed_schemas = {}

    # Internal bookkeeping attributes set by __init__; never serialized.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """OrgApacheSlingEventImplEventingThreadPoolProperties - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            min_pool_size (ConfigNodePropertyInteger): [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Generated models accept keyword arguments only.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
|
[
"cliffano@gmail.com"
] |
cliffano@gmail.com
|
326b4bbd99f9eed9bff3b4bb454476f0beb387c0
|
7d8900637a800d0efa2e1d6a9d4fe877943fdabf
|
/dudu/add.py
|
cfcb0a5d3c3f51cc3bb33468b54af380d8a287cd
|
[
"MIT"
] |
permissive
|
vollov/python-test
|
d66e0b101f3a664d2d0d33591af8af7134afabd6
|
864896f8ccedb28e15c4962d8983862e9a0e6d77
|
refs/heads/master
| 2021-07-19T03:15:45.026613
| 2021-02-12T18:24:22
| 2021-02-12T18:24:22
| 46,740,187
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 634
|
py
|
def add(a, b):
    """Return the sum of a and b.

    Args:
        a: integer number
        b: integer number

    Returns:
        a + b
    """
    print("OK")  # debug trace kept so existing console output is unchanged
    return a + b
    # BUGFIX: removed the unreachable print("NOT OK") that followed `return`.
def subtract(a, b):
    """Return the difference of a and b.

    Args:
        a: integer
        b: integer

    Returns:
        a - b
    """
    difference = a - b
    return difference
def multiply(a, b):
    """Return the product of a and b.

    Args:
        a: integer
        b: integer

    Returns:
        a * b
    """
    product = a * b
    return product
# testing add: prints "OK" then 28
print(add(5,23))
# testing subtract should return -18
print(subtract(5,23))
# testing multiply: should print 115
print(multiply(5,23))
|
[
"dike.zhang@gmail.com"
] |
dike.zhang@gmail.com
|
bbe8ca1d65b3a671fbd2432b250d18df17ed5c27
|
8004b7468ad46a6330192985f1f9e3a45cc1d2c2
|
/databasenote/第一周/第三天/复习.py
|
9ba787c38c5ac08b4d75305be9ffb55897966a62
|
[] |
no_license
|
yuemeiss/database
|
d9704f90127cfd27b92f62a251c213a8d6bc93eb
|
5f2304cf72330d6102124755cbc1ff5d14c77a77
|
refs/heads/master
| 2020-03-27T23:14:47.532297
| 2018-12-11T02:28:04
| 2018-12-11T02:28:04
| 147,304,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,039
|
py
|
1.数据库的技术的发展
2.相关概念:
数据库:
数据库系统:
数据库管理系统:
为什么使用mysql:
SQL 数据库语言
3.什么是mysql数据库?
mysql的优势?
是一款开源的自由的软件
是一款多用户、多线程的SQL数据库服务器
能够快捷、高效、安全的处理大量数据,使用简单,易操作
跨平台,可移植型强
支持多种操作系统
为多种编程语言提供了API
4. 安装mysql、启动、连接
5. 创建数据库、修改数据库、删除数据库
6.数据库的存储引擎(三大引擎)和数据库的字段类型(数字、字符串、日期)
7.数据库表的创建、删除、修改
创建表(字段、参数类型、为空、不为空、默认值、主键、自增)
查看数据库创建语句
添加列
修改列
删除列
重命名表
复制表
CREATE TABLE IF NOT EXISTS 表名 LIKE 要复制的表
|
[
"1083027306@qq.com"
] |
1083027306@qq.com
|
5ffc3d3bf7c7b8c6b4ccf0ee7f773bdbb5fed2f3
|
a10377a6d0c7576b9e47209f49dea398181f73fe
|
/extras/qpsclient.py
|
f399f7aa38285299a94de567adbf9fa214f42306
|
[
"BSD-3-Clause"
] |
permissive
|
zymITsky/ants
|
14077dab214aff543bbc75a059240dd55f656916
|
52918d18c94a9a69c3b2495286e3384ba57ad6f8
|
refs/heads/master
| 2020-06-01T11:04:53.520288
| 2015-02-03T08:09:59
| 2015-02-03T08:09:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,526
|
py
|
"""
A spider that generate light requests to meassure QPS troughput
usage:
scrapy runspider qpsclient.py --loglevel=INFO --set RANDOMIZE_DOWNLOAD_DELAY=0 --set CONCURRENT_REQUESTS=50 -a qps=10 -a latency=0.3
"""
from ants.spider import Spider
from ants.http import Request
class QPSSpider(Spider):
    """Spider that issues lightweight requests in a loop to measure QPS
    throughput against a local benchmark server.

    Configure via spider arguments: either `qps` (requests/second goal) or
    `download_delay`, plus optional `latency` (server-side response delay in
    seconds) and `slots` (number of distinct loopback hosts to spread
    requests over).
    """
    name = 'qps'
    benchurl = 'http://localhost:8880/'

    # Max concurrency is limited by global CONCURRENT_REQUESTS setting
    max_concurrent_requests = 8

    # Requests per second goal
    qps = None  # same as: 1 / download_delay
    download_delay = None

    # time in seconds to delay server responses
    latency = None

    # number of slots to create
    slots = 1

    def __init__(self, *a, **kw):
        super(QPSSpider, self).__init__(*a, **kw)
        # Spider arguments arrive as strings; normalize to floats. `qps`
        # takes precedence over an explicit `download_delay`.
        if self.qps is not None:
            self.qps = float(self.qps)
            self.download_delay = 1 / self.qps
        elif self.download_delay is not None:
            self.download_delay = float(self.download_delay)

    def start_requests(self):
        """Yield an endless round-robin stream of benchmark requests."""
        url = self.benchurl
        if self.latency is not None:
            url += '?latency={0}'.format(self.latency)
        slots = int(self.slots)
        if slots > 1:
            # One URL per download slot: each distinct 127.0.0.x host gets
            # its own slot in the downloader.
            # BUGFIX: `xrange` is Python-2-only (NameError on Python 3);
            # `range` iterates identically on both.
            urls = [url.replace('localhost', '127.0.0.%d' % (x + 1)) for x in range(slots)]
        else:
            urls = [url]
        idx = 0
        while True:
            url = urls[idx % len(urls)]
            yield Request(url, dont_filter=True)
            idx += 1

    def parse(self, response):
        # Responses are intentionally ignored; only throughput matters.
        pass
|
[
"cong.wang@tqmall.com"
] |
cong.wang@tqmall.com
|
b5329073e1fc97c5ba35195a87c08968c20a55b4
|
6fe2d3c27c4cb498b7ad6d9411cc8fa69f4a38f8
|
/algorithms/algorithms-python/leetcode_easy/Question_821_Shortest_Distance_to_a_Character.py
|
33695fa6230f0a4b098de6ed4589ea8ce26b3954
|
[] |
no_license
|
Lanceolata/code
|
aae54af632a212c878ce45b11dab919bba55bcb3
|
f7d5a7de27c3cc8a7a4abf63eab9ff9b21d512fb
|
refs/heads/master
| 2022-09-01T04:26:56.190829
| 2021-07-29T05:14:40
| 2021-07-29T05:14:40
| 87,202,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
#!/usr/bin/python
# coding: utf-8
class Solution(object):
    def shortestToChar(self, S, C):
        """
        :type S: str
        :type C: str
        :rtype: List[int]

        For every index of S, compute the distance to the closest
        occurrence of character C (two linear sweeps).
        """
        length = len(S)
        # Start `last` far to the left so the forward pass yields `length`
        # until the first occurrence of C is seen.
        dist = [length] * length
        last = -length
        # Forward sweep: distance to the nearest C on or to the left.
        for idx, ch in enumerate(S):
            if ch == C:
                last = idx
            dist[idx] = min(dist[idx], abs(idx - last))
        # Backward sweep: tighten with the nearest C on or to the right.
        for idx in range(length - 1, -1, -1):
            if S[idx] == C:
                last = idx
            dist[idx] = min(dist[idx], abs(idx - last))
        return dist
|
[
"lanceolatayuan@gmail.com"
] |
lanceolatayuan@gmail.com
|
6dc15d918ffa46a255df6032022b7e89718fe9b3
|
1cb2b8bf6f244f67e3b867a74a6297556b5e0167
|
/rmatics/view/protocol.py
|
f076bf3da0140903d3b344160a7d59b880c09656
|
[] |
no_license
|
ElinRin/informatics-alive
|
82f1be65e88860bc4b170b72da39811d30a681e6
|
59a0d1537b90481c2750a05172557bd88fcdcc98
|
refs/heads/master
| 2020-03-28T22:39:41.979770
| 2018-09-24T10:56:26
| 2018-09-24T10:56:26
| 149,248,491
| 0
| 0
| null | 2018-09-24T10:56:27
| 2018-09-18T07:44:24
|
Python
|
UTF-8
|
Python
| false
| false
| 8,016
|
py
|
import time
import traceback
from collections import OrderedDict
from flask import (
g,
jsonify,
Blueprint
)
from sqlalchemy import and_
from rmatics.model import db
from rmatics.model.ejudge_run import EjudgeRun
from rmatics.model.statement import Statement
from rmatics.utils.exceptions import (
InternalServerError,
RunAuthorOnly,
RunNotFound,
)
from rmatics.view import (
require_auth,
require_roles,
)
protocol = Blueprint('protocol', __name__, url_prefix='/protocol')
# log = logging.getLogger(__name__)
# # signal_description = {
# # 1 : "Hangup detected on controlling terminal or death of controlling process",
# # 2 : "Interrupt from keyboard",
# # 3 : "Quit from keyboard",
# # 4 : "Illegal Instruction",
# # 6 : "Abort signal",
# # 7 : "Bus error (bad memory access)",
# # 8 : "Floating point exception",
# # 9 : "Kill signal",
# # 11 : "Invalid memory reference",
# # 13 : "Broken pipe: write to pipe with no readers",
# # 14 : "Timer signal",
# # 15 : "Termination signal"
# # }
# TODO: Переместить в view/run (/run/id/protocol), убрать вложеные try/except
@protocol.route('/get/<int:contest_id>/<int:run_id>')
def get_protocol(contest_id, run_id):
    """Return the judge protocol for a run as JSON.

    During an active olympiad-mode statement only the first test is
    returned; otherwise the full per-test map is returned, with sample
    tests expanded to their full protocol. All failures are reported as
    JSON {'result': 'error', ...} payloads rather than HTTP errors.
    """
    try:
        run = EjudgeRun.get_by(run_id=run_id, contest_id=contest_id)
        try:
            run.fetch_tested_protocol_data()
            # No active olympiad statement for this user -> full protocol.
            if run.user.statement \
                    .filter(Statement.olympiad == 1) \
                    .filter(Statement.time_stop > time.time()) \
                    .filter(Statement.time_start < time.time()) \
                    .count() == 0:
                res = OrderedDict()
                if run.tests:
                    sample_tests = run.problem.sample_tests.split(',')
                    # Tests are keyed by 1-based string numbers.
                    for num in range(1, len(run.tests.keys()) + 1):
                        str_num = str(num)
                        if str_num in sample_tests:
                            # Sample tests expose their full protocol.
                            res[str_num] = run.get_test_full_protocol(str_num)
                        else:
                            res[str_num] = run.tests[str_num]
                return jsonify({
                    'tests': res,
                    'host': run.host,
                    'compiler_output': run.compiler_output,
                })
            else:
                # Olympiad in progress: reveal only the first test.
                try:
                    return jsonify({
                        'tests': run.tests['1'],
                        'host': run.host,
                        'compiler_output': run.compiler_output,
                    })
                except KeyError as e:
                    return jsonify({'result' : 'error', 'message' : e.__str__(), "stack" : traceback.format_exc()})
        except Exception as e:
            # Protocol fetch failed; surface the compilation protocol.
            return jsonify({'result' : 'error', 'message' : run.compilation_protocol, 'error' : e.__str__(), 'stack' : traceback.format_exc(), 'protocol': run.protocol})
    except Exception as e:
        # NOTE(review): if the run lookup itself failed, `run` is unbound
        # here and this handler would raise NameError — confirm intent.
        return jsonify({'result': 'error', 'message' : e.__str__(), 'stack': traceback.format_exc(), 'protocol': run.protocol})
@protocol.route('/get_v2/<int:contest_id>/<int:run_id>')
@require_auth
def protocol_get_v2(contest_id, run_id):
    """Return the judge protocol for the caller's own run (v2 endpoint).

    Unlike the v1 endpoint this raises typed API errors (404/403/500)
    instead of embedding errors in the JSON body, and is restricted to
    the run's author.
    """
    # TODO: rework the protocol format (report status by id) and get rid
    # of fetch_tested_protocol_data.
    run = db.session.query(EjudgeRun) \
        .filter(
            and_(
                EjudgeRun.run_id == run_id,
                EjudgeRun.contest_id == contest_id
            )
        ) \
        .first()
    if not run:
        raise RunNotFound
    # Only the author of the run may read its protocol.
    if g.user.ejudge_id != run.user_id:
        raise RunAuthorOnly
    try:
        run.fetch_tested_protocol_data()
    except Exception:
        raise InternalServerError
    tests_dict = OrderedDict()
    if run.tests:
        sample_tests = run.problem.sample_tests.split(',')
        # Tests are keyed by 1-based string numbers; samples get the full
        # protocol, the rest the short form.
        for num in range(1, len(run.tests.keys()) + 1):
            str_num = str(num)
            if str_num in sample_tests:
                tests_dict[str_num] = run.get_test_full_protocol(str_num)
            else:
                tests_dict[str_num] = run.tests[str_num]
    return jsonify({
        'tests': tests_dict,
        'host': run.host,
        'compiler_output': run.compiler_output,
    })
@protocol.route('/get-full/<int:contest_id>/<int:run_id>')
@require_roles('admin', 'teacher', 'ejudge_teacher')
def protocol_get_full(contest_id, run_id):
    """Return the full judge protocol (every test expanded) plus the audit
    log. Staff-only endpoint.

    Delegates to get_protocol() and, on success, replaces each per-test
    summary with the full per-test protocol.
    """
    run = EjudgeRun.get_by(run_id=run_id, contest_id=contest_id)
    # BUGFIX: the local was previously named `protocol`, shadowing the
    # module-level `protocol` blueprint inside this function.
    base_protocol = get_protocol(contest_id, run_id).json
    if base_protocol.get('result') == 'error':
        return base_protocol
    prot = base_protocol.get('tests', {})
    # (Removed dead `out_arch` variable: it was initialized to None and
    # never assigned, so the close() branch could never run.)
    for test_num in prot:
        prot[test_num] = run.get_test_full_protocol(test_num)
    full_protocol = {
        'tests': prot,
        'audit': run.get_audit(),
    }
    if base_protocol.get('compiler_output'):
        full_protocol['compiler_output'] = base_protocol['compiler_output']
    return jsonify(full_protocol)
# @view_config(route_name="protocol.get_test", renderer="string")
# @check_global_role(("teacher", "ejudge_teacher", "admin"))
# def protocol_get_test(request):
# contest_id = int(request.matchdict['contest_id'])
# run_id = int(request.matchdict['run_id'])
# run = EjudgeRun.get_by(run_id = run_id, contest_id = contest_id)
# prob = run.problem
# return prob.get_test(int(request.matchdict['test_num']), prob.get_test_size(int(request.matchdict['test_num'])))
# @view_config(route_name="protocol.get_corr", renderer="string")
# @check_global_role(("teacher", "ejudge_teacher", "admin"))
# def protocol_get_corr(request):
# contest_id = int(request.matchdict['contest_id'])
# run_id = int(request.matchdict['run_id'])
# run = EjudgeRun.get_by(run_id = run_id, contest_id = contest_id)
# prob = run.problem
# return prob.get_corr(int(request.matchdict['test_num']), prob.get_corr_size(int(request.matchdict['test_num'])))
# @view_config(route_name="protocol.get_outp", renderer="string")
# @check_global_role(("teacher", "ejudge_teacher", "admin"))
# def protocol_get_outp(request):
# contest_id = int(request.matchdict['contest_id'])
# run_id = int(request.matchdict['run_id'])
# run = EjudgeRun.get_by(run_id = run_id, contest_id = contest_id)
# return run.get_output_file(int(request.matchdict['test_num']), tp='o')
# @view_config(route_name="protocol.get_submit_archive", renderer="string")
# @check_global_role(("teacher", "ejudge_teacher", "admin"))
# def get_submit_archive(request):
# contest_id = int(request.matchdict['contest_id'])
# run_id = int(request.matchdict['run_id'])
# sources = "sources" in request.params
# all_tests = "all_tests" in request.params
# tests = request.params.get("tests", "")
# tests_set = set()
# for i in tests.split(" "):
# try:
# tests_set.add(int(i))
# except ValueError:
# pass
# run = EjudgeRun.get_by(run_id = run_id, contest_id = contest_id)
# run.parsetests
# prob = run.problem
# archive = BytesIO()
# zf = zipfile.ZipFile(archive, "w", zipfile.ZIP_DEFLATED)
# run.fetch_tested_protocol_data()
# for i in range(1, run.tests_count + 1):
# if all_tests or i in tests_set:
# zf.writestr("tests/{0:02}".format(i), prob.get_test(i, prob.get_test_size(i)))
# zf.writestr("tests/{0:02}.a".format(i), prob.get_corr(i, prob.get_corr_size(i)))
# if sources:
# zf.writestr("{0}{1}".format(run_id, get_lang_ext_by_id(run.lang_id)), run.get_sources())
# checker_src, checker_ext = prob.get_checker()
# zf.writestr("checker{}".format(checker_ext), checker_src)
# zf.close()
# archive.seek(0)
# response = Response(content_type="application/zip", content_disposition='attachment; filename="archive_{0}_{1}.zip"'.format(contest_id, run_id), body=archive.read())
# return response
|
[
"n.pakhtusov@tinkoff.ru"
] |
n.pakhtusov@tinkoff.ru
|
b77f5b032f953ed006c22bb0b7765506412aad56
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/LeetcodePythonProject_with_solution/leetcode_0801_0850/LeetCode818_RaceCar.py
|
8973cff01e19bfcaf0a4885b70d166e80525fb5d
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 941
|
py
|
'''
Created on May 1, 2018
@author: tongq
'''
c_ Solution(o..
___ -
hashmap {0:0}
___ racecar target
"""
:type target: int
:rtype: int
"""
__ target __ hashmap: r.. hashmap[target]
# Number of bits necessary to represent self in binary.
n target.bit_length()
__ 2**n-1 __ target:
hashmap[target] n
____
hashmap[target] racecar(2**n-1-target)+n+1
___ m __ r..(n-1
hashmap[target] m..(hashmap[target],\
racecar(target-2**(n-1)+2**m)+n+m+1)
r.. hashmap[target]
___ test
testCases [
3,
6,
]
___ target __ testCases:
print('target: %s' % target)
result racecar(target)
print('result: %s' % result)
print('-='*30+'-')
__ _____ __ _____
Solution().test()
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
1243495e4d86517d0efab71f93b5ef8c5692f8c5
|
64bf39b96a014b5d3f69b3311430185c64a7ff0e
|
/intro-ansible/venv2/lib/python3.8/site-packages/ansible/modules/database/influxdb/influxdb_user.py
|
6b78276dbad5b9feb48f678066ed402dec5dee69
|
[
"MIT"
] |
permissive
|
SimonFangCisco/dne-dna-code
|
7072eba7da0389e37507b7a2aa5f7d0c0735a220
|
2ea7d4f00212f502bc684ac257371ada73da1ca9
|
refs/heads/master
| 2023-03-10T23:10:31.392558
| 2021-02-25T15:04:36
| 2021-02-25T15:04:36
| 342,274,373
| 0
| 0
|
MIT
| 2021-02-25T14:39:22
| 2021-02-25T14:39:22
| null |
UTF-8
|
Python
| false
| false
| 5,248
|
py
|
#!/usr/bin/python
# (c) 2017, Vitaliy Zhhuta <zhhuta () gmail.com>
# insipred by Kamil Szczygiel <kamil.szczygiel () intel.com> influxdb_database module
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: influxdb_user
short_description: Manage InfluxDB users
description:
- Manage InfluxDB users
version_added: 2.5
author: "Vitaliy Zhhuta (@zhhuta)"
requirements:
- "python >= 2.6"
- "influxdb >= 0.9"
options:
user_name:
description:
- Name of the user.
required: True
user_password:
description:
- Password to be set for the user.
required: false
admin:
description:
- Whether the user should be in the admin role or not.
default: no
choices: [ yes, no]
state:
description:
- State of the user.
choices: [ present, absent ]
default: present
extends_documentation_fragment: influxdb
'''
EXAMPLES = '''
- name: Create a user on localhost using default login credentials
influxdb_user:
user_name: john
user_password: s3cr3t
- name: Create a user on localhost using custom login credentials
influxdb_user:
user_name: john
user_password: s3cr3t
login_username: "{{ influxdb_username }}"
login_password: "{{ influxdb_password }}"
- name: Create an admin user on a remote host using custom login credentials
influxdb_user:
user_name: john
user_password: s3cr3t
admin: yes
hostname: "{{ influxdb_hostname }}"
login_username: "{{ influxdb_username }}"
login_password: "{{ influxdb_password }}"
- name: Destroy a user using custom login credentials
influxdb_user:
user_name: john
login_username: "{{ influxdb_username }}"
login_password: "{{ influxdb_password }}"
state: absent
'''
RETURN = '''
#only defaults
'''
import ansible.module_utils.urls
from ansible.module_utils.basic import AnsibleModule
import ansible.module_utils.influxdb as influx
def find_user(module, client, user_name):
name = None
try:
names = client.get_list_users()
for u_name in names:
if u_name['user'] == user_name:
name = u_name
break
except ansible.module_utils.urls.ConnectionError as e:
module.fail_json(msg=str(e))
return name
def check_user_password(module, client, user_name, user_password):
try:
client.switch_user(user_name, user_password)
client.get_list_users()
except influx.exceptions.InfluxDBClientError as e:
if e.code == 401:
return False
except ansible.module_utils.urls.ConnectionError as e:
module.fail_json(msg=str(e))
finally:
# restore previous user
client.switch_user(module.params['username'], module.params['password'])
return True
def set_user_password(module, client, user_name, user_password):
if not module.check_mode:
try:
client.set_user_password(user_name, user_password)
except ansible.module_utils.urls.ConnectionError as e:
module.fail_json(msg=str(e))
module.exit_json(changed=True)
def create_user(module, client, user_name, user_password, admin):
if not module.check_mode:
try:
client.create_user(user_name, user_password, admin)
except ansible.module_utils.urls.ConnectionError as e:
module.fail_json(msg=str(e))
module.exit_json(changed=True)
def drop_user(module, client, user_name):
if not module.check_mode:
try:
client.drop_user(user_name)
except influx.exceptions.InfluxDBClientError as e:
module.fail_json(msg=e.content)
module.exit_json(changed=True)
def main():
argument_spec = influx.InfluxDb.influxdb_argument_spec()
argument_spec.update(
state=dict(default='present', type='str', choices=['present', 'absent']),
user_name=dict(required=True, type='str'),
user_password=dict(required=False, type='str', no_log=True),
admin=dict(default='False', type='bool')
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
state = module.params['state']
user_name = module.params['user_name']
user_password = module.params['user_password']
admin = module.params['admin']
influxdb = influx.InfluxDb(module)
client = influxdb.connect_to_influxdb()
user = find_user(module, client, user_name)
if state == 'present':
if user:
if check_user_password(module, client, user_name, user_password):
module.exit_json(changed=False)
else:
set_user_password(module, client, user_name, user_password)
else:
create_user(module, client, user_name, user_password, admin)
if state == 'absent':
if user:
drop_user(module, client, user_name)
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
|
[
"sifang@cisco.com"
] |
sifang@cisco.com
|
adfe58513a6fe2e4b6bbf9df0757e94a5dc4e27a
|
d735b8354e06eb26aa5ed0ac25ebf96bdd8d67b6
|
/python16/day1-21/day006 小数据池和编码/02 作业讲解.py
|
8d5be45087e50789765e5e05c7d549a69cf11dba
|
[] |
no_license
|
cn5036518/xq_py
|
e004766e6b2582ba37d7335320ed6b42f563c46c
|
ac932dc7fcb89a7a7faf8bda80791743755fd557
|
refs/heads/master
| 2021-07-15T18:44:19.244025
| 2020-09-12T09:38:25
| 2020-09-12T09:38:25
| 208,355,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,879
|
py
|
#重点题目回顾和重写
# 5、元素分类
# 有如下值li= [11,22,33,44,55,66,77,88,99,90],
# 将所有大于 66 的值保存至字典的第一个key中,
# 将小于 66 的值保存至第二个key的值中。
# 即: {'k1': 大于66的所有值列表, 'k2': 小于66的所有值列表}
#方法1 空字典{} 空列表 好理解
li= [11,22,33,44,55,66,77,88,99,90]
dic1 = {}
li1 = []
li2 = []
for i in li:
if i > 66:
li1.append(i) #空列表追加
else:
li2.append(i)
dic1["k1"] = li1 #字典新增键值对
dic1["k2"] = li2
print(dic1) #{'k1': [77, 88, 99, 90], 'k2': [11, 22, 33, 44, 55, 66]}
#方法2 非空字典{} #比较简洁,代码行数较少
li= [11,22,33,44,55,66,77,88,99,90]
dic1 = {"k1":[],"k2":[]}
for i in li:
if i > 66:
dic1["k1"].append(i)
else:
dic1["k2"].append(i)
print(dic1) #{'k2': [11, 22, 33, 44, 55, 66], 'k1': [77, 88, 99, 90]}
#方法3 字典的get方法
li= [11,22,33,44,55,66,77,88,99,90]
dic1 = {}
for i in li:
if i > 66:
if dic1.get("k1") == None: #1 如果key="k1"不存在,就新增
dic1["k1"] = [i] #注意点:这里必须是[i],而不能是i
else: #2 如果key=k1存在,就往value-列表中追加新元素
dic1["k1"].append(i)
else:
if dic1.get("k2") == None: #1 如果key="k2"不存在,就新增
dic1["k2"] = [i] #注意点:这里必须是[i],而不能是i
else: #2 如果key=k2存在,就往value-列表中追加新元素
dic1["k2"].append(i)
print(dic1) #{'k1': [77, 88, 99, 90], 'k2': [11, 22, 33, 44, 55, 66]}
#方法4 字典的setdefault方法
li= [11,22,33,44,55,66,77,88,99,90]
dic1 = {}
for i in li:
if i > 66:
ret1 = dic1.setdefault("k1",[]) #新增功能:"k1"之前不存在,就新增键值对;“k1”存在,就不操作(不覆盖)
# 返回value--查询功能
#注意:这里value必须是[],而不是[i]
ret1.append(i) #往列表中追加新元素
#ret1 第一次取值是[77]
#ret1 第二次取值是[77,88]
#ret1 第三次取值是[77,88,99],依次类推--过程重要 不跳步骤,足够耐心
else:
ret2 = dic1.setdefault("k2", []) # 新增:"k1"之前不存在,就新增键值对;“k1”存在,就不操作(不覆盖) 返回value
ret2.append(i) #往列表中追加新元素
print(dic1) #{'k2': [11, 22, 33, 44, 55, 66], 'k1': [77, 88, 99, 90]}
print("---------------1")
# 6、输出商品列表,用户输入序号,显示用户选中的商品(升级题)
#
# 商品列表:
goods = [{"name": "电脑", "price": 1999}, {"name": "鼠标", "price": 10},
{"name": "游艇", "price": 20}, {"name": "美女", "price": 998}, ]
#
# 要求:
# 1:页面显示 序号 + 商品名称 + 商品价格,如:
# 1 电脑 1999
# 2 鼠标 10
# …
# 2:用户输入选择的商品序号,然后打印商品名称及商品价格
# 3:如果用户输入的商品序号有误,则提示输入有误,并重新输入。
# 4:用户输入Q或者q,退出程序。
#6-1 不打印序号
# for i in goods:
# print(i["name"],i["price"])
#6-1 打印序号
for i in range(len(goods)): #i 是0-3
print(i+1,goods[i]["name"],goods[i]["price"])
print("---------------6-1")
#6-2 方法1 --不推荐
# 用户输入选择的商品序号,然后打印商品名称及商品价格
goods = [{"name": "电脑", "price": 1999}, {"name": "鼠标", "price": 10},
{"name": "游艇", "price": 20}, {"name": "美女", "price": 998}, ]
# for i in range(10): #0-9 限定输入10次
# # while 1: #不限定输入次数
# content = int(input("请输入商品编号:")) #输入的是字符串,需要转换成int #这里如果输入的不是字符串数字,就会报错
# #ValueError: invalid literal for int() with base 10: 'ss'
# #商品编号-1 = 索引编号
# if content>0 and content<=len(goods):
# print(goods[content-1]["name"],goods[content-1]["price"])
# else:
# print("没有找到对应的商品,请重新输入")
#6-2 方法2 推荐
# 2:用户输入选择的商品序号,然后打印商品名称及商品价格
# 3:如果用户输入的商品序号有误,则提示输入有误,并重新输入。
# 4:用户输入Q或者q,退出程序
while 1:
content = input("请输入商品编号:") #输入的是字符串
if content.upper() == "Q":
print("退出程序")
break
elif content.isdigit():
content = int(content)
if content>0 and content <=len(goods):
print(goods[content-1]["name"],goods[content-1]["price"])
else:
print("您输入的商品编号不存在,请重新输入")
else:
print("输入有误,请重新输入数字")
|
[
"wangtongpei@meicai.cn"
] |
wangtongpei@meicai.cn
|
8e6ee2d167ba76946304901f9b857a62506894e4
|
0cb08e9532758cbec1afe20eb41028d5f276e82d
|
/gs37(nested serializer )/api/admin.py
|
e3c0a4b8c2a8cde9f8fa47e15ccefb3248d11be7
|
[] |
no_license
|
P-iyushRaj/Django-Rest-Framework
|
1eca586ee6ded4720e8f1845c9c9cac06d637492
|
9d7e754156739118f725ab431d25bdde63ebd91d
|
refs/heads/master
| 2023-03-15T05:27:37.352592
| 2021-03-08T09:27:46
| 2021-03-08T09:27:46
| 345,599,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
from django.contrib import admin
# Register your models here.
from .models import Singer, Song
@admin.register(Singer)
class SingerAdmin(admin.ModelAdmin):
list_display=['id', 'name', 'gender']
@admin.register(Song)
class SongAdmin(admin.ModelAdmin):
list_display = ['id', 'title', 'singer', 'duration']
|
[
"piyush@gmail.com"
] |
piyush@gmail.com
|
f24665f25700be68e4a637774b61b744257416cb
|
37fef592f365194c28579f95abd222cc4e1243ae
|
/streamlit/venv/lib/python3.7/site-packages/plotly/graph_objs/scattermapbox/_line.py
|
24de4166109b0586711d43349e01d8f14fa88d1b
|
[] |
no_license
|
edimaudo/Python-projects
|
be61e0d3fff63fb7bd00513dbf1401e2c1822cfb
|
85d54badf82a0b653587a02e99daf389df62e012
|
refs/heads/master
| 2023-04-07T03:26:23.259959
| 2023-03-24T12:03:03
| 2023-03-24T12:03:03
| 72,611,253
| 4
| 3
| null | 2022-10-31T18:10:41
| 2016-11-02T06:37:17
| null |
UTF-8
|
Python
| false
| false
| 5,567
|
py
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scattermapbox"
_path_str = "scattermapbox.line"
_valid_props = {"color", "width"}
# color
# -----
@property
def color(self):
"""
Sets the line color.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# width
# -----
@property
def width(self):
"""
Sets the line width (in px).
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the line color.
width
Sets the line width (in px).
"""
def __init__(self, arg=None, color=None, width=None, **kwargs):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattermapbox.Line`
color
Sets the line color.
width
Sets the line width (in px).
Returns
-------
Line
"""
super(Line, self).__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattermapbox.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattermapbox.Line`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
[
"edimaudo@gmail.com"
] |
edimaudo@gmail.com
|
b4c5e3e144d510a232590523f174411f1d2e2336
|
b1c7a768f38e2e987a112da6170f49503b9db05f
|
/userprofile/migrations/0004_profile_date.py
|
d3cd49baf2783abbc91a484f53e46ccd13a7639a
|
[] |
no_license
|
Niladrykar/bracketerp
|
8b7491aa319f60ec3dcb5077258d75b0394db374
|
ca4ee60c2254c6c132a38ce52410059cc6b19cae
|
refs/heads/master
| 2022-12-11T04:23:07.504966
| 2019-03-18T06:58:13
| 2019-03-18T06:58:13
| 176,218,029
| 1
| 0
| null | 2022-12-08T03:01:46
| 2019-03-18T06:27:37
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 495
|
py
|
# Generated by Django 2.0.6 on 2018-09-19 07:05
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('userprofile', '0003_auto_20180915_1222'),
]
operations = [
migrations.AddField(
model_name='profile',
name='Date',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
]
|
[
"niladry.kar85@gmail.com"
] |
niladry.kar85@gmail.com
|
4a81108d031b334c2ee7e35ff0f338776b47c049
|
9a0e2312236b628007a67c07164ea7b97207e47c
|
/col/apps/logpoint_agent_collector/logpoint_agent_collector.py
|
e1650f10b279daff06f16f5235310f5f6c184d17
|
[] |
no_license
|
laxmi518/network_project
|
d88b9fe73522deaa90c1dbfd22c6861020a6c7be
|
2e998338f3d1142a8098d3dfd35f4c8ad0e4ba00
|
refs/heads/master
| 2020-05-21T15:48:07.830107
| 2018-05-09T18:58:37
| 2018-05-09T18:58:37
| 84,631,818
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 841
|
py
|
#!/usr/bin/env python
import logging
from pylib.wiring import gevent_zmq as zmq
from lib import fi_collector
from fi_applications import make_zip
from pylib import conf, wiring, textual
log = logging.getLogger(__name__)
def _parse_args():
options, config = conf.parse_config()
return config
def _prepare_application_directory(config):
make_zip.create_zipped_application_packages(config['basedir'])
def main():
zmq_context = zmq.Context()
config = _parse_args()
#config = textual.utf8(config)
#_prepare_application_directory(config)
fi_out = wiring.Wire('collector_out', zmq_context=zmq_context,
conf_path=config.get('wiring_conf_path') or None)
log.info('LogPoint_agent_collector starting...')
fi_collector.main(config, fi_out)
main()
|
[
"laxmi.jhapa@gmail.com"
] |
laxmi.jhapa@gmail.com
|
86124ffe93f07eff65064bf4b695bf53599a00e4
|
e70bc88ccc01a7616016d085a96f8f8c81ade50c
|
/tests/test_changelog.py
|
b3c324c81487c1c47a64f856f938f5422e291cae
|
[
"BSD-3-Clause"
] |
permissive
|
justcalamari/rever
|
1dc9b51c8338c3fb53c7c7adbb0eac59de2a8305
|
8bea796991a6ed354d45b053064324659b1f2b38
|
refs/heads/master
| 2021-04-29T19:34:57.352326
| 2018-02-28T04:59:59
| 2018-02-28T04:59:59
| 121,580,667
| 0
| 0
| null | 2018-02-15T01:26:15
| 2018-02-15T01:26:14
| null |
UTF-8
|
Python
| false
| false
| 2,976
|
py
|
"""Tests the changelog activity."""
import os
from rever import vcsutils
from rever.logger import current_logger
from rever.main import env_main
REVER_XSH = """
$ACTIVITIES = ['changelog']
$DAG['changelog'].kwargs = {
'filename': 'CHANGELOG.rst',
'ignore': ['TEMPLATE.rst'],
'news': 'nuws',
}
"""
CHANGELOG_RST = """.. current developments
v42.1.0
============
* And some other stuff happeneded.
"""
TEMPLATE_RST = """**Added:** None
**Changed:** None
**Deprecated:** None
**Removed:** None
**Fixed:** None
**Security:** None
"""
N0_RST = """**Added:**
* from n0
**Changed:** None
**Deprecated:** None
**Removed:**
* here
* and here
**Fixed:** None
**Security:** None
"""
N1_RST = """**Added:**
* from n1
**Changed:**
* But what martial arts are they mixing?
**Deprecated:** None
**Removed:**
* There
**Fixed:** None
**Security:** None
"""
CHANGELOG_42_1_1 = """.. current developments
v42.1.1
====================
**Added:**
* from n0
* from n1
**Changed:**
* But what martial arts are they mixing?
**Removed:**
* here
* and here
* There
v42.1.0
============
* And some other stuff happeneded.
"""
def test_changelog(gitrepo):
os.makedirs('nuws', exist_ok=True)
files = [('rever.xsh', REVER_XSH),
('CHANGELOG.rst', CHANGELOG_RST),
('nuws/TEMPLATE.rst', TEMPLATE_RST),
('nuws/n0.rst', N0_RST),
('nuws/n1.rst', N1_RST),
]
for filename, body in files:
with open(filename, 'w') as f:
f.write(body)
vcsutils.track('.')
vcsutils.commit('initial changelog and news')
env_main(['42.1.1'])
# now see if this worked
newsfiles = os.listdir('nuws')
assert 'TEMPLATE.rst' in newsfiles
assert 'n0.rst' not in newsfiles
assert 'n1.rst' not in newsfiles
with open('CHANGELOG.rst') as f:
cl = f.read()
assert CHANGELOG_42_1_1 == cl
# ensure that the updates were commited
logger = current_logger()
entries = logger.load()
assert entries[-2]['rev'] != entries[-1]['rev']
SETUP_XSH = """
$PROJECT = 'castlehouse'
$ACTIVITIES = ['changelog']
$REVER_DIR = 'rvr'
$CHANGELOG_FILENAME = 'CHANGELOG.rst'
$CHANGELOG_NEWS = 'nuws'
$CHANGELOG_TEMPLATE = 'TEMPLATE.rst'
"""
def test_changelog_setup(gitrepo):
os.makedirs('nuws', exist_ok=True)
files = [('rever.xsh', SETUP_XSH),
]
for filename, body in files:
with open(filename, 'w') as f:
f.write(body)
vcsutils.track('.')
vcsutils.commit('initial changelog')
env_main(['setup'])
# now see if this worked
newsfiles = os.listdir('nuws')
assert 'TEMPLATE.rst' in newsfiles
basefiles = os.listdir('.')
assert 'CHANGELOG.rst' in basefiles
with open('CHANGELOG.rst') as f:
cl = f.read()
assert 'castlehouse' in cl
assert '.gitignore' in basefiles
with open('.gitignore') as f:
gi = f.read()
assert '\n# Rever\nrvr/\n' in gi
|
[
"scopatz@gmail.com"
] |
scopatz@gmail.com
|
e2f31f47bc3e6ca77641e3545093fb0f071d4f07
|
ee4f74535a92687f51ebc77eee24408d8c925488
|
/blog/models.py
|
9966438cc7b73478e3cdafeec56ccc5a5f2f2eff
|
[] |
no_license
|
sandeepshiven/portfolio_project
|
ae617d2703c072e0e8c7ff1e1bf92de1b963f0e5
|
09e53d2594aed81876be84eea4339a5f2150bfb5
|
refs/heads/master
| 2022-12-01T02:20:39.305145
| 2019-12-19T14:26:19
| 2019-12-19T14:26:19
| 227,981,293
| 0
| 0
| null | 2022-11-22T04:55:01
| 2019-12-14T07:07:26
|
HTML
|
UTF-8
|
Python
| false
| false
| 382
|
py
|
from django.db import models
# Create your models here.
class Blog(models.Model):
title = models.CharField(max_length=50)
pub_date = models.DateTimeField()
body = models.TextField()
image = models.ImageField(upload_to = 'images/')
def __str__(self):
return self.title
def summary(self):
return self.body[:200]+"................."
|
[
"sandeepshiven0@gmail.com"
] |
sandeepshiven0@gmail.com
|
eb3dd994a59471aa0ad713fb833febeade24fb65
|
f13acd0d707ea9ab0d2f2f010717b35adcee142f
|
/Others/snuke/snuke21/a.py
|
68e931ecf644f095aa53460d65c107a576f9d12e
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
KATO-Hiro/AtCoder
|
126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7
|
bf43320bc1af606bfbd23c610b3432cddd1806b9
|
refs/heads/master
| 2023-08-18T20:06:42.876863
| 2023-08-17T23:45:21
| 2023-08-17T23:45:21
| 121,067,516
| 4
| 0
|
CC0-1.0
| 2023-09-14T21:59:38
| 2018-02-11T00:32:45
|
Python
|
UTF-8
|
Python
| false
| false
| 325
|
py
|
# -*- coding: utf-8 -*-
def main():
from math import sqrt
n = int(input())
q = 1 + 8 * n
if int(sqrt(q)) ** 2 == q:
if (-1 + int(sqrt(q))) % 2 == 0:
print((-1 + int(sqrt(q))) // 2)
else:
print(-1)
else:
print(-1)
if __name__ == '__main__':
main()
|
[
"k.hiro1818@gmail.com"
] |
k.hiro1818@gmail.com
|
b155d7cd688658411d1e3a3aa742052fb2a20181
|
01ac3dadac08b258af33f3e8d2f64229a8d5259a
|
/unit_tests/neb/pys_chk/RIB2PLYP/BOLi_LiBO_FIRE.py
|
820a1bcc1dd2f97c4fbdcadf0531dd269d6c5171
|
[] |
no_license
|
jminuse/clancelot
|
245f468dd87afe4e2d6ba2d518fe7b20754af7ac
|
82b4b3afc742d65aaddfce936f94137b0e4f628c
|
refs/heads/master
| 2021-01-23T14:03:46.811820
| 2016-09-20T19:14:50
| 2016-09-20T19:14:50
| 36,739,780
| 1
| 3
| null | 2016-04-04T14:38:50
| 2015-06-02T14:42:47
|
Python
|
UTF-8
|
Python
| false
| false
| 387
|
py
|
import sys
import files, neb
fptr = 'BOLi_LiBO'
frames = files.read_xyz('/fs/home/hch54/clancelot/unit_tests/neb/xyz/'+fptr+'.xyz')
opt = 'FIRE'
route = '! RI-B2PLYP D3BJ def2-TZVP def2-TZVP/C Grid3 FinalGrid5'
run_name = fptr[:fptr.find('.xyz')] + '_' + opt
neb.neb(run_name, frames, route, opt=opt, maxiter=1000, gtol=0.00183726383934, DFT='orca', alpha=0.1, dt=0.1, mem=40, Nmax=20)
|
[
"hherbol@gmail.com"
] |
hherbol@gmail.com
|
c4cb5e306919ee5c35fed911e857689a646c585b
|
1e9ad304868c2bda918c19eba3d7b122bac3923b
|
/kubernetes/test/test_v1_service_status.py
|
d5f786c2cbf01b2f85e8b1121bed9faa4be4318d
|
[
"Apache-2.0"
] |
permissive
|
pineking/client-python
|
c77e5bd3d476ac852e6dffa96056008baa0f597f
|
74a64d7325518f4298600d4bb300f92843c29347
|
refs/heads/master
| 2021-01-22T22:16:27.368406
| 2017-03-15T08:21:21
| 2017-03-15T08:21:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 867
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.5.1-660c2a2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_service_status import V1ServiceStatus
class TestV1ServiceStatus(unittest.TestCase):
""" V1ServiceStatus unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1ServiceStatus(self):
"""
Test V1ServiceStatus
"""
model = kubernetes.client.models.v1_service_status.V1ServiceStatus()
if __name__ == '__main__':
unittest.main()
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
06f0a4da06e156333a6a8b4a2dbbca61d50e8de5
|
99117515c0410cea29357adfdf71aa628241ece1
|
/posts/tests.py
|
77595ac51b8b58c5fe0042066b95086632b8d949
|
[
"MIT"
] |
permissive
|
kangsLee/9XD
|
e3a774184381a03722f09a4bed86132b67845118
|
f80ea88f85f4cc677c7d1b404101e2ffe8b538ea
|
refs/heads/master
| 2021-04-28T23:42:13.918903
| 2016-12-30T16:16:29
| 2016-12-30T16:16:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 839
|
py
|
from test_plus.test import TestCase
from posts.factories import PostFactory
class PostsTest(TestCase):
def test_get_list(self):
post = PostFactory()
post_list_url = self.reverse('post:list')
self.get_check_200(post_list_url)
self.assertResponseContains(post.title, html=False)
self.assertResponseContains(post.author.name, html=False)
write_url = self.reverse('post:create')
self.assertResponseContains(write_url, html=False)
def test_get_writing_page_with_login(self):
user = self.make_user('jelly jelly')
with self.login(username=user.username):
write_post_url = self.reverse('post:create')
self.get_check_200(write_post_url)
def test_get_writing_page_with_anonymous(self):
self.assertLoginRequired('post:create')
|
[
"chm073@gmail.com"
] |
chm073@gmail.com
|
9931d68bde183982f725f119901d39bc281b0f83
|
5d36864f5f9f1b737c4718703ee53c3aa715e398
|
/CourseGrading/3.2.11整数列表排序.py
|
1505665fa902de210a4dfcebd676dabf3f5b4234
|
[] |
no_license
|
xzl995/Python
|
d909274c9aba8ae9f18029a5f2069b1bb3418b9a
|
48d4add7a1d46b2e3773bdf096e834852115014d
|
refs/heads/master
| 2020-04-14T11:51:35.407548
| 2019-01-02T10:54:55
| 2019-01-02T10:54:55
| 163,824,702
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
"""
【问题描述】
输入一组整数,从小到大排序后,输出排序结果。
【输入形式】
一行。一组用空格隔开的整数。
【输出形式】
一行。一组用一个空格隔开的整数,从小到大排列。
【样例输入】
9 8 7 6
【样例输出】
6 7 8 9
【提示】
用列表的sort方法。
"""
num = input().split()
list = []
for n in num:
list.append(int(n))
list.sort()
print(" ".join(str(i) for i in list))
|
[
"595696893@qq.com"
] |
595696893@qq.com
|
135ba6b27144192daf557049497729eec9ce5091
|
5bcee9248d0bdebb134c61b4d0a3f3113337a569
|
/IV_term/02_lesson_2704/02_turtle_commands.py
|
34fc4f3dc65273ce01d64bb61b8de0376dda47e6
|
[] |
no_license
|
100ballovby/6V_Lesson
|
c2edbc652ea2ebec07eeed60060c16ae4b4792e4
|
4b6dfda323a628558bd63bd5569960004fc335dd
|
refs/heads/master
| 2023-05-08T07:49:14.569854
| 2021-05-25T06:40:53
| 2021-05-25T06:40:53
| 330,888,686
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
from turtle import *
tina = Turtle()
tina.shape('turtle')
tina.fd(100) # .fd(step), .forward(step) пойти вперед step шагов
tina.lt(45) # .lt(deg), .left(deg) повернуть влево на deg градусов
tina.bk(120) # .bk(step), .backward(step) пойти назад step шагов
tina.rt(90) # .rt(deg), right(deg) повернуть вправо на deg градусов
tina.up() # не рисовать
tina.fd(150)
tina.down() # рисовать
done()
|
[
"greatraksin@icloud.com"
] |
greatraksin@icloud.com
|
f1ae77ce6c8a31bdb0e7fbe79377aaef1b871b5d
|
b0bb5a3e1bd4efc3bdc035f477c2b14c40b3d62a
|
/blog/templatetags/blog_tags.py
|
f6655ef002989018795274f5d168b3d84b2eb876
|
[] |
no_license
|
yuansuixin/Myblog_Python
|
45c0bd610ab3dbbcdc10b4b4b9e45589f178ce29
|
a6b0c526371def0aab8fd7251517a97ce215168c
|
refs/heads/master
| 2021-09-05T21:02:37.227342
| 2018-01-31T01:31:14
| 2018-01-31T01:31:14
| 114,453,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 871
|
py
|
from django import template
from ..models import Post,Category,Tag
from django.db.models.aggregates import Count
from blog.models import Category
register = template.Library()
@register.simple_tag
def get_recent_posts(num=5):
return Post.objects.all().order_by('-created_time')[:num]
@register.simple_tag
def archives():
return Post.objects.dates('created_time', 'month', order='DESC')
@register.simple_tag
def get_categories():
# 别忘了在顶部引入 Category 类
# 记得在顶部引入 count 函数
# Count 计算分类下的文章数,其接受的参数为需要计数的模型的名称
return Category.objects.annotate(num_posts=Count('post')).filter(num_posts__gt=0)
@register.simple_tag
def get_tags():
# 记得在顶部引入 Tag model
return Tag.objects.annotate(num_posts=Count('post')).filter(num_posts__gt=0)
|
[
"cyss428@163.com"
] |
cyss428@163.com
|
5e3a365993b2027bd0ca4a439c8b29a1a03cc4e5
|
db54727e37a0928ed698171e862ad274b99a02b1
|
/get_angelist_jobs_by_location.py
|
a8f0282858b2fbff8abaa29e875b1ae5bc664c2a
|
[
"MIT"
] |
permissive
|
tibbetts/sfba-compensation
|
6514a58c004b468c5eb0b204087d5a1d8d168a12
|
89a36391c7dd778b6343e67c2f8d587651b1d7f2
|
refs/heads/master
| 2020-04-05T23:46:12.264717
| 2016-06-01T05:09:35
| 2016-06-01T05:09:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,524
|
py
|
import urllib2
import json
import datetime
import csv
import time
#https://api.angel.co/1/tags/151282/jobs?access_token=xxx
access_token = "<FILL IN>" # DO NOT SHARE WITH ANYONE!
location_tag = 151282 # San Francisco Bay Area
# location_tag = 1664 # New York City
def request_until_succeed(url):
req = urllib2.Request(url)
success = False
while success is False:
try:
response = urllib2.urlopen(req)
if response.getcode() == 200:
success = True
except Exception, e:
print e
time.sleep(5)
print "Error for URL %s: %s" % (url, datetime.datetime.now())
return response.read()
# Needed to write tricky unicode correctly to csv; not present in tutorial
def unicode_normalize(text):
return text.translate({ 0x2018:0x27, 0x2019:0x27, 0x201C:0x22, 0x201D:0x22, 0xa0:0x20 }).encode('utf-8')
def getAngelListPageFeedData(location_tag, access_token, page_num):
# construct the URL string
url = "https://api.angel.co/1/tags/%s/jobs?access_token=%s&page=%s" % (location_tag, access_token, page_num)
# retrieve data
data = json.loads(request_until_succeed(url))
return data
def processAngelListPageFeedStatus(job):
# The status is now a Python dictionary, so for top-level items,
# we can simply call the key.
# Additionally, some items may not always exist,
# so must check for existence first
job_id = job['id']
job_title = '' if 'title' not in job.keys() else unicode_normalize(job['title'])
job_type = '' if 'job_type' not in job.keys() else unicode_normalize(job['job_type'])
job_city = [unicode_normalize(tag['display_name']) for tag in job['tags'] if tag['tag_type'] == 'LocationTag'][0].decode('utf-8')
salary_min = '' if 'salary_min' not in job.keys() else job['salary_min']
salary_max = '' if 'salary_max' not in job.keys() else job['salary_max']
equity_cliff = '' if 'equity_cliff' not in job.keys() else job['equity_cliff']
equity_vest = '' if 'equity_vest' not in job.keys() else job['equity_vest']
equity_min = '' if 'equity_min' not in job.keys() else job['equity_min']
equity_max = '' if 'equity_max' not in job.keys() else job['equity_max']
roles = ', '.join([unicode_normalize(tag['display_name']) for tag in job['tags'] if tag['tag_type'] == 'RoleTag']).decode('utf-8')
skills = ', '.join([unicode_normalize(tag['display_name']) for tag in job['tags'] if tag['tag_type'] == 'SkillTag']).decode('utf-8')
# Time needs special care since a) it's in UTC and
# b) it's not easy to use in statistical programs.
updated_at = datetime.datetime.strptime(job['updated_at'],'%Y-%m-%dT%H:%M:%SZ')
updated_at = updated_at + datetime.timedelta(hours=-8) # PST
updated_at = updated_at.strftime('%Y-%m-%d %H:%M:%S') # best time format for spreadsheet programs
# return a tuple of all processed data
return (job_id, job_title, job_type, job_city, salary_min, salary_max, equity_cliff,
equity_vest, equity_min, equity_max, roles, skills, updated_at)
def scrapeAngelListPageFeedStatus(location_tag, access_token):
with open('%s_angelist_jobs.csv' % location_tag, 'wb') as file:
w = csv.writer(file)
w.writerow(['job_id', 'job_title', 'job_type', 'job_city', 'salary_min', 'salary_max', 'equity_cliff', 'equity_vest', 'equity_min', 'equity_max', 'roles', 'skills', 'updated_at'])
has_next_page = True
page = 1
num_processed = 0 # keep a count on how many we've processed
scrape_starttime = datetime.datetime.now()
print "Scraping %s AngelList Page: %s\n" % (location_tag, scrape_starttime)
while has_next_page:
data = getAngelListPageFeedData(location_tag, access_token, page)
for job in data['jobs']:
w.writerow(processAngelListPageFeedStatus(job))
# output progress occasionally to make sure code is not stalling
num_processed += 1
if num_processed % 100 == 0:
print "%s Jobs Processed: %s" % (num_processed, datetime.datetime.now())
# if there is no next page, we're done.
if data['last_page'] == page:
has_next_page = False
else:
page += 1
#print "\nDone!\n%s Jobs Processed in %s" % (num_processed, datetime.datetime.now() - scrape_starttime)
if __name__ == '__main__':
scrapeAngelListPageFeedStatus(location_tag, access_token)
# The CSV can be opened in all major statistical programs. Have fun! :)
|
[
"max@minimaxir.com"
] |
max@minimaxir.com
|
9734b8d821852dd069ae730eb6e5581f79278c28
|
3bdcb60b0bffeeb6ff7b0ddca4792b682158bb12
|
/Sentencias Condicionales/4.1.8-90.py
|
c0d1ba9b3e6d0df836a98fc9e064ff10920a2037
|
[] |
no_license
|
FrankCasanova/Python
|
03c811801ec8ecd5ace66914f984a94f12befe06
|
03f15100991724a49437df3ce704837812173fc5
|
refs/heads/master
| 2023-05-23T01:37:12.632204
| 2021-06-10T15:20:38
| 2021-06-10T15:20:38
| 278,167,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 784
|
py
|
#CORRECCIÓN DE PROGRAMA.
from math import pi
radio = float(input('Dame el radio de un circulo: '))
#menú
print('Escoge una opción: ')
print('a) Calcular el diámetro: ')
print('b) Calcular el perímetro: ')
print('c) Calcular el área: ')
opción = input('Teclea a, b o c y pulsa intro: ')
if opción.lower() == 'a':
diámetro = 2*pi
print('El diámetro es {0}'.format(diámetro))
else:
if opción.lower() == 'b':
perímetro = 2*pi*radio
print('El perímetro es {0}'.format(perímetro))
else:
if opción.lower() == 'c':
área = pi*radio**2
print('El área es {0}'.format(área))
else:
print('Solo hay 3 opciónes: a, b o c.')
print('Tú has tecleado {0}'.format(opción))
|
[
"frankcasanova.info@gmail.comm"
] |
frankcasanova.info@gmail.comm
|
ea92fcc7c3a19e5cc1c4276c6e75738bd12b4d5b
|
9d58364bd43fbb4edf60fba3425e9dc006097e22
|
/product/serializers.py
|
e6571d2151ef723f8b3eab7b9cfa5344cf3c8373
|
[] |
no_license
|
darkblank/commerce-toy
|
a92ee2634777fa9997cb52c57f2fd8a68877677c
|
be8b4b7633204c83615853835c3772369e37766a
|
refs/heads/master
| 2022-12-03T14:18:54.966662
| 2020-08-12T22:29:49
| 2020-08-12T22:29:49
| 287,127,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,166
|
py
|
from rest_framework import serializers
from product.models import Product, ProductOption
from user.serializers import ProviderSerializer
class ProductOptionSerializer(serializers.ModelSerializer):
class Meta:
model = ProductOption
fields = (
'id', 'stock', 'name',
)
class ProductSerializer(serializers.ModelSerializer):
provider = ProviderSerializer()
options = ProductOptionSerializer(many=True)
class Meta:
model = Product
fields = (
'id', 'name', 'price', 'shipping_price',
'is_on_sale', 'can_bundle', 'created_at', 'updated_at', 'provider', 'options',
)
class ProductWithoutOptionsSerializer(ProductSerializer):
class Meta:
model = Product
fields = (
'id', 'name', 'price', 'shipping_price',
'is_on_sale', 'can_bundle', 'created_at', 'updated_at', 'provider',
)
class ProductOptionWithProductSerializer(ProductOptionSerializer):
product = ProductWithoutOptionsSerializer()
class Meta:
model = ProductOption
fields = (
'id', 'stock', 'name', 'product',
)
|
[
"darkblank1990@gmail.com"
] |
darkblank1990@gmail.com
|
c37f6418fac1b3ca989225a2b5d667bc7a56df58
|
0e478f3d8b6c323c093455428c9094c45de13bac
|
/src/OTLMOW/PostenMapping/Model/Post060352270.py
|
1361d8f30b13daab25781738ad6daac5b6a201ce
|
[
"MIT"
] |
permissive
|
davidvlaminck/OTLMOW
|
c6eae90b2cab8a741271002cde454427ca8b75ba
|
48f8c357c475da1d2a1bc7820556843d4b37838d
|
refs/heads/main
| 2023-01-12T05:08:40.442734
| 2023-01-10T15:26:39
| 2023-01-10T15:26:39
| 432,681,113
| 3
| 1
|
MIT
| 2022-06-20T20:36:00
| 2021-11-28T10:28:24
|
Python
|
UTF-8
|
Python
| false
| false
| 3,846
|
py
|
# coding=utf-8
from OTLMOW.PostenMapping.StandaardPost import StandaardPost
from OTLMOW.PostenMapping.StandaardPostMapping import StandaardPostMapping
# Generated with PostenCreator. To modify: extend, do not edit
class Post060352270(StandaardPost):
def __init__(self):
super().__init__(
nummer='0603.52270',
beschrijving='Bestrating van gebakken straatstenen, standaardkwaliteitsklasse B volgens 6-3.6, dikformaat, hoogte ca. 70 mm',
meetstaateenheid='M2',
mappings=[StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanGebakkenStraatsteen',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanGebakkenStraatsteen.formaatVanBestratingselement',
dotnotation='formaatVanBestratingselement',
defaultWaarde='dikformaat-(ca.-200-x-ca.-65-mm)',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.52270')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanGebakkenStraatsteen',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.laagRol',
dotnotation='laagRol',
defaultWaarde='straatlaag',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.52270')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanGebakkenStraatsteen',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanGebakkenStraatsteen.standaardkwaliteitsklasse',
dotnotation='standaardkwaliteitsklasse',
defaultWaarde='b',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.52270')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanGebakkenStraatsteen',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#LaagDikte.dikte',
dotnotation='dikte',
defaultWaarde='7',
range='',
usagenote='cm^^cdt:ucumunit',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.52270')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanGebakkenStraatsteen',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.oppervlakte',
dotnotation='oppervlakte',
defaultWaarde='',
range='',
usagenote='m2^^cdt:ucumunit',
isMeetstaatAttr=1,
isAltijdInTeVullen=1,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.52270')])
|
[
"david.vlaminck@mow.vlaanderen.be"
] |
david.vlaminck@mow.vlaanderen.be
|
9e5d5b8517e58b290fe195b2aab1e2d9938074d6
|
aced407b41f6669f69e9eb8bd599260d50c0bd3f
|
/server/libs/top/api/rest/TaobaokeMobileListurlGetRequest.py
|
b09b1da208bf08fea60e8912f9988fb2a936ad96
|
[] |
no_license
|
alswl/music_sofa
|
42f7d15431f11b97bf67b604cfde0a0e9e3860cc
|
c4e5425ef6c80c3e57c91ba568f7cbfe63faa378
|
refs/heads/master
| 2016-09-12T18:37:34.357510
| 2016-05-20T11:49:52
| 2016-05-20T11:49:52
| 58,946,171
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
'''
Created by auto_sdk on 2013-11-07 12:53:22
'''
from top.api.base import RestApi
class TaobaokeMobileListurlGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.q = None
def getapiname(self):
return 'taobao.taobaoke.mobile.listurl.get'
|
[
"alswlx@gmail.com"
] |
alswlx@gmail.com
|
c9364b17c64e0b99950c44eaf940ac5a5081aea4
|
b1403c5a0f8dcf5eec881367f3928c6faf047b1d
|
/PRINCIPIANTE/1051-1100/1098.py
|
d3a71591410aff0c977f88c66d582bdb8e28a005
|
[] |
no_license
|
apesquero/URI
|
ba56f9d597e0e781bf85dc14eeeedf9bc206fbc4
|
8c45813d99eb903405ebe1a0e2c6618e87025641
|
refs/heads/master
| 2021-06-19T23:56:51.798180
| 2021-01-01T18:43:20
| 2021-01-01T18:43:20
| 138,192,837
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
# -*- coding: utf-8 -*-
i = 0
while i <= 2:
for x in range(3):
print("I={:g} J={:g}" .format(i, x+1+i))
i += 0.2
|
[
"apesquero@gmail.com"
] |
apesquero@gmail.com
|
865ae8442eadb66eb5b33a7c998542e89eb501ce
|
c1aac38b1ee7bf7b8621050bd4837a60744bfd9f
|
/ay_hw_4/main_test.py
|
2b487bd9621a3f4e1763f4d5ad1fd4560b536838
|
[] |
no_license
|
callmeorangecat/INF_552
|
3b0a007a37963fcd57396dab96d3f17ee20b0eb6
|
cdcaf20e549bfa2d5942f91f2ce3b4a93d1beba9
|
refs/heads/master
| 2021-02-17T15:04:32.222974
| 2020-02-21T20:31:54
| 2020-02-21T20:31:54
| 245,105,595
| 0
| 1
| null | 2020-03-05T08:16:29
| 2020-03-05T08:16:28
| null |
UTF-8
|
Python
| false
| false
| 1,999
|
py
|
#
__author__ = 'Aaron Yang'
__email__ = 'byang971@usc.edu'
__date__ = '9/27/2019 11:13 PM'
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
if __name__ == "__main__":
x = np.random.uniform(-3, 3, size=100)
X = x.reshape(-1, 1)
# assume the relationship between X and Y is linear y = 0.5X + 3 + ε
y = 3 + 0.5 * x + np.random.normal(0, 0.5, 100)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=2333)
print(X_train.shape)
# Since the formula of MSE is 1 /n * (∑((y_hat - y_true)^2)) and RSS is ∑((y_hat - y_true)^2)
# so MSE = 1 / n * (RSS), we can use MSE to evaluate our algorithm for convenience
# get MSE(RSS) fpr the linear regression
lin_reg = LinearRegression()
lin_reg.fit(X_train, y_train)
lin_p1 = lin_reg.predict(X_train)
lin_p2 = lin_reg.predict(X_test)
# get MSE(RSS) for the cubic regression
cubic_reg = Pipeline([("polynomial_features", PolynomialFeatures(degree=3)),
("linear_regression", LinearRegression())])
cubic_reg.fit(X_train, y_train)
cub_p1 = cubic_reg.predict(X_train)
cub_p2 = cubic_reg.predict(X_test)
# plot figures
plt.scatter(x, y)
plt.plot(X_train.tolist(), lin_p1, color='r', label="linear Train")
plt.scatter(X_train.tolist(), cub_p1.tolist(), color='g', label="cubic Train")
# plt.plot(X_test.tolist(), y_p1, color='r', label="linear Test")
# plt.plot(X_test.tolist(), y_p2, color='g', label="cubic Test")
plt.legend()
print("MSE for linear regression: ", mean_squared_error(y_train, lin_p1))
print("MSE for cubic regression: ", mean_squared_error(y_train, cub_p1))
print("------------------")
print("MSE for linear regression: ", mean_squared_error(y_test, lin_p2))
print("MSE for cubic regression: ", mean_squared_error(y_test, cub_p2))
plt.show()
|
[
"aaron19940628@gmail.com"
] |
aaron19940628@gmail.com
|
e58bbef1e15f9f5ad7ab00b1d2269ab475394474
|
726d164e2dcf0c13beacf5ac23db1f76e34645f1
|
/src/scripts/mcce/count_conf.py
|
a84c651e305eb12a71987d33c9e42616df9aab6f
|
[] |
no_license
|
zxiaoyao/br_pscript
|
57bde5940d0d18704052f705c2025e4ba73e1b29
|
a3023b33a4332d0d77389f1d64c5af1b19a80a95
|
refs/heads/master
| 2020-06-01T13:39:39.192776
| 2015-06-09T04:26:39
| 2015-06-09T04:26:39
| 16,268,020
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 849
|
py
|
#!/usr/bin/python
def count_conf():
'''Count the number of conformers in step2_out.pdb."
Assuming the atoms in a conformer are in consecutive lines in step2_out.pdb.
'''
atoms = open("step2_out.pdb").readlines()
for line in atoms:
if line[:6] != 'ATOM ' and line[:6] != 'HETATM': continue
if line[27:30] == '000': continue
confid_old = line[21:30]
break
count = 0
for line in atoms:
if line[:6] != 'ATOM ' and line[:6] != 'HETATM': continue
if line[27:30] == '000': continue
confid_new = line[21:30]
if confid_new == confid_old:
continue
else:
count = count + 1
confid_old = confid_new
count = count + 1
return count
if __name__ == "__main__":
counter = count_conf()
print 'there are ', counter, 'confermers'
|
[
"zhuxuyu@gmail.com"
] |
zhuxuyu@gmail.com
|
59f73e7d1222effb04a6343a2dd82ffeabb56260
|
3b2971ac117cf597e60233afffbf088989d7abdb
|
/tests/settings.py
|
6f66deb4674338780d11af86a8f3faa9ca1eabd7
|
[
"BSD-3-Clause"
] |
permissive
|
ubernostrum/pwned-passwords-django
|
7b8e27ce774e1d698eef382a67664301be61bdbd
|
c615c2b80eab2f6554b6f113e99c658dd722c5cd
|
refs/heads/trunk
| 2023-04-10T05:33:44.797687
| 2023-03-27T05:25:45
| 2023-03-27T05:25:45
| 124,067,069
| 116
| 8
|
BSD-3-Clause
| 2023-09-11T06:26:23
| 2018-03-06T11:15:11
|
Python
|
UTF-8
|
Python
| false
| false
| 722
|
py
|
"""
Minimal Django settings file for test runs.
"""
from django.utils.crypto import get_random_string
INSTALLED_APPS = ["pwned_passwords_django"]
ROOT_URLCONF = "tests.urls"
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}}
MIDDLEWARE = [
"pwned_passwords_django.middleware.pwned_passwords_middleware",
]
AUTH_PASSWORD_VALIDATORS = [
{"NAME": "pwned_passwords_django.validators.PwnedPasswordsValidator"}
]
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"handlers": {"null": {"class": "logging.NullHandler"}},
"loggers": {
"pwned_passwords_django.api": {"handlers": ["null"], "propagate": False}
},
}
SECRET_KEY = get_random_string(12)
|
[
"james@b-list.org"
] |
james@b-list.org
|
9fc65a9e22b154df1f46b9485b329b31a238ff3b
|
96f32051b1fcd322534fdf1f590704b7f48e08e3
|
/otx_epub/migrations/0003_auto_20161027_2317.py
|
22756320fbeb9f4e40103f15ce8d9ebe93f84e72
|
[
"Apache-2.0"
] |
permissive
|
NYULibraries/dlts-enm-tct-backend
|
0f1ebad7d7334d41b4f24b2243639314c6ed1353
|
07455a660fb2cb8bc91a54f7f12d150923678157
|
refs/heads/master
| 2020-05-18T14:36:31.041830
| 2017-09-30T17:51:16
| 2017-09-30T17:51:16
| 84,251,389
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,094
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-28 03:17
from __future__ import unicode_literals
from django.db import migrations, models
import otx_epub.storage
class Migration(migrations.Migration):
dependencies = [
('otx_epub', '0002_epub_contents'),
]
operations = [
migrations.AlterField(
model_name='epub',
name='contents',
field=models.FilePathField(allow_files=False, allow_folders=True, path='/vagrant/nyu/media/epub_decompressed', recursive=True),
),
migrations.AlterField(
model_name='epub',
name='manifest',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='epub',
name='oebps_folder',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='epub',
name='source',
field=models.FileField(storage=otx_epub.storage.OverwriteStorage(), upload_to='epubs'),
),
]
|
[
"go.for.dover@gmail.com"
] |
go.for.dover@gmail.com
|
9c9273b0557cfea35937de7e9f7ed2c0f11e495f
|
09f8a3825c5109a6cec94ae34ea17d9ace66f381
|
/cohesity_management_sdk/models/view_intent.py
|
e28eef393c010d4f1cd356037269faaa14fa16a2
|
[
"Apache-2.0"
] |
permissive
|
cohesity/management-sdk-python
|
103ee07b2f047da69d7b1edfae39d218295d1747
|
e4973dfeb836266904d0369ea845513c7acf261e
|
refs/heads/master
| 2023-08-04T06:30:37.551358
| 2023-07-19T12:02:12
| 2023-07-19T12:02:12
| 134,367,879
| 24
| 20
|
Apache-2.0
| 2023-08-31T04:37:28
| 2018-05-22T06:04:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,660
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Cohesity Inc.
class ViewIntent(object):
"""Implementation of the 'ViewIntent' model.
Specifies the Intent of the View.
Attributes:
template_id (long|int): Specifies the template Id from which the View
is created.
template_name (string): Specifies the template name from which the View
is created.
"""
# Create a mapping from Model property names to API property names
_names = {
"template_id":'TemplateId',
"template_name":'TemplateName',
}
def __init__(self,
template_id=None,
template_name=None,
):
"""Constructor for the ViewIntent class"""
# Initialize members of the class
self.template_id = template_id
self.template_name = template_name
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
template_id = dictionary.get('TemplateId')
template_name = dictionary.get('TemplateName')
# Return an object of this model
return cls(
template_id,
template_name
)
|
[
"naveena.maplelabs@cohesity.com"
] |
naveena.maplelabs@cohesity.com
|
ea98fab7c614bb2763e13a4a538ea3249fe899bf
|
afc3558e47ea4c82cb70190743472274eae7aeb1
|
/configs/textrecog/master/master_resnet31_12e_toy.py
|
adf14636518c6aa154da397106d2aae8b008226c
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmocr
|
86a77fb77ca80cede9c41a9a22080eeeaf364002
|
9551af6e5a2482e72a2af1e3b8597fd54b999d69
|
refs/heads/main
| 2023-08-03T14:06:11.075037
| 2023-07-26T02:32:14
| 2023-07-26T02:32:14
| 355,559,187
| 3,734
| 801
|
Apache-2.0
| 2023-09-12T03:17:12
| 2021-04-07T13:40:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,164
|
py
|
_base_ = [
'_base_master_resnet31.py',
'../_base_/datasets/toy_data.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_adam_base.py',
]
optim_wrapper = dict(optimizer=dict(lr=4e-4))
train_cfg = dict(max_epochs=12)
# learning policy
param_scheduler = [
dict(type='LinearLR', end=100, by_epoch=False),
dict(type='MultiStepLR', milestones=[11], end=12),
]
# dataset settings
train_list = [_base_.toy_rec_train]
test_list = [_base_.toy_rec_test]
train_dataset = dict(
type='ConcatDataset', datasets=train_list, pipeline=_base_.train_pipeline)
test_dataset = dict(
type='ConcatDataset', datasets=test_list, pipeline=_base_.test_pipeline)
train_dataloader = dict(
batch_size=2,
num_workers=1,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=train_dataset)
val_dataloader = dict(
batch_size=2,
num_workers=1,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=test_dataset)
test_dataloader = val_dataloader
val_evaluator = dict(dataset_prefixes=['Toy'])
test_evaluator = val_evaluator
|
[
"noreply@github.com"
] |
open-mmlab.noreply@github.com
|
21df06ff4d70b2fe784d4a83c9a4ea9391b1f1a8
|
71297da3cf9e0cc5e2e3eb29477ed77150bf93fa
|
/baekjoon/10984.py
|
0e27694252233b2a2f65c64c0406100c715057e8
|
[] |
no_license
|
Kangjinwoojwk/algorithm
|
7c8e4189c384f7104225284e9e0a2e2a5cdd18df
|
c92da7410523d340240bbe9256cecf710c4e641e
|
refs/heads/master
| 2022-11-15T19:11:33.918644
| 2022-11-10T03:13:50
| 2022-11-10T03:13:50
| 184,699,850
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
for tc in range(int(input())):
A, G = 0, 0
for i in range(int(input())):
a, b = map(float, input().split())
A += a
G += a * b
G /= A
print('{} {}'.format(int(A), round(G, 1)))
|
[
"kjw03230@naver.com"
] |
kjw03230@naver.com
|
56f43f8aff84049b1d9ce85fb3c2d3fb0e5bc6ab
|
0431fb263e38422585edca273fb47ef92fd22243
|
/dataloaders/data_atari.py
|
50f23b17a6e6faf07dfeb81276ef50ed9522166d
|
[] |
no_license
|
RRoundTable/EEN-with-Keras
|
a6c3352eafc05fcb7ed41463d637a684de9a0b27
|
ae71903afa05135f5eb6e2797854969f5a082958
|
refs/heads/master
| 2020-04-28T03:17:25.762629
| 2019-04-27T12:19:13
| 2019-04-27T12:19:13
| 174,930,756
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,310
|
py
|
import numpy, os, random, glob, pdb, gc
import pickle as pickle
from scipy import misc
#import torch, torchvision
from tensorflow.python.keras.backend import *
import copy, time
class ImageLoader(object):
def _load_set(self, split, n_episodes):
datalist = []
datapath = '{}/{}'.format(self.arg.get("datapath"), split)
flist = os.listdir(datapath)
print('loading {} new episodes for {} set'.format(n_episodes, split))
for i in range(1, n_episodes):
if split == 'train':
fdname = random.choice(flist)
else:
fdname = flist[i]
abs_fdname = '{}/{}'.format(datapath, fdname)
episode = None
while (episode == None):
# print('loading {}'.format(abs_fdname))
try:
episode = numpy.load(abs_fdname)
except:
print('problem loading {}'.format(abs_fdname))
break
states = episode['states']
actions = episode['actions']
assert(len(states) == len(actions))
assert(len(states) > 0)
datalist.append({'states': states, 'actions': actions})
episode.close()
gc.collect()
return datalist
def __init__(self, arg):
super(ImageLoader, self).__init__()
self.arg = arg
self.datalist = []
self.h = arg.get('height')
self.w = arg.get('width')
self.nc = arg.get('nc')
self.ncond = arg.get('ncond', 4)
self.npred = arg.get('npred', 4)
self.datalist_train = self._load_set('train', 500)
self.datalist_valid = self._load_set('valid', 200)
self.datalist_test = self._load_set('test', 200)
self.iter_video_ptr = 0
self.iter_sample_ptr = self.ncond
self.train_batch_cntr = 0
print("Dataloader for Atari constructed done")
def reset_ptrs(self):
self.iter_video_ptr = 0
self.iter_sample_ptr = self.ncond
def _sample_time(self, video, actions, num_cond, num_pred):
start_pos = random.randint(num_cond+1, video.shape[0]-num_pred-2)
cond_frames = video[start_pos-num_cond:start_pos]
pred_frames = video[start_pos:start_pos+num_pred]
actions = actions[start_pos:start_pos+num_pred]
return cond_frames, pred_frames, actions
def _iterate_time(self, video, start_pos, actions, num_cond, num_pred):
cond_frames = video[start_pos-num_cond:start_pos]
pred_frames = video[start_pos:start_pos+num_pred]
actions = actions[start_pos:start_pos+num_pred]
return cond_frames, pred_frames, actions
def get_batch(self, split):
cond_frames, pred_frames, actions = [], [], []
if split == 'train':
# since the training set is large, we fetch new episodes every so often
if self.train_batch_cntr == 1000:
self.datalist_train = self._load_set('train', 500)
self.train_batch_cntr = 0
else:
self.train_batch_cntr += 1
this_set = self.datalist_train
elif split == 'valid':
this_set = self.datalist_valid
elif split == 'test':
this_set = self.datalist_test
# rolling
id = 1
while id <= self.arg.get("batchsize"):
sample = random.choice(this_set)
sample_video = sample.get('states')
sample_actions = sample.get('actions')
if len(sample_actions) > self.ncond + self.npred + 2:
selected_cond_frames, selected_pred_frames, selected_actions = self._sample_time(
sample_video, sample_actions, self.ncond, self.npred)
assert(len(selected_actions) > 0)
cond_frames.append(selected_cond_frames)
pred_frames.append(selected_pred_frames)
actions.append(selected_actions)
id += 1
# processing on the numpy array level
cond_frames = numpy.array(cond_frames, dtype='float') / 255.0
pred_frames = numpy.array(pred_frames, dtype='float') / 255.0
actions = numpy.array(actions, dtype='float')
# return tensor
cond_frames_ts = torch.from_numpy(cond_frames).float()
pred_frames_ts = torch.from_numpy(pred_frames).float()
actions_ts = torch.from_numpy(actions.squeeze()).float()
return cond_frames_ts.cuda(), pred_frames_ts.cuda(), actions_ts.cuda()
def get_paired_batch(self, split):
# hardcoded
assert(self.npred == 2)
assert(self.ncond == 4)
cond, target, action = self.get_batch(split)
cond1 = cond
target1 = target[:, 0].unsqueeze(1)
action1 = action[:, 0]
cond2 = torch.cat((cond[:, 1:], target1), 1)
target2 = target[:, 1].unsqueeze(1)
action2 = action[:, 1]
return cond1.clone(), target1.clone(), action1.clone(), cond2.clone(), target2.clone(), action2.clone()
def get_iterated_batch(self, split):
if split == 'train':
this_set = self.datalist_train
elif split == 'valid':
this_set = self.datalist_valid
elif split == 'test':
this_set = self.datalist_test
cond_frames, pred_frames, actions = [], [], []
# rolling
id = 1
while id <= self.arg.get("batchsize"):
if self.iter_video_ptr == len(this_set):
return None, None, None
sample = this_set[self.iter_video_ptr]
sample_video = sample.get('states')
sample_actions = sample.get('actions')
if self.iter_sample_ptr + self.npred > sample_video.shape[0]:
self.iter_video_ptr += 1
self.iter_sample_ptr = self.ncond
else:
selected_cond_frames, selected_pred_frames, selected_actions = self._iterate_time(
sample_video, self.iter_sample_ptr, sample_actions, self.ncond, self.npred)
assert(len(selected_actions) > 0)
cond_frames.append(selected_cond_frames)
pred_frames.append(selected_pred_frames)
actions.append(selected_actions)
id += 1
self.iter_sample_ptr += 10
# processing on the numpy array level
cond_frames = numpy.array(cond_frames, dtype='float') / 255.0
pred_frames = numpy.array(pred_frames, dtype='float') / 255.0
actions = numpy.array(actions, dtype='float')
# return tensor
cond_frames_ts = torch.from_numpy(cond_frames).float()
pred_frames_ts = torch.from_numpy(pred_frames).float()
actions_ts = torch.from_numpy(actions.squeeze()).float()
return cond_frames_ts.cuda(), pred_frames_ts.cuda(), actions_ts.cuda()
def plot_seq(self, cond, pred):
cond_pred = torch.cat((cond, pred), 1)
cond_pred = cond_pred.view(-1, self.nc, self.h, self.w)
grid = torchvision.utils.make_grid(cond_pred, self.ncond+self.npred, pad_value=1, normalize=True)
return grid
|
[
"ryu071511@gmail.com"
] |
ryu071511@gmail.com
|
262b31f426537ac6065fa5fbe714726118f635a2
|
18940c73497d11c1386a19a2e90719356f8ed9b2
|
/python_solutions/90-subsets-2.py
|
aaa5aa73c476b0fb0652047257cab595e27aa005
|
[] |
no_license
|
yunieyuna/Solutions-for-Leetcode-Problems
|
9741ba1a48341157fded637bb6e269fd9a659b5a
|
47910878328591844ab2c9b97ff8c89e2b98c319
|
refs/heads/master
| 2020-09-13T17:37:25.632463
| 2020-07-02T02:07:19
| 2020-07-02T02:07:19
| 222,857,462
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 604
|
py
|
# https://leetcode.com/problems/subsets-ii/
class Solution:
def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:
dic = {}
for i in nums:
dic[i] = dic.get(i, 0) + 1
res = [[]]
for i, v in dic.items():
temp = res.copy()
for j in res:
temp.extend(j + [i]*(k + 1) for k in range(v))
res = temp
return res
"""
Runtime: 44 ms, faster than 24.50% of Python3 online submissions for Subsets II.
Memory Usage: 12.5 MB, less than 100.00% of Python3 online submissions for Subsets II.
"""
|
[
"noreply@github.com"
] |
yunieyuna.noreply@github.com
|
419827681f3f82c9700097168df022ee151b4639
|
2f2e9cd97d65751757ae0a92e8bb882f3cbc5b5b
|
/33.搜索旋转排序数组.py
|
e67b6bd61499c8507cb2cb4f53db5ccbe9e2e868
|
[] |
no_license
|
mqinbin/python_leetcode
|
77f0a75eb29f8d2f9a789958e0120a7df4d0d0d3
|
73e0c81867f38fdf4051d8f58d0d3dc245be081e
|
refs/heads/main
| 2023-03-10T18:27:36.421262
| 2021-02-25T07:24:10
| 2021-02-25T07:24:10
| 314,410,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,257
|
py
|
#
# @lc app=leetcode.cn id=33 lang=python3
#
# [33] 搜索旋转排序数组
#
# @lc code=start
class Solution:
def search(self, nums: List[int], target: int) -> int:
# if len(nums) == 1 :
# return 0 if target == nums[0] else -1
left , right = 0 , len(nums)
while left < right:
rotatePos = (left + right) // 2
if rotatePos == len(nums) - 1:
break
if nums[rotatePos]>nums[rotatePos+1]:
rotatePos += 1
break
if nums[rotatePos]>nums[left]:
left = rotatePos + 1
else:
right = rotatePos
def foundIndex(nums, left, right,target):
while left < right:
mid = (left + right) // 2
if nums[mid] == target:
return mid
if target>nums[mid]:
left = mid + 1
else:
right = mid
return -1
leftPart = foundIndex(nums,0,rotatePos,target)
if leftPart != -1:
return leftPart
rightPart = foundIndex(nums,rotatePos,len(nums),target)
return rightPart
# @lc code=end
|
[
"mqinbin@gmail.com"
] |
mqinbin@gmail.com
|
eec69491952dc2206c921236c774340265028549
|
cf7d6cc5efd2d3545a538c1bf12e927a60b8600e
|
/iniciante/1478.py
|
2fbb42d314bb183909e20c8d6e3ab7903c9700a9
|
[] |
no_license
|
DarknessRdg/URI
|
1a4d821cc43c715f5092076a1753b13f42f3ceac
|
459361f9e055efb6eb291fb030dde47cb08688ab
|
refs/heads/master
| 2022-05-08T06:46:44.618627
| 2022-04-24T03:46:43
| 2022-04-24T03:46:43
| 200,585,901
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 952
|
py
|
def main():
ordem = int(input())
while ordem != 0:
matriz = []
for i in range(1, ordem + 1):
matriz += [[i] * ordem]
for i in range(ordem):
for j in range(ordem):
if i == j:
matriz[i][j] = 1
if i < j:
matriz[i][j] = matriz[i][j - 1] + 1
if i > j:
if j == 0:
continue
else:
matriz[i][j] = matriz[i][j - 1] - 1
for i in range(ordem):
for j in range(ordem):
matriz[i][j] = str(matriz[i][j])
tamanho = len(matriz[i][j])
if j == 0:
if tamanho == 1:
print(' ' + matriz[i][j], end='')
elif tamanho == 2:
print(' ' + matriz[i][j], end='')
else:
print(matriz[i][j], end='')
else:
if tamanho == 1:
print(' ' + matriz[i][j], end='')
elif tamanho == 2:
print(' ' + matriz[i][j], end='')
else:
print(' ' + matriz[i][j], end='')
print()
print()
ordem = int(input())
if __name__ == '__main__':
main()
|
[
"luanrodrigues007@hotmail.com"
] |
luanrodrigues007@hotmail.com
|
44877423e0a7b2fbd5c6d3490744498281831dd6
|
14373275670c1f3065ce9ae195df142146e2c1a4
|
/stubs/influxdb-client/influxdb_client/domain/range_threshold.pyi
|
bc6b3aa97fdc891511d30c007c3c8dd1a4f27782
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
sobolevn/typeshed
|
eb7af17c06a9722f23c337e6b9a4726223155d58
|
d63a82640390a9c130e0fe7d409e8b0b836b7c31
|
refs/heads/master
| 2023-08-04T05:59:29.447015
| 2023-06-14T21:27:53
| 2023-06-14T21:27:53
| 216,265,622
| 2
| 0
|
Apache-2.0
| 2022-02-08T10:40:53
| 2019-10-19T20:21:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,004
|
pyi
|
from _typeshed import Incomplete
from influxdb_client.domain.threshold_base import ThresholdBase
class RangeThreshold(ThresholdBase):
openapi_types: Incomplete
attribute_map: Incomplete
discriminator: Incomplete
def __init__(
self,
type: str = "range",
min: Incomplete | None = None,
max: Incomplete | None = None,
within: Incomplete | None = None,
level: Incomplete | None = None,
all_values: Incomplete | None = None,
) -> None: ...
@property
def type(self): ...
@type.setter
def type(self, type) -> None: ...
@property
def min(self): ...
@min.setter
def min(self, min) -> None: ...
@property
def max(self): ...
@max.setter
def max(self, max) -> None: ...
@property
def within(self): ...
@within.setter
def within(self, within) -> None: ...
def to_dict(self): ...
def to_str(self): ...
def __eq__(self, other): ...
def __ne__(self, other): ...
|
[
"noreply@github.com"
] |
sobolevn.noreply@github.com
|
82ef0c669736d69b2da60bea4943dc6b93cf54f9
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_luaus.py
|
db10b77fa904fbdc865fafa175f9a8538ee5ee10
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
from xai.brain.wordbase.nouns._luau import _LUAU
#calss header
class _LUAUS(_LUAU, ):
def __init__(self,):
_LUAU.__init__(self)
self.name = "LUAUS"
self.specie = 'nouns'
self.basic = "luau"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
5c0db854454cabafcab66f84b62cc0ae6df4a7a6
|
4c7a9d9af1f5a68e718fd626281b979590f7a434
|
/v6/combine_filter.py
|
54dd64244b4450e2414f2e8e536e9122c58768c3
|
[] |
no_license
|
bcrafton/bp-analysis
|
59f077f09d4c10445b2f1e2889842a681de44129
|
9290df4bc8ae6a527586328eee0ad86fd370989e
|
refs/heads/master
| 2020-05-31T12:27:23.673875
| 2019-07-02T15:46:58
| 2019-07-02T15:46:58
| 190,281,254
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
import numpy as np
def combine_filter(f1, f2, stride=1):
h1, w1, fin1, fout1 = np.shape(f1)
h2, w2, fin2, fout2 = np.shape(f2)
oh = h1 + 2*(h2 // 2) * stride
ow = w1 + 2*(w2 // 2) * stride
ofin = fin1
ofout = fout2
fout = np.zeros(shape=(oh, ow, ofin, ofout))
for x in range(h2):
for y in range(w2):
for c in range(fout2):
sh = x * stride ; eh = x * stride + h1
sw = y * stride ; ew = y * stride + w1
fout[sh:eh, sw:ew, :, c] += np.sum(f2[x, y, :, c] * f1, axis=3)
return fout
|
[
"crafton.b@husky.neu.edu"
] |
crafton.b@husky.neu.edu
|
6b124e1471d862488e655f3811a9cdfd0dc281cb
|
e4aab0a71dc5c047d8b1576380b16364e03e7c0d
|
/post scripts/deluge.py
|
4a1106e8a5d1e3b050a700159f20b48d25f02268
|
[
"Apache-2.0"
] |
permissive
|
Joecastra/Watcher3
|
8ca66c44846030f0eb771d9d6ddeb9c37f637a4e
|
ce25d475f83ed36d6772f0cc35ef020d5e47c94b
|
refs/heads/master
| 2021-01-19T11:05:55.454351
| 2017-04-10T20:17:24
| 2017-04-10T20:17:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,359
|
py
|
#!/usr/bin/env python3
# ======================================== #
# ============= INSTRUCTIONS ============= #
# Add file to Deluge's Execute plugin for event Torrent Complete
# Add api information to conf:
watcherapi = 'APIKEY'
watcheraddress = u'http://localhost:9090/'
category = 'Watcher'
# DO NOT TOUCH ANYTHING BELOW THIS LINE! #
# ======================================== #
import json
import os
import sys
import urllib.request
import urllib.parse
data = {}
args = sys.argv
download_dir = args[3]
while download_dir[-1] in ['/', '\\']:
download_dir = download_dir[:-1]
parent_folder = os.path.split(download_dir)[-1]
if parent_folder.lower() != category.lower():
# Not watcher category
sys.exit(0)
data['apikey'] = watcherapi
data['name'] = args[2]
data['path'] = u'{}/{}'.format(download_dir, args[2])
data['downloadid'] = args[1]
data['guid'] = args[1]
data['mode'] = 'complete'
url = u'{}/postprocessing/'.format(watcheraddress)
post_data = urllib.parse.urlencode(data).encode('ascii')
request = urllib.request.Request(url, post_data, headers={'User-Agent': 'Mozilla/5.0'})
response = json.loads(urllib.request.urlopen(request, timeout=600).read())
if response['status'] == 'finished':
sys.exit(0)
elif response['status'] == 'incomplete':
sys.exit(1)
else:
sys.exit(1)
sys.exit(0)
# pylama:ignore=E402
|
[
"nosmokingbandit@gmail.com"
] |
nosmokingbandit@gmail.com
|
58913959884ae744d8d208372316ac4a7ab7379f
|
e6ea71d6acbb41bd40d3a17b352e19c6369d5c4b
|
/senpai/stage_instance.py
|
736cc80fa7d0ffe07b80e08582726f18cda05304
|
[
"MIT"
] |
permissive
|
alexyy802/waifucord
|
bbfb50515ca23bf711e940ac8921092ff6d1e12e
|
c3bb883a6a148effb127781a885e839697df6a8b
|
refs/heads/master
| 2023-09-02T13:19:44.478472
| 2021-10-29T06:51:51
| 2021-10-29T06:51:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,004
|
py
|
"""
The MIT License (MIT)
Copyright (c) 2021-present waifucord
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Optional, TYPE_CHECKING
from .utils import MISSING, cached_slot_property
from .mixins import Hashable
from .errors import InvalidArgument
from .enums import StagePrivacyLevel, try_enum
__all__ = ("StageInstance",)
if TYPE_CHECKING:
from .chan.channel import StageInstance as StageInstancePayload
from .state import ConnectionState
from .channel import StageChannel
from .guild import Guild
class StageInstance(Hashable):
    """Represents a stage instance of a stage channel in a guild.
    .. versionadded:: 2.0
    .. container:: operations
        .. describe:: x == y
            Checks if two stage instances are equal.
        .. describe:: x != y
            Checks if two stage instances are not equal.
        .. describe:: hash(x)
            Returns the stage instance's hash.
    Attributes
    -----------
    id: :class:`int`
        The stage instance's ID.
    guild: :class:`Guild`
        The guild that the stage instance is running in.
    channel_id: :class:`int`
        The ID of the channel that the stage instance is running in.
    topic: :class:`str`
        The topic of the stage instance.
    privacy_level: :class:`StagePrivacyLevel`
        The privacy level of the stage instance.
    discoverable_disabled: :class:`bool`
        Whether discoverability for the stage instance is disabled.
    """
    # __slots__ keeps instances small; _cs_channel is the cache slot used by
    # the cached_slot_property decorating `channel` below.
    __slots__ = (
        "_state",
        "id",
        "guild",
        "channel_id",
        "topic",
        "privacy_level",
        "discoverable_disabled",
        "_cs_channel",
    )
    def __init__(
        self, *, state: ConnectionState, guild: Guild, data: StageInstancePayload
    ) -> None:
        self._state = state
        self.guild = guild
        self._update(data)
    def _update(self, data: StageInstancePayload):
        # Populate attributes from the raw payload; numeric IDs are coerced
        # with int() since the wire representation may not be an int.
        self.id: int = int(data["id"])
        self.channel_id: int = int(data["channel_id"])
        self.topic: str = data["topic"]
        self.privacy_level: StagePrivacyLevel = try_enum(
            StagePrivacyLevel, data["privacy_level"]
        )
        # Key may be absent from the payload; default to False.
        self.discoverable_disabled: bool = data.get("discoverable_disabled", False)
    def __repr__(self) -> str:
        return f"<StageInstance id={self.id} guild={self.guild!r} channel_id={self.channel_id} topic={self.topic!r}>"
    @cached_slot_property("_cs_channel")
    def channel(self) -> Optional[StageChannel]:
        """Optional[:class:`StageChannel`]: The channel that stage instance is running in."""
        # the returned channel will always be a StageChannel or None
        return self._state.get_channel(self.channel_id)  # type: ignore
    def is_public(self) -> bool:
        """:class:`bool`: Whether the stage instance's privacy level is public."""
        return self.privacy_level is StagePrivacyLevel.public
    async def edit(
        self,
        *,
        topic: str = MISSING,
        privacy_level: StagePrivacyLevel = MISSING,
        reason: Optional[str] = None,
    ) -> None:
        """|coro|
        Edits the stage instance.
        You must have the :attr:`~Permissions.manage_channels` permission to
        use this.
        Parameters
        -----------
        topic: :class:`str`
            The stage instance's new topic.
        privacy_level: :class:`StagePrivacyLevel`
            The stage instance's new privacy level.
        reason: :class:`str`
            The reason the stage instance was edited. Shows up on the audit log.
        Raises
        ------
        InvalidArgument
            If the ``privacy_level`` parameter is not the proper type.
        Forbidden
            You do not have permissions to edit the stage instance.
        HTTPException
            Editing a stage instance failed.
        """
        # MISSING sentinel distinguishes "not provided" from explicit values,
        # so only supplied fields are sent to the API.
        payload = {}
        if topic is not MISSING:
            payload["topic"] = topic
        if privacy_level is not MISSING:
            if not isinstance(privacy_level, StagePrivacyLevel):
                raise InvalidArgument(
                    "privacy_level field must be of type PrivacyLevel"
                )
            payload["privacy_level"] = privacy_level.value
        # Skip the HTTP call entirely when nothing changed.
        if payload:
            await self._state.http.edit_stage_instance(
                self.channel_id, **payload, reason=reason
            )
    async def delete(self, *, reason: Optional[str] = None) -> None:
        """|coro|
        Deletes the stage instance.
        You must have the :attr:`~Permissions.manage_channels` permission to
        use this.
        Parameters
        -----------
        reason: :class:`str`
            The reason the stage instance was deleted. Shows up on the audit log.
        Raises
        ------
        Forbidden
            You do not have permissions to delete the stage instance.
        HTTPException
            Deleting the stage instance failed.
        """
        await self._state.http.delete_stage_instance(self.channel_id, reason=reason)
|
[
"noreply@github.com"
] |
alexyy802.noreply@github.com
|
f08f5e37c8d43e03f7302f3dc075c370c0ebb88c
|
c70d020907e538492665c7fe75d7b2b90c88ba93
|
/python/pygobject/basis.py
|
9507a9e5cafe13baefa28c73ed1d79d857155810
|
[] |
no_license
|
4179e1/misc
|
7250de0bc0d9ab5641f48ec87f038f8b5dbed29b
|
1fb4d38b75fcc7692da605a6d3ec72e735116de1
|
refs/heads/master
| 2023-08-10T23:37:10.362847
| 2023-07-08T14:27:53
| 2023-07-08T14:27:53
| 14,710,900
| 3
| 3
| null | 2023-02-23T02:03:23
| 2013-11-26T08:21:00
|
Roff
|
UTF-8
|
Python
| false
| false
| 151
|
py
|
#!/usr/bin/env python
from gi.repository import Gtk
# Minimal GTK example: an empty top-level window that quits the main loop
# when the user closes it.
win = Gtk.Window()
win.connect ("delete-event", Gtk.main_quit)
win.show_all()
# Block here running the GTK event loop until main_quit is called.
Gtk.main()
|
[
"4179e1@gmail.com"
] |
4179e1@gmail.com
|
aae7967595c33684d0cddfce97bc78285b719b53
|
67c0bc2b2292857fcc19b3c6e6da5570dc09749c
|
/chapter_6_visualization/audio_path.py
|
b8f4132d681a1663b16025bb3e2816c9f6d25d96
|
[
"Apache-2.0"
] |
permissive
|
jim-schwoebel/voicebook
|
9d28f638fa6a31cb8c4915f9871c07da261b3ea6
|
0e8eae0f01487f15589c0daa2cf7ca3c6f3b8ad3
|
refs/heads/master
| 2022-12-11T13:41:24.005431
| 2021-04-15T13:51:35
| 2021-04-15T13:51:35
| 137,778,789
| 363
| 84
|
Apache-2.0
| 2022-12-08T03:58:01
| 2018-06-18T16:37:37
|
Python
|
UTF-8
|
Python
| false
| false
| 3,273
|
py
|
'''
================================================
## VOICEBOOK REPOSITORY ##
================================================
repository name: voicebook
repository version: 1.0
repository link: https://github.com/jim-schwoebel/voicebook
author: Jim Schwoebel
author contact: js@neurolex.co
description: a book and repo to get you started programming voice applications in Python - 10 chapters and 200+ scripts.
license category: opensource
license: Apache 2.0 license
organization name: NeuroLex Laboratories, Inc.
location: Seattle, WA
website: https://neurolex.ai
release date: 2018-09-28
This code (voicebook) is hereby released under a Apache 2.0 license license.
For more information, check out the license terms below.
================================================
## LICENSE TERMS ##
================================================
Copyright 2018 NeuroLex Laboratories, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
## SERVICE STATEMENT ##
================================================
If you are using the code written for a larger project, we are
happy to consult with you and help you with deployment. Our team
has >10 world experts in Kafka distributed architectures, microservices
built on top of Node.js / Python / Docker, and applying machine learning to
model speech and text data.
We have helped a wide variety of enterprises - small businesses,
researchers, enterprises, and/or independent developers.
If you would like to work with us let us know @ js@neurolex.co.
================================================
## AUDIO_PATH.PY ##
================================================
Give some simple visual feedback when recording an audio stream.
'''
import sounddevice as sd
import soundfile as sf
import random, time, librosa, os
import numpy as np
import matplotlib.pyplot as plt
from drawnow import drawnow
def make_fig():
    # Redraw callback passed to drawnow(): scatter the module-level x/y streams.
    plt.scatter(x, y)
def record_data(filename, duration, fs, channels):
    """Record *duration* seconds of audio and return its mean RMS energy x1000.

    The recording is written to *filename* only long enough to reload it with
    librosa; the temporary file is deleted before returning.
    """
    # synchronous recording
    myrecording = sd.rec(int(duration * fs), samplerate=fs, channels=channels)
    sd.wait()  # block until the recording finishes
    sf.write(filename, myrecording, fs)
    y, sr = librosa.load(filename)
    # NOTE(review): librosa.feature.rmse was renamed to rms in librosa >= 0.7,
    # so this call only works on older librosa versions — confirm the pin.
    rmse=np.mean(librosa.feature.rmse(y)[0])
    os.remove(filename)
    return rmse*1000
# initialize plot
plt.ion() # enable interactivity
fig = plt.figure() # make a figure
x = list()
y = list()
# Stream 100 short recordings, live-plotting the RMS energy of each one.
for i in range(100):
    # record 20ms of data
    sample=record_data('sample.wav',0.02, 44100, 1)
    x.append(i)
    y.append(sample)
    drawnow(make_fig)  # refresh the interactive scatter plot
plt.savefig('stream.png')
os.system('open stream.png')  # 'open' is macOS-only — confirm target platform
|
[
"noreply@github.com"
] |
jim-schwoebel.noreply@github.com
|
db6175fa2394c30524ad292439ef8721022ae025
|
851c474c9d91875b0fa8fe46d591bb6550b1c550
|
/eye/migrations/0003_auto_20200818_0030.py
|
62e72256111c4d71a8e8f0ef2e19f772bd42a5f2
|
[] |
no_license
|
songlei1979/BigEye4
|
1a8e9c71cae3e3bd3fb5432c05b73997ba9590c2
|
825e79ebd43d71203c827490d5ffe36c861e4dec
|
refs/heads/master
| 2022-12-03T22:19:25.224277
| 2020-08-26T05:44:19
| 2020-08-26T05:44:19
| 288,315,654
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 449
|
py
|
# Generated by Django 3.0.8 on 2020-08-18 00:30
from django.db import migrations
class Migration(migrations.Migration):
    """Point the Allocation and Assignment models at explicit eye_* tables."""
    dependencies = [
        ('eye', '0002_auto_20200812_1007'),
    ]
    operations = [
        # AlterModelTable only renames the backing table; no column changes.
        migrations.AlterModelTable(
            name='allocation',
            table='eye_allocation',
        ),
        migrations.AlterModelTable(
            name='assignment',
            table='eye_assignment',
        ),
    ]
|
[
"gabriel_sl19798@hotmail.com"
] |
gabriel_sl19798@hotmail.com
|
3803da3b8f058178d20d30d5aa287d146a64bf84
|
7579431f002103a74b5ae6bccf76458b221e78ab
|
/Dynammic Programming/Egg Dropping Puzzle.py
|
0cea9ac5597a588db4fd63558fc43eea55a49cab
|
[] |
no_license
|
mukundajmera/competitiveprogramming
|
5c187e9681b320482c7b8cdfa71be95f9f9184c1
|
85774aea5a20a9cf4a97c66237d2faa570edd96b
|
refs/heads/main
| 2023-03-21T14:01:03.653953
| 2021-03-22T19:41:54
| 2021-03-22T19:41:54
| 319,270,423
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,375
|
py
|
# User function Template for python3
# Function to get minimum number of trials needed in worst
# case with n eggs and k floors
import math
def eggDrop(n, k):
    """Return the minimum number of trials needed, in the worst case, to find
    the critical floor of a building with ``k`` floors using ``n`` eggs.

    Classic egg-dropping DP where ``dp[f][e]`` is the minimum worst-case trial
    count for ``f`` floors and ``e`` eggs.  For the first drop from floor x:

        dp[f][e] = 1 + min over x of max(dp[x-1][e-1],  # egg breaks
                                         dp[f-x][e])    # egg survives

    Parameters
    ----------
    n : int
        Number of eggs.
    k : int
        Number of floors.

    Returns
    -------
    int
        Minimum number of trials required in the worst case.

    Raises
    ------
    ValueError
        If ``k > 0`` but ``n == 0`` (no egg to drop).
    """
    # Edge cases: the original table construction crashed (IndexError) for
    # k == 0 or n == 0; handle them explicitly.
    if k == 0:
        return 0
    if n == 0:
        raise ValueError("at least one egg is required when there are floors")
    # dp[floor][egg]; rows are floor counts 0..k, columns are egg counts 0..n.
    dp = [[None] * (n + 1) for _ in range(k + 1)]
    for egg in range(1, n + 1):
        dp[1][egg] = 1   # a single floor always takes exactly one trial
        dp[0][egg] = 0   # zero floors take no trials
    for floor in range(k + 1):
        dp[floor][1] = floor  # one egg forces a linear search from the bottom
    for floor in range(2, k + 1):
        for egg in range(2, n + 1):
            dp[floor][egg] = math.inf
            # Try every floor x for the first drop and keep the best worst case.
            for x in range(1, floor + 1):
                worst = 1 + max(dp[x - 1][egg - 1], dp[floor - x][egg])
                if worst < dp[floor][egg]:
                    dp[floor][egg] = worst
    return dp[k][n]
# {
# Driver Code Starts
# Initial Template for Python 3
import atexit
import io
import sys
# Contributed by : Nagendra Jha
if __name__ == '__main__':
    # Read the number of test cases, then for each case read "n k"
    # (eggs, floors) and print the minimum worst-case trial count.
    test_cases = int(input())
    for cases in range(test_cases):
        n, k = map(int, input().strip().split())
        print(eggDrop(n, k))
|
[
"mukundajmera94@gmail.com"
] |
mukundajmera94@gmail.com
|
103b7a688f8a7e8cf5d5dddf9358f9b4c41bf7fa
|
7fc03f7d28ea7bbdca650a51a23fc0b13cbefde1
|
/supervised_learning/0x11-attention/6-main.py
|
756a7098a53ec467f39ff16efa42dd77ce531301
|
[] |
no_license
|
HeimerR/holbertonschool-machine_learning
|
54c410e40d38635de482773f15e26ce1c2c95e46
|
e10b4e9b6f3fa00639e6e9e5b35f0cdb43a339a3
|
refs/heads/master
| 2021-07-24T09:33:25.833269
| 2021-01-14T00:21:45
| 2021-01-14T00:21:45
| 236,603,791
| 0
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 594
|
py
|
#!/usr/bin/env python3
import numpy as np
import tensorflow as tf
MultiHeadAttention = __import__('6-multi_head_attention').MultiHeadAttention
# Smoke test: construct an 8-head attention layer with model depth 512,
# print its configuration and sub-layers, then run one forward pass on
# random float32 tensors of shape (50, 15, 256).
mha = MultiHeadAttention(512, 8)
print(mha.dm)
print(mha.h)
print(mha.depth)
print(mha.Wq)
print(mha.Wk)
print(mha.Wv)
print(mha.linear)
Q = tf.convert_to_tensor(np.random.uniform(size=(50, 15, 256)).astype('float32'))
K = tf.convert_to_tensor(np.random.uniform(size=(50, 15, 256)).astype('float32'))
V = tf.convert_to_tensor(np.random.uniform(size=(50, 15, 256)).astype('float32'))
# No mask is applied (mask=None).
output, weights = mha(Q, K, V, None)
print(output)
print(weights)
|
[
"ing.heimer.rojas@gmail.com"
] |
ing.heimer.rojas@gmail.com
|
b0680c3e15836ae1c131b441d324db4d218d5f96
|
04f8c7d9eb20745def8568fcbd2401392187ebf0
|
/www/www/settings_prod.py
|
2df597cfc30b7934a40ca8e4d06329af96778cbd
|
[] |
no_license
|
boogiiieee/Victoria
|
5f01d8c01e92d78b756324ee8da2208ab9c63bc6
|
c76996698bbbd88309ed35f47d19e09fec19eb94
|
refs/heads/master
| 2021-09-04T03:13:00.208173
| 2018-01-15T04:05:29
| 2018-01-15T04:05:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 699
|
py
|
# -*- coding: utf-8 -*-
'''
from settings import *
DEBUG = False
TEMPLATE_DEBUG = DEBUG
MANAGERS = (
('', ''),
('', ''),
)
MEDIA_ROOT = '/var/www/home/'
STATIC_ROOT = '/var/www/home/'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': 'postgresql2.locum.ru',
'PORT': '5432',
}
}
ALLOWED_HOSTS = ['127.0.0.1:8000', 'localhost', '', '']
ROBOTS_SITEMAP_URLS = ['/sitemap.xml']
ROBOTS_SITEMAP_HOST = ''
DEFAULT_FROM_EMAIL = ''
EMAIL_SUBJECT_PREFIX = ''
EMAIL_HOST = ''
EMAIL_PORT = 25
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = True
'''
|
[
"shalyapinalexander@gmail.com"
] |
shalyapinalexander@gmail.com
|
c7390cd06f4184861f8f1c7715515e955cf21374
|
9427fa36416d835e28faa8e52a11140b6512e9c7
|
/dæmatímaverk/dæmatími_tuples/tuples5.py
|
9a17ed22e2776c27573687cc7d5525fb0ac8cac2
|
[] |
no_license
|
fannarl/traveler
|
e5f274e404c10fe799b87e36eb4eb37e64fb8db8
|
8bc121d9fc26374f1b0dcee6da815ed732f2d889
|
refs/heads/master
| 2020-03-28T23:23:31.216234
| 2018-12-14T17:22:30
| 2018-12-14T17:22:30
| 149,289,261
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 654
|
py
|
def get_list():
    """Prompt the user and return their comma-separated entries as a list."""
    return input("Enter elements of list separated by commas: ").strip().split(',')
def get_integer(prompt):
    """Display *prompt* and return the user's response converted to int."""
    return int(input(prompt))
def transform(list1, list2, r1, r2):
    """Move the slice ``list1[r1:r2]`` out of *list1* and append it, reversed,
    onto *list2*.  Both lists are mutated in place; nothing is returned.

    Bug fix: the original removed the moved elements by *value*
    (``list.remove``), which deletes the first matching occurrence — so when a
    duplicate of a moved element appeared before index ``r1``, the wrong
    element was removed.  Deleting the slice by position is always correct.
    """
    segment = list1[r1:r2]   # elements to transfer
    del list1[r1:r2]         # remove by position, not by value
    list2.extend(reversed(segment))
# Main program starts here - DO NOT change it
list1 = get_list()
list2 = get_list()
index1 = get_integer("Enter from value: ")
index2 = get_integer("Enter to value: ")
transform(list1, list2, index1, index2)
print(list1)
print(list2)
|
[
"fannarleo95@hotmail.com"
] |
fannarleo95@hotmail.com
|
0696c8ebc33ac64b9ec37ab7fb5a23ad490a88fc
|
194a1e2ac246c5f9926b014c00d4c733f0cdaf0c
|
/tests/wallet/test_singleton_lifecycle.py
|
965eaeab5e49fb9b8403e58e5fcd5a088a688c53
|
[
"Apache-2.0"
] |
permissive
|
chia-os/btcgreen-blockchain
|
03e889cd0268284b7673917ab725ad71f980b650
|
2688e74de423ec59df302299e993b4674d69489e
|
refs/heads/main
| 2023-08-29T14:40:11.962821
| 2021-08-17T06:33:34
| 2021-08-17T06:33:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,191
|
py
|
import asyncio
from typing import List, Tuple
from blspy import G2Element
from clvm_tools import binutils
from btcgreen.types.blockchain_format.program import Program, INFINITE_COST
from btcgreen.types.announcement import Announcement
from btcgreen.types.blockchain_format.coin import Coin
from btcgreen.types.blockchain_format.sized_bytes import bytes32
from btcgreen.types.coin_solution import CoinSolution
from btcgreen.types.spend_bundle import SpendBundle
from btcgreen.util.condition_tools import ConditionOpcode
from btcgreen.util.ints import uint64
from btcgreen.wallet.puzzles.load_clvm import load_clvm
from tests.core.full_node.test_conditions import bt, check_spend_bundle_validity, initial_blocks
SINGLETON_MOD = load_clvm("singleton_top_layer.clvm")
LAUNCHER_PUZZLE = load_clvm("singleton_launcher.clvm")
P2_SINGLETON_MOD = load_clvm("p2_singleton.clvm")
POOL_MEMBER_MOD = load_clvm("pool_member_innerpuz.clvm")
POOL_WAITINGROOM_MOD = load_clvm("pool_waitingroom_innerpuz.clvm")
LAUNCHER_PUZZLE_HASH = LAUNCHER_PUZZLE.get_tree_hash()
SINGLETON_MOD_HASH = SINGLETON_MOD.get_tree_hash()
POOL_REWARD_PREFIX_MAINNET = bytes32.fromhex("ccd5bb71183532bff220ba46c268991a00000000000000000000000000000000")
def check_coin_solution(coin_solution: CoinSolution) -> None:
    """Debug helper: run a coin solution's puzzle reveal and report failures.

    The puzzle is executed with an unlimited cost budget and the result is
    discarded.  Any exception is printed once (the original printed it twice)
    rather than raised, so this can be sprinkled into tests without aborting
    them.
    """
    try:
        cost, result = coin_solution.puzzle_reveal.run_with_cost(INFINITE_COST, coin_solution.solution)
    except Exception as ex:
        # Deliberately swallow: this helper only reports, it never fails a test.
        print(ex)
def adaptor_for_singleton_inner_puzzle(puzzle: Program) -> Program:
    """Wrap *puzzle* as the CLVM program ``(a (q . <puzzle>) 3)`` so it can
    serve as a singleton inner puzzle."""
    # NOTE: round-tripping through disassemble/assemble text is pretty slow;
    # acceptable in tests.
    return Program.to(binutils.assemble("(a (q . %s) 3)" % binutils.disassemble(puzzle)))
def launcher_conditions_and_spend_bundle(
    parent_coin_id: bytes32,
    launcher_amount: uint64,
    initial_singleton_inner_puzzle: Program,
    metadata: List[Tuple[str, str]],
    launcher_puzzle: Program = LAUNCHER_PUZZLE,
) -> Tuple[Program, bytes32, List[Program], SpendBundle]:
    """Build everything needed to launch a singleton from *parent_coin_id*.

    Returns ``(lineage_proof, launcher_coin_id, expected_conditions,
    spend_bundle)``: the conditions the parent coin's spend must emit, and
    the spend bundle that spends the launcher coin itself.
    """
    launcher_puzzle_hash = launcher_puzzle.get_tree_hash()
    launcher_coin = Coin(parent_coin_id, launcher_puzzle_hash, launcher_amount)
    # The full singleton puzzle the launcher coin is expected to create.
    singleton_full_puzzle = SINGLETON_MOD.curry(
        SINGLETON_MOD_HASH, launcher_coin.name(), launcher_puzzle_hash, initial_singleton_inner_puzzle
    )
    singleton_full_puzzle_hash = singleton_full_puzzle.get_tree_hash()
    message_program = Program.to([singleton_full_puzzle_hash, launcher_amount, metadata])
    # The parent spend must assert this announcement, binding the two spends
    # into the same bundle.
    expected_announcement = Announcement(launcher_coin.name(), message_program.get_tree_hash())
    expected_conditions = []
    expected_conditions.append(
        Program.to(
            binutils.assemble(f"(0x{ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT.hex()} 0x{expected_announcement.name()})")
        )
    )
    expected_conditions.append(
        Program.to(
            binutils.assemble(f"(0x{ConditionOpcode.CREATE_COIN.hex()} 0x{launcher_puzzle_hash} {launcher_amount})")
        )
    )
    launcher_solution = Program.to([singleton_full_puzzle_hash, launcher_amount, metadata])
    coin_solution = CoinSolution(launcher_coin, launcher_puzzle, launcher_solution)
    spend_bundle = SpendBundle([coin_solution], G2Element())
    # Lineage proof for the first singleton generation: (parent id, amount).
    lineage_proof = Program.to([parent_coin_id, launcher_amount])
    return lineage_proof, launcher_coin.name(), expected_conditions, spend_bundle
def singleton_puzzle(launcher_id: Program, launcher_puzzle_hash: bytes32, inner_puzzle: Program) -> Program:
    """Curry the singleton top-layer module around *inner_puzzle*."""
    return SINGLETON_MOD.curry(SINGLETON_MOD_HASH, launcher_id, launcher_puzzle_hash, inner_puzzle)
def singleton_puzzle_hash(launcher_id: Program, launcher_puzzle_hash: bytes32, inner_puzzle: Program) -> bytes32:
    """Tree hash of the fully curried singleton puzzle."""
    return singleton_puzzle(launcher_id, launcher_puzzle_hash, inner_puzzle).get_tree_hash()
def solution_for_singleton_puzzle(lineage_proof: Program, my_amount: int, inner_solution: Program) -> Program:
    """Assemble the (lineage_proof, amount, inner_solution) singleton solution."""
    return Program.to([lineage_proof, my_amount, inner_solution])
def p2_singleton_puzzle(launcher_id: Program, launcher_puzzle_hash: bytes32) -> Program:
    """Curry the pay-to-singleton module for the given launcher."""
    return P2_SINGLETON_MOD.curry(SINGLETON_MOD_HASH, launcher_id, launcher_puzzle_hash)
def p2_singleton_puzzle_hash(launcher_id: Program, launcher_puzzle_hash: bytes32) -> bytes32:
    """Tree hash of the curried pay-to-singleton puzzle."""
    return p2_singleton_puzzle(launcher_id, launcher_puzzle_hash).get_tree_hash()
def test_only_odd_coins_0():
    """Launch a singleton from a farmed coin and validate the resulting spends.

    Verifies that the launcher coin is both created and spent within the same
    bundle, that the farmed coin is consumed, and that the expected singleton
    coin (amount 1) appears among the newly created coins.
    """
    blocks = initial_blocks()
    farmed_coin = list(blocks[-1].get_included_reward_coins())[0]
    metadata = [("foo", "bar")]
    # Program.to(1) is the trivial "return the solution as conditions" puzzle.
    ANYONE_CAN_SPEND_PUZZLE = Program.to(1)
    launcher_amount = uint64(1)
    launcher_puzzle = LAUNCHER_PUZZLE
    launcher_puzzle_hash = launcher_puzzle.get_tree_hash()
    initial_singleton_puzzle = adaptor_for_singleton_inner_puzzle(ANYONE_CAN_SPEND_PUZZLE)
    lineage_proof, launcher_id, condition_list, launcher_spend_bundle = launcher_conditions_and_spend_bundle(
        farmed_coin.name(), launcher_amount, initial_singleton_puzzle, metadata, launcher_puzzle
    )
    conditions = Program.to(condition_list)
    # Spend the farmed coin, emitting the launcher-creation conditions, and
    # merge that spend with the launcher's own spend.
    coin_solution = CoinSolution(farmed_coin, ANYONE_CAN_SPEND_PUZZLE, conditions)
    spend_bundle = SpendBundle.aggregate([launcher_spend_bundle, SpendBundle([coin_solution], G2Element())])
    run = asyncio.get_event_loop().run_until_complete
    coins_added, coins_removed = run(check_spend_bundle_validity(bt.constants, blocks, spend_bundle))
    coin_set_added = set([_.coin for _ in coins_added])
    coin_set_removed = set([_.coin for _ in coins_removed])
    launcher_coin = launcher_spend_bundle.coin_solutions[0].coin
    # The launcher coin is ephemeral: created and spent in the same bundle.
    assert launcher_coin in coin_set_added
    assert launcher_coin in coin_set_removed
    assert farmed_coin in coin_set_removed
    # breakpoint()
    singleton_expected_puzzle_hash = singleton_puzzle_hash(launcher_id, launcher_puzzle_hash, initial_singleton_puzzle)
    expected_singleton_coin = Coin(launcher_coin.name(), singleton_expected_puzzle_hash, launcher_amount)
    assert expected_singleton_coin in coin_set_added
    # next up: spend the expected_singleton_coin
    # it's an adapted `ANYONE_CAN_SPEND_PUZZLE`
    # then try a bad lineage proof
    # then try writing two odd coins
    # then try writing zero odd coins
    # then, destroy the singleton with the -113 hack
    return 0
|
[
"svginsomnia@gmail.com"
] |
svginsomnia@gmail.com
|
8a7146223b544962aa2917ea4cf93337f3460901
|
3740de0d6e43ea140fc09ab314e4c492603ba185
|
/scripts/sources/s_checklist_scenariobased_step07.py
|
f74d1ac5806b3df08f57092c9a18ef1c0abce61d
|
[
"MIT"
] |
permissive
|
s0ap/arpmRes
|
29c60c65fd3e11be1cc31d46494e5b3ebf6e05ab
|
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
|
refs/heads/master
| 2022-02-16T05:01:22.118959
| 2019-08-20T16:45:02
| 2019-08-20T16:45:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,564
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_checklist_scenariobased_step07 [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_checklist_scenariobased_step07&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ex-vue-7).
# +
import numpy as np
import pandas as pd
from arpym.portfolio import spectral_index
from arpym.statistics import meancov_sp, quantile_sp
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step07-parameters)
# +
# indicates which projection to continue from
# True: use copula-marginal projections
# False: use historical projections
copula_marginal = True
lam = 3e-7 # parameter of exponential utility function
c_quantile = 0.95 # confidence level for the quantile satisfaction measure
c_es = 0.95 # confidence level for the negative expected shortfall
# -
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step07-implementation-step00): Load data
# +
path = '../../../databases/temporary-databases/'
if copula_marginal:
# Projection
db_projection_tools = pd.read_csv(path + 'db_projection_tools.csv')
j_ = int(db_projection_tools['j_'][0])
db_scenprob = pd.read_csv(path + 'db_scenario_probs.csv')
p = db_scenprob['p'].values
# Pricing
db_pricing = pd.read_csv(path + 'db_pricing.csv')
pi_tnow_thor = db_pricing.values
# Aggregation
db_exante_perf = pd.read_csv(path + 'db_exante_perf.csv')
y_h = db_exante_perf.values.squeeze()
else:
# Projection
db_projection_tools = pd.read_csv(path + 'db_projection_bootstrap_tools.csv')
j_ = int(db_projection_tools['j_'][0])
db_scenprob = pd.read_csv(path + 'db_scenario_probs_bootstrap.csv')
p = db_scenprob['p'].values
# Pricing
db_pricing = pd.read_csv(path + 'db_pricing_historical.csv')
pi_tnow_thor = db_pricing.values
# Aggregation
db_exante_perf = pd.read_csv(path + 'db_exante_perf_historical.csv')
y_h = db_exante_perf.values.squeeze()
db_holdings = pd.read_csv(path + 'db_holdings.csv')
h = np.squeeze(db_holdings.values)
# -
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step07-implementation-step01): Calculate certainty equivalent satisfaction measure
# +
# expected utility
expected_utility = p @ (-np.exp(-lam * y_h)) # expected utility computation
# certainty equivalent satisfaction measure
cert_eq_yh = -(1 / lam) * np.log(-expected_utility)
# -
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step07-implementation-step02): Quantile satisfaction measure
# quantile
q_yh = quantile_sp(1 - c_quantile, y_h, p, method='kernel_smoothing')
# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step07-implementation-step03): Expected shortfall satisfaction measure
# +
# indicator function
def indicator(x):
    # Indicator of the interval [0, 1 - c_es]: True inside, False outside.
    return 0 <= x <= 1 - c_es
# spectrum function
def spectr_es(x):
    # Expected-shortfall spectrum: constant weight 1/(1 - c_es) on [0, 1 - c_es].
    weight = 1 / (1 - c_es)
    return weight * indicator(x)
# negative expected shortfall
es_yh, _ = spectral_index(spectr_es, pi_tnow_thor,
p, h)
# -
# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step07-implementation-step04): Expectation and variance satisfaction measures
# expectation satisfaction measure
mean_yh, var_yh = meancov_sp(y_h, p)
# opposite of variance is satisfaction measure
neg_var_yh = -var_yh
# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step07-implementation-step05): Save database
# +
out = pd.DataFrame({'cert_eq_yh': pd.Series(cert_eq_yh),
'q_yh': pd.Series(q_yh),
'es_yh': pd.Series(es_yh),
'mean_yh': pd.Series(mean_yh),
'neg_var_yh': pd.Series(neg_var_yh),
'c_es': pd.Series(c_es),
'c_quantile': pd.Series(c_quantile)})
if copula_marginal:
out.to_csv(path + 'db_quantile_and_satis.csv',
index=False)
else:
out.to_csv(path + 'db_quantile_and_satis_historical.csv',
index=False)
del out
|
[
"dario.popadic@yahoo.com"
] |
dario.popadic@yahoo.com
|
df39f1b43eaa8211c126ff9d37b5062dbbbf6b0a
|
7f99fb1f9d051dbcd13770e7a08e92f90e6027ca
|
/Analyzer/fakeRateGetRatesMuons_DiBoson_cfg.py
|
505ad378830fb34d6a4008d8b48de6479a3b79a3
|
[] |
no_license
|
kmtos/AnalyzerGeneratorRecoVariousFunctions
|
be54dd360a37b4ef1aeef2ee692ada4351b7db2e
|
acd8cde2f5657674c73eae9af28f3235b9b3f7c5
|
refs/heads/master
| 2021-01-10T08:34:49.953330
| 2018-07-26T12:36:41
| 2018-07-26T12:36:41
| 50,570,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,085
|
py
|
### PDG IDs ###
A_PDGID = 36
MU_PDGID = 13
TAU_PDGID = 15
ANY_PDGID = 0
### Tau decay types ###
TAU_HAD = 0
TAU_MU = 1
TAU_E = 2
TAU_ALL = 3
### Tau hadronic decay types ###
TAU_ALL_HAD = -1
TAU_1PRONG_0NEUTRAL = 0
TAU_1PRONG_1NEUTRAL = 1
TAU_1PRONG_2NEUTRAL = 2
TAU_1PRONG_3NEUTRAL = 3
TAU_1PRONG_NNEUTRAL = 4
TAU_2PRONG_0NEUTRAL = 5
TAU_2PRONG_1NEUTRAL = 6
TAU_2PRONG_2NEUTRAL = 7
TAU_2PRONG_3NEUTRAL = 8
TAU_2PRONG_NNEUTRAL = 9
TAU_3PRONG_0NEUTRAL = 10
TAU_3PRONG_1NEUTRAL = 11
TAU_3PRONG_2NEUTRAL = 12
TAU_3PRONG_3NEUTRAL = 13
TAU_3PRONG_NNEUTRAL = 14
TAU_RARE = 15
### No consideration of pT rank ###
ANY_PT_RANK = -1
#################
# Initialization
#################
import FWCore.ParameterSet.Config as cms
import FWCore.Utilities.FileUtils as FileUtils
process = cms.Process("CleanJetsAnalyzer")
###################################################
# initialize MessageLogger and output report
###################################################
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32(100)
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True),
SkipEvent = cms.untracked.vstring('ProductNotFound')
)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
####################
# Input File List
####################
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'root://eoscms/FILE_PATHRegionB_selection_NUM.root')
)
process.ggh = cms.EDAnalyzer("FakeRateMiniAODGetRatesMuons",
outFileName = cms.string('/afs/cern.ch/work/k/ktos/public/CMSSW_8_0_17/src/AnalyzerGeneratorRecoVariousFunctions/Analyzer/BSUB/DIRNAME/DIRNAME_Plots_NUM.root'),
mu1Tag = cms.InputTag("GetMuOne"),
muonsTag = cms.InputTag("MuonsIDdxydz"),
tauTag = cms.InputTag("slimmedTausMuonCleaned"),
tauIsoTag = cms.string("byMediumIsolationMVArun2v1DBoldDMwLT"),
decayModeFindingTag = cms.string("decayModeFinding"),
checkTau = cms.bool(False),
checkTauIso = cms.bool(False),
passTauIso = cms.bool(False),
mu3dRMin = cms.double(0.0),
mu3dRMax = cms.double(0.8),
tauPtCut = cms.double(10.0),
mu3dROverlapCut = cms.double(.4),
requireRemovedMuon = cms.bool(True),
checkInvMass = cms.bool(True),
checkInvMassMin = cms.double(81),
checkInvMassMax = cms.double(101),
relIsoCutVal = cms.double(0.25),
passRelIso = cms.bool(True),
mu12dRCut = cms.double(600),
oppositeSign = cms.bool(True),
passdR = cms.bool(True),
mu2PtCut = cms.double(3),
passMu2PtCutForMu3Rate = cms.bool(True),
isMC = cms.bool(True),
xsec = cms.double(XSEC),
lumi = cms.double(LUMI_DATA),
summedWeights = cms.double(SUMMED_WEIGHTS),
pileupSummaryInfo = cms.InputTag("slimmedAddPileupInfo", "", "PAT"),
genEventInfoToken = cms.InputTag("generator", "", "SIM"),
PileupFileName = cms.string('/afs/cern.ch/user/k/ktos/GroupDir/CMSSW_8_0_17/src/AnalyzerGeneratorRecoVariousFunctions/Analyzer/FILE_TESTS/PileupWeights.root')
)
process.p2 = cms.Path(
process.ggh
)
|
[
"kmtos@ucdavis.edu"
] |
kmtos@ucdavis.edu
|
a94dad9094cd2893b487f36a7115d0663e3c5db0
|
526bf18a8695862067c817f432ab197ceb645f39
|
/migrations/versions/76fb82b8961b_cars_add_location_diller.py
|
985f450b99df7550155f2a0c2b4945f918d451e9
|
[] |
no_license
|
sintimaski/bfs-be
|
a7fd623911a2220face49a0ef84574f3fd7a09a8
|
964a9c7e9cc876aaf8b0723d6b3f26bd378c3721
|
refs/heads/master
| 2023-08-02T09:00:44.855055
| 2021-09-22T13:07:01
| 2021-09-22T13:07:01
| 339,531,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
"""common add location diller
Revision ID: 76fb82b8961b
Revises: 920ac7659cb4
Create Date: 2020-09-15 08:23:44.523023
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "76fb82b8961b"
down_revision = "920ac7659cb4"
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable text column ``car_product.location_diller``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column(
        "car_product", sa.Column("location_diller", sa.Text(), nullable=True)
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``car_product.location_diller`` column (reverses upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("car_product", "location_diller")
    # ### end Alembic commands ###
|
[
"dimadrebezov@gmail.com"
] |
dimadrebezov@gmail.com
|
b6a81ff24dc21edef75de72eabcc703386468603
|
33c51931bc7d6f73da5a64ecc0e7cb751e7fc62c
|
/karesansui/db/model/option.py
|
b43f24a1ca8043122b981e017ab2dccc30551e60
|
[
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
qmutz/karesansui
|
e86ed107f313f8c4140797a2c3250c5f16b524c2
|
f4ba1cf6f88cf76c3e4dbc444139d73134f7c9d1
|
refs/heads/develop
| 2023-05-06T14:52:38.668631
| 2019-02-01T03:57:00
| 2019-02-01T03:57:00
| 316,682,704
| 0
| 0
|
MIT
| 2021-06-03T14:59:45
| 2020-11-28T07:43:33
| null |
UTF-8
|
Python
| false
| false
| 6,806
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of Karesansui Core.
#
# Copyright (C) 2009-2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import sqlalchemy
from sqlalchemy.orm import mapper, clear_mappers, relation
import karesansui
import karesansui.db.model
from karesansui.lib.const import DEFAULT_LANGS
def get_option_table(metadata, now):
    """Build and return the SQLAlchemy ``option`` table definition.

    @param metadata: MetaData object the table is attached to
    @type metadata: sqlalchemy.schema.MetaData
    @param now: value/SQL function used for created/modified defaults
    @type now: Datetime or SQL function expression
    @return: sqlalchemy.schema.Table
    """
    columns = (
        sqlalchemy.Column('id', sqlalchemy.Integer,
                          primary_key=True,
                          autoincrement=True),
        sqlalchemy.Column('created_user_id', sqlalchemy.Integer,
                          sqlalchemy.ForeignKey('user.id')),
        sqlalchemy.Column('modified_user_id', sqlalchemy.Integer,
                          sqlalchemy.ForeignKey('user.id')),
        # Option keys are short, unique identifiers.
        sqlalchemy.Column('key', sqlalchemy.String(12),
                          nullable=False,
                          unique=True),
        sqlalchemy.Column('value', sqlalchemy.Text,
                          nullable=True),
        sqlalchemy.Column('created', sqlalchemy.DateTime,
                          default=now),
        # ``modified`` is refreshed automatically on every UPDATE.
        sqlalchemy.Column('modified', sqlalchemy.DateTime,
                          default=now,
                          onupdate=now),
    )
    return sqlalchemy.Table('option', metadata, *columns)
class Option(karesansui.db.model.Model):
    """Model class for the ``option`` table — a simple key/value store
    for application-wide settings.
    """

    def __init__(self, created_user, modified_user,
                 key, value=None):
        """
        @param created_user: creating user
        @type created_user: User
        @param modified_user: last modifying user
        @type modified_user: User
        @param key: option key
        @type key: str
        @param value: option value
        @type value: str(Text)
        """
        self.created_user = created_user
        self.modified_user = modified_user
        self.key = key
        self.value = value

    def get_json(self, languages):
        """Return a JSON-serializable dict of this option.

        Timestamps are formatted with the locale's DATE_FORMAT; when a
        timestamp is missing or unformattable, "unknown" is used instead.

        @param languages: language key into DEFAULT_LANGS
        """
        ret = {}
        ret["id"] = self.id
        ret["key"] = self.key
        ret["value"] = self.value
        ret["created_user_id"] = self.created_user_id
        ret["created_user"] = self.created_user.get_json(languages)
        ret["modified_user_id"] = self.modified_user_id
        ret["modified_user"] = self.modified_user.get_json(languages)
        # These were bare ``except:`` clauses, which also swallowed
        # SystemExit/KeyboardInterrupt; narrowed to Exception.
        try:
            ret["created"] = self.created.strftime(
                DEFAULT_LANGS[languages]['DATE_FORMAT'][1])
        except Exception:
            ret["created"] = "unknown"
        try:
            ret["modified"] = self.modified.strftime(
                DEFAULT_LANGS[languages]['DATE_FORMAT'][1])
        except Exception:
            ret["modified"] = "unknown"
        return ret

    def __repr__(self):
        return "Option<'key=%s, value=%s>" \
            % (self.key, self.value)
def reload_mapper(metadata, now):
    """Re-register the SQLAlchemy mapper for Option against *metadata*.

    @param metadata: MetaData to (re)load the mapping for
    @type metadata: sqlalchemy.schema.MetaData
    @param now: default timestamp value passed through to the table builder
    @type now: Datetime or SQL function expression
    """
    option_table = get_option_table(metadata, now)
    user_table = metadata.tables['user']
    # Both FK columns point at user.id, so each relation needs an
    # explicit primaryjoin to disambiguate.
    user_relations = {
        'created_user': relation(
            karesansui.db.model.user.User,
            primaryjoin=option_table.c.created_user_id == user_table.c.id),
        'modified_user': relation(
            karesansui.db.model.user.User,
            primaryjoin=option_table.c.modified_user_id == user_table.c.id),
    }
    mapper(Option, option_table, properties=user_relations)
if __name__ == '__main__':
    # Smoke test: map the model against an in-memory SQLite database.
    import sqlalchemy.orm

    bind_name = 'sqlite:///:memory:'
    engine = sqlalchemy.create_engine(bind_name,
                                      encoding="utf-8",
                                      convert_unicode=True,
                                      #assert_unicode='warn', # DEBUG
                                      echo=True,
                                      echo_pool=False
                                      )

    metadata = sqlalchemy.MetaData(bind=engine)
    # SQLite has no server-side now(); use its datetime() function instead.
    if metadata.bind.name == 'sqlite':
        _now = sqlalchemy.func.datetime('now', 'localtime')
    else:
        _now = sqlalchemy.func.now()

    reload_mapper(metadata, _now)
    # Removed a leftover ``import pdb; pdb.set_trace()`` debugger
    # breakpoint that would halt any direct execution of this module.
    metadata.drop_all()
    metadata.create_all()

    Session = sqlalchemy.orm.sessionmaker(bind=engine, autoflush=False)
    session = Session()

    # INSERT
    # SELECT One
    # UPDATE
    # DELETE
|
[
"taizo@karesansui-project.info"
] |
taizo@karesansui-project.info
|
fd67b2cea25f2c76efaf85561698eed8bbd55edd
|
88e06bab1989c81a2dd649bb09b144fa7c958f89
|
/leet_subarray_with_k_different_integers.py
|
e67b6bb2f63e7f22e17fc918d64b415ae5cb16f5
|
[] |
no_license
|
VaibhavD143/Coding
|
4499526b22ee4ef13f66c3abcea671c80a8f748a
|
5de3bae8891c7d174cbc847a37c3afb00dd28f0e
|
refs/heads/master
| 2023-08-06T21:56:44.934954
| 2021-10-09T18:31:29
| 2021-10-09T18:31:29
| 263,890,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,572
|
py
|
"""
Intution:
l1 is left-most valid index for substring ending at rth index
l2 is left-most index from where substring becomes invalid ending at rth index
so we can generate l2-l1 substrings ending at rth index
Valid : have exactly K distinct elements
"""
from collections import defaultdict
class Solution:
    def subarraysWithKDistinct(self, lst: List[int], K: int) -> int:
        """Count subarrays of *lst* containing exactly K distinct integers.

        Two sliding windows are maintained per right index r:
          * l1: left-most index where lst[l1:r+1] has <= K distinct values
          * l2: left-most index where lst[l2:r+1] has  < K distinct values
        so exactly l2 - l1 valid subarrays end at r.
        """
        l1 = l2 = res = 0
        dist1 = dist2 = 0
        # defaultdict(int) instead of plain dicts with .get(..., 0):
        # consistent with atMostK below (defaultdict was already imported).
        ha1 = defaultdict(int)
        ha2 = defaultdict(int)
        for r in range(len(lst)):
            if ha1[lst[r]] == 0:
                dist1 += 1
            if ha2[lst[r]] == 0:
                dist2 += 1
            ha1[lst[r]] += 1
            ha2[lst[r]] += 1
            # Shrink window 1 until it holds at most K distinct values.
            while dist1 > K:
                ha1[lst[l1]] -= 1
                if ha1[lst[l1]] == 0:
                    dist1 -= 1
                l1 += 1
            # Shrink window 2 until it holds fewer than K distinct values.
            while dist2 >= K:
                ha2[lst[l2]] -= 1
                if ha2[lst[l2]] == 0:
                    dist2 -= 1
                l2 += 1
            res += l2 - l1
        return res
        # Alternative: return self.atMostK(lst, K) - self.atMostK(lst, K - 1)

    def atMostK(self, lst, k):
        """Count subarrays of *lst* containing at most k distinct integers."""
        l = res = 0
        ha = defaultdict(int)
        dist = 0
        for r in range(len(lst)):
            if ha[lst[r]] == 0:
                dist += 1
            ha[lst[r]] += 1
            while dist > k:
                ha[lst[l]] -= 1
                if ha[lst[l]] == 0:
                    dist -= 1
                l += 1
            # Every subarray ending at r with left edge in [l, r] qualifies.
            res += r - l + 1
        return res
|
[
"vaibhav.dodiya143vd@gmail.com"
] |
vaibhav.dodiya143vd@gmail.com
|
cfeefe2c257d1544d9c955d1dc11614118070a9c
|
15e8a393f6c71ba77094a1718f4f89050409c7ae
|
/library/templatetags/book_tags.py
|
9a7f4c4fe3a48fb66d691bf5f46e69dbe8b81227
|
[] |
no_license
|
emilte/johansson
|
21a3e20208c67725776af0f94de4c29150935b50
|
d16bdde26e840814562f668904b2f5588c0a13ad
|
refs/heads/master
| 2023-07-23T21:01:32.830302
| 2021-09-05T14:56:01
| 2021-09-05T14:56:01
| 390,360,563
| 0
| 0
| null | 2021-08-30T00:42:49
| 2021-07-28T13:26:31
|
SCSS
|
UTF-8
|
Python
| false
| false
| 2,133
|
py
|
# imports
import PIL
import random
from django import template
from django.conf import settings
from django.utils import timezone
from django.templatetags.static import static
from django.contrib.auth.models import Permission
# End: imports -----------------------------------------------------------------
register = template.Library()
# https://docs.djangoproject.com/en/3.0/howto/custom-template-tags/
@register.inclusion_tag('library/components/book.html')
def display_book(request, perms, book, **kwargs):
    """Render one book with the book component template.

    Optional kwargs: ``classes`` — extra CSS classes for the component.
    """
    context = dict(
        request=request,
        perms=perms,
        book=book,
        classes=kwargs.get('classes'),
    )
    return context
@register.inclusion_tag('library/components/book_filter_form.html')
def display_book_filter(request, perms, form, **kwargs):
    """Render the book filter form component.

    Optional kwargs: ``classes`` — extra CSS classes; ``action`` — form
    submission target URL.
    """
    context = dict(
        request=request,
        perms=perms,
        form=form,
        classes=kwargs.get('classes'),
        action=kwargs.get('action'),
    )
    return context
# https://docs.djangoproject.com/en/3.0/howto/custom-template-tags/
# @register.simple_tag
# def get_image(model, fielname):
# if not model or not getattr(model, fieldname):
# return static('/root/img/image-placeholder.png')
# return getattr(model, fieldname).url
#
# # https://stackoverflow.com/questions/16348003/displaying-a-timedelta-object-in-a-django-template
# @register.filter()
# def smooth_timedelta(timedeltaobj):
# """Convert a datetime.timedelta object into Days, Hours, Minutes, Seconds."""
# if not timedeltaobj:
# return None
# secs = timedeltaobj.total_seconds()
# timetot = ""
# if secs > 86400: # 60sec * 60min * 24hrs
# days = secs // 86400
# timetot += "{} dager".format(int(days))
# secs = secs - days*86400
#
# if secs > 3600:
# hrs = secs // 3600
# timetot += " {} timer".format(int(hrs))
# secs = secs - hrs*3600
#
# if secs > 60:
# mins = secs // 60
# timetot += " {} minutter".format(int(mins))
# secs = secs - mins*60
#
# if secs > 0:
# timetot += " {} sekunder".format(int(secs))
# return timetot
#
|
[
"emil.telstad@gmail.com"
] |
emil.telstad@gmail.com
|
132f1bff7f32b051fa9726e0c877ce090d8c5506
|
c264153f9188d3af187905d846fa20296a0af85d
|
/Python/Python3网络爬虫开发实战/《Python3网络爬虫开发实战》随书源代码/jiepai/spider.py
|
7ca771da154f16c51cbbbec444ee1ad0371ecea2
|
[] |
no_license
|
IS-OSCAR-YU/ebooks
|
5cd3c1089a221759793524df647e231a582b19ba
|
b125204c4fe69b9ca9ff774c7bc166d3cb2a875b
|
refs/heads/master
| 2023-05-23T02:46:58.718636
| 2021-06-16T12:15:13
| 2021-06-16T12:15:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,905
|
py
|
import os
from multiprocessing.pool import Pool
import requests
from urllib.parse import urlencode
from hashlib import md5
def get_page(offset):
    """Fetch one page of search results as parsed JSON.

    Returns None on a connection error or a non-200 response.
    """
    query = urlencode({
        'offset': offset,
        'format': 'json',
        'keyword': '街拍',
        'autoload': 'true',
        'count': '20',
        'cur_tab': '1',
    })
    url = 'http://www.toutiao.com/search_content/?' + query
    try:
        response = requests.get(url)
        if response.status_code == 200:
            return response.json()
    except requests.ConnectionError:
        return None
def get_images(json):
    """Yield ``{'image': url, 'title': title}`` dicts from one result page.

    Yields nothing when the page has no ``data`` entry.
    """
    if not json.get('data'):
        return
    for entry in json.get('data'):
        caption = entry.get('title')
        for img in entry.get('image_detail'):
            yield {
                'image': img.get('url'),
                'title': caption
            }
def save_image(item):
    """Download item['image'] into a directory named after its title.

    Files are named by the MD5 of their content, so re-downloading the
    same image is detected and skipped.
    """
    title = item.get('title')
    if not os.path.exists(title):
        os.mkdir(title)
    try:
        response = requests.get(item.get('image'))
        if response.status_code != 200:
            return
        file_path = '{0}/{1}.{2}'.format(
            title, md5(response.content).hexdigest(), 'jpg')
        if os.path.exists(file_path):
            print('Already Downloaded', file_path)
        else:
            with open(file_path, 'wb') as f:
                f.write(response.content)
    except requests.ConnectionError:
        print('Failed to Save Image')
def main(offset):
    """Fetch one result page at *offset* and save every image it references."""
    page = get_page(offset)
    if not page:
        # get_page returns None on connection errors / non-200 responses;
        # iterating get_images(None) would raise AttributeError.
        return
    for item in get_images(page):
        print(item)
        save_image(item)
# Page-offset range: offsets 20, 40, ..., 400 (GROUP_END pages of 20 items).
GROUP_START = 1
GROUP_END = 20

if __name__ == '__main__':
    # Fan the offsets out over a process pool; close() then join() so all
    # workers finish before the script exits.
    pool = Pool()
    groups = ([x * 20 for x in range(GROUP_START, GROUP_END + 1)])
    pool.map(main, groups)
    pool.close()
    pool.join()
|
[
"jiangzhangha@163.com"
] |
jiangzhangha@163.com
|
335096cb45cac78881b05c35bf2129c9b91761c7
|
76742bf1c7dee6a01a0a41402fe734eeb0da3d74
|
/venv/bin/sphinx-autogen
|
7607e9cf2f5bad91acbff30af728168c78ff5ba7
|
[] |
no_license
|
Zacharilius/tangoProject
|
e5490c80af3caaabe2cf132a40387db2574713dc
|
305fa20e344f8ad24514dff959be3e4e3632645e
|
refs/heads/master
| 2021-01-22T23:26:51.921743
| 2015-03-17T17:52:22
| 2015-03-17T17:52:22
| 29,359,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
#!/home/zacharilius/Documents/GitHub/tangoProject/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from sphinx.ext.autosummary.generate import main
if __name__ == '__main__':
    # setuptools console-script wrappers can leave a "-script.pyw" or ".exe"
    # suffix on argv[0]; strip it before delegating to sphinx-autogen.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"zabensley@gmail.com"
] |
zabensley@gmail.com
|
|
741026f4fb0abd4abf264a986c30c02d470275ad
|
21c6989099b95e608c73b246c5ccc97dfac688da
|
/testserver.py
|
b7d29e7363de0bf4c74e4e5fc18ed0a4cdfda7d1
|
[] |
no_license
|
ashcrow/taboot-tailer
|
a8200dedab4084041ec1f931b93d53360e5789f2
|
4c403cfaeb38e7a3b10478b74b28e5b31b1d4e5b
|
refs/heads/master
| 2021-01-21T01:18:05.144141
| 2014-05-23T15:39:45
| 2014-05-23T15:39:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
#!/usr/bin/env python
import SimpleHTTPServer
import SocketServer
def main():
SocketServer.TCPServer.allow_reuse_address = True
httpd = SocketServer.TCPServer((
"127.0.0.1", 5000),
SimpleHTTPServer.SimpleHTTPRequestHandler)
print "Do not use this server for anything but testing!"
print "Serving on 127.0.0.1:5000. Hit ctrl+c to exit."
try:
httpd.serve_forever()
except KeyboardInterrupt:
httpd.shutdown()
# Start the throwaway test server when executed directly.
if __name__ == '__main__':
    main()
|
[
"stevem@gnulinux.net"
] |
stevem@gnulinux.net
|
988107dcdf49e79a36105bd8984988e97d0bd846
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/ptp1b_input/L84/84-85_wat_20Abox/set_1ns_equi.py
|
3b6c86ee4db3d4d1da891eb4aea66a5c7fe219e7
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785
| 2020-02-18T16:57:04
| 2020-02-18T16:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 916
|
py
|
import os
# Prepare and submit 1 ns equilibration jobs for every lambda window of a
# one-step thermodynamic-integration (TI) transformation (residues 84->85).
dir = '/mnt/scratch/songlin3/run/ptp1b/L84/wat_20Abox/ti_one-step/84_85/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi.in'
temp_pbs = filesdir + 'temp_1ns_equi.pbs'

# Lambda windows of the TI transformation; each gets its own directory.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]

for j in lambd:
    # Recreate a clean per-window working directory named after the lambda.
    os.system("rm -r %6.5f" %(j))
    os.system("mkdir %6.5f" %(j))
    os.chdir("%6.5f" %(j))
    os.system("rm *")
    workdir = dir + "%6.5f" %(j) + '/'
    # Equilibration input: copy the template, substitute lambda for XXX.
    eqin = workdir + "%6.5f_equi.in" %(j)
    os.system("cp %s %s" %(temp_equiin, eqin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
    # PBS job script: same template-and-substitute treatment.
    pbs = workdir + "%6.5f_1ns_equi.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    # Topology and restart coordinates shared by all windows.
    os.system("cp ../84-85_merged.prmtop .")
    os.system("cp ../0.5_equi_0.rst .")
    # Queue the job, then return to the parent directory for the next window.
    os.system("qsub %s" %(pbs))
    os.chdir(dir)
|
[
"songlin3@msu.edu"
] |
songlin3@msu.edu
|
be50f1b323652c40e6b49cf9a2bd79de2d808a78
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_crocks.py
|
1a3f520d65582f3f690bc163e29107237760a5b2
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
from xai.brain.wordbase.nouns._crock import _CROCK
# class header
class _CROCKS(_CROCK):
    """Noun entry for the plural "crocks"; behavior comes from _CROCK."""

    def __init__(self):
        _CROCK.__init__(self)
        # Word metadata: display name, word class, and singular base form.
        self.jsondata = {}
        self.basic = "crock"
        self.specie = 'nouns'
        self.name = "CROCKS"
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
f83c4b3c6faf3ef8350d51cee7b1ed1509e0e084
|
a51062d546e0936c5b2ac5cbfb3f91fc7bf342d7
|
/openff/evaluator/properties/__init__.py
|
a1178d3374263026cb4e256232bb78964f451d98
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
joegomes/openff-evaluator
|
4e748a9f43001a8e738851a9160837f6618fc8e5
|
9ccd3d017af0c6c1b556b1e77a8eccd0d1dfbcd1
|
refs/heads/master
| 2023-04-22T01:02:47.038298
| 2021-05-05T18:49:49
| 2021-05-05T18:49:49
| 364,670,685
| 0
| 0
|
NOASSERTION
| 2021-05-05T18:49:50
| 2021-05-05T18:27:10
| null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
from .binding import HostGuestBindingAffinity
from .density import Density, ExcessMolarVolume
from .dielectric import DielectricConstant
from .enthalpy import EnthalpyOfMixing, EnthalpyOfVaporization
from .solvation import SolvationFreeEnergy
# Public API of this package. ``__all__`` must contain *strings*, not the
# imported class objects themselves — with objects, a wildcard import
# (``from openff.evaluator.properties import *``) raises
# "TypeError: Item in __all__ must be str".
__all__ = [
    "HostGuestBindingAffinity",
    "Density",
    "ExcessMolarVolume",
    "DielectricConstant",
    "EnthalpyOfMixing",
    "EnthalpyOfVaporization",
    "SolvationFreeEnergy",
]
|
[
"simon.boothroyd@colorado.edu"
] |
simon.boothroyd@colorado.edu
|
2323ad3b44127d58791d33e46cf34b22f637af99
|
553b86e3b1ed21e64ea4feeb690af0701a17ba5f
|
/prob1.py
|
807fc0a8bb9f9d250e350cde7f495af6014e156a
|
[] |
no_license
|
shihyuuuuuuu/LeetCode_practice
|
d1c4b7851abfa42fcc4b56f835444792aca3f222
|
dbc7e988ca9fd6f3a9541a36a0ad543c97b884af
|
refs/heads/master
| 2023-01-03T21:25:14.426989
| 2020-11-03T07:09:29
| 2020-11-03T07:09:29
| 254,667,846
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 940
|
py
|
class Solution:
    """Three variants of Two Sum (LeetCode #1).

    NOTE: in the original, the first two variants were BOTH named
    ``twoSum``, so the second definition silently shadowed the first,
    leaving it as unreachable dead code.  The shadowed variant is renamed
    ``twoSum_ver1``; ``twoSum`` stays bound to the same implementation
    callers saw before (the prefilled-dict variant).
    """

    # Prettier: single pass — look up the complement while filling the map.
    def twoSum_ver1(self, nums: List[int], target: int) -> List[int]:
        seen = {}
        for cnt, i in enumerate(nums):
            # -1 is a safe sentinel: list indices are never negative.
            if seen.get(target - i, -1) != -1:
                return [seen[target - i], cnt]
            else:
                seen[i] = cnt

    # Faster, more memory: prefill the value->index map, then scan.
    def twoSum(self, nums: List[int], target: int) -> List[int]:
        num_dict = {}
        for idx, i in enumerate(nums):
            num_dict[i] = idx
        for idx, i in enumerate(nums):
            # Guard against pairing an element with itself.
            if (target - i) in num_dict and num_dict[target - i] != idx:
                return [idx, num_dict[target - i]]

    # Slower, less memory: brute-force O(n^2) pair scan.
    def twoSum_ver2(self, nums: List[int], target: int) -> List[int]:
        for idx1, i in enumerate(nums):
            for idx2, j in enumerate(nums[idx1 + 1:]):
                if i + j == target:
                    return [idx1, idx1 + idx2 + 1]
|
[
"www.elmo20816@gmail.com"
] |
www.elmo20816@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.