Schema (one row per source file; ranges are min-max over the dataset):

blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 3-288)
content_id: string (length 40)
detected_licenses: list (length 0-112)
license_type: string (2 classes)
repo_name: string (length 5-115)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (684 classes)
visit_date: timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38)
revision_date: timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00)
committer_date: timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06)
github_id: int64 (4.92k to 681M, nullable)
star_events_count: int64 (0 to 209k)
fork_events_count: int64 (0 to 110k)
gha_license_id: string (22 classes, nullable)
gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable)
gha_created_at: timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable)
gha_language: string (147 classes, nullable)
src_encoding: string (25 classes)
language: string (1 value)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (128 to 12.7k)
extension: string (142 classes)
content: string (length 128 to 8.19k)
authors: list (length 1)
author_id: string (length 1-132)

Records (each block lists the row's metadata, then the file content):
blob_id: f8e650b4108f33a5a304944caf20ee25f045cba5 | directory_id: 8747375a4c6442a5bc317baad36ba41f5de4512e | content_id: ca2beb584b49a1f858048462ec0f5e23cf67c068
path: /personal/migrations/0007_auto_20150226_0351.py
repo_name: raultr/perBackend | branch_name: refs/heads/master | snapshot_id: 40f73199cb722133d79d76b4389d4f613764560b | revision_id: f22542f79f293de444e29ac7183a0ee9c5b86889
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: JavaScript
visit_date: 2022-12-06T10:17:29.400434 | revision_date: 2017-02-14T03:23:13 | committer_date: 2017-02-14T03:23:13 | gha_event_created_at: 2022-11-22T00:26:36 | gha_created_at: 2015-01-30T03:57:03
github_id: 30,055,264 | star_events_count: 0 | fork_events_count: 0
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 888 | extension: py
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('personal', '0006_personal_imagen'),
]
operations = [
migrations.AlterField(
model_name='personal',
name='condiciones_alta',
field=models.CharField(default=b'', max_length=150, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='personal',
name='cuip',
field=models.CharField(max_length=30, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='personal',
name='id_seguridad_social',
field=models.CharField(max_length=20, blank=True),
preserve_default=True,
),
]
authors: ["raultr@gmail.com"] | author_id: raultr@gmail.com

blob_id: 24f91adc550d123a98239a57ae27ac6345f382ab | directory_id: cd44f9f6d97e54886352353da9c45d9e6c291928 | content_id: d347faa1a852e2b8fbe3b1fd52357e2b88adaebb
path: /newspaper/admin.py
repo_name: MaksimFelchuck/Felnews | branch_name: refs/heads/master | snapshot_id: c480f045dc21d6f40e10d233a011fb05522f53f9 | revision_id: a3411f10230b7cecdac4a49cb7e83c03d1c89444
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2023-02-17T08:13:21.413801 | revision_date: 2021-01-16T14:55:03 | committer_date: 2021-01-16T14:55:03 | gha_event_created_at: null | gha_created_at: null
github_id: 330,102,149 | star_events_count: 1 | fork_events_count: 0
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 171 | extension: py
content:
from django.contrib import admin
from newspaper.models import *
# Register your models here.
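# admin.register accepts several models at once, so News and Image both use this ModelAdmin.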
@admin.register(News, Image)
class PersonAdmin(admin.ModelAdmin):
pass
authors: ["felchuck@yandex.ru"] | author_id: felchuck@yandex.ru

blob_id: fa870428c18812b9d152127aa4df6cc4092bdbff | directory_id: e967290f67437c0afcbb4597e9ba6020761f2a45 | content_id: 4bff77b5657b6ea0ac484359fb93d3a804362451
path: /github.com/ceph/ceph-deploy/ceph_deploy/util/wrappers.py
repo_name: mp-rheinrich/mp-fs-sandbox | branch_name: refs/heads/master | snapshot_id: 77bf40a27a0d6c2b38cbc7562023a92fca8751c0 | revision_id: 35c38ac9d4d7ad941facfd24ab0a068630c57bdf
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: null | gha_language: null
visit_date: 2020-05-31T11:13:13.474102 | revision_date: 2013-08-21T12:59:11 | committer_date: 2013-08-21T12:59:11 | gha_event_created_at: null | gha_created_at: null
github_id: null | star_events_count: 0 | fork_events_count: 0
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,052 | extension: py
content:
"""
In a lot of places we need to make system calls, mainly through subprocess.
Here we define them and reuse them with the added functionality of getting
logging and remote execution.
This allows us to only remote-execute the actual calls, not whole functions.
"""
from ceph_deploy.util.decorators import remote_compile
from ceph_deploy.util import context
def check_call(conn, logger, args, *a, **kw):
"""
Wraps ``subprocess.check_call`` for a remote call via ``pushy``
doing all the capturing and logging nicely upon failure/success
The traceback is mangled when an exception occurs because the
caller gets eaten up by not being executed in the actual function of
a given module (e.g. ``centos/install.py``) but rather here, where the
stack trace is no longer relevant.
:param args: The args to be passed onto ``check_call``
"""
command = ' '.join(args)
patch = kw.pop('patch', True) # Always patch unless explicitly told to
logger.info('Running command: %s' % command)
def remote_call(args, *a, **kw):
import subprocess
subprocess.check_call(
args,
*a,
**kw
)
with context.remote(conn, logger, remote_call, mangle_exc=False, patch=patch) as call:
try:
return call(args, *a, **kw)
except Exception as err:
import inspect
stack = inspect.getframeinfo(inspect.currentframe().f_back)
if hasattr(err, 'remote_traceback'):
logger.error('Traceback (most recent call last):')
logger.error(' File "%s", line %s, in %s' % (
stack[0],
stack[1],
stack[2])
)
err.remote_traceback.pop(0)
for line in err.remote_traceback:
if line:
logger.error(line)
raise RuntimeError('Failed to execute command: %s' % ' '.join(args))
else:
raise err
authors: ["roman.heinrich@gmail.com"] | author_id: roman.heinrich@gmail.com

blob_id: 59ab3b8c00e340f686e72893e1533b2c4bc80c26 | directory_id: 14ec9fc9aee69d54701168c069df4fe46a27b811 | content_id: 67fa76a4e374de9b7f2442ed9f00517b1d4886b5
path: /makeDigikeyBOM.py
repo_name: BitKnitting/MakeDigikeyBOM | branch_name: refs/heads/master | snapshot_id: 1af6e79b9c9bb86590425ec06bbacc63fa2cbb60 | revision_id: ef12a92dec3abbd86571b40d6ea7ea72fa6e60b1
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2021-01-13T00:59:15.063713 | revision_date: 2017-02-20T18:30:42 | committer_date: 2017-02-20T18:30:42 | gha_event_created_at: null | gha_created_at: null
github_id: 53,728,672 | star_events_count: 10 | fork_events_count: 0
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 841 | extension: py
content:
#
# The main entry point for making a Digikey BOM CSV file from the output of bom2csv
# as discussed in the bitknitting blog post: https://bitknitting.wordpress.com/2016/03/05/from-kicad-to-digikey-generating-a-bom-based-on-esteem-overview/
#
import logging
logger = logging.getLogger(__name__)
from replaceJellyBeanParts import replaceJellyBeanParts
from makeDigikeyFile import makeDigikeyFile
from getParts import getParts
def makeDigikeyBOM(outputFrom_bom2csv,jellyBeanFile,outDir,numProcesses):
modifiedBOM2csvFile = replaceJellyBeanParts(outputFrom_bom2csv=outputFrom_bom2csv,jellyBeanFile=jellyBeanFile)
components_by_part_number = getParts(modifiedBOM2csvFile=modifiedBOM2csvFile)
if not makeDigikeyFile(components_by_part_number,outDir):
logger.error("Could not make the Digikey file. Check output from logger.")
authors: ["farmerrobbie@freshsalad.today"] | author_id: farmerrobbie@freshsalad.today

blob_id: 8ebd576a5c0651eb84ceff4de6e7daa1b0798574 | directory_id: 5ec06dab1409d790496ce082dacb321392b32fe9 | content_id: 3a604357918c8edc66ecdea80b46d7424d41f966
path: /clients/python/generated/test/test_com_adobe_granite_repository_hc_impl_authorizable_node_name_health_check_properties.py
repo_name: shinesolutions/swagger-aem-osgi | branch_name: refs/heads/master | snapshot_id: e9d2385f44bee70e5bbdc0d577e99a9f2525266f | revision_id: c2f6e076971d2592c1cbd3f70695c679e807396b
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: null
visit_date: 2022-10-29T13:07:40.422092 | revision_date: 2021-04-09T07:46:03 | committer_date: 2021-04-09T07:46:03 | gha_event_created_at: 2022-10-05T03:26:20 | gha_created_at: 2019-06-04T14:23:28
github_id: 190,217,155 | star_events_count: 3 | fork_events_count: 3
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,487 | extension: py
content:
# coding: utf-8
"""
Adobe Experience Manager OSGI config (AEM) API
Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: opensource@shinesolutions.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import swaggeraemosgi
from swaggeraemosgi.models.com_adobe_granite_repository_hc_impl_authorizable_node_name_health_check_properties import ComAdobeGraniteRepositoryHcImplAuthorizableNodeNameHealthCheckProperties # noqa: E501
from swaggeraemosgi.rest import ApiException
class TestComAdobeGraniteRepositoryHcImplAuthorizableNodeNameHealthCheckProperties(unittest.TestCase):
"""ComAdobeGraniteRepositoryHcImplAuthorizableNodeNameHealthCheckProperties unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testComAdobeGraniteRepositoryHcImplAuthorizableNodeNameHealthCheckProperties(self):
"""Test ComAdobeGraniteRepositoryHcImplAuthorizableNodeNameHealthCheckProperties"""
# FIXME: construct object with mandatory attributes with example values
# model = swaggeraemosgi.models.com_adobe_granite_repository_hc_impl_authorizable_node_name_health_check_properties.ComAdobeGraniteRepositoryHcImplAuthorizableNodeNameHealthCheckProperties() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
authors: ["michael.bloch@shinesolutions.com"] | author_id: michael.bloch@shinesolutions.com

blob_id: 1a164b7d02c2408775c790313f51483c12f60fa3 | directory_id: 0124528676ee3bbaec60df5d6950b408e6da37c8 | content_id: 2ec4e88196a8c47dfe88b6d038fdaac9fc2e1030
path: /Projects/QTPy/adafruit-circuitpython-bundle-7.x-mpy-20220601/examples/pct2075_high_temp_alert_example.py
repo_name: land-boards/lb-boards | branch_name: refs/heads/master | snapshot_id: 8127658dc537dcfde0bb59a5018ab75c3f0087f6 | revision_id: eeb98cc2003dac1924845d949f6f5bd387376568
detected_licenses: ["LicenseRef-scancode-warranty-disclaimer"] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2023-06-07T15:44:46.110742 | revision_date: 2023-06-02T22:53:24 | committer_date: 2023-06-02T22:53:24 | gha_event_created_at: null | gha_created_at: null
github_id: 4,847,305 | star_events_count: 10 | fork_events_count: 12
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 591 | extension: py
content:
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
import time
import board
import adafruit_pct2075
i2c = board.I2C() # uses board.SCL and board.SDA
pct = adafruit_pct2075.PCT2075(i2c)
pct.high_temperature_threshold = 35.5
pct.temperature_hysteresis = 30.0
pct.high_temp_active_high = False
print("High temp alert active high? %s" % pct.high_temp_active_high)
# Attach an LED with the Cathode to the INT pin and Anode to 3.3V with a current limiting resistor
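# Assumption based on the threshold/hysteresis pair set above: the INT alert
# asserts (active low here) above 35.5 C and releases once the temperature
# drops back below the 30.0 C hysteresis point.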
while True:
print("Temperature: %.2f C" % pct.temperature)
time.sleep(0.5)
authors: ["doug@douglasgilliland.com"] | author_id: doug@douglasgilliland.com

blob_id: 893ef9476b1476a4529208a0c1475e6749d452e7 | directory_id: 6d1728bf105a7d6481d0bbca2b88f4478e0632d9 | content_id: e0e1b3167becdb2101cb9e5f656394b366a1fa76
path: /beautifulsoup/start/sibling.py
repo_name: Phantomn/Python | branch_name: refs/heads/Python | snapshot_id: 00c63aceb2d4aa0db71fe5e33fe8b5159b41aadd | revision_id: 12808adf4b52c60cfe94befb6daa1e8187224beb
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2022-11-09T16:49:49.165884 | revision_date: 2019-08-05T07:30:07 | committer_date: 2019-08-05T07:30:07 | gha_event_created_at: null | gha_created_at: null
github_id: 44,149,995 | star_events_count: 0 | fork_events_count: 0
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 259 | extension: py
content:
from urllib import urlopen
from bs4 import BeautifulSoup
html = urlopen("http://www.pythonscraping.com/pages/page3.html")
bsObj = BeautifulSoup(html, "html.parser")
for sibling in bsObj.find("table", {"id":"giftList"}).tr.next_siblings:
print(sibling)
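# Note: this is Python 2 code; on Python 3 the import is "from urllib.request import urlopen".
# next_siblings starts after the first <tr>, so the table's header row itself is skipped.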
authors: ["tmdvyr123@naver.com"] | author_id: tmdvyr123@naver.com

blob_id: 09b3a389b1084df14d4c4a8c2f0930a95a481b25 | directory_id: 44a7330dfa4fe321eb432ee57a32328578dec109 | content_id: e543aff8ebdf2506e861097e21e31271cf4bb07d
path: /milk/tests/test_pca.py
repo_name: tzuryby/milk | branch_name: refs/heads/master | snapshot_id: 7cb6760fad600e9e0d0c9216dc749db289b596fb | revision_id: a7159b748414d4d095741978fb994c4affcf6b9b
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: null | gha_language: null
visit_date: 2020-12-29T02:45:33.044864 | revision_date: 2011-03-15T20:23:29 | committer_date: 2011-03-15T20:25:11 | gha_event_created_at: null | gha_created_at: null
github_id: 1,485,748 | star_events_count: 2 | fork_events_count: 0
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 472 | extension: py
content:
import numpy.random
import milk.unsupervised.pca
import numpy as np
def test_pca():
numpy.random.seed(123)
X = numpy.random.rand(10,4)
X[:,1] += numpy.random.rand(10)**2*X[:,0]
X[:,1] += numpy.random.rand(10)**2*X[:,0]
X[:,2] += numpy.random.rand(10)**2*X[:,0]
Y,V = milk.unsupervised.pca(X)
Xn = milk.unsupervised.normalise.zscore(X)
assert X.shape == Y.shape
assert ((np.dot(V[:4].T,Y[:,:4].T).T-Xn)**2).sum()/(Xn**2).sum() < .3
authors: ["lpc@cmu.edu"] | author_id: lpc@cmu.edu

blob_id: 76d1807da6e30de90b7fc8d7ae5c3f2be4b808a3 | directory_id: c10f20abec372f81dbd6468ead208543f60940f1 | content_id: 8cd96f72d7add1efc07bd0c634a76cf4b1150c0f
path: /learning/20.BayesianNetwork/20.1.Iris_GaussianNB.py
repo_name: alenzhd/meachineLearning | branch_name: refs/heads/master | snapshot_id: 64876e7a6c0b8b39a63a9eb586d306a3489b4447 | revision_id: 1b66ce2f73b226548f07e45c8537b8286635a048
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2021-08-24T10:55:52.056439 | revision_date: 2017-12-09T10:26:37 | committer_date: 2017-12-09T10:26:37 | gha_event_created_at: null | gha_created_at: null
github_id: 112,688,163 | star_events_count: 1 | fork_events_count: 0
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,854 | extension: py
content:
#!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.preprocessing import StandardScaler, MinMaxScaler, PolynomialFeatures
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
def iris_type(s):
it = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}
return it[s]
if __name__ == "__main__":
data = pd.read_csv('..\\8.Regression\\iris.data', header=None)
x, y = data[np.arange(4)], data[4]
y = pd.Categorical(values=y).codes
    feature_names = u'sepal length', u'sepal width', u'petal length', u'petal width'
features = [0,1]
x = x[features]
x, x_test, y, y_test = train_test_split(x, y, train_size=0.7, random_state=0)
priors = np.array((1,2,4), dtype=float)
priors /= priors.sum()
gnb = Pipeline([
('sc', StandardScaler()),
('poly', PolynomialFeatures(degree=1)),
        ('clf', GaussianNB(priors=priors))])  # the iris classes are balanced, so setting priors is not strictly necessary
# gnb = KNeighborsClassifier(n_neighbors=3).fit(x, y.ravel())
gnb.fit(x, y.ravel())
y_hat = gnb.predict(x)
    print('Training set accuracy: %.2f%%' % (100 * accuracy_score(y, y_hat)))
    y_test_hat = gnb.predict(x_test)
    print('Test set accuracy: %.2f%%' % (100 * accuracy_score(y_test, y_test_hat)))  # plotting follows
    N, M = 500, 500  # number of grid samples along each axis
x1_min, x2_min = x.min()
x1_max, x2_max = x.max()
t1 = np.linspace(x1_min, x1_max, N)
t2 = np.linspace(x2_min, x2_max, M)
    x1, x2 = np.meshgrid(t1, t2)  # generate grid sample points
    x_grid = np.stack((x1.flat, x2.flat), axis=1)  # test points
mpl.rcParams['font.sans-serif'] = [u'simHei']
mpl.rcParams['axes.unicode_minus'] = False
cm_light = mpl.colors.ListedColormap(['#77E0A0', '#FF8080', '#A0A0FF'])
cm_dark = mpl.colors.ListedColormap(['g', 'r', 'b'])
    y_grid_hat = gnb.predict(x_grid)  # predicted values
y_grid_hat = y_grid_hat.reshape(x1.shape)
plt.figure(facecolor='w')
    plt.pcolormesh(x1, x2, y_grid_hat, cmap=cm_light)  # display the predicted regions
plt.scatter(x[features[0]], x[features[1]], c=y, edgecolors='k', s=50, cmap=cm_dark)
plt.scatter(x_test[features[0]], x_test[features[1]], c=y_test, marker='^', edgecolors='k', s=120, cmap=cm_dark)
plt.xlabel(feature_names[features[0]], fontsize=13)
plt.ylabel(feature_names[features[1]], fontsize=13)
plt.xlim(x1_min, x1_max)
plt.ylim(x2_min, x2_max)
    plt.title(u'GaussianNB classification of the iris data', fontsize=18)
plt.grid(True)
plt.show()
authors: ["zhanghd@asiainfo-mixdata.com"] | author_id: zhanghd@asiainfo-mixdata.com

blob_id: c63fff8d99a3dc4b7fc547ac13a5fde5ce61b21f | directory_id: 8a25ada37271acd5ea96d4a4e4e57f81bec221ac | content_id: ad0fa7c6517b0cc3e0e04b248b8071b8d35346b8
path: /home/pi/GrovePi/Software/Python/others/temboo/Library/Fitbit/Social/CreateInvite.py
repo_name: lupyuen/RaspberryPiImage | branch_name: refs/heads/master | snapshot_id: 65cebead6a480c772ed7f0c4d0d4e08572860f08 | revision_id: 664e8a74b4628d710feab5582ef59b344b9ffddd
detected_licenses: ["MIT", "Apache-2.0"] | license_type: permissive | gha_license_id: null | gha_language: null
visit_date: 2021-01-20T02:12:27.897902 | revision_date: 2016-11-17T17:32:30 | committer_date: 2016-11-17T17:32:30 | gha_event_created_at: null | gha_created_at: null
github_id: 42,438,362 | star_events_count: 7 | fork_events_count: 8
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,095 | extension: py
content:
# -*- coding: utf-8 -*-
###############################################################################
#
# CreateInvite
# Invites a user to become friends with authorized user.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class CreateInvite(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the CreateInvite Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(CreateInvite, self).__init__(temboo_session, '/Library/Fitbit/Social/CreateInvite')
def new_input_set(self):
return CreateInviteInputSet()
def _make_result_set(self, result, path):
return CreateInviteResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return CreateInviteChoreographyExecution(session, exec_id, path)
class CreateInviteInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the CreateInvite
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessTokenSecret(self, value):
"""
Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret retrieved during the OAuth process.)
"""
super(CreateInviteInputSet, self)._set_input('AccessTokenSecret', value)
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process.)
"""
super(CreateInviteInputSet, self)._set_input('AccessToken', value)
def set_ConsumerKey(self, value):
"""
Set the value of the ConsumerKey input for this Choreo. ((required, string) The Consumer Key provided by Fitbit.)
"""
super(CreateInviteInputSet, self)._set_input('ConsumerKey', value)
def set_ConsumerSecret(self, value):
"""
Set the value of the ConsumerSecret input for this Choreo. ((required, string) The Consumer Secret provided by Fitbit.)
"""
super(CreateInviteInputSet, self)._set_input('ConsumerSecret', value)
def set_InvitedUserEmail(self, value):
"""
Set the value of the InvitedUserEmail input for this Choreo. ((conditional, string) The email address of the user to invite; user can be a Fitbit member already. Required unless providing the InvitedUserID.)
"""
super(CreateInviteInputSet, self)._set_input('InvitedUserEmail', value)
def set_InvitedUserID(self, value):
"""
Set the value of the InvitedUserID input for this Choreo. ((conditional, string) The Fitbit user id of the user to send an invite to. Required unless providing the InvitedUserEmail.)
"""
super(CreateInviteInputSet, self)._set_input('InvitedUserID', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that you want the response to be in: xml or json. Defaults to json.)
"""
super(CreateInviteInputSet, self)._set_input('ResponseFormat', value)
def set_UserID(self, value):
"""
Set the value of the UserID input for this Choreo. ((optional, string) The user's encoded id. Defaults to "-" (dash) which will return data for the user associated with the token credentials provided.)
"""
super(CreateInviteInputSet, self)._set_input('UserID', value)
class CreateInviteResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the CreateInvite Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Fitbit.)
"""
return self._output.get('Response', None)
class CreateInviteChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return CreateInviteResultSet(response, path)
authors: ["lupyuen@gmail.com"] | author_id: lupyuen@gmail.com

blob_id: 0b130d34300f0d54fda9186249d00d2196464eda | directory_id: d2ada8e9dea0a59476dbbdcfdebc3b8eed951271 | content_id: 5046e3f12011c7357d50aa4e84956dbebd0307ea
path: /CH02/bh_sshserver.py
repo_name: sadavoya/bhp | branch_name: refs/heads/master | snapshot_id: dccf211f4bd95f5eaf69e44c3bfee8f7d07af688 | revision_id: 6fbf1be8ca0f83363234d9c95170bdd770716c28
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2021-01-13T14:51:13.347114 | revision_date: 2017-02-21T01:39:57 | committer_date: 2017-02-21T01:39:57 | gha_event_created_at: null | gha_created_at: null
github_id: 76,486,946 | star_events_count: 0 | fork_events_count: 0
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,325 | extension: py
content:
#!/usr/bin/env python
'''SSH'''
import socket
import threading
import paramiko
import sys
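# Usage (derived from the sys.argv handling below): python bh_sshserver.py <bind_ip> <port>
# test_rsa.key from the paramiko demo files must sit next to this script.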
# using the demo keys in the paramiko demo files
host_key = paramiko.RSAKey(filename='test_rsa.key')
#print host_key.get_base64()
class Server(paramiko.ServerInterface):
def __init__(self):
self.event = threading.Event()
def check_channel_request(self, kind, chanid):
if kind == 'session':
return paramiko.OPEN_SUCCEEDED
return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
def check_auth_password(self, username, password):
if (username == 'joker') and (password == 'joker'):
return paramiko.AUTH_SUCCESSFUL
return paramiko.AUTH_FAILED
def main():
'''Main'''
server = sys.argv[1]
ssh_port = int(sys.argv[2])
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((server, ssh_port))
sock.listen(100)
print '[+] Listening for connection...'
client, addr = sock.accept()
except Exception, e:
print '[-] Listen failed: ' + str(e)
sys.exit(1)
#print '[+] Got a connection to %s:%d!' % (addr[1], addr[2])
try:
bh_session = paramiko.Transport(client)
bh_session.add_server_key(host_key)
server = Server()
try:
bh_session.start_server(server=server)
except paramiko.SSHException, x:
print '[-] SSH negotiation failed.'
chan = bh_session.accept(20)
print '[+] Authenticated!'
print chan.recv(1024)
chan.send('Welcome to bh_ssh')
while True:
try:
command = raw_input("Enter command: ").strip('\n')
if command != 'exit':
chan.send(command)
print chan.recv(1024) + '\n'
else:
chan.send('exit')
print 'exiting'
bh_session.close()
raise Exception('exit')
except KeyboardInterrupt:
bh_session.close()
except Exception, e:
print '[-] Caught exception: ' + str(e)
try:
bh_session.close()
except:
pass
sys.exit(1)
main()
authors: ["root@localhost.localdomain"] | author_id: root@localhost.localdomain

blob_id: e89d6dc70ef70ba87520aa3295eb41f07cb4aaa9 | directory_id: 2a3606551a4d850a7b4d6a4e08089c51108ef7be | content_id: cf79523b747c13cbeb4fb110e54813a48c123a41
path: /plugin.video.mrknow/resources/lib/crypto/keyedHash/pbkdf2.py
repo_name: rrosajp/filmkodi | branch_name: refs/heads/master | snapshot_id: a6bb1823f4ed45453c8b8e54ffbd6a7b49f44450 | revision_id: 0162cde9ae25ddbf4a69330948714833ff2f78c9
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: null
visit_date: 2021-09-18T06:03:17.561062 | revision_date: 2018-06-22T23:28:53 | committer_date: 2018-06-22T23:28:53 | gha_event_created_at: 2021-06-03T20:33:07 | gha_created_at: 2020-01-18T17:11:57
github_id: 234,768,781 | star_events_count: 1 | fork_events_count: 0
src_encoding: WINDOWS-1252 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,571 | extension: py
content:
# -*- coding: iso-8859-1 -*-
""" crypto.keyedHash.pbkdf2
Password Based Key Derivation Function 2
References: RFC2898, B. Kaliski, September 2000, PKCS #5
This function is used for IEEE 802.11/WPA passphrase to key hashing
Copyright © (c) 2002 by Paul A. Lambert
Read LICENSE.txt for license information.
"""
from ..keyedHash.hmacHash import HMAC_SHA1
from ..common import xor
from math import ceil
from struct import pack
def pbkdf2(password, salt, iterations, keySize, PRF=HMAC_SHA1):
""" Create key of size keySize from password and salt """
    if len(password) > 63:
        raise ValueError('Password too long for pbkdf2')  # string exceptions, as in the original, are not raisable
    # if len(password) < 8: raise ValueError('Password too short for pbkdf2')
    if keySize > 10000:  # spec says > 4294967295L*digestSize
        raise ValueError('keySize too long for PBKDF2')
prf = PRF(key=password) # HMAC_SHA1
numBlocks = int(ceil(1.*keySize/prf.digest_size)) # ceiling function
key = ''
for block in range(1,numBlocks+1):
        # Calculate F(P, salt, iterations, i); i is packed into 4 big-endian bytes
        U = prf(salt + pack('>i', block))
        F = U  # F accumulates U_1 xor U_2 ... xor U_c; the original computed the same PRF twice
for count in range(2,iterations+1):
U = prf(U)
F = xor(F,U)
key = key + F
return key[:keySize]
def dot11PassPhraseToPSK(passPhrase,ssid):
""" The 802.11 TGi recommended pass-phrase-to-preshared-key mapping.
    This function simply uses pbkdf2 with iterations=4096 and keySize=32
"""
    assert 7 < len(passPhrase) < 64, 'Passphrase must be between 8 and 63 characters'
return pbkdf2(passPhrase, ssid, iterations=4096, keySize=32)
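# Sanity check -- an assumption for modern Pythons, not part of this module: the
# standard library reproduces the same WPA passphrase-to-PSK derivation in one call:
#   import hashlib
#   hashlib.pbkdf2_hmac('sha1', passPhrase, ssid, 4096, dklen=32)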
authors: ["mrknow@interia.pl"] | author_id: mrknow@interia.pl

blob_id: 91956ba4d19b41720a01993ac3acbd491ad295d4 | directory_id: cd5746f8cc7aee1f20606a65b4fae0d5e8ee78dc | content_id: 72b93cd1bcd67c97b5266912ef867908e2d9e800
path: /Python Books/PythonTesting-BeginnersGuide/code/tests/test_chapter5/test_pid.py
repo_name: theGreenJedi/Path | branch_name: refs/heads/master | snapshot_id: df24fca355590efef0c6cb5c52e7216c6b5d2464 | revision_id: b5ed2805dbb046480929e49e550bfd8af5bb4d6f
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: Jupyter Notebook
visit_date: 2023-07-27T14:23:37.694546 | revision_date: 2021-07-16T01:38:55 | committer_date: 2021-07-16T01:38:55 | gha_event_created_at: 2023-07-11T22:49:03 | gha_created_at: 2017-04-09T05:57:30
github_id: 87,686,563 | star_events_count: 8 | fork_events_count: 2
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,718 | extension: py
content:
from unittest import TestCase, main
from mocker import Mocker
import pid
class test_pid_constructor(TestCase):
def test_without_when(self):
mocker = Mocker()
mock_time = mocker.replace('time.time')
mock_time()
mocker.result(1.0)
mocker.replay()
controller = pid.PID(P = 0.5, I = 0.5, D = 0.5,
setpoint = 0, initial = 12)
mocker.restore()
mocker.verify()
self.assertEqual(controller.gains, (0.5, 0.5, 0.5))
self.assertAlmostEqual(controller.setpoint[0], 0.0)
self.assertEqual(len(controller.setpoint), 1)
self.assertAlmostEqual(controller.previous_time, 1.0)
self.assertAlmostEqual(controller.previous_error, -12.0)
self.assertAlmostEqual(controller.integrated_error, 0)
def test_with_when(self):
controller = pid.PID(P = 0.5, I = 0.5, D = 0.5,
setpoint = 1, initial = 12,
when = 43)
self.assertEqual(controller.gains, (0.5, 0.5, 0.5))
self.assertAlmostEqual(controller.setpoint[0], 1.0)
self.assertEqual(len(controller.setpoint), 1)
self.assertAlmostEqual(controller.previous_time, 43.0)
self.assertAlmostEqual(controller.previous_error, -11.0)
self.assertAlmostEqual(controller.integrated_error, 0)
class test_calculate_response(TestCase):
def test_without_when(self):
mocker = Mocker()
mock_time = mocker.replace('time.time')
mock_time()
mocker.result(1.0)
mock_time()
mocker.result(2.0)
mock_time()
mocker.result(3.0)
mock_time()
mocker.result(4.0)
mock_time()
mocker.result(5.0)
mocker.replay()
controller = pid.PID(P = 0.5, I = 0.5, D = 0.5,
setpoint = 0, initial = 12)
self.assertEqual(controller.calculate_response(6), -3)
self.assertEqual(controller.calculate_response(3), -4.5)
self.assertEqual(controller.calculate_response(-1.5), -0.75)
self.assertEqual(controller.calculate_response(-2.25), -1.125)
mocker.restore()
mocker.verify()
def test_with_when(self):
controller = pid.PID(P = 0.5, I = 0.5, D = 0.5,
setpoint = 0, initial = 12,
when = 1)
self.assertEqual(controller.calculate_response(6, 2), -3)
self.assertEqual(controller.calculate_response(3, 3), -4.5)
self.assertEqual(controller.calculate_response(-1.5, 4), -0.75)
self.assertEqual(controller.calculate_response(-2.25, 5), -1.125)
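        # (The expected values follow the textbook PID form with dt = 1 s:
        #  response = P*error + I*integrated_error + D*(error - previous_error);
        #  e.g. for input 6: 0.5*(-6) + 0.5*(-6) + 0.5*6 = -3.)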
if __name__ == '__main__':
main()
authors: ["GreenJedi@protonmail.com"] | author_id: GreenJedi@protonmail.com

blob_id: 0e00d7b9dfd12f62cf14341e65cd37786e0b1482 | directory_id: f687b45b061a0a4ed849d5d56e265a3423c95f56 | content_id: 9f8121e8b8ff5edba0344a98ab758923591037af
path: /mime_gen_both.py
repo_name: wwwlwscom/python | branch_name: refs/heads/master | snapshot_id: 45e52529fffccf161a0cff8aaf2d19a149ac2056 | revision_id: 5478329f068f9a4eff5c07eee8005318b41b6440
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2021-01-20T10:06:17.251976 | revision_date: 2015-10-20T20:03:34 | committer_date: 2015-10-20T20:03:34 | gha_event_created_at: null | gha_created_at: null
github_id: 41,769,993 | star_events_count: 0 | fork_events_count: 0
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,479 | extension: py
content:
#!/usr/bin/env python
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email import Utils, Encoders
import mimetypes, sys
def genpart(data, contenttype):
maintype, subtype = contenttype.split('/')
if maintype == 'text':
retval = MIMEText(data, _subtype=subtype)
else:
retval = MIMEBase(maintype, subtype)
retval.set_payload(data)
Encoders.encode_base64(retval)
return retval
def attachment(filename):
fd = open(filename, 'rb')
mimetype, mimeencoding = mimetypes.guess_type(filename)
if mimeencoding or (mimetype is None):
mimetype = 'application/octet-stream'
retval = genpart(fd.read(), mimetype)
retval.add_header('Content-Disposition', 'attachment', filename = filename)
fd.close()
return retval
message = """Hello,
This is a test message from Rock. I hope you enjoy it!
--Anonymous"""
messagehtml = """Hello,<P>
This is a <B>great</B> test message from Rock. I hope you enjoy it!<P>
--<I>Anonymous<I>"""
msg = MIMEMultipart()
msg['To'] = 'recipient@example.com'
msg['From'] = 'Test Sender <sender@example.com>'
msg['Subject'] = 'Test Message, Rock'
msg['Date'] = Utils.formatdate(localtime = 1)
msg['Message-ID'] = Utils.make_msgid()
body = MIMEMultipart('alternative')
body.attach(genpart(message, 'text/plain'))
body.attach(genpart(messagehtml, 'text/html'))
msg.attach(body)
for filename in sys.argv[1:]:
msg.attach(attachment(filename))
print msg.as_string()
authors: ["root@localhost.localdomain"] | author_id: root@localhost.localdomain

blob_id: 7da317e87cb08431320105068322690d71269402 | directory_id: a1092fecf5057e45f1df4e738a14be210dadbc83 | content_id: 3d26eb5062cedb3108e425576485a5c6bc7d741c
path: /gen.py
repo_name: robert-giaquinto/baum-welch | branch_name: refs/heads/master | snapshot_id: ba45b3c80e839ae7fd5b8b5a00ee07dd9228b61a | revision_id: b57fb2bd64ed3fdfed1552a6ea5afd9c7c120cfc
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2021-01-15T09:09:29.267399 | revision_date: 2014-05-31T21:17:42 | committer_date: 2014-05-31T21:17:42 | gha_event_created_at: null | gha_created_at: null
github_id: null | star_events_count: 0 | fork_events_count: 0
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,008 | extension: py
content:
import random
import numpy as np
N_SEQ = 10
START = 0
BEFORE = 1
AFTER = 2
END = 3
def gen_seq():
seq = []
state = START
while state != END:
if state == START:
state = BEFORE
seq.append('S')
if state == BEFORE:
n, l, r = np.random.multinomial(1, [0.96, 0.036, 0.004])
if n:
seq.append('N')
elif l:
seq.append('L')
else:
seq.append('R')
state += np.random.binomial(1, 1/5000.)
if state == AFTER:
n, l, r = np.random.multinomial(1, [0.96, 0.004, 0.036])
if n:
seq.append('N')
elif l:
seq.append('L')
else:
seq.append('R')
state += np.random.binomial(1, 1/5000.)
seq.append('E')
return seq
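# The generator emits 'S', then N/L/R symbols from a BEFORE regime that favors
# 'L' (p = 0.036 vs 0.004), switches once with probability 1/5000 per symbol to
# an AFTER regime that favors 'R', and closes with 'E' -- toy sequences for
# Baum-Welch training.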
if __name__ == '__main__':
random.seed(42)
for i in xrange(N_SEQ):
seq = gen_seq()
print ''.join(seq)
authors: ["piotrek.kaleta@gmail.com"] | author_id: piotrek.kaleta@gmail.com

blob_id: 6ee7e72ba92ecde352fbe7130382ee1d2873e524 | directory_id: d5f080543d3004f560c1ae636900080f1c7e8b31 | content_id: 4e184d8220699043f302581714e52140c0c3b0ba
path: /configs/D2Det/D2Det_detection_r101_fpn_2x.py
repo_name: Randl/D2Det | branch_name: refs/heads/master | snapshot_id: dc7bd395b8c538e96f390d7ce5c396f87ee89bd8 | revision_id: 5e35b218d9de824e73e0a49953af25a0c6984e74
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: null
visit_date: 2022-09-25T13:52:21.141590 | revision_date: 2020-06-11T09:08:47 | committer_date: 2020-06-11T09:08:47 | gha_event_created_at: 2020-06-11T08:56:15 | gha_created_at: 2020-06-11T08:56:15
github_id: 271,498,684 | star_events_count: 0 | fork_events_count: 0
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,685 | extension: py
content:
# model settings
model = dict(
type='D2Det',
pretrained='torchvision://resnet101',
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='DeformRoIPoolingPack',
out_size=7,
sample_per_part=1,
out_channels=256,
no_trans=False,
group_size=1,
trans_std=0.1),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
with_reg=False,
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=2.0)),
reg_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
D2Det_head=dict(
type='D2DetHead',
num_convs=8,
in_channels=256,
norm_cfg=dict(type='GN', num_groups=36),
MASK_ON=False))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_radius=1,
pos_weight=-1,
max_num_grid=192,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.03, nms=dict(type='nms', iou_thr=0.5), max_per_img=125))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=1.0 / 80,
step=[20, 23])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 24
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/D2Det_detection_r101_fpn_2x'
load_from = None
resume_from = None
workflow = [('train', 1)]
authors: ["connor@tju.edu.cn"] | author_id: connor@tju.edu.cn

blob_id: 7d812592e10d2a0d003e3156aef68f26c0796648 | directory_id: 601adbb343313e7cce71b9b8d06620f541f349e5 | content_id: 4545078bf38683e3c939099329a8ad2f0d27d15f
path: /tests/test_ci/test_runners/test_BaseRunner.py
repo_name: jgsogo/conan-sword-and-sorcery | branch_name: refs/heads/master | snapshot_id: f3ff2c9b739410a7fb6eb97c49470d585fd1ab4c | revision_id: 143f05d8b469a3afc9c807ec87fbe2dcbe63dab3
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2021-04-06T06:23:40.584031 | revision_date: 2018-08-15T16:50:43 | committer_date: 2018-08-15T16:50:43 | gha_event_created_at: null | gha_created_at: null
github_id: 124,441,534 | star_events_count: 1 | fork_events_count: 0
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6,120 | extension: py
content:
# -*- coding: utf-8 -*-
import os
import unittest
try:
from unittest import mock
except ImportError:
import mock
from conan_sword_and_sorcery.ci.runners import AppveyorRunner
from conan_sword_and_sorcery.ci.runners.base_runner import SUCCESS, FAIL, DRY_RUN, BaseRunner
from conan_sword_and_sorcery.parsers.settings import get_settings
from conan_sword_and_sorcery.utils.environ import context_env
from conan_sword_and_sorcery.parsers.profile import profile_for
from tests.utils import TestCaseEnvClean
class JobGeneratorClass4Testing:
def __init__(self, *args, **kwargs):
pass
class BaseRunner4Testing(BaseRunner):
job_generator_class = JobGeneratorClass4Testing
class TestBaseRunnerStableBranch(TestCaseEnvClean):
def setUp(self):
self.settings = get_settings()
# Dummy (but valid) conanfile
me = os.path.dirname(__file__)
self.conanfile = os.path.join(me, '..', '..', 'files', 'single', 'conanfile01.py')
def test_enumerate_jobs(self):
runner = AppveyorRunner(conanfile=self.conanfile, settings=self.settings, osys="Windows")
with context_env(CONAN_VISUAL_VERSIONS="12", CONAN_VISUAL_RUNTIMES="MT"):
self.assertTrue(len(list(runner.enumerate_jobs())) != 0)
def test_is_pull_request(self):
runner = BaseRunner4Testing(conanfile=self.conanfile, settings=self.settings, osys="Windows")
with self.assertRaises(NotImplementedError):
runner.is_pull_request()
def test_get_branch_name(self):
runner = BaseRunner4Testing(conanfile=self.conanfile, settings=self.settings, osys="Windows")
with self.assertRaises(NotImplementedError):
runner.get_branch_name()
def test_dry_run(self):
runner = AppveyorRunner(conanfile=self.conanfile, settings=self.settings, osys="Windows", dry_run=True)
with context_env(CONAN_GCC_VERSIONS="6", CONAN_ARCHS='x86', CONAN_BUILD_PACKAGES='pckg1'):
compiler, options = list(runner.enumerate_jobs())[0]
with profile_for(compiler=compiler) as profile_file:
runner.set_compiler(compiler)
runner.set_profile(profile_file)
r = runner.run(options={'shared': True}, username='test', channel='testing')
self.assertEqual(r, DRY_RUN)
def test_run_fail(self):
runner = AppveyorRunner(conanfile=self.conanfile, settings=self.settings, osys="Windows")
with context_env(CONAN_GCC_VERSIONS="6", CONAN_ARCHS='x86', CONAN_BUILD_PACKAGES='pckg1'):
compiler, options = list(runner.enumerate_jobs())[0]
with profile_for(compiler=compiler) as profile_file:
runner.set_compiler(compiler)
runner.set_profile(profile_file)
with mock.patch('conan_sword_and_sorcery.ci.runners.base_runner.cmd', return_value=1) as mocked_cmd:
r = runner.run(options={'shared': True}, username='test', channel='testing')
self.assertEqual(r, FAIL)
def test_run_success(self):
runner = AppveyorRunner(conanfile=self.conanfile, settings=self.settings, osys="Windows")
with context_env(CONAN_GCC_VERSIONS="6", CONAN_ARCHS='x86', CONAN_BUILD_PACKAGES='pckg1'):
compiler, options = list(runner.enumerate_jobs())[0]
with profile_for(compiler=compiler) as profile_file:
runner.set_compiler(compiler)
runner.set_profile(profile_file)
with mock.patch('conan_sword_and_sorcery.ci.runners.base_runner.cmd', return_value=0) as mocked_cmd:
r = runner.run(options={'shared': True}, username='test', channel='testing')
self.assertEqual(r, SUCCESS)
args, kwargs = mocked_cmd.call_args
self.assertEqual(len(args), 0) # All arguments are passed with name
self.assertEqual(kwargs['exception'], None)
command = kwargs.get('command')
self.assertIn('--build=pckg1', command)
self.assertIn('--build=outdated', command)
self.assertIn('--build={}'.format(runner.recipe.name), command)
self.assertIn('--profile {}'.format(profile_file), command)
self.assertIn('-o {}:shared=True'.format(runner.recipe.name), command)
def test_is_upload_requested(self):
runner = AppveyorRunner(conanfile=self.conanfile, settings=self.settings, osys="Windows")
with context_env(CONAN_UPLOAD_ONLY_WHEN_STABLE="True", APPVEYOR_REPO_BRANCH='non-stable-branch'):
self.assertFalse(runner.is_stable_branch())
self.assertFalse(runner.is_upload_requested())
with context_env(CONAN_UPLOAD_ONLY_WHEN_STABLE="False", APPVEYOR_REPO_BRANCH='non-stable-branch'):
self.assertFalse(runner.is_stable_branch())
self.assertTrue(runner.is_upload_requested())
with context_env(CONAN_UPLOAD_ONLY_WHEN_STABLE="False", APPVEYOR_REPO_BRANCH='stable/v1.2.3'):
self.assertTrue(runner.is_stable_branch())
self.assertTrue(runner.is_upload_requested())
with context_env(CONAN_UPLOAD_ONLY_WHEN_STABLE="True", APPVEYOR_REPO_BRANCH='stable/v1.2.3'):
self.assertTrue(runner.is_stable_branch())
self.assertTrue(runner.is_upload_requested())
def test_upload(self):
runner = AppveyorRunner(conanfile=self.conanfile, settings=self.settings, osys="Windows")
with mock.patch('conan_sword_and_sorcery.ci.runners.base_runner.upload', return_value=0) as mocked_upload:
with context_env(CONAN_UPLOAD_ONLY_WHEN_STABLE="True", APPVEYOR_REPO_BRANCH='non-stable-branch'):
runner.upload(username='test', channel='testing')
with context_env(CONAN_UPLOAD_ONLY_WHEN_STABLE="False", APPVEYOR_REPO_BRANCH='non-stable-branch'):
runner.upload(username='test', channel='testing')
args, kwargs = mocked_upload.call_args
self.assertEqual(kwargs['username'], 'test')
authors: ["jgsogo@gmail.com"] | author_id: jgsogo@gmail.com

blob_id: 602ecb7bb83ddd5c367c45eeaec4531e135d6824 | directory_id: f87dc2227f9539ce9f87b8eb417d28f487ea2eac | content_id: b3627efacbce4f210bf7ebc9dc2784e06dd4977a
path: /이진탐색/부품찾기.py
repo_name: jjangsungwon/python-for-coding-test | branch_name: refs/heads/master | snapshot_id: fb1e019a2e68e426bb4f6770bffdc6289a647b4a | revision_id: 8d9bf8de5de2a9724f75b35ea04dd9bcc40dec86
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2022-12-16T02:53:55.967070 | revision_date: 2020-08-26T08:41:14 | committer_date: 2020-08-26T08:41:14 | gha_event_created_at: null | gha_created_at: null
github_id: 285,842,867 | star_events_count: 2 | fork_events_count: 0
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 895 | extension: py
content:
def binary_search(target, start, end):
if start > end:
return None
while start <= end:
mid = (start + end) // 2
        if array[mid] == target:  # match found
            return "yes"
        elif array[mid] > target:  # the middle value is larger than the target
end = mid - 1
else:
start = mid + 1
    return None  # no matching value
if __name__ == "__main__":
    # input
N = int(input())
array = list(map(int, input().split()))
M = int(input())
find = list(map(int, input().split()))
    # sort the array so binary search can be applied
    array.sort()
    # read the lookup values one at a time
    for data in find:
        # binary search
result = binary_search(data, 0, N - 1)
if result is not None:
print('yes', end=" ")
else:
print('no', end=" ")
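    # Example run: N=5, array "8 3 7 9 2", M=3, find "5 7 9" -> prints: no yes yes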
authors: ["dnjs2113@gmail.com"] | author_id: dnjs2113@gmail.com

blob_id: d8ca730c49e849faef22bb61d6e7c1ea1853c890 | directory_id: 694d57c3e512ce916269411b51adef23532420cd | content_id: 00e1dd5356363c18fb8e1045f63f53286f0a515a
path: /python/chapter-1/lab4-exec1.2.py
repo_name: clovery410/mycode | branch_name: refs/heads/master | snapshot_id: 5541c3a99962d7949832a0859f18819f118edfba | revision_id: e12025e754547d18d5bb50a9dbe5e725fd03fd9c
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2021-05-16T02:46:47.996748 | revision_date: 2017-05-10T23:43:50 | committer_date: 2017-05-10T23:43:50 | gha_event_created_at: null | gha_created_at: null
github_id: 39,235,141 | star_events_count: 1 | fork_events_count: 1
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 541 | extension: py
content:
def gcb_recur(a, b):
    smaller_para = min(a, b)
    larger_para = max(a, b)
    remainder = larger_para % smaller_para
    if remainder == 0:  # the smaller number divides the larger one exactly
        return smaller_para
    if smaller_para % remainder == 0:
        return remainder
    return gcb_recur(smaller_para, remainder)
print(gcb_recur(50, 35))
def gcb_itera(a, b):
    smaller_para = min(a, b)
    larger_para = max(a, b)
    remainder = larger_para % smaller_para
    if remainder == 0:  # guard against modulo-by-zero (e.g. gcb_itera(50, 25))
        return smaller_para
    while not smaller_para % remainder == 0:
        smaller_para, remainder = remainder, smaller_para % remainder
    return remainder
print(gcb_itera(50, 35))
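# Cross-check with the standard library:
#   from math import gcd
#   assert gcd(50, 35) == 5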
authors: ["admin@admins-MacBook-Air.local"] | author_id: admin@admins-MacBook-Air.local

blob_id: 3b98e43e2f3dc2377b74432e9fe99c572da37f2a | directory_id: 4904acd900496b4883c2f5b4aa6b45d1ef6654c0 | content_id: 1cf21d123e086ed846bcb034e8d4271c9735498d
path: /graphgallery/gallery/nodeclas/tensorflow/__init__.py
repo_name: blindSpoter01/GraphGallery | branch_name: refs/heads/master | snapshot_id: aee039edd759be9272d123463b0ad73a57e561c7 | revision_id: e41caeb32a07da95364f15b85cad527a67763255
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: null | gha_language: null
visit_date: 2023-06-17T11:42:27.169751 | revision_date: 2021-07-15T03:07:39 | committer_date: 2021-07-15T03:07:39 | gha_event_created_at: null | gha_created_at: null
github_id: null | star_events_count: 0 | fork_events_count: 0
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 782 | extension: py
content:
from .gcn import GCN
from .gat import GAT
from .clustergcn import ClusterGCN
from .sgc import SGC
from .gwnn import GWNN
from .robustgcn import RobustGCN
from .graphsage import GraphSAGE
from .fastgcn import FastGCN
from .chebynet import ChebyNet
from .densegcn import DenseGCN
from .lgcn import LGCN
from .BVAT.obvat import OBVAT
from .BVAT.sbvat import SBVAT
from .gmnn import GMNN
from .dagnn import DAGNN
from .mlp import MLP
from .tagcn import TAGCN
from .appnp import APPNP, PPNP
from .ssgc import SSGC
from .agnn import AGNN
from .arma import ARMA
# experimental model
from .experimental.edgeconv import EdgeGCN
from .experimental.s_obvat import SimplifiedOBVAT
from .experimental.gcn_mix import GCN_MIX
from .experimental.gcna import GCNA
from .experimental.sat import SAT
authors: ["cnljt@outlook.com"] | author_id: cnljt@outlook.com

blob_id: 7d6e7442b32fe58141787e6063cf7b0ae35a74b7 | directory_id: d49fbd7874b70a93cbc551afed1b87e3e47617a8 | content_id: 1efb28043ae95783f5bde83b3415bcedaf028594
path: /django/example/repositories/__init__.py
repo_name: gitter-badger/tutorials-4 | branch_name: refs/heads/master | snapshot_id: bbdbb673e978118f9fec3212baa13f6f99226be0 | revision_id: 3ce1cdb7c6d26f6df4d6bb94e82f83e8cab9389b
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2020-04-04T20:52:28.181616 | revision_date: 2018-10-28T22:05:17 | committer_date: 2018-10-28T22:05:17 | gha_event_created_at: 2018-11-05T18:32:17 | gha_created_at: 2018-11-05T18:32:16
github_id: 156,264,177 | star_events_count: 0 | fork_events_count: 0
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 528 | extension: py
content:
from .category import load_categories, load_category # noqa
from .entry import load_entries # noqa
from .notification import create_notification, load_notifications # noqa
from .price import ( # noqa
cheapest_price_by_category,
load_price,
prices_for_category,
)
from .profile import ( # noqa
add_balance,
create_profile,
del_balance,
load_profile,
save_profile,
)
from .subscription import create_subscription, load_subscription # noqa
from .user import create_user, save_password # noqa
authors: ["proofit404@gmail.com"] | author_id: proofit404@gmail.com

blob_id: ff3e75465d6bc74082977d0011083bd7cb9d2fa1 | directory_id: 8dc745854d73e362aa60747b3ab1b5a0dd975902 | content_id: 95be35fceef5366dfe3c457b3dac4f7b9e356ad3
path: /demo/funs/varying_args.py
repo_name: srikanthpragada/PYTHON_27_AUG_2020 | branch_name: refs/heads/master | snapshot_id: 08a5898fe1a0ae110b74897ce6cce6595bdfce45 | revision_id: af2aebbb0d83c5e8f381cdda844ab66d2362019c
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2022-12-30T10:12:56.688671 | revision_date: 2020-10-09T14:20:43 | committer_date: 2020-10-09T14:20:43 | gha_event_created_at: null | gha_created_at: null
github_id: 291,730,514 | star_events_count: 0 | fork_events_count: 0
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 149 | extension: py
content:
def wish(*names, message="Hi"):
for n in names:
print(message, n)
wish("Bill", "Steve", message="Hello")
wish("Bill", "Steve", "Mike")
authors: ["srikanthpragada@gmail.com"] | author_id: srikanthpragada@gmail.com

blob_id: 560ff9f3f493317e04240dcf5f75f3fb3c0b41e7 | directory_id: 500bca3e22bd0c30c79b74918e9847742b3c428e | content_id: 4e2c269f5cb447804f693d12932e283e9219e83f
path: /sdk/python/endpoints/online/mlflow/sklearn-diabetes/src/score.py
repo_name: Azure/azureml-examples | branch_name: refs/heads/main | snapshot_id: 2304c862fd2e36e6640ecc4d09f69c5ed93b48ab | revision_id: e5f7b247d4753f115a8f7da30cbe25294f71f9d7
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Jupyter Notebook
visit_date: 2023-08-31T00:10:14.107509 | revision_date: 2023-08-30T17:29:22 | committer_date: 2023-08-30T17:29:22 | gha_event_created_at: 2023-09-14T16:00:55 | gha_created_at: 2020-08-21T18:04:26
github_id: 289,334,021 | star_events_count: 1,219 | fork_events_count: 1,074
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 979 | extension: py
content:
import logging
import os
import json
import mlflow
from io import StringIO
from mlflow.pyfunc.scoring_server import infer_and_parse_json_input, predictions_to_json
def init():
global model
global input_schema
# "model" is the path of the mlflow artifacts when the model was registered. For automl
# models, this is generally "mlflow-model".
model_path = os.path.join(os.getenv("AZUREML_MODEL_DIR"), "model")
model = mlflow.pyfunc.load_model(model_path)
input_schema = model.metadata.get_input_schema()
def run(raw_data):
json_data = json.loads(raw_data)
if "input_data" not in json_data.keys():
raise Exception("Request must contain a top level key named 'input_data'")
serving_input = json.dumps(json_data["input_data"])
data = infer_and_parse_json_input(serving_input, input_schema)
predictions = model.predict(data)
result = StringIO()
predictions_to_json(predictions, result)
return result.getvalue()
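# A hypothetical request body for this scoring script (the exact columns depend
# on the registered model's input schema; the names below are illustrative only):
#   {"input_data": {"columns": ["age", "bmi"], "index": [0], "data": [[0.05, 0.06]]}}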
authors: ["noreply@github.com"] | author_id: Azure.noreply@github.com

blob_id: 0dec940c8d9ee73e47f55d49a771aebb21beec6d | directory_id: 55d560fe6678a3edc9232ef14de8fafd7b7ece12 | content_id: 36a007eb406fa403704cb5091d42f2606d7901ce
path: /tools/build/test/rescan_header.py
repo_name: stardog-union/boost | branch_name: refs/heads/stardog/develop | snapshot_id: ec3abeeef1b45389228df031bf25b470d3d123c5 | revision_id: caa4a540db892caa92e5346e0094c63dea51cbfb
detected_licenses: ["BSL-1.0", "LicenseRef-scancode-unknown-license-reference"] | license_type: permissive | gha_license_id: BSL-1.0 | gha_language: C++
visit_date: 2021-06-25T02:15:10.697006 | revision_date: 2020-11-17T19:50:35 | committer_date: 2020-11-17T19:50:35 | gha_event_created_at: 2020-11-17T19:50:36 | gha_created_at: 2018-09-13T18:38:54
github_id: 148,681,713 | star_events_count: 0 | fork_events_count: 0
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,653 | extension: py
content:
#!/usr/bin/python
# Copyright 2012 Steven Watanabe
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
t = BoostBuild.Tester(use_test_config=False)
# Test a header loop that depends on (but does not contain) a generated header.
t.write("test.cpp", '#include "header1.h"\n')
t.write("header1.h", """\
#ifndef HEADER1_H
#define HEADER1_H
#include "header2.h"
#endif
""")
t.write("header2.h", """\
#ifndef HEADER2_H
#define HEADER2_H
#include "header1.h"
#include "header3.h"
#endif
""")
t.write("header3.in", "/* empty file */\n")
t.write("jamroot.jam", """\
import common ;
make header3.h : header3.in : @common.copy ;
obj test : test.cpp : <implicit-dependency>header3.h ;
""")
t.run_build_system(["-j2"])
t.expect_addition("bin/header3.h")
t.expect_addition("bin/$toolset/debug*/test.obj")
t.expect_nothing_more()
t.rm(".")
# Test a linear sequence of generated headers.
t.write("test.cpp", '#include "header1.h"\n')
t.write("header1.in", """\
#ifndef HEADER1_H
#define HEADER1_H
#include "header2.h"
#endif
""")
t.write("header2.in", """\
#ifndef HEADER2_H
#define HEADER2_H
#include "header3.h"
#endif
""")
t.write("header3.in", "/* empty file */\n")
t.write("jamroot.jam", """\
import common ;
make header1.h : header1.in : @common.copy ;
make header2.h : header2.in : @common.copy ;
make header3.h : header3.in : @common.copy ;
obj test : test.cpp :
<implicit-dependency>header1.h
<implicit-dependency>header2.h
<implicit-dependency>header3.h ;
""")
t.run_build_system(["-j2", "test"])
t.expect_addition("bin/header1.h")
t.expect_addition("bin/header2.h")
t.expect_addition("bin/header3.h")
t.expect_addition("bin/$toolset/debug*/test.obj")
t.expect_nothing_more()
t.rm(".")
# Test a loop in generated headers.
t.write("test.cpp", '#include "header1.h"\n')
t.write("header1.in", """\
#ifndef HEADER1_H
#define HEADER1_H
#include "header2.h"
#endif
""")
t.write("header2.in", """\
#ifndef HEADER2_H
#define HEADER2_H
#include "header3.h"
#endif
""")
t.write("header3.in", """\
#ifndef HEADER2_H
#define HEADER2_H
#include "header1.h"
#endif
""")
t.write("jamroot.jam", """\
import common ;
actions copy {
sleep 1
cp $(>) $(<)
}
make header1.h : header1.in : @common.copy ;
make header2.h : header2.in : @common.copy ;
make header3.h : header3.in : @common.copy ;
obj test : test.cpp :
<implicit-dependency>header1.h
<implicit-dependency>header2.h
<implicit-dependency>header3.h ;
""")
t.run_build_system(["-j2", "test"])
t.expect_addition("bin/header1.h")
t.expect_addition("bin/header2.h")
t.expect_addition("bin/header3.h")
t.expect_addition("bin/$toolset/debug*/test.obj")
t.expect_nothing_more()
t.rm(".")
# Test that all the dependencies of a loop are updated before any of the
# dependents.
t.write("test1.cpp", '#include "header1.h"\n')
t.write("test2.cpp", """\
#include "header2.h"
int main() {}
""")
t.write("header1.h", """\
#ifndef HEADER1_H
#define HEADER1_H
#include "header2.h"
#endif
""")
t.write("header2.h", """\
#ifndef HEADER2_H
#define HEADER2_H
#include "header1.h"
#include "header3.h"
#endif
""")
t.write("header3.in", "\n")
t.write("sleep.bat", """\
::@timeout /T %1 /NOBREAK >nul
@ping 127.0.0.1 -n 2 -w 1000 >nul
@ping 127.0.0.1 -n %1 -w 1000 >nul
@exit /B 0
""")
t.write("jamroot.jam", """\
import common ;
import os ;
if [ os.name ] = NT
{
SLEEP = call sleep.bat ;
}
else
{
SLEEP = sleep ;
}
rule copy { common.copy $(<) : $(>) ; }
actions copy { $(SLEEP) 1 }
make header3.h : header3.in : @copy ;
exe test : test2.cpp test1.cpp : <implicit-dependency>header3.h ;
""")
t.run_build_system(["-j2", "test"])
t.expect_addition("bin/header3.h")
t.expect_addition("bin/$toolset/debug*/test1.obj")
t.expect_addition("bin/$toolset/debug*/test2.obj")
t.expect_addition("bin/$toolset/debug*/test.exe")
t.expect_nothing_more()
t.touch("header3.in")
t.run_build_system(["-j2", "test"])
t.expect_touch("bin/header3.h")
t.expect_touch("bin/$toolset/debug*/test1.obj")
t.expect_touch("bin/$toolset/debug*/test2.obj")
t.expect_touch("bin/$toolset/debug*/test.exe")
t.expect_nothing_more()
t.rm(".")
# Test a loop that includes a generated header
t.write("test1.cpp", '#include "header1.h"\n')
t.write("test2.cpp", """\
#include "header2.h"
int main() {}
""")
t.write("header1.h", """\
#ifndef HEADER1_H
#define HEADER1_H
#include "header2.h"
#endif
""")
t.write("header2.in", """\
#ifndef HEADER2_H
#define HEADER2_H
#include "header3.h"
#endif
""")
t.write("header3.h", """\
#ifndef HEADER3_H
#define HEADER3_H
#include "header1.h"
#endif
""")
t.write("sleep.bat", """\
::@timeout /T %1 /NOBREAK >nul
@ping 127.0.0.1 -n 2 -w 1000 >nul
@ping 127.0.0.1 -n %1 -w 1000 >nul
@exit /B 0
""")
t.write("jamroot.jam", """\
import common ;
import os ;
if [ os.name ] = NT
{
SLEEP = call sleep.bat ;
}
else
{
SLEEP = sleep ;
}
rule copy { common.copy $(<) : $(>) ; }
actions copy { $(SLEEP) 1 }
make header2.h : header2.in : @copy ;
exe test : test2.cpp test1.cpp : <implicit-dependency>header2.h <include>. ;
""")
t.run_build_system(["-j2", "test"])
t.expect_addition("bin/header2.h")
t.expect_addition("bin/$toolset/debug*/test1.obj")
t.expect_addition("bin/$toolset/debug*/test2.obj")
t.expect_addition("bin/$toolset/debug*/test.exe")
t.expect_nothing_more()
t.cleanup()
authors: ["james.pack@stardog.com"] | author_id: james.pack@stardog.com

blob_id: 87329ac75e0a03161d9c4ec7e50671e1a8c5b0d0 | directory_id: 22299195d67f887d8de9f8764e8a85680cd3416c | content_id: e4430a318df1dc716db227d2a786414f7b6eb3ff
path: /class7 (Color Filtering - OpenCV with Python for Image and Video Analysis 7)/main.py
repo_name: EnggQasim/PythonOpenCV | branch_name: refs/heads/master | snapshot_id: 71268cb9bfa603b9aec1e239756f515f9693f74c | revision_id: 2f1cd61df0fd520dbdc0e41a52ebfc4da410c771
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: null
visit_date: 2021-01-01T15:29:14.768477 | revision_date: 2017-07-18T18:11:19 | committer_date: 2017-07-18T18:11:19 | gha_event_created_at: null | gha_created_at: null
github_id: 97,629,494 | star_events_count: 3 | fork_events_count: 0
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 500 | extension: py
content:
import cv2
import numpy as np
cap = cv2.VideoCapture(1)
while True:
_, frame = cap.read()
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
#hsv hue sat value
lower_red = np.array([150,150,50])
upper_red = np.array([180, 255, 150])
mask = cv2.inRange(hsv, lower_red, upper_red)
res = cv2.bitwise_and(frame, frame, mask=mask)
cv2.imshow('Frame', frame)
cv2.imshow('Mask', mask)
cv2.imshow('Result', res)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cap.release()  # the original called cv2.release(), which does not exist; release the capture device
cv2.destroyAllWindows()
authors: ["m.qasim077@gmail.com"] | author_id: m.qasim077@gmail.com

blob_id: 51fe296f9a06966e6e243a907c4209236b1137e9 | directory_id: 0c66e605e6e4129b09ea14dbb6aa353d18aaa027 | content_id: 3b567492131adb79f3e21d1f851220e0b4b14f01
path: /diventi/ebooks/migrations/0007_auto_20190429_1732.py
repo_name: flavoi/diventi | branch_name: refs/heads/master | snapshot_id: 58fbc8c947f387cbcc1ce607878a59a6f2b72313 | revision_id: c0b1efe2baa3ff816d6ee9a8e86623f297973ded
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: Python
visit_date: 2023-07-20T09:32:35.897661 | revision_date: 2023-07-11T19:44:26 | committer_date: 2023-07-11T19:44:26 | gha_event_created_at: 2023-02-08T01:03:17 | gha_created_at: 2017-09-09T14:10:51
github_id: 102,959,477 | star_events_count: 2 | fork_events_count: 1
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,340 | extension: py
content:
# Generated by Django 2.1.7 on 2019-04-29 15:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ebooks', '0006_auto_20190429_1727'),
]
operations = [
migrations.AddField(
model_name='chapter',
name='description_en',
field=models.TextField(null=True, verbose_name='description'),
),
migrations.AddField(
model_name='chapter',
name='description_it',
field=models.TextField(null=True, verbose_name='description'),
),
migrations.AddField(
model_name='chapter',
name='slug_en',
field=models.SlugField(null=True, unique=True, verbose_name='slug'),
),
migrations.AddField(
model_name='chapter',
name='slug_it',
field=models.SlugField(null=True, unique=True, verbose_name='slug'),
),
migrations.AddField(
model_name='chapter',
name='title_en',
field=models.CharField(max_length=50, null=True, verbose_name='title'),
),
migrations.AddField(
model_name='chapter',
name='title_it',
field=models.CharField(max_length=50, null=True, verbose_name='title'),
),
]
authors: ["flavius476@gmail.com"] | author_id: flavius476@gmail.com

blob_id: a4c78496e3e6c0ca7c8343f03b0e455be84de413 | directory_id: 585fcfd09bcc37ad73c6f301cb8b16261a93df7e | content_id: 5ea2c2f53342a023823a115a04a403407c9ccc3d
path: /projects/pyDOE-master/pyDOE/build_regression_matrix.py
repo_name: louisXW/Surrogate-Model | branch_name: refs/heads/master | snapshot_id: e9e8de3ab892eed2f8ed424e09b770e67126c1f3 | revision_id: 65ec8a89c1b7a19d4c04c62e2c988340c96c69f8
detected_licenses: ["MIT", "BSD-3-Clause"] | license_type: permissive | gha_license_id: null | gha_language: null
visit_date: 2021-07-21T09:37:41.045898 | revision_date: 2017-10-30T11:49:35 | committer_date: 2017-10-30T11:49:35 | gha_event_created_at: null | gha_created_at: null
github_id: null | star_events_count: 0 | fork_events_count: 0
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,909 | extension: py
content:
"""
This code was originally published by the following individuals for use with
Scilab:
Copyright (C) 2012 - 2013 - Michael Baudin
Copyright (C) 2012 - Maria Christopoulou
Copyright (C) 2010 - 2011 - INRIA - Michael Baudin
Copyright (C) 2009 - Yann Collette
Copyright (C) 2009 - CEA - Jean-Marc Martinez
website: forge.scilab.org/index.php/p/scidoe/sourcetree/master/macros
Much thanks goes to these individuals. It has been converted to Python by
Abraham Lee.
"""
import numpy as np
def grep(haystack, needle):
start = 0
while True:
start = haystack.find(needle, start)
if start == -1:
return
yield start
start += len(needle)
def build_regression_matrix(H, model, build=None):
"""
    Build a regression matrix using a DOE matrix and a list of monomials.

    Parameters
    ----------
    H : 2d-array
        The DOE matrix: one row per experiment, one column per factor.
    model : str
        Space-separated monomials, e.g. 'x0 x1 x0*x1'.
    build : bool-array
        Optional mask over the monomials (default: all True).

    Returns
    -------
    R : 2d-array
        The regression matrix, one column per monomial.
"""
ListOfTokens = model.split(' ')
if H.shape[1] == 1:
size_index = len(str(H.shape[0]))
else:
size_index = len(str(H.shape[1]))
if build is None:
build = [True] * len(ListOfTokens)
# Test if the vector has the wrong direction (lines instead of columns)
if H.shape[0] == 1:
H = H.T
# Collect the list of monomials
    Monom_Index = []
    for i in range(len(ListOfTokens)):
        if build[i]:
            # grep() expects a string haystack, so search the model string for
            # the zero-padded variable token (e.g. 'x0', 'x01', ...)
            token = 'x' + str(0) * (size_index - len(str(i))) + str(i)
            Monom_Index += list(grep(model, token))
    Monom_Index = np.unique(Monom_Index)  # unique() already returns sorted indices
    if H.shape[1] == 1:
        nb_var = H.shape[0]  # vector "mode": the number of vars is equal to the number of lines of H
        VectorMode = True
        for i in range(nb_var):
            for j in range(len(ListOfTokens)):
                ListOfTokens[j] = ListOfTokens[j].replace(
                    'x' + str(0) * (size_index - len(str(i))) + str(i),
                    'H[' + str(i) + ', 0]')
    else:
        nb_var = H.shape[1]  # matrix "mode": the number of vars is equal to the number of columns of H
        VectorMode = False
        for i in range(nb_var):
            for j in range(len(ListOfTokens)):
                ListOfTokens[j] = ListOfTokens[j].replace(
                    'x' + str(0) * (size_index - len(str(i))) + str(i),
                    'H[i, ' + str(i) + ']')
# Now build the regression matrix
if VectorMode:
R = np.zeros((len(ListOfTokens), 1))
for j in range(len(ListOfTokens)):
R[j, 0] = eval(ListOfTokens[j])
else:
R = np.zeros((H.shape[0], len(ListOfTokens)))
for i in range(H.shape[0]):
for j in range(len(ListOfTokens)):
R[i, j] = eval(ListOfTokens[j])
return R
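# --- Illustrative usage sketch (added; not part of the original module) ---
# Assuming the fixes above, a 2-factor design with an interaction term:
#   H = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
#   R = build_regression_matrix(H, 'x0 x1 x0*x1')
# R then has one column per monomial ('x0', 'x1', 'x0*x1'), evaluated at each
# of the four design points.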
|
[
"quanpan302@hotmail.com"
] |
quanpan302@hotmail.com
|
21258aa7598c5f930fe4eaed3af4d0a499b648d9
|
98efe1aee73bd9fbec640132e6fb2e54ff444904
|
/loldib/getratings/models/NA/na_ornn/na_ornn_top.py
|
2ca0d07a4824d85d8de49a6105daf5c1b67f4de7
|
[
"Apache-2.0"
] |
permissive
|
koliupy/loldib
|
be4a1702c26546d6ae1b4a14943a416f73171718
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
refs/heads/master
| 2021-07-04T03:34:43.615423
| 2017-09-21T15:44:10
| 2017-09-21T15:44:10
| 104,359,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,269
|
py
|
from getratings.models.ratings import Ratings
class NA_Ornn_Top_Aatrox(Ratings):
pass
class NA_Ornn_Top_Ahri(Ratings):
pass
class NA_Ornn_Top_Akali(Ratings):
pass
class NA_Ornn_Top_Alistar(Ratings):
pass
class NA_Ornn_Top_Amumu(Ratings):
pass
class NA_Ornn_Top_Anivia(Ratings):
pass
class NA_Ornn_Top_Annie(Ratings):
pass
class NA_Ornn_Top_Ashe(Ratings):
pass
class NA_Ornn_Top_AurelionSol(Ratings):
pass
class NA_Ornn_Top_Azir(Ratings):
pass
class NA_Ornn_Top_Bard(Ratings):
pass
class NA_Ornn_Top_Blitzcrank(Ratings):
pass
class NA_Ornn_Top_Brand(Ratings):
pass
class NA_Ornn_Top_Braum(Ratings):
pass
class NA_Ornn_Top_Caitlyn(Ratings):
pass
class NA_Ornn_Top_Camille(Ratings):
pass
class NA_Ornn_Top_Cassiopeia(Ratings):
pass
class NA_Ornn_Top_Chogath(Ratings):
pass
class NA_Ornn_Top_Corki(Ratings):
pass
class NA_Ornn_Top_Darius(Ratings):
pass
class NA_Ornn_Top_Diana(Ratings):
pass
class NA_Ornn_Top_Draven(Ratings):
pass
class NA_Ornn_Top_DrMundo(Ratings):
pass
class NA_Ornn_Top_Ekko(Ratings):
pass
class NA_Ornn_Top_Elise(Ratings):
pass
class NA_Ornn_Top_Evelynn(Ratings):
pass
class NA_Ornn_Top_Ezreal(Ratings):
pass
class NA_Ornn_Top_Fiddlesticks(Ratings):
pass
class NA_Ornn_Top_Fiora(Ratings):
pass
class NA_Ornn_Top_Fizz(Ratings):
pass
class NA_Ornn_Top_Galio(Ratings):
pass
class NA_Ornn_Top_Gangplank(Ratings):
pass
class NA_Ornn_Top_Garen(Ratings):
pass
class NA_Ornn_Top_Gnar(Ratings):
pass
class NA_Ornn_Top_Gragas(Ratings):
pass
class NA_Ornn_Top_Graves(Ratings):
pass
class NA_Ornn_Top_Hecarim(Ratings):
pass
class NA_Ornn_Top_Heimerdinger(Ratings):
pass
class NA_Ornn_Top_Illaoi(Ratings):
pass
class NA_Ornn_Top_Irelia(Ratings):
pass
class NA_Ornn_Top_Ivern(Ratings):
pass
class NA_Ornn_Top_Janna(Ratings):
pass
class NA_Ornn_Top_JarvanIV(Ratings):
pass
class NA_Ornn_Top_Jax(Ratings):
pass
class NA_Ornn_Top_Jayce(Ratings):
pass
class NA_Ornn_Top_Jhin(Ratings):
pass
class NA_Ornn_Top_Jinx(Ratings):
pass
class NA_Ornn_Top_Kalista(Ratings):
pass
class NA_Ornn_Top_Karma(Ratings):
pass
class NA_Ornn_Top_Karthus(Ratings):
pass
class NA_Ornn_Top_Kassadin(Ratings):
pass
class NA_Ornn_Top_Katarina(Ratings):
pass
class NA_Ornn_Top_Kayle(Ratings):
pass
class NA_Ornn_Top_Kayn(Ratings):
pass
class NA_Ornn_Top_Kennen(Ratings):
pass
class NA_Ornn_Top_Khazix(Ratings):
pass
class NA_Ornn_Top_Kindred(Ratings):
pass
class NA_Ornn_Top_Kled(Ratings):
pass
class NA_Ornn_Top_KogMaw(Ratings):
pass
class NA_Ornn_Top_Leblanc(Ratings):
pass
class NA_Ornn_Top_LeeSin(Ratings):
pass
class NA_Ornn_Top_Leona(Ratings):
pass
class NA_Ornn_Top_Lissandra(Ratings):
pass
class NA_Ornn_Top_Lucian(Ratings):
pass
class NA_Ornn_Top_Lulu(Ratings):
pass
class NA_Ornn_Top_Lux(Ratings):
pass
class NA_Ornn_Top_Malphite(Ratings):
pass
class NA_Ornn_Top_Malzahar(Ratings):
pass
class NA_Ornn_Top_Maokai(Ratings):
pass
class NA_Ornn_Top_MasterYi(Ratings):
pass
class NA_Ornn_Top_MissFortune(Ratings):
pass
class NA_Ornn_Top_MonkeyKing(Ratings):
pass
class NA_Ornn_Top_Mordekaiser(Ratings):
pass
class NA_Ornn_Top_Morgana(Ratings):
pass
class NA_Ornn_Top_Nami(Ratings):
pass
class NA_Ornn_Top_Nasus(Ratings):
pass
class NA_Ornn_Top_Nautilus(Ratings):
pass
class NA_Ornn_Top_Nidalee(Ratings):
pass
class NA_Ornn_Top_Nocturne(Ratings):
pass
class NA_Ornn_Top_Nunu(Ratings):
pass
class NA_Ornn_Top_Olaf(Ratings):
pass
class NA_Ornn_Top_Orianna(Ratings):
pass
class NA_Ornn_Top_Ornn(Ratings):
pass
class NA_Ornn_Top_Pantheon(Ratings):
pass
class NA_Ornn_Top_Poppy(Ratings):
pass
class NA_Ornn_Top_Quinn(Ratings):
pass
class NA_Ornn_Top_Rakan(Ratings):
pass
class NA_Ornn_Top_Rammus(Ratings):
pass
class NA_Ornn_Top_RekSai(Ratings):
pass
class NA_Ornn_Top_Renekton(Ratings):
pass
class NA_Ornn_Top_Rengar(Ratings):
pass
class NA_Ornn_Top_Riven(Ratings):
pass
class NA_Ornn_Top_Rumble(Ratings):
pass
class NA_Ornn_Top_Ryze(Ratings):
pass
class NA_Ornn_Top_Sejuani(Ratings):
pass
class NA_Ornn_Top_Shaco(Ratings):
pass
class NA_Ornn_Top_Shen(Ratings):
pass
class NA_Ornn_Top_Shyvana(Ratings):
pass
class NA_Ornn_Top_Singed(Ratings):
pass
class NA_Ornn_Top_Sion(Ratings):
pass
class NA_Ornn_Top_Sivir(Ratings):
pass
class NA_Ornn_Top_Skarner(Ratings):
pass
class NA_Ornn_Top_Sona(Ratings):
pass
class NA_Ornn_Top_Soraka(Ratings):
pass
class NA_Ornn_Top_Swain(Ratings):
pass
class NA_Ornn_Top_Syndra(Ratings):
pass
class NA_Ornn_Top_TahmKench(Ratings):
pass
class NA_Ornn_Top_Taliyah(Ratings):
pass
class NA_Ornn_Top_Talon(Ratings):
pass
class NA_Ornn_Top_Taric(Ratings):
pass
class NA_Ornn_Top_Teemo(Ratings):
pass
class NA_Ornn_Top_Thresh(Ratings):
pass
class NA_Ornn_Top_Tristana(Ratings):
pass
class NA_Ornn_Top_Trundle(Ratings):
pass
class NA_Ornn_Top_Tryndamere(Ratings):
pass
class NA_Ornn_Top_TwistedFate(Ratings):
pass
class NA_Ornn_Top_Twitch(Ratings):
pass
class NA_Ornn_Top_Udyr(Ratings):
pass
class NA_Ornn_Top_Urgot(Ratings):
pass
class NA_Ornn_Top_Varus(Ratings):
pass
class NA_Ornn_Top_Vayne(Ratings):
pass
class NA_Ornn_Top_Veigar(Ratings):
pass
class NA_Ornn_Top_Velkoz(Ratings):
pass
class NA_Ornn_Top_Vi(Ratings):
pass
class NA_Ornn_Top_Viktor(Ratings):
pass
class NA_Ornn_Top_Vladimir(Ratings):
pass
class NA_Ornn_Top_Volibear(Ratings):
pass
class NA_Ornn_Top_Warwick(Ratings):
pass
class NA_Ornn_Top_Xayah(Ratings):
pass
class NA_Ornn_Top_Xerath(Ratings):
pass
class NA_Ornn_Top_XinZhao(Ratings):
pass
class NA_Ornn_Top_Yasuo(Ratings):
pass
class NA_Ornn_Top_Yorick(Ratings):
pass
class NA_Ornn_Top_Zac(Ratings):
pass
class NA_Ornn_Top_Zed(Ratings):
pass
class NA_Ornn_Top_Ziggs(Ratings):
pass
class NA_Ornn_Top_Zilean(Ratings):
pass
class NA_Ornn_Top_Zyra(Ratings):
pass
|
[
"noreply@github.com"
] |
koliupy.noreply@github.com
|
9b1a400d3281860f99c1cb1c0f0a9b1c2006bf90
|
2d191eb46ed804c9029801832ff4016aeaf8d31c
|
/configs/_base_/models/deeplabv3_sep_r50-d8.py
|
bb8c92051e538c75132eb8666ccb1d1cc8698ffc
|
[
"Apache-2.0"
] |
permissive
|
openseg-group/mmsegmentation
|
df99ac2c3510b7f2dff92405aae25026d1023d98
|
23939f09d2b0bd30fc26eb7f8af974f1f5441210
|
refs/heads/master
| 2023-03-02T07:49:23.652558
| 2021-02-15T04:16:28
| 2021-02-15T04:16:28
| 278,537,243
| 2
| 2
| null | 2020-07-10T04:24:16
| 2020-07-10T04:24:15
| null |
UTF-8
|
Python
| false
| false
| 1,330
|
py
|
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 2, 4),
strides=(1, 2, 1, 1),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
decode_head=dict(
type='DepthwiseSeparableASPPHead',
in_channels=2048,
in_index=3,
channels=512,
dilations=(1, 12, 24, 36),
c1_in_channels=0,
c1_channels=0,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=1024,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)))
# model training and testing settings
train_cfg = dict()
test_cfg = dict(mode='whole')
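# Illustrative note (added): in the usual mmsegmentation workflow this file is
# not run directly but pulled in through _base_ inheritance, e.g.
#   _base_ = ['../_base_/models/deeplabv3_sep_r50-d8.py', ...]
# after which the merged cfg.model dict is consumed by the segmentor builder
# (mmseg.models.build_segmentor in this era of the codebase).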
|
[
"yhyuan@pku.edu.cn"
] |
yhyuan@pku.edu.cn
|
ad77f04ce6810e07fd8407db9354c5b4139ab67e
|
17dca703eed28a859bba4984eba5b039b900e3d7
|
/.history/nomina/views_20200227181321.py
|
a9f9c322cb015feead3955c66ebab05f4727ad27
|
[] |
no_license
|
alexogch1/SistemaOperaciones
|
1a34872daf0e151672edd202a5089ee754805203
|
ac72f6e3284061e240aebec6a3300ff463a3544c
|
refs/heads/master
| 2021-01-03T15:32:45.470642
| 2020-03-03T07:47:27
| 2020-03-03T07:47:27
| 240,133,319
| 0
| 1
| null | 2020-02-28T05:21:57
| 2020-02-12T23:02:36
|
Python
|
UTF-8
|
Python
| false
| false
| 5,733
|
py
|
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse_lazy
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
#from .filters import NominaFiltro
from dateutil.parser import parse
from django.views import generic
from generales.views import SinPrivilegios
from .form import NominaEncForm, NominaDetForm, DetalleNominaFormSet
from .models import NominaEnc, NominaDet
class NominaCompletaList(generic.ListView):
template_name='nomina/nomina_completa.html'
context_object_name='nomina'
queryset = NominaEnc.objects.all()
def get_context_data(self, **kwargs):
context = super(NominaCompletaList, self).get_context_data(**kwargs)
context['detalles'] = NominaDet.objects.all()
context['encabezado'] = self.queryset
return context
class NominaList( generic.ListView):
model=NominaEnc
template_name='nomina/nomina_list.html'
context_object_name='nomina'
""" def get_context_data(self, **kwargs):
context = super(NominaList, self).get_context_data(**kwargs)
initial_date = self.request.GET.get('fecha_inicial')
final_date = self.request.GET.get('fecha_final')
if not initial_date or not final_date:
context ['nomina'] = NominaEnc.objects.order_by('fecha_nomina')
else:
initial_date = parse(initial_date)
final_date = parse(final_date)
context['nomina'] = NominaEnc.objects.filter(fecha_nomina__gte=initial_date, fecha_nomina__lte=final_date )
return context """
#def get_context_data(self, **kwargs):
#context = super().get_context_data(**kwargs)
#context['filter']=NominaFiltro(self.request.GET, queryset=self.get_queryset())
#return context
class NominaNew(SinPrivilegios, generic.CreateView):
permission_required='nomina.add_nominaenc'
model=NominaEnc
login_url='generales:home'
template_name='nomina/nomina_form.html'
form_class=NominaEncForm
success_url=reverse_lazy('nomina:nomina_list')
def get(self, request, *args, **kwargs):
self.object=None
form_class=self.get_form_class()
form=self.get_form(form_class)
detalle_nomina_formset=DetalleNominaFormSet()
return self.render_to_response(
self.get_context_data(
form=form,
detalle_nomina = detalle_nomina_formset
)
)
def post(self, request, *args, **kwargs):
form_class=self.get_form_class()
form=self.get_form(form_class)
detalle_nomina=DetalleNominaFormSet(request.POST)
if form.is_valid() and detalle_nomina.is_valid():
return self.form_valid(form, detalle_nomina)
else:
return self.form_invalid(form, detalle_nomina)
def form_valid(self, form, detalle_nomina):
self.object=form.save()
detalle_nomina.instance=self.object
detalle_nomina.save()
return HttpResponseRedirect(self.success_url)
def form_invalid(self, form, detalle_nomina):
return self.render_to_response(
self.get_context_data(
form=form,
detalle_nomina=detalle_nomina
)
)
class NominaEdit(SinPrivilegios,generic.UpdateView):
permission_required='nomina.change_nominaenc'
model=NominaEnc
login_url='generales:home'
template_name='nomina/nomina_form.html'
form_class=NominaEncForm
success_url=reverse_lazy('nomina:nomina_list')
def get_success_url(self):
from django.urls import reverse
        return reverse('nomina:nomina_edit',
kwargs={'pk':self.get_object().id})
    def get(self, request, *args, **kwargs):
self.object = self.get_object()
form_class = self.get_form_class()
form = self.get_form(form_class)
detalles =NominaDet.objects.filter(nomina=self.object).order_by('pk')
detalles_data = []
for detalle in detalles:
d={
'concepto':detalle.concepto,
'cantidad':detalle.cantidad
}
detalles_data.append(d)
detalle_nomina = DetalleNominaFormSet(initial=detalles_data)
detalle_nomina.extra += len(detalles_data)
return self.render_to_response(
self.get_context_data(
form=form,
detalle_nomina = detalle_nomina
)
)
def post(self,request, *args, **kwargs):
self.object = self.get_object()
form_class = self.get_form_class()
form=self.get_form(form_class)
detalle_nomina = DetalleNominaFormSet(request.POST)
if form.is_valid() and detalle_nomina.is_valid():
return self.form_valid(form, detalle_nomina)
else:
            return self.form_invalid(form, detalle_nomina)
def form_valid(self, form, detalle_nomina):
self.object = form.save()
detalle_nomina.instance =self.object
NominaDet.objects.filter(nomina=self.object).delete()
detalle_nomina.save()
return HttpResponseRedirect(self.get_success_url())
def form_invalid(self, form, detalle_nomina):
return self.render_to_response(
self.get_context_data(
form=form,
detalle_nomina=detalle_nomina
)
)
class NominaDel(SinPrivilegios,generic.DeleteView):
    permission_required='nomina.delete_nominaenc'
model= NominaEnc
template_name = 'nomina/nomina_del.html'
context_object_name='obj'
success_url=reverse_lazy('nomina:nomina_list')
|
[
"alexogch@hotmail.com"
] |
alexogch@hotmail.com
|
5a0772e1a8a55625488fe06642e451fb792dad75
|
b0129214b1d493bdec6fc4658727775fb4066a5e
|
/addons/todo_user/__manifest__.py
|
373e3f23f73a060a7b0267b94f628db4cc01f954
|
[] |
no_license
|
gitstalker/docker_odoo
|
9875636e4f1bf60a8e55c7a66e8c85abf5f61661
|
c049d93586f1c35300563fc77685da22d9cc4e14
|
refs/heads/master
| 2020-05-02T01:10:45.705337
| 2018-10-20T12:03:20
| 2018-10-20T12:03:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
{
'name':'Multiuser To-Do',
'description': 'Extend the To-Do app to multiuser.',
'depends': ['website'],
'data':['views/templates.xml'],
'author': 'hdwolf'
}
|
[
"spacegoing@gmail.com"
] |
spacegoing@gmail.com
|
8d2db0e03577849c03ffa9b296d5a266ea0fb0d7
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/rdbms/azure-mgmt-rdbms/azure/mgmt/rdbms/mysql/aio/operations/_replicas_operations.py
|
ed2a170e523af5c4bd570d0b9b817e6a9a04d6ce
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 5,650
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._replicas_operations import build_list_by_server_request
from .._vendor import MySQLManagementClientMixinABC
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ReplicasOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.rdbms.mysql.aio.MySQLManagementClient`'s
:attr:`replicas` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_server(
self, resource_group_name: str, server_name: str, **kwargs: Any
) -> AsyncIterable["_models.Server"]:
"""List all the replicas for a given server.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param server_name: The name of the server. Required.
:type server_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Server or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.rdbms.mysql.models.Server]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2017-12-01"))
cls: ClsType[_models.ServerListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_server_request(
resource_group_name=resource_group_name,
server_name=server_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_server.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ServerListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_server.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/replicas"
}
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
05ccf6e2d5d1a9e66261f6829dcff9f2468cbea3
|
124bdbf417117fe23168f043dd265f88b3bd6e70
|
/lib/datasets/__init__.py
|
e62bcd2b434d9f62ee2b19a9875c4c64db1d00e6
|
[] |
no_license
|
tonyonifo/anytime
|
943f56ebd4759f0f5181607d8030d50eabb8d38b
|
86bba7a334fc65899da01b30d925437163c1dede
|
refs/heads/master
| 2023-08-02T21:32:42.977184
| 2021-10-05T16:58:35
| 2021-10-05T16:58:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .cityscapes import Cityscapes as cityscapes
|
[
"email@email.com"
] |
email@email.com
|
192513b2ebb9f2f9c07d84b4cdb0e2c0f10f8099
|
2db7597686f33a0d700f7082e15fa41f830a45f0
|
/Python/coding/longestPalindromicSubstring.py
|
5b5cc4e5f9239e189fda40d29fb79084b668ae13
|
[] |
no_license
|
Leahxuliu/Data-Structure-And-Algorithm
|
04e0fc80cd3bb742348fd521a62bc2126879a70e
|
56047a5058c6a20b356ab20e52eacb425ad45762
|
refs/heads/master
| 2021-07-12T23:54:17.785533
| 2021-05-17T02:04:41
| 2021-05-17T02:04:41
| 246,514,421
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 862
|
py
|
'''
5. Longest Palindromic Substring
Pay attention to what the problem asks you to return: the substring itself,
not just its length.
'''
def longestPalindrome(s: str) -> str:
if s == '':
return ''
n = len(s)
dp = [[False] * n for _ in range(n)]
max_len = 1
start = 0
for i in range(n - 1, -1, -1):
for j in range(i, n):
if i == j:
dp[i][j] = True
elif j - i == 1:
if s[i] == s[j]:
dp[i][j] = True
if max_len < 2:
max_len = 2
start = i
else:
if s[i] == s[j] and dp[i + 1][j - 1] == True:
dp[i][j] = True
if max_len < j - i + 1:
max_len = j - i + 1
start = i
return s[start:start + max_len]
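# Quick illustrative check (added): for s = "babad" the bottom-up scan reaches
# "aba" (i = 1, j = 3) before "bab" (i = 0, j = 2), so max_len ends at 3 and
# the call below prints "aba".
# print(longestPalindrome("babad"))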
|
[
"leahxuliu@gmail.com"
] |
leahxuliu@gmail.com
|
79926eb4ed7b1cb24b624dd9df42ddf2c75ac463
|
6188f8ef474da80c9e407e8040de877273f6ce20
|
/examples/docs_snippets/docs_snippets/concepts/assets/asset_input_managers_numpy.py
|
41c6a5aa55fde72067e0e687ff7d0816f51b530f
|
[
"Apache-2.0"
] |
permissive
|
iKintosh/dagster
|
99f2a1211de1f3b52f8bcf895dafaf832b999de2
|
932a5ba35263deb7d223750f211c2ddfa71e6f48
|
refs/heads/master
| 2023-01-24T15:58:28.497042
| 2023-01-20T21:51:35
| 2023-01-20T21:51:35
| 276,410,978
| 1
| 0
|
Apache-2.0
| 2020-07-01T15:19:47
| 2020-07-01T15:13:56
| null |
UTF-8
|
Python
| false
| false
| 1,511
|
py
|
import os
import pandas as pd
from dagster import AssetIn, Definitions, IOManager, asset, io_manager
from .asset_input_managers import (
load_numpy_array,
load_pandas_dataframe,
store_pandas_dataframe,
)
# start_numpy_example
class PandasAssetIOManager(IOManager):
def handle_output(self, context, obj):
file_path = self._get_path(context)
store_pandas_dataframe(name=file_path, table=obj)
def _get_path(self, context):
return os.path.join(
"storage",
f"{context.asset_key.path[-1]}.csv",
)
def load_input(self, context):
file_path = self._get_path(context)
return load_pandas_dataframe(name=file_path)
@io_manager
def pandas_asset_io_manager():
return PandasAssetIOManager()
class NumpyAssetIOManager(PandasAssetIOManager):
def load_input(self, context):
file_path = self._get_path(context)
return load_numpy_array(name=file_path)
@io_manager
def numpy_asset_io_manager():
return NumpyAssetIOManager()
@asset(io_manager_key="pandas_manager")
def upstream_asset():
return pd.DataFrame([1, 2, 3])
@asset(
ins={"upstream": AssetIn(key_prefix="public", input_manager_key="numpy_manager")}
)
def downstream_asset(upstream):
return upstream.shape
defs = Definitions(
assets=[upstream_asset, downstream_asset],
resources={
"pandas_manager": pandas_asset_io_manager,
"numpy_manager": numpy_asset_io_manager,
},
)
# end_numpy_example
|
[
"noreply@github.com"
] |
iKintosh.noreply@github.com
|
f69d5a2c855b376f97d582465cd8c179d5452fa9
|
ad570312a736a84e96c5178bc1af91c6c0b898fb
|
/pyth/linkedListCoppy.py
|
4c1a565b380f1db25973401fb73c746e6d86da3f
|
[] |
no_license
|
pritamSarkar123/Compitative20_21
|
ad813d189b7388ea2179bb96f64eaa88ba75db32
|
d5474b02487dc759c47e3da1047154533dc7b641
|
refs/heads/master
| 2023-01-14T12:04:51.756085
| 2020-11-26T09:31:02
| 2020-11-26T09:31:02
| 296,670,667
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,551
|
py
|
#problem description
#https://www.youtube.com/watch?v=xbpUHSKoALg&t=784s
# algorithm:
# #create intermediate nodes
# p=head
# q=NULL
# while p!=NULL:
# q=p
# p=p->next
# t=copy(q)
# q->next=t
# t->next=p
# #connecting new linked list
# p=head
# q=NULL
# while p!=NULL:
# q=p
# p=p->next->next
# q->next->random=q->random->next
# q=q->next
# if p!=NULL:
# q->next=p->next
# else:
#         q->next=NULL
# #changing head pointer
# head=head->next
class Node:
def __init__(self,value):
self.value=value
self.next=None
self.random=None
self.message="Original"
class LinkedList:
def __init__(self):
self.head=Node(1)
temp=self.head
temp.next=Node(2)
temp=temp.next
temp.next=Node(3)
temp=temp.next
temp=self.head
temp.random=temp.next.next #1->3
temp=temp.next
temp.random=self.head #2->1
temp=temp.next
temp.random=temp #3->3
def show_list(self):
temp=self.head
while temp:
print(temp.value,temp.message,temp.random.value)
temp=temp.next
def copy_list(self):
#create intermediate nodes
p=self.head
q=None
while p:
q=p
p=p.next
            temp = Node(q.value)
            temp.message = "Copied"
q.next=temp
temp.next=p
#connecting new linked list
p=self.head
q=None
while p:
q=p
p=p.next.next
q.next.random=q.random.next
q=q.next
if p:
q.next=p.next
else:
q.next=None
#changing head pointer
self.head=self.head.next
self.show_list()
if __name__=="__main__":
myList=LinkedList()
myList.show_list()
myList.copy_list()
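# Expected output (added for illustration, given the hard-coded 3-node list):
#   1 Original 3
#   2 Original 1
#   3 Original 3
#   1 Copied 3
#   2 Copied 1
#   3 Copied 3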
|
[
"pritamsarkar84208220@gmail.com"
] |
pritamsarkar84208220@gmail.com
|
68e264f1175e4500758f875b6b021e66b4625bc8
|
9f1b8a1ada57198e2a06d88ddcdc0eda0c683df7
|
/submission - Homework1/HW1 - Alex/index_nested_list.py
|
bbb999230c523e6e8d132b64459550cc015955c5
|
[] |
no_license
|
sendurr/spring-grading
|
90dfdced6327ddfb5c311ae8f42ae1a582768b63
|
2cc280ee3e0fba02e95b6e9f45ad7e13bc7fad54
|
refs/heads/master
| 2020-04-15T17:42:10.781884
| 2016-08-29T20:38:17
| 2016-08-29T20:38:17
| 50,084,068
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
# Alexis Thompson-Klemish
#
q = [['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h']]
# print the letter a
print q[0][0]
# print the list ['d', 'e', 'f']
print q[1]
# print the last element h
print q[-1][-1]
#print the d element
print q[1][0]
#explain why q[-1][-2] has the value g
print "negative indexes count from the right, not the left so q[-1] produces the rightmost list and q[-1][-2] produces the second to last element in the last list"
|
[
"sendurr@hotmail.com"
] |
sendurr@hotmail.com
|
cf4b67c14d7a1b9856437ecb6e313e98a2c15a74
|
30c23852ae41a7808e2a202280e973ff1a4bbe2b
|
/OP/op.py
|
9217ca651fa7b0366b9ae90cc341da5a83482f7b
|
[] |
no_license
|
rohe/oidc-oob-federation
|
050ce05a1bd373795bc74c63287edeccbf1c3129
|
53517decc43f4d58aa7b825feb8c97704de8822f
|
refs/heads/master
| 2020-03-18T04:04:10.383031
| 2018-06-07T12:15:55
| 2018-06-07T12:15:55
| 134,267,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,153
|
py
|
import logging
import cherrypy
from cryptojwt import as_bytes
from oidcmsg.oauth2 import is_error_message
from oidcmsg.oauth2 import AuthorizationRequest
from oidcendpoint.sdb import AuthnEvent
logger = logging.getLogger(__name__)
class OpenIDProvider(object):
def __init__(self, config, endpoint_context):
self.config = config
self.endpoint_context = endpoint_context
def do_response(self, endpoint, req_args, **args):
info = endpoint.do_response(request=req_args, **args)
for key, value in info['http_headers']:
cherrypy.response.headers[key] = value
try:
_response_placement = info['response_placement']
except KeyError:
_response_placement = endpoint.response_placement
if _response_placement == 'body':
logger.info('Response: {}'.format(info['response']))
return as_bytes(info['response'])
elif _response_placement == 'url':
logger.info('Redirect to: {}'.format(info['response']))
raise cherrypy.HTTPRedirect(info['response'])
@cherrypy.expose
def service_endpoint(self, name, **kwargs):
logger.info(kwargs)
logger.info('At the {} endpoint'.format(name))
endpoint = self.endpoint_context.endpoint[name]
try:
authn = cherrypy.request.headers['Authorization']
except KeyError:
pr_args = {}
else:
pr_args = {'auth': authn}
if endpoint.request_placement == 'body':
if cherrypy.request.process_request_body is True:
_request = cherrypy.request.body.read()
else:
raise cherrypy.HTTPError(400, 'Missing HTTP body')
if not _request:
_request = kwargs
req_args = endpoint.parse_request(_request, **pr_args)
else:
req_args = endpoint.parse_request(kwargs, **pr_args)
logger.info('request: {}'.format(req_args))
if is_error_message(req_args):
return as_bytes(req_args.to_json())
args = endpoint.process_request(req_args)
return self.do_response(endpoint, req_args, **args)
@cherrypy.expose
def authn_verify(self, url_endpoint, **kwargs):
"""
        Authentication verification.

        :param url_endpoint: the URL endpoint, used to look up which authn
            method was used
        :param kwargs: response arguments
        :return: HTTP redirect
"""
authn_method = self.endpoint_context.endpoint_to_authn_method[url_endpoint]
username = authn_method.verify(**kwargs)
if not username:
            raise cherrypy.HTTPError(403, message='Authentication failed')
auth_args = authn_method.unpack_token(kwargs['token'])
request = AuthorizationRequest().from_urlencoded(auth_args['query'])
# uid, salt, valid=3600, authn_info=None, time_stamp=0, authn_time=None,
# valid_until=None
authn_event = AuthnEvent(username, 'salt',
authn_info=auth_args['authn_class_ref'],
authn_time=auth_args['iat'])
endpoint = self.endpoint_context.endpoint['authorization']
args = endpoint.post_authentication(request,
user=username,
authn_event=authn_event)
return self.do_response(endpoint, request, **args)
def _cp_dispatch(self, vpath):
# Only get here if vpath != None
ent = cherrypy.request.remote.ip
logger.info('ent:{}, vpath: {}'.format(ent, vpath))
if len(vpath) == 2 and vpath[0] == 'verify':
a = vpath.pop(0)
b = vpath.pop(0)
cherrypy.request.params['url_endpoint'] = '/'.join(['', a, b])
return self.authn_verify
for name, instance in self.endpoint_context.endpoint.items():
if vpath == instance.vpath:
cherrypy.request.params['name'] = name
for n in range(len(vpath)):
vpath.pop()
return self.service_endpoint
return self
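# Hypothetical mounting sketch (added; not from this module, and the config
# objects are assumed to come from the surrounding application):
#   provider = OpenIDProvider(config, endpoint_context)
#   cherrypy.tree.mount(provider, '/')
#   cherrypy.engine.start()
#   cherrypy.engine.block()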
|
[
"roland@catalogix.se"
] |
roland@catalogix.se
|
1e5810523ea93878d26b6ef00317399d8e25aa25
|
1005a4290bca16dcf4c6b3415662e134044305bd
|
/python/Sources/gensource_Z2Jets_muhad_cfi.py
|
1d71fe0e0d4998bc6ab78e9176740d7f2eb3bcf3
|
[] |
no_license
|
cms-analysis/TauAnalysis-GenSimTools
|
2652bb713107bb1d459882581237662d229d3906
|
b787b784ee3598c4428c4883c04cdc525eb54eb6
|
refs/heads/master
| 2020-12-24T15:58:14.392883
| 2013-06-28T20:10:42
| 2013-06-28T20:10:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,238
|
py
|
import FWCore.ParameterSet.Config as cms
# The Alpgen Source. It reads unweighted alpgen files
source = cms.Source("AlpgenSource",
# use an input file name without extension unw
fileNames = cms.untracked.vstring(
'file:/home/cbernet/ALPGEN/v213/zjetwork/z2j'
)
)
# The Alpgen Producer.
from GeneratorInterface.AlpgenInterface.generator_cfi import *
generator.comEnergy = 14000.0
generator.pythiaHepMCVerbosity = False
generator.maxEventsToPrint = 0
# Set the jet matching parameters as you see fit.
generator.jetMatching.applyMatching = True
generator.jetMatching.exclusive = True
generator.jetMatching.etMin = 20.0
generator.jetMatching.drMin = 0.5
# every process that includes taus should use TAUOLA
from GeneratorInterface.ExternalDecays.TauolaSettings_cff import *
generator.ExternalDecays = cms.PSet(
Tauola = cms.untracked.PSet(
TauolaPolar,
InputCards = cms.PSet(
pjak1 = cms.int32(0),
pjak2 = cms.int32(0),
#mdtau = cms.int32(116) #mdtau = 0 all decays
mdtau = cms.int32(116) #mdtau = 116 - ONE mu+-, other taus -> all channels
)
),
parameterSets = cms.vstring('Tauola')
)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"sha1-5c72da6f595cce9b6b48aff6d56f01e9beb4aad1@cern.ch"
] |
sha1-5c72da6f595cce9b6b48aff6d56f01e9beb4aad1@cern.ch
|
f50fbf295e7c63db3184c8adcae01d3500afaf12
|
600df3590cce1fe49b9a96e9ca5b5242884a2a70
|
/tools/grit/grit/format/policy_templates/writers/ios_plist_writer_unittest.py
|
0fdecb1d1ef33336fc052bf6c32b3b97d6f1800a
|
[
"BSD-3-Clause",
"LGPL-2.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-unknown",
"MIT"
] |
permissive
|
metux/chromium-suckless
|
efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a
|
72a05af97787001756bae2511b7985e61498c965
|
refs/heads/orig
| 2022-12-04T23:53:58.681218
| 2017-04-30T10:59:06
| 2017-04-30T23:35:58
| 89,884,931
| 5
| 3
|
BSD-3-Clause
| 2022-11-23T20:52:53
| 2017-05-01T00:09:08
| null |
UTF-8
|
Python
| false
| false
| 6,923
|
py
|
#!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.format.policy_templates.writers.ios_plist_writer'''
import base64
import functools
import os
import plistlib
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
import unittest
try:
import Cocoa
except:
Cocoa = None
from grit.format.policy_templates.writers import writer_unittest_common
class IOSPListWriterUnittest(writer_unittest_common.WriterUnittestCommon):
'''Unit tests for IOSPListWriter.'''
def _ParseWithPython(self, decode, text):
'''Parses a serialized Plist, using Python's plistlib.
If |decode| is true then |text| is decoded as Base64 before being
deserialized as a Plist.'''
if decode:
text = base64.b64decode(text)
return plistlib.readPlistFromString(text)
def _ParseWithCocoa(self, decode, text):
'''Parses a serialized Plist, using Cocoa's python bindings.
If |decode| is true then |text| is decoded as Base64 before being
deserialized as a Plist.'''
if decode:
data = Cocoa.NSData.alloc().initWithBase64EncodedString_options_(text, 0)
else:
data = Cocoa.NSData.alloc().initWithBytes_length_(text, len(text))
result = Cocoa.NSPropertyListSerialization. \
propertyListFromData_mutabilityOption_format_errorDescription_(
data, Cocoa.NSPropertyListImmutable, None, None)
return result[0]
def _VerifyGeneratedOutputWithParsers(self,
templates,
expected_output,
parse,
decode_and_parse):
_defines = { '_chromium': '1',
'mac_bundle_id': 'com.example.Test',
'version': '39.0.0.0' }
# Generate the grit output for |templates|.
output = self.GetOutput(
self.PrepareTest(templates),
'fr',
_defines,
'ios_plist',
'en')
# Parse it as a Plist.
plist = parse(output)
self.assertEquals(len(plist), 2)
self.assertTrue('ChromePolicy' in plist)
self.assertTrue('EncodedChromePolicy' in plist)
# Get the 2 expected fields.
chrome_policy = plist['ChromePolicy']
encoded_chrome_policy = plist['EncodedChromePolicy']
# Verify the ChromePolicy.
self.assertEquals(chrome_policy, expected_output)
# Decode the EncodedChromePolicy and verify it.
decoded_chrome_policy = decode_and_parse(encoded_chrome_policy)
self.assertEquals(decoded_chrome_policy, expected_output)
def _VerifyGeneratedOutput(self, templates, expected):
# plistlib is available on all Python platforms.
parse = functools.partial(self._ParseWithPython, False)
decode_and_parse = functools.partial(self._ParseWithPython, True)
self._VerifyGeneratedOutputWithParsers(
templates, expected, parse, decode_and_parse)
# The Cocoa bindings are available on Mac OS X only.
if Cocoa:
parse = functools.partial(self._ParseWithCocoa, False)
decode_and_parse = functools.partial(self._ParseWithCocoa, True)
self._VerifyGeneratedOutputWithParsers(
templates, expected, parse, decode_and_parse)
def _MakeTemplate(self, name, type, example, extra=''):
return '''
{
'policy_definitions': [
{
'name': '%s',
'type': '%s',
'desc': '',
'caption': '',
'supported_on': ['ios:35-'],
'example_value': %s,
%s
},
],
'placeholders': [],
'messages': {},
}
''' % (name, type, example, extra)
def testEmpty(self):
templates = '''
{
'policy_definitions': [],
'placeholders': [],
'messages': {},
}
'''
expected = {}
self._VerifyGeneratedOutput(templates, expected)
def testEmptyVersion(self):
templates = '''
{
'policy_definitions': [],
'placeholders': [],
'messages': {},
}
'''
expected = {}
self._VerifyGeneratedOutput(templates, expected)
def testBoolean(self):
templates = self._MakeTemplate('BooleanPolicy', 'main', 'True')
expected = {
'BooleanPolicy': True,
}
self._VerifyGeneratedOutput(templates, expected)
def testString(self):
templates = self._MakeTemplate('StringPolicy', 'string', '"Foo"')
expected = {
'StringPolicy': 'Foo',
}
self._VerifyGeneratedOutput(templates, expected)
def testStringEnum(self):
templates = self._MakeTemplate(
'StringEnumPolicy', 'string-enum', '"Foo"',
'''
'items': [
{ 'name': 'Foo', 'value': 'Foo', 'caption': '' },
{ 'name': 'Bar', 'value': 'Bar', 'caption': '' },
],
''')
expected = {
'StringEnumPolicy': 'Foo',
}
self._VerifyGeneratedOutput(templates, expected)
def testInt(self):
templates = self._MakeTemplate('IntPolicy', 'int', '42')
expected = {
'IntPolicy': 42,
}
self._VerifyGeneratedOutput(templates, expected)
def testIntEnum(self):
templates = self._MakeTemplate(
'IntEnumPolicy', 'int-enum', '42',
'''
'items': [
{ 'name': 'Foo', 'value': 100, 'caption': '' },
{ 'name': 'Bar', 'value': 42, 'caption': '' },
],
''')
expected = {
'IntEnumPolicy': 42,
}
self._VerifyGeneratedOutput(templates, expected)
def testStringList(self):
templates = self._MakeTemplate('StringListPolicy', 'list', '["a", "b"]')
expected = {
'StringListPolicy': [ "a", "b" ],
}
self._VerifyGeneratedOutput(templates, expected)
def testStringEnumList(self):
templates = self._MakeTemplate('StringEnumListPolicy',
'string-enum-list', '["a", "b"]',
'''
'items': [
{ 'name': 'Foo', 'value': 'a', 'caption': '' },
{ 'name': 'Bar', 'value': 'b', 'caption': '' },
],
''')
expected = {
'StringEnumListPolicy': [ "a", "b" ],
}
self._VerifyGeneratedOutput(templates, expected)
def testListOfDictionary(self):
templates = self._MakeTemplate(
'ManagedBookmarks', 'dict',
'''
[
{
"name": "Google Search",
"url": "www.google.com",
},
{
"name": "Youtube",
"url": "www.youtube.com",
}
]
''')
expected = {
'ManagedBookmarks': [
{ "name": "Google Search", "url": "www.google.com" },
{ "name": "Youtube", "url": "www.youtube.com" },
],
}
self._VerifyGeneratedOutput(templates, expected)
if __name__ == '__main__':
unittest.main()
|
[
"enrico.weigelt@gr13.net"
] |
enrico.weigelt@gr13.net
|
17182f5cae79f76332304a2abd4a7f9acf5a1442
|
d41d18d3ea6edd2ec478b500386375a8693f1392
|
/plotly/validators/layout/ternary/aaxis/_showticksuffix.py
|
2a1b061ea73eecf6d8c074e3d8662b7cb67f3748
|
[
"MIT"
] |
permissive
|
miladrux/plotly.py
|
38921dd6618650d03be9891d6078e771ffccc99a
|
dbb79e43e2cc6c5762251537d24bad1dab930fff
|
refs/heads/master
| 2020-03-27T01:46:57.497871
| 2018-08-20T22:37:38
| 2018-08-20T22:37:38
| 145,742,203
| 1
| 0
|
MIT
| 2018-08-22T17:37:07
| 2018-08-22T17:37:07
| null |
UTF-8
|
Python
| false
| false
| 533
|
py
|
import _plotly_utils.basevalidators
class ShowticksuffixValidator(
_plotly_utils.basevalidators.EnumeratedValidator
):
def __init__(
self,
plotly_name='showticksuffix',
parent_name='layout.ternary.aaxis',
**kwargs
):
super(ShowticksuffixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='plot',
role='style',
values=['all', 'first', 'last', 'none'],
**kwargs
)
|
[
"adam.kulidjian@gmail.com"
] |
adam.kulidjian@gmail.com
|
9bf714a6abbcdb0385038e4cdee96b601cece13d
|
06a7dc7cc93d019e4a9cbcf672b23a0bbacf8e8b
|
/2013_adni/MMSE-AD-CTL/01_build_dataset.py
|
f9049d7b28f1fc1da3d70f12b740317ca04ad28d
|
[] |
no_license
|
neurospin/scripts
|
6c06cd218a5f32de9c3c2b7d1d8bda3f3d107458
|
f14a2c9cf2cd7f5fbea767b017c3faf36d170bdb
|
refs/heads/master
| 2021-07-11T22:55:46.567791
| 2021-07-02T13:08:02
| 2021-07-02T13:08:02
| 10,549,286
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,376
|
py
|
# -*- coding: utf-8 -*-
"""
@author: edouard.Duchesnay@cea.fr
Compute mask, concatenate masked non-smoothed images for all the subjects.
Build X, y, and mask
INPUT:
- subject_list.txt:
- population.csv
OUTPUT:
- mask.nii.gz
- y.npy
- X.npy = intercept + Age + Gender + Voxel
"""
import os
import numpy as np
import glob
import pandas as pd
import nibabel
import brainomics.image_atlas
import shutil
#import proj_classif_config
GENDER_MAP = {'Female': 0, 'Male': 1}
BASE_PATH = "/neurospin/brainomics/2013_adni"
#INPUT_CLINIC_FILENAME = os.path.join(BASE_PATH, "clinic", "adnimerge_baseline.csv")
INPUT_SUBJECTS_LIST_FILENAME = os.path.join(BASE_PATH,
"templates",
"template_FinalQC",
"subject_list.txt")
INPUT_IMAGEFILE_FORMAT = os.path.join(BASE_PATH,
"templates",
"template_FinalQC",
"registered_images",
"mw{PTID}*_Nat_dartel_greyProba.nii")
INPUT_CSV = os.path.join(BASE_PATH, "MMSE-AD-CTL", "population.csv")
OUTPUT = os.path.join(BASE_PATH, "MMSE-AD-CTL")
OUTPUT_CS = os.path.join(BASE_PATH, "MMSE-AD-CTL_cs")
#OUTPUT_ATLAS = os.path.join(BASE_PATH, "MMSE-AD-CTL_gtvenet")
#OUTPUT_CS_ATLAS = os.path.join(BASE_PATH, "MMSE-AD-CTL_cs_gtvenet")
if not os.path.exists(OUTPUT): os.makedirs(OUTPUT)
if not os.path.exists(OUTPUT_CS): os.makedirs(OUTPUT_CS)
#os.makedirs(OUTPUT_ATLAS)
#os.makedirs(OUTPUT_CS_ATLAS)
# Read input subjects
input_subjects = pd.read_table(INPUT_SUBJECTS_LIST_FILENAME, sep=" ",
header=None)
input_subjects = [x[:10] for x in input_subjects[1]]
# Read pop csv
pop = pd.read_csv(INPUT_CSV)
pop['PTGENDER.num'] = pop["PTGENDER"].map(GENDER_MAP)
#############################################################################
# Read images
n = len(pop)
assert n == 242
Z = np.zeros((n, 3)) # Intercept + Age + Gender
Z[:, 0] = 1 # Intercept
y = np.zeros((n, 1)) # DX
images = list()
for i, PTID in enumerate(pop['PTID']):
cur = pop[pop.PTID == PTID]
print cur
imagefile_pattern = INPUT_IMAGEFILE_FORMAT.format(PTID=PTID)
imagefile_name = glob.glob(imagefile_pattern)
if len(imagefile_name) != 1:
raise ValueError("Found %i files" % len(imagefile_name))
babel_image = nibabel.load(imagefile_name[0])
images.append(babel_image.get_data().ravel())
Z[i, 1:] = np.asarray(cur[["AGE", "PTGENDER.num"]]).ravel()
y[i, 0] = cur["MMSE"]
shape = babel_image.get_data().shape
#############################################################################
# Compute mask
# Implicit masking assumes that a value below a given threshold at some
# voxel, in any of the images, indicates an unknown value; such voxels
# are excluded from the analysis.
Xtot = np.vstack(images)
mask = (np.min(Xtot, axis=0) > 0.01) & (np.std(Xtot, axis=0) > 1e-6)
mask = mask.reshape(shape)
assert mask.sum() == 313734
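# Toy illustration of the implicit mask (added; hypothetical 2-image,
# 3-voxel case):
#   Xtot = np.array([[0.0, 0.5, 0.3],
#                    [0.0, 0.5, 0.4]])
#   (np.min(Xtot, axis=0) > 0.01) & (np.std(Xtot, axis=0) > 1e-6)
#   -> array([False, False, True]): voxel 1 falls below the threshold,
#      voxel 2 has zero variance, and only voxel 3 is kept.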
#############################################################################
# Compute atlas mask
babel_mask_atlas = brainomics.image_atlas.resample_atlas_harvard_oxford(
ref=imagefile_name[0],
output=os.path.join("/tmp", "mask.nii.gz"))
mask_atlas = babel_mask_atlas.get_data()
assert np.sum(mask_atlas != 0) == 638715
mask_atlas[np.logical_not(mask)] = 0 # apply implicit mask
# smooth
mask_atlas = brainomics.image_atlas.smooth_labels(mask_atlas, size=(3, 3, 3))
assert np.sum(mask_atlas != 0) == 285983
out_im = nibabel.Nifti1Image(mask_atlas,
affine=babel_image.get_affine())
out_im.to_filename(os.path.join("/tmp", "mask.nii.gz"))
im = nibabel.load(os.path.join("/tmp", "mask.nii.gz"))
assert np.all(mask_atlas == im.get_data())
#shutil.copyfile(os.path.join(OUTPUT_ATLAS, "mask.nii.gz"), os.path.join(OUTPUT_CS_ATLAS, "mask.nii.gz"))
#############################################################################
# Compute mask with atlas but binarized (not group tv)
mask_bool = mask_atlas != 0
assert mask_bool.sum() == 285983
out_im = nibabel.Nifti1Image(mask_bool.astype("int16"),
affine=babel_image.get_affine())
out_im.to_filename(os.path.join(OUTPUT, "mask.nii.gz"))
babel_mask = nibabel.load(os.path.join(OUTPUT, "mask.nii.gz"))
assert np.all(mask_bool == (babel_mask.get_data() != 0))
shutil.copyfile(os.path.join(OUTPUT, "mask.nii.gz"), os.path.join(OUTPUT_CS, "mask.nii.gz"))
#############################################################################
# X
X = Xtot[:, mask_bool.ravel()]
X = np.hstack([Z, X])
assert X.shape == (242, 285986)
n, p = X.shape
np.save(os.path.join(OUTPUT, "X.npy"), X)
fh = open(os.path.join(OUTPUT, "X.npy").replace("npy", "txt"), "w")
fh.write('shape = (%i, %i): Intercept + Age + Gender + %i voxels' % \
(n, p, mask_bool.sum()))
fh.close()
# Xcs
X = Xtot[:, mask_bool.ravel()]
X = np.hstack([Z[:, 1:], X])
assert X.shape == (242, 285985)
X -= X.mean(axis=0)
X /= X.std(axis=0)
n, p = X.shape
np.save(os.path.join(OUTPUT_CS, "X.npy"), X)
fh = open(os.path.join(OUTPUT_CS, "X.npy").replace("npy", "txt"), "w")
fh.write('Centered and scaled data. Shape = (%i, %i): Age + Gender + %i voxels' % \
(n, p, mask_bool.sum()))
fh.close()
## atlas
#X = Xtot[:, (mask_atlas.ravel() != 0)]
#X = np.hstack([Z, X])
#assert X.shape == (242, 285986)
#n, p = X.shape
#np.save(os.path.join(OUTPUT_ATLAS, "X.npy"), X)
#fh = open(os.path.join(OUTPUT_ATLAS, "X.npy").replace("npy", "txt"), "w")
#fh.write('shape = (%i, %i): Intercept + Age + Gender + %i voxels' % \
# (n, p, (mask_atlas.ravel() != 0).sum()))
#fh.close()
#
## atlas cs
#X = Xtot[:, (mask_atlas.ravel() != 0)]
#X = np.hstack([Z[:, 1:], X])
#assert X.shape == (242, 285985)
#X -= X.mean(axis=0)
#X /= X.std(axis=0)
#n, p = X.shape
#np.save(os.path.join(OUTPUT_CS_ATLAS, "X.npy"), X)
#fh = open(os.path.join(OUTPUT_CS_ATLAS, "X.npy").replace("npy", "txt"), "w")
#fh.write('Centered and scaled data. Shape = (%i, %i): Age + Gender + %i voxels' % \
# (n, p, (mask_atlas.ravel() != 0).sum()))
#fh.close()
np.save(os.path.join(OUTPUT, "y.npy"), y)
y -= y.mean()
y /= y.std()
np.save(os.path.join(OUTPUT_CS, "y.npy"), y)
#np.save(os.path.join(OUTPUT_ATLAS, "y.npy"), y)
#np.save(os.path.join(OUTPUT_CS_ATLAS, "y.npy"), y)
|
[
"edouard.duchesnay@gmail.com"
] |
edouard.duchesnay@gmail.com
|
2bb5e9136817920f0a118765a28bf286b13b41be
|
c86277d74266b90b64774bc924b041009d697b2e
|
/source/nextdoor/wsgi.py
|
2ae744a10344445edcd3ffe9adf052710f84605a
|
[] |
no_license
|
rakeshsukla53/facebook-for-neighbours
|
dcd0c564530404e5415fa08b184398d10b1170ba
|
3d6c1430ab4f7ac8f668626c82705552da9f6566
|
refs/heads/master
| 2021-01-10T04:54:00.962831
| 2015-12-25T17:32:45
| 2015-12-25T17:32:45
| 46,942,279
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
WSGI config for nextdoor project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nextdoor.settings")
application = get_wsgi_application()
|
[
"rakesh.sukla53@gmail.com"
] |
rakesh.sukla53@gmail.com
|
7a78bab1a7c668a9b26dfe834a4c903b5273b3e3
|
d833e1643f799d8979ae385992be9f3012af23a5
|
/examples/c60_find_submit.py
|
848dd0dd9378059f4770fd68b09de99329f047ff
|
[
"BSD-3-Clause"
] |
permissive
|
ZhouHUB/simdb
|
05906505d549cbf584dcdcc91c9cebe95c2d349b
|
33fa21ddcc683e1618dfb337f5f928363c902a1e
|
refs/heads/master
| 2020-04-01T22:32:36.665260
| 2016-04-15T19:20:46
| 2016-04-15T19:20:46
| 36,950,426
| 0
| 0
| null | 2018-07-24T19:56:44
| 2015-06-05T19:06:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
__author__ = 'christopher'
import ase
from simdb.insert import *
from simdb.search import *
from pyiid.utils import build_sphere_np
from copy import deepcopy as dc
target_config, = find_atomic_config_document(name='C60 DFT')
parent_atoms = target_config.file_payload[-1]
# find the combined Potential Energy Surface (PES)
pes, = find_pes_document(name='C60 PDF Spring')
# find the simulation parameters
params, = find_simulation_parameter_document(name='T=1, iter=100, accept=.65')
rattles = [.05, .07, .08, .1]
for rattle in rattles:
# find starting_config
try:
        start_config, = find_atomic_config_document(name='C60 ' + str(rattle))
except ValueError:
starting_atoms = dc(parent_atoms)
starting_atoms.rattle(rattle, 42)
# Add the atoms to the DB
start_config = insert_atom_document('C60 ' + str(rattle), starting_atoms)
# Finally create the simulation
sim = insert_simulation('C60 rattle->DFT ' + str(rattle), params, start_config, pes)
print 'simulation added, number ', sim.id
|
[
"cjwright4242@gmail.com"
] |
cjwright4242@gmail.com
|
e2886ddba4caf4503f5d0cf9cf97f91e5c76cd44
|
3d0bb8d94a69237bf3c6ba6b2ccfdd0bc9cc162c
|
/addons/asterisk/agi-bin/states/get_fast_dial_destination_from_ibs.py
|
4521e2804bce47ffccda4b3c9723ff47c347a06f
|
[] |
no_license
|
ha8sh/IBSng
|
69727a7c5476ecb8efa45b7393ffe51de37a8a10
|
596aa468f8264ab0129431e3ede6cc1282b1ebbd
|
refs/heads/main
| 2023-08-25T18:21:28.081153
| 2021-10-02T05:03:52
| 2021-10-02T05:03:52
| 412,687,955
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 791
|
py
|
import xmlrpclib
import ibs_agi
from lib import request
from lib.error import *
def init():
ibs_agi.getStateMachine().registerState("GET_FAST_DIAL_DESTINATION_FROM_IBS",getFastDialIndexFromIBS)
def getFastDialIndexFromIBS(_index):
"""
get fast dial index destination from ibs
may raise an IBSException
"""
_index=int(_index)
req=request.Request()
try:
destination=req.send("getFastDialDestination",True,index=_index)
except xmlrpclib.Fault,e:
logException()
ibs_agi.getSelectedLanguage().sayPrompt("unknown_problem")
raise IBSException(e.faultString)
else:
if ibs_agi.getConfig().getValue("debug"):
toLog("getFastDialIndexFromIBS: %s"%destination)
return destination
|
[
"hassanshaikhi@gmail.com"
] |
hassanshaikhi@gmail.com
|
73c1cb0d4aa4a86afefeb3fd74e8241edec6456a
|
7620893c7d253a4d8c6f5aef2cfda6c72b777d49
|
/src/Camera/DisplayImage.py
|
8d1b1ee14891406071b40da9d5bf7f304a4aa7ad
|
[] |
no_license
|
garridoH/cameraGUI
|
cacc549a9da0bcb6c3b9be04ef9783c653300118
|
cb6ac1d54dd8651da974ed058990c8212d145415
|
refs/heads/master
| 2021-01-17T22:38:53.398306
| 2012-06-12T20:41:44
| 2012-06-12T20:41:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 800
|
py
|
'''
Adapted from online sources, including http://www.blog.pythonlibrary.org/2010/03/26/creating-a-simple-photo-viewer-with-wxpython/
'''
import wx
class displayImage(wx.App):
def __init__(self, redirect=False):
wx.App.__init__(self, redirect)
self.frame = wx.Frame(None, title='Prosilica Viewer', pos=(100,300), size=(1360,1024))
self.panel = wx.Panel(self.frame)
self.Image = wx.StaticBitmap(self.frame, bitmap=wx.EmptyBitmap(1360,1024))
#self.panel.Layout()
self.frame.Show()
def showImage(self, bmpImg):
h=bmpImg.GetHeight()
w=bmpImg.GetWidth()
print "Image is " + str(h) + " x " + str(w)
self.Image.SetBitmap(bmpImg)
self.Image.Refresh()
def OnClose(self, event):
self.Destroy()
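# Hypothetical usage sketch (added; 'frame.png' is an assumed file and the
# classic wxPython API of the era is assumed):
#   app = displayImage()
#   app.showImage(wx.Bitmap('frame.png', wx.BITMAP_TYPE_PNG))
#   app.MainLoop()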
|
[
"raedwards@gmail.com"
] |
raedwards@gmail.com
|
0c3d842557c9376a3e85eb48319735d211b4170d
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_1/Naca/main.py
|
5645aca52186859c629e2d833d00d1e431940170
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 432
|
py
|
T = int(input())
data = []
for i in range(T):
    data.append(int(input()))

for i in range(T):
    if data[i] == 0:
        print("Case #" + str(i + 1) + ": INSOMNIA")
    else:
        digits = []
        sumN = data[i]
        while len(digits) < 10:
            tmp = sumN
            while tmp > 0:
                if tmp % 10 not in digits:
                    digits.append(tmp % 10)
                tmp //= 10
            sumN += data[i]
        print("Case #" + str(i + 1) + ": " + str(sumN - data[i]))
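# Illustrative trace (added): for N = 2 the multiples 2, 4, ..., 90 must be
# counted before every digit 0-9 has appeared, so the printed answer is 90;
# N = 0 never produces new digits, hence INSOMNIA.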
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
5c373349176db66ba2f7617dfca9fa2c18ee4d78
|
94724578994ab1438dcefb51b7ef4d8570da5d4c
|
/calibre/draveness.recipe
|
361ce64106650ddf8643557d49369ebfec882386
|
[] |
no_license
|
PegasusWang/collection_python
|
6648d83203634abf44fd42c0b37b0bf7cc406d8f
|
9ef019a737a0817860d3184924c67a0833bd1252
|
refs/heads/master
| 2023-09-01T23:15:39.813635
| 2023-08-24T06:46:12
| 2023-08-24T06:46:12
| 43,693,872
| 130
| 90
| null | 2021-04-26T15:12:55
| 2015-10-05T15:28:15
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,028
|
recipe
|
#!/usr/bin/python
# encoding: utf-8
from calibre.web.feeds.recipes import BasicNewsRecipe  # import the base Recipe class
"""
Tutorials:
- https://bookfere.com/tools#calibre
- https://www.jianshu.com/p/0bcb92509309
- https://snowdreams1006.github.io/myGitbook/advance/export.html
Command:
ebook-convert draveness.recipe draveness.mobi --output-profile=kindle
"""
class DravenessBlog(BasicNewsRecipe):  # new class inheriting from BasicNewsRecipe
# ///////////////////
    # Set e-book metadata
# ///////////////////
    title = "draveness"  # e-book title
    description = u"draveness's blog"  # e-book description
    # cover_url = ''  # e-book cover
    # masthead_url = ''  # masthead image
    __author__ = "draveness"  # author
    language = "zh"  # language
    encoding = "utf-8"  # encoding
# ///////////////////
    # Page-scraping settings
# ///////////////////
    # keep_only_tags = [{ 'class': 'example' }]  # keep only content matched by the selector
    no_stylesheets = True  # strip CSS styles
    remove_javascript = True  # strip JavaScript
    auto_cleanup = True  # automatically clean up the HTML
    delay = 5  # seconds to wait between page fetches
    max_articles_per_feed = 100  # maximum number of articles to fetch
timeout = 10
# ///////////////////
    # Page-content parsing method
# ///////////////////
def parse_index(self):
site = "https://draveness.me/whys-the-design/"
        soup = self.index_to_soup(site)  # parse the index page into a BeautifulSoup object
        articles = []  # empty list of article entries
ultag = soup.findAll("ul")[6]
urls = ultag.findAll("li")
urls.reverse()
for link in urls:
            title = link.a.contents[0].strip()  # extract the article title
            url = link.a.get("href")  # extract the article URL
print(title, url)
articles.append({"title": title, "url": url})
        ans = [(self.title, articles)]  # assemble the final data structure
        return ans  # return a structure Calibre can convert
|
[
"291374108@qq.com"
] |
291374108@qq.com
|
eec8efa198fdd6ce3ad3070fc8265762caf05d1c
|
58141d7fc37854efad4ad64c74891a12908192ed
|
/tests/test_storage2.py
|
b09918d1388d18d6fb7fb62fa653799306d5de22
|
[] |
no_license
|
stanleylio/fishie
|
b028a93b2093f59a8ceee4f78b55a91bb1f69506
|
0685045c07e4105934d713a0fd58c4bc28821ed6
|
refs/heads/master
| 2022-08-14T13:08:55.548830
| 2022-07-29T01:32:28
| 2022-07-29T01:32:28
| 30,433,819
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 569
|
py
|
import unittest,sys
from os.path import expanduser
sys.path.append(expanduser('~'))
from node.storage.storage2 import storage
class TestStorage2(unittest.TestCase):
def test_read_latest_non_null(self):
s = storage()
self.assertTrue(s.read_latest_non_null('node-008', 'ReceptionTime', 'idx'))
#self.assertTrue(parse_SeaFET(m) is not None)
def test_read_last_N_minutes(self):
s = storage()
self.assertTrue(s.read_last_N_minutes('node-007', 'ReceptionTime', 1, 'T_280'))
if __name__ == '__main__':
unittest.main()
|
[
"stanleylio@gmail.com"
] |
stanleylio@gmail.com
|
7f06aa3882b4fc1e0e5f3f8bc66e51bcb16b8038
|
5730e8d500a65992bb21094ffed26e21ccc7c0fd
|
/augment_dnase_pipeline_outputs/metadata/aggregate_ataqc.py
|
2963e434753969d97146b33d89c1eb86a8843a62
|
[] |
no_license
|
kundajelab/atlas_resources
|
35f1df4c09356d7256a6667700b88020396d5642
|
89bcde11921526b9956be48bf367617db4974d31
|
refs/heads/master
| 2021-10-25T07:01:30.127405
| 2021-10-25T00:55:25
| 2021-10-25T00:55:25
| 160,546,622
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,942
|
py
|
import argparse
import collections
import json
import pdb
def parse_args():
parser=argparse.ArgumentParser(description="aggregate ataqc metrics for all samples in a single report")
parser.add_argument("--ataqc_files",default="/oak/stanford/groups/akundaje/projects/atlas/dnase_processed/aggregate_outputs/qc.json.txt")
parser.add_argument("--outf",default="atlas.metadata.report.txt")
parser.add_argument("--mitra_prefix",default="http://mitra.stanford.edu/kundaje/projects/atlas/")
parser.add_argument("--prefix_to_drop_for_oak",default="/oak/stanford/groups/akundaje/projects/atlas/")
parser.add_argument("--hash_to_id",default="/oak/stanford/groups/akundaje/projects/atlas/dnase_processed/processed_all.txt")
parser.add_argument("--fname_hash_index",type=int,default=9)
return parser.parse_args()
def flatten(d, parent_key='', sep='.'):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
def iterate_json(data,val_dict,all_keys,cur_id):
flat_data=flatten(data)
for key in flat_data:
if key not in all_keys:
all_keys.add(key)
val_dict[cur_id][key]=flat_data[key]
return val_dict,all_keys
def main():
args=parse_args()
ataqc_files=open(args.ataqc_files,'r').read().strip().split('\n')
val_dict=dict()
all_keys=set([])
outf=open(args.outf,'w')
hash_to_id=open(args.hash_to_id,'r').read().strip().split('\n')
hash_to_id_dict=dict()
for line in hash_to_id:
tokens=line.split('\t')
cur_hash=tokens[0]
cur_id=tokens[1]
hash_to_id_dict[cur_hash]=cur_id
for fname in ataqc_files:
with open(fname,'r') as cur_f:
data=json.load(cur_f)
#get the report title
report_title=fname.replace(args.prefix_to_drop_for_oak,args.mitra_prefix).replace(".json",".html")
#get the hash
cur_hash=fname.split('/')[args.fname_hash_index]
cur_id=hash_to_id_dict[cur_hash]
print(cur_id+" : "+report_title)
val_dict[cur_id]=dict()
val_dict[cur_id]['path']=report_title
all_keys.add('path')
#iterate through the json file recursively
val_dict,all_keys=iterate_json(data,val_dict,all_keys,cur_id)
outf.write('Dataset')
all_keys=list(all_keys)
for key in all_keys:
outf.write('\t'+key)
outf.write('\n')
for dataset in val_dict:
outf.write(dataset)
for key in all_keys:
if key in val_dict[dataset]:
outf.write('\t'+str(val_dict[dataset][key]))
else:
outf.write('\tNA')
outf.write('\n')
outf.close()
if __name__=="__main__":
main()
|
[
"annashcherbina@gmail.com"
] |
annashcherbina@gmail.com
|
58950f55ea5a2b7fae6b323bfc181434c02aaaee
|
305d25e1d2084761e889057077706b1ba3f9122d
|
/nmigen_boards/arty_z7.py
|
ba432b642cc9588c44c7e5037b12b600668f5a6b
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
nicolas-robin/nmigen-boards
|
d7d8fe29d788f8b1fdcf8da0cf7202a1f0fa741a
|
6fc91b491214b80c56b2b2d031da1a50c7254c56
|
refs/heads/master
| 2020-12-10T15:43:43.358008
| 2020-01-17T21:54:48
| 2020-01-17T21:54:48
| 233,314,458
| 0
| 0
|
NOASSERTION
| 2020-01-12T00:01:57
| 2020-01-12T00:01:57
| null |
UTF-8
|
Python
| false
| false
| 5,359
|
py
|
import os
import subprocess
from nmigen.build import *
from nmigen.vendor.xilinx_7series import *
from .resources import *
__all__ = ["ArtyZ720Platform"]
class ArtyZ720Platform(Xilinx7SeriesPlatform):
device = "xc7z020"
package = "clg400"
speed = "1"
default_clk = "clk125"
resources = [
Resource("clk125", 0,
Pins("H16", dir="i"), Clock(125e6), Attrs(IOSTANDARD="LVCMOS33")),
*SwitchResources(
pins="M20 M19",
attrs=Attrs(IOSTANDARD="LVCMOS33")),
RGBLEDResource(0,
r="N15", g="G17", b="L15", # LD4
attrs=Attrs(IOSTANDARD="LVCMOS33")),
RGBLEDResource(1, # LD5
r="M15", g="L14", b="G14",
attrs=Attrs(IOSTANDARD="LVCMOS33")),
*LEDResources(
pins="R14 P14 N16 M14",
attrs=Attrs(IOSTANDARD="LVCMOS33")),
*ButtonResources(
pins="D19 D20 L20 L19",
attrs=Attrs(IOSTANDARD="LVCMOS33")),
Resource("audio", 0,
Subsignal("pwm", Pins("R18", dir="o")),
Subsignal("sd", PinsN("T17", dir="o")),
Attrs(IOSTANDARD="LVCMOS33")),
Resource("crypto_sda", 0, # ATSHA204A
Pins("J15", dir="io"),
Attrs(IOSTANDARD="LVCMOS33")),
Resource("hdmi_rx", 0, # J10
Subsignal("cec", Pins("H17", dir="io")),
Subsignal("clk", DiffPairs("N18", "P19", dir="i"),
Attrs(IO_TYPE="TMDS_33")),
Subsignal("d", DiffPairs("V20 T20 N20", "W20 U20 P20", dir="i"),
Attrs(IO_TYPE="TMDS_33")),
Subsignal("hpd", Pins("T19", dir="o")),
Subsignal("scl", Pins("U14", dir="io")),
Subsignal("sda", Pins("U15", dir="io")),
Attrs(IOSTANDARD="LVCMOS33")),
Resource("hdmi_tx", 0, # J11
Subsignal("cec", Pins("G15", dir="io")),
Subsignal("clk", DiffPairs("L16", "L17", dir="o"),
Attrs(IO_TYPE="TMDS_33")),
Subsignal("d", DiffPairs("K17 K19 J18", "K18 J19 H18", dir="o"),
Attrs(IO_TYPE="TMDS_33")),
Subsignal("hpd", PinsN("R19", dir="i")),
Subsignal("scl", Pins("M17", dir="io")),
Subsignal("sda", Pins("M18", dir="io")),
Attrs(IOSTANDARD="LVCMOS33"))
]
connectors = [
Connector("pmod", 0, "Y18 Y19 Y16 Y17 - - U18 U19 W18 W19 - -"), # JA
Connector("pmod", 1, "Y14 W14 T10 T11 - - W16 V16 W13 V12 - -"), # JB
Connector("ck_io", 0, {
# Outer Digital Header
"io0": "T14",
"io1": "U12",
"io2": "U13",
"io3": "V13",
"io4": "V15",
"io5": "T15",
"io6": "R16",
"io7": "U17",
"io8": "V17",
"io9": "V18",
"io10": "T16",
"io11": "R17",
"io12": "P18",
"io13": "N17",
# Inner Digital Header
"io26": "U5",
"io27": "V5",
"io28": "V6",
"io29": "U7",
"io30": "V7",
"io31": "U8",
"io32": "V8",
"io33": "V10",
"io34": "W10",
"io35": "W6",
"io36": "Y6",
"io37": "Y7",
"io38": "W8",
"io39": "Y8",
"io40": "W9",
"io41": "Y9",
# Outer Analog Header as Digital IO
"a0": "Y11",
"a1": "Y12",
"a2": "W11",
"a3": "V11",
"a4": "T5",
"a5": "U10",
# Inner Analog Header as Digital IO
"a6": "F19",
"a7": "F20",
"a8": "C20",
"a9": "B20",
"a10": "B19",
"a11": "A20",
# Misc.
"a": "Y13"
}),
Connector("ck_spi", 0, {
"miso": "W15",
"mosi": "T12",
"sck": "H15",
"ss": "F16"
}),
Connector("ck_i2c", 0, {
"scl": "P16",
"sda": "P15"
}),
Connector("xadc", 0, {
# Outer Analog Header
"vaux1_n": "D18",
"vaux1_p": "E17",
"vaux9_n": "E19",
"vaux9_p": "E18",
"vaux6_n": "J14",
"vaux6_p": "K14",
"vaux15_n": "J16",
"vaux15_p": "K16",
"vaux5_n": "H20",
"vaux5_p": "J20",
"vaux13_n": "G20",
"vaux13_p": "G19",
# Inner Analog Header
"vaux12_n": "F20",
"vaux12_p": "F19",
"vaux0_n": "B20",
"vaux0_p": "C20",
"vaux8_n": "A20",
"vaux8_p": "B19"
})
]
def toolchain_program(self, products, name, **kwargs):
xc3sprog = os.environ.get("XC3SPROG", "xc3sprog")
with products.extract("{}.bit".format(name)) as bitstream_filename:
subprocess.run([xc3sprog, "-c", "jtaghs1_fast", "-p", "1", bitstream_filename], check=True)
if __name__ == "__main__":
from .test.blinky import *
ArtyZ720Platform().build(Blinky(), do_program=True)
|
[
"whitequark@whitequark.org"
] |
whitequark@whitequark.org
|
c5c7c9fa51eaaec171b3b32e2cb18f6d63866966
|
8a82a83655f118208692e55d7804d9fa480ad4b6
|
/book/apress/Beginning.Python.Visualization.Crafting.Visual.Transformation.Scripts/Chapter04/src/read_ini.py
|
bf4374d1f2c6de9b410eea7d36a061bd6b2c38a7
|
[] |
no_license
|
xenron/sandbox-da-python
|
0814159da9a91923e4b66c5e40057e381f765e96
|
ab8f1c0d57fdc6006355f613012b84165068c315
|
refs/heads/master
| 2020-04-12T05:41:33.182110
| 2016-12-14T22:57:33
| 2016-12-14T22:57:33
| 60,324,979
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 292
|
py
|
# read an INI (config) file
import ConfigParser
read_opts=ConfigParser.ConfigParser()
read_opts.read('../data/options.ini')
# print parameters and values
for section in read_opts.sections():
print "[%s]" % section
for param in read_opts.items(section):
print param
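# Note (added): this is Python 2 code; under Python 3 the module is imported
# as configparser (lowercase) and the print statements become print() calls.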
|
[
"xenron@outlook.com"
] |
xenron@outlook.com
|
db3596b8480850b3718c2c9d08671fd49db81831
|
0543faeee9f493260e8cbd8a9155a96a8cbb0df2
|
/main_app/migrations/0018_profile.py
|
9f188815d07ced745fd5193ceebf36edd8813264
|
[] |
no_license
|
tanveerahmad1517/Treasuregram
|
134853f298628c161ebe741864cdb581ce80db8f
|
797e0ff1460eb50d90aa385f6fb25990ed7766fa
|
refs/heads/master
| 2020-03-10T13:58:43.912795
| 2018-04-13T14:32:31
| 2018-04-13T14:32:31
| 129,413,871
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 697
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-02-13 13:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_app', '0017_remove_treasure_date'),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(db_column='first_name', max_length=50)),
('last_name', models.CharField(db_column='last_name', max_length=50)),
],
),
]
|
[
"tanveerobjects@gmail.com"
] |
tanveerobjects@gmail.com
|
53171e07178a012d3d4820a34f4a4926dbf039f9
|
909afe0216a37bdc19683d81e533fa6c094329c1
|
/python/fluent_python/17-futures/flags_threadpool.py
|
06c33a7643ddd6282576bba9507f4a1c595c79bf
|
[] |
no_license
|
wxnacy/study
|
af7fdcd9915d668be73c6db81bdc961247e24c73
|
7bca9dc8ec211be15c12f89bffbb680d639f87bf
|
refs/heads/master
| 2023-04-08T17:57:40.801687
| 2023-03-29T08:02:20
| 2023-03-29T08:02:20
| 118,090,886
| 18
| 22
| null | 2022-12-16T03:11:43
| 2018-01-19T07:14:02
|
HTML
|
UTF-8
|
Python
| false
| false
| 481
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: wxnacy(wxnacy@gmail.com)
# Description: download using multiple threads
from concurrent.futures import ThreadPoolExecutor
from flags import download_one, main
MAX_WORKERS = 20
def download_many(suffixs):
workers = min(MAX_WORKERS, len(suffixs))
with ThreadPoolExecutor(workers) as executor:
res = executor.map(download_one, suffixs)
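# Note (added): Executor.map returns a lazy iterator; materializing it with
# list() on the next line drives all results to completion and re-raises any
# exception raised inside a worker.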
return len(list(res))
if __name__ == "__main__":
main(download_many)
|
[
"371032668@qq.com"
] |
371032668@qq.com
|
1998f40d200a554665e76178c4a6f06101755f94
|
74d11e4000d99e43dc4c53c93ac59782ebbe1802
|
/portrait/deeplab/model_test.py
|
c586ccd0ea5c6626ab32562bec21e041fdf8b7cc
|
[
"Apache-2.0"
] |
permissive
|
hiepgaf/portrait
|
f450971e8881e9bcd1f386966651a9a01c1d11ce
|
930a167cbb368cdc2cf906b06b70c035d87e6938
|
refs/heads/master
| 2020-03-19T00:14:06.916160
| 2018-05-14T05:57:29
| 2018-05-14T05:57:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 941
|
py
|
"""Tests for encoder"""
import numpy as np
import tensorflow as tf
from model import deeplab_v3_plus_model
def create_test_inputs(batch, height, width, channels):
"""Create mock Images """
if None in [batch, height, width, channels]:
return tf.placeholder(tf.float32, (batch, height, width, channels))
else:
return tf.to_float(
np.tile(np.reshape(
np.reshape(np.arange(height), [height, 1]) +
np.reshape(np.arange(width), [1, width]),
[1, height, width, 1]),
[batch, 1, 1, channels]))
class DeepLabV3PlusTest(tf.test.TestCase):
def testBuildDeepLabV3Plus(self):
""""Encoder Constructor Test"""
images = create_test_inputs(2, 224, 224, 3)
encoded_features, _ = deeplab_v3_plus_model(
images=images)
self.assertListEqual(
encoded_features.get_shape().as_list(),
[2, 28, 28, 256])
if __name__ == '__main__':
tf.test.main()
|
[
"tdat.nguyen93@gmail.com"
] |
tdat.nguyen93@gmail.com
|
3091099fee02328c00c32ce85434caa6c2918a00
|
d2fdd6b10b0467913971d1408a9a4053f0be9ffb
|
/datahub/investment/project/migrations/0033_add_investment_document_permission.py
|
87a514117458d20c8bf84bc4f1155a3ce0c0b8eb
|
[] |
no_license
|
jakub-kozlowski/data-hub-leeloo
|
fc5ecebb5e4d885c824fc7c85acad8837fcc5c76
|
7f033fcbcfb2f7c1c0e10bec51620742d3d929df
|
refs/heads/master
| 2020-05-18T13:29:14.145251
| 2019-04-30T12:12:50
| 2019-04-30T12:12:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 728
|
py
|
# Generated by Django 2.0.1 on 2018-01-09 16:32
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('investment', '0032_investmentproject_comments'),
]
operations = [
migrations.AlterModelOptions(
name='investmentproject',
options={'default_permissions': ('add', 'change_all', 'delete'), 'permissions': (('read_all_investmentproject', 'Can read all investment project'), ('read_associated_investmentproject', 'Can read associated investment project'), ('change_associated_investmentproject', 'Can change associated investment project'), ('read_investmentproject_document', 'Can read investment project document'))},
),
]
|
[
"reupen@users.noreply.github.com"
] |
reupen@users.noreply.github.com
|
3e5b76060a8d08eb9c454a93b5bf91d108e6268c
|
e2e08d7c97398a42e6554f913ee27340226994d9
|
/pyautoTest-master(ICF-7.5.0)/test_case/scg_old/scg_obj_shell_2nd/test_c40343.py
|
3c334ff2e3e9d1098cbb335f5426d69ef272f5d0
|
[] |
no_license
|
lizhuoya1111/Automated_testing_practice
|
88e7be512e831d279324ad710946232377fb4c01
|
b3a532d33ddeb8d01fff315bcd59b451befdef23
|
refs/heads/master
| 2022-12-04T08:19:29.806445
| 2020-08-14T03:51:20
| 2020-08-14T03:51:20
| 287,426,498
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,783
|
py
|
import pytest
import time
import sys
from page_obj.scg.scg_def import *
from page_obj.scg.scg_def_obj import *
from page_obj.scg.scg_def_log import *
from page_obj.common.rail import *
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
test_id = 40343
# Modify an addr obj that was added as a subnet, then check the log
def test_change_obj_wxw(browser):
try:
login_web(browser, url="10.2.2.81")
# add first, then modify
add_obj_address_wxw(browser, name='obj_add_343', desc='zhe是yi个描述1', subnetip='11.11.11.1', subnetmask='24')
# whichever parameter needs changing can be edited directly
change_obj_address_wxw(browser, name='obj_add_343', desc='zhe是yi个描述2', subnetip='11.11.11.2', subnetmask='32')
time.sleep(2)
# switch to the default frame
browser.switch_to.default_content()
get_log(browser, 管理日志)
browser.switch_to.default_content()
# switch to the left-side frame
browser.switch_to.frame("content")
loginfo = browser.find_element_by_xpath('//*[@id="namearea0"]').text
try:
assert "配置地址对象成功,修改内部对象 [obj_add_343]" in loginfo
rail_pass(test_run_id, test_id)
except:
rail_fail(test_run_id, test_id)
assert "配置地址对象成功,修改内部对象 [obj_add_343]" in loginfo
except Exception as err:
# if any step above raised an error, restart the device and restore its configuration
reload(hostip="10.2.2.81")
print(err)
rail_fail(test_run_id, test_id)
time.sleep(70)
assert False
if __name__ == '__main__':
pytest.main(["-v", "-s", "test_c40343.py"])
|
[
"15501866985@163.com"
] |
15501866985@163.com
|
dc3f80de3fed77fc2506bf296b04f63ce5dd996c
|
f19c5436c7173835a3f1d064541ee742178e213a
|
/mah/divide and conquer/[BOJ]1920_수 찾기.py
|
9bdd791f8797941a03f185c9f847b8104e5e9e83
|
[] |
no_license
|
hongsungheejin/Algo-Study
|
f1c521d01147a6f74320dbc8efe3c1037e970e73
|
d6cb8a2cc6495ccfcfb3477330a3af95895fae32
|
refs/heads/main
| 2023-07-06T10:58:27.258128
| 2021-07-29T02:11:13
| 2021-07-29T02:11:13
| 379,269,918
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
N = int(input())
nums = list(map(int, input().split()))
nums.sort()
M = int(input())
tars = list(map(int, input().split()))
def binary_search(tar):
l, r = 0, len(nums) - 1
while l<=r:
m = (l+r)//2
if nums[m] == tar: return 1
elif nums[m] < tar: l=m+1
else: r=m-1
return 0
for tar in tars:
print(binary_search(tar))
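# Worked example (added): with nums sorted to [1, 2, 3, 4, 5],
# binary_search(3) returns 1 and binary_search(7) returns 0, so each query
# prints 1 if the target is present and 0 otherwise.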
|
[
"mai.hong0924@gmail.com"
] |
mai.hong0924@gmail.com
|
39e2d9dbdc83ea68defe4b575e5d0bee237f89bc
|
205be8d429df36e27cdfc048bfca9212c5a62a87
|
/icu/urls.py
|
ed2cf073014ebef72cfb33b59762ed1241b9df93
|
[] |
no_license
|
KennyChrisUmurundi/HOsto
|
16c8f926282fc48c981532447f1685fbbc2b457c
|
33fa31524a08934f3deb8f622a1b1554d8ef1af4
|
refs/heads/master
| 2022-04-01T02:42:39.146227
| 2020-01-07T11:44:08
| 2020-01-07T11:44:08
| 193,458,881
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
from django.urls import path
from django.contrib.auth import views as auth_views
from . import views as icu_views
from django.conf.urls.static import static
from django.conf import settings
app_name = 'icu'
urlpatterns = [
path('Medical Update/',icu_views.update_list,name="add-medical"),
path('scan/',icu_views.ScanCode,name="ScanCode"),
path('patient/<slug:code>',icu_views.patient,name="patient"),
path('feedback/<slug:code>/<int:id>',icu_views.feedback,name="feedback"),
]
|
[
"ndayikennysmuusic@gmail.com"
] |
ndayikennysmuusic@gmail.com
|
13daadc403ee347a1877bef70ef64461737e38cc
|
5a71ca1f5c964f803350e3c1238cb48986db565c
|
/coinlibbitfinex/coinlibbitfinex/streamapi.py
|
d6eb6285f6a324d0a46d8cc40959a345f9c959cd
|
[] |
no_license
|
tetocode/coinliball
|
fd644cbc16039ecad7e43228ea4e287ead5c8e5f
|
41ebbac13c1fbba98aedaa766b9a505cb157f374
|
refs/heads/master
| 2022-09-28T21:58:08.130006
| 2020-06-04T03:00:56
| 2020-06-04T03:00:56
| 269,247,318
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,624
|
py
|
import json
import logging
from typing import Hashable, Dict
from websocket import WebSocket
from coinlib.datatypes.streamdata import StreamData
from coinlib.trade.websocketstreamapi import WebSocketStreamApi
logger = logging.getLogger(__name__)
class StreamApi(WebSocketStreamApi):
URL = 'wss://api.bitfinex.com/ws'
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._subscriptions = {}
self._channel_id_map: Dict[int, Hashable] = {}
def _process_subscription_q(self, ws: WebSocket):
# process one
if len(self._subscription_q):
op, (key, params) = self._subscription_q.popleft()
if op == 'subscribe':
self._subscriptions[key] = params
self._subscribe_channel(params)
logger.debug(f'subscribe {key} {params}')
elif op == 'unsubscribe':
params = self._subscriptions.pop(key, None)
if params is not None:
for channel_id, v in self._channel_id_map.items():
if v == key:
self._unsubscribe_channel(channel_id)
logger.debug(f'unsubscribe {key} {params} {channel_id}')
break
else:
assert False, f'unknown operation={op}'
def _subscribe_channel(self, params: dict):
request = dict(event='subscribe')
request.update(params)
self.send_message(request)
def _unsubscribe_channel(self, channel_id: int):
self.send_message({
'event': 'unsubscribe',
'chanId': channel_id,
})
def on_message(self, message_data: str):
message = json.loads(message_data)
if isinstance(message, dict):
event = message.get('event')
if event == 'info':
logger.debug(f'event info {message}')
return
elif event == 'subscribed':
self.on_subscribed(message)
return
elif event == 'unsubscribed':
self.on_unsubscribed(message)
return
elif event == 'error':
self.on_error(message)
return
else:
logger.warning(f'event unsupported {message}')
return
if isinstance(message, list):
self.on_channel_data(message)
return
logger.warning(f'unknown message {message}')
def on_subscribed(self, message: dict):
channel_name = message['channel']
for key, params in self._subscriptions.items():
if channel_name == params.get('channel'):
if channel_name == 'book':
# TODO: distinguish between order_book and raw_order_book
if message['pair'].upper() != params.get('pair', '').upper():
continue
channel_id = int(message['chanId'])
self._channel_id_map[channel_id] = key
logger.debug(f'event subscribed {message}')
return
logger.warning(f'unknown event subscribed {message}')
def on_unsubscribed(self, message: dict):
_ = self
logger.debug(f'event unsubscribed {message}')
def on_error(self, message: dict):
_ = self
logger.error(f'event error {message}')
def on_channel_data(self, data: list):
channel_id = data[0]
key = self._channel_id_map.get(channel_id)
if key:
self.on_raw_data(StreamData(key, data))
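# Usage sketch (added; illustrative, matching the queue format consumed by
# _process_subscription_q above - the channel/pair values are assumptions):
#   a caller enqueues ('subscribe', (key, {'channel': 'trades', 'pair': 'BTCUSD'}))
# and, once Bitfinex acknowledges the subscription, channel payloads are
# delivered as StreamData(key, raw_list) through on_raw_data().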
|
[
"_"
] |
_
|
8af84a01d80c776522cf1031d8232f43427a9540
|
66bb3f65f0157a2b5475903c90a54d5173bc4f0a
|
/djthia/core/views.py
|
702cc124bdbc259bc6fdc7f8295d8de0cd43d8fa
|
[
"MIT"
] |
permissive
|
carthage-college/django-djthia
|
691233049bcb05391fd82e390edb717f3bc0588a
|
52401592291a980c7226c0573d415e7cdb8c20d3
|
refs/heads/master
| 2023-03-04T08:22:03.055448
| 2023-02-24T18:33:12
| 2023-02-24T18:33:12
| 249,989,382
| 0
| 0
|
MIT
| 2023-02-24T18:33:56
| 2020-03-25T13:43:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,886
|
py
|
# -*- coding: utf-8 -*-
import json
import requests
from datetime import datetime
from django.conf import settings
from django.core.cache import cache
from django.http import HttpResponse
from django.shortcuts import render
from django.urls import reverse_lazy
from django.utils.safestring import mark_safe
from django.views.decorators.csrf import csrf_exempt
from djauth.decorators import portal_auth_required
from djthia.core.decorators import eligibility
@portal_auth_required(
session_var='DJTHIA_AUTH',
redirect_url=reverse_lazy('access_denied'),
)
@eligibility
def home(request):
"""Application home."""
return render(request, 'home.html', {'year': datetime.now().year})
@csrf_exempt
@portal_auth_required(
session_var='DJTHIA_AUTH',
redirect_url=reverse_lazy('access_denied'),
)
def clear_cache(request, ctype='blurbs'):
"""Clear the cache for API content."""
cid = request.POST.get('cid')
request_type = 'post'
if not cid:
cid = request.GET.get('cid')
request_type = 'get'
if cid:
key = 'livewhale_{0}_{1}'.format(ctype, cid)
cache.delete(key)
timestamp = datetime.timestamp(datetime.now())
earl = '{0}/live/{1}/{2}@JSON?cache={3}'.format(
settings.LIVEWHALE_API_URL, ctype, cid, timestamp,
)
try:
response = requests.get(earl, headers={'Cache-Control': 'no-cache'})
text = json.loads(response.text)
cache.set(key, text)
api_data = mark_safe(text['body'])
except ValueError:
api_data = "Cache was not cleared."
if request_type == 'post':
content_type = 'text/plain; charset=utf-8'
else:
content_type = 'text/html; charset=utf-8'
else:
api_data = "Requires a content ID"
# content_type is otherwise only assigned in the branch above; give it a default
content_type = 'text/html; charset=utf-8'
return HttpResponse(api_data, content_type=content_type)
|
[
"plungerman@gmail.com"
] |
plungerman@gmail.com
|
15964455a90498f40c1b5832baba1979f60603a1
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_3_neat/16_0_3_stanm_coin-jam.py
|
d03fbeb3caa801b73d052e9fa018ecc9ef309635
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 945
|
py
|
#! /usr/bin/python
import sys
def rinp():
one = input()
_ = input().split(' ')
N = int(_[0])
J = int(_[1])
return (N, J)
def get_binary(num):
return "{0:b}".format(num)
def in_base(stng, base):
return int(stng, base)
def get_div(x):
for d in range(2, x):
if d * d > x:
return 1
if x % d == 0:
return d
def check_num(x):
bnry = get_binary(x)
divs = []
for base in range(2, 11):
t = in_base(bnry, base)
div = get_div(t)
if div == 1:
return 0
divs.append(div)
print (bnry, " ".join([str(d) for d in divs]))
return 1
def main():
(N, J) = rinp()
start = 2 ** (N - 1) + 1
end = 2 ** N - 1
print ("Case #1:")
count = 0
for x in range(end, start, -2):
get_binary(x)
count += check_num(x)
if count == J:
break
if __name__ == '__main__':
main()
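# Note (added): stepping from 2**N - 1 downward by 2 keeps the lowest binary
# digit 1, and staying above 2**(N - 1) keeps the highest digit 1, so every
# candidate reads as a length-N string of 0s and 1s starting and ending in 1.
# Since range() excludes its stop value, `start` itself is never tested; the
# loop relies on finding J jamcoins before running out of candidates.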
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
49876f9f114cb015c6ec0352ca7ce0cdded1edee
|
0393e64ac4ed8e3d745b31d44836b58571faaabb
|
/aefingar_forritun/daemi21-while.py
|
62b274d4f84b333ad325bb3863b604122476a8e9
|
[] |
no_license
|
danielthorr18/forritun_git
|
c3647e1e6dd35cd55287bb2d51066d8ab55ea931
|
b544371664a15dd0660aef83cdf474e506e1b412
|
refs/heads/master
| 2020-03-28T00:04:50.700712
| 2018-11-15T15:26:42
| 2018-11-15T15:26:42
| 147,368,645
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
turns = int(input("Enter a number: "))
counter = 0
while counter < turns:
pick = int(input("Enter a number: "))
if pick % 2 == 1:
print("you picked", pick)
counter += 1
|
[
"danielr18@ru.is"
] |
danielr18@ru.is
|
235bea81a7895dc78c4ca7bd704cd9fc6093faec
|
7c5fb33929116bb77b438de3ead93b3978b5af71
|
/alf/networks/action_encoder.py
|
16792ab6d227f26716c18cf61406688f1e6c33a0
|
[
"Apache-2.0"
] |
permissive
|
HorizonRobotics/alf
|
d6dac891322a81ccb7e2a9749139627b1eda28cb
|
b00ff2fa5e660de31020338ba340263183fbeaa4
|
refs/heads/pytorch
| 2023-08-21T18:51:41.370566
| 2023-08-16T00:07:22
| 2023-08-16T00:07:22
| 178,459,453
| 288
| 57
|
Apache-2.0
| 2023-09-14T20:40:20
| 2019-03-29T18:44:07
|
Python
|
UTF-8
|
Python
| false
| false
| 2,719
|
py
|
# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple parameterless action encoder."""
import numpy as np
import torch
import torch.nn.functional as F
import alf
from .network import Network
class SimpleActionEncoder(Network):
"""A simple encoder for action.
It encodes discrete action to one hot representation and use the original
continous actions. The output is the concat of all of them after flattening.
"""
def __init__(self, action_spec):
"""
Args:
action_spec (nested BoundedTensorSpec): spec for actions
"""
def check_supported_spec(spec):
if spec.is_discrete:
assert np.min(spec.minimum) == np.max(spec.minimum) == 0
assert np.min(spec.maximum) == np.max(spec.maximum)
alf.nest.map_structure(check_supported_spec, action_spec)
self._action_spec = action_spec
super().__init__(input_tensor_spec=action_spec, name="ActionEncoder")
def forward(self, inputs, state=()):
"""Generate encoded actions.
Args:
inputs (nested Tensor): action tensors.
Returns:
nested Tensor with the same structure as inputs.
"""
alf.nest.assert_same_structure(inputs, self._action_spec)
actions = inputs
outer_rank = alf.nest.utils.get_outer_rank(inputs, self._action_spec)
def _encode_one_action(action, spec):
if spec.is_discrete:
num_actions = spec.maximum - spec.minimum + 1
if num_actions.ndim == 0:
num_actions = int(num_actions)
else:
num_actions = int(num_actions[0])
a = F.one_hot(action, num_actions).to(torch.float32)
else:
a = action
if outer_rank > 0:
return a.reshape(*a.shape[:outer_rank], -1)
else:
return a.reshape(-1)
actions = alf.nest.map_structure(_encode_one_action, actions,
self._action_spec)
return torch.cat(alf.nest.flatten(actions), dim=-1), ()
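# Usage sketch (added; illustrative and hedged - the exact spec constructor
# below is an assumption, not taken from this file):
#   spec = alf.tensor_specs.BoundedTensorSpec((), dtype='int64', minimum=0, maximum=3)
#   encoder = SimpleActionEncoder(spec)
#   encoded, _ = encoder(torch.tensor([2]))  # one-hot of length 4: [0., 0., 1., 0.]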
|
[
"noreply@github.com"
] |
HorizonRobotics.noreply@github.com
|
4c4e27e45b34e9331a3ec84ac79cfdf43b698848
|
f2543f7266cc6f6bebee3d14081daaa676a6f80a
|
/tensorflow_federated/python/research/optimization/emnist_ae/dataset_test.py
|
aa4b7a71a5ea7055bce4f6d2649dd8c65cb6a93a
|
[
"Apache-2.0"
] |
permissive
|
matech96/federated
|
d497d24e64399f6b1da673a8457e88a18bc29473
|
b30a26d66162bd02a89a12f119e17925d161a26b
|
refs/heads/master
| 2022-11-12T10:20:34.483506
| 2020-06-11T21:13:02
| 2020-06-11T21:13:30
| 271,650,529
| 0
| 0
|
Apache-2.0
| 2020-06-11T21:31:51
| 2020-06-11T21:31:50
| null |
UTF-8
|
Python
| false
| false
| 1,962
|
py
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow_federated.python.research.optimization.emnist_ae import dataset
TEST_BATCH_SIZE = dataset.TEST_BATCH_SIZE
class DatasetTest(tf.test.TestCase):
def test_emnist_dataset_structure(self):
emnist_train, emnist_test = dataset.get_emnist_datasets(
client_batch_size=10, client_epochs_per_round=1, only_digits=True)
self.assertEqual(len(emnist_train.client_ids), 3383)
sample_train_ds = emnist_train.create_tf_dataset_for_client(
emnist_train.client_ids[0])
train_batch = next(iter(sample_train_ds))
train_batch_shape = train_batch[0].shape
test_batch = next(iter(emnist_test))
test_batch_shape = test_batch[0].shape
self.assertEqual(train_batch_shape.as_list(), [10, 28*28])
self.assertEqual(test_batch_shape.as_list(), [TEST_BATCH_SIZE, 28*28])
def test_global_emnist_dataset_structure(self):
global_train, global_test = dataset.get_centralized_emnist_datasets(
batch_size=32, only_digits=False)
train_batch = next(iter(global_train))
train_batch_shape = train_batch[0].shape
test_batch = next(iter(global_test))
test_batch_shape = test_batch[0].shape
self.assertEqual(train_batch_shape.as_list(), [32, 28*28])
self.assertEqual(test_batch_shape.as_list(), [TEST_BATCH_SIZE, 28*28])
if __name__ == '__main__':
tf.test.main()
|
[
"tensorflow.copybara@gmail.com"
] |
tensorflow.copybara@gmail.com
|
caf01fee84b8f19a586c8dadd8b3aa0ec0be2030
|
be3f8a09a5b8859fffa07673cdfd17723e843b86
|
/src/Socket_Control/coordinate_collation_interface.py
|
61179a2af41d390603fb1d43dad2348078877ed4
|
[
"MIT"
] |
permissive
|
SamKaiYang/2019_Hiwin_Shaking
|
88646a0a9ff87dfe96a3fb90ede44602bd413d53
|
d599f8c87dc4da89eae266990d12eb3a8b0f3e16
|
refs/heads/master
| 2020-07-22T13:25:21.063822
| 2019-09-09T03:29:11
| 2019-09-09T03:29:11
| 207,216,404
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,413
|
py
|
#!/usr/bin/env python3
# license removed for brevity
#encoding:utf-8
import tkinter as tk
import shake_strategy_trigger as shake_trig
import shake_strategy_content as shake_cont
# interface for collation
# =======================================================================
# =23/07/2019:add above pos_collation =
# =======================================================================
collate_speed=15
def LeftCollate():
shake_cont.InitData(shake_cont.delt_z)
# shake_cont.Left(shake_cont.Animation_Action)
shake_cont.Left(shake_trig.Hiwin_Solo_Action)
shake_cont.InitData(-shake_cont.delt_z)
def RightCollate():
shake_cont.InitData(shake_cont.delt_z)
# shake_cont.Right(shake_cont.Animation_Action)
shake_cont.Right(shake_trig.Hiwin_Solo_Action)
shake_cont.InitData(-shake_cont.delt_z)
def ArmTestAct():
shake_cont.InitData(shake_cont.delt_z)
# shake_cont.ArmTest(shake_cont.Animation_Action)
shake_cont.ArmTest(shake_trig.Hiwin_Action)
shake_cont.InitData(-shake_cont.delt_z)
def CupCollate():
global collate_speed
shake_cont.InitData(shake_cont.delt_z)
# Action=shake_cont.Animation_Action
Action=shake_trig.Hiwin_Solo_Action
shake_cont.SpeedModeToggle(1)
Action.ArmMove(shake_cont.Above_Shake_Pos,collate_speed,shake_cont.gp_stop,'move above the shaker cup')
Action.ArmMove(shake_cont.Pre_Grip_Pos,collate_speed,shake_cont.gp_stop,'rotate')
Action.ArmMove(shake_cont.Grip_Shake_For_Pour_Pos,collate_speed,shake_cont.gp_stop,'move to the shaker cup')
tk.messagebox.showinfo(message='Shake OK?')
Action.GripCtrl(shake_cont.Grip_Shake_For_Pour_Pos,collate_speed,shake_cont.gp_tight_catch,'move to the shaker cup','grip the shaker cup')
tk.messagebox.showinfo(message='Shake OK?')
Action.ArmMove(shake_cont.Lift_Up_Full_Shake_Pos,collate_speed,shake_cont.gp_stop,'move above the empty shaker slot')
Action.ArmMove(shake_cont.Pour_Product_Ready_Pos,collate_speed,shake_cont.gp_stop,'get ready to pour the drink into the cup')
Action.ArmMove(shake_cont.Pour_Product_Pour_Pos,collate_speed,shake_cont.gp_stop,'pour the drink into the cup')
Action.ArmMove(shake_cont.Pour_Product_Down_Pos,collate_speed,shake_cont.gp_stop,'move down')
tk.messagebox.showinfo(message='Cup OK?')
Action.ArmMove(shake_cont.Pour_Product_Pour_Pos,collate_speed,shake_cont.gp_stop,'move up')
Action.ArmMove(shake_cont.Pour_Product_Ready_Pos,collate_speed,shake_cont.gp_stop,'finish pouring the drink into the cup')
Action.ArmMove(shake_cont.Lift_Up_Full_Shake_Pos,collate_speed,shake_cont.gp_stop,'move above the empty shaker slot')
Action.GripCtrl(shake_cont.Grip_Shake_For_Pour_Pos,collate_speed,shake_cont.gp_open,'put down the shaker cup','release the shaker cup')
tk.messagebox.showinfo(message='Cup OK?')
Action.ArmMove(shake_cont.Pre_Grip_Pos,collate_speed,shake_cont.gp_stop,'move above the shaker cup')
Action.ArmMove(shake_cont.Above_Shake_Pos,collate_speed,shake_cont.gp_stop,'rotate')
tk.messagebox.showinfo(message='Collation Finished!')
Action.ArmMove(shake_cont.Home_Pos,collate_speed,shake_cont.gp_stop,'move to the home position')
shake_cont.InitData(-shake_cont.delt_z)
def LidCollate():
shake_cont.InitData(shake_cont.delt_z)
# Action=shake_cont.Animation_Action
Action=shake_trig.Hiwin_Solo_Action
shake_cont.SpeedModeToggle(1)
Action.LimitArmMove(shake_cont.Above_Lid_Pos,collate_speed,5,shake_cont.gp_stop,'move above the shaker lid')
Action.GripCtrl(shake_cont.Lid_Pos,collate_speed,shake_cont.gp_tight_catch,'move to the shaker lid','grip the shaker lid')
tk.messagebox.showinfo(message='Lid OK?')
Action.ArmMove(shake_cont.Above_Lid_Pos,collate_speed,shake_cont.gp_stop,'pick up the shaker lid')
Action.ArmMove(shake_cont.Above_Shake_Pos,collate_speed,shake_cont.gp_stop,'move above the shaker cup')
Action.LimitArmMove(shake_cont.Collate_Lid_Pos,collate_speed,5,shake_cont.gp_stop,'put the lid on')
tk.messagebox.showinfo(message='Lid OK?')
Action.ArmMove(shake_cont.Above_Shake_Pos,collate_speed,shake_cont.gp_stop,'move above the shaker cup')
Action.ArmMove(shake_cont.Above_Lid_Pos,collate_speed,shake_cont.gp_stop,'move above the shaker lid')
Action.GripCtrl(shake_cont.Lid_Pos,collate_speed//2,shake_cont.gp_open,'put down the shaker lid','release the shaker lid')
tk.messagebox.showinfo(message='Collation Finished!')
Action.ArmMove(shake_cont.Home_Pos,collate_speed,shake_cont.gp_stop,'move to the home position')
shake_cont.InitData(-shake_cont.delt_z)
def PosCollate():
shake_cont.InitData(shake_cont.delt_z)
pos_list = [shake_cont.Home_Pos,shake_cont.Full_Ice_Pos,shake_cont.Above_Ice_Pos,
shake_cont.Above_Duo_Duo_Pos,shake_cont.Duo_Duo_Pos,shake_cont.Above_Duo_Duo_Pos,
shake_cont.Above_Dong_Gua_T_Pos,shake_cont.Dong_Gua_T_Pos,shake_cont.Above_Dong_Gua_T_Pos,
shake_cont.Above_Blace_T_Pos,shake_cont.Blace_T_Pos,shake_cont.Above_Blace_T_Pos,
shake_cont.Above_Green_T_Pos,shake_cont.Green_T_Pos,shake_cont.Above_Green_T_Pos,
shake_cont.Above_Lid_Pos,shake_cont.Back_Sugar_Pos,shake_cont.Above_Sugar_Unspined_Pos,
shake_cont.Above_Sugar_Unspined_Pos,shake_cont.Back_Sugar_Pos]
pause_list=[1,4,7,10,13,18]
shake_cont.SpeedModeToggle(1)
for i in range(len(pos_list)):
shake_trig.Hiwin_Action.ArmMove(pos_list[i],collate_speed,0,'test point({}/{})'.format(i+1,len(pos_list)))
# shake_cont.Animation_Action.ArmMove(pos_list[i],20,0,'test point({}/{})'.format(i+1,len(pos_list)))
if i in pause_list:
tk.messagebox.showinfo(message='Next Point?')
tk.messagebox.showinfo(message='Collation Finished!')
# shake_cont.Home(shake_cont.Animation_Action)
shake_cont.Home(shake_trig.Hiwin_Action)
shake_cont.InitData(-shake_cont.delt_z)
def Collation():
collate = tk.Toplevel()
collate.title('Coordinate Collation')
collate.geometry('620x355')
collate.wm_attributes("-topmost",1)
def CollateOK():
collate.destroy()
# shake_cont.Home(shake_cont.Animation_Action)
shake_cont.Home(shake_trig.Hiwin_Solo_Action)
arm_test = tk.Button(collate,text='Arm Test',font=('Arial', 15),width=45,height=2,command=ArmTestAct)
arm_test.place(x=50,y=35)
left_collate = tk.Button(collate,text='Left',font=('Arial', 15),width=19,height=2,command=LeftCollate)
left_collate.place(x=50,y=110)
right_collate = tk.Button(collate,text='Right',font=('Arial', 15),width=19,height=2,command=RightCollate)
right_collate.place(x=335,y=110)
pos_collate = tk.Button(collate,text='Pos',font=('Arial', 15),width=12,height=2,command=PosCollate)
pos_collate.place(x=50,y=185)
cup_collate = tk.Button(collate,text='Cup',font=('Arial', 15),width=12,height=2,command=CupCollate)
cup_collate.place(x=230,y=185)
lid_collate = tk.Button(collate,text='Lid',font=('Arial', 15),width=12,height=2,command=LidCollate)
lid_collate.place(x=410,y=185)
collate_ok = tk.Button(collate,text='OK',font=('Arial', 15),width=45,height=2,command=CollateOK)
collate_ok.place(x=50,y=260)
def BackHome():
collate.destroy()
# shake_cont.Home(shake_cont.Animation_Action)
shake_cont.Home(shake_trig.Hiwin_Solo_Action)
collate.protocol('WM_DELETE_WINDOW', BackHome)
collate.mainloop()
|
[
"tt00621212@gmail.com"
] |
tt00621212@gmail.com
|
7f824ee40e811bbb0b10b06bebe9de412a97a178
|
65e07d1e35598e5686e743e9bdefcdd5e1269a0d
|
/archiveit_redirect.py
|
51440f3afb3374425033df8ef6d544f7754a5e5a
|
[] |
no_license
|
ale-gaeta/bentley_scripts
|
94dbf3e120c218ec0af8ed235bc304ca45b3518e
|
2ad4b986212715a495036697d78952dc53dad74c
|
refs/heads/master
| 2023-03-15T14:12:12.595590
| 2016-06-30T19:02:55
| 2016-06-30T19:02:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,501
|
py
|
import os
from os.path import join
import requests
from lxml import etree
import csv
import re
import HTMLParser
def get_redirect_metadata(redirect_dict, collection_id, redirect_dir):
skip = ['createdDate','lastUpdatedDate','active','public','note','url']
starting_seeds = {}
for seed in redirect_dict:
starting_seeds[seed] = ''
with requests.Session() as s:
collection_feed = s.get('https://partner.archive-it.org/seam/resource/collectionFeed?accountId=934&collectionId=' + collection_id)
collection_metadata = etree.fromstring(collection_feed.text.encode('utf-8'))
tree = etree.ElementTree(collection_metadata)
seeds = tree.xpath('//seed')
for seed in seeds:
url = seed.xpath('./url')[0].text
if url in starting_seeds:
starting_seeds[url] = tree.getpath(seed)
redirect_metadata = []
add_deactivate = {}
redirect_investigate = {}
entity_parser = HTMLParser.HTMLParser()
for seed in starting_seeds:
if len(starting_seeds[seed]) > 0:
new_seed = redirect_dict[seed]
add_deactivate[seed] = new_seed
seed_metadata = {}
seed_path = starting_seeds[seed]
seed_element = tree.xpath(seed_path)[0]
for elem in seed_element.xpath('.//*'):
if elem.text is not None and not elem.tag in skip and not 'name' in elem.attrib:
elem_name = elem.tag
elem_text = entity_parser.unescape(elem.text.replace('“','"').replace('”','"').replace('’',"'"))
if elem_name not in seed_metadata:
seed_metadata[elem_name] = []
seed_metadata[elem_name].append(elem_text.encode('utf-8'))
elif 'name' in elem.attrib:
if elem.attrib['name'] not in skip:
elem_name = elem.attrib['name']
elem_text = entity_parser.unescape(elem.text.replace('“','"').replace('”','"').replace('’',"'"))
if elem_name not in seed_metadata:
seed_metadata[elem_name] = []
seed_metadata[elem_name].append(elem_text.encode('utf-8'))
seed_metadata['url'] = []
seed_metadata['url'].append(new_seed)
seed_metadata['Note'] = []
seed_metadata['Note'].append("QA NOTE: This seed was created as a result of the previous seed URL redirecting to this URL. Previous captures under seed URL " + seed)
redirect_metadata.append(seed_metadata)
else:
redirect_investigate[seed] = redirect_dict[seed]
with open(join(redirect_dir,'add_and_deactivate.csv'),'ab') as add_deactivate_csv:
writer = csv.writer(add_deactivate_csv)
writer.writerow(['Add','Deactivate','Deactivation Note'])
for seed, new_seed in add_deactivate.items():
writer.writerow([new_seed, seed, 'QA NOTE: Seed deactivated. Seed URL redirects to ' + new_seed + '. A new seed with the redirected seed URL has been added.'])
if len(redirect_investigate) > 0:
with open(join(redirect_dir,'redirect_investigate.csv'),'ab') as investigate_csv:
writer = csv.writer(investigate_csv)
writer.writerow(['Seed URL','Redirect URL'])
for seed, new_seed in redirect_investigate.items():
writer.writerow([seed, new_seed])
header_order = ['url','Title','Subject','Personal Creator','Corporate Creator','Coverage','Description','Publisher','Note']
redirect_csv = join(redirect_dir,'redirect_metadata.csv')
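# Note (added): a metadata element can repeat for one seed, so the header row
# below repeats each column once per maximum occurrence count, and shorter
# value lists are padded with '' so rows stay aligned with the header.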
header_counts = {}
for seed in redirect_metadata:
for element in seed:
count = len(seed[element])
elem_lower = element.lower()
if element not in header_counts:
header_counts[element] = count
elif count > header_counts[element]:
header_counts[element] = count
for element in header_order:
elem_lower = element.lower()
if element not in header_counts and elem_lower not in header_counts:
header_counts[element] = 1
for seed in redirect_metadata:
for element in header_counts:
if element not in seed:
seed[element] = []
for element in seed:
current_count = len(seed[element])
header_count = header_counts[element]
difference = header_count - current_count
if difference > 0:
seed[element].extend([''] * difference)
header_row = []
header_counts_lower = {k.lower():v for k,v in header_counts.items()}
for element in header_order:
elem_lower = element.lower()
header_row.extend([element] * header_counts_lower[elem_lower])
with open(redirect_csv,'ab') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(header_row)
for seed in redirect_metadata:
row = []
for element in header_order:
elem_lower = element.lower()
if element in seed:
row.extend([item for item in seed[element]])
elif elem_lower in seed:
row.extend([item for item in seed[elem_lower]])
with open(redirect_csv,'ab') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(row)
def main():
job_numbers = raw_input('Enter a comma separated list of job numbers: ')
base_dir = raw_input('Enter the directory in which job files are saved (e.g., U:/web_archives/jobs): ')
jobs = [job.strip() for job in job_numbers.split(',')]
for job in jobs:
redirect_dict = {}
job_dir = join(base_dir,job)
with open(join(job_dir,'seedstatus.csv'),'rb') as csvfile:
reader = csv.reader(csvfile)
first_row = reader.next()
collection_string = first_row[0]
collection_id = re.findall(r'(\d+)\t',collection_string)[0]
redirect_dir = join(job_dir,'redirects')
redirect_csv = join(redirect_dir,'redirect_information.csv')
with open(redirect_csv,'rb') as redirect_csv:
reader = csv.reader(redirect_csv)
next(reader,None)
for row in reader:
seed = row[0].strip()
redirect = row[1].strip()
redirect_dict[seed] = redirect
get_redirect_metadata(redirect_dict,collection_id,redirect_dir)
if __name__ == '__main__':
main()
|
[
"djpillen@umich.edu"
] |
djpillen@umich.edu
|
310c8ae150190d6740b6121ace9773d0a661e430
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/CodeJamData/11/32/13.py
|
c3ba2816bd0c1116751668d1ffb42f0e53b1d0e5
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,460
|
py
|
filename = "B-large.in"
outputname = filename + "out.txt"
inFile = open(filename, 'r')
outFile = open(outputname, 'w')
numCases = int(inFile.readline())
def getTime(fullGapList, ttb, numStations):
currTime = 0
counter = 0
while currTime < ttb and counter < len(fullGapList):
currTime += fullGapList[counter]*2
counter += 1
if counter == len(fullGapList):
return sum(fullGapList)*2
newGapList = fullGapList[counter:]
if currTime != ttb:
newGapList += [(currTime - ttb)/2]
newGapList.sort()
newGapList.reverse()
stations = newGapList[0:numStations]
return sum(fullGapList)*2 - sum(stations)
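# Note (added; inferred from the code rather than the problem statement):
# getTime appears to walk gaps at double cost until the build finishes at
# time ttb, then drops the numStations largest remaining (or partial) gaps
# from the doubled total, i.e. each boosted gap saves one gap-length of time.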
for i in range(numCases):
print i
nextLine = inFile.readline().split()
numStations = int(nextLine[0])
timeToBuild = int(nextLine[1])
numStars = int(nextLine[2])
numGaps = int(nextLine[3])
gapList = []
for j in range(numGaps):
gapList += [int(nextLine[4+j])]
fullGapList = []
while len(fullGapList) < numStars:
fullGapList += gapList
fullGapList = fullGapList[0:numStars]
answer = getTime(fullGapList, timeToBuild, numStations)
outFile.write("Case #" + str(i+1) + ": " + str(answer) + "\n")
inFile.close()
outFile.close()
def oneStation(fullGapList, pos, ttb):
priorTime = sum(fullGapList[0:pos])*2
afterTime = sum(fullGapList[pos+1:])*2
if priorTime > ttb:
return priorTime + fullGapList[pos] + afterTime
elif priorTime + 2*fullGapList[pos] < ttb:
return priorTime + 2*fullGapList[pos] + afterTime
else:
return priorTime + (ttb-priorTime)/2 + fullGapList[pos] + afterTime
def twoStation(fullGapList, pos1, pos2, ttb):
priorTime = sum(fullGapList[0:pos1])*2
if priorTime > ttb:
afterBoost = priorTime + fullGapList[pos1]
elif priorTime + 2*fullGapList[pos1] < ttb:
afterBoost = priorTime + 2*fullGapList[pos1]
else:
afterBoost = priorTime + (ttb-priorTime)/2 + fullGapList[pos1]
priorTime = afterBoost + sum(fullGapList[pos1+1:pos2])*2
if priorTime > ttb:
afterBoost = priorTime + fullGapList[pos2]
elif priorTime + 2*fullGapList[pos2] < ttb:
afterBoost = priorTime + 2*fullGapList[pos2]
else:
afterBoost = priorTime + (ttb-priorTime)/2 + fullGapList[pos2]
return afterBoost + sum(fullGapList[pos2+1:])*2
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
d521724116b490a6181f5b3f286c4bc901268838
|
93ff3a214354128910c5c77824c64678d78e556d
|
/downloads/views.py
|
cd885baeb2cf1ca88ff849bf9cbe45dc2e079bad
|
[
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
samoKrajci/roots
|
1fc2c7f205ba9dc0d9026026253c7349c3a551aa
|
9c6bf6ed30e8e6ff9099e9dca6d56a2df2ef10b0
|
refs/heads/master
| 2021-09-23T02:20:17.927687
| 2017-01-01T19:05:22
| 2017-01-01T19:05:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,090
|
py
|
from sendfile import sendfile
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.contrib.auth.decorators import login_required
@login_required
def download_protected_file(request, model_class, path_prefix, path):
"""
This view allows download of the file at the specified path, if the user
is allowed to. This is checked by calling the model's can_access_files
method.
"""
# filepath is the absolute path, mediapath is relative to media folder
filepath = settings.SENDFILE_ROOT + path_prefix + path
filepath_mediapath = path_prefix + path
if request.user.is_authenticated():
# Superusers can access all files
if request.user.is_superuser:
return sendfile(request, filepath)
else:
# We need to check can_access_files on particular instance
obj = model_class.get_by_filepath(filepath_mediapath)
if obj is not None and obj.can_access_files(request.user):
return sendfile(request, filepath)
raise PermissionDenied
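# A possible wiring sketch (added; illustrative only - the Submission model
# and URL pattern are assumptions, not part of this project). model_class
# must provide get_by_filepath() and can_access_files(user) as used above.
#
#   from django.conf.urls import url
#   urlpatterns = [
#       url(r'^protected/(?P<path>.*)$', download_protected_file,
#           {'model_class': Submission, 'path_prefix': '/submissions'}),
#   ]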
|
[
"tomasbabej@gmail.com"
] |
tomasbabej@gmail.com
|
2578e88ce408cf417424eda2136335cf2eb1c5d0
|
a851830dbfd27d850e887d1cbc56c906512585d2
|
/AS0/ps0-4-code.py
|
7c37625f2438b193a9384e90e5fc0fc757b603e6
|
[] |
no_license
|
AlexisDrch/Computer-Vision
|
d6861f1e11401b6c0b131026bc22a76433b74025
|
dc2687d53af73dd4e973bf969c48ae9f343bd49d
|
refs/heads/master
| 2021-01-25T11:55:47.316194
| 2018-04-06T06:15:49
| 2018-04-06T06:15:49
| 123,440,935
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,549
|
py
|
# ### 4. Arithmetic and Geometric operations
from scipy import misc
from scipy import ndimage
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# ### a.
# input 2 pictures as numpy ndarray
picture_1 = misc.imread('./pictures/ps0-1-a-1.jpg')
picture_2 = misc.imread('./pictures/ps0-1-a-2.jpg')
# set red and blue channel to value 0
mono_g_picture = picture_1.copy()
mono_g_picture[:,:,0] = mono_g_picture[:,:,2] = 0
# In[40]:
green_mg1_values = mono_g_picture[:,:,1].copy()
min_g1_value = np.min(green_mg1_values)
max_g1_value = np.max(green_mg1_values)
mean_g1_value = np.mean(green_mg1_values)
std_g1_value = np.std(green_mg1_values)
print('From the MG1 pixel values : min = {} | max = {} | mean = {} | stand dev = {} '
.format(min_g1_value, max_g1_value, mean_g1_value, std_g1_value))
print('\n')
print('To compute these values, it is necessary to consider the pixel values as a unique array,' +
'here : the green pixel value of all the instances in the picture (green channel). ' +
'Then, basic mathematic ops can be applied.')
# #### b. Operations on mg1
# In[41]:
# substracting the mean
green_mg1_values = green_mg1_values - mean_g1_value
# dividing by the std
green_mg1_values = green_mg1_values / std_g1_value
# multiply by 10
green_mg1_values = green_mg1_values * 10
# add mean
green_mg1_values = green_mg1_values + mean_g1_value
# plot (for notebook) and output the resulting picture
mono_g_picture_flat = mono_g_picture.copy()
mono_g_picture_flat[:,:,1] = green_mg1_values
#plt.imshow(mono_g_picture_flat)
#plt.title('Flat M1g')
#plt.show()
mpimg.imsave('./output/ps0-4-b-1.jpg', mono_g_picture_flat)
# #### c. Shift M1g
# In[42]:
shifted_mg1 = mono_g_picture.copy()
#shift two pixels to the left, except two last columns
for i in range(512):
for j in range(510):
shifted_mg1[i,j] = shifted_mg1[i, j+2]
# plot (for notebook) and output resulting picture
#plt.imshow(shifted_mg1)
#plt.show()
mpimg.imsave('./output/ps0-4-c-1.jpg', shifted_mg1)
# #### d. M1g - shiftedM1g
# In[47]:
sub_m1g = mono_g_picture - shifted_mg1
# verif that green chanel has valid values (not < 0)
verif_array = np.where(sub_m1g < 0)
print(verif_array)
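# Note (added): misc.imread yields uint8 arrays, so the subtraction above
# wraps modulo 256 instead of going negative; verif_array is therefore always
# empty, and casting to a signed dtype first would be needed to detect true
# negative differences.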
# plot (for notebook) and output resulting picture
#plt.imshow(sub_m1g)
#plt.show()
mpimg.imsave('./output/ps0-4-d-1.jpg', sub_m1g)
# The value of a pixel represent its light intensity. Since negative light intensity doesn't exist, negative value for a pixel is a bug, and does not represent a physical quantity.
exit()
|
[
"aleksi.durocher@wanadoo.fr"
] |
aleksi.durocher@wanadoo.fr
|
3bb95ced81396f906f7822e77e1d040cd8901b31
|
d33b2ce08591d23b06ab466f5dd6e302e3d4af2f
|
/fgcz_biobeamer.py.bak
|
36ca192e64d75ab8a4deb5d3140b2ca66875ef94
|
[] |
no_license
|
Python3pkg/BioBeamer
|
8b5fceb94664dbe7ce15603276f9628bbe6d25ca
|
61dc1299fb47ece91ff9a7d333149cb2bfd500f3
|
refs/heads/master
| 2021-01-21T09:28:49.967511
| 2017-05-18T06:10:05
| 2017-05-18T06:10:05
| 91,655,529
| 0
| 0
| null | 2017-05-18T06:10:03
| 2017-05-18T06:10:03
| null |
UTF-8
|
Python
| false
| false
| 1,595
|
bak
|
#!/usr/bin/python
# -*- coding: latin1 -*-
"""
Copyright 2006-2015 Functional Genomics Center Zurich
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Author / Maintainer: Christian Panse <cp@fgcz.ethz.ch>, Witold E. Wolski <wew@fgcz.ethz.ch>
"""
# pip install PyFGCZ
import biobeamer
import sys
import socket
import time
configuration_url = "http://fgcz-s-021.uzh.ch/config/"
if __name__ == "__main__":
print( "hostname is {0}.".format(socket.gethostname()))
bio_beamer = biobeamer.Robocopy()
biobeamer_xsd = "{0}/BioBeamer.xsd".format(configuration_url)
biobeamer_xml = "{0}/BioBeamer.xml".format(configuration_url)
bio_beamer.para_from_url(xsd=biobeamer_xsd,
xml=biobeamer_xml)
bio_beamer.run()
time.sleep(5)
BBChecker = biobeamer.Checker()
BBChecker.para_from_url(xsd=biobeamer_xsd,
xml=biobeamer_xml)
BBChecker.run()
sys.stdout.write("done. exit 0\n")
time.sleep(5)
sys.exit(0)
|
[
"raliclo@gmail.com"
] |
raliclo@gmail.com
|
7a024d0157cfc178d4f1771c56c94ee8a96ad515
|
fd41984178ffba0846fa7ab1f67c1a0843a5e3ff
|
/自动化办公与鼠标键盘模拟/2.读取PDF文件/读取PDF文件.py
|
f2f9dc6056b35277a8a88b16d765a546092ed2d4
|
[] |
no_license
|
LasterSmithKim/Python-Base
|
23f17472ee80f7224e96a4185775c9cd05ac7a98
|
27756126d999ddabf53b6bdc7114903a297464a0
|
refs/heads/master
| 2020-03-28T08:00:11.156911
| 2018-11-28T09:54:51
| 2018-11-28T09:54:51
| 147,939,778
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,066
|
py
|
import sys
import importlib
importlib.reload(sys)
from pdfminer.pdfparser import PDFParser,PDFDocument
from pdfminer.pdfinterp import PDFResourceManager,PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTTextBoxHorizontal,LAParams
from pdfminer.pdfinterp import PDFTextExtractionNotAllowed
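# Note (added): these imports follow the legacy pdfminer API, where PDFDocument
# is importable from pdfminer.pdfparser and linked to a parser via set_parser();
# current pdfminer.six instead provides pdfminer.pdfdocument.PDFDocument(parser).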
def readPDF(path,toPath):
# open the PDF file in binary mode
f = open(path,"rb")
# create the manager - a PDF document parser
parser = PDFParser(f)
# create a PDF document object
pdfFile = PDFDocument()
# link the parser and the document (two-way link between parser and file)
parser.set_document(pdfFile)
pdfFile.set_parser(parser)
# supply the initialization password
pdfFile.initialize()
# check whether the document allows text extraction
if not pdfFile.is_extractable:
raise PDFTextExtractionNotAllowed
else:
# parse the data
# resource manager
manager = PDFResourceManager()
# create a PDF device object
laparams = LAParams()
device = PDFPageAggregator(manager,laparams=laparams)
# create an interpreter object
interpreter = PDFPageInterpreter(manager,device)
# loop over the document, one page at a time
for page in pdfFile.get_pages():
interpreter.process_page(page)
# get the page layout and loop over its elements
layout = device.get_result()
for x in layout:
# check whether x is an LTTextBoxHorizontal element
if(isinstance(x,LTTextBoxHorizontal)):
with open(toPath,"a") as out:
text = x.get_text()
#print(text)
out.write(text+"\n")
path = r"/Users/jinpeihua/PycharmProjects/Python语言基础视频课程/入门教程一/自动化办公与鼠标键盘模拟/2.读取PDF文件/LegalNotices.pdf"
toPath = r"/Users/jinpeihua/PycharmProjects/Python语言基础视频课程/入门教程一/自动化办公与鼠标键盘模拟/2.读取PDF文件/a.txt"
readPDF(path,toPath)
|
[
"kingone@yeah.net"
] |
kingone@yeah.net
|
7b684337197c473ca2fbb5bd628978519553e9fb
|
2f07911e75ded21b80cae89ded82ce38f03a7931
|
/example.py
|
95c0f6863c126d023a70847fc9d9b552a45d593c
|
[] |
no_license
|
benmaier/radial-distance-layout
|
2b1571c34dd167301bfb8ea9750a177ac420cda9
|
9be12e2906138e239e72eeb6082ebcd3d569b3dc
|
refs/heads/master
| 2022-02-23T08:09:58.174365
| 2022-02-12T21:44:41
| 2022-02-12T21:44:41
| 50,359,905
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
from radial_distance_layout import radial_distance_layout
import matplotlib.pyplot as pl
import networkx as nx
paths = [ [ 'a','b','c'] ]
paths += [ [ 'a','b','d'] ]
paths += [ [ 'a','e','f','g'] ]
paths += [ [ 'a','e','f','h'] ]
paths += [ [ 'a','e','i'] ]
paths += [ [ 'a','j','k'] ]
paths += [ [ 'a','j','l'] ]
dists = {'a': 0,
'b':1.1, 'e': 1.2, 'j': 1.4,
'c':2.1, 'd': 2.2, 'f': 2.1, 'i': 2.34, 'k':3.8, 'l':2.5,
'g': 3.9, 'h': 3.8}
T = nx.DiGraph()
for p in paths:
T.add_path(p)
keystr = 'dist'
nx.set_node_attributes(T,keystr,dists)
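# Note (added): this targets networkx 1.x; under networkx >= 2.0 the
# equivalents are nx.add_path(T, p) and nx.set_node_attributes(T, dists, keystr)
# (values-before-name argument order).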
fig,ax = pl.subplots(1,2,figsize=(15,8))
pos = radial_distance_layout(T,keystr,mode='soph')
nx.draw_networkx(T,pos,ax=ax[0])
pos = radial_distance_layout(T,keystr,mode='normal')
nx.draw_networkx(T,pos,ax=ax[1])
pl.show()
|
[
"benjaminfrankmaier@gmail.com"
] |
benjaminfrankmaier@gmail.com
|
3d4f973e6319ac02322d9c9e19a44bb5e11c4a74
|
360c777a2b77be466b1cf7c8fd74d6fd04f56b55
|
/nexus_auth/models/ping.py
|
7afa266c0b95f5154844d34bf4d67367e1726a27
|
[
"MIT"
] |
permissive
|
hreeder/nexus-auth
|
790a3b2623ddf443138a4b0f0af1380dbc4db8ae
|
8d51aef01647e32ba4a284f02de73a2caad7cf49
|
refs/heads/master
| 2021-01-10T10:08:37.190558
| 2016-02-29T12:27:21
| 2016-02-29T12:27:21
| 52,789,087
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,237
|
py
|
from nexus_auth import db
from nexus_auth.models.groups import Group
TYPE_SERVER = 0
TYPE_GROUP = 1
class PingServer(db.Model):
id = db.Column(db.Integer, primary_key=True)
servers = db.Column(db.Text)
display_name = db.Column(db.String(64))
class PingTarget(db.Model):
id = db.Column(db.Integer, primary_key=True)
parent_group_id = db.Column(db.Integer, db.ForeignKey('group.id'))
type = db.Column(db.SmallInteger)
target = db.Column(db.Integer)
def get_target_representation(self):
if self.type == TYPE_SERVER:
server = PingServer.query.filter_by(id=self.target).first()
return "Server: " + server.display_name
elif self.type == TYPE_GROUP:
group = Group.query.filter_by(id=self.target).first()
return "Group: " + group.name
def get_target_name(self):
if self.type == TYPE_SERVER:
server = PingServer.query.filter_by(id=self.target).first()
return server.display_name
elif self.type == TYPE_GROUP:
group = Group.query.filter_by(id=self.target).first()
return group.name
def get_group(self):
return Group.query.filter_by(id=self.parent_group_id).first()
|
[
"harry@harryreeder.co.uk"
] |
harry@harryreeder.co.uk
|
42e0edb633c9498ca865bd735ff7de4fec5a8333
|
e0045eec29aab56212c00f9293a21eb3b4b9fe53
|
/hr_payroll_account/models/hr_payroll_account.py
|
8b32bbc1e6f2d79fb1556864a63d4f9623022933
|
[] |
no_license
|
tamam001/ALWAFI_P1
|
a3a9268081b9befc668a5f51c29ce5119434cc21
|
402ea8687c607fbcb5ba762c2020ebc4ee98e705
|
refs/heads/master
| 2020-05-18T08:16:50.583264
| 2019-04-30T14:43:46
| 2019-04-30T14:43:46
| 184,268,686
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,589
|
py
|
#-*- coding:utf-8 -*-
# Part of ALWAFI. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.tools import float_compare, float_is_zero
class HrPayslipLine(models.Model):
_inherit = 'hr.payslip.line'
def _get_partner_id(self, credit_account):
"""
Get partner_id of slip line to use in account_move_line
"""
# use partner of salary rule or fallback on employee's address
register_partner_id = self.salary_rule_id.register_id.partner_id
partner_id = register_partner_id.id or self.slip_id.employee_id.address_home_id.id
if credit_account:
if register_partner_id or self.salary_rule_id.account_credit.internal_type in ('receivable', 'payable'):
return partner_id
else:
if register_partner_id or self.salary_rule_id.account_debit.internal_type in ('receivable', 'payable'):
return partner_id
return False
class HrPayslip(models.Model):
_inherit = 'hr.payslip'
date = fields.Date('Date Account', states={'draft': [('readonly', False)]}, readonly=True,
help="Keep empty to use the period of the validation(Payslip) date.")
journal_id = fields.Many2one('account.journal', 'Salary Journal', readonly=True, required=True,
states={'draft': [('readonly', False)]}, default=lambda self: self.env['account.journal'].search([('type', '=', 'general')], limit=1))
move_id = fields.Many2one('account.move', 'Accounting Entry', readonly=True, copy=False)
@api.model
def create(self, vals):
if 'journal_id' in self.env.context:
vals['journal_id'] = self.env.context.get('journal_id')
return super(HrPayslip, self).create(vals)
@api.onchange('contract_id')
def onchange_contract(self):
super(HrPayslip, self).onchange_contract()
self.journal_id = self.contract_id.journal_id.id or (not self.contract_id and self.default_get(['journal_id'])['journal_id'])
@api.multi
def action_payslip_cancel(self):
moves = self.mapped('move_id')
moves.filtered(lambda x: x.state == 'posted').button_cancel()
moves.unlink()
return super(HrPayslip, self).action_payslip_cancel()
@api.multi
def action_payslip_done(self):
res = super(HrPayslip, self).action_payslip_done()
for slip in self:
line_ids = []
debit_sum = 0.0
credit_sum = 0.0
date = slip.date or slip.date_to
currency = slip.company_id.currency_id
name = _('Payslip of %s') % (slip.employee_id.name)
move_dict = {
'narration': name,
'ref': slip.number,
'journal_id': slip.journal_id.id,
'date': date,
}
for line in slip.details_by_salary_rule_category:
amount = currency.round(slip.credit_note and -line.total or line.total)
if currency.is_zero(amount):
continue
debit_account_id = line.salary_rule_id.account_debit.id
credit_account_id = line.salary_rule_id.account_credit.id
if debit_account_id:
debit_line = (0, 0, {
'name': line.name,
'partner_id': line._get_partner_id(credit_account=False),
'account_id': debit_account_id,
'journal_id': slip.journal_id.id,
'date': date,
'debit': amount > 0.0 and amount or 0.0,
'credit': amount < 0.0 and -amount or 0.0,
'analytic_account_id': line.salary_rule_id.analytic_account_id.id,
'tax_line_id': line.salary_rule_id.account_tax_id.id,
})
line_ids.append(debit_line)
debit_sum += debit_line[2]['debit'] - debit_line[2]['credit']
if credit_account_id:
credit_line = (0, 0, {
'name': line.name,
'partner_id': line._get_partner_id(credit_account=True),
'account_id': credit_account_id,
'journal_id': slip.journal_id.id,
'date': date,
'debit': amount < 0.0 and -amount or 0.0,
'credit': amount > 0.0 and amount or 0.0,
'analytic_account_id': line.salary_rule_id.analytic_account_id.id,
'tax_line_id': line.salary_rule_id.account_tax_id.id,
})
line_ids.append(credit_line)
credit_sum += credit_line[2]['credit'] - credit_line[2]['debit']
if currency.compare_amounts(credit_sum, debit_sum) == -1:
acc_id = slip.journal_id.default_credit_account_id.id
if not acc_id:
raise UserError(_('The Expense Journal "%s" has not properly configured the Credit Account!') % (slip.journal_id.name))
adjust_credit = (0, 0, {
'name': _('Adjustment Entry'),
'partner_id': False,
'account_id': acc_id,
'journal_id': slip.journal_id.id,
'date': date,
'debit': 0.0,
'credit': currency.round(debit_sum - credit_sum),
})
line_ids.append(adjust_credit)
elif currency.compare_amounts(debit_sum, credit_sum) == -1:
acc_id = slip.journal_id.default_debit_account_id.id
if not acc_id:
raise UserError(_('The Expense Journal "%s" has not properly configured the Debit Account!') % (slip.journal_id.name))
adjust_debit = (0, 0, {
'name': _('Adjustment Entry'),
'partner_id': False,
'account_id': acc_id,
'journal_id': slip.journal_id.id,
'date': date,
'debit': currency.round(credit_sum - debit_sum),
'credit': 0.0,
})
line_ids.append(adjust_debit)
move_dict['line_ids'] = line_ids
move = self.env['account.move'].create(move_dict)
slip.write({'move_id': move.id, 'date': date})
move.post()
return res
class HrSalaryRule(models.Model):
_inherit = 'hr.salary.rule'
analytic_account_id = fields.Many2one('account.analytic.account', 'Analytic Account')
account_tax_id = fields.Many2one('account.tax', 'Tax')
account_debit = fields.Many2one('account.account', 'Debit Account', domain=[('deprecated', '=', False)])
account_credit = fields.Many2one('account.account', 'Credit Account', domain=[('deprecated', '=', False)])
class HrContract(models.Model):
_inherit = 'hr.contract'
_description = 'Employee Contract'
analytic_account_id = fields.Many2one('account.analytic.account', 'Analytic Account')
journal_id = fields.Many2one('account.journal', 'Salary Journal')
class HrPayslipRun(models.Model):
_inherit = 'hr.payslip.run'
journal_id = fields.Many2one('account.journal', 'Salary Journal', states={'draft': [('readonly', False)]}, readonly=True,
required=True, default=lambda self: self.env['account.journal'].search([('type', '=', 'general')], limit=1))
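if __name__ == '__main__':
    # Standalone sketch (assumed, not part of the Odoo module) of the
    # balancing rule in action_payslip_done above: whichever side falls
    # short receives a single adjustment line for the difference.
    debit_sum, credit_sum = 1000.0, 940.0
    if credit_sum < debit_sum:
        print('adjustment credit: %.2f' % (debit_sum - credit_sum))  # 60.00
    elif debit_sum < credit_sum:
        print('adjustment debit: %.2f' % (credit_sum - debit_sum))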
|
[
"50145400+gilbertp7@users.noreply.github.com"
] |
50145400+gilbertp7@users.noreply.github.com
|
8692c4889582e9c8f425306d8b5ac70d4ee7090e
|
e8cb5f716b064043708293f924ed1ba84005e417
|
/examples/Redfish/ex09_find_ilo_mac_address.py
|
d05e42bb446a1e7359fb7eeacd4a665968932786
|
[
"Apache-2.0"
] |
permissive
|
injan0913/python-ilorest-library
|
9207caeab89038f7e6ae803c55de183bda02edb3
|
8507d96cf7b9604a30ae6548cafc0003d1098b72
|
refs/heads/master
| 2020-12-24T22:20:13.135325
| 2016-06-23T18:26:33
| 2016-06-23T18:26:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,598
|
py
|
# Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from redfishobject import RedfishObject
from ilorest.rest.v1_helper import ServerDownOrUnreachableError
def ex9_find_ilo_mac_address(redfishobj):
sys.stdout.write("\nEXAMPLE 9: Find iLO's MAC Addresses\n")
instances = redfishobj.search_for_type("Manager.")
for instance in instances:
tmp = redfishobj.redfish_get(instance["@odata.id"])
response = redfishobj.redfish_get(tmp.dict["EthernetInterfaces"]\
["@odata.id"])
for entry in response.dict["Members"]:
ethernet = redfishobj.redfish_get(entry["@odata.id"])
if "MACAddress" not in ethernet.dict:
sys.stderr.write("\tNIC resource does not contain " \
"'MACAddress' property\n")
else:
sys.stdout.write("\t" + ethernet.dict["Name"] + " = " + \
ethernet.dict["MACAddress"] + "\t(" + \
ethernet.dict["Status"]["State"] + ")\n")
if __name__ == "__main__":
# When running on the server locally use the following commented values
# iLO_host = "blobstore://."
# iLO_account = "None"
# iLO_password = "None"
# When running remotely connect using the iLO address, iLO account name,
# and password to send https requests
iLO_host = "https://10.0.0.100"
iLO_account = "admin"
iLO_password = "password"
# Create a REDFISH object
try:
REDFISH_OBJ = RedfishObject(iLO_host, iLO_account, iLO_password)
except ServerDownOrUnreachableError, excp:
sys.stderr.write("ERROR: server not reachable or doesn't support " \
"RedFish.\n")
sys.exit()
except Exception, excp:
raise excp
ex9_find_ilo_mac_address(REDFISH_OBJ)
|
[
"jackgarcia77@gmail.com"
] |
jackgarcia77@gmail.com
|
08dde7520a5cc6318c6ea6a3daea7417cf1e7d49
|
8050168c08d5bb26f0da6784ca3d536950d43810
|
/activity/migrations/0009_auto_20190305_1508.py
|
b246aa4d037ae5a5422101d5dacb29818286457f
|
[] |
no_license
|
qoutland/docent
|
043f945d8a3016fdc54ee113a108a608e58456dc
|
f4dffaa3b72d922dfb99e40e7f73155ad25a2509
|
refs/heads/master
| 2022-12-15T00:13:24.940849
| 2019-05-02T17:55:43
| 2019-05-02T17:55:43
| 164,701,946
| 1
| 0
| null | 2022-11-22T03:31:43
| 2019-01-08T17:41:28
|
Python
|
UTF-8
|
Python
| false
| false
| 386
|
py
|
# Generated by Django 2.1.3 on 2019-03-05 23:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('activity', '0008_auto_20190305_1507'),
]
operations = [
migrations.AlterField(
model_name='activity',
name='pic_url',
field=models.URLField(blank=True),
),
]
|
[
"="
] |
=
|
5a2ebd58eb237a8079aa77a1e5023bf20e50f182
|
4659f206098fdcaa72b059f1c5e4afe4c5fad3d5
|
/planemo-de/xenv/lib/python3.7/site-packages/galaxy/__init__.py
|
7653e9a409809c7918eef0427eacfbc2d38e427d
|
[] |
no_license
|
Slugger70/galaxy-metabolomics
|
e1ef083316394ace66c1f69c313db0a0fc8c3dec
|
0cbee8fe9e7cf1cc37832751ffdd9f88ff363136
|
refs/heads/master
| 2020-09-19T21:51:16.177730
| 2019-11-26T23:54:43
| 2019-11-26T23:54:43
| 224,306,539
| 0
| 0
| null | 2019-11-26T23:45:53
| 2019-11-26T23:45:52
| null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
# -*- coding: utf-8 -*-
__version__ = '19.5.2'
PROJECT_NAME = "galaxy-lib"
PROJECT_OWNER = PROJECT_USERNAME = "galaxyproject"
PROJECT_URL = "https://github.com/galaxyproject/galaxy-lib"
PROJECT_AUTHOR = 'Galaxy Project and Community'
PROJECT_EMAIL = 'jmchilton@gmail.com'
RAW_CONTENT_URL = "https://raw.github.com/%s/%s/master/" % (
    PROJECT_USERNAME, PROJECT_NAME
)
|
[
"ma.bioinformatics@gmail.com"
] |
ma.bioinformatics@gmail.com
|
8e3355f79679a4b37fc3d64860a4ce31c5548fa8
|
de8cfb5a1d39b40543e8e9d3f960f4b675781a08
|
/dask/dataframe/shuffle.py
|
4e85c7ce2194acd2302fb1ddeb476a43d0f86fd6
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
JDWarner/dask
|
8b5c676d9078ecc498deb8fd47a54e1676c00a5f
|
3dec8e3526520459668ced05f8e144fd7605d5ec
|
refs/heads/master
| 2021-01-18T21:02:18.344193
| 2015-07-15T20:00:51
| 2015-07-15T20:00:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,141
|
py
|
from itertools import count
from collections import Iterator
from math import ceil
from toolz import merge, accumulate, merge_sorted
import toolz
from operator import getitem, setitem
import pandas as pd
import numpy as np
from .. import threaded
from ..optimize import cull
from .core import DataFrame, Series, get, _Frame, tokens
from ..compatibility import unicode
from ..utils import ignoring
from .utils import (strip_categories, unique, shard_df_on_index, _categorize,
get_categories)
def set_index(df, index, npartitions=None, compute=True, **kwargs):
""" Set DataFrame index to new column
Sorts index and realigns Dataframe to new sorted order. This shuffles and
repartitions your data.
"""
npartitions = npartitions or df.npartitions
if not isinstance(index, Series):
index2 = df[index]
else:
index2 = index
divisions = (index2
.quantiles(np.linspace(0, 100, npartitions+1))
.compute()).tolist()
return df.set_partition(index, divisions, compute=compute, **kwargs)
def new_categories(categories, index):
""" Flop around index for '.index' """
if index in categories:
categories = categories.copy()
categories['.index'] = categories.pop(index)
return categories
def set_partition(df, index, divisions, compute=False, **kwargs):
""" Group DataFrame by index
Sets a new index and partitions data along that index according to
divisions. Divisions are often found by computing approximate quantiles.
The function ``set_index`` will do both of these steps.
Parameters
----------
df: DataFrame/Series
Data that we want to re-partition
index: string or Series
Column to become the new index
divisions: list
Values to form new divisions between partitions
See Also
--------
set_index
shuffle
partd
"""
if isinstance(index, _Frame):
assert df.divisions == index.divisions
import partd
p = ('zpartd' + next(tokens),)
# Get Categories
token = next(tokens)
catname = 'set-partition--get-categories-old' + token
catname_new = 'set-partition--get-categories-new' + token
dsk1 = {catname: (get_categories, df._keys()[0]),
p: (partd.PandasBlocks, (partd.Buffer, (partd.Dict,), (partd.File,))),
catname_new: (new_categories, catname,
index.name if isinstance(index, Series) else index)}
# Partition data on disk
name = 'set-partition--partition' + next(tokens)
if isinstance(index, _Frame):
dsk2 = dict(((name, i),
(_set_partition, part, ind, divisions, p))
for i, (part, ind)
in enumerate(zip(df._keys(), index._keys())))
else:
dsk2 = dict(((name, i),
(_set_partition, part, index, divisions, p))
for i, part
in enumerate(df._keys()))
# Barrier
barrier_token = 'barrier' + next(tokens)
dsk3 = {barrier_token: (barrier, list(dsk2))}
if compute:
dsk = merge(df.dask, dsk1, dsk2, dsk3)
if isinstance(index, _Frame):
dsk.update(index.dask)
p, barrier_token = get(dsk, [p, barrier_token], **kwargs)
# Collect groups
name = 'set-partition--collect' + next(tokens)
dsk4 = dict(((name, i),
(_categorize, catname_new, (_set_collect, i, p, barrier_token)))
for i in range(len(divisions) - 1))
dsk = merge(df.dask, dsk1, dsk2, dsk3, dsk4)
if isinstance(index, _Frame):
dsk.update(index.dask)
if compute:
dsk = cull(dsk, list(dsk4.keys()))
return DataFrame(dsk, name, df.columns, divisions)
def barrier(args):
list(args)
return 0
def _set_partition(df, index, divisions, p):
""" Shard partition and dump into partd """
df = df.set_index(index)
df = strip_categories(df)
divisions = list(divisions)
shards = shard_df_on_index(df, divisions[1:-1])
p.append(dict(enumerate(shards)))
def _set_collect(group, p, barrier_token):
""" Get new partition dataframe from partd """
try:
return p.get(group)
except ValueError:
return pd.DataFrame()
def shuffle(df, index, npartitions=None):
""" Group DataFrame by index
Hash grouping of elements. After this operation all elements that have
the same index will be in the same partition. Note that this requires
full dataset read, serialization and shuffle. This is expensive. If
possible you should avoid shuffles.
This does not preserve a meaningful index/partitioning scheme.
See Also
--------
set_index
set_partition
partd
"""
if isinstance(index, _Frame):
assert df.divisions == index.divisions
if npartitions is None:
npartitions = df.npartitions
import partd
p = ('zpartd' + next(tokens),)
dsk1 = {p: (partd.PandasBlocks, (partd.Buffer, (partd.Dict,),
(partd.File,)))}
# Partition data on disk
name = 'shuffle-partition' + next(tokens)
if isinstance(index, _Frame):
dsk2 = dict(((name, i),
(partition, part, ind, npartitions, p))
for i, (part, ind)
in enumerate(zip(df._keys(), index._keys())))
else:
dsk2 = dict(((name, i),
(partition, part, index, npartitions, p))
for i, part
in enumerate(df._keys()))
# Barrier
barrier_token = 'barrier' + next(tokens)
dsk3 = {barrier_token: (barrier, list(dsk2))}
# Collect groups
name = 'shuffle-collect' + next(tokens)
dsk4 = dict(((name, i),
(collect, i, p, barrier_token))
for i in range(npartitions))
divisions = [None] * (npartitions + 1)
dsk = merge(df.dask, dsk1, dsk2, dsk3, dsk4)
if isinstance(index, _Frame):
dsk.update(index.dask)
return DataFrame(dsk, name, df.columns, divisions)
def partition(df, index, npartitions, p):
""" Partition a dataframe along a grouper, store partitions to partd """
rng = pd.Series(np.arange(len(df)))
if isinstance(index, Iterator):
index = list(index)
if not isinstance(index, (pd.Index, pd.core.generic.NDFrame)):
index = df[index]
if isinstance(index, pd.Index):
groups = rng.groupby([abs(hash(x)) % npartitions for x in index])
if isinstance(index, pd.Series):
groups = rng.groupby(index.map(lambda x: abs(hash(x)) % npartitions).values)
elif isinstance(index, pd.DataFrame):
groups = rng.groupby(index.apply(
lambda row: abs(hash(tuple(row))) % npartitions,
axis=1).values)
d = dict((i, df.iloc[groups.groups[i]]) for i in range(npartitions)
if i in groups.groups)
p.append(d)
def collect(group, p, barrier_token):
""" Collect partitions from partd, yield dataframes """
return p.get(group)
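if __name__ == '__main__':
    # Standalone sketch (assumed; plain pandas, no dask/partd) of the hashing
    # rule used by partition() above: equal index values always land in the
    # same bucket, so after the shuffle they share a partition.
    s = pd.Series(range(6), index=list('aabbcc'))
    print({i: abs(hash(i)) % 3 for i in s.index})  # e.g. {'a': 2, 'b': 0, 'c': 1}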
|
[
"mrocklin@gmail.com"
] |
mrocklin@gmail.com
|
6284288aa94622d03c3f24e10f3eb63df2e27dd0
|
22bcb68759d516eea70d18116cd434fcd0a9d842
|
/scrap/infibeam_books_scrap1.py
|
0fbafc3b59b8a0f06df21d43016818f71ac9c0f6
|
[] |
no_license
|
lovesh/abhiabhi-web-scrapper
|
1f5da38c873fea74870d59f61c3c4f52b50f1886
|
b66fcadc56377276f625530bdf8e739a01cbe16b
|
refs/heads/master
| 2021-01-01T17:16:51.577914
| 2014-10-18T15:56:42
| 2014-10-18T15:56:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,183
|
py
|
import downloader
import dom
import urllib2
import re
import time
import math
import pymongo
from collections import defaultdict
import datetime
siteurl='http://www.infibeam.com'
category_browser='http://www.infibeam.com/Books/BrowseCategories.action'
subcategory_browser='http://www.infibeam.com/Books/BrowseCategories.action'
books=[]
book_urls=defaultdict(list)
logfile=open('infibeam_books_log.txt','w')
dl=downloader.Downloader()
dl.addHeaders({'Origin':siteurl,'Referer':siteurl})
shipping_pattern = re.compile('in (\d+) business days', re.I)
def getCategoryUrls():
category_page=dom.DOM(url=category_browser)
category_path='//div[@id="allcategories"]//h3/a'
category_urls=dict((link[0],'http://www.infibeam.com'+link[1]) for link in category_page.getLinksWithXpath(category_path))
return category_urls
def getSubCategoryUrls():
category_page=dom.DOM(url=subcategory_browser)
subcategory_path='//div[@id="allcategories"]//ul/li/a'
subcategory_urls=set('http://www.infibeam.com'+link[1] for link in category_page.getLinksWithXpath(subcategory_path))
return subcategory_urls
def getBookUrlsFromPage(html):
book_url_path='//ul[@class="search_result"]//span[@class="title"]/h2/a'
page_dom=dom.DOM(string=html)
links=set(l[1] for l in page_dom.getLinksWithXpath(book_url_path))
return links
def getBookUrlsOfCategory(cat,category_url):
page=urllib2.urlopen(category_url)
html=page.read()
page.close()
page=dom.DOM(string=html)
urls=getBookUrlsFromPage(html) #get book urls from first page
count_path='//div[@id="search_result"]/div/b[2]'
count=int(page.getNodesWithXpath(count_path)[0].text.replace(',',''))
print count
if count>20:
num_pages=int(math.ceil(count/20.0))
page_urls=set(category_url+'/search?page='+str(page) for page in xrange(2,num_pages))
print page_urls
dl.putUrls(page_urls)
result=dl.download()
for r in result:
status=result[r][0]
html=result[r][1]
if status > 199 and status < 400:
urls.update(getBookUrlsFromPage(html))
url_dict={}
for url in urls:
url_dict[url]=cat
return url_dict
def getAllBookUrls():
global book_urls
category_urls=getCategoryUrls()
start=time.time()
for cat in category_urls:
print('Getting book urls of category %s\n\n'%cat)
urls=getBookUrlsOfCategory(cat,category_urls[cat])
print('Witring book urls of category %s\n\n'%cat)
logfile.write('Witring book urls of category %s\n\n'%cat)
for url in urls:
logfile.write(url+'\n')
book_urls[url].append(urls[url])
logfile.write('\n\n\n\n')
finish=time.time()
    print "All book urls(%s) fetched in %s\n\n" % (len(book_urls), str(finish-start))
logfile.write("All book urls fetched in %s\n\n"%str(finish-start))
logfile.flush()
return book_urls
def parseBookPage(url=None,string=None):
book={}
print url
if url:
try:
doc=dom.DOM(url=url)
except urllib2.HTTPError:
return False
else:
doc=dom.DOM(string=string)
addBox=doc.getNodesWithXpath('//input[@class="buyimg "]')
if url:
book['url']=url
if addBox: #availability check
book['availability']=1 # availability 1 signals "in stock"
m = shipping_pattern.search(doc.html)
if m:
book['shipping']=(int(m.group(1)), )
else:
book['availability']=0
price_path = '//span[@class="infiPrice amount price"]'
price = doc.getNodesWithXpath(price_path)
if len(price) > 0:
book['price']=int(price[0].text.replace(',', ''))
img_path="//img[@id='imgMain']"
book['img_url']=doc.getImgUrlWithXpath(img_path)
tbody_path='//div[@id="ib_products"]/table/tbody'
if len(doc.getNodesWithXpath(tbody_path)) == 0:
tbody_path='//div[@id="ib_products"]/table'
if len(doc.getNodesWithXpath(tbody_path)) == 0:
tbody_path='//table[@style="color:#333; font:verdana,Arial,sans-serif;"]'
data=doc.parseTBody(tbody_path)
if data:
if 'author' in data:
data['author']=data['author'].split(',')
if 'publish date' in data:
m=re.search('(\d+)-(\d+)-(\d+)',data['publish date'])
if m:
data['pubdate']=datetime.date(int(m.group(1)),int(m.group(2)),int(m.group(3)))
book.update(data)
book['scraped_datetime']=datetime.datetime.now()
book['last_modified_datetime']=datetime.datetime.now()
book['site']='infibeam'
product_history={}
if 'price' in book:
product_history['price']=book['price']
if 'shipping' in book:
product_history['shipping']=book['shipping']
product_history['availability']=book['availability']
product_history['datetime']=book['last_modified_datetime']
book['product_history']=[product_history,]
return book
def go():
global books
urls=getAllBookUrls()
dl.putUrls(urls,10)
    start=time.time()
result=dl.download()
finish=time.time()
logfile.write("All books(%s) downloaded in %s"%(len(books),str(finish-start)))
start=time.time()
for r in result:
status=result[r][0]
html=result[r][1]
if status > 199 and status < 400:
book=parseBookPage(string=html)
book['url']=r
if r.find('/Books/') == -1:
book['type']='ebook'
else:
book['type']='book'
books.append(book)
finish=time.time()
logfile.write("All books parsed in %s"%str(finish-start))
return books
def prepareXMLFeed():
books=go()
root=dom.XMLNode('books')
start=time.time()
for book in books:
child=root.createChildNode('book')
child.createChildNodes(book)
f=open('infibeam_books.xml','w')
f.write(root.nodeToString())
f.close()
finish=time.time()
logfile.write("XML file created in %s"%str(finish-start))
|
[
"lovesh.bond@gmail.com"
] |
lovesh.bond@gmail.com
|
676a8d4121ad27fd5bfa82844f08c833b388178c
|
ffab02cf7e1213f91923cb1343cef4616a7de5a7
|
/venv/bin/isort
|
6d23f4819e78f95b81f0dc605acf081309c42fe5
|
[] |
no_license
|
mornville/flask_blog
|
4e50d6c3f835274589b278ce14f2f445b691b087
|
bf66060f3f519170e3d4865e6d85b6543359e9b0
|
refs/heads/master
| 2021-12-28T08:27:04.556959
| 2019-10-01T14:57:09
| 2019-10-01T14:57:09
| 203,522,537
| 0
| 0
| null | 2021-12-13T20:16:58
| 2019-08-21T06:37:56
|
Python
|
UTF-8
|
Python
| false
| false
| 251
|
#!/Users/ashutoshjha/Desktop/flask_blog/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from isort.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"aj97389@gmail.com"
] |
aj97389@gmail.com
|
|
1dffe5f62462692c17b0917a0d9f33174704c851
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_200/1215.py
|
2727d86b5e032b798fe4ae7360d004c88b3cc807
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 757
|
py
|
def tidy(n):
a = list(str(n))
if len(a)>=2:
for i in range(len(a)-1):
if a[i]>a[i+1]:
a[i] = str(int(a[i])-1)
for j in range(i+1, len(a)):
a[j] = '9'
a = ''.join(a)
out = int(a)
return out
def check_tidy(n):
a = tidy(n)
b = list(str(a))
b.sort()
b = ''.join(b)
b = int(b)
if a == b:
return a
else:
return check_tidy(a)
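# Worked example (assumed addition, not in the original submission): for 132
# the digit 3 exceeds 2, so tidy() lowers it and fills with 9s -> 129, which
# is nondecreasing, so the recursion stops there.
assert check_tidy(132) == 129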
in_f = open("i.in", 'r')
ou_f = open("o.out", 'w')
T = int(in_f.readline())
for i in range(T):
s = in_f.readline().strip()
k = int(s)
out = check_tidy(k)
j = "Case #" + str(i+1) +": " + str(out) + "\n"
ou_f.write(j)
in_f.close()
ou_f.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
93149bb3a6b9892081504d75a719a82d1a7fa2e1
|
f0a44b63a385e1c0f1f5a15160b446c2a2ddd6fc
|
/examples/transform_cube.py
|
f9f45274bed9265c28b79a03dfd4e3ccccfa5ad1
|
[
"MIT"
] |
permissive
|
triroakenshield/ezdxf
|
5652326710f2a24652605cdeae9dd6fc58e4f2eb
|
82e964a574bcb86febc677bd63f1626318f51caf
|
refs/heads/master
| 2023-08-17T12:17:02.583094
| 2021-10-09T08:23:36
| 2021-10-09T08:23:36
| 415,426,069
| 1
| 0
|
MIT
| 2021-10-09T21:31:25
| 2021-10-09T21:31:25
| null |
UTF-8
|
Python
| false
| false
| 1,407
|
py
|
# Copyright (c) 2020-2021 Manfred Moitzi
# License: MIT License
from pathlib import Path
import math
import ezdxf
from ezdxf import zoom
from ezdxf.math import UCS
DIR = Path("~/Desktop/Outbox").expanduser()
p = [
(0, 0, 0),
(1, 0, 0),
(1, 1, 0),
(0, 1, 0),
(0, 0, 1),
(1, 0, 1),
(1, 1, 1),
(0, 1, 1),
]
doc = ezdxf.new()
msp = doc.modelspace()
block = doc.blocks.new("block_4m3")
cube = block.add_mesh()
with cube.edit_data() as mesh_data:
mesh_data.add_face([p[0], p[1], p[2], p[3]])
mesh_data.add_face([p[4], p[5], p[6], p[7]])
mesh_data.add_face([p[0], p[1], p[5], p[4]])
mesh_data.add_face([p[1], p[2], p[6], p[5]])
mesh_data.add_face([p[3], p[2], p[6], p[7]])
mesh_data.add_face([p[0], p[3], p[7], p[4]])
mesh_data.optimize()
# Place untransformed cube, don't use the rotation
# attribute unless you really need it, just
# transform the UCS.
blockref = msp.add_blockref(name="block_4m3", insert=(0, 0, 0))
# First rotation about the local x-axis
ucs = UCS().rotate_local_x(angle=math.radians(45))
# same as a rotation around the WCS x-axis:
# ucs = UCS().rotate(axis=(1, 0, 0), angle=math.radians(45))
# Second rotation about the WCS z-axis
ucs = ucs.rotate(axis=(0, 0, 1), angle=math.radians(45))
# Last step transform block reference from UCS to WCS
blockref.transform(ucs.matrix)
zoom.extents(msp)
doc.saveas(DIR / "cube.dxf")
|
[
"me@mozman.at"
] |
me@mozman.at
|
40c80298125d22d148038ffefb051f1a267a1a50
|
6e3b8a04a074c30cf4fc43abe7a208f772df795b
|
/Mid-Exam/2-task.py
|
58c67fe8bae90b41f94cde4ab24bb1499bf056e6
|
[] |
no_license
|
majurski/Softuni_Fundamentals
|
dc0808fdaab942896eebfb208fb6b291df797752
|
bf53a9efdcb45eb911624ab86d762a6281391fb8
|
refs/heads/master
| 2022-11-29T06:06:06.287984
| 2020-08-10T19:36:18
| 2020-08-10T19:36:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 730
|
py
|
arr = input().split()
new_arr = list(map(int, arr))
line = input()
while line != "end":
value = line.split()
command = value[0]
if command == "swap":
index_1 = int(value[1])
index_2 = int(value[2])
new_arr[index_1], new_arr[index_2] = new_arr[index_2], new_arr[index_1]
elif command == "multiply":
index_1 = int(value[1])
index_2 = int(value[2])
multiplied = new_arr[index_1] * new_arr[index_2]
new_arr[index_1] = multiplied
    elif command == "decrease":
        # decrement every element; rebinding the loop variable would not
        # change the list, so build it anew
        new_arr = [val - 1 for val in new_arr]
line = input()
print(", ".join(list(map(str, result))))
# print(', '.join([str(x) for x in last]))
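# Sample run (assumed): input "1 2 3", then "swap 0 2", "decrease", "end"
# prints "2, 1, 0".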
|
[
"noreply@github.com"
] |
majurski.noreply@github.com
|
72791e17e71456aade20cc9cc4e32de6523e144b
|
34f5146e25144d4ceced8af38b5de2f8fff53fdd
|
/ui/mainwindow.py
|
158a3002e7c464033c18697b89cd33491f8128a1
|
[] |
no_license
|
fadiga/mstock
|
3271eeb0b8339b27347bbb70b96bc1f161ed6901
|
a5f621ed58bd881d9a232498ef23762a5f9c186f
|
refs/heads/master
| 2021-05-25T11:56:28.430965
| 2017-09-25T19:08:27
| 2017-09-25T19:08:27
| 39,653,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,419
|
py
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
# maintainer: Fad
from __future__ import (
unicode_literals, absolute_import, division, print_function)
from PyQt4.QtGui import QIcon
from PyQt4.QtCore import Qt
from Common.ui.common import FMainWindow, QToolBadgeButton
from ui.menutoolbar import MenuToolBar
from ui.menubar import MenuBar
from Common.ui.statusbar import GStatusBar
from ui.dashboard import DashbordViewWidget
from configuration import Config
class MainWindow(FMainWindow):
def __init__(self):
FMainWindow.__init__(self)
self.setWindowIcon(QIcon.fromTheme(
'logo', QIcon(u"{}".format(Config.APP_LOGO))))
self.menubar = MenuBar(self)
self.setMenuBar(self.menubar)
self.toolbar = MenuToolBar(self)
self.addToolBar(Qt.LeftToolBarArea, self.toolbar)
self.statusbar = GStatusBar(self)
self.setStatusBar(self.statusbar)
self.page = DashbordViewWidget
self.change_context(self.page)
def page_width(self):
return self.width() - 100
def add_badge(self, msg, count):
b = QToolBadgeButton(self)
b.setText(msg)
b.setCounter(count)
self.toolbar.addWidget(b)
def exit(self):
self.logout()
self.close()
def active_menu(self):
self.menubar = MenuBar(self)
self.setMenuBar(self.menubar)
|
[
"ibfadiga@gmail.com"
] |
ibfadiga@gmail.com
|
1e97caa9740ddd276af8721952d53c64e6237066
|
de8b832a3c804837300b9974dc0151d9294fa573
|
/code/experiment/GenderSoundNet/ex18_1_1_1_1_1_1_1_1_1_1_1_1_1_1/genderSoundNet.py
|
d318188bf1b8d63c08f77854ab0b089a4eff19a9
|
[] |
no_license
|
YuanGongND/Deep_Speech_Visualization
|
fcff2ac93e5adffd707b98eb7591f50fe77c1274
|
73a79e3596d9a5ee338eafb9a87b227696de25d1
|
refs/heads/master
| 2021-07-19T23:00:36.294817
| 2017-10-28T01:04:59
| 2017-10-28T01:04:59
| 105,332,686
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,377
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 26 21:03:04 2017
Conduct experiment on IEMOCAP, three labels:
96001: emotion(0-4, 5 = other emotions)
96002: speaker(0-9)
96003: gender(male=0, female=1)
@author: Kyle
"""
import os
from sys import argv
_, newFolderName, gpuI = argv
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpuI)
import sys
sys.path.append("../../model/")
import soundNet
import waveCNN
sys.path.append("../")
import expUtil
import numpy as np
import keras  # assumed missing import: keras.initializers is used in the grid search below
from keras import backend as K
import matplotlib.pyplot as plt
import shutil
#%% creat folder to save model, the code, and model configuration
while os.path.isdir( newFolderName ):
newFolderName = newFolderName + '_1'
print( 'exist' )
os.mkdir( newFolderName )
shutil.copy( os.path.basename(__file__), newFolderName ) # copy this file to the new folder
shutil.copy( '../../model/soundNet.py', newFolderName )
shutil.copy( '../../model/waveCNN.py', newFolderName )
shutil.copy( '../expUtil.py', newFolderName )
# put all configuratation here
thisTask = 'gender'
dataType = 'toyWaveform'
# define the model
model = soundNet.soundNet # define the model
#model = waveCNN.waveCNN
# according to the configuaration, change the coresponding setting
#if thisTask == 'emotion':
# trainNewFolderName = newFolderName
# load data
trainFeature, trainLabel, testFeature, testLabel = expUtil.loadData( testFolder = 4, testTask = thisTask, precision = 'original', sampleRate = 16000, dataType = dataType )
#%% grid search
#batch_sizeList = [ 32, 24, 16 ]
#learningRateList = [ 1e-3, 5e-4, 1e-4, 5e-5, 1e-5 ]
#initList = [ 'RandomUniform', 'lecun_normal', 'lecun_uniform', 'glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform' ]
batch_sizeList = [ 32 ]
learningRateList = [ 1e-4 ]
initList = [ 'glorot_normal' ]
for batch_size in batch_sizeList:
resultList = [ ]
for learningRate in learningRateList:
for init in initList:
tempFolderName = newFolderName + '/' + str( learningRate ) + '_' + str( batch_size ) + '_' + init
os.mkdir( tempFolderName )
# train the model
resultOnTrain, resultOnTest = expUtil.train( testFeature, testLabel, trainFeature, trainLabel, iteration_num = 100, \
lr_decay = 0.1, batch_size = batch_size, learningRate = learningRate, iterationNum = 100, \
modelT = model, newFolderName = tempFolderName, init = keras.initializers.Constant(value=0.01), saveSign = True, denseUnitNum = 64, \
dataType = dataType )
resultList.append( resultOnTest[ -1 ] )
np.savetxt( newFolderName + '\_' + str( batch_size ) +'_gridSearch.csv', resultList, delimiter = ',' )
resultList = np.array( resultList )
resultList.resize( [ len( learningRateList ), len( initList ) ] )
np.savetxt( newFolderName + '\_' + str( batch_size ) +'_gridSearch.csv', resultList, delimiter = ',' )
#%% start test
testSamples = testFeature.shape[ 0 ]
trainSamples = trainFeature.shape[ 0 ]
log = 'testSample_num = ' + str( testSamples ) + '\n trainSample_num = ' + str( trainSamples )
with open( newFolderName + '/log.txt' , "w") as text_file:
text_file.write( log )
|
[
"ygong1@nd.edu"
] |
ygong1@nd.edu
|
4e98cba026ffbfa488d602586ed1fb56b70a4b3e
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/117/usersdata/168/26292/submittedfiles/al2.py
|
2636de789a2d58357263296f2d1affdab045e0ff
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
from __future__ import division
#INICIE SEU CÓDIGO AQUI (start your code here)
n=float(input('Digite n'))
n1=int(n)
n2=n-n1
print('%.2f'%n1)
print('%.2f'%n2)
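# Example (assumed): entering 3.75 prints "3.00" (integer part) and
# "0.75" (fractional part).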
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
1872c2b02787510ea089a882647d262201237e43
|
e7f708af4b599ec6763e0d3b311e2cb47cc155d8
|
/payments/admin.py
|
f69953885eba02d24436b82c8477468a8e0d0cfd
|
[] |
no_license
|
dmontoya1/tu-licencia
|
d48bc8779d8cda50c7a382cb1c14e2ae3668ebc8
|
d436d665ba797d7b90fcdcc58bcef3e79b917682
|
refs/heads/master
| 2023-05-14T16:38:52.408066
| 2020-06-08T20:24:39
| 2020-06-08T20:24:39
| 371,433,421
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
# from .models import Invoice
# class InvoiceAdmin(admin.ModelAdmin):
# list_display = ('__unicode__', 'user', 'release_date', 'is_discharged', 'payment_status')
# readonly_fields = ('release_date', )
# search_fields = ('release_date', 'payu_reference_code')
# admin.site.register(Invoice, InvoiceAdmin)
|
[
"dmontoya@apptitud.com.co"
] |
dmontoya@apptitud.com.co
|
015f94220909b436deb31345160eebc80132c586
|
3ab1f37b4372d0796c85ef24343dd8c03accb6ef
|
/OddEvenLinkedList.py
|
5004fdb3d7cf1ccbd577d641dc11c9e1fe6a488c
|
[] |
no_license
|
Blossomyyh/leetcode
|
2be6a99534801fc59fe9551317ca49c3704b1c3d
|
38615779eb43d147587467e11dc22761ac0726cb
|
refs/heads/master
| 2023-01-22T16:56:26.624677
| 2020-11-20T13:47:43
| 2020-11-20T13:47:43
| 266,845,278
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 649
|
py
|
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def oddEvenList(self, head: ListNode) -> ListNode:
if not head or head.next == None: return head
odd, even, second = head, head.next, head.next
while odd and odd.next and even and even.next:
odd.next = odd.next.next
even.next = even.next.next
odd = odd.next
even = even.next
odd.next = second
return head
node = ListNode(1, ListNode(2, ListNode(3, ListNode(4))))
Solution().oddEvenList(node)
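# Verification sketch (assumed addition, not in the original file): walk the
# rearranged list to confirm the odd-positioned nodes come first.
vals = []
cur = Solution().oddEvenList(ListNode(1, ListNode(2, ListNode(3, ListNode(4)))))
while cur:
    vals.append(cur.val)
    cur = cur.next
print(vals)  # [1, 3, 2, 4]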
|
[
"blossomyyh@163.com"
] |
blossomyyh@163.com
|
4e69526e2f22b9e6ead9e0673d893c9460e3b570
|
fc1cc515a65e844705cc6262a70cd0a12ce1d1df
|
/math/0x00-linear_algebra/2-size_me_please.py
|
f9800d2c3bc375d8df09bd35e4d0cfd25402da82
|
[] |
no_license
|
yulyzulu/holbertonschool-machine_learning
|
4f379a4d58da201e8125bd8d74e3c9a4dfcf8a57
|
d078d9c1c5bd96730a08d52e4520eb380467fb48
|
refs/heads/master
| 2022-12-26T12:01:25.345332
| 2020-10-03T03:19:05
| 2020-10-03T03:19:05
| 279,392,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 301
|
py
|
#!/usr/bin/env python3
""" Module to execute function """
def matrix_shape(matrix):
"""Function that calculates the shape of a matrix"""
shape = []
shape.append(len(matrix))
while type(matrix[0]) == list:
matrix = matrix[0]
shape.append(len(matrix))
return shape
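if __name__ == "__main__":
    # Quick check (assumed example, not in the original module):
    print(matrix_shape([[1, 2], [3, 4], [5, 6]]))  # [3, 2]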
|
[
"yulyzulu05@gmail.com"
] |
yulyzulu05@gmail.com
|
9ca06377d8cd9fbe39082bfb3d7983c1eb7ddd2c
|
6b27c39edc10b1353104043b7a523f4981c99ef2
|
/pytype/tools/merge_pyi/test_data/stars.pep484.py
|
56d6529efedb9cc54e761bdb5952bafc66cde7b3
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
google/pytype
|
ad0ff0b6c1083b4f0a1af1747869d422f2b5f4d8
|
bda0b9547af9a084bb2bd1427f58dcde968e48b5
|
refs/heads/main
| 2023-08-26T17:52:23.546035
| 2023-08-24T22:48:00
| 2023-08-24T22:48:00
| 32,483,713
| 4,595
| 367
|
NOASSERTION
| 2023-09-13T04:40:45
| 2015-03-18T20:52:08
|
Python
|
UTF-8
|
Python
| false
| false
| 288
|
py
|
def f1(*a):
pass
def f2(**a):
pass
def f3(a, *b):
pass
def f4(a, **b):
pass
## arg with default after *args is valid python3, not python2
def f5(*a, b=1):
pass
def f6(*a, b=1, **c):
pass
def f7(x=1, *a, b=1, **c):
pass
def f8(#asd
*a):
pass
|
[
"rechen@google.com"
] |
rechen@google.com
|
eacef7a09f8311f33624a9f5acb965d5ec877336
|
7d8022661a756f77f715ee4d099fb17cb9da671a
|
/engine/data_loader.py
|
6e92e9d69aff6df248ddbe721e11db9904459073
|
[] |
no_license
|
lxj0276/Quant-Util
|
a7d70d88fc47eb16a08149faefa7b128c01c670e
|
2706ecba72a293ee01105ad22508a8d6b20e1394
|
refs/heads/master
| 2020-04-25T13:40:36.700892
| 2018-10-15T04:35:54
| 2018-10-15T04:35:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,979
|
py
|
import os
from collections import OrderedDict
import pandas as pd
from engine.cons import *
from engine.utils import get_calendar
from feature.time import TimeRange
from mongoapi.get_data import get_day_trade_data
from redis_cache.rediscache import cache_it_pickle
class pricing_data_loader:
def __init__(self, load_path=PRICING_DATA_PATH):
self.load_path = load_path
def load_data(self, instruments, feature, start_time, end_time):
pass
#
# @cache_it_pickle()
# def load_single_data(self, instrument, start_time, end_time, feature='close'):
# df = pd.read_csv(os.path.join(self.load_path, instrument + '.csv'))[['date', feature, 'rate']]
# df = df.rename(columns={feature: PRICE})
# # df[PRICE]=df[PRICE]/df['rate']
# del df['rate']
# carlender = get_calendar()
#
# df = df.set_index('date')
# df_full = df.reindex(carlender).fillna(method='ffill')
#
# pricing_data = df_full[(df_full.index >= start_time) & (df_full.index <= end_time)].to_dict()[PRICE]
# df = df.reindex(df_full.index)
# on_trading = (~df[(df.index >= start_time) & (df.index <= end_time)].isnull()).astype(int).to_dict()[PRICE]
# return OrderedDict(pricing_data), OrderedDict(on_trading)
@cache_it_pickle()
def load_single_data(self, instrument, start_time, end_time, feature='close'):
trade_calendar = get_calendar(start_time, end_time)
data = get_day_trade_data([instrument],
start_time,
end_time,
[feature],
return_df=True)[['date',feature]].set_index('date')
data=data.reindex(trade_calendar)
pricing_data = data.fillna(method='ffill').to_dict()[feature]
on_trading = (~data.isnull()).astype(int).to_dict()[feature]
return OrderedDict(pricing_data), OrderedDict(on_trading)
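if __name__ == '__main__':
    # Standalone sketch (assumed; plain pandas, no Mongo/Redis) of the
    # reindex + ffill alignment done in load_single_data above.
    calendar = ['2018-01-01', '2018-01-02', '2018-01-03']
    raw = pd.DataFrame({'close': [10.0, 11.0]},
                       index=['2018-01-01', '2018-01-03']).reindex(calendar)
    print(raw.fillna(method='ffill').to_dict()['close'])   # forward-filled prices
    print((~raw.isnull()).astype(int).to_dict()['close'])  # 1 = actually traded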
|
[
"zhangzc@pku.edu.cn"
] |
zhangzc@pku.edu.cn
|
7c38dda3f18562b3df7eecb78e86f2712b212369
|
b5d72e68c3976766a7adfd1fa33f778a5268c84a
|
/Regex/ex1.py
|
35628e29ded201a4fa05a049420dc39525ee29d4
|
[] |
no_license
|
LizinczykKarolina/Python
|
4a2fb0e7fb130c86239b2666fb346bf35a8e655b
|
7f4a3a9cba15fd2b4c7a104667461b3c49f8b757
|
refs/heads/master
| 2021-06-18T21:43:52.983557
| 2017-06-17T21:24:01
| 2017-06-17T21:24:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
#1. Write a Python program to check that a string contains only a certain set of characters (in this case a-z, A-Z and 0-9).
import re
email_address = 'wieczorek.karolina1@o2.pl'
searchObj = re.search(r'^[a-zA-Z0-9.]+$', email_address, re.M | re.I)  # anchored so the WHOLE string must use the allowed set
if searchObj:
print True
else:
print False
"sxdupa1" -match '^sx|1$'
|
[
"wieczorek.karolina1@o2.pl"
] |
wieczorek.karolina1@o2.pl
|
f3ae22f885b78d753d9dbc851fea38a065e80d88
|
9fc768c541145c1996f2bdb8a5d62d523f24215f
|
/code/HomeWork/ch5/H_5_4.py
|
a92df699ef2667ea9bcb8c973b7297b29db61309
|
[] |
no_license
|
jumbokh/pyclass
|
3b624101a8e43361458130047b87865852f72734
|
bf2d5bcca4fff87cb695c8cec17fa2b1bbdf2ce5
|
refs/heads/master
| 2022-12-25T12:15:38.262468
| 2020-09-26T09:08:46
| 2020-09-26T09:08:46
| 283,708,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 722
|
py
|
# H_5_4.py Purpose: read a number and report whether it is a multiple of 11
num_even = 0    # running sum of the digits in even positions
num_odd = 0     # running sum of the digits in odd positions
number = str(input('Enter a number: '))
l = len(number)   # length of the input number
x = int(number)   # convert to a numeric type
for n in range(l,0,-1):
    y = x//(10**(n-1))   # pick off the digit at this position
    x = x - (y*(10**(n-1)))
    if n%2 == 0:   # add the digit to the even- or odd-position sum
        num_even = num_even + y
    else:
        num_odd = num_odd + y
# a number is a multiple of 11 iff its alternating digit sums differ by a multiple of 11
if abs(num_even - num_odd) == 0 or (abs(num_even - num_odd))%11 == 0:
    print('This number is a multiple of 11')
else:
    print('This number is not a multiple of 11')
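# Cross-check (assumed addition, not in the original file): the alternating
# digit-sum rule agrees with a direct modulo test.
print(int(number) % 11 == 0)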
|
[
"jumbokh@gmail.com"
] |
jumbokh@gmail.com
|
6eeb30afedb9faf5956630200d67199cef30815d
|
2a4a17a67b9069c19396c0f8eabc8b7c4b6ff703
|
/BGP3D/Chapter11/WorldClass_00.py
|
2cd366287c64b1790280d87dd425c04951cc0271
|
[] |
no_license
|
kaz101/panda-book
|
0fa273cc2df5849507ecc949b4dde626241ffa5e
|
859a759c769d9c2db0d11140b0d04506611c2b7b
|
refs/heads/master
| 2022-12-19T09:36:05.794731
| 2020-09-16T19:04:10
| 2020-09-16T19:04:10
| 295,784,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,569
|
py
|
''' World Class
This class is the launching point for the game.
This is the file that needs to run to start the game,
and this class creates all the pieces of the game.
'''
import direct.directbase.DirectStart
from direct.filter.CommonFilters import CommonFilters
from HUDClass_00 import HUD
from RaceClass_00 import Race
from InputManagerClass_00 import InputManager
from MenuClass_00 import Menu
class World:
def __init__(self):
base.disableMouse()
# Turns off the default mouse-camera controls in Panda3D.
base.setBackgroundColor(0, 0, 0)
# Sets the background to black.
self.inputManager = InputManager()
# Creates an InputManager to handle all of the user input in the game.
#taskMgr.doMethodLater(10, self.debugTask, "Debug Task")
# Tells the debugTask to run once every ten seconds. The debug task is a good
# place to put various data print outs about the game to help with debugging.
self.filters = CommonFilters(base.win, base.cam)
filterok = self.filters.setBloom(blend=(0,0,0,1),
desat=-0.5, intensity=3.0, size=2)
render.setShaderAuto()
# Turns on Panda3D's automatic shader generation.
self.menuGraphics = loader.loadModel(
"../Models/MenuGraphics.egg")
# Loads the egg that contains all the menu graphics.
self.fonts = {
"silver" : loader.loadFont("../Fonts/LuconSilver.egg"),
"blue" : loader.loadFont("../Fonts/LuconBlue.egg"),
"orange" : loader.loadFont("../Fonts/LuconOrange.egg")}
# Loads the three custom fonts our game will use.
hud = HUD(self.fonts)
# Creates the HUD.
self.race = Race(self.inputManager, hud)
self.race.createDemoRace()
# creates an instance of the race class and tells it to
# start a demo race.
self.createStartMenu()
# creates the start menu.
def createStartMenu(self):
menu = Menu(self.menuGraphics, self.fonts, self.inputManager)
menu.initMenu([0,None,
["New Game", "Quit Game"],
[[self.race.createRace, self.createReadyDialogue],
[base.userExit]],
[[None,None],[None]]])
def createReadyDialogue(self):
menu = Menu(self.menuGraphics, self.fonts, self.inputManager)
menu.initMenu([3,"Are you ready?",
["Yes","Exit"],
[[self.race.startRace],[self.race.createDemoRace]],
[[3],[None]]])
def debugTask(self, task):
print(taskMgr)
# prints all of the tasks in the task manager.
return task.again
# debugTask: Runs once every ten seconds to print out reports on the games status.
w = World()
run()
|
[
"kaz101130@gmail.com"
] |
kaz101130@gmail.com
|
93770b04b06cc4bee60b772d822f418ad8a272c9
|
faca8866b3c8aca30a915d8cb2748766557ed808
|
/object_detection_updata/metrics/tf_example_parser.py
|
510e6917fa9838e9282ec6f561e50cb77a7b5ff9
|
[] |
no_license
|
yongqis/proposal_joint_retireval
|
6899d80f8fb94569c7b60764f6e7de74bcfa9cc8
|
97b086c62473ab1a5baf45743535fce70c3f8c20
|
refs/heads/master
| 2020-05-25T19:07:22.946008
| 2019-06-03T07:09:04
| 2019-06-03T07:09:09
| 187,943,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,294
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow Example proto parser for data loading.
A parser to decode data containing serialized tensorflow.Example
protos into materialized tensors (numpy arrays).
"""
import numpy as np
from object_detection_updata.core import data_parser
from object_detection_updata.core import standard_fields as fields
class FloatParser(data_parser.DataToNumpyParser):
"""Tensorflow Example float parser."""
def __init__(self, field_name):
self.field_name = field_name
def parse(self, tf_example):
return np.array(
tf_example.features.feature[self.field_name].float_list.value,
dtype=np.float).transpose() if tf_example.features.feature[
self.field_name].HasField("float_list") else None
class StringParser(data_parser.DataToNumpyParser):
"""Tensorflow Example string parser."""
def __init__(self, field_name):
self.field_name = field_name
def parse(self, tf_example):
return "".join(tf_example.features.feature[self.field_name]
.bytes_list.value) if tf_example.features.feature[
self.field_name].HasField("bytes_list") else None
class Int64Parser(data_parser.DataToNumpyParser):
"""Tensorflow Example int64 parser."""
def __init__(self, field_name):
self.field_name = field_name
def parse(self, tf_example):
return np.array(
tf_example.features.feature[self.field_name].int64_list.value,
dtype=np.int64).transpose() if tf_example.features.feature[
self.field_name].HasField("int64_list") else None
class BoundingBoxParser(data_parser.DataToNumpyParser):
"""Tensorflow Example bounding box parser."""
def __init__(self, xmin_field_name, ymin_field_name, xmax_field_name,
ymax_field_name):
self.field_names = [
ymin_field_name, xmin_field_name, ymax_field_name, xmax_field_name
]
def parse(self, tf_example):
result = []
parsed = True
for field_name in self.field_names:
result.append(tf_example.features.feature[field_name].float_list.value)
parsed &= (
tf_example.features.feature[field_name].HasField("float_list"))
return np.array(result).transpose() if parsed else None
class TfExampleDetectionAndGTParser(data_parser.DataToNumpyParser):
"""Tensorflow Example proto parser."""
def __init__(self):
self.items_to_handlers = {
fields.DetectionResultFields.key:
StringParser(fields.TfExampleFields.source_id),
# Object ground truth boxes and classes.
fields.InputDataFields.groundtruth_boxes: (BoundingBoxParser(
fields.TfExampleFields.object_bbox_xmin,
fields.TfExampleFields.object_bbox_ymin,
fields.TfExampleFields.object_bbox_xmax,
fields.TfExampleFields.object_bbox_ymax)),
fields.InputDataFields.groundtruth_classes: (
Int64Parser(fields.TfExampleFields.object_class_label)),
# Object detections.
fields.DetectionResultFields.detection_boxes: (BoundingBoxParser(
fields.TfExampleFields.detection_bbox_xmin,
fields.TfExampleFields.detection_bbox_ymin,
fields.TfExampleFields.detection_bbox_xmax,
fields.TfExampleFields.detection_bbox_ymax)),
fields.DetectionResultFields.detection_classes: (
Int64Parser(fields.TfExampleFields.detection_class_label)),
fields.DetectionResultFields.detection_scores: (
FloatParser(fields.TfExampleFields.detection_score)),
}
self.optional_items_to_handlers = {
fields.InputDataFields.groundtruth_difficult:
Int64Parser(fields.TfExampleFields.object_difficult),
fields.InputDataFields.groundtruth_group_of:
Int64Parser(fields.TfExampleFields.object_group_of),
fields.InputDataFields.groundtruth_image_classes:
Int64Parser(fields.TfExampleFields.image_class_label),
}
def parse(self, tf_example):
"""Parses tensorflow example and returns a tensor dictionary.
Args:
tf_example: a tf.Example object.
Returns:
A dictionary of the following numpy arrays:
fields.DetectionResultFields.source_id - string containing original image
id.
fields.InputDataFields.groundtruth_boxes - a numpy array containing
groundtruth boxes.
fields.InputDataFields.groundtruth_classes - a numpy array containing
groundtruth classes.
fields.InputDataFields.groundtruth_group_of - a numpy array containing
groundtruth group of flag (optional, None if not specified).
fields.InputDataFields.groundtruth_difficult - a numpy array containing
groundtruth difficult flag (optional, None if not specified).
fields.InputDataFields.groundtruth_image_classes - a numpy array
containing groundtruth image-level labels.
fields.DetectionResultFields.detection_boxes - a numpy array containing
detection boxes.
fields.DetectionResultFields.detection_classes - a numpy array containing
detection class labels.
fields.DetectionResultFields.detection_scores - a numpy array containing
detection scores.
Returns None if tf.Example was not parsed or non-optional fields were not
found.
"""
results_dict = {}
parsed = True
for key, parser in self.items_to_handlers.items():
results_dict[key] = parser.parse(tf_example)
parsed &= (results_dict[key] is not None)
for key, parser in self.optional_items_to_handlers.items():
results_dict[key] = parser.parse(tf_example)
return results_dict if parsed else None
|
[
"syq132@live.com"
] |
syq132@live.com
|
d83ff9d35756aa203096778316d1f1840a266b4c
|
d9f6f439300d298246c37ccfb881e8e8af4fda22
|
/cfp/migrations/0021_profile_name.py
|
814cb2ed2021c677ece96c65e1671b44cb2a0824
|
[
"MIT"
] |
permissive
|
ajlozier/speakers
|
e62b8d346a58a034998860d1b42a38b00cbdbd23
|
d7d87c99b1cfa5f9df5455f737385115d9d5279c
|
refs/heads/master
| 2021-09-08T19:33:08.894305
| 2018-03-12T00:54:10
| 2018-03-12T00:54:10
| 122,101,157
| 0
| 0
| null | 2018-02-19T18:08:18
| 2018-02-19T18:08:18
| null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cfp', '0020_auto_20150218_0802'),
]
operations = [
migrations.AddField(
model_name='profile',
name='name',
field=models.CharField(max_length=300, default=''),
preserve_default=False,
),
]
|
[
"kyle@kyleconroy.com"
] |
kyle@kyleconroy.com
|
e5d51b840f9d3e61bbf612efe4cb4e6c26e84ce6
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_actor.py
|
04e32c50a329bba627e107500de9e8bd1c6b493f
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
# class header
class _ACTOR():
def __init__(self,):
self.name = "ACTOR"
self.definitions = [u'someone who pretends to be someone else while performing in a film, play, or television or radio programme: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
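if __name__ == '__main__':
    # Assumed demo, not part of the generated file:
    w = _ACTOR()
    print(w.name, w.specie)  # ACTOR nouns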
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
75af3c206ddd4f8e25574cabd71067f19214176c
|
4170ed62059b6898cc8914e7f23744234fc2f637
|
/CD zum Buch "Einstieg in Python"/Programmbeispiele/GUI/gui_check.py
|
5f2ffb98025c0f74ac7ab1c38d5a0295e3aeec43
|
[] |
no_license
|
Kirchenprogrammierer/Cheats
|
9633debd31ab1df78dc639d1aef90d3ac4c1f069
|
0b71c150f48ad1f16d7b47a8532b1f94d26e148e
|
refs/heads/master
| 2021-05-08T10:42:39.927811
| 2018-02-01T17:29:11
| 2018-02-01T17:29:11
| 119,858,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 817
|
py
|
import tkinter
def ende():
main.destroy()
def anzeigen():
lb["text"] = "Zimmer " + du.get() + " " + mb.get()
main = tkinter.Tk()
# Display label
lb = tkinter.Label(main, text = "Zimmer ", width=40)
lb.pack()
# Widget variables
du = tkinter.StringVar()
du.set("ohne Dusche")
mb = tkinter.StringVar()
mb.set("ohne Minibar")
# Two checkbuttons
cb1 = tkinter.Checkbutton(main, text="Dusche",
variable=du, onvalue="mit Dusche",
offvalue="ohne Dusche", command=anzeigen)
cb1.pack()
cb2 = tkinter.Checkbutton(main, text="Minibar",
variable=mb, onvalue="mit Minibar",
offvalue="ohne Minibar", command=anzeigen)
cb2.pack()
bende = tkinter.Button(main, text = "Ende",
command = ende)
bende.pack()
main.mainloop()
|
[
"noreply@github.com"
] |
Kirchenprogrammierer.noreply@github.com
|
8f95dcb9c22025b63932cb30e24ae18203a9dc38
|
ce48d74eb28ec153573cd42fe58d39adc784c85c
|
/jdcloud_sdk/services/xdata/models/DwDatabaseInfo.py
|
bedf36eee3a80637f1aaf4c87b6493744b5f6c2c
|
[
"Apache-2.0"
] |
permissive
|
oulinbao/jdcloud-sdk-python
|
4c886cb5b851707d98232ca9d76a85d54c8ff8a8
|
660e48ec3bc8125da1dbd576f7868ea61ea21c1d
|
refs/heads/master
| 2020-03-16T22:22:15.922184
| 2018-05-11T10:45:34
| 2018-05-11T10:45:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,049
|
py
|
# coding=utf8
# Copyright 2018-2025 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class DwDatabaseInfo(object):
def __init__(self, owner=None, comments=None, databaseName=None):
"""
        :param owner: (Optional) owner
        :param comments: (Optional) description
        :param databaseName: (Optional) database name
"""
self.owner = owner
self.comments = comments
self.databaseName = databaseName
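if __name__ == "__main__":
    # Assumed demo (not in the generated SDK file): every field is optional.
    info = DwDatabaseInfo(owner="admin", databaseName="sales_dw")
    print(info.owner, info.databaseName, info.comments)  # admin sales_dw None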
|
[
"oulinbao@jd.com"
] |
oulinbao@jd.com
|