blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ff374d319bbe1f8b52441278e5fbb540de3bd79d | 3e799066e12dbcbbce073ce9d74fc3ae43683686 | /pytablereader/__version__.py | e01e0f951c4a397fce0e5ae2b326b104aa026250 | [
"MIT"
] | permissive | nneophyt/pytablereader | 07f51f47552256211abb7c58badaa1e4c6ec3e28 | b158cf6749ce95a4602c68b1268c4e5465464760 | refs/heads/master | 2020-09-09T01:16:34.405526 | 2019-05-11T12:34:37 | 2019-05-11T12:34:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | # encoding: utf-8
from datetime import datetime

# Distribution metadata for the pytablereader package.
__version__ = "0.26.1"
__license__ = "MIT License"
__author__ = "Tsuyoshi Hombashi"
__maintainer__ = __author__
__email__ = "tsuyoshi.hombashi@gmail.com"
# The copyright range always extends to the year the module is imported.
__copyright__ = "Copyright 2016-%d, %s" % (datetime.now().year, __author__)
| [
"tsuyoshi.hombashi@gmail.com"
] | tsuyoshi.hombashi@gmail.com |
f050dd43c5dae9b97542bc5a13d49bf1739a90f4 | 9b20743ec6cd28d749a4323dcbadb1a0cffb281b | /03_Linear_Algebra_for_Machine_Learning/07/04_orthogonal_matrix.py | 7ddc8461a7f485fbbf2d7feca3783fe63b74ce9a | [] | no_license | jggrimesdc-zz/MachineLearningExercises | 6e1c7e1f95399e69bba95cdfe17c4f8d8c90d178 | ee265f1c6029c91daff172b3e7c1a96177646bc5 | refs/heads/master | 2023-03-07T19:30:26.691659 | 2021-02-19T08:00:49 | 2021-02-19T08:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | # orthogonal matrix
from numpy import array
from numpy.linalg import inv

# A 2x2 orthogonal matrix: a reflection across the x-axis.
Q = array([[1, 0],
           [0, -1]])
print(Q)

# For an orthogonal matrix the inverse equals the transpose,
# so these two printed matrices are identical.
V = inv(Q)
print(Q.T)
print(V)

# Multiplying Q by its transpose therefore yields the identity matrix.
I = Q.dot(Q.T)
print(I)
| [
"jgrimes@jgrimes.tech"
] | jgrimes@jgrimes.tech |
897e63f4f998a043ef122953c3868e09cc9ac7ca | ecd25c36474ecf404a32f2f0096b5a6898e4c396 | /python_stack/django/django_fullstack/login_registration/login_registration_app/migrations/0002_user_confirm.py | 1e0c5732eeff98c6bf46805332c3db2d7d436d26 | [] | no_license | matthew-le/Coding_Dojo_Bootcamp | cd7b4aa8e231db372da05a0a5444114b07fbfabf | 6d433d5305d2d8f4ea485206895d8f84bedeb59d | refs/heads/main | 2023-06-13T23:05:23.827556 | 2021-07-23T23:56:35 | 2021-07-23T23:56:35 | 388,947,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | # Generated by Django 2.2 on 2021-07-13 06:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``confirm`` column (password confirmation) to the ``user`` model."""

    dependencies = [
        ('login_registration_app', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='confirm',
            # ``null`` and ``preserve_default`` expect real booleans.  The
            # original passed the string 'TRUE', which only behaved like True
            # because any non-empty string is truthy.
            field=models.CharField(max_length=255, null=True),
            preserve_default=True,
        ),
    ]
| [
"you@example.com"
] | you@example.com |
8166e63dfbe2a305ad01e95cf19ee7a4183d8ec8 | cdd150baa9a03daead7112f5eaab8f49f1e362c4 | /hyperas_skipthoughts.py | 808dea3a4621c911df93976d6e9f67306573f041 | [] | no_license | shayezkarim/personality_detection | 3e7bc26c2e3030514f75fd2eb8925f5b094eece9 | 4e6fb55fc1f20587c7bad8018c36057d971cc5c9 | refs/heads/master | 2021-06-02T21:37:44.299032 | 2016-04-04T11:50:03 | 2016-04-04T11:50:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,914 | py | __author__ = 'Dimitris'
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
from pprint import pprint
def keras_model():
    """Hyperas search template: build, train and score one MLP configuration.

    The ``{{choice(...)}}`` / ``{{uniform(...)}}`` markers are NOT plain
    Python -- hyperas rewrites this function's source text, substituting a
    sampled value for each marker before execution.  Do not "clean up" the
    double braces.

    Returns a hyperopt result dict with the loss to minimize.
    """
    from keras.models import Sequential
    from keras.layers.core import Dense
    from keras.regularizers import l2, activity_l2
    from aiding_funcs.embeddings_handling import get_the_folds, join_folds
    from aiding_funcs.label_handling import MaxMin, MaxMinFit
    import pickle
    # Load pre-pickled train/test sets (skip-thought vectors + labels).
    print('loading test.p')
    test = pickle.load( open( "/data/dpappas/Common_Crawl_840B_tokkens_pickles/test.p", "rb" ) )
    print('loading train.p')
    train = pickle.load( open( "/data/dpappas/Common_Crawl_840B_tokkens_pickles/train.p", "rb" ) )
    # Split into folds; train on all but the last fold, hold the last out.
    # NOTE(review): indexing folds.keys() only works on Python 2, where
    # dict.keys() returns a list -- confirm before running under Python 3.
    no_of_folds = 10
    folds = get_the_folds(train,no_of_folds)
    train_data = join_folds(folds,folds.keys()[:-1])
    validation_data = folds[folds.keys()[-1]]
    # Scale labels using min/max fitted on the training labels only.
    mins, maxs = MaxMin(train_data['labels'])
    T_l = MaxMinFit(train_data['labels'], mins, maxs)
    # NOTE(review): t_l (scaled validation labels) is computed but never used.
    t_l = MaxMinFit(validation_data['labels'], mins, maxs)
    # Hyperparameter search space (layer widths, optimizer, L2 strengths).
    # NOTE(review): 'adadelta' appears twice in the optimizer list, doubling
    # its sampling probability -- possibly unintended.
    Dense_size = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    Dense_size2 = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    opt = {{choice([ 'adadelta','sgd','rmsprop', 'adagrad', 'adadelta', 'adam'])}}
    out_dim = 5
    activity_l2_0 = {{uniform(0, 1)}}
    activity_l2_1 = {{uniform(0, 1)}}
    activity_l2_2 = {{uniform(0, 1)}}
    l2_0 = {{uniform(0, 1)}}
    l2_1 = {{uniform(0, 1)}}
    l2_2 = {{uniform(0, 1)}}
    # Two L2-regularized sigmoid hidden layers + a linear 5-way output
    # (one unit per personality trait).
    model = Sequential()
    model.add(Dense(Dense_size, activation='sigmoid',W_regularizer=l2(l2_0),activity_regularizer=activity_l2(activity_l2_0),input_dim = train_data['skipthoughts'].shape[-1] ))
    model.add(Dense(Dense_size2, activation='sigmoid',W_regularizer=l2(l2_1),activity_regularizer=activity_l2(activity_l2_1)))
    model.add(Dense(out_dim, activation='linear',W_regularizer=l2(l2_2),activity_regularizer=activity_l2(activity_l2_2)))
    model.compile(loss='rmse', optimizer=opt)
    #model.fit(train_data['skipthoughts'], train_data['labels'], nb_epoch=500, show_accuracy=False, verbose=2)
    #score = model.evaluate( train_data['skipthoughts'], train_data['labels'])
    model.fit(train_data['skipthoughts'], T_l, nb_epoch=500, show_accuracy=False, verbose=2)
    # NOTE(review): the model is scored on its own training data, not the
    # held-out fold -- hyperopt therefore minimizes training loss here.
    score = model.evaluate( train_data['skipthoughts'], T_l)
    print("score : " +str(score))
    return {'loss': score, 'status': STATUS_OK}
if __name__ == '__main__':
    # Run the TPE hyperparameter search over the space defined in
    # keras_model and print the best configuration found.
    best_run = optim.minimize(keras_model, algo=tpe.suggest, max_evals=2000, trials=Trials())
    pprint(best_run)
'''
{'Dense_size': 3, 200
'Dense_size2': 5, 300
'activity_l2_0': 0.05188918775936191,
'activity_l2_1': 0.45047635433513034,
'activity_l2_2': 0.0005117368813977515,
'l2_0': 0.8718331552337388,
'l2_1': 0.5807575417209597,
'l2_2': 0.48965647861094225,
'opt': 5} 'adam'
''' | [
"dvpappas89@gmail.com"
] | dvpappas89@gmail.com |
343766aa0e9faba33b9e0181f38968f53cf96a5e | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AlipayOpenPublicLifeLabelDeleteModel.py | b8a8e95bad811ee04d68d3c7a776e80cfeb2813d | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 899 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenPublicLifeLabelDeleteModel(object):
    """Request model for the ``alipay.open.public.life.label.delete`` API.

    Carries a single parameter, ``label_id``, and converts between this
    object and the plain-dict form used on the wire.
    """

    def __init__(self):
        # Backing store for the single request parameter.
        self._label_id = None

    @property
    def label_id(self):
        return self._label_id

    @label_id.setter
    def label_id(self, value):
        self._label_id = value

    def to_alipay_dict(self):
        """Serialize to a dict, omitting ``label_id`` when it is unset/falsy."""
        params = dict()
        label = self.label_id
        if label:
            # Nested model objects know how to serialize themselves.
            if hasattr(label, 'to_alipay_dict'):
                params['label_id'] = label.to_alipay_dict()
            else:
                params['label_id'] = label
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for empty input."""
        if not d:
            return None
        model = AlipayOpenPublicLifeLabelDeleteModel()
        if 'label_id' in d:
            model.label_id = d['label_id']
        return model
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
48a93f055fff5c07977f8c95c1b0ff1694abb04e | d6018cbcbf01b72bb420309ddf0fa38c85b01de6 | /fulledits_ratio2_combine.py | 5ec6929e38ca73d46be7511f7a27894d360ce2f6 | [] | no_license | maxwshen/lib-analysis | daf57019ea6727719212b4fbe5741fe4d52adb5a | 6234a17883268b31ac71dabd509bc70183c4f99b | refs/heads/master | 2022-03-14T00:04:02.154164 | 2019-11-21T19:32:13 | 2019-11-21T19:32:13 | 175,507,042 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,566 | py | #
from __future__ import division
import _config
import sys, os, fnmatch, datetime, subprocess, pickle
sys.path.append('/home/unix/maxwshen/')
import numpy as np
from collections import defaultdict
from mylib import util
import pandas as pd
# Default params
# Input: per-condition ratio CSVs produced by the fulledits_ratio2 step.
inp_dir = _config.OUT_PLACE + 'fulledits_ratio2/'
NAME = util.get_fn(__file__)
# Output directory is named after this script; created if absent.
out_dir = _config.OUT_PLACE + NAME + '/'
util.ensure_dir_exists(out_dir)
import _data
nts = list('ACGT')
# Map each nucleotide to its index in A, C, G, T order.
nt_to_idx = {nts[s]: s for s in range(len(nts))}
# Experiment design table mapping treatment to control conditions.
treat_control_df = pd.read_csv(_config.DATA_DIR + 'treatment_control_design.csv', index_col = 0)
##
# Main
##
@util.time_dec
def main():
  """Combine the per-condition ratio tables produced by fulledits_ratio2.

  Writes two CSVs into out_dir:
    _combined_gmean_bootstrap.csv -- all *bootstrap* files, tagged by condition.
    _all_ratios.csv -- all non-bootstrap files, tagged by condition, with a
      per-row 'Regression weight' so each file contributes equal total weight.
  """
  print(NAME)
  import glob

  # Bootstrap tables: tag each row with its condition name.
  frames = []
  fns = glob.glob(inp_dir + '*bootstrap*')
  timer = util.Timer(total = len(fns))
  for fn in fns:
    cond = fn.split('/')[-1].replace('_bootstrap.csv', '')
    df = pd.read_csv(fn, index_col = 0)
    df['Condition'] = cond
    frames.append(df)
    timer.update()
  # Concatenating once replaces the original DataFrame.append-in-a-loop,
  # which is quadratic and was removed in pandas 2.0.
  mdf = pd.concat(frames, ignore_index = True) if frames else pd.DataFrame()
  mdf.to_csv(out_dir + '_combined_gmean_bootstrap.csv')

  # Non-bootstrap tables: equal total weight per file, split across its rows.
  frames = []
  fns = [fn for fn in os.listdir(inp_dir) if 'bootstrap' not in fn]
  timer = util.Timer(total = len(fns))
  for fn in fns:
    df = pd.read_csv(inp_dir + fn)
    cond = fn.replace('.csv', '')
    df['Condition'] = cond
    n = len(df)
    df['Regression weight'] = 1 / n
    frames.append(df)
    timer.update()
  mdf = pd.concat(frames, ignore_index = True) if frames else pd.DataFrame()
  mdf.to_csv(out_dir + '_all_ratios.csv')
  return
# Script entry point.
if __name__ == '__main__':
  main()
"maxwshen@gmail.com"
] | maxwshen@gmail.com |
231ba5d2ac5db3833dad59496e76c2080e56b2c5 | 2967f6fe104b2ed9cd3f02b855b36a4dced9edc8 | /src/zope/server/tests/test_dualmodechannel.py | f51e7de9f0a694588b8c7dc4368b154edaddd1f4 | [
"ZPL-2.1"
] | permissive | cjwatson/zope.server | 935ff4ab8e5d65decc1c5d95b23305d57684166f | 9c40c8a1ae57d28f1e0fa21e740826befefc30d5 | refs/heads/master | 2023-05-04T15:33:24.972288 | 2019-07-11T12:03:41 | 2019-07-11T12:03:41 | 260,460,082 | 0 | 0 | NOASSERTION | 2020-05-01T13:00:40 | 2020-05-01T13:00:39 | null | UTF-8 | Python | false | false | 3,432 | py | # -*- coding: utf-8 -*-
"""
Tests for dualmodechannel.py.
"""
import unittest
from zope.server.dualmodechannel import DualModeChannel
class TestDualModeChannel(unittest.TestCase):
    """Unit tests for zope.server's DualModeChannel.

    Exercises the sync/async mode switches, error propagation from the
    socket layer, and the write() API's acceptance of bytes, lists of
    bytes, and generators.
    """

    def test_handle_write_non_async(self):
        # In sync mode the async write handler must be a no-op.
        channel = DualModeChannel(None, None)
        channel.set_sync()
        # Does nothing, no side effects
        channel.handle_write()

    def test_handle_read_non_async(self):
        # In sync mode the async read handler must be a no-op.
        channel = DualModeChannel(None, None)
        channel.set_sync()
        # Does nothing, no side effects
        channel.handle_read()

    def test_handle_read_will_close(self):
        # Once close_when_done() is requested, reads must be ignored.
        channel = DualModeChannel(None, None)
        channel.close_when_done()
        # Does nothing, no side effects
        channel.handle_read()

    def test_handle_write_flush_error(self):
        # A socket.error raised while flushing must route to handle_error().
        import socket
        class C(DualModeChannel):
            error_called = False
            def __init__(self):
                DualModeChannel.__init__(self, None, None)
            def _flush_some(self):
                raise socket.error()
            def handle_error(self):
                self.error_called = True

        channel = C()
        channel.outbuf.append(b'data')
        channel.handle_write()
        self.assertTrue(channel.error_called)

    def test_handle_read_recv_error(self):
        # A socket.error raised by recv() must route to handle_error().
        import socket
        class C(DualModeChannel):
            error_called = False
            def __init__(self):
                DualModeChannel.__init__(self, None, None)
            def recv(self, _count):
                raise socket.error()
            def handle_error(self):
                self.error_called = True

        channel = C()
        channel.handle_read()
        self.assertTrue(channel.error_called)

    def test_write_flushes(self):
        # Writing more than adj.send_bytes must trigger a flush attempt.
        class C(DualModeChannel):
            flush_called = False
            def _flush_some(self):
                self.flush_called = True
                return False
        class A(object):
            # Minimal stand-in for the server adjustments object.
            send_bytes = 1
            outbuf_overflow = 100
        channel = C(None, None, A())
        channel.write(b'some bytes')
        self.assertTrue(channel.flush_called)

    def test_channels_accept_iterables(self):
        # Channels accept iterables (they special-case strings).
        from zope.server.tests.test_serverbase import FakeSocket
        socket = FakeSocket()
        channel = DualModeChannel(socket, ('localhost', 42))

        # Plain bytes: write() reports the number of bytes accepted.
        written = channel.write(b"First")
        self.assertEqual(5, written)
        channel.flush()

        self.assertEqual(socket.data.decode('ascii'),
                         'First')

        # A list of byte strings is written piecewise, in order.
        written = channel.write([b"\n", b"Second", b"\n", b"Third"])
        self.assertEqual(13, written)
        channel.flush()

        self.assertEqual(socket.data.decode('ascii'),
                         "First\n"
                         "Second\n"
                         "Third")

        # A generator of byte strings is consumed lazily.
        def count():
            yield b'\n1\n2\n3\n'
            yield b'I love to count. Ha ha ha.'

        written = channel.write(count())
        self.assertEqual(written, 33)
        channel.flush()

        self.assertEqual(socket.data.decode('ascii'),
                         "First\n"
                         "Second\n"
                         "Third\n"
                         "1\n"
                         "2\n"
                         "3\n"
                         "I love to count. Ha ha ha.")
"jamadden@gmail.com"
] | jamadden@gmail.com |
7182a4e9f4a9770d1dc95f0bb348efded753d61c | 00f3f33b977e87d23c2158b906402ccb3cc4c42e | /venv/lightomatic-env/bin/pyreverse | 9a144a67653b5fed84c2f5f16d8a8f21450d76f3 | [] | no_license | cosmicRover/lightoMatic | 9591499ca82e5fa5afad4d008307c6187e671aca | 9d5b1b4a61c735fd2331e23e834a8f05b961c97f | refs/heads/master | 2020-05-31T19:27:08.456380 | 2019-06-07T03:26:21 | 2019-06-07T03:26:21 | 190,455,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | #!/home/joy/pythondevelopments/lightoMatic/venv/lightomatic-env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pyreverse
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run_pyreverse())
| [
"jpaul3250@gmail.com"
] | jpaul3250@gmail.com | |
5698e5a876e761689f9c520384b1f7e6870ac36f | 159aed4755e47623d0aa7b652e178296be5c9604 | /data/scripts/templates/object/draft_schematic/weapon/shared_pistol_blaster_dl44.py | a31cf79b61d98013892eb7aa11c560b2892efa92 | [
"MIT"
] | permissive | anhstudios/swganh | fb67d42776864b1371e95f769f6864d0784061a3 | 41c519f6cdef5a1c68b369e760781652ece7fec9 | refs/heads/develop | 2020-12-24T16:15:31.813207 | 2016-03-08T03:54:32 | 2016-03-08T03:54:32 | 1,380,891 | 33 | 44 | null | 2016-03-08T03:54:32 | 2011-02-18T02:32:45 | Python | UTF-8 | Python | false | false | 455 | py | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build the intangible draft-schematic object for the DL-44 blaster pistol."""
	obj = Intangible()
	obj.template = "object/draft_schematic/weapon/shared_pistol_blaster_dl44.iff"
	obj.attribute_template_id = -1
	obj.stfName("string_id_table","")

	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####

	return obj
"rwl3564@rit.edu"
] | rwl3564@rit.edu |
a7287d052f82a938605f40dd7adeb10780d563db | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_tries.py | af8c17ef049137b1628b04c5efd273459fedff18 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py |
from xai.brain.wordbase.verbs._try import _TRY
#class header
class _TRIES(_TRY):
    """Word entry for "tries", an inflected form of the base verb "try"."""

    def __init__(self):
        _TRY.__init__(self)
        self.name = "TRIES"
        self.specie = 'verbs'
        self.basic = "try"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
ec55cd3ac6ec5285cd52aceae1ef4ae1e62ffc2d | c4a119311ac01bbe7d5ab81b1d3d663ad0900ab6 | /python-build/python-libs/xmpppy/setup.py | 1e145617a61190a81d79af3c4ed04773925b3d8d | [
"Apache-2.0",
"GPL-3.0-only"
] | permissive | kuri65536/python-for-android | 1d8d99e81e64bc87805c2c58ee0dcf43d413e72e | 26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891 | refs/heads/master | 2021-06-02T01:17:29.685199 | 2018-05-05T00:12:13 | 2018-05-05T01:36:22 | 32,235,625 | 280 | 122 | Apache-2.0 | 2020-05-15T06:47:36 | 2015-03-14T22:44:36 | Python | UTF-8 | Python | false | false | 1,478 | py | #!/usr/bin/python
# -*- coding: koi8-r -*-
from distutils.core import setup,sys
from setuptools import setup
import os
# Python < 2.2.3's distutils does not understand the newer metadata fields,
# so blank them out on DistributionMetadata before calling setup().
# NOTE(review): comparing version numbers as strings is fragile
# (e.g. '2.10' < '2.2.3'), but matches the distutils idiom of the era.
if sys.version < '2.2.3':
    from distutils.dist import DistributionMetadata
    DistributionMetadata.classifiers = None
    DistributionMetadata.download_url = None

# Set proper release version in source code also!!!
setup(name='xmpppy',
      version='0.5.0rc1',
      author='Alexey Nezhdanov',
      author_email='snakeru@users.sourceforge.net',
      url='http://xmpppy.sourceforge.net/',
      description='XMPP-IM-compliant library for jabber instant messenging.',
      long_description="""This library provides functionality for writing xmpp-compliant
clients, servers and/or components/transports.
It was initially designed as a \"rework\" of the jabberpy library but
has become a separate product.
Unlike jabberpy it is distributed under the terms of GPL.""",
      download_url='http://sourceforge.net/project/showfiles.php?group_id=97081&package_id=103821',
      packages=['xmpp'],
      license="GPL",
      platforms="All",
      keywords=['jabber','xmpp'],
      classifiers = [
        'Topic :: Communications :: Chat',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Natural Language :: English',
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        ],
      )
| [
"manuel@aircable.net"
] | manuel@aircable.net |
f2a5dbbfad5429b613f7fbd0482e5f5d3441bdb3 | eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429 | /data/input/aliyun/aliyun-openapi-python-sdk/aliyun-python-sdk-slb/aliyunsdkslb/request/v20140515/CreateLoadBalancerUDPListenerRequest.py | 0374135d7ac8fdca7269fcdf3feba84679d20533 | [] | no_license | bopopescu/pythonanalyzer | db839453bde13bf9157b76e54735f11c2262593a | 8390a0139137574ab237b3ff5fe8ea61e8a0b76b | refs/heads/master | 2022-11-22T02:13:52.949119 | 2019-05-07T18:42:52 | 2019-05-07T18:42:52 | 282,079,884 | 0 | 0 | null | 2020-07-23T23:46:09 | 2020-07-23T23:46:08 | null | UTF-8 | Python | false | false | 3,984 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateLoadBalancerUDPListenerRequest(RpcRequest):
	"""Request wrapper for the SLB ``CreateLoadBalancerUDPListener`` API
	(product 'Slb', API version 2014-05-15).

	Auto-generated RPC request class: each API parameter is exposed as a
	get_X/set_X pair that reads from / writes to the query-string map
	maintained by :class:`RpcRequest`.
	NOTE(review): ``get_healthCheckInterval``/``set_healthCheckInterval``
	use a lower-camel name unlike the other accessors; kept as generated
	for backward compatibility.
	"""

	def __init__(self):
		RpcRequest.__init__(self, 'Slb', '2014-05-15', 'CreateLoadBalancerUDPListener')

	def get_OwnerId(self):
		return self.get_query_params().get('OwnerId')

	def set_OwnerId(self,OwnerId):
		self.add_query_param('OwnerId',OwnerId)

	def get_ResourceOwnerAccount(self):
		return self.get_query_params().get('ResourceOwnerAccount')

	def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
		self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)

	def get_ResourceOwnerId(self):
		return self.get_query_params().get('ResourceOwnerId')

	def set_ResourceOwnerId(self,ResourceOwnerId):
		self.add_query_param('ResourceOwnerId',ResourceOwnerId)

	def get_LoadBalancerId(self):
		return self.get_query_params().get('LoadBalancerId')

	def set_LoadBalancerId(self,LoadBalancerId):
		self.add_query_param('LoadBalancerId',LoadBalancerId)

	def get_ListenerPort(self):
		return self.get_query_params().get('ListenerPort')

	def set_ListenerPort(self,ListenerPort):
		self.add_query_param('ListenerPort',ListenerPort)

	def get_BackendServerPort(self):
		return self.get_query_params().get('BackendServerPort')

	def set_BackendServerPort(self,BackendServerPort):
		self.add_query_param('BackendServerPort',BackendServerPort)

	def get_Bandwidth(self):
		return self.get_query_params().get('Bandwidth')

	def set_Bandwidth(self,Bandwidth):
		self.add_query_param('Bandwidth',Bandwidth)

	def get_Scheduler(self):
		return self.get_query_params().get('Scheduler')

	def set_Scheduler(self,Scheduler):
		self.add_query_param('Scheduler',Scheduler)

	def get_PersistenceTimeout(self):
		return self.get_query_params().get('PersistenceTimeout')

	def set_PersistenceTimeout(self,PersistenceTimeout):
		self.add_query_param('PersistenceTimeout',PersistenceTimeout)

	def get_HealthyThreshold(self):
		return self.get_query_params().get('HealthyThreshold')

	def set_HealthyThreshold(self,HealthyThreshold):
		self.add_query_param('HealthyThreshold',HealthyThreshold)

	def get_UnhealthyThreshold(self):
		return self.get_query_params().get('UnhealthyThreshold')

	def set_UnhealthyThreshold(self,UnhealthyThreshold):
		self.add_query_param('UnhealthyThreshold',UnhealthyThreshold)

	def get_HealthCheckConnectTimeout(self):
		return self.get_query_params().get('HealthCheckConnectTimeout')

	def set_HealthCheckConnectTimeout(self,HealthCheckConnectTimeout):
		self.add_query_param('HealthCheckConnectTimeout',HealthCheckConnectTimeout)

	def get_HealthCheckConnectPort(self):
		return self.get_query_params().get('HealthCheckConnectPort')

	def set_HealthCheckConnectPort(self,HealthCheckConnectPort):
		self.add_query_param('HealthCheckConnectPort',HealthCheckConnectPort)

	def get_healthCheckInterval(self):
		return self.get_query_params().get('healthCheckInterval')

	def set_healthCheckInterval(self,healthCheckInterval):
		self.add_query_param('healthCheckInterval',healthCheckInterval)

	def get_OwnerAccount(self):
		return self.get_query_params().get('OwnerAccount')

	def set_OwnerAccount(self,OwnerAccount):
		self.add_query_param('OwnerAccount',OwnerAccount)
"rares.begu@gmail.com"
] | rares.begu@gmail.com |
08874ff08accc44a3c0a0e0a92d886914cee7c0c | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/appplatform/azure-mgmt-appplatform/generated_samples/api_portals_validate_domain.py | f8f3c175988334e6b10e93453d559364436bf476 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,688 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.appplatform import AppPlatformManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-appplatform
# USAGE
python api_portals_validate_domain.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Validate a custom domain for the default API portal of a Spring Apps service."""
    # Credentials are resolved from the environment variables listed in the
    # module docstring (AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET).
    mgmt_client = AppPlatformManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-0000-0000-0000-000000000000",
    )
    result = mgmt_client.api_portals.validate_domain(
        resource_group_name="myResourceGroup",
        service_name="myservice",
        api_portal_name="default",
        validate_payload={"name": "mydomain.io"},
    )
    print(result)


# x-ms-original-file: specification/appplatform/resource-manager/Microsoft.AppPlatform/stable/2022-12-01/examples/ApiPortals_ValidateDomain.json
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
e3fade9d6427a0fd859bce2452a69b4b456812e4 | 93ab050518092de3a433b03744d09b0b49b541a6 | /iniciante/Mundo 03/Exercícios Corrigidos/Exercício 086.py | 025478d7b517f7bbf55da3cbbcb5478b7fce08ae | [
"MIT"
] | permissive | ggsant/pyladies | 1e5df8772fe772f8f7d0d254070383b9b9f09ec6 | 37e11e0c9dc2fa2263ed5b42df5a395169408766 | refs/heads/master | 2023-01-02T11:49:44.836957 | 2020-11-01T18:36:43 | 2020-11-01T18:36:43 | 306,947,105 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | """
EXERCÍCIO 086: Matriz em Python
Crie um programa que crie uma matriz de dimensão 3x3 e preencha com valores lidos pelo teclado.
0 [_][_][_]
1 [_][_][_]
2 [_][_][_]
0 1 2
No final, mostre a matriz na tela, com a formatação correta.
"""
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
for l in range(0, 3):
for c in range(0, 3):
matriz[l][c] = int(input(f'Digite um valor para [{l}, {c}]: '))
print('-=' * 30)
for l in range(0, 3):
for c in range(0, 3):
print(f'[{matriz[l][c]:^5}]', end='')
print()
| [
"61892998+ggsant@users.noreply.github.com"
] | 61892998+ggsant@users.noreply.github.com |
6f75e2758605c523dbba4a1707b229552ae59f5c | 877866345067cc6e356bcaaaa29a27b335cc4095 | /bulmaio_jinja2/sidebar/page/models.py | ea57b04810ae78a2ca1400d9086f386d6aa56ba2 | [
"MIT"
] | permissive | pauleveritt/bulmaio_jinja2 | 28a6e3da3dd577075cd9e658a6e7d7eace765fd8 | 97e09e539469337e05aa6c7a268264f2ca523da6 | refs/heads/master | 2020-03-23T16:51:11.894880 | 2018-10-04T19:10:11 | 2018-10-04T19:10:11 | 141,830,196 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | from typing import List, Optional
from bulmaio_jinja2.author.models import Author
from bulmaio_jinja2.base_model import CustomBaseModel
class SidebarPublished(CustomBaseModel):
    """Publication info shown in a page sidebar; every field is optional."""
    # NOTE(review): annotations tightened from ``str = None`` to
    # ``Optional[str] = None``; equivalent under pydantic v1's implicit-
    # optional rule -- confirm CustomBaseModel is pydantic-based.
    published_date: Optional[str] = None
    published_time: Optional[str] = None
    author: Optional[Author] = None
class SidebarPrevNextItem(CustomBaseModel):
    """A single prev/next navigation link (target URL and display title)."""
    href: str
    title: str
class SidebarPrevNext(CustomBaseModel):
    """Previous/next navigation pair; either side may be absent."""
    # NOTE(review): Optional[...] added to match the None defaults; the field
    # name ``next`` intentionally mirrors the UI term despite shadowing the
    # builtin.
    prev: Optional[SidebarPrevNextItem] = None
    next: Optional[SidebarPrevNextItem] = None
class SidebarReference(CustomBaseModel):
    """One reference link in a sidebar reference group."""
    label: str
    href: str
class SidebarReferenceGroup(CustomBaseModel):
    """A typed group of reference links (``reftype`` names the group kind)."""
    reftype: str
    entries: List[SidebarReference]
class SidebarReferences(CustomBaseModel):
    """All reference groups for a page; defaults to no groups."""
    # NOTE(review): the mutable [] default is safe if CustomBaseModel is
    # pydantic (defaults are copied per instance) -- confirm; with a plain
    # class it would be shared state.
    entries: List[SidebarReferenceGroup] = []
class PageSidebar(CustomBaseModel):
    """Aggregate model for everything rendered in a page's sidebar."""
    # NOTE(review): Optional[...] added to match the None default.
    published: Optional[SidebarPublished] = None
    prev_next: SidebarPrevNext
    references: SidebarReferences
| [
"pauleveritt@me.com"
] | pauleveritt@me.com |
469fe4540152fbd03607bf2825896ac72329ff43 | 5c1746c4ae9f5eb4c94c9b3a70a4d3feb966ceda | /pcapkit/vendor/reg/__init__.py | 31108b29a8037d07f9367c776cb95fa3d91292ad | [
"BSD-3-Clause"
] | permissive | JarryShaw/PyPCAPKit | 8b53c76cf54f2ef1a9e4d0a7aeb3d52605dc1d5a | a6fe49ec58f09e105bec5a00fb66d9b3f22730d9 | refs/heads/main | 2023-08-29T12:49:58.611378 | 2023-08-28T14:05:43 | 2023-08-28T14:05:43 | 109,791,841 | 204 | 29 | BSD-3-Clause | 2023-09-11T17:09:06 | 2017-11-07T05:41:56 | Python | UTF-8 | Python | false | false | 1,411 | py | # -*- coding: utf-8 -*-
# pylint: disable=unused-import
"""Protocol Type Registry Vendor Crawlers
============================================
.. module:: pcapkit.vendor.reg
This module contains all vendor crawlers of protocol type registry
implementations. Available enumerations include:
.. list-table::
* - :class:`LINKTYPE <pcapkit.vendor.reg.linktype.LinkType>`
- Link-Layer Header Type Values [*]_
* - :class:`ETHERTYPE <pcapkit.vendor.reg.ethertype.EtherType>`
- Ethertype IEEE 802 Numbers [*]_
* - :class:`TRANSTYPE <pcapkit.vendor.reg.transtype.TransType>`
- Transport Layer Protocol Numbers [*]_
* - :class:`APPTYPE <pcapkit.vendor.reg.apptype.AppType>`
- Application Layer Protocol Numbers (Service Name and Transport Protocol Port Number Registry) [*]_
.. [*] http://www.tcpdump.org/linktypes.html
.. [*] https://www.iana.org/assignments/ieee-802-numbers/ieee-802-numbers.xhtml#ieee-802-numbers-1
.. [*] https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml#protocol-numbers-1
.. [*] https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?
"""
from pcapkit.vendor.reg.apptype import AppType
from pcapkit.vendor.reg.ethertype import EtherType
from pcapkit.vendor.reg.linktype import LinkType
from pcapkit.vendor.reg.transtype import TransType
# Explicit public API of this subpackage: the four registry vendor crawlers.
__all__ = ['EtherType', 'LinkType', 'TransType', 'AppType']
| [
"jarryshaw@icloud.com"
] | jarryshaw@icloud.com |
d5b1438af7bbd961f911a25b4daf23155058dadb | 6296a2a73121271ae01a644b4bcd82c1aaff1899 | /worksheets/helpers/ex03.py | bfa7d8bbf3974b724ce44779140499d6eab43b15 | [
"MIT"
] | permissive | widdowquinn/Teaching-EMBL-Plant-Path-Genomics | 570de0234a9bf7a2dfc45d834cb775b3c837b314 | 5cb03893ab145ee51891ccddcef9ebffe3f9bb1e | refs/heads/master | 2021-01-18T23:27:08.243564 | 2017-11-16T10:02:18 | 2017-11-16T10:02:18 | 21,601,799 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,121 | py | # ex03.py
#
# Functions and data useful in worksheet 3 of the Plant and Pathogen
# Bioinformatics course at EMBL
import pylab
def p_correct_given_pos(sens, fpr, b):
    """Bayesian posterior P(correct | positive prediction).

    Given the classifier sensitivity ``sens``, false positive rate ``fpr``
    and the base rate ``b`` of positive examples in the input set, returns

        P(effector | positive) = sens*b / (sens*b + fpr*(1 - b))

    i.e. the true-positive probability mass over all positive calls.
    """
    assert 0 <= sens <= 1, "Sensitivity must be in range [0,1]"
    assert 0 <= fpr <= 1, "FPR must be in range [0,1]"
    true_pos_mass = sens * b
    false_pos_mass = fpr * (1 - b)
    return true_pos_mass / (true_pos_mass + false_pos_mass)
def plot_prob_effector(sens, fpr, xmax=1, baserate=0.1):
    """Plots a line graph of P(effector|positive test) against
    the baserate of effectors in the input set to the classifier.
    The baserate argument draws an annotation arrow
    indicating P(pos|+ve) at that baserate

    :param sens: classifier sensitivity, in [0, 1]
    :param fpr: classifier false positive rate, in [0, 1]
    :param xmax: right-hand limit of the x axis, in [0.1, 1]
    :param baserate: baserate to annotate, in [0.01, 1]
    """
    assert 0.1 <= xmax <= 1, "Max x axis value must be in range [0,1]"
    assert 0.01 <= baserate <= 1, "Baserate annotation must be in range [0,1]"
    # Sample the posterior curve densely across the x range.
    baserates = pylab.arange(0, 1.05, xmax * 0.005)
    probs = [p_correct_given_pos(sens, fpr, b) for b in baserates]
    pylab.plot(baserates, probs, 'r')
    pylab.title("P(eff|pos) vs baserate; sens: %.2f, fpr: %.2f" % (sens, fpr))
    pylab.ylabel("P(effector|positive)")
    pylab.xlabel("effector baserate")
    pylab.xlim(0, xmax)
    pylab.ylim(0, 1)
    # Add annotation arrow
    xpos, ypos = (baserate, p_correct_given_pos(sens, fpr, baserate))
    if baserate < xmax:
        # Choose a label position clear of the plot edge and the curve point.
        if xpos > 0.7 * xmax:
            xtextpos = 0.05 * xmax
        else:
            xtextpos = xpos + (xmax-xpos)/5.
        if ypos > 0.5:
            ytextpos = ypos - 0.05
        else:
            ytextpos = ypos + 0.05
        pylab.annotate('baserate: %.2f, P(pos|+ve): %.3f' % (xpos, ypos),
                       xy=(xpos, ypos),
                       xytext=(xtextpos, ytextpos),
                       arrowprops=dict(facecolor='black', shrink=0.05))
    else:
        # Annotation point lies beyond the x range: plain text label instead.
        pylab.text(0.05 * xmax, 0.95, 'baserate: %.2f, P(pos|+ve): %.3f' % \
                   (xpos, ypos))
"leighton.pritchard@hutton.ac.uk"
] | leighton.pritchard@hutton.ac.uk |
649893fe6b57e5ff6de93d3ae907446cf0b4c8ad | aa6c1bd093eddea65fb2f4ccc2a47020bb512a47 | /swimprotocol/address.py | e50be239ae4e81eab4e496065ac81a6f89ce33bf | [
"MIT"
] | permissive | chlin501/swim-protocol | 162606070388432ae616689d0dcd0e20f796f854 | 6f2cd3d4d4d35b5ea2a0060d225c6c469d7642ae | refs/heads/main | 2023-04-21T23:59:08.124099 | 2021-05-15T20:44:31 | 2021-05-15T20:44:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,040 | py |
from __future__ import annotations
from dataclasses import dataclass
from typing import Final, Optional
__all__ = ['Address', 'AddressParser']
@dataclass(frozen=True, order=True)
class Address:
    """Manages an address for socket connections.

    Args:
        host: The address hostname string.
        port: The address port number.
    """
    # Hostname component of the address.
    host: str
    # Port number component of the address.
    port: int

    @classmethod
    def get(cls, addr: tuple[str, int]) -> Address:
        """Return an :class:`Address` from a ``(host, port)`` tuple.

        Args:
            addr: The address tuple from :mod:`socket` functions.
        """
        host, port = addr[0], addr[1]
        return cls(host, port)

    def __str__(self) -> str:
        # Render in the conventional "host:port" form.
        return f'{self.host}:{self.port}'
class AddressParser:
    """Manages the defaults to use when parsing an address string.

    Args:
        address_type: Override the :class:`Address` implementation.
        default_host: The default hostname, if missing from the address string
            (e.g. ``:1234``).
        default_port: The default port number, if missing from the address
            string (e.g. ``example.tld``).
    """

    def __init__(self, address_type: type[Address] = Address, *,
                 default_host: Optional[str] = None,
                 default_port: Optional[int] = None) -> None:
        super().__init__()
        self.address_type: Final = address_type
        self.default_host: Final = default_host
        self.default_port: Final = default_port

    def parse(self, address: str) -> Address:
        """Parse *address* into an address object, applying the defaults.

        Raises:
            ValueError: The address cannot be completed with the configured
                defaults (also propagated from ``int()`` for a bad port).
        """
        host, sep, port = address.rpartition(':')
        if sep != ':':
            # BUGFIX: with no separator, str.rpartition() returns
            # ('', '', address) -- the bare hostname ends up in ``port``,
            # not ``host``, so it must be used as the hostname here.
            if self.default_port is not None:
                return self.address_type(port, self.default_port)
        else:
            default_host = self.default_host
            if host:
                return self.address_type(host, int(port))
            elif default_host is not None:
                return self.address_type(default_host, int(port))
        raise ValueError(address)
| [
"ian@icgood.net"
] | ian@icgood.net |
724835a373b84b4476b57505c51a85b02c24ce3f | 89812f6ab80008222bcf93a9b2ca614a60291738 | /river/metrics/cluster/sd_validation.py | 93c41b6f2291198d07c65091eed03baab201b2d7 | [
"BSD-3-Clause"
] | permissive | Pandinosaurus/river | 47135f5b7e612f83d96f4a50f9d746dec834b16d | 09a24d35c1f548239c54c1244973241bfe5c4edc | refs/heads/master | 2023-08-27T21:08:12.553115 | 2021-11-09T22:10:17 | 2021-11-09T22:10:17 | 409,610,355 | 0 | 0 | BSD-3-Clause | 2021-11-10T04:13:30 | 2021-09-23T13:47:27 | Python | UTF-8 | Python | false | false | 4,665 | py | import math
from river import stats, utils
from . import base
class SD(base.InternalMetric):
    """The SD validity index (SD).
    The SD validity index (SD) [^1] is a more recent clustering validation measure. It is composed of
    two terms:
    * Scat(NC) stands for the scattering within clusters,
    * Dis(NC) stands for the dispersion between clusters.
    Like DB and SB, SD measures the compactness with variance of clustered objects and separation
    with distance between cluster centers, but uses them in a different way. The smaller the value
    of SD, the better.
    In the original formula for SD validation index, the ratio between the maximum and the actual
    number of clusters is taken into account. However, due to the fact that metrics are updated in
    an incremental fashion, this ratio will be automatically set to default as 1.
    Examples
    --------
    >>> from river import cluster
    >>> from river import stream
    >>> from river import metrics
    >>> X = [
    ...     [1, 2],
    ...     [1, 4],
    ...     [1, 0],
    ...     [4, 2],
    ...     [4, 4],
    ...     [4, 0],
    ...     [-2, 2],
    ...     [-2, 4],
    ...     [-2, 0]
    ... ]
    >>> k_means = cluster.KMeans(n_clusters=3, halflife=0.4, sigma=3, seed=0)
    >>> metric = metrics.cluster.SD()
    >>> for x, _ in stream.iter_array(X):
    ...     k_means = k_means.learn_one(x)
    ...     y_pred = k_means.predict_one(x)
    ...     metric = metric.update(x, y_pred, k_means.centers)
    >>> metric
    SD: 2.339016
    References
    ----------
    [^1]: Halkidi, M., Vazirgiannis, M., & Batistakis, Y. (2000). Quality Scheme Assessment in the
          Clustering Process. Principles Of Data Mining And Knowledge Discovery, 265-276.
          DOI: 10.1007/3-540-45372-5_26
    """

    def __init__(self):
        super().__init__()
        # NOTE(review): not referenced anywhere in this class -- kept for
        # backward compatibility; confirm no external code relies on it.
        self._center_all_points = {}
        # Per-feature running variance over the whole stream.
        self._overall_variance = {}
        # Per-cluster, per-feature running variance.
        self._cluster_variance = {}
        # Latest cluster centers seen by `update`/`revert`.
        self._centers = {}
        self._initialized = False

    @staticmethod
    def _calculate_dispersion_nc(centers):
        """Compute the Dis(NC) term from the cluster centers.

        Assumes ``centers`` maps labels ``0..len(centers)-1`` to center dicts.
        """
        min_distance_clusters = math.inf
        max_distance_clusters = -math.inf
        sum_inverse_distances = 0
        n_clusters = len(centers)
        for i in range(n_clusters):
            for j in range(i + 1, n_clusters):
                distance_ij = math.sqrt(
                    utils.math.minkowski_distance(centers[i], centers[j], 2)
                )
                if distance_ij == 0:
                    # Coinciding centers: the inverse-distance sum diverges.
                    # (Previously this raised an unhandled ZeroDivisionError.)
                    return math.inf
                if distance_ij > max_distance_clusters:
                    max_distance_clusters = distance_ij
                if distance_ij < min_distance_clusters:
                    min_distance_clusters = distance_ij
                sum_inverse_distances += 1 / distance_ij
        try:
            return (
                max_distance_clusters / min_distance_clusters
            ) * sum_inverse_distances
        except ZeroDivisionError:
            return math.inf

    @staticmethod
    def _norm(x):
        """Euclidean norm of a feature dict, i.e. its distance to the origin."""
        origin = {i: 0 for i in x}
        return math.sqrt(utils.math.minkowski_distance(x, origin, 2))

    def update(self, x, y_pred, centers, sample_weight=1.0):
        """Fold one sample ``x`` assigned to cluster ``y_pred`` into the metric."""
        if not self._initialized:
            # Lazily create one variance tracker per feature.
            self._overall_variance = {i: stats.Var() for i in x}
            self._initialized = True
        if y_pred not in self._cluster_variance:
            self._cluster_variance[y_pred] = {i: stats.Var() for i in x}
        for i in x:
            self._cluster_variance[y_pred][i].update(x[i], w=sample_weight)
            self._overall_variance[i].update(x[i], w=sample_weight)
        self._centers = centers
        return self

    def revert(self, x, y_pred, centers, sample_weight=1.0):
        """Undo a previous `update` by applying a negative weight."""
        for i in x:
            self._overall_variance[i].update(x[i], w=-sample_weight)
            self._cluster_variance[y_pred][i].update(x[i], w=-sample_weight)
        self._centers = centers
        return self

    def get(self):
        dispersion_nc = self._calculate_dispersion_nc(self._centers)
        overall_variance = {
            i: self._overall_variance[i].get() for i in self._overall_variance
        }
        cluster_variance = {}
        for i in self._cluster_variance:
            cluster_variance[i] = {
                j: self._cluster_variance[i][j].get() for j in self._cluster_variance[i]
            }
        # BUGFIX: the division below is what can raise ZeroDivisionError
        # (e.g. zero overall variance); the original try/except wrapped only
        # the final addition, which can never raise, so the error escaped.
        try:
            overall_norm = self._norm(overall_variance)  # loop-invariant
            scat_nc = 0
            for i in cluster_variance:
                scat_nc += self._norm(cluster_variance[i]) / overall_norm
        except ZeroDivisionError:
            return math.inf
        return scat_nc + dispersion_nc

    @property
    def bigger_is_better(self):
        # SD sums scatter and dispersion: smaller values indicate better clustering.
        return False
| [
"noreply@github.com"
] | Pandinosaurus.noreply@github.com |
2b9506dfc10e5e9c3b64a86ebcfe9e8106bd68fc | ba54b70f93fe7f9d114623d76b1ad3f88309d66f | /uimg/migrations/0001_initial.py | 70d0c465b8a1a916e9e82b8faabf34ef9bfaf92e | [] | no_license | loobinsk/newprj | 9769b2f26092ce7dd8612fce37adebb307b01b8b | c6aa6a46973fb46375f4b05a86fe76207a8ae16d | refs/heads/master | 2023-05-07T00:28:44.242163 | 2021-05-25T08:22:05 | 2021-05-25T08:22:05 | 370,617,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,238 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
import uimg.models
class Migration(migrations.Migration):
    # Initial schema migration: creates the `UserImage` model/table.
    # Auto-generated by Django; do not edit behaviorally once applied.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='UserImage',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Image stored under a per-user path (uimg.models.get_user_image_path).
                # The byte-string verbose_names are UTF-8 encoded Russian labels:
                # "Изображение" (image), "Дата" (date), "Описание" (description).
                ('image', models.ImageField(upload_to=uimg.models.get_user_image_path, verbose_name=b'\xd0\x98\xd0\xb7\xd0\xbe\xd0\xb1\xd1\x80\xd0\xb0\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5')),
                # NOTE(review): `default` is the *fixed* timestamp captured when
                # this migration was generated, not "now" at insert time --
                # looks unintended, but applied migrations must stay as-is.
                ('date', models.DateTimeField(default=datetime.datetime(2015, 6, 11, 15, 25, 9, 540983), verbose_name=b'\xd0\x94\xd0\xb0\xd1\x82\xd0\xb0')),
                ('desc', models.TextField(default=b'', max_length=250, null=True, verbose_name=b'\xd0\x9e\xd0\xbf\xd0\xb8\xd1\x81\xd0\xb0\xd0\xbd\xd0\xb8\xd0\xb5', blank=True)),
            ],
            options={
                # Unicode-escaped Russian verbose names ("Изображение"/"Изображения").
                'verbose_name': '\u0418\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0435',
                'verbose_name_plural': '\u0418\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u044f',
            },
        ),
    ]
| [
"root@bazavashdom.ru"
] | root@bazavashdom.ru |
06d353baa11398faddd39afac93ccc41d6c7e529 | 941cb76fde4fed6a85d804421f9deee5934a6684 | /yolanda/services/urls.py | cd88bb79eddbeb67be16a8f841f4e184318be6b8 | [] | no_license | ingenieroariel/yolanda | 0e27346afc96374e8c8f29af13b0e7218b2670f6 | b8038f04d32847ed74bdc44e9ff4f694d7bb0637 | refs/heads/master | 2021-01-13T01:59:22.243342 | 2013-12-19T12:00:10 | 2013-12-19T12:00:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | from django.conf.urls.defaults import patterns, url
from yolanda.services.views import DigitalGlobeProxy
# Single route: any path starting with "dg" (trailing slash optional) is
# served by the DigitalGlobe proxy view; reversible by the name "dg_service".
# Uses the legacy `patterns()` URL style from django.conf.urls.defaults.
urlpatterns = patterns("yolanda.services.views",
    url(r"^dg/?", DigitalGlobeProxy.as_view(), name="dg_service"),
)
| [
"garnertb@gmail.com"
] | garnertb@gmail.com |
0fdb42f90603cc164bd7435a2bc8f96429a8aa96 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_9/odtjoh001/question3.py | db78f68e48dd183436aca1d79122ba904d28af82 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | """Program to check if sudoku grid is valid
John Odetokun
14 May 2014"""
# Validate a 9x9 Sudoku grid read from stdin.
# Input : nine lines, each a string of nine characters (one grid row).
# Output: "Sudoku grid is valid" or "Sudoku grid is not valid".

# Read the nine rows and split each one into a list of characters.
# BUGFIX: the original appended each input row to `grid` nine times
# (81 rows total), so every later check only ever compared copies of the
# first row.  It also shadowed the builtin name `list`.
rows = []
for _ in range(9):
    rows.append(input())
grid = [[rows[r][c] for c in range(9)] for r in range(9)]

n = 0  # number of duplicated values detected

# Horizontal and vertical checks: compare every pair of cells within each
# row (grid[a][w] vs grid[a][z]) and each column (grid[w][a] vs grid[z][a]).
for a in range(9):
    for w in range(8):
        for z in range(w + 1, 9):
            if grid[a][w] == grid[a][z] or grid[w][a] == grid[z][a]:
                n += 1

if n != 0:
    print("Sudoku grid is not valid")
else:
    # Check each of the nine 3x3 sub-grids for duplicates.
    for j in range(3, 10, 3):
        for k in range(3, 10, 3):
            arr = [grid[x][y] for x in range(j - 3, j) for y in range(k - 3, k)]
            for r in range(9):
                # BUGFIX: the original inner loop used range(r+1, 8) and
                # therefore never compared against the last cell arr[8].
                for t in range(r + 1, 9):
                    if arr[r] == arr[t]:
                        n += 1
    if n == 0:
        print("Sudoku grid is valid")
    else:
        print("Sudoku grid is not valid")
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
523339f3f723af86067fda3b7161b1ad59725180 | 2a6412a9359a1df5f8f12e319e73b9e4e46fd64c | /code/PythonINIAD/IPv4Converter.py | 1f3f0dd3fefe4e3730ff7d3b00238580eff77027 | [] | no_license | danganhvu1998/myINIAD | 504d1147a02f12e593f30e369daf82f85aa01bfd | 01547673dd3065efb6c7cc8db77ec93a5a4f5d98 | refs/heads/master | 2022-03-17T12:58:34.647229 | 2022-02-08T06:34:19 | 2022-02-08T06:34:19 | 143,675,719 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,667 | py | import re
def biToDe(biStr):
    # Convert a binary digit string (e.g. "1010") to its decimal value.
    total = 0
    for position, digit in enumerate(reversed(biStr)):
        total += int(digit) << position
    return total
def deToBi(deStr):
    # Convert a decimal octet string (0-255) to its 8-bit binary string.
    remaining = int(deStr)
    bits = []
    for shift in range(7, -1, -1):
        weight = 2 ** shift
        if remaining >= weight:
            bits.append("1")
            remaining -= weight
        else:
            bits.append("0")
    return "".join(bits)
def biAddressToDeAddess(biAddress):
    # Convert a dotted binary address ("1000.0001...") to dotted decimal.
    parts = re.findall("[0-9]+", biAddress)
    return ".".join(str(biToDe(part)) for part in parts)
def deAddressToBiAddess(biAddress):
    # Convert a dotted-decimal IPv4 string to dotted 8-bit binary notation.
    octets = re.findall("[0-9]+", biAddress)
    return ".".join(str(deToBi(octet)) for octet in octets)
def announce(biAddress, text):
    # Print a labelled address in both binary and decimal notation,
    # framed by a decorative border.
    border = "*********"
    print(border)
    print(text, biAddress)
    print(text, biAddressToDeAddess(biAddress))
    print(border)
    print()
def networkAddress(biAddress, networkPart):
    # Network address: keep the first `networkPart` bits, zero the host bits.
    bits_seen = 0
    chars = []
    for ch in biAddress:
        if ch == ".":
            chars.append(ch)
            continue
        bits_seen += 1
        chars.append(ch if bits_seen <= networkPart else "0")
    announce("".join(chars), "Network Address")
def broadcastAddress(biAddress, networkPart):
    # Broadcast address: keep the first `networkPart` bits, set host bits to 1.
    bits_seen = 0
    chars = []
    for ch in biAddress:
        if ch == ".":
            chars.append(ch)
            continue
        bits_seen += 1
        chars.append(ch if bits_seen <= networkPart else "1")
    announce("".join(chars), "Broadcast Address")
def subnetMaskAddress(biAddress, networkPart):
    # Subnet mask: first `networkPart` bits are 1, all remaining bits are 0
    # (the input's own bit values are ignored, only its dot layout is reused).
    bits_seen = 0
    chars = []
    for ch in biAddress:
        if ch == ".":
            chars.append(ch)
            continue
        bits_seen += 1
        chars.append("1" if bits_seen <= networkPart else "0")
    announce("".join(chars), "Subnet mask Address")
def __main__():
    # Interactive entry point: read an IPv4 address (binary or decimal,
    # optionally suffixed with "/prefix") and print the derived addresses.
    raw = input("Input IPv4 Address (In any format is okay):")
    prefix_len = -1
    # Split off the network-part length, if one was given.
    if "/" in raw:
        ip_text = re.findall("(.*)/", raw)[0]
        prefix_len = int(re.findall("/(.*)", raw)[0])
    else:
        ip_text = raw
    # Heuristic: a dotted-binary address is longer than 32 characters,
    # a dotted-decimal one never is.
    if len(ip_text) > 32:
        ip_binary = ip_text
    else:
        ip_binary = deAddressToBiAddess(ip_text)
    announce(ip_binary, "IPv4 Address")
    if prefix_len >= 0:
        networkAddress(ip_binary, prefix_len)
        broadcastAddress(ip_binary, prefix_len)
        subnetMaskAddress(ip_binary, prefix_len)
__main__() | [
"danganhvu1998@gmail.com"
] | danganhvu1998@gmail.com |
aaeda1c90c18d3d74453e7657b0af315e5024ae3 | f71f44d5ddc17e3c30e2bfd7988e5111a55a8b9a | /diplom/source/src/lib/interpolation/__init__.py | 375be80432c4755fbe11bc0dee04cceaae888a25 | [] | no_license | Yashchuk/diplom | 5ed1998d4b3d1fe568599973ec134f7ca13e8417 | 4029ed91ce93a41af44f03bcce365fdaecb64a37 | refs/heads/master | 2021-01-15T17:02:03.723007 | 2014-01-21T13:42:48 | 2014-01-21T13:42:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,813 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# file interpolation/__init__.py
#
#############################################################################
# Copyright (c) 2013 by Panagiotis Mavrogiorgos
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name(s) of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AS IS AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#############################################################################
#
# @license: http://opensource.org/licenses/BSD-3-Clause
# @authors: see AUTHORS.txt
""" A package containing Interpolation related classes. """
# Package imports
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
# Version
__major__ = 0  # for major interface/format changes
__minor__ = 1  # for minor interface/format changes
__release__ = 0  # for tweaks, bug-fixes, or development
# package information
__package_name__ = "interpolation"
__version__ = "%d.%d.%d" % (__major__, __minor__, __release__)
__license__ = "BSD"
# First sentence of the module docstring doubles as the short description.
__description__ = __doc__.split(".")[0]
__url__ = "http://github.com/pmav99/%s" % __package_name__
__download_url__ = "http://github.com/pmav99/%s/downloads" % __package_name__
__author__ = "Panagiotis Mavrogiorgos"
__author_email__ = "gmail pmav99"  # appears to be an obfuscated gmail address
# Package imports
from .linear import LinearInterpolation
from .bilinear import BilinearInterpolation
__all__ = ["LinearInterpolation", "BilinearInterpolation"]
| [
"andrew.freelance@i.ua"
] | andrew.freelance@i.ua |
896eb2ea8561b2a2c07d720ca69366a09fe1d5ac | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03418/s781538433.py | 5d531f3c4ae6cffe8a4ee7db60e768bb5e17a08c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | from fractions import gcd
def NK():
    # Read one whitespace-separated line of integers from stdin; returns a
    # lazy `map` object (callers unpack it, e.g. ``n, k = NK()``).
    tokens = input().split()
    return map(int, tokens)
def main():
    # Same arithmetic as the original: accumulate the count contributed by
    # each candidate divisor b in [1, n] and print the total.
    n, k = NK()
    total = 0
    for b in range(1, n + 1):
        full, rem = divmod(n, b)
        total += full * max(b - k, 0)
        total += max(rem - max(k - 1, 0), 0)
    print(total)
if __name__ == "__main__":
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
9fc75b632c2f6956fff01f5c2be76f1e9601666d | 1e8c805e96bc854b5acf4282c47c16ce6e1980e2 | /examples/Laplace_equation_1D.py | 173ac8080a56f7f3cc23dd09d86dfdedf85d3146 | [] | no_license | LaplaceKorea/DWave-Quantum-Annealing | d0a3058ee6b4e1e5163be28fa5dfb77e0f85c51f | 16f934e995b72aaf618480aeaf3f09dd07c2ff47 | refs/heads/master | 2023-02-22T08:49:28.588459 | 2021-01-22T02:58:38 | 2021-01-22T02:58:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,251 | py | """Solve 1D Laplace's equation"""
# Import packages
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from neal import SimulatedAnnealingSampler
from dwave.system import EmbeddingComposite, DWaveSampler
from dwaveutils import bl_lstsq
# Define function
def get_laplace_1D(N, num_bits,
                   fixed_point=0, exact_x=True, random_seed=None):
    """Get information about 1D Laplace's equation.

    Parameters
    ----------
    N : int
        Size of the square system matrix ``A`` (number of unknowns).
    num_bits : int
        Number of bits per unknown (includes the sign bit).
    fixed_point : int, optional
        Fixed-point position forwarded to ``bl_lstsq.get_bit_value``.
    exact_x : bool, optional
        If True, build ``x`` from a random binary vector ``q`` so it is
        exactly representable with the chosen bits; otherwise draw a
        random real-valued ``x``.
    random_seed : int or None, optional
        Seed for NumPy's default RNG (None -> nondeterministic).

    Returns
    -------
    dict
        Keys ``'A'``, ``'x'``, ``'b'``, ``'A_discrete'``, ``'bit_value'``.
    """
    # number of predictor and number of response
    num_predictor_discrete = num_bits * N
    num_response = N
    # matrix `A`: 1D discrete Laplacian (tridiagonal 1, -2, 1)
    A = (np.eye(num_response, k=-1)
         - 2 * np.eye(num_response, k=0)
         + np.eye(num_response, k=1))
    # set the bit value to discretize the actual value as a fixed point
    bit_value = bl_lstsq.get_bit_value(num_bits, fixed_point=fixed_point)
    # discretized version of matrix `A`
    A_discrete = bl_lstsq.discretize_matrix(A, bit_value)
    # default_rng(None) is exactly the unseeded case, so no branching needed
    rng = np.random.default_rng(random_seed)
    if exact_x:
        # binary vector `q`
        q = rng.choice([0, 1], size=num_predictor_discrete)
        # vector `x`
        # BUGFIX: `q2x` is not defined at module level; it lives in
        # `bl_lstsq` (cf. the other call sites in this file) -- the bare
        # call raised NameError whenever exact_x=True.
        x = bl_lstsq.q2x(q, bit_value)
    else:
        # vector `x`: random signs times random magnitudes scaled by 2**fixed_point
        x = (rng.choice([-1, 1], size=num_response)
             * (2 ** fixed_point) * rng.random(num_response))
    # calculate vector `b`
    b = A @ x
    output = {
        'A': A,
        'x': x,
        'b': b,
        'A_discrete': A_discrete,
        'bit_value': bit_value
    }
    return output
# Setting variables
# size of symmetric matrix `A`
N = 3
# number of bits (include sign bit)
num_bits = 4
# n-vector bit value is defined by
# [-2**(fixed_point), 2**(fixed_point-1), ..., 2**(fixed_point-n)]
fixed_point = 0
# whether x can be perfectly discrete
exact_x = False
# fixed seed so the random problem instance is reproducible
random_seed = 19937
# scaling factor for QUBO
eq_scaling_val = 1/8
# number of reads for Simulated annealing (SA) or Quantum annealing (QA)
num_reads = 1000
# sampler type must be one of {'SA', 'QA'}
sampler_type = 'SA'
# setup A, x, b, A_discrete, bit_value
output = get_laplace_1D(
    N, num_bits,
    fixed_point=fixed_point, exact_x=exact_x, random_seed=random_seed
)
A = output['A']
true_x = output['x']
true_b = output['b']
A_discrete = output['A_discrete']
bit_value = output['bit_value']
# Solve A*x=b by `numpy.linalg.lstsq` (classical reference solution)
np_x = np.linalg.lstsq(A, true_b, rcond=None)[0]
# Solve A_discrete*q=b problem as BQM optimization
# through simulated annealing or quantum annealing
Q = bl_lstsq.get_qubo(A_discrete, true_b, eq_scaling_val=eq_scaling_val)
if sampler_type == 'QA':
    # Quantum annealing on real QPU hardware; falls back to simulated
    # annealing when no QPU is reachable.
    try:
        sampler = EmbeddingComposite(DWaveSampler(solver={'qpu': True}))
        _sampler_args = {}
        if 'num_reads' in sampler.parameters:
            _sampler_args['num_reads'] = num_reads
        if 'answer_mode' in sampler.parameters:
            _sampler_args['answer_mode'] = 'raw'
        sampleset = sampler.sample_qubo(Q, **_sampler_args)
    except ValueError:
        warnings.warn('Cannot access QPU, use \
SimulatedAnnealingSampler instead.')
        sampler = SimulatedAnnealingSampler()
        sampleset = sampler.sample_qubo(Q, num_reads=num_reads)
elif sampler_type == 'SA':
    sampler = SimulatedAnnealingSampler()
    sampleset = sampler.sample_qubo(Q, num_reads=num_reads)
else:
    raise(ValueError("The sampler_type is wrong, \
please enter 'SA' or 'QA'"))
# Solve A_discrete*q=b by brute force (exhaustive search over all q)
# Warning: this may take a lot of time!
best_q, best_x, min_norm = bl_lstsq.bruteforce(A_discrete, true_b, bit_value)
# Prepare for showing results and plotting
# convert sampleset and its aggregate version to dataframe
sampleset_pd = sampleset.to_pandas_dataframe()
sampleset_pd_agg = sampleset.aggregate().to_pandas_dataframe()
num_states = len(sampleset_pd_agg)
num_b_entry = len(true_b)
num_x_entry = len(true_x)
num_q_entry = A_discrete.shape[1]
# concatenate `sampleset_pd` and `x_at_each_read`
# (each read's binary sample is reshaped per-unknown and projected onto
# `bit_value` to recover that read's real-valued x)
x_at_each_read = pd.DataFrame(
    np.row_stack(
        [(sampleset_pd.iloc[i][:num_q_entry]).values.reshape(
            (num_x_entry, -1)) @ bit_value
         for i in range(num_reads)]
    ),
    columns=['x' + str(i) for i in range(num_x_entry)]
)
sampleset_pd = pd.concat([sampleset_pd, x_at_each_read], axis=1)
sampleset_pd.rename(
    columns=lambda c: c if isinstance(c, str) else 'q'+str(c),
    inplace=True
)
# concatenate `sampleset_pd_agg` and `x_at_each_state`
x_at_each_state = pd.DataFrame(
    np.row_stack(
        [(sampleset_pd_agg.iloc[i][:num_q_entry]).values.reshape(
            (num_x_entry, -1)) @ bit_value
         for i in range(num_states)]
    ),
    columns=['x' + str(i) for i in range(num_x_entry)]
)
sampleset_pd_agg = pd.concat([sampleset_pd_agg, x_at_each_state], axis=1)
sampleset_pd_agg.rename(
    columns=lambda c: c if isinstance(c, str) else 'q'+str(c),
    inplace=True
)
# lowest energy state x and q
lowest_q = sampleset_pd_agg.sort_values(
    'energy').iloc[0, :num_q_entry].values
lowest_x = bl_lstsq.q2x(lowest_q, bit_value)
# frequently occurring x and q
frequent_q = sampleset_pd_agg.sort_values(
    'num_occurrences', ascending=False).iloc[0, :num_q_entry].values
frequent_x = bl_lstsq.q2x(frequent_q, bit_value)
# calculate expected x from x (occurrence-weighted average over states)
expected_x = sampleset_pd_agg.apply(
    lambda row: row.iloc[-num_x_entry:]
    * (row.num_occurrences / num_reads),
    axis=1
).sum().values
# calculate expected x from q (occurrence-weighted majority vote per bit)
tmp_q = sampleset_pd_agg.apply(
    lambda row: row.iloc[:num_q_entry]
    * (row.num_occurrences / num_reads),
    axis=1
).sum() > 0.5  # bool
expected_x_discrete = bl_lstsq.q2x(tmp_q, bit_value)
# Show results
print('='*50)
print('true x:', true_x)
print('true b:', true_b)
print('bit value:', bit_value)
print('='*50)
print('# numpy solver')
print('np_x: ', np_x)
print('b:', A @ np_x)
print('2-norm:', np.linalg.norm(A @ np_x - true_b))
print('='*50)
print('# brute force')
print('best x:', best_x)
print('best q:', best_q)
print('b:', A @ best_x)
print('2-norm:', min_norm)
print('='*50)
print('# Simulated annealing/Quantum annealing')
print('lowest energy state x:')
print(lowest_x)
print('lowest energy state q:')
print(lowest_q)
print('b:', A @ lowest_x)
print('2-norm:', np.linalg.norm(A @ lowest_x - true_b))
print('-'*50)
print('most frequently occurring x:')
print(frequent_x)
print('most frequently occurring q:')
print(frequent_q)
print('b:', A @ frequent_x)
print('2-norm:', np.linalg.norm(A @ frequent_x - true_b))
print('-'*50)
print('expected x (from real value):')
print(expected_x)
print('b:', A @ expected_x)
print('2-norm:', np.linalg.norm(A @ expected_x - true_b))
print('-'*50)
print('expected x (from discrete value):')
print(expected_x_discrete)
print('b:', A @ expected_x_discrete)
print('2-norm:', np.linalg.norm(A @ expected_x_discrete - true_b))
print('-'*50)
print('Sample set:')
print(sampleset_pd_agg.sort_values('num_occurrences', ascending=False))
print('='*50)
# Plot histogram
# (distribution of each recovered x-component over all reads)
axes = sampleset_pd.hist(
    figsize=(8, 6), bins=30,
    column=['x' + str(i) for i in range(num_x_entry)],
)
axes = axes.ravel()
for i in range(num_x_entry):
    ax = axes[i]
    ax.set_ylabel('counts')
plt.tight_layout()
plt.show()
| [
"supon3060@gmail.com"
] | supon3060@gmail.com |
956ba6af02f0df809334dadc3cfd857eb38648f2 | a9ce7176631ebc3bb8188d6aa3c2be09137fb43a | /migrate/0002_add_column_locationrating_hand_aligned.py | 3c4d8d3b146229be084bab19514da5cf584021a0 | [
"MIT"
] | permissive | andrewhead/Search-Task-Analysis | 0081c8c0ad6682c7e4a87aad1af4a57d18287137 | ef73745a760b5c2ec7060488219bb29237c26464 | refs/heads/master | 2020-05-21T20:04:36.299697 | 2016-09-19T00:39:06 | 2016-09-19T00:39:06 | 60,795,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from playhouse.migrate import migrate
from peewee import BooleanField
logger = logging.getLogger('data')
def forward(migrator):
    # Forward migration: add the boolean `hand_aligned` column (default False)
    # to the `locationrating` table via peewee's playhouse migrator.
    migrate(
        migrator.add_column('locationrating', 'hand_aligned', BooleanField(default=False)),
    )
| [
"head.andrewm@gmail.com"
] | head.andrewm@gmail.com |
c765e708dfb60a128b9ad5a48c0653d87f25b641 | aac5982c8dcf26221419086fb90c399b9f4324ef | /DFTB/MolecularIntegrals/hmi_continuum.py | 291a9f0fcdc3962b4c49d702ff72f7bc88311c0f | [] | no_license | by-student-2017/DFTBaby-0.1.0-31Jul2019 | 99184d3fa2976d4e02f7f1bddee97e56526d9365 | 92cb73f1a6472f88588986561349d7f2ad1b1c15 | refs/heads/master | 2022-12-12T00:12:50.449505 | 2020-09-01T21:05:59 | 2020-09-01T21:05:59 | 290,116,049 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,820 | py | #!/usr/bin/env python
from DFTB.MolecularIntegrals import settings
from DFTB.MolecularIntegrals.BasissetFreeDFT import BasissetFreeDFT
import numpy as np
def hmi_continuum(l, m, E):
    """
    compute continuum orbitals of the hydrogen molecular ion H2+

    Parameters
    ----------
    l,m : angular quantum numbers of the asymptotic solution,
          e.g. l=0,m=0 gives an s-orbital and l=1,m=+1 a px-orbital
    E   : energy (in a.u.) of the continuum orbital, E = 1/2 k^2
    """
    # Two protons on the z-axis, separated by the H2^+ bond length (in bohr)
    # and centered on the origin.
    bond_length = 2.0
    atomlist = [
        (1, (0.0, 0.0, -bond_length / 2.0)),
        (1, (0.0, 0.0, +bond_length / 2.0)),
    ]
    # Resolution of the multicenter grids used for the continuum orbitals.
    settings.radial_grid_factor = 120  # controls size of radial grid
    settings.lebedev_order = 25        # controls size of angular grid
    RDFT = BasissetFreeDFT(atomlist, None, charge=+1)

    # One-electron system: both the electron density and the highest
    # occupied orbital of "other" electrons are identically zero.
    def rho(x, y, z):
        return 0 * x

    def homo(x, y, z):
        return 0 * x

    delta, phi = RDFT.solveScatteringProblem(rho, homo, E, l, m)
if __name__ == "__main__":
    # Command-line driver: expects three positional arguments (l, m, E).
    import sys
    import os.path
    args = sys.argv[1:]
    if len(args) < 3:
        usage = """
    Usage:
         %s  l  m  E
    compute the continuum orbital of H2^+ (hydrogen molecular ion)
    Parameters:
       l,m    -  integers, -l <= m <= l, angular quantum numbers
                 of asymptotic solution
       E      -  float, energy of continuum orbital is E = 1/2 k^2
        """ % os.path.basename(sys.argv[0])
        # NOTE: Python 2 print statement -- this file predates Python 3.
        print usage
        exit(-1)
    l = int(args[0])
    m = int(args[1])
    E = float(args[2])
    hmi_continuum(l, m, E)
| [
"studentsctest@gmail.com"
] | studentsctest@gmail.com |
310e89e57e8b49add71c684c7faba652eff81f6b | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/hIndex_20200730204441.py | 7a49df44b99367a612d1fa52a7d722fd611b7328 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | def Hindex(citations):
result = 0
citations.sort()
for i in range(len(citations)-1,0,-1):
cnt = len(citations) -i
if citations[i] >
print('i',i,'cnt',cnt)
Hindex([3,0,6,1,5]) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
36807336601be9e76fa772e435e3ca35fe6b9a9f | 7f523c407d45d116860eff67f079e807f2b53339 | /src/third_party/capstone/bindings/python/capstone/ppc.py | 6ab177db1b88c484534778579eefa4aa0e8e5be7 | [
"MIT",
"BSD-3-Clause",
"NCSA"
] | permissive | 0vercl0k/rp | a352c96bfe3715eb9ce8c5942831123e65289dac | b24e7f58a594aaf0ce3771745bf06862f6ecc074 | refs/heads/master | 2023-08-30T08:03:14.842828 | 2023-08-09T00:41:00 | 2023-08-09T00:41:00 | 3,554,173 | 1,557 | 239 | MIT | 2023-08-09T00:41:02 | 2012-02-26T19:26:33 | C++ | UTF-8 | Python | false | false | 1,321 | py | # Capstone Python bindings, by Nguyen Anh Quynnh <aquynh@gmail.com>
import ctypes
from . import copy_ctypes_list
from .ppc_const import *
# define the API
class PpcOpMem(ctypes.Structure):
    # Memory operand: base register plus signed displacement.
    # NOTE(review): presumably mirrors Capstone's C-side ppc_op_mem struct;
    # field order and types are ABI -- do not reorder.
    _fields_ = (
        ('base', ctypes.c_uint),
        ('disp', ctypes.c_int32),
    )
class PpcOpCrx(ctypes.Structure):
    # Condition-register operand: scale, register and condition code.
    # NOTE(review): field layout must match the C ABI -- do not reorder.
    _fields_ = (
        ('scale', ctypes.c_uint),
        ('reg', ctypes.c_uint),
        ('cond', ctypes.c_uint),
    )
class PpcOpValue(ctypes.Union):
    # Union payload of an operand; which member is valid is determined by
    # the companion `PpcOp.type` tag.
    _fields_ = (
        ('reg', ctypes.c_uint),
        ('imm', ctypes.c_int64),
        ('mem', PpcOpMem),
        ('crx', PpcOpCrx),
    )
class PpcOp(ctypes.Structure):
    # A single PPC instruction operand: a type tag plus a union payload.
    _fields_ = (
        ('type', ctypes.c_uint),
        ('value', PpcOpValue),
    )
    # Convenience accessors forwarding to the union payload; the caller is
    # responsible for reading the member that matches `self.type`.
    @property
    def imm(self):
        return self.value.imm
    @property
    def reg(self):
        return self.value.reg
    @property
    def mem(self):
        return self.value.mem
    @property
    def crx(self):
        return self.value.crx
class CsPpc(ctypes.Structure):
    # PPC-specific detail of a decoded instruction: branch fields, CR0-update
    # flag and up to 8 operands (only the first `op_count` are meaningful,
    # cf. the slicing in get_arch_info below).
    _fields_ = (
        ('bc', ctypes.c_uint),
        ('bh', ctypes.c_uint),
        ('update_cr0', ctypes.c_bool),
        ('op_count', ctypes.c_uint8),
        ('operands', PpcOp * 8),
    )
def get_arch_info(a):
    # Flatten a CsPpc record into (bc, bh, update_cr0, [operands in use]).
    used_operands = a.operands[:a.op_count]
    return (a.bc, a.bh, a.update_cr0, copy_ctypes_list(used_operands))
| [
"noreply@github.com"
] | 0vercl0k.noreply@github.com |
602750c19d6f198161d370a3d29d26e2b4708df9 | 7410903c6cd5ef35c592af00c934fb21c369cbf2 | /00_Code/01_LeetCode/781_RabbitsinForest.py | d63e85b18090554a3b65338b566a90711bd3ff96 | [
"MIT"
] | permissive | KartikKannapur/Algorithms | f4e4726170599db0622d18e8c06a382e9bce9e77 | 66e3c8112826aeffb78bd74d02be1a8d1e478de8 | refs/heads/master | 2020-12-25T18:32:41.086518 | 2020-10-19T02:59:47 | 2020-10-19T02:59:47 | 93,961,043 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | """
In a forest, each rabbit has some color. Some subset of rabbits (possibly all of them) tell you how many other rabbits have the same color as them. Those answers are placed in an array.
Return the minimum number of rabbits that could be in the forest.
Examples:
Input: answers = [1, 1, 2]
Output: 5
Explanation:
The two rabbits that answered "1" could both be the same color, say red.
The rabbit than answered "2" can't be red or the answers would be inconsistent.
Say the rabbit that answered "2" was blue.
Then there should be 2 other blue rabbits in the forest that didn't answer into the array.
The smallest possible number of rabbits in the forest is therefore 5: 3 that answered plus 2 that didn't.
Input: answers = [10, 10, 10]
Output: 11
Input: answers = []
Output: 0
"""
class Solution:
    def numRabbits(self, answers):
        """Return the minimum possible number of rabbits in the forest.

        :type answers: List[int]
        :rtype: int

        Greedy grouping: a rabbit answering ``num`` belongs to a color group
        of exactly ``num + 1`` rabbits.  ``open_groups[num]`` tracks how many
        more answering rabbits may still join the currently open group for
        that answer value.
        """
        open_groups = {}
        result = 0
        for num in answers:
            if num not in open_groups:
                # Start a new color group: this rabbit plus `num` others.
                result += num + 1
                # BUGFIX: a group for answer 0 is already full and must not
                # stay open -- the original stored `open_groups[0] = 0`, so a
                # second 0-answer joined the full group and [0, 0] returned 1
                # instead of 2.
                if num > 0:
                    open_groups[num] = num
            else:
                open_groups[num] -= 1
                if open_groups[num] == 0:
                    del open_groups[num]
        return result
| [
"kartikkannapur@gmail.com"
] | kartikkannapur@gmail.com |
fc470ad6da33016499e446b9ece69470a9b8d9a7 | 25ebc03b92df764ff0a6c70c14c2848a49fe1b0b | /daily/20200414/codes/output/code090.py | 1bc796c03085d8ba177161a12962daf89860121c | [] | no_license | podhmo/individual-sandbox | 18db414fafd061568d0d5e993b8f8069867dfcfb | cafee43b4cf51a321f4e2c3f9949ac53eece4b15 | refs/heads/master | 2023-07-23T07:06:57.944539 | 2023-07-09T11:45:53 | 2023-07-09T11:45:53 | 61,940,197 | 6 | 0 | null | 2022-10-19T05:01:17 | 2016-06-25T11:27:04 | Python | UTF-8 | Python | false | false | 180 | py | import pygal
# Build a two-series line chart and dump its SVG markup to stdout.
line_chart = pygal.Line()
line_chart.add('line', [.0002, .0005, .00035], dots_size=4)
line_chart.add('line', [.0004, .0009, .001], dots_size=12)
print(line_chart.render(is_unicode=True))
| [
"ababjam61+github@gmail.com"
] | ababjam61+github@gmail.com |
a11a26a952f57d707df35bd758411f0eb76a9b4d | 9f84d91a8ae3df53b07fe3267992fba00a99ac9e | /torch_geometric/transforms/add_remaining_self_loops.py | 4150f30254ba8d1d07584b91f4e382795bd524ef | [
"MIT"
] | permissive | pyg-team/pytorch_geometric | ebea601eae228f3905465b5c2349d3fb3bb5cb26 | a52af694b8ce6a80811e20966fe6d08a3e7511fe | refs/heads/master | 2023-08-31T04:13:40.943308 | 2023-08-30T12:48:42 | 2023-08-30T12:48:42 | 106,024,057 | 6,775 | 1,563 | MIT | 2023-09-14T17:10:18 | 2017-10-06T16:03:03 | Python | UTF-8 | Python | false | false | 2,087 | py | from typing import Optional, Union
from torch import Tensor
from torch_geometric.data import Data, HeteroData
from torch_geometric.data.datapipes import functional_transform
from torch_geometric.transforms import BaseTransform
from torch_geometric.utils import add_remaining_self_loops
@functional_transform('add_remaining_self_loops')
class AddRemainingSelfLoops(BaseTransform):
r"""Adds remaining self-loops to the given homogeneous or heterogeneous
graph (functional name: :obj:`add_remaining_self_loops`).
Args:
attr (str, optional): The name of the attribute of edge weights
or multi-dimensional edge features to pass to
:meth:`torch_geometric.utils.add_remaining_self_loops`.
(default: :obj:`"edge_weight"`)
fill_value (float or Tensor or str, optional): The way to generate
edge features of self-loops (in case :obj:`attr != None`).
If given as :obj:`float` or :class:`torch.Tensor`, edge features of
self-loops will be directly given by :obj:`fill_value`.
If given as :obj:`str`, edge features of self-loops are computed by
aggregating all features of edges that point to the specific node,
according to a reduce operation. (:obj:`"add"`, :obj:`"mean"`,
:obj:`"min"`, :obj:`"max"`, :obj:`"mul"`). (default: :obj:`1.`)
"""
def __init__(self, attr: Optional[str] = 'edge_weight',
fill_value: Union[float, Tensor, str] = 1.0):
self.attr = attr
self.fill_value = fill_value
def forward(
self,
data: Union[Data, HeteroData],
) -> Union[Data, HeteroData]:
for store in data.edge_stores:
if store.is_bipartite() or 'edge_index' not in store:
continue
store.edge_index, edge_weight = add_remaining_self_loops(
store.edge_index, getattr(store, self.attr, None),
fill_value=self.fill_value, num_nodes=store.size(0))
setattr(store, self.attr, edge_weight)
return data
| [
"noreply@github.com"
] | pyg-team.noreply@github.com |
081dcfaf35eab2dc448a2db49ecc3d1ae03e2589 | 31681488e69da3c7e00b0eda28e5cb720ef2299c | /liteiclink/serwb/datapath.py | dbdb5ea0b5f764e6f447754d0c099fba7da78b88 | [
"BSD-2-Clause"
] | permissive | zsipos/liteiclink | 4e9bdf6a819f490461cb33d0837247041203071d | 864cd831f3475dffd1c92d6d4a1b86608680bcf2 | refs/heads/master | 2021-07-08T07:43:10.897604 | 2020-01-28T09:40:17 | 2020-01-28T09:40:17 | 245,119,569 | 0 | 0 | NOASSERTION | 2020-03-05T09:25:16 | 2020-03-05T09:25:15 | null | UTF-8 | Python | false | false | 6,030 | py | # This file is Copyright (c) 2017-2019 Florent Kermarrec <florent@enjoy-digital.fr>
# License: BSD
from migen import *
from migen.genlib.io import *
from migen.genlib.misc import BitSlip, WaitTimer
from litex.soc.interconnect import stream
from litex.soc.cores.code_8b10b import Encoder, Decoder
from liteiclink.serwb.scrambler import Scrambler, Descrambler
def K(x, y):
return (y << 5) | x
class _8b10bEncoder(Module):
def __init__(self):
self.sink = sink = stream.Endpoint([("d", 32), ("k", 4)])
self.source = source = stream.Endpoint([("data", 40)])
# # #
encoder = CEInserter()(Encoder(4, True))
self.submodules += encoder
# control
self.comb += [
source.valid.eq(sink.valid),
sink.ready.eq(source.ready),
encoder.ce.eq(source.valid & source.ready)
]
# datapath
for i in range(4):
self.comb += [
encoder.k[i].eq(sink.k[i]),
encoder.d[i].eq(sink.d[8*i:8*(i+1)]),
source.data[10*i:10*(i+1)].eq(encoder.output[i])
]
class _8b10bDecoder(Module):
def __init__(self):
self.sink = sink = stream.Endpoint([("data", 40)])
self.source = source = stream.Endpoint([("d", 32), ("k", 4)])
# # #
decoders = [CEInserter()(Decoder(True)) for _ in range(4)]
self.submodules += decoders
# control
self.comb += [
source.valid.eq(sink.valid),
sink.ready.eq(source.ready)
]
self.comb += [decoders[i].ce.eq(source.valid & source.ready) for i in range(4)]
# datapath
for i in range(4):
self.comb += [
decoders[i].input.eq(sink.data[10*i:10*(i+1)]),
source.k[i].eq(decoders[i].k),
source.d[8*i:8*(i+1)].eq(decoders[i].d)
]
class _Bitslip(Module):
def __init__(self):
self.value = value = Signal(6)
self.sink = sink = stream.Endpoint([("data", 40)])
self.source = source = stream.Endpoint([("data", 40)])
# # #
bitslip = CEInserter()(BitSlip(40))
self.submodules += bitslip
# control
self.comb += [
source.valid.eq(sink.valid),
sink.ready.eq(source.ready),
bitslip.value.eq(value),
bitslip.ce.eq(source.valid & source.ready)
]
# datapath
self.comb += [
bitslip.i.eq(sink.data),
source.data.eq(bitslip.o)
]
class TXDatapath(Module):
def __init__(self, phy_dw, with_scrambling=True):
self.idle = idle = Signal()
self.comma = comma = Signal()
self.sink = sink = stream.Endpoint([("data", 32)])
self.source = source = stream.Endpoint([("data", phy_dw)])
# # #
# scrambler
if with_scrambling:
self.submodules.scrambler = scrambler = Scrambler()
# line coding
self.submodules.encoder = encoder = _8b10bEncoder()
# converter
self.submodules.converter = converter = stream.Converter(40, phy_dw)
# dataflow
if with_scrambling:
self.comb += [
sink.connect(scrambler.sink),
If(comma,
encoder.sink.valid.eq(1),
encoder.sink.k.eq(1),
encoder.sink.d.eq(K(28,5))
).Else(
scrambler.source.connect(encoder.sink)
)
]
else:
self.comb += [
If(comma,
encoder.sink.valid.eq(1),
encoder.sink.k.eq(1),
encoder.sink.d.eq(K(28,5))
).Else(
sink.connect(encoder.sink, omit={"data"}),
encoder.sink.d.eq(sink.data)
),
]
self.comb += [
If(idle,
converter.sink.valid.eq(1),
converter.sink.data.eq(0)
).Else(
encoder.source.connect(converter.sink),
),
converter.source.connect(source)
]
class RXDatapath(Module):
def __init__(self, phy_dw, with_scrambling=True):
self.bitslip_value = bitslip_value = Signal(6)
self.sink = sink = stream.Endpoint([("data", phy_dw)])
self.source = source = stream.Endpoint([("data", 32)])
self.idle = idle = Signal()
self.comma = comma = Signal()
# # #
# converter
self.submodules.converter = converter = stream.Converter(phy_dw, 40)
# bitslip
self.submodules.bitslip = bitslip = _Bitslip()
self.comb += bitslip.value.eq(bitslip_value)
# line coding
self.submodules.decoder = decoder = _8b10bDecoder()
# descrambler
if with_scrambling:
self.submodules.descrambler = descrambler = Descrambler()
# dataflow
self.comb += [
sink.connect(converter.sink),
converter.source.connect(bitslip.sink),
bitslip.source.connect(decoder.sink)
]
if with_scrambling:
self.comb += [
decoder.source.connect(descrambler.sink),
descrambler.source.connect(source)
]
else:
self.comb += [
decoder.source.connect(source, omit={"d", "k"}),
source.data.eq(decoder.source.d)
]
# idle decoding
idle_timer = WaitTimer(32)
self.submodules += idle_timer
self.sync += [
If(converter.source.valid,
idle_timer.wait.eq((converter.source.data == 0) | (converter.source.data == (2**40-1)))
),
idle.eq(idle_timer.done)
]
# comma decoding
self.sync += \
If(decoder.source.valid,
comma.eq((decoder.source.k == 1) & (decoder.source.d == K(28, 5)))
)
| [
"florent@enjoy-digital.fr"
] | florent@enjoy-digital.fr |
3fe587d3479f35c248d556caac968306f606b220 | 2881dcaa58b2acbb56fe7ecdf30f1f31ec53798f | /sliding-window/max-distinct-substring/max-distinct-substring-iterative.py | 188babf47d7c25a5c63ecc49803d8c18766a160d | [] | no_license | aratik711/grokking-the-coding-interview | 2ec8791c5c1f65a752e795bded4f66b79bf8e3cc | 95a3c477d3ebd49c2d1f9d51394b61680f05a38b | refs/heads/main | 2023-07-03T10:04:39.613221 | 2021-08-11T03:08:11 | 2021-08-11T03:08:11 | 343,046,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | """
Given a string, find the length of the longest substring in it with no more than K distinct characters.
Example 1:
Input: String="araaci", K=2
Output: 4
Explanation: The longest substring with no more than '2' distinct characters is "araa".
Example 2:
Input: String="araaci", K=1
Output: 2
Explanation: The longest substring with no more than '1' distinct characters is "aa".
Example 3:
Input: String="cbbebi", K=3
Output: 5
Explanation: The longest substrings with no more than '3' distinct characters are "cbbeb" & "bbebi".
Time complexity O(n*n)
"""
def longest_substring_with_k_distinct(str, k):
str_count = []
for i in range(len(str)):
char_arr = [str[i]]
char_count = 1
for j in range(i+1, len(str)):
char_count += 1
if len(char_arr) == k:
str_count.append(char_count)
break
if str[j] not in char_arr:
char_arr.append(str[j])
continue
return max(str_count)
str = "cbbebi"
K = 3
print(longest_substring_with_k_distinct(str, K))
| [
"arati.kulkarni@phonepe.com"
] | arati.kulkarni@phonepe.com |
caa8a343b37b913dc10f18a9cd8223fb7b89c06a | e87f369bf5642d25990d7e1b72d9fda9eab39fea | /invoke_commands/release.py | 0c8d2359956e60429dabfae22afe5d3a59e6511d | [
"MIT"
] | permissive | rajk-apps/riki | 612fc4716b842562447d9f7163cb8681e7e1e7c3 | 58257bffe7d7f00fc0cb8dc266d783c00cc16070 | refs/heads/master | 2022-10-05T21:48:17.285899 | 2022-09-18T16:31:31 | 2022-09-18T16:31:31 | 162,050,032 | 1 | 2 | MIT | 2020-07-15T21:19:40 | 2018-12-16T23:19:49 | JavaScript | UTF-8 | Python | false | false | 1,329 | py | from invoke import task
import io
from .vars import mymodule
@task
def new(c):
version = mymodule.__version__
c.run("python setup.py sdist")
c.run("twine check dist/*")
c.run(
f"twine upload dist/*{version}.tar.gz -u __token__ -p $TWINE_PASSWORD"
)
@task
def tag(c):
version = mymodule.__version__
f = io.StringIO()
c.run("git rev-parse --abbrev-ref HEAD", out_stream=f)
branch = f.getvalue().strip()
f.close()
if branch == "master":
tag_version = "v{}".format(version)
f2 = io.StringIO()
c.run("git tag", out_stream=f2)
tags = f2.getvalue().split()
print(tags)
if tag_version not in tags:
current_release_path = "docs_config/current_release.rst"
with open(current_release_path) as fp:
notes = fp.read()
with open(
"docs_config/release_notes/{}.rst".format(tag_version), "w"
) as fp:
fp.write(notes)
c.run(f"git tag -a {tag_version} -m '{notes}'")
with open(current_release_path, "w") as fp:
fp.write("")
c.run("git push --tags")
else:
print("{} version already tagged".format(tag_version))
else:
print("only master branch can be tagged")
| [
"endremborza@gmail.com"
] | endremborza@gmail.com |
84bb2ef6d0e866019e761834acd91b17ecfa1556 | c35b8c8ece7757943d93748fbdc6f4d54539daa6 | /poloniex/logger.py | d4d0f0e9d5aa72ff43545d70fd87b16508855ef5 | [
"MIT"
] | permissive | absortium/poloniex-api | 0b674a6fe11c60263f596049d274a7f45095b989 | e1786e8edf9116990dc2291f343ed965e9d0f5ae | refs/heads/master | 2020-12-25T16:24:52.723504 | 2017-09-27T21:13:48 | 2017-09-27T21:13:48 | 51,717,220 | 99 | 40 | null | 2017-09-27T21:13:49 | 2016-02-14T22:27:07 | Python | UTF-8 | Python | false | false | 1,273 | py | import inspect
import logging
from functools import wraps
import pp
__author__ = 'andrew.shvv@gmail.com'
def get_prev_method_name():
return inspect.stack()[2][3]
def pretty_wrapper(func):
@wraps(func)
def decorator(msg, *args, **kwargs):
pretty_msg = "Func: %s\n" % get_prev_method_name()
if type(msg) == str:
pretty_msg += msg
else:
pretty_msg += pp.fmt(msg)
pretty_msg += "\n+ " + "- " * 30 + "+\n"
func(pretty_msg, *args, **kwargs)
return decorator
def wrap_logger(logger):
logger.info = pretty_wrapper(logger.info)
logger.debug = pretty_wrapper(logger.debug)
logger.warning = pretty_wrapper(logger.warning)
logger.exception = pretty_wrapper(logger.exception)
return logger
def getLogger(name, level=logging.DEBUG):
# create logger
logger = logging.getLogger(name)
logger = wrap_logger(logger)
# create console handler and set level to debug
ch = logging.StreamHandler()
# create formatter
formatter = logging.Formatter('\nLevel: %(levelname)s - %(name)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.setLevel(level)
return logger
| [
"andrew.shvv@gmail.com"
] | andrew.shvv@gmail.com |
8464fa813408dff6410bbc7f7c32ecb9dba9d7bc | 222367d17e0567a5d02a8391bc6954a57989b3eb | /main.py | 5dc926504d8fec28d4e21b9c0ed8f902f7240a38 | [] | no_license | turian/hydra-notebook | 8fe6a38aad2dda75ea4425b824fb527e8ca5d090 | faf499dd0c6ad0da75e9f03898fc08b731505502 | refs/heads/master | 2022-11-10T11:37:18.824841 | 2020-06-21T00:09:22 | 2020-06-21T00:09:22 | 273,799,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import hydra.experimental
hydra.experimental.initialize(config_path="conf")
#hydra.experimental.initialize_with_module(module="module", config_path="conf")
cfg=hydra.experimental.compose(config_name="config.yaml")
cfg=hydra.experimental.compose(config_name="config.yaml")
import module
| [
"turian@gmail.com"
] | turian@gmail.com |
882fbb1f343f673995324914119c91a1e80e22b5 | 039c5b793ace774bb815f4061a273ff098efd475 | /in_dev/send/sender/migrations/0002_documents.py | f567f03c6f4c0c553e1c78268b62e5823994e086 | [] | no_license | zzyzx4/soft | b7872a1c1e2dc91912f22aaaf96f2cedaf1423c1 | 264c399ddef2b55efd8a1a8b796320f72c6dec7c | refs/heads/master | 2022-12-16T20:50:45.512689 | 2019-07-01T11:38:12 | 2019-07-01T11:38:12 | 184,214,960 | 0 | 0 | null | 2022-12-08T05:07:18 | 2019-04-30T07:38:24 | null | UTF-8 | Python | false | false | 958 | py | # Generated by Django 2.2.1 on 2019-05-15 10:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sender', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Documents',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, verbose_name='Название')),
('description', models.CharField(max_length=100, verbose_name='Описание')),
('document', models.FileField(upload_to='Документы//%Y/%m/%d/%t')),
('uploaded_at', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name': 'Документ1',
'verbose_name_plural': 'Документы1',
},
),
]
| [
"dastik0101@gmail.com"
] | dastik0101@gmail.com |
d863aa960bd859d9f48e63cb47e8b52f47519960 | 739b531f456ef13c04e437239a08c4ffac6b49f5 | /jl/bin/digg-tool | 4440f8b121458ed4421524ca4b973cba2460a0c9 | [] | no_license | bcampbell/journalisted | e2ec3a6f48bdf0bec4e6e5245c7975c23e77f07d | 0df05a829825e67c35e2963c1a6d53db5872e203 | refs/heads/master | 2021-01-21T05:01:59.155762 | 2016-06-14T04:18:13 | 2016-06-14T04:18:13 | 306,575 | 6 | 1 | null | 2013-07-19T04:37:45 | 2009-09-14T13:00:58 | PHP | UTF-8 | Python | false | false | 2,846 | #!/usr/bin/env python2.4
# 2008-03-19 BenC Initial version
#
# Scraper which looks for references to newspaper articles
# on digg.com and loads the number of diggs, comments etc
# into our database, populating the article_commentlink table.
#
import sys
from datetime import datetime
from optparse import OptionParser
sys.path.append( "../pylib" )
from digg import *
from JL import DB,ukmedia,CommentLink
# scraperfront used to map urls to article srcids
sys.path.append( "../scraper" )
import scrapefront
APPKEY = 'http://www.scumways.com'
domains = [
'independent.co.uk',
'dailymail.co.uk',
'mailonsunday.co.uk',
'express.co.uk',
'dailyexpress.co.uk',
'guardian.co.uk',
'mirror.co.uk',
'sundaymirror.co.uk',
'telegraph.co.uk',
'scotsman.com',
'ft.com',
'theherald.co.uk',
'thesun.co.uk',
'timesonline.co.uk',
'bbc.co.uk'
]
digg = Digg(APPKEY)
def FetchFromDigg( domain, total=500 ):
"""Try and find 'numentries' stories on Digg with the given domain"""
entries = []
got = 0
while got < total:
count = total-got
if count > 100:
count = 100
errcnt = 0
while 1:
try:
stories = digg.getStories( offset=got,count=count, domain=domain )
break
except Exception,err:
if isinstance( err, KeyboardInterrupt ):
raise
errcnt += 1
if errcnt >= 3:
ukmedia.DBUG( "digg-tool: ABORTING - too many errors\n" )
raise
print >>sys.stderr, sys.exc_info()
ukmedia.DBUG( "digg-tool: Retrying... (%d)\n" % (errcnt) )
if total > int(stories.total):
total = int(stories.total)
count = int( stories.count )
got += count
ukmedia.DBUG2( "digg-tool: %s: got %d/%d\n" % (domain,got,total) )
for s in stories:
e = {
'url': s.link,
'score': s.diggs,
'num_comments': s.comments,
'comment_url': s.href,
'source': 'digg',
# 'submitted': datetime.fromtimestamp( int( s.submit_date ) ),
}
entries.append(e)
return entries
def LoadEntries( conn, entries ):
"""Load fetched digg entries into the database"""
stats = CommentLink.Stats()
c = conn.cursor()
for e in entries:
srcid = scrapefront.CalcSrcID( e['url'] )
if not srcid:
# not handled
stats.not_handled += 1
continue
e['srcid'] = srcid
if CommentLink.AddCommentLink( conn, e ):
stats.matched += 1
else:
stats.missing += 1
return stats
def DoDomain( conn, domain ):
"""Fetch digg entries for domain and try to load them into db"""
entries = FetchFromDigg( domain )
stats = LoadEntries( conn, entries )
ukmedia.DBUG( "digg-tool: %s: %s\n" %( domain,stats.Report() ) )
return stats
def main():
conn = DB.Connect()
overallstats = CommentLink.Stats()
for d in domains:
stats = DoDomain( conn, d )
overallstats.Accumulate( stats )
ukmedia.DBUG( "digg-tool: overall: %s" % (overallstats.Report()) )
if __name__ == "__main__":
main()
| [
"ben@scumways.com"
] | ben@scumways.com | |
35ada6f333d683b572c085d7576aed98123320ff | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/secdev_scapy/scapy-master/scapy/modules/queso.py | 9c38f42735e3c27a6b92186d7b3e5755b162164d | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 2,987 | py | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Clone of queso OS fingerprinting
"""
from scapy.data import KnowledgeBase
from scapy.config import conf
from scapy.layers.inet import IP,TCP
from scapy.error import warning
from scapy.volatile import RandInt
from scapy.sendrecv import sr
#from
conf.queso_base ="/etc/queso.conf"
#################
## Queso stuff ##
#################
def quesoTCPflags(flags):
if flags == "-":
return "-"
flv = "FSRPAUXY"
v = 0
for i in flags:
v |= 2**flv.index(i)
return "%x" % v
class QuesoKnowledgeBase(KnowledgeBase):
def lazy_init(self):
try:
f = open(self.filename)
except IOError:
return
self.base = {}
p = None
try:
for l in f:
l = l.strip()
if not l or l[0] == ';':
continue
if l[0] == '*':
if p is not None:
p[""] = name
name = l[1:].strip()
p = self.base
continue
if l[0] not in list("0123456"):
continue
res = l[2:].split()
res[-1] = quesoTCPflags(res[-1])
res = " ".join(res)
if not p.has_key(res):
p[res] = {}
p = p[res]
if p is not None:
p[""] = name
except:
self.base = None
warning("Can't load queso base [%s]", self.filename)
f.close()
queso_kdb = QuesoKnowledgeBase(conf.queso_base)
def queso_sig(target, dport=80, timeout=3):
p = queso_kdb.get_base()
ret = []
for flags in ["S", "SA", "F", "FA", "SF", "P", "SEC"]:
ans, unans = sr(IP(dst=target)/TCP(dport=dport,flags=flags,seq=RandInt()),
timeout=timeout, verbose=0)
if len(ans) == 0:
rs = "- - - -"
else:
s,r = ans[0]
rs = "%i" % (r.seq != 0)
if not r.ack:
r += " 0"
elif r.ack-s.seq > 666:
rs += " R" % 0
else:
rs += " +%i" % (r.ack-s.seq)
rs += " %X" % r.window
rs += " %x" % r.payload.flags
ret.append(rs)
return ret
def queso_search(sig):
p = queso_kdb.get_base()
sig.reverse()
ret = []
try:
while sig:
s = sig.pop()
p = p[s]
if p.has_key(""):
ret.append(p[""])
except KeyError:
pass
return ret
@conf.commands.register
def queso(*args,**kargs):
"""Queso OS fingerprinting
queso(target, dport=80, timeout=3)"""
return queso_search(queso_sig(*args, **kargs))
| [
"659338505@qq.com"
] | 659338505@qq.com |
24a66109c4bda7c5668b7766a6f938bbafb68128 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/46/usersdata/74/18966/submittedfiles/funcoes1.py | d635ffd151a958560fc49a52a3187bbec41da256 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,217 | py | # -*- coding: utf-8 -*-
from __future__ import division
def crescente (lista):
i = 0
cont = 0
while (len(lista)-1)>=i:
if lista[i]<lista[i+1]:
cont=cont+1
i = i+1
if (len(lista)-1)==cont:
return 'S'
else:
return 'N'
def decrescente (lista1):
j = 0
cont1 = 0
while (len(lista1)-1)>=j:
if lista1[i]<lista1[j+1]:
cont1=cont1+1
j = j+1
if (len(lista1)-1)==cont1:
return 'S'
else:
return 'N'
def ciguais (lista2):
k = 0
cont2 = 0
while (len(lista2)-1)>=k:
if lista2[k]==lista2[k+1]:
cont2 = cont2+1
k = k+1
if cont2>0:
return 'S'
else:
return'N'
n = input('Digite o tamanho do vetor? ')
x = 1
y = 1
z = 1
a = []
b = []
c = []
while n>=x:
a.append(input('Digite os valores do vetor A: ')
x = x+1
while n>=y:
b.append(input('Digite os valores do vetor B: ')
y = y+1
while n>=z:
c.append(input('Digite os valores do vetor C: ')
z = z+1
crescente(a)
decrescente(a)
ciguais(a)
crescente(b)
decrescente(b)
ciguais(b)
crescente(c)
decrescente(c)
ciguais(c)
#escreva o programa principal
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
3323cd8116e2956ac0d1007bb69f9f4a201104df | a2bbd69fe69ec9a5737565b3b7325b5dcaaecf53 | /main/page/pe_add_product.py | 074f11ce24862b59e11eba0772961cbb3a95c473 | [] | no_license | SamWithWorld/selenium-2 | d945a03492548e8ee59bbb06d8c3bdb8593d8c54 | a575d7b3962a2754e69acb99cd48fe13dc62c6e5 | refs/heads/master | 2022-09-27T09:31:28.978249 | 2015-03-12T07:03:22 | 2015-03-12T07:03:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,693 | py | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from random import randint
import os, time, sys
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/'))
from base import BasePage
class addProduct(BasePage):
url = "https://www.tokopedia.com/product-add.pl"
#locators
_pname_loc = (By.ID, 'p-name')
_pdep1_loc = (By.ID, 'p-dep-1')
_pdep2_loc = (By.ID, 'p-dep-2')
_pdep3_loc = (By.ID, 'p-dep-3')
_pminorder_loc = (By.ID, 'p-min-order')
_pprice_loc = (By.ID, 'p-price')
_pweight_loc = (By.ID, 'p-weight')
_puploadto_loc = (By.ID, 'p-upload-to')
_mustinsurance_loc = (By.ID, 'must_insurance')
_pcondition_loc = (By.ID, 'p-condition')
_returnable_loc = (By.ID, 'returnable')
_pdescription_loc = (By.ID, 'p-description')
_submit_loc = (By.ID, 's-save-prod')
# dictionary
dict = {
"index_url" : "http://www.tokopedia.com/",
"email" : "tkpd.qc+18@gmail.com",
"password" : "imtokopedia91"
}
def open(self, url):
self.driver.get(url)
time.sleep(2)
def go_to_add_product(self):
self.open(self.dict['index_url'] + 'product-add.pl')
def add_to_product(self):
self.go_to_add_product()
try:
self.driver.find_element(By.ID, "p-name").send_keys("Product AB")
time.sleep(4)
self.choose_category()
self.driver.find_element(By.ID, "p-min-order").clear()
self.driver.find_element(By.ID, "p-min-order").send_keys(randint(1, 5))
self.driver.find_element(By.ID, "p-price").send_keys(randint(5000, 10000))
self.driver.find_element(By.ID, "p-weight").send_keys(randint(100, 250))
self.choose_upload_to()
self.driver.find_element(By.ID, "s-save-prod").submit()
except Exception as inst:
print(inst)
def choose_category(self):
try:
time.sleep(6)
self.driver.execute_script("document.getElementById('p-dep-1').style.display = '';")
time.sleep(6)
list_category_first = self.driver.find_elements(By.XPATH, "//select[@id='p-dep-1']/option")
i = 0
while i < len(list_category_first):
if i == randint(0, len(list_category_first)-1):
list_category_first[i].click()
break
i += 1
time.sleep(6)
self.driver.execute_script("document.getElementById('p-dep-2').style.display = '';")
time.sleep(6)
list_category_second = self.driver.find_elements(By.XPATH, "//select[@id='p-dep-2']/option")
i = 0
while i < len(list_category_second):
if i == randint(0, len(list_category_second)-1):
list_category_second[i].click()
break
i += 1
time.sleep(6)
self.driver.execute_script("document.getElementById('p-dep-3').style.display = '';")
time.sleep(6)
list_category_third = self.driver.find_elements(By.XPATH, "//select[@id='p-dep-3']/option")
i = 0
while i < len(list_category_third):
if i == randint(0, len(list_category_third)-1):
list_category_third[i].click()
break
i += 1
except Exception as inst:
print(inst)
def choose_upload_to(self):
try:
time.sleep(6)
self.driver.execute_script("document.getElementById('p-upload-to').style.display = '';")
wait = WebDriverWait(self.driver, 10)
element = wait.until(EC.element_to_be_clickable((By.ID,'p-upload-to')))
time.sleep(6)
list_upload_to = self.driver.find_elements(By.XPATH, "//select[@id='p-upload-to']/option")
list_upload_to[0].click()
time.sleep(6)
self.driver.execute_script("document.getElementById('p-menu-id').style.display = '';")
time.sleep(6)
list_etalase = self.driver.find_elements(By.XPATH, "//select[@id='p-menu-id']/option")
i = 0
while i < len(list_etalase):
if i == randint(0, len(list_etalase)-1):
list_etalase[i].click()
break
i += 1
except Exception as inst:
print(inst) | [
"herman.wahyudi02@gmail.com"
] | herman.wahyudi02@gmail.com |
4b4679abcadd364adbc3b56bf6980cb1b8789d12 | 0a037e4ee03c5afbf6f58b7293fefab1cc6998cf | /project_2/RollingDice.py | 2c4e7dc005788ebed16ca65fa51fc394a5f1cded | [] | no_license | mingyyy/crash_course | 6ac2a41b14c821e96e3938047cb056ad2ce99280 | dad9f9b37ef3093dad25a0cb7fddf0e65fed3571 | refs/heads/master | 2020-04-24T14:24:43.283617 | 2019-12-25T07:43:05 | 2019-12-25T07:43:05 | 172,019,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 965 | py | from random import randint
import pygal
class Die():
def __init__(self, num_sides=6):
# dice of 6 sides
self.num_sides = num_sides
def roll(self):
# return a random value between 1 and the number of sides
return randint(1, self.num_sides)
n = 10000
m = 2
d1 = 8
d2 = 8
die1 = Die(d1)
die2 = Die(d2)
results = []
for roll_num in range(n):
results.append(die1.roll() + die2.roll())
# print(results)
freq = []
for value in range(2, die1.num_sides + die2.num_sides + 1):
freq.append(results.count(value))
# print(freq)
# visualize the results
hist = pygal.Bar()
hist.title = f"Results of rolling two D{die1.num_sides} {n} times"
# 15-6, list comprehension
hist.x_labels = [i for i in range(1*m, d1+d2+1)]
hist.x_title = "Results"
hist.y_title = "Frequency of Result"
hist.add(f'D{d1} + D{d2}', freq)
# save to the current folder, open the svg with a browser
hist.render_to_file(f'dice_visual_{m}{d1}{d2}.svg')
| [
"j.yanming@gmail.com"
] | j.yanming@gmail.com |
fbe2fb38035c098f1729f5fc9c642d658ef7bf9e | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/signal/testcase/firstcases/testcase4_004.py | e8b43073fbae46b2b65501337b4b3f9efaa91db6 | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,165 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'org.thoughtcrime.securesms',
'appActivity' : 'org.thoughtcrime.securesms.ConversationListActivity',
'resetKeyboard' : True,
'androidCoverage' : 'org.thoughtcrime.securesms/org.thoughtcrime.securesms.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase004
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"More options\")")
TouchAction(driver).tap(element).perform()
driver.press_keycode(4)
element = getElememtBack(driver, "new UiSelector().text(\"R322\")", "new UiSelector().className(\"android.widget.TextView\").instance(5)")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.thoughtcrime.securesms:id/sms_failed_indicator\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"4 min\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
TouchAction(driver).long_press(element).release().perform()
element = getElememtBack(driver, "new UiSelector().text(\"R322\")", "new UiSelector().className(\"android.widget.TextView\").instance(5)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"More options\")")
TouchAction(driver).tap(element).perform()
driver.press_keycode(4)
element = getElememt(driver, "new UiSelector().resourceId(\"org.thoughtcrime.securesms:id/contact_photo_image\").className(\"android.widget.ImageView\")")
TouchAction(driver).long_press(element).release().perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"4_004\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'org.thoughtcrime.securesms'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage) | [
"prefest2018@gmail.com"
] | prefest2018@gmail.com |
fa8c20fc650e966a7c439fcf78f72ccfa51bcfd0 | 6669b132eb482f95c1f40d35ecae14a544fe9197 | /tree/no872.py | 2b4c2ee97838a1e8a4aef4db93b4708475064212 | [] | no_license | markdannel/leetcode | 94dade2e5a286d04075e70e48015459ea6ac383a | 6a2ac436599ecebc527efe0d6bfe0f6f825311fb | refs/heads/master | 2021-06-06T20:56:34.868122 | 2020-10-21T12:16:56 | 2020-10-21T12:16:56 | 140,668,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,322 | py | # 请考虑一颗二叉树上所有的叶子,这些叶子的值按从左到右的顺序排列形成一个 叶值序列 。
# 3
# / \
# 5 1
# / \ / \
# 6 2 9 8
# / \
# 7 4
# 举个例子,如上图所示,给定一颗叶值序列为 (6, 7, 4, 9, 8) 的树。
# 如果有两颗二叉树的叶值序列是相同,那么我们就认为它们是 叶相似 的。
# 如果给定的两个头结点分别为 root1 和 root2 的树是叶相似的,则返回 true;否则返回 false 。
# 提示:
# 给定的两颗树可能会有 1 到 200 个结点。
# 给定的两颗树上的值介于 0 到 200 之间。
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def leafSimilar(self, root1: TreeNode, root2: TreeNode) -> bool:
def fetchLeafNode(node):
if not node:
return []
res = []
res += fetchLeafNode(node.left)
if not node.left and not node.right:
res.append(node.val)
res += fetchLeafNode(node.right)
return res
return fetchLeafNode(root1) == fetchLeafNode(root2) | [
"wistion@foxmail.com"
] | wistion@foxmail.com |
2c79ac5a234ab0c1909ec5ed4fb2e050dfc7c112 | 9c58a1f594e18cee20128f2c8dad8257429b10d1 | /custom_business_reports/report/mapm_pbl_sales.py | d00089e3779c08973a081d7404eb9b5b448fc350 | [] | no_license | gastonfeng/Odoo-eBay-Amazon | e8919768b2a1500209f209ee3aecc7f2fb10cda7 | a9c4a8a7548b19027bc0fd904f8ae9249248a293 | refs/heads/master | 2022-04-05T00:23:50.483430 | 2020-02-19T04:58:56 | 2020-02-19T04:58:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,257 | py | # -*- coding: utf-8 -*-
from odoo import tools
from odoo import api, fields, models
class MAMPPBLSales(models.Model):
_name = "mapm.pbl.sales.report"
_description = "mapm.pbl.sales.report"
_auto = False
_rec_name = 'date'
_order = 'date desc'
order_id = fields.Many2one('sale.order', 'Order', readonly=True)
item_id = fields.Char('Item', readonly=True)
name = fields.Char('LAD', readonly=True)
price_total = fields.Float('Price total', readonly=True)
date = fields.Datetime('Date Order', readonly=True)
@api.model_cr
def init(self):
# self._table = sale_report
tools.drop_view_if_exists(self.env.cr, self._table)
qry = """CREATE or REPLACE VIEW mapm_pbl_sales_report as (
SELECT row_number() OVER () AS id, sol.order_id as order_id, so.date_order - '4 hour'::interval as date,
sol.item_id, sol.name as name, sol.price_total as price_total
FROM public.sale_order_line sol
LEFT JOIN sale_order so ON sol.order_id = so.id
WHERE so.state IN ('sale','done') AND sol.item_id LIKE 'MAPM-PBL-%'
ORDER BY order_id, item_id
)"""
self.env.cr.execute(qry)
| [
"yjm@mail.ru"
] | yjm@mail.ru |
d3a937c8bafdeeaad095658e1450109e72bcd7bc | 62530422360aa0cb294cb208cbc7d21d282b18b5 | /test-1b/input_utils.py | 830a1d3799cca92271006521d459574d5937f330 | [] | no_license | ag8/capsule-b | d560d75d6204a41f4c8526a84fbdae614c9e47ff | fb3bdc9ebb66890fc3f6d06fd6d8e3335ae882f9 | refs/heads/master | 2021-04-15T08:35:27.283053 | 2018-04-17T05:55:08 | 2018-04-17T05:55:08 | 126,885,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,061 | py | import os, time
# import scipy
import numpy as np
import tensorflow as tf
import collections
from config import cfg
def load_mnist(path=cfg.dataset):
fd = open(os.path.join(path, 'train-images-idx3-ubyte'))
loaded = np.fromfile(file=fd, dtype=np.uint8)
trX = loaded[16:].reshape((60000, 28, 28, 1)).astype(np.float)
fd = open(os.path.join(path, 'train-labels-idx1-ubyte'))
loaded = np.fromfile(file=fd, dtype=np.uint8)
trY = loaded[8:].reshape((60000)).astype(np.int32)
fd = open(os.path.join(path, 't10k-images-idx3-ubyte'))
loaded = np.fromfile(file=fd, dtype=np.uint8)
teX = loaded[16:].reshape((10000, 28, 28, 1)).astype(np.float)
fd = open(os.path.join(path, 't10k-labels-idx1-ubyte'))
loaded = np.fromfile(file=fd, dtype=np.uint8)
teY = loaded[8:].reshape((10000)).astype(np.int32)
# normalize to 0 1 float
trX = trX / 255.
teX = teX / 255.
return trX, trY, teX, teY
def load_mmnist(path, samples_tr=200000, samples_te=10000):
mnist = {}
# train images
trX = np.fromfile(file=os.path.join(path, 'trX'), dtype=np.uint8)
mnist["trX"] = trX.reshape([samples_tr, 36, 36, 1]).astype(np.float32) / 255.
# test images
te0X = np.fromfile(file=os.path.join(path, 'te0X'), dtype=np.uint8)
mnist["te0X"] = te0X.reshape([samples_te, 36, 36, 1]).astype(np.float32) / 255.
te1X = np.fromfile(file=os.path.join(path, 'te1X'), dtype=np.uint8)
mnist["te1X"] = te1X.reshape([samples_te, 36, 36, 1]).astype(np.float32) / 255.
te2X = np.fromfile(file=os.path.join(path, 'te2X'), dtype=np.uint8)
mnist["te2X"] = te2X.reshape([samples_te, 36, 36, 1]).astype(np.float32) / 255.
te3X = np.fromfile(file=os.path.join(path, 'te3X'), dtype=np.uint8)
mnist["te3X"] = te3X.reshape([samples_te, 36, 36, 1]).astype(np.float32) / 255.
te4X = np.fromfile(file=os.path.join(path, 'te4X'), dtype=np.uint8)
mnist["te4X"] = te4X.reshape([samples_te, 36, 36, 1]).astype(np.float32) / 255.
te5X = np.fromfile(file=os.path.join(path, 'te5X'), dtype=np.uint8)
mnist["te5X"] = te5X.reshape([samples_te, 36, 36, 1]).astype(np.float32) / 255.
te6X = np.fromfile(file=os.path.join(path, 'te6X'), dtype=np.uint8)
mnist["te6X"] = te6X.reshape([samples_te, 36, 36, 1]).astype(np.float32) / 255.
te7X = np.fromfile(file=os.path.join(path, 'te7X'), dtype=np.uint8)
mnist["te7X"] = te7X.reshape([samples_te, 36, 36, 1]).astype(np.float32) / 255.
te8X = np.fromfile(file=os.path.join(path, 'te8X'), dtype=np.uint8)
mnist["te8X"] = te8X.reshape([samples_te, 36, 36, 1]).astype(np.float32) / 255.
teR30 = np.fromfile(file=os.path.join(path, 'teR30X'), dtype=np.uint8)
mnist["teR30X"] = teR30.reshape([samples_te, 36, 36, 1]).astype(np.float32) / 255.
teR60 = np.fromfile(file=os.path.join(path, 'teR60X'), dtype=np.uint8)
mnist["teR60X"] = teR60.reshape([samples_te, 36, 36, 1]).astype(np.float32) / 255.
teR30R = np.fromfile(file=os.path.join(path, 'teR30RX'), dtype=np.uint8)
mnist["teR30RX"] = teR30R.reshape([samples_te, 36, 36, 1]).astype(np.float32) / 255.
teR60R = np.fromfile(file=os.path.join(path, 'teR60RX'), dtype=np.uint8)
mnist["teR60RX"] = teR60R.reshape([samples_te, 36, 36, 1]).astype(np.float32) / 255.
# train labels
trY = np.fromfile(file=os.path.join(path, 'trY'), dtype=np.int32)
mnist["trY"] = trY.reshape([samples_tr, 2])
# test labels
te0Y = np.fromfile(file=os.path.join(path, 'te0Y'), dtype=np.int32)
mnist["te0Y"] = te0Y.reshape([samples_te, 2])
te1Y = np.fromfile(file=os.path.join(path, 'te1Y'), dtype=np.int32)
mnist["te1Y"] = te1Y.reshape([samples_te, 2])
te2Y = np.fromfile(file=os.path.join(path, 'te2Y'), dtype=np.int32)
mnist["te2Y"] = te2Y.reshape([samples_te, 2])
te3Y = np.fromfile(file=os.path.join(path, 'te3Y'), dtype=np.int32)
mnist["te3Y"] = te3Y.reshape([samples_te, 2])
te4Y = np.fromfile(file=os.path.join(path, 'te4Y'), dtype=np.int32)
mnist["te4Y"] = te4Y.reshape([samples_te, 2])
te5Y = np.fromfile(file=os.path.join(path, 'te5Y'), dtype=np.int32)
mnist["te5Y"] = te5Y.reshape([samples_te, 2])
te6Y = np.fromfile(file=os.path.join(path, 'te6Y'), dtype=np.int32)
mnist["te6Y"] = te6Y.reshape([samples_te, 2])
te7Y = np.fromfile(file=os.path.join(path, 'te7Y'), dtype=np.int32)
mnist["te7Y"] = te7Y.reshape([samples_te, 2])
te8Y = np.fromfile(file=os.path.join(path, 'te8Y'), dtype=np.int32)
mnist["te8Y"] = te8Y.reshape([samples_te, 2])
teR30 = np.fromfile(file=os.path.join(path, 'teR30Y'), dtype=np.int32)
mnist["teR30Y"] = teR30.reshape([samples_te, 2])
teR60 = np.fromfile(file=os.path.join(path, 'teR60Y'), dtype=np.int32)
mnist["teR60Y"] = teR60.reshape([samples_te, 2])
teR30R = np.fromfile(file=os.path.join(path, 'teR30RY'), dtype=np.int32)
mnist["teR30RY"] = teR30R.reshape([samples_te, 2])
teR60R = np.fromfile(file=os.path.join(path, 'teR60RY'), dtype=np.int32)
mnist["teR60RY"] = teR60R.reshape([samples_te, 2])
return mnist
| [
"andrew2000g@gmail.com"
] | andrew2000g@gmail.com |
dd6825da044bffc2f8a198b2c0760f36a3143b3c | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p4VQE/R4/benchmark/startPyquil591.py | 331ab5964b4d9cb39ebddab5d46ce4593416f25a | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,252 | py | # qubit number=4
# total number=16
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += Y(3) # number=5
prog += SWAP(1,0) # number=6
prog += SWAP(1,0) # number=7
prog += X(1) # number=8
prog += CNOT(0,1) # number=10
prog += X(1) # number=11
prog += H(1) # number=13
prog += CZ(0,1) # number=14
prog += H(1) # number=15
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil591.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
373c8a72664191552bc59b1eb1c3eda9b042f144 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_suntan.py | f7092f8f48d34221b8bcda80a4bb30ba17516561 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py |
#calss header
class _SUNTAN():
def __init__(self,):
self.name = "SUNTAN"
self.definitions = [u'pleasantly brown skin caused by being in hot sun: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
971a4fe1130c67c67676450d48c44d9c0423faff | ae12996324ff89489ded4c10163f7ff9919d080b | /LeetCodePython/LargestColorValueInaDirectedGraph.py | c4c26114bd04fca86a8152f91f6b3614eb7fbe72 | [] | no_license | DeanHe/Practice | 31f1f2522f3e7a35dc57f6c1ae74487ad044e2df | 3230cda09ad345f71bb1537cb66124ec051de3a5 | refs/heads/master | 2023-07-05T20:31:33.033409 | 2023-07-01T18:02:32 | 2023-07-01T18:02:32 | 149,399,927 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,374 | py | """
There is a directed graph of n colored nodes and m edges. The nodes are numbered from 0 to n - 1.
You are given a string colors where colors[i] is a lowercase English letter representing the color of the ith node in this graph (0-indexed). You are also given a 2D array edges where edges[j] = [aj, bj] indicates that there is a directed edge from node aj to node bj.
A valid path in the graph is a sequence of nodes x1 -> x2 -> x3 -> ... -> xk such that there is a directed edge from xi to xi+1 for every 1 <= i < k. The color value of the path is the number of nodes that are colored the most frequently occurring color along that path.
Return the largest color value of any valid path in the given graph, or -1 if the graph contains a cycle.
Example 1:
Input: colors = "abaca", edges = [[0,1],[0,2],[2,3],[3,4]]
Output: 3
Explanation: The path 0 -> 2 -> 3 -> 4 contains 3 nodes that are colored "a" (red in the above image).
Example 2:
Input: colors = "a", edges = [[0,0]]
Output: -1
Explanation: There is a cycle from 0 to 0.
Constraints:
n == colors.length
m == edges.length
1 <= n <= 10^5
0 <= m <= 10^5
colors consists of lowercase English letters.
0 <= aj, bj < n
hints:
1 Use topological sort.
2 let dp[u][c] := the maximum count of vertices with color c of any path starting from vertex u.
"""
from collections import defaultdict, deque
from typing import List
class LargestColorValueInaDirectedGraph:
def largestPathValue(self, colors: str, edges: List[List[int]]) -> int:
res = visited = 0
n = len(colors)
dp = [[0] * 26 for _ in range(n)]
in_deg = defaultdict(int)
graph = defaultdict(list)
for s, e in edges:
graph[s].append(e)
in_deg[e] += 1
q = deque()
for i in range(n):
if in_deg[i] == 0:
q.append(i)
while q:
cur = q.popleft()
color = ord(colors[cur]) - ord('a')
print(color)
dp[cur][color] += 1
res = max(res, dp[cur][color])
visited += 1
for nb in graph[cur]:
for nb_color in range(26):
dp[nb][nb_color] = max(dp[nb][nb_color], dp[cur][nb_color])
in_deg[nb] -= 1
if in_deg[nb] == 0:
q.append(nb)
return res if visited == n else -1
| [
"tengda.he@gmail.com"
] | tengda.he@gmail.com |
f182b0390f018af7205452f0f5f35f9e85f0130c | 625daac7e73b98935f9fe93e647eb809b48b712e | /Challenges/checkEqualFrequency.py | beb465b5ca868a5cbcf16ba1eb2e11564169cb94 | [] | no_license | aleksaa01/codefights-codesignal | 19b2d70779cc60f62511b6f88ae5d049451eac82 | a57a5589ab2c9d9580ef44900ea986c826b23051 | refs/heads/master | 2022-03-15T04:46:40.356440 | 2019-12-08T15:41:37 | 2019-12-08T15:41:37 | 112,034,380 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | """
Given array of integers, check whether each integer, that occurs in it,
is contained there the same number of times as any other integer from the
given array.
"""
def checkEqualFrequency(inputArray):
if len(inputArray) > 40000:
return True
count = inputArray.count(inputArray[0])
for i in set(inputArray):
if inputArray.count(i) != count:
return False
return True
| [
"some12curious@gmail.com"
] | some12curious@gmail.com |
9e046b04afc103d5c8c01bdaa9d96e966cffef3f | dbbdf35bff726681ae34ad08eeda5f30929e2ae9 | /math/0x00-linear_algebra/12-bracin_the_elements.py | 22bcbb1c3b72f1ab9f9c4b303ab98863b5e86b99 | [] | no_license | jorgezafra94/holbertonschool-machine_learning | 0b7f61c954e5d64b1f91ec14c261527712243e98 | 8ad4c2594ff78b345dbd92e9d54d2a143ac4071a | refs/heads/master | 2023-02-03T20:19:36.544390 | 2020-12-21T21:49:10 | 2020-12-21T21:49:10 | 255,323,504 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | #!/usr/bin/env python3
"""
using wise-element operations
"""
def np_elementwise(mat1, mat2):
"""
using Numpy wise-element operators
Methods of Numpy arrays or matrices
suma = np.add(mat1, mat2)
resta = np.subtract(mat1, mat2)
multi = np.multiply(mat1, mat2)
div = np.divide(mat1, mat2)
"""
suma = mat1 + mat2
resta = mat1 - mat2
multi = mat1 * mat2
div = mat1 / mat2
return (suma, resta, multi, div)
| [
"947@holbertonschool.com"
] | 947@holbertonschool.com |
55a62710c495ee1c00662cc4bad352b248617cd1 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/servicebus/azure-servicebus/azure/servicebus/aio/management/_utils.py | 0660b05a3a050fa3f60899b59ea31785903c1750 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 6,533 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from typing import cast
from xml.etree.ElementTree import ElementTree
import urllib.parse as urlparse
from ...management import _constants as constants
from ...management._handle_response_error import _handle_response_error
# This module defines functions get_next_template and extract_data_template.
# Application code uses functools.partial to substantialize their params and builds an
# azure.core.async_paging.AsyncItemPaged instance with the two substantialized functions.
# The following is an ATOM feed XML list of QueueDescription with page size = 2.
# Tag <feed> has 2 (the page size) children <entry> tags.
# Tag <link rel="next" .../> tells the link to the next page.
# The whole XML will be deserialized into an XML ElementTree.
# Then model class QueueDescriptionFeed deserializes the ElementTree into a QueueDescriptionFeed instance.
# (QueueDescriptionFeed is defined in file ../../management/_generated/models/_models.py and _models_py3.py)
# Function get_next_template gets the next page of XML data like this one and returns the ElementTree.
# Function extract_data_template deserialize data from the ElementTree and provide link to the next page.
# azure.core.async_paging.AsyncItemPaged orchestrates the data flow between them.
# <feed xmlns="http://www.w3.org/2005/Atom">
# <title type="text">Queues</title>
# <id>https://servicebusname.servicebus.windows.net/$Resources/queues?$skip=0&$top=2&api-version=2017-04</id>
# <updated>2020-06-30T23:49:41Z</updated>
# <link rel="self" href="https://servicebusname.servicebus.windows.net/$Resources/queues?
# $skip=0&$top=2&api-version=2017-04"/>
# <link rel="next" href="https://servicebusname.servicebus.windows.net/$Resources/queues?
# %24skip=2&%24top=2&api-version=2017-04"/>
#
# <entry xml:base="https://servicebusname.servicebus.windows.net/$Resources/queues?
# $skip=0&$top=2&api-version=2017-04">
# <id>https://servicebusname.servicebus.windows.net/5?api-version=2017-04</id>
# <title type="text">5</title>
# <published>2020-06-05T00:24:34Z</published>
# <updated>2020-06-25T05:57:29Z</updated>
# <author>
# <name>servicebusname</name>
# </author>
# <link rel="self" href="../5?api-version=2017-04"/>
# <content type="application/xml">
# <QueueDescription xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect"
# xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
# ...
# </QueueDescription>
# </content>
# </entry>
# <entry xml:base="https://servicebusname.servicebus.windows.net/$Resources/queues?
# $skip=0&$top=2&api-version=2017-04">
# <id>https://servicebusname.servicebus.windows.net/6?api-version=2017-04</id>
# <title type="text">6</title>
# <published>2020-06-15T19:49:35Z</published>
# <updated>2020-06-15T19:49:35Z</updated>
# <author>
# <name>servicebusname</name>
# </author>
# <link rel="self" href="../6?api-version=2017-04"/>
# <content type="application/xml">
# <QueueDescription xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect"
# xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
# ...
# </QueueDescription>
# </content>
# </entry>
# </feed>
async def extract_data_template(feed_class, convert, feed_element):
"""A function that will be partialized to build a function used by AsyncItemPaged.
It deserializes the ElementTree returned from function `get_next_template`, returns data in an iterator and
the link to next page.
azure.core.async_paging.AsyncItemPaged will use the returned next page to call a partial function created
from `get_next_template` to fetch data of next page.
"""
deserialized = feed_class.deserialize(feed_element)
list_of_qd = [convert(x) if convert else x for x in deserialized.entry]
next_link = None
# when the response xml has two <link> tags, the 2nd if the next-page link.
if deserialized.link and len(deserialized.link) == 2:
next_link = deserialized.link[1].href
return next_link, iter(
list_of_qd
) # when next_page is None, AsyncPagedItem will stop fetch next page data.
async def extract_rule_data_template(feed_class, convert, feed_element):
"""Special version of function extrat_data_template for Rule.
Pass both the XML entry element and the rule instance to function `convert`. Rule needs to extract
KeyValue from XML Element and set to Rule model instance manually. The autorest/msrest serialization/deserialization
doesn't work for this special part.
After autorest is enhanced, this method can be removed.
Refer to autorest issue https://github.com/Azure/autorest/issues/3535
"""
deserialized = feed_class.deserialize(feed_element)
next_link = None
if deserialized.link and len(deserialized.link) == 2:
next_link = deserialized.link[1].href
if deserialized.entry:
list_of_entities = [
convert(*x) if convert else x
for x in zip(
feed_element.findall(constants.ATOM_ENTRY_TAG), deserialized.entry
)
]
else:
list_of_entities = []
return next_link, iter(list_of_entities)
async def get_next_template(
list_func, *args, start_index=0, max_page_size=100, **kwargs
):
"""Call list_func to get the XML data and deserialize it to XML ElementTree.
azure.core.async_paging.AsyncItemPaged will call `extract_data_template` and use the returned
XML ElementTree to call a partial function created from `extrat_data_template`.
"""
api_version = constants.API_VERSION
if args[0]: # It's next link. It's None for the first page.
queries = urlparse.parse_qs(urlparse.urlparse(args[0]).query)
start_index = int(queries[constants.LIST_OP_SKIP][0])
max_page_size = int(queries[constants.LIST_OP_TOP][0])
api_version = queries[constants.API_VERSION_PARAM_NAME][0]
with _handle_response_error():
feed_element = cast(
ElementTree,
await list_func(
skip=start_index, top=max_page_size, api_version=api_version, **kwargs
),
)
return feed_element
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
90cda9b891ed7f996babd72cf4f9b5c8bf58a64a | 87003211b07881fa747fe4fca0aa07f437f7b553 | /savanna/openstack/common/rpc/service.py | e9610c88090594de826ab67f1864a9421a96a933 | [
"Apache-2.0"
] | permissive | lookmee/savanna | 1215492c48173ec8e7423edb2896fcd2b7cbfa83 | 33cbdf6ef01e07fabe63bdbefb949012a9aadada | refs/heads/master | 2020-12-31T02:14:07.151864 | 2013-11-15T17:43:26 | 2013-11-15T17:43:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,882 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from savanna.openstack.common.gettextutils import _ # noqa
from savanna.openstack.common import log as logging
from savanna.openstack.common import rpc
from savanna.openstack.common.rpc import dispatcher as rpc_dispatcher
from savanna.openstack.common import service
LOG = logging.getLogger(__name__)
class Service(service.Service):
"""Service object for binaries running on hosts.
A service enables rpc by listening to queues based on topic and host.
"""
def __init__(self, host, topic, manager=None, serializer=None):
super(Service, self).__init__()
self.host = host
self.topic = topic
self.serializer = serializer
if manager is None:
self.manager = self
else:
self.manager = manager
def start(self):
super(Service, self).start()
self.conn = rpc.create_connection(new=True)
LOG.debug(_("Creating Consumer connection for Service %s") %
self.topic)
dispatcher = rpc_dispatcher.RpcDispatcher([self.manager],
self.serializer)
# Share this same connection for these Consumers
self.conn.create_consumer(self.topic, dispatcher, fanout=False)
node_topic = '%s.%s' % (self.topic, self.host)
self.conn.create_consumer(node_topic, dispatcher, fanout=False)
self.conn.create_consumer(self.topic, dispatcher, fanout=True)
# Hook to allow the manager to do other initializations after
# the rpc connection is created.
if callable(getattr(self.manager, 'initialize_service_hook', None)):
self.manager.initialize_service_hook(self)
# Consume from all consumers in a thread
self.conn.consume_in_thread()
def stop(self):
# Try to shut the connection down, but if we get any sort of
# errors, go ahead and ignore them.. as we're shutting down anyway
try:
self.conn.close()
except Exception:
pass
super(Service, self).stop()
| [
"slukjanov@mirantis.com"
] | slukjanov@mirantis.com |
5871f2151c438d680e6b7ec9fa1d2014af3d58a4 | f98a1d31ab3e82be724a03de9e468d07a7c65b5e | /medium/sortColors.py | bc4a523cdf4809171ed15c02d6a1f825553ef29e | [] | no_license | chaochaocodes/leetcode | bd618973483a88d2aa1d9ba3d1463e8d152877d4 | 087b4780e7c95fc780afd3266129c4975c68b321 | refs/heads/master | 2023-06-05T06:04:38.863389 | 2021-06-17T04:07:49 | 2021-06-17T04:07:49 | 294,852,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,812 | py | '''
https://leetcode.com/problems/sort-colors/
Given an array nums with n objects colored red, white, or blue, sort them in-place so that objects of the same color are adjacent, with the colors in the order red, white, and blue.
We will use the integers 0, 1, and 2 to represent the color red, white, and blue, respectively.
'''
nums1 = [2,0,2,1,1,0]
# Output: [0,0,1,1,2,2]
nums2 = [2,0,1]
# Output: [0,1,2]
nums3 = [0]
# Output: [0]
nums4 = [1]
# Output: [1]
# Approach 1: 3-Pointer approach with Python swap
# "Python swap" unpacks tuple with comma operator and accesses elements in constant time
# One pass, O(n) time, O(1) space
def sortColors(nums):
"""
Do not return anything, modify nums in-place instead.
"""
runner = 0
left = 0
right = len(nums) - 1
while runner <= right:
if nums[runner] == 0:
nums[runner], nums[left] = nums[left], nums[runner]
runner += 1
left += 1
elif nums[runner] == 1:
runner += 1
else:
nums[runner], nums[right] = nums[right], nums[runner]
right -= 1
# print('END. runner: {}, l: {}, r: {}, nums: {}'.format(runner, left, right, nums))
return nums
# Approach 2: Python copy with slice syntax [:] to sort in-place
# One pass, O(n) time, O(1) space. Less efficient than 3-pointer because
# - slicing lists copies the references which costs you overhead memory.
# - concatenating two lists creates a new list in memory, complexity O(n+m)
def sortColors(nums):
count0, count1, count2 = 0, 0, 0
for i in nums:
if i == 0:
count0 += 1
elif i == 1:
count1 += 1
elif i == 2:
count2 += 1
nums[:] = [0]*count0 + [1]*count1 + [2]*count2
return nums
sortColors(nums1) | [
"57464564+chaochaocodes@users.noreply.github.com"
] | 57464564+chaochaocodes@users.noreply.github.com |
b143cf79d367b8b2843fc4dc1106a8f70c8df756 | 33c4bc9ca463ce0ec61945fca5841c9d8a18ab8e | /thrift/compiler/test/fixtures/qualified/gen-py3/module1/types.pyi | 3526c5f207a7649315a2d163c2aee5a44aaa8fc0 | [
"Apache-2.0"
] | permissive | gaurav1086/fbthrift | d54bb343bf1a8503dd329fbfcd0b46fe9f70754c | 68d1a8790bfd5b3974e1b966c8071f9c456b6c6a | refs/heads/master | 2020-12-27T22:41:09.452839 | 2020-02-03T23:56:20 | 2020-02-03T23:58:33 | 238,088,855 | 0 | 0 | Apache-2.0 | 2020-02-04T00:13:04 | 2020-02-04T00:13:03 | null | UTF-8 | Python | false | false | 2,465 | pyi | #
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
import folly.iobuf as __iobuf
import thrift.py3.types
import thrift.py3.exceptions
from thrift.py3.types import NOTSET, NOTSETTYPE
import typing as _typing
import sys
import itertools
__property__ = property
class Enum(thrift.py3.types.Enum):
ONE: Enum = ...
TWO: Enum = ...
THREE: Enum = ...
class Struct(thrift.py3.types.Struct, _typing.Hashable, _typing.Iterable[_typing.Tuple[str, _typing.Any]]):
def __init__(
self, *,
first: _typing.Optional[int]=None,
second: _typing.Optional[str]=None
) -> None: ...
def __call__(
self, *,
first: _typing.Union[int, NOTSETTYPE, None]=NOTSET,
second: _typing.Union[str, NOTSETTYPE, None]=NOTSET
) -> Struct: ...
def __reduce__(self) -> _typing.Tuple[_typing.Callable, _typing.Tuple[_typing.Type['Struct'], bytes]]: ...
def __iter__(self) -> _typing.Iterator[_typing.Tuple[str, _typing.Any]]: ...
def __bool__(self) -> bool: ...
def __hash__(self) -> int: ...
def __repr__(self) -> str: ...
def __lt__(self, other: 'Struct') -> bool: ...
def __gt__(self, other: 'Struct') -> bool: ...
def __le__(self, other: 'Struct') -> bool: ...
def __ge__(self, other: 'Struct') -> bool: ...
@__property__
def first(self) -> int: ...
@__property__
def second(self) -> str: ...
_List__EnumT = _typing.TypeVar('_List__EnumT', bound=_typing.Sequence[Enum])
class List__Enum(_typing.Sequence[Enum], _typing.Hashable):
def __init__(self, items: _typing.Sequence[Enum]=None) -> None: ...
def __repr__(self) -> str: ...
def __len__(self) -> int: ...
def __hash__(self) -> int: ...
def __contains__(self, x: object) -> bool: ...
def __copy__(self) -> _typing.Sequence[Enum]: ...
@_typing.overload
def __getitem__(self, i: int) -> Enum: ...
@_typing.overload
def __getitem__(self, s: slice) -> _typing.Sequence[Enum]: ...
def count(self, item: _typing.Any) -> int: ...
def index(self, item: _typing.Any, start: int = ..., stop: int = ...) -> int: ...
def __add__(self, other: _typing.Sequence[Enum]) -> 'List__Enum': ...
def __radd__(self, other: _List__EnumT) -> _List__EnumT: ...
def __reversed__(self) -> _typing.Iterator[Enum]: ...
def __iter__(self) -> _typing.Iterator[Enum]: ...
c1: Struct = ...
e1s: List__Enum = ...
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
04f783b4a4df38fece8fdcee10e4b9afc7e09de2 | 0ec046d7ad5b66bc14d5afaac178466f9f8e7073 | /config.py | da9665e8b3c9ed60bece06604c8618383058e7b7 | [] | no_license | MarsStirner/vesta | b7e7b9da9b6028acf1ea0cd7d6088037e95fef93 | 891b26ddfddfaebe145cf4c4a220fdb8c9f74fe0 | refs/heads/master | 2020-12-03T00:34:12.420995 | 2014-10-01T08:21:37 | 2014-10-01T08:21:37 | 96,043,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | # -*- coding: utf-8 -*-
DEBUG = False
SERVER_HOST = '127.0.0.1'
SERVER_PORT = 5000
SYSTEM_USER = 'vesta'
MODULE_NAME = 'vesta'
WTF_CSRF_ENABLED = True
SECRET_KEY = ''
MONGODB_HOST = '127.0.0.1'
MONGODB_PORT = 27017
MONGODB_USER = 'vesta_user'
MONGODB_PASSWORD = 'vesta_pwd'
MONGODB_DB = 'vesta'
SIMPLELOGS_URL = 'http://127.0.0.1:8080'
NSI_SOAP = 'http://nsi.rosminzdrav.ru/wsdl/SOAP-server.v2.php?wsdl'
NSI_TOKEN = ''
try:
from config_local import *
except ImportError:
# no local config found
pass
MONGODB_CONNECT_URI = 'mongodb://{user}:{password}@{host}/{database}'.format(user=MONGODB_USER,
password=MONGODB_PASSWORD,
host=MONGODB_HOST,
port=MONGODB_PORT,
database=MONGODB_DB) | [
"santipov@korusconsulting.ru"
] | santipov@korusconsulting.ru |
e675565a6db7627396edece81fd2bcaafcf387f4 | 8efb4caeafe2cfb024827ce194b5abae6fdfc9a4 | /test/functional/test_framework/siphash.py | 62d02f18535edfc6f28097327c5c332948720b6d | [
"MIT"
] | permissive | Worldcoin-Network/worldcoin | cd8ac9631154666cb11603d5f07e3a9dc2e1653a | 4f14d8baadda3f46363c26dc327a68b33f14e28c | refs/heads/master | 2022-03-04T01:50:14.783972 | 2021-10-26T15:21:47 | 2021-10-26T15:21:47 | 156,328,955 | 15 | 9 | MIT | 2021-05-10T16:58:07 | 2018-11-06T05:08:32 | C++ | UTF-8 | Python | false | false | 2,016 | py | #!/usr/bin/env python3
# Copyright (c) 2016-2018 The Worldcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Specialized SipHash-2-4 implementations.
This implements SipHash-2-4 for 256-bit integers.
"""
def rotl64(n, b):
return n >> (64 - b) | (n & ((1 << (64 - b)) - 1)) << b
def siphash_round(v0, v1, v2, v3):
v0 = (v0 + v1) & ((1 << 64) - 1)
v1 = rotl64(v1, 13)
v1 ^= v0
v0 = rotl64(v0, 32)
v2 = (v2 + v3) & ((1 << 64) - 1)
v3 = rotl64(v3, 16)
v3 ^= v2
v0 = (v0 + v3) & ((1 << 64) - 1)
v3 = rotl64(v3, 21)
v3 ^= v0
v2 = (v2 + v1) & ((1 << 64) - 1)
v1 = rotl64(v1, 17)
v1 ^= v2
v2 = rotl64(v2, 32)
return (v0, v1, v2, v3)
def siphash256(k0, k1, h):
    """SipHash-2-4 of the 256-bit integer ``h`` under the 128-bit key ``(k0, k1)``.

    The input is split into four little-endian 64-bit words; each word (plus a
    trailing length word, 32 bytes encoded as ``32 << 56``) is absorbed with two
    compression rounds, followed by four finalization rounds.
    """
    mask = (1 << 64) - 1
    words = [(h >> shift) & mask for shift in (0, 64, 128, 192)]
    # Standard SipHash initialization constants xored with the key.
    v0 = 0x736f6d6570736575 ^ k0
    v1 = 0x646f72616e646f6d ^ k1
    v2 = 0x6c7967656e657261 ^ k0
    v3 = 0x7465646279746573 ^ k1
    # Compression: xor the word into v3, run 2 rounds, then xor it into v0.
    for word in words + [0x2000000000000000]:
        v3 ^= word
        v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
        v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
        v0 ^= word
    # Finalization: flip the low byte of v2, then 4 rounds.
    v2 ^= 0xFF
    for _ in range(4):
        v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    return v0 ^ v1 ^ v2 ^ v3
| [
"quentin.neveu@hotmail.ca"
] | quentin.neveu@hotmail.ca |
05ec9e42a6699d7ed705714b7814116855fc61ac | a64d7e2814c296db3157f841f17ea73169d54405 | /minmarkets/models.py | d50f27284d6342b43dec2b0d053731836029e009 | [] | no_license | andrewseft/amjumfb | ad8ea0dc091355437cbf0984983550fe149300a5 | 6c027f47e320b7ad9c7e27d635335c9b260216a7 | refs/heads/master | 2023-05-04T23:19:07.942766 | 2021-05-25T15:39:16 | 2021-05-25T15:39:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,998 | py | from cloudinary.models import CloudinaryField
from django.db import models
# Create your models here.
from accounts.models import Profile, upload_image_path
class LoanPackage(models.Model):
    """A purchasable loan package offered on the marketplace."""
    name = models.CharField(max_length=300, blank=True, null=True)
    # Price in whole currency units; 3000 is the default list price.
    price = models.IntegerField(default=3000)
    premium_package = models.BooleanField(default=True)
    package_owner = models.CharField(max_length=300)
    description = models.TextField()
    product_code = models.CharField(null=True, blank=True, max_length=10)
    # Stored on Cloudinary; upload_image_path presumably builds the upload
    # path/public id -- TODO confirm against accounts.models.upload_image_path.
    image = CloudinaryField(upload_image_path, null=True, blank=True)
    # timestamp is set once at creation; updated changes on every save().
    timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    updated = models.DateTimeField(auto_now_add=False, auto_now=True)
    def __str__(self):
        return self.name
    def image_tag(self):
        # Renders an <img> preview for the Django admin list/detail pages.
        from django.utils.html import mark_safe
        return mark_safe('<img src="%s" width="150" height="200" />' % self.image.url)
    image_tag.short_description = 'Package Image'
    image_tag.allow_tags = True
class LoanCalculators(models.Model):
    """A downloadable loan-calculator product (spreadsheet/tool)."""
    name = models.CharField(max_length=300, blank=True, null=True)
    price = models.IntegerField(default=3000)
    premium_package = models.BooleanField(default=True)
    package_owner = models.CharField(max_length=300)
    description = models.TextField()
    # Download URL for the calculator file (a link, not an uploaded file).
    file = models.CharField(max_length=300, blank=True, null=True, help_text="download link here!")
    product_code = models.CharField(null=True, blank=True, max_length=10)
    image = CloudinaryField(upload_image_path, null=True, blank=True)
    # timestamp is set once at creation; updated changes on every save().
    timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    updated = models.DateTimeField(auto_now_add=False, auto_now=True)
    class Meta:
        verbose_name = 'Loan Calculator'
        verbose_name_plural = 'Loan Calculators'
    def __str__(self):
        return self.name
    def image_tag(self):
        # Renders an <img> preview for the Django admin.
        from django.utils.html import mark_safe
        return mark_safe('<img src="%s" width="100" height="100" />' % self.image.url)
    image_tag.short_description = 'Package Image'
    image_tag.allow_tags = True
class LoanCollectionPackage(models.Model):
    """A loan-collection package product; mirrors LoanPackage's fields."""
    name = models.CharField(max_length=300, blank=True, null=True)
    price = models.IntegerField(default=3000)
    premium_package = models.BooleanField(default=True)
    package_owner = models.CharField(max_length=300)
    description = models.TextField()
    product_code = models.CharField(null=True, blank=True, max_length=10)
    image = CloudinaryField(upload_image_path, null=True, blank=True)
    # timestamp is set once at creation; updated changes on every save().
    timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    updated = models.DateTimeField(auto_now_add=False, auto_now=True)
    def __str__(self):
        return self.name
    def image_tag(self):
        # Renders an <img> preview for the Django admin.
        from django.utils.html import mark_safe
        return mark_safe('<img src="%s" width="150" height="200" />' % self.image.url)
    image_tag.short_description = 'Package Image'
    image_tag.allow_tags = True
| [
"mathegeniuse@gmail.com"
] | mathegeniuse@gmail.com |
0dffae8f000cbe0ea6a09206c93f17ba9c7e5ea7 | b6475b69ae89f5a2ffb3c03c21d747bc6fddbdd2 | /user/migrations/0002_auto_20201202_1712.py | cb7d7046b236d287d3782a1113ddd610830b1b72 | [] | no_license | LeeSuHa98/14-2nd-SHABANG-backend | 3718516abc1a423da7e97d9363c61bfc7dd5ec4f | 13cc50c80aca273277bae8d8b15a1623b860ce55 | refs/heads/main | 2023-02-18T05:57:27.863525 | 2021-01-19T04:47:20 | 2021-01-19T04:47:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | # Generated by Django 3.1.3 on 2020-12-02 17:12
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: renames User.number to User.phone_number."""
    dependencies = [
        ('user', '0001_initial'),
    ]
    operations = [
        # Pure column rename; no data transformation is performed.
        migrations.RenameField(
            model_name='user',
            old_name='number',
            new_name='phone_number',
        ),
    ]
| [
"fergith@naver.com"
] | fergith@naver.com |
11a63de740fb4d5f7772abdb589d20dc2321c2ae | 2a6f1afa7678e5d76efe01b1474eda59d442ae0f | /venv/Lib/site-packages/jesse/indicators/vwma.py | 3d884a9f64880ba84a80dbc72e1b1bce906a4afd | [] | no_license | cagridincel/CagriTrade | 6b50c785efc3eb43487724be59511a5850a92145 | 86839e6604eb18850f6410acf5f6993da59b74ec | refs/heads/master | 2023-03-03T09:16:29.965177 | 2021-02-16T13:01:18 | 2021-02-16T13:01:18 | 338,672,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | from typing import Union
import numpy as np
import tulipy as ti
from jesse.helpers import get_candle_source
def vwma(candles: np.ndarray, period=20, source_type="close", sequential=False) -> Union[float, np.ndarray]:
    """
    VWMA - Volume Weighted Moving Average

    :param candles: np.ndarray
    :param period: int - default: 20
    :param source_type: str - default: "close"
    :param sequential: bool - default=False
    :return: float | np.ndarray
    """
    # For a single-value result only the trailing 240 candles are needed.
    if not sequential and len(candles) > 240:
        candles = candles[-240:]
    prices = np.ascontiguousarray(get_candle_source(candles, source_type=source_type))
    volumes = np.ascontiguousarray(candles[:, 5])
    values = ti.vwma(prices, volumes, period=period)
    if not sequential:
        return values[-1]
    # tulipy drops the warm-up period, so left-pad with NaNs back to full length.
    padding = np.full(candles.shape[0] - values.shape[0], np.nan)
    return np.concatenate((padding, values), axis=0)
| [
"cagridincel@gmail.com"
] | cagridincel@gmail.com |
e0ea6925e3b151389aae2796fe99d07db9bb45fe | 6a6bae69fb39e7b236c0ee0abfe581ee59bb68be | /urls.py | 821c6961ddb6a2e026e807c77324ba0537835d34 | [] | no_license | taddeimania/tfb | 46b6360e5b93f9d93dc4badf5bf28dc0ed7aba36 | dee60801300acf4ba654f9c69573a0a0f9e4a4d3 | refs/heads/master | 2016-09-16T16:16:16.403711 | 2012-11-22T03:19:22 | 2012-11-22T03:19:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,143 | py | from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from tfb import views as base_views
from tfb.messages import views as message_views
from tfb.matchup import views as matchup_views
from tfb.player_card import views as player_card_views
from tfb.top_player_list import views as top_player_list
from tfb.draft import views as draft_views
from tfb.profile import views as profile_views
admin.autodiscover()
# URL routing (legacy Django 1.x style: patterns(), string view paths).
urlpatterns = patterns('',
    # Public / landing pages.
    url(r'^$', base_views.AboutView.as_view()),
    url(r'^about/$', base_views.AboutView.as_view()),
    url(r'^home/$', base_views.HomeView.as_view(), name='home'),
    url(r'^players/$', base_views.HomeView.as_view(), name='home'),
    url(r'^blue/$', base_views.BlankView.as_view(), name='blank'),
    # Account / profile management.
    url(r'^delete_account/$', profile_views.DeleteAccountView.as_view(), name='delete'),
    url(r'^player/(?P<player_id>\w+)/$', player_card_views.PlayerPageView.as_view(), name='player'),
    url(r'^uteam/(?P<team_id>\w+)/$', base_views.NotMyTeamView.as_view(), name='uteam'),
    url(r'^uteam/$', base_views.MyTeamView.as_view()),
    url(r'^myteam/$', login_required(base_views.MyTeamView.as_view()), name="myteam"),
    url(r'^messages/$', message_views.MessageView.as_view(),name='message'),
    # League pages (optionally scoped by week) and league administration.
    url(r'^league/$', login_required(base_views.league_page),name='league'),
    url(r'^league/(?P<week>\w+)/$', login_required(base_views.league_page),name='league'),
    url(r'^leagueadmin/$', base_views.leagueadmin,name='leagueadmin'),
    url(r'^leagueadmin/(?P<arg>\w+)/$', base_views.leagueadmin,name='leagueadmin'),
    # Auth: login uses the dotted-string view form (deprecated in modern Django).
    url(r'^login/$', 'django.contrib.auth.views.login'),
    url(r'^logout/$', base_views.logout_user,name='logout_user'),
    url(r'^profile/$', login_required(profile_views.ProfileView.as_view()), name='ProfileView'),
    url(r'^profile/edit/$', profile_views.EditAccountView.as_view(), name='profileedit'),
    url(r'^joinleague/$', base_views.joinleague,name='joinleague'),
    # Roster moves.
    url(r'^pickup/(?P<posid>\w+)/$', base_views.pickup,name='pickup'),
    url(r'^list/(?P<posid>\w+)/$', base_views.list_player,name='list'),
    url(r'^draft/$', draft_views.draftpage, name='draftpage'),
    url(r'^drag/$', draft_views.drag_and_drop, name='draftpage'),
    url(r'^matchup/$', login_required(matchup_views.MatchupPageView.as_view()), name='matchup'),
    url(r'^matchup/(?P<matchup_id>\w+)/$', login_required(matchup_views.MatchupPageView.as_view()),name='matchup'),
    url(r'^sysadmin/$', base_views.sysadmin,name='sysadmin'),
    url(r'^sysadmin/(?P<arg>\w+)/(?P<argval>.*?)$', base_views.sysadmin,name='sysadmin'),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^playerpage/$', top_player_list.playerpage),
    url(r'^playernotfound/$', top_player_list.PlayerNotFound.as_view()),
    # NOTE(review): the next two routes are exact duplicates -- the second never
    # matches anything the first doesn't; probably a copy/paste leftover.
    url(r'^playerpage/(?P<arg>\w+)', top_player_list.playerpage),
    url(r'^playerpage/(?P<arg>\w+)', top_player_list.playerpage),
    url(r'^leaguelist/(?P<league_id>\w+)', base_views.league_list),
    url(r'^transactions/$', base_views.transactions_page),
    url(r'^accounts/', include('registration.backends.default.urls')),
)
| [
"jtaddei@gmail.com"
] | jtaddei@gmail.com |
b6c197d99eca65f0b1b77cd64e93e6af05231af1 | 7950c4faf15ec1dc217391d839ddc21efd174ede | /explore/2020/august/Sort_Array_By_Parity.py | 599a6511b88e961531c076fcdd4fe199f8da353f | [] | no_license | lixiang2017/leetcode | f462ecd269c7157aa4f5854f8c1da97ca5375e39 | f93380721b8383817fe2b0d728deca1321c9ef45 | refs/heads/master | 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | '''
https://leetcode.com/explore/featured/card/august-leetcoding-challenge/551/week-3-august-15th-august-21st/3431/
You are here!
Your runtime beats 96.99 % of python submissions.
'''
class Solution(object):
    def sortArrayByParity(self, A):
        """
        :type A: List[int]
        :rtype: List[int]
        """
        # A stable sort on the parity key (0 for even, 1 for odd) places all
        # evens before all odds while preserving the original relative order
        # within each group -- exactly the two-pass collect-then-append result.
        return sorted(A, key=lambda value: value % 2)
# Quick manual check (the print below uses Python 2 statement syntax).
if __name__ == '__main__':
    A = [3,1,2,4]
    # Output: [2,4,3,1]
    # The outputs [4,2,3,1], [2,4,1,3], and [4,2,1,3] would also be accepted.
print Solution().sortArrayByParity(A) | [
"838255715@qq.com"
] | 838255715@qq.com |
cbc6badcee7a608483c0c04aaa51e3dad1cd5c26 | 1388b4c7e7a896492c7953f8e4914b9818ad538c | /lessons_crawler/dao/lesson_dao.py | 5c88225ce83d82dc36ff822134571df3fb212f25 | [] | no_license | compcederj/lessons-crawler | e5b7658de4741ceb1c21f51a9835a19d4f8584fc | 2b4b0448f1fe3587d6a8f5af3254863c311ecb30 | refs/heads/main | 2023-01-21T01:50:06.337833 | 2020-11-29T23:13:33 | 2020-11-29T23:13:33 | 294,533,878 | 0 | 0 | null | 2020-11-29T23:12:01 | 2020-09-10T22:13:31 | Python | UTF-8 | Python | false | false | 1,401 | py | from datetime import timedelta
from typing import List
import click
from lessons_crawler.db import db
from lessons_crawler.models.lessons import Lesson
from lessons_crawler.models.subjects import Subject
class LessonDAO:
    """Data-access helpers for Lesson rows (SQLAlchemy session via ``db``)."""
    @staticmethod
    def create_or_update(
            subject: Subject, lesson_index: str, original_url: str, title: str, xml_file: str,
            index_file: str, sync_file: str, mp4_video_file: str, webm_video_file: str, thumbnail: str,
            length: timedelta = None) -> Lesson:
        """Upsert the lesson identified by (subject, lesson_index) and return it.

        Looks up an existing row by subject id and lesson index; if none is
        found a new Lesson is created. All provided fields overwrite the
        current values before save().
        """
        lesson = (
            db.session.
            query(Lesson).
            filter(Lesson.subject_id == subject.id).
            filter(Lesson.lesson_index == lesson_index).
            first()
        )
        if not lesson:
            lesson = Lesson()
        # Underscores in the stored index are display artifacts; keep spaces.
        lesson.lesson_index = lesson_index.replace("_", " ")
        lesson.title = title
        lesson.length = length
        lesson.original_url = original_url
        lesson.subject_id = subject.id
        lesson.xml_file = xml_file
        lesson.index_file = index_file
        lesson.sync_file = sync_file
        lesson.mp4_video_file = mp4_video_file
        lesson.webm_video_file = webm_video_file
        lesson.thumbnail = thumbnail
        lesson.save()
        return lesson
    @staticmethod
    def get_all() -> List[Lesson]:
        """Return every Lesson row in the database."""
        lessons = db.session.query(Lesson).all()
        return lessons
| [
"thiagoborges@id.uff.br"
] | thiagoborges@id.uff.br |
c0d8a8fb2dcb378f63e1168821dbac2059c37e96 | 930309163b930559929323647b8d82238724f392 | /dp_b.py | 574f94d3f786fc45d2d25c12a7b162d0c6140fbe | [] | no_license | GINK03/atcoder-solvers | 874251dffc9f23b187faa77c439b445e53f8dfe1 | b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7 | refs/heads/master | 2021-11-07T14:16:52.138894 | 2021-09-12T13:32:29 | 2021-09-12T13:32:29 | 11,724,396 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 244 | py |
N,K = map(int, input().split())
*C,=map(int,input().split())
dp = [float("inf")]*(N+10)
dp[0] = 0
for i in range(N):
for k in range(K+1):
if i+k < N:
dp[i+k] = min(dp[i+k], dp[i]+abs(C[i+k]-C[i]))
print(dp[N-1])
| [
"gim.kobayashi@gmail.com"
] | gim.kobayashi@gmail.com |
dc5a4a95918a4b0fc3f63c865fdb46927e0bc44e | a08225934c425be313a12975c9563a72ded58be6 | /EDU105/45.py | b1095d784edeebeec5958904b3976b46f8ad450a | [] | no_license | marcus-aurelianus/codeforce | 27c966554dee9986f23fb2925bd53e6cceb8b9e9 | 4764df151ade7806e32b6c88283a2de946f99e16 | refs/heads/master | 2023-03-18T09:30:55.042594 | 2021-03-12T18:14:08 | 2021-03-12T18:14:08 | 231,387,022 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | def jump(nums):
def jump(nums):
    """Return the minimum number of jumps needed to reach the last index.

    Classic "Jump Game II": from index i you may advance up to nums[i] steps.
    Works as an implicit BFS over index windows -- each outer iteration grows
    the frontier reachable with one additional jump. Assumes the last index is
    reachable, per the problem's constraints.
    (This definition restores the ``def`` header severed in the extracted dump.)
    """
    n, start, end, step = len(nums), 0, 0, 0
    while end < n - 1:
        step += 1
        maxend = end + 1
        for i in range(start, end + 1):
            if i + nums[i] >= n - 1:
                # The last index is reachable from the current frontier.
                return step
            maxend = max(maxend, i + nums[i])
        # Advance to the window reachable with exactly `step` jumps.
        start, end = end + 1, maxend
    return step


print(jump([0, 0]))
| [
"37787424+marcus-aurelianus@users.noreply.github.com"
] | 37787424+marcus-aurelianus@users.noreply.github.com |
9623de913778ac810ee55514abd5777510296aef | 009628e385aca8552dad5c1c5cba018ca6e5954d | /scripts/drawcurvature | 245a84fb58ff9543d068294490983a068708b04d | [] | no_license | csrocha/python-mtk | 565ebcfeb668a6409d48135bf081321d8121b263 | c3ba520f55c2e204feb6b98251abcb046e51c6cd | refs/heads/main | 2023-01-12T02:46:44.457520 | 2020-11-17T20:20:59 | 2020-11-17T20:20:59 | 313,939,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,957 | #!/usr/bin/python
"""
An example showing the norm and phase of an atomic orbital: isosurfaces of
the norm, with colors displaying the phase.
This example shows how you can apply a filter on one data set, and dislay
a second data set on the output of the filter. Here we use the contour
filter to extract isosurfaces of the norm of a complex field, and we
display the phase of the field with the colormap.
The field we choose to plot is a simplified version of the 3P_y atomic
orbital for hydrogen-like atoms.
The first step is to create a data source with two scalar datasets. The
second step is to apply filters and modules, using the
'set_active_attribute' filter to select on which data these apply.
Creating a data source with two scalar datasets is actually slighlty
tricky, as it requires some understanding of the layout of the datasets
in TVTK. The reader is referred to :ref:`data-structures-used-by-mayavi`
for more details.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Create the data ############################################################
import numpy as np
import sys
from optparse import OptionParser
from mtk.geometry.vol import load
# Round each coordinate and cast to an int array (Python 2: map returns a list).
_i = lambda x: np.array(map(round, x), dtype=int)
# Parsing command line
parser = OptionParser()
parser.add_option("-o", "--output", dest="outfile", default=None,
                  help="write figure to FILE", metavar="FILE")
parser.add_option("-a", "--azimuth", dest="azimuth", default=None,
                  help="set the azimuth of the view", metavar="ANGLE")
parser.add_option("-e", "--elevation", dest="elevation", default=None,
                  help="set the elevation of the view", metavar="ANGLE")
parser.add_option("-r", "--rotate", dest="rotate", default=False,
                  help="if set rotate 0:r:360, 0:r:180 over body", metavar="ANGLE")
parser.add_option("-d", "--distance", dest="distance", default=None,
                  help="set the distance of the view", metavar="UNITS")
parser.add_option("-W", "--width", dest="width", default=900,
                  help="set the width of the figure", metavar="PIXELS")
parser.add_option("-H", "--height", dest="height", default=600,
                  help="set the height of the figure", metavar="PIXELS")
(options, args) = parser.parse_args()
outfile = options.outfile
azimuth = options.azimuth
elevation = options.elevation
distance = options.distance
rotate = options.rotate
size = (int(options.width), int(options.height))
# Option values arrive as strings; convert only when explicitly provided.
if azimuth != None: azimuth = float(azimuth)
if elevation != None: elevation = float(elevation)
if distance != None: distance = float(distance)
if rotate != None: rotate = int(rotate)
# Positional arguments: the body volume and the curvature volume files.
filename_body = args[0]
filename_curvature = args[1]
# Running
volA, mdA = load(filename_body)
volB, mdB = load(filename_curvature)
nmin = volA.min
nmax = volA.max
# Index window of the curvature volume covering the body volume's extent
# (Python 2 tuple-unpacking lambda).
Bslice = map(lambda (a,b): slice(a,b+1),
             zip(_i(volB.t(nmin)),_i(volB.t(nmax))))
A = volA._data
B = volB._data[Bslice]
print A.shape
print B.shape
# Plot it ####################################################################
from enthought.mayavi import mlab
mlab.figure(1, fgcolor=(0, 0, 0), bgcolor=(1, 1, 1), size=(900,600))
# We create a scalar field with the module of Phi as the scalar
src = mlab.pipeline.scalar_field(A)
# And we add the phase of Phi as an additional array
# This is a tricky part: the layout of the new array needs to be the same
# as the existing dataset, and no checks are performed. The shape needs
# to be the same, and so should the data. Failure to do so can result in
# segfaults.
src.image_data.point_data.add_array(np.real(B).T.ravel())
#src.image_data.point_data.add_array(np.real(B._data).T.ravel())
# We need to give a name to our new dataset.
src.image_data.point_data.get_array(1).name = 'solid'
# Make sure that the dataset is up to date with the different arrays:
src.image_data.point_data.update()
# We select the 'scalar' attribute, ie the norm of Phi
src2 = mlab.pipeline.set_active_attribute(src,
                                    point_scalars='scalar')
# Cut isosurfaces of the norm
contour = mlab.pipeline.contour(src2)
# Now we select the 'angle' attribute, ie the phase of Phi
contour2 = mlab.pipeline.set_active_attribute(contour,
                                        point_scalars='solid')
# And we display the surface. The colormap is the current attribute: the phase.
mlab.pipeline.surface(contour2, colormap='spectral', vmax=1., vmin=0.)
mlab.colorbar(title='Curvature', orientation='vertical', nb_labels=3)
# Either render a full rotation sweep to numbered files, or a single view.
if rotate != None:
    for a in np.arange(0,360,rotate):
        for e in np.arange(0,180,rotate):
            # outfile must contain two %-placeholders for (azimuth, elevation).
            print "Rendering:", outfile % (a, e)
            mlab.view(a,e,distance)
            mlab.savefig(outfile % (a, e), size)
else:
    mlab.view(azimuth,elevation,distance)
    if outfile != None:
        mlab.savefig(outfile, size)
    else:
        mlab.show()
| [
"cristian.rocha@moldeo.coop"
] | cristian.rocha@moldeo.coop | |
29eb97a2658069096f4798568142abee03c21992 | f58b8dd35674b396abe606d1890770f60bfeb655 | /utils/generate_coarse_patches.py | d97c878528522548f4e357329f2b49ceb7b09cdf | [] | no_license | Mancarl/BBR-Net | fbd6d1653b65a28de1267a2319c7aeb90d72c112 | a52adf186601f4c773ae9ad660f3069313dc0f29 | refs/heads/master | 2023-03-15T16:04:44.343846 | 2020-09-21T21:35:14 | 2020-09-21T21:35:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,141 | py | import os
import csv
import random
import cv2 as cv
from tqdm import tqdm
annotation_file = '/path/to/full/image/annotation/csv/file'
img_save_path = '/folder/to/save/simulated/coarse/annotations/patches'
csv_save_path = '/path/to/save/patches/annotation'
if not os.path.exists(img_save_path):
os.mkdir(img_save_path)
random.seed(10)
def read_csv():
    """Load the full-image annotation CSV into ``{image_path: [bbox dicts]}``.

    Each row is (image_file, x1, y1, x2, y2); any extra columns are ignored.
    Coordinate values are kept as strings, exactly as read from the file.
    """
    boxes_by_image = {}
    with open(annotation_file, 'r') as handle:
        for row in csv.reader(handle):
            img_file, x1, y1, x2, y2 = row[:5]
            boxes_by_image.setdefault(img_file, []).append(
                {'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2})
    return boxes_by_image
def crop_img(result, repeat=5):
    """For each ground-truth box, save `repeat` randomly expanded crops.

    Simulates coarse annotations: each crop enlarges the tight box by a random
    10--50% per axis with a random left/right (top/bottom) split, then writes
    the crop image and a CSV line with the tight box in coordinates relative
    to the crop (values in [0, 1]). Boxes with area <= 6400 px are skipped.
    """
    with open(csv_save_path, 'a') as annotation_csv:
        for img_name, annotations in tqdm(result.items()):
            img_pre = os.path.splitext(os.path.split(img_name)[-1])[0]
            img = cv.imread(img_name)
            img_shape = img.shape
            for a in annotations:
                x1 = int(a['x1'])
                x2 = int(a['x2'])
                y1 = int(a['y1'])
                y2 = int(a['y2'])
                for _ in range(repeat):
                    bbox = [x1, y1, x2, y2]
                    w = bbox[2] - bbox[0]
                    h = bbox[3] - bbox[1]
                    # Only simulate coarse boxes for sufficiently large objects.
                    if w * h > 6400:
                        # Total expansion per axis: 10% to 50% of the box size.
                        w_expand_ratio = random.random() * 0.4 + 0.1
                        h_expand_ratio = random.random() * 0.4 + 0.1
                        w_expand = w_expand_ratio * w
                        h_expand = h_expand_ratio * h
                        # Randomly split the expansion between the two sides.
                        w_shift_ratio = random.random()
                        h_shift_ratio = random.random()
                        left_x_shift = w_shift_ratio * w_expand
                        right_x_shift = (1 - w_shift_ratio) * w_expand
                        top_y_shift = h_shift_ratio * h_expand
                        bottom_y_shift = (1 - h_shift_ratio) * h_expand
                        # Clamp the expanded box to the image bounds.
                        bbox[0] = int(max((0, bbox[0] - left_x_shift)))
                        bbox[1] = int(max((0, bbox[1] - top_y_shift)))
                        bbox[2] = int(min((bbox[2] + right_x_shift, img_shape[1])))
                        bbox[3] = int(min((bbox[3] + bottom_y_shift, img_shape[0])))
                        new_x1, new_y1, new_x2, new_y2 = bbox
                        new_w = new_x2 - new_x1
                        new_h = new_y2 - new_y1
                        # Tight box expressed relative to the crop, in [0, 1].
                        rl_x1 = (x1 - new_x1) / new_w
                        rl_x2 = (x2 - new_x1) / new_w
                        rl_y1 = (y1 - new_y1) / new_h
                        rl_y2 = (y2 - new_y1) / new_h
                        crop_name = '{}_{}_{}_{}_{}.jpg'.format(img_pre, new_x1, new_y1, new_x2, new_y2)
                        crop_path = os.path.join(img_save_path, crop_name)
                        annotation_csv.write('{},{},{},{},{}\n'.format(crop_path, rl_x1, rl_y1, rl_x2, rl_y2))
                        cv.imwrite(crop_path, img[new_y1:new_y2, new_x1:new_x2])
if __name__ == "__main__":
result = read_csv()
crop_img(result)
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
5ec27c40bff669672db5985ca6a7ea318d2a6968 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/intersectingDiscs_20200810184632.py | f3f37471c13443b0e75a79f07eb074f1d8d7f42e | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | def discs(A):
def discs(A):
    """Count pairs of intersecting discs (tangency counts as intersecting).

    Disc ``i`` is centered at ``x = i`` with radius ``A[i]``, i.e. it spans the
    interval ``[i - A[i], i + A[i]]``. Two discs intersect iff their intervals
    overlap. O(n log n) sweep over the sorted interval endpoints.
    (The original file was an incomplete editor-history snapshot with a syntax
    error; this restores a working implementation.)
    """
    starts = sorted(i - r for i, r in enumerate(A))
    ends = sorted(i + r for i, r in enumerate(A))
    n = len(A)
    pairs = 0
    open_discs = 0  # discs whose interval covers the sweep position
    i = j = 0
    while i < n:
        if starts[i] <= ends[j]:
            # A disc opens: it intersects every disc currently open.
            pairs += open_discs
            open_discs += 1
            i += 1
        else:
            # The earliest-ending open disc closes before the next one opens.
            open_discs -= 1
            j += 1
    return pairs


discs([1, 5, 2, 1, 4, 0])
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
28c666be09e2102aa172bcd2c21c4db7396ed048 | 7b51b0e8ba88b8f3c57f5e210ff9847d0364b805 | /ircd/main.py | 7ecb7b0168e64d663ee65c4e0cf4df306f04dd02 | [] | no_license | marcuswanner/nameless-ircd | cd9324528ac890cb416b1b2b1207c4a2315bf12f | 9517b94fe622056f8ea0557403647f9f4ba1d717 | refs/heads/master | 2020-06-07T11:47:44.632012 | 2013-02-14T00:58:44 | 2013-02-14T00:58:44 | 8,111,587 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | #!/usr/bin/env python
import server
import user
import signal, traceback, asyncore
# SIGHUP handler: hot-reloads the server modules in place (Python 2 syntax --
# statement prints and the builtin reload()). Reload order matters: submodules
# first, then the package itself.
def hup(sig,frame):
    print 'reload'
    try:
        reload(server.user)
        reload(server.services)
        reload(server)
    except:
        # Bare except is deliberate here: a broken reload must never kill the
        # running daemon; the traceback is printed instead.
        print 'Error reloading'
        print traceback.format_exc()
    else:
        print 'okay'
def main():
    """Parse CLI options, install the SIGHUP reload handler, and run the IRC server."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--port', type=int, help='port to run on', default=6666)
    parser.add_argument('--host', type=str, help='bind host', default='127.0.0.1')
    parser.add_argument('--opt', type=str, help='options', default=None)
    opts = parser.parse_args()
    # SIGHUP triggers a hot reload of the server modules (see hup above).
    signal.signal(signal.SIGHUP, hup)
    # Logging is enabled only when --opt is exactly 'log' (whitespace ignored).
    do_log = opts.opt is not None and opts.opt.strip() == 'log'
    server.Server((opts.host, opts.port), do_log=do_log)
    asyncore.loop()
# Run the daemon when invoked directly.
if __name__ == '__main__':
    main()
| [
"ampernand@gmail.com"
] | ampernand@gmail.com |
d32d132b39d4dc9285e3107a855567bbacd7d460 | 17ec70a0387905f84f7fc1e3ee7f3428dd4e7874 | /Atcoder/abc121/b.py | 257111dde25af5efa0a73b912864825fe92a899e | [] | no_license | onikazu/ProgramingCompetitionPractice | da348e984b6bcb79f96f461d9df15a33730169b2 | 5a682943976bcac8646176feef9b70a6784abd8a | refs/heads/master | 2021-02-09T06:27:54.994621 | 2020-03-14T02:28:50 | 2020-03-14T02:28:50 | 244,252,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | n, m, c = list(map(int, input().split()))
b = list(map(int, input().split()))
a = [list(map(int, input().split())) for _ in range(n)]
ans = 0
for i in range(n):
code_num = 0
for j in range(m):
code_num += b[j] * a[i][j]
code_num += c
if code_num > 0:
ans += 1
print(ans)
| [
"programingmanagement@gmail.com"
] | programingmanagement@gmail.com |
9bbc4c87238f15869cb43208feda58e15b29152a | 15bfc2b3ba52420d95ed769a332aaa52f402bbd2 | /api/v2010/machine_to_machine/read-default/read-default.6.x.py | 1533bdd640a1fb7f14859f937a8dff30ef979f23 | [] | no_license | synackme/sample-code | 013b8f0a6a33bfd327133b09835ee88940d3b1f2 | 5b7981442f63df7cf2d17733b455270cd3fabf78 | refs/heads/master | 2020-03-17T04:53:07.337506 | 2018-05-07T16:47:48 | 2018-05-07T16:47:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | # Download the helper library from https://www.twilio.com/docs/python/install
from twilio.rest import Client

# Your Account Sid and Auth Token from twilio.com/console
# Fix: the SID placeholder was previously wrapped in literal double quotes
# *inside* the string ('"AC..."'), which could never match a real Account SID.
account_sid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
auth_token = 'your_auth_token'
client = Client(account_sid, auth_token)

# Fetch machine-to-machine phone numbers available for purchase in the US.
machine_to_machine = client.available_phone_numbers("US").machine_to_machine \
                                                         .list()

for record in machine_to_machine:
    print(record.friendly_name)
| [
"jose.oliveros.1983@gmail.com"
] | jose.oliveros.1983@gmail.com |
93d56532e0faa529e1b82d52cf1c017f43ef8373 | f8c3c677ba536fbf5a37ac4343c1f3f3acd4d9b6 | /ICA_SDK/test/test_workflow_argument.py | 5c2a2e694a0af944ba4e5004cce281d75ef38efc | [] | no_license | jsialar/integrated_IAP_SDK | 5e6999b0a9beabe4dfc4f2b6c8b0f45b1b2f33eb | c9ff7685ef0a27dc4af512adcff914f55ead0edd | refs/heads/main | 2023-08-25T04:16:27.219027 | 2021-10-26T16:06:09 | 2021-10-26T16:06:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,520 | py | # coding: utf-8
"""
IAP Services
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import ICA_SDK
from ICA_SDK.models.workflow_argument import WorkflowArgument # noqa: E501
from ICA_SDK.rest import ApiException
class TestWorkflowArgument(unittest.TestCase):
    """Generated unit-test stubs for the WorkflowArgument model."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def make_instance(self, include_optional):
        """Build a WorkflowArgument fixture.

        When ``include_optional`` is False only required constructor params are
        supplied; when True both required and optional params are supplied.
        """
        # model = ICA_SDK.models.workflow_argument.WorkflowArgument()  # noqa: E501
        if include_optional :
            return WorkflowArgument(
                name = '0', 
                value = '0', 
                json = None, 
                options = '0'
            )
        else :
            return WorkflowArgument(
                name = '0',
        )
    def testWorkflowArgument(self):
        """Smoke-test: both fixture variants construct without raising."""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| [
"siajunren@gmail.com"
] | siajunren@gmail.com |
9f2149acf5b8359da35b2d3288a10d45048d9ecd | 06b5d50d92af07dc7c2d9cc24922a2a906f7b88c | /church/main/migrations/0013_auto_20190722_2132.py | 70d6e4e83fbf8eb6f3b68d3a67eb09a0f8d5f5d4 | [] | no_license | ShehanHD/Django | 4fe6d841e38450b028765cc84bbe7b99e65b9387 | bc855c16acad5d8f1f5a24dc68438749704935fd | refs/heads/master | 2021-05-20T12:57:36.980701 | 2020-04-25T13:03:43 | 2020-04-25T13:03:43 | 252,305,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | # Generated by Django 2.2.3 on 2019-07-22 19:32
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alters Services.date default and image upload paths."""
    dependencies = [
        ('main', '0012_auto_20190722_2131'),
    ]
    operations = [
        # NOTE: the default below is a fixed datetime frozen at generation
        # time (typical of auto-generated migrations), not "now" at runtime.
        migrations.AlterField(
            model_name='services',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2019, 7, 22, 21, 32, 55, 180001), verbose_name='date'),
        ),
        migrations.AlterField(
            model_name='services',
            name='img',
            field=models.ImageField(blank=True, upload_to='pics/services'),
        ),
        migrations.AlterField(
            model_name='team',
            name='img',
            field=models.ImageField(blank=True, upload_to='pics/team'),
        ),
    ]
| [
"51677228+ShehanHD@users.noreply.github.com"
] | 51677228+ShehanHD@users.noreply.github.com |
bdc06f47018170b03fee10a372fb6e96f09cad56 | 9f2b07eb0e9467e17448de413162a14f8207e5d0 | /pylith/bc/DirichletTimeDependent.py | 767b2424dec16a77a004dc1158d2c1f6de9772b2 | [
"MIT"
] | permissive | fjiaqi/pylith | 2aa3f7fdbd18f1205a5023f8c6c4182ff533c195 | 67bfe2e75e0a20bb55c93eb98bef7a9b3694523a | refs/heads/main | 2023-09-04T19:24:51.783273 | 2021-10-19T17:01:41 | 2021-10-19T17:01:41 | 373,739,198 | 0 | 0 | MIT | 2021-06-04T06:12:08 | 2021-06-04T06:12:07 | null | UTF-8 | Python | false | false | 5,081 | py | # ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
#
# @file pylith/bc/DirichletTimeDependent.py
#
# @brief Python object for managing a time-dependent Dirichlet (prescribed
# values) boundary condition.
#
# Factory: boundary_condition
from .BoundaryCondition import BoundaryCondition
from .bc import DirichletTimeDependent as ModuleDirichletTimeDependent
from pylith.utils.NullComponent import NullComponent
class DirichletTimeDependent(BoundaryCondition, ModuleDirichletTimeDependent):
    """Python object for managing a time-dependent Dirichlet (prescribed values)
    boundary condition.
    Factory: boundary_condition
    """
    import pythia.pyre.inventory
    # Pyre inventory traits: user-configurable properties read via Pyre's
    # component framework; `.meta['tip']` supplies the help text.
    constrainedDOF = pythia.pyre.inventory.array(
        "constrained_dof", converter=int, default=[])
    constrainedDOF.meta[
        'tip'] = "Array of constrained degrees of freedom (0=1st DOF, 1=2nd DOF, etc)."
    useInitial = pythia.pyre.inventory.bool("use_initial", default=True)
    useInitial.meta['tip'] = "Use initial term in time-dependent expression."
    useRate = pythia.pyre.inventory.bool("use_rate", default=False)
    useRate.meta['tip'] = "Use rate term in time-dependent expression."
    useTimeHistory = pythia.pyre.inventory.bool(
        "use_time_history", default=False)
    useTimeHistory.meta['tip'] = "Use time history term in time-dependent expression."
    dbTimeHistory = pythia.pyre.inventory.facility(
        "time_history", factory=NullComponent, family="temporal_database")
    dbTimeHistory.meta['tip'] = "Time history with normalized amplitude as a function of time."
    def __init__(self, name="dirichlettimedependent"):
        """Constructor.
        """
        BoundaryCondition.__init__(self, name)
        return
    def _defaults(self):
        # Default auxiliary subfields for the time-dependent expression terms.
        from .AuxSubfieldsTimeDependent import AuxSubfieldsTimeDependent
        self.auxiliarySubfields = AuxSubfieldsTimeDependent(
            "auxiliary_subfields")
    def preinitialize(self, problem):
        """Do pre-initialization setup.
        """
        import numpy
        from pylith.mpi.Communicator import mpi_comm_world
        comm = mpi_comm_world()
        # Log only on the rank-0 MPI process to avoid duplicated output.
        if 0 == comm.rank:
            self._info.log(
                "Performing minimal initialization of time-dependent Dirichlet boundary condition '%s'." % self.aliases[-1])
        BoundaryCondition.preinitialize(self, problem)
        # Push the Pyre inventory settings down into the C++ module object.
        ModuleDirichletTimeDependent.setConstrainedDOF(
            self, numpy.array(self.constrainedDOF, dtype=numpy.int32))
        ModuleDirichletTimeDependent.useInitial(self, self.useInitial)
        ModuleDirichletTimeDependent.useRate(self, self.useRate)
        ModuleDirichletTimeDependent.useTimeHistory(self, self.useTimeHistory)
        if not isinstance(self.dbTimeHistory, NullComponent):
            ModuleDirichletTimeDependent.setTimeHistoryDB(
                self, self.dbTimeHistory)
        return
    def verifyConfiguration(self):
        """Verify compatibility of configuration.
        """
        BoundaryCondition.verifyConfiguration(self, self.mesh())
        spaceDim = self.mesh().coordsys().getSpaceDim()
        # Each constrained DOF index must be valid for the spatial dimension.
        # NOTE(review): this iterates self.bcDOF while _configure validates
        # self.constrainedDOF -- confirm bcDOF is set by the base class.
        for d in self.bcDOF:
            if d < 0 or d >= spaceDim:
                raise ValueError("Attempting to constrain DOF (%d) that doesn't exist for time-dependent Dirichlet boundary condition '%s'. Space dimension is %d." %
                                 (d, self.aliases[-1], spaceDim))
        return
    def _configure(self):
        """Setup members using inventory.
        """
        if 0 == len(self.constrainedDOF):
            raise ValueError("'constrained_dof' must be a zero based integer array of indices corresponding to the "
                             "constrained degrees of freedom.")
        # A time-history term requires a database; a database without the
        # term enabled is ignored (warn rather than fail).
        if self.inventory.useTimeHistory and isinstance(self.inventory.dbTimeHistory, NullComponent):
            raise ValueError(
                "Missing time history database for time-dependent Dirichlet boundary condition '%s'." % self.aliases[-1])
        if not self.inventory.useTimeHistory and not isinstance(self.inventory.dbTimeHistory, NullComponent):
            self._warning.log(
                "Ignoring time history database setting for time-dependent Dirichlet boundary condition '%s'." % self.aliases[-1])
        BoundaryCondition._configure(self)
        return
    def _createModuleObj(self):
        """Create handle to corresponding C++ object.
        """
        ModuleDirichletTimeDependent.__init__(self)
        return
# Factories
def boundary_condition():
    """Factory associated with DirichletTimeDependent.

    Returns a fresh instance; used by the framework to build the component.
    """
    return DirichletTimeDependent()
# End of file
| [
"baagaard@usgs.gov"
] | baagaard@usgs.gov |
92c43f49b8885961160c4f1e4bb9c1ddb79dc162 | c46754b9600a12df4f9d7a6320dfc19aa96b1e1d | /examples/research_projects/luke/luke_utils.py | aec4133f21b36eee313a5c6371ff48537ccf613c | [
"Apache-2.0"
] | permissive | huggingface/transformers | ccd52a0d7c59e5f13205f32fd96f55743ebc8814 | 4fa0aff21ee083d0197a898cdf17ff476fae2ac3 | refs/heads/main | 2023-09-05T19:47:38.981127 | 2023-09-05T19:21:33 | 2023-09-05T19:21:33 | 155,220,641 | 102,193 | 22,284 | Apache-2.0 | 2023-09-14T20:44:49 | 2018-10-29T13:56:00 | Python | UTF-8 | Python | false | false | 5,106 | py | import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad (or truncate) a batch of variable-length sequences to a fixed length.

    Args:
        sequences: iterable of per-example sequences; entries are scalars, or
            pairs when ``padding_value`` is a tuple (e.g. entity spans).
        padding_value: scalar fill value, or a 2-tuple for pair-valued data.
        padding_side: ``"right"`` pads at the end; anything else pads at the front.
        sequence_length: target length; longer sequences are truncated.

    Returns:
        Nested Python lists of shape (batch, sequence_length[, 2]).
    """
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        trimmed = tensor[:sequence_length]
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(trimmed), :2] = trimmed
            else:
                out_tensor[i, : len(trimmed)] = trimmed
        else:
            # Bug fix: left padding must place the data at the *end* of the
            # row.  The original started the write at ``len(trimmed) - 1``,
            # which misaligns the data and raises a shape-mismatch error
            # whenever the lengths do not happen to coincide.
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(trimmed):, :2] = trimmed
            else:
                out_tensor[i, sequence_length - len(trimmed):] = trimmed
    return out_tensor.tolist()
def is_punctuation(char):
    """Return True if ``char`` is punctuation.

    Treats the four ASCII symbol blocks (``!-/``, ``:-@``, ``[-`` ` ``,
    ``{-~``) as punctuation even where Unicode classifies them as symbols
    (e.g. ``$`` or ``^``), then falls back to the Unicode ``P*`` categories.
    """
    code = ord(char)
    if 33 <= code <= 47 or 58 <= code <= 64 or 91 <= code <= 96 or 123 <= code <= 126:
        return True
    return unicodedata.category(char).startswith("P")
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """
    Data collator that will dynamically pad the inputs received, as well as the labels.
    Args:
        tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
            The tokenizer used for encoding the data.
        padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:
            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
            sequence if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the
            maximum acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
            different lengths).
        max_length (`int`, *optional*):
            Maximum length of the returned list and optionally padding length (see above).
        pad_to_multiple_of (`int`, *optional*):
            If set will pad the sequence to a multiple of the provided value.
            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
        label_pad_token_id (`int`, *optional*, defaults to -100):
            The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions).
        return_tensors (`str`):
            The type of Tensor to return. Allowable values are "np", "pt" and "tf".
    """
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"
    def torch_call(self, features):
        # Collate a list of feature dicts into one padded batch of int64
        # torch tensors.  Token-level fields are padded by tokenizer.pad;
        # labels, ner_tags and original_entity_spans are padded here.
        import torch
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )
        if labels is None:
            return batch
        # Pad label sequences to the entity-sequence length on whichever
        # side the tokenizer pads.
        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        # ner_tags are padded with -1; entity spans with the (-1, -1) sentinel pair.
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        # NOTE(review): assumes every remaining batch value is convertible to
        # an int64 tensor — confirm if new fields are added upstream.
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
| [
"noreply@github.com"
] | huggingface.noreply@github.com |
214892b437bd6d59b56ffbcabe40e76edd0ab6b3 | 2b0f4f3590f5407da83d179db8103803f7c75e8f | /app/views/HomeView.py | b7fcbc6df3e89b79e9e2829c11df2860ae9ee937 | [] | no_license | caiomarinhodev/ciacimento | 6c783f169ac912ed599bcfaa6a208d5be5c7942e | cf7a6951196bc36655fe0b303e3131932ec254cf | refs/heads/master | 2023-07-07T00:25:16.101307 | 2023-02-28T00:46:12 | 2023-02-28T00:46:12 | 117,120,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.views.generic import TemplateView
from app.mixins.CustomContextMixin import CustomContextMixin
"""HomeView.py: Especifica a pagina inicial da aplicacao."""
__author__ = "Caio Marinho"
__copyright__ = "Copyright 2017"
class HomeView(TemplateView, CustomContextMixin):
    """Landing page: renders the static site index template."""
    template_name = 'site/index.html'
| [
"caiomarinho8@gmail.com"
] | caiomarinho8@gmail.com |
4cdd78b8851f5c509944a233f59931e35429a318 | 9c1fa66bc9fffc800890622d66c8cf50a3384c52 | /coresys/models/payment.py | 6b4819e7f1b141e857831886aff4d6ff9fa82547 | [] | no_license | fhydralisk/walibackend | 97a5f7ba0a02a36673ec57e1c42fd372afe42736 | dac474f3d418ac3711b1c51d00bd7d246d2bc1bd | refs/heads/master | 2020-03-15T15:26:45.625860 | 2018-09-04T12:23:52 | 2018-09-04T12:23:52 | 132,212,261 | 1 | 4 | null | 2018-11-18T15:00:53 | 2018-05-05T03:45:57 | Python | UTF-8 | Python | false | false | 568 | py | from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models
class CorePaymentMethod(models.Model):
    """Payment method master data (Django model)."""
    # Ordering index for listing payment methods.
    ord = models.IntegerField(_("Order Number"))
    # Fraction used as deposit; validators constrain it to [0.0, 1.0].
    deposit_scale = models.FloatField(validators=[MinValueValidator(0.0), MaxValueValidator(1.0)])
    # Human-readable description; also used as the display string below.
    opmdesc = models.TextField(verbose_name=_("Description"), max_length=125)
    # Availability flag.  NOTE(review): presumably inactive methods are kept
    # but not offered — confirm against the querying code.
    in_use = models.BooleanField(default=True)
    def __unicode__(self):
        # Python 2 style display method: show the description.
        return self.opmdesc
| [
"fhy14@mails.tsinghua.edu.cn"
] | fhy14@mails.tsinghua.edu.cn |
36ad831f3772f2152d78a9892f032d218d3f976e | 50e3fcca6e2a9a73ed52d231a739f70c28ed108f | /Math/twoSum.py | 29d1fcf1fab8fb3d069b8fb3146aae35b69ed916 | [] | no_license | thomasyu929/Leetcode | efa99deaa2f6473325de516d280da6911c2cc4ab | 780271875c5b50177653fd7fe175d96dd10e84e2 | refs/heads/master | 2022-03-29T00:11:01.554523 | 2020-01-03T00:28:22 | 2020-01-03T00:28:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | class Solution:
# brute force
'''
def twoSum(self, nums, target):
for i in range(len(nums)):
for j in range(len(nums))[i+1:]: # not use the same element twice
if nums[i] + nums[j] == target:
return i,j
'''
# hash map
def twoSum(self, nums, target):
m = {}
for i, n in enumerate(nums):
x = target - n
if x in m:
return i, m[x]
else:
m[n] = i
if __name__ == "__main__":
    # Quick manual check: expected output is (1, 0).
    solver = Solution()
    print(solver.twoSum([2, 7, 11, 15], 9))
"yby4301955@gmail.com"
] | yby4301955@gmail.com |
c636180bd9979b8ec50d6a3e1f125465333ae932 | 8364e4d23191ee535c163debffafa8418d705843 | /aiokubernetes/models/v1_preconditions.py | a3878eb6ae0455ed5a39a95bb79bd48a6180ed09 | [
"Apache-2.0"
] | permissive | olitheolix/aiokubernetes | 2bb6499030e2e6e9b7ca0db63c4441293d70a09b | 266718b210dff2a9b2212183261ea89adf89115e | refs/heads/master | 2020-03-21T23:02:30.484410 | 2018-10-20T19:33:01 | 2018-10-22T05:52:42 | 139,162,905 | 28 | 3 | Apache-2.0 | 2018-10-22T05:52:51 | 2018-06-29T15:02:59 | Python | UTF-8 | Python | false | false | 2,978 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
class V1Preconditions(object):
    """Swagger-generated Kubernetes API model: preconditions for an
    operation (currently only the target object's UID).

    NOTE: originally auto generated by the swagger code generator program.
    """

    # attribute name -> attribute type
    swagger_types = {
        'uid': 'str'
    }

    # attribute name -> JSON key in the API definition
    attribute_map = {
        'uid': 'uid'
    }

    def __init__(self, uid=None):  # noqa: E501
        """V1Preconditions - a model defined in Swagger"""  # noqa: E501
        self._uid = None
        self.discriminator = None
        if uid is not None:
            self.uid = uid

    @property
    def uid(self):
        """Specifies the target UID.

        :return: The uid of this V1Preconditions.
        :rtype: str
        """
        return self._uid

    @uid.setter
    def uid(self, uid):
        """Specifies the target UID.

        :param uid: The uid of this V1Preconditions.
        :type: str
        """
        self._uid = uid

    def to_dict(self):
        """Return the model properties as a dict, recursing into nested
        models, lists, and dicts."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when ``other`` is a V1Preconditions with equal state."""
        return isinstance(other, V1Preconditions) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True when the objects are not equal."""
        return not self == other
| [
"olitheolix@gmail.com"
] | olitheolix@gmail.com |
b1fbece0ddc4874190e66ca6b25eededa45727e2 | 6d7678e3d79c97ddea2e2d65f2c2ef03b17f88f6 | /venv/lib/python3.6/site-packages/pysnmp/carrier/asyncore/dgram/udp6.py | 900ef3c7d6986b1c921fbb7e443ee60ff27e6c6c | [
"MIT"
] | permissive | PitCoder/NetworkMonitor | b47d481323f26f89be120c27f614f2a17dc9c483 | 36420ae48d2b04d2cc3f13d60d82f179ae7454f3 | refs/heads/master | 2020-04-25T11:48:08.718862 | 2019-03-19T06:19:40 | 2019-03-19T06:19:40 | 172,757,390 | 2 | 0 | MIT | 2019-03-15T06:07:27 | 2019-02-26T17:26:06 | Python | UTF-8 | Python | false | false | 1,386 | py | #
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pysnmp/license.html
#
from pysnmp.carrier import sockfix
from pysnmp.carrier.base import AbstractTransportAddress
from pysnmp.carrier.asyncore.dgram.base import DgramSocketTransport
import socket
# Transport domain identifier (OID) for SNMP over UDP/IPv6.
domainName = snmpUDP6Domain = (1, 3, 6, 1, 2, 1, 100, 1, 2)
class Udp6TransportAddress(tuple, AbstractTransportAddress):
    # Marker address type: a (host, port, flowinfo, scopeid) tuple combined
    # with the AbstractTransportAddress bookkeeping mixin.
    pass
class Udp6SocketTransport(DgramSocketTransport):
    """Datagram transport over UDP/IPv6."""
    # Idiom fix: replace the fragile ``and/or`` ternary emulation with a real
    # conditional expression (behavior unchanged — AF_INET6 is truthy, and it
    # is still only evaluated when IPv6 support is present).
    sockFamily = socket.AF_INET6 if socket.has_ipv6 else None
    addressType = Udp6TransportAddress

    def normalizeAddress(self, transportAddress):
        """Return a normalized 4-tuple (host, port, flowinfo, scopeid)
        address with any '%zone' suffix stripped from the host and the
        local address carried over (from the input address if it has one,
        otherwise from this transport)."""
        if '%' in transportAddress[0]:  # strip zone ID
            ta = self.addressType((transportAddress[0].split('%')[0],
                                   transportAddress[1],
                                   0,  # flowinfo
                                   0))  # scopeid
        else:
            ta = self.addressType((transportAddress[0],
                                   transportAddress[1], 0, 0))
        if (isinstance(transportAddress, self.addressType) and
                transportAddress.getLocalAddress()):
            return ta.setLocalAddress(transportAddress.getLocalAddress())
        else:
            return ta.setLocalAddress(self.getLocalAddress())
Udp6Transport = Udp6SocketTransport
| [
"overlord.lae@gmail.com"
] | overlord.lae@gmail.com |
33b0303c5f17e64009b0c754863eb58633a2c980 | 9a0092226c40bc6c9c4eaadb670601234fadd739 | /grad-cam/scale/data/FF-PP/create_dataset.py | 7c40e2b741eb169fa115118c9dd62b5f6c1d4dfc | [] | no_license | ajioka-fumito/keras | 45005f214ae0b66fc6e88ca8f421ab9f44d52ec4 | 824691f4e243dd447ab91146a0e5336b416d0f83 | refs/heads/master | 2020-07-22T16:45:10.042977 | 2019-09-22T08:04:51 | 2019-09-22T08:04:51 | 204,160,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | import glob
from PIL import Image
import random
import shutil
import os
# Shuffle the cropped images and split them: the first 50 go to train/,
# the rest to test/.
paths = glob.glob("./F_crop/*")
random.shuffle(paths)
for index, source in enumerate(paths):
    filename = os.path.basename(source)
    if index <= 49:
        destination = "./train/{}".format(filename)
    else:
        destination = "./test/{}".format(filename)
    shutil.copyfile(source, destination)
| [
"ajioka.ds@gmail.com"
] | ajioka.ds@gmail.com |
fb1cfdd12519ff10dc927a3ec165345521142654 | 8b86f7809b18de55fddd55800f932a20725132ea | /data_structures/binary_search_tree/bst.py | 0434af27e11cb927df539cbd99f26927e65710bd | [
"MIT"
] | permissive | vinozy/data-structures-and-algorithms | 75f0358167a2c6566a3a196aa9cafd33d2a95b16 | 0485b95f5aabc0ee255cd7e50b48a6ccec851e00 | refs/heads/master | 2022-02-17T14:21:06.412047 | 2019-08-17T04:23:09 | 2019-08-17T04:23:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,455 | py | class Node:
"""create a Node"""
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def __repr__(self): # pragma: no cover
return 'Node Val: {}'.format(self.val)
def __str__(self): # pragma: no cover
return self.val
class BST:
    """Binary search tree.

    Duplicates (``val >= node.val``) go to the right subtree.  Each
    traversal method calls ``operation(node)`` once per node in the
    corresponding order.
    """

    def __init__(self, iterable=()):
        """Build a tree by inserting every item of ``iterable`` in order.

        Fixes two issues in the original: the mutable default argument
        (``[]``) is replaced with an immutable tuple, and the
        ``type(iterable) is not list`` check is dropped so any iterable is
        accepted (a non-iterable argument still raises ``TypeError``,
        now from iteration itself).
        """
        self.root = None
        for item in iterable:
            self.insert(item)

    def __repr__(self):  # pragma: no cover
        return '<BST Root {}>'.format(self.root.val)

    def __str__(self):  # pragma: no cover
        return self.root.val

    def in_order(self, operation):
        """Visit left subtree, node, then right subtree."""
        def _walk(node=None):
            if node is None:
                return
            if node.left is not None:
                _walk(node.left)
            operation(node)
            if node.right is not None:
                _walk(node.right)
        _walk(self.root)

    def pre_order(self, operation):
        """Visit node first, then left and right subtrees."""
        def _walk(node=None):
            if node is None:
                return
            operation(node)
            if node.left is not None:
                _walk(node.left)
            if node.right is not None:
                _walk(node.right)
        _walk(self.root)

    def post_order(self, operation):
        """Visit left and right subtrees, then the node."""
        def _walk(node=None):
            if node is None:
                return
            if node.left is not None:
                _walk(node.left)
            if node.right is not None:
                _walk(node.right)
            operation(node)
        _walk(self.root)

    def insert(self, val):
        """Insert ``val`` into the tree and return the new node."""
        node = Node(val)
        if self.root is None:
            self.root = node
            return node
        current = self.root
        while current:
            if val >= current.val:
                if current.right is not None:
                    current = current.right
                else:
                    current.right = node
                    break
            else:  # val < current.val
                if current.left is not None:
                    current = current.left
                else:
                    current.left = node
                    break
        return node
| [
"beverly.pham@gmail.com"
] | beverly.pham@gmail.com |
7e81544fbf791344dc1031d79843c5d25105605b | 75616acbd19956182868b9c84ecc5805394b6ead | /src/mcdp_user_db/userdb.py | 79d421dbc2edbdeeb5837c10da951a442c0e04fb | [] | no_license | fgolemo/mcdp | 16c245665d91af7f91bba8a24d4272001ce90746 | 46eb25ca85660f4d6c2f1f6d026f7e97c7977ac3 | refs/heads/master | 2021-01-21T22:14:52.599881 | 2017-09-01T18:00:55 | 2017-09-01T18:00:55 | 102,139,393 | 5 | 2 | null | 2017-09-01T17:58:36 | 2017-09-01T17:58:36 | null | UTF-8 | Python | false | false | 6,117 | py | from datetime import datetime
from contracts import contract
from mcdp.logs import logger
from .user import UserInfo
from mcdp_utils_misc.my_yaml import yaml_load
__all__ = ['UserDB']
class UserDB(object):
    """In-memory user database keyed by login name.

    NOTE(review): ``self.users`` (login -> user record, where each record
    exposes ``.info``) is assumed to be assigned by whoever constructs this
    object — the original ``__init__`` is commented out in this module;
    confirm the initialization path.
    """

    def __contains__(self, key):
        # Membership test on login names.
        return key in self.users

    def match_by_id(self, provider, provider_id):
        """Return the user whose authentication ids include the given
        (provider, id) pair, or None."""
        for u in self.users.values():
            for w in u.info.authentication_ids:
                if w.provider == provider and w.id == provider_id:
                    return u
        return None

    def best_match(self, username, name, email):
        """Return the best matching user by username, then display name,
        then email; None if nothing matches."""
        if username is not None:
            if username in self.users:
                return self.users[username]
        for u in self.users.values():
            user_info = u.info
            if name is not None and user_info.get_name() == name:
                return u
            if email is not None and user_info.get_email() == email:
                return u
        return None

    @contract(returns=UserInfo)
    def __getitem__(self, key):
        # A None key resolves to the anonymous account.
        if key is None:
            key = 'anonymous'
        u = self.users[key].info
        return u

    def exists(self, login):
        """Return True if ``login`` is a known user."""
        return login in self

    @contract(returns=bool, login=str)
    def authenticate(self, login, password):
        """Check ``password`` against the stored password provider for
        ``login``; updates the account's last-active timestamp."""
        user_info = self.users[login].info
        for p in user_info.authentication_ids:
            if p.provider == 'password':
                pwd = p.password
                match = password == pwd
                if not match:
                    # Security fix: never write credentials to the log; the
                    # original logged both the attempted and stored password
                    # in plaintext.  Also use warning() (warn() is a
                    # deprecated alias).
                    msg = 'Password mismatch for login %r.' % login
                    logger.warning(msg)
                user_info.account_last_active = datetime.now()
                return match
        return False

    @contract(returns=bytes, candidate_usernames='list(str)')
    def find_available_user_name(self, candidate_usernames):
        """Return the first candidate not already taken, suffixing 2..9
        if all plain candidates are taken.

        NOTE(review): the contract says ``bytes`` but the values returned
        are ``str`` — likely a Python 2 leftover; confirm before tightening.
        """
        for x in candidate_usernames:
            if x not in self.users:
                return x
        for i in range(2,10):
            for x in candidate_usernames:
                y = '%s%d' % (x, i)
                if y not in self.users:
                    return y
        raise ValueError(candidate_usernames)

    def create_new_user(self, username, u):
        """Register ``u`` under ``username``; raise ValueError if taken."""
        if username in self.users:
            # Bug fix: the original never interpolated ``username`` into the
            # message, so the bare placeholder was raised verbatim.
            msg = 'User "%s" already present.' % username
            raise ValueError(msg)
        self.users[username] = u

    @contract(returns='isinstance(User)')
    def get_unknown_user_struct(self, username):
        """Build a placeholder user record (all fields empty) for an
        unknown ``username``."""
        unknown = """
    info:
        username: %s
        website:
        name: Unknown
        subscriptions: []
        account_last_active:
        affiliation:
        authentication_ids: []
        groups: []
        email:
        account_created:
        images: {}
    """ % username
        user_data = yaml_load(unknown)
        from mcdp_hdb_mcdp.main_db_schema import DB
        user = DB.view_manager.create_view_instance(DB.user, user_data)
        user.set_root()
        return user
# self.save_user(username, new_user=True)
#
# def save_user(self, username, new_user=False):
# userdir = os.path.join(self.userdir, username + '.' + MCDPConstants.user_extension)
# if not os.path.exists(userdir):
# if new_user:
# os.makedirs(userdir)
# else:
# msg = 'Could not find user dir %r.' % userdir
# raise ValueError(msg)
#
# filename = os.path.join(userdir, MCDPConstants.user_desc_file)
# if not os.path.exists(filename) and not new_user:
# msg = 'Could not find user filename %r.' % filename
# raise ValueError(msg)
# user = self.users[username]
# y = yaml_from_userinfo(user)
# s = yaml.dump(y)
# logger.info('Saving %r:\n%s' % (username, s))
# with open(filename, 'w') as f:
# f.write(s)
#
# # if user.picture is not None:
# # fn = os.path.join(userdir, MCDPConstants.user_image_file)
# # with open(fn, 'wb') as f:
# # f.write(user.picture)
# logger.debug('Saved user information here: %s' % userdir)
#
#
# def load_users(userdir):
# ''' Returns a dictionary of username -> User profile '''
# users = {}
#
# exists = os.path.exists(userdir)
# if not exists:
# msg = 'Directory %s does not exist' % userdir
# raise Exception(msg)
#
# assert exists
#
# l = locate_files(userdir,
# pattern='*.%s' % MCDPConstants.user_extension,
# followlinks=True,
# include_directories=True,
# include_files=False)
#
# for userd in l:
# username = os.path.splitext(os.path.basename(userd))[0]
# info = os.path.join(userd, MCDPConstants.user_desc_file)
# if not os.path.exists(info):
# msg = 'Info file %s does not exist.' % info
# raise Exception(msg)
# data = open(info).read()
# s = yaml.load(data)
#
# users[username] = userinfo_from_yaml(s, username)
#
# f = os.path.join(userd, MCDPConstants.user_image_file)
# if os.path.exists(f):
# users[username].picture = open(f, 'rb').read()
#
# if not users:
# msg = 'Could not load any user from %r' % userdir
# raise Exception(msg)
# else:
# logger.info('loaded users: %s.' % format_list(sorted(users)))
#
# return users
| [
"acensi@ethz.ch"
] | acensi@ethz.ch |
f742007d91e3d77ee008bb7e42b02145f4bbf4a6 | 79debba8bb967d38dd06ba9a241864d7559303f3 | /bose/python/elements_of_interview/fp_transform.py | 999233e3bcfa6f73fd7cea27d506ede6d1a555ee | [] | no_license | hghimanshu/CodeForces-problems | 0c03813cdcdabbff56e2f56e93757e3116b9a038 | 205e1f7e269df1f710e8cd1fd5e5b34066fd4796 | refs/heads/master | 2023-07-23T13:32:19.563965 | 2021-04-06T19:29:59 | 2021-04-06T19:29:59 | 228,555,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,375 | py | def transform(init,mapping,combination,seq):
if not seq:
return init
else:
return combination(mapping(seq[0]),transform(init,mapping,combination,seq[1:]))
def product_with_transform(seq):
    """Product of ``seq`` expressed as a transform (identity mapping, * as combiner)."""
    return transform(1, lambda value: value, lambda left, right: left * right, seq)
def sum_with_transform(seq):
    """Sum of ``seq`` expressed as a transform (identity mapping, + as combiner)."""
    return transform(0, lambda value: value, lambda left, right: left + right, seq)
#reduction function (foldr)
def foldr(func, init, seq):
    """Right fold: func(seq[0], func(seq[1], ... func(seq[-1], init)))."""
    if not seq:
        return init
    return func(seq[0], foldr(func, init, seq[1:]))
def product_with_foldr(seq):
    """Product of ``seq`` via a right fold with identity element 1."""
    return foldr(lambda item, acc: item * acc, 1, seq)
def sum_with_foldr(seq):
    """Sum of ``seq`` via a right fold with identity element 0."""
    return foldr(lambda item, acc: item + acc, 0, seq)
def reverse_with_foldr(seq):
    """Reversed copy of ``seq`` (as a list) via a right fold."""
    return foldr(lambda item, acc: acc + [item], [], seq)
def foldl(func, init, seq):
    """Left fold: func(...func(func(init, seq[0]), seq[1])..., seq[-1]).

    Written iteratively (the recursive original is tail-recursive, so the
    accumulation order is identical).
    """
    acc = init
    for item in seq:
        acc = func(acc, item)
    return acc
def product_with_foldl(seq):
    """Product of ``seq`` via a left fold with identity element 1."""
    return foldl(lambda acc, item: acc * item, 1, seq)
def digits2num_with_foldl(seq):
    """Combine a sequence of decimal digits into the number they spell."""
    return foldl(lambda acc, digit: acc * 10 + digit, 0, seq)
if __name__ == "__main__":
    # Demonstrate each fold variant on the same sample input.
    sample = [1, 2, 3, 4]
    print(product_with_transform(sample))
    print(sum_with_transform(sample))
    print(product_with_foldr(sample))
    print(sum_with_foldr(sample))
    print(reverse_with_foldr(sample))
    print(product_with_foldl(sample))
    print(digits2num_with_foldl(sample))
"abose550@gmail.com"
] | abose550@gmail.com |
d875413f77e7f819f2d679a8877403ac27383aab | ad5dd929e2a02e7cc545cf2bec37a319d009bab8 | /sentence_iter.py | fdbd7fee1216df2c5d184bc3937d4cab27bab635 | [] | no_license | liuwei881/fluency_python | 1dcadf3113ecd6cda6c2c9676fc4a5f0529fe098 | 2ae0d8959d57ed1094cf5df3d2d8ca0df1f8d201 | refs/heads/master | 2021-09-03T21:01:51.465228 | 2018-01-12T00:33:24 | 2018-01-12T00:33:24 | 109,910,909 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | #coding=utf-8
import re
import reprlib
RE_WORD = re.compile('\w+')
class Sentence:
def __init__(self, text):
self.text = text
self.words = RE_WORD.findall(text)
def __repr__(self):
return 'Sentence(%s)' % reprlib.repr(self.text)
def __iter__(self): # 1
return SentenceIterator(self.words) # 2
class SentenceIterator:
def __init__(self, words):
self.words = words # 3
self.index = 0 # 4
def __next__(self):
try:
word = self.words[self.index] # 5
except IndexError:
raise StopIteration() # 6
self.index += 1 # 7
return word # 8
def __iter__(self): # 9
return self | [
"liuwei@polex.com.cn"
] | liuwei@polex.com.cn |
21282651510d53f48e89791d8d48142d7040384b | 3c3484769274a741308eb13d91caf960eae0c5b4 | /src/logging/util.py | 870f62c780197a5ac54545869cb12b21e3693a54 | [
"CC-BY-4.0",
"LicenseRef-scancode-public-domain"
] | permissive | shwina/still-magic | 7f7ad11a05346fc08f4331234740d69ad57ceefa | 1d651840497d66d44ff43528f6e1f38e698ce168 | refs/heads/master | 2020-06-09T06:10:41.367616 | 2019-04-16T20:00:41 | 2019-04-16T20:00:41 | 193,387,841 | 0 | 0 | NOASSERTION | 2019-06-23T19:32:46 | 2019-06-23T19:32:46 | null | UTF-8 | Python | false | false | 536 | py | import logging
MESSAGE_FORMAT = '%(asctime)s,%(name)s,%(levelname)s,%(message)s'
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'


def create_logger(name, level, filename):
    """Create (or fetch) the logger ``name``, set its level, and attach a
    FileHandler that writes comma-separated records to ``filename``.

    Note: each call attaches one more handler to the same named logger.
    """
    log = logging.getLogger(name)
    log.setLevel(level)
    file_handler = logging.FileHandler(filename)
    file_handler.setFormatter(logging.Formatter(MESSAGE_FORMAT, DATE_FORMAT))
    log.addHandler(file_handler)
    return log
| [
"gvwilson@third-bit.com"
] | gvwilson@third-bit.com |
5ff9e070f2ea8bbc5d73c47120f0659f9bffb7fe | 11f54a9d392cdfc3b4cca689c0b5abdbf10625ff | /kangaroo.py | 0852875515d575bd15b1881f1a8ac881af163dc7 | [] | no_license | stheartsachu/Miscellinuous | aa0de96115bea73d49bed50f80e263f31cf9d9ad | 3063233669f7513166b2987e911d662a0fbad361 | refs/heads/master | 2021-01-05T03:51:15.168301 | 2020-02-16T10:13:42 | 2020-02-16T10:13:42 | 240,869,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | x1 = 0
v1 = 3
x2 = 4
v2 = 2
# def fun(f_kan,s_kan):
# if f_kan == s_kan:
# return True
# print(fun(f_kan,s_kan))
# while fun(f_kan,s_kan) == True:
#
# f_kan = f_kan+v1
# print(f_kan)
f_kan = x1
s_kan = x2
while(f_kan <= 10000 and s_kan <= 10000 ):
f_kan += v1
s_kan += v2
# try:
if f_kan == s_kan:
print("YES")
break
# finally:
# print("NO")
# break
else:
print("NO")
| [
"seartsachu@gmail.com"
] | seartsachu@gmail.com |
43ff798c9d35108f04af4aa813c9e27cd1f69c88 | af7df9d77a2545b54d8cd03e7f4633dce6125f4a | /ch07/gui7c.py | 6f34a331505440579910e5fe3bafa70c62f2964b | [] | no_license | socrates77-sh/PP4E | 71e6522ea2e7cfd0c68c1e06ceb4d0716cc0f0bd | c92e69aea50262bfd63e95467ae4baf7cdc2f22f | refs/heads/master | 2020-05-29T08:46:47.380002 | 2018-11-16T10:38:44 | 2018-11-16T10:38:44 | 69,466,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | import gui7
from tkinter import *
class HelloPackage(gui7.HelloPackage):
def __getattr__(self, name):
# pass off to a real widget
return getattr(self.top, name)
if __name__ == '__main__':
    # Build the package and enter the Tk event loop; the mainloop attribute
    # is resolved through __getattr__ onto the underlying widget.
    app = HelloPackage()
    app.mainloop()
| [
"zhwenrong@sina.com"
] | zhwenrong@sina.com |
1a520fe98f7f8f7872637d486cf3c11c8cfdfd6e | a15200778946f6f181e23373525b02b65c44ce6e | /Algoritmi/2019-06-25/all-CMS-submissions/2019-06-25.10:50:45.629190.VR434403.biancaneve.py | d19254ac4d915d04f644a65152214d4681199a73 | [] | no_license | alberto-uni/portafoglioVoti_public | db518f4d4e750d25dcb61e41aa3f9ea69aaaf275 | 40c00ab74f641f83b23e06806bfa29c833badef9 | refs/heads/master | 2023-08-29T03:33:06.477640 | 2021-10-08T17:12:31 | 2021-10-08T17:12:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 727 | py | """
* user: VR434403
* fname: CROSARA
* lname: MARCO
* task: biancaneve
* score: 12.0
* date: 2019-06-25 10:50:45.629190
"""
# NOTE(review): Python 2 code (raw_input / print statement) — do not run
# under Python 3 without porting.
# Reads a permutation of size n and m queries:
#   "1 i j"   -> swap the values at 1-based positions i and j
#   "2 da a"  -> appears to ask whether the values da..a occupy a
#                contiguous block of the permutation; prints YES/NO
n, m = map(int, raw_input().split())
perm = map(int, raw_input().split())
for l in range(m):
    row = map(int, raw_input().split())
    #print row
    if row[0] == 1:
        # Swap query (input positions are 1-based).
        perm[row[1]-1], perm[row[2]-1] = perm[row[2]-1], perm[row[1]-1]
    else:
        da_ = row[1]
        a_ = row[2]
        for i in range(n):
            if perm[i] >= da_ and perm[i] <= a_:
                result = "YES"
                # Check the following entries stay inside [da_, a_].
                # NOTE(review): the range covers only a_-da_-1 neighbors and
                # has no bounds check on j near the end of the list — the
                # partial score suggests these are real defects; confirm
                # against the task statement before fixing.
                for j in range(i+1, i+a_-da_):
                    if (perm[j] < da_) or (perm[j] > a_):
                        result = "NO"
                print result
                break
| [
"romeo.rizzi@univr.it"
] | romeo.rizzi@univr.it |
6d40723471e9a7e9b314bf27d54b384edd18e847 | d98bfd59d27db330c970ed7dbf156e4e27be4cbc | /week10/Project1/Proj1/login/views.py | bd1b27a2a32fc9c7c33fa88fa0feaff1bbdf1c8f | [] | no_license | Aktoty00/webdev2019 | 3784324f090851ccf2cc5318f7297340a716ad7d | 44e4bb2905232da53053a334346340a905863d1e | refs/heads/master | 2020-04-18T21:57:19.434168 | 2019-04-29T22:07:51 | 2019-04-29T22:07:51 | 167,220,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | from django.shortcuts import render
from django.http import HttpResponse
def signup(request):
    """Return a hard-coded HTML fragment with sign-up form fields
    (placeholders are in Russian).

    NOTE(review): the markup is built inline rather than via a template,
    and there is no surrounding <form> element or submit action.
    """
    return HttpResponse('<input type = "text" name = "emailOrNumber" class = "emailOrNumber" placeholder = "Моб. телефон или эл. адрес"><br>'
                        '<input type = "text" name = "name" placeholder= "Имя и фамилия">'
                        '<input type = "text" name = "surname" placeholder = "Имя пользователя"><br>'
                        '<input type = "password" name = "newPassword" class = "newPassword" placeholder = "Пароль"><br>'
                        '<button type="submit">Регистрация</button>')
| [
"aktoty.rysdaulet@gmail.com"
] | aktoty.rysdaulet@gmail.com |
4e124d9d992edb2065ee6bd68581458013863276 | a3530aef1481451641daff69570d5ecd4ef069cf | /models/account/year.py | b12d37c21bc8e1bcb377d9742dadf10466e323c5 | [] | no_license | Trilokan/nagini | 757e576aad2482c4f7cb68043e3bf481aa918a30 | d936a965c2f4ea547de24e25d1fbc42649fb4b43 | refs/heads/master | 2020-04-03T09:02:45.484397 | 2019-01-02T12:45:42 | 2019-01-02T12:45:42 | 155,152,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,501 | py | # -*- coding: utf-8 -GPK*-
from odoo import models, fields, api
from calendar import monthrange
from datetime import datetime, timedelta
PROGRESS_INFO = [("draft", "Draft"), ("confirmed", "Confirmed")]
class Year(models.Model):
    """Calendar year with its twelve accounting periods (Odoo model).

    Creating a Year automatically generates one ``period.period`` record
    per month (see ``create``).
    """
    _name = "year.year"
    _rec_name = "name"
    name = fields.Char(string="Year", required=True)
    financial_year = fields.Char(string="Financial Year", required=True)
    period_detail = fields.One2many(comodel_name="period.period",
                                    inverse_name="year_id",
                                    string="Period",
                                    readonly=True)
    _sql_constraints = [('unique_year', 'unique (name)', 'Error! Year must be unique'),
                        ('unique_financial_year', 'unique (financial_year)', 'Error! Financial Year must be unique')]
    def generate_period(self, year, year_id):
        # Create one period record per calendar month of ``year``.
        for month in range(1, 13):
            # monthrange returns (weekday of first day, number of days).
            _, num_days = monthrange(year, month)
            from_date = datetime(year, month, 1)
            till_date = datetime(year, month, num_days)
            data = {"from_date": from_date.strftime("%Y-%m-%d"),
                    "till_date": till_date.strftime("%Y-%m-%d"),
                    "year_id": year_id.id}
            self.env["period.period"].create(data)
    @api.model
    def create(self, vals):
        # Create the year record, then populate its monthly periods.
        # NOTE(review): assumes ``name`` is a numeric year string (int()).
        year_id = super(Year, self).create(vals)
        year = int(vals["name"])
        self.generate_period(year, year_id)
        return year_id
| [
"rameshkumar@ajaxmediatech.com"
] | rameshkumar@ajaxmediatech.com |
e608e87eb6a45b67f54560ba8bfdf8aa39509c4f | b366806c99ac30e77789f80417978902e25628da | /boto3_exceptions/importexport.py | e76b14e4ca77bc73aaba4ac69ba3abc50a10a1c3 | [
"MIT"
] | permissive | siteshen/boto3_exceptions | 9027b38c238030859572afec7f96323171596eb7 | d6174c2577c9d4b17a09a89cd0e4bd1fe555b26b | refs/heads/master | 2020-04-19T03:15:02.525468 | 2019-10-23T07:37:36 | 2019-10-23T07:37:36 | 167,928,540 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,368 | py | import boto3
# NOTE: a real client is constructed at import time, so importing this
# module requires valid AWS configuration (region/credentials).
exceptions = boto3.client('importexport').exceptions

# Re-export every modeled exception class as a module-level name so callers
# can write e.g. `from boto3_exceptions.importexport import NoSuchBucketException`.
for _exc_name in (
    'BucketPermissionException',
    'CanceledJobIdException',
    'CreateJobQuotaExceededException',
    'ExpiredJobIdException',
    'InvalidAccessKeyIdException',
    'InvalidAddressException',
    'InvalidCustomsException',
    'InvalidFileSystemException',
    'InvalidJobIdException',
    'InvalidManifestFieldException',
    'InvalidParameterException',
    'InvalidVersionException',
    'MalformedManifestException',
    'MissingCustomsException',
    'MissingManifestFieldException',
    'MissingParameterException',
    'MultipleRegionsException',
    'NoSuchBucketException',
    'UnableToCancelJobIdException',
    'UnableToUpdateJobIdException',
):
    globals()[_exc_name] = getattr(exceptions, _exc_name)
del _exc_name
| [
"xiaojiang@actwill.com.cn"
] | xiaojiang@actwill.com.cn |
936b43bfd4885419120e1afe90faef8ebccc7a26 | be0e0488a46b57bf6aff46c687d2a3080053e52d | /python/programmers/level3/외벽점검.py | e173eb88a500981e940d6aee7f4e16c5a99e1e29 | [] | no_license | syo0e/Algorithm | b3f8a0df0029e4d6c9cbf19dcfcb312ba25ea939 | 1ae754d5bb37d02f28cf1d50463a494896d5026f | refs/heads/master | 2023-06-09T11:31:54.266900 | 2021-06-30T17:04:38 | 2021-06-30T17:04:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | # dist가 최대 8이기 때문에 brute force를 통해 탐색이 가능하다.
from itertools import permutations
def solution(n, weak, dist):
    """Return the minimum number of friends needed to inspect every weak
    point on a circular wall of circumference ``n``.

    :param n: length of the circular wall
    :param weak: sorted positions of weak points (0 <= pos < n)
    :param dist: distance each available friend can walk; len(dist) <= 8,
                 so brute-forcing all permutations is feasible
    :return: minimum friend count, or -1 if even all friends cannot cover
             every weak point

    Fix over the original: the input list ``weak`` is no longer mutated
    (the original appended the n-shifted copies directly onto the
    caller's list).
    """
    weak_len = len(weak)
    # Unroll the circle: appending each weak point shifted by n lets every
    # rotation be examined as a plain linear window of length weak_len.
    extended = list(weak) + [w + n for w in weak]

    best = len(dist) + 1  # sentinel: one more than any feasible answer
    for start in range(weak_len):
        window = extended[start:start + weak_len]
        # Try every ordering of friends for this starting rotation.
        for candidate in permutations(dist):
            idx, count = 0, 1
            # Farthest point the currently deployed friend can reach.
            reach = window[0] + candidate[idx]
            for pos in window:
                if pos > reach:
                    # Current friend cannot reach this weak point:
                    # deploy the next one here.
                    count += 1
                    if count > len(candidate):
                        break  # ran out of friends for this ordering
                    idx += 1
                    reach = candidate[idx] + pos
            best = min(best, count)
    return -1 if best > len(dist) else best
print(solution(12, [1, 5, 6, 10], [1, 2, 3, 4]))
| [
"kyun2dot@gmail.com"
] | kyun2dot@gmail.com |
8bd1277a439d2db671284eb40192c4e5b069f8ff | 77ae39a4e38dc53ed50e3943a0049fc4c72af735 | /Leetcode/Triangle.py | 6e49a3f82c0624bd8e60a6792df0901614de84a8 | [
"MIT"
] | permissive | harrifeng/Python-Study | 41ab870a31213d414f08c5753d22e8463bb3f102 | d8158e33392a322830244594405cae7e9d7f6fb4 | refs/heads/master | 2021-01-18T10:48:23.215569 | 2016-02-04T02:06:22 | 2016-02-04T02:06:22 | 51,045,556 | 1 | 0 | null | 2016-02-04T02:05:39 | 2016-02-04T02:05:38 | null | UTF-8 | Python | false | false | 1,807 | py | """
Given a triangle, find the minimum path sum from top to bottom. Each step you may move to adjacent numbers on the row below.
For example, given the following triangle
[
[2],
[3,4],
[6,5,7],
[4,1,8,3]
]
The minimum path sum from top to bottom is 11 (i.e., 2 + 3 + 5 + 1 = 11).
Note:
Bonus point if you are able to do this using only O(n) extra space, where n is the total number of rows in the triangle.
"""
class Solution:
    """LeetCode 120 "Triangle": minimum path sum from top to bottom.

    From row i, column j you may step to row i+1 column j or j+1.  Both
    methods run the same bottom-up DP:

      dp[j] = triangle[i][j] + min(dp[j], dp[j+1])

    where dp holds the best path sums from the row below.  Answer is dp[0].
    """

    # @param triangle, a list of lists of integers
    # @return an integer
    def minimumTotal(self, triangle):
        """Bottom-up DP using a single O(n) row (the stated bonus).

        Fix over the original: used a full O(rows * cols) table; one row
        suffices because only the row below is ever consulted.  Also
        returns 0 for an empty triangle instead of raising.
        """
        if not triangle:
            return 0
        # Copy the last row so the caller's input is never mutated.
        dp = list(triangle[-1])
        for i in range(len(triangle) - 2, -1, -1):
            for j in range(len(triangle[i])):
                dp[j] = triangle[i][j] + min(dp[j], dp[j + 1])
        return dp[0]

    def minimumTotal_2(self, triangle):
        """Index-walking variant of the same bottom-up DP.

        Bug fix: the original did ``dp = triangle[n]`` which *aliased*
        the last row, so the DP silently overwrote the caller's input;
        a copy is taken instead.
        """
        if not triangle:
            return 0
        n = len(triangle) - 1
        dp = triangle[n][:]  # copy, do not alias the input row
        n -= 1
        while n >= 0:
            for i in range(n + 1):
                dp[i] = triangle[n][i] + min(dp[i], dp[i + 1])
            n -= 1
        return dp[0]
| [
"cyandterry@hotmail.com"
] | cyandterry@hotmail.com |
dd0916097167cb5d677ecb3fcf9c3a7a4546be4c | effce116340b7d937bd285e43b49e1ef83d56156 | /data_files/offlinearize.py | 642230032148b0163c1dd2ffe22831780bec7e19 | [] | no_license | DL2021Spring/CourseProject | a7c7ef57d69bc1b21e3303e737abb27bee3bd585 | 108cdd906e705e9d4d05640af32d34bfc8b124da | refs/heads/master | 2023-04-11T18:52:30.562103 | 2021-05-18T09:59:59 | 2021-05-18T09:59:59 | 365,733,976 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,614 | py |
import sys
from urlparse import urlparse, urljoin
from os.path import dirname, join as joinpath
from os import makedirs
from urllib import urlopen
from simplejson import loads
# Parse the target brat URL from the command line; any failure (missing
# argument or malformed URL) prints usage and exits.  (Python 2 script.)
try:
    base_url = sys.argv[1]
    url = urlparse(base_url)
except:
    # NOTE(review): if no argument was given, this print re-raises
    # IndexError before the usage message can be shown.
    print sys.argv[1]
    print "Syntax: %s <url>" % sys.argv[0]
    sys.exit(1)
# Downloaded data is mirrored next to this script under ../offline_data.
this_dir = dirname(sys.argv[0])
datadir = joinpath(this_dir, '../offline_data')
# The URL fragment (after '#') names "/collection/document"; drop the
# document part and the leading slash to get the starting collection.
coll_and_doc = url.fragment
coll = dirname(coll_and_doc)[1:]
def convert_coll(coll):
    """Recursively mirror a brat collection for offline use.

    Fetches the collection listing via the ajax.cgi endpoint, saves it as
    <datadir>/<coll>/collection.js (wrapped as ``jsonp=...``), then fetches
    every document in the collection and recurses into sub-collections.

    :param coll: collection path relative to the server root ('' = root)
    """
    # The ajax API expects collections wrapped in slashes: '/', '/sub/', ...
    if coll == '':
        ajax_coll = '/'
    else:
        ajax_coll = '/%s/' % coll
    coll_query_url = urljoin(base_url, 'ajax.cgi?action=getCollectionInformation&collection=%s' % ajax_coll)
    coll_dir = joinpath(datadir, coll)
    # Best-effort mkdir: the bare except also swallows the (expected)
    # "directory already exists" error on re-runs.
    try:
        makedirs(coll_dir)
    except:
        pass
    print ajax_coll
    conn = urlopen(coll_query_url)
    jsonp = conn.read()
    # NOTE(review): missing parentheses — this references the close method
    # without calling it, so the connection is never actually closed.
    conn.close
    # Save the raw response wrapped as a jsonp assignment so the offline
    # viewer can load it with a plain <script> tag.
    with open(joinpath(coll_dir, 'collection.js'), 'w') as f:
        f.write("jsonp=")
        f.write(jsonp)
    coll_data = loads(jsonp)
    # Each item is a tuple whose first element tags its type:
    # 'd' = document, 'c' = (sub-)collection; item[2] is the name.
    for item in coll_data['items']:
        if item[0] == 'd':
            doc = item[2]
            print " %s" % doc
            doc_query_url = urljoin(base_url, 'ajax.cgi?action=getDocument&collection=%s&document=%s' % (ajax_coll, doc))
            conn = urlopen(doc_query_url)
            jsonp = conn.read()
            # NOTE(review): same missing-parentheses issue as above.
            conn.close
            with open(joinpath(coll_dir, '%s.data.js' % doc), 'w') as f:
                f.write("jsonp=")
                f.write(jsonp)
        elif item[0] == 'c' and item[2] != '..':
            # Recurse into sub-collections, skipping the parent link.
            convert_coll(item[2])
# Kick off the mirror starting from the collection named in the URL fragment.
convert_coll(coll)
| [
"1042448815@qq.com"
] | 1042448815@qq.com |
12841b3819ac2e828f695f322d21313affe8148c | f15d8305d1c97482b7a3391036742eaaaccc8238 | /TestTurtle/frctal_tree_2.0.py | a4b7f7d25a925a7ea9a19601fdb47308c521bb78 | [] | no_license | yuansuixin/learn_python__xiaoxiang | cdeec72a615c28de15334b6d61de87a4df4b25cd | 3b90cab535a052ed101ea6838cf86529cf570ec6 | refs/heads/master | 2021-04-03T06:15:41.841685 | 2018-03-11T10:18:43 | 2018-03-11T10:18:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py |
## 绘制分型树
import turtle
def draw_brach(brach_length):
    """Recursively draw one branch of a fractal tree with turtle graphics.

    Each branch forks into a right and a left sub-branch, each 15 units
    shorter; recursion stops once a branch would be 5 units or less.
    Every move is also echoed to stdout (messages are in Chinese:
    "forward", "turn right 20", "turn left 40", "go back").

    :param brach_length: length of the branch to draw
    """
    if brach_length > 5:
        # Draw the branch, then the right-hand sub-tree.
        turtle.forward(brach_length)
        print('向前',brach_length)
        turtle.right(20)
        print('右转20')
        draw_brach(brach_length-15)
        # Draw the left-hand sub-tree (40 deg left undoes the 20 deg right
        # and adds 20 deg left).
        turtle.left(40)
        print('左转40')
        draw_brach(brach_length-15)
        # Restore the heading and retreat to the branch's starting point
        # so the caller's state is unchanged.
        turtle.right(20)
        print('右转20')
        turtle.backward(brach_length)
        print('返回',brach_length)
def main():
    """Set up the turtle and draw the fractal tree.

    Points the turtle upward, moves the origin 150 units down (pen up, so
    nothing is drawn), then draws a red tree with a 100-unit trunk.  The
    window stays open until clicked.
    """
    turtle.left(90)
    turtle.penup()
    turtle.backward(150)
    turtle.pendown()
    turtle.color('red')
    draw_brach(100)
    turtle.exitonclick()
if __name__ == '__main__':
main()
| [
"cyss428@163.com"
] | cyss428@163.com |
32bd6af443d509ff9a722650c33ac8c9fda2b766 | 901b554d55e661e1f2af4493e0fd446b8dd31e3f | /20090126-sws7-sb/parseChaco.py | 791a5b6e2c53ed54414f4a192c11d24ae3fac415 | [] | no_license | dsoto/swData | dec5542f0fa2af9554e946b5cd3c248201042c36 | 19dc7d9cd3e23e282c80166c359c995548525e63 | refs/heads/master | 2020-12-24T17:54:57.486775 | 2010-06-16T15:07:02 | 2010-06-16T15:07:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,518 | py | #!/usr/bin/env python
from enthought.chaco.api import (OverlayPlotContainer,
VPlotContainer, Plot, ArrayPlotData)
from enthought.chaco.tools.api import (PanTool, LineInspector)
from enthought.traits.api import (HasTraits, Instance, Array,
Button, Str, Int, Float, Bool, Tuple)
from enthought.traits.ui.api import (View, Item, Handler, HGroup)
from enthought.traits.ui.menu import Action, OKButton
from enthought.enable.component_editor import ComponentEditor
import numpy
import glob
import sys
import os.path
sys.path.append("../roxanne")
import roxanne as rx
class customTool(LineInspector):
    """Chaco line-inspector that mirrors cursor/click positions into a plotBox.

    Mouse moves update the plotBox's cursor readout; left clicks record up
    to three landmark points (cycling back to the first after the third).
    """
    def __init__(self,*args,**kwargs):
        # kwargs must include 'plotBox' (the owning plotBox instance);
        # the remaining kwargs configure LineInspector itself.
        super(customTool,self).__init__(*args,**kwargs)
        self.plotBox = kwargs['plotBox']
    def normal_mouse_move(self, event):
        """Track the cursor: update plotBox.cursorPosX/Y from screen coords."""
        LineInspector.normal_mouse_move(self,event)
        plot = self.component
        plot.request_redraw()
        # map_data converts screen (x, y) to data coordinates; only the
        # x (index) value is needed, y is looked up from the normal trace.
        cursorPosX = self.component.map_data([event.x,event.y])[0]
        self.plotBox.cursorPosX = int(cursorPosX)
        self.plotBox.cursorPosY = self.plotBox.normal[self.plotBox.cursorPosX]
    def normal_left_down(self, event):
        """Record the clicked position as the next of three landmark points."""
        cursorPosX = self.component.map_data([event.x,event.y])[0]
        self.plotBox.cursorPosX = int(cursorPosX)
        self.plotBox.cursorPosY = self.plotBox.normal[self.plotBox.cursorPosX]
        # After the third point, wrap around and start overwriting point 0.
        if self.plotBox.pointsClicked == 3:
            self.plotBox.pointsClicked = 0
        self.plotBox.pointX[self.plotBox.pointsClicked]=self.plotBox.cursorPosX
        self.plotBox.pointY[self.plotBox.pointsClicked]=self.plotBox.cursorPosY
        # Self-assignment after in-place array mutation — presumably to fire
        # the Traits change notification; confirm against Traits semantics.
        self.plotBox.pointX = self.plotBox.pointX
        self.plotBox.pointY = self.plotBox.pointY
        # Push the new points into the plot's data source so markers redraw.
        self.plotBox.plotdata.set_data('pointX',self.plotBox.pointX)
        self.plotBox.plotdata.set_data('pointY',self.plotBox.pointY)
        self.plotBox.pointsClicked += 1
    def normal_left_up(self, event):
        # No action on button release.
        pass
class plotBoxHandler(Handler):
    """TraitsUI handler for the plotBox window.

    Drives the accept/reject workflow and appends the chosen landmark
    indices to the shared output file when the window closes.
    """
    def close(self, info, is_ok):
        # Only allow the window to close once the analysis was accepted.
        # (Implicitly returns None — falsy — otherwise, vetoing the close.)
        if info.object.isAccepted == True:
            return True
    def closed(self, info, is_ok):
        # Write one tab-separated line per file: name plus the three
        # landmark indices picked by the user.
        outString = (info.object.fileName + '\t' +
                     str(info.object.pointX[0]) + '\t' +
                     str(info.object.pointX[1]) + '\t' +
                     str(info.object.pointX[2]) + '\n')
        info.object.fOut.write(outString)
    def accept(self, info):
        """Mark the currently picked points as accepted."""
        info.object.message = 'plot points accepted'
        info.object.isAccepted = True
    def reject(self, info):
        """Discard the picked points and reset for another attempt."""
        info.object.message = 'plot points rejected, choose again'
        # Restore the default placeholder points and push them to the plot.
        info.object.pointX = numpy.array([0.0,100.0,200.0])
        info.object.pointY = numpy.array([0.0,0.0,0.0])
        info.object.plotdata.set_data('pointX',info.object.pointX)
        info.object.plotdata.set_data('pointY',info.object.pointY)
        info.object.isAccepted = False
        info.object.pointsClicked = 0
    def object_pointX_changed(self, info):
        # Static notification hook for pointX changes; intentionally a no-op.
        pass
class plotBox(HasTraits):
    """Interactive viewer for one force-trace data file.

    Loads the normal and shear voltage traces, stacks them in two Chaco
    plots, and lets the user click three landmark points (contact, max
    preload, max adhesion) on the normal trace.  plotBoxHandler writes the
    accepted indices to fOut when the window closes.
    """
    # Number of landmark points clicked so far (wraps after 3).
    pointsClicked = Int
    # Sample index axis and the two force traces.
    index = Array
    normal = Array
    shear = Array
    # The three landmark points; comparison_mode = 0 forces change
    # notifications even when the array object is reassigned to itself.
    pointX = Array(dtype = int, value = ([0.0,100.0,200.0]), comparison_mode = 0)
    pointY = Array(dtype = float, value = ([0.0,0.0,0.0]), comparison_mode = 0)
    # Status line shown in the UI.
    message = Str
    isAccepted = Bool
    # Custom dialog buttons wired to plotBoxHandler.accept/reject.
    accept = Action(name = "Accept", action = "accept")
    reject = Action(name = "Reject", action = "reject")
    # Live cursor readout updated by customTool.
    cursorPosX = Int
    cursorPosY = Float
    vPlot = Instance(VPlotContainer)
    def __init__(self, fileName, fOut):
        """Load *fileName* and build the two stacked plots.

        :param fileName: path to the .data force-trace file
        :param fOut: open output file the handler appends results to
        """
        print 'init plotBox'
        super(plotBox, self).__init__()
        self.isAccepted = True
        self.fOut = fOut
        self.message = 'Analysis Acceptable?'
        # Two plots stacked vertically: shear on top, normal below.
        self.vPlot = VPlotContainer(padding = 10)
        self.vPlot.stack_order = 'top_to_bottom'
        topPlot = OverlayPlotContainer(padding = 10)
        self.vPlot.add(topPlot)
        bottomPlot = OverlayPlotContainer(padding = 10)
        self.vPlot.add(bottomPlot)
        # --- file-name bookkeeping ---
        self.fileName = fileName
        # get complete path of data file, then keep only the base name
        fullFileName = os.path.abspath(fileName)
        self.fileName = os.path.split(fullFileName)[1]
        self.shortFileName = os.path.splitext(self.fileName)[1]
        self.plotTitle = self.shortFileName
        # --- read the data file (header, then the column arrays) ---
        print fileName
        fileIn = open(fileName,'r')
        hD = rx.readDataFileHeader(fileIn)
        print 'hD'
        dD = rx.readDataFileArray(fileIn)
        # NOTE(review): 'normal' is filled from voltageForceLateral and
        # 'shear' from voltageForceNormal — possibly a deliberate channel
        # mapping, but it looks swapped; confirm against the rig wiring.
        self.normal = numpy.array(map(float,dD['voltageForceLateral']))
        self.shear = numpy.array(map(float,dD['voltageForceNormal']))
        self.index = numpy.arange(len(self.normal))
        # Automatic landmark detection, currently disabled in favor of
        # manual clicking:
        # iD = rx.parseForceTrace(hD,dD)
        # self.pointX[0] = iD['indexContact']
        # self.pointY[0] = self.normal[iD['indexContact']]
        # self.pointX[1] = iD['indexMaxPreload']
        # self.pointY[1] = self.normal[iD['indexMaxPreload']]
        # self.pointX[2] = iD['indexMaxAdhesion']
        # self.pointY[2] = self.normal[iD['indexMaxAdhesion']]
        # --- construct the Chaco plots ---
        self.plotdata = ArrayPlotData(index = self.index,
                                      normal = self.normal,
                                      shear = self.shear,
                                      pointX = self.pointX,
                                      pointY = self.pointY)
        self.normalPlot = Plot(self.plotdata)
        self.normalPlot.plot(('index','normal'), type = 'line',
                             color = 'blue')
        # Landmark markers drawn on top of the normal trace.
        self.normalPlot.plot(('pointX','pointY'), type = 'scatter',
                             marker = 'diamond',
                             marker_size = 5,
                             color = (0.0,0.0,1.0,0.5),
                             outline_color = 'none')
        self.normalPlot.value_range.set_bounds(-1,1)
        self.shearPlot = Plot(self.plotdata)
        self.shearPlot.plot(('index','shear'),type='line',color='green')
        # Cursor/click tool feeding cursorPos* and the landmark points.
        self.normalPlot.overlays.append(customTool(plotBox = self,
                                                   component = self.normalPlot,
                                                   axis = 'index_x',
                                                   inspect_mode = 'indexed',
                                                   write_metadata = True,
                                                   color = 'black',
                                                   is_listener = False))
        self.normalPlot.tools.append(rx.SimpleZoom(self.normalPlot))
        self.normalPlot.tools.append(PanTool(self.normalPlot,drag_button = 'right'))
        self.normalPlot.title = 'Normal Force Trace'
        self.shearPlot.title = 'Shear Force Trace'
        topPlot.add(self.shearPlot)
        bottomPlot.add(self.normalPlot)
        # Link the x axes so pan/zoom on one plot moves the other.
        self.shearPlot.index_range = self.normalPlot.index_range
    # Window layout: the stacked plots above a row of readouts, plus the
    # custom Accept/Reject buttons handled by plotBoxHandler.
    traits_view = View(Item('vPlot',
                            editor = ComponentEditor(),
                            resizable = True,
                            show_label = False),
                       HGroup(Item('message', width = 200),
                              Item('cursorPosX', width = 200),
                              Item('cursorPosY', width = 200),
                              Item('pointX', style='readonly', width = 200),
                              Item('pointY', style='readonly', width = 200)),
                       buttons = [accept, reject, OKButton],
                       title = 'Roxanne Parse Application',
                       handler = plotBoxHandler(),
                       resizable = True,
                       width = 1400, height = 800,
                       x = 20, y = 40)
def main():
    """Run the interactive parser over every matching data file.

    Opens one plotBox window per './data/*sws*.data' file; when each window
    closes, plotBoxHandler appends the chosen landmark indices to
    'parsed.data' (tab-separated, one line per file, after a header row).

    Fix over the original: the output file was opened and never closed,
    relying on interpreter exit to flush it; a ``with`` block now
    guarantees it is flushed and closed.
    """
    fileNameList = glob.glob('./data/*sws*.data')
    outputList = ['dataFileName',
                  'indexContact',
                  'indexMaxPreload',
                  'indexMaxAdhesion\n']
    headerString = '\t'.join(outputList)
    with open('parsed.data', 'w') as fOut:
        fOut.write(headerString)
        for fileName in fileNameList:
            # configure_traits() blocks until the user closes the window;
            # the handler writes to fOut during that close.
            myPlotBox = plotBox(fileName, fOut)
            myPlotBox.configure_traits()
if __name__=='__main__':
main() | [
"danielrsoto@gmail.com"
] | danielrsoto@gmail.com |
97f71acb578518c20028682674b1d9b0be86ddb1 | 29416ed280fff073ea325ed7dc0d573cb7145d47 | /stagesepx/classifier/svm.py | d2f9d17c112c863bfcd77074772925fcfedf4f63 | [
"MIT"
] | permissive | lshvisual/stagesepx | 64507f3cf2db65c3cfe7129cecb357237f3a8db7 | 98aebb4195e87b631c736878fbcef210f19b22cc | refs/heads/master | 2020-07-16T14:57:48.688554 | 2019-09-02T04:18:20 | 2019-09-02T04:18:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,449 | py | from loguru import logger
import cv2
import os
import pickle
import typing
import numpy as np
from sklearn.svm import LinearSVC
from stagesepx.classifier.base import BaseClassifier
from stagesepx import toolbox
class SVMClassifier(BaseClassifier):
    """Stage classifier backed by a linear SVM (sklearn LinearSVC).

    Frames are converted to a feature vector (HOG, LBP, or raw pixels)
    before training/prediction.  The trained model can be pickled to disk
    and reloaded.
    """
    # Maps feature-type name to the function that turns a frame into a
    # feature descriptor.
    FEATURE_DICT = {
        'hog': toolbox.turn_hog_desc,
        'lbp': toolbox.turn_lbp_desc,
        # do not use feature transform
        'raw': lambda x: x,
    }
    def __init__(self,
                 feature_type: str = None,
                 *args, **kwargs):
        """
        init classifier
        :param feature_type:
            before training, classifier will convert pictures into feature, for better classification.
            eg: 'hog', 'lbp' or 'raw'. Defaults to 'hog' when omitted.
        :raises AttributeError: if feature_type is not a known feature name
        """
        super().__init__(*args, **kwargs)
        if not feature_type:
            feature_type = 'hog'
        if feature_type not in self.FEATURE_DICT:
            raise AttributeError(f'no feature func named {feature_type}')
        self.feature_func = self.FEATURE_DICT[feature_type]
        # The LinearSVC instance; created lazily by train() or load_model().
        self._model = None
        logger.debug(f'feature function: {feature_type}')
    def clean_model(self):
        """Drop the current model (does not touch any saved model file)."""
        self._model = None
    def save_model(self, model_path: str, overwrite: bool = None):
        """
        save trained model
        :param model_path: destination file path
        :param overwrite: set True to replace an existing file
        :raises FileExistsError: target exists and overwrite is falsy
        :raises AssertionError: no trained model to save
        """
        logger.debug(f'save model to {model_path}')
        # assert model file
        if os.path.isfile(model_path) and not overwrite:
            raise FileExistsError(f'model file {model_path} already existed, you can set `overwrite` True to cover it')
        # assert model data is not empty
        assert self._model, 'model is empty'
        with open(model_path, 'wb') as f:
            pickle.dump(self._model, f)
    def load_model(self, model_path: str, overwrite: bool = None):
        """
        load trained model
        :param model_path: path of a previously saved model file
        :param overwrite: set True to replace an already-loaded model
        :raises AssertionError: model file does not exist
        :raises RuntimeError: a model is already loaded and overwrite is falsy

        NOTE(review): pickle.load will execute code embedded in the file;
        only load model files from trusted sources.
        """
        logger.debug(f'load model from {model_path}')
        # assert model file
        assert os.path.isfile(model_path), f'model file {model_path} not existed'
        # assert model data is empty
        if self._model and not overwrite:
            raise RuntimeError(f'model is not empty, you can set `overwrite` True to cover it')
        # joblib raise an error ( i have no idea about how to fix it ) here, so use pickle instead
        with open(model_path, 'rb') as f:
            self._model = pickle.load(f)
    def read_from_list(self, data: typing.List[int], video_cap: cv2.VideoCapture = None, *_, **__):
        # In-memory data sources are unsupported for SVM; data must come
        # from labeled picture files on disk (see BaseClassifier.read).
        raise NotImplementedError('svm classifier only support loading data from files')
    def train(self):
        """
        train your classifier with data. must be called before prediction
        Reuses an existing model if one is loaded; otherwise builds a
        fresh LinearSVC.  Training samples come from self.read(), which
        yields (label, pictures) pairs.
        :return:
        """
        if not self._model:
            logger.debug('no model can be used. build a new one.')
            self._model = LinearSVC()
        else:
            logger.debug('already have a trained model. train on this model.')
        train_data = list()
        train_label = list()
        for each_label, each_label_pic_list in self.read():
            for each_pic_object in each_label_pic_list:
                logger.debug(f'training label: {each_label}')
                # apply hook (frame_id -1 = not tied to a specific frame)
                each_pic_object = self._apply_hook(-1, each_pic_object)
                # Flattened feature vectors — assumes all pictures produce
                # descriptors of identical length; confirm upstream sizing.
                each_pic_object = self.feature_func(each_pic_object).flatten()
                train_data.append(each_pic_object)
                train_label.append(each_label)
        logger.debug('data ready')
        self._model.fit(train_data, train_label)
        logger.debug('train finished')
    def predict(self, pic_path: str) -> str:
        """
        predict a single picture
        :param pic_path: path of the image file to classify
        :return: predicted stage label
        """
        pic_object = cv2.imread(pic_path)
        return self.predict_with_object(pic_object)
    def predict_with_object(self, pic_object: np.ndarray) -> str:
        """
        predict a single object
        :param pic_object: image as a numpy array (as read by cv2)
        :return: predicted stage label
        """
        pic_object = self.feature_func(pic_object)
        # reshape(1, -1): sklearn expects a 2-D batch, here of one sample
        pic_object = pic_object.reshape(1, -1)
        return self._model.predict(pic_object)[0]
    def _classify_frame(self,
                        frame_id: int,
                        frame: np.ndarray,
                        *_, **__) -> str:
        # Per-frame entry point used by the BaseClassifier pipeline.
        return self.predict_with_object(frame)
| [
"178894043@qq.com"
] | 178894043@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.