Dataset schema: each example has the following 29 columns, listed with their types and observed value ranges. The records below give one value per line, in this column order.

  blob_id                string         length 40
  directory_id           string         length 40
  path                   string         length 3 to 288
  content_id             string         length 40
  detected_licenses      list           length 0 to 112
  license_type           string         2 classes
  repo_name              string         length 5 to 115
  snapshot_id            string         length 40
  revision_id            string         length 40
  branch_name            string         684 classes
  visit_date             timestamp[us]  2015-08-06 10:31:46 to 2023-09-06 10:44:38
  revision_date          timestamp[us]  1970-01-01 02:38:32 to 2037-05-03 13:00:00
  committer_date         timestamp[us]  1970-01-01 02:38:32 to 2023-09-06 01:08:06
  github_id              int64          4.92k to 681M
  star_events_count      int64          0 to 209k
  fork_events_count      int64          0 to 110k
  gha_license_id         string         22 classes
  gha_event_created_at   timestamp[us]  2012-06-04 01:52:49 to 2023-09-14 21:59:50
  gha_created_at         timestamp[us]  2008-05-22 07:58:19 to 2023-08-21 12:35:19
  gha_language           string         147 classes
  src_encoding           string         25 classes
  language               string         1 class
  is_vendor              bool           2 classes
  is_generated           bool           2 classes
  length_bytes           int64          128 to 12.7k
  extension              string         142 classes
  content                string         length 128 to 8.19k
  authors                list           length 1
  author_id              string         length 1 to 132
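As a minimal sketch of how rows with this schema can be consumed, assuming the data is published as a Hugging Face dataset (the dataset path below is a hypothetical placeholder, not a name given anywhere in this dump):

from datasets import load_dataset

# "org/python-source-subset" is a hypothetical placeholder path;
# substitute the dataset's actual Hub name.
ds = load_dataset("org/python-source-subset", split="train", streaming=True)
for row in ds:
    # Each row is a dict keyed by the column names listed above.
    print(row["repo_name"], row["path"], row["license_type"])
    print(row["content"][:200])  # first 200 characters of the source file
    break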
96e8c5c0de2d305c63d6d23a0b1aaeb507e67ff8
ddd7d37f3a98b508e16f27978bf788fc61358225
/wsgi.py
06b4bc7a115d792c6e0690fac47c8d93e9c07d3e
[]
no_license
nbh847/bbs_practice
b124e59b86b8fb65b2e5ee34b8e7065a05bcee41
21946cbf27a34028a53144a2c202d763fda6ee21
refs/heads/master
2020-03-28T19:20:31.590484
2018-10-24T14:15:40
2018-10-24T14:15:40
148,966,258
0
0
null
null
null
null
UTF-8
Python
false
false
624
py
#!/usr/bin/env python3
import sys
from os.path import abspath
from os.path import dirname

# Add the current directory to the module search path
sys.path.insert(0, abspath(dirname(__file__)))

# Import app.py
import app

# There must be a variable named application:
# gunicorn looks for exactly this variable, and its value
# must be the Flask instance. This is the required
# convention (protocol).
application = app.app

# This is the standard recipe for deploying the code
# behind apache / gunicorn / nginx.
"""
➜ ~ cat /etc/supervisor/conf.d/xx.conf
[program:todo]
command=/usr/local/bin/gunicorn wsgi --bind 0.0.0.0:2000 --pid /tmp/todo.pid
directory=/root/web13
autostart=true
"""
[ "binhui.ni@yourdream.cc" ]
binhui.ni@yourdream.cc
7c2da6da0584f39818fbf738cbb2c1a7d78c13aa
8535bbc7781c4691880c935bd7025646f0dbb7c3
/check mirror images of two arrays.py
daa0276c484e75516f27c7d2d1b8b51dc8139981
[]
no_license
Mahadev0317/Codekata
3b2149f3116ebe4b48b2059b873544c27b23ff39
c35fa0ed0c4870faea69152638f461e743a9ff69
refs/heads/master
2020-04-15T04:59:17.062947
2019-05-29T04:46:35
2019-05-29T04:46:35
164,404,727
1
2
null
null
null
null
UTF-8
Python
false
false
183
py
n = int(input())
li = list(map(int, input().split()))
lis = list(map(int, input().split()))
k = n - 1
for i in range(n):
    if li[i] != lis[k]:
        print("no")
        break
    k -= 1
else:
    print("yes")
[ "noreply@github.com" ]
Mahadev0317.noreply@github.com
9404fc7b69c9461b11360f92805c0269adeb68e9
f07a42f652f46106dee4749277d41c302e2b7406
/Data Set/bug-fixing-5/9e606bfdbe039112b609c263cb4dfc23e31ffe93-<check_regressors_train>-bug.py
46b71af4c66c69b7a702b9f117d56ad37df9194f
[]
no_license
wsgan001/PyFPattern
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
cc347e32745f99c0cd95e79a18ddacc4574d7faa
refs/heads/main
2023-08-25T23:48:26.112133
2021-10-23T14:11:22
2021-10-23T14:11:22
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,033
py
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_regressors_train(name, regressor_orig):
    (X, y) = _boston_subset()
    y = StandardScaler().fit_transform(y.reshape((- 1), 1))
    y = y.ravel()
    regressor = clone(regressor_orig)
    y = multioutput_estimator_convert_y_2d(regressor, y)
    rnd = np.random.RandomState(0)
    if ((not hasattr(regressor, 'alphas')) and hasattr(regressor, 'alpha')):
        regressor.alpha = 0.01
    if (name == 'PassiveAggressiveRegressor'):
        regressor.C = 0.01
    assert_raises(ValueError, regressor.fit, X, y[:(- 1)])
    if (name in CROSS_DECOMPOSITION):
        y_ = np.vstack([y, ((2 * y) + rnd.randint(2, size=len(y)))])
        y_ = y_.T
    else:
        y_ = y
    set_random_state(regressor)
    regressor.fit(X, y_)
    regressor.fit(X.tolist(), y_.tolist())
    y_pred = regressor.predict(X)
    assert_equal(y_pred.shape, y_.shape)
    if (name not in ('PLSCanonical', 'CCA', 'RANSACRegressor')):
        assert_greater(regressor.score(X, y_), 0.5)
[ "dg1732004@smail.nju.edu.cn" ]
dg1732004@smail.nju.edu.cn
3b2933c2a78105815d35f620b0fe153ed6a99d8c
fca36ece36254e6175d7ac26791ae0e0abedd040
/ex03.py
8f728c512cd7fc125cef46901dabc8aa8b3eabc0
[]
no_license
DikranHachikyan/python-programming-20190318
72c63ccdb716db871c755bb589e333c9fc57bcd5
0e6b4c599be3d69efdb4acf7817abc3d9d41eb7b
refs/heads/master
2020-04-29T23:35:10.401263
2019-04-18T14:59:30
2019-04-18T14:59:30
176,480,840
1
0
null
null
null
null
UTF-8
Python
false
false
130
py
#!/home/wizard/anaconda3/bin/python

def main():
    x = int(input('x='))
    if x < 10:
        print('x={}'.format(x))

main()
[ "dhachikian@expert-bg.org" ]
dhachikian@expert-bg.org
1f8bd03e1ef888abd81c307876d56b9be3c041cf
b3a2ac9eb02a6eef9e6f3504afabc6400f894f56
/clld/tests/test_web_app.py
b32979cb1f2d24ff5e626188403624fba5dc541a
[ "BSD-3-Clause", "MIT", "Apache-2.0" ]
permissive
Anaphory/clld
9f67c477e228eff05fdc7b7fa4310c703af02108
bed1a6d08275a41fd7b5d13a0af19e4e538d186c
refs/heads/master
2021-01-17T04:50:05.382411
2017-02-16T11:20:11
2017-02-16T11:20:11
66,831,136
0
0
null
2016-08-29T09:43:06
2016-08-29T09:43:05
null
UTF-8
Python
false
false
3,180
py
# coding: utf8
from __future__ import unicode_literals, print_function, division, absolute_import

import importlib

from zope.interface import Interface
from pyramid.testing import Configurator
from pyramid.httpexceptions import HTTPNotFound
from purl import URL

from clld.db.models.common import Contribution, ValueSet, Language, Language_files
from clld.tests.util import TestWithEnv, Route, TESTS_DIR, WithDbAndDataMixin
from clld.interfaces import IMapMarker
from clld.web.adapters.download import N3Dump


class Tests(WithDbAndDataMixin, TestWithEnv):
    def test_CLLDRequest(self):
        self.assertTrue(isinstance(self.env['request'].purl, URL))
        c = self.env['request'].db.query(Contribution).first()
        self.env['request'].resource_url(c, ext='geojson')
        self.assertEqual(None, self.env['request'].ctx_for_url('/some/path/to/nowhere'))
        assert self.env['request'].ctx_for_url('/')
        self.env['request'].file_url(Language_files(id='1', object=Language.first()))
        assert self.env['request'].get_datatable('valuesets', ValueSet)
        assert self.env['request'].blog is None

    def test_menu_item(self):
        from clld.web.app import menu_item
        assert menu_item('contributions', None, self.env['request'])

    def test_ctx_factory(self):
        from clld.web.app import ctx_factory
        for model, route in [
            (Contribution, 'contributions'),
            (ValueSet, 'valuesets'),
            (Language, 'languages'),
        ]:
            obj = model.first()
            self.set_request_properties(
                matchdict={'id': obj.id}, matched_route=Route(route))
            ctx_factory(model, 'index', self.env['request'])
            ctx_factory(model, 'rsc', self.env['request'])

        self.set_request_properties(matchdict={'id': 'xxx'})
        self.assertRaises(
            HTTPNotFound, ctx_factory, Contribution, 'rsc', self.env['request'])

    def test_MapMarker(self):
        marker = self.env['request'].registry.getUtility(IMapMarker)
        self.assertTrue(marker(None, self.env['request']))

    def test_add_config_from_file(self):
        from clld.web.app import add_settings_from_file
        config = Configurator()
        add_settings_from_file(config, TESTS_DIR.joinpath('test.ini'))
        assert 'app:main.use' in config.registry.settings

    def test_config(self):
        class IF(Interface):
            pass

        config = Configurator(
            root_package=importlib.import_module('clld.web'),
            settings={
                'sqlalchemy.url': 'sqlite://',
                'clld.pacific_centered_maps': True})
        config.include('clld.web.app')
        # should have no effect, because a resource with this name is registered by
        # default:
        config.register_menu('languages', ('sources', dict(label='References')))
        config.register_resource('language', None, None)
        config.register_resource('testresource', Language, IF, with_index=True, test=True)
        config.register_download(N3Dump(Language, 'clld'))
        config.add_301('/301pattern', 'http://example.org')
        config.add_410('/410pattern')
[ "xrotwang@googlemail.com" ]
xrotwang@googlemail.com
ba67521102acfb8f7814511d76092c36dc6602be
5ca85847885c6fd6f9728b0b2dffb66e96a81a1d
/hemlock/database/types/__init__.py
6c06c43fc1ad509b883efcf9d4a7a2afd3756cbc
[]
no_license
syfreed/hemlock_test2
682d843636883a6a2b883932cd7282e9b865ebcd
61933fd17630ddd1bb46d8f2090b1b039a3b4e99
refs/heads/master
2020-08-03T11:21:18.460905
2019-09-29T22:36:36
2019-09-29T22:36:36
211,733,895
0
0
null
2019-10-22T14:21:27
2019-09-29T22:25:30
Python
UTF-8
Python
false
false
227
py
"""Custom Hemlock database types""" from hemlock.database.types.data_frame import DataFrame, DataFrameType from hemlock.database.types.function import Function, FunctionType from hemlock.database.types.markup import MarkupType
[ "dsbowen@wharton.upenn.edu" ]
dsbowen@wharton.upenn.edu
380aef44b9b7964ea582816fee79936176253abd
8152e8fba564bcfa435c45dab41cd6f0b455f857
/farmdation_project/farmdation/contrib/sites/migrations/0002_set_site_domain_and_name.py
4c91e98dd67d65f97ee31ca5c1c705f719a236ea
[ "MIT" ]
permissive
panuta/django-storehouse
af068d9fb356f1f6243854c8944ff8146833202e
6b90b0de231671b20f2f549a74a52d87694d821a
refs/heads/master
2021-01-17T18:19:45.791785
2016-07-02T02:52:04
2016-07-02T02:52:04
62,430,151
0
0
null
null
null
null
UTF-8
Python
false
false
1,152
py
""" To understand why this file is here, please read: http://cookiecutter-django.readthedocs.org/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django """ # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.conf import settings from django.db import migrations def update_site_forward(apps, schema_editor): """Set site domain and name.""" Site = apps.get_model("sites", "Site") Site.objects.update_or_create( id=settings.SITE_ID, defaults={ "domain": settings.WEBSITE_DOMAIN, "name": settings.WEBSITE_NAME } ) def update_site_backward(apps, schema_editor): """Revert site domain and name to default.""" Site = apps.get_model("sites", "Site") Site.objects.update_or_create( id=settings.SITE_ID, defaults={ "domain": "example.com", "name": "example.com" } ) class Migration(migrations.Migration): dependencies = [ ('sites', '0001_initial'), ] operations = [ migrations.RunPython(update_site_forward, update_site_backward), ]
[ "panuta@gmail.com" ]
panuta@gmail.com
df67e70950e1afba7bbc49e3c1d809e2e069b4f4
ecb6b752523a126ef17895854b18e02df41c4cfe
/app_backend/tests/test_skf_categories.py
cd07666e4b26e67263b352f3da1a4e2f7482217e
[ "MIT" ]
permissive
zhanghe06/bearing_project
cd6a1b2ba509392da37e5797a3619454ca464276
25729aa7a8a5b38906e60b370609b15e8911ecdd
refs/heads/master
2023-05-27T17:23:22.561045
2023-05-23T09:26:07
2023-05-23T09:39:14
126,219,603
2
5
MIT
2022-12-08T03:11:27
2018-03-21T17:54:44
JavaScript
UTF-8
Python
false
false
1,824
py
#!/usr/bin/env python
# encoding: utf-8

"""
@author: zhanghe
@software: PyCharm
@file: test_skf_categories.py
@time: 2020-05-19 16:32
"""

import csv

import requests


def test_get_skf_categories(csv_name, category_id):
    params = {
        'id': category_id,
        'language': 'en',
        'source': 'webpim',
        'site': '307',
        'hits': 100,
        'offset': 0,
    }
    url = 'https://search.skf.com/prod/search-skfcom/rest/apps/opc_v1/searchers/categories'
    header = ['Designation', 'd[mm]', 'D[mm]', 'B[mm]', 'C[kN]', 'Co[kN]', 'Pu[kN]', 'G-Speed[r/min]', 'O-Speed[r/min]']
    out = open('skf_%s.csv' % csv_name, 'a')
    csv_write = csv.writer(out, dialect='excel')
    csv_write.writerow(header)
    c = 0
    next_page = 0
    while 1:
        if next_page == -1:
            break
        res = requests.get(url, params=params).json()
        rows = res.get('documentList', {}).get('documents', [])
        for r in rows:
            data = [r['title']] + r['table_values']
            csv_write.writerow(data[:9])
            c += 1
        print(params['hits'] * next_page + len(rows))
        if res.get('documentList', {}).get('numberOfHits', 0) > params['hits'] * next_page + len(rows):
            next_page += 1
        else:
            next_page = -1
        params['offset'] = params['hits'] * next_page
    out.close()
    print('%s rows in total' % c)


def run():
    category_map = {
        'angular_contact_ball_bearings': 'BA1_010',
        'cylindrical_roller_bearings': 'BC1_010',
        'angular_contact_thrust_ball_bearings_double_direction': 'BEA_010',
        'angular_contact_thrust_ball_bearings_for_screw_drives_single direction': 'BDA_010',
    }
    for k, v in category_map.items():
        test_get_skf_categories(k, v)


if __name__ == '__main__':
    run()
[ "zhang_he06@163.com" ]
zhang_he06@163.com
91aaf6d522486538a0edfd27de42e4a83a77f21e
ab4b210d204512f51a4807a652ccc0edaabe3341
/kombu/tests/test_functional/test_amqplib.py
94b9bfc92db9136c20c361ea1cc76fe35f9ccf9a
[ "BSD-3-Clause" ]
permissive
mixedpuppy/kombu
2c4ec30ccc8ab9ccb20bab525cd525febb085ce2
04b9a6f2fb6854fadbb4c29880866135354fdeef
refs/heads/master
2021-01-18T08:40:39.106451
2010-09-06T22:41:02
2010-09-06T22:41:02
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,517
py
import socket
import time

import unittest2 as unittest

from nose import SkipTest

from kombu import BrokerConnection
from kombu import Producer, Consumer, Exchange, Queue


def consumeN(conn, consumer, n=1):
    messages = []

    def callback(message_data, message):
        messages.append(message_data)
        message.ack()

    prev, consumer.callbacks = consumer.callbacks, [callback]

    while True:
        conn.drain_events(timeout=1)
        if len(messages) >= n:
            break

    consumer.callback = prev
    return messages


class test_amqplib(unittest.TestCase):

    def purge(self, names):
        chan = self.connection.channel()
        map(chan.queue_purge, names)

    def setUp(self):
        self.connection = BrokerConnection(transport="amqplib")
        try:
            self.connection.connect()
        except socket.error:
            self.connected = False
        else:
            self.connected = True
        self.exchange = Exchange("tamqplib", "direct")
        self.queue = Queue("tamqplib", self.exchange, "tamqplib")

    def test_produce__consume(self):
        if not self.connected:
            raise SkipTest("Broker not running.")
        chan1 = self.connection.channel()
        producer = Producer(chan1, self.exchange)
        producer.publish({"foo": "bar"}, routing_key="tamqplib")
        chan1.close()

        chan2 = self.connection.channel()
        consumer = Consumer(chan2, self.queue)
        message = consumeN(self.connection, consumer)
        self.assertDictEqual(message[0], {"foo": "bar"})
        chan2.close()
        self.purge(["tamqplib"])

    def test_produce__consume_multiple(self):
        if not self.connected:
            raise SkipTest("Broker not running.")
        chan1 = self.connection.channel()
        producer = Producer(chan1, self.exchange)
        b1 = Queue("pyamqplib.b1", self.exchange, "b1")
        b2 = Queue("pyamqplib.b2", self.exchange, "b2")
        b3 = Queue("pyamqplib.b3", self.exchange, "b3")

        producer.publish("b1", routing_key="b1")
        producer.publish("b2", routing_key="b2")
        producer.publish("b3", routing_key="b3")
        chan1.close()

        chan2 = self.connection.channel()
        consumer = Consumer(chan2, [b1, b2, b3])
        messages = consumeN(self.connection, consumer, 3)
        self.assertItemsEqual(messages, ["b1", "b2", "b3"])
        chan2.close()
        self.purge(["pyamqplib.b1", "pyamqplib.b2", "pyamqplib.b3"])

    def test_timeout(self):
        if not self.connected:
            raise SkipTest("Broker not running.")
        chan = self.connection.channel()
        self.purge([self.queue.name])
        consumer = Consumer(chan, self.queue)
        self.assertRaises(socket.timeout,
                          self.connection.drain_events, timeout=0.3)
        consumer.cancel()

    def test_basic_get(self):
        chan1 = self.connection.channel()
        producer = Producer(chan1, self.exchange)
        producer.publish({"basic.get": "this"}, routing_key="basic_get")
        chan1.close()

        chan2 = self.connection.channel()
        queue = Queue("amqplib_basic_get", self.exchange, "basic_get")
        queue = queue(chan2)
        queue.declare()
        for i in range(50):
            m = queue.get()
            if m:
                break
            time.sleep(0.1)
        self.assertEqual(m.payload, {"basic.get": "this"})
        chan2.close()

    def tearDown(self):
        if self.connected:
            self.connection.close()
[ "askh@opera.com" ]
askh@opera.com
f2f0d7e4e1a772df81cc5683ffdb4d196a8873f2
219566971a08625ca14c5ea7a6e1231454694a4b
/utils/sk_utils/encoder.py
bf2df2abcba4cbf2f1eb74522ece933562b35e72
[]
no_license
daxiongshu/kaggle-review
6b22e73702cd7a61f3d175f301c37dcc0d6e3ae2
fc02e85d0544dd64d57c05081c8774dc87d1972e
refs/heads/master
2021-01-19T18:03:23.795917
2017-11-26T18:16:25
2017-11-26T18:16:25
101,109,595
28
14
null
null
null
null
UTF-8
Python
false
false
1,317
py
from sklearn.feature_extraction import DictVectorizer
from scipy import sparse


def onehot_encode(tr, te, cols=None):
    if cols is None:
        cols = [i for i in tr.columns.values if i in te.columns.values]
    vec = DictVectorizer()
    for col in cols:
        tr[col] = tr[col].map(str)
        te[col] = te[col].map(str)
    print("start fitting")
    X = vec.fit_transform(tr[cols].T.to_dict().values())
    Xt = vec.transform(te[cols].T.to_dict().values())
    print("done fitting", X.shape, Xt.shape)
    return X, Xt


def onehot_encode_bar(tr, te, cols=None, bar=10000):
    if cols is None:
        cols = [i for i in tr.columns.values if i in te.columns.values]
    vec = DictVectorizer()
    cat, num = [], []
    for col in cols:
        nu = tr[col].unique().shape[0]
        if (nu < bar and nu > 2) or tr[col].dtype == 'object':
            cat.append(col)
            tr[col] = tr[col].map(str)
            te[col] = te[col].map(str)
        else:
            num.append(col)
    print("start fitting num of cat features:", len(cat))
    X = vec.fit_transform(tr[cat].T.to_dict().values())
    Xt = vec.transform(te[cat].T.to_dict().values())
    print("done fitting", X.shape, Xt.shape)
    X = sparse.hstack([X, tr[num].values], format='csr')
    Xt = sparse.hstack([Xt, te[num].values], format='csr')
    return X, Xt
[ "aixueer4ever@gmail.com" ]
aixueer4ever@gmail.com
af68bf54c136aa54b298013c4dfeb5bfe6778541
ad13583673551857615498b9605d9dcab63bb2c3
/output/instances/nistData/list/float/Schema+Instance/NISTXML-SV-IV-list-float-minLength-2-2.py
ff4cdb9e18778ab154122ead5ea2d8575b566545
[ "MIT" ]
permissive
tefra/xsdata-w3c-tests
397180205a735b06170aa188f1f39451d2089815
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
refs/heads/main
2023-08-03T04:25:37.841917
2023-07-29T17:10:13
2023-07-30T12:11:13
239,622,251
2
0
MIT
2023-07-25T14:19:04
2020-02-10T21:59:47
Python
UTF-8
Python
false
false
392
py
from output.models.nist_data.list_pkg.float_pkg.schema_instance.nistschema_sv_iv_list_float_min_length_2_xsd.nistschema_sv_iv_list_float_min_length_2 import NistschemaSvIvListFloatMinLength2

obj = NistschemaSvIvListFloatMinLength2(
    value=[
        4.9827486e+21,
        3.5627644e+26,
        5.6398728e+22,
        8.9484692e+16,
        5.2629679e+25,
        6.6862685e+16,
    ]
)
[ "tsoulloftas@gmail.com" ]
tsoulloftas@gmail.com
1806b9d06d3b1f2f058b6de59649d283c0cc1248
d15092c5fa7e5d825f5204fa2e799f88c9495de5
/non_semantic_speech_benchmark/export_model/model_export_utils.py
4bd07af3f1681f17c17e09b7d7d39b1a8d54e647
[ "CC-BY-4.0", "Apache-2.0" ]
permissive
GuyLor/google-research
229e27ff7cf1c838736704537e3636defa710200
083ccfb249a3e2bfc49a9d56f7d2b7aae42e8c2c
refs/heads/master
2023-09-05T16:28:26.720671
2021-11-16T06:17:56
2021-11-16T06:17:56
428,534,818
0
0
Apache-2.0
2021-11-16T05:56:17
2021-11-16T05:56:16
null
UTF-8
Python
false
false
6,971
py
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utilities and common steps for model export."""

import os
from typing import Any, Dict, List, Optional

from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf

from non_semantic_speech_benchmark.data_prep import data_prep_utils
from non_semantic_speech_benchmark.distillation import frontend_lib
from non_semantic_speech_benchmark.distillation import models
from non_semantic_speech_benchmark.distillation.compression_lib import compression_op as compression
from non_semantic_speech_benchmark.distillation.compression_lib import compression_wrapper


def get_experiment_dirs(experiment_dir):
  """Returns a list of experiment directories.

  NOTE: This assumes that only folders with hyperparams in their name occur
  in the working dict.

  Args:
    experiment_dir: Base for all directories.

  Returns:
    List of specific experiment subdirs.
  """
  if not tf.io.gfile.exists(experiment_dir):
    raise ValueError(f'Experiment dir doesn\'t exist: {experiment_dir}')
  experiment_dirs = [f for f in tf.io.gfile.listdir(experiment_dir)
                     if tf.io.gfile.isdir(os.path.join(experiment_dir, f))]
  return experiment_dirs


def get_params(experiment_dir_str):
  """Extracts hyperparams from experiment directory string.

  Args:
    experiment_dir_str: The folder-name for the set of hyperparams. Eg:
      '1-al=1.0,ap=False,lr=0.0001,ms=small,tbs=512'

  Returns:
    A dict mapping param key (str) to eval'ed value (float/eval/string).
  """
  parsed_params = {}
  start_idx = experiment_dir_str.find('-') + 1
  for kv in experiment_dir_str[start_idx:].split(','):
    cur_split = kv.split('=')
    if len(cur_split) != 2:
      raise ValueError(f'Folder doesn\'t split properly: {kv}')
    key, value = cur_split
    try:
      value = eval(value)  # pylint: disable=eval-used
    except:  # pylint: disable=bare-except
      pass
    parsed_params[key] = value
  return parsed_params


def get_default_compressor():
  compression_params = compression.CompressionOp.get_default_hparams().parse('')
  compressor = compression_wrapper.get_apply_compression(
      compression_params, global_step=0)
  return compressor


def get_model(checkpoint_folder_path,
              params,
              tflite_friendly,
              checkpoint_number=None,
              include_frontend=False):
  """Given folder & training params, exports SavedModel without frontend."""
  # Optionally override frontend flags from
  # `non_semantic_speech_benchmark/export_model/tf_frontend.py`
  override_flag_names = ['frame_hop', 'n_required', 'num_mel_bins',
                         'frame_width']
  for flag_name in override_flag_names:
    if flag_name in params:
      setattr(flags.FLAGS, flag_name, params[flag_name])

  static_model = models.get_keras_model(
      params['mt'],
      bottleneck_dimension=None,
      output_dimension=1024,
      truncate_output=params['tr'] if 'tr' in params else False,
      frontend=include_frontend,
      compressor=None,
      tflite=tflite_friendly)
  checkpoint = tf.train.Checkpoint(model=static_model)
  if checkpoint_number:
    checkpoint_to_load = os.path.join(
        checkpoint_folder_path, f'ckpt-{checkpoint_number}')
    assert tf.train.load_checkpoint(checkpoint_to_load)
  else:
    checkpoint_to_load = tf.train.latest_checkpoint(checkpoint_folder_path)
  checkpoint.restore(checkpoint_to_load).expect_partial()
  return static_model


def convert_tflite_model(model, quantize, model_path):
  """Uses TFLiteConverter to convert a Keras Model.

  Args:
    model: Keras model obtained from get_tflite_friendly_model.
    quantize: Whether to quantize TFLite model using dynamic quantization. See:
      https://www.tensorflow.org/lite/performance/post_training_quant
    model_path: Path for TFLite file.
  """
  converter = tf.lite.TFLiteConverter.from_keras_model(model)
  converter.target_spec.supported_ops = [
      tf.lite.OpsSet.TFLITE_BUILTINS,  # enable TensorFlow Lite ops.
      # There is a GatherV2 op in the frontend that isn't supported by TFLite
      # as a builtin op. (It works as a TFLite builtin only if the sample size
      # to the frontend is a constant.)
      # However, TFLite supports importing some relevant operators from TF,
      # at the cost of binary size (~ a few MB).
      # See: https://www.tensorflow.org/lite/guide/ops_select
      # NOTE: This has no effect on the model/binary size if the graph does
      # not require the extra TF ops (for example, for the no-frontend
      # version).
      tf.lite.OpsSet.SELECT_TF_OPS  # enable TensorFlow ops.
  ]
  if quantize:
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
  tflite_buffer = converter.convert()

  with tf.io.gfile.GFile(model_path, 'wb') as f:
    f.write(tflite_buffer)
  logging.info('Exported TFLite model to %s.', model_path)


def sanity_check(include_frontend,
                 model_path,
                 embedding_dim,
                 tflite,
                 n_required=None,
                 frame_width=None,
                 num_mel_bins=None):
  """Sanity check model by running dummy inference."""
  n_required = n_required or flags.FLAGS.n_required
  frame_width = frame_width or flags.FLAGS.frame_width
  num_mel_bins = num_mel_bins or flags.FLAGS.num_mel_bins

  if include_frontend:
    input_shape = (1, 2 * n_required)
    expected_output_shape = (7, embedding_dim)
  else:
    feats_inner_dim = frontend_lib.get_frontend_output_shape()[0] * frame_width
    input_shape = (1, feats_inner_dim, num_mel_bins, 1)
    expected_output_shape = (1, embedding_dim)
  logging.info('Input shape: %s. Expected output shape: %s', input_shape,
               expected_output_shape)
  model_input = np.zeros(input_shape, dtype=np.float32)

  if tflite:
    logging.info('Building tflite interpreter...')
    interpreter = data_prep_utils.build_tflite_interpreter(model_path)
    logging.info('Running inference...')
    output = data_prep_utils.samples_to_embedding_tflite(
        model_input, sample_rate=16000, interpreter=interpreter,
        output_key='0', name='sanity_check')
  else:
    logging.info('Loading and running inference with SavedModel...')
    model = tf.saved_model.load(model_path)
    output = model(model_input)['embedding'].numpy()
  np.testing.assert_array_equal(output.shape, expected_output_shape)
  logging.info('Model "%s" worked.', model_path)
[ "copybara-worker@google.com" ]
copybara-worker@google.com
4a0f4b497cb62c2567f2afacb29986f97a64d2b4
ddea930392ac5360b21e9043b620e703a9ccb31c
/tfx/components/transform/component.py
7cb900078da95ed33cbe2fdf9bd9a465b5e9a56e
[ "Apache-2.0" ]
permissive
Ark-kun/tfx
9c82b688776c80b2435bbb6154476526e8525ec8
f685f0387bd145316f43ceb484e64f893e749dcb
refs/heads/master
2021-07-25T05:58:15.168607
2020-05-22T01:07:44
2020-05-22T01:08:18
180,868,735
0
0
Apache-2.0
2019-04-11T20:01:57
2019-04-11T20:01:57
null
UTF-8
Python
false
false
6,330
py
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX Transform component definition."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from typing import Optional, Text, Union

import absl

from tfx import types
from tfx.components.base import base_component
from tfx.components.base import executor_spec
from tfx.components.transform import executor
from tfx.orchestration import data_types
from tfx.types import artifact
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import TransformSpec


class Transform(base_component.BaseComponent):
  """A TFX component to transform the input examples.

  The Transform component wraps TensorFlow Transform (tf.Transform) to
  preprocess data in a TFX pipeline. This component will load the
  preprocessing_fn from input module file, preprocess both 'train' and 'eval'
  splits of input examples, generate the `tf.Transform` output, and save both
  transform function and transformed examples to orchestrator desired
  locations.

  ## Providing a preprocessing function
  The TFX executor will use the estimator provided in the `module_file` file
  to train the model. The Transform executor will look specifically for the
  `preprocessing_fn()` function within that file.

  An example of `preprocessing_fn()` can be found in the
  [user-supplied code](https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py)
  of the TFX Chicago Taxi pipeline example.

  ## Example
  ```
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=infer_schema.outputs['schema'],
      module_file=module_file)
  ```

  Please see https://www.tensorflow.org/tfx/transform for more details.
  """

  SPEC_CLASS = TransformSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(
      self,
      examples: types.Channel = None,
      schema: types.Channel = None,
      module_file: Optional[Union[Text, data_types.RuntimeParameter]] = None,
      preprocessing_fn: Optional[Union[Text,
                                       data_types.RuntimeParameter]] = None,
      transform_graph: Optional[types.Channel] = None,
      transformed_examples: Optional[types.Channel] = None,
      input_data: Optional[types.Channel] = None,
      instance_name: Optional[Text] = None,
      enable_cache: Optional[bool] = None):
    """Construct a Transform component.

    Args:
      examples: A Channel of type `standard_artifacts.Examples` (required).
        This should contain the two splits 'train' and 'eval'.
      schema: A Channel of type `standard_artifacts.Schema`. This should
        contain a single schema artifact.
      module_file: The file path to a python module file, from which the
        'preprocessing_fn' function will be loaded. The function must have the
        following signature.

        def preprocessing_fn(inputs: Dict[Text, Any]) -> Dict[Text, Any]:
          ...

        where the values of input and returned Dict are either tf.Tensor or
        tf.SparseTensor. Exactly one of 'module_file' or 'preprocessing_fn'
        must be supplied.
      preprocessing_fn: The path to python function that implements a
        'preprocessing_fn'. See 'module_file' for expected signature of the
        function. Exactly one of 'module_file' or 'preprocessing_fn' must be
        supplied.
      transform_graph: Optional output 'TransformPath' channel for output of
        'tf.Transform', which includes an exported Tensorflow graph suitable
        for both training and serving;
      transformed_examples: Optional output 'ExamplesPath' channel for
        materialized transformed examples, which includes both 'train' and
        'eval' splits.
      input_data: Backwards compatibility alias for the 'examples' argument.
      instance_name: Optional unique instance name. Necessary iff multiple
        transform components are declared in the same pipeline.
      enable_cache: Optional boolean to indicate if cache is enabled for the
        Transform component. If not specified, defaults to the value specified
        for pipeline's enable_cache parameter.

    Raises:
      ValueError: When both or neither of 'module_file' and 'preprocessing_fn'
        is supplied.
    """
    if input_data:
      absl.logging.warning(
          'The "input_data" argument to the Transform component has '
          'been renamed to "examples" and is deprecated. Please update your '
          'usage as support for this argument will be removed soon.')
      examples = input_data
    if bool(module_file) == bool(preprocessing_fn):
      raise ValueError(
          "Exactly one of 'module_file' or 'preprocessing_fn' must be supplied."
      )

    transform_graph = transform_graph or types.Channel(
        type=standard_artifacts.TransformGraph,
        artifacts=[standard_artifacts.TransformGraph()])
    if not transformed_examples:
      example_artifact = standard_artifacts.Examples()
      example_artifact.split_names = artifact_utils.encode_split_names(
          artifact.DEFAULT_EXAMPLE_SPLITS)
      transformed_examples = types.Channel(
          type=standard_artifacts.Examples, artifacts=[example_artifact])
    spec = TransformSpec(
        examples=examples,
        schema=schema,
        module_file=module_file,
        preprocessing_fn=preprocessing_fn,
        transform_graph=transform_graph,
        transformed_examples=transformed_examples)
    super(Transform, self).__init__(
        spec=spec, instance_name=instance_name, enable_cache=enable_cache)
[ "tensorflow-extended-team@google.com" ]
tensorflow-extended-team@google.com
c7c37367f4842a662f51398fc768a9d153243f39
159fddadea70761e5fa15ecc15ab68342958d088
/tours/migrations/0039_auto_20190923_2153.py
60ceae4908da7ea110c38c75959b195ff22bd085
[]
no_license
lalit1796/mytrip
56a2bcdaa70ffe1234025e3b0599c53d4633462c
2b8ab34d16960ef228adb2458e5b4bd0213ee923
refs/heads/master
2023-08-08T01:07:30.487038
2021-09-04T14:04:42
2021-09-04T14:04:42
402,809,166
0
0
null
null
null
null
UTF-8
Python
false
false
415
py
# Generated by Django 2.2.2 on 2019-09-23 16:23

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('tours', '0038_auto_20190923_2151'),
    ]

    operations = [
        migrations.AlterField(
            model_name='package',
            name='uid',
            field=models.CharField(default='--uid--', max_length=200, unique=True),
        ),
    ]
[ "lalitsingh1796@gmail.com" ]
lalitsingh1796@gmail.com
d41f85e2c73f965a8555cd3f6da0967a0e321fde
5cc4a73d6fb144d72e74b07a10b60fc36bfe50ec
/videos/api/serializers.py
462b50c2b48ff27afdefbc67ced6a79e795bd6b4
[]
no_license
pedrofolch/digitalsoil
79d9497dcbb54df3c7df64f9da35d71d592fe580
7b6d1ffd34e991cf87c91342e5336a97fa1cf59b
refs/heads/master
2022-12-11T00:47:01.728729
2019-04-11T03:34:12
2019-04-11T03:34:12
120,937,159
0
0
null
2022-12-08T04:58:09
2018-02-09T17:49:10
CSS
UTF-8
Python
false
false
1,113
py
from rest_framework import serializers

from videos.models import Video


class VideoSerializer(serializers.ModelSerializer):
    uri = serializers.SerializerMethodField(read_only=True)

    class Meta:
        model = Video
        fields = [
            'uri',
            'pk',
            'title',
            'embed_code',
            'share_message',
            'order',
            'tags',
            'slug',
            'active',
            'featured',
            'free_preview',
            'category',
            'timestamp',
            'updated'
        ]
        read_only_fields = ['user', 'order', ]

    def get_uri(self, obj):
        request = self.context.get('request')
        return obj.get_api_url(request=request)

    def validate_title(self, value):
        """We want the title to be unique"""
        qs = Video.objects.filter(title__iexact=value)
        # including instance
        if self.instance:
            qs = qs.exclude(pk=self.instance.pk)
        if qs.exists():
            raise serializers.ValidationError("This title has already been used")
        return value
[ "pedro.folch@gmail.com" ]
pedro.folch@gmail.com
6d640e9621785207c8da853c9f502cf40e5f4c34
74a30d76f49051ec0200a847fe83431a9501f3d7
/address_func.py
a5b6612a9078be5ba1d28b66d635386fd78f59b7
[]
no_license
Milziade/nano_dash
97e827ac24c5e8f3d97e8de69e151afd4d7d8fac
00618fa1f6d425b63f6ada625569eadda69a0176
refs/heads/master
2023-05-01T02:36:51.595622
2021-05-12T14:06:07
2021-05-12T14:06:07
362,456,210
1
0
null
null
null
null
UTF-8
Python
false
false
2,908
py
import requests
import pandas as pd
import random
import colors


def get_json(nano_address):
    action = {
        'action': 'account_history',
        'account': nano_address,
        'count': '-1'
    }
    r = requests.post('https://mynano.ninja/api/node', json=action).json()
    if 'error' in r:
        return False
    return r['history']


def get_df(history):
    df = pd.DataFrame().from_dict(history)
    df = df.replace('nano_3kwppxjcggzs65fjh771ch6dbuic3xthsn5wsg6i5537jacw7m493ra8574x', 'FreeNanoFaucet.com')
    df = df.replace('nano_34prihdxwz3u4ps8qjnn14p7ujyewkoxkwyxm3u665it8rg5rdqw84qrypzk', 'nano-faucet.org')
    df = df.replace('nano_3pg8khw8gs94c1qeq9741n99ubrut8sj3n9kpntim1rm35h4wdzirofazmwt', 'nano.trade')
    df = df.replace('nano_1tyd79peyzk4bs5ok1enb633dqsrxou91k7y4zzo1oegw4s75bokmj1pey4s', 'Apollo Faucet')
    df['amount'] = [int(i) / 10**30 for i in df['amount']]
    df['local_timestamp'] = pd.to_datetime(df['local_timestamp'], unit='s')
    del df['hash']
    del df['height']
    return df.to_dict('records'), df.columns.values


def get_balance(nano_address):
    action = {
        "action": "account_info",
        "account": nano_address
    }
    r = requests.post('https://mynano.ninja/api/node', json=action).json()
    return int(r['balance']) / 10**30


def pie_chart(df):
    receive_acc = list(set(item['account'] for item in df if item['type'] == 'receive'))
    amount_receive = {i: 0 for i in receive_acc}
    send_acc = list(set(item['account'] for item in df if item['type'] == 'send'))
    amount_send = {i: 0 for i in send_acc}
    for d in df:
        key = d['account']
        if key in amount_receive:
            amount_receive[key] += d['amount']
        else:
            amount_send[key] += d['amount']
    return list(amount_receive.keys()), list(amount_receive.values()), \
        list(amount_send.keys()), list(amount_send.values())


def balance_over_time(df: dict):
    # Collect the time, transaction type, and amount of each entry
    time = list([item['type'], item['amount'], str(item['local_timestamp']).split()[0]] for item in df)
    # If the account sends money, the amount counts negatively towards the balance
    for item in time:
        if item[0] == 'send':
            item[1] = -item[1]
    time_n = [time[i][2] for i in range(len(time))]  # Dates and times of each transaction
    insta_bal = [time[i][1] for i in range(len(time))]
    overall_bal = [0]  # Cumulative balance
    for i in range(len(time)):
        x = time[-1-i][1] + overall_bal[-1]
        overall_bal.append(x)
    overall_bal.pop(0)
    return overall_bal, list(reversed(time_n)), list(reversed(insta_bal))


def get_colors(n):
    # creates n different colors
    colors_list = []
    for i in range(n):
        cols = colors.colors
        col = random.choice(cols)
        #cols.remove(col)
        colors_list.append(col)
    return colors_list
[ "you@example.com" ]
you@example.com
9cfd5d850bcc7982efa80f394dbca752ca768af5
39c7f0955e0247bbe34ec0f2a4a7e2d3294dc0df
/deployment-scripts/scripts/infrastructure/openstack/openstack-create-instance.py
2426dac27ae7ea1e05036985173780e9337887f9
[]
no_license
marjancek/Showcase
df9eac6b7c32d2209b6ffac3f80d6c6d92c5c1d3
eced27c10b7b9d5e4b10d296661e33cb0375a5fa
refs/heads/master
2021-01-15T09:08:48.146581
2014-11-19T12:13:16
2014-11-19T12:13:16
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,061
py
from novaclient.v1_1 import client as novaclient

from common.Cloudscale import *
from scripts.common.Cloudscale import check_args, parse_args


class CreateInstance:
    def __init__(self, cfg):
        self.cfg = cfg
        self.user = cfg.get('OPENSTACK', 'username')
        self.pwd = cfg.get('OPENSTACK', 'password')
        self.url = cfg.get('OPENSTACK', 'auth_url')
        self.tenant = cfg.get('OPENSTACK', 'tenant_name')
        self.image_name = cfg.get('OPENSTACK', 'image_name')
        server = self.create_instance()
        print [s['addr'] for s in server.addresses[self.tenant] if s['OS-EXT-IPS:type'] == 'floating'][0]

    def create_instance(self):
        nc = novaclient.Client(self.user, self.pwd, self.tenant, auth_url=self.url)
        for f in nc.flavors.list():
            print f
        for server in nc.servers.list():
            if server._info['name'] == self.instance_name:
                return server


if __name__ == '__main__':
    check_args(1, "<config_path>")
    _, cfg, _, _ = parse_args()
    CreateInstance(cfg)
[ "simon.ivansek@xlab.si" ]
simon.ivansek@xlab.si
43d1732932045b85378fb924ea0c306a767dc816
93090ffc3ccaf142a0c739e00c28486175220373
/04-day/main.py
96482f4046784b840f4b1a090c566af8992a3b4e
[ "MIT" ]
permissive
timbook/advent-of-code-2020
f17b89fb1ccb5fe449b4864cca64a95f527c6782
3b1c107a67b9f31891e77a258e45aee76fac4e47
refs/heads/main
2023-01-25T01:19:02.520349
2020-12-17T20:31:31
2020-12-17T20:31:31
317,431,021
0
0
null
null
null
null
UTF-8
Python
false
false
2,161
py
import re
from functools import reduce

raw = open('input.txt', 'r').read().split('\n\n')

class Passport:
    required_fields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']

    def __init__(self, entry):
        fields = re.split('\s', entry.strip())
        self.fields = {field.split(':')[0]: field.split(':')[1] for field in fields}

    def is_valid_a(self):
        return all(req in self.fields for req in self.required_fields)

    def is_valid_b(self):
        return self.is_valid_a() and reduce(
            lambda a, b: a and b,
            [
                self.is_valid_byr(),
                self.is_valid_iyr(),
                self.is_valid_eyr(),
                self.is_valid_hgt(),
                self.is_valid_hcl(),
                self.is_valid_ecl(),
                self.is_valid_pid()
            ]
        )

    def is_valid_byr(self):
        byr = self.fields['byr']
        return re.match('\d{4}', byr) and (1920 <= int(byr) <= 2002)

    def is_valid_iyr(self):
        iyr = self.fields['iyr']
        return re.match('\d{4}', iyr) and (2010 <= int(iyr) <= 2020)

    def is_valid_eyr(self):
        eyr = self.fields['eyr']
        return re.match('\d{4}', eyr) and (2020 <= int(eyr) <= 2030)

    def is_valid_hgt(self):
        hgt = self.fields['hgt'][:-2]
        unit = self.fields['hgt'][-2:]
        if unit == 'cm':
            return re.match('\d+', hgt) and (150 <= int(hgt) <= 193)
        elif unit == 'in':
            return re.match('\d+', hgt) and (59 <= int(hgt) <= 76)
        else:
            return False

    def is_valid_hcl(self):
        hcl = self.fields['hcl']
        return bool(re.match('#[0-9a-f]{6}', hcl))

    def is_valid_ecl(self):
        return self.fields['ecl'] in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']

    def is_valid_pid(self):
        return bool(re.match('^\d{9}$', self.fields['pid']))

passports = [Passport(line) for line in raw]

n_valid = sum(ppt.is_valid_a() for ppt in passports)
print(f"A :: Number of valid passports: {n_valid}")

# 199 too high
n_valid = sum(ppt.is_valid_b() for ppt in passports)
print(f"B :: Number of valid passports: {n_valid}")
[ "timothykbook@gmail.com" ]
timothykbook@gmail.com
5d028cc1080c4c3e46b7d4d114f8177121ac2c6d
d16c15f2fb433c6b5874ea49ff5db89315e9711d
/benchmarks/comparison-with-addict.py
2f6c181632636f0ac48d33a840027e004ad8d8d9
[ "MIT" ]
permissive
vltr/middle
f9e16735beaccafdc7535271418dd9a8f6b22706
f7782610fbb1d9232a3b4cfea057a9331db2775e
refs/heads/develop
2023-08-17T20:35:02.878999
2019-06-18T15:44:46
2019-06-18T15:44:46
138,063,484
11
2
MIT
2023-09-04T21:27:09
2018-06-20T17:15:26
Python
UTF-8
Python
false
false
4,561
py
import cProfile
import sys

from enum import Enum
from enum import IntEnum
from enum import unique

from typing import Dict
from typing import List
from typing import Set

# --------------------------------------------------------------- #
# Import boilerplate
# --------------------------------------------------------------- #

try:
    import timy
    import middle
    from addict import Dict as ADict
except ImportError:
    print(
        "To run this script, you must install these dependencies:",
        file=sys.stderr,
    )
    print("- addict", file=sys.stderr)
    print("- middle", file=sys.stderr)
    print("- timy", file=sys.stderr)
    sys.exit(1)

# --------------------------------------------------------------- #
# Fixed variables
# --------------------------------------------------------------- #

TOTAL_LOOPS = 1_000_000

if "short" in sys.argv:
    TOTAL_LOOPS = 1

# --------------------------------------------------------------- #
# Enum definition
# --------------------------------------------------------------- #

@unique
class PlatformEnum(str, Enum):
    XBOX1 = "XBOX1"
    PLAYSTATION4 = "PLAYSTATION4"
    PC = "PC"

@unique
class LanguageEnum(IntEnum):
    ENGLISH = 1
    JAPANESE = 2
    SPANISH = 3
    GERMAN = 4
    PORTUGUESE = 5

@unique
class CityRegionEnum(str, Enum):
    TROPICAL = "TROPICAL"
    TEMPERATE = "TEMPERATE"
    BOREAL = "BOREAL"

# --------------------------------------------------------------- #
# middle model definition
# --------------------------------------------------------------- #

class MiddleCity(middle.Model):
    name: str = middle.field()
    region: CityRegionEnum = middle.field()

class MiddleGame(middle.Model):
    name: str = middle.field()
    platform: PlatformEnum = middle.field()
    score: float = middle.field()
    resolution_tested: str = middle.field()
    genre: List[str] = middle.field()
    rating: Dict[str, float] = middle.field()
    players: Set[str] = middle.field()
    language: LanguageEnum = middle.field()
    awesome_city: MiddleCity = middle.field()

# --------------------------------------------------------------- #
# Test variable
# --------------------------------------------------------------- #

MODEL_INSTANCE = {
    "name": "Cities: Skylines",
    "platform": "PC",
    "score": 9.0,
    "resolution_tested": "1920x1080",
    "genre": ["Simulators", "City Building"],
    "rating": {"IGN": 8.5, "Gamespot": 8.0, "Steam": 4.5},
    "players": ["Flux", "strictoaster"],
    "language": 1,
    "awesome_city": {"name": "Blumenau", "region": "TEMPERATE"},
}

# --------------------------------------------------------------- #
# Test runnable
# --------------------------------------------------------------- #

def test_addict():
    game = ADict(MODEL_INSTANCE)
    assert isinstance(game.name, str)
    assert isinstance(game.platform, str)
    assert isinstance(game.score, float)
    assert isinstance(game.resolution_tested, str)
    assert isinstance(game.genre, list)
    assert isinstance(game.rating, dict)
    assert isinstance(game.players, list)
    assert isinstance(game.language, int)
    assert isinstance(game.awesome_city, dict)
    assert isinstance(game.awesome_city.name, str)
    assert isinstance(game.awesome_city.region, str)

def test_middle():
    game = MiddleGame(**MODEL_INSTANCE)
    assert isinstance(game, MiddleGame)
    assert isinstance(game.name, str)
    assert isinstance(game.platform, PlatformEnum)
    assert isinstance(game.score, float)
    assert isinstance(game.resolution_tested, str)
    assert isinstance(game.genre, list)
    assert isinstance(game.rating, dict)
    assert isinstance(game.players, set)
    assert isinstance(game.language, LanguageEnum)
    assert isinstance(game.awesome_city, MiddleCity)
    assert isinstance(game.awesome_city.name, str)
    assert isinstance(game.awesome_city.region, CityRegionEnum)

# --------------------------------------------------------------- #
# Run tests
# --------------------------------------------------------------- #

def main():
    if "profile" in sys.argv:
        cProfile.run(
            "for i in range({}): test_addict()".format(TOTAL_LOOPS),
            sort="tottime",
        )
        cProfile.run(
            "for i in range({}): test_middle()".format(TOTAL_LOOPS),
            sort="tottime",
        )
    else:
        timy.timer(ident="addict", loops=TOTAL_LOOPS)(test_addict).__call__()
        timy.timer(ident="middle", loops=TOTAL_LOOPS)(test_middle).__call__()

if __name__ == "__main__":
    main()
[ "rkuesters@gmail.com" ]
rkuesters@gmail.com
8d00346ccd82c5a105df92d54bf196574f2e3de6
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
/all-gists/9c113f9b86aece3efdf169af67c4e436/snippet.py
0262b68437c1a2c6ae2c332e575f4c5626c155a7
[ "MIT" ]
permissive
gistable/gistable
26c1e909928ec463026811f69b61619b62f14721
665d39a2bd82543d5196555f0801ef8fd4a3ee48
refs/heads/master
2023-02-17T21:33:55.558398
2023-02-11T18:20:10
2023-02-11T18:20:10
119,861,038
76
19
null
2020-07-26T03:14:55
2018-02-01T16:19:24
Python
UTF-8
Python
false
false
288
py
def main():
    n, k = [int(i) for i in input().split(' ')]
    a = [1] * (n + 1)
    for i in range(1, min(k, n + 1)):
        a[i] = a[i-1] + a[i-1]
    if k <= n:
        a[k] = sum([a[j] for j in range(k)])
    for i in range(k + 1, n + 1):
        a[i] = a[i-1] + a[i-1] - a[i-k-1]
    print(str(a[n]))

main()
[ "gistshub@gmail.com" ]
gistshub@gmail.com
6f30e11fcb2ba4ecc7185185027b62f16a44e4c6
be5ea20226c37d81f1ccb2f704d8825d36e88765
/01. Defining classes/Exercise/01_car.py
00078b2c67447f092dffe365c113ef2ae8c2b86b
[]
no_license
dimDamyanov/PythonOOP
3845e450e5a48fef4f70a186664e07c0cd60e09b
723204f5b7e953874fac9314e48eb1d1628d6ff5
refs/heads/main
2023-04-07T18:00:36.735248
2021-04-19T20:57:14
2021-04-19T20:57:14
341,329,346
0
0
null
null
null
null
UTF-8
Python
false
false
316
py
class Car:
    def __init__(self, name: str, model: str, engine: str):
        self.name = name
        self.model = model
        self.engine = engine

    def get_info(self):
        return f'This is {self.name} {self.model} with engine {self.engine}'


car = Car("Kia", "Rio", "1.3L B3 I4")
print(car.get_info())
[ "dim.damianov@gmail.com" ]
dim.damianov@gmail.com
06ab40e0b88490e56e9bd9ac37b30153986def03
09d7c902e35df7eec3d3da192f0aaa47739540a5
/user_portrait/cron/flow4/zmq_work_weibo_flow4.py
241ba690235543b12bd7c89723eb1de326fac4c5
[]
no_license
yuwendong/user_portrait
2a9604ea5389f9410aae98acad11742454c36d6e
1b2cd78c91a7154c3e360a90d8426b53b68b4453
refs/heads/master
2020-12-24T16:41:06.469723
2015-10-15T14:32:26
2015-10-15T14:32:26
38,371,050
1
0
null
2015-07-01T12:54:02
2015-07-01T12:54:01
null
UTF-8
Python
false
false
5,036
py
# -*- coding=utf-8 -*-
import re
import sys
import zmq
import time
import json
import math
from datetime import datetime

reload(sys)
sys.path.append('../../')
from time_utils import ts2datetime, datetime2ts
from global_utils import R_CLUSTER_FLOW2 as r_cluster
from global_config import ZMQ_VENT_PORT_FLOW4, ZMQ_CTRL_VENT_PORT_FLOW4,\
        ZMQ_VENT_HOST_FLOW1, ZMQ_CTRL_HOST_FLOW1
from global_config import SENSITIVE_WORDS_PATH

f = open(SENSITIVE_WORDS_PATH, 'rb')
# test
#f = open('/home/ubuntu8/huxiaoqian/user_portrait/user_portrait/cron/flow2/zz.txt', 'rb')

def load_sensitive_words():
    ZZ_WORD = []
    for line in f:
        line_list = line.split('=')
        word = line_list[0]
        ZZ_WORD.append(word.decode('utf-8'))
    f.close()
    return ZZ_WORD

SENSITIVE_WORD = load_sensitive_words()
print 'sensitive_word:', SENSITIVE_WORD

def cal_text_work(item):
    uid = item['uid']
    timestamp = item['timestamp']
    date = ts2datetime(timestamp)
    ts = datetime2ts(date)
    #print 'ts:', date, ts
    text = item['text']
    if isinstance(text, str):
        text = text.decode('utf-8', 'ignore')
    RE = re.compile(u'#([a-zA-Z-_⺀-⺙⺛-⻳⼀-⿕々〇〡-〩〸-〺〻㐀-䶵一-鿃豈-鶴侮-頻並-龎]+)#', re.UNICODE)
    hashtag_list = RE.findall(text)
    if hashtag_list:
        # these all use unicode
        hashtag_dict = dict()
        for hashtag in hashtag_list:
            try:
                hashtag_dict[hashtag] += 1
            except:
                hashtag_dict[hashtag] = 1
        try:
            hashtag_count_string = r_cluster.hget('hashtag_'+str(ts), str(uid))
            #print 'key:hashtag_'+ str(ts)
            #print 'hget hashtag result:', hashtag_count_string
            hashtag_count_dict = json.loads(hashtag_count_string)
            for hashtag in hashtag_dict:
                count = hashtag_dict[hashtag]
                try:
                    hashtag_count_dict[hashtag] += count
                except:
                    hashtag_count_dict[hashtag] = count
            #print 'hashtag_count_dict:', hashtag_count_dict
            r_cluster.hset('hashtag_'+str(ts), str(uid), json.dumps(hashtag_count_dict))
        except:
            #print 'hash_dict:', hashtag_dict
            r_cluster.hset('hashtag_'+str(ts), str(uid), json.dumps(hashtag_dict))

def cal_text_sensitive(item):
    text = item['text']
    uid = item['uid']
    timestamp = item['timestamp']
    date = ts2datetime(timestamp)
    ts = datetime2ts(date)
    if isinstance(text, str):
        text = text.decode('utf-8', 'ignore')
    sensitive_result = [word for word in SENSITIVE_WORD if word in text]
    if sensitive_result:
        sensitive_dict = dict()
        for word in sensitive_result:
            try:
                sensitive_dict[word] += 1
            except:
                sensitive_dict[word] = 1
        #print 'sensitive_dict:', sensitive_dict
        try:
            sensitive_count_string = r_cluster.hget('sensitive_'+str(ts), str(uid))
            #print 'key:sensitive_', str(ts)
            #print 'hget sensitive result:', sensitive_count_string
            sensitive_count_dict = json.loads(sensitive_count_string)
            for word in sensitive_dict:
                count = sensitive_dict[word]
                try:
                    sensitive_count_dict[word] += count
                except:
                    sensitive_count_dict[word] = count
            #print 'sensitive_count_dict:', sensitive_count_dict
            r_cluster.hset('sensitive_'+str(ts), str(uid), json.dumps(sensitive_count_dict))
        except:
            #print 'sensitive:', sensitive_dict
            r_cluster.hset('sensitive_'+str(ts), str(uid), json.dumps(sensitive_dict))

if __name__ == "__main__":
    """
    receive weibo
    """
    context = zmq.Context()
    receiver = context.socket(zmq.PULL)
    receiver.connect('tcp://%s:%s' % (ZMQ_VENT_HOST_FLOW1, ZMQ_VENT_PORT_FLOW4))
    controller = context.socket(zmq.SUB)
    controller.connect("tcp://%s:%s" % (ZMQ_VENT_HOST_FLOW1, ZMQ_CTRL_VENT_PORT_FLOW4))
    count = 0
    tb = time.time()
    ts = tb
    while 1:
        try:
            item = receiver.recv_json()
        except Exception, e:
            print Exception, ":", e
        if not item:
            continue
        if item['sp_type'] == '1':
            try:
                if item and (item['message_type'] == 1 or item['message_type'] == 3):
                    cal_text_work(item)
                    cal_text_sensitive(item)
            except:
                pass
        count += 1
        if count % 10000 == 0:
            te = time.time()
            print '[%s] cal speed: %s sec/per %s' % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'), te - ts, 10000)
            #if count % 100000 == 0:
            #    print '[%s] total cal %s, cost %s sec [avg %s per/sec]' % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'), count, te - tb, count / (te - tb))
            ts = te
[ "1257819385@qq.com" ]
1257819385@qq.com
324d2ab09e9924efc6e877be13f4cdfe40095ef3
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p03495/s459851114.py
65115c84d9ff5de8c900d6c54330919f477407fb
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
218
py
# arc086_a.py
import collections

N, K = map(int, input().split())
A = list(map(int, input().split()))

C = collections.Counter(A)
C = sorted(C.values())
length = len(C)
# print(length, K, C)
print(sum(C[:(length - K)]))
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
2df623dd9db20cd2aa0eb30f1334f2ffc828c00c
a47e5a1565b8b4a23010020fa6ed4225459845e7
/marubatsu1.py
3085d8d5bc8fb46f0a2622392e8575a84bd52493
[]
no_license
katuhito/workspace13
0a4832e510c5f59a48aa0262985bcda91c01908d
16e55c5e6f32e009ff27c9a22315916175303970
refs/heads/master
2023-03-08T08:48:28.602697
2021-03-07T01:07:41
2021-03-07T01:07:41
335,497,536
0
0
null
null
null
null
UTF-8
Python
false
false
811
py
# Tic-tac-toe (three in a row)
import random

goal = [
    0b111000000, 0b000111000, 0b000000111,
    0b100100100, 0b010010010, 0b001001001,
    0b100010001, 0b001010100
]

# Check whether three pieces are lined up
def check(player):
    for mask in goal:
        if player & mask == mask:
            return True
    return False

# Players place pieces in turn
def play(p1, p2):
    if check(p2):  # If three are lined up, print the boards and stop
        print([bin(p1), bin(p2)])
        return
    board = p1 | p2
    if board == 0b111111111:  # If every square is taken, end in a draw
        print([bin(p1), bin(p2)])
        return
    # Find the squares that are still open
    w = [i for i in range(9) if (board & (1 << i)) == 0]
    # Place on a randomly chosen open square
    r = random.choice(w)
    play(p2, p1 | (1 << r))  # Swap turns and search for the next move

play(0, 0)
[ "katuhitohara@gmail.com" ]
katuhitohara@gmail.com
a29f6993d7d12c4674aecc24f39f9aa52dc357fc
8b85b933041abac2879484c6280c1bf79f91358d
/moss/plotting.py
b2b3d28e4123da388554e57a623c5361d41b42e4
[ "BSD-3-Clause" ]
permissive
ghaseminya/moss
109a4c37d2ecd85bfaff03c563eded98d5296530
06a0ea862b9a6112921dcf5cf2a6d445d7f7e0dc
refs/heads/master
2020-12-30T14:44:33.504941
2017-05-09T17:24:28
2017-05-09T17:24:28
91,079,527
1
0
null
2017-05-12T10:20:17
2017-05-12T10:20:17
null
UTF-8
Python
false
false
338
py
import matplotlib.pyplot as plt


def grid_axes_labels(axes, xlabel=None, ylabel=None, **kws):
    plt.setp(axes.flat, xlabel="", ylabel="")
    if xlabel is not None:
        for ax in axes[-1]:
            ax.set_xlabel(xlabel, **kws)
    if ylabel is not None:
        for ax in axes[:, 0]:
            ax.set_ylabel(ylabel, **kws)
[ "mwaskom@stanford.edu" ]
mwaskom@stanford.edu
83a1f372106c9220a41955a816d98ee436bd4081
7e118b7f02275e7d1faf1e24e6bb1f6d1e173da5
/04_factory/pizza_store/lib/pizza.py
4bd3d2bd59b7f2f37614cabbd5b5c69869ede551
[ "MIT" ]
permissive
denzow/practice-design-pattern
e378a7d2d95585ab6e3c3a4b72f46c5faeb2b92b
141d59c51375e36769a73b6ff135a8afae64b664
refs/heads/master
2021-05-13T17:33:10.892543
2018-02-11T14:53:06
2018-02-11T14:53:06
116,826,619
1
0
null
null
null
null
UTF-8
Python
false
false
1,487
py
# coding: utf-8
from abc import ABC, abstractmethod


class Pizza(ABC):

    def __init__(self, ingredient_factory):
        """
        :param lib.pizza_factory.PizzaIngredientFactory ingredient_factory:
        """
        self._ingredient_factory = ingredient_factory
        self._name = None
        self._dough = None
        self._sauce = None
        self._veggies = []
        self._cheese = None
        self._pepperoni = None
        self._clam = None

    @abstractmethod
    def prepare(self):
        pass

    def bake(self):
        print('Bake for 25 minutes at 350 degrees')

    def cut(self):
        print('Cut the pizza into wedge-shaped slices')

    def box(self):
        print('Put the pizza in an official PizzaStore box')

    def set_name(self, name):
        self._name = name

    def get_name(self):
        return self._name


class CheesePizza(Pizza):

    def prepare(self):
        print('Preparing {}'.format(self._name))
        self._dough = self._ingredient_factory.create_dough()
        self._sauce = self._ingredient_factory.create_sauce()
        self._cheese = self._ingredient_factory.create_cheese()


class ClamPizza(Pizza):

    def prepare(self):
        print('Preparing {}'.format(self._name))
        self._dough = self._ingredient_factory.create_dough()
        self._sauce = self._ingredient_factory.create_sauce()
        self._cheese = self._ingredient_factory.create_cheese()
        self._clam = self._ingredient_factory.create_clam()
[ "denzow@gmail.com" ]
denzow@gmail.com
9ec342d11277ab3c3daa4867c1bb011eb93f1655
fc529d1d801d695150a6ebcd3e2d548ffa8d738d
/tests/test_parser.py
86ba3dbec713ac04cd077e2304299e0d3fd5199a
[]
permissive
JeanExtreme002/Virtual-Assistant
1db080cf3026a64918c0ebadd1727c29bb46205e
1444af964b21c6d043b1b8ccb23f34999c5fd81a
refs/heads/master
2021-07-25T05:54:58.886570
2021-07-09T05:35:31
2021-07-09T05:35:31
219,252,394
7
2
BSD-3-Clause
2020-09-22T16:01:20
2019-11-03T04:41:47
Python
UTF-8
Python
false
false
1,510
py
import os, sys, util

sys.path.append(os.getcwd())

from src.assistant.exec.commandList import CommandList
from src.assistant.exec.parser import VoiceCommandParser

command_list = CommandList("EN-US")
parser = VoiceCommandParser(command_list)


def test_parse_system_command():
    command_instance = parser.parse("repeat hello world")
    target_command = command_list.get_command_instance_by_voice_command("repeat")

    assert command_instance.system_command.lower() == "repeat"
    assert command_instance.args.lower() == "hello world"
    assert command_instance.info == target_command.info


def test_parse_user_command():
    voice_command, terminal_command = util.generate_random_user_command()
    command_list.set_user_command(voice_command, {"terminal_command": terminal_command})

    other_voice_command = voice_command + " and something more"
    other_terminal_command = terminal_command + " and something more"
    command_list.set_user_command(other_voice_command, {"terminal_command": other_terminal_command})

    argument1, argument2 = "a docile dog eating meet", "a cute cat sleeping"

    command_instance = parser.parse(voice_command + " " + argument1)
    other_command_instance = parser.parse(other_voice_command + " " + argument2)

    assert command_instance.terminal_command == terminal_command
    assert command_instance.args.lower() == argument1

    assert other_command_instance.terminal_command == other_terminal_command
    assert other_command_instance.args.lower() == argument2
[ "jeangamerextreme@gmail.com" ]
jeangamerextreme@gmail.com
7f418a52a6716871d15b6b8f3ba89cbaa3cd4da8
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p03138/s702162180.py
23360336ae7fa25d7e78a6efe78b883d1ec939af
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
434
py
import os, sys, re, math

N, K = list(map(int, input().split(' ')))
A = list(map(int, input().split(' ')))

d = math.ceil(math.log2(1e+12))
ones = [0 for _ in range(d)]
for a in A:
    s = bin(a)[::-1]
    for i in range(len(s) - 2):
        if s[i] == '1':
            ones[i] += 1

X = 0
for di in range(d - 1, -1, -1):
    if X + 2 ** di <= K and ones[di] <= N * 0.5:
        X += 2 ** di

ret = 0
for a in A:
    ret += X ^ a
print(ret)
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
efdd83f89865011b36433b262e2cd99fe684ad1c
6c10c6e229014dc3bf14efaec2ea8bf07c406752
/AILearning/OptimationDeepLearning/MinibatchSGD.py
e41b377b4f3ba2829d2a1b4188d25f78bd57522d
[]
no_license
GuyRobot/AIPythonExamples
e59c6edb355d9cadee2b3f19a087b1b656956262
4acdd0d4966e31a616910554bc075b641aa152df
refs/heads/master
2021-05-21T13:05:49.615593
2021-02-28T06:41:04
2021-02-28T06:41:04
252,662,467
0
0
null
null
null
null
UTF-8
Python
false
false
4,465
py
from d2l import AllDeepLearning as d2l
from mxnet import autograd, gluon, init, nd
from mxnet.gluon import nn
import numpy as np

"""
Mini batches

    w ← w − η_t * g_t   where   g_t = ∂_w f(x_t, w)

We can increase the computational efficiency of this operation by applying it
to a minibatch of observations at a time. That is, we replace the gradient
g_t over a single observation by one over a small batch

    g_t = (1/|B_t|) * ∂_w Σ_{i ∈ B_t} f(x_i, w)
"""


def get_data_ch11(batch_size=10, n=1500):
    data = np.genfromtxt("E:\\Python_Data\\airfoil_self_noise.dat",
                         dtype=np.float32)
    data = (data - data.mean(axis=0)) / data.std(axis=0)
    data_iter = d2l.load_array((data[:n, :-1], data[:n, -1]),
                               batch_size, is_train=True)
    return data_iter, data.shape[1] - 1


def sgd(params, states, hyper_params):
    for p in params:
        p[:] -= hyper_params['lr'] * p.grad


def train_ch11(trainer_fn, state, hyper_params, data_iter, feature_dim,
               num_epochs=2):
    w = nd.random.normal(scale=0.01, shape=(feature_dim, 1))
    b = nd.zeros(1)
    w.attach_grad()
    b.attach_grad()
    net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss
    animator = d2l.Animator(xlabel='epoch', ylabel='loss',
                            xlim=[0, num_epochs], ylim=[0.22, 0.32])
    n, timer = 0, d2l.Timer()
    for _ in range(num_epochs):
        for X, y in data_iter:
            with autograd.record():
                l = loss(net(X), y).mean()
            l.backward()
            trainer_fn([w, b], state, hyper_params)
            n += X.shape[0]
            if n % 200 == 0:
                timer.stop()
                animator.add(n / X.shape[0] / len(data_iter),
                             d2l.evaluate_loss(net, data_iter, loss))
                timer.start()
    print('loss: %.3f, %.3f sec/epoch' % (animator.Y[0][-1], timer.avg()))
    return timer.cumsum(), animator.Y[0]


def train_sgd(lr, batch_size, num_epochs=2):
    data_iter, feature_dim = get_data_ch11(batch_size)
    return train_ch11(sgd, None, {'lr': lr}, data_iter, feature_dim,
                      num_epochs)


gd_res = train_sgd(1, 1500, 10)
sgd_res = train_sgd(0.005, 1)
mini1_res = train_sgd(.4, 100)
mini2_res = train_sgd(.05, 10)

d2l.set_figsize([6, 3])
d2l.plot(*list(map(list, zip(gd_res, sgd_res, mini1_res, mini2_res))),
         'time (sec)', 'loss', xlim=[1e-2, 10],
         legend=['gd', 'sgd', 'batch size = 100', 'batch size = 10'])
d2l.plt.gca().set_xscale('log')
d2l.plt.show()


def train_gluon_ch11(tr_name, hyper_params, data_iter, num_epochs=2):
    net = nn.Sequential()
    net.add(nn.Dense(1))
    net.initialize(init.Normal(sigma=.01))
    trainer = gluon.Trainer(net.collect_params(), tr_name, hyper_params)
    loss = gluon.loss.L2Loss()
    animator = d2l.Animator(xlabel='epoch', ylabel='loss',
                            xlim=[0, num_epochs], ylim=[0.22, 0.35])
    n, timer = 0, d2l.Timer()
    for _ in range(num_epochs):
        for X, y in data_iter:
            with autograd.record():
                l = loss(net(X), y)
            l.backward()
            trainer.step(X.shape[0])
            n += X.shape[0]
            if n % 200 == 0:
                timer.stop()
                animator.add(n / X.shape[0] / len(data_iter),
                             d2l.evaluate_loss(net, data_iter, loss))
                timer.start()
    print('loss: %.3f, %.3f sec/epoch' % (animator.Y[0][-1], timer.avg()))


data_iter, _ = get_data_ch11(10)
train_gluon_ch11('sgd', {'learning_rate': 0.05}, data_iter)
d2l.plt.show()

"""
Vectorization makes code more efficient due to reduced overhead arising from
the deep learning framework and due to better memory locality and caching on
CPUs and GPUs.
There is a trade-off between statistical efficiency arising from SGD and
computational efficiency arising from processing large batches of data at a
time.
Minibatch stochastic gradient descent offers the best of both worlds:
computational and statistical efficiency.
In minibatch SGD we process batches of data obtained by a random permutation
of the training data (i.e., each observation is processed only once per
epoch, albeit in random order).
It is advisable to decay the learning rates during training.
In general, minibatch SGD is faster than SGD and gradient descent for
convergence to a smaller risk, when measured in terms of clock time.
"""
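# --- Editor's sketch (not part of the original file): the update rule quoted
# in the docstring above, g_t = (1/|B_t|) * ∂_w Σ_{i∈B_t} f(x_i, w), written
# as a tiny self-contained NumPy example for least-squares regression. All
# shapes, the learning rate, and the batch size are illustrative only.
def minibatch_sgd_demo(num_steps=100, batch_size=10, lr=0.1):
    rng = np.random.RandomState(0)
    X_all = rng.randn(1500, 5)
    true_w = np.arange(1.0, 6.0)
    y_all = X_all @ true_w + 0.01 * rng.randn(1500)
    w = np.zeros(5)
    for _ in range(num_steps):
        idx = rng.choice(len(X_all), batch_size, replace=False)  # B_t
        X, y = X_all[idx], y_all[idx]
        grad = 2 * X.T @ (X @ w - y) / batch_size  # average gradient over B_t
        w -= lr * grad                             # w <- w - eta_t * g_t
    return w  # should approach true_w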
[ "bluexker@gmail.com" ]
bluexker@gmail.com
149b691ee439a8d0a0d99a8d0e4ac6254fc50945
2cc6cf6e9d91799cbd9ac02f2771f7c9f95776bd
/test13.py
ce7a89c8373a7740d97c943989fa75b86726a611
[]
no_license
Master-sum/python_project
f938aa9f27e040c68c11766e1358dd7fff231b22
63036d1d80cd645d8080c8fee5bb30f241ab9914
refs/heads/master
2022-04-22T04:45:21.263195
2020-04-11T11:14:54
2020-04-11T11:14:54
254,782,126
1
0
null
null
null
null
UTF-8
Python
false
false
226
py
def change(aint, alst):
    aint = 0          # rebinds the local name only; the caller's int is unchanged
    alst[0] = 0       # mutates the shared list object in place
    alst.append(4)
    print('aint:', aint)
    print('alst:', alst)


aint = 3
alst = [1, 2, 3]
print('1', aint)
print('1', alst)
change(aint, alst)
print('3', aint)
print('3', alst)
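# Editor's note: expected output, showing that rebinding the int parameter
# does not affect the caller while mutating the shared list does:
#   1 3
#   1 [1, 2, 3]
#   aint: 0
#   alst: [0, 2, 3, 4]
#   3 3
#   3 [0, 2, 3, 4]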
[ "285957109@qq.com" ]
285957109@qq.com
aeb8b2fe9095164e8fa12451d0d10e1a784add85
651a296c8f45b5799781fd78a6b5329effe702a0
/subset/comp_rank_grlex.py
0d203ef0c529c7bf4b41b524004553b9408e4928
[]
no_license
pdhhiep/Computation_using_Python
095d14370fe1a01a192d7e44fcc81a52655f652b
407ed29fddc267950e9860b8bbd1e038f0387c97
refs/heads/master
2021-05-29T12:35:12.630232
2015-06-27T01:05:17
2015-06-27T01:05:17
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,691
py
#!/usr/bin/env python

def comp_rank_grlex ( kc, xc ):

#*****************************************************************************80
#
## COMP_RANK_GRLEX computes the graded lexicographic rank of a composition.
#
#  Discussion:
#
#    The graded lexicographic ordering is used, over all KC-compositions
#    for NC = 0, 1, 2, ...
#
#    For example, if KC = 3, the ranking begins:
#
#    Rank  Sum    1  2  3
#    ----  ---   -- -- --
#       1    0    0  0  0
#
#       2    1    0  0  1
#       3    1    0  1  0
#       4    1    1  0  0
#
#       5    2    0  0  2
#       6    2    0  1  1
#       7    2    0  2  0
#       8    2    1  0  1
#       9    2    1  1  0
#      10    2    2  0  0
#
#      11    3    0  0  3
#      12    3    0  1  2
#      13    3    0  2  1
#      14    3    0  3  0
#      15    3    1  0  2
#      16    3    1  1  1
#      17    3    1  2  0
#      18    3    2  0  1
#      19    3    2  1  0
#      20    3    3  0  0
#
#      21    4    0  0  4
#      ..   ..   .. .. ..
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    30 October 2014
#
#  Author:
#
#    John Burkardt
#
#  Parameters:
#
#    Input, int KC, the number of parts in the composition.
#    1 <= KC.
#
#    Input, int XC[KC], the composition.
#    For each 1 <= I <= KC, we have 0 <= XC(I).
#
#    Output, int RANK, the rank of the composition.
#
  from i4_choose import i4_choose
  from i4vec_sum import i4vec_sum
  from sys import exit
  import numpy as np
#
#  Ensure that 1 <= KC.
#
  if ( kc < 1 ):
    print ''
    print 'COMP_RANK_GRLEX - Fatal error!'
    print '  KC < 1'
    exit ( 'COMP_RANK_GRLEX - Fatal error!' )
#
#  Ensure that 0 <= XC(I).
#
  for i in range ( 0, kc ):
    if ( xc[i] < 0 ):
      print ''
      print 'COMP_RANK_GRLEX - Fatal error!'
      print '  XC[I] < 0'
      exit ( 'COMP_RANK_GRLEX - Fatal error!' )
#
#  NC = sum ( XC )
#
  nc = i4vec_sum ( kc, xc )
#
#  Convert to KSUBSET format.
#
  ns = nc + kc - 1
  ks = kc - 1

  xs = np.zeros ( ks, dtype = np.int32 )
  xs[0] = xc[0] + 1
  for i in range ( 2, kc ):
    xs[i-1] = xs[i-2] + xc[i-1] + 1
#
#  Compute the rank.
#
  rank = 1

  for i in range ( 1, ks + 1 ):
    if ( i == 1 ):
      tim1 = 0
    else:
      tim1 = xs[i-2]
    if ( tim1 + 1 <= xs[i-1] - 1 ):
      for j in range ( tim1 + 1, xs[i-1] ):
        rank = rank + i4_choose ( ns - j, ks - i )

  for n in range ( 0, nc ):
    rank = rank + i4_choose ( n + kc - 1, n )

  return rank

def comp_rank_grlex_test ( ):

#*****************************************************************************80
#
## COMP_RANK_GRLEX_TEST tests COMP_RANK_GRLEX.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    30 October 2014
#
#  Author:
#
#    John Burkardt
#
  from comp_random_grlex import comp_random_grlex

  print ''
  print 'COMP_RANK_GRLEX_TEST'
  print '  A COMP is a composition of an integer N into K parts.'
  print '  Each part is nonnegative.  The order matters.'
  print '  COMP_RANK_GRLEX determines the rank of a COMP'
  print '  from its parts.'
  print ''
  print '   Actual  Inferred'
  print '  Test  Rank  Rank'
  print ''

  kc = 3
  rank1 = 20
  rank2 = 60
  seed = 123456789

  for test in range ( 0, 5 ):
    xc, rank3, seed = comp_random_grlex ( kc, rank1, rank2, seed )
    rank4 = comp_rank_grlex ( kc, xc )
    print '  %4d  %6d  %8d' % ( test, rank3, rank4 )
#
#  Terminate.
#
  print ''
  print 'COMP_RANK_GRLEX_TEST:'
  print '  Normal end of execution.'

  return

if ( __name__ == '__main__' ):
  from timestamp import timestamp
  timestamp ( )
  comp_rank_grlex_test ( )
  timestamp ( )
[ "siplukabir@gmail.com" ]
siplukabir@gmail.com
917f1e4058d8ddec0274a5dd448fc901123da679
e1f53cb481f2b6ea2ac3ee53d0251c5d9ea782e0
/src/pyaid/xml/XMLConfigParser.py
8b3e6cb5d6bcf0235329fc8e3052b949a29c32ed
[]
no_license
hannahp/PyAid
8771ee35c2fdf9503e68e808dc0028e885e68158
b9562a954552334fab16c32a6b8285ea3e1571e0
refs/heads/master
2021-01-22T14:02:26.068735
2014-01-29T23:33:33
2014-01-29T23:33:33
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,164
py
# XMLConfigParser.py
# (C)2011
# Scott Ernst

import re
import xml.dom.minidom as minidom
import codecs

from pyaid.xml.ConfigData import ConfigData

#___________________________________________________________________________________________________ XMLConfigParser
class XMLConfigParser(object):
    """XMLConfigParser."""

#===================================================================================================
#                                                                                       C L A S S

    TYPE_ID = 'xml'

#===================================================================================================
#                                                                                      P U B L I C

#___________________________________________________________________________________________________ parseFile
    @staticmethod
    def parseFile(path, target=None, parseToInterchangeFormat =False):
        fh = codecs.open(path, 'r', 'utf-8')
        xml = fh.read()
        fh.close()
        return XMLConfigParser.parse(xml, target, parseToInterchangeFormat)

#___________________________________________________________________________________________________ parse
    @staticmethod
    def parse(xml, target=None, parseToInterchangeFormat =False):
        # Removes whitespace between tags to reduce potential parsing issues.
        pattern = re.compile('\<\?xml(.*)\?\>')
        if pattern.search(xml) is None:
            xml = '<?xml version="1.0" encoding="utf-8"?>' + xml

        dom = minidom.parseString(re.sub('>[\n\r\s\t]+<', '><', xml))

        if target is None:
            target = {}

        cd = ConfigData()
        for node in dom.childNodes[0].childNodes:
            # Ignore whitespace generated text nodes
            if isinstance(node, (minidom.Comment, minidom.Text)):
                continue
            XMLConfigParser._parseNode(node, cd)

        if parseToInterchangeFormat:
            cd.writeToInterchangeDict(target)
        else:
            cd.writeToDict(target)

        return target

#___________________________________________________________________________________________________ serializeToFile
    @staticmethod
    def serializeToFile(targetFile, interchangeData):
        # NOTE: the original called a nonexistent serializeToXML(); serialize()
        # is the method defined below.
        xml = XMLConfigParser.serialize(interchangeData)
        fh = codecs.open(targetFile, 'wb', 'utf-8')
        fh.write(xml)
        fh.close()

#___________________________________________________________________________________________________ serialize
    @staticmethod
    def serialize(interchangeData):
        xml = '<vm>\n'
        for n, v in interchangeData.iteritems():
            xml += XMLConfigParser._writeNode(n, v)
        return (xml + '</vm>').decode('unicode_escape')

#===================================================================================================
#                                                                                    P R I V A T E

#___________________________________________________________________________________________________ _writeNode
    @staticmethod
    def _writeNode(name, data, depth =1):
        indent = (' '*4*depth)
        target = indent + '<'
        if isinstance(data, list):
            d = '|'.join(data[1]) if isinstance(data[1], list) else str(data)
            target += data[0] + ' n="' + name + '" v="' + d + '" />\n'
        elif isinstance(data, dict):
            target += 'o n="' + name + '">\n'
            for n, v in data.iteritems():
                target += XMLConfigParser._writeNode(n, v, depth+1)
            target += indent + '</o>'
        elif isinstance(data, str):
            target += 's n="' + name + '" v="' + data + '" />\n'
        elif isinstance(data, (int, float)):
            target += 'n n="' + name + '" v="' + str(data) + '" />\n'
        else:
            target += 'unknown n="' + name + '" />'

        return target

#___________________________________________________________________________________________________ _parseNode
    @staticmethod
    def _parseNode(node, configData):
        nodeName = node.getAttribute('n')
        nodeType = node.tagName

        if nodeType != 'o':
            XMLConfigParser._parseAttribute(nodeName, nodeType, node.getAttribute('v'), configData)
            return

        cd = ConfigData()
        for k in node.attributes.keys():
            if k != 'n':
                aValue = node.getAttribute(k)
                aType = 's'
                if aValue.find(':') != -1:
                    aValue = node.getAttribute(k).split(':')
                    aType = str(aValue[0])
                    aValue = aValue[-1]
                XMLConfigParser._parseAttribute(k, aType, aValue, cd)

        for child in node.childNodes:
            XMLConfigParser._parseNode(child, cd)

        configData.setItem(nodeName, 'o', cd)

#___________________________________________________________________________________________________ _parseAttribute
    @staticmethod
    def _parseAttribute(attrName, attrType, attrValue, configData):
        configData.setItem(attrName, attrType, attrValue)
[ "swernst@gmail.com" ]
swernst@gmail.com
8c0a4f5b85c510d63ce5695baafd7aac77604f94
3d6bb3df9ca1d0de6f749b927531de0790aa2e1d
/compare_SV_groups_to_trees.py
5927663662c3f71b3728d89b37c671512184bf25
[]
no_license
standardgalactic/kuhner-python
da1d66a6d638a9a379ba6bae2affdf151f8c27c5
30b73554cc8bc9d532c8108b34dd1a056596fec7
refs/heads/master
2023-07-07T04:18:30.634268
2020-04-06T04:37:48
2020-04-06T04:37:48
null
0
0
null
null
null
null
UTF-8
Python
false
false
7,128
py
# -*- coding: utf-8 -*-
"""
Created on Thu Oct  4 12:43:41 2018

@author: Lucian
"""

from __future__ import division
from os import walk
from os import path
from os import readlink
from os import mkdir
from os.path import isfile
from copy import deepcopy
import numpy
import math
import matplotlib.pyplot as plt
import csv
import ete3
#import lucianSNPLibrary as lsl

groupdir = "SNV_groups/"
treedir = "phylip_TS_analysis/"
SVfile = "SV_events.txt"
patientfile = "patient_analysis_SVs.tsv"
allg_outfile = "all_groups_SVs.tsv"

#outdir = "SNV_SV_tree_compare" + tag + "/"
#if not path.isdir(outdir):
#    mkdir(outdir)

groupfiles = []
for __, _, files in walk(groupdir):
    groupfiles += files


def callGroup(group, allsamples, tree):
    if len(group) == 1:
        return "Singleton"
    if len(group) == len(allsamples):
        return "Root"
    trueset = set()
    falseset = set()
    for branch in tree:
        if branch.name != "":
            for sample in group:
                if sample in branch.name:
                    trueset.add(branch)
            for sample in allsamples:
                if sample in group:
                    continue
                if sample in branch.name:
                    falseset.add(branch)
            if "blood" in branch.name:
                tree.set_outgroup(branch)
    trueroot = tree.get_common_ancestor(trueset)
    for fbranch in falseset:
        testset = trueset.copy()
        testset.add(fbranch)
        newroot = tree.get_common_ancestor(testset)
        if newroot == trueroot:
            return "Ungrouped"
    return "Grouped"


# Read in the SNV numbers
groupdata = {}
allsamples = {}
for gfile in groupfiles:
    if "all" in gfile or "patient" in gfile or "SV_" in gfile:
        continue
    patient = gfile.split("_")[0]
    groupdata[patient] = {}
    allsamples[patient] = set()
    for line in open(groupdir + gfile, "r"):
        if "Patient" in line:
            continue
        lvec = line.rstrip().split()
        assert(patient == lvec[0])
        count = int(lvec[1])
        perc = float(lvec[2])
        samples = tuple(lvec[3:])
        groupdata[patient][samples] = {}
        groupdata[patient][samples]["count"] = count
        groupdata[patient][samples]["percentage"] = perc
        groupdata[patient][samples]["SV_count"] = 0
        for sample in samples:
            allsamples[patient].add(sample)

# Read in the SV numbers
SVs = {}
samplelists = {}
for line in open(SVfile, "r"):
    if "chr" in line:
        continue
    lvec = line.rstrip().split()
    (__, __, patient, sample, type, ch1, start1, end1, __, ch2, start2, end2, __, __, __, __) = lvec
    svid = (type, ch1, start1, end1, ch2, start2, end2)
    if patient not in SVs:
        SVs[patient] = {}
    if svid not in SVs[patient]:
        SVs[patient][svid] = set()
    SVs[patient][svid].add(sample)

# Count the SVs by sample list
nmulti = 0
nmulti_singletons = 0
nmulti_multis = 0
nsingle = 0
SVcounts = {}
for patient in SVs:
    SVtotal = 0
    for segid in SVs[patient]:
        samples = list(SVs[patient][segid])
        samples.sort()
        samples = tuple(samples)
        if samples not in groupdata[patient]:
            groupdata[patient][samples] = {}
            groupdata[patient][samples]["count"] = 0
            groupdata[patient][samples]["percentage"] = 0.0
            groupdata[patient][samples]["SV_count"] = 0
        groupdata[patient][samples]["SV_count"] += 1
        SVtotal += 1
        nsingle += 1
    for samples in groupdata[patient]:
        groupdata[patient][samples]["SV_percentage"] = groupdata[patient][samples]["SV_count"] / SVtotal

print("Number of segments with a single call:", str(nsingle))
print("Number of segments with multiple calls:", str(nmulti))
print("Number of segments with multiple calls, all singletons:", str(nmulti_singletons))
print("Number of segments with multiple calls, all multiples:", str(nmulti_multis))

# Now put the tree data in there, too:
for patient in groupdata:
    treefilename = treedir + patient + "_outtree.txt"
    if patient == "891":
        treefilename = treedir + patient + "a_outtree.txt"
    tree = ete3.Tree(treefilename)
    for samples in groupdata[patient]:
        groupdata[patient][samples]["matches_tree"] = callGroup(samples, allsamples[patient], tree)

# And finally, write out all of our information.
outfile = open(groupdir + allg_outfile, "w")
outfile.write("Patient\tMatches_tree\tCount\tPercentage\tSV count\tSV percentage\tSample1\tSample2\tSample3\tSample4\tSample5\tSample6\n")
for patient in groupdata:
    for samples in groupdata[patient]:
        outfile.write(patient)
        outfile.write("\t" + groupdata[patient][samples]["matches_tree"])
        outfile.write("\t" + str(groupdata[patient][samples]["count"]))
        outfile.write("\t" + str(groupdata[patient][samples]["percentage"]))
        outfile.write("\t" + str(groupdata[patient][samples]["SV_count"]))
        outfile.write("\t" + str(groupdata[patient][samples]["SV_percentage"]))
        for sample in samples:
            outfile.write("\t" + sample)
        outfile.write("\n")
outfile.close()

# Now do some analysis
has23GD = ["74", "279", "303", "391", "396", "450", "772", "997"]
types = ["Singleton", "Root", "Grouped", "Ungrouped"]

outfile = open(groupdir + patientfile, "w")
outfile.write("Patient\tnSNVmin\tnSNVmax\thas 2-3 GD")
for type in types:
    outfile.write("\t" + type + " counts")
    outfile.write("\t" + type + " total")
outfile.write("\tUngrouped potential subclone counts\tUngrouped potential subclone total\n")
for patient in groupdata:
    smallestSNVcount = 100000
    maxSNVcount = 0
    for samples in groupdata[patient]:
        SNVcount = groupdata[patient][samples]["count"]
        if groupdata[patient][samples]["matches_tree"] == "Grouped":
            if SNVcount < smallestSNVcount:
                smallestSNVcount = SNVcount
            if SNVcount > maxSNVcount:
                maxSNVcount = SNVcount
    possibleSubcloneThreshold = smallestSNVcount * 3 / 4
    SVcounts = {}
    for match in types:
        SVcounts[match] = []
    SVcounts["subclones"] = []
    for samples in groupdata[patient]:
        theseSamples = groupdata[patient][samples]
        SVcount = theseSamples["SV_count"]
        if SVcount == 0:
            continue
        if theseSamples["matches_tree"] == "Ungrouped" and theseSamples["count"] >= possibleSubcloneThreshold:
            SVcounts["subclones"].append(SVcount)
        else:
            SVcounts[theseSamples["matches_tree"]].append(SVcount)
    outfile.write(patient)
    outfile.write("\t" + str(possibleSubcloneThreshold))
    outfile.write("\t" + str(maxSNVcount))
    outfile.write("\t" + str(patient in has23GD))
    for type in types:
        outfile.write("\t")
        for num in SVcounts[type]:
            outfile.write("//" + str(num))
        outfile.write("\t" + str(sum(SVcounts[type])))
    outfile.write("\t")
    for num in SVcounts["subclones"]:
        outfile.write("//" + str(num))
    outfile.write("\t" + str(sum(SVcounts["subclones"])))
    outfile.write("\n")
outfile.close()
[ "lpsmith@uw.edu" ]
lpsmith@uw.edu
5a2a55a993186bacf8fed7bada23f6ab0264b6b5
8e55b122aa7b18b1734aafc3699b477c1e85cb91
/pippy/fx/experimental/refinement_types.py
665c9d0d651b4ed929e974d822a447809f3d26dd
[ "BSD-3-Clause" ]
permissive
yunxing/tau
0a7a641db49da795de70e8db9748d93205be31ac
c679248814a76c915e651806f1b4f30a3de9fe92
refs/heads/main
2023-03-17T01:02:23.222622
2022-09-30T23:18:25
2022-09-30T23:18:25
543,822,164
0
0
BSD-3-Clause
2022-09-30T23:13:57
2022-09-30T23:13:56
null
UTF-8
Python
false
false
457
py
# Copyright (c) Meta Platforms, Inc. and affiliates

class Equality:
    def __init__(self, lhs, rhs):
        self.lhs = lhs
        self.rhs = rhs

    def __str__(self):
        return f'{self.lhs} = {self.rhs}'

    def __repr__(self):
        return f'{self.lhs} = {self.rhs}'

    def __eq__(self, other):
        if isinstance(other, Equality):
            return self.lhs == other.lhs and self.rhs == other.rhs
        else:
            return False
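# Editor's sketch (not part of the original file): minimal illustration of
# the value semantics defined above; the operand values are invented.
if __name__ == '__main__':
    assert Equality('rank(t)', 2) == Equality('rank(t)', 2)   # structural equality
    assert Equality('rank(t)', 2) != Equality('rank(t)', 3)   # != derived from __eq__
    print(Equality('rank(t)', 2))                             # -> rank(t) = 2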
[ "noreply@github.com" ]
yunxing.noreply@github.com
fd99bf3d3c0e15b0191184e3ac916eff8f0f70d2
a57e66be33512a7e2e99adb6f597151b56c4c373
/psgreet.py
23cec5ae7e9d198ceaf39575804a96c85a4837e3
[]
no_license
ravijaya/sep28
17025ea0997a371f54a6374f90d4bf56e0206840
8907f4b03ac2c4b2f1806d0d7cf3fd6aa680680c
refs/heads/master
2022-12-18T23:58:12.989144
2020-09-30T12:13:15
2020-09-30T12:13:15
299,901,698
0
0
null
null
null
null
UTF-8
Python
false
false
198
py
name = input('enter the name :')
city = input('enter the city :')
zip_code = int(input('enter the postal code :'))

print('name :', name)
print('city :', city)
print(zip_code)
print(type(zip_code))
[ "ravi.goglobium@gmail.com" ]
ravi.goglobium@gmail.com
e095bb08830df4cb39df6e6f8c0f7a370cf6d071
524baf7de05bd3fc5b9d08083cbb0b7b47a67979
/40.py
d5ffe3d133240d1686dcb26040a6492346e80813
[]
no_license
gk90731/100-questions-practice
1356dd577516567a5c51a4257f59fe01b123e7ff
f855549e037b9924dd6f0370dc2f2a53765d9227
refs/heads/master
2020-05-25T14:04:59.642819
2019-05-21T12:49:04
2019-05-21T12:49:04
187,835,741
0
0
null
null
null
null
UTF-8
Python
false
false
195
py
'''Question:
Please try to guess what is missing in the following code and add the
missing part so that the code works fine.

import math
print(math.pow(2))'''

import math
print(math.pow(2, 3))  # math.pow() needs both a base and an exponent
[ "gk90731@gmail.com" ]
gk90731@gmail.com
d8c1268253a55b552a1766dca9855a16bb18ab78
df489fddec1ffa936f0223efca3a35a22df36dc0
/99_backup/25_各国历年二氧化碳CO2排放量统计分析/challenge7_1.py
7c9a6e07c6db0eee678564cc8f73ddede36755f8
[]
no_license
echo-xr/pc
3af106530815956feb458889408085285b3fd8f5
03160675e912b4e4ad4642e14b5ab0230b973f6c
refs/heads/master
2020-04-26T16:54:03.408688
2019-03-03T11:27:34
2019-03-03T11:27:34
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,730
py
def co2():
    import pandas as pd

    # Read the Data sheet
    data = pd.read_excel('ClimateChange.xlsx')
    # Keep the rows whose 'Series code' is 'EN.ATM.CO2E.KT' and index by country code
    data = data[data['Series code'] == 'EN.ATM.CO2E.KT'].set_index('Country code')
    # Drop the five leading metadata columns, keeping only the per-year emission data
    data.drop(data.columns[:5], axis=1, inplace=True)
    # Replace '..' placeholders with NaN
    data.replace({'..': pd.np.nan}, inplace=True)
    # Forward-fill and then back-fill the NaN gaps along each row
    data = data.fillna(method='ffill', axis=1).fillna(method='bfill', axis=1)

    # Read the Country sheet
    country = pd.read_excel('ClimateChange.xlsx', 'Country')
    # Index by country code to make the merge straightforward
    country.index = country['Country code']

    # Combine two Series: each country's total emissions and its income group
    df = pd.concat([data.sum(axis=1), country['Income group']], axis=1)

    # Sum emissions
    a = df.groupby('Income group').sum()
    # Set the column name
    a.columns = ['Sum emissions']

    # Add a column with the country names
    df[2] = country['Country name']

    # Highest-emitting country (and its emissions) within each income group
    h = df.sort_values(0, ascending=False).groupby('Income group').head(1).set_index('Income group')
    # Set the column names
    h.columns = ['Highest emissions', 'Highest emission country']

    # Lowest-emitting country (and its emissions) within each income group
    l = df[df[0] > 0].sort_values(0).groupby('Income group').head(1).set_index('Income group')
    # Set the column names
    l.columns = ['Lowest emissions', 'Lowest emission country']

    # Return everything; concat sorts the index automatically
    return pd.concat([a, h, l], axis=1)
[ "1195581533@qq.com" ]
1195581533@qq.com
ca60ffe0443bb07e494632fa5f39f6372218f4ec
de24f83a5e3768a2638ebcf13cbe717e75740168
/moodledata/vpl_data/125/usersdata/172/29042/submittedfiles/ap1.py
683a565ff52ea8deb8c87ed40b155d4040448259
[]
no_license
rafaelperazzo/programacao-web
95643423a35c44613b0f64bed05bd34780fe2436
170dd5440afb9ee68a973f3de13a99aa4c735d79
refs/heads/master
2021-01-12T14:06:25.773146
2017-12-22T16:05:45
2017-12-22T16:05:45
69,566,344
0
0
null
null
null
null
UTF-8
Python
false
false
274
py
# -*- coding: utf-8 -*-
a = float(input('enter the 1st number:'))
b = float(input('enter the 2nd number:'))
c = float(input('enter the 3rd number:'))

# prints the three numbers in ascending order (handles the case where a is the smallest)
if a <= b and a <= c:
    print(a)
    if b <= c:
        print(b)
        print(c)
    else:
        print(c)
        print(b)
[ "rafael.mota@ufca.edu.br" ]
rafael.mota@ufca.edu.br
7ccbfdbf9eb671918f8f2d2beca9b69a75d10e8d
a3e1e797acb16bf4d1e298271545927df8eaeae7
/tests/unit/services/storage/test_base.py
81a39a0af33497a1293be5621915105e9b1b977e
[ "MIT" ]
permissive
totem/cluster-deployer
481b11b2953a3e195c46762659c97fceca75945a
75160f051250fd782af42af472f965d50f721ff5
refs/heads/develop
2022-10-12T09:09:30.417412
2017-06-30T22:32:56
2017-06-30T22:32:56
23,778,754
0
2
MIT
2022-09-13T21:48:32
2014-09-08T04:03:06
Python
UTF-8
Python
false
false
2,592
py
import datetime

from freezegun import freeze_time
from mock import MagicMock
from nose.tools import raises
import pytz

from deployer.services.storage.base import AbstractStore
from tests.helper import dict_compare

NOW = datetime.datetime(2022, 01, 01, tzinfo=pytz.UTC)
NOW_NOTZ = datetime.datetime(2022, 01, 01)


class TestAbstractStore:

    def setup(self):
        self.store = AbstractStore()

    @raises(NotImplementedError)
    def test_get(self):
        self.store.create_deployment(MagicMock())

    @raises(NotImplementedError)
    def test_get_deployment(self):
        self.store.get_deployment('fake_id')

    @raises(NotImplementedError)
    def test_update_state(self):
        self.store.update_state('fake_id', 'PROMOTED')

    @raises(NotImplementedError)
    def test_update_runtime_upstreams(self):
        self.store.update_runtime_upstreams('fake_id', {})

    @raises(NotImplementedError)
    def test_update_runtime_units(self):
        self.store.update_runtime_units('fake_id', [])

    @raises(NotImplementedError)
    def test_update_state_bulk(self):
        self.store.update_state_bulk('myapp', 'DECOMMISSIONED')

    @raises(NotImplementedError)
    def test_get_health(self):
        self.store.health()

    @freeze_time(NOW_NOTZ)
    def test_add_event(self):
        # Given: Mock implementation for adding raw event
        self.store._add_raw_event = MagicMock()

        # When: I add event to the store
        self.store.add_event('MOCK_EVENT')

        # Then: Event gets added to the store
        self.store._add_raw_event.assert_called_once_with({
            'type': 'MOCK_EVENT',
            'component': 'deployer',
            'details': None,
            'date': NOW_NOTZ
        })

    @raises(NotImplementedError)
    def test_add_raw_event(self):
        self.store.add_event({})

    def test_setup(self):
        self.store.setup()  # NOOP

    @raises(NotImplementedError)
    def test_find_apps(self):
        self.store.find_apps()

    @raises(NotImplementedError)
    def test_filter_deployments(self):
        self.store.filter_deployments('myapp')

    @freeze_time(NOW)
    def test_apply_modified_ts(self):
        # When: I apply modified timestamp for a given deployment
        deployement = self.store.apply_modified_ts({
            'deployement': {
                'id': 'test'
            }
        })

        # Then: Modified timestamp is applied as expected
        dict_compare(deployement, {
            'deployement': {
                'id': 'test'
            },
            'modified': NOW
        })
[ "sukrit007@gmail.com" ]
sukrit007@gmail.com
d4e55b4a3c1058d466250a81fc43250a26f6e81e
79f541042e4b4d6bb443e7a758ca918817ea0f33
/PythonGTK/Examples/19_cellRendererPixbuf.py
075ed4f57dca6d57224051e634c8867f76537d47
[]
no_license
ashutoshm1771/Source-Code-from-Tutorials
d5f950db8f5f648e87303835e9558eeba404939a
f5552d4bd0f4bebcf5c674ff730fcb61f2d7a1ce
refs/heads/master
2020-09-15T06:08:31.777622
2019-11-22T09:08:31
2019-11-22T09:08:31
223,364,275
4
0
null
2019-11-22T09:01:51
2019-11-22T09:01:48
null
UTF-8
Python
false
false
962
py
from gi.repository import Gtk


class CellRendererPixbufWindow(Gtk.Window):

    def __init__(self):
        Gtk.Window.__init__(self, title="CellRendererPixbuf Example")
        self.set_default_size(200, 200)

        self.liststore = Gtk.ListStore(str, str)
        self.liststore.append(["New", "document-new"])
        self.liststore.append(["Open", "document-open"])
        self.liststore.append(["Save", "document-save"])

        treeview = Gtk.TreeView(model=self.liststore)

        renderer_text = Gtk.CellRendererText()
        column_text = Gtk.TreeViewColumn("Text", renderer_text, text=0)
        treeview.append_column(column_text)

        renderer_pixbuf = Gtk.CellRendererPixbuf()
        column_pixbuf = Gtk.TreeViewColumn("Image", renderer_pixbuf, icon_name=1)
        treeview.append_column(column_pixbuf)

        self.add(treeview)


win = CellRendererPixbufWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
[ "buckyroberts@gmail.com" ]
buckyroberts@gmail.com
f7fb4d9a7d5027b2266d958ce939e45c3c255fd8
571a89f94f3ebd9ec8e6b618cddb7d05811e0d62
/past202010/h/main.py
3b515938a65636dfb734674b57c922c04b987390
[]
no_license
ryu19-1/atcoder_python
57de9e1db8ff13a107b5861f8f6a231e40366313
cc24b3c2895aad71d40cefbb8e2893dc397b8f4f
refs/heads/master
2023-05-10T05:32:16.507207
2021-05-19T17:48:10
2021-05-19T17:48:10
368,954,430
0
0
null
null
null
null
UTF-8
Python
false
false
1,409
py
#!/usr/bin/env python3
import sys
from collections import deque, Counter
from heapq import heappop, heappush
from bisect import bisect_right
from itertools import accumulate

sys.setrecursionlimit(10**6)
INF = 10**12
m = 10**9 + 7


def main():
    N, M, K = map(int, input().split())
    S = [list(input()) for _ in range(N)]
    # Keep per-digit counts as 2-D prefix sums
    count = [[[0] * (M+1) for _ in range(N+1)] for _ in range(10)]
    for i in range(N):
        for j in range(M):
            count[int(S[i][j])][i+1][j+1] += 1
    for k in range(10):
        for i in range(N):
            for j in range(M):
                count[k][i + 1][j + 1] += count[k][i + 1][j]
            for j in range(M+1):
                count[k][i + 1][j] += count[k][i][j]
    # print(count[1])
    for n in range(min(N, M), 0, -1):
        # print(n)
        for i in range(N - n + 1):
            for j in range(M - n + 1):
                # If some digit's count inside the (i,j)-(i+n,j+n) square,
                # plus the K allowed rewrites, covers all n**2 cells, n works
                for k in range(10):
                    # print(n, i, j, k)
                    cnt = count[k][i + n][j + n] - count[k][i + n][j] - count[k][i][j + n] + count[k][i][j]
                    if cnt + K >= n * n:
                        print(n)
                        exit()


if __name__ == "__main__":
    main()
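# Editor's sketch (not part of the original solution): the inclusion-exclusion
# query used inside main(), factored out for clarity. `pref` is a 1-based,
# zero-padded 2-D prefix-sum table, so the count of cells with
# r1 <= r < r2 and c1 <= c < c2 is an O(1) lookup:
def rect_count(pref, r1, c1, r2, c2):
    return pref[r2][c2] - pref[r2][c1] - pref[r1][c2] + pref[r1][c1]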
[ "ryu1007kami@gmail.com" ]
ryu1007kami@gmail.com
555768f7f3294b960854d8e29e515027c5aefc09
c1e87e9a7f0f2e81e3113821c21378f7b6436b6f
/Щелчок/15_поляков/22.py
23cd2689e5ef0c8d8a5c67a9dc002aa76f830d5a
[]
no_license
Pochemu/Activity
8e2a7ec4f6b7fd233c0ee48e893733b077aac7a4
1b21e674635ff95104e18e93241c30020032e26a
refs/heads/main
2023-07-09T04:04:06.337321
2021-07-06T21:38:26
2021-07-06T21:38:26
337,492,398
1
0
null
null
null
null
UTF-8
Python
false
false
193
py
for i in range(101, 1000):
    x = i
    L = x - 21
    M = x + 12
    # subtraction-based Euclid: the loop ends with L == M == gcd(x-21, x+12)
    while L != M:
        if L > M:
            L = L - M
        else:
            M = M - L
    if M == 11:
        print(i)
[ "48361330+Pochemu@users.noreply.github.com" ]
48361330+Pochemu@users.noreply.github.com
0d2cb05f03c5148d545df139b233bb65df55f7c4
21d1d0cade05ae0ab3dff1598d64f80cef9a411d
/Python/Programmers/Level3/리틀프렌즈사천성.py
7aabad1adc691ca74efc1da5c8df28984e00bf84
[]
no_license
louisuss/Algorithms-Code-Upload
56d1a140a1674a53c13bcec5be96ea6da7c35219
6c97c713858b075162d5d86f124c0555f383c5b0
refs/heads/master
2023-01-08T21:37:19.702276
2020-11-09T04:03:50
2020-11-09T04:03:50
258,399,889
0
0
null
null
null
null
UTF-8
Python
false
false
5,352
py
def find_route(m1, n1, m2, n2, c):
    up, down, left, right = check_edges(m1, n1, m2, n2, c)
    case = (m1-m2)*(n1-n2)
    if case == 0:
        return (m1 == m2 and up) or (n1 == n2 and left)
    elif case > 0:
        return (up and right) or (down and left)
    else:
        return (up and left) or (down and right)


def check_edges(m1, n1, m2, n2, c):
    up, down, left, right = [True]*4
    for i in range(min(n1, n2), max(n1, n2)+1):
        if matrix[min(m1, m2)][i] not in ('.', c):
            up = False
            break
    for i in range(min(n1, n2), max(n1, n2)+1):
        if matrix[max(m1, m2)][i] not in ('.', c):
            down = False
            break
    for i in range(min(m1, m2), max(m1, m2)+1):
        if matrix[i][min(n1, n2)] not in ('.', c):
            left = False
            break
    for i in range(min(m1, m2), max(m1, m2)+1):
        if matrix[i][max(n1, n2)] not in ('.', c):
            right = False
            break
    return up, down, left, right


m, n = map(int, input().split())
matrix = []
coordinates = {}
for i in range(m):
    row = list(input())
    matrix.append(row)
    for j in range(n):
        c = row[j]
        if c.isupper():
            coordinates.setdefault(c, []).append((i, j))

result = []
friends = sorted(coordinates)
i = 0
while i < len(friends):
    c = friends[i]
    if c in result or c == '.':
        i += 1
        continue
    (m1, n1), (m2, n2) = coordinates[c]
    if find_route(m1, n1, m2, n2, c):
        result.append(c)
        friends[i] = '.'
        matrix[m1][n1] = '.'
        matrix[m2][n2] = '.'
        i = 0
        continue
    i += 1

if len(result) == len(friends):
    print(''.join(result))
else:
    print('IMPOSSIBLE')

# # Original Friends Shisen-Sho -> paths made of at most three segments
# # Little Friends -> paths made of at most two horizontal/vertical segments
# # . empty cell / * blocked cell
# from collections import defaultdict
# from copy import deepcopy

# def delete_line(a, b, board, key):
#     # segment 1
#     x1, y1 = a
#     x2, y2 = b
#     check = True
#     # same row
#     if x1 == x2:
#         for i in range(y1+1, y2):
#             if board[x1][i] != '.':
#                 check = False
#                 break
#         if check:
#             board[x1][y1], board[x2][y2] = '.', '.'
#             print(board)
#             return key
#     # same column
#     elif y1 == y2:
#         for i in range(x1+1, x2):
#             if board[i][y1] != '.':
#                 check = False
#                 break
#         if check:
#             board[x1][y1], board[x2][y2] = '.', '.'
#             return key
#     # segment 2
#     check1, check2 = True, True
#     # when the first cell is to the left
#     if x1 < x2 and y1 > y2:
#         # there are two possible directions
#         for i in range(y2, y1):
#             if board[x1][i] != '.':
#                 check1 = False
#                 break
#         if check1:
#             for i in range(x1, x2):
#                 if board[i][y2] != '.':
#                     check1 = False
#                     break
#         for i in range(x1+1, x2+1):
#             if board[i][y1] != '.':
#                 check2 = False
#                 break
#         if check2:
#             for i in range(y2+1, y1+1):
#                 if board[x2][i] != '.':
#                     check2 = False
#                     break
#     elif x1 < x2 and y1 < y2:
#         for i in range(y1+1, y2+1):
#             if board[x1][i] != '.':
#                 check1 = False
#                 break
#         if check1:
#             for i in range(x1, x2):
#                 if board[i][y2] != '.':
#                     check1 = False
#                     break
#         for i in range(x1+1, x2+1):
#             if board[i][y1] != '.':
#                 check2 = False
#                 break
#         if check2:
#             for i in range(y1+1, y2):
#                 if board[x2][i] != '.':
#                     check2 = False
#                     break
#     if check1 and check2:
#         board[x1][y1], board[x2][y2] = '.', '.'
#         return key

# def solution(m, n, board):
#     answer = []
#     board = list(map(list, board))
#     positions = defaultdict(list)
#     for i in range(m):
#         for j in range(n):
#             if ord(board[i][j]) in list(range(ord('A'), ord('Z')+1)):
#                 positions[board[i][j]].append((i, j))
#     while True:
#         before_answer = deepcopy(answer)
#         temp = []
#         for a, b in positions.values():
#             t = delete_line(a, b, board, board[a[0]][a[1]])
#             if t != None:
#                 temp.append(t)
#         if temp:
#             answer.extend(sorted(temp))
#         if len(answer) == len(positions):
#             return ''.join(answer)
#         else:
#             if before_answer == answer:
#                 return "IMPOSSIBLE"

# m = 3
# n = 3
# board = ['DBA', 'C*A', 'CDB']
# print(solution(m, n, board))

# a = [[1, 2], [3, 4]]
# a[0][1] = 3
# print(a)

# # finding characters that satisfy a condition
# # print(list(range(ord('A'), ord('Z')+1)))

# # does indexing fetch one character of the string at a time?
# # print(board[0][0])
# # for v in 'abc':
# #     print(v)

# # converting a string into a list of characters
# # board = list(map(list, board))
# # board = [list(b) for b in board]
# # print(board)
[ "dobi1115@gmail.com" ]
dobi1115@gmail.com
3997d4051e2df12bf21d542ce8da6bd90be8e2b3
1c25798a9ae17ca228383fcd04a1e801415a78e7
/Chapter 3 Math, Function, Strings and Objects/commonPythonFunction.py
25618b0b6296236322877889a7f2286192eceb02
[]
no_license
padamcs36/Introduction_to_Python_By_Daniel
688c56fff598617e979a5f71e9a48e50844ad7ea
8b8f00c9d93428c58df9c90e5edd8f75a1662647
refs/heads/main
2023-02-22T08:11:01.450054
2021-01-28T09:45:01
2021-01-28T09:45:01
333,700,411
1
0
null
null
null
null
UTF-8
Python
false
false
303
py
'''
abs(), max(), min(), pow(), round()
these are python built-in functions;
there is no need to import a separate module for them
'''
print(abs(-3))              # abs() always returns the non-negative magnitude -> 3
print(max(4, 5, -8, 0, 7))  # -> 7
print(min(0, 4, 6, 3, -4))  # -> -4
print(pow(4, 3))            # 4 ** 3 -> 64
print(round(4.535))         # -> 5
print(round(5.554667, 2))   # -> 5.55
[ "noreply@github.com" ]
padamcs36.noreply@github.com
785a0e838d2086e476faf98a5ab30824e7268acd
b08d42933ac06045905d7c005ca9c114ed3aecc0
/src/learningCurve/tenPercent/lrClassifierN.py
5b2e407f3df56718ee1c0db2c70f5c4f024f8ad3
[]
no_license
TanemuraKiyoto/PPI-native-detection-via-LR
d148d53f5eb60a4dda5318b371a3048e3f662725
897e7188b0da94e87126a4acc0c9a6ff44a64574
refs/heads/master
2022-12-05T11:59:01.014309
2020-08-10T00:41:17
2020-08-10T00:41:17
225,272,083
1
0
null
null
null
null
UTF-8
Python
false
false
5,133
py
# 9 September 2019
# Kiyoto Aramis Tanemura

# I modified the rfClassifier.py script to implement a logistic regression
# classifier. This classifier runs faster than the random forest classifier
# and Jun previously observed comparable results between logistic regression
# and random forest classifiers for the protein folding system. Due to the
# lesser time cost, I may sample a greater hyperparameter space using the
# logistic regression classifier. If the sampling yields a region in which
# overfitting is not observed, then I can refine the search. If the results
# are similar to that of the random forest classifier, then I may have
# exhausted the dataset for generalizability.

# Modified 26 October 2019 by Kiyoto Aramis Tanemura. Apply logistic
# regression classifier to CASF-PPI dataset.

# Modified 2020-02-09 by KAT. Code generalized for public use on GitHub.

import pandas as pd
import numpy as np
import os
import json
import pickle
#from multiprocessing import Pool
from time import time
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import RandomizedSearchCV
from sklearn.preprocessing import StandardScaler
from random import shuffle, random

#os.chdir('/mnt/scratch/tanemur1/')

toc = time()

# Randomize input file orders
pathToInput = 'data/comparison_descriptors/'
pathToOutput = 'results/learningCurve/'
fileNames = [x for x in os.listdir(pathToInput) if '.csv' in x]
shuffle(fileNames)  # note: shuffle is in-place. Do not assign to variable

# Specify training set fraction
train_fraction = 0.1

if len(fileNames) * train_fraction == int(len(fileNames) * train_fraction):
    train_file_number = int(len(fileNames) * train_fraction)
else:
    train_file_number = int(len(fileNames) * train_fraction + 1)

x_train = pd.DataFrame()
y_train = pd.DataFrame()

# Read individual csv for comparison descriptors, append to train_data, and
# partition to x_train, y_train
fileNamesWithPath = [pathToInput + fileName for fileName in fileNames]

def read_csv(filePath):
    return pd.read_csv(filePath, index_col = 0)

print('begin read training set')
#with Pool(np.min([train_file_number, 28])) as p:
#    train_dataList = list(p.map(read_csv, fileNamesWithPath[:train_file_number]))
train_dataList = list(map(read_csv, fileNamesWithPath[:train_file_number]))

print('begin append DF | ', (time() - toc) / 60, ' min')

# Append DataFrames into one. While loop used to reduce append operations.
# Iteratively, DFs in a list are appended to the following DF.
while len(train_dataList) != 1:
    number = int(len(train_dataList) / 2)
    for i in range(number):
        train_dataList[2 * i] = train_dataList[2 * i].append(train_dataList[2 * i + 1], sort = True)
    for j in range(number):
        del train_dataList[j + 1]

x_train = train_dataList[0]
del train_dataList

print('train_data dimensions', x_train.shape, ' | ', (time() - toc) / 60, ' min')

y_train = x_train['class']
x_train = x_train.drop('class', axis = 1)
# x_train contains only nonbonding descriptors
feature_names = x_train.columns

scaler = StandardScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
y_train = y_train.values

print('Dimensions x_train ', x_train.shape, ' | y_train', y_train.shape)

# Define a logistic regression classifier along with pertinent
# hyperparameters. Here, default values are used.
clf = LogisticRegression(penalty='l2', verbose = 1)

def sampleRationalVals(minVal, maxVal):
    return 2 ** (random() * (np.log2(maxVal) - np.log2(minVal)) + np.log2(minVal))

def sampleRationalList(minVal, maxVal):
    theList = []
    for i in range(int(2 * np.log2(maxVal - minVal) + 1)):
        theVal = sampleRationalVals(minVal, maxVal)
        theList.append(theVal)
    return theList

parameters = {
    # include any hyperparameters to sample. Otherwise, leave empty to perform
    # five fold cross validation with default values. For example:
    # 'C': sampleRationalList(0.001, 1000),
    # 'solver': ['newton-cg', 'lbfgs', 'sag', 'saga']
}

print('begin RandomizedSearchCV | ' + str((time() - toc)/60) + ' mins')

randomized_search = RandomizedSearchCV(estimator = clf, param_distributions = parameters,
                                       n_iter = 1, scoring = 'accuracy', refit = True,
                                       cv = 5, verbose = 1, n_jobs = 1,
                                       pre_dispatch = 'n_jobs', return_train_score = True)
randomized_search.fit(x_train, y_train)

print('begin output | ', (time() - toc) / 60 / 60, ' hours')
tic = time()

with open(pathToOutput + 'bestParamN.json', 'w') as g:
    json.dump(randomized_search.best_estimator_.get_params(), g)
with open(pathToOutput + 'modelN.pkl', 'wb') as h:
    pickle.dump(randomized_search, h)
with open(pathToOutput + 'trainingSetN.txt', 'w') as i:
    i.write('Training set:\n')
    for pdbID in fileNames[:train_file_number]:
        i.write(pdbID + '\n')
    i.write('\nJob time: ' + str((tic - toc) / 60 / 60) + ' hours')
with open(pathToOutput + 'standardScalerN.pkl', 'wb') as j:
    pickle.dump(scaler, j)

bestCoefficient = randomized_search.best_estimator_.coef_
coefDf = pd.DataFrame(bestCoefficient, columns = feature_names)
with open(pathToOutput + 'coefficientsN.csv', 'w') as f:
    coefDf.to_csv(f)
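# Editor's sketch (not part of the original script): sampleRationalVals above
# draws log-uniformly, i.e. uniformly in log2-space, so each decade between
# minVal and maxVal receives roughly equal probability mass. A quick
# empirical check; the helper name is invented for illustration.
def _check_log_uniform(trials=100000):
    from collections import Counter
    decades = Counter()
    for _ in range(trials):
        v = sampleRationalVals(0.001, 1000)
        decades[int(np.floor(np.log10(v)))] += 1
    return decades  # counts should be roughly equal for keys -3 .. 2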
[ "tanemur1@msu.edu" ]
tanemur1@msu.edu
bd9ed5751eee1c211f209fe86a475e63d9c97c2d
93684882400d0249ad733249f5b2c8dbd230110f
/ClassExercise & studio/chapter 8/Ex.02 pass 7-04-2016.py
b2e97a34f99d78980e9dea2a41e658df9a7c41ee
[]
no_license
SmileShmily/LaunchCode-summerofcode-Unit1
c492bbed966547cc8c1be7f15d7a23cb989d407b
03474cf77b0dae2bcfaf8513711d3fec72bd4166
refs/heads/master
2021-01-16T23:19:23.413010
2017-06-29T02:49:19
2017-06-29T02:49:19
95,730,117
0
0
null
null
null
null
UTF-8
Python
false
false
1,628
py
'''
(GRADED) Write a function analyze_text that receives a string as input.
Your function should count the number of alphabetic characters (a through z,
or A through Z) in the text and also keep track of how many are the letter
'e' (upper or lowercase).

Your function should return an analysis of the text, something like this:

The text contains 243 alphabetic characters, of which 109 (44.8%) are 'e'.
'''

#analyze_text=input("Please enter your str:")

def analyze_text(str):
    # your code here
    # lows="abcdefghijklmnopqrstuvwxyz"
    # ups="ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    str = repr(str).lower()
    numberOfe = 0
    totalChars = 0
    for ch in str:
        if (ch <= 'Z' and ch >= 'A') or (ch <= 'z' and ch >= 'a'):
            totalChars = totalChars + 1
            if ch == 'e':
                numberOfe = numberOfe + 1
    percent_with_e = (numberOfe / totalChars) * 100
    # a literal percent sign in %-formatting must be written as %%
    return "The text contains %d alphabetic characters, of which %d (%.1f%%) are 'e'." % (totalChars, numberOfe, percent_with_e)

# Don't copy these tests into Vocareum
from test import testEqual

str = "Eeeee"
expected = "The text contains 5 alphabetic characters, of which 5 (100.0%) are 'e'."
testEqual(analyze_text(str), expected)

str = "Blueberries are tastee!"
expected = "The text contains 20 alphabetic characters, of which 6 (30.0%) are 'e'."
testEqual(analyze_text(str), expected)

str = "Wright's book, Gadsby, contains a total of 0 of that most common symbol ;)"
expected = "The text contains 55 alphabetic characters, of which 0 (0.0%) are 'e'."
testEqual(analyze_text(str), expected)

'''Output
Pass
Pass
Pass'''
[ "zyxjyaya@gmail.com" ]
zyxjyaya@gmail.com
59d4c01d5cfcf4ed342790fd1274876e93dd8832
26b0a513571a68576687b98de61d24be609b7595
/problem_set_7/NewsStory.py
350dd691a2f84602fbf6138df49edbbbe8c487c1
[]
no_license
sirajmuneer123/MITx-6.00.1x-Introduction-to-Computer-Science-and-Programming-Using-Python
ea487447e119d9f1a23d49b9d6ddbe3a27b6f972
757c8087005bebb0fb250526d0caf7b79e3d1973
refs/heads/master
2021-01-10T03:21:53.565688
2015-11-07T17:57:26
2015-11-07T17:57:26
45,476,334
0
0
null
null
null
null
UTF-8
Python
false
false
579
py
# Enter your code for NewsStory in this box

# Part 1: Data Structure Design
class NewsStory(object):
    def __init__(self, guid, title, subject, summary, link):
        self.guid = guid
        self.title = title
        self.subject = subject
        self.summary = summary
        self.link = link

    def getGuid(self):
        return self.guid

    def getTitle(self):
        return self.title

    def getSubject(self):
        return self.subject

    def getSummary(self):
        return self.summary

    def getLink(self):
        return self.link
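# Editor's sketch (not part of the original submission): minimal usage of the
# class above; the field values are invented for illustration.
if __name__ == '__main__':
    story = NewsStory('guid-1', 'A Title', 'A Subject', 'A Summary',
                      'http://example.com/story')
    assert story.getGuid() == 'guid-1'
    assert story.getTitle() == 'A Title'
    assert story.getLink() == 'http://example.com/story'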
[ "sirajmuneer4@gmail.com" ]
sirajmuneer4@gmail.com
8c7528abac71136cad54131e9a41c7e5a0de6109
573d470c9fcb3799e8822e6953e1259b74e0672c
/Course/syntax/example_37.py
4dec6e18ca446e0776f554d3f82b7f877eb2a471
[ "Apache-2.0" ]
permissive
zevgenia/Python_shultais
e6f35773e54a72477ea5ee83520dbecfbee7ff48
e51c31de221c5e7f36ede857a960138009ec8a05
refs/heads/master
2020-03-31T21:46:25.061571
2018-10-11T13:43:47
2018-10-11T13:43:47
152,593,211
0
0
null
null
null
null
UTF-8
Python
false
false
330
py
""" Модуль демонстрации строк документации. Пример использования смотрите в файле example_38.py """ def square(x): """ Функция принимает число и возвращает квадрат этого числа. """ return x ** 2
[ "zatonskaya@yandex.ru" ]
zatonskaya@yandex.ru
e8b3a1c00a0adcbb6c45e0613c18c21c3c1a2f8b
1bc7456240639a4fac54c411fbcb562cdbcc420c
/20.valid-parenthesis.py
505f4e6def7acd2d3df87521451cf3e681bda834
[]
no_license
Manash-git/CP-LeetCode-Solve
bdbb9f13946faee5da24e191a3d593b99da61ed2
45052c7613345c76f8a12bac780ffb899062dea9
refs/heads/master
2022-11-29T13:16:03.474242
2020-08-11T19:06:07
2020-08-11T19:06:07
275,853,956
1
0
null
null
null
null
UTF-8
Python
false
false
620
py
def isValid(s):
    stack = []
    pairs = {
        '(': ')',
        '[': ']',
        '{': '}'
    }
    for char in s:
        if char in pairs:
            # storing corresponding closing parenthesis
            stack.append(pairs[char])
            # print(stack)
        else:
            if not stack or stack.pop() != char:
                return False
    # print("Final=>", stack)
    return not stack


print(isValid("()[{}]"))
print(isValid("()[]{}"))

# test=[]
test = [1]
# print(not test)  # Returns True if the list is empty, because an empty list
#                  # is falsy, so not False = True
# print(test)
[ "emailatmanash@gmail.com" ]
emailatmanash@gmail.com
99712e5994e4f62c535cc123f5dee3ae5e41dc7c
b3a2e683ca82de3758fd62a8dea16bf2be7ebb78
/sequences/sorting.py
eb8a7a548068d777b5de382221deaa3f25e1f0d2
[]
no_license
jschnab/data-structures-algos-python
dc60f4e26d4d86470039bab1f4422e86a6da736b
7b7821d5fc5378c8c7a4be088aadd6c95b154312
refs/heads/master
2022-12-27T17:56:34.622558
2020-10-24T19:47:37
2020-10-24T19:47:37
265,121,711
0
0
null
null
null
null
UTF-8
Python
false
false
2,719
py
import random


def select(seq, start):
    min_idx = start
    for j in range(start + 1, len(seq)):
        if seq[j] < seq[min_idx]:
            min_idx = j
    return min_idx


def selection_sort(seq):
    for i in range(len(seq) - 1):
        min_idx = select(seq, i)
        tmp = seq[i]
        seq[i] = seq[min_idx]
        seq[min_idx] = tmp


def merge(seq, start, mid, stop):
    lst = []
    i = start
    j = mid

    # merge the two lists while each has more elements
    while i < mid and j < stop:
        if seq[i] < seq[j]:
            lst.append(seq[i])
            i += 1
        else:
            lst.append(seq[j])
            j += 1

    # copy in the rest of the start-to-mid sequence
    while i < mid:
        lst.append(seq[i])
        i += 1

    # no need to copy the rest of the sequence from j to stop
    # the next part of the code does this for us
    # so no need to do:
    # while j < stop:
    #     lst.append(seq[j])
    #     j += 1

    # copy elements back to the original sequence
    for i in range(len(lst)):
        seq[start + i] = lst[i]


def merge_sort_recursive(seq, start, stop):
    # >= is necessary if sequence is empty
    # otherwise start == stop - 1 does the job
    if start >= stop - 1:
        return
    mid = (start + stop) // 2
    merge_sort_recursive(seq, start, mid)
    merge_sort_recursive(seq, mid, stop)
    merge(seq, start, mid, stop)


def merge_sort(seq):
    merge_sort_recursive(seq, 0, len(seq))


def partition(seq, start, stop):
    # pivot_index comes from the start location in the list
    pivot_index = start
    pivot = seq[pivot_index]
    i = start + 1
    j = stop - 1

    while i <= j:
        while i <= j and seq[i] <= pivot:
            i += 1
        while i <= j and seq[j] > pivot:
            j -= 1
        if i < j:
            tmp = seq[i]
            seq[i] = seq[j]
            seq[j] = tmp
            i += 1
            j -= 1

    seq[pivot_index] = seq[j]
    seq[j] = pivot

    return j


def quicksort_recursive(seq, start, stop):
    if start >= stop - 1:
        return
    # pivot index ends up between the two halves
    # where the pivot value is in its final location
    pivot_index = partition(seq, start, stop)
    quicksort_recursive(seq, start, pivot_index)
    quicksort_recursive(seq, pivot_index + 1, stop)


def quicksort(seq):
    # randomize sequence to find a good pivot
    for i in range(len(seq)):
        j = random.randint(0, len(seq) - 1)
        seq[i], seq[j] = seq[j], seq[i]
    quicksort_recursive(seq, 0, len(seq))


if __name__ == "__main__":
    seq = [random.randint(0, 100) for _ in range(10)]
    print("Before sort:", seq)
    quicksort(seq)
    print("After sort:", seq)
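# Editor's sketch (not part of the original module): a small randomized
# cross-check of the three sorts above against Python's built-in sorted();
# purely illustrative, and the helper name is invented.
def _check_sorts(trials=100):
    for _ in range(trials):
        base = [random.randint(0, 100) for _ in range(20)]
        for sort_fn in (selection_sort, merge_sort, quicksort):
            seq = list(base)
            sort_fn(seq)
            assert seq == sorted(base), sort_fn.__name__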
[ "jonathan.schnabel31@gmail.com" ]
jonathan.schnabel31@gmail.com
be19e49ff8ca50f044aa74a299385f507daf7c95
15f321878face2af9317363c5f6de1e5ddd9b749
/solutions_python/Problem_34/785.py
9c28392c2800225546e52dd464a7bb364a89ea2b
[]
no_license
dr-dos-ok/Code_Jam_Webscraper
c06fd59870842664cd79c41eb460a09553e1c80a
26a35bf114a3aa30fc4c677ef069d95f41665cc0
refs/heads/master
2020-04-06T08:17:40.938460
2018-10-14T10:12:47
2018-10-14T10:12:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
378
py
import sys
import re

firstline = sys.stdin.readline()
(L, D, N) = map(int, firstline.split())

lex = []
for x in range(D):
    lex.append(sys.stdin.readline())
lex = ''.join(lex)

for (i, pattern) in enumerate(sys.stdin.readlines()):
    pattern = pattern.replace('(', '[').replace(')', ']')
    matches = re.findall(pattern, lex)
    print 'Case #%d: %d' % (i+1, len(matches))
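# Editor's note (not part of the original submission): the two replace()
# calls turn the contest's "(ab)d" token syntax into the regex
# character-class form "[ab]d". For example (values invented):
#   '(ab)d'.replace('(', '[').replace(')', ']')  ->  '[ab]d'
#   re.findall('[ab]d', 'ad\nbd\ncd\n')          ->  ['ad', 'bd']
# Because every dictionary word has exactly L letters and keeps its trailing
# newline, a pattern of L character classes can only match a whole word, so
# len(matches) counts the matching words.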
[ "miliar1732@gmail.com" ]
miliar1732@gmail.com
c886d026554eb02a740ba2ff1fe41d5cda414774
b385f39c5b701fb6f22796ab951872257ae8398a
/exercicios-secao08/exercicio29.py
ebaee4355b5c1a1ef226ff1e088f5a6abccb4523
[ "MIT" ]
permissive
EhODavi/curso-python
5c97a6913bad198ae590519287ed441c95399d80
cf07e308be9d7516f2cfe7f21c539d214c836979
refs/heads/main
2023-08-07T13:44:46.608118
2021-06-14T21:40:50
2021-06-14T21:40:50
356,542,988
0
0
null
null
null
null
UTF-8
Python
false
false
222
py
from math import factorial, radians


def sinh(graus: float) -> float:
    soma: float = 0.0
    x: float = radians(graus)

    for n in range(6):
        soma += (x ** (2 * n + 1)) / factorial(2 * n + 1)

    return soma
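# Editor's sketch (not part of the original exercise): quick comparison of
# the 6-term Taylor series above with math.sinh; for arguments of this size
# the truncation error is tiny.
from math import sinh as _math_sinh

if __name__ == '__main__':
    for graus in (0.0, 30.0, 90.0):
        aprox = sinh(graus)
        exato = _math_sinh(radians(graus))
        print(graus, aprox, exato, abs(aprox - exato))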
[ "davi.miau@gmail.com" ]
davi.miau@gmail.com
6b49473abce39f0169294f832573d958ed264e27
c7a404e6fe1861c90ff8dc9cbc69462ebcbb744c
/app.py
7c2a34914a0512a254d56cb7547873a693cb3975
[ "MIT" ]
permissive
bossnode/rufo-mp3-fetcher
b40ced257b7127456b5fbdd27b19cb5ce9f6289e
ed14c44c27761e4d2b2625da17a62c036f38dafd
refs/heads/master
2020-06-10T04:11:09.257754
2019-06-26T20:25:12
2019-06-26T20:25:12
193,578,331
0
0
null
null
null
null
UTF-8
Python
false
false
1,680
py
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
import os, sys
from soundspider import SoundSpider
from time import sleep
import threading


class Handler:
    def onDestroy(self, *args):
        try:
            download_thread._stop()
        except:
            pass
        Gtk.main_quit()

    def onToggleDownload(self, button):
        status = "Downloading..."
        builder.get_object('label4').set_text(status)
        button.set_sensitive(False)
        builder.get_object("folder_label").set_sensitive(False)
        builder.get_object("url_label").set_sensitive(False)
        ## verbose?
        # verbose = True
        verbose = False
        params = (builder.get_object("url_label").get_text(),
                  builder.get_object("folder_label").get_text(),
                  verbose,
                  builder.get_object('label4'),
                  button,
                  builder.get_object("url_label"),
                  builder.get_object("folder_label"))
        download_thread = threading.Thread(target=SoundSpider.convert, args=params)
        download_thread.start()
        return


def resource_path(relative_path):
    """ Get absolute path to resource, works for dev and for PyInstaller """
    try:
        # PyInstaller creates a temp folder and stores path in _MEIPASS
        base_path = sys._MEIPASS
    except Exception:
        base_path = os.path.abspath(".")
    return os.path.join(base_path, relative_path)

# """ Get absolute path to resource, works for dev and for PyInstaller """
# base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))
# return os.path.join(base_path, relative_path)


download_thread = threading.Thread()
builder = Gtk.Builder()
builder.add_from_file(resource_path("ui.glade"))
builder.connect_signals(Handler())
window = builder.get_object("window1")
window.show_all()
Gtk.main()
[ "=" ]
=
d08d566fec8e6362ded5eced9937d5449872b30a
613d0766a36a0354ed8b7be4c8a552068afe5e71
/08 Linked List/15 Reverse Linked List/02.py
45530e293097a676b12012d31b88a5d9f2c7ec75
[]
no_license
HYLee1008/Python
e9fb2109ed29d11faa37b0a823e603134d85dc28
04900b6de1d56c11fbb98d518c9b4cdd37772490
refs/heads/master
2022-12-12T12:07:49.198551
2020-09-05T12:41:55
2020-09-05T12:41:55
280,879,627
0
0
null
null
null
null
UTF-8
Python
false
false
553
py
### Reverse linked list, iteratively
### The iterative version consumes about 70% of the memory of the recursive
### method, and is slightly faster.
from datastructure import *


def reverse_list(head):
    node, prev = head, None
    while node:
        next, node.next = node.next, prev  # re-point one node per iteration
        node, prev = next, node
    return prev  # prev is the new head


input = ListNode(1)
input.next = ListNode(2)
input.next.next = ListNode(3)
input.next.next.next = ListNode(4)
input.next.next.next.next = ListNode(5)

reverse = reverse_list(input)
while reverse:
    print(reverse.val)
    reverse = reverse.next
[ "discone1008@gmail.com" ]
discone1008@gmail.com
d32bdac865b53dfa2b0b6159cd89f731b1be54c7
1565d4f012622c98dd519d5f6feedf7d5a67a70b
/etc/openshift_clean.py
a05d4c5f52a0c98d4598cac6d152eb7475f82f94
[ "MIT", "LicenseRef-scancode-unknown-license-reference" ]
permissive
doaa-altarawy/quokka
6862da8e3f0f3583350ca4faf2bc7133a7aa9d6b
3730008100266569d1fab47c7dfa765650e2a346
refs/heads/development
2021-01-16T17:46:22.983206
2015-12-21T22:15:15
2015-12-21T22:15:15
48,460,112
1
0
null
2015-12-23T00:14:31
2015-12-23T00:14:31
null
UTF-8
Python
false
false
543
py
#!/usr/bin/python
"""
THIS SCRIPT CLEANS ALL DATA IN YOUR QUOKKA DB
RUN ONLY IN OPENSHIFT DEMO DEPLOY OR AT YOUR OWN RISK!!!!
"""

from quokka import create_app
from quokka.core.models.content import Content
from quokka.core.models.config import Config
from quokka.core.models.channel import Channel
from quokka.modules.accounts.models import User

app = create_app()

Content.objects.delete()
User.objects.delete()
Config.objects.delete()

for channel in Channel.objects.filter(parent__ne=None):
    channel.delete()

Channel.objects.delete()
[ "rochacbruno@gmail.com" ]
rochacbruno@gmail.com
8aeea72b7d8a05fcd367ad2245993782dde02fba
3330944ef9af811ed59e3f40721e6b7da754e1e7
/setup.py
1d70fdd49af304dc6bdae85abbb3f2671b9bf919
[]
no_license
loum/baip-loader
8ee6ad184f4cb557437ed92590b591eaa0032956
68a4b1556b8d745e51a1502092cd1d54bfd96c76
refs/heads/master
2021-01-25T07:39:34.190887
2015-05-05T00:15:21
2015-05-05T00:15:21
30,627,252
0
0
null
null
null
null
UTF-8
Python
false
false
2,249
py
""":mod:docutils` setup.py file to generate Python compatible sources in build/ directory """ import os import glob import fnmatch import shutil from setuptools import setup VERSION = '0.0.0' def opj(*args): path = os.path.join(*args) return os.path.normpath(path) def find_data_files(srcdir, *wildcards, **kw): """Get a list of all files under the *srcdir* matching *wildcards*, returned in a format to be used for install_data. """ def walk_helper(arg, dirname, files): names = [] lst, wildcards = arg for wildcard in wildcards: wc_name = opj(dirname, wildcard) for current_file in files: filename = opj(dirname, current_file) if (fnmatch.fnmatch(filename, wc_name) and not os.path.isdir(filename)): if kw.get('version') is None: names.append(filename) else: versioned_file = '%s.%s' % (filename, kw.get('version')) shutil.copyfile(filename, versioned_file) names.append('%s.%s' % (filename, kw.get('version'))) if names: if kw.get('target_dir') is None: lst.append(('', names)) else: lst.append((kw.get('target_dir'), names)) file_list = [] recursive = kw.get('recursive', True) if recursive: os.path.walk(srcdir, walk_helper, (file_list, wildcards)) else: walk_helper((file_list, wildcards), srcdir, [os.path.basename(current_file) for current_file in glob.glob(opj(srcdir, '*'))]) return file_list find_data_files('baip_loader/conf/', '*.conf', version=VERSION) setup(name='python-baip-loader', version=VERSION, description='BAIP-Loader', author='Lou Markovski', author_email='lou.markovski@gmail.com', url='', scripts=['baip_loader/bin/baip-loader'], packages=['baip_loader', 'baip_loader.config'], package_data={'baip_loader': ['conf/*.conf.[0-9]*.[0-9]*.[0-9]*']})
[ "lou.markovski@gmail.com" ]
lou.markovski@gmail.com
02d2bcf9f1aaaee3aff4b006298d5417e5d9fecb
386d5d4f8f102e701d02b326cd066f520e3dff9f
/ProjectApplication/grant_management/migrations/0030_renames_media_to_medium.py
f3c44d48ea456f3b94289c02f7b305bf53d2dd0b
[ "CC-BY-NC-SA-4.0", "CC-BY-SA-4.0", "CC-BY-4.0", "MIT" ]
permissive
Swiss-Polar-Institute/project-application
ae2561c3ae2c1d5412d165d959ce2e5886135e0a
7dc4a9f7e0f8d28c89977b85f99bc5e35ea77d43
refs/heads/master
2023-08-31T04:01:23.492272
2023-08-25T14:33:02
2023-08-25T14:33:02
206,330,401
7
5
MIT
2023-09-13T08:03:53
2019-09-04T13:49:39
Python
UTF-8
Python
false
false
685
py
# Generated by Django 3.0.5 on 2020-05-12 10:39

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('project_core', '0118_calls_need_to_be_part_of_a_funding_instrument'),
        ('grant_management', '0029_signed_by_multiple_people'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Media',
            new_name='Medium',
        ),
        migrations.AlterField(
            model_name='grantagreement',
            name='signed_by',
            field=models.ManyToManyField(blank=True, help_text='People who signed the grant agreement', to='project_core.PhysicalPerson'),
        ),
    ]
[ "carles@pina.cat" ]
carles@pina.cat
7bcce88caf756bb58b5c86d24a41dfc8e635fa25
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
/benchmark/omninote/testcase/firstcases/testcase10_018.py
bb43949dc1a984d35d229ba18c38794740f1bd50
[]
no_license
Prefest2018/Prefest
c374d0441d714fb90fca40226fe2875b41cf37fc
ac236987512889e822ea6686c5d2e5b66b295648
refs/heads/master
2021-12-09T19:36:24.554864
2021-12-06T12:46:14
2021-12-06T12:46:14
173,225,161
5
0
null
null
null
null
UTF-8
Python
false
false
4,301
py
#coding=utf-8
import os
import subprocess
import time
import traceback

from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException

desired_caps = {
    'platformName': 'Android',
    'deviceName': 'Android Emulator',
    'platformVersion': '4.4',
    'appPackage': 'it.feio.android.omninotes',
    'appActivity': 'it.feio.android.omninotes.MainActivity',
    'resetKeyboard': True,
    'androidCoverage': 'it.feio.android.omninotes/it.feio.android.omninotes.JacocoInstrumentation',
    'noReset': True
}

def command(cmd, timeout=5):
    p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
    time.sleep(timeout)
    p.terminate()
    return

def getElememt(driver, str):
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str)
    return element

def getElememtBack(driver, str1, str2):
    for i in range(0, 2, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str1)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str2)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str2)
    return element

def swipe(driver, startxper, startyper, endxper, endyper):
    size = driver.get_window_size()
    width = size["width"]
    height = size["height"]
    try:
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper),
                     end_x=int(width * endxper), end_y=int(height * endyper), duration=2000)
    except WebDriverException:
        time.sleep(1)
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper),
                     end_x=int(width * endxper), end_y=int(height * endyper), duration=2000)
    return

# testcase018
try:
    starttime = time.time()
    driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
    element = getElememtBack(driver, "new UiSelector().text(\"Nothing here!\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"it.feio.android.omninotes:id/menu_sort\").className(\"android.widget.TextView\")")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"it.feio.android.omninotes:id/menu_sort\").className(\"android.widget.TextView\")")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"it.feio.android.omninotes:id/menu_sort\").className(\"android.widget.TextView\")")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Nothing here!\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"More options\")")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Reduced view\")", "new UiSelector().className(\"android.widget.TextView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"it.feio.android.omninotes:id/menu_search\").className(\"android.widget.TextView\")")
    TouchAction(driver).tap(element).perform()

except Exception, e:
    print 'FAIL'
    print 'str(e):\t\t', str(e)
    print 'repr(e):\t', repr(e)
    print traceback.format_exc()
else:
    print 'OK'
finally:
    cpackage = driver.current_package
    endtime = time.time()
    print 'consumed time:', str(endtime - starttime), 's'
    command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"10_018\"")
    jacocotime = time.time()
    print 'jacoco time:', str(jacocotime - endtime), 's'
    driver.quit()
    if (cpackage != 'it.feio.android.omninotes'):
        cpackage = "adb shell am force-stop " + cpackage
        os.popen(cpackage)
[ "prefest2018@gmail.com" ]
prefest2018@gmail.com
d8a67dd3fc7ad483dcc1c079ddf71657300fe471
4c3e992678341ccaa1d4d14e97dac2e0682026d1
/addons/account/tests/test_tax.py
62e63e8bb07a770a7428d5bb811ee52b406f5201
[]
no_license
gahan-corporation/wyatt
3a6add8f8f815bd26643e1e7c81aea024945130d
77e56da362bec56f13bf0abc9f8cf13e98461111
refs/heads/master
2021-09-03T18:56:15.726392
2018-01-08T02:54:47
2018-01-08T02:54:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
7,285
py
from gerp.addons.account.tests.account_test_users import AccountTestUsers

import time


class TestTax(AccountTestUsers):

    def setUp(self):
        super(TestTax, self).setUp()

        self.fixed_tax = self.tax_model.create({
            'name': "Fixed tax",
            'amount_type': 'fixed',
            'amount': 10,
            'sequence': 1,
        })
        self.fixed_tax_bis = self.tax_model.create({
            'name': "Fixed tax bis",
            'amount_type': 'fixed',
            'amount': 15,
            'sequence': 2,
        })
        self.percent_tax = self.tax_model.create({
            'name': "Percent tax",
            'amount_type': 'percent',
            'amount': 10,
            'sequence': 3,
        })
        self.division_tax = self.tax_model.create({
            'name': "Division tax",
            'amount_type': 'division',
            'amount': 10,
            'sequence': 4,
        })
        self.group_tax = self.tax_model.create({
            'name': "Group tax",
            'amount_type': 'group',
            'amount': 0,
            'sequence': 5,
            'children_tax_ids': [
                (4, self.fixed_tax.id, 0),
                (4, self.percent_tax.id, 0)
            ]
        })
        self.group_tax_bis = self.tax_model.create({
            'name': "Group tax bis",
            'amount_type': 'group',
            'amount': 0,
            'sequence': 6,
            'children_tax_ids': [
                (4, self.fixed_tax.id, 0),
                (4, self.percent_tax.id, 0)
            ]
        })
        self.group_of_group_tax = self.tax_model.create({
            'name': "Group of group tax",
            'amount_type': 'group',
            'amount': 0,
            'sequence': 7,
            'children_tax_ids': [
                (4, self.group_tax.id, 0),
                (4, self.group_tax_bis.id, 0)
            ]
        })
        self.bank_journal = self.env['account.journal'].search([('type', '=', 'bank'), ('company_id', '=', self.account_manager.company_id.id)])[0]
        self.bank_account = self.bank_journal.default_debit_account_id
        self.expense_account = self.env['account.account'].search([('user_type_id.type', '=', 'payable')], limit=1)  # Should be done by onchange later

    def test_tax_group_of_group_tax(self):
        self.fixed_tax.include_base_amount = True
        self.group_tax.include_base_amount = True
        self.group_of_group_tax.include_base_amount = True
        res = self.group_of_group_tax.compute_all(200.0)
        self.assertEquals(res['total_excluded'], 200.0)
        # After calculation of first group
        #   base = 210
        #   total_included = 231
        # Base of the first grouped is passed
        # Base after the second group (220) is dropped.
        # Base of the group of groups is passed out,
        # so we obtain base as after first group
        self.assertEquals(res['base'], 210.0)
        self.assertEquals(res['total_included'], 263.0)

    def test_tax_group(self):
        res = self.group_tax.compute_all(200.0)
        self.assertEquals(res['total_excluded'], 200.0)
        self.assertEquals(res['total_included'], 230.0)
        self.assertEquals(len(res['taxes']), 2)
        self.assertEquals(res['taxes'][0]['amount'], 10.0)
        self.assertEquals(res['taxes'][1]['amount'], 20.0)

    def test_tax_percent_division(self):
        self.division_tax.price_include = True
        self.division_tax.include_base_amount = True
        self.percent_tax.price_include = False
        self.percent_tax.include_base_amount = False
        res_division = self.division_tax.compute_all(200.0)
        res_percent = self.percent_tax.compute_all(200.0)
        self.assertEquals(res_division['taxes'][0]['amount'], 20.0)
        self.assertEquals(res_percent['taxes'][0]['amount'], 20.0)
        self.division_tax.price_include = False
        self.division_tax.include_base_amount = False
        self.percent_tax.price_include = True
        self.percent_tax.include_base_amount = True
        res_division = self.division_tax.compute_all(200.0)
        res_percent = self.percent_tax.compute_all(200.0)
        self.assertEquals(res_division['taxes'][0]['amount'], 22.22)
        self.assertEquals(res_percent['taxes'][0]['amount'], 18.18)

    def test_tax_sequence_normalized_set(self):
        self.division_tax.sequence = 1
        self.fixed_tax.sequence = 2
        self.percent_tax.sequence = 3
        taxes_set = (self.group_tax | self.division_tax)
        res = taxes_set.compute_all(200.0)
        self.assertEquals(res['taxes'][0]['amount'], 22.22)
        self.assertEquals(res['taxes'][1]['amount'], 10.0)
        self.assertEquals(res['taxes'][2]['amount'], 20.0)

    def test_tax_include_base_amount(self):
        self.fixed_tax.include_base_amount = True
        res = self.group_tax.compute_all(200.0)
        self.assertEquals(res['total_included'], 231.0)

    def test_tax_currency(self):
        self.division_tax.amount = 15.0
        res = self.division_tax.compute_all(200.0, currency=self.env.ref('base.VEF'))
        self.assertAlmostEqual(res['total_included'], 235.2941)

    def test_tax_move_lines_creation(self):
        """ Test that creating a move.line with tax_ids generates the tax move lines
        and adjusts the line amount when a tax is price_include """
        self.fixed_tax.price_include = True
        self.fixed_tax.include_base_amount = True
        company_id = self.env['res.users'].browse(self.env.uid).company_id.id
        vals = {
            'date': time.strftime('%Y-01-01'),
            'journal_id': self.bank_journal.id,
            'name': 'Test move',
            'line_ids': [(0, 0, {
                'account_id': self.bank_account.id,
                'debit': 235,
                'credit': 0,
                'name': 'Bank Fees',
                'partner_id': False,
            }), (0, 0, {
                'account_id': self.expense_account.id,
                'debit': 0,
                'credit': 200,
                'date': time.strftime('%Y-01-01'),
                'name': 'Bank Fees',
                'partner_id': False,
                'tax_ids': [(4, self.group_tax.id), (4, self.fixed_tax_bis.id)]
            })],
            'company_id': company_id,
        }
        move = self.env['account.move'].with_context(apply_taxes=True).create(vals)
        aml_fixed_tax = move.line_ids.filtered(lambda l: l.tax_line_id.id == self.fixed_tax.id)
        aml_percent_tax = move.line_ids.filtered(lambda l: l.tax_line_id.id == self.percent_tax.id)
        aml_fixed_tax_bis = move.line_ids.filtered(lambda l: l.tax_line_id.id == self.fixed_tax_bis.id)
        self.assertEquals(len(aml_fixed_tax), 1)
        self.assertEquals(aml_fixed_tax.credit, 10)
        self.assertEquals(len(aml_percent_tax), 1)
        self.assertEquals(aml_percent_tax.credit, 20)
        self.assertEquals(len(aml_fixed_tax_bis), 1)
        self.assertEquals(aml_fixed_tax_bis.credit, 15)
        aml_with_taxes = move.line_ids.filtered(lambda l: set(l.tax_ids.ids) == set([self.group_tax.id, self.fixed_tax_bis.id]))
        self.assertEquals(len(aml_with_taxes), 1)
        self.assertEquals(aml_with_taxes.credit, 190)
[ "duchess@gahan-corporation.com" ]
duchess@gahan-corporation.com
1ff24ffb45f8546c93fbec4517976b96ca01b0bc
b8ed71f3d1a36c119d846e97f1aa7d8ba6774f52
/680_Valid_Palindrome_II.py
5dd32bf280843101a247fdd621ab833fcc2b7d1b
[]
no_license
imjaya/Leetcode_solved
0831c4114dd919864452430c4e46d3f69b4bd0cd
374eb0f23ae14d9638d20bbfe622209f71397ae0
refs/heads/master
2023-05-24T17:57:56.633611
2023-05-16T06:31:42
2023-05-16T06:31:42
284,203,426
0
0
null
null
null
null
UTF-8
Python
false
false
491
py
def helper(s, l, r):
    while l < r:
        if s[l] != s[r]:
            return False
        l += 1
        r -= 1
    return True


class Solution:
    def validPalindrome(self, s: str) -> bool:
        left = 0
        right = len(s) - 1
        while left < right:
            if s[left] != s[right]:
                return helper(s, left + 1, right) or helper(s, left, right - 1)
            else:
                left += 1
                right -= 1
        return True
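A brief usage sketch (inputs invented for illustration; the class and method follow the LeetCode stub above):

solver = Solution()
print(solver.validPalindrome("aba"))   # True  - already a palindrome
print(solver.validPalindrome("abca"))  # True  - deleting 'b' or 'c' leaves a palindrome
print(solver.validPalindrome("abc"))   # False - a single deletion is not enough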
[ "smjayasurya1997@gmail.com" ]
smjayasurya1997@gmail.com
c335ae67050596c16c4e388d411d18e7b88ca797
863a7b075963c2882d9fe5df66af16c6e52576a9
/deepiu/textsim/evaluate/evaluate-sim.py
2079a1cc158d696d4cf173579d1afde74c720102
[]
no_license
buptpriswang/hasky
3c3510d43821bbdfdfa216a337cde6e0747b3423
93afdc32956643fe191fcf1a5aa635570e219ab0
refs/heads/master
2021-06-30T19:45:03.329238
2017-09-22T15:41:58
2017-09-22T15:41:58
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,532
py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#          \file   evaluate-sim-score.py
#        \author   chenghuige
#          \date   2016-09-25 00:46:53.890615
#   \Description
# ==============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

flags = tf.app.flags
FLAGS = flags.FLAGS

flags.DEFINE_string('model_dir', '/home/gezi/new/temp/makeup/title2name/model/bow/', '')
#flags.DEFINE_string('model_dir', '/home/gezi/new/temp/makeup/title2name/model/cnn.hic/', '')
flags.DEFINE_string('exact_model_dir', '/home/gezi/new/temp/makeup/title2name/model/bow.elementwise/', '')
flags.DEFINE_string('vocab', '/home/gezi/new/temp/makeup/title2name/tfrecord/seq-basic/vocab.txt', '')
flags.DEFINE_bool('use_exact_predictor', False, '')
flags.DEFINE_string('key', 'score', '')
flags.DEFINE_string('lkey', 'dual_bow/main/ltext:0', '')
flags.DEFINE_string('rkey', 'dual_bow/main/rtext:0', '')
#flags.DEFINE_string('lkey', 'dual_cnn/main/ltext:0', '')
#flags.DEFINE_string('rkey', 'dual_cnn/main/rtext:0', '')
flags.DEFINE_string('exact_key', 'score', '')
flags.DEFINE_string('exact_lkey', 'dual_bow2/main/ltext:0', '')
flags.DEFINE_string('exact_rkey', 'dual_bow2/main/rtext:0', '')
flags.DEFINE_float('exact_ratio', 1., '')
flags.DEFINE_integer('np_seed', 1024, '0 random otherwise fixed random')

import sys
import numpy as np

import melt
logging = melt.logging
import gezi

from deepiu.util import evaluator
from deepiu.util import algos_factory


class Predictor(melt.PredictorBase):
  def __init__(self, model_dir, key, lkey, rkey, index=0):
    self._predictor = melt.Predictor(model_dir)
    self._key = key
    self._lkey = lkey
    self._rkey = rkey
    self._index = index

  def predict(self, ltext, rtext):
    score = self._predictor.inference(self._key,
                                      feed_dict={
                                          self._lkey: ltext,
                                          self._rkey: rtext
                                      },
                                      index=self._index)
    return score


def evaluate_score():
  evaluator.init()
  text_max_words = evaluator.all_distinct_texts.shape[1]
  print('text_max_words:', text_max_words)

  predictor = Predictor(FLAGS.model_dir, FLAGS.key, FLAGS.lkey, FLAGS.rkey, index=0)
  exact_predictor = None
  if FLAGS.use_exact_predictor:
    exact_predictor = Predictor(FLAGS.exact_model_dir, FLAGS.exact_key, FLAGS.exact_lkey, FLAGS.exact_rkey, index=-1)
  print(tf.get_collection(FLAGS.key))
  seed = FLAGS.np_seed if FLAGS.np_seed else None
  index = evaluator.random_predict_index(seed=seed)
  evaluator.evaluate_scores(predictor, random=True, index=index)
  if exact_predictor is not None:
    ## well, for seq2seq we did the experiment, and for makeup title2name the score
    ## (average time per step) is much better than ori_score,
    ## so just using score will be fine
    #exact_predictor._key = 'ori_score'
    #evaluator.evaluate_scores(predictor, random=True, exact_predictor=exact_predictor, index=index)
    #exact_predictor._key = 'score'
    evaluator.evaluate_scores(predictor, random=True, exact_predictor=exact_predictor,
                              exact_ratio=FLAGS.exact_ratio, index=index)


def main(_):
  logging.init(logtostderr=True, logtofile=False)
  evaluate_score()


if __name__ == '__main__':
  tf.app.run()
[ "29109317@qq.com" ]
29109317@qq.com
8b7320fbb782d353ece0bf32dd2977b744b8acd8
0e1e643e864bcb96cf06f14f4cb559b034e114d0
/Exps_7_v3/I_to_M_Gk3_no_pad/pyramid_2side/bce_s001_tv_s0p1_L8/step11_L2345678.py
c03824dab2e889ccb93f4ca311d98915db768bee
[]
no_license
KongBOy/kong_model2
33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307
1af20b168ffccf0d5293a393a40a9fa9519410b2
refs/heads/master
2022-10-14T03:09:22.543998
2022-10-06T11:33:42
2022-10-06T11:33:42
242,080,692
3
0
null
null
null
null
UTF-8
Python
false
false
4,986
py
#############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os

code_exe_path = os.path.realpath(__file__)                           ### path of the currently executing step10_b.py
code_exe_path_element = code_exe_path.split("\\")                    ### split the path to find out at which level kong_model2 sits
kong_layer = code_exe_path_element.index("kong_model2")              ### find the level of kong_model2
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])  ### locate the kong_model2 dir

import sys                                                           ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print("   code_exe_path:", code_exe_path)
# print("   code_exe_path_element:", code_exe_path_element)
# print("   kong_layer:", kong_layer)
# print("   kong_model2_dir:", kong_model2_dir)
###############################################################################################################################################################################################################
# When run with F5, if the working directory is not step10_b.py's folder, switch there automatically so that step10_a.py can be imported!
code_exe_dir = os.path.dirname(code_exe_path)  ### dir of the currently executing step10_b.py
if(os.getcwd() != code_exe_dir):               ### if not already in step10_b.py's folder, switch there automatically
    os.chdir(code_exe_dir)
# print("current_path:", os.getcwd())
###############################################################################################################################################################################################################
import Exps_7_v3.I_to_M_Gk3_no_pad.pyramid_0side.bce_s001_tv_s0p1_L8.step10_a as L8_0side
import Exps_7_v3.I_to_M_Gk3_no_pad.pyramid_1side.bce_s001_tv_s0p1_L8.step10_a as L8_1side
import step10_a as side2
#################################################################################################################################################################################################################################################################################################################################################################################################
ch032_1side_1__2side_all = [
    L8_0side.ch032_0side, L8_1side.ch032_1side_1,
    side2.ch032_1side_1__2side_1,
]
ch032_1side_2__2side_all = [
    L8_0side.ch032_0side, L8_1side.ch032_1side_1,
    side2.ch032_1side_2__2side_1, side2.ch032_1side_2__2side_2,
]
ch032_1side_3__2side_all = [
    L8_0side.ch032_0side, L8_1side.ch032_1side_1,
    side2.ch032_1side_3__2side_1, side2.ch032_1side_3__2side_2, side2.ch032_1side_3__2side_3,
]
ch032_1side_4__2side_all = [
    L8_0side.ch032_0side, L8_1side.ch032_1side_1,
    side2.ch032_1side_4__2side_1, side2.ch032_1side_4__2side_2, side2.ch032_1side_4__2side_3,
    side2.ch032_1side_4__2side_4,
]
ch032_1side_5__2side_all = [
    L8_0side.ch032_0side, L8_1side.ch032_1side_1,
    side2.ch032_1side_5__2side_1, side2.ch032_1side_5__2side_2, side2.ch032_1side_5__2side_3,
    side2.ch032_1side_5__2side_4, side2.ch032_1side_5__2side_5,
]
ch032_1side_6__2side_all = [
    L8_0side.ch032_0side, L8_1side.ch032_1side_1,
    side2.ch032_1side_6__2side_1, side2.ch032_1side_6__2side_2, side2.ch032_1side_6__2side_3,
    side2.ch032_1side_6__2side_4, side2.ch032_1side_6__2side_5, side2.ch032_1side_6__2side_6,
]
ch032_1side_7__2side_all = [
    L8_0side.ch032_0side, L8_1side.ch032_1side_1,
    side2.ch032_1side_7__2side_1, side2.ch032_1side_7__2side_2, side2.ch032_1side_7__2side_3,
    side2.ch032_1side_7__2side_4, side2.ch032_1side_7__2side_5, side2.ch032_1side_7__2side_6,
    side2.ch032_1side_7__2side_7,
]
ch032_1side_8__2side_all = [
    L8_0side.ch032_0side, L8_1side.ch032_1side_1,
    side2.ch032_1side_8__2side_1, side2.ch032_1side_8__2side_2, side2.ch032_1side_8__2side_3,
    side2.ch032_1side_8__2side_4, side2.ch032_1side_8__2side_5, side2.ch032_1side_8__2side_6,
    side2.ch032_1side_8__2side_7, side2.ch032_1side_8__2side_8,
]
ch032_1side_9__2side_all = [
    L8_0side.ch032_0side, L8_1side.ch032_1side_1,
    side2.ch032_1side_9__2side_1, side2.ch032_1side_9__2side_2, side2.ch032_1side_9__2side_3,
    side2.ch032_1side_9__2side_4, side2.ch032_1side_9__2side_5, side2.ch032_1side_9__2side_6,
    side2.ch032_1side_9__2side_7, side2.ch032_1side_9__2side_8, side2.ch032_1side_9__2side_9,
]
ch032_1side_all__2side_all = [
    ch032_1side_1__2side_all, ch032_1side_2__2side_all, ch032_1side_3__2side_all,
    ch032_1side_4__2side_all, ch032_1side_5__2side_all, ch032_1side_6__2side_all,
    ch032_1side_7__2side_all, ch032_1side_8__2side_all, ch032_1side_9__2side_all,
]
[ "s89334roy@yahoo.com.tw" ]
s89334roy@yahoo.com.tw
930d498a17db90721c75d65299f8f10b0549e799
4a88daea1741ed6b5b5088b8d1f2787093abdf80
/tests/test_gen_couroutine.py
16e6479cc1bbc400493e0975a2f4a4ccec8b11d3
[ "MIT" ]
permissive
Nigelzhf/torweb
8e0bfeb6793ae9e50230e6b7b6284ec5e6da0d57
6859a878c126addb74410e872307ac9399f20eb7
refs/heads/master
2021-01-12T14:16:10.474945
2016-10-03T20:47:25
2016-10-03T20:47:25
69,928,142
0
1
null
2016-10-04T02:25:34
2016-10-04T02:25:33
null
UTF-8
Python
false
false
414
py
#encoding:utf-8
import sys, os
sys.path.append(os.path.dirname(sys.path[0]))

from tornado.httpclient import AsyncHTTPClient
from tornado import gen
import tornado.ioloop


@gen.coroutine
def fetch_coroutine(url):
    http_client = AsyncHTTPClient()
    response = yield http_client.fetch(url)
    print('test')
    print(response)

# fetch_coroutine('http://sxu.today')
tornado.ioloop.IOLoop.instance().start()
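Note that as the script stands, the coroutine call is commented out, so the IOLoop starts with nothing scheduled. A minimal sketch of actually driving the coroutine once, using Tornado's standard run_sync helper in place of the bare start() call (the URL is taken from the commented-out line):

# Hypothetical driver, not in the original file: run the coroutine once and exit.
tornado.ioloop.IOLoop.current().run_sync(lambda: fetch_coroutine('http://sxu.today'))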
[ "jmpews@gmail.com" ]
jmpews@gmail.com
8edd25a05553c2dfeaaf20dd6357cab4c4a87d07
4b5173af602439cdca0db985dcbdd25aef9b7a6b
/bin/topology-json-connector.py
fb1c33926b316f0d4ed9f1c45d8fbaeac5353af5
[]
no_license
kevangel79/argo-egi-connectors
a3d4bf502708cdce08acf01b5c58812042e4d201
dff42c3d4e5e7ea31ca839dd93f975ea27f4ffda
refs/heads/master
2022-02-23T15:01:13.081358
2022-02-15T14:54:05
2022-02-15T14:54:05
250,012,827
0
0
null
2020-03-25T15:14:01
2020-03-25T15:14:00
null
UTF-8
Python
false
false
7,746
py
#!/usr/bin/python3

import argparse
import os
import sys
import json

import uvloop
import asyncio

from argo_egi_connectors.io.http import SessionWithRetry
from argo_egi_connectors.exceptions import ConnectorHttpError, ConnectorParseError
from argo_egi_connectors.io.webapi import WebAPI
from argo_egi_connectors.io.avrowrite import AvroWriter
from argo_egi_connectors.io.statewrite import state_write
from argo_egi_connectors.log import Logger
from argo_egi_connectors.config import Global, CustomerConf
from argo_egi_connectors.utils import filename_date, datestamp, date_check
from argo_egi_connectors.parse.flat_topology import ParseFlatEndpoints, ParseContacts
from argo_egi_connectors.mesh.contacts import attach_contacts_topodata

from urllib.parse import urlparse

logger = None

globopts = {}
custname = ''


def is_feed(feed):
    data = urlparse(feed)
    if not data.netloc:
        return False
    else:
        return True


async def send_webapi(webapi_opts, data, topotype, fixed_date=None):
    webapi = WebAPI(sys.argv[0], webapi_opts['webapihost'],
                    webapi_opts['webapitoken'], logger,
                    int(globopts['ConnectionRetry'.lower()]),
                    int(globopts['ConnectionTimeout'.lower()]),
                    int(globopts['ConnectionSleepRetry'.lower()]),
                    date=fixed_date)
    await webapi.send(data, topotype)


def get_webapi_opts(cglob, confcust):
    webapi_custopts = confcust.get_webapiopts()
    webapi_opts = cglob.merge_opts(webapi_custopts, 'webapi')
    webapi_complete, missopt = cglob.is_complete(webapi_opts, 'webapi')
    if not webapi_complete:
        logger.error('Customer:%s %s options incomplete, missing %s' % (logger.customer, 'webapi', ' '.join(missopt)))
        raise SystemExit(1)
    return webapi_opts


async def fetch_data(feed):
    remote_topo = urlparse(feed)
    session = SessionWithRetry(logger, custname, globopts)
    res = await session.http_get('{}://{}{}'.format(remote_topo.scheme, remote_topo.netloc, remote_topo.path))
    return res


def parse_source_topo(res, uidservtype, fetchtype):
    # group_groups, group_endpoints = ParseEoscTopo(logger, res, uidservtype, fetchtype).get_data()
    topo = ParseFlatEndpoints(logger, res, custname, uidservtype, fetchtype, scope=custname)
    group_groups = topo.get_groupgroups()
    group_endpoints = topo.get_groupendpoints()

    return group_groups, group_endpoints


async def write_state(confcust, fixed_date, state):
    cust = list(confcust.get_customers())[0]
    jobstatedir = confcust.get_fullstatedir(globopts['InputStateSaveDir'.lower()], cust)
    fetchtype = confcust.get_topofetchtype()
    if fixed_date:
        await state_write(sys.argv[0], jobstatedir, state,
                          globopts['InputStateDays'.lower()],
                          fixed_date.replace('-', '_'))
    else:
        await state_write(sys.argv[0], jobstatedir, state,
                          globopts['InputStateDays'.lower()])


def write_avro(confcust, group_groups, group_endpoints, fixed_date):
    custdir = confcust.get_custdir()
    if fixed_date:
        filename = filename_date(logger, globopts['OutputTopologyGroupOfGroups'.lower()], custdir, fixed_date.replace('-', '_'))
    else:
        filename = filename_date(logger, globopts['OutputTopologyGroupOfGroups'.lower()], custdir)
    avro = AvroWriter(globopts['AvroSchemasTopologyGroupOfGroups'.lower()], filename)
    ret, excep = avro.write(group_groups)
    if not ret:
        logger.error('Customer:%s : %s' % (logger.customer, repr(excep)))
        raise SystemExit(1)

    if fixed_date:
        filename = filename_date(logger, globopts['OutputTopologyGroupOfEndpoints'.lower()], custdir, fixed_date.replace('-', '_'))
    else:
        filename = filename_date(logger, globopts['OutputTopologyGroupOfEndpoints'.lower()], custdir)
    avro = AvroWriter(globopts['AvroSchemasTopologyGroupOfEndpoints'.lower()], filename)
    ret, excep = avro.write(group_endpoints)
    if not ret:
        logger.error('Customer:%s : %s' % (logger.customer, repr(excep)))
        raise SystemExit(1)


def main():
    global logger, globopts, confcust

    parser = argparse.ArgumentParser(description="""Fetch and construct entities from EOSC-PORTAL feed""")
    parser.add_argument('-c', dest='custconf', nargs=1, metavar='customer.conf',
                        help='path to customer configuration file', type=str, required=False)
    parser.add_argument('-g', dest='gloconf', nargs=1, metavar='global.conf',
                        help='path to global configuration file', type=str, required=False)
    parser.add_argument('-d', dest='date', metavar='YEAR-MONTH-DAY',
                        help='write data for this date', type=str, required=False)
    args = parser.parse_args()
    group_endpoints, group_groups = list(), list()
    logger = Logger(os.path.basename(sys.argv[0]))

    fixed_date = None
    if args.date and date_check(args.date):
        fixed_date = args.date

    confpath = args.gloconf[0] if args.gloconf else None
    cglob = Global(sys.argv[0], confpath)
    globopts = cglob.parse()

    confpath = args.custconf[0] if args.custconf else None
    confcust = CustomerConf(sys.argv[0], confpath)
    confcust.parse()
    confcust.make_dirstruct()
    confcust.make_dirstruct(globopts['InputStateSaveDir'.lower()])

    global custname
    custname = confcust.get_custname()

    # safely assume here one customer defined in customer file
    cust = list(confcust.get_customers())[0]
    jobstatedir = confcust.get_fullstatedir(globopts['InputStateSaveDir'.lower()], cust)
    fetchtype = confcust.get_topofetchtype()[0]

    state = None
    logger.customer = custname
    uidservtype = confcust.get_uidserviceendpoints()
    topofeed = confcust.get_topofeed()

    loop = uvloop.new_event_loop()
    asyncio.set_event_loop(loop)

    try:
        if is_feed(topofeed):
            res = loop.run_until_complete(fetch_data(topofeed))
            group_groups, group_endpoints = parse_source_topo(res, uidservtype, fetchtype)
            contacts = ParseContacts(logger, res, uidservtype, is_csv=False).get_contacts()
            attach_contacts_topodata(logger, contacts, group_endpoints)
        else:
            try:
                with open(topofeed) as fp:
                    js = json.load(fp)
                    group_groups, group_endpoints = parse_source_topo(js, uidservtype, fetchtype)
            except IOError as exc:
                logger.error('Customer:%s : Problem opening %s - %s' % (logger.customer, topofeed, repr(exc)))

        loop.run_until_complete(
            write_state(confcust, fixed_date, True)
        )

        webapi_opts = get_webapi_opts(cglob, confcust)

        numge = len(group_endpoints)
        numgg = len(group_groups)

        # send concurrently to WEB-API in coroutines
        if eval(globopts['GeneralPublishWebAPI'.lower()]):
            loop.run_until_complete(
                asyncio.gather(
                    send_webapi(webapi_opts, group_groups, 'groups', fixed_date),
                    send_webapi(webapi_opts, group_endpoints, 'endpoints', fixed_date)
                )
            )

        if eval(globopts['GeneralWriteAvro'.lower()]):
            write_avro(confcust, group_groups, group_endpoints, fixed_date)

        logger.info('Customer:' + custname + ' Fetched Endpoints:%d' % (numge) + ' Groups(%s):%d' % (fetchtype, numgg))

    except (ConnectorHttpError, ConnectorParseError, KeyboardInterrupt) as exc:
        logger.error(repr(exc))
        loop.run_until_complete(
            write_state(confcust, fixed_date, False)
        )


if __name__ == '__main__':
    main()
[ "daniel.vrcic@gmail.com" ]
daniel.vrcic@gmail.com
8dd0e8db5c106dc0a4a867b92314ee3975bfac32
ac64fda7f1bfc92f7897efd60b8f3f0aeb22b4d7
/syntactic_mutations/cifar/mutants/mutant85.py
36cc2dc7a92ebacc6a2c035d43c75d2a8980ffb9
[]
no_license
dlfaults/mutation_operators_evaluation
ea7f33459ba7bcf7d70092d9db8b40f9b338d516
7d1ff30e901931a46bf8908e9bb05cae3daa5f0f
refs/heads/master
2020-12-27T15:45:07.262012
2020-02-03T12:22:01
2020-02-03T12:22:01
237,955,342
1
0
null
null
null
null
UTF-8
Python
false
false
1,737
py
import keras
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.datasets import cifar10
from keras.layers import Dense, Activation, Flatten, Dropout, BatchNormalization


def train_model(x_train, y_train, x_test, y_test, model_name):
    num_classes = 10
    batch_size = 32
    epochs = 25
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:]))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-06)
    # The bare `pass` below likely stands in for a statement (presumably model.compile)
    # deleted by the syntactic mutation operator; it is kept as-is since this is a mutant file.
    pass

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(x_test, y_test),
              shuffle=True)

    model.save(model_name)
    scores = model.evaluate(x_test, y_test, verbose=1)
    return (scores[0], scores[1])
[ "gunel71@gmail.com" ]
gunel71@gmail.com
3f867e03ffc111fa3e897382bcaacef5230d16eb
5442e70e9a68b0a76601082e9aa307674fd4fb95
/evaluation/draw.py
a8f5d430416dfc0ae686c2c9b91b4ba0ed113967
[]
no_license
sycophant-stone/mmdetection_2.0
1871e85311fee3b81d3b1415787c837aadf6b543
6caabdcf81e7b003e612d053c94bd6c993dd5c3e
refs/heads/master
2020-09-09T10:49:42.961409
2019-12-18T14:01:57
2019-12-18T14:01:57
221,426,479
0
0
null
null
null
null
UTF-8
Python
false
false
1,120
py
import cv2
import numpy as np  # needed for np.arange below; missing from the original imports


def drawline(img, pt1, pt2, color, thickness=1, style='dotted', gap=10):
    dist = ((pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2) ** .5
    pts = []
    for i in np.arange(0, dist, gap):
        r = i / dist
        x = int((pt1[0] * (1 - r) + pt2[0] * r) + .5)
        y = int((pt1[1] * (1 - r) + pt2[1] * r) + .5)
        p = (x, y)
        pts.append(p)

    if style == 'dotted':
        for p in pts:
            cv2.circle(img, p, thickness, color, -1)
    else:
        s = pts[0]
        e = pts[0]
        i = 0
        for p in pts:
            s = e
            e = p
            if i % 2 == 1:
                cv2.line(img, s, e, color, thickness)
            i += 1


def drawpoly(img, pts, color, thickness=1, style='dotted'):
    s = pts[0]
    e = pts[0]
    pts.append(pts.pop(0))
    for p in pts:
        s = e
        e = p
        drawline(img, s, e, color, thickness, style)


def drawrect(img, pt1, pt2, color, thickness=1, style='dotted'):
    pts = [pt1, (pt2[0], pt1[1]), pt2, (pt1[0], pt2[1])]
    drawpoly(img, pts, color, thickness, style)
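A hedged usage sketch of the helpers above (canvas size, coordinates, and filename are invented for illustration):

canvas = np.zeros((200, 300, 3), dtype=np.uint8)                  # blank black image
drawrect(canvas, (20, 20), (280, 180), (0, 255, 0), 1, 'dotted')  # dotted green rectangle
cv2.imwrite('dotted_rect.png', canvas)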
[ "kawayi_rendroid@163.com" ]
kawayi_rendroid@163.com
b8db0ef08fd5c0b20c1bdbe3988364316d66243b
35044ecc79fad5d1ad35a4f713ff22849c17212c
/hw1/hw1_1/part2/main_3.py
d54310ff19a1bb2f192f12e9c432975f5330478e
[]
no_license
pohanchi/MLDS2018SPRING
8235f9bfe38eb861be24b9cdb2c0a7d30afdea21
694e5f07911ba0f780fb01bb47701dac90eeb7d2
refs/heads/master
2020-05-09T09:55:38.257016
2019-04-12T16:23:17
2019-04-12T16:23:17
166,774,093
0
0
null
null
null
null
UTF-8
Python
false
false
2,511
py
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
import numpy as np
import pickle
import random
from model_2 import *  # expected to provide CNN_3 and bring tf into scope


def unpickle(file):
    with open(file, 'rb') as fo:
        dict_ = pickle.load(fo, encoding='bytes')
    return dict_


def calculate():
    vars = tf.trainable_variables()
    for var in vars:
        print(var)
    all_number = sum([np.prod(var.get_shape()) for var in vars])
    print('you use %d parameters' % (all_number))
    return


if __name__ == '__main__':
    data_list = list()
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    train_img = mnist.train.images
    train_label = mnist.train.labels
    test_img = mnist.test.images
    test_label = mnist.test.labels
    index = list(range(len(train_img)))
    random.shuffle(index)
    # print(data_list[0:3])
    epoch = 10
    batch_size = 100
    num_batch = int(55000 / 100)
    CNN = CNN_3()
    loss = CNN.loss
    learning_rates = [random.uniform(0.001, 0.005), random.uniform(0.00001, 0.00005),
                      random.uniform(0.0001, 0.0005), random.uniform(0.000001, 0.000005)]
    for learning_rate in learning_rates:
        CNN.learning_rate = learning_rate
        Train_step = tf.train.AdamOptimizer(CNN.learning_rate).minimize(loss)
        loss_array = []
        step = 0
        step_array = []
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        for i in range(epoch):
            random.shuffle(index)
            train_img = train_img[index]
            train_label = train_label[index]
            for j in range(num_batch):
                x_data = train_img[j * 100:(j + 1) * 100]
                y_data = train_label[j * 100:(j + 1) * 100]
                _ = sess.run(Train_step, feed_dict={CNN.x: x_data, CNN.y: y_data})
                if (j + 1) % 55 == 0:
                    loss_show = sess.run(loss, feed_dict={CNN.x: x_data, CNN.y: y_data})
                    print("epoch %d num_batch %2d loss = %.5f" % (i, j, loss_show))
                    loss_array += [loss_show]
                    step_array += [step]
                    step += 1
        plt.plot(step_array, loss_array, label='learning_rate= {}'.format(learning_rate))
        sess.close()
    plt.yscale('symlog')
    plt.title('CNN_3 Loss on training')
    plt.legend(loc='upper left')
    plt.style.use('ggplot')
    plt.savefig('three_hidden_layer_loss_step.png')
    plt.show()
[ "aapp1420@gmail.com" ]
aapp1420@gmail.com
9f5d7e03d338bcee649e0798c30ea3227c1c60cc
b15a9d9c7374c4a1fa5ec3ef63603a8c57e8681f
/Design-Patterns-Python/bridge/circle.py
cd3638d219be793d909728245bf9ccdec57095a8
[]
no_license
gohils/zemr_notebook
3f7490ef7a2559655746c3e2e0dbfb835a83891e
00d53cea9970df44160c51e6ad2bdeadfae2c91f
refs/heads/master
2023-08-04T14:32:35.428016
2023-07-20T11:51:08
2023-07-20T11:51:08
222,027,451
0
0
null
null
null
null
UTF-8
Python
false
false
325
py
# pylint: disable=too-few-public-methods
"A Circle Abstraction"
from interface_shape import IShape


class Circle(IShape):
    "The Circle is a Refined Abstraction"

    def __init__(self, implementer):
        self.implementer = implementer()

    def draw(self):
        self.implementer.draw_implementation()
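For context, a minimal sketch of the Bridge collaborators this class assumes; the real interface_shape module and implementer classes live elsewhere in the repo, so the names and bodies below are illustrative only:

class IShape:
    "Stand-in for interface_shape.IShape, the Abstraction's interface"
    def draw(self):
        raise NotImplementedError

class DottedRenderer:
    "A hypothetical Concrete Implementer"
    def draw_implementation(self):
        print("drawing a circle with a dotted outline")

# Circle takes the implementer *class* (it instantiates it in __init__), so
# Circle(DottedRenderer).draw() would print the line above.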
[ "noreply@github.com" ]
gohils.noreply@github.com
7098d0bd349101c9033469adeb130241fc910b6c
f88f900c0384f6da82eeb749371ad44115527700
/course-book/04-basic-comp/0410-img-copy2.py
2688bf316082da1a8c629fe6e1a12eaf1bf4b44d
[]
no_license
aaron-kr/learning-opencv
eff382e8f0c822400f765451d57b192a63cd1b74
158239f0140569aec519fc1fbf255c54ef2567d2
refs/heads/main
2023-08-21T11:02:49.775425
2021-10-27T00:04:01
2021-10-27T00:04:01
null
0
0
null
null
null
null
UTF-8
Python
false
false
429
py
# 0410.py
import cv2
import numpy as np

src = cv2.imread('../../img/spirit-week.jpg', cv2.IMREAD_GRAYSCALE)
shape = src.shape[0], src.shape[1], 3
dst = np.zeros(shape, dtype=np.uint8)

# dst[:,:,0] = src  # B - Blue channel
# dst[:,:,1] = src  # G - Green channel
dst[:,:,2] = src  # R - Red channel
dst[100:400, 200:300, :] = [255, 255, 255]

cv2.imshow('src', src)
cv2.imshow('dst', dst)
cv2.waitKey()
cv2.destroyAllWindows()
[ "jekkilekki@gmail.com" ]
jekkilekki@gmail.com
76f963318fc279d6b0842011b2404e059f5081d7
fb1e852da0a026fb59c8cb24aeb40e62005501f1
/simlm/misc/compute_metrics_marco.py
02ac181cd1a10310b7a44ccad6784c96a3b8f2b3
[ "LGPL-2.1-or-later", "LicenseRef-scancode-free-unknown", "Apache-2.0", "LicenseRef-scancode-unknown-license-reference", "MIT" ]
permissive
microsoft/unilm
134aa44867c5ed36222220d3f4fd9616d02db573
b60c741f746877293bb85eed6806736fc8fa0ffd
refs/heads/master
2023-08-31T04:09:05.779071
2023-08-29T14:07:57
2023-08-29T14:07:57
198,350,484
15,313
2,192
MIT
2023-08-19T11:33:20
2019-07-23T04:15:28
Python
UTF-8
Python
false
false
1,162
py
import json
import sys
import argparse

sys.path.insert(0, './src')

from logger_config import logger
from metrics import compute_mrr, trec_eval
from utils import save_json_to_file
from data_utils import load_qrels, load_msmarco_predictions

parser = argparse.ArgumentParser(description='compute metrics for ms-marco predictions')
parser.add_argument('--in-path', default='', type=str, metavar='N',
                    help='path to predictions in msmarco output format')
parser.add_argument('--qrels', default='./data/msmarco/dev_qrels.txt', type=str, metavar='N',
                    help='path to qrels')
args = parser.parse_args()
logger.info('Args={}'.format(json.dumps(args.__dict__, ensure_ascii=False, indent=4)))


def main():
    qrels = load_qrels(path=args.qrels)
    predictions = load_msmarco_predictions(args.in_path)

    all_metrics = trec_eval(qrels=qrels, predictions=predictions)
    all_metrics['mrr'] = compute_mrr(qrels=qrels, predictions=predictions)

    logger.info(json.dumps(all_metrics, ensure_ascii=False, indent=4))
    save_json_to_file(all_metrics, '{}.metrics.json'.format(args.in_path))


if __name__ == '__main__':
    main()
[ "wangliangpeking@gmail.com" ]
wangliangpeking@gmail.com
e8034ae0e9e03f800a93d0b36f2691dfbdee9972
f34e34e4a1fc4b606781633370ff5b9493446b89
/HackerRank/Problems/HR python staircase.py
fffe84b83deb2d0878b9309965bbbe9c1da7a0fa
[]
no_license
YeasirArafatRatul/problem_solving
b44deddb63dc9ff5ec285e03c13720523473906d
98267865a90483489f792bdb448319dbf0cc9a81
refs/heads/master
2023-02-13T17:59:35.863750
2021-01-05T04:01:13
2021-01-05T04:01:13
219,266,174
1
0
null
null
null
null
UTF-8
Python
false
false
376
py
def staircase(n):
    for i in range(1, n + 1):
        # print '#' i times, so the bar grows with each iteration,
        # right-justified to a total width of n
        print(('#' * i).rjust(n, ' '))


n = int(input())
staircase(n)

# str.rjust(width[, fillchar])
# Parameters
#   width    - the total string length after padding
#   fillchar - the filler character; default is a space
[ "yeasirarafat.ratul@gmail.com" ]
yeasirarafat.ratul@gmail.com
47a70babb3db84356361acf60b5e6d54a50c94ce
38ba13df9ea6e53c7b924cad1f3bea2de59c7a6a
/nibbler/trading/collectors/Watcher.py
19ea6b0adbe54c634a51182074af580ff6b54745
[]
no_license
JizzFactoryEmployee/nibblerppman
0fbc1ce662cf8b4868b41a97291250fae29dc41d
160e557578a3e8a614450354f6ade233d32b052f
refs/heads/master
2022-11-14T01:10:31.743000
2020-07-04T01:21:52
2020-07-04T01:21:52
273,835,770
0
1
null
null
null
null
UTF-8
Python
false
false
772
py
import time

from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

import nibbler.trading.collectors.futures_collector as futures


class MyHandler(FileSystemEventHandler):
    def on_modified(self, event):
        event_list = event.src_path.split('\\')
        filename = event_list[-1]
        print(filename)
        if "BTC" in filename:
            print('success')


def runner():
    event_handler = MyHandler()
    observer = Observer()
    observer.schedule(event_handler,
                      path=r'C:\Users\James\Documents\GitHub\Nibbler\nibbler\trading\collectors',
                      recursive=False)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()


runner()
[ "52958901+JizzFactoryEmployee@users.noreply.github.com" ]
52958901+JizzFactoryEmployee@users.noreply.github.com
fde45c72b38be988de28cfb02d09ce268e6a0ac8
c29ee57cdba30d0099c55d7b138a361a4964af4b
/apps/blogs/urls.py
a6a724f8cca29d67b19a8b9d2ed788dfe37cdafc
[]
no_license
bakker4444/django_app
ea26be5fa7789a083b199d5094767e16ec0fa63e
6b1cd0cfc55be11f31cdc5970d4787f15828ac94
refs/heads/master
2020-03-10T10:06:17.177952
2018-04-13T00:30:54
2018-04-13T00:30:54
129,322,929
0
0
null
null
null
null
UTF-8
Python
false
false
345
py
from django.conf.urls import url
from . import views

urlpatterns = [
    url(r'^$', views.index),
    url(r'^new/', views.new),
    url(r'^create/', views.create),
    url(r'^(?P<number>[0-9]+)/$', views.show),
    url(r'^(?P<number>[0-9]+)/edit/', views.edit),
    url(r'^(?P<number>[0-9]+)/delete/', views.destroy),
]
[ "bakker4444@gmail.com" ]
bakker4444@gmail.com
cdf12814d08e0941202a5da5abdf6a067a3b34f7
36126f91a2d5903483b84ba2d8be77e160803058
/examples/python/empirical_transition_matrix.py
61c92ac81e1bbe433c6a5a7ec9d083e8e9e8aaca
[ "Apache-2.0" ]
permissive
open-risk/transitionMatrix
9962bb2656eb637ba56afc3adecf42bbe68f9593
d05e75cbc251f01842dd8c5ce225894b988f4d99
refs/heads/master
2023-03-05T08:01:20.816425
2023-02-22T20:46:38
2023-02-22T20:46:38
110,365,127
73
29
Apache-2.0
2022-12-08T11:37:12
2017-11-11T17:25:08
Python
UTF-8
Python
false
false
5,000
py
# encoding: utf-8

# (c) 2017-2022 Open Risk, all rights reserved
#
# TransitionMatrix is licensed under the Apache 2.0 license a copy of which is included
# in the source distribution of TransitionMatrix. This is notwithstanding any licenses of
# third-party software included in this distribution. You may not use this file except in
# compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
# limitations under the License.

""" Example workflows using transitionMatrix to estimate an empirical transition matrix
from duration type data. The datasets are produced in examples/generate_synthetic_data.py

"""

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

import transitionMatrix as tm
from transitionMatrix import source_path
from transitionMatrix.estimators import aalen_johansen_estimator as aj
from transitionMatrix.utils.converters import datetime_to_float

dataset_path = source_path + "datasets/"

# Example 1: Credit Rating Migration example
# Example 2: Simple 2x2 Matrix for testing
# Example 3: Credit Rating Migration example with timestamps in raw date format
example = 3

# Step 1
# Load the data set into a pandas frame
# Make sure state is read as a string and not as integer
# Second synthetic data example:
# n entities with ~10 observations each, [0,1] state, 50%/50% transition matrix
print("> Step 1: Load the data set into a pandas frame")
if example == 1:
    data = pd.read_csv(dataset_path + 'synthetic_data7.csv', dtype={'State': str})
elif example == 2:
    data = pd.read_csv(dataset_path + 'synthetic_data8.csv', dtype={'State': str})
elif example == 3:
    data = pd.read_csv(dataset_path + 'synthetic_data9.csv', parse_dates=True)
    # convert datetime data to floats, return also the observation window data bounds
    bounds, data = datetime_to_float(data)
    print('Start and End dates', bounds)

sorted_data = data.sort_values(['Time', 'ID'], ascending=[True, True])
print(sorted_data.head(5))
print(sorted_data.describe())

# Step 2
# Describe and validate the State Space against the data
print("> Step 2: Describe and validate the State Space against the data")
# We insert the expected labels of the state space
if example == 1 or example == 3:
    definition = [('0', "AAA"), ('1', "AA"), ('2', "A"), ('3', "BBB"),
                  ('4', "BB"), ('5', "B"), ('6', "CCC"), ('7', "D")]
elif example == 2:
    definition = [('0', "G"), ('1', "B")]

myState = tm.StateSpace(definition)
myState.describe()

# We validate that indeed the data set conforms to our expectations
labels = {'State': 'From'}
print(myState.validate_dataset(dataset=sorted_data, labels=labels))
labels = {'State': 'To'}
print(myState.validate_dataset(dataset=sorted_data, labels=labels))

# Step 3
# Estimate matrices using the Aalen-Johansen estimator
print("> Step 3: Estimate matrices using the Aalen-Johansen estimator")
myEstimator = aj.AalenJohansenEstimator(states=myState)
# labels = {'Timestamp': 'Time', 'From_State': 'From', 'To_State': 'To', 'ID': 'ID'}
labels = {'Time': 'Time', 'From': 'From', 'To': 'To', 'ID': 'ID'}
etm, times = myEstimator.fit(sorted_data, labels=labels)

# Step 4
# Print the cumulative computed matrix
print("> Step 4: Print the cumulative computed matrix")
print(etm[:, :, -1])

# Step 5
# Create a visualization of the transition rates
if example == 1 or example == 3:
    # Now let's plot a collection of curves for all ratings
    print("> Plot the transition curves")
    Periods = 10
    Ratings = 8
    m = 4
    n = 2
    f, axarr = plt.subplots(m, n)
    f.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.90, wspace=0.0, hspace=0.1)
    # plt.style.use(['ggplot'])
    for ri in range(0, Ratings):
        axj = int(ri / 2)
        axi = ri % 2
        print(ri, axj, axi)
        curves = []
        for rf in range(0, Ratings):
            cPD = etm[ri, rf, :]
            curves.append(cPD)
            # axarr[axj, axi].set_aspect(5)
            axarr[axj, axi].set_ylabel('State ' + str(ri), fontsize=12)
            axarr[axj, axi].set_xlabel("Time")
            axarr[axj, axi].plot(times[1:], curves[rf], label="RI=%d" % (rf,))
            # axarr[axj, axi].set_xticks(range(10), minor=False)
            axarr[axj, axi].set_yticks(np.linspace(0, 1, 5), minor=False)
            # axarr[axj, axi].yaxis.grid(True, which='minor')
            axarr[axj, axi].margins(y=0.05, x=0.05)
            axarr[axj, axi].autoscale()
            axarr[axj, axi].grid(True)
    # plt.tight_layout()
    f.suptitle("Multi-period Transition Probabilities", fontsize=12)
    # plt.title("Multi-period Transition Probabilities")
    plt.savefig("transition_probabilities.png")
    plt.show()


def main():
    print("Done")


if __name__ == "__main__":
    main()
[ "openrisk@outlook.com" ]
openrisk@outlook.com
0cef33f9b804fb8d06480356283a48afa11a4d9c
905020fce75b4b63517ec31c601e721f5c260cd1
/Карточные расклады.py
1853c600b2cf5ef636e2296518a51051750b2fef
[]
no_license
Dimaed90800/Python_Y
7858ad46309281a89c5c1e83a0f09030996182a4
04092b854605cb05df439eeeb52003e585bb5a29
refs/heads/main
2023-01-24T04:11:17.858281
2020-11-17T20:42:45
2020-11-17T20:42:45
313,731,672
0
0
null
null
null
null
UTF-8
Python
false
false
639
py
import itertools

# Card nominals (2..10, jack, queen, king, ace) and suits (spades, clubs,
# diamonds, hearts); the Russian strings are kept as-is because the expected
# output format depends on them.
nominals = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'валет', 'дама', 'король', 'туз']
suits = ['пик', 'треф', 'бубен', 'червей']

# itertools.product already yields each (nominal, suit) pair exactly once, so
# no de-duplication is needed. (The original checked `list(elem).reverse() not
# in comb`, but list.reverse() returns None, so that test was always true.)
comb = sorted(' '.join(pair) for pair in itertools.product(nominals, suits))

three = sorted(itertools.combinations(comb, 3))
for hand in three:
    # keep hands with at least one hearts card ('червей') and at least one
    # picture card or ace (nominals[9:] = jack, queen, king, ace)
    if any(card.split()[1] == 'червей' for card in hand) and \
            any(card.split()[0] in nominals[9:] for card in hand):
        print(', '.join(hand))
[ "noreply@github.com" ]
Dimaed90800.noreply@github.com
2c735c06acb15668edfe36a3cf848f1a07cb956b
a418afb0ec01cb8f41d9fcaf167f59b0f05aea90
/Hackerrank/Python Problems/itertools.combinations.py
221a322634997b5c9b8b4e304a3db940458757d5
[]
no_license
manjurulhoque/problem-solving
95d05960e83edde5721b73348270b6123fd3bf12
e5547a8f5593141ac87f9797ddb25b2467c44e57
refs/heads/master
2022-08-08T06:44:21.839224
2022-07-25T09:32:26
2022-07-25T09:32:26
209,125,784
2
0
null
null
null
null
UTF-8
Python
false
false
170
py
import itertools

x, y = input().split()
for i in range(1, int(y) + 1):
    out = list(itertools.combinations(sorted(x), i))
    for n in out:
        print(*n, sep='')
[ "rumimanzurulhoque@gmail.com" ]
rumimanzurulhoque@gmail.com
9e27115696afc8fea8212d51a643e9d1c2702dc1
b15a9d9c7374c4a1fa5ec3ef63603a8c57e8681f
/Design-Patterns-Python/flyweight/client.py
b7714e4c7fe83aaeb7505226e720f0509797c36d
[]
no_license
gohils/zemr_notebook
3f7490ef7a2559655746c3e2e0dbfb835a83891e
00d53cea9970df44160c51e6ad2bdeadfae2c91f
refs/heads/master
2023-08-04T14:32:35.428016
2023-07-20T11:51:08
2023-07-20T11:51:08
222,027,451
0
0
null
null
null
null
UTF-8
Python
false
false
947
py
"The Flyweight Use Case Example" from table import Table from flyweight_factory import FlyweightFactory TABLE = Table(3, 3) TABLE.rows[0].columns[0].data = "abra" TABLE.rows[0].columns[1].data = "112233" TABLE.rows[0].columns[2].data = "cadabra" TABLE.rows[1].columns[0].data = "racadab" TABLE.rows[1].columns[1].data = "12345" TABLE.rows[1].columns[2].data = "332211" TABLE.rows[2].columns[0].data = "cadabra" TABLE.rows[2].columns[1].data = "445566" TABLE.rows[2].columns[2].data = "aa 22 bb" TABLE.rows[0].columns[0].justify = 1 TABLE.rows[1].columns[0].justify = 1 TABLE.rows[2].columns[0].justify = 1 TABLE.rows[0].columns[2].justify = 2 TABLE.rows[1].columns[2].justify = 2 TABLE.rows[2].columns[2].justify = 2 TABLE.rows[0].columns[1].width = 15 TABLE.rows[1].columns[1].width = 15 TABLE.rows[2].columns[1].width = 15 TABLE.draw() print(f"FlyweightFactory has {FlyweightFactory.get_count()} flyweights")
[ "noreply@github.com" ]
gohils.noreply@github.com
3c959eed8a79ca30fbcb647176095c291b99e320
4f510470b3093ab2c60f929221af82c79b121ca7
/linux/FinalKeyPress/smallplane.py
c63ffce47aa72b6d465ad959c7dcf92fe5873091
[]
no_license
q737645224/python3
ce98926c701214f0fc7da964af45ba0baf8edacf
4bfabe3f4bf5ba4133a16102c51bf079d500e4eb
refs/heads/master
2020-03-30T07:11:17.202996
2018-10-30T06:14:51
2018-10-30T06:14:51
150,921,088
2
0
null
null
null
null
UTF-8
Python
false
false
1,695
py
import tkinter

import mover
import config


class SmallPlane(mover.BaseMover):
    '''A moving enemy plane'''

    def __init__(self, root, canvas, position, x, y, tags):
        super().__init__(root, canvas, position, x, y, tags,
                         config.image_smallplane_width, config.image_smallplane_height, True)

        # step lengths of the mover
        self.steps = [config.step_length_smallplane_x, config.step_length_smallplane_y]
        # movement direction - downwards
        self.move_direction = [0, 1]

        # load the mover's background image
        self.bg_image_fullname = config.images_path + config.filename_smallplane + config.filename_suffix
        self.bg_image = tkinter.PhotoImage(file=self.bg_image_fullname)

        # # reset the number of lives
        # super().set_lives_num(config.lives_num_enemy)

    def exec_move(self):
        if self.nw[1] < config.window_boundary_row:
            # normal movement while inside the Y-axis boundary
            x = self.steps[0] * self.move_direction[0]
            y = self.steps[1] * self.move_direction[1]
            self.base_move(self.bg_image_tags, x, y)
        else:
            # outside the Y-axis boundary: wrap back to the top
            self.base_move(self.bg_image_tags, 0, -config.window_boundary_row)

    # get the death-animation images
    def get_dead_images(self):
        img = []
        if self.do_dead_play:
            for i in self.dead_image_index:
                image_fullname = config.images_path + config.filename_smallplane + str(i) + config.filename_suffix
                image = tkinter.PhotoImage(file=self.bg_image_fullname)
                img.append(image)
        return img
[ "764375224@qq.com" ]
764375224@qq.com
974ce89e9bb593af97847898b2977b214a4b7980
1fe8d4133981e53e88abf633046060b56fae883e
/venv/lib/python3.8/site-packages/tensorflow/python/autograph/pyct/testing/decorators 2.py
9e47729bef44976b842be804899c28aafffda65b
[]
no_license
Akira331/flask-cifar10
6c49db8485038731ce67d23f0972b9574746c7a7
283e7a2867c77d4b6aba7aea9013bf241d35d76c
refs/heads/master
2023-06-14T16:35:06.384755
2021-07-05T14:09:15
2021-07-05T14:09:15
382,864,970
0
0
null
null
null
null
UTF-8
Python
false
false
129
py
version https://git-lfs.github.com/spec/v1 oid sha256:bc496cdd81c37eb5f9cddd2250b6ebe37eae2739e5ee1b3228a97b6571bfaac1 size 1298
[ "business030301@gmail.com" ]
business030301@gmail.com
9f4459a0e78eb6578c2a9bc931224ac195ef575f
edf79f6964b15ea61faa9ecd70871d1ce776eda2
/Jupyter/first-python-notebook/lib/python3.6/site-packages/pandas/tseries/util.py
5934f5843736cc9afbff1a5dcc1c59f9eb8f2180
[ "MIT" ]
permissive
Roychenlei/LearnPython
326bf7d59ebec904623ea7102a1e07f1d7cb112b
eaf44787b86fe6da69dc9ba4e4c907884db57fda
refs/heads/master
2022-10-09T07:23:39.410429
2018-04-28T02:35:07
2018-04-28T02:35:07
115,184,834
0
1
MIT
2022-10-01T15:18:43
2017-12-23T09:19:47
Python
UTF-8
Python
false
false
3,286
py
import warnings

from pandas.compat import lrange
import numpy as np

from pandas.core.dtypes.common import _ensure_platform_int
from pandas.core.frame import DataFrame

import pandas.core.algorithms as algorithms


def pivot_annual(series, freq=None):
    """
    Deprecated. Use ``pivot_table`` instead.

    Group a series by years, taking leap years into account.

    The output has as many rows as distinct years in the original series,
    and as many columns as the length of a leap year in the units corresponding
    to the original frequency (366 for daily frequency, 366*24 for hourly...).

    The first column of the output corresponds to Jan. 1st, 00:00:00,
    while the last column corresponds to Dec. 31st, 23:59:59.

    Entries corresponding to Feb. 29th are masked for non-leap years.

    For example, if the initial series has a daily frequency, the 59th column
    of the output always corresponds to Feb. 28th, the 61st column to Mar. 1st,
    and the 60th column is masked for non-leap years.
    With an hourly initial frequency, the (59*24)th column of the output always
    corresponds to Feb. 28th 23:00, the (61*24)th column to Mar. 1st, 00:00, and
    the 24 columns between (59*24) and (61*24) are masked.

    If the original frequency is less than daily, the output is equivalent to
    ``series.convert('A', func=None)``.

    Parameters
    ----------
    series : Series
    freq : string or None, default None

    Returns
    -------
    annual : DataFrame
    """
    msg = "pivot_annual is deprecated. Use pivot_table instead"
    warnings.warn(msg, FutureWarning)

    index = series.index
    year = index.year
    years = algorithms.unique1d(year)

    if freq is not None:
        freq = freq.upper()
    else:
        freq = series.index.freq

    if freq == 'D':
        width = 366
        offset = np.asarray(index.dayofyear) - 1

        # adjust for leap year
        offset[(~isleapyear(year)) & (offset >= 59)] += 1

        columns = lrange(1, 367)
        # todo: strings like 1/1, 1/25, etc.?
    elif freq in ('M', 'BM'):
        width = 12
        offset = np.asarray(index.month) - 1
        columns = lrange(1, 13)
    elif freq == 'H':
        width = 8784
        grouped = series.groupby(series.index.year)
        defaulted = grouped.apply(lambda x: x.reset_index(drop=True))
        defaulted.index = defaulted.index.droplevel(0)
        offset = np.asarray(defaulted.index)
        offset[~isleapyear(year) & (offset >= 1416)] += 24
        columns = lrange(1, 8785)
    else:
        raise NotImplementedError(freq)

    flat_index = (year - years.min()) * width + offset
    flat_index = _ensure_platform_int(flat_index)

    values = np.empty((len(years), width))
    values.fill(np.nan)
    values.put(flat_index, series.values)

    return DataFrame(values, index=years, columns=columns)


def isleapyear(year):
    """
    Returns true if year is a leap year.

    Parameters
    ----------
    year : integer / sequence
        A given (list of) year(s).
    """
    msg = "isleapyear is deprecated. Use .is_leap_year property instead"
    warnings.warn(msg, FutureWarning)
    year = np.asarray(year)
    return np.logical_or(year % 400 == 0,
                         np.logical_and(year % 4 == 0, year % 100 > 0))
[ "roychenfly@gmail.com" ]
roychenfly@gmail.com
bce7d775325fcad89567eb94275ce095229d738a
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p02832/s307251036.py
36c65bb99ff02d537361c6ecedc39e7b50f138b1
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
198
py
n = int(input())
a = list(map(int, input().split()))

if 1 not in a:
    print(-1)
    exit()

x = 1
cnt = 0
for i in range(n):
    if a[i] == x:
        x += 1
    else:
        cnt += 1
print(cnt)
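A hedged walk-through with invented input, to show what the counter measures:

# n = 6, a = [3, 1, 4, 1, 5, 2]
# x starts at 1; the scan matches a[1] == 1 (x -> 2) and a[5] == 2 (x -> 3).
# Every non-matching element is counted, so cnt = 4 and the program prints 4,
# i.e. the minimum number of removals leaving the sequence 1, 2, 3, ... as a prefix run.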
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
7a4711d2854fc4a7a832a3fb5083a348ad0af491
84d581219b8065cf8936070a7f1e1766000378df
/vdj/__init__.py
f80e58d17b35d19c93aa96a3ab5bb85e8774f385
[ "MIT", "CC-BY-4.0" ]
permissive
RPGroup-PBoC/vdj_recombination
939e72539a134a61cc5aa8386b3a31f6de954d26
a59214f878968e5958915b56983b0f52a0a0483e
refs/heads/publication
2023-07-18T22:52:07.149767
2020-05-19T18:12:11
2020-05-19T18:12:11
187,516,995
0
0
MIT
2023-07-06T21:42:29
2019-05-19T19:05:04
HTML
UTF-8
Python
false
false
169
py
"""Top level package for VDJ utilities""" from . import viz from . import bayes from . import io __author__ = "Soichi Hirokawa and Griffin Chure" __version__ = "0.1.9"
[ "gchure@caltech.edu" ]
gchure@caltech.edu
e906d748fcf7530f9052fc37543b97a85ee771eb
183fa1746e80102391926064c1628bf63690026c
/Order/migrations/0001_initial.py
ae12317b8af5a2d9fa16a2af4e2549debe7875c4
[]
no_license
redbull2003/StoreMarketPlace
7454fdf06a27f754f33b41532ef2d717f313eb1a
b9e915da32f23ca087bd29e5d10fa9b9299f81d2
refs/heads/master
2023-06-06T03:53:22.766012
2021-06-24T16:30:37
2021-06-24T16:30:37
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,515
py
# Generated by Django 3.1.1 on 2021-06-24 06:56

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('Product', '0011_auto_20210623_1817'),
    ]

    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('is_paid', models.BooleanField(default=False)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='OrderItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.PositiveIntegerField()),
                ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_item', to='Order.order')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Product.product')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
[ "shayan.aimoradii@gmail.com" ]
shayan.aimoradii@gmail.com
a86ce8417607db70a904b3ff9754c6950ed06c89
faabe34af6297530617395bcc6811350765da847
/platforms/leetcode/NthDigit.py
6e67cbfe78c483aff5ee596ef9b57abc3fd7bb68
[]
no_license
pqnguyen/CompetitiveProgramming
44a542aea299bd553dd022a9e737e087285b8b6d
27330e7ff79c4ac883d7e1fcdf2f0d30939c3f78
refs/heads/master
2021-07-21T12:15:47.366599
2021-06-27T14:58:48
2021-06-27T14:58:48
132,837,831
0
0
null
null
null
null
UTF-8
Python
false
false
368
py
class Solution:
    def findNthDigit(self, n: int) -> int:
        length, start, count = 1, 1, 9
        while n > length * count:
            n -= length * count
            length += 1
            count *= 10
            start *= 10
        start += (n - 1) // length
        return int(str(start)[(n - 1) % length])


res = Solution().findNthDigit(15)
print(res)
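# Trace (added for illustration) for n = 15: the nine 1-digit numbers cover
# positions 1-9, so n becomes 6 within the 2-digit block (length=2, start=10).
# start += (6 - 1) // 2 -> 12, and digit index (6 - 1) % 2 = 1 picks the '2'
# in "12", so the call above prints 2.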
[ "pqnguyen1996@gmail.com" ]
pqnguyen1996@gmail.com
00a759bd71d11f942a7195e2ce3a0f9237ad4f89
eb91f8764a466c20d3ed53f0df43e2e91fc095ea
/HFhtml/migrations/0007_auto_20210106_2045.py
efdaed8cb955a4052bb217f5edcee04b059ca0bf
[]
no_license
blackw00d/HappyFamily
61733c46a2f2c26919a532e5c07ea143fabd14a9
6b0d99a498c7a92170de0bbdb18907d6d951da31
refs/heads/master
2023-03-08T21:20:41.481293
2021-02-27T04:13:39
2021-02-27T04:13:39
291,481,548
0
0
null
null
null
null
UTF-8
Python
false
false
524
py
# Generated by Django 3.0.8 on 2021-01-06 15:45

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('HFhtml', '0006_auto_20210106_1951'),
    ]

    operations = [
        migrations.AlterField(
            model_name='orders',
            name='pay',
            # Russian values kept as stored data: 'Онлайн' = "Online",
            # 'При получении' = "On delivery", verbose_name 'Оплата' = "Payment"
            field=models.TextField(choices=[('Онлайн', 'Онлайн'), ('При получении', 'При получении')], default='Онлайн', verbose_name='Оплата'),
        ),
    ]
[ "email" ]
email
175362d65ed956693d742ed37d75ce17e75b88dc
01361e80df13a173264c369f63dc42f5fb1eb9b5
/carts/migrations/0002_initial.py
642b8aa58314767eaafc395fd7cdd75fb278ce99
[]
no_license
Angshumaan/greatkart-django
cb63cf1cc1c094f504adb7ff8fea347c43a6fa81
3ff1ac8fc7e7f23bdc3185167e2d8b678256a82c
refs/heads/main
2023-06-05T10:05:21.408371
2021-06-12T06:53:15
2021-06-12T06:53:15
363,017,306
0
0
null
null
null
null
UTF-8
Python
false
false
693
py
# Generated by Django 3.2 on 2021-05-22 03:32

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('store', '0001_initial'),
        ('carts', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='cartitem',
            name='product',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.product'),
        ),
        migrations.AddField(
            model_name='cartitem',
            name='variations',
            field=models.ManyToManyField(blank=True, to='store.Variation'),
        ),
    ]
[ "=" ]
=
ebbd48730fbd9939ec1d72fb2ef3df5c198e09f2
1eb72c0482efd06e1b06a7f3af8f6a5250b82584
/code/lang/complex_if.py
707a5937eca1f84dfba9e07c8e611c1bd8536da4
[]
no_license
Bubujka/python-learning
dcb8e5f8fc40600b431a2ec1f4a16814fbbfa5c9
79bbe16bec01d429c34f9c36f1d15b4f77c811bf
refs/heads/master
2021-09-07T15:31:54.822139
2018-02-25T03:08:06
2018-02-25T03:08:06
109,488,504
1
0
null
null
null
null
UTF-8
Python
false
false
217
py
#!/usr/bin/env python3
"""
Complex conditions in if (and, or)
"""

if True and True and False:
    print("NOPE")

if not True or False:
    print("NOPE")

if None or None or 1:
    print("yeeep")
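# Note (added for illustration): `and`/`or` short-circuit and return an
# operand rather than a strict bool -- (None or None or 1) evaluates to 1,
# which is truthy, so only "yeeep" is printed above.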
[ "zendzirou@gmail.com" ]
zendzirou@gmail.com
f6f83d8fcc21b13c0e3b8d60e21349fc3e1accdd
eb40dce4039d528b9cd06dbeda75da09d09d7fc5
/need_install/Django-1.8.17/tests/admin_changelist/models.py
c01bf320621803e3f82fa4938596e56fe61caefe
[ "Apache-2.0", "BSD-3-Clause" ]
permissive
MulticsYin/MulticsSH
39b62189446787c7f0f037b1640c9c780bd1dddd
5837a0bff0e7da0e8535e4e0b31ef6baf24274b4
refs/heads/master
2021-08-28T07:53:51.759679
2017-12-11T15:31:03
2017-12-11T15:31:03
82,428,902
4
2
null
null
null
null
UTF-8
Python
false
false
2,666
py
from django.db import models
from django.utils.encoding import python_2_unicode_compatible


class Event(models.Model):
    # Oracle can have problems with a column named "date"
    date = models.DateField(db_column="event_date")


class Parent(models.Model):
    name = models.CharField(max_length=128)


class Child(models.Model):
    parent = models.ForeignKey(Parent, editable=False, null=True)
    name = models.CharField(max_length=30, blank=True)
    age = models.IntegerField(null=True, blank=True)


class Genre(models.Model):
    name = models.CharField(max_length=20)


class Band(models.Model):
    name = models.CharField(max_length=20)
    nr_of_members = models.PositiveIntegerField()
    genres = models.ManyToManyField(Genre)


@python_2_unicode_compatible
class Musician(models.Model):
    name = models.CharField(max_length=30)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Group(models.Model):
    name = models.CharField(max_length=30)
    members = models.ManyToManyField(Musician, through='Membership')

    def __str__(self):
        return self.name


class Membership(models.Model):
    music = models.ForeignKey(Musician)
    group = models.ForeignKey(Group)
    role = models.CharField(max_length=15)


class Quartet(Group):
    pass


class ChordsMusician(Musician):
    pass


class ChordsBand(models.Model):
    name = models.CharField(max_length=30)
    members = models.ManyToManyField(ChordsMusician, through='Invitation')


class Invitation(models.Model):
    player = models.ForeignKey(ChordsMusician)
    band = models.ForeignKey(ChordsBand)
    instrument = models.CharField(max_length=15)


class Swallow(models.Model):
    origin = models.CharField(max_length=255)
    load = models.FloatField()
    speed = models.FloatField()

    class Meta:
        ordering = ('speed', 'load')


class SwallowOneToOne(models.Model):
    swallow = models.OneToOneField(Swallow)


class UnorderedObject(models.Model):
    """
    Model without any defined `Meta.ordering`.
    Refs #17198.
    """
    bool = models.BooleanField(default=True)


class OrderedObjectManager(models.Manager):
    def get_queryset(self):
        return super(OrderedObjectManager, self).get_queryset().order_by('number')


class OrderedObject(models.Model):
    """
    Model with Manager that defines a default order.
    Refs #17198.
    """
    name = models.CharField(max_length=255)
    bool = models.BooleanField(default=True)
    number = models.IntegerField(default=0, db_column='number_val')

    objects = OrderedObjectManager()


class CustomIdUser(models.Model):
    uuid = models.AutoField(primary_key=True)
[ "multics_luo@163.com" ]
multics_luo@163.com
5e6ee8b555a309e5ce5e7845954d392ab41207eb
0e1e643e864bcb96cf06f14f4cb559b034e114d0
/Exps_7_v3/doc3d/Ablation4_ch016_ep010/I_w_M_to_W_pyr/pyr_0s/L7/step10_a.py
985e11f6a4a139c0e01e9166d0f6ff8d31fb4b6d
[]
no_license
KongBOy/kong_model2
33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307
1af20b168ffccf0d5293a393a40a9fa9519410b2
refs/heads/master
2022-10-14T03:09:22.543998
2022-10-06T11:33:42
2022-10-06T11:33:42
242,080,692
3
0
null
null
null
null
UTF-8
Python
false
false
6,009
py
#############################################################################################################
#############################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__)           ### path of the currently executing step10_b.py
code_exe_path_element = code_exe_path.split("\\")    ### split the path so we can find which level kong_model2 sits at
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2")               ### find which level kong_model2 sits at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])   ### locate the kong_model2 dir
import sys                                           ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print("   code_exe_path:", code_exe_path)
# print("   code_exe_path_element:", code_exe_path_element)
# print("   code_dir:", code_dir)
# print("   kong_layer:", kong_layer)
# print("   kong_model2_dir:", kong_model2_dir)
#############################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer   ### the -1 in the middle converts a length into an index
# print("   kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0):
    template_dir = ""
elif (kong_to_py_layer == 2):
    template_dir = code_exe_path_element[kong_layer + 1][0:]   ### [7:] would drop the "step1x_" prefix; keeping meaningful names seemed fine later, so this was changed to 0
elif (kong_to_py_layer == 3):
    template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:]   ### [5:] would drop the "mask_" prefix (added only because python modules cannot start with a digit); the automatic ordering turned out to be acceptable, so this was changed to 0
elif (kong_to_py_layer > 3):
    template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print("   template_dir:", template_dir)   ### example -- template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################
exp_dir = template_dir
#############################################################################################################
from step06_a_datas_obj import *
from step09_0side_L7 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder

rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)

rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
#############################################################################################################
'''
exp_dir is the folder one level *above* result_dir!
A nested exp_dir is fine too. For example, with exp_dir = "6_mask_unet/your_own_name",
the result dirs all live under:
    6_mask_unet/your_own_name/result_a
    6_mask_unet/your_own_name/result_b
    6_mask_unet/your_own_name/...
'''
use_db_obj = type8_blender_kong_doc3d_in_I_gt_W_ch_norm_v2
use_loss_obj = [mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_W").copy()]   ### the z, y, x order follows step07_b_0b_Multi_UNet

#############################################################
### An empty Exp_builder so result_analyze can draw blank figures
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_0side, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_0side.kong_model.model_describe).set_train_args(epochs=10).set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="An empty Exp_builder so result_analyze can draw blank figures")
#############################################################
ch032_0side = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_0side, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_0side.kong_model.model_describe).set_train_args(epochs=10).set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
#############################################################
if (__name__ == "__main__"):
    print("build exps cost time:", time.time() - start_time)   # `time` and `start_time` are expected to arrive via the star imports above
    if len(sys.argv) < 2:
        ############################################################################################################
        ### Run this file directly (press F5 or run `python step10_b1_exp_obj_load_and_train_and_test.py` with no
        ### arguments) so we don't fall through to the code meant for step10_b_subprocess.py below
        ch032_0side.build().run()
        # print('no argument')
        sys.exit()

    ### The rest is for step10_b_subprocess.py, equivalent to running
    ### `python step10_b1_exp_obj_load_and_train_and_test.py "some_exp.build().run()"` from cmd
    eval(sys.argv[1])
[ "s89334roy@yahoo.com.tw" ]
s89334roy@yahoo.com.tw
1b5f7970359cd50efcaeb24cab8c592711e3bd4d
2ca88d41f1bb5042338faec50b2af11931db0bdd
/test/test_forecaster_entrypoints.py
90ba5d0e6c33b7c209c70ac83d0eba7bdfead759
[ "Apache-2.0" ]
permissive
canerturkmen/gluon-ts
2f2d46f9b01f5ee07a51a11e822b1c72c2475caa
57ae07f571ff123eac04af077870c1f216f99d5c
refs/heads/master
2022-09-10T23:30:26.162245
2022-04-20T12:44:01
2022-04-20T12:44:01
192,873,578
1
2
Apache-2.0
2020-08-04T16:58:48
2019-06-20T07:43:07
Python
UTF-8
Python
false
false
746
py
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

import pkg_resources


# def test_forecaster_entrypoints():
#     for entry_point in pkg_resources.iter_entry_points("gluonts_forecasters"):
#         entry_point.load()
[ "noreply@github.com" ]
canerturkmen.noreply@github.com
50f104e0f6bb819ed7e3260cd1671e57d0744183
f445450ac693b466ca20b42f1ac82071d32dd991
/generated_tempdir_2019_09_15_163300/generated_part003381.py
5237a05fd13b91ed25973d14234596c3dec60fd5
[]
no_license
Upabjojr/rubi_generated
76e43cbafe70b4e1516fb761cabd9e5257691374
cd35e9e51722b04fb159ada3d5811d62a423e429
refs/heads/master
2020-07-25T17:26:19.227918
2019-09-15T15:41:48
2019-09-15T15:41:48
208,357,412
4
1
null
null
null
null
UTF-8
Python
false
false
2,655
py
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *


class CommutativeMatcher37234(CommutativeMatcher):
    _instance = None
    patterns = {
        0: (0, Multiset({0: 1}), [
            (VariableWithCount('i2.2.1.2.2.2.1.0', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()

    def __init__(self):
        self.add_subject(None)

    @staticmethod
    def get():
        if CommutativeMatcher37234._instance is None:
            CommutativeMatcher37234._instance = CommutativeMatcher37234()
        return CommutativeMatcher37234._instance

    @staticmethod
    def get_match_iter(subject):
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 37233
        if len(subjects) >= 1 and isinstance(subjects[0], Pow):
            tmp1 = subjects.popleft()
            subjects2 = deque(tmp1._args)
            # State 37235
            if len(subjects2) >= 1:
                tmp3 = subjects2.popleft()
                subst1 = Substitution(subst0)
                try:
                    subst1.try_add_variable('i2.2.1.1', tmp3)
                except ValueError:
                    pass
                else:
                    pass
                    # State 37236
                    if len(subjects2) >= 1:
                        tmp5 = subjects2.popleft()
                        subst2 = Substitution(subst1)
                        try:
                            subst2.try_add_variable('i2.2.1.2', tmp5)
                        except ValueError:
                            pass
                        else:
                            pass
                            # State 37237
                            if len(subjects2) == 0:
                                pass
                                # State 37238
                                if len(subjects) == 0:
                                    pass
                                    # 0: x**j
                                    yield 0, subst2
                        subjects2.appendleft(tmp5)
                subjects2.appendleft(tmp3)
            subjects.appendleft(tmp1)
        return
        yield

from collections import deque
[ "franz.bonazzi@gmail.com" ]
franz.bonazzi@gmail.com
8baa9d2d34c28cf039e89bca5f34422e62280386
e488d6d5300b84065ddb50320aa3930d64798047
/mkt/constants/__init__.py
5c29991237eb8686d3066ac298329ca61909b66c
[]
no_license
potch/zamboni
81b2bf01a1d93bbf933a0b3f30db56b438b7b75f
4e0bfa8be5af334e2c4be3917e3794216498291c
refs/heads/master
2021-01-15T21:15:25.102909
2013-01-02T17:07:02
2013-01-02T20:16:10
634,135
0
0
null
null
null
null
UTF-8
Python
false
false
192
py
from .platforms import DEVICE_LOOKUP, FREE_PLATFORMS, PAID_PLATFORMS
from .submit import (APP_IMAGE_SIZES, APP_PREVIEW_MINIMUMS, APP_PREVIEW_SIZES,
                     MAX_PACKAGED_APP_SIZE)
[ "me@mattbasta.com" ]
me@mattbasta.com
2b74909d2d1d7762214f31b2628489b91f3c0436
508c5e01aa7dce530093d5796250eff8d74ba06c
/code/venv/lib/python3.6/site-packages/passlib/utils/_blowfish/__init__.py
16b85443b77e5ba7fccc0601e0d5299282f0083b
[ "MIT" ]
permissive
jhkuang11/UniTrade
f220b0d84db06ff17626b3daa18d4cb8b72a5d3f
5f68b853926e167936b58c8543b8f95ebd6f5211
refs/heads/master
2022-12-12T15:58:30.013516
2019-02-01T21:07:15
2019-02-01T21:07:15
166,479,655
0
0
MIT
2022-12-07T03:59:47
2019-01-18T22:19:45
Python
UTF-8
Python
false
false
6,509
py
"""passlib.utils._blowfish - pure-python eks-blowfish implementation for bcrypt This is a pure-python implementation of the EKS-Blowfish algorithm described by Provos and Mazieres in `A Future-Adaptable Password Scheme <http://www.openbsd.org/papers/bcrypt-paper.ps>`_. This package contains two submodules: * ``_blowfish/base.py`` contains a class implementing the eks-blowfish algorithm using easy-to-examine code. * ``_blowfish/unrolled.py`` contains a subclass which replaces some methods of the original class with sped-up versions, mainly using unrolled loops and local variables. this is the class which is actually used by Passlib to perform BCrypt in pure python. This module is auto-generated by a script, ``_blowfish/_gen_files.py``. Status ------ This implementation is usuable, but is an order of magnitude too slow to be usuable with real security. For "ok" security, BCrypt hashes should have at least 2**11 rounds (as of 2011). Assuming a desired response time <= 100ms, this means a BCrypt implementation should get at least 20 rounds/ms in order to be both usuable *and* secure. On a 2 ghz cpu, this implementation gets roughly 0.09 rounds/ms under CPython (220x too slow), and 1.9 rounds/ms under PyPy (10x too slow). History ------- While subsequently modified considerly for Passlib, this code was originally based on `jBcrypt 0.2 <http://www.mindrot.org/projects/jBCrypt/>`_, which was released under the BSD license:: Copyright (c) 2006 Damien Miller <djm@mindrot.org> Permission to use, copy, modify, and distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """ #============================================================================= # imports #============================================================================= # core from itertools import chain import struct # pkg from passlib.utils import bcrypt64, getrandbytes, rng from passlib.utils.compat import b, bytes, BytesIO, unicode, u from passlib.utils._blowfish.unrolled import BlowfishEngine # local __all__ = [ 'BlowfishEngine', 'raw_bcrypt', ] #============================================================================= # bcrypt constants #============================================================================= # bcrypt constant data "OrpheanBeholderScryDoubt" as 6 integers BCRYPT_CDATA = [ 0x4f727068, 0x65616e42, 0x65686f6c, 0x64657253, 0x63727944, 0x6f756274 ] # struct used to encode ciphertext as digest (last output byte discarded) digest_struct = struct.Struct(">6I") #============================================================================= # base bcrypt helper # # interface designed only for use by passlib.handlers.bcrypt:BCrypt # probably not suitable for other purposes #============================================================================= BNULL = b('\x00') def raw_bcrypt(password, ident, salt, log_rounds): """perform central password hashing step in bcrypt scheme. 
:param password: the password to hash :param ident: identifier w/ minor version (e.g. 2, 2a) :param salt: the binary salt to use (encoded in bcrypt-base64) :param rounds: the log2 of the number of rounds (as int) :returns: bcrypt-base64 encoded checksum """ #=================================================================== # parse inputs #=================================================================== # parse ident assert isinstance(ident, unicode) if ident == u('2'): minor = 0 elif ident == u('2a'): minor = 1 # XXX: how to indicate caller wants to use crypt_blowfish's # workaround variant of 2a? elif ident == u('2x'): raise ValueError("crypt_blowfish's buggy '2x' hashes are not " "currently supported") elif ident == u('2y'): # crypt_blowfish compatibility ident which guarantees compat w/ 2a minor = 1 else: raise ValueError("unknown ident: %r" % (ident,)) # decode & validate salt assert isinstance(salt, bytes) salt = bcrypt64.decode_bytes(salt) if len(salt) < 16: raise ValueError("Missing salt bytes") elif len(salt) > 16: salt = salt[:16] # prepare password assert isinstance(password, bytes) if minor > 0: password += BNULL # validate rounds if log_rounds < 4 or log_rounds > 31: raise ValueError("Bad number of rounds") #=================================================================== # # run EKS-Blowfish algorithm # # This uses the "enhanced key schedule" step described by # Provos and Mazieres in "A Future-Adaptable Password Scheme" # http://www.openbsd.org/papers/bcrypt-paper.ps # #=================================================================== engine = BlowfishEngine() # convert password & salt into list of 18 32-bit integers (72 bytes total). pass_words = engine.key_to_words(password) salt_words = engine.key_to_words(salt) # truncate salt_words to original 16 byte salt, or loop won't wrap # correctly when passed to .eks_salted_expand() salt_words16 = salt_words[:4] # do EKS key schedule setup engine.eks_salted_expand(pass_words, salt_words16) # apply password & salt keys to key schedule a bunch more times. rounds = 1<<log_rounds engine.eks_repeated_expand(pass_words, salt_words, rounds) # encipher constant data, and encode to bytes as digest. data = list(BCRYPT_CDATA) i = 0 while i < 6: data[i], data[i+1] = engine.repeat_encipher(data[i], data[i+1], 64) i += 2 raw = digest_struct.pack(*data)[:-1] return bcrypt64.encode_bytes(raw) #============================================================================= # eof #=============================================================================
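# --- illustrative usage (not part of the original file; a minimal sketch) ---
# The salt below is a made-up 22-char bcrypt-base64 example, and the call
# assumes the surrounding passlib package is importable:
#
#   checksum = raw_bcrypt(b("hunter2"), u("2a"),
#                         b("XVoOiUX24sDbV3sVHwAgNe"), 12)
#   # -> 31-char bcrypt-base64 checksum, as used by passlib.handlers.bcrypt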
[ "jhkuang11@gmail.com" ]
jhkuang11@gmail.com
816522a328e2247a1e43e6f0537dd652c8266d33
6a2c2af113bb8b4d55db6ceabc6e78a0bbcd1f91
/genus processing/Double Breasted.py
01a6ae3e0307df77a4447697a686b3f1c6324fc1
[]
no_license
JinghongM/Everlasting_Data_Cleansing
4a966aca5cba102961f64338411d76e51f60f51e
237073980b2bd1697db578013c7463dcbc1492fb
refs/heads/master
2021-04-26T23:48:38.083155
2018-06-21T20:00:11
2018-06-21T20:00:11
123,861,020
0
0
null
null
null
null
UTF-8
Python
false
false
523
py
import pandas as pd
import copy
import os.path

Pattern = 6
Material = 7
Species = 4

CGP = pd.read_excel("../Realdata.xlsx")
for row in range(1, CGP.shape[0]):
    genus = str(CGP.iat[row, 3])
    if "Double Breasted" in genus:
        print(row)
        CGP.iat[row, 3] = genus.replace("Double Breasted ", "")
        CGP.iat[row, Species] = "Double Breasted"

i = 0
# process headers
while i < len(CGP.columns.values):
    if "Unnamed" in CGP.columns.values[i]:
        CGP.columns.values[i] = ''
    i += 1

CGP.to_excel('../Realdata.xlsx', index=False)
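# Example effect (added for illustration, with a hypothetical row): a genus
# cell reading "Double Breasted Coat" in column 3 becomes "Coat" while column
# 4 (Species) is set to "Double Breasted"; pandas' "Unnamed: N" placeholder
# headers are then blanked before the sheet is written back out.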
[ "noreply@github.com" ]
JinghongM.noreply@github.com
5e7fd79ebbe73607afa51ba6a52d8e1ee3a6c9b5
2579f37a13cfbb47944c5b81c6e83ca710b29f88
/Client/config/Client_config_info.py
4f407858ee9bb1f511c54d87662edd44d4154b42
[]
no_license
YangQian1992/FTP
932f32d5ed934bae295bd674757f7af23d0ad1ba
87d3a78522e7eb8058def1d74d7f32f0f61f1b86
refs/heads/master
2020-03-31T16:28:35.146329
2018-10-10T06:53:12
2018-10-10T06:53:12
152,376,641
0
0
null
null
null
null
UTF-8
Python
false
false
136
py
personal_config_info = {
    'SERVER_IP': '127.0.0.1',
    'SERVER_PORT': 8082,
    'CODING': 'utf-8',  # text encoding used on the wire
    'BLOCK_SIZE': 1024
}
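# Illustrative consumption of this config (a sketch only; the actual call
# sites live elsewhere in the Client package and are not shown here):
#
#   import socket
#   sock = socket.socket()
#   sock.connect((personal_config_info['SERVER_IP'],
#                 personal_config_info['SERVER_PORT']))
#   data = sock.recv(personal_config_info['BLOCK_SIZE'])
#   text = data.decode(personal_config_info['CODING'])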
[ "1289089651@qq.com" ]
1289089651@qq.com
0c4aa77a3094093376053b5f19d0e964a4b1427a
9e15ada895e90d033bc3b65c2666065bddd62605
/12/12.1/Path_test1.py
3093e21b3c397c3f1a090b05e991c3d7de8b5f0f
[]
no_license
zhyErick/fengkuang_python
b0f0c78273420fd862691799bfd7e4f1b6eadf80
6d50ad3b7d4ae05d06379c2dc87d91081964ec6d
refs/heads/master
2021-02-14T08:23:26.616211
2020-05-06T13:08:07
2020-05-06T13:08:07
244,788,500
0
0
null
null
null
null
UTF-8
Python
false
false
463
py
from pathlib import *

# Get the parent directory
p = Path('../')
# Iterate over all files and subdirectories in that directory
for x in p.iterdir():
    print(x)

# Get the parent directory again
p = Path('../')
# Get the .py files in the parent directory and all of its subdirectories
# ('**/' matches any number of directory levels)
for x in p.glob('**/*.py'):
    print(x)

# Get the directory C:\python\oldboy_python (raw string avoids '\p', '\o'
# being treated as escape sequences)
p = Path(r'C:\python\oldboy_python')
# Get the .py files exactly one level below that directory
# ('*/' matches a single directory level)
for x in p.glob('*/*.py'):
    print(x)
[ "xingzhishangyang@163.com" ]
xingzhishangyang@163.com
16333d2fe48ad23cc47270a1b8fc53973efafdf3
90dc57404ea2e98006adba91f53ea9cc92124bb4
/spare_parts/forms.py
a6679136c699e7b1b3cdd28e55c4d7b38fcc7460
[]
no_license
AndreySkryl/flask-test
e57409c779f113980a9acf1c6f9a37be5106e119
8979e15789160e65a7ef6123e3ba638e764d8131
refs/heads/main
2023-04-24T18:33:59.675028
2021-05-10T15:16:56
2021-05-10T15:16:56
365,334,649
1
0
null
null
null
null
UTF-8
Python
false
false
281
py
from wtforms import Form, StringField, TextAreaField, IntegerField


class SparePartForm(Form):
    # Russian UI labels kept as data: "Title", "Price", "Quantity", "Description"
    title = StringField('Заголовок')
    price = IntegerField('Цена')
    amount = IntegerField('Количество')
    description = TextAreaField('Описание')
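# Illustrative usage inside a Flask view (a sketch under assumed names; the
# application's actual routes are not part of this file):
#
#   from flask import request
#   form = SparePartForm(request.form)
#   if request.method == 'POST' and form.validate():
#       title = form.title.data
#       price = form.price.data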
[ "ya.skryl@yandex.ru" ]
ya.skryl@yandex.ru
f1cd67923e90294c3a5c457d1925664b58b06270
e23a4f57ce5474d468258e5e63b9e23fb6011188
/125_algorithms/_exercises/exercises/Python_Hand-on_Solve_200_Problems/Section 7 Dictionary/check_key_exist_solution.py
637344c193288ad01e3c9e4394cbd7653b485f05
[]
no_license
syurskyi/Python_Topics
52851ecce000cb751a3b986408efe32f0b4c0835
be331826b490b73f0a176e6abed86ef68ff2dd2b
refs/heads/master
2023-06-08T19:29:16.214395
2023-05-29T17:09:11
2023-05-29T17:09:11
220,583,118
3
2
null
2023-02-16T03:08:10
2019-11-09T02:58:47
Python
UTF-8
Python
false
false
753
py
# ---------------------------------------------------------------
# python best courses https://courses.tanpham.org/
# ---------------------------------------------------------------
# Check if a given key already exists in a dictionary
# input
#   d = {1: 10, 2: 20, 3: 30, 4: 40, 5: 50, 6: 60}
#   is_key_present(5)
#   is_key_present(9)
# output
#   Key is present in the dictionary
#   Key is not present in the dictionary

d = {1: 10, 2: 20, 3: 30, 4: 40, 5: 50, 6: 60}

def is_key_present(x):
    if x in d:
        print('Key is present in the dictionary')
    else:
        print('Key is not present in the dictionary')

is_key_present(5)
is_key_present(9)
[ "sergejyurskyj@yahoo.com" ]
sergejyurskyj@yahoo.com
b0dad98cc918993b31888c8234bf8e6d804cb304
40eb94673bb2e015c1640665a639c508279e3df4
/cursos/_flask/flask/ejemplos/u29/aplicacion/models.py
defde3e73d247258609be8cc99c0eaf13f0671aa
[]
no_license
josedom24/plataforma_pledin
3ab5c59abd87c90f066ba9164d6d0cbc02ea816f
e0521eb103013e0f8d9e2b2ea50e6acac0d09784
refs/heads/master
2023-07-20T15:43:30.796223
2023-07-17T19:07:51
2023-07-17T19:07:51
138,278,487
2
0
null
2023-05-01T20:05:34
2018-06-22T08:44:58
Python
UTF-8
Python
false
false
2,007
py
from sqlalchemy import Boolean, Column, ForeignKey
from sqlalchemy import DateTime, Integer, String, Text, Float
from sqlalchemy.orm import relationship
from aplicacion.app import db
from werkzeug.security import generate_password_hash, check_password_hash


class Categorias(db.Model):
    """Categories of the articles"""
    __tablename__ = 'categorias'
    id = Column(Integer, primary_key=True)
    nombre = Column(String(100))
    articulos = relationship("Articulos", cascade="all, delete-orphan",
                             backref="Categorias", lazy='dynamic')

    def __repr__(self):
        return (u'<{self.__class__.__name__}: {self.id}>'.format(self=self))


class Articulos(db.Model):
    """Articles in our shop"""
    __tablename__ = 'articulos'
    id = Column(Integer, primary_key=True)
    nombre = Column(String(100), nullable=False)
    precio = Column(Float, default=0)
    iva = Column(Integer, default=21)
    descripcion = Column(String(255))
    image = Column(String(255))
    stock = Column(Integer, default=0)
    CategoriaId = Column(Integer, ForeignKey('categorias.id'), nullable=False)
    categoria = relationship("Categorias", backref="Articulos")

    def precio_final(self):
        return self.precio + (self.precio * self.iva / 100)

    def __repr__(self):
        return (u'<{self.__class__.__name__}: {self.id}>'.format(self=self))


class Usuarios(db.Model):
    """Users"""
    __tablename__ = 'usuarios'
    id = Column(Integer, primary_key=True)
    username = Column(String(100), nullable=False)
    password_hash = Column(String(128), nullable=False)
    nombre = Column(String(200), nullable=False)
    email = Column(String(200), nullable=False)
    admin = Column(Boolean, default=False)

    def __repr__(self):
        return (u'<{self.__class__.__name__}: {self.id}>'.format(self=self))

    @property
    def password(self):
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        return check_password_hash(self.password_hash, password)
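# Illustrative use of the write-only password property (a sketch; database
# session setup from aplicacion.app is assumed):
#
#   u = Usuarios(username='ana', nombre='Ana', email='ana@example.com')
#   u.password = 'secret'          # stores only generate_password_hash(...)
#   u.verify_password('secret')    # -> True
#   u.password                     # -> AttributeError: not readable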
[ "josedom24@gmail.com" ]
josedom24@gmail.com
10da254f4b916fe94485339bbb590bc46404c718
46e271e27afe50b8b62be0651d78164490911bb3
/ws_moveit/src/example/example_pkg/scripts/red.py
1ec501210d7051649bb4e99b72defa0eede3bac3
[]
no_license
Nidhiks2000/Vargi-bot
8a43af1e470b6fc84d468003f67471a1e1f47aad
3e2e7be310ed7372cb6960eea8faabec75d9fbcf
refs/heads/master
2023-07-24T01:05:10.049800
2021-09-08T16:31:08
2021-09-08T16:31:08
403,935,308
0
0
null
null
null
null
UTF-8
Python
false
false
2,639
py
#!/usr/bin/env python

import rospy
import cv2
import sys
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import numpy as np


class Camera1:

    def __init__(self):
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/eyrc/vb/camera_1/image_raw",
                                          Image, self.callback)

    def callback(self, data):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            rospy.logerr(e)

        (rows, cols, channels) = cv_image.shape
        image = cv_image

        # Resize a 720x1280 image to 360x640 to fit it on the screen
        resized_image = cv2.resize(image, (720/2, 1280/2))

        green = np.uint8([[[0, 255, 0]]])                  # green color
        hsvGreen = cv2.cvtColor(green, cv2.COLOR_BGR2HSV)  # hsv value of green color
        lowerLimit = hsvGreen[0][0][0] - 10, 100, 100      # range of green color lower limit and upper limit
        upperLimit = hsvGreen[0][0][0] + 10, 255, 255

        red = np.uint8([[[0, 0, 255]]])                    # red color
        hsvred = cv2.cvtColor(red, cv2.COLOR_BGR2HSV)      # hsv value of red color
        lower = hsvred[0][0][0] - 10, 100, 100             # range of red color lower limit and upper limit
        upper = hsvred[0][0][0] + 10, 255, 255

        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)       # convert the image into hsv

        lg = np.array(lowerLimit)                          # range of green color
        ug = np.array(upperLimit)
        green_mask = cv2.inRange(hsv, lg, ug)              # green masked image
        cv2.imshow('green_packages', green_mask)           # show the image

        lr = np.array(lower)                               # range of red color
        ur = np.array(upper)
        red_mask = cv2.inRange(hsv, lr, ur)                # red masked image
        cv2.imshow('red_packages', red_mask)               # show the image

        original = image.copy()
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        lower = np.array([22, 93, 0], dtype="uint8")       # yellow colour lower range and upper range
        upper = np.array([45, 255, 255], dtype="uint8")
        mask = cv2.inRange(image, lower, upper)

        cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if len(cnts) == 2 else cnts[1]
        for c in cnts:
            x, y, w, h = cv2.boundingRect(c)
            cv2.rectangle(original, (x, y), (x + w, y + h), (36, 255, 12), 2)

        cv2.imshow('yellow_packages', mask)
        cv2.imshow("/eyrc/vb/camera_1/image_raw", resized_image)
        cv2.waitKey(3)


def main(args):
    rospy.init_node('node_eg1_read_camera', anonymous=True)
    ic = Camera1()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        rospy.loginfo("Shutting down")
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main(sys.argv)
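# Note (added for illustration): for pure green (BGR 0,255,0) OpenCV reports
# hue 60, so the green band above is [50,100,100]..[70,255,255]. Pure red sits
# at hue 0, and hue wraps at 180 in OpenCV, so `hsvred[0][0][0] - 10` goes
# negative and the red mask misses reds near hue 170-179; OR-ing in a second
# inRange over that upper band would be the usual fix.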
[ "Happysunshine.disroot.org" ]
Happysunshine.disroot.org