blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9c09d0d031676f69bb99df3784b5c7cc3a91d70e
|
00657ecc75e0529f5b77759112398bdb11e612bb
|
/Python3.6/264-Py3-M-Ugly Number II.py
|
8930b07936863fb50c031ac463f4cd020151073e
|
[] |
no_license
|
Hidenver2016/Leetcode
|
da949cd17f8e29d6007b492719bbc97418ae9cb7
|
1379a6dc2400751ecf79ccd6ed401a1fb0d78046
|
refs/heads/master
| 2021-07-18T14:46:00.986614
| 2020-05-08T05:02:20
| 2020-05-08T05:02:20
| 152,190,601
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,461
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 18 22:41:55 2019
@author: hjiang
"""
"""
Write a program to find the n-th ugly number.
Ugly numbers are positive numbers whose prime factors only include 2, 3, 5.
Example:
Input: n = 10
Output: 12
Explanation: 1, 2, 3, 4, 5, 6, 8, 9, 10, 12 is the sequence of the first 10 ugly numbers.
Note:
1 is typically treated as an ugly number.
n does not exceed 1690.
https://blog.csdn.net/fuxuemingzhu/article/details/49231615
所有的ugly number都是由1开始,乘以2/3/5生成的。
只要将这些生成的数排序即可获得,自动排序可以使用set
这样每次取出的第一个元素就是最小元素,由此再继续生成新的ugly number.
可以分成如下三组:
(1) 1×2, 2×2, 3×2, 4×2, 5×2, …
(2) 1×3, 2×3, 3×3, 4×3, 5×3, …
(3) 1×5, 2×5, 3×5, 4×5, 5×5, …
"""
class Solution(object):
    def nthUglyNumber(self, n):
        """Return the n-th ugly number.

        Ugly numbers are positive integers whose only prime factors are
        2, 3 and 5; the sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...

        :type n: int
        :rtype: int (0 for n < 1)

        Classic three-pointer DP: dp[i] holds the (i+1)-th ugly number,
        and each pointer marks the next dp entry still waiting to be
        multiplied by its factor.  O(n) time, O(n) space.
        """
        # Guard non-positive n: the original `n < 0` check let n == 0
        # fall through to `dp[n - 1]` on an empty list (IndexError).
        if n < 1:
            return 0
        dp = [1] * n
        index2, index3, index5 = 0, 0, 0
        for i in range(1, n):
            dp[i] = min(2 * dp[index2], 3 * dp[index3], 5 * dp[index5])
            # Advance every pointer that produced the minimum; this
            # de-duplicates values reachable via two factors (e.g. 6).
            if dp[i] == 2 * dp[index2]: index2 += 1
            if dp[i] == 3 * dp[index3]: index3 += 1
            if dp[i] == 5 * dp[index5]: index5 += 1
        return dp[n - 1]
|
[
"noreply@github.com"
] |
Hidenver2016.noreply@github.com
|
8143ad2895c961ddd4a0d4b7b09c61ab15f705df
|
799a8605e28118da863079f0924cd93974221c3c
|
/src/ralph/account/admin.py
|
341a44f6d5b3666c748d8458f48daf0ba7e4f22c
|
[
"Apache-2.0"
] |
permissive
|
damjanek/ralph
|
31a2fae13e2608bcf9f13853199cfc00ba6db317
|
728e1c17ea8a70600928a59d5ec17a964063485d
|
refs/heads/master
| 2021-01-24T05:06:34.308524
| 2013-02-20T10:26:47
| 2013-02-20T10:26:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,980
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin, GroupAdmin
from django.contrib.auth.models import User, Group
from django.utils.translation import ugettext_lazy as _
from lck.django.activitylog.admin import IPInline, UserAgentInline
from lck.django.common.admin import ForeignKeyAutocompleteTabularInline
from lck.django.profile.admin import ProfileInlineFormSet
from tastypie.models import ApiKey
from ralph.account.models import BoundPerm, Profile
class ProfileInline(admin.StackedInline):
    # Inline editor for the Profile attached to a User.
    model = Profile
    readonly_fields = ('last_active',)  # presumably maintained by activity tracking, not edited by admins
    max_num = 1  # at most one profile per user
    can_delete = False
class ProfileBoundPermInline(ForeignKeyAutocompleteTabularInline):
    # Inline for BoundPerm rows bound directly to a profile; the
    # role/group binding fields are excluded from this form.
    model = BoundPerm
    exclude = ['created', 'modified', 'created_by', 'modified_by', 'role',
               'group']
    related_search_fields = {
        'venture': ['^name'],  # autocomplete ventures by name prefix
    }
    formset = ProfileInlineFormSet
    def __init__(self, parent_model, admin_site):
        # Pin the FK used by the inline and force the parent model to
        # Profile regardless of the parent_model the admin passes in.
        self.fk_name = 'profile'
        super(ProfileBoundPermInline, self).__init__(Profile, admin_site)
class ProfileIPInline(IPInline):
    # IP activity-log inline, re-parented onto Profile via fk_name.
    formset = ProfileInlineFormSet
    def __init__(self, parent_model, admin_site):
        self.fk_name = 'profile'
        super(ProfileIPInline, self).__init__(Profile, admin_site)
class ProfileUserAgentInline(UserAgentInline):
    # User-agent activity-log inline, re-parented onto Profile via fk_name.
    formset = ProfileInlineFormSet
    def __init__(self, parent_model, admin_site):
        self.fk_name = 'profile'
        super(ProfileUserAgentInline, self).__init__(Profile, admin_site)
class ApiKeyInline(admin.StackedInline):
    # Tastypie API key shown inline on the user page.
    model = ApiKey
    readonly_fields = ('created',)  # creation timestamp is informational only
    extra = 0
class ProfileAdmin(UserAdmin):
    # Replacement User admin: the stock UserAdmin plus profile,
    # bound-permission, API-key and activity-log inlines.
    def groups_show(self):
        # NOTE(review): referenced from list_display below, so `self`
        # here receives the displayed User row, not the admin — verify.
        return "<br> ".join([g.name for g in self.groups.order_by('name')])
    groups_show.allow_tags = True  # render the <br> separators as HTML
    groups_show.short_description = _("groups")
    inlines = [
        ProfileInline, ProfileBoundPermInline, ApiKeyInline,
        ProfileIPInline, ProfileUserAgentInline,
    ]
    list_display = ('username', 'email', 'first_name', 'last_name',
                    groups_show, 'is_staff', 'is_active')
    list_filter = ('is_staff', 'is_superuser', 'is_active', 'groups',)
    save_on_top = True
    search_fields = ('username', 'first_name', 'last_name',
                     'email', 'profile__nick')
# Swap Django's stock User admin for the profile-aware one.
admin.site.unregister(User)
admin.site.register(User, ProfileAdmin)
class GroupBoundPermInline(ForeignKeyAutocompleteTabularInline):
    # Inline for BoundPerm rows bound to a group; the role/profile
    # binding fields are excluded from this form.
    model = BoundPerm
    exclude = ['created', 'modified', 'created_by', 'modified_by', 'role',
               'profile']
    related_search_fields = {
        'venture': ['^name'],  # autocomplete ventures by name prefix
    }
class CustomGroupAdmin(GroupAdmin):
    # Group admin with group-bound permissions editable inline.
    save_on_top = True
    inlines = [GroupBoundPermInline]
# Replace the stock Group admin as well.
admin.site.unregister(Group)
admin.site.register(Group, CustomGroupAdmin)
|
[
"lukasz@langa.pl"
] |
lukasz@langa.pl
|
e0a024431ad980f1924a8f5527c9a6124af8d894
|
a9c3e212f86acdbc84ba57357194e8f11c844535
|
/catalogue_management/migrations/0005_auto_20170805_1824.py
|
7f5911b81aa8bf4c4485c57fcc7aa45c5711b848
|
[] |
no_license
|
bitapardaz/carwash
|
bde4635bda1f1fa51409c2454e27aca84c2bffa0
|
0a10954eae44df7341372b5f3def652e512538b0
|
refs/heads/master
| 2021-01-15T13:34:31.198300
| 2017-08-23T11:35:33
| 2017-08-23T11:35:33
| 99,678,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: redeclare ``Service.price`` as an
    IntegerField with default 0."""

    dependencies = [
        ('catalogue_management', '0004_auto_20170727_0856'),
    ]

    operations = [
        migrations.AlterField(
            model_name='service',
            name='price',
            field=models.IntegerField(default=0),
        ),
    ]
|
[
"pourranjbar.ar@gmail.com"
] |
pourranjbar.ar@gmail.com
|
9544cbbc6bb48a09e347d92844c17726d2e71c59
|
41e2cf24f0ff3a11a98bb00e03c598dde35452c4
|
/reportview/migrations/0011_auto_20180727_1857.py
|
1460d461da8f7a8e002282c032bb1794a19e8855
|
[] |
no_license
|
anushamokashi/mob
|
f5dbedc729073092f94323feca6d95dee24087a2
|
37bc0eb033bc23d37e9d4fb9bb8b2b456553ff7f
|
refs/heads/master
| 2020-04-24T08:36:56.008212
| 2019-02-21T09:09:04
| 2019-02-21T09:09:04
| 171,810,613
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-27 13:27
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.11): rename the
    ``ReportPrintFormatAction.JasperFile`` field to ``htmlfile``."""

    dependencies = [
        ('reportview', '0010_auto_20180725_1515'),
    ]

    operations = [
        migrations.RenameField(
            model_name='reportprintformataction',
            old_name='JasperFile',
            new_name='htmlfile',
        ),
    ]
|
[
"anusha.mokashi@gmail.com"
] |
anusha.mokashi@gmail.com
|
9466b21de1d9b7a2824a0e619a7509c314f821cf
|
9edaf93c833ba90ae9a903aa3c44c407a7e55198
|
/bpmn/models/data_output_association.py
|
c0dd7b339bab9f4979a5cb5433bfb5f15dd78b02
|
[] |
no_license
|
tefra/xsdata-samples
|
c50aab4828b8c7c4448dbdab9c67d1ebc519e292
|
ef027fe02e6a075d8ed676c86a80e9647d944571
|
refs/heads/main
| 2023-08-14T10:31:12.152696
| 2023-07-25T18:01:22
| 2023-07-25T18:01:22
| 222,543,692
| 6
| 1
| null | 2023-06-25T07:21:04
| 2019-11-18T21:00:37
|
Python
|
UTF-8
|
Python
| false
| false
| 346
|
py
|
from dataclasses import dataclass
from .t_data_output_association import TDataOutputAssociation
__NAMESPACE__ = "http://www.omg.org/spec/BPMN/20100524/MODEL"
@dataclass
class DataOutputAssociation(TDataOutputAssociation):
    """BPMN ``dataOutputAssociation`` element bound to the MODEL namespace."""

    class Meta:
        # xsdata binding metadata: XML element name and namespace.
        name = "dataOutputAssociation"
        namespace = "http://www.omg.org/spec/BPMN/20100524/MODEL"
|
[
"chris@komposta.net"
] |
chris@komposta.net
|
e73aff3599ba2371422b1ab8a30e84e8a98a2ad1
|
74649c1220c68ad0af79e420d572e3769fcd7a53
|
/_unittests/ut_cli/test_cli_validate_bench_doc.py
|
4a3937dc7771228ee5b515f55a9fc32778a4db1d
|
[
"MIT"
] |
permissive
|
sdpython/mlprodict
|
e62edcb428700cb2c4527e54e96431c1d2b36118
|
27d6da4ecdd76e18292f265fde61d19b66937a5c
|
refs/heads/master
| 2023-05-08T10:44:30.418658
| 2023-03-08T22:48:56
| 2023-03-08T22:48:56
| 112,469,804
| 60
| 13
|
MIT
| 2023-04-19T01:21:38
| 2017-11-29T11:57:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
"""
@brief test tree node (time=42s)
"""
import os
import unittest
from pyquickhelper.loghelper import BufferedPrint
from pyquickhelper.pycode import (
ExtTestCase, get_temp_folder, ignore_warnings)
from mlprodict.__main__ import main
class TestCliValidateBenchDoc(ExtTestCase):
    """CLI tests for mlprodict's ``benchmark_doc`` command."""

    @ignore_warnings(UserWarning)
    def test_cli_validate_bench_doc_help(self):
        # --help output is routed through fLOG; capture and spot-check it.
        st = BufferedPrint()
        main(args=["benchmark_doc", "--help"], fLOG=st.fprint)
        res = str(st)
        self.assertIn("verbose", res)

    @ignore_warnings(UserWarning)
    def test_cli_validate_bench_doc(self):
        # Run a small real benchmark restricted to LinearRegression with
        # the python_compiled runtime; both output files must appear.
        temp = get_temp_folder(__file__, "temp_bench_doc")
        out1 = os.path.join(temp, "raw.xlsx")
        out2 = os.path.join(temp, "sum.csv")
        st = BufferedPrint()
        main(args=["benchmark_doc", "-o", out1, "-ou", out2, "-w",
                   "LinearRegression", '-d', temp,
                   '-r', 'python_compiled'],
             fLOG=st.fprint)
        res = str(st)
        self.assertIn('Linear', res)
        self.assertExists(out1)
        self.assertExists(out2)


if __name__ == "__main__":
    unittest.main(verbosity=2)
|
[
"noreply@github.com"
] |
sdpython.noreply@github.com
|
f7d764ba88db6e3901e87715853fe26847484a39
|
6d71de4e88dcb7d04f6d3a18736d393e12f8d087
|
/scripts/packages/database.py
|
f5c11749b5a1d8cec4d2441e01d576df5e077dc6
|
[
"MIT"
] |
permissive
|
wyolum/Alex
|
71075c30691229e8eb28afa06a6ab44c450b14d4
|
03f1d8ae0107454d18964e33777ffc4c0c1a1951
|
refs/heads/main
| 2023-07-02T16:11:57.088323
| 2021-08-05T17:59:04
| 2021-08-05T17:59:04
| 338,686,528
| 10
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,870
|
py
|
import sqlite3
class Struct:
    """Attribute-style record built from keyword arguments.

    Field values are reachable both as attributes and, through
    ``keys()``/``__getitem__``, with a read-only dict-like interface.
    """

    def __init__(self, **kwargs):
        self.attrs = kwargs
        for field, value in kwargs.items():
            setattr(self, field, value)

    def keys(self):
        """Names of the record's fields."""
        return self.attrs.keys()

    def __getitem__(self, key):
        """Dict-style lookup of a field value."""
        return self.attrs[key]

    def __repr__(self):
        return 'Struct(**{})'.format(self.attrs)
class Table:
    """Minimal table helper over a DB-API connection (sqlite3).

    Holds a table name plus a sequence of column objects; each column
    must expose a ``name`` attribute and render its SQL declaration via
    ``str()``.  Every method takes the open connection as first argument.

    NOTE: WHERE clauses are interpolated into the SQL verbatim — callers
    must never pass untrusted input.
    """

    def __init__(self, name, *columns):
        self.name = name
        self.columns = columns

    def create(self, db):
        """Create the table if it does not already exist."""
        cols = ['%s' % col for col in self.columns]
        sql = 'CREATE TABLE IF NOT EXISTS %s (%s);' % (self.name, ','.join(cols))
        db.execute(sql)

    def drop(self, db):
        """Drop the table after interactive confirmation."""
        sql = 'DROP TABLE %s' % self.name
        response = input('Warning, dropping table %s\nY to confirm: ' % self.name)
        # response[:1] instead of response[0]: an empty reply (plain
        # Enter) previously raised IndexError instead of cancelling.
        if response[:1] == 'Y':
            db.execute(sql)
            print('%s Dropped' % self.name)
        else:
            print('Drop not executed')

    def create_index(self, db, colnames, unique=False):
        """Create an index over *colnames*; UNIQUE when requested."""
        idx_name = ''.join(colnames)
        cols = ','.join(colnames)
        unique = ['', 'UNIQUE'][unique]
        sql = 'CREATE %s INDEX %s ON %s(%s)' % (unique, idx_name, self.name, cols)
        db.execute(sql)

    def insert(self, db, values):
        """Insert rows (sequences matching self.columns) and commit.

        Rows violating an integrity constraint are skipped silently.
        Returns the number of rows actually inserted.
        """
        # Empty input previously crashed on values[0]; nothing to do.
        if not values:
            return 0
        place_holders = ','.join('?' * len(values[0]))
        cols = ','.join([col.name for col in self.columns])
        sql = 'INSERT INTO %s(%s) VALUES (%s);' % (self.name, cols, place_holders)
        rowcount = 0
        for row in values:
            # Row-at-a-time so one duplicate does not abort the batch.
            try:
                rowcount += db.executemany(sql, [row]).rowcount
            except sqlite3.IntegrityError:
                pass
        db.commit()
        return rowcount

    def delete(self, db, where):
        """Delete rows matching the (pre-built) WHERE clause and commit."""
        sql = f'DELETE FROM {self.name} WHERE {where}'
        try:
            db.execute(sql)
            db.commit()
        except sqlite3.OperationalError:
            # Surface the offending SQL before re-raising.
            print(sql)
            raise

    def select(self, db, where=None):
        """Return matching rows as Struct records (all rows when where is None)."""
        sql = 'SELECT * FROM %s' % self.name
        if where is not None:
            sql += ' WHERE ' + where
        try:
            cur = db.execute(sql)
        except sqlite3.OperationalError:
            print(sql)
            raise
        colnames = [col.name for col in self.columns]
        return [Struct(**dict(zip(colnames, row))) for row in cur.fetchall()]

    def join(self, db, other, col, where=None):
        """LEFT JOIN this table with *other* on *col*; rows come back as dicts."""
        sql = 'SELECT * FROM %s LEFT JOIN %s ON %s.%s' % (self.name, other.name, self.name, col)
        if where:
            sql += ' WHERE ' + where
        cur = db.execute(sql)
        colnames = [desc[0] for desc in cur.description]
        return [dict(zip(colnames, row)) for row in cur.fetchall()]
class Column:
    """Schema column: a name, a DBType, and boolean SQL modifiers.

    Keyword arguments with truthy values are emitted upper-cased in the
    column's SQL declaration (e.g. ``primary_key=True`` -> PRIMARY_KEY).
    """

    def __init__(self, name, type, **kw):
        self.name = name
        self.type = type
        self.kw = kw

    def __str__(self):
        modifiers = ''.join(
            ' ' + key.upper() for key, flag in self.kw.items() if flag)
        return '%s %s %s' % (self.name, self.type.name, modifiers)
class DBType:
    """Named SQL type tag; subclasses attach a Python converter."""

    def __init__(self, name):
        self.name = name
class Integer(DBType):
    """INTEGER column type; values convert with ``int``."""

    def __init__(self):
        super().__init__('INTEGER')
        self.convert = int
class Float(DBType):
    """FLOAT column type; values convert with ``float``."""

    def __init__(self):
        super().__init__('FLOAT')
        self.convert = float
class String(DBType):
    """STRING column type; values convert with ``str``."""

    def __init__(self):
        super().__init__('STRING')
        self.convert = str
class Boolean(DBType):
    """BOOLEAN column type; values convert with ``bool``."""

    def __init__(self):
        super().__init__('BOOLEAN')
        self.convert = bool
class Text(DBType):
    """TEXT column type; values convert with ``str``."""

    def __init__(self):
        super().__init__('TEXT')
        self.convert = str
|
[
"wyojustin@gmail.com"
] |
wyojustin@gmail.com
|
d5cef35ef32da1b2f8e8ae9cb2ab5ab3391634ea
|
cea45a2355c8b243a79c4c179c98a04e90e98ff7
|
/astropy/table/tests/conftest.py
|
65143fd816accb2c0754b2e8130f6ba656cf8cbe
|
[] |
no_license
|
shanmbic/astropy
|
408cfa45511cac9c64dade6350d6ba9adeb567ad
|
e8a6546dd210ade743eb663dd1c276ca2fd054b4
|
refs/heads/master
| 2021-01-15T17:20:47.626581
| 2014-04-03T16:17:03
| 2014-04-03T16:17:03
| 17,769,433
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,111
|
py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
All of the py.test fixtures used by astropy.table are defined here.
The fixtures can not be defined in the modules that use them, because
those modules are imported twice: once with `from __future__ import
unicode_literals` and once without. py.test complains when the same
fixtures are defined more than once.
`conftest.py` is a "special" module name for py.test that is always
imported, but is not looked in for tests, and it is the recommended
place to put fixtures that are shared between modules. These fixtures
can not be defined in a module by a different name and still be shared
between modules.
"""
from ...tests.helper import pytest
from ... import table
@pytest.fixture(params=[table.Column, table.MaskedColumn])
def Column(request):
# Fixture to run all the Column tests for both an unmasked (ndarray)
# and masked (MaskedArray) column.
return request.param
class MaskedTable(table.Table):
def __init__(self, *args, **kwargs):
kwargs['masked'] = True
table.Table.__init__(self, *args, **kwargs)
# Fixture to run all the Column tests for both an unmasked (ndarray)
# and masked (MaskedArray) column.
@pytest.fixture(params=[False, True])
def table_types(request):
class TableTypes:
def __init__(self, request):
self.Table = MaskedTable if request.param else table.Table
self.Column = table.MaskedColumn if request.param else table.Column
return TableTypes(request)
# Fixture to run all the Column tests for both an unmasked (ndarray)
# and masked (MaskedArray) column.
@pytest.fixture(params=[False, True])
def table_data(request):
class TableData:
def __init__(self, request):
self.Table = MaskedTable if request.param else table.Table
self.Column = table.MaskedColumn if request.param else table.Column
self.COLS = [
self.Column(name='a', data=[1, 2, 3], description='da',
format='fa', meta={'ma': 1}, unit='ua'),
self.Column(name='b', data=[4, 5, 6], description='db',
format='fb', meta={'mb': 1}, unit='ub'),
self.Column(name='c', data=[7, 8, 9], description='dc',
format='fc', meta={'mc': 1}, unit='ub')]
self.DATA = self.Table(self.COLS)
return TableData(request)
class SubclassTable(table.Table):
pass
@pytest.fixture(params=[True, False])
def tableclass(request):
return table.Table if request.param else SubclassTable
@pytest.fixture(params=[0, 1, -1])
def protocol(request):
"""
Fixture to run all the tests for protocols 0 and 1, and -1 (most advanced).
"""
return request.param
# Fixture to run all tests for both an unmasked (ndarray) and masked
# (MaskedArray) column.
@pytest.fixture(params=[False, True])
def table_type(request):
# return MaskedTable if request.param else table.Table
try:
request.param
return MaskedTable
except AttributeError:
return table.Table
|
[
"mdboom@gmail.com"
] |
mdboom@gmail.com
|
724cc2812ef2925248a2d0403762eb7599764b22
|
0a3e24df172a206a751217e5f85b334f39983101
|
/python_etc_3/abstract.py
|
e0ea5f6e6873c182904d30b94f58820697f27011
|
[] |
no_license
|
yeboahd24/python202
|
1f399426a1f46d72da041ab3d138c582c695462d
|
d785a038183e52941e0cee8eb4f6cedd3c6a35ed
|
refs/heads/main
| 2023-05-06T04:14:19.336839
| 2021-02-10T02:53:19
| 2021-02-10T02:53:19
| 309,841,303
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 685
|
py
|
#!/usr/bin/env python3
import abc
class Tombola(abc.ABC):
    """Abstract container that can load items and pick them at random."""

    @abc.abstractmethod
    def load(self, iterable):
        """Add items from an iterable."""

    @abc.abstractmethod
    def pick(self):
        """Remove one item at random and return it.

        Implementations must raise `LookupError` when the instance is
        empty."""

    def loaded(self):
        """Return `True` if there's at least 1 item, `False` otherwise."""
        return bool(self.inspect())

    def inspect(self):
        """Return a sorted tuple with the items currently inside."""
        # Drain the container through pick() — the only access the
        # abstract interface guarantees — then put everything back.
        drained = []
        exhausted = False
        while not exhausted:
            try:
                drained.append(self.pick())
            except LookupError:
                exhausted = True
        self.load(drained)
        return tuple(sorted(drained))
|
[
"noreply@github.com"
] |
yeboahd24.noreply@github.com
|
e87bb97c7df0a4427908d7f0aaf88841526d6ba8
|
31a0b0749c30ff37c3a72592387f9d8195de4bd6
|
/rllib/agents/ars/tests/test_ars.py
|
86ccab7f45a7af95c28c303d749585d3ca419ddc
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
longshotsyndicate/ray
|
15100bad514b602a3fa39bfe205288e7bec75d90
|
3341fae573868338b665bcea8a1c4ee86b702751
|
refs/heads/master
| 2023-01-28T15:16:00.401509
| 2022-02-18T05:35:47
| 2022-02-18T05:35:47
| 163,961,795
| 1
| 1
|
Apache-2.0
| 2023-01-14T08:01:02
| 2019-01-03T11:03:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,353
|
py
|
import unittest
import ray
import ray.rllib.agents.ars as ars
from ray.rllib.utils.test_utils import framework_iterator, check_compute_single_action
class TestARS(unittest.TestCase):
    """Smoke tests for RLlib's ARS trainer."""

    @classmethod
    def setUpClass(cls):
        # One shared local Ray cluster for the whole test class.
        ray.init(num_cpus=3)

    @classmethod
    def tearDownClass(cls):
        ray.shutdown()

    def test_ars_compilation(self):
        """Test whether an ARSTrainer can be built on all frameworks."""
        config = ars.DEFAULT_CONFIG.copy()
        # Keep it simple: a tiny network so the test stays fast.
        config["model"]["fcnet_hiddens"] = [10]
        config["model"]["fcnet_activation"] = None
        config["noise_size"] = 2500000
        # Test eval workers ("normal" WorkerSet, unlike ARS' list of
        # RolloutWorkers used for collecting train batches).
        config["evaluation_interval"] = 1
        config["evaluation_num_workers"] = 1
        num_iterations = 2
        # framework_iterator re-runs the body once per DL framework.
        for _ in framework_iterator(config):
            plain_config = config.copy()
            trainer = ars.ARSTrainer(config=plain_config, env="CartPole-v0")
            for i in range(num_iterations):
                results = trainer.train()
                print(results)
            check_compute_single_action(trainer)
            trainer.stop()


if __name__ == "__main__":
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
[
"noreply@github.com"
] |
longshotsyndicate.noreply@github.com
|
a88a1a1ab247a5f750af8f9e792c8ecee63957ab
|
52a61caff0aeb434c32e5657e38762643e9f57dd
|
/Basics/TwoDimensionalLists(arrays)/is_matrix_symmetric.py
|
98436819ed551b845d293a2907ee30e2f2c3f3d4
|
[] |
no_license
|
AndrewErmakov/PythonTrainingBasics
|
1480a6378d1ec59884760e2b3014ccc3d28f058f
|
639e15bbfc54da762cb9e366497754cfece30691
|
refs/heads/master
| 2021-06-10T15:57:58.682335
| 2021-03-25T13:37:30
| 2021-03-25T13:37:30
| 153,678,760
| 0
| 0
| null | 2018-10-30T13:52:51
| 2018-10-18T19:45:47
|
Python
|
UTF-8
|
Python
| false
| false
| 288
|
py
|
# Read an n x n integer matrix from stdin and print "yes" if it equals
# its transpose (is symmetric), otherwise "no".
size_list = int(input())
numbers_list = [[int(j) for j in input().split()] for i in range(size_list)]
answer = "yes"
for i in range(size_list):
    for j in range(size_list):
        if numbers_list[i][j] != numbers_list[j][i]:
            answer = "no"
            # NOTE(review): break only exits the inner loop; the outer
            # loop keeps scanning.  The printed answer is still correct,
            # it just does redundant work.
            break
print(answer)
|
[
"andrew.67@list.ru"
] |
andrew.67@list.ru
|
6d482b7fc6f41fd53b8ad4099680500cf4ef92cc
|
0da9d2a15305421e224795cdf078838bd97eccc8
|
/Algorithms/Strings/SeparateTheNumbers.py
|
3ca2f20969d9e354e5da69e9dfad840c9e4c0624
|
[] |
no_license
|
LysanderGG/HackerRank
|
ac1300eea2f4e00f7d4e5084b5d570aa6fae0cfb
|
039ec4414612cff84a941a7e7538fb36e10d427f
|
refs/heads/master
| 2021-01-21T16:09:59.174131
| 2017-07-09T12:33:32
| 2017-07-09T12:33:32
| 91,877,258
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
#!/bin/python3
import sys
def build_str(i, n):
    """Concatenate the consecutive integers i, i+1, i+2, ... until the
    result is at least n characters long, and return it."""
    pieces = []
    total = 0
    while total < n:
        piece = str(i)
        pieces.append(piece)
        total += len(piece)
        i += 1
    return ''.join(pieces)
def solve(s):
    """Return "YES <first>" if s is the concatenation of consecutive
    integers starting from some prefix of s, otherwise "NO"."""
    half = len(s) // 2
    # The starting number can be at most half the string long, or the
    # sequence could not contain at least two numbers.
    for width in range(1, half + 1):
        candidate = int(s[:width])
        if s == build_str(candidate, len(s)):
            return "YES " + str(candidate)
    return "NO"
# One query count on the first line, then one digit string per query.
q = int(input().strip())
for a0 in range(q):
    s = input().strip()
    print(solve(s))
|
[
"lysandergc@gmail.com"
] |
lysandergc@gmail.com
|
f8e180729c3092f31dd14b405f694eda6ea55dd0
|
549270020f6c8724e2ef1b12e38d11b025579f8d
|
/recipes/msgpack-c/all/conanfile.py
|
eac066163d08578e6216ffddb784ee899746d0ca
|
[
"MIT"
] |
permissive
|
conan-io/conan-center-index
|
1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43
|
3b17e69bb4e5601a850b6e006e44775e690bac33
|
refs/heads/master
| 2023-08-31T11:34:45.403978
| 2023-08-31T11:13:23
| 2023-08-31T11:13:23
| 204,671,232
| 844
| 1,820
|
MIT
| 2023-09-14T21:22:42
| 2019-08-27T09:43:58
|
Python
|
UTF-8
|
Python
| false
| false
| 3,964
|
py
|
from conan import ConanFile
from conan.tools.files import get, copy, rmdir, save
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
from conan.tools.scm import Version
import os
import textwrap
required_conan_version = ">=1.53.0"
class MsgpackCConan(ConanFile):
    """Conan recipe packaging msgpack-c (MessagePack implementation for C)."""
    name = "msgpack-c"
    description = "MessagePack implementation for C"
    license = "BSL-1.0"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/msgpack/msgpack-c"
    topics = ("msgpack", "message-pack", "serialization")
    package_type = "library"
    settings = "os", "arch", "build_type", "compiler"
    options = {
        "fPIC": [True, False],
        "shared": [True, False],
    }
    default_options = {
        "fPIC": True,
        "shared": False,
    }

    def config_options(self):
        # fPIC has no meaning on Windows.
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        if self.options.shared:
            # Position-independent code is implied for shared builds.
            self.options.rm_safe("fPIC")
        # C-only library: C++ standard/runtime settings do not apply.
        self.settings.rm_safe("compiler.libcxx")
        self.settings.rm_safe("compiler.cppstd")

    def layout(self):
        cmake_layout(self, src_folder="src")

    def source(self):
        get(self, **self.conan_data["sources"][self.version], strip_root=True)

    def generate(self):
        tc = CMakeToolchain(self)
        # Build exactly one flavor: shared XOR static.
        tc.variables["MSGPACK_ENABLE_SHARED"] = self.options.shared
        tc.variables["MSGPACK_ENABLE_STATIC"] = not self.options.shared
        tc.variables["MSGPACK_32BIT"] = self.settings.arch == "x86"
        tc.variables["MSGPACK_BUILD_EXAMPLES"] = False
        tc.cache_variables["MSGPACK_BUILD_TESTS"] = False
        tc.generate()

    def build(self):
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def package(self):
        copy(self, pattern="LICENSE_1_0.txt", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
        cmake = CMake(self)
        cmake.install()
        # Drop build-system metadata that Conan regenerates itself.
        rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
        rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
        # TODO: to remove in conan v2 once cmake_find_package* & pkg_config generators removed
        self._create_cmake_module_alias_targets(
            os.path.join(self.package_folder, self._module_file_rel_path),
            {"msgpackc": "msgpack::msgpack"}
        )

    def _create_cmake_module_alias_targets(self, module_file, targets):
        # Emit a CMake snippet aliasing legacy target names onto the new ones.
        content = ""
        for alias, aliased in targets.items():
            content += textwrap.dedent("""\
                if(TARGET {aliased} AND NOT TARGET {alias})
                    add_library({alias} INTERFACE IMPORTED)
                    set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})
                endif()
            """.format(alias=alias, aliased=aliased))
        save(self, module_file, content)

    @property
    def _module_file_rel_path(self):
        return os.path.join("lib", "cmake", f"conan-official-{self.name}-targets.cmake")

    def package_info(self):
        self.cpp_info.set_property("cmake_file_name", "msgpack")
        self.cpp_info.set_property("pkg_config_name", "msgpack")
        # Upstream renamed the library target in 6.0.0.
        if Version(self.version) < "6.0.0":
            self.cpp_info.libs = ["msgpackc"]
            self.cpp_info.set_property("cmake_target_name", "msgpackc")
        else:
            self.cpp_info.libs = ["msgpack-c"]
            self.cpp_info.set_property("cmake_target_name", "msgpack-c")
        # TODO: to remove in conan v2 once cmake_find_package* & pkg_config generators removed
        self.cpp_info.names["cmake_find_package"] = "msgpack"
        self.cpp_info.names["cmake_find_package_multi"] = "msgpack"
        self.cpp_info.build_modules["cmake_find_package"] = [self._module_file_rel_path]
        self.cpp_info.build_modules["cmake_find_package_multi"] = [self._module_file_rel_path]
        self.cpp_info.names["pkg_config"] = "msgpack"
|
[
"noreply@github.com"
] |
conan-io.noreply@github.com
|
9cbfa772353ca6774d58a800b97ac7adc0d3df4c
|
3326ed6fa75623aca9f94242c06ba736af1fe1e4
|
/src/qutip_symbolic/commutators.py
|
0743e0d0810341df1aae9f59bfd53b74d568e58a
|
[
"BSD-3-Clause"
] |
permissive
|
ZeroInfinite/qutip-symbolic
|
d443193981ea223c0ea0f9675288593f8371fc5e
|
bbc5d1e9f7928cd88e568140a6ff49bb060ce20d
|
refs/heads/master
| 2023-05-01T12:52:52.037165
| 2021-05-19T14:34:47
| 2021-05-19T14:34:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
from .compat.commutator import Commutator
def recursive_commutator(a, b, n=1):
    """
    Generate a recursive commutator of order n:

    [a, b]_1 = [a, b]
    [a, b]_2 = [a, [a, b]]
    [a, b]_3 = [a, [a, b]_2] = [a, [a, [a, b]]]
    ...

    Raises ValueError if n < 1 (previously any non-positive n recursed
    without a base case until RecursionError).
    """
    if n < 1:
        raise ValueError("commutator order n must be >= 1, got %r" % (n,))
    if n == 1:
        return Commutator(a, b)
    return Commutator(a, recursive_commutator(a, b, n - 1))
|
[
"hodgestar@gmail.com"
] |
hodgestar@gmail.com
|
e458a806a907109ff03c60bc02d49f659e96156e
|
431dadb72b70ab4d604d6f7722e1554e151fda83
|
/examples/predict_demo_by_seq.py
|
fe13d90be2ae34c639eaac6850b563f74b2408ea
|
[
"Apache-2.0"
] |
permissive
|
smilelight/nymph
|
fe3c04fb9145bb16993d81791ac7d3fe0f0b7587
|
c8da2211f7a8f58d1c6d327b243e419ed9e64ead
|
refs/heads/master
| 2022-12-08T16:59:24.823290
| 2020-09-01T08:48:03
| 2020-09-01T08:48:03
| 286,218,355
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 971
|
py
|
# -*- coding: utf-8 -*-
import os
import pandas as pd
from nymph.data import SeqDataset, split_dataset
from nymph.modules import SeqClassifier
# Resolve paths relative to the project root (two levels above this file).
project_path = os.path.abspath(os.path.join(__file__, '../../'))
# NOTE(review): the raw string with a backslash is Windows-only; an
# os.path.join of the components would be portable.
data_path = os.path.join(project_path, r'data\test.csv')
save_path = 'demo_saves_seq'
def split_fn(dataset: list):
    """Return the sequence split boundaries 0..len(dataset), inclusive."""
    boundary_count = len(dataset) + 1
    return [index for index in range(boundary_count)]
if __name__ == '__main__':
    # Load the data
    data = pd.read_csv(data_path)
    # Build the classifier
    classifier = SeqClassifier()
    # Load the previously saved classifier state
    classifier.load(save_path)
    # Build the dataset
    seq_ds = SeqDataset(data, split_fn=split_fn, min_len=4)
    # Run prediction
    pred = classifier.predict(seq_ds)
    print(pred)
    # Write per-class classification metrics to a file
    classifier.report(seq_ds, 'seq_demo_report.csv')
    # Write the data together with its predictions to a new file
    classifier.summary(seq_ds, 'seq_demo_summary.csv')
|
[
"iamlightsmile@gmail.com"
] |
iamlightsmile@gmail.com
|
c271d3cf73452571a1c93a1185eb93f88ff3c1bf
|
694c187c8a00bee8c670c1690170099bad9b16b3
|
/palindrome.py
|
220c4d3ad21d12c2a38242879e2e212ed2181a00
|
[] |
no_license
|
ajayvenkat10/Competitive
|
301f220b6d296f7e34328f192c43c4d7ef208cb1
|
14f2ecebe10eb19f72cc412dd0c414b3b1de9b4d
|
refs/heads/master
| 2022-11-20T14:31:33.590099
| 2020-07-23T15:39:14
| 2020-07-23T15:39:14
| 281,599,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
# Python 2 script: reads n, then n space-separated integers, and
# accumulates a count by repeatedly calling pali(A).
n=int(raw_input(""))
count=0
A=[]
a=raw_input("")
a=a.split()
for i in range(n):
    b=int(a[i])
    A.append(b)
# NOTE(review): `pali` is not defined anywhere in this file, so this
# loop raises NameError on the first iteration.  Even with pali
# available, the loop only terminates if pali shrinks A — verify
# against the missing helper.
while(len(A)>0):
    count = count + pali(A)
print count
|
[
"37923623+ajayvenkat10@users.noreply.github.com"
] |
37923623+ajayvenkat10@users.noreply.github.com
|
96faca6dc627aa0ad4cd2cb5e017015d35a80cb7
|
1feae7286c5d61981b40520e2c1e1028b86bb8cc
|
/blog_newsapi/asgi.py
|
05c058cfe761a8558228cfd03981b89811503c23
|
[] |
no_license
|
mukeshrakesh123/newsapp-blog
|
b97adb486f2d463e11fc054243833a2db6944865
|
596eac8981792fc368d2abfc4e19650332347f08
|
refs/heads/main
| 2023-06-16T11:44:10.665862
| 2021-07-10T16:34:50
| 2021-07-10T16:34:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
ASGI config for blog_newsapi project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Use the project settings module unless the environment already names one.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blog_newsapi.settings')
application = get_asgi_application()
|
[
"jayagupta752@gmail.com"
] |
jayagupta752@gmail.com
|
909e293fb34797f5f75d7502c620042158498e08
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/sesv2_write_f/bulk-email_send.py
|
a349241910bf4f467effed81bbf205506059170e
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
    """
    """
    # Delegate to the shared helper that collects and writes the
    # parameters for `aws sesv2 send-bulk-email`.
    write_parameter("sesv2", "send-bulk-email")
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
b8bc971de51c0b9c9209b3f1c24ac435f7161b4c
|
c1d9dc8351241a3bd519a6f8ebc4531bfed2de6f
|
/backup/Python/152.py
|
3c06c1daf0f1af89b7d26c587e3d1a7fae8027fa
|
[] |
no_license
|
yichenluan/LeetCodeSolution
|
3cf4b31e36f32c6b689b7b724c5cf57c3efb70bc
|
26af13bbac60d656415bbba0c3bc7acbaa5a7d63
|
refs/heads/master
| 2021-05-23T06:05:07.183561
| 2020-10-20T09:11:41
| 2020-10-20T09:11:41
| 52,205,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 548
|
py
|
class Solution(object):
    def maxProduct(self, nums):
        """Return the largest product of any contiguous subarray.

        :type nums: List[int]
        :rtype: int (0 for empty input)

        Tracks both the maximum and minimum product ending at each
        index, since a negative factor can turn the smallest product
        into the largest.  O(n) time, O(1) extra space.
        """
        # Empty input previously raised IndexError on nums[0].
        if not nums:
            return 0
        maxSoFar = nums[0]
        maxEndingHere = nums[0]
        minEndingHere = nums[0]
        # range() instead of Python-2-only xrange(); behaves the same
        # in both interpreters.
        for i in range(1, len(nums)):
            maxTemp = maxEndingHere
            maxEndingHere = max(maxEndingHere * nums[i], nums[i], minEndingHere * nums[i])
            minEndingHere = min(minEndingHere * nums[i], nums[i], maxTemp * nums[i])
            maxSoFar = max(maxSoFar, maxEndingHere)
        return maxSoFar
|
[
"jinke@bytedance.com"
] |
jinke@bytedance.com
|
89d77e01921b73651991e522906f13a394d8776d
|
a81d84fdb57e1b90812fc5b5b523685ba5b663c0
|
/python/2021_08/Question1480.py
|
2e09e851c818b1ea3b6986e1b3573c1a418ae62b
|
[] |
no_license
|
KujouNozom/LeetCode
|
1919081001126924daa7549493a0823702631a37
|
4de1e601274de1336d669e41f732a8cb056880b9
|
refs/heads/master
| 2023-07-17T12:17:45.156451
| 2021-09-04T11:57:40
| 2021-09-04T11:57:40
| 268,075,373
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
# 1480. Running Sum of 1d Array [prefix sum]
from typing import List
class Solution:
    def runningSum(self, nums: List[int]) -> List[int]:
        """Return the running (prefix) sums: out[i] = nums[0] + ... + nums[i]."""
        total = 0
        # The walrus accumulates the prefix sum while the comprehension
        # collects every intermediate total.
        return [total := total + value for value in nums]
|
[
"438767738@qq.com"
] |
438767738@qq.com
|
baf5f510f103558e54208fd4851c7324a7084c61
|
2f219acf03442e2aa502cd8fffce02f3c1118298
|
/Py2D/Py2D_projectBuilder.py
|
cce1aec2b30db8faf14bf3a0ec396d22613990c0
|
[] |
no_license
|
AFlyingCar/Py2D
|
27a5ec03c015de16533978315d654c338d9fa4f7
|
c06d95ac0f716598a7be2af0d7cfaaeebd189bbc
|
refs/heads/master
| 2020-12-24T06:04:24.821244
| 2016-07-05T04:00:18
| 2016-07-05T04:00:18
| 30,786,712
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,335
|
py
|
#########################
# AFlyingCar #
# 2/19/15 #
# Py2D Project Builder #
#########################
import shutil,errno,os
PY2D_INSTALLATION_PATH = "C:\\Program Files\\Py2D"
# PY2D_INSTALLATION_PATH = ".\\Py2D" # Use to build with lastet build rather than the recommended one
def copyFiles(source,target):
try:
shutil.copytree(source,target)
except OSError as exc:
if exc.errno == errno.ENOTDIR:
shutil.copy(source,target)
else:
raise
def buildProject():
verifyPy2DInstallation()
path = raw_input("Full path to the project: ")
bin_path = os.path.join(path,"bin\\Py2D")
resource_path = os.path.join(path,"resources")
config_path = os.path.join(path,"Settings")
if not os.path.exists(resource_path):
os.makedirs(resource_path)
if not os.path.exists(config_path):
os.makedirs(config_path)
open(bin_path[:4] + "__init__.py",'w').write("")
try:
copyFiles(PY2D_INSTALLATION_PATH,bin_path)
except WindowsError as e:
print "Files already copied. Skipping."
def verifyPy2DInstallation():
if not os.path.exists(PY2D_INSTALLATION_PATH):
print "Unable to find valid copy of Py2D. Please check that it properly installed in %s." % PY2D_INSTALLATION_PATH
raise OSError("ERROR - Py2D not installed.")
if __name__ == '__main__':
buildProject()
|
[
"tyler@familyrobbins.com"
] |
tyler@familyrobbins.com
|
c042d866799b19a4c0a033cdd67920cd11b294f2
|
31698241ee3485d8b053da0d0da203c215a958f9
|
/test.py
|
b087fd7b04be98ca7891c6e9c241bc700c1fa8db
|
[] |
no_license
|
martin-martin/save-udacity-forum-posts
|
06f588d875477e7579c8fafe50b07e2fd16820a2
|
7abcede5c288cfe430b8b47fc1d7e1736bebe774
|
refs/heads/master
| 2021-06-10T06:39:10.868435
| 2020-03-03T09:00:38
| 2020-03-03T09:00:38
| 146,982,630
| 0
| 0
| null | 2021-06-01T22:35:47
| 2018-09-01T08:45:11
|
Python
|
UTF-8
|
Python
| false
| false
| 640
|
py
|
# Derive a usable file name from a Udacity forum thread URL.  Thread
# *replies* carry a trailing post number ("<thread-id>/2"), while *original
# posts* end in the thread id, so the interesting path segment sits at a
# different offset from the end of the split URL.
li = [
    "https://discussions.udacity.com/t/logic-improvisation-needed/191902/2",
    "https://discussions.udacity.com/t/so-you-want-to-post-some-code/33561"
]

reply_parts = li[0].split('/')
# Reply URL: 7 segments, the thread id is third from the end.
print(len(reply_parts))
print(reply_parts[-3])

post_parts = li[1].split('/')
# Original-post URL: 6 segments, the thread id is second from the end.
print(len(post_parts))
print(post_parts[-2])
|
[
"breuss.martin@gmail.com"
] |
breuss.martin@gmail.com
|
3c40e6e237efe724302daafbcb9ecb8d7168cf24
|
dc93174785fb64ca91264fa8dee50c0b0ce616c8
|
/DeepFried2/criteria/__init__.py
|
5316b9014cc0a8dfc8d9251a659b7883978485fe
|
[
"MIT"
] |
permissive
|
elPistolero/DeepFried2
|
789ee2362f47a65efe6d4f64cf81657777ee12b3
|
56efebede5469de706071a5ba645b2b74b3adf3e
|
refs/heads/master
| 2020-12-25T16:35:39.932731
| 2015-09-18T06:48:14
| 2015-09-18T06:48:14
| 40,975,386
| 0
| 0
| null | 2015-08-18T14:02:46
| 2015-08-18T14:02:45
| null |
UTF-8
|
Python
| false
| false
| 143
|
py
|
from .ClassNLLCriterion import ClassNLLCriterion
from .BCECriterion import BCECriterion
from .RMSECriterion import RMSECriterion, MSECriterion
|
[
"lucasb.eyer.be@gmail.com"
] |
lucasb.eyer.be@gmail.com
|
ce7496e15e265dd8728a7ca0b81b7d914ea7bd5f
|
e78f1c5347069cec56c42149a1d4de3103936ee7
|
/quantum_gates/find_resonant_interactions.py
|
3af82839e1fa720a4e958cb7590b7cd4a0762e8b
|
[] |
no_license
|
twobackfromtheend/CircularStates
|
8882627923bdfce42f72d0a9401206acd7043f47
|
ab0471362b444620d48a902ac237caead3b18a8f
|
refs/heads/master
| 2023-08-04T13:20:48.494564
| 2021-09-16T19:23:29
| 2021-09-16T19:23:29
| 323,437,296
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 497
|
py
|
import numpy as np
from arc import StarkMapResonances, Rubidium

# Principal quantum number under study.  The original script reassigned n
# several times (51, 70, 90) while experimenting; only the final value was
# ever used, so the dead assignments are removed.
n = 30
# Spin quantum number used to build the [n, l, j, mj] state descriptors.
s = 0.5

# Resonances between two Rubidium atoms prepared in the same circular-like
# state [n, n-1, n-1+s, n-1+s].
calculation = StarkMapResonances(
    Rubidium(),
    [n, n - 1, n - 1 + s, n - 1 + s],
    Rubidium(),
    [n, n - 1, n - 1 + s, n - 1 + s],
)

# Search +/- n_buffer principal quantum numbers around n.
n_buffer = 10
calculation.findResonances(
    nMin=n - n_buffer, nMax=n + n_buffer, maxL=5000,
    eFieldList=np.linspace(0, 100, 200),
    # energyRange=[-0.8e9, 4.e9],
    energyRange=[-10e9, 10.e9],
    progressOutput=True,
)
calculation.showPlot()
|
[
"harry1996@gmail.com"
] |
harry1996@gmail.com
|
2131a1ba50f8f1a9a88ef10152942581cf16b5fa
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/iothub/azure-iot-deviceprovisioning/azure/iot/deviceprovisioning/_configuration.py
|
92106ddf94ce39971ca1d644c17886ebe2376f08
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,143
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class DeviceProvisioningClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
    """Configuration for DeviceProvisioningClient.
    Note that all parameters used to create this instance are saved as instance
    attributes.
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials.TokenCredential
    :keyword api_version: Api Version. Default value is "2021-10-01". Note that overriding this
    default value may result in unsupported behavior.
    :paramtype api_version: str
    """
    def __init__(self, credential: "TokenCredential", **kwargs: Any) -> None:
        super(DeviceProvisioningClientConfiguration, self).__init__(**kwargs)
        api_version: str = kwargs.pop("api_version", "2021-10-01")
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        self.credential = credential
        self.api_version = api_version
        # Default OAuth scope for the Device Provisioning Service endpoint;
        # callers may override via the credential_scopes keyword.
        self.credential_scopes = kwargs.pop("credential_scopes", ["https://azure-devices-provisioning.net/.default"])
        kwargs.setdefault("sdk_moniker", "iot-deviceprovisioning/{}".format(VERSION))
        self._configure(**kwargs)
    def _configure(self, **kwargs: Any) -> None:
        # Build each standard azure-core pipeline policy, honouring any
        # caller-supplied override passed through kwargs.
        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get("authentication_policy")
        # Only synthesise a bearer-token policy when the caller did not
        # provide an authentication policy of their own.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = policies.BearerTokenCredentialPolicy(
                self.credential, *self.credential_scopes, **kwargs
            )
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
418464640708b824c506f3e90e4f9b8c4ff03368
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p2DJ/New/program/qiskit/QC/startQiskit_QC67.py
|
a9d853e26832f658881463c5f7c58a05494423ab
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,013
|
py
|
# qubit number=2
# total number=7
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle circuit O_f for a boolean function f.

    `f` maps an n-character bit string (as produced by np.binary_repr) to
    "0" or "1"; for every input where f is "1" the single target qubit is
    flipped by a multi-controlled X gate.
    """
    # implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # X the 0-bits so all controls read |1> exactly for pattern `rep`.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            # Undo the selection X gates to restore the inputs.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    # oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the Deutsch-Jozsa-style circuit: H-superposition, oracle
    O_f, H again, plus a handful of extra single-qubit gates.

    NOTE(review): the "# number=N" comments and the trailing y/cx/y gate
    pairs look like mutations injected by a generating tool -- preserved
    verbatim.
    """
    # circuit begin
    input_qubit = QuantumRegister(n, "qc")
    target = QuantumRegister(1, "qt")
    prog = QuantumCircuit(input_qubit, target)
    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(target)
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[1]) # number=1
    prog.h(target)
    prog.barrier()
    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [target])
    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # measure
    #for i in range(n):
    #    prog.measure(input_qubit[i], classicals[i])
    prog.y(input_qubit[1]) # number=2
    prog.cx(input_qubit[0],input_qubit[1]) # number=4
    prog.y(input_qubit[1]) # number=3
    prog.y(input_qubit[0]) # number=5
    prog.y(input_qubit[0]) # number=6
    # circuit end
    return prog
if __name__ == '__main__':
    # f is "balanced": the output equals the last input bit.
    n = 2
    f = lambda rep: rep[-1]
    # f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
    # f = lambda rep: "0"
    prog = make_circuit(n, f)
    sample_shot =2800
    # Run on the real "ibmq_belem" backend; requires stored IBMQ credentials
    # and network access.  Transpilation targets a FakeVigo layout.
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = provider.get_backend("ibmq_belem")
    circuit1 = transpile(prog,FakeVigo())
    # NOTE(review): two consecutive X gates on qubit 3 cancel each other;
    # they look like deliberately injected mutations -- confirm before
    # removing.
    circuit1.x(qubit=3)
    circuit1.x(qubit=3)
    circuit1.measure_all()
    prog = circuit1
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Dump the measurement counts, circuit depth and drawing to CSV.
    writefile = open("../data/startQiskit_QC67.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
09fc949aa59fee950e73b5334a4796b8c4013da1
|
23fddc940a266c2d1d0e0b1687c36cdbcc9d54d9
|
/gtwisted/test/twisted_test.py
|
75f2164247362286e2d7ba3653753983433f88b7
|
[] |
no_license
|
Cuick/traversing
|
210fcfb1c780037de59343fffeb4fa4d3f2eae32
|
c78982580af7f63c8bff4dcb37005b7f7c682b5b
|
refs/heads/master
| 2021-01-10T17:38:37.899460
| 2016-11-18T06:06:55
| 2016-11-18T06:06:55
| 55,397,540
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 968
|
py
|
#coding:utf8
'''
Created on 2014-02-21 (docstring translated from Chinese)
@author: lan (www.9miao.com)
'''
# Minimal Twisted TCP server benchmark (Python 2: `print` statement below).
# from twisted.core.greactor import GeventReactor
from twisted.internet import reactor
from twisted.internet.protocol import ServerFactory,Protocol
reactor = reactor
class MyProtocol(Protocol):
    # Reply to any received data with a fixed HTTP 200 body, then close.
    def connectionMade(self):
        pass
#        print "connectionMade:",self.transport.sessionno
    def dataReceived(self, data):
#        print "dataReceived:",data
        self.transport.write('HTTP/1.1 200 OK\n\nHello World!!')
        self.transport.loseConnection()
    def connectionLost(self, reason):
#        pass
        print "connectionLost", reason
class MyServerFactory(ServerFactory):
    def __init__(self):
        self.protocol = MyProtocol
from gfirefly.server.logobj import logger
ss = MyServerFactory()
import sys
# log.startLogging(sys.stdout)
# Listen on :8080, log a test message after 5 seconds, run the event loop.
reactor.listenTCP(8080, ss)
reactor.callLater(5, logger.info, "asdfasdf")
reactor.run()
|
[
"zxzxck@163.com"
] |
zxzxck@163.com
|
2ae927007cd3599a2365302b8d151333af023a05
|
9c81c170f03ba925bf3d0682526245c202e384a7
|
/tests/unit_tests/databases/dao/dao_tests.py
|
b792a65336a4e7a5f0df807fae1292ac4bf9de25
|
[
"Apache-2.0",
"OFL-1.1"
] |
permissive
|
zcong1993/incubator-superset
|
2a08177641eff178dee9db852887ad2d19d70d54
|
269c99293f42089958dc98b5d6e5899509fc3111
|
refs/heads/master
| 2023-08-17T12:24:59.438120
| 2023-08-17T10:50:24
| 2023-08-17T10:50:24
| 209,522,299
| 0
| 0
|
Apache-2.0
| 2023-03-06T08:10:31
| 2019-09-19T10:09:21
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 2,188
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections.abc import Iterator
import pytest
from sqlalchemy.orm.session import Session
@pytest.fixture
def session_with_data(session: Session) -> Iterator[Session]:
    """Yield `session` pre-populated with one Database, one SqlaTable bound
    to it, and one SSHTunnel; all changes are rolled back after the test."""
    # Local imports keep superset off the import path until the fixture runs.
    from superset.connectors.sqla.models import SqlaTable
    from superset.databases.ssh_tunnel.models import SSHTunnel
    from superset.models.core import Database
    engine = session.get_bind()
    SqlaTable.metadata.create_all(engine)  # pylint: disable=no-member
    db = Database(database_name="my_database", sqlalchemy_uri="sqlite://")
    sqla_table = SqlaTable(
        table_name="my_sqla_table",
        columns=[],
        metrics=[],
        database=db,
    )
    # NOTE(review): db.id is presumably still None before the flush below, so
    # the tunnel is effectively linked via the `database` relationship --
    # confirm against SQLAlchemy identity behaviour.
    ssh_tunnel = SSHTunnel(
        database_id=db.id,
        database=db,
    )
    session.add(db)
    session.add(sqla_table)
    session.add(ssh_tunnel)
    session.flush()
    yield session
    # Undo all inserts so other tests see a clean session.
    session.rollback()
def test_database_get_ssh_tunnel(session_with_data: Session) -> None:
    """The DAO returns the SSHTunnel attached to database id 1."""
    from superset.daos.database import DatabaseDAO
    from superset.databases.ssh_tunnel.models import SSHTunnel

    tunnel = DatabaseDAO.get_ssh_tunnel(1)

    assert tunnel
    assert isinstance(tunnel, SSHTunnel)
    assert tunnel.database_id == 1
def test_database_get_ssh_tunnel_not_found(session_with_data: Session) -> None:
    """Asking for a tunnel on an unknown database id yields None."""
    from superset.daos.database import DatabaseDAO

    assert DatabaseDAO.get_ssh_tunnel(2) is None
|
[
"noreply@github.com"
] |
zcong1993.noreply@github.com
|
80c36a7e86e77966f54c631cb257fd0ebc75cc31
|
8edd63a42469bf09fcad1c1070995ceda6e49646
|
/env/lib/python2.7/site-packages/observations/r/codling.py
|
86cc11b0e65757702630c0d2a4ac144e458b4164
|
[] |
no_license
|
silky/bell-ppls
|
fa0b5418f40dab59de48b7220ff30caba5945b56
|
369e7602c810b694a70ac1e875017480c8910ac8
|
refs/heads/master
| 2020-04-06T08:40:28.588492
| 2018-11-01T06:51:33
| 2018-11-01T06:51:33
| 157,312,221
| 1
| 0
| null | 2018-11-13T03:04:18
| 2018-11-13T03:04:18
| null |
UTF-8
|
Python
| false
| false
| 2,203
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def codling(path):
    """Dose-mortality data for fumigation of codling moth with methyl bromide.

    Data come from trials that studied the mortality response of codling
    moth to fumigation with methyl bromide: 99 observations of 10 variables
    (dose, tot, dead, pobs, cm, ct, Cultivar, gp, year, numcm).  Reference:
    Maindonald, Waddell & Petry (2001), Postharvest Biology.

    Args:
      path: str.
        Path to directory which either stores the file or otherwise the file
        will be downloaded and extracted there.  Filename is `codling.csv`.

    Returns:
      Tuple of np.ndarray `x_train` with 99 rows and 10 columns and
      dictionary `metadata` of column headers (feature names).
    """
    import pandas as pd

    path = os.path.expanduser(path)
    filename = 'codling.csv'
    target = os.path.join(path, filename)
    if not os.path.exists(target):
        maybe_download_and_extract(
            path,
            'http://dustintran.com/data/r/DAAG/codling.csv',
            save_file_name='codling.csv',
            resume=False)

    frame = pd.read_csv(target, index_col=0, parse_dates=True)
    return frame.values, {'columns': frame.columns}
|
[
"akobeid.1@gmail.com"
] |
akobeid.1@gmail.com
|
95b356426471b1afe72a949f674a499ccac4cfc9
|
849e95a72f4f380d6b31573a0a13e9eccd288838
|
/data-tool/flows/common/affiliation_queries.py
|
a8e6b91b8895cf4e643efd69cb521b2eb8a4ba11
|
[
"Apache-2.0"
] |
permissive
|
bcgov/lear
|
d9b27e2b44ba607ca13878357a62a0623d54ddee
|
d90f11a7b14411b02c07fe97d2c1fc31cd4a9b32
|
refs/heads/main
| 2023-09-01T11:26:11.058427
| 2023-08-31T20:25:24
| 2023-08-31T20:25:24
| 168,396,249
| 13
| 117
|
Apache-2.0
| 2023-09-14T20:52:02
| 2019-01-30T18:49:09
|
Python
|
UTF-8
|
Python
| false
| false
| 567
|
py
|
def get_unaffiliated_firms_query(data_load_env: str):
    """Build the SQL that lists firms still awaiting affiliation processing
    for the given data-load environment.

    NOTE(review): `data_load_env` is interpolated straight into the SQL via
    an f-string; this is only safe while the value comes from trusted
    configuration, never from user input.
    """
    sql = f"""
    select ap.account_id, ap.corp_num, ap.contact_email, c.admin_email
    from affiliation_processing ap
    join corporation c on ap.corp_num = c.corp_num
    where environment = '{data_load_env}'
    -- and processed_status is null
    --or processed_status <> 'COMPLETED'
    and (processed_status is null or processed_status not in ('COMPLETED', 'FAILED'))
    limit 5
    ;
    """
    return sql
|
[
"noreply@github.com"
] |
bcgov.noreply@github.com
|
37ba9ed4370eaf0e92f4bec9ffa8a8614598bd76
|
9247c6081930c215a1543e95a2567cfa60214c5a
|
/mlsegment/shape.py
|
fbec3ac1046ffc61ef8dbf4294a320694d26f67d
|
[] |
no_license
|
luispedro/mlsegment
|
5cec535dbb2476f860ae0ab629b62383e0850143
|
13bf1f2b19aa22f82c9bc07fdf7e44d0fefe79af
|
refs/heads/master
| 2020-04-01T21:12:48.459146
| 2010-12-01T19:46:57
| 2010-12-01T20:39:34
| 1,127,228
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 815
|
py
|
# Copyright (C) 2010, Luis Pedro Coelho <lpc@cmu.edu>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
# License: MIT
from __future__ import division
import numpy as np
from pyslic.features.hullfeatures import hullfeatures
def shape_features(binimg):
    """Return [area] + convex-hull features for a binary mask."""
    size = binimg.sum()
    return np.concatenate(([size], hullfeatures(binimg)))

def extract1(img, solution):
    """Yield shape features for each labelled region in `solution`.

    `solution` is a (labeled_image, n_regions) pair; labels run 1..n_regions.
    """
    labeled, n_regions = solution
    # range() instead of the Python-2-only xrange(); behaviour is identical
    # and the module now also runs under Python 3.
    for i in range(n_regions):
        shape = (labeled == (i+1))
        yield shape_features(shape)

# Probabilities below `limit` are clamped to exp(log_limit) so the log never
# sees zero.
log_limit = -100
limit = np.exp(log_limit)
def apply(img, solution, shape_model):
    """Average log-likelihood of the segmentation's region shapes under
    shape_model, with tiny probabilities floored at log_limit."""
    values = [shape_model(feats) for feats in extract1(img, solution)]
    values = np.array(values)
    n = len(values)
    return (np.sum(np.log(values[values > limit])) + log_limit * np.sum(values <= limit))/n
shapes = apply
|
[
"lpc@cmu.edu"
] |
lpc@cmu.edu
|
c4f319cc552d88af43f227b1f826513811c0f29f
|
8da91c26d423bacbeee1163ac7e969904c7e4338
|
/pyvisdk/do/virtual_usb_remote_client_backing_info.py
|
14e557038cd02b58c8e04e8e355118e8cb07b301
|
[] |
no_license
|
pexip/os-python-infi-pyvisdk
|
5d8f3a3858cdd61fb76485574e74ae525cdc7e25
|
1aadea0afbc306d09f6ecb9af0e683dbbf961d20
|
refs/heads/master
| 2023-08-28T02:40:28.789786
| 2020-07-16T04:00:53
| 2020-07-16T04:00:53
| 10,032,240
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,060
|
py
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def VirtualUSBRemoteClientBackingInfo(vim, *args, **kwargs):
    '''The virtual remote client USB device backing class.

    Positional or keyword arguments fill the attributes listed below;
    `hostname` and `deviceName` are required, the rest optional.

    :raises IndexError: fewer than 2 attribute values were supplied.
    :raises InvalidArgumentError: an unknown keyword argument was supplied.
    '''
    obj = vim.client.factory.create('{urn:vim25}VirtualUSBRemoteClientBackingInfo')

    # do some validation checking...
    if (len(args) + len(kwargs)) < 2:
        # BUG FIX: the message used to claim "at least 3 arguments" (the
        # check requires 2) and reported len(args) only, ignoring kwargs.
        raise IndexError('Expected at least 2 arguments got: %d' % (len(args) + len(kwargs)))

    required = [ 'hostname', 'deviceName' ]
    optional = [ 'useAutoDetect', 'dynamicProperty', 'dynamicType' ]

    # Positional arguments map onto the attribute list in declaration order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
|
[
"jmb@pexip.com"
] |
jmb@pexip.com
|
f435aa2b121f93f5780e2858c25946185360188a
|
09bd584a3eb73ec77693343d135ed664d96b3258
|
/server/tests/test_vanilla.py
|
d874e5410f6400c9d80e978e3f1294ab2a2c35ae
|
[] |
no_license
|
cameronmaske/pytest-django
|
ba83c84e74312f47d98f7787f1fb01dda81af825
|
09942d46277bfa6e7a2c71bdafe100a455cf9f2f
|
refs/heads/master
| 2023-03-20T10:17:55.658522
| 2014-06-12T10:39:29
| 2014-06-12T10:39:29
| 20,763,872
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 683
|
py
|
from django.test import TestCase
from base.models import Person, Dog, Tag, SimpleDog
class ModelTestCase(TestCase):
    # Sanity-check tests (Python 2: `print` statements) exercising plain
    # model construction and the generic-relation Tag.
    def test_person(self):
        # Instantiating without saving must not add a row.
        person = Person()
        print "Vanilla - test_person. Person Count", Person.objects.all()
    def test_dog(self):
        dog = Dog()
        print "Vanilla - test_dog. Person Count", Person.objects.all()
    def test_tag(self):
        # A Tag pointing at a saved SimpleDog must resolve its content_object.
        dog = SimpleDog(id=1)
        dog.save()
        tag = Tag(content_object=dog)
        tag.save()
        self.assertIsNotNone(tag.content_object)
        print "Vanilla - test_tag. Dog Count", SimpleDog.objects.all()
        print "Vanilla - test_tag. Person Count", Person.objects.all()
|
[
"cameronmaske@gmail.com"
] |
cameronmaske@gmail.com
|
9da35b41038706ad71614404f62e1afe9af8e375
|
c7a6f8ed434c86b4cdae9c6144b9dd557e594f78
|
/ECE364/.PyCharm40/system/python_stubs/348993582/PyQt4/QtGui/QHideEvent.py
|
cfd848238474f2b4a649fb575cb2b6a102c3dc84
|
[] |
no_license
|
ArbalestV/Purdue-Coursework
|
75d979bbe72106975812b1d46b7d854e16e8e15e
|
ee7f86145edb41c17aefcd442fa42353a9e1b5d1
|
refs/heads/master
| 2020-08-29T05:27:52.342264
| 2018-04-03T17:59:01
| 2018-04-03T17:59:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
# encoding: utf-8
# module PyQt4.QtGui
# from /usr/lib64/python2.6/site-packages/PyQt4/QtGui.so
# by generator 1.136
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
# IDE-generated skeleton stub for the binary PyQt4 QHideEvent class; the real
# implementation lives in the compiled extension, so only a signature
# placeholder exists here.
class QHideEvent(__PyQt4_QtCore.QEvent):
    # no doc
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
|
[
"pkalita@princeton.edu"
] |
pkalita@princeton.edu
|
ac64b34973362b7ba0bb20f19fa422398239fe6d
|
8613ec7f381a6683ae24b54fb2fb2ac24556ad0b
|
/boot/hard/divrem.py
|
c7180bf7bddacb57c97c3650dba1a60803a2326d
|
[] |
no_license
|
Forest-Y/AtCoder
|
787aa3c7dc4d999a71661465349428ba60eb2f16
|
f97209da3743026920fb4a89fc0e4d42b3d5e277
|
refs/heads/master
| 2023-08-25T13:31:46.062197
| 2021-10-29T12:54:24
| 2021-10-29T12:54:24
| 301,642,072
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
py
|
# For each divisor-derived candidate m, sum every m with n // m == n % m.
n = int(input())


def calc(x):
    """Return x - 1 when it satisfies n // (x-1) == n % (x-1), else 0."""
    m = x - 1
    if m <= 0:
        return 0
    return m if n // m == n % m else 0


total = 0
for d in range(1, int(n ** 0.5) + 1):
    if n % d == 0:
        total += calc(n // d) + calc(d)
print(total)
|
[
"yuuya15009@gmail.com"
] |
yuuya15009@gmail.com
|
6f906b95dab3f59bdaf57c7c66e43031b13bd885
|
b909406a1f838b2bb9e8eca90fd1bdf412e67286
|
/13.微信投票机器人/VoteRobot-master/VoteRobot.py
|
6f6fee18970cba741315d9928fbbc95582572440
|
[] |
no_license
|
kaishuibaicai/mini-Python-Projects
|
2f12a5349e389c73080443443fcd293aae04a521
|
e18d8bbecb8baaa345720011e67789b123523457
|
refs/heads/master
| 2021-09-13T02:55:12.429216
| 2018-04-24T04:44:33
| 2018-04-24T04:44:33
| 104,319,805
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,383
|
py
|
#encoding=utf-8
# WeChat voting bot (Python 2: httplib/urllib and `print` statements).
import sys
import time
import random
import string
import httplib
import urllib
# Generate a pseudo "openid"-style string: fixed prefix 'owzeBj' followed by
# id_length random letters/digits.  (Comment translated from Chinese.)
def getRandomString(id_length):
    charSeq = string.ascii_letters + string.digits
    randString = 'owzeBj'
    for i in range(id_length):
        randString += random.choice(charSeq)
    return randString
# Cast a single vote for entry `zpid`.  (Translated from Chinese.)
# NOTE(review): HTTPConnection is given "host/path" -- httplib expects a bare
# host name, so this likely never connected as written; confirm.
def voteOnce(zpid):
    conn = httplib.HTTPConnection("weixinmp.fjedu.gov.cn/31/408")
    opid = getRandomString(22)
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    postParams = urllib.urlencode({'zpid': zpid, 'opid': opid, 'md_id': 70, 'act': 'zuopin_toupiao'})
    conn.request("POST", "/wtg1/mobile/user.php", postParams, headers)
    conn.close()
# Vote controller: cast `voteNum` votes for entry `zpid`, sleeping a random
# 1-4 seconds between votes.  (Translated from Chinese.)
def voteController(zpid, voteNum):
    print '======== Start to vote zpid({0}), Total votes: {1}'.format(zpid, voteNum)
    for i in range(voteNum):
        voteOnce(zpid)
        randomSleepTime = random.randint(1, 4)
        print '{0} tickets has been voted, the next ticket will be voted after {1} seconds.'.format(i+1, randomSleepTime)
        time.sleep(randomSleepTime)
    print '======== Voting Ended!'
if __name__ == '__main__':
#    voteOnce(38)
    voteController(38, 3)
|
[
"272251416@qq.com"
] |
272251416@qq.com
|
bacb1da7545cd9eba476314710d4b271f50af7c7
|
600283415a6a403b0a12ee8b5b4a3ff5d6aa757a
|
/templates-demo/venv/bin/pip3
|
50b5431b3c65b767022c0bb2a67f39c43c6248ba
|
[] |
no_license
|
pixb/flask-demo
|
0f2ef9a8bc6315c92e0aec6cac0acfdcf7bc8f03
|
a40686eb25df0ca6379bc409ba34ef0425a863f8
|
refs/heads/master
| 2023-07-25T06:35:08.364038
| 2023-07-11T16:09:49
| 2023-07-11T16:09:49
| 336,691,847
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
#!/home/pix/dev/code/python/python-web-demo/templates-demo/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
# Auto-generated setuptools console-script wrapper: normalises argv[0]
# (stripping "-script.py(w)"/".exe" suffixes) and dispatches to the pip3
# entry point declared by pip 19.0.3.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
    )
|
[
"tpxsky@163.com"
] |
tpxsky@163.com
|
|
4090b855d38ff1df15e8df2efe8df61b15a3b630
|
cd5746f8cc7aee1f20606a65b4fae0d5e8ee78dc
|
/Python Books/Mastering-Machine-Learning-scikit-learn/NumPy-Cookbook/NumPy Cookbook 2nd Edition_CodeBundle/Final Code/0945OS_05_Final Code/ch5code/memmap.py
|
cfb0133647aece53a7a0d204bfbeaf4a3dd9f4dd
|
[] |
no_license
|
theGreenJedi/Path
|
df24fca355590efef0c6cb5c52e7216c6b5d2464
|
b5ed2805dbb046480929e49e550bfd8af5bb4d6f
|
refs/heads/master
| 2023-07-27T14:23:37.694546
| 2021-07-16T01:38:55
| 2021-07-16T01:38:55
| 87,686,563
| 8
| 2
| null | 2023-07-11T22:49:03
| 2017-04-09T05:57:30
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 891
|
py
|
import numpy as np
import matplotlib.pyplot as plt

# Image size and number of random squares to draw.
N = 512
NSQUARES = 30

# Initialize an empty 8-bit grayscale image.
img = np.zeros((N, N), np.uint8)
# np.random.random_integers (inclusive upper bound) was removed from modern
# NumPy; randint with N + 1 keeps the same 0..N inclusive range.
centers = np.random.randint(0, N + 1, size=(NSQUARES, 2))
radii = np.random.randint(0, N // 9, size=NSQUARES)
colors = np.random.randint(100, 255, size=NSQUARES)

# Generate squares (clipped to the image bounds).
# range() replaces the Python-2-only xrange(); behaviour is identical.
for i in range(NSQUARES):
    xindices = range(centers[i][0] - radii[i], centers[i][0] + radii[i])
    xindices = np.clip(xindices, 0, N - 1)
    yindices = range(centers[i][1] - radii[i], centers[i][1] + radii[i])
    yindices = np.clip(yindices, 0, N - 1)
    if len(xindices) == 0 or len(yindices) == 0:
        continue
    coordinates = np.meshgrid(xindices, yindices)
    # Index with a tuple: indexing with a plain list of index arrays is no
    # longer supported by NumPy.
    img[tuple(coordinates)] = colors[i]

# Load into memory map
img.tofile('random_squares.raw')
img_memmap = np.memmap('random_squares.raw', shape=img.shape)

# Display image
plt.imshow(img_memmap)
plt.axis('off')
plt.show()
|
[
"GreenJedi@protonmail.com"
] |
GreenJedi@protonmail.com
|
8eda126b9ddc871d2c35c095ecc5b5f9c3159fc9
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-cloudide/huaweicloudsdkcloudide/v2/model/show_instance_request.py
|
5da7cd5b7264aaf5c265c40ff95920dd44048ea5
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394
| 2021-11-12T03:20:11
| 2021-11-12T03:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,006
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowInstanceRequest:
    """Auto-generated huaweicloud SDK request model for ShowInstance.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    sensitive_list = []
    openapi_types = {
        'instance_id': 'str'
    }
    attribute_map = {
        'instance_id': 'instance_id'
    }
    def __init__(self, instance_id=None):
        """ShowInstanceRequest - a model defined in huaweicloud sdk"""
        self._instance_id = None
        self.discriminator = None
        self.instance_id = instance_id
    @property
    def instance_id(self):
        """Gets the instance_id of this ShowInstanceRequest.
        Instance id.
        :return: The instance_id of this ShowInstanceRequest.
        :rtype: str
        """
        return self._instance_id
    @instance_id.setter
    def instance_id(self, instance_id):
        """Sets the instance_id of this ShowInstanceRequest.
        Instance id.
        :param instance_id: The instance_id of this ShowInstanceRequest.
        :type: str
        """
        self._instance_id = instance_id
    def to_dict(self):
        """Returns the model properties as a dict"""
        # Recursively serialise nested models and mask sensitive fields.
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python-2-only compatibility shim for non-ASCII serialisation.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ShowInstanceRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
1f55ed528984aa255668b43fc89c149e90036a34
|
51a37b7108f2f69a1377d98f714711af3c32d0df
|
/src/leetcode/P657.py
|
f83e958ab590ac9a78e88d3a6b8e2222b20faa3e
|
[] |
no_license
|
stupidchen/leetcode
|
1dd2683ba4b1c0382e9263547d6c623e4979a806
|
72d172ea25777980a49439042dbc39448fcad73d
|
refs/heads/master
| 2022-03-14T21:15:47.263954
| 2022-02-27T15:33:15
| 2022-02-27T15:33:15
| 55,680,865
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 458
|
py
|
class Solution:
def judgeCircle(self, moves: str) -> bool:
x = y = 0
for move in moves:
if move == 'U':
x -= 1
if move == 'D':
x += 1
if move == 'L':
y -= 1
if move == 'R':
y += 1
if x == y == 0:
return True
return False
if __name__ == '__main__':
print(Solution().judgeCircle("DURDLDRRLL"))
|
[
"stupidchen@foxmail.com"
] |
stupidchen@foxmail.com
|
850b481185c8eb69502f7934a5f7e97ad8d38921
|
db03d88ddb75cc9b044a193f5d5f7ac438d64e59
|
/tests/test_checksum_generator.py
|
9f92f8d7e15fb47ad05e573674e171e01d127ac5
|
[
"MIT"
] |
permissive
|
edeposit/edeposit.amqp.ltp
|
e461d00bf74fd54c797d670abdaa3bdccc84e5ce
|
df9ac7ec6cbdbeaaeed438ca66df75ea967b6d8e
|
refs/heads/master
| 2021-01-17T11:30:24.223324
| 2016-03-03T11:10:38
| 2016-03-03T11:10:38
| 21,615,588
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,029
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
import os
import shutil
import pytest
import tempfile
from ltp import checksum_generator as cg
# Variables ===================================================================
DIRNAME = ""
# Functions & objects =========================================================
def create_dir_structure():
dirname = tempfile.mkdtemp()
subdir = dirname + "/xex/"
os.mkdir(subdir)
with open(dirname + "/info.xml", "w") as f:
f.write("hello")
with open(subdir + "/xex.xx", "w") as f:
f.write("this is info file")
with open(dirname + "/somefile.txt", "w") as f:
f.write("somecontent")
with open(subdir + "/somefile.txt", "w") as f:
f.write("somecontent")
return dirname
# Tests =======================================================================
def setup_module(module):
global DIRNAME
DIRNAME = create_dir_structure()
def test_get_required_fn():
assert cg._get_required_fn("./hello", "./") == "/hello"
assert cg._get_required_fn("/home/xex/hello", "/home/xex/") == "/hello"
with pytest.raises(ValueError):
assert cg._get_required_fn("./hello", "/home") == "/hello"
assert cg._get_required_fn("/home/xex/hello", "./") == "/hello"
def test_generate_checksums():
checksums = cg.generate_checksums(DIRNAME)
assert checksums == {
'/somefile.txt': '18c0864b36d60f6036bf8eeab5c1fe7d',
'/xex/somefile.txt': '18c0864b36d60f6036bf8eeab5c1fe7d',
'/xex/xex.xx': 'e77b911e47bb73f6d69a70d246489fb0'
}
def test_generate_hashfile():
hashfile = cg.generate_hashfile(DIRNAME)
assert hashfile == """18c0864b36d60f6036bf8eeab5c1fe7d /somefile.txt
18c0864b36d60f6036bf8eeab5c1fe7d /xex/somefile.txt
e77b911e47bb73f6d69a70d246489fb0 /xex/xex.xx
"""
def teardown_module(module):
global DIRNAME
shutil.rmtree(DIRNAME)
|
[
"bystrousak@kitakitsune.org"
] |
bystrousak@kitakitsune.org
|
b2a188f7f610cf761151ec7b3382ab71ee475b77
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_5/byrtre001/question1.py
|
6de3673699da383ae564bde3010d5be128888984
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,204
|
py
|
"""program to simulate a simple BBS with one stored message and 2 fixed file
Trevor Byaruhanga
15 april 2014"""
# stored message.
a=('Welcome to UCT BBS'+'\n'+
'MENU'+'\n'+
'(E)nter a message'+'\n'+
'(V)iew message'+'\n'+
'(L)ist files'+'\n'+
'(D)isplay file'+'\n'+
'e(X)it')
print(a)
# input question prompting the user to chose one of the options
#in the menu
command=input('Enter your selection:'+'\n')
#function to work out which item was chosen and present output to the user.
def function(command):
command=command.upper()
if command == 'E':
message=input('Enter the message:'+'\n')
print(a)
command=input('Enter your selection:'+'\n')
command=command.upper()
if command == 'X':
print('Goodbye!')
if command == 'V':
if message:
print ('The message is:', message)
print(a)
command=input('Enter your selection:'+'\n')
if command == 'X':
print('Goodbye!')
elif command == 'V':
print ('The message is: no message yet')
print(a)
command=input('Enter your selection:'+'\n')
elif command == 'X':
print('Goodbye!')
if command == 'L':
print('List of files: 42.txt, 1015.txt')
print(a)
command=input('Enter your selection:'+'\n')
if command == 'D':
filename=input('Enter the filename:'+'\n')
if filename=='42.txt':
print('The meaning of life is blah blah blah ...')
print(a)
command=input('Enter your selection:'+'\n')
elif filename=='1015.txt':
print('Computer Science class notes ... simplified'+'\n'+
'Do all work'+'\n'+
'Pass course'+'\n'+
'Be happy')
print(a)
command=input('Enter your selection:'+'\n')
else:
print('File not found')
print(a)
command=input('Enter your selection:'+'\n')
function(command)
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
cbc1a1a06bf7ee83de0cbc07b4f9b4f47f119827
|
9f4b3edaf1095ed58f5ff2d38d79d27b7e230e92
|
/doc/source/python_as_glue.py
|
81aaf1b30b0f672c46f77cb95265a1e0cfc8bcee
|
[
"BSD-3-Clause-LBNL"
] |
permissive
|
zhishang72/TECA
|
dbd954ec48f5d9ad0643d26f5fbb6daf8dfd9842
|
b8bed845e868133e4fbe01f4da40edd4c34cd775
|
refs/heads/master
| 2020-04-21T04:54:35.853007
| 2019-01-26T18:13:18
| 2019-01-26T18:13:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,695
|
py
|
# initialize MPI
from mpi4py import MPI
# bring in TECA
from teca_py_io import *
from teca_py_alg import *
# start the pipeline with the NetCDF CF-2.0 reader
cfr = teca_cf_reader.New()
cfr.set_files_regex('cam5_1_amip_run2\.cam2\.h2\.*')
cfr.set_x_axis_variable('lon')
cfr.set_y_axis_variable('lat')
cfr.set_t_axis_variable('time')
# add L2 norm operator to compute wind speed
l2n = teca_l2_norm.New()
l2n.set_component_0_variable('U850')
l2n.set_component_1_variable('V850')
l2n.set_l2_norm_variable('wind_speed')
l2n.set_input_connection(cfr.get_output_port())
# and vorticity operator to compute wind vorticity
vor = teca_vorticity.New()
vor.set_component_0_variable('U850')
vor.set_component_1_variable('V850')
vor.set_vorticity_variable('wind_vorticity')
vor.set_input_connnection(l2n.get_output_port())
# and finally the tropical cyclone detector
tcd = teca_tc_detect.New()
tcd.set_pressure_variable('PSL')
tcd.set_temperature_variable('TMQ')
tcd.set_wind_speed_variable('wind_speed')
tcd.set_vorticity_variable('wind_vorticity')
tcd.set_input_connection(vor.get_output_port())
# now add the map-reduce, the pipeline above is run in
# parallel using MPI+threads. Each thread processes one time
# step. the pipeline below this algorithm runs in serial on
# rank 0, # with 1 thread
mapr = teca_table_reduce.New()
mapr.set_thread_pool_size(2)
mapr.set_first_step(0)
mapr.set_last_step(-1)
mapr.set_input_connection(tcd.get_output_port())
# save the detected stroms
twr = teca_table_writer.New()
twr.set_file_name('detections_%t%.csv')
twr.set_input_connection(mapr.get_output_port())
# the commands above connect and configure the pipeline
# this command actually runs it
twr.update()
|
[
"bloring@lbl.gov"
] |
bloring@lbl.gov
|
db1ad97f218bbfe8114e47d1210c1a9a1bfafd4d
|
c099611e42319053888a747ea78468224e45a725
|
/Polar-slepian/V_20/polarchannelsim_FERvsR_rateless_Det_Iter_delta_300_T8.py
|
646a54ffe87882062f4e7a8a464441bd074fcbe9
|
[] |
no_license
|
sbsoumya/PolarProject-Code_Res
|
118f54593716520c71cdc0e479236ffdc1a94f89
|
12a3b6fb24cf8160a519c74b064fd845066cbe0b
|
refs/heads/master
| 2021-06-27T21:04:41.057937
| 2019-03-22T20:56:44
| 2019-03-22T20:56:44
| 129,615,052
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,799
|
py
|
#-------------------------------------------------------------------------------
# Name: polarchannelsim_FERvsR_rateless_det_Iterretro.py
# Purpose: FER VS R simulation for given msg_length and varying channel
#
# Author: soumya
#
# Created: 19/08/2017
#----------------------------------------
import numpy as np
import math as ma
import problib as pl
import polarencdec as ec
import polarconstruct as pcon
from datetime import datetime
import json
import polarchannel as pch
from pprint import pprint
import rateless_channel_det as rlc
from timeit import default_timer as timer
#=================================================================simulation
#------------Number of good channels = capacity
start = timer()
Nlist=[1024]
channel_plist=list(np.linspace(0.05,0.45,10))
compound_plist=[0.08349999999999963, 0.10599999999999965, 0.13099999999999967, 0.1594999999999997, 0.19249999999999973]
#[600, 525, 450, 375, 300]
T=8
msg_length=296
deltaG=38
runsim=1
start=timer()
for N in Nlist:
stamp=datetime.now().strftime("%y-%m-%d_%H-%M-%S")
filename="./simresults/polarchannel_FERvsR_rateless_Det_Iter_delta_"+str(msg_length)+"in"+str(N)+"_T"+str(T)+"_"+stamp+".txt"
f1=open(filename,'w')
print filename
print "RATE Vs FER REPORT Rateless Det Iter delta"
print "------------------------------------------"
print "Compound_plist:"
print compound_plist
print "sim ran :"+str(runsim)
print "T:"+str(T)
json.dump( "RATE Vs FER REPORT Rateless Det Iter delta",f1) ;f1.write("\n")
json.dump( "------------------------------------------",f1) ;f1.write("\n")
json.dump( "Compound_plist:",f1) ;f1.write("\n")
json.dump(compound_plist,f1) ;f1.write("\n")
json.dump("sim ran :"+str(runsim),f1) ;f1.write("\n")
json.dump("T:"+str(T),f1);f1.write("\n")
print "N="+str(N)
json.dump( "N="+str(N),f1) ;f1.write("\n")
used_rate=[];
achieved_rate=[]
FER=[];
Iter_problist=[]
for channel_p in channel_plist:
#print "channel_p:"+str(channel_p)
(u_rate,ach_rate,block_error,Iter_probdict)=rlc.send_rateless_det_Iter_retro_delta_sim(N,T,compound_plist,channel_p,msg_length,deltaG,runsim)
used_rate.append(u_rate)
achieved_rate.append(ach_rate)
FER.append(block_error)
Iter_problist.append(Iter_probdict)
block_error_exp=np.log10(FER).tolist()
print channel_plist
print achieved_rate
print block_error_exp
print Iter_problist
json.dump( "Rate vs Block_error=",f1) ;f1.write("\n")
json.dump(channel_plist,f1) ;f1.write("\n")
json.dump(achieved_rate,f1) ;f1.write("\n")
json.dump(block_error_exp,f1) ;f1.write("\n")
json.dump( "Iter Probabilities=",f1) ;f1.write("\n")
json.dump(Iter_problist,f1) ;f1.write("\n")
end = timer()
TC=(end-start)
print "Time taken:"+str(TC)
json.dump("Time taken:"+str(TC) ,f1) ;f1.write("\n")
|
[
"soumya.s.banerjee17@gmail.com"
] |
soumya.s.banerjee17@gmail.com
|
40c74d123eff6051b5952100a45abef935eac8db
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/x12/4030/894004030.py
|
124523df12c7828177f53169e08f00b209133396
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 809
|
py
|
from bots.botsconfig import *
from records004030 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'DX',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'G82', MIN: 1, MAX: 1},
{ID: 'N9', MIN: 0, MAX: 99999},
{ID: 'LS', MIN: 0, MAX: 1, LEVEL: [
{ID: 'G83', MIN: 1, MAX: 9999, LEVEL: [
{ID: 'G22', MIN: 0, MAX: 1},
{ID: 'G72', MIN: 0, MAX: 10},
{ID: 'G23', MIN: 0, MAX: 20},
]},
{ID: 'LE', MIN: 1, MAX: 1},
]},
{ID: 'G72', MIN: 0, MAX: 20},
{ID: 'G23', MIN: 0, MAX: 20},
{ID: 'G84', MIN: 1, MAX: 1},
{ID: 'G86', MIN: 1, MAX: 1},
{ID: 'G85', MIN: 1, MAX: 1},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
5779bcdba227fa41b598888395ca6c6cb372d7fd
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02614/s337550371.py
|
e941783ee59fd288f4e96377d81f254481c236d0
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 347
|
py
|
h, w, k = map(int, input().split())
c = []
for _ in range(h):
c.append([c for c in input()])
ans = 0
for i in range(1 << h):
for j in range(1 << w):
cnt = 0
for n in range(h):
for m in range(w):
if i >> n & 1:
continue
if j >> m & 1:
continue
if c[n][m] == '#':
cnt += 1
if cnt == k:
ans += 1
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
8513012fbdbf8d8b052ea5fdb992e028c5dec60c
|
d7016f69993570a1c55974582cda899ff70907ec
|
/sdk/digitaltwins/azure-mgmt-digitaltwins/azure/mgmt/digitaltwins/v2020_03_01_preview/_azure_digital_twins_management_client.py
|
7be0cc594bb1102a3a5a1b3abda1105fdbb3df76
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
kurtzeborn/azure-sdk-for-python
|
51ca636ad26ca51bc0c9e6865332781787e6f882
|
b23e71b289c71f179b9cf9b8c75b1922833a542a
|
refs/heads/main
| 2023-03-21T14:19:50.299852
| 2023-02-15T13:30:47
| 2023-02-15T13:30:47
| 157,927,277
| 0
| 0
|
MIT
| 2022-07-19T08:05:23
| 2018-11-16T22:15:30
|
Python
|
UTF-8
|
Python
| false
| false
| 4,798
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from . import models as _models
from .._serialization import Deserializer, Serializer
from ._configuration import AzureDigitalTwinsManagementClientConfiguration
from .operations import DigitalTwinsEndpointOperations, DigitalTwinsOperations, Operations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class AzureDigitalTwinsManagementClient: # pylint: disable=client-accepts-api-version-keyword
"""Azure Digital Twins Client for managing DigitalTwinsInstance.
:ivar digital_twins: DigitalTwinsOperations operations
:vartype digital_twins:
azure.mgmt.digitaltwins.v2020_03_01_preview.operations.DigitalTwinsOperations
:ivar digital_twins_endpoint: DigitalTwinsEndpointOperations operations
:vartype digital_twins_endpoint:
azure.mgmt.digitaltwins.v2020_03_01_preview.operations.DigitalTwinsEndpointOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.digitaltwins.v2020_03_01_preview.operations.Operations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The subscription identifier. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2020-03-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = AzureDigitalTwinsManagementClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.digital_twins = DigitalTwinsOperations(self._client, self._config, self._serialize, self._deserialize)
self.digital_twins_endpoint = DigitalTwinsEndpointOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self) -> None:
self._client.close()
def __enter__(self) -> "AzureDigitalTwinsManagementClient":
self._client.__enter__()
return self
def __exit__(self, *exc_details) -> None:
self._client.__exit__(*exc_details)
|
[
"noreply@github.com"
] |
kurtzeborn.noreply@github.com
|
2136f5f5f2747cea092dd86d56384971f885788c
|
69c33fcad69a2e61cc60209401215530d033e712
|
/Python/Python Basics/80.exercise.py
|
edec7ae40f434231bf7bad65c48644f653a77183
|
[] |
no_license
|
KULDEEPMALIKM41/Practices
|
7659b895ea959c7df2cdbc79c0b982b36f2bde63
|
193abe262ff281a384aac7895bb66dc39ee6e88d
|
refs/heads/master
| 2023-08-17T11:01:11.694282
| 2021-09-30T08:12:41
| 2021-09-30T08:12:41
| 289,527,102
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
#addition list element
l=[5,8,9,10,12,44,12]
print('list is : ',l)
d=0
for element in l:
d+=element
print('list addtion is : ',d)
print()
#print list element
l=[5,8,9,10,12,44,12]
print('list is : ',l)
e=1
for element in l:
print('element no',e, 'is : ',element)
e+=1
print()
#count number of elements in list
l=[1,2,3,4,5,6,7,8,9,10]
print('list is : ',l)
c=0
for element in l:
c+=1
print('number of element in list : ',c)
print()
|
[
"Kuldeepmalikm41@gmail.com"
] |
Kuldeepmalikm41@gmail.com
|
fc67686c7d358fe86c73044273f69669abee17fa
|
b22b0760b29d24cff24eda9d1c114094fd1a588f
|
/Python/Easy/1. Two Sum.py
|
04d0f39a36f83c43424be8c7e9ed2cf68bb3927e
|
[] |
no_license
|
MridulGangwar/Leetcode-Solutions
|
bbbaa06058a7b3e7621fc54050e344c06a256080
|
d41b1bbd762030733fa271316f19724d43072cd7
|
refs/heads/master
| 2022-03-07T12:20:33.485573
| 2022-02-21T07:22:38
| 2022-02-21T07:22:38
| 231,700,258
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
dic ={}
for i,num in enumerate(nums):
if target-num in dic:
return[dic[target-num],i]
else:
dic[num]=i
|
[
"singhmridul1@gmail.com"
] |
singhmridul1@gmail.com
|
926a132b2cac32d3d310cfd5c8940261302a4f1b
|
602a4e86499841fbae43d84fc92908c533106aea
|
/core/actions/photoline.py
|
c9b5f8cb9eb9948ac8e096ec9914ec904d5370bc
|
[] |
no_license
|
vden/TsoguNG
|
b187ccf1bef387417ec73467c51458d6f1443239
|
f8d5e7ab9d85559aa163c232c9f28a24a2b7c2a4
|
refs/heads/master
| 2021-01-02T08:52:03.914218
| 2011-04-26T07:01:57
| 2011-04-26T07:01:57
| 1,663,546
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 863
|
py
|
# -*- coding: utf-8 -*-
from core.portal.register import portalaction
from core.portal.render import render_to_portal
from core.views import get_object_by_url
from core.models import BaseObject
import random
@portalaction(verbose_name=u'Фотокалейдоскоп')
@render_to_portal(template='actions/photogallery.html')
def photoline(request):
result = BaseObject.nodes()(types=['News'], sort_fields=['-date_published'], states=[u'опубликовынный',u'на главной']).all()[:100]
all_imgs = []
for x in result:
all_imgs.extend( x.get_images() )
format = request.GET.get("format", "medium")
if format != "small":
format = "medium"
x = y = 160
count = 25
else:
x = y = 80
count = 70
print "EEE", len(all_imgs), count
imgs = random.sample(all_imgs, count)
return { 'imgs': imgs, 'format': format, 'x': x, 'y': y }
|
[
"denis.voskvitsov@gmail.com"
] |
denis.voskvitsov@gmail.com
|
9edf40ecb62eac81cd943cb16e8c1f28793c615e
|
f95db72e9a6f0c89f77582eb589e6de625de76c7
|
/tools/perf/core/results_processor/processor_unittest.py
|
2659fce99be675fd600510596c5e1886e0fa2649
|
[
"BSD-3-Clause",
"Zlib",
"LGPL-2.0-or-later",
"MIT",
"LGPL-2.1-only",
"APSL-2.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Seshpenguin/chromium
|
ca814257d998676850dd69c86c2dc2892b06aa87
|
c2e0382538708b7801254f7a06c6bbe61b9aa65c
|
refs/heads/master
| 2023-03-11T10:50:20.627435
| 2019-11-07T21:06:41
| 2019-11-07T21:06:41
| 220,328,767
| 0
| 0
|
BSD-3-Clause
| 2019-11-07T21:05:20
| 2019-11-07T21:05:19
| null |
UTF-8
|
Python
| false
| false
| 5,495
|
py
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for results_processor methods."""
import datetime
import os
import unittest
import mock
from core.results_processor import processor
from core.results_processor import testing
from tracing.value.diagnostics import generic_set
from tracing.value.diagnostics import date_range
from tracing.value import histogram_set
class ResultsProcessorUnitTests(unittest.TestCase):
def testAddDiagnosticsToHistograms(self):
start_ts = 1500000000
start_iso = datetime.datetime.utcfromtimestamp(start_ts).isoformat() + 'Z'
test_result = testing.TestResult(
'benchmark/story',
output_artifacts={
'trace.html': testing.Artifact('/trace.html', 'gs://trace.html'),
},
start_time=start_iso,
tags=['story_tag:test'],
result_id='3',
)
test_result['_histograms'] = histogram_set.HistogramSet()
test_result['_histograms'].CreateHistogram('a', 'unitless', [0])
processor.AddDiagnosticsToHistograms(
test_result, test_suite_start=start_iso, results_label='label',
test_path_format='telemetry')
hist = test_result['_histograms'].GetFirstHistogram()
self.assertEqual(hist.diagnostics['labels'],
generic_set.GenericSet(['label']))
self.assertEqual(hist.diagnostics['benchmarks'],
generic_set.GenericSet(['benchmark']))
self.assertEqual(hist.diagnostics['benchmarkStart'],
date_range.DateRange(start_ts * 1e3))
self.assertEqual(hist.diagnostics['traceStart'],
date_range.DateRange(start_ts * 1e3))
self.assertEqual(hist.diagnostics['stories'],
generic_set.GenericSet(['story']))
self.assertEqual(hist.diagnostics['storyTags'],
generic_set.GenericSet(['test']))
self.assertEqual(hist.diagnostics['storysetRepeats'],
generic_set.GenericSet([3]))
self.assertEqual(hist.diagnostics['traceUrls'],
generic_set.GenericSet(['gs://trace.html']))
def testUploadArtifacts(self):
test_result = testing.TestResult(
'benchmark/story',
output_artifacts={
'logs': testing.Artifact('/log.log'),
'trace.html': testing.Artifact('/trace.html'),
'screenshot': testing.Artifact('/screenshot.png'),
},
)
with mock.patch('py_utils.cloud_storage.Insert') as cloud_patch:
cloud_patch.return_value = 'gs://url'
processor.UploadArtifacts(test_result, 'bucket', 'run1')
cloud_patch.assert_has_calls([
mock.call('bucket', 'run1/benchmark/story/logs', '/log.log'),
mock.call('bucket', 'run1/benchmark/story/trace.html', '/trace.html'),
mock.call('bucket', 'run1/benchmark/story/screenshot',
'/screenshot.png'),
],
any_order=True,
)
for artifact in test_result['outputArtifacts'].itervalues():
self.assertEqual(artifact['remoteUrl'], 'gs://url')
def testRunIdentifier(self):
with mock.patch('random.randint') as randint_patch:
randint_patch.return_value = 54321
run_identifier = processor.RunIdentifier(
results_label='src@abc + 123',
test_suite_start='2019-10-01T12:00:00.123456Z')
self.assertEqual(run_identifier, 'src_abc_123_20191001T120000_54321')
def testAggregateTraces(self):
test_result = testing.TestResult(
'benchmark/story2',
output_artifacts={
'trace/1.json': testing.Artifact(
os.path.join('test_run', 'story2', 'trace', '1.json')),
'trace/2.json': testing.Artifact(
os.path.join('test_run', 'story2', 'trace', '2.json')),
},
)
serialize_method = 'tracing.trace_data.trace_data.SerializeAsHtml'
with mock.patch(serialize_method) as mock_serialize:
processor.AggregateTraces(test_result)
self.assertEqual(mock_serialize.call_count, 1)
trace_files, file_path = mock_serialize.call_args[0][:2]
self.assertEqual(
set(trace_files),
set([
os.path.join('test_run', 'story2', 'trace', '1.json'),
os.path.join('test_run', 'story2', 'trace', '2.json'),
]),
)
self.assertEqual(
file_path,
os.path.join('test_run', 'story2', 'trace', 'trace.html'),
)
artifacts = test_result['outputArtifacts']
self.assertEqual(len(artifacts), 1)
self.assertEqual(artifacts.keys()[0], 'trace.html')
def testMeasurementToHistogram(self):
hist = processor.MeasurementToHistogram('a', {
'unit': 'sizeInBytes',
'samples': [1, 2, 3],
'description': 'desc',
})
self.assertEqual(hist.name, 'a')
self.assertEqual(hist.unit, 'sizeInBytes')
self.assertEqual(hist.sample_values, [1, 2, 3])
self.assertEqual(hist.description, 'desc')
def testMeasurementToHistogramLegacyUnits(self):
hist = processor.MeasurementToHistogram('a', {
'unit': 'seconds',
'samples': [1, 2, 3],
})
self.assertEqual(hist.name, 'a')
self.assertEqual(hist.unit, 'ms_smallerIsBetter')
self.assertEqual(hist.sample_values, [1000, 2000, 3000])
def testMeasurementToHistogramUnknownUnits(self):
with self.assertRaises(ValueError):
processor.MeasurementToHistogram('a', {'unit': 'yards', 'samples': [9]})
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
624ed4f23273f22a3d0728f4428603fd1d9342bb
|
12a72da6848ae461b995ec2fc6c4e1827be82803
|
/coin_db/okex_kline_history.py
|
97744aeb24e43f8578bebdea12ec2dde40a5950b
|
[] |
no_license
|
lim1942/coin_helper
|
f3ed40c07a049a00f052dfa3e59cee7eefe969cf
|
d34ce363371fd964d8c46d5dd04ca7c5eb7d35b4
|
refs/heads/main
| 2023-04-30T10:46:03.231440
| 2021-05-25T12:15:49
| 2021-05-25T12:15:49
| 366,247,314
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,314
|
py
|
from coin_db.base import InstrumentIdMysql
from coin_helper.settings import TZ_HOUR
class OkexKlineHistoryMysql(InstrumentIdMysql):
# 插入
insert_fields = ('open','high','low','close','volume','time')
insert_sql = f"""INSERT IGNORE INTO table_name({','.join(insert_fields)}) VALUES({','.join(('%s' for _ in insert_fields))});"""
insert_fields_date = [('time','%Y-%m-%dT%H:%M:%S.%fZ',TZ_HOUR)]
insert_fields_multiple = ('open','high','low','close','volume')
# 查询
query_order_field = 'time'
# 建表
table_sql = f"""CREATE TABLE If Not Exists `table_name` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`open` bigint(20) NOT NULL,
`high` bigint(20) NOT NULL,
`low` bigint(20) NOT NULL,
`close` bigint(20) NOT NULL,
`volume` bigint(20) NOT NULL,
`time` datetime(6) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `time` (`time`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
"""
@classmethod
def get_value_by_item(cls,item):
item = {'open':item[1],'high':item[2],'low':item[3],'close':item[4],'volume':item[5],'time':item[0]}
return super().get_value_by_item(item)
if __name__ == "__main__":
OkexKlineHistoryMysql(instrument_id='BTC-USDT').create_tables()
|
[
"lim1942@163.com"
] |
lim1942@163.com
|
676fc4fbab4931a3870271f4f287804749359ed0
|
fe33bdb20436a379a17d56b83816d7064cb75d90
|
/src/rocon_multimaster/rocon_hub/src/rocon_hub/main.py
|
f1ce15a41be27d10d3bb1d6af1477ff7a91e47bd
|
[] |
no_license
|
uml-robotics/catkin_tester
|
764744614782acaff46f66f25dbd1650d0fcd5e8
|
dfc8bb2026c06d0f97696a726a6773ff8b99496e
|
refs/heads/master
| 2022-10-31T11:48:27.207535
| 2017-11-27T18:09:38
| 2017-11-27T18:09:38
| 111,495,779
| 0
| 1
| null | 2022-10-19T14:49:44
| 2017-11-21T03:45:59
|
C
|
UTF-8
|
Python
| false
| false
| 3,280
|
py
|
#!/usr/bin/env python
#
# License: BSD
# https://raw.github.com/robotics-in-concert/rocon_multimaster/license/LICENSE
#
##############################################################################
# Imports
##############################################################################
import sys
# Ros imports
import rospy
import std_srvs.srv as std_srvs
# Local imports
from . import utils
from . import redis_server
from . import ros_parameters
from . import watcher
from . import zeroconf
##############################################################################
# Variables
##############################################################################
redi = None
timeout = 15
##############################################################################
# Shutdown Handlers
##############################################################################
#
# This lets the hub have a controlled shutdown from an external party
# (in our special case of interest, from the conductor).
def ros_service_shutdown(unused_request):
shutdown()
return std_srvs.EmptyResponse()
def shutdown():
global redi
if redi is not None:
rospy.loginfo("Hub : shutting down.")
redi.shutdown()
redi = None
def wait_for_shutdown():
'''
Shutdown hook - we wait here for an external shutdown via ros service
(at which point redi is None)
timing out after a reasonable time if we need to.
'''
global redi
global timeout
count = 0.0
while count < timeout:
if redi is None:
return
else:
count += 0.5
rospy.rostime.wallsleep(0.5) # human time
rospy.logwarn("Hub : timed out waiting for external shutdown by ros service, forcing shutdown now.")
shutdown()
##############################################################################
# Main
##############################################################################
def main():
global redi
global timeout
while not utils.check_master():
rospy.logerr("Unable to communicate with master!")
rospy.rostime.wallsleep(1.0)
if rospy.is_shutdown():
sys.exit(utils.red_string("Unable to communicate with master!"))
rospy.init_node('hub')
param = ros_parameters.load()
# Installation checks - sys exits if the process if not installed.
utils.check_if_executable_available('redis-server')
if param['zeroconf']:
utils.check_if_executable_available('avahi-daemon')
if param['external_shutdown']:
timeout = param['external_shutdown_timeout']
rospy.on_shutdown(wait_for_shutdown)
unused_shutdown_service = rospy.Service('~shutdown', std_srvs.Empty, ros_service_shutdown)
redi = redis_server.RedisServer(param)
redi.start() # sys exits if server connection is unavailable or incorrect version
if param['zeroconf']:
zeroconf.advertise_port_to_avahi(param['port'], param['name']) # sys exits if running avahi-daemon not found
watcher_thread = watcher.WatcherThread('localhost', param['port'])
watcher_thread.start()
rospy.spin()
if not param['external_shutdown']:
# do it here, don't wait for the ros service to get triggered
shutdown()
|
[
"james.perl12@gmail.com"
] |
james.perl12@gmail.com
|
3328d3db51d6fb8039493a5e75180371fc20c187
|
b0e93e504bc1e45a4e08b0cbc30a9ada3601b9d2
|
/service/routes.py
|
94b4842fb7126c21b04b680857e789cd44f302a2
|
[
"Apache-2.0"
] |
permissive
|
jarty13/lab-flask-tdd
|
e54f3212d3804ecb60f5f0227f6fc9c6cb70e2a8
|
d943e84a6c2dab6660d55428be56e767fd90bc0f
|
refs/heads/master
| 2023-03-19T07:17:29.934253
| 2021-03-16T01:53:22
| 2021-03-16T01:53:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,059
|
py
|
# Copyright 2016, 2017 John J. Rofrano. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pet Store Service
Paths:
------
GET /pets - Returns a list all of the Pets
GET /pets/{id} - Returns the Pet with a given id number
POST /pets - creates a new Pet record in the database
PUT /pets/{id} - updates a Pet record in the database
DELETE /pets/{id} - deletes a Pet record in the database
"""
import os
import sys
import logging
from flask import Flask, jsonify, request, url_for, make_response, abort
from flask_api import status # HTTP Status Codes
from werkzeug.exceptions import NotFound
# For this example we'll use SQLAlchemy, a popular ORM that supports a
# variety of backends including SQLite, MySQL, and PostgreSQL
from flask_sqlalchemy import SQLAlchemy
from .models import Pet, DataValidationError
# Import Flask application
from . import app
######################################################################
# Error Handlers
######################################################################
@app.errorhandler(DataValidationError)
def request_validation_error(error):
    """Translate a DataValidationError (bad payload data) into a 400 response."""
    return bad_request(error)
@app.errorhandler(status.HTTP_400_BAD_REQUEST)
def bad_request(error):
    """Render a 400 BAD REQUEST response for malformed client input."""
    message = str(error)
    app.logger.warning(message)
    payload = jsonify(
        status=status.HTTP_400_BAD_REQUEST,
        error="Bad Request",
        message=message,
    )
    return payload, status.HTTP_400_BAD_REQUEST
@app.errorhandler(status.HTTP_404_NOT_FOUND)
def not_found(error):
    """Render a 404 NOT FOUND response."""
    message = str(error)
    app.logger.warning(message)
    payload = jsonify(
        status=status.HTTP_404_NOT_FOUND,
        error="Not Found",
        message=message,
    )
    return payload, status.HTTP_404_NOT_FOUND
@app.errorhandler(status.HTTP_405_METHOD_NOT_ALLOWED)
def method_not_supported(error):
    """Render a 405 METHOD NOT ALLOWED response."""
    message = str(error)
    app.logger.warning(message)
    payload = jsonify(
        status=status.HTTP_405_METHOD_NOT_ALLOWED,
        error="Method not Allowed",
        message=message,
    )
    return payload, status.HTTP_405_METHOD_NOT_ALLOWED
@app.errorhandler(status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)
def mediatype_not_supported(error):
    """Render a 415 UNSUPPORTED MEDIA TYPE response."""
    message = str(error)
    app.logger.warning(message)
    payload = jsonify(
        status=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
        error="Unsupported media type",
        message=message,
    )
    return payload, status.HTTP_415_UNSUPPORTED_MEDIA_TYPE
@app.errorhandler(status.HTTP_500_INTERNAL_SERVER_ERROR)
def internal_server_error(error):
    """Render a 500 INTERNAL SERVER ERROR response for unexpected failures."""
    message = str(error)
    # Unexpected faults are logged at error level, unlike the 4xx handlers.
    app.logger.error(message)
    payload = jsonify(
        status=status.HTTP_500_INTERNAL_SERVER_ERROR,
        error="Internal Server Error",
        message=message,
    )
    return payload, status.HTTP_500_INTERNAL_SERVER_ERROR
######################################################################
# GET INDEX
######################################################################
@app.route("/")
def index():
    """Describe the service (name, version, entry path) at the root URL."""
    app.logger.info("Request for Root URL")
    body = jsonify(
        name="Pet Demo REST API Service",
        version="1.0",
        paths=url_for("list_pets", _external=True),
    )
    return body, status.HTTP_200_OK
######################################################################
# LIST ALL PETS
######################################################################
@app.route("/pets", methods=["GET"])
def list_pets():
    """Return every Pet, optionally filtered by ?category= or ?name=."""
    app.logger.info("Request for pet list")
    category = request.args.get("category")
    name = request.args.get("name")
    if category:
        found = Pet.find_by_category(category)
    elif name:
        found = Pet.find_by_name(name)
    else:
        found = Pet.all()
    results = [pet.serialize() for pet in found]
    app.logger.info("Returning %d pets", len(results))
    return make_response(jsonify(results), status.HTTP_200_OK)
######################################################################
# RETRIEVE A PET
######################################################################
@app.route("/pets/<int:pet_id>", methods=["GET"])
def get_pets(pet_id):
    """Return a single Pet by id; raises NotFound (404) when it does not exist."""
    app.logger.info("Request for pet with id: %s", pet_id)
    found = Pet.find(pet_id)
    if not found:
        raise NotFound("Pet with id '{}' was not found.".format(pet_id))
    app.logger.info("Returning pet: %s", found.name)
    return make_response(jsonify(found.serialize()), status.HTTP_200_OK)
######################################################################
# ADD A NEW PET
######################################################################
@app.route("/pets", methods=["POST"])
def create_pets():
    """Create a Pet from the JSON body; respond 201 with a Location header."""
    app.logger.info("Request to create a pet")
    check_content_type("application/json")
    pet = Pet()
    pet.deserialize(request.get_json())
    pet.create()
    location_url = url_for("get_pets", pet_id=pet.id, _external=True)
    app.logger.info("Pet with ID [%s] created.", pet.id)
    return make_response(
        jsonify(pet.serialize()),
        status.HTTP_201_CREATED,
        {"Location": location_url},
    )
######################################################################
# UPDATE AN EXISTING PET
######################################################################
@app.route("/pets/<int:pet_id>", methods=["PUT"])
def update_pets(pet_id):
    """Replace the Pet with the given id using the JSON body; 404 when absent."""
    app.logger.info("Request to update pet with id: %s", pet_id)
    check_content_type("application/json")
    target = Pet.find(pet_id)
    if not target:
        raise NotFound("Pet with id '{}' was not found.".format(pet_id))
    target.deserialize(request.get_json())
    # deserialize may not carry the id; pin it to the path parameter.
    target.id = pet_id
    target.update()
    app.logger.info("Pet with ID [%s] updated.", target.id)
    return make_response(jsonify(target.serialize()), status.HTTP_200_OK)
######################################################################
# DELETE A PET
######################################################################
@app.route("/pets/<int:pet_id>", methods=["DELETE"])
def delete_pets(pet_id):
    """Delete the Pet with the given id; 204 is returned even when absent (idempotent)."""
    app.logger.info("Request to delete pet with id: %s", pet_id)
    found = Pet.find(pet_id)
    if found:
        found.delete()
    app.logger.info("Pet with ID [%s] delete complete.", pet_id)
    return make_response("", status.HTTP_204_NO_CONTENT)
######################################################################
# U T I L I T Y F U N C T I O N S
######################################################################
def check_content_type(media_type):
    """Abort with 415 unless the request's Content-Type matches ``media_type``.

    Fix: compare only the media type itself, ignoring parameters, so a
    header like ``application/json; charset=utf-8`` is accepted — the
    previous strict equality rejected it.
    """
    content_type = request.headers.get("Content-Type")
    if content_type and content_type.split(";")[0].strip() == media_type:
        return
    app.logger.error("Invalid Content-Type: %s", content_type)
    abort(415, "Content-Type must be {}".format(media_type))
|
[
"johnnyroy@johnrofrano.com"
] |
johnnyroy@johnrofrano.com
|
be0001f3b09da183ee40c5855d5794198269d7db
|
7a59728868fc8bc81396f192a78e4dd184fb0201
|
/simulation/simulate_ring.py
|
65e3f6327a12593e872c4374b92ee1f61030cc9b
|
[] |
no_license
|
chuan137/upiv-analysis
|
1272d3515759bc6792c73fca9a2a13f074177381
|
ed1a022a9e5069108fba49bcfd2ec736356a64ee
|
refs/heads/master
| 2021-01-10T13:08:41.838545
| 2016-02-25T11:17:04
| 2016-02-25T11:17:04
| 52,277,426
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,777
|
py
|
#!/usr/bin/env python
import sys, os
sys.path.insert(0, os.path.realpath('../python'))
import cv
import getopt
import matplotlib.pyplot as plt
import numpy as np
import numpy.random as rand
import tifffile.tifffile as tif
from helper import stats
# {{{ Helper Functions
def snr_ind(snr):
    """Convert a linear SNR ratio to decibels: 20 * log10(snr).

    Same quantity as the original ``20 * np.log(snr) / np.log(10)``,
    written with the direct base-10 logarithm instead of a manual
    change of base.
    """
    return 20 * np.log10(snr)
def draw_circle(imgsize, rad, thickness=2):
    """Boolean annulus mask of radius ``rad`` centred in an imgsize image."""
    width, height = imgsize
    rows, cols = np.mgrid[:width, :height]
    dist_sq = (rows - width / 2) ** 2 + (cols - height / 2) ** 2
    inner, outer = rad - thickness / 2, rad + thickness / 2
    # Strict inequalities on the squared distance keep the exact boundary out.
    return (dist_sq > inner ** 2) & (dist_sq < outer ** 2)
def draw_fuzzy_circle(imgsize, rad, thickness):
    """Image whose pixels hold a Gaussian of the (floored) distance from the
    image centre, peaking at radius ``rad``; sigma is ``thickness / 2``.

    Fix: the distance map is cast to int before indexing the kernel —
    ``kernel[x]`` with a float ``x`` raises IndexError on modern NumPy.
    Fancy indexing also replaces the Python-speed ``np.vectorize`` loop.
    """
    sigma = 0.5 * thickness
    radii = np.arange(min(imgsize))
    kernel = np.exp(-((radii - rad) ** 2) / (2 * sigma ** 2))
    w, h = imgsize
    xx, yy = np.mgrid[:w, :h]
    dist = np.floor(np.sqrt((xx - w / 2) ** 2 + (yy - h / 2) ** 2)).astype(int)
    return kernel[dist]
def gen_pattern(alpha, beta=0.01):
    """
    Build one noisy ring sample and measure its signal-to-noise ratio.

    :param alpha: amplitude of the ring signal relative to the noise
    :param beta: signal threshold, as a ratio of the peak image value
    :return: (image, snr) tuple
    """
    ring = draw_fuzzy_circle(samplesize, rad, thickness)
    image = alpha * ring + rand.normal(0.0, noise, samplesize)
    # Only ring pixels whose value clears the threshold count as signal.
    threshold = beta * image.max()
    mask = draw_circle(samplesize, rad, 0.5 * thickness)
    signal = image[np.logical_and(image > threshold, mask)]
    return image, signal.mean() / noise
# }}}
# separation between noise and circle (ring signal amplitude)
alpha = 1.0
# standard deviation of the gaussian background noise
noise = 1.0
# ring radius
rad = 25
# ring thickness
thickness = 6
# sample size (one ring per tile)
samplesize = (128, 128)
# full image size (tiled with independent samples)
fullsize = (1024, 1024)
# output format
fmt = 'tif'
"""
options: [-a alpha] [-r radius] [-s fullsize] [-t fmt]
"""
# NOTE(review): Python 2 script (print statements; integer division below).
# NOTE(review): only -a and -r appear in the getopt option string, so the
# -f/-t branches below are unreachable and those flags raise GetoptError --
# confirm whether 'a:r:f:t:' was intended.
optlist, args = getopt.getopt(sys.argv[1:], 'a:r:')
for o, a in optlist:
    if o == '-a':
        alpha = float(a)
    elif o == '-r':
        rad = float(a)
    elif o == '-f':
        fullsize = (float(a), float(a))
    elif o == '-t':
        fmt = a
    else:
        assert False, "unknown option"
# Tile the full image with ring samples, logging each tile's SNR.
image = np.zeros(fullsize)
xs, ys = samplesize
snr_ = []
for i in range(fullsize[0]/samplesize[0]):
    for j in range(fullsize[1]/samplesize[1]):
        sample, snr = gen_pattern(alpha)
        snr_.append(snr)
        print 'SNR : %.5f,' % snr,
        print 'Industrial SNR: %.3f db' % snr_ind(snr)
        image[i*xs:(i+1)*xs, j*xs:(j+1)*xs] = sample
snr_ = np.array(snr_)
print "Average SNR %.3f +/- %.3f (%.2f db)" % \
        (snr_.mean(), snr_.std(), snr_ind(snr_.mean()))
# plt.imshow(image, cmap='gray')
# Save the assembled image in the requested format.
if fmt == 'tif':
    tif.imsave('figs/ring.tif', image.astype(np.float32))
elif fmt == 'png':
    plt.savefig('figs/ring.png')
else:
    print 'Output format %s not supported' % fmt
|
[
"chuan137@gmail.com"
] |
chuan137@gmail.com
|
8073c023a7861491a0cbcee033584d6d5578e89b
|
5618d8b7d6fc2190f070e6fa5e3b423c0480a70d
|
/03-数组问题/0026-删除排序数组中的重复项.py
|
3b2d07ba4456367779ffc8f7b64484a1b1780b7a
|
[] |
no_license
|
ybm1/LeetCode-Solution-Python
|
01d455e8d9305b789646b395e57e1ec50ea7b1b7
|
9b76e7d3d0cebbaa977769f5c41ac1813e3f9f4e
|
refs/heads/master
| 2020-06-19T12:42:55.405557
| 2019-07-13T00:57:28
| 2019-07-13T00:57:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
class Solution:
    def removeDuplicates(self, nums):
        """
        Remove duplicates from a sorted list in place and return the new
        length; the first ``new length`` slots of ``nums`` hold the
        unique values in order.

        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            return 0
        # ``write`` marks the last slot already holding a unique value.
        write = 0
        for read in range(1, len(nums)):
            if nums[read] != nums[write]:
                write += 1
                nums[write] = nums[read]
        return write + 1
|
[
"121088825@qq.com"
] |
121088825@qq.com
|
f50d4463bf0b30fc4676896b773f4ee663cfafde
|
e5eec1428da1d24d3e9b86f5723c51cd2ca636cd
|
/implement/백준/로봇 시뮬레이션.py
|
9be5407197c46650cd9bb30b77ca890fafa6f60f
|
[] |
no_license
|
jamwomsoo/Algorithm_prac
|
3c36c381f59277721517d331a8f1640399d80c1d
|
8393f3cc2f950214c47f3cf0b2c1271791f115d0
|
refs/heads/master
| 2023-06-09T06:49:14.739255
| 2021-06-18T06:41:01
| 2021-06-18T06:41:01
| 325,227,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,593
|
py
|
import sys
def find_location(num):
    """Return the (row, col) of robot ``num`` on the global board, or None."""
    global a, b
    for row in range(b):
        for col in range(a):
            cell = board[row][col]
            # Occupied cells hold [robot_number, heading]; empty cells are [].
            if cell and cell[0] == num:
                return row, col
    return None
# Movement deltas indexed by heading: 0=N, 1=E, 2=S, 3=W.
# Row 0 of ``board`` is the northern edge, so N decreases the row index.
direction = [(0,-1),(1,0),(0,1),(-1,0)]
# Board is a columns wide, b rows tall.
a,b = map(int, input().split())
# n robots, m commands.
n,m = map(int, input().split())
board =[[[] for _ in range(a)] for _ in range(b)]
# for i in range(b):
#     for j in range(a):
#         print(board[i][j], end = " ")
#     print()
command = []
# Place the robots: input gives (column, row, heading letter) with 1-based
# coordinates from the south-west corner, so input row y maps to board row b - y.
for i in range(n):
    x,y,d = map(str, input().split())
    if d == 'N': d = 0
    elif d == 'E': d = 1
    elif d == 'S': d = 2
    else: d = 3
    #print("robot_location",b-(int(y)),int(x) - 1)
    board[b-int(y)][int(x) - 1] = [i+1,d]
# for i in range(b):
#     for j in range(a):
#         print(board[i][j], end = " ")
#     print()
# Execute the commands: F moves forward ``cnt`` steps, L/R rotate ``cnt`` times.
# The robot is lifted off the board first so it cannot collide with itself.
for i in range(m):
    num, com, cnt = map(str, input().split())
    y,x = find_location(int(num))
    d = board[y][x][1]
    board[y][x] = []
    for i in range(int(cnt)):
        if com == 'F':
            x+=direction[d][0]
            y+=direction[d][1]
            #print(y,x)
            # Leaving the board or entering an occupied cell ends the run.
            if not (0<=x< a and 0<= y <b):
                print("Robot {0} crashes into the wall".format(num))
                sys.exit()
            if board[y][x]:
                print("Robot {0} crashes into robot {1}".format(num,board[y][x][0]))
                sys.exit()
        elif com == 'L':
            d-=1
            if d<0: d = 3
        elif com == 'R':
            d+=1
            if d>3: d = 0
    board[y][x] = [int(num),d]
print("OK")
|
[
"41579282+jamwomsoo@users.noreply.github.com"
] |
41579282+jamwomsoo@users.noreply.github.com
|
41f3e4504258bf7eb315ccb9fa76996f1a5dafeb
|
f42608d292c5784f59d554337e2826d398d8391f
|
/base_structure/app.py
|
6bb1662cfab3f3de20a53e3d3d4606af506412d7
|
[
"Unlicense"
] |
permissive
|
thinkAmi-sandbox/werkzeug-sample
|
e4e0000b0b1ee0c72acc36113b125765b185ce39
|
fbf778ba8a83d5c91de1a5baa619087b0ab46199
|
refs/heads/master
| 2020-03-29T19:45:47.006365
| 2018-10-15T13:41:54
| 2018-10-15T13:41:54
| 150,279,886
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,492
|
py
|
import pathlib
from werkzeug._compat import text_type
from werkzeug.exceptions import abort, InternalServerError
from werkzeug.utils import redirect
from werkzeug.wrappers import Request, Response
from werkzeug.wsgi import SharedDataMiddleware
class MyInternalServerError(InternalServerError):
    """InternalServerError variant that renders a custom HTML body."""

    def get_body(self, environ=None):
        # text_type() keeps the body unicode on Python 2 as well
        # (it is a werkzeug-internal helper; see the original's caveat).
        parts = (
            u'<!DOCTYPE html>',
            u'<title>My Internal Server Error</title>',
            u'<h1>Oh, my internal server error!</h1>',
        )
        return text_type(u''.join(parts))
class Application:
    def dispatch_request(self, request):
        """
        Demo handler that echoes request details back in the response body.
        Note: requests for favicon.ico arrive here as well; in production
        static files would be served by Nginx, so that may not matter.
        """
        body = []
        try:
            # Get the request path
            body.append(f'request.path: {request.base_url}')
            # => http://localhost:5000/
            # Get the environment variables
            # Both WSGI and CGI environment variables are available; the type is dict
            body.append(f'environ: {type(request.environ)} / {request.environ}')
            # => <class 'dict'> / {'wsgi.version': (1, 0), ... , 'REQUEST_METHOD': 'GET', ...
            # Get the HTTP request method
            body.append(f'HTTP method: {request.method}')
            # => GET
            # Get the query string
            body.append(f'Query String: {request.args}')
            # => [GET]  for $ curl http://localhost:5000?foo=bar :
            #    ImmutableMultiDict([('foo', 'bar')])
            # => [POST] for $ curl -w '\n' -X POST 'localhost:5000/?ham=spam' --data 'foo=1&bar=2' :
            #    ImmutableMultiDict([('ham', 'spam')])
            # Get the POST data
            body.append(f'Form: {request.form}')
            # => [GET]  for $ curl http://localhost:5000?foo=bar :
            #    ImmutableMultiDict([])
            # => [POST] for $ curl -w '\n' -X POST 'localhost:5000/?ham=spam' --data 'foo=1&bar=2' :
            #    ImmutableMultiDict([('foo', '1'), ('bar', '2')])
            # request.values exposes both the query-string and the form values
            body.append(f'request.values: {request.values}')
            # => [GET]  for $ curl http://localhost:5000?foo=bar :
            #    CombinedMultiDict([ImmutableMultiDict([('foo', 'bar')]),
            #                       ImmutableMultiDict([])
            #                      ])
            # => [POST] for $ curl -w '\n' -X POST 'localhost:5000/?ham=spam' --data 'foo=1&bar=2' :
            #    CombinedMultiDict([ImmutableMultiDict([('ham', 'spam')]),
            #                       ImmutableMultiDict([('foo', '1'), ('bar', '2')])
            #                      ])
            # Dump the HTTP request headers
            for k, v in request.headers.items():
                body.append(f'Request header: key:{k} / value: {v}')
            # => Request header: key:Host / value: localhost:5000 ...
            # Get the client's IP address
            # (note the difference between access_route and remote_addr)
            body.append(f'access_route: {request.access_route}')
            # => access_route: ImmutableList(['127.0.0.1'])
            body.append(f'remote_addr: {request.remote_addr}')
            # => remote_addr: 127.0.0.1
            # Read a cookie value sent with the request
            counter = request.cookies.get('counter', 0)
            msg = '\n'.join(body)
            response = Response(msg)
            # Even when a cookie is not set again here, the previously stored
            # value is sent back by the browser on subsequent requests
            if 'one_time' not in request.cookies:
                response.set_cookie('one_time', 'x')
            # Delete a cookie
            if 'delete_cookie' in request.args:
                response.delete_cookie('one_time')
                # => Set-Cookie: one_time=; Expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/
            # Cookie that is always (re)set
            response.set_cookie('counter', str(int(counter) + 1))
            # Set the same cookie key twice with different attributes
            response.set_cookie('same_cookie', '1st', httponly=True)
            response.set_cookie('same_cookie', '2nd', secure=True)
            # Set custom HTTP headers
            response.headers.add('X-headers-add', 'using add')
            response.headers.add_header('X-headers-add_header', 'using add_header')
            response.headers['X-headers-key'] = 'using key'
            # => X-headers-add: using add
            #    X-headers-add_header: using add_header
            #    X-headers-key: using key
            # Override the content type
            response.content_type = 'application/json'
            # Redirect
            if 'redirect' in request.args:
                return redirect('https://www.google.co.jp')
            # HTTP 500 error
            if '500' in request.args:
                abort(500)
        except InternalServerError as e:
            # Swap in the custom error page
            return MyInternalServerError()
        return response
    def wsgi_app(self, environ, start_response):
        # Standard WSGI entry point: wrap environ, dispatch, emit the response.
        request = Request(environ)
        response = self.dispatch_request(request)
        return response(environ, start_response)
    def __call__(self, environ, start_response):
        """Dispatching the WSGI app directly lets wsgi_app() be wrapped like WSGI middleware."""
        print('!!! app !!!')
        return self.wsgi_app(environ, start_response)
def create_app(with_static=True):
    """Build the WSGI application, optionally serving /favicon.ico via
    SharedDataMiddleware (the WSGI-middleware hook point)."""
    application = Application()
    if with_static:
        static_map = {'/favicon.ico': str(pathlib.Path('./favicon.ico'))}
        application.wsgi_app = SharedDataMiddleware(application.wsgi_app, static_map)
    return application
if __name__ == '__main__':
    from werkzeug.serving import run_simple

    app = create_app()
    # Bind 0.0.0.0 so the dev server is reachable from other hosts,
    # with the debugger and auto-reloader enabled (Flask-style defaults).
    # https://qiita.com/tomboyboy/items/122dfdb41188176e45b5
    run_simple('0.0.0.0', 5000, app, use_debugger=True, use_reloader=True)
|
[
"dev.thinkami@gmail.com"
] |
dev.thinkami@gmail.com
|
03da8fde9a8aff510e7b931f33942b88ea3adc4d
|
1105414add7c27eb201a0941e5bc86eb2f09378f
|
/journey5/cputype.py
|
1cd845d82e52ffb0bf5afda36aa8b46e72d2220a
|
[
"MIT"
] |
permissive
|
parrisma/AI-Intuition
|
d083204267c351bc85c796a79ce43b8ff9d58022
|
3b081696b1d226815e029cbb536fac5e4d3de9a7
|
refs/heads/master
| 2021-07-25T21:07:11.455443
| 2020-06-06T20:44:17
| 2020-06-06T20:44:17
| 193,102,209
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
from typing import List
from enum import Enum, unique
from copy import deepcopy
@unique
class CPUType(Enum):
    """Closed set of compute resource types."""

    GPU = 'GPU'
    GENERAL = 'CPU'
    BATCH = 'BAT'

    def __str__(self):
        # Render as the raw value, e.g. str(CPUType.GPU) == 'GPU'.
        return self.value

    def __add__(self, other):
        """Concatenate the values of two CPUType members.

        Raises:
            ValueError: when ``other`` is not a CPUType.  (Bug fix: the
                original constructed this ValueError but never raised it,
                so mismatched operands silently returned None.)
        """
        if isinstance(other, self.__class__):
            return self.value + other.value
        raise ValueError(
            'Cannot add :' + self.__class__.__name__ + 'with :' + other.__class__.__name__)

    @classmethod
    def cpu_types(cls) -> List['CPUType']:
        """Return all members in declaration order (deepcopy keeps the
        original list construction; Enum members themselves are singletons)."""
        return deepcopy([cls.GPU,
                         cls.GENERAL,
                         cls.BATCH
                         ]
                        )
|
[
"parris3142@hotmail.com"
] |
parris3142@hotmail.com
|
2a68a5ba29718b61fc2e53486891a3f1d861179d
|
3a39e879fb2901207afcfc238b169ddefa104055
|
/Chapter05/Docs/headercontract/headercontract/settings.py
|
88caffbe94df2a4de120cbbaece0608c79c3236a
|
[] |
no_license
|
Synapses/Web_Scraping_with_Python
|
cb32ddd468250b9f11ad16d3576d0920693e708c
|
3bb8cd47d0e1e182bb8ee800d32e24f45bf13ab0
|
refs/heads/master
| 2023-03-15T09:19:02.754593
| 2020-06-16T02:17:11
| 2020-06-16T02:17:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,160
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for headercontract project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# Project name; used by Scrapy in logging and the default User-Agent.
BOT_NAME = 'headercontract'
# Module(s) where Scrapy looks for spiders, and where `genspider` adds new ones.
SPIDER_MODULES = ['headercontract.spiders']
NEWSPIDER_MODULE = 'headercontract.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'headercontract (+http://www.yourdomain.com)'
# Obey robots.txt rules (disallowed URLs are skipped)
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'headercontract.middlewares.HeadercontractSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'headercontract.middlewares.HeadercontractDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'headercontract.pipelines.HeadercontractPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"philip.dongfei@gmail.com"
] |
philip.dongfei@gmail.com
|
fd48e51c8eab549b95ed79cc075a60d840f595ef
|
65dd982b7791b11b4f6e02b8c46300098d9b5bb3
|
/neutron-2014.2.2-gcloud/neutron/db/portqos_db.py
|
91e3edb81874a111692dc538727c9926b83a4b00
|
[
"Apache-2.0"
] |
permissive
|
xiongmeng1108/openstack_gcloud
|
83f58b97e333d86d141493b262d3c2261fd823ac
|
d5d3e4f8d113a626f3da811b8e48742d35550413
|
refs/heads/master
| 2021-01-10T01:21:13.911165
| 2016-03-25T08:21:14
| 2016-03-25T08:21:14
| 54,700,934
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,058
|
py
|
__author__ = 'luoyb'
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.api.v2 import attributes
from neutron.db import db_base_plugin_v2
from neutron.db import model_base
from neutron.db import models_v2
from neutron.openstack.common import log as logging
from neutron.extensions import gcloud_qos
LOG = logging.getLogger(__name__)
class PortQos(model_base.BASEV2):
    """
    SQLAlchemy model mapping a Neutron port to its QoS rate limits
    (table ``gcloud_portqoss``).
    """
    __tablename__ = 'gcloud_portqoss'
    # One row per port; the row is removed when the port is deleted (CASCADE).
    port_id = sa.Column(sa.String(36),
                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
                        nullable=False,primary_key=True)
    # Per-direction rate limits; units are not visible here -- presumably
    # bits/s or kbit/s, confirm against the agent that enforces them.
    ingress = sa.Column(sa.BIGINT, nullable=True)
    outgress = sa.Column(sa.BIGINT, nullable=True)
    # Add a relationship to the Port model in order to instruct SQLAlchemy to
    # eagerly load the port's qos (backref "qos", joined/eager, one-to-one).
    ports = orm.relationship(
        models_v2.Port,
        backref=orm.backref("qos", lazy='joined',uselist=False,cascade='delete'))
class PortQosMixin(object):
    """Mixin that adds QoS options to the plugin and associates them to a port."""

    def _extend_port_dict_qos(self, res, port):
        """Attach the port's QoS dict (or {}) to a port response dict."""
        res['qos'] = self._make_port_qos_dict(port.get('qos'))
        return res

    def _get_qos(self, context, port_id):
        """Return the QoS dict for ``port_id`` ({} when none is set)."""
        port_qos = context.session.query(PortQos).filter_by(port_id=port_id).first()
        return self._make_port_qos_dict(port_qos)

    def _create_or_update_qos(self, context, id, qos):
        """Create or update the QoS row for a port and return it as a dict.

        Raises:
            ValueError: when no qos payload is supplied.  (Bug fix: the
                original executed ``raise "qos is null"`` -- raising a
                string object is itself a TypeError, not a usable error.)
            gcloud_qos.QosPortNotFound: when the target port does not exist.
        """
        if not qos:
            raise ValueError("qos is null")
        qos = qos['qos']
        if id:
            qos['port_id'] = id
        port = context.session.query(models_v2.Port).filter_by(id=qos['port_id']).first()
        if not port:
            raise gcloud_qos.QosPortNotFound(id=qos['port_id'])
        with context.session.begin(subtransactions=True):
            port_qos = context.session.query(PortQos).filter_by(port_id=qos['port_id']).first()
            if port_qos:
                port_qos.update(qos)
            else:
                port_qos = PortQos(
                    port_id=qos['port_id'],
                    ingress=qos.get('ingress'),
                    outgress=qos.get('outgress'))
                context.session.add(port_qos)
        return self._make_port_qos_dict(port_qos)

    def _make_port_qos_dict(self, port_qos):
        """Serialize a PortQos row to a plain dict; {} for a missing row."""
        if not port_qos:
            return {}
        return {"port_id": port_qos["port_id"],
                'ingress': port_qos['ingress'],
                "outgress": port_qos["outgress"]}

    def update_qos(self, context, id, qos):
        """Update (or create) the QoS of port ``id``."""
        return self._create_or_update_qos(context, id, qos)

    def create_qos(self, context, qos):
        """Create (or update) QoS; the port id must be in the payload."""
        return self._create_or_update_qos(context=context, id=None, qos=qos)

    def get_qos(self, context, id, fields=None):
        """Fetch the QoS of port ``id``."""
        return self._get_qos(context, port_id=id)
# Make every port-resource response include its 'qos' attribute by
# registering the mixin's extender with the core plugin.
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
    attributes.PORTS, ['_extend_port_dict_qos'])
|
[
"cs_xm@126.com"
] |
cs_xm@126.com
|
6bc465b047b317b874564567d86dd750b92a8daf
|
b1255aedc430b128582d42cabfe675ac3a3321f1
|
/src/promnesia/sources/browser.py
|
176c05d8a1770d0487733568cf822e8e0b0c8c00
|
[
"MIT"
] |
permissive
|
alexriabtsev/promnesia
|
855e3b54d3f17b3c9bf7ce985360e8d4f259c5da
|
5055198170bdf57135679181d87450a979a05a2b
|
refs/heads/master
| 2022-11-13T04:58:00.565517
| 2020-07-11T19:46:23
| 2020-07-11T19:46:23
| 278,930,706
| 0
| 0
|
MIT
| 2020-07-11T19:44:18
| 2020-07-11T19:44:17
| null |
UTF-8
|
Python
| false
| false
| 2,714
|
py
|
import csv
import sqlite3
from datetime import datetime
from subprocess import check_output
from typing import Dict, Iterator, List, NamedTuple, Optional, Set
from urllib.parse import unquote
import pytz
from sqlalchemy import Column, MetaData, Table, create_engine # type: ignore
from ..common import Loc, PathIsh, Visit, get_logger, Second
def browser_extract(histfile: PathIsh, cols, row_handler) -> Iterator[Visit]:
    """Yield a Visit for each row of the ``visits`` table in ``histfile``.

    :param histfile: path to the browser's sqlite history database
    :param cols: column names to SELECT, passed positionally to row_handler
    :param row_handler: maps one row's column values to a Visit
    """
    logger = get_logger()
    logger.debug(f'extracing history from {histfile}')
    conn = sqlite3.connect(str(histfile))
    try:
        for row in conn.execute(f"SELECT {', '.join(cols)} FROM visits"):
            yield row_handler(*row)
    finally:
        # Fix: the original never closed the connection (its own
        # 'TODO contextmanager' note); close it even when the consumer
        # abandons the generator early.
        conn.close()
    logger.debug('done extracing')
def _firefox(cols, histfile: PathIsh) -> Iterator[Visit]:
    """Shared Firefox extractor; ``cols`` is (url column, timestamp column)."""
    def to_visit(url, ts):
        # Timestamps are microseconds since the unix epoch.
        # https://stackoverflow.com/a/19430099/706389
        moment = datetime.fromtimestamp(int(ts) / 1_000_000, pytz.utc)
        # firefox urls are all quoted
        return Visit(
            url=unquote(url),
            dt=moment,
            locator=Loc.file(histfile),
        )

    yield from browser_extract(histfile=histfile, cols=cols, row_handler=to_visit)
def firefox_phone(histfile: PathIsh) -> Iterator[Visit]:
    """Visits from a Firefox-for-Android history db (timestamp column 'date')."""
    yield from _firefox(histfile=histfile, cols=('url', 'date'))
def firefox(histfile: PathIsh) -> Iterator[Visit]:
    """Visits from a desktop Firefox places db (timestamp column 'visit_date')."""
    yield from _firefox(histfile=histfile, cols=('url', 'visit_date'))
# should be utc? https://stackoverflow.com/a/26226771/706389
# yep, tested it and looks like utc
def chrome_time_to_utc(chrome_time: int) -> datetime:
    """Convert a Chrome/WebKit timestamp (microseconds since 1601-01-01)
    to an aware UTC datetime; Chrome's epoch leads unix time by
    11644473600 seconds."""
    unix_seconds = chrome_time / 1_000_000 - 11644473600
    return datetime.fromtimestamp(unix_seconds, pytz.utc)
# TODO could use sqlite3 module I guess... but it's quick enough to extract as it is
def chrome(histfile: PathIsh) -> Iterator[Visit]:
    """Visits from a Chrome History db, including per-visit duration."""
    def to_visit(url, ts, durs):
        micros = int(durs)
        # A stored duration of 0 means 'unknown', not 'instant'.
        duration: Optional[Second] = None if micros == 0 else micros // 1_000_000
        return Visit(
            url=unquote(url),  # chrome urls are all quoted # TODO not sure if we want it here?
            dt=chrome_time_to_utc(int(ts)),
            locator=Loc.file(histfile),
            duration=duration,
        )

    yield from browser_extract(
        histfile=histfile,
        cols=('url', 'visit_time', 'visit_duration'),
        row_handler=to_visit,
    )
|
[
"karlicoss@gmail.com"
] |
karlicoss@gmail.com
|
26e8ab9c4c3bad31527986883deb13a25f8efdd7
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/216/usersdata/267/113208/submittedfiles/av2_p3_civil.py
|
ade1f16d5c34a108d034f19287aa5130054916e2
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 747
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
# Interactive script: read an n x n board and a rook position (1-based),
# then print the "weight" of the rook's square.
n=int(input('Dimensão do tabuleiro: '))
print()
a=np.zeros((n,n))
x=int(input('Número da linha em que a torre se encontra: '))
# NOTE(review): only the upper bound is validated; x < 1 (or y < 1) is
# accepted and would index from the end of the array -- confirm intended.
if x>n:
    while x>n:
        x=int(input('VALOR INVÁLIDO. Número da linha em que a torre se encontra: '))
print()
y=int(input('Número da coluna em que a torre se encontra: '))
if y>n:
    while y>n:
        y=int(input('VALOR INVÁLIDO. Número da coluna em que a torre se encontra: '))
print()
# Read the board cell by cell.
for i in range(0,n,1):
    for j in range(0,n,1):
        a[i,j]=int(input('Digite o valor da posição %d%d: '%(i+1,j+1)))
# Weight = sum of the rook's row plus its column, counting the rook's
# own cell only once.
somaL=0
for i in range(0,n,1):
    somaL=somaL+a[x-1,i]
somaC=0
for i in range(0,n,1):
    somaC=somaC+a[i,y-1]
peso=somaL+somaC-a[x-1,y-1]
print(peso)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
7e1fa4523b5fdefe795f17c9d34f9c193eec062f
|
12967293f285decb1568bd56af38b1df4e5c533d
|
/.eggs/botocore-1.10.9-py2.7.egg/botocore/__init__.py
|
71839036f4abf9dc262ff861a79879d59ff94b71
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
martbhell/git-bigstore
|
36cd16276379833fbade252a77c73cf3644aa30f
|
960e9ea64d4d5646af3ce411adf46f3236b64d7e
|
refs/heads/master
| 2020-05-16T17:51:52.011171
| 2019-03-12T20:54:42
| 2019-03-12T20:54:42
| 183,206,409
| 0
| 0
|
Apache-2.0
| 2019-04-24T10:29:48
| 2019-04-24T10:29:47
| null |
UTF-8
|
Python
| false
| false
| 3,884
|
py
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import re
import logging
__version__ = '1.10.9'
class NullHandler(logging.Handler):
    """Logging handler that silently discards every record; attached to the
    'botocore' logger below so the library emits nothing by default."""

    def emit(self, record):
        """Intentionally do nothing."""
# Configure default logger to do nothing; applications opt in to botocore
# logging by attaching their own handlers.
log = logging.getLogger('botocore')
log.addHandler(NullHandler())
# Regexes used by xform_name() to insert separators at camelCase boundaries.
_first_cap_regex = re.compile('(.)([A-Z][a-z]+)')
_number_cap_regex = re.compile('([a-z])([0-9]+)')
_end_cap_regex = re.compile('([a-z0-9])([A-Z])')
# The regex below handles the special case where some acryonym
# name is pluralized, e.g GatewayARNs, ListWebACLs, SomeCNAMEs.
_special_case_transform = re.compile('[A-Z]{3,}s$')
# Prepopulate the cache with special cases that don't match
# our regular transformation.
_xform_cache = {
    ('CreateCachediSCSIVolume', '_'): 'create_cached_iscsi_volume',
    ('CreateCachediSCSIVolume', '-'): 'create-cached-iscsi-volume',
    ('DescribeCachediSCSIVolumes', '_'): 'describe_cached_iscsi_volumes',
    ('DescribeCachediSCSIVolumes', '-'): 'describe-cached-iscsi-volumes',
    ('DescribeStorediSCSIVolumes', '_'): 'describe_stored_iscsi_volumes',
    ('DescribeStorediSCSIVolumes', '-'): 'describe-stored-iscsi-volumes',
    ('CreateStorediSCSIVolume', '_'): 'create_stored_iscsi_volume',
    ('CreateStorediSCSIVolume', '-'): 'create-stored-iscsi-volume',
    ('ListHITsForQualificationType', '_'): 'list_hits_for_qualification_type',
    ('ListHITsForQualificationType', '-'): 'list-hits-for-qualification-type',
}
# The items in this dict represent partial renames to apply globally to all
# services which might have a matching argument or operation. This way a
# common mis-translation can be fixed without having to call out each
# individual case.
_partial_renames = {
    'ipv-6': 'ipv6',
    'ipv_6': 'ipv6',
}
# Scalar shape types in the service models.
ScalarTypes = ('string', 'integer', 'boolean', 'timestamp', 'float', 'double')
# Absolute path of the installed botocore package directory.
BOTOCORE_ROOT = os.path.dirname(os.path.abspath(__file__))
# Used to specify anonymous (unsigned) request signature
class UNSIGNED(object):
    # copy/deepcopy return this exact object so the sentinel stays a singleton.
    def __copy__(self):
        return self
    def __deepcopy__(self, memodict):
        return self
# Rebind the name to a single instance: UNSIGNED is used as a sentinel value,
# so the class itself is no longer reachable after this line.
UNSIGNED = UNSIGNED()
def xform_name(name, sep='_', _xform_cache=_xform_cache,
partial_renames=_partial_renames):
"""Convert camel case to a "pythonic" name.
If the name contains the ``sep`` character, then it is
returned unchanged.
"""
if sep in name:
# If the sep is in the name, assume that it's already
# transformed and return the string unchanged.
return name
key = (name, sep)
if key not in _xform_cache:
if _special_case_transform.search(name) is not None:
is_special = _special_case_transform.search(name)
matched = is_special.group()
# Replace something like ARNs, ACLs with _arns, _acls.
name = name[:-len(matched)] + sep + matched.lower()
s1 = _first_cap_regex.sub(r'\1' + sep + r'\2', name)
s2 = _number_cap_regex.sub(r'\1' + sep + r'\2', s1)
transformed = _end_cap_regex.sub(r'\1' + sep + r'\2', s2).lower()
# Do partial renames
for old, new in partial_renames.items():
if old in transformed:
transformed = transformed.replace(old, new)
_xform_cache[key] = transformed
return _xform_cache[key]
|
[
"dan@lionheartsw.com"
] |
dan@lionheartsw.com
|
b1761ebb3d2a30a0f0fc45062e83e5c7d20d0f93
|
f35254b599e251249b460b8edf4303e7009024d4
|
/rl/bc_utils/init_tensor.py
|
b404e044c7d8ad18c4310e3469abb6aa1c58fdc7
|
[
"MIT"
] |
permissive
|
bcrafton/icsrl-deep-learning
|
7e3a3f970bb8a3331d709d1a841f83bf15e6a39e
|
e3616982d1dda5f978d61d6591c91cb0da76ab02
|
refs/heads/master
| 2020-09-14T00:35:47.543984
| 2019-11-22T22:04:33
| 2019-11-22T22:04:33
| 222,955,022
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,101
|
py
|
import numpy as np
#######################################
def init_matrix(size, init, std=None):
    """Build a dense weight matrix of shape (input_size, output_size).

    `init` selects the scheme: 'zero', 'sqrt_fan_in', 'glorot_uniform',
    'glorot_normal', 'alexnet' (N(0, 0.01)), 'normal' (N(0, std)); any
    other value falls back to N(0, 1).
    """
    fan_in, fan_out = size
    shape = (fan_in, fan_out)
    if init == 'zero':
        return np.zeros(shape=shape)
    if init == 'sqrt_fan_in':
        bound = 1.0 / np.sqrt(fan_in)
        return np.random.uniform(low=-bound, high=bound, size=shape)
    if init == 'glorot_uniform':
        bound = np.sqrt(6. / (fan_in + fan_out))
        return np.random.uniform(low=-bound, high=bound, size=shape)
    if init == 'glorot_normal':
        return np.random.normal(loc=0.0, scale=np.sqrt(2. / (fan_in + fan_out)), size=shape)
    if init == 'alexnet':
        return np.random.normal(loc=0.0, scale=0.01, size=shape)
    if init == 'normal':
        # Caller must supply `std` for this scheme.
        return np.random.normal(loc=0.0, scale=std, size=shape)
    # Unknown scheme: standard normal, matching the original fallback.
    return np.random.normal(loc=0.0, scale=1.0, size=shape)
#######################################
def init_filters(size, init, std=None):
    """Build a conv filter bank of shape (fh, fw, fin, fout).

    Fan-in/fan-out for the glorot schemes are fh*fw*fin and fh*fw*fout.
    'sqrt_fan_in' and unknown schemes abort via assert, as before.
    """
    fh, fw, fin, fout = size
    shape = (fh, fw, fin, fout)
    fan_in = fh * fw * fin
    fan_out = fh * fw * fout
    if init == 'zero':
        return np.zeros(shape=shape)
    if init == 'sqrt_fan_in':
        # Not supported for conv filters.
        assert (False)
    if init == 'glorot_uniform':
        bound = np.sqrt(6. / (fan_in + fan_out))
        return np.random.uniform(low=-bound, high=bound, size=shape)
    if init == 'glorot_normal':
        return np.random.normal(loc=0.0, scale=np.sqrt(2. / (fan_in + fan_out)), size=shape)
    if init == 'alexnet':
        return np.random.normal(loc=0.0, scale=0.01, size=shape)
    if init == 'normal':
        # Caller must supply `std` for this scheme.
        return np.random.normal(loc=0.0, scale=std, size=shape)
    assert (False)
#######################################
def init_local_filters(size, init, std=None):
    """Build locally-connected filters of shape (h, w, fh*fw*fin, fout).

    One independent (flattened) filter per spatial position (h, w);
    glorot fans mirror init_filters. 'sqrt_fan_in' and unknown schemes
    abort via assert, as before.
    """
    h, w, fh, fw, fin, fout = size
    shape = (h, w, fh * fw * fin, fout)
    fan_in = fh * fw * fin
    fan_out = fh * fw * fout
    if init == 'zero':
        return np.zeros(shape=shape)
    if init == 'sqrt_fan_in':
        # Not supported for locally-connected filters.
        assert (False)
    if init == 'glorot_uniform':
        bound = np.sqrt(6. / (fan_in + fan_out))
        return np.random.uniform(low=-bound, high=bound, size=shape)
    if init == 'glorot_normal':
        return np.random.normal(loc=0.0, scale=np.sqrt(2. / (fan_in + fan_out)), size=shape)
    if init == 'alexnet':
        return np.random.normal(loc=0.0, scale=0.01, size=shape)
    if init == 'normal':
        # Caller must supply `std` for this scheme.
        return np.random.normal(loc=0.0, scale=std, size=shape)
    assert (False)
#######################################
|
[
"crafton.b@husky.neu.edu"
] |
crafton.b@husky.neu.edu
|
d18d566858011ece3ccc89dce925a0a7d1b87b83
|
4f394592f56b38e3fb1b971b2a10391ca86f4533
|
/scripts/Migrate.py
|
a7f3ffd1af3a6292283a6c710d2c4480f3316f22
|
[] |
no_license
|
JKOK005/analytics-ilmuone
|
aa605868af3d5d9c5dc2b412a88093406ad7a806
|
96c541d68246a38ac7ee1678b6b162fadb727141
|
refs/heads/master
| 2021-06-25T19:42:33.547603
| 2017-08-18T08:09:23
| 2017-08-18T08:09:23
| 100,269,121
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,091
|
py
|
import os
from Excel import Excel
from ParseExcel import ExcelParser
from GenEnvironDb import DbConnector
if __name__ == "__main__":
    parser = ExcelParser(os.path.join("..", "data"))
    with DbConnector("environ", "ilumone", "ilumone") as db:
        # Migrate countries metadata. Best effort: a failure here (missing
        # sheet, already-loaded rows) must not block the later migrations.
        try:
            excel = Excel().setFileName("environ.xlsx").setSheetName("Metadata - Countries").SkipRows(0)
            res = parser.read(excel)
            db.fillCountriesChunk(res['Country Code'], res['Country Name'], res['Region'], res['IncomeGroup'], res['SpecialNotes'], 100)
        except Exception:
            pass
        # Migrate indicators metadata (best effort, as above).
        try:
            excel = Excel().setFileName("environ.xlsx").setSheetName("Metadata - Indicators").SkipRows(0)
            res = parser.read(excel)
            db.fillIndicatorsChunk(res['INDICATOR_CODE'], res['INDICATOR_NAME'], res['SOURCE_NOTE'], 100)
        except Exception:
            pass
        # Migrate historical data (year columns 1960..2015).
        # DataFrame.ix was deprecated and removed from pandas; .loc is the
        # label-based equivalent for this column slice.
        excel = Excel().setFileName("environ.xlsx").setSheetName("Data").SkipRows(3)
        res = parser.read(excel)
        db.fillHDChunk(res['Country Code'], res['Indicator Code'], res.loc[:, '1960':'2015'], 100)
|
[
"JKOK005@e.ntu.edu.sg"
] |
JKOK005@e.ntu.edu.sg
|
9d077b206add1d0b332089d9e49a0b4944b8dd09
|
e05f8d36c70336a8714cc260c02fe85ecee2e62e
|
/subject/api/glare/versions.py
|
fe4ada2edb37be074931d0afc9cbe7347e0b0dee
|
[
"Apache-2.0"
] |
permissive
|
laoyigrace/subject
|
eafa442b5d9ebf83c78a01ce3bb5d088d08d620d
|
e6ed989fdc250917a19788112b22322b73b3550f
|
refs/heads/master
| 2021-01-11T00:06:54.790751
| 2016-10-24T02:13:32
| 2016-10-24T02:13:32
| 70,754,470
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,125
|
py
|
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_serialization import jsonutils
from six.moves import http_client
import webob.dec
from subject.common import wsgi
from subject import i18n
_ = i18n._
versions_opts = [
# Note: Since both subject-api and glare-api have the same name for the
# option public_endpoint, oslo.config generator throws a DuplicateError
# exception during the conf file generation incase of differing help
# texts. Hence we have to have identical help texts for subject-api and
# glare-api's public_endpoint if not for changing the conf opt name.
cfg.StrOpt('public_endpoint',
help=_("""
Public url endpoint to use for Glance/Glare versions response.
This is the public url endpoint that will appear in the Glance/Glare
"versions" response. If no value is specified, the endpoint that is
displayed in the version's response is that of the host running the
API service. Change the endpoint to represent the proxy URL if the
API service is running behind a proxy. If the service is running
behind a load balancer, add the load balancer's URL for this value.
Possible values:
* None
* Proxy URL
* Load balancer URL
Related options:
* None
""")),
]
CONF = cfg.CONF
CONF.register_opts(versions_opts)
class Controller(object):
    """A wsgi controller that reports which API versions are supported."""

    def index(self, req, explicit=False):
        """Respond to a request for all OpenStack API versions."""
        def build_version_object(version, path, status):
            # Prefer the configured public endpoint (e.g. when running
            # behind a proxy or load balancer); otherwise use the host
            # that served this request.
            url = CONF.public_endpoint or req.host_url
            return {
                'id': 'v%s' % version,
                'status': status,
                'links': [
                    {
                        'rel': 'self',
                        'href': '%s/%s/' % (url, path),
                    },
                ],
            }

        version_objs = [build_version_object(0.1, 'v0.1', 'EXPERIMENTAL')]
        # An explicit version request is answered 200 OK; the discovery
        # document itself is served as 300 Multiple Choices. A conditional
        # expression replaces the fragile `cond and a or b` idiom (which
        # breaks whenever the truthy branch evaluates falsy).
        status = http_client.OK if explicit else http_client.MULTIPLE_CHOICES
        response = webob.Response(request=req,
                                  status=status,
                                  content_type='application/json')
        response.body = jsonutils.dump_as_bytes(dict(versions=version_objs))
        return response

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Default WSGI entry point: serve the version listing.
        return self.index(req)
def create_resource(conf):
    """Build the WSGI resource wrapping the versions Controller.

    ``conf`` is accepted for signature parity with other resource
    factories but is not used here.
    """
    return wsgi.Resource(Controller())
|
[
"yibo_grace@163.com"
] |
yibo_grace@163.com
|
7cb340dcc964a7393ee90ccb58547d832899668f
|
c9eddcc8ae129d52ac08e59039a349827d78ac39
|
/doc/examples/icatexport.py
|
fe77037fe58aaba97f51505fed77bc2ca9f69d5a
|
[
"Apache-2.0"
] |
permissive
|
ahmad-tarbeya/python-icat
|
48335cedc256992e811f903616a593aadb45f194
|
a54f76551c74eacddffe19f8f217e81cf221d551
|
refs/heads/master
| 2021-01-20T06:43:09.083208
| 2017-04-25T13:42:46
| 2017-04-25T13:43:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,017
|
py
|
#! /usr/bin/python
#
# Export the content of the ICAT to a file or to stdout.
#
# Use the export feature from ICAT server: make the appropriate call
# to the ICAT RESTful interface to get the ICAT content and store the
# result to a file. Try to keep the command line interface as close
# as possible to the one from icatdump.py.
#

import sys
import os
import json
import re
import logging
import requests
import icat
import icat.config
from icat.exception import translateError

logging.basicConfig(level=logging.INFO)
logging.getLogger('requests.packages.urllib3').setLevel(logging.WARNING)

# Command line / config file options, mirroring icatdump.py.
config = icat.config.Config()
config.add_variable('resturl', ("--resturl",),
                    dict(help="URL to the ICAT RESTful interface"),
                    default=True)
config.add_variable('file', ("-o", "--outputfile"),
                    dict(help="output file name or '-' for stdout"),
                    default='-')
# The format argument makes in fact little sense, as there is no
# choice.  It's here for compatiblity with the command line interface
# of icatdump.py only.
config.add_variable('format', ("-f", "--format"),
                    dict(help="output file format", choices=["ICAT"]),
                    default='ICAT')
# Additional arguments that icatdump.py does not provide:
config.add_variable('query', ("--query",),
                    dict(help="query string to select the content"),
                    optional=True)
config.add_variable('attributes', ("--attributes",),
                    dict(help="attributes to include in the output",
                         choices=["ALL", "USER"]),
                    default='USER')
conf = config.getconfig()

client = icat.Client(conf.url, **conf.client_kwargs)
# NOTE(review): plain string comparison of version numbers — works for
# this particular threshold but would misorder e.g. '4.10'; confirm the
# icat client does not report such versions.
if client.apiversion < '4.3.99':
    raise RuntimeError("Sorry, ICAT version %s is too old, need 4.4.0 or newer."
                       % client.apiversion)
client.login(conf.auth, conf.credentials)

if conf.resturl is True:
    # As a default, derive the RESTful URL from the URL of the SOAP service.
    conf.resturl = re.sub(r'(?<=/)ICATService/.*', 'icat', conf.url)
if not conf.resturl.endswith("/"):
    conf.resturl += "/"

# Build the JSON payload for the RESTful export ("port") endpoint.
args = {"sessionId": client.sessionId, "attributes":conf.attributes}
if conf.query:
    args['query'] = conf.query
parameters = {"json":json.dumps(args)}
request = requests.get(conf.resturl + "port", params=parameters,
                       stream=True, verify=conf.checkCert)
if request.status_code == requests.codes.ok:
    # Stream the export in chunks to the chosen destination.
    if conf.file == "-":
        # Need to reopen stdout in binary mode.
        with os.fdopen(os.dup(sys.stdout.fileno()), 'wb') as f:
            for chunk in request.iter_content(8192):
                f.write(chunk)
    else:
        with open(conf.file, 'wb') as f:
            for chunk in request.iter_content(8192):
                f.write(chunk)
else:
    # Non-OK response: prefer the server's ICAT error if the body is
    # parseable JSON, otherwise raise the generic HTTP error.
    try:
        raise translateError(request.json(), status=request.status_code)
    except (ValueError, TypeError):
        request.raise_for_status()
|
[
"rolf.krahl@helmholtz-berlin.de"
] |
rolf.krahl@helmholtz-berlin.de
|
fc55aa9acf06aba5562b4ef3ac124136d7ad9207
|
8c07b4bbffac461f3dbdca5102736dded30b073a
|
/Problem Solving/Algorithms/Implementation/66_matrix_layer_rotation.py
|
1987ae2895a6b57663a705210ed810d0ea9a92e4
|
[] |
no_license
|
yang4978/Hackerrank_for_Python
|
c3c36c4d68daadbf694a387abb6f32b7172604a1
|
86db5b6b59d090fccccbe51389c5282217549569
|
refs/heads/master
| 2020-05-09T20:06:45.181158
| 2019-08-11T15:15:51
| 2019-08-11T15:15:51
| 181,394,625
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,407
|
py
|
#https://www.hackerrank.com/challenges/matrix-rotation-algo/problem
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the matrixRotation function below.
def matrixRotation(matrix, r):
    """Rotate each rectangular ring ("layer") of `matrix` anti-clockwise
    `r` times in place, then print the matrix one row per line,
    space-separated.

    Layer `k` holds rows*2 + columns*2 - 4 - 8*k elements, so rotations
    are taken modulo that perimeter (full cycles are no-ops).
    Dead commented-out code from an earlier dict-based attempt removed.
    """
    rows = len(matrix)
    columns = len(matrix[0])
    for layer in range(min(rows, columns) // 2):
        # Effective rotation count for this ring.
        layer_r = r % (rows * 2 + columns * 2 - 4 - 8 * layer)
        while layer_r:
            # Walk the ring once, shifting every element one position
            # anti-clockwise by carrying `temp` around the perimeter.
            temp = matrix[layer][columns - layer - 1]
            # Top row, right to left.
            for j in range(columns - layer - 2, layer - 1, -1):
                temp, matrix[layer][j] = matrix[layer][j], temp
            # Left column, top to bottom.
            for i in range(layer + 1, rows - layer):
                temp, matrix[i][layer] = matrix[i][layer], temp
            # Bottom row, left to right.
            for j in range(layer + 1, columns - layer):
                temp, matrix[rows - 1 - layer][j] = matrix[rows - 1 - layer][j], temp
            # Right column, bottom to top.
            for i in range(rows - layer - 2, layer - 1, -1):
                temp, matrix[i][columns - 1 - layer] = matrix[i][columns - 1 - layer], temp
            layer_r -= 1
    for row in matrix:
        print(*row)
if __name__ == '__main__':
    # Input format: first line "m n r", then m rows of n integers.
    mnr = input().rstrip().split()
    m = int(mnr[0])  # number of rows
    n = int(mnr[1])  # number of columns (parsed but not used further)
    r = int(mnr[2])  # number of anti-clockwise rotations
    matrix = []
    for _ in range(m):
        matrix.append(list(map(int, input().rstrip().split())))
    matrixRotation(matrix, r)
|
[
"noreply@github.com"
] |
yang4978.noreply@github.com
|
9e608dacc9d3ebb72707e372da7d75d249da71d7
|
6ac0bba8c1851e71529269c0d9d89a7c8fa507f2
|
/Hard/757.py
|
918d72e4e0133cbd93a4a645050b10ebca46e45d
|
[] |
no_license
|
Hellofafar/Leetcode
|
e81dc85689cd6f9e6e9756beba070cb11e7b192e
|
7a459e9742958e63be8886874904e5ab2489411a
|
refs/heads/master
| 2021-05-16T07:07:19.823953
| 2020-02-17T03:00:09
| 2020-02-17T03:00:09
| 103,690,780
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,856
|
py
|
# ------------------------------
# 757. Set Intersection Size At Least Two
#
# Description:
# An integer interval [a, b] (for integers a < b) is a set of all consecutive integers from a to
# b, including a and b.
#
# Find the minimum size of a set S such that for every integer interval A in intervals, the
# intersection of S with A has size at least 2.
#
# Example 1:
# Input: intervals = [[1, 3], [1, 4], [2, 5], [3, 5]]
# Output: 3
# Explanation:
# Consider the set S = {2, 3, 4}. For each interval, there are at least 2 elements from S in the interval.
# Also, there isn't a smaller size set that fulfills the above condition.
# Thus, we output the size of this set, which is 3.
#
# Example 2:
# Input: intervals = [[1, 2], [2, 3], [2, 4], [4, 5]]
# Output: 5
# Explanation:
# An example of a minimum sized set is {1, 2, 3, 4, 5}.
#
# Version: 1.0
# 01/22/18 by Jianfa
# ------------------------------
class Solution(object):
    def intersectionSizeTwo(self, intervals):
        """
        :type intervals: List[List[int]]
        :rtype: int

        Greedy from the interval with the largest start: `cover` tracks
        how many numbers each interval still needs contributed to S.
        Fixed for Python 3: `lambda (s, e): ...` tuple-parameter
        unpacking is Python-2-only syntax (a SyntaxError in Python 3).
        """
        # Sort by start ascending, end descending (ties: widest first).
        intervals.sort(key=lambda p: (p[0], -p[1]))
        cover = [2 for x in range(len(intervals))]
        res = 0
        while intervals:
            last = intervals.pop()
            step = cover.pop()
            # Add `step` numbers from the start of the last interval and
            # credit every earlier interval that contains them.
            for n in range(last[0], last[0] + step):
                for idx, pair in enumerate(intervals):
                    if cover[idx] and n <= pair[1]:
                        cover[idx] -= 1
                res += 1
        return res
# Used for testing
if __name__ == "__main__":
    # Instantiate for ad-hoc manual testing from the command line.
    test = Solution()
# ------------------------------
# Summary:
# Follow the idea in Solution section.
# The most awesome part is in the cover list. It can be used to record how many numbers in an
# interval need to be added to set S, in order to make S meets the conditions.
|
[
"buptljf@gmail.com"
] |
buptljf@gmail.com
|
14a0100b2dbcbb01a41c4f7c91fe949ea56cfe56
|
00af09f4ac6f98203910d86c3791c152184ace9a
|
/Lib/lib2to3/tests/data/fixers/myfixes/fix_last.py
|
593b3de7a7c597e50552cc3a14520a059d1b5f8b
|
[] |
no_license
|
orf53975/CarnosOS
|
621d641df02d742a2452fde2f28a28c74b32695a
|
d06849064e4e9f30ef901ad8cf90960e1bec0805
|
refs/heads/master
| 2023-03-24T08:06:48.274566
| 2017-01-05T16:41:01
| 2017-01-05T16:41:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 515
|
py
|
# Resolved an unmerged git conflict: all three conflicting branches
# contained the identical definition, so the markers are simply removed.
from lib2to3.fixer_base import BaseFix


class FixLast(BaseFix):
    """Test fixture fixer: runs last (run_order 10) and matches nothing."""

    run_order = 10

    def match(self, node):
        # Never matches: this fixer exists only to exercise ordering.
        return False
|
[
"Weldon@athletech.org"
] |
Weldon@athletech.org
|
8494b13a4ef36e2d314b32d5727effc37baba5a6
|
60d2c390736f5dce1cd0c9d4249a0ab95bdae802
|
/worker/domainiq/setup.py
|
5bd72d37039fcdf8af6567236e686aac55c84727
|
[
"Apache-2.0"
] |
permissive
|
tsmolka/stoq-plugins-public
|
d996b0be051ce0bac453af7380e7cbfecc03ff93
|
a8d3351fe55fc72891c395d6767188746bf381cf
|
refs/heads/master
| 2020-12-28T22:22:15.077514
| 2016-07-13T17:57:43
| 2016-07-13T17:57:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
# Packaging script for the DomainIQ stoQ worker plugin.
from setuptools import setup, find_packages

setup(
    name="domainiq",
    version="0.1",
    author="Marcus LaFerrera (@mlaferrera)",
    url="https://github.com/PUNCH-Cyber/stoq-plugins-public",
    license="Apache License 2.0",
    description="Interact with DomainIQ API",
    packages=find_packages(),   # auto-discover the plugin's packages
    include_package_data=True,  # ship non-Python data files declared in MANIFEST
)
|
[
"marcus@randomhack.org"
] |
marcus@randomhack.org
|
99ee5ca0b9aa7e4965915ebe6fce53773a76ff5f
|
ddd35c693194aefb9c009fe6b88c52de7fa7c444
|
/Live 10.1.18/VCM600/TrackEQComponent.py
|
30a2e48aa972041bdf55f9e3dcde94e17de86be0
|
[] |
no_license
|
notelba/midi-remote-scripts
|
819372d9c22573877c7912091bd8359fdd42585d
|
e3ec6846470eed7da8a4d4f78562ed49dc00727b
|
refs/heads/main
| 2022-07-30T00:18:33.296376
| 2020-10-04T00:00:12
| 2020-10-04T00:00:12
| 301,003,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,076
|
py
|
# uncompyle6 version 3.7.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.8.5 (default, Aug 12 2020, 00:00:00)
# [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]
# Embedded file name: c:\Jenkins\live\output\Live\win_64_static\Release\python-bundle\MIDI Remote Scripts\VCM600\TrackEQComponent.py
# Compiled at: 2020-07-14 15:33:46
from __future__ import absolute_import, print_function, unicode_literals
import Live
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent
from _Framework.EncoderElement import EncoderElement
from _Generic.Devices import get_parameter_by_name
# Supported EQ device classes mapped to the parameter names this component
# binds: b'Gains' lists the gain parameters (one per encoder) and the
# optional b'Cuts' lists the per-band on/off switch parameters.
EQ_DEVICES = {b'Eq8': {b'Gains': [ b'%i Gain A' % (index + 1) for index in range(8) ]}, b'FilterEQ3': {b'Gains': [
              b'GainLo', b'GainMid', b'GainHi'],
              b'Cuts': [
              b'LowOn', b'MidOn', b'HighOn']}}
class TrackEQComponent(ControlSurfaceComponent):
    """ Class representing a track's EQ, it attaches to the last EQ device in the track """

    def __init__(self):
        ControlSurfaceComponent.__init__(self)
        self._track = None          # observed Live track
        self._device = None         # last matching EQ device on that track
        self._gain_controls = None  # encoders bound to the gain parameters
        self._cut_buttons = None    # buttons toggling the band cut switches
        return

    def disconnect(self):
        """Release all controls and remove every listener this component added.

        Fix: the cut-parameter listeners are now removed *before*
        self._device is cleared. The previous code set self._device = None
        first, which made the cleanup branch guarded by
        `if self._device != None` unreachable and leaked the listeners.
        """
        if self._gain_controls != None:
            for control in self._gain_controls:
                control.release_parameter()
            self._gain_controls = None
        if self._cut_buttons != None:
            for button in self._cut_buttons:
                button.remove_value_listener(self._cut_value)
            self._cut_buttons = None
        if self._device != None:
            device_dict = EQ_DEVICES[self._device.class_name]
            if b'Cuts' in device_dict.keys():
                cut_names = device_dict[b'Cuts']
                for cut_name in cut_names:
                    parameter = get_parameter_by_name(self._device, cut_name)
                    if parameter != None and parameter.value_has_listener(self._on_cut_changed):
                        parameter.remove_value_listener(self._on_cut_changed)
        if self._track != None:
            self._track.remove_devices_listener(self._on_devices_changed)
            self._track = None
        self._device = None
        return

    def on_enabled_changed(self):
        # Re-sync the controls whenever the component is (dis)enabled.
        self.update()

    def set_track(self, track):
        """Attach to `track` (or detach with None) and rescan its devices."""
        assert track == None or isinstance(track, Live.Track.Track)
        if self._track != None:
            self._track.remove_devices_listener(self._on_devices_changed)
            if self._gain_controls != None and self._device != None:
                for control in self._gain_controls:
                    control.release_parameter()
        self._track = track
        if self._track != None:
            self._track.add_devices_listener(self._on_devices_changed)
        self._on_devices_changed()
        return

    def set_cut_buttons(self, buttons):
        """Bind the given tuple of buttons to the EQ band cut switches."""
        assert buttons == None or isinstance(buttons, tuple)
        if buttons != self._cut_buttons:
            if self._cut_buttons != None:
                for button in self._cut_buttons:
                    button.remove_value_listener(self._cut_value)
            self._cut_buttons = buttons
            if self._cut_buttons != None:
                for button in self._cut_buttons:
                    # identify_sender so _cut_value knows which band fired.
                    button.add_value_listener(self._cut_value, identify_sender=True)
            self.update()
        return

    def set_gain_controls(self, controls):
        """Bind the given tuple of encoders to the EQ gain parameters."""
        assert controls != None
        assert isinstance(controls, tuple)
        if self._device != None and self._gain_controls != None:
            for control in self._gain_controls:
                control.release_parameter()
        for control in controls:
            assert control != None
            assert isinstance(control, EncoderElement)
        self._gain_controls = controls
        self.update()
        return

    def update(self):
        """Re-map controls/buttons to the current device's parameters."""
        super(TrackEQComponent, self).update()
        if self.is_enabled() and self._device != None:
            device_dict = EQ_DEVICES[self._device.class_name]
            if self._gain_controls != None:
                gain_names = device_dict[b'Gains']
                for index in range(len(self._gain_controls)):
                    self._gain_controls[index].release_parameter()
                    if len(gain_names) > index:
                        parameter = get_parameter_by_name(self._device, gain_names[index])
                        if parameter != None:
                            self._gain_controls[index].connect_to(parameter)
            if self._cut_buttons != None and b'Cuts' in device_dict.keys():
                cut_names = device_dict[b'Cuts']
                for index in range(len(self._cut_buttons)):
                    self._cut_buttons[index].turn_off()
                    if len(cut_names) > index:
                        parameter = get_parameter_by_name(self._device, cut_names[index])
                        if parameter != None:
                            # Cut parameter value 0.0 means the band is cut:
                            # light the button.
                            if parameter.value == 0.0:
                                self._cut_buttons[index].turn_on()
                            if not parameter.value_has_listener(self._on_cut_changed):
                                parameter.add_value_listener(self._on_cut_changed)
        else:
            # Disabled or no device: dark buttons, free the encoders.
            if self._cut_buttons != None:
                for button in self._cut_buttons:
                    if button != None:
                        button.turn_off()
            if self._gain_controls != None:
                for control in self._gain_controls:
                    control.release_parameter()
        return

    def _cut_value(self, value, sender):
        """Toggle the cut switch belonging to the pressed button."""
        assert sender in self._cut_buttons
        assert value in range(128)
        # Fix: `value is not 0` compared identity with an int literal
        # (a SyntaxWarning in modern Python); use inequality instead.
        if self.is_enabled() and self._device != None and (not sender.is_momentary() or value != 0):
            device_dict = EQ_DEVICES[self._device.class_name]
            if b'Cuts' in device_dict.keys():
                cut_names = device_dict[b'Cuts']
                index = list(self._cut_buttons).index(sender)
                if index in range(len(cut_names)):
                    parameter = get_parameter_by_name(self._device, cut_names[index])
                    if parameter != None and parameter.is_enabled:
                        # Flip between 0.0 and 1.0.
                        parameter.value = float(int(parameter.value + 1) % 2)
        return

    def _on_devices_changed(self):
        """Rescan the track and attach to its last supported EQ device."""
        if self._device != None:
            # Unhook listeners from the previously attached device first.
            device_dict = EQ_DEVICES[self._device.class_name]
            if b'Cuts' in device_dict.keys():
                cut_names = device_dict[b'Cuts']
                for cut_name in cut_names:
                    parameter = get_parameter_by_name(self._device, cut_name)
                    if parameter != None and parameter.value_has_listener(self._on_cut_changed):
                        parameter.remove_value_listener(self._on_cut_changed)
        self._device = None
        if self._track != None:
            # Iterate the device chain back to front: attach to the LAST EQ.
            for index in range(len(self._track.devices)):
                device = self._track.devices[(-1 * (index + 1))]
                if device.class_name in EQ_DEVICES.keys():
                    self._device = device
                    break
        self.update()
        return

    def _on_cut_changed(self):
        """Mirror the device's cut switch states onto the buttons."""
        assert self._device != None
        assert b'Cuts' in EQ_DEVICES[self._device.class_name].keys()
        if self.is_enabled() and self._cut_buttons != None:
            cut_names = EQ_DEVICES[self._device.class_name][b'Cuts']
            for index in range(len(self._cut_buttons)):
                self._cut_buttons[index].turn_off()
                if len(cut_names) > index:
                    parameter = get_parameter_by_name(self._device, cut_names[index])
                    if parameter != None and parameter.value == 0.0:
                        self._cut_buttons[index].turn_on()
        return
# okay decompiling /home/deniz/data/projects/midiremote/Live 10.1.18/VCM600/TrackEQComponent.pyc
|
[
"notelba@example.com"
] |
notelba@example.com
|
bd32f3a3edb66736241c626c86d60844c4a1a252
|
e8ecb520c73c335c9c163e8ce59fa25d3f8b1f1c
|
/automlToolkit/components/fe_optimizers/fe_optimizer_builder.py
|
236f2edc5f8da0b39c76c59c4c83f9324a2d9e75
|
[
"MIT"
] |
permissive
|
zwt233/automl-toolkit
|
d3200a2fd5b01311f33a0e61a7cd6dc7dccbaacc
|
67d057f5e0c74bec5b3cbde1440ec014696737ef
|
refs/heads/master
| 2021-05-26T01:18:09.699592
| 2020-04-25T10:48:40
| 2020-04-25T10:48:40
| 253,997,217
| 0
| 0
|
MIT
| 2020-05-09T10:15:08
| 2020-04-08T05:49:48
| null |
UTF-8
|
Python
| false
| false
| 1,173
|
py
|
from automlToolkit.components.fe_optimizers.evaluation_based_optimizer import EvaluationBasedOptimizer
from automlToolkit.components.fe_optimizers.multithread_evaluation_based_optimizer import \
MultiThreadEvaluationBasedOptimizer
from automlToolkit.components.fe_optimizers.hyperband_evaluation_based_optimizer import HyperbandOptimizer
def build_fe_optimizer(eval_type, task_type, input_data, evaluator,
                       model_id: str, time_limit_per_trans: int,
                       mem_limit_per_trans: int, seed: int,
                       shared_mode: bool = False, n_jobs=4):
    """Instantiate the feature-engineering optimizer for this run.

    'partial' evaluation selects the Hyperband optimizer; otherwise the
    single- or multi-threaded evaluation-based optimizer is picked from
    n_jobs. All constructor arguments are forwarded unchanged.
    """
    if eval_type == 'partial':
        chosen_cls = HyperbandOptimizer
    else:
        chosen_cls = (EvaluationBasedOptimizer if n_jobs == 1
                      else MultiThreadEvaluationBasedOptimizer)
    return chosen_cls(task_type=task_type, input_data=input_data,
                      evaluator=evaluator, model_id=model_id,
                      time_limit_per_trans=time_limit_per_trans,
                      mem_limit_per_trans=mem_limit_per_trans,
                      seed=seed, shared_mode=shared_mode, n_jobs=n_jobs)
|
[
"459240868@qq.com"
] |
459240868@qq.com
|
a3bca85b9b9b07bbeb59a725e6fa0cb897347e06
|
b79837918de72f26558f484a59639bfc8ae3fc1b
|
/dialogue-engine/src/programy/config/brain/binaries.py
|
820afa307915f4ca5836ebee1e096254f040d54f
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
jaygeet/cotoba-agent-oss
|
d0efcfb3f22271afa32d0ffde04cf9808bbc1368
|
26de67dbda401be5f1d50ae2165e4e1d820882f5
|
refs/heads/master
| 2022-04-20T14:32:04.790471
| 2020-04-07T02:41:33
| 2020-04-07T02:41:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,135
|
py
|
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
Copyright (c) 2016-2019 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from programy.config.section import BaseSectionConfigurationData
from programy.utils.substitutions.substitues import Substitutions
class BrainBinariesConfiguration(BaseSectionConfigurationData):
    """Configuration of the brain's binary persistence ("binaries" section).

    Governs whether the parsed brain is saved to / loaded from a binary
    image, and whether a failed binary load falls back to loading AIML.
    """

    def __init__(self):
        BaseSectionConfigurationData.__init__(self, "binaries")
        # Conservative defaults: no binary persistence until configured.
        self._save_binary = False
        self._load_binary = False
        self._load_aiml_on_binary_fail = False

    @property
    def save_binary(self):
        """True if the brain should be written out as a binary image."""
        return self._save_binary

    @property
    def load_binary(self):
        """True if the brain should be restored from a binary image."""
        return self._load_binary

    @property
    def load_aiml_on_binary_fail(self):
        """True if AIML sources are loaded when the binary load fails."""
        return self._load_aiml_on_binary_fail

    def check_for_license_keys(self, license_keys):
        # No license keys of its own; delegate to the base implementation.
        BaseSectionConfigurationData.check_for_license_keys(self, license_keys)

    def load_config_section(self, configuration_file, configuration, bot_root, subs: Substitutions = None):
        """Populate the attributes from the "binaries" config section.

        NOTE(review): options are read with missing_value=None, so an
        absent key leaves the attribute as None rather than the False
        set in __init__ — confirm this is intended.
        """
        binaries = configuration_file.get_section("binaries", configuration)
        if binaries is not None:
            self._save_binary = configuration_file.get_bool_option(binaries, "save_binary", missing_value=None, subs=subs)
            self._load_binary = configuration_file.get_bool_option(binaries, "load_binary", missing_value=None, subs=subs)
            self._load_aiml_on_binary_fail = configuration_file.get_bool_option(binaries, "load_aiml_on_binary_fail", missing_value=None, subs=subs)
        else:
            YLogger.debug(self, "'binaries' section missing from bot config, using to defaults")

    def to_yaml(self, data, defaults=True):
        # With defaults=True emit documented defaults (note that
        # load_aiml_on_binary_fail is True here but False in __init__);
        # otherwise emit the current values.
        if defaults is True:
            data['save_binary'] = False
            data['load_binary'] = False
            data['load_aiml_on_binary_fail'] = True
        else:
            data['save_binary'] = self._save_binary
            data['load_binary'] = self._load_binary
            data['load_aiml_on_binary_fail'] = self._load_aiml_on_binary_fail
|
[
"cliff@cotobadesign.com"
] |
cliff@cotobadesign.com
|
7df101ba9e9e9e6e90edf37196a59fcb939490a1
|
898be346f3fd476f625489ec6e85f97240e32ae5
|
/LeetCode1000/LeetCode1446ConsecutiveCharacters.py
|
e11f5608019bf1f0e9621062e45f4788c7988dee
|
[] |
no_license
|
lonely7yk/LeetCode_py
|
e3a0c47f274db8ef3e4540d4d570a874e198dfcd
|
67054f724c6c0e1699118248788522cec624b831
|
refs/heads/master
| 2023-01-10T19:10:13.772069
| 2022-12-29T17:32:50
| 2022-12-29T17:32:50
| 228,568,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,045
|
py
|
"""
Given a string s, the power of the string is the maximum length of a non-empty substring that
contains only one unique character.
Return the power of the string.
Example 1:
Input: s = "leetcode"
Output: 2
Explanation: The substring "ee" is of length 2 with the character 'e' only.
Example 2:
Input: s = "abbcccddddeeeeedcba"
Output: 5
Explanation: The substring "eeeee" is of length 5 with the character 'e' only.
Example 3:
Input: s = "triplepillooooow"
Output: 5
Example 4:
Input: s = "hooraaaaaaaaaaay"
Output: 11
Example 5:
Input: s = "tourist"
Output: 1
Constraints:
1 <= s.length <= 500
s contains only lowercase English letters.
"""
# Greedy: O(n)
class Solution:
    def maxPower(self, s: str) -> int:
        """Return the length of the longest run of a single repeated character in s."""
        if not s:
            return 0
        best = run = 1
        for i in range(1, len(s)):
            # Extend the current run or start a new one of length 1.
            run = run + 1 if s[i] == s[i - 1] else 1
            if run > best:
                best = run
        return best
|
[
"893077810@qq.com"
] |
893077810@qq.com
|
7d4f9fdb9ab6f6463cca8a9564179547b0de0d51
|
83180906386bcb3d0a3062cc575f974c3dc1e0d8
|
/tutorials/ACT-R Unit Tutorials/u5_grouped.py
|
0bcf370790406f89606c85ddb21c96d0e93a2dbf
|
[] |
no_license
|
MatthewAKelly/ccmsuite
|
8c810ada908e7a957706ca8ebcde9c708f63c0e5
|
b1249fcd85fedceb07f67209c368f18c47501cc8
|
refs/heads/master
| 2020-12-28T21:40:01.070200
| 2018-01-17T22:50:59
| 2018-01-17T22:50:59
| 17,306,060
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,726
|
py
|
import ccm
log=ccm.log()
from ccm.lib.actr import *
class Env(ccm.Model):
    """Environment that records every item the embedded model says."""
    # Recalled items in the order they were reported.
    # NOTE(review): class-level list — shared across Env instances; confirm
    # only one Env is ever created (as in the __main__ section below).
    result=[]
    def say(self,x):
        # Record the spoken item and echo it to the log.
        self.result.append(x)
        log.x=x
class Grouped(ACTR):
    """ACT-R model that recalls a nine-digit list stored as three groups of three."""
    focus=Buffer()
    retrieval=Buffer()
    # Declarative memory answers requests into the retrieval buffer, with
    # activation noise and partial matching on the position slot values.
    memory=Memory(retrieval,threshold=-0.5,latency=1)
    noise=DMNoise(memory,0.15)
    partial=Partial(memory)
    # Adjacent list positions are somewhat confusable with each other.
    partial.similarity('first','second',-0.5)
    partial.similarity('second','third',-0.5)
    result=[]
    def init():
        # List structure: three group chunks under 'list', each group
        # holding three digit chunks in first/second/third position.
        memory.add('name:group1 parent:list position:first')
        memory.add('parent:group1 name:1 position:first')
        memory.add('parent:group1 name:2 position:second')
        memory.add('parent:group1 name:3 position:third')
        memory.add('name:group2 parent:list position:second')
        memory.add('parent:group2 name:4 position:first')
        memory.add('parent:group2 name:5 position:second')
        memory.add('parent:group2 name:6 position:third')
        memory.add('name:group3 parent:list position:third')
        memory.add('parent:group3 name:7 position:first')
        memory.add('parent:group3 name:8 position:second')
        memory.add('parent:group3 name:9 position:third')
        focus.set('start list')
    def recall_first_group(focus='start ?list'):
        # Kick off recall by requesting the first group of the list.
        focus.set('group first ?list')
        memory.request('parent:?list position:first')
    def start_recall_of_group(focus='group ?gpos ?list',retrieval='name:?groupname'):
        # A group chunk was retrieved; ask for its first item.
        memory.request('parent:?groupname position:first')
        focus.set('item pos:first groupname:?groupname gpos:?gpos list:?list')
        retrieval.clear()
    def harvest_first_item(focus='item pos:first groupname:?groupname',retrieval='name:?x'):
        # Report the retrieved item and request the next one in the group.
        self.parent.say(x)
        focus.modify(pos='second')
        memory.request('parent:?groupname position:second')
        retrieval.clear()
    def harvest_second_item(focus='item pos:second groupname:?groupname',retrieval='name:?x'):
        self.parent.say(x)
        focus.modify(pos='third')
        memory.request('parent:?groupname position:third')
        retrieval.clear()
    def harvest_third_item(focus='item pos:third groupname:?groupname',retrieval='name:?x'):
        # Requesting a (nonexistent) fourth item triggers the retrieval
        # error that the group-transition rules below match on.
        self.parent.say(x)
        focus.modify(pos='fourth')
        memory.request('parent:?groupname position:fourth')
        retrieval.clear()
    def second_group(focus='item gpos:first list:?list',memory='error:True'):
        # First group exhausted: move on to the second group.
        memory.request('parent:?list position:second')
        focus.set('group second ?list')
        retrieval.clear()
    def third_group(focus='item gpos:second list:?list',memory='error:True'):
        # Second group exhausted: move on to the third group.
        memory.request('parent:?list position:third')
        focus.set('group third ?list')
        retrieval.clear()
# Instantiate the environment with the model inside it, run to completion,
# and record the recalled sequence in the log.
env=Env()
env.m=Grouped()
env.run()
log.result=env.result
|
[
"tcstewar@uwaterloo.ca"
] |
tcstewar@uwaterloo.ca
|
bfba49e38303e77949de411ed0088ac40098eabf
|
205f41ac0a04d14c8d7995ee66c1e5043f255a2d
|
/imagebot/pysix.py
|
676861e86b76e1ec92598ccd3284e37ac2742e30
|
[
"MIT"
] |
permissive
|
piyushd26/imagebot
|
c4f6a2ac112ec84c268ce2ffa395648935ecf40e
|
113ea3344b54502e11c028c1c4c391f60abe5dfe
|
refs/heads/master
| 2022-12-25T07:02:56.151281
| 2020-10-01T14:13:19
| 2020-10-01T14:13:19
| 300,303,316
| 0
| 0
|
MIT
| 2020-10-01T14:12:00
| 2020-10-01T14:11:59
| null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
import logging
import sys

# Major version of the running interpreter (2 or 3).
ver = sys.version_info[0]

# Mapping of level name -> numeric level, built from whichever private
# table this Python's logging module exposes.
if ver < 3:
    _logLevelNames = logging._levelNames
else:
    _logLevelNames = {name: level for level, name in logging._levelToName.items()}


def err_msg(e):
    """Return the message attribute of exception *e* for this Python version."""
    return e.message if ver < 3 else e.msg


# Name of the Tk GUI module under this Python version.
tkinter = 'Tkinter' if ver < 3 else 'tkinter'
|
[
"babaiscool@gmail.com"
] |
babaiscool@gmail.com
|
20485de31fef16693a9db32635516cddb05a8c2c
|
6b6f68f507746e3e39b0e8789af5d044e27d6b0a
|
/Math/0172_FactorialTrailingZeroes_E.py
|
147075d83adb6181237a370b650f93912329e869
|
[] |
no_license
|
PFZ86/LeetcodePractice
|
bb0012d8b3120451dda1745875836278d3362e45
|
6db9db1934bc0a8142124d8b56bf6c07bdf43d79
|
refs/heads/master
| 2021-08-28T08:43:27.343395
| 2021-08-17T20:38:32
| 2021-08-17T20:38:32
| 230,925,656
| 1
| 1
| null | 2021-08-17T20:38:32
| 2019-12-30T14:01:27
|
Python
|
UTF-8
|
Python
| false
| false
| 315
|
py
|
# https://leetcode.com/problems/factorial-trailing-zeroes/
# Solution 1:
class Solution(object):
    def trailingZeroes(self, n):
        """Return the number of trailing zeroes in n!.

        Counts the factors of 5 in 1..n (5s are scarcer than 2s, and each
        2*5 pair yields one trailing zero): n//5 + n//25 + n//125 + ...

        :type n: int
        :rtype: int
        """
        result = 0
        while n:
            # Bug fix: must be floor division — under Python 3, `n /= 5`
            # produces a float, so the loop never reaches exactly 0 and the
            # running sum is wrong.
            n //= 5
            result += n
        return result
|
[
"pengfeizang@pengfeis-iMac.fios-router.home"
] |
pengfeizang@pengfeis-iMac.fios-router.home
|
e4a95cd7e79a23db19fd4e93a195f3590509cd82
|
7f8f49e77c3d71531688c41e5147c75257a661f6
|
/scripts/mpl_stylesheet.py
|
83c9301c5800b94fbc18c7a6f8a4e2ab988f1ced
|
[] |
no_license
|
francosimonetti/trans-eqtl-pipeline
|
4ef83e007ee9d4e5aeb8650b30e4684b4150d6e8
|
877b835d832649056cd80cbb7feeedcebebcfb69
|
refs/heads/master
| 2023-06-19T18:40:55.354166
| 2021-03-02T20:34:29
| 2021-03-02T20:34:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,144
|
py
|
import matplotlib
from matplotlib import cycler
## Resources from:
## https://matplotlib.org/users/customizing.html
## How to change color and plot styles?
## https://matplotlib.org/users/dflt_style_changes.html
## matplotlib.rcParams[] =
def banskt_presentation(black = '#333333', linewidth = 2, ticksize = 8, fontsize = 28, padding = 10, fontfamily = 'latex', colors = 'banskt'):
    """Configure matplotlib rcParams for presentation-style figures.

    black      -- near-black color used for text, axes, ticks and patch edges
    linewidth  -- width of axes, ticks and patch edges
    ticksize   -- major tick length (minor ticks get 2/3 of it)
    fontsize   -- base font size; titles are scaled by 1.2
    padding    -- label/tick padding in points
    fontfamily -- 'latex', 'latex-clearsans' or 'system'
    colors     -- 'banskt' or 'kelly' palette for the color cycle

    NOTE(review): any other value of ``colors`` leaves ``mcolors`` unbound and
    raises NameError at the prop_cycle assignment below — confirm whether a
    default fallback was intended.
    """
    if colors == 'banskt':
        mcolors = banskt_colors()
    elif colors == 'kelly':
        mcolors = kelly_colors()
    if fontfamily == 'latex':
        matplotlib.rcParams['text.latex.preamble'] = [r'\usepackage[sfdefault,scaled=.85, lining]{FiraSans}',
                                                      r'\usepackage[cmintegrals]{newtxsf}',
                                                      r'\usepackage{microtype}',
                                                     ]
        matplotlib.rcParams['text.usetex'] = True
    elif fontfamily == 'latex-clearsans':
        # NOTE(review): unlike the 'latex' branch this one never sets
        # text.usetex = True, so the preamble has no effect — confirm intent.
        matplotlib.rcParams['text.latex.preamble'] = [r'\usepackage[scaled=.86]{ClearSans}',
                                                      r'\usepackage[libertine]{newtxmath}',
                                                      r'\usepackage{microtype}',
                                                     ]
    elif fontfamily == 'system':
        matplotlib.rcParams['font.family'] = 'sans-serif'
        matplotlib.rcParams['font.sans-serif'] = 'DejaVu Sans'
        matplotlib.rcParams['mathtext.fontset'] = 'stixsans'
    # Size
    matplotlib.rcParams['figure.figsize'] = 8, 8
    # Fonts
    matplotlib.rcParams['font.size'] = fontsize
    matplotlib.rcParams['text.color'] = black
    matplotlib.rcParams['axes.titlesize'] = fontsize * 1.2
    matplotlib.rcParams['axes.labelsize'] = fontsize
    matplotlib.rcParams['axes.labelweight'] = 'normal'
    matplotlib.rcParams['axes.labelcolor'] = black
    matplotlib.rcParams['xtick.labelsize'] = fontsize
    matplotlib.rcParams['ytick.labelsize'] = fontsize
    matplotlib.rcParams['legend.fontsize'] = fontsize
    # Axes
    #matplotlib.rcParams['axes.titlepad'] = 50
    matplotlib.rcParams['axes.edgecolor'] = black
    matplotlib.rcParams['axes.facecolor'] = 'white'
    matplotlib.rcParams['axes.labelpad'] = 20
    matplotlib.rcParams['axes.linewidth'] = linewidth
    # Legend
    matplotlib.rcParams['legend.facecolor'] = 'inherit'
    matplotlib.rcParams['legend.edgecolor'] = black
    matplotlib.rcParams['legend.frameon'] = False
    matplotlib.rcParams['legend.numpoints'] = 1
    matplotlib.rcParams['legend.scatterpoints'] = 1
    matplotlib.rcParams['legend.markerscale'] = 1.0
    # Dimensions as fraction of fontsize
    matplotlib.rcParams['legend.borderpad'] = 0
    matplotlib.rcParams['legend.labelspacing'] = 0.3
    matplotlib.rcParams['legend.handlelength'] = 0.5
    matplotlib.rcParams['legend.handleheight'] = 0.9
    matplotlib.rcParams['legend.handletextpad'] = 0.5
    # Ticks: majors on bottom/left only, minors hidden.
    matplotlib.rcParams['xtick.major.top'] = False
    matplotlib.rcParams['xtick.major.bottom'] = True
    matplotlib.rcParams['xtick.minor.top'] = False
    matplotlib.rcParams['xtick.minor.bottom'] = False
    matplotlib.rcParams['ytick.major.left'] = True
    matplotlib.rcParams['ytick.major.right'] = False
    matplotlib.rcParams['ytick.minor.left'] = False
    matplotlib.rcParams['ytick.minor.right'] = False
    matplotlib.rcParams['xtick.major.size'] = ticksize
    matplotlib.rcParams['xtick.minor.size'] = 2 * ticksize / 3.0
    matplotlib.rcParams['ytick.major.size'] = ticksize
    matplotlib.rcParams['ytick.minor.size'] = 2 * ticksize / 3.0
    matplotlib.rcParams['xtick.major.pad'] = padding
    matplotlib.rcParams['xtick.minor.pad'] = padding
    matplotlib.rcParams['ytick.major.pad'] = padding
    matplotlib.rcParams['ytick.minor.pad'] = padding
    matplotlib.rcParams['xtick.major.width'] = linewidth
    matplotlib.rcParams['xtick.minor.width'] = linewidth
    matplotlib.rcParams['ytick.major.width'] = linewidth
    matplotlib.rcParams['ytick.minor.width'] = linewidth
    matplotlib.rcParams['xtick.color'] = black
    matplotlib.rcParams['ytick.color'] = black
    # Color cycle
    matplotlib.rcParams['axes.prop_cycle'] = cycler('color', mcolors)
    # Histogram
    matplotlib.rcParams['hist.bins'] = 20
    # Patches
    # matplotlib.rcParams['patch.facecolor'] = mcolors[0] # doesn't have any effect, comes from prop_cycle
    matplotlib.rcParams['patch.edgecolor'] = black
    matplotlib.rcParams['patch.linewidth'] = linewidth / 2
    matplotlib.rcParams['patch.force_edgecolor'] = True
    # For scatter plot, show only left and bottom axes
    # NOTE(review): comment and values disagree — all four spines are set to
    # True here, so every axis line is shown; confirm which is intended.
    matplotlib.rcParams['axes.spines.left'] = True
    matplotlib.rcParams['axes.spines.bottom'] = True
    matplotlib.rcParams['axes.spines.top'] = True
    matplotlib.rcParams['axes.spines.right'] = True
    return
def banskt_colors():
    """Return the 'banskt' palette as a list of hex color strings."""
    # blue, red, vivid yellowish green, gray,
    # purple, vivid yellow, dark brown, olive
    return ['#2D69C4', '#CC2529', '#93AA00', '#535154',
            '#6B4C9A', '#FFB300', '#922428', '#948B3D']
def kelly_colors():
    """Return 20 of Kelly's maximum-contrast colors as hex strings.

    The first seven are distinguishable for people with defective color
    vision; the remaining thirteen are not.
    """
    color_safe = (
        '#FFB300',  # Vivid Yellow
        '#803E75',  # Strong Purple
        '#FF6800',  # Vivid Orange
        '#A6BDD7',  # Very Light Blue
        '#C10020',  # Vivid Red
        '#CEA262',  # Grayish Yellow
        '#817066',  # Medium Gray
    )
    color_unsafe = (
        '#007D34',  # Vivid Green
        '#F6768E',  # Strong Purplish Pink
        '#00538A',  # Strong Blue
        '#FF7A5C',  # Strong Yellowish Pink
        '#53377A',  # Strong Violet
        '#FF8E00',  # Vivid Orange Yellow
        '#B32851',  # Strong Purplish Red
        '#F4C800',  # Vivid Greenish Yellow
        '#7F180D',  # Strong Reddish Brown
        '#93AA00',  # Vivid Yellowish Green
        '#593315',  # Deep Yellowish Brown
        '#F13A13',  # Vivid Reddish Orange
        '#232C16',  # Dark Olive Green
    )
    return list(color_safe + color_unsafe)
|
[
"bnrj.saikat@gmail.com"
] |
bnrj.saikat@gmail.com
|
beba7761e3c9f91219f06b74b9cbab4211c96b1e
|
4148260054c2cf4605dacb8bdef3605c82eca470
|
/temboo/Library/YouTube/GetVideoData.py
|
251023437876e0043e127d825dfedaf39bd70dca
|
[] |
no_license
|
wimsy/actuarize-web
|
0f23d5f00afe3d36d430621cdb497d2e64998416
|
5f43af3019da6fb08cafeec9ff0a89df5196b864
|
refs/heads/master
| 2021-03-12T19:38:21.887681
| 2012-12-19T01:13:50
| 2012-12-19T01:13:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,892
|
py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# GetVideoData
# Retrieve information about a single video using its ID.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
class GetVideoData(Choreography):
    """
    Create a new instance of the GetVideoData Choreography. A TembooSession object, containing a valid
    set of Temboo credentials, must be supplied.
    """
    def __init__(self, temboo_session):
        # Register this choreo under its fixed Temboo library path.
        Choreography.__init__(self, temboo_session, '/Library/YouTube/GetVideoData')
    def new_input_set(self):
        # Factory for the choreo-specific input container.
        return GetVideoDataInputSet()
    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the typed result set.
        return GetVideoDataResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Handle used to track an asynchronous execution of this choreo.
        return GetVideoDataChoreographyExecution(session, exec_id, path)
"""
An InputSet with methods appropriate for specifying the inputs to the GetVideoData
choreography. The InputSet object is used to specify input parameters when executing this choreo.
"""
class GetVideoDataInputSet(InputSet):
"""
Set the value of the Callback input for this choreography. ((optional, string) Value to identify the callback function to which the API response will be sent. Only necessary when ResponseFormat is jason-in-script.)
"""
def set_Callback(self, value):
InputSet._set_input(self, 'Callback', value)
"""
Set the value of the ResponseFormat input for this choreography. ((optional, string) The format of the response from YouTube. Accepts atom, rss, json, json-in-script, and jsonc. Defaults to atom.)
"""
def set_ResponseFormat(self, value):
InputSet._set_input(self, 'ResponseFormat', value)
"""
Set the value of the VideoID input for this choreography. ((required, string) The unique ID given to a video by YouTube.)
"""
def set_VideoID(self, value):
InputSet._set_input(self, 'VideoID', value)
"""
A ResultSet with methods tailored to the values returned by the GetVideoData choreography.
The ResultSet object is used to retrieve the results of a choreography execution.
"""
class GetVideoDataResultSet(ResultSet):
"""
Retrieve the value for the "Response" output from this choreography execution. (The response from YouTube.)
"""
def get_Response(self):
return self._output.get('Response', None)
class GetVideoDataChoreographyExecution(ChoreographyExecution):
    """Execution handle; only customises which ResultSet class wraps responses."""
    def _make_result_set(self, response, path):
        return GetVideoDataResultSet(response, path)
|
[
"mike.wimsatt@gmail.com"
] |
mike.wimsatt@gmail.com
|
87b1dfad526d13c87d36c656f69dc17a42896b7a
|
966280ab617298a3ced79bc60189b301c795067a
|
/Arrays/849_Maximize_Distance_to_Closest_Person.py
|
a0995f69ead8954fb648b5097c491a25f7386fab
|
[] |
no_license
|
Rishabhh/LeetCode-Solutions
|
c0382e5ba5b77832322c992418f697f42213620f
|
2536744423ee9dc7da30e739eb0bca521c216f00
|
refs/heads/master
| 2020-06-10T02:37:42.103289
| 2019-05-29T06:38:02
| 2019-05-29T06:38:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 920
|
py
|
class Solution:
    def maxDistToClosest(self, seats):
        """Return the largest achievable distance to the nearest occupied seat.

        :type seats: List[int] -- 1 = occupied, 0 = empty; contains at least
                     one occupied and one empty seat (LeetCode 849 constraint)
        :rtype: int

        Single pass: between two occupied seats i < j the best spot is their
        midpoint, at distance (j - i) // 2; a leading or trailing run of empty
        seats contributes its full length.

        Fixes the previous version, which referenced ``sys.maxsize`` without
        ``sys`` ever being imported in this file (NameError at runtime) and
        built two temporary reversed lists.
        """
        last_occupied = -1
        best = 0
        for i, seat in enumerate(seats):
            if seat:
                if last_occupied < 0:
                    # Leading stretch of empty seats: sit at index 0.
                    best = i
                else:
                    best = max(best, (i - last_occupied) // 2)
                last_occupied = i
        # Trailing stretch of empty seats: sit at the far end.
        return max(best, len(seats) - 1 - last_occupied)
|
[
"weihewang2012@gmail.com"
] |
weihewang2012@gmail.com
|
c945d1ab3b59d516b883b0730daa41751640c90e
|
95fcf7ebe0fa31d93a5792ec2970096840be2fa0
|
/synthesize_forcing.py
|
52c7e92d9cc3cc5c9b704f2dfc79ab793e35eef9
|
[] |
no_license
|
alphaparrot/rossby-blocking
|
d40036ff46a590cff115bca5cd65dae20b3872ca
|
a04beb0ff406f92002cd572387bc1ab23ccf5c48
|
refs/heads/master
| 2020-03-20T02:28:15.805933
| 2018-07-10T21:29:32
| 2018-07-10T21:29:32
| 137,114,260
| 0
| 0
| null | 2018-06-15T20:32:15
| 2018-06-12T18:48:57
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,663
|
py
|
import numpy as np
import matplotlib.pyplot as plt
# Block Shape: (time, xwidth, twidth, phase, (xcoord,tcoord)) (the last only for onsets)
if __name__=="__main__":
nblocks = np.zeros((5,10,5,20))
starts = np.zeros((5,10,5,20,2),dtype=object)
avgdelays = np.zeros((5,10,5,20))
stddelays = np.zeros((5,10,5,20))
phase = np.zeros(20)
foundphase = False
t1 = 0.5
t2 = 10.0
x1 = 50.0
x2 = 5000.0
p1 = 0.5
p2 = 10.0
peaks = np.logspace(np.log10(p1),np.log10(p2),num=5)
xwidth = np.logspace(np.log10(x1),np.log10(x2),num=10)
twidth = np.logspace(np.log10(t1),np.log10(t2),num=5)
nt=0
for t in twidth:
nx=0
for x in xwidth:
ip=0
for p in peaks:
name = "block%02.1f_%04.1f_%02.1f.npy"%(t,x,p)
try:
output = np.load(name)
except:
print(name)
raise
for nphase in range(0,20):
nblocks[nt,nx,ip,nphase] = output[nphase]["nblocks"]
starts[nt,nx,ip,nphase,0] = output[nphase]["onset"][0]
starts[nt,nx,ip,nphase,1] = output[nphase]["onset"][1]
dels = np.array(output[nphase]["delay"][1])
if len(dels)>0:
avgdelays[nt,nx,ip,nphase] = np.mean(dels)
stddelays[nt,nx,ip,nphase] = np.std(dels)
else:
avgdelays[nt,nx,ip,nphase] = np.nan
stddelays[nt,nx,ip,nphase] = np.nan
if not foundphase:
phase[nphase] = output[nphase]["forcing phase"]
foundphase=True
ip+=1
nx+=1
print("Finished Time %d of %d"%(nt+1,len(twidth)))
nt+=1
nblkstats = np.zeros((5,10,5,2))
ndelstats = np.zeros((5,10,5,2))
nblkstats[:,:,:,0] = np.mean(nblocks,axis=3)
nblkstats[:,:,:,1] = np.std(nblocks,axis=3)
ndelstats[:,:,:,0] = np.nanmean(avgdelays,axis=3)
ndelstats[:,:,:,1] = np.sqrt(np.nansum((stddelays*avgdelays)**2,axis=3))
output = {"raw blocks":nblocks,
"onset coords":starts,
"onset delays":(avgdelays,stddelays),
"block stats":nblkstats,
"delay stats":ndelstats,
"forcing peak":peaks,
"forcing xwidth":xwidth,
"forcing twidth":twidth,
"phase":phase,
"shape":"(peak,xwidth,twidth,phase or (mean,std))"}
np.save("forcingsweep.npy",output)
|
[
"paradise@astro.utoronto.ca"
] |
paradise@astro.utoronto.ca
|
52261d342e614435f55e2603a9afba68ca74ec2a
|
9f760efe9f29e1a6275897491a26717066009366
|
/raet/test/test_resend.py
|
fff4d770daf03816762c6fd548e94c28b5f7eca8
|
[
"Apache-2.0"
] |
permissive
|
KennethWilke/raet
|
ba2e79dfcbd831bff8e34d9d69f9c4490c233d03
|
9e0aa33e1c75c9ff4c3843094c62806a22ea98bc
|
refs/heads/master
| 2020-12-26T03:33:14.493683
| 2014-04-25T00:56:41
| 2014-04-25T00:56:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,454
|
py
|
# -*- coding: utf-8 -*-
'''
Tests to try out stacking. Potentially ephemeral
'''
# pylint: skip-file
import os
import time
from ioflo.base.odicting import odict
from ioflo.base.aiding import Timer, StoreTimer
from ioflo.base import storing
from ioflo.base.consoling import getConsole
console = getConsole()
from salt.transport.road.raet import (raeting, nacling, packeting, keeping,
estating, yarding, transacting, stacking)
def test():
    '''
    Exercise join, allow and message transactions between two RAET road
    stacks serviced on a simulated clock (Python 2 script).

    initially
        master on port 7530 with eid of 1
        minion on port 7531 with eid of 0
    eventually
        master eid of 1
        minion eid of 2
    '''
    console.reinit(verbosity=console.Wordage.concise)

    store = storing.Store(stamp=0.0)

    #master stack
    masterName = "master"
    signer = nacling.Signer()
    masterSignKeyHex = signer.keyhex
    privateer = nacling.Privateer()
    masterPriKeyHex = privateer.keyhex
    masterDirpath = os.path.join(os.getcwd(), 'keep', masterName)

    #minion0 stack
    minionName0 = "minion0"
    signer = nacling.Signer()
    minionSignKeyHex = signer.keyhex
    privateer = nacling.Privateer()
    minionPriKeyHex = privateer.keyhex
    m0Dirpath = os.path.join(os.getcwd(), 'keep', minionName0)

    # Start from a clean on-disk keep state for both stacks.
    keeping.clearAllKeepSafe(masterDirpath)
    keeping.clearAllKeepSafe(m0Dirpath)

    estate = estating.LocalEstate( eid=1,
                                   name=masterName,
                                   sigkey=masterSignKeyHex,
                                   prikey=masterPriKeyHex,)
    stack0 = stacking.RoadStack(name=masterName,
                                estate=estate,
                                store=store,
                                main=True,
                                dirpath=masterDirpath)

    estate = estating.LocalEstate( eid=0,
                                   name=minionName0,
                                   ha=("", raeting.RAET_TEST_PORT),
                                   sigkey=minionSignKeyHex,
                                   prikey=minionPriKeyHex,)
    stack1 = stacking.RoadStack(name=minionName0,
                                estate=estate,
                                store=store,
                                dirpath=m0Dirpath)

    print "\n********* Join Transaction **********"
    stack1.join()
    #timer = StoreTimer(store=store, duration=3.0)
    # Service both stacks on a simulated 0.1s clock until the join settles.
    while store.stamp < 2.0:
        stack1.serviceAll()
        stack0.serviceAll()
        if store.stamp >= 0.3:
            # Auto-accept any pending remote keys on the master side.
            for estate in stack0.estates.values():
                if estate.acceptance == raeting.acceptances.pending:
                    stack0.safe.acceptRemote(estate)
        store.advanceStamp(0.1)
        time.sleep(0.1)

    for estate in stack0.estates.values():
        print "Remote Estate {0} joined= {1}".format(estate.eid, estate.joined)
    for estate in stack1.estates.values():
        print "Remote Estate {0} joined= {1}".format(estate.eid, estate.joined)

    print "{0} eid={1}".format(stack0.name, stack0.estate.uid)
    print "{0} estates=\n{1}".format(stack0.name, stack0.estates)
    print "{0} transactions=\n{1}".format(stack0.name, stack0.transactions)
    print "{0} eid={1}".format(stack1.name, stack1.estate.uid)
    print "{0} estates=\n{1}".format(stack1.name, stack1.estates)
    print "{0} transactions=\n{1}".format(stack1.name, stack1.transactions)

    # Dump the persisted road/safe keep data for both stacks.
    print "Road {0}".format(stack0.name)
    print stack0.road.loadLocalData()
    print stack0.road.loadAllRemoteData()
    print "Safe {0}".format(stack0.name)
    print stack0.safe.loadLocalData()
    print stack0.safe.loadAllRemoteData()
    print

    print "Road {0}".format(stack1.name)
    print stack1.road.loadLocalData()
    print stack1.road.loadAllRemoteData()
    print "Safe {0}".format(stack1.name)
    print stack1.safe.loadLocalData()
    print stack1.safe.loadAllRemoteData()
    print

    print "\n********* Allow Transaction **********"
    # Abort if the join above did not succeed.
    if not stack1.estates.values()[0].joined:
        return
    stack1.allow()
    #timer = StoreTimer(store=store, duration=3.0)
    while store.stamp < 4.0:
        stack1.serviceAll()
        stack0.serviceAll()
        store.advanceStamp(0.1)
        time.sleep(0.1)

    for estate in stack0.estates.values():
        print "Remote Estate {0} allowed= {1}".format(estate.eid, estate.allowed)
    for estate in stack1.estates.values():
        print "Remote Estate {0} allowed= {1}".format(estate.eid, estate.allowed)

    print "{0} eid={1}".format(stack0.name, stack0.estate.uid)
    print "{0} estates=\n{1}".format(stack0.name, stack0.estates)
    print "{0} transactions=\n{1}".format(stack0.name, stack0.transactions)
    print "{0} eid={1}".format(stack1.name, stack1.estate.uid)
    print "{0} estates=\n{1}".format(stack1.name, stack1.estates)
    print "{0} transactions=\n{1}".format(stack1.name, stack1.transactions)

    #while stack1.transactions or stack0.transactions:
        #stack1.serviceAll()
        #stack0.serviceAll()
        #store.advanceStamp(0.1)

    print "{0} Stats".format(stack0.name)
    for key, val in stack0.stats.items():
        print "    {0}={1}".format(key, val)
    print
    print "{0} Stats".format(stack1.name)
    for key, val in stack1.stats.items():
        print "    {0}={1}".format(key, val)
    print

    print "\n********* Message Transactions Both Ways Again **********"
    #stack1.transmit(odict(house="Oh Boy1", queue="Nice"))
    #stack1.transmit(odict(house="Oh Boy2", queue="Mean"))
    #stack1.transmit(odict(house="Oh Boy3", queue="Ugly"))
    #stack1.transmit(odict(house="Oh Boy4", queue="Pretty"))

    #stack0.transmit(odict(house="Yeah Baby1", queue="Good"))
    #stack0.transmit(odict(house="Yeah Baby2", queue="Bad"))
    #stack0.transmit(odict(house="Yeah Baby3", queue="Fast"))
    #stack0.transmit(odict(house="Yeah Baby4", queue="Slow"))

    #segmented packets
    # Build a 3000-character payload so the message must be segmented.
    stuff = []
    for i in range(300):
        stuff.append(str(i).rjust(10, " "))
    stuff = "".join(stuff)

    stack1.transmit(odict(house="Snake eyes", queue="near stuff", stuff=stuff))
    stack0.transmit(odict(house="Craps", queue="far stuff", stuff=stuff))
    #timer.restart(duration=3)
    while store.stamp < 8.0: #not timer.expired
        stack1.serviceAll()
        stack0.serviceAll()
        store.advanceStamp(0.1)
        time.sleep(0.1)

    print "{0} eid={1}".format(stack0.name, stack0.estate.uid)
    print "{0} estates=\n{1}".format(stack0.name, stack0.estates)
    print "{0} transactions=\n{1}".format(stack0.name, stack0.transactions)
    print "{0} Received Messages".format(stack0.name)
    for msg in stack0.rxMsgs:
        print msg
    print "{0} Stats".format(stack0.name)
    for key, val in stack0.stats.items():
        print "    {0}={1}".format(key, val)
    print

    print "{0} eid={1}".format(stack1.name, stack1.estate.uid)
    print "{0} estates=\n{1}".format(stack1.name, stack1.estates)
    print "{0} transactions=\n{1}".format(stack1.name, stack1.transactions)
    print "{0} Received Messages".format(stack1.name)
    for msg in stack1.rxMsgs:
        print msg
    print "{0} Stats".format(stack1.name)
    for key, val in stack1.stats.items():
        print "    {0}={1}".format(key, val)
    print

    # Close sockets and wipe all persisted keep data.
    stack0.server.close()
    stack1.server.close()
    stack0.clearLocal()
    stack0.clearRemoteKeeps()
    stack1.clearLocal()
    stack1.clearRemoteKeeps()
if __name__ == "__main__":
test()
|
[
"smith.samuel.m@gmail.com"
] |
smith.samuel.m@gmail.com
|
8bc4198785572801be0e33bd3d272e583af48ee1
|
1f5dc6ce24fdf4557456e3cd6775142338043d9c
|
/active_reward_learning/envs/reward_model_mean_wrapper.py
|
35a2121067a6bf1dea06a214f4b3b56acc0f21ba
|
[
"MIT"
] |
permissive
|
david-lindner/idrl
|
7c747211fc38e0478b26d254206ddef70d3dac64
|
54cfad330b0598ad4f6621796f2411644e50a6ba
|
refs/heads/main
| 2023-09-02T11:20:23.120737
| 2021-11-16T17:31:33
| 2021-11-16T17:31:33
| 421,450,567
| 13
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,208
|
py
|
from types import SimpleNamespace
from typing import Dict, Tuple, Union
import gym
import numpy as np
class RewardModelMeanWrapper(gym.RewardWrapper):
    """Gym reward wrapper that replaces the environment reward with the
    posterior mean of a learned GP reward model.

    The environment reward is preserved in ``info["true_reward"]`` and the
    model's prediction is recorded in ``info["inferred_reward"]``.
    """

    def __init__(self, env: gym.Env, reward_model, debug=False, normalize=False):
        self.reward_model = reward_model
        self.debug = debug
        self.normalize = normalize
        # import outside leads to circular import
        from active_reward_learning.reward_models.kernels.linear import LinearKernel

        # A linear kernel admits a cheap dot-product prediction in step().
        self.is_linear = isinstance(self.reward_model.gp_model.kernel, LinearKernel)
        super().__init__(env)

    def step(self, action: int) -> Tuple[Union[int, np.ndarray], float, bool, Dict]:
        """Step the wrapped env and substitute the model-mean reward.

        Expects the wrapped env to put the feature vector for the GP under
        ``info["gp_repr"]``.
        """
        obs, reward, done, info = self.env.step(action)
        info["true_reward"] = reward
        if self.debug:
            print()
            print("gp_repr", info["gp_repr"])
            print("reward true", reward)
        if self.is_linear:
            weight = self.reward_model.gp_model.linear_predictive_mean
            if self.normalize:
                weight /= np.linalg.norm(weight) + 1e-3
            # DL: This is necessary for performance reasons in the Mujoco environments
            reward = np.dot(info["gp_repr"], weight)
        else:
            if self.normalize:
                raise NotImplementedError()
            reward, _ = self.reward_model.gp_model.predict([info["gp_repr"]])
            if isinstance(reward, np.ndarray):
                assert reward.shape == (1,)
                reward = reward[0]
        info["inferred_reward"] = reward
        if self.debug:
            print("reward new", reward)
            print()
        return obs, reward, done, info

    @classmethod
    def load_from_model(cls, env, filename, debug=False):
        """Construct a wrapper around *env* from a GP model saved at *filename*."""
        # importing here prevents some circular dependencies
        from active_reward_learning.reward_models.gaussian_process_linear import (
            LinearObservationGP,
        )

        gp_model = LinearObservationGP.load(filename)
        # Bug fix: the message previously printed a literal placeholder
        # ("Loaded mode from (unknown)") instead of the actual path.
        print(f"Loaded model from {filename}")
        reward_model = SimpleNamespace(gp_model=gp_model)
        return cls(env, reward_model, debug=debug)
|
[
"dev@davidlindner.me"
] |
dev@davidlindner.me
|
98635d4fc5f53515fc9a84f85e75df945ff982b5
|
9d0d3b7de45213b285856b85c0045166524826aa
|
/pymorphy2/tokenizers.py
|
57806fb35350349c44c49a7111887a34b1e6c623
|
[] |
no_license
|
albedium/pymorphy2
|
b9b41eacef6c3d12464a513f76e71e64ece125ee
|
174662e229535cba7ae06360d14d41c943175b3a
|
refs/heads/master
| 2021-01-14T11:49:19.806721
| 2015-05-02T21:06:20
| 2015-05-02T21:06:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
# -*- coding: utf-8 -*-
import re
# Splits on any non-word character except '-' and '_'; '+' is captured as
# its own token.  The capturing group keeps the delimiters in the result.
GROUPING_SPACE_REGEX = re.compile('([^\w_-]|[+])', re.UNICODE)


def simple_word_tokenize(text, _split=GROUPING_SPACE_REGEX.split):
    """Split *text* into tokens, keeping hyphenated words together."""
    tokens = []
    for token in _split(text):
        # Drop empty fragments and pure-whitespace delimiters.
        if token and not token.isspace():
            tokens.append(token)
    return tokens
|
[
"kmike84@gmail.com"
] |
kmike84@gmail.com
|
9970bdc6d139a4de2ffe6edfe933ebe791804b02
|
08024f21c1319d54ea3e524edf08e614eb0960bc
|
/old code/DoubanClient/douban_top250_demoV2.py
|
f8981b23e5671add044a5d515fc708ec6da8dc8e
|
[] |
no_license
|
beforeuwait/webCrawl
|
08dd5375da93f511672d746c64ef42cf5b25449c
|
dc30ed7ef00f077894cfc0c2555f9dddb22c3537
|
refs/heads/master
| 2020-06-23T02:16:40.828657
| 2017-06-15T02:59:05
| 2017-06-15T02:59:05
| 74,671,539
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,672
|
py
|
# -*- coding:utf8 -*-
import requests
from lxml import etree
import pymongo
class DoubanClient():
    """Scraper for the Douban movie Top 250 list that stores each movie in
    MongoDB (database Douban, collection DoubanMovieTop250).  Python 2 code.
    """
    def __init__(self):
        object.__init__(self)
        self.url = 'http://movie.douban.com/top250'
        self.headers = {
            'User-Agent':
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.100 Safari/537.36',
            'Host': 'movie.douban.com'
        }
        self.session = requests.session()
        self.session.headers.update(self.headers)
        self.connection = pymongo.MongoClient()
    def setupsession(self):
        # Fetch the first page and reuse its cookies for all later requests.
        r = self.session.get(self.url)
        # Use the response cookies as the cookies for subsequent requests.
        self.cookies = r.cookies
        self.session.cookies.update(self.cookies)
        dbName =self.connection.Douban
        self.post_info = dbName.DoubanMovieTop250
        # Creating the connection also (lazily) creates the database.
        return self.get_data(r.content)
    def get_data(self, content):
        # Parse one listing page, insert each movie, then follow the
        # "next page" link recursively until there is none.
        selector = etree.HTML(content)
        input_data = {}
        Movies = selector.xpath('//div[@class="info"]')
        for eachMovie in Movies:
            # A movie can have several title spans; concatenate them.
            title = eachMovie.xpath('div[@class="hd"]/a/span/text()')
            full_title = ''
            for each in title:
                full_title += each
            input_data['title'] = full_title
            input_data['movieInfo'] = eachMovie.xpath('div[@class="bd"]/p/text()')[0].replace(' ','')
            input_data['star'] = eachMovie.xpath('div[@class="bd"]/div[@class="star"]/span[@class="rating_num"]/text()')[0]
            # Some movies have no quote; fall back to an empty string.
            quote = eachMovie.xpath('div[@class="bd"]/p[@class="quote"]/span/text()')
            if quote:
                input_data['quote'] = quote[0]
            else:
                input_data['quote'] = ''
            # Records are inserted one dict at a time, so reset the dict
            # after each insert rather than reusing the mutated one.
            self.post_info.insert(input_data)
            input_data = {}
        Paginator = selector.xpath('//span[@class="next"]/a/@href')
        # The last page has no "next" link, so guard on the empty list.
        if Paginator:
            paginator_url = 'http://movie.douban.com/top250'+Paginator[0]
            n = self.session.get(paginator_url)
            return self.nextPage(n.content)
        print 'it\'done'
    # Receives the next page's content and feeds it back into get_data.
    def nextPage(self, content):
        return self.get_data(content)
# Kick off the crawl when the script is run directly.
if __name__ == '__main__':
    c = DoubanClient()
    c.setupsession()
|
[
"forme.wjw@aliyun.com"
] |
forme.wjw@aliyun.com
|
57c88811564948b7722125c6c0d4fb97522586aa
|
7c964cd93343ac704ac3d9c82c977a0cd0a672e7
|
/listing/migrations/0001_initial.py
|
de9a37a83efcd5555bca0d1c47a762b2a37abb1c
|
[] |
no_license
|
praekelt/jmbo-listing
|
be32bd43225ccf6de2c530e46c54dcac6b3c6a46
|
91a9e369a67cccef38d125e16272e01187c0ef1c
|
refs/heads/master
| 2020-04-19T10:31:51.345676
| 2017-08-30T12:20:30
| 2017-08-30T12:20:30
| 66,532,006
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,099
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-08-25 07:21
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('jmbo', '0003_auto_20160530_1247'),
('contenttypes', '0002_remove_content_type_name'),
('sites', '0002_alter_domain_unique'),
('category', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Listing',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(help_text=b'A short descriptive title.', max_length=256)),
('subtitle', models.CharField(blank=True, help_text=b'Some titles may be the same. A subtitle makes a distinction. It is not displayed on the site.', max_length=256, null=True)),
('slug', models.SlugField(max_length=32)),
('count', models.IntegerField(default=0, help_text=b'Number of items to display (excludes any pinned items).\nSet to zero to display all items.')),
('style', models.CharField(choices=[(b'Horizontal', b'Horizontal'), (b'Vertical', b'Vertical'), (b'Promo', b'Promo'), (b'VerticalThumbnail', b'VerticalThumbnail'), (b'Widget', b'Widget'), (b'CustomFive', b'CustomFive'), (b'CustomFour', b'CustomFour'), (b'CustomOne', b'CustomOne'), (b'CustomThree', b'CustomThree'), (b'CustomTwo', b'CustomTwo'), (b'Horizontal', b'Horizontal'), (b'Promo', b'Promo'), (b'Vertical', b'Vertical'), (b'VerticalThumbnail', b'VerticalThumbnail'), (b'Widget', b'Widget'), (b'Widget', b'Widget'), (b'CustomOne', b'CustomOne'), (b'CustomTwo', b'CustomTwo'), (b'CustomThree', b'CustomThree'), (b'CustomFour', b'CustomFour'), (b'CustomFive', b'CustomFive')], max_length=64)),
('items_per_page', models.PositiveIntegerField(default=0, help_text=b'Number of items displayed on a page (excludes any pinned items). Set to zero to disable paging.')),
('categories', models.ManyToManyField(blank=True, help_text=b'Categories for which to collect items.', null=True, related_name='listing_categories', to='category.Category')),
],
options={
'ordering': ('title', 'subtitle'),
},
),
migrations.CreateModel(
name='ListingContent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position', models.PositiveIntegerField(default=0)),
('listing', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='content_link_to_listing', to='listing.Listing')),
('modelbase_obj', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jmbo.ModelBase')),
],
),
migrations.CreateModel(
name='ListingPinned',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position', models.PositiveIntegerField(default=0)),
('listing', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pinned_link_to_listing', to='listing.Listing')),
('modelbase_obj', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jmbo.ModelBase')),
],
),
migrations.AddField(
model_name='listing',
name='content',
field=models.ManyToManyField(blank=True, help_text=b'Individual items to display. Setting this will ignore any setting for <i>Content Type</i>, <i>Categories</i> and <i>Tags</i>.', null=True, related_name='listing_content', through='listing.ListingContent', to='jmbo.ModelBase'),
),
migrations.AddField(
model_name='listing',
name='content_types',
field=models.ManyToManyField(blank=True, help_text=b'Content types to display, eg. post or gallery.', null=True, to='contenttypes.ContentType'),
),
migrations.AddField(
model_name='listing',
name='pinned',
field=models.ManyToManyField(blank=True, help_text=b'Individual items to pin to the top of the listing. These\nitems are visible across all pages when navigating the listing.', null=True, related_name='listing_pinned', through='listing.ListingPinned', to='jmbo.ModelBase'),
),
migrations.AddField(
model_name='listing',
name='sites',
field=models.ManyToManyField(blank=True, help_text=b'Sites that this listing will appear on.', null=True, to='sites.Site'),
),
migrations.AddField(
model_name='listing',
name='tags',
field=models.ManyToManyField(blank=True, help_text=b'Tags for which to collect items.', null=True, related_name='listing_tags', to='category.Tag'),
),
]
|
[
"hedleyroos@gmail.com"
] |
hedleyroos@gmail.com
|
339abc4f028a0a65d15b39f546ab2a0fa4beafd3
|
58e5e46b7e213e55f0f6e2a003b0a4ecfb261673
|
/filtered_contenttypes/fields.py
|
9bf1d2a7db8be61de7e3fa94d65a104ae86a4352
|
[
"MIT"
] |
permissive
|
pombredanne/djorm-ext-filtered-contenttypes
|
3b334d89c08c74b898b8e27ae82581cbbd59013d
|
ad15e35d0242259182d72db8ae61455b2bc833fa
|
refs/heads/master
| 2021-01-17T22:59:42.608934
| 2014-10-26T09:02:37
| 2014-10-26T09:02:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,677
|
py
|
# -*- encoding: utf-8 -*-
from django.contrib.contenttypes.fields import GenericForeignKey
from django.db.models import Lookup
from django.contrib.contenttypes.models import ContentType
from django.db.models.lookups import RegisterLookupMixin
from django.db.models.query import QuerySet
from django.utils.itercompat import is_iterable
from django.db import models
class FilteredGenericForeignKeyFilteringException(Exception):
pass
class FilteredGenericForeignKey(RegisterLookupMixin, GenericForeignKey):
"""This is a GenericForeignKeyField, that can be used to perform
filtering in Django ORM.
"""
def __init__(self, *args, **kw):
# The line below is needed to bypass this
# https://github.com/django/django/commit/572885729e028eae2f2b823ef87543b7c66bdb10
# thanks to MarkusH @ freenode for help
self.attname = self.related = '(this is a hack)'
# this is needed when filtering(this__contains=x, this__not_contains=y)
self.null = False
GenericForeignKey.__init__(self, *args, **kw)
def get_prep_lookup(self, lookup_name, rhs):
"""
Perform preliminary non-db specific lookup checks and conversions
"""
if lookup_name == 'exact':
if not isinstance(rhs, models.Model):
raise FilteredGenericForeignKeyFilteringException(
"For exact lookup, please pass a single Model instance.")
elif lookup_name in ['in', 'in_raw']:
if type(rhs) == QuerySet:
return rhs, None
if not is_iterable(rhs):
raise FilteredGenericForeignKeyFilteringException(
"For 'in' lookup, please pass an iterable or a QuerySet.")
else:
raise FilteredGenericForeignKeyFilteringException(
"Lookup %s not supported." % lookup_name)
return rhs, None
def get_db_prep_lookup(self, lookup_name, param, db, prepared, **kw):
rhs, _ignore = param
if lookup_name == 'exact':
ct_id = ContentType.objects.get_for_model(rhs).pk
return "(%s, %s)", (ct_id, rhs.pk)
elif lookup_name == 'in':
if isinstance(rhs, QuerySet):
# QuerSet was passed. Don't fetch its items. Use server-side
# subselect, which will be way faster. Get the content_type_id
# from django_content_type table.
compiler = rhs.query.get_compiler(connection=db)
compiled_query, compiled_args = compiler.as_sql()
query = """
SELECT
%(django_content_type_db_table)s.id AS content_type_id,
U0.id AS object_id
FROM
%(django_content_type_db_table)s,
(%(compiled_query)s) U0
WHERE
%(django_content_type_db_table)s.model = '%(model)s' AND
%(django_content_type_db_table)s.app_label = '%(app_label)s'
""" % dict(
django_content_type_db_table=ContentType._meta.db_table,
compiled_query=compiled_query,
model=rhs.model._meta.model_name,
app_label=rhs.model._meta.app_label)
return query, compiled_args
if is_iterable(rhs):
buf = []
for elem in rhs:
if isinstance(elem, models.Model):
buf.append((ContentType.objects.get_for_model(elem).pk, elem.pk))
else:
raise FilteredGenericForeignKeyFilteringException(
"Unknown type: %r" % type(elem))
query = ",".join(["%s"] * len(buf))
return query, buf
raise NotImplementedError("You passed %r and I don't know what to do with it" % rhs)
elif lookup_name == 'in_raw':
if isinstance(rhs, QuerySet):
# Use the passed QuerSet as a 'raw' one - it selects 2 fields
# first is content_type_id, second is object_id
compiler = rhs.query.get_compiler(connection=db)
compiled_query, compiled_args = compiler.as_sql()
# XXX: HACK AHEAD. Perhaps there is a better way to change
# select, preferably by using extra. I need to have the proper
# order of columns AND the proper count of columns, which
# is no more, than two.
#
# Currently, even if I use "only", I have no control over
# the order of columns. And, if I use
# .extra(select=SortedDict([...]), I get the proper order
# of columns and the primary key and other two columns even
# if I did not specify them in the query.
#
# So, for now, let's split the query on first "FROM" and change
# the beginning part with my own SELECT:
compiled_query = "SELECT content_type_id, object_id FROM " + \
compiled_query.split("FROM", 1)[1]
return compiled_query, compiled_args
if is_iterable(rhs):
buf = []
for elem in rhs:
if isinstance(elem, tuple) and type(elem[0]) == int and type(elem[1]) == int and len(elem)==2:
buf.append(elem)
else:
raise FilteredGenericForeignKeyFilteringException(
"If you pass a list of tuples as an argument, every tuple "
"must have exeactly 2 elements and they must be integers")
query = ",".join(["%s"] * len(buf))
return query, buf
raise NotImplementedError("You passed %r and I don't know what to do with it" % rhs)
else:
raise FilteredGenericForeignKeyFilteringException(
"Unsupported lookup_name: %r" % lookup_name)
pass
class FilteredGenericForeignKeyLookup(Lookup):
def as_sql(self, qn, connection):
ct_attname = self.lhs.output_field.model._meta.get_field(
self.lhs.output_field.ct_field).get_attname()
lhs = '(%s."%s", %s."%s")' % (
self.lhs.alias,
self.lhs.output_field.ct_field + "_id",
self.lhs.alias,
self.lhs.output_field.fk_field)
rhs, rhs_params = self.process_rhs(qn, connection)
# in
subquery, args = rhs_params
return "%s %s (%s)" % (lhs, self.operator, subquery), args
class FilteredGenericForeignKeyLookup_Exact(FilteredGenericForeignKeyLookup):
lookup_name = 'exact'
operator = '='
class FilteredGenericForeignKeyLookup_In(FilteredGenericForeignKeyLookup):
lookup_name = 'in'
operator = 'in'
class FilteredGenericForeignKeyLookup_In_Raw(FilteredGenericForeignKeyLookup):
"""
in_raw lookup will not try to get the content_type_id of the right hand
side QuerySet of the lookup, but instead it will re-write the query, so
it selects columns named 'content_type_id' and 'object_id' from the right-
hand side QuerySet. See comments in
FilteredGenericForeignKeyLookup.get_db_prep
"""
lookup_name = 'in_raw'
operator = 'in'
FilteredGenericForeignKey.register_lookup(
FilteredGenericForeignKeyLookup_Exact)
FilteredGenericForeignKey.register_lookup(
FilteredGenericForeignKeyLookup_In)
FilteredGenericForeignKey.register_lookup(
FilteredGenericForeignKeyLookup_In_Raw)
|
[
"michal.dtz@gmail.com"
] |
michal.dtz@gmail.com
|
ff6af04706daacf32c78ad33e94466af43186202
|
61cb083d5d3c70d3ea1c66e4bae341c20c8e38b4
|
/accounts/migrations/0007_auto_20190827_0050.py
|
89703acd856462b1ddf5729e73c3da0661535cb8
|
[] |
no_license
|
Fabricourt/villacare
|
1c60aab2f76096d1ae4b773508fe6eb437763555
|
983e512eace01b4dee23c98cc54fcb7fdbd90987
|
refs/heads/master
| 2022-11-28T11:15:18.530739
| 2019-09-05T03:30:03
| 2019-09-05T03:30:03
| 205,774,903
| 0
| 0
| null | 2022-11-22T03:16:03
| 2019-09-02T04:09:48
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,017
|
py
|
# Generated by Django 2.1.5 on 2019-08-26 21:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0006_auto_20190827_0035'),
]
operations = [
migrations.RenameField(
model_name='account',
old_name='total_payments_made',
new_name='propertys_payments_made',
),
migrations.RenameField(
model_name='property_payment',
old_name='balance',
new_name='total_balance',
),
migrations.AlterField(
model_name='buyer',
name='property_bought',
field=models.ManyToManyField(help_text='all properties bought by buyer', to='accounts.Property_id'),
),
migrations.AlterField(
model_name='property_payment',
name='payment_expected',
field=models.IntegerField(help_text='total payment expected from all properties bought', null=True),
),
]
|
[
"mfalme2030@gmail.com"
] |
mfalme2030@gmail.com
|
3659bd20dff05e3843f0e513a207c6e3ef09f4c6
|
e26a5d55a219da7a5fb2e691a7fe2adb1683543d
|
/tensortrade/features/feature_pipeline.py
|
e158740f1584f1ea90839b7c4e01681617605b4d
|
[
"Apache-2.0"
] |
permissive
|
MildlyOffensive/tensortrade
|
e02dbcbec07597292de91eaa375b8af9ae305194
|
9fe223c1ff0d58428e9e88d4d287570e24ee1ae4
|
refs/heads/master
| 2022-04-06T21:48:35.380231
| 2019-10-24T18:17:02
| 2019-10-24T18:17:02
| 282,754,798
| 1
| 1
|
Apache-2.0
| 2020-07-27T00:05:44
| 2020-07-27T00:05:43
| null |
UTF-8
|
Python
| false
| false
| 3,842
|
py
|
# Copyright 2019 The TensorTrade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
from gym import Space
from typing import List, Union, Callable
from .feature_transformer import FeatureTransformer
DTypeString = Union[type, str]
class FeaturePipeline(object):
"""An pipeline for transforming observation data frames into features for learning."""
def __init__(self, steps: List[FeatureTransformer], **kwargs):
"""
Arguments:
dtype: The `dtype` elements in the pipeline should be cast to.
"""
self._steps = steps
self._dtype: DTypeString = kwargs.get('dtype', np.float16)
@property
def steps(self) -> List[FeatureTransformer]:
"""A list of feature transformations to apply to observations."""
return self._steps
@steps.setter
def steps(self, steps: List[FeatureTransformer]):
self._steps = steps
@property
def dtype(self) -> DTypeString:
"""The `dtype` that elements in the pipeline should be input and output as."""
return self._dtype
@dtype.setter
def dtype(self, dtype: DTypeString):
self._dtype = dtype
def reset(self):
"""Reset all transformers within the feature pipeline."""
for transformer in self._steps:
transformer.reset()
def transform_space(self, input_space: Space, column_names: List[str]) -> Space:
"""Get the transformed output space for a given input space.
Args:
input_space: A `gym.Space` matching the shape of the pipeline's input.
column_names: A list of all column names in the input data frame.
Returns:
A `gym.Space` matching the shape of the pipeline's output.
"""
output_space = input_space
for transformer in self._steps:
output_space = transformer.transform_space(output_space, column_names)
return output_space
def _transform(self, observations: pd.DataFrame, input_space: Space) -> pd.DataFrame:
"""Utility method for transforming observations via a list of `FeatureTransformer` objects."""
for transformer in self._steps:
observations = transformer.transform(observations, input_space)
return observations
def transform(self, observation: pd.DataFrame, input_space: Space) -> pd.DataFrame:
"""Apply the pipeline of feature transformations to an observation frame.
Arguments:
observation: A `pandas.DataFrame` corresponding to an observation within a `TradingEnvironment`.
input_space: A `gym.Space` matching the shape of the pipeline's input.
Returns:
A `pandas.DataFrame` of features corresponding to an input oversvation.
Raises:
ValueError: In the case that an invalid observation frame has been input.
"""
features = self._transform(observation, input_space)
if not isinstance(features, pd.DataFrame):
raise ValueError("A FeaturePipeline must transform a pandas.DataFrame into another pandas.DataFrame.\n \
Expected return type: {} `\n \
Actual return type: {}.".format(type(pd.DataFrame([])), type(features)))
return features
|
[
"adamjking3@gmail.com"
] |
adamjking3@gmail.com
|
1f48ce3aa62ece3db3e57b1134cb2ab81a563424
|
1c655b13e159ec064968d74297fb3e994a1fa3f7
|
/0x00-python_variable_annotations/main/100-main.py
|
3163850ec41e7e25a085e95403fb340a6a58ad05
|
[] |
no_license
|
Faith-qa/alx-backend-python
|
886672663a8206afa47ce877331deef97802512c
|
1eb5ad2cbc2d2bfa4dc911901417797d160d3645
|
refs/heads/master
| 2023-08-16T07:21:55.127802
| 2021-09-21T09:50:49
| 2021-09-21T09:50:49
| 388,657,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
#!/usr/bin/env python3
safe_first_element = __import__('100-safe_first_element').safe_first_element
print(safe_first_element.__annotations__)
|
[
"atienofaith12@gmail.com"
] |
atienofaith12@gmail.com
|
a943d4d0152eac60671d88862ad057df96206c55
|
9c63f6d39a6085674ab42d1488476d0299f39ec9
|
/Python/LC_Minesweeper.py
|
7fce4305e14c974fcd536afc78f77ee3cbc38f62
|
[] |
no_license
|
vijayjag-repo/LeetCode
|
2237e3117e7e902f5ac5c02bfb5fbe45af7242d4
|
0a5f47e272f6ba31e3f0ff4d78bf6e3f4063c789
|
refs/heads/master
| 2022-11-14T17:46:10.847858
| 2022-11-08T10:28:30
| 2022-11-08T10:28:30
| 163,639,628
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,589
|
py
|
class Solution(object):
def updateBoard(self, board, click):
"""
:type board: List[List[str]]
:type click: List[int]
:rtype: List[List[str]]
Approach:
You're given click, which corresponds to a particular coordinate within the board.
If this point is a mine, you change the board to 'X' and return the board.
Else,
This point could be an unrevealed square.
The first thing that you do is, check if this point has any mines nearby.
If it does not have any mines nearby, you gotta call its neighbors and set the current point to 'B'.
Else,
you set the current point to whatever the mine value is and do not call dfs.
Finally after first dfs calls ends, you return the state of the board.
"""
neighbors = [(-1,-1),(-1,0),(-1,1),(0,-1),(0,1),(1,-1),(1,0),(1,1)]
def dfs(x,y):
mine = 0
for (i,j) in neighbors:
if(0<=x+i<len(board) and 0<=y+j<len(board[0]) and board[x+i][y+j]=='M'):
mine+=1
if(mine>0):
board[x][y] = str(mine)
else:
board[x][y] = 'B'
for (i,j) in neighbors:
if(0<=x+i<len(board) and 0<=y+j<len(board[0]) and board[x+i][y+j]=='E'):
dfs(x+i,y+j)
x,y = click
if(board[x][y]=='M'):
board[x][y] = 'X'
else:
dfs(x,y)
return board
|
[
"noreply@github.com"
] |
vijayjag-repo.noreply@github.com
|
6c32e35bd711b0391725f3959ab4d3aa654ee111
|
7117862ecbccf811b8c9f476d328c0c91db15b48
|
/Config/local_settings.py
|
6e9e9080547256775543884280c4888d7dfb2d77
|
[] |
no_license
|
turamant/Profile
|
330e968ffe797fd7759269a3c804b82388a5838e
|
a7c32f22b11e93f1264aaa9d7b72aaf497acff65
|
refs/heads/main
| 2023-04-08T15:50:40.052101
| 2021-04-26T19:50:55
| 2021-04-26T19:50:55
| 361,712,435
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-gy^s)i8g=3i46ypvnrk-j_x+@s4^y&1*fd%$-2$dnc=#4(vxek'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
|
[
"tur1amant@gmail.com"
] |
tur1amant@gmail.com
|
ec22a156d3c9287499019a6a37d7ad52401e6b2f
|
cb4902406f4271394c352bfc49ac4a0e8931d5dc
|
/backend/home/migrations/0002_load_initial_data.py
|
3c22b9d08248db69bd7856d4286b1ed72cc5dd2c
|
[] |
no_license
|
crowdbotics-apps/yourtb-21307
|
492e2eec85e8a562d71f67a7c99ed64ba915ce5e
|
ce93c74f6bb21bceca6abdca6bb4c4e46bd5e7cb
|
refs/heads/master
| 2022-12-29T23:02:15.003469
| 2020-10-09T19:27:13
| 2020-10-09T19:27:13
| 302,735,020
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,278
|
py
|
from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "Yourtb"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">Yourtb</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "yourtb-21307.botics.co"
site_params = {
"name": "Yourtb",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
f4342daeb939ca4561554ef1dbb83de6b0ace017
|
f6c6085c34ac6e1b494ff039cd0173b47cc9c6c4
|
/byceps/services/user_badge/transfer/models.py
|
42e439d39bb717540a0ec59220198242e23a10c4
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
FakoorCo/byceps
|
971042f598a12c00465b0c3a714f4ac7dbdc9d5b
|
11290b154b83f5ac3a9530fb6cd752b3d7a3989b
|
refs/heads/main
| 2023-01-23T18:53:59.930267
| 2020-11-30T01:09:20
| 2020-11-30T01:09:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 822
|
py
|
"""
byceps.services.user_badge.transfer.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from dataclasses import dataclass
from datetime import datetime
from typing import NewType
from uuid import UUID
from ....typing import BrandID, UserID
BadgeID = NewType('BadgeID', UUID)
@dataclass(frozen=True)
class Badge:
id: BadgeID
slug: str
label: str
description: str
image_filename: str
image_url_path: str
brand_id: BrandID
featured: bool
@dataclass(frozen=True)
class BadgeAwarding:
id: UUID
badge_id: BadgeID
user_id: UserID
awarded_at: datetime
@dataclass(frozen=True)
class QuantifiedBadgeAwarding:
badge_id: BadgeID
user_id: UserID
quantity: int
|
[
"homework@nwsnet.de"
] |
homework@nwsnet.de
|
348940e392a4bfa8cbd62fb6a434869f9e9f56d9
|
feba0822906bc306aacd1ba07254f9d85f7195db
|
/week 5/MoneyChange.py
|
391778b6abe4d8c365e88c4d8f9df1e2c14a983f
|
[] |
no_license
|
Pratyaksh7/Algorithmic-Toolbox
|
0d535d652a5619c8d1c1e8b602ab8b1cf4e12eb7
|
2153a37426a510e9232d3b3cdd01a2d624eee83f
|
refs/heads/master
| 2022-12-03T21:57:53.699507
| 2020-08-24T07:49:59
| 2020-08-24T07:49:59
| 281,179,714
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
# using python3
def minCoins(n, Coins):
maxCoin = max(Coins)
dic = {1: 1, 2: 2, 3: 1, 4: 1}
if n > 4:
if n % maxCoin == 0:
print(n // 4)
elif n % maxCoin == 1 or n % maxCoin == 2 or n % maxCoin == 3:
print((n // 4) + 1)
else:
print(dic[n])
n = int(input())
Coins = [1, 3, 4]
minCoins(n, Coins)
|
[
"pratyakshgupta7@gmail.com"
] |
pratyakshgupta7@gmail.com
|
d8ac7dbd544e75aa6e93408cab924c49ff306ac1
|
d491c11dc87a955c95a4e14a2feea19fe1fa859e
|
/python/Arcade/Python/P15FeebackReview.py
|
77f7ef9557afa7896637dc067567d08a73208df9
|
[] |
no_license
|
Vagacoder/Codesignal
|
0f6ea791b25716cad7c46ab7df73679fb18a9882
|
87eaf44555603dd5b8cf221fbcbae5421ae20727
|
refs/heads/master
| 2023-07-16T04:18:44.780821
| 2021-08-15T18:41:16
| 2021-08-15T18:41:16
| 294,745,195
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,975
|
py
|
#
# * Python 15, Feedback Review
# * Easy
# * You've launched a revolutionary service not long ago, and were busy improving
# * it for the last couple of months. When you finally decided that the service
# * is perfect, you remembered that you created a feedbacks page long time ago,
# * which you never checked out since then. Now that you have nothing left to do,
# * you would like to have a look at what the community thinks of your service.
# * Unfortunately it looks like the feedbacks page is far from perfect: each
# * feedback is displayed as a one-line string, and if it's too long there's no
# * way to see what it is about. Naturally, this horrible bug should be fixed.
# * Implement a function that, given a feedback and the size of the screen, splits
# * the feedback into lines so that:
# each token (i.e. sequence of non-whitespace characters) belongs to one of
# the lines entirely;
# each line is at most size characters long;
# no line has trailing or leading spaces;
# each line should have the maximum possible length, assuming that all lines
# before it were also the longest possible.
# * Example
# For feedback = "This is an example feedback" and size = 8,
# the output should be
# feedbackReview(feedback, size) = ["This is",
# "an",
# "example",
# "feedback"]
# * Input/Output
# [execution time limit] 4 seconds (py3)
# [input] string feedback
# A string containing a feedback. Each feedback is guaranteed to contain only letters, punctuation marks and whitespace characters (' ').
# Guaranteed constraints:
# 0 ≤ feedback.length ≤ 100.
# [input] integer size
# The size of the screen. It is guaranteed that it is not smaller than the longest token in the feedback.
# Guaranteed constraints:
# 1 ≤ size ≤ 100.
# [output] array.string
# Lines from the feedback, split as described above.
#%%
# * Solution 1
# ! Hard to use regex, NOT solved yet
import re
def feedbackReview1(feedback:str, size:int)->list:
pattern = '(?<=(.{{{}}}))\w+'.format(4)
print(pattern)
return re.split(pattern, feedback)
# * Solution 2
# ! Easy one using textwrap
import textwrap
def feedbackReview2(feedback:str, size: int)->list:
return textwrap.wrap(feedback, size)
# * Solution 3
# !! Awesome
def feedbackReview3(feedback:str, size:int)->list:
return re.findall('(?:\s|^)(\S(?:.{0,%d}\S)?)(?=\s|$)' % (size-2),feedback)
# * Solution 4
# !! Awesome too
def feedbackReview4(feedback:str, size:int)->list:
return [feedback[x:y].strip() for x,y in [(m.start(),m.end()) for m in re.finditer('(.{1,%d}$)|(.{1,%d} )'%(size,size), feedback)]]
a1 = 'This is an example feedback'
a2 = 8
e1 = ["This is", "an", "example", "feedback"]
r1 = feedbackReview3(a1, a2)
# print('Expected:')
# print(e1)
print('Result:')
print(r1)
# %%
|
[
"qiruihu@gmail.com"
] |
qiruihu@gmail.com
|
147027e78c585ffbc62132cb0116bc6522c71e71
|
8a5669848a675c367e6a58d57dd25f15bf8bb2a5
|
/examples/classification/01_functional_style_metric.py
|
5a90f22f4be00cb4891772d18054d28865e4af4d
|
[
"GPL-1.0-or-later",
"GPL-3.0-only"
] |
permissive
|
thieu1995/permetrics
|
a6c74150e6f03d2e62ad762dc7fe1fa7d03c5d84
|
85b0f3a750bdebc5ade0422e6be7343d90d3cb98
|
refs/heads/master
| 2023-08-17T14:00:03.705206
| 2023-08-12T15:23:53
| 2023-08-12T15:23:53
| 280,617,738
| 30
| 8
|
Apache-2.0
| 2022-06-27T04:03:30
| 2020-07-18T08:47:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,692
|
py
|
#!/usr/bin/env python
# Created by "Thieu" at 11:36, 25/03/2022 ----------%
# Email: nguyenthieu2102@gmail.com %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%
## This is traditional way to call a specific metric you want to use.
## Everytime, you want to use a function, you need to pass y_true and y_pred
## 1. Import packages, classes
## 2. Create object
## 3. From object call function and use
import numpy as np
from permetrics.classification import ClassificationMetric
y_true = [0, 1, 0, 0, 1, 0]
y_pred = [0, 1, 0, 0, 0, 1]
evaluator = ClassificationMetric()
## 3.1 Call specific function inside object, each function has 2 names like below
ps1 = evaluator.precision_score(y_true, y_pred, decimal=5)
ps2 = evaluator.PS(y_true, y_pred)
print(f"Precision: {ps1}, {ps2}")
recall = evaluator.recall_score(y_true, y_pred)
accuracy = evaluator.accuracy_score(y_true, y_pred)
print(f"recall: {recall}, accuracy: {accuracy}")
# CM = confusion_matrix
# PS = precision_score
# NPV = negative_predictive_value
# RS = recall_score
# AS = accuracy_score
# F1S = f1_score
# F2S = f2_score
# FBS = fbeta_score
# SS = specificity_score
# MCC = matthews_correlation_coefficient
# HS = hamming_score
# LS = lift_score
# CKS = cohen_kappa_score
# JSI = JSC = jaccard_similarity_coefficient = jaccard_similarity_index
# GMS = g_mean_score
# GINI = gini_index
# ROC = AUC = RAS = roc_auc_score
|
[
"nguyenthieu2102@gmail.com"
] |
nguyenthieu2102@gmail.com
|
8bfae8d09d2c6cb4ff1a5cce566dad3e7cbe52ed
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/pybites/intermediate/259_v2/test_reverse_complement.py
|
8306e1e9cc6d840d6832bd8d3e4d29db696d1bb5
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 7,946
|
py
|
# _______ p__
#
# _______ ?
#
# # Table copied from
# # http://arep.med.harvard.edu/labgc/adnan/projects/Utilities/revcomp.html
# # Note that this table is different from the simple table in the template
# # This table includes additional rules which are used in more advanced
# # reverse complement generators. Please ensure that your functions work
# # with both tables (complementary base always in last column)
#
# COMPLEMENTS_STR """# Full table with ambigous bases
# Base Name Bases Represented Complementary Base
# A Adenine A T
# T Thymidine T A
# U Uridine(RNA only) U A
# G Guanidine G C
# C Cytidine C G
# Y pYrimidine C T R
# R puRine A G Y
# S Strong(3Hbonds) G C S
# W Weak(2Hbonds) A T W
# K Keto T/U G M
# M aMino A C K
# B not A C G T V
# D not C A G T H
# H not G A C T D
# V not T/U A C G B
# N Unknown A C G T N
# """
#
# # ############################################################################
# # Use default table from bite template and test functions
# # ############################################################################
#
# ACGT_BASES_ONLY
# "ACGT",
# "TTTAAAGGGCCC",
# ("TACTGGTACTAATGCCTAAGTGACCGGCAGCAAAATGTTGCAGCACTGACCCTTTTGGGACCGCAATGGGT"
# "TGAATTAGCGGAACGTCGTGTAGGGGGAAAGCGGTCGACCGCATTATCGCTTCTCCGGGCGTGGCTAGCGG"
# "GAAGGGTTGTCAACGCGTCGGACTTACCGCTTACCGCGAAACGGACCAAAGGCCGTGGTCTTCGCCACGGC"
# "CTTTCGACCGACCTCACGCTAGAAGGA"),
#
# MIXED_CASE_DNA
# "AcgT",
# "TTTaaaGGGCCc",
# ("TACtGGTACTAATGCCtAAGtGaccggcagCAAAATGTTGCAGCACTGACCCTTTTGGGACCGCAATGGGT"
# "TGAATTAGCGGAACGTCGTGTAGGGGGAAAgcgGTCGACCGCATTATCGCTTCTCCGGGCGTGGCTAGCGG"
# "GAAGGGTTGTCAACGCGTCGGACTTACCGCttaCCGCGAAACGGAccAAAGGCCGTGGTCTTCGCCACGGC"
# "CTTtcGACCGACCTCACGCTAGAAGGA"),
#
# DIRTY_DNA
# "335>\nA c g T",
# ">\nT-TT-AAA- GGGCCC!!!",
# ("TAC TGG TAC TAA TGC CTA AGT GAC CGG CAG CAA AAT GTT GCA GCA CTG ACC CTT"
# " TTG GGA CCG CAA TGG GTT GAA TTA GCG GAA CGT CGT GTA GGG GGA AAG CGG TC"
# "G ACC GCA TTA TCG CTT CTC CGG GCG TGG CTA GCG GGA AGG GTT GTC AAC GCG T"
# "CG GAC TTA CCG CTT ACC GCG AAA CGG ACC AAA GGC CGT GGT CTT CGC CAC GGC "
# "CTT TCG ACC GAC CTC ACG CTA GAA GGA"),
#
#
# CORRECT_ANSWERS_COMPLEMENTED
# "TGCA",
# "AAATTTCCCGGG",
# ("ATGACCATGATTACGGATTCACTGGCCGTCGTTTTACAACGTCGTGACTGGGAAAACCCTGGCGTTACCCA"
# "ACTTAATCGCCTTGCAGCACATCCCCCTTTCGCCAGCTGGCGTAATAGCGAAGAGGCCCGCACCGATCGCC"
# "CTTCCCAACAGTTGCGCAGCCTGAATGGCGAATGGCGCTTTGCCTGGTTTCCGGCACCAGAAGCGGTGCCG"
# "GAAAGCTGGCTGGAGTGCGATCTTCCT"),
#
# CORRECT_ANSWERS_REVERSE
# "TGCA",
# "CCCGGGAAATTT",
# ("AGGAAGATCGCACTCCAGCCAGCTTTCCGGCACCGCTTCTGGTGCCGGAAACCAGGCAAAGCGCCATTCGC"
# "CATTCAGGCTGCGCAACTGTTGGGAAGGGCGATCGGTGCGGGCCTCTTCGCTATTACGCCAGCTGGCGAAA"
# "GGGGGATGTGCTGCAAGGCGATTAAGTTGGGTAACGCCAGGGTTTTCCCAGTCACGACGTTGTAAAACGAC"
# "GGCCAGTGAATCCGTAATCATGGTCAT"),
#
# CORRECT_ANSWERS_REVERSE_COMPLEMENT
# "ACGT",
# "GGGCCCTTTAAA",
# ("TCCTTCTAGCGTGAGGTCGGTCGAAAGGCCGTGGCGAAGACCACGGCCTTTGGTCCGTTTCGCGGTAAGCG"
# "GTAAGTCCGACGCGTTGACAACCCTTCCCGCTAGCCACGCCCGGAGAAGCGATAATGCGGTCGACCGCTTT"
# "CCCCCTACACGACGTTCCGCTAATTCAACCCATTGCGGTCCCAAAAGGGTCAGTGCTGCAACATTTTGCTG"
# "CCGGTCACTTAGGCATTAGTACCAGTA"),
#
#
# # ############################################################################
# # Test complement function
# # ############################################################################
#
#
# ?p__.m__.p.
# "input_sequence,expected",
# z.. A.. C..
#
# ___ test_acgt_complement input_sequence e..
# ... r__.c.. ?.u.. __ e..
#
#
# ?p__.m__.p.
# "input_sequence,expected",
# z.. M.. C..
#
# ___ test_mixed_case_complement input_sequence e..
# ... r__.c.. ?.u.. __ e..
#
#
# ?p__.m__.p.
# "input_sequence,expected", z.. D.. C..
#
# ___ test_dirty_complement input_sequence e..
# ... r__.c.. ?.u.. __ e..
#
#
# # ############################################################################
# # Test reverse function
# # ############################################################################
#
#
# ?p__.m__.p.
# "input_sequence,expected", z.. A.. C..
#
# ___ test_acgt_reverse input_sequence e..
# ... r__.r.. ?.u.. __ e..
#
#
# ?p__.m__.p.
# "input_sequence,expected", z.. M.. C..
#
# ___ test_mixed_case_reverse input_sequence e..
# ... r__.r.. ?.u.. __ e..
#
#
# ?p__.m__.p.
# "input_sequence,expected", z.. D.. C..
#
# ___ test_dirty_reverse input_sequence e..
# ... r__.r.. ?.u.. __ e..
#
#
# # ############################################################################
# # Test reverse complement function
# # ############################################################################
#
#
# ?p__.m__.p.
# "input_sequence,expected",
# z.. A.. C..
#
# ___ test_acgt_reverse_complement input_sequence e..
# ...
# r__.r.. ?.u..
# __ e..
#
#
#
# ?p__.m__.p.
# "input_sequence,expected",
# z.. M.. C..
#
# ___ test_mixed_case_reverse_complement input_sequence e..
# ...
# r__.r.. ?.u..
# __ e..
#
#
#
# ?p__.m__.p.
# "input_sequence,expected",
# z.. D.. C..
#
# ___ test_dirty_reverse_complement input_sequence e..
# ...
# r__.r.. ?.u..
# __ e..
#
#
# # ############################################################################
# # Use more complex complement table
# # ############################################################################
#
#
# AMBIGOUS_DIRTY_DNA
# "AGB Vnc gRy Tvv V",
# ">\nT-TT-AAA-BDNNSSRYMNXXXX GGGCCC!!!",
# ("TAC WSA YBG KGK DVN YRS TGG TAC TAA TGC CTA AGT GAC CGG CAG CAA AAT GTT"
# " GCA GCA CTG ACC CTT TTG GGA CCG CAA TGG GTT GAA TTA GCG GAA CGT CGT GT"
# "A GGG GGA AAG CGG TCG ACC GCA TTA TCG CTT CTC CGG GCG TGG CTA GCG GGA A"
# "GG GTT GTC AAC GCG TCG GAC TTA CCG CTT ACC GCG AAA CGG ACC AAA GGC CGT "
# "GGT CTT CGC CAC GGC CTT TCG ACC GAC CTC ACG CTA GAA GGA"),
#
# CORRECT_ANSWER_AMBIGOUS_DNA_COMPLEMENT
# "TCVBNGCYRABBB",
# "AAATTTVHNNSSYRKNCCCGGG",
# ("ATGWSTRVCMCMHBNRYSACCATGATTACGGATTCACTGGCCGTCGTTTTACAACGTCGTGACTGGGAAAA"
# "CCCTGGCGTTACCCAACTTAATCGCCTTGCAGCACATCCCCCTTTCGCCAGCTGGCGTAATAGCGAAGAGG"
# "CCCGCACCGATCGCCCTTCCCAACAGTTGCGCAGCCTGAATGGCGAATGGCGCTTTGCCTGGTTTCCGGCA"
# "CCAGAAGCGGTGCCGGAAAGCTGGCTGGAGTGCGATCTTCCT"),
#
# CORRECT_ANSWER_AMBIGOUS_DNA_REVERSE
# "VVVTYRGCNVBGA",
# "CCCGGGNMYRSSNNDBAAATTT",
# ("AGGAAGATCGCACTCCAGCCAGCTTTCCGGCACCGCTTCTGGTGCCGGAAACCAGGCAAAGCGCCATTCGC"
# "CATTCAGGCTGCGCAACTGTTGGGAAGGGCGATCGGTGCGGGCCTCTTCGCTATTACGCCAGCTGGCGAAA"
# "GGGGGATGTGCTGCAAGGCGATTAAGTTGGGTAACGCCAGGGTTTTCCCAGTCACGACGTTGTAAAACGAC"
# "GGCCAGTGAATCCGTAATCATGGTSRYNVDKGKGBYASWCAT"),
#
# CORRECT_ANSWER_AMBIGOUS_DNA_REVERSE_COMPLEMENT
# "BBBARYCGNBVCT",
# "GGGCCCNKRYSSNNHVTTTAAA",
# ("TCCTTCTAGCGTGAGGTCGGTCGAAAGGCCGTGGCGAAGACCACGGCCTTTGGTCCGTTTCGCGGTAAGCGG"
# "TAAGTCCGACGCGTTGACAACCCTTCCCGCTAGCCACGCCCGGAGAAGCGATAATGCGGTCGACCGCTTTCC"
# "CCCTACACGACGTTCCGCTAATTCAACCCATTGCGGTCCCAAAAGGGTCAGTGCTGCAACATTTTGCTGCCG"
# "GTCACTTAGGCATTAGTACCASYRNBHMCMCVRTSWGTA"),
#
#
#
# # ############################################################################
# # Test reverse, complement and rev comp. function with new table
# # ############################################################################
#
#
# ?p__.m__.p.
# "input_sequence,expected",
# z.. A.. C..
#
# ___ test_acgt_complement_new_table input_sequence e..
# ...
# ? ? C__ .u..
# __ e..
#
#
#
# ?p__.m__.p.
# "input_sequence,expected",
# z..A.., C..
#
# ___ test_mixed_case_reverse_new_table input_sequence e..
# ...
# ? ? C...u..
# __ e..
#
#
#
# ?p__.m__.p.
# "input_sequence,expected",
# z.. A.., C..
#
# ___ test_dirty_reverse_complement_new_table input_sequence e..
# ...
# ?.r..
# ? C..
# .u..
# __ e..
#
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.