Dataset column schema:

| column | type | details |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, nullable |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1 |
| author_id | string | length 1–132 |
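Each record below lists these fields in order; the `content` field holds the raw source file. As a minimal sketch of working with such records, the helper below filters parsed rows by license and size (the `records` iterable and the 4 KB threshold are assumptions for illustration, not part of the dataset):

```python
def permissive_small_files(records, max_bytes=4096):
    """Yield (path, content) pairs for permissively licensed files under max_bytes."""
    for rec in records:
        if rec["license_type"] == "permissive" and rec["length_bytes"] <= max_bytes:
            yield rec["path"], rec["content"]
```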
b155c696a2a8dd91263093b7d99c2201926413c7
|
e558e99f3403b5931573789d88c2ad47fffe7362
|
/sum/two_sum.py
|
7a24bce29b647d3f00253a6d4ffa9dfa70458f70
|
[] |
no_license
|
gerrycfchang/leetcode-python
|
a8a3a408381288a34caada8ca68c47c7354459fd
|
7fa160362ebb58e7286b490012542baa2d51e5c9
|
refs/heads/master
| 2021-05-11T00:38:17.925831
| 2018-07-31T14:50:42
| 2018-07-31T14:50:42
| 118,306,858
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,122
|
py
|
'''
Given an array of integers, return indices of the two numbers such that they add up to a specific target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
'''
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
dic = {}
for x in range(len(nums)):
dic[nums[x]] = x
for x in range(len(nums)):
rest = target - nums[x]  # the complement we need to find
if rest in dic and dic[rest] != x:  # don't reuse the same element
return [x, dic[rest]]
def twoSumSol(self, nums, target):
from collections import Counter
c = Counter()
for i in range(len(nums)):
part = target - nums[i]
if part in c:
return [c[part], i]
else:
c[nums[i]] = i
return None
if __name__ == '__main__':
nums = [0, 16, 11, 3]
target = 3
test = Solution()
assert test.twoSumSol(nums, target) == [0,3]
|
[
"alfie.gerrycheung@gmail.com"
] |
alfie.gerrycheung@gmail.com
|
38367fd6306431bab28c7d9476eb7f23583717bf
|
1ee3dc4fa096d12e409af3a298ba01f5558c62b5
|
/ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/highlevelstream/udf/valuelist/valuelist.py
|
dbfa6ca0e26dadbdb5304e3fe9253effed894d09
|
[
"MIT"
] |
permissive
|
parthpower/ixnetwork_restpy
|
321e64a87be0a4d990276d26f43aca9cf4d43cc9
|
73fa29796a5178c707ee4e21d90ff4dad31cc1ed
|
refs/heads/master
| 2020-07-04T13:34:42.162458
| 2019-08-13T20:33:17
| 2019-08-13T20:33:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,049
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class ValueList(Base):
"""The ValueList class encapsulates a system managed valueList node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the ValueList property from a parent instance.
The internal properties list will be empty when the property is accessed and is populated from the server by using the find method.
"""
_SDM_NAME = 'valueList'
def __init__(self, parent):
super(ValueList, self).__init__(parent)
@property
def AvailableWidths(self):
"""Species all the possible widths available for a UDF in particular Type.
Returns:
list(str)
"""
return self._get_attribute('availableWidths')
@property
def StartValueList(self):
"""Specifies the starting value for a particular UDF.
Returns:
list(number)
"""
return self._get_attribute('startValueList')
@StartValueList.setter
def StartValueList(self, value):
self._set_attribute('startValueList', value)
@property
def Width(self):
"""Specifies the width of the UDF.
Returns:
str(16|24|32|8)
"""
return self._get_attribute('width')
@Width.setter
def Width(self, value):
self._set_attribute('width', value)
def update(self, StartValueList=None, Width=None):
"""Updates a child instance of valueList on the server.
Args:
StartValueList (list(number)): Specifies the starting value for a particular UDF.
Width (str(16|24|32|8)): Specifies the width of the UDF.
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
def find(self, AvailableWidths=None, StartValueList=None, Width=None):
"""Finds and retrieves valueList data from the server.
All named parameters support regex and can be used to selectively retrieve valueList data from the server.
By default the find method takes no parameters and will retrieve all valueList data from the server.
Args:
AvailableWidths (list(str)): Specifies all the possible widths available for a UDF in particular Type.
StartValueList (list(number)): Specifies the starting value for a particular UDF.
Width (str(16|24|32|8)): Specifies the width of the UDF.
Returns:
self: This instance with matching valueList data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._select(locals())
def read(self, href):
"""Retrieves a single instance of valueList data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the valueList data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
|
[
"srvc_cm_packages@keysight.com"
] |
srvc_cm_packages@keysight.com
|
0e128695b6d32a1534a11c72d93838c79e35da17
|
a89d5746ab0255a32558be21c33c2d028e9b7148
|
/数学基础/code/朴素贝叶斯/高斯朴素贝叶斯.py
|
4b876860ed9d51444155b18dc4a4af60d2f108a0
|
[] |
no_license
|
15110500442/pa-
|
9c4bf3b2e6906c4df1e609f65a58e00660f31fa7
|
561757e6f6f1e16deaa1b9e5af5ac78fed0e21f5
|
refs/heads/master
| 2020-04-13T01:56:20.208424
| 2018-12-24T11:39:11
| 2018-12-24T11:39:11
| 162,887,811
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
# Test data
import numpy as np
# import the Gaussian Naive Bayes classifier
from sklearn.naive_bayes import GaussianNB
# training set
features_train = np.array([[1,1],[1,2],[2,3],[2,3],[1,4],[2,4]])
labels_train = np.array([1,2,3,1,1,3])
# instantiate the classifier
clf = GaussianNB()
# fit() is the training step
clf.fit(features_train, labels_train)
# predict a single test sample
features_test = np.array([[1,3]])
pred = clf.predict(features_test)
print("Predicted class:\t", pred)
|
[
"597945045@qq.com"
] |
597945045@qq.com
|
f30b74b74f08df9126992b9926bc6a514aa82cac
|
3cd1246ff58f26329021f2d13caa62221c91d5a4
|
/testdata/python/stmt/with_.py
|
9fd016ae112182564286080a9cbcc9d114768021
|
[] |
no_license
|
mwkmwkmwk/unpyc
|
0929e15fb37599496930299d7ced0bf1bedd7e99
|
000fdaec159050c94b7ecf6ab57be3950676f778
|
refs/heads/master
| 2020-12-01T14:01:57.592806
| 2016-03-21T14:11:43
| 2016-03-21T14:12:01
| 230,650,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
from __future__ import with_statement
with a:
b
c
d
with a as b:
c
with a as (a, b):
c
d
while a:
with b as c:
continue
|
[
"koriakin@0x04.net"
] |
koriakin@0x04.net
|
5dd5656c70b3d3fb0041c28be639da84f8a05701
|
7bb37821b54c2ffe0dc8cbf2a70bd20c932fb515
|
/users/migrations/0006_auto_20190812_1022.py
|
ae4141e0132bfd3748bbf0be56a5be60ae0212fd
|
[] |
no_license
|
andrewhstead/dating-site
|
6dfed04404fa1ea03594ff08a1d3aa31fe07b47c
|
bc81ade529ab916093ba80ab009d03a00e1dfab0
|
refs/heads/master
| 2020-07-02T13:04:02.878187
| 2020-05-07T22:23:09
| 2020-05-07T22:23:09
| 176,342,530
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
# Generated by Django 2.2.4 on 2019-08-12 09:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0005_auto_20190811_2134'),
]
operations = [
migrations.AlterField(
model_name='user',
name='intro',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
|
[
"andrew@andrewstead.co.uk"
] |
andrew@andrewstead.co.uk
|
1c0396241c5678129c6a9423cdd169a9fcdbee83
|
de213b73f703fb8f285bc8cf15e388cc2f98898f
|
/venv/bin/Peg.py
|
9586b955a31d199a83f0a27a6c4cf33e702d3049
|
[] |
no_license
|
adampehrson/Kattis
|
18de025a6a569a46c54cc85c996eec0b55c9f74b
|
a04922caa356f8113fe30a523f3a148d458a6132
|
refs/heads/main
| 2023-07-10T02:53:29.782854
| 2021-08-14T10:44:30
| 2021-08-14T10:44:30
| 395,948,382
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 783
|
py
|
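# checkmoves: for the empty square at (posx, posy), count how many adjacent
# pegs ('o') have a second peg directly behind them in the same direction,
# i.e. how many legal jumps could land in this hole.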
def checkmoves(karta, posx, posy):
moves = 0
if karta[posy][posx - 1] == 'o':
if karta[posy][posx - 2] == 'o':
moves += 1
if karta[posy][posx + 1] == 'o':
if karta[posy][posx + 2] == 'o':
moves += 1
if karta[posy - 1][posx] == 'o':
if karta[posy - 2][posx] == 'o':
moves += 1
if karta[posy + 1][posx] == 'o':
if karta[posy + 2][posx] == 'o':
moves += 1
return moves
karta = []
karta.append(' ' * 9)  # top padding row: 9 blanks framing the 7x7 board
i = 0
while i < 7:
karta.append(' ' + input() + ' ')
i += 1
karta.append(' ' * 9)  # bottom padding row
moves = 0
i = 1
while i < 8:
e = 0
while e < 8:
if karta[i][e] == '.':
moves = moves + checkmoves(karta, e, i)
e += 1
i += 1
print(moves)
|
[
"85373641+adampehrson@users.noreply.github.com"
] |
85373641+adampehrson@users.noreply.github.com
|
a2f18034289b7450eea0d77004a2c70a1f3c0571
|
b8e9dd6fd8f8b691cba5a3af2388467bcf6c90bb
|
/samples/openapi3/client/3_0_3_unit_test/python-experimental/unit_test_api/paths/response_body_post_not_more_complex_schema_response_body_for_content_types/post.py
|
c63a09de280ebef09cc33bb720ce32b8c0316ad4
|
[
"Apache-2.0"
] |
permissive
|
FallenRiteMonk/openapi-generator
|
f8b98940219eecf14dc76dced4b0fbd394522aa3
|
b6576d11733ecad6fa4a0a616e1a06d502a771b7
|
refs/heads/master
| 2023-03-16T05:23:36.501909
| 2022-09-02T01:46:56
| 2022-09-02T01:46:56
| 164,609,299
| 0
| 0
|
Apache-2.0
| 2019-01-08T09:08:56
| 2019-01-08T09:08:56
| null |
UTF-8
|
Python
| false
| false
| 7,669
|
py
|
# coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import urllib3
from urllib3._collections import HTTPHeaderDict
from unit_test_api import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from unit_test_api import schemas # noqa: F401
from . import path
class SchemaFor200ResponseBodyApplicationJson(
schemas.ComposedSchema,
):
class MetaOapg:
additional_properties = schemas.AnyTypeSchema
class not_schema(
schemas.DictSchema
):
class MetaOapg:
class properties:
foo = schemas.StrSchema
__annotations__ = {
"foo": foo,
}
additional_properties = schemas.AnyTypeSchema
foo: typing.Union[MetaOapg.properties.foo, schemas.Unset]
@typing.overload
def __getitem__(self, name: typing.Literal["foo"]) -> typing.Union[MetaOapg.properties.foo, schemas.Unset]: ...
@typing.overload
def __getitem__(self, name: str) -> typing.Union[MetaOapg.additional_properties, schemas.Unset]: ...
def __getitem__(self, name: typing.Union[str, typing.Literal["foo"], ]):
# dict_instance[name] accessor
if not hasattr(self.MetaOapg, 'properties') or name not in self.MetaOapg.properties.__annotations__:
return super().__getitem__(name)
try:
return super().__getitem__(name)
except KeyError:
return schemas.unset
def __new__(
cls,
*args: typing.Union[dict, frozendict.frozendict, ],
foo: typing.Union[MetaOapg.properties.foo, str, schemas.Unset] = schemas.unset,
_configuration: typing.Optional[schemas.Configuration] = None,
**kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes, ],
) -> 'not_schema':
return super().__new__(
cls,
*args,
foo=foo,
_configuration=_configuration,
**kwargs,
)
def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties:
# dict_instance[name] accessor
if not hasattr(self.MetaOapg, 'properties') or name not in self.MetaOapg.properties.__annotations__:
return super().__getitem__(name)
try:
return super().__getitem__(name)
except KeyError:
return schemas.unset
def __new__(
cls,
*args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes, ],
_configuration: typing.Optional[schemas.Configuration] = None,
**kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes, ],
) -> 'SchemaFor200ResponseBodyApplicationJson':
return super().__new__(
cls,
*args,
_configuration=_configuration,
**kwargs,
)
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor200ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_200 = api_client.OpenApiResponse(
response_cls=ApiResponseFor200,
content={
'application/json': api_client.MediaType(
schema=SchemaFor200ResponseBodyApplicationJson),
},
)
_status_code_to_response = {
'200': _response_for_200,
}
_all_accept_content_types = (
'application/json',
)
class BaseApi(api_client.Api):
def _post_not_more_complex_schema_response_body_for_content_types_oapg(
self: api_client.Api,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
"""
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
used_path = path.value
_headers = HTTPHeaderDict()
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
response = self.api_client.call_api(
resource_path=used_path,
method='post'.upper(),
headers=_headers,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
return api_response
class PostNotMoreComplexSchemaResponseBodyForContentTypes(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
def post_not_more_complex_schema_response_body_for_content_types(
self: BaseApi,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
return self._post_not_more_complex_schema_response_body_for_content_types_oapg(
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForpost(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
def post(
self: BaseApi,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
return self._post_not_more_complex_schema_response_body_for_content_types_oapg(
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
|
[
"noreply@github.com"
] |
FallenRiteMonk.noreply@github.com
|
469ad333f4179cbdcbf8ce66fba436b6172c4ab3
|
9edaf93c833ba90ae9a903aa3c44c407a7e55198
|
/netex/models/fare_structure_element_price_ref.py
|
739a4aabbe862e4a51739db6b69106122466c992
|
[] |
no_license
|
tefra/xsdata-samples
|
c50aab4828b8c7c4448dbdab9c67d1ebc519e292
|
ef027fe02e6a075d8ed676c86a80e9647d944571
|
refs/heads/main
| 2023-08-14T10:31:12.152696
| 2023-07-25T18:01:22
| 2023-07-25T18:01:22
| 222,543,692
| 6
| 1
| null | 2023-06-25T07:21:04
| 2019-11-18T21:00:37
|
Python
|
UTF-8
|
Python
| false
| false
| 333
|
py
|
from dataclasses import dataclass
from .fare_structure_element_price_ref_structure import FareStructureElementPriceRefStructure
__NAMESPACE__ = "http://www.netex.org.uk/netex"
@dataclass
class FareStructureElementPriceRef(FareStructureElementPriceRefStructure):
class Meta:
namespace = "http://www.netex.org.uk/netex"
|
[
"chris@komposta.net"
] |
chris@komposta.net
|
5f48983e3802341541423c341cc6f5d54e73071f
|
b26d11147736cae7a1b10e7deaef08c339cb1e4e
|
/profiles/views.py
|
0173cad0d63ba9afa27f6df0fd76eb4ed69d0cdc
|
[] |
no_license
|
Komilcoder/Art-blog-website
|
bcc72e6b18925c141efd28a101c321c82fe7e534
|
8c88e4245a3e5f36593ceade2ab242f331bf3121
|
refs/heads/master
| 2022-12-15T14:06:57.094660
| 2020-09-02T06:58:00
| 2020-09-02T06:58:00
| 291,511,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,498
|
py
|
from django.shortcuts import render, redirect , get_object_or_404
from .models import Profile,Relationship,RelationshipManager
from .forms import ProfileModelForm, CreateUserForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.views.generic import ListView
from django.contrib.auth.models import User
from django.db.models import Q
@login_required(login_url='/accounts/login/')
def my_profile_view(request):
profile = Profile.objects.get(user=request.user)
form = ProfileModelForm(request.POST or None, request.FILES or None,instance=profile)
confirm= False
context = {
'profile':profile,
'form':form,
'confirm':confirm
}
return render(request, 'profiles/myprofile.html',context)
def invites_received(request):
profile = Profile.objects.get(user=request.user)
qs = Relationship.objects.invatiotion_recieved(profile)
results = list(map(lambda x: x.sender, qs))
is_empty = False
if len(results) == 0:
is_empty = True
context = {
'qs':results,
'is_empty':is_empty,
}
return render(request,'profiles/my_invites.html', context)
def profiles_list_view(request):
user = request.user
qs = Profile.objects.get_all_profile(user)
context = {'qs':qs}
return render(request, 'profiles/profile_list.html', context)
# list of profiles the user can invite as friends
def invite_profile_list(request):
user = request.user
qs = Profile.objects.get_all_profiles_invites(user)
context = {'qs':qs}
return render(request, 'profiles/invite_list.html', context)
def Loginpage(request):
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('home')
else:
return render(request,'registration/login.html')
def logout_view(request):
logout(request)
return redirect('home')
def Registration(request):
form = CreateUserForm()
if request.method == 'POST':
form = CreateUserForm(request.POST)
if form.is_valid():
form.save()
user = form.cleaned_data.get('username')
messages.success(request, 'Account was created for ' + user)
return redirect('login')
else:
form = CreateUserForm()
context = {'form':form}
return render(request, 'registration/signup.html', context)
# class-based view for browsing profiles
class ProfileListView(ListView):
model = Profile
template_name = 'profiles/profile_list.html'
context_object_name = 'object_list'
def get_queryset(self):
qs = Profile.objects.get_all_profile(self.request.user)
return qs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
user = User.objects.get(username__iexact=self.request.user)
profile = Profile.objects.get(user=user)
rel_rec = Relationship.objects.filter(sender=profile)
rel_sen = Relationship.objects.filter(receiver=profile)
rel_receiver = []
rel_sender = []
for item in rel_rec:
rel_receiver.append(item.receiver.user)
for item in rel_sen:
rel_sender.append(item.sender.user)
context['rel_receiver'] = rel_receiver
context['rel_sender'] = rel_sender
context['is_empty'] = False
if len(self.get_queryset()) == 0:
context['is_empty'] = True
return context
# send a friendship invitation
def send_invatiation(request):
if request.method == "POST":
pk = request.POST.get('profile_pk')
user = request.user
sender = Profile.objects.get(user=user)
receiver = Profile.objects.get(pk=pk)
relat = Relationship.objects.create(sender=sender, receiver=receiver,status='send')
return redirect(request.META.get('HTTP_REFERER'))
return redirect('profiles:my_profile')
# remove an existing friendship
def remove_from_friends(request):
if request.method == 'POST':
pk = request.POST.get('profile_pk')
user = request.user
sender = Profile.objects.get(user=user)
receiver = Profile.objects.get(pk=pk)
rel = Relationship.objects.filter((Q(sender=sender) & Q(receiver=receiver)) |(Q(sender=receiver) & Q(receiver=sender)))
rel.delete()
return redirect(request.META.get('HTTP_REFERER'))
return redirect('profiles:my_profile')
def accept_invatition(request):
if request.method == 'POST':
pk = request.POST.get('profile_pk')
sender = Profile.objects.get(pk=pk)
receiver = Profile.objects.get(user=request.user)
rel = get_object_or_404(Relationship, sender=sender, receiver=receiver)
if rel.status == 'send':  # invitations are created with status='send'
rel.status = 'accepted'
rel.save()
return redirect('profiles:my_invite')
def reject_invatition(request):
if request.method == 'POST':
pk = request.POST.get('profile_pk')
receiver = Profile.objects.get(user=request.user)
sender = Profile.objects.get(pk=pk)
rel = get_object_or_404(Relationship, sender=sender, receiver=receiver)
rel.delete()
return redirect('profiles:my_invite')
|
[
"yaxshilikovkomil@gmail.com"
] |
yaxshilikovkomil@gmail.com
|
b6441bd419e7b43c5518e361d1ff550fe25ded57
|
70628500b7bdfa5fc548c39cbc8e6df449952a98
|
/baseball_API/stats/migrations/0017_auto_20161026_2209.py
|
a95a83c41aeceea5bfe0f1e070e0a6d70a7f17ae
|
[] |
no_license
|
cjredmond/baseball_API
|
7cd4c1bd07560287d135ceb17f93821234a4fd1d
|
0bbe8b4573b34915ebe6eae0ec9b1de62ef42d13
|
refs/heads/master
| 2021-01-16T23:06:33.866358
| 2016-10-27T04:01:21
| 2016-10-27T04:01:21
| 72,024,468
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,220
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-26 22:09
from __future__ import unicode_literals
from django.db import migrations
import csv
def add_master_data(apps, schema_editor):
Master = apps.get_model("stats", "Master")
with open('Master.csv') as infile:
reader = csv.DictReader(infile)
for row in reader:
Master.objects.create(player=row["playerID"], birthYear=row["birthYear"],
birthMonth=row["birthMonth"], birthDay=row["birthDay"], birthCountry=row["birthCountry"],
birthState=row["birthState"], birthCity=row["birthCity"], deathYear=row['deathYear'],
deathMonth=row['deathMonth'], deathDay=row['deathDay'],deathState=row['deathState'],
deathCountry=row['deathCountry'],
deathCity=row['deathCity'], nameFirst=row['nameFirst'], nameLast=row['nameLast'],
nameGiven=row['nameGiven'], weight=row['weight'], height=row['height'], bats=row['bats'],
throws=row['throws'], debut=row['debut'], finalGame=row['finalGame'], retroID=row['retroID'],
bbrefID=row['bbrefID'])
def add_batting_data(apps, schema_editor):
Master = apps.get_model("stats", "Master")
Batting = apps.get_model("stats", "Batting")
with open('Batting.csv') as infile:
reader = csv.DictReader(infile)
for row in reader:
#print(row)
player = Master.objects.get(player=row["playerID"])
Batting.objects.create(player=player, year=row["yearID"], stint=row['stint'], team=row["teamID"],
league=row['lgID'], games=row['G'], at_bats=row['AB'], runs=row['R'],
hits=row['H'], doubles=row["2B"], triples=row['3B'], home_runs=row['HR'],
rbi=row['RBI'], stolen_bases=row['SB'], caught_stealing=row['CS'],
walks=row['BB'], strikeouts=row['SO'], intentional_walks=row['IBB'],
hit_by_pitch=row['HBP'], sac_hits=row['SH'], sac_flies=row['SF'], double_plays=row['GIDP'])
def add_fielding_data(apps, schema_editor):
Master = apps.get_model("stats", "Master")
Fielding = apps.get_model("stats", "Fielding")
with open('Fielding.csv') as infile:
reader = csv.DictReader(infile)
for row in reader:
#print(row)
player = Master.objects.get(player=row["playerID"])
Fielding.objects.create(player=player, year=row["yearID"], stint=row['stint'], team=row["teamID"],
league=row['lgID'], position=row['POS'], games=row['G'], games_started=row['GS'],
innOuts=row['InnOuts'], put_outs=row['PO'], assists=row['A'], double_plays=row['DP'],
passed_balls=row['PB'], wild_pitches=row['WP'], stolen_bases=row['SB'],
caught_stealing=row['CS'], zone=row['ZR'] )
def add_pitcher_data(apps, schema_editor):
Master = apps.get_model("stats", "Master")
Pitcher = apps.get_model("stats", "Pitcher")
with open('Pitching.csv') as infile:
reader = csv.DictReader(infile)
for row in reader:
#print(row)
player = Master.objects.get(player=row["playerID"])
Pitcher.objects.create(player=player, year=row["yearID"], stint=row['stint'], team=row["teamID"],
league=row['lgID'], wins=row['W'], losses=row['L'], games=row['G'], games_started=row['GS'],
complete_games=row['CG'], shutouts=row['SHO'], saves=row['SV'], outs_pitched=row['IPouts'],
hits=row['H'], earned_runs=row['ER'], home_runs=row['HR'], walks=row['BB'], strikeouts=row['SO'],
opponent_avg=row['BAOpp'], era=row['ERA'], intentional_bb=row['IBB'],
wild_pitches=row['WP'],hit_by_pitch=row['HBP'], balks=row['BK'],
batters_faced=row['BFP'], games_finised=row['GF'], runs=row['R'],
sac_against=row['SH'], sac_flies=row['SF'], ground_dp_against=row['GIDP'])
class Migration(migrations.Migration):
dependencies = [
('stats', '0016_auto_20161026_2223'),
]
operations = [
migrations.RunPython(add_master_data),
migrations.RunPython(add_batting_data),
migrations.RunPython(add_fielding_data),
migrations.RunPython(add_pitcher_data)
]
|
[
"connor.redmond@gmail.com"
] |
connor.redmond@gmail.com
|
304f5b58c3d48bcabde5d01bcb1635415e7c3590
|
9bdeffc12343cd5c5e7bf1f4cb8969c72d81c56b
|
/mpesa_api/urls.py
|
70e4b9988c53b2601dbe91606de11fb2948a7016
|
[] |
no_license
|
johngaitho05/Mpesa-API-Python
|
5fe90d60261e9913d6adfa6bc9fc3028fe6c79e5
|
49314ac3d37be297783a7c6da7a1875ece24e1d0
|
refs/heads/master
| 2022-02-08T07:44:46.910257
| 2022-01-31T11:05:30
| 2022-01-31T11:05:30
| 222,941,616
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
from django.urls import path, include
from . import views
urlpatterns = [
path('access/token', views.getAccessToken, name='get_mpesa_access_token'),
path('online/lipa', views.lipa_na_mpesa_online, name='lipa_na_mpesa'),
# register, confirmation, validation and callback urls
path('c2b/register', views.register_urls, name="register_mpesa_validation"),
path('c2b/confirmation', views.confirmation, name="confirmation"),
path('c2b/validation', views.validation, name="validation"),
path('c2b/callback', views.call_back, name="call_back"),
]
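# Usage sketch (an assumption, not part of this app): a project-level urls.py
# would mount these routes with include(), which is imported above, e.g.
#
#     urlpatterns = [
#         path('mpesa/', include('mpesa_api.urls')),  # 'mpesa/' prefix is hypothetical
#     ]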
|
[
"johngaitho05@gmail.com"
] |
johngaitho05@gmail.com
|
9b4a4205e03cccfbdc33ac81bc959da4c660fb3b
|
7e4ca815fa4776d41b2b46cdcada077149d72899
|
/course4/week4/graph.py
|
bf67b3634a527b2d80808c968688486839d57ed2
|
[] |
no_license
|
kcollett1/Stanford_Algorithms
|
1a95e0ec12737f50926c23aede08fb246f719935
|
cdab3757ebb6c6a85ee4f9c630c00ad0b3fa24aa
|
refs/heads/master
| 2022-04-21T05:55:55.988759
| 2020-04-20T14:57:53
| 2020-04-20T14:57:53
| 257,314,127
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,161
|
py
|
'''
this is my implementation of a DIRECTED graph as an adjacency list. vertices are added
to the graph from input containing the vertex num and a list of vertices connected to it.
also implemented is Kosaraju's 2 pass algorithm to compute the strongly connected
components (SCC) of a directed graph, using a depth-first-search strategy (iteratively
rather than recursively) twice on the reverse of the graph first, and then on the graph
itself, keeping track of key variables (namely, finishing time and leader) as we pass through.
'''
from stack import Stack  # local helper module
from queue import Queue  # local helper module (provides enqueue/dequeue/is_empty, unlike the stdlib queue.Queue)
class Graph:
def __init__(self):
# dict of vertices, mapped to a list of sets of its outgoing/incoming edges
self.vertices = {}
# dict of edges, mapped to a list of the two endpoints of edge, in order of direction
self.edges = {} # edge #: [v1,v2]; i.e. {3:[3,2]} edge# 3 points FROM vert 3 TO vert 2
self.num_edges = 0
self.num_verts = 0
self.max_vert = 0 # track verts that exist on graph without incident edges
def __update_vert__(self, vert, ind):
'''Helper function to add_edge to add current edge number to vertex dict'''
if vert not in self.vertices:
self.num_verts += 1
if vert > self.max_vert:
self.max_vert = vert
self.vertices[vert] = [set(), set()]
self.vertices[vert][ind].add(self.num_edges)
def add_edge(self, vert1: int, vert2: int):
'''Add a new edge to the graph pointing from vert1 to vert2'''
# increment number of edges and add vertex pointers to this edge
self.num_edges += 1
self.edges[self.num_edges] = [vert1, vert2]
# add both vertices/edge# to vertex dict (and increment number of vertices if needed)
self.__update_vert__(vert1, 0)
self.__update_vert__(vert2, 1)
def add_vert(self, vert):
''' Add a vertex to the graph not connected to any edges '''
if vert not in self.vertices:
self.num_verts += 1
if vert > self.max_vert:
self.max_vert = vert
self.vertices[vert] = [set(), set()]
def BFS(self, start: int, forwards=True):
''' Breadth first search from start vertex. Can search reverse graph with forwards=False '''
# initialize all vertices as unexplored except for start vertex
explored = set()
explored.add(start)
# initialize queue to track next vertices to explore, enqueue start vertex
verts = Queue()
verts.enqueue(start)
# while queue is not empty, keep exploring vertices
while not verts.is_empty():
# dequeue next vertex and try to explore any incident edges it has
vert = verts.dequeue()
# go through all edges outgoing from this vertex
for edge in self.vertices[vert][0]:
# get vertex corresponding to this edge
# if going through G, current vert will be 1st; next_vert is in pos 1 (True)
# if going through G_rev, current vert will be 2nd; next_vert is in pos 0 (False)
next_vert = self.edges[edge][forwards]
# only interested in unexplored vertices
if next_vert in explored:
continue
# this is a vertex of interest, mark as explored and add to queue
explored.add(next_vert)
verts.enqueue(next_vert)
def DFS(self, start, forwards=True):
'''
Depth first search from start vertex, helper method for compute_scc. Can search reverse graph
with forwards=False. This DFS method uses an iterative search rather than a recursive search
as this is more memory efficient for large graphs, though tracking the finishing time becomes
slightly more tricky. Instead of tracking just whether a node is explored or not, we also need to
track a third status, "explored but not finished". This is particularly important in cases
where we take a vertex from the top of the stack and see that all of its neighbors have
already been explored - are all of its neighbors actually finished being explored, or are
they possibly still in the stack waiting to be assigned a finish time?
'''
global leaders, leader, finish_times, finish_time, explored
verts = Stack()
verts.push(start)
if forwards: # we only care about tracking leaders in forwards pass through graph
leaders[leader] = {start}
while not verts.is_empty():
vert = verts.top() # which vertex is currently first in the stack
if vert not in explored:
# haven't "explored" yet - add all neighbors to stack if they haven't been explored yet
# note here we may be double adding vertices to the stack, but when we get to it again
# we will check if it's already been explored and if so we mark it's finish time if needed
explored.add(vert)
for edge in self.vertices[vert][(int(forwards)+1)%2]:
next_vert = self.edges[edge][int(forwards)]
if next_vert not in explored:
if forwards: # we only care about tracking leaders in forwards pass
leaders[leader].add(next_vert)
verts.push(next_vert)
else:
# completely finished exploring this node, remove from stack, set finishing time if needed
# on first pass through, we set every nodes finish time, so on forward pass through graph
# we will never set any finishing times
verts.pop()
if vert not in finish_times:
finish_time += 1
finish_times[vert] = finish_time
def compute_scc(self):
'''
This function computes the strongly connected components of this graph using Kosaraju's 2-pass
algorithm. Returns the dict of each component's vertices (each with an arbitrary leader as key).
'''
global leaders, leader, finish_times, finish_time, explored
leaders = {}
leader = 0
finish_times = {}
finish_time = 0
explored = set()
# DFS on reverse of graph first from all nodes until all have been explored
for vert in self.vertices:
if vert not in explored:
self.DFS(start=vert, forwards=False)  # records finish times via the shared globals
# reset explored verts to all being unexplored initially
explored = set()
# DFS on original graph checking all verts from largest finish time to smallest
for vert in sorted([[t,v] for v,t in finish_times.items()], reverse=True):
if vert[1] not in explored:
leader = vert[1]
self.DFS(start=vert[1]) # passing through graph forwards, we will track leaders
# the SCC's are now contained in the leaders dict
return leaders
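# Usage sketch (assumes the local `stack` and `queue` helper modules imported
# above are available; the leader keys in the result are arbitrary):
if __name__ == '__main__':
    g = Graph()
    for v1, v2 in [(1, 2), (2, 3), (3, 1), (3, 4)]:
        g.add_edge(v1, v2)
    print(g.compute_scc())  # two components: {1, 2, 3} and {4}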
|
[
"collettikatrina@gmail.com"
] |
collettikatrina@gmail.com
|
0af1b00428e976ba359b1a7ffb194f8eae839390
|
be50b4dd0b5b8c3813b8c3158332b1154fe8fe62
|
/StacksAndQueues/Python/NearestSmallerElements.py
|
3d77893e6f926f45de256ee34a8b88f67e31f45a
|
[] |
no_license
|
Zimmermann25/InterviewBit
|
a8d89e090068d9644e28085625963c8ce75d3dff
|
6d2138e740bd5ba8eab992d9bf090977e077bfc5
|
refs/heads/main
| 2023-03-24T18:12:48.244950
| 2021-03-24T14:36:48
| 2021-03-24T14:36:48
| 350,835,917
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,238
|
py
|
class Solution:
# @param A : list of integers
# @return a list of integers
def prevSmaller(self, A):
G = [-1] * len(A)  # default -1 covers the "no smaller element" case
curMin = A[0]
stack = []
for i in range(len(A)-1):
#print(stack)
if stack:
# push onto the stack only elements that can change the answer
if A[i] < A[i+1]:
'''for k in range(len(stack)):
if len(stack) and stack[-k-1] > A[i]:
stack.pop()'''
stack.append(A[i])
# find the first element on the stack smaller than A[i]
for j in range(len(stack)):
if stack[-j-1] < A[i]:
G[i] = stack[-j-1]
break
else: stack.append(A[i])
#print("stack: ", stack)
# edge case for the last element
for j in range(len(stack)):
if stack[-j-1] < A[-1]:
G[-1] = stack[-j-1]
break
return G
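# Quick check, traced by hand against the loop above:
if __name__ == '__main__':
    print(Solution().prevSmaller([4, 5, 2, 10, 8]))  # expected [-1, 4, -1, 2, 2]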
|
[
"noreply@github.com"
] |
Zimmermann25.noreply@github.com
|
a70b34ec60feceb1a77c2fd378dbb3a87121abd9
|
fc210e56f3d20947f84039a8ef07107bb11e6b5a
|
/main_prepare_tfrecords.py
|
def142e0fa4506356f5c7542938f2e953fb58cde
|
[] |
no_license
|
ZhouYzzz/RecurrentTracking
|
344b5fcb73f04a749f9822ae0b18f8de83ee6308
|
9dfaf2b383b2a0f67272ec090b2a40bb5d1adee4
|
refs/heads/master
| 2021-09-11T20:47:31.566421
| 2018-04-12T06:50:05
| 2018-04-12T06:50:05
| 112,092,396
| 0
| 1
| null | 2018-03-22T11:39:37
| 2017-11-26T15:07:42
|
Python
|
UTF-8
|
Python
| false
| false
| 2,126
|
py
|
"""Create TFRecords files from ILSVRC2015"""
import tensorflow as tf
import tempfile, os, argparse
from multiprocessing import Pool
from tqdm import tqdm
from ilsvrc2015 import ILSVRC2015, PHASE
from annotations import parse_annotation_folder
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dataset_dir', default='/home/zhouyz/ILSVRC2015/', type=str, help='ILSVRC2015 root directory')
parser.add_argument('--output_dir', default=tempfile.mkdtemp(), type=str)
parser.add_argument('--records_prefix', default='ilsvrc2015.', type=str)
FLAGS, _ = parser.parse_known_args()
def create_tfrecords(annotation_folder):
writer = tf.python_io.TFRecordWriter(
path=tempfile.mktemp(suffix='.tfrecords', prefix=FLAGS.records_prefix, dir=FLAGS.output_dir))
streams = parse_annotation_folder(annotation_folder)
for s in streams:
writer.write(s.serializeToTFSequenceExample().SerializeToString())
writer.close()
return len(streams)
def create_fixed_lengthed_tfrecords(annotation_folder, length=32):
writer = tf.python_io.TFRecordWriter(
path=tempfile.mktemp(suffix='.tfrecords', prefix=FLAGS.records_prefix, dir=FLAGS.output_dir))
streams = parse_annotation_folder(annotation_folder)
splitted_streams = []
for s in streams:
splitted_streams += s.splitIntoStreams(n=s.length//length + 1, l=length)
for s in splitted_streams:
writer.write(s.serializeToTFSequenceExample().SerializeToString())
writer.close()
return len(splitted_streams)
def main():
print('FLAGS:', FLAGS)
dataset = ILSVRC2015(FLAGS.dataset_dir)
snippet_ids = dataset.GetSnippetIDs(phase=PHASE.TRAIN)
## Using multiprocessing
# with Pool(8) as p:
# r = list(tqdm(
# p.imap(create_tfrecords, map(lambda i: os.path.join(dataset.annotations_dir, i), snippet_ids)),
# total=len(snippet_ids)
# ))
count = 0
t = tqdm(snippet_ids)
for id in t:
count += create_fixed_lengthed_tfrecords(os.path.join(dataset.annotations_dir, id))
t.set_description(desc='Total records {}'.format(count))
if __name__ == '__main__':
main()
|
[
"zhouyz9608@gmail.com"
] |
zhouyz9608@gmail.com
|
2f4ad34593d619afe4392bde5ef7782179948d56
|
fd69d76dcfe60b97ca02eb853e3f2cd2b68d990e
|
/tree/serialize_deserialize.py
|
34c503596f178063464a9402d8208b4a6238f7eb
|
[] |
no_license
|
Levalife/DSA
|
f3204946c9225f0472ec8470c0fbe29357559f35
|
4e5a94ba94fa5be01f4760a2651001426b3ef973
|
refs/heads/master
| 2023-01-23T03:51:48.864888
| 2020-11-27T13:58:04
| 2020-11-27T13:58:04
| 298,612,309
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,766
|
py
|
# -*- coding: utf-8 -*-
class Tree:
def __init__(self, root=None):
self.root = root
class Node:
def __init__(self, value, parent=None, left=None, right=None):
self.value = value
self.left = left
self.right = right
self.parent = parent
'''
10
7 11
6 8 20
1 9 14 22
'''
tree = Tree()
tree.root = Node(10)
tree.root.left = Node(7, tree.root)
tree.root.right = Node(11, tree.root)
tree.root.left.left = Node(6, tree.root.left)
tree.root.left.right = Node(8, tree.root.left)
tree.root.right.right = Node(20, tree.root.right)
tree.root.left.left.left = Node(1, tree.root.left.left)
tree.root.left.right.right = Node(9, tree.root.left.right)
tree.root.right.right.left = Node(14, tree.root.right.right)
tree.root.right.right.right = Node(22, tree.root.right.right)
def serialize(node):
if not node:
return "X,"
return "{},{}{}".format(node.value, serialize(node.left), serialize(node.right))
serialized_tree = serialize(tree.root)
print(serialized_tree)
def deserialize(tree_str):
tree_list = tree_str.split(',')
return deserialize_helper(tree_list)
def deserialize_helper(tree_list):
if tree_list:
if tree_list[0] == 'X':
tree_list.pop(0)
return None
newNode = Node(value=tree_list.pop(0))  # note: values come back as strings after a round trip
newNode.left = deserialize_helper(tree_list)
newNode.right = deserialize_helper(tree_list)
return newNode
deserialized_tree = deserialize(serialized_tree)
def preorder(node):
print(node.value)
if node.left:
preorder(node.left)
if node.right:
preorder(node.right)
preorder(deserialized_tree)
|
[
"levushka14@gmail.com"
] |
levushka14@gmail.com
|
4a2f8d6c9ed2d00e8ed94eef8b4bce6ebb50a686
|
4518ce1ee32ffbd4004df6865f557c5a3909c135
|
/awards/migrations/0004_reviews.py
|
6c70e3944ef1fceffaca5ddef335e41ee17a2d17
|
[
"MIT"
] |
permissive
|
petermirithu/Grant_py
|
d9a04dee7fc0ae80e55a15b073e6b24108b23555
|
0e2e8d2a01c361583853e4d06fc4ede45e3741f8
|
refs/heads/master
| 2022-12-14T19:04:42.503002
| 2020-01-09T17:45:00
| 2020-01-09T17:45:00
| 231,231,593
| 1
| 0
|
MIT
| 2022-12-08T03:22:31
| 2020-01-01T15:20:26
|
Python
|
UTF-8
|
Python
| false
| false
| 962
|
py
|
# Generated by Django 2.2.8 on 2020-01-03 15:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('awards', '0003_auto_20200102_1411'),
]
operations = [
migrations.CreateModel(
name='reviews',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('body', models.CharField(max_length=1000)),
('posted_on', models.DateTimeField(auto_now_add=True)),
('posted_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('projo_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='awards.projo_post')),
],
),
]
|
[
"pyra_m.k@yahoo.com"
] |
pyra_m.k@yahoo.com
|
5c064fb27f4e1f5959994430e93a3a4ee5d91147
|
72fcc9b617014484a1c021fa90af57b457aba5ba
|
/06.BinarySearchTree/01.ConstructionConversion/3_construct_bst_from_preorder.py
|
a4d96db9559efa2c78f1ee3b4a74ff5d091c6804
|
[] |
no_license
|
shindesharad71/Data-Structures
|
249cb89fc3b54a3d8a67e4e9db832e256d072ee6
|
a7cd247228a723e880bccd3aa24c072722785f6d
|
refs/heads/main
| 2023-07-24T21:01:08.070082
| 2021-09-03T04:02:05
| 2021-09-03T04:02:05
| 370,706,713
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,468
|
py
|
# Construct BST from given preorder traversal
# https://www.geeksforgeeks.org/construct-bst-from-given-preorder-traversa/
# A O(n^2) Python3 program for
# construction of BST from preorder traversal
# A binary tree node
class Node:
# A constructor to create a new node
def __init__(self, data):
self.data = data
self.left = None
self.right = None
# constructTreeUtil.preIndex is a static variable of
# function constructTreeUtil
# Function to get the value of static variable
# constructTreeUtil.preIndex
def getPreIndex():
return constructTreeUtil.preIndex
# Function to increment the value of static variable
# constructTreeUtil.preIndex
def incrementPreIndex():
constructTreeUtil.preIndex += 1
# A recursive function to construct a BST from pre[].
# preIndex is used to keep track of the index in pre[].
def constructTreeUtil(pre, low, high):
# Base Case
if low > high:
return None
# The first node in preorder traversal is root. So take
# the node at preIndex from pre[] and make it root,
# and increment preIndex
root = Node(pre[getPreIndex()])
incrementPreIndex()
# If the current subarray has only one element,
# no need to recur
if low == high:
return root
r_root = -1
# Search for the first element greater than root
for i in range(low, high + 1):
if pre[i] > root.data:
r_root = i
break
# If no elements are greater than the current root,
# all elements are left children
# so assign root appropriately
if r_root == -1:
r_root = getPreIndex() + (high - low)
# Use the index of element found in preorder to divide
# preorder array in two parts. Left subtree and right
# subtree
root.left = constructTreeUtil(pre, getPreIndex(), r_root - 1)
root.right = constructTreeUtil(pre, r_root, high)
return root
# The main function to construct BST from given preorder
# traversal. This function mainly uses constructTreeUtil()
def construct_tree(pre):
size = len(pre)
constructTreeUtil.preIndex = 0
return constructTreeUtil(pre, 0, size - 1)
def inorder(root):
if root:
inorder(root.left)
print(root.data, end=" ")
inorder(root.right)
# Driver Code
if __name__ == "__main__":
pre = [10, 5, 1, 7, 40, 50]
root = construct_tree(pre)
print("Inorder traversal of constructed tree")
inorder(root)
|
[
"shindesharad71@gmail.com"
] |
shindesharad71@gmail.com
|
059e8d97f0b62ea4ab980bb45f12a01bacc68228
|
6dd08ec6b4f6351de8450a3d7e592fd6b4994119
|
/cbase/server/cbase-1.8.1/testrunner/pytests/spatialcompaction.py
|
190ff0ff033f2e8af6e3946146558e06a12e1206
|
[] |
no_license
|
zhgwenming/appstack
|
d015e96b911fe318f9fba1bdeeea9d888d57dfba
|
8fe6c1dfc2f5ed4a36c335e86ae28c17b3769276
|
refs/heads/master
| 2021-01-23T13:30:19.507537
| 2015-11-09T06:48:35
| 2015-11-09T06:48:35
| 7,576,644
| 1
| 2
| null | 2016-01-05T09:16:22
| 2013-01-12T15:13:21
|
C
|
UTF-8
|
Python
| false
| false
| 1,569
|
py
|
import unittest
import uuid
import logger
from membase.helper.spatial_helper import SpatialHelper
class SpatialCompactionTests(unittest.TestCase):
def setUp(self):
self.log = logger.Logger.get_logger()
self.helper = SpatialHelper(self, "default")
self.helper.setup_cluster()
def tearDown(self):
self.helper.cleanup_cluster()
def test_spatial_compaction(self):
self.log.info(
"description : test manual compaction for spatial indexes")
prefix = str(uuid.uuid4())[:7]
design_name = "dev_test_spatial_compaction"
self.helper.create_index_fun(design_name, prefix)
# Insert (resp. update, as they have the same prefix) and query
# the spatial index several time so that the compaction makes sense
for i in range(0, 8):
self.helper.insert_docs(2000, prefix)
self.helper.get_results(design_name)
# Get the index size prior to compaction
status, info = self.helper.info(design_name)
disk_size = info["spatial_index"]["disk_size"]
# Do the compaction
self.helper.compact(design_name)
# Check if the index size got smaller
status, info = self.helper.info(design_name)
self.assertTrue(info["spatial_index"]["disk_size"] < disk_size,
"The file size ({0}) isn't smaller than the "
"pre compaction size ({1})."
.format(info["spatial_index"]["disk_size"],
disk_size))
|
[
"zhgwenming@gmail.com"
] |
zhgwenming@gmail.com
|
dc9289d234825789dfd30143764b5bf441e87b50
|
a7cca49626a3d7100e9ac5c2f343c351ecb76ac7
|
/playbooks/tests/led_toggle.py
|
f8079be0655d96fcf02c841fe646899d740a03c0
|
[
"MIT"
] |
permissive
|
Carglglz/upydev
|
104455d77d64300074bda54d86bd791f19184975
|
529aa29f3e1acf8160383fe410b5659110dc96de
|
refs/heads/master
| 2023-05-24T18:38:56.242500
| 2022-10-21T14:03:17
| 2022-10-21T14:03:17
| 199,335,165
| 49
| 9
|
MIT
| 2022-10-21T14:03:18
| 2019-07-28T20:42:00
|
Python
|
UTF-8
|
Python
| false
| false
| 142
|
py
|
import time
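# NOTE (assumption): `led` is not defined in this script; upydev playbooks run
# on a device where an `led` object (e.g. a machine.Pin-backed LED) already
# exists in the global namespace.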
for i in range(5):
print(f"This is a loaded script: {i}")
led.on()
time.sleep(0.5)
led.off()
time.sleep(0.5)
|
[
"carlosgilglez@gmail.com"
] |
carlosgilglez@gmail.com
|
dcff227305bc074d0d32949ae48b052c1608a805
|
dd3bbd4e7aaee7a8a5f26b927ce28ac472c855a5
|
/eggs/Products.CMFPlone-4.1-py2.7.egg/Products/CMFPlone/skins/plone_scripts/getNotAddableTypes.py
|
d9e66131d0c1fb846122cf94e88d8368a72a9d1e
|
[] |
no_license
|
nacho22martin/tesis
|
ea0a822f8bdbdef6f13f41276ecd4d6e85427ca5
|
e137eb6225cc5e724bee74a892567796166134ac
|
refs/heads/master
| 2020-12-24T13:20:58.334839
| 2013-11-09T12:42:41
| 2013-11-09T12:42:41
| 14,261,570
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 307
|
py
|
## Script (Python) "getNotAddableTypes"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=
##title=
##
# customize this script to filter addable portal types based on
# context, the current user or other criteria
return ()
|
[
"ignacio@plone.(none)"
] |
ignacio@plone.(none)
|
859189dfd335cbf552d601b7f074a5040f3b71b9
|
d1f8aef0e3da67555b6b7d57ac9bec0b94e12cc5
|
/dragex/interfaces/__init__.py
|
d85a2f6ea8e655ceea1d1c1ab049f645c0717c72
|
[] |
no_license
|
victorhook/dragex
|
d3593f0c12fc2cbdbccc14a085f70e493f3b8f05
|
6c06740230f7513318abe79c78cb6d4369ba3e68
|
refs/heads/master
| 2023-06-02T03:58:54.061938
| 2021-06-17T19:06:24
| 2021-06-17T19:06:24
| 370,010,180
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
from .drawable import Drawable # noqa
from .game_object import GameObject # noqa
from .screen import Screen # noqa
from .sprite_image import SpriteImage # noqa
|
[
"victorkrook96@gmail.com"
] |
victorkrook96@gmail.com
|
7bffb66e5f552e2e744965e1073430a1c8eaf3b7
|
1b60858c303bd7d88dae82b8db56273c326ddb44
|
/tests/swagger_client_tests/test_processor_status_snapshot_entity.py
|
5f4fb8dda20bf1e9f698019dba23303937af0daf
|
[
"Apache-2.0"
] |
permissive
|
tspannhw/nipyapi
|
1ba076ef669493bad20681579891eea1d43f4fc8
|
30cdd028cf68cc4316b54a23bfa1f0397de3ae23
|
refs/heads/master
| 2021-07-19T14:37:22.993682
| 2017-10-29T18:52:31
| 2017-10-29T18:52:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,475
|
py
|
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.2.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import nipyapi
from nipyapi.swagger_client.rest import ApiException
from nipyapi.swagger_client.models.processor_status_snapshot_entity import ProcessorStatusSnapshotEntity
class TestProcessorStatusSnapshotEntity(unittest.TestCase):
""" ProcessorStatusSnapshotEntity unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testProcessorStatusSnapshotEntity(self):
"""
Test ProcessorStatusSnapshotEntity
"""
# FIXME: construct object with mandatory attributes with example values
#model =nipyapi.swagger_client.models.processor_status_snapshot_entity.ProcessorStatusSnapshotEntity()
pass
if __name__ == '__main__':
unittest.main()
|
[
"dchaffey@hortonworks.com"
] |
dchaffey@hortonworks.com
|
7ee7f2e7f0034ad78299103059e5d41c7e5251e8
|
47ff744da519c525cccfad1d8cead74f7e2cd209
|
/uge4/.history/exercise_20200220124148.py
|
f126b64625bf836dfaac34c1d4c008fc555bbe88
|
[] |
no_license
|
Leafmight/Python
|
f6098395a7a13dd6afe6eb312a3eb1f3dbe78b84
|
d987f22477c77f3f21305eb922ae6855be483255
|
refs/heads/master
| 2020-12-21T14:21:06.802341
| 2020-05-22T10:21:37
| 2020-05-22T10:21:37
| 236,457,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
import numpy as np
filename = './befkbhalderstatkode.csv'
dd = np.genfromtxt(filename, delimiter=',', dtype=np.uint, skip_header=1)
neighb = {1: 'Indre By', 2: 'Østerbro', 3: 'Nørrebro', 4: 'Vesterbro/Kgs. Enghave',
5: 'Valby', 6: 'Vanløse', 7: 'Brønshøj-Husum', 8: 'Bispebjerg', 9: 'Amager Øst',
10: 'Amager Vest', 99: 'Udenfor'}
def pop(hood):
hood_mask = (dd[:,0] == 2015) & (dd[:,1] == hood)
return np.sum(dd[hood_mask][:, 4])  # column 4 holds the person counts
def getSumPerHood():
lst = {}
for key, value in neighb.items():
lst.update({value: pop(key)})
return lst
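# Quick check (assumes befkbhalderstatkode.csv sits next to this script):
if __name__ == '__main__':
    print(getSumPerHood())  # 2015 population per neighbourhood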
|
[
"jacobfolke@hotmail.com"
] |
jacobfolke@hotmail.com
|
e8813cd668f7ed59984bd897bab0933c4ba2a92a
|
8a36ddf6a9f2f6c00ff7d3db72fe7a6f88ead7a2
|
/weather/weather.py
|
f53c89e3bc9040f4b89115a55e4788b9c56e3dde
|
[] |
no_license
|
pccode21/PyQt5
|
5d5b79f55d6165d03d58768bf30f25382ac7812b
|
f0af930b1338d0472aacbd3cab65be009bddd96e
|
refs/heads/master
| 2020-12-03T11:07:44.226390
| 2020-02-19T05:29:09
| 2020-02-19T05:29:09
| 231,293,179
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,490
|
py
|
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from MainWindow import Ui_MainWindow
from datetime import datetime
import json
import os
import sys
import requests
from urllib.parse import urlencode
# OPENWEATHERMAP_API_KEY = os.environ.get('b020112734ca76c7df0ccad361a58fa3')
"""
Get an API key from https://openweathermap.org/ to use with this
application.
"""
def from_ts_to_time_of_day(ts):
dt = datetime.fromtimestamp(ts)
return dt.strftime("%I%p").lstrip("0")
class WorkerSignals(QObject):
'''
Signals available from a running worker thread.
'''
finished = pyqtSignal()
error = pyqtSignal(str)
result = pyqtSignal(dict, dict)
class WeatherWorker(QRunnable):
'''
Worker thread that fetches weather updates.
'''
signals = WorkerSignals()
is_interrupted = False
def __init__(self, location):
super(WeatherWorker, self).__init__()
self.location = location
@pyqtSlot()
def run(self):
try:
params = dict(
q=self.location,
appid='b020112734ca76c7df0ccad361a58fa3'
)
url = 'http://api.openweathermap.org/data/2.5/weather?%s&units=metric' % urlencode(params)
r = requests.get(url)
weather = json.loads(r.text)
# Check whether the request failed (the forecast request fails the same way).
if weather['cod'] != 200:
raise Exception(weather['message'])
url = 'http://api.openweathermap.org/data/2.5/forecast?%s&units=metric' % urlencode(params)
r = requests.get(url)
forecast = json.loads(r.text)
self.signals.result.emit(weather, forecast)
except Exception as e:
self.signals.error.emit(str(e))
self.signals.finished.emit()
class MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.setupUi(self)
self.pushButton.pressed.connect(self.update_weather)
self.threadpool = QThreadPool()  # thread pool that runs the worker objects
self.show()
def alert(self, message):
alert = QMessageBox.warning(self, "Warning", message)
def update_weather(self):
worker = WeatherWorker(self.lineEdit.text())
worker.signals.result.connect(self.weather_result)
worker.signals.error.connect(self.alert)
self.threadpool.start(worker)
def weather_result(self, weather, forecasts):
self.latitudeLabel.setText("%.2f °" % weather['coord']['lat'])
self.longitudeLabel.setText("%.2f °" % weather['coord']['lon'])
self.windLabel.setText("%.2f m/s" % weather['wind']['speed'])
self.temperatureLabel.setText("%.1f °C" % weather['main']['temp'])
self.pressureLabel.setText("%d" % weather['main']['pressure'])
self.humidityLabel.setText("%d" % weather['main']['humidity'])
self.sunriseLabel.setText(from_ts_to_time_of_day(weather['sys']['sunrise']))
# Timestamps go through the custom from_ts_to_time_of_day helper, which returns a user-friendly am/pm time of day without a leading zero.
self.weatherLabel.setText("%s (%s)" % (
weather['weather'][0]['main'],
weather['weather'][0]['description']
)
)
self.set_weather_icon(self.weatherIcon, weather['weather'])
for n, forecast in enumerate(forecasts['list'][:5], 1):
getattr(self, 'forecastTime%d' % n).setText(from_ts_to_time_of_day(forecast['dt']))
self.set_weather_icon(getattr(self, 'forecastIcon%d' % n), forecast['weather'])
getattr(self, 'forecastTemp%d' % n).setText("%.1f °C" % forecast['main']['temp'])
# Set the current weather icon from the weather dict, then walk the first five forecasts. The forecast icon, time and temperature labels were named forecastIcon<n>, forecastTime<n> and forecastTemp<n> in Qt Designer, so each can be fetched in turn with getattr and the loop index.
def set_weather_icon(self, label, weather):
label.setPixmap(
QPixmap(os.path.join('./PyQt5/weather/images', "%s.png" %
weather[0]['icon']
)
)
)
if __name__ == '__main__':
app = QApplication([])
window = MainWindow()
app.exec_()
|
[
"16007005@qq.com"
] |
16007005@qq.com
|
7c76835603d90ac7c8e51e9c8be02a23b28636b1
|
a5dd6bcb59130979624c0274a91bb1566421dbc4
|
/thor/config.py
|
f0faee12c5842bbacca47f5949d4fa2242d68ec3
|
[
"BSD-3-Clause"
] |
permissive
|
mjuric/thor
|
62563455526eaec09c96341ac239a5985824f24b
|
4e2403bf9c08e998ccd7a277583b0e550b9d3a67
|
refs/heads/main
| 2023-04-21T02:22:17.359744
| 2021-05-19T20:12:56
| 2021-05-19T20:12:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,027
|
py
|
import numpy as np
__all__ = ["Config"]
class Config:
"""
Config: Holds configuration settings.
Of interest to the user are two main attributes:
columnMapping : This dictionary should define the data
column names of the user's data relative to the
internally used names.
oorbDirectory : Oorb install location should be defined
here.
Parameters
----------
None
Returns
-------
None
"""
MIN_OBS = 5
MIN_ARC_LENGTH = 1.0
CONTAMINATION_PERCENTAGE = 20
BACKEND = "PYOORB"
BACKEND_KWARGS = {}
NUM_THREADS = 60
USE_RAY = False
USE_GPU = False
RANGE_SHIFT_CONFIG = {
"cell_area" : 1000,
"threads" : NUM_THREADS,
"backend" : BACKEND,
"backend_kwargs" : BACKEND_KWARGS,
}
CLUSTER_LINK_CONFIG = {
"vx_range" : [-0.1, 0.1],
"vy_range" : [-0.1, 0.1],
"vx_bins" : 300,
"vy_bins" : 300,
"vx_values" : None,
"vy_values" : None,
"eps" : 5/3600,
"min_samples" : MIN_OBS,
"min_arc_length" : MIN_ARC_LENGTH,
"threads" : NUM_THREADS,
}
IOD_CONFIG = {
"min_obs" : MIN_OBS,
"min_arc_length" : MIN_ARC_LENGTH,
"contamination_percentage" : CONTAMINATION_PERCENTAGE,
"rchi2_threshold" : 1000,
"observation_selection_method" : "combinations",
"iterate" : False,
"light_time" : True,
"linkage_id_col" : "cluster_id",
"identify_subsets" : True,
"threads" : NUM_THREADS,
"backend" : BACKEND,
"backend_kwargs" : BACKEND_KWARGS,
}
OD_CONFIG = {
"min_obs" : MIN_OBS,
"min_arc_length" : MIN_ARC_LENGTH,
"contamination_percentage" : CONTAMINATION_PERCENTAGE,
"rchi2_threshold" : 10,
"delta" : 1e-6,
"max_iter" : 5,
"method" : "central",
"fit_epoch" : False,
"test_orbit" : None,
"threads" : NUM_THREADS,
"backend" : BACKEND,
"backend_kwargs" : BACKEND_KWARGS,
}
ODP_CONFIG = {
"min_obs" : MIN_OBS,
"min_arc_length" : MIN_ARC_LENGTH,
"contamination_percentage" : 0.0,
"rchi2_threshold" : 5,
"eps" : 1/3600,
"delta" : 1e-8,
"max_iter" : 5,
"method" : "central",
"fit_epoch" : False,
"orbits_chunk_size" : 1,
"observations_chunk_size" : 100000,
"threads" : NUM_THREADS,
"backend" : BACKEND,
"backend_kwargs" : BACKEND_KWARGS,
}
ADES_METADATA = {
"observatory_code" : "I11",
"observatory_name" : "Vera C. Rubin Observatory",
"telescope_aperture" : "8.4",
"telescope_design" : "Reflector",
"telescope_detector" : "CCD",
"submitter" : "D. iRAC",
"observers" : ["D. iRAC"],
"measurers" : ["D. iRAC"],
}
COLUMN_MAPPING = {
### Observation Parameters
# Observation ID
"obs_id" : "obsId",
# Exposure time
"exp_mjd" : "exp_mjd",
# Visit ID
"visit_id" : "visitId",
# Field ID
"field_id" : "fieldId",
# Field RA in degrees
"field_RA_deg" : "fieldRA_deg",
# Field Dec in degrees
"field_Dec_deg" : "fieldDec_deg",
# Night number
"night": "night",
# RA in degrees
"RA_deg" : "RA_deg",
# Dec in degrees
"Dec_deg" : "Dec_deg",
# Observatory code
"observatory_code" : "code",
# Observer's x coordinate in AU
"obs_x_au" : "HEclObsy_X_au",
# Observer's y coordinate in AU
"obs_y_au" : "HEclObsy_Y_au",
# Observer's z coordinate in AU
"obs_z_au" : "HEclObsy_Z_au",
# Magnitude (UNUSED)
"mag" : "VMag",
### Truth Parameters
# Object name
"name" : "designation",
# Observer-object distance in AU
"Delta_au" : "Delta_au",
# Sun-object distance in AU (heliocentric distance)
"r_au" : "r_au",
# Object's x coordinate in AU
"obj_x_au" : "HEclObj_X_au",
# Object's y coordinate in AU
"obj_y_au" : "HEclObj_Y_au",
# Object's z coordinate in AU
"obj_z_au" : "HEclObj_Z_au",
# Object's x velocity in AU per day
"obj_dx/dt_au_p_day" : "HEclObj_dX/dt_au_p_day",
# Object's y velocity in AU per day
"obj_dy/dt_au_p_day" : "HEclObj_dY/dt_au_p_day",
# Object's z velocity in AU per day
"obj_dz/dt_au_p_day" : "HEclObj_dZ/dt_au_p_day",
# Semi-major axis
"a_au" : "a_au",
# Inclination
"i_deg" : "i_deg",
# Eccentricity
"e" : "e",
}
|
[
"moeyensj@gmail.com"
] |
moeyensj@gmail.com
|
348970a0f4e5c0d7929ac752e3078f95f5443c3a
|
6e5ab77fee1fb4a0310213dd8c6dd8601828b1b9
|
/Algorithm/Swea/D1_6230.py
|
11bc95295c19530488c6fba37d18d628e6562027
|
[] |
no_license
|
hongyong3/TIL
|
36d031c0da9e3e6db3eebb977bd3e12df00a849f
|
7f1492128e957a78fc95b255f4f7f2978161e471
|
refs/heads/master
| 2023-08-19T09:16:03.231757
| 2023-08-18T09:38:47
| 2023-08-18T09:38:47
| 162,100,258
| 1
| 0
| null | 2023-02-11T00:52:32
| 2018-12-17T08:42:42
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 263
|
py
|
data = [88, 30, 61, 55, 95]
for i in range(5):
if data[i] >= 60:
print("{}번 학생은 {}점으로 {}입니다.".format(i + 1, data[i], "합격"))
else:
print("{}번 학생은 {}점으로 {}입니다.".format(i + 1, data[i], "불합격"))
|
[
"chy66822495@gmail.com"
] |
chy66822495@gmail.com
|
11663c0f28cb942a4a9a90c69f77584703d14b96
|
5633afdce5fb2209f130bb0cd2c478a35bd75957
|
/168-理解function.py
|
62a304b055863e54e0b2122b6167d3374a9902b5
|
[] |
no_license
|
weiyinfu/learnKeras
|
36a68e7f9966bf2ac53bb4767b3754864fe6087d
|
c011005bf760053e9085a0171702e54d19cafebc
|
refs/heads/master
| 2023-03-06T18:06:32.811186
| 2021-02-22T06:05:57
| 2021-02-22T06:05:57
| 147,919,920
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
import keras.backend as K
import keras
import tensorflow as tf
"""
Keras' K.function provides a convenient way to evaluate the values of a few tensors.
"""
input = keras.layers.Input((None,))
output = tf.multiply(input, input)
output2 = keras.layers.multiply([input, input])
called_count = K.variable(0.0)
f = K.function([input], [output, output2, called_count], [K.update_add(called_count, 1)])
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print(f([[3, 4, 5]]))
print(f([[3, 4, 5]]))
o, oo, c = sess.run([output, output2, called_count], feed_dict={
input: [[3, 4, 5]]
})
print(o, oo, c)
|
[
"weiyinfu.weiyinfu@bytedance.com"
] |
weiyinfu.weiyinfu@bytedance.com
|
90f6b044e0738dd4144dea41df919f7fe76752a2
|
167c6226bc77c5daaedab007dfdad4377f588ef4
|
/python/ql/test/2/library-tests/PointsTo/import_time/module.py
|
0e14ce6e5d765b8d724c6890d6495ef311dde746
|
[
"MIT",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] |
permissive
|
github/codeql
|
1eebb449a34f774db9e881b52cb8f7a1b1a53612
|
d109637e2d7ab3b819812eb960c05cb31d9d2168
|
refs/heads/main
| 2023-08-20T11:32:39.162059
| 2023-08-18T14:33:32
| 2023-08-18T14:33:32
| 143,040,428
| 5,987
| 1,363
|
MIT
| 2023-09-14T19:36:50
| 2018-07-31T16:35:51
|
CodeQL
|
UTF-8
|
Python
| false
| false
| 152
|
py
|
import sys
os_test = sys.platform == "linux2"
version_test = sys.version_info < (3,)
if version_test:
version_2 = True
else:
version_3 = False
|
[
"mark@hotpy.org"
] |
mark@hotpy.org
|
132233e2f673ca46ed09870bc39f3069ada4e184
|
d79c4fa73bd26550cfaa5d1a3259b20bda1fba46
|
/Tests/Services/test_distance_service.py
|
79975e946cb9b7d65f9ff492746e0f981a60d6c6
|
[] |
no_license
|
dev-11/coding-test
|
37e8372b4eff1b6d5c9b0bd2c0c13f88d0940736
|
7bd56b00d48a0419206b99170075fe34183830ee
|
refs/heads/master
| 2021-07-11T02:49:44.832998
| 2021-03-28T12:08:47
| 2021-03-28T12:08:47
| 233,877,609
| 0
| 0
| null | 2020-01-14T15:52:20
| 2020-01-14T15:52:19
| null |
UTF-8
|
Python
| false
| false
| 2,074
|
py
|
import unittest
from Services import DistanceService
from Tests.TestEnvironment import get_test_stores
class DistanceServiceTests(unittest.TestCase):
def test_get_stores_within_range_returns_every_store_in_one_mile_range(self):
a = [51.460903, -0.301702]
stores = get_test_stores()
service = DistanceService()
result = service.get_stores_within_range(a, stores, 1)
self.assertEqual(len(result), 1)
self.assertEqual(result[0]['geolocation']['latitude'], 51.463437)
self.assertEqual(result[0]['geolocation']['longitude'], -0.288602)
self.assertEqual(result[0]['name'], 'Richmond')
self.assertEqual(result[0]['postcode'], 'TW9 1YB')
def test_get_stores_within_range_returns_every_store_in_five_miles_range(self):
a = [51.460903, -0.301702]
stores = get_test_stores()
service = DistanceService()
result = service.get_stores_within_range(a, stores, 5)
self.assertEqual(len(result), 4)
self.assertEqual(result[0]['geolocation']['latitude'], 51.405065)
self.assertEqual(result[0]['geolocation']['longitude'], -0.238117)
self.assertEqual(result[0]['name'], 'New_Malden')
self.assertEqual(result[0]['postcode'], 'SW20 0JQ')
self.assertEqual(result[1]['geolocation']['latitude'], 51.442892)
self.assertEqual(result[1]['geolocation']['longitude'], -0.412804)
self.assertEqual(result[1]['name'], 'Feltham')
self.assertEqual(result[1]['postcode'], 'TW13 4EX')
self.assertEqual(result[2]['geolocation']['latitude'], 51.482172)
self.assertEqual(result[2]['geolocation']['longitude'], -0.314343)
self.assertEqual(result[2]['name'], 'Brentford')
self.assertEqual(result[2]['postcode'], 'TW8 8JW')
self.assertEqual(result[3]['geolocation']['latitude'], 51.463437)
self.assertEqual(result[3]['geolocation']['longitude'], -0.288602)
self.assertEqual(result[3]['name'], 'Richmond')
self.assertEqual(result[3]['postcode'], 'TW9 1YB')
|
[
"otto@masterbranch.io"
] |
otto@masterbranch.io
|
9e20c44700047479c01f6cdeb7fbfcafb618f3b9
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-vod/huaweicloudsdkvod/v1/model/show_asset_meta_response.py
|
17beda5d5d2fc1bd79e8b76d4ed6bfa0f640b853
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394
| 2021-11-12T03:20:11
| 2021-11-12T03:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,451
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowAssetMetaResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'asset_info_array': 'list[AssetInfo]',
'is_truncated': 'int',
'total': 'int'
}
attribute_map = {
'asset_info_array': 'asset_info_array',
'is_truncated': 'is_truncated',
'total': 'total'
}
def __init__(self, asset_info_array=None, is_truncated=None, total=None):
"""ShowAssetMetaResponse - a model defined in huaweicloud sdk"""
super(ShowAssetMetaResponse, self).__init__()
self._asset_info_array = None
self._is_truncated = None
self._total = None
self.discriminator = None
if asset_info_array is not None:
self.asset_info_array = asset_info_array
if is_truncated is not None:
self.is_truncated = is_truncated
if total is not None:
self.total = total
@property
def asset_info_array(self):
"""Gets the asset_info_array of this ShowAssetMetaResponse.
        List of media asset information.
:return: The asset_info_array of this ShowAssetMetaResponse.
:rtype: list[AssetInfo]
"""
return self._asset_info_array
@asset_info_array.setter
def asset_info_array(self, asset_info_array):
"""Sets the asset_info_array of this ShowAssetMetaResponse.
        List of media asset information.
:param asset_info_array: The asset_info_array of this ShowAssetMetaResponse.
:type: list[AssetInfo]
"""
self._asset_info_array = asset_info_array
@property
def is_truncated(self):
"""Gets the is_truncated of this ShowAssetMetaResponse.
        Whether the list is truncated. Possible values: - 1: this query did not return all results. - 0: this query returned all results.
:return: The is_truncated of this ShowAssetMetaResponse.
:rtype: int
"""
return self._is_truncated
@is_truncated.setter
def is_truncated(self, is_truncated):
"""Sets the is_truncated of this ShowAssetMetaResponse.
        Whether the list is truncated. Possible values: - 1: this query did not return all results. - 0: this query returned all results.
:param is_truncated: The is_truncated of this ShowAssetMetaResponse.
:type: int
"""
self._is_truncated = is_truncated
@property
def total(self):
"""Gets the total of this ShowAssetMetaResponse.
        Total number of media assets found. > Currently at most 20,000 assets can be counted; to query the exact total, please [submit a service ticket](https://console.huaweicloud.com/ticket/?#/ticketindex/business?productTypeId=462902cc39a04ab3a429df872021f970).
:return: The total of this ShowAssetMetaResponse.
:rtype: int
"""
return self._total
@total.setter
def total(self, total):
"""Sets the total of this ShowAssetMetaResponse.
        Total number of media assets found. > Currently at most 20,000 assets can be counted; to query the exact total, please [submit a service ticket](https://console.huaweicloud.com/ticket/?#/ticketindex/business?productTypeId=462902cc39a04ab3a429df872021f970).
:param total: The total of this ShowAssetMetaResponse.
:type: int
"""
self._total = total
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowAssetMetaResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
af062882db668d2127cd9f91c3691c449ef42328
|
12c41119156dd3783c3801e07f5f973289f26bb0
|
/aliyun-python-sdk-green/aliyunsdkgreen/request/v20170823/DescribeWebsiteScanResultRequest.py
|
f09d346c2c80b7eb9219b58dbf61434df7b191ec
|
[
"Apache-2.0"
] |
permissive
|
toywei/aliyun-openapi-python-sdk
|
bfe0893da38af9b222ce072fd7587d5b6cdce204
|
ce8f683e3201fca8c473512267f50a34f71e31d3
|
refs/heads/master
| 2020-08-07T23:42:00.053692
| 2019-10-08T08:50:21
| 2019-10-08T08:50:21
| 213,626,962
| 1
| 0
|
NOASSERTION
| 2019-10-08T11:43:15
| 2019-10-08T11:43:15
| null |
UTF-8
|
Python
| false
| false
| 2,640
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeWebsiteScanResultRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Green', '2017-08-23', 'DescribeWebsiteScanResult','green')
def get_TotalCount(self):
return self.get_query_params().get('TotalCount')
def set_TotalCount(self,TotalCount):
self.add_query_param('TotalCount',TotalCount)
def get_SubServiceModule(self):
return self.get_query_params().get('SubServiceModule')
def set_SubServiceModule(self,SubServiceModule):
self.add_query_param('SubServiceModule',SubServiceModule)
def get_SiteUrl(self):
return self.get_query_params().get('SiteUrl')
def set_SiteUrl(self,SiteUrl):
self.add_query_param('SiteUrl',SiteUrl)
def get_SourceIp(self):
return self.get_query_params().get('SourceIp')
def set_SourceIp(self,SourceIp):
self.add_query_param('SourceIp',SourceIp)
def get_HandleStatus(self):
return self.get_query_params().get('HandleStatus')
def set_HandleStatus(self,HandleStatus):
self.add_query_param('HandleStatus',HandleStatus)
def get_Domain(self):
return self.get_query_params().get('Domain')
def set_Domain(self,Domain):
self.add_query_param('Domain',Domain)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_CurrentPage(self):
return self.get_query_params().get('CurrentPage')
def set_CurrentPage(self,CurrentPage):
self.add_query_param('CurrentPage',CurrentPage)
def get_Label(self):
return self.get_query_params().get('Label')
def set_Label(self,Label):
self.add_query_param('Label',Label)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
4aa2a44af09dce4919240097d2cf50df5c2286cc
|
56f155db28b5703786a08fef0ecf821aefb6ffe5
|
/lib/testmill/test/test_images.py
|
f43dc574d9685d3d89f1196cbad690c754365c2e
|
[
"Apache-2.0"
] |
permissive
|
h4ckl4bm3/testmill
|
595c30facec943b3593febe080b1e6602e82dee2
|
607d5622f14785e1b2f785e162ae862c5e638c5f
|
refs/heads/master
| 2021-05-27T08:58:17.899271
| 2013-04-10T15:40:12
| 2013-04-10T15:41:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,035
|
py
|
# Copyright 2012-2013 Ravello Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
import os
from testmill.main import main
from testmill.test import *
@systemtest
class TestImages(TestSuite):
"""Run some basic test on the standard images."""
def test_images(self):
args = get_common_args()
args += ['run', '-m', 'platformtest.yml',
'platformtest', 'sh check_image.sh']
retval = main(args)
assert retval == 0
|
[
"geertj@gmail.com"
] |
geertj@gmail.com
|
94a57d37ee01ad48525f12206f52a6d3317127e3
|
04164e028417ff8472b9f2bfec0ec45b0888f743
|
/development/pysrc/extract.py
|
1b6bc09351d99ac31b3285f0ed8f27a28be337e3
|
[] |
no_license
|
Huaguiyuan/quantum-honeycomp
|
c2b810ff5f5e25d41b1f0c1c1ff7ae500b04dc31
|
50deb0e59fffe4031f05094572552ca5be59e741
|
refs/heads/master
| 2020-03-22T19:09:58.148862
| 2018-07-08T19:51:58
| 2018-07-08T19:51:58
| 140,510,217
| 1
| 2
| null | 2018-07-11T02:20:32
| 2018-07-11T02:20:32
| null |
UTF-8
|
Python
| false
| false
| 2,779
|
py
|
# routines to extract channels from a matrix
from __future__ import division
import numpy as np
def spin_channel(m,spin_column=None,spin_row=None,has_spin=True):
"""Extract a channel from a matrix"""
if not has_spin: return m # return initial
if (spin_row is None) or (spin_column is None): return m # return initial
n = m.shape[0] # shape of the matrix
n2 = n//2 # number of orbitals
  out = np.zeros((n,n),dtype=complex)  # np.complex is removed in modern NumPy
if spin_column=="up": ii = 0
else: ii = 1
if spin_row=="up": jj = 0
else: jj = 1
for i in range(n2):
for j in range(n2): out[i,j] = m[2*i+ii,2*j+jj]
return np.matrix(out)
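# For example (illustrative): spin_channel(m, spin_column="up", spin_row="up")
# returns an n x n matrix whose top-left n/2 x n/2 block holds the up-up
# entries m[2i, 2j].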
def swave(m):
"""Extract the swave pairing from a matrix, assuming
the Nambu spinor basis"""
n = m.shape[0]//4 # number of sites
  ds = np.zeros(n,dtype=complex) # pairing
for i in range(n):
ds[i] = m[4*i,4*i+2] # get the pairing
return ds
def mz(m):
"""Extract the z component of the magnetism, assume spin degree of freedom"""
n = m.shape[0]//2 # number of sites
ds = np.zeros(n).real # pairing
for i in range(n):
ds[i] = (m[2*i+1,2*i+1] - m[2*i,2*i]).real/2. # get the pairing
return ds
def mx(m):
"""Extract the z component of the magnetism, assume spin degree of freedom"""
n = m.shape[0]//2 # number of sites
ds = np.zeros(n).real # pairing
for i in range(n):
ds[i] = m[2*i,2*i+1].real
return ds
def my(m):
"""Extract the z component of the magnetism, assume spin degree of freedom"""
n = m.shape[0]//2 # number of sites
ds = np.zeros(n).real # pairing
for i in range(n):
ds[i] = -m[2*i,2*i+1].imag
return ds
def onsite(m,has_spin=True):
"""Extract the z component of the magnetism, assume spin degree of freedom"""
if has_spin: # has spin degree of freedom
n = m.shape[0]//2 # number of sites
ds = np.zeros(n).real # pairing
for i in range(n):
ds[i] = (m[2*i,2*i].real + m[2*i+1,2*i+1].real)/2.
return ds
else:
n = m.shape[0] # number of sites
ds = np.zeros(n).real # pairing
for i in range(n):
ds[i] = m[i,i].real
return ds
def hopping_spinful(m,cutoff=0.001):
"""Extract hopping"""
n = m.shape[0]//2 # number sites
ii = []
jj = []
ts = []
for i in range(n):
for j in range(i,n):
t = np.abs(m[2*i,2*j]) + np.abs(m[2*i+1,2*j+1])
if t>cutoff:
ii.append(i)
jj.append(j)
ts.append(t)
return ii,jj,np.array(ts) # return pairs
def hopping_spinless(m,cutoff=0.001):
"""Extract hopping"""
n = m.shape[0] # number of sites
ii = []
jj = []
ts = []
for i in range(n):
for j in range(i,n):
t = np.abs(m[i,j])
if t>cutoff:
ii.append(i)
jj.append(j)
ts.append(t)
return ii,jj,np.array(ts) # return pairs
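# Minimal usage sketch (added for illustration, not part of the original module):
if __name__ == "__main__":
    h = np.diag([0.5, -0.5, 0.3, -0.3])  # two spinful sites, diagonal Hamiltonian
    print(mz(h))  # expected output: [-0.5 -0.3]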
|
[
"jose.luis.lado@gmail.com"
] |
jose.luis.lado@gmail.com
|
72b7bb7acba687c0f6f14413cd6d43962e8a3351
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/common/Lib/encodings/iso2022_jp_ext.py
|
79e0c5be45183dd71284af4365cf20ec67ea90b1
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 964
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/common/Lib/encodings/iso2022_jp_ext.py
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_ext')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(name='iso2022_jp_ext', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
|
[
"StranikS_Scan@mail.ru"
] |
StranikS_Scan@mail.ru
|
93f01551fc71c691ab7c4d7b49966cb6e2af604c
|
e4200b764d0b4ffba65180e54cf84b30ee84efcc
|
/selfdrive/boardd/boardd_setup.py
|
f987c7aa29e08bc7bdd5e335dc38ac0c14730201
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
kegman/openpilot
|
c9ba96a72d905956f02c684e065091e023942883
|
54a8614b5a6451154817a4c6c86141c96103ae47
|
refs/heads/kegman-0.7
| 2022-05-22T17:07:16.656336
| 2020-01-23T16:40:55
| 2020-01-23T16:40:55
| 229,979,925
| 105
| 212
|
MIT
| 2022-03-13T05:47:51
| 2019-12-24T17:27:11
|
C
|
UTF-8
|
Python
| false
| false
| 1,019
|
py
|
import subprocess
from distutils.core import Extension, setup
from Cython.Build import cythonize
from common.cython_hacks import BuildExtWithoutPlatformSuffix
from common.basedir import BASEDIR
import os
PHONELIBS = os.path.join(BASEDIR, 'phonelibs')
ARCH = subprocess.check_output(["uname", "-m"], encoding='utf8').rstrip()
ARCH_DIR = 'x64' if ARCH == "x86_64" else 'aarch64'
setup(name='Boardd API Implementation',
cmdclass={'build_ext': BuildExtWithoutPlatformSuffix},
ext_modules=cythonize(
Extension(
"boardd_api_impl",
libraries=[':libcan_list_to_can_capnp.a', ':libcapnp.a', ':libkj.a'] if ARCH == "x86_64" else [':libcan_list_to_can_capnp.a', 'capnp', 'kj'],
library_dirs=[
'./',
PHONELIBS + '/capnp-cpp/' + ARCH_DIR + '/lib/',
PHONELIBS + '/capnp-c/' + ARCH_DIR + '/lib/'
],
sources=['boardd_api_impl.pyx'],
language="c++",
extra_compile_args=["-std=c++11"],
)
)
)
|
[
"8837066+kegman@users.noreply.github.com"
] |
8837066+kegman@users.noreply.github.com
|
30e99cd125126168a62391d1dd2870494f66f8d3
|
45de7d905486934629730945619f49281ad19359
|
/xlsxwriter/test/comparison/test_optimize11.py
|
419bdafcf7b28b46a1cc0c98248bc2b40b67c8d9
|
[
"BSD-2-Clause"
] |
permissive
|
jmcnamara/XlsxWriter
|
599e1d225d698120ef931a776a9d93a6f60186ed
|
ab13807a1be68652ffc512ae6f5791d113b94ee1
|
refs/heads/main
| 2023-09-04T04:21:04.559742
| 2023-08-31T19:30:52
| 2023-08-31T19:30:52
| 7,433,211
| 3,251
| 712
|
BSD-2-Clause
| 2023-08-28T18:52:14
| 2013-01-04T01:07:06
|
Python
|
UTF-8
|
Python
| false
| false
| 2,279
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2023, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("optimize11.xlsx")
def test_create_file_no_close(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(
self.got_filename, {"constant_memory": True, "in_memory": False}
)
for i in range(1, 10):
worksheet = workbook.add_worksheet()
worksheet.write("A1", "Hello 1")
worksheet.write("A2", "Hello 2")
worksheet.write("A4", "Hello 3")
workbook.close()
self.assertExcelEqual()
def test_create_file_with_close(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(
self.got_filename, {"constant_memory": True, "in_memory": False}
)
for i in range(1, 10):
worksheet = workbook.add_worksheet()
worksheet.write("A1", "Hello 1")
worksheet.write("A2", "Hello 2")
worksheet.write("A4", "Hello 3")
worksheet._opt_close()
workbook.close()
self.assertExcelEqual()
def test_create_file_with_reopen(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(
self.got_filename, {"constant_memory": True, "in_memory": False}
)
for i in range(1, 10):
worksheet = workbook.add_worksheet()
worksheet.write("A1", "Hello 1")
worksheet._opt_close()
worksheet._opt_reopen()
worksheet.write("A2", "Hello 2")
worksheet._opt_close()
worksheet._opt_reopen()
worksheet.write("A4", "Hello 3")
worksheet._opt_close()
worksheet._opt_reopen()
worksheet._opt_close()
workbook.close()
self.assertExcelEqual()
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
60970ab65f2384908efc1c74b7fa6fdefbaadf46
|
b6a48f9a6158bcb7e6fc75e5eacaef19250fc4c5
|
/cosmos/ingestion/ingest/process/detection/src/torch_model/model/utils/config_manager.py
|
c5af72c9c0d77749c41e4e4151ac91a4091dc749
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
UW-COSMOS/Cosmos
|
dcde3be6534e411a20fcf1ff36e422fc8af2ac8a
|
5ed4a4c149e03773690668437d2f93aa532453c6
|
refs/heads/master
| 2023-09-01T18:03:20.525760
| 2023-08-31T13:56:21
| 2023-08-31T13:56:21
| 159,849,583
| 39
| 14
| null | 2023-09-13T14:39:45
| 2018-11-30T16:24:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,242
|
py
|
import yaml
class Struct:
def __init__(self, **entries):
for key, value in entries.items():
value2 = (Struct(**value) if isinstance(value, dict) else value)
self.__dict__[key] = value2
class ConfigManager:
"""
Basic config singleton for easily accessing config parameters
"""
class __Singleton:
def __init__(self, fp):
"""
Initialize a singleton config object
:param fp:
"""
with open(fp) as fh:
config = yaml.load(fh, yaml.Loader)
for key, value in config.items():
value2 = (Struct(**value) if isinstance(value, dict) else value)
self.__dict__[key] = value2
def merge(self, data):
for key in data.keys():
                self.__dict__[key] = data[key]
instance = None
def __init__(self, fp=None):
if (ConfigManager.instance is None) and (fp is not None):
ConfigManager.instance = ConfigManager.__Singleton(fp)
def __getattr__(self, item):
return getattr(ConfigManager.instance, item)
def __setattr__(self, key, value):
setattr(ConfigManager.instance, key, value)
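# Minimal usage sketch (illustrative; "config.yaml" and its keys are hypothetical):
#   ConfigManager("config.yaml")   # first call loads and caches the config
#   cfg = ConfigManager()          # later calls reuse the same singleton
#   print(cfg.model.depth)         # nested dicts are reachable as attributes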
|
[
"ankur.goswami12@gmail.com"
] |
ankur.goswami12@gmail.com
|
90472ae1500003128c099c82b18c65cd294fb594
|
56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e
|
/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544840/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_67/run_cfg.py
|
b7a42574ec89cfa3b75f992ff17c74f8999faf28
|
[] |
no_license
|
rmanzoni/HTT
|
18e6b583f04c0a6ca10142d9da3dd4c850cddabc
|
a03b227073b2d4d8a2abe95367c014694588bf98
|
refs/heads/master
| 2016-09-06T05:55:52.602604
| 2014-02-20T16:35:34
| 2014-02-20T16:35:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,539
|
py
|
import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544840/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1297.root',
'/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1298.root',
'/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1299.root',
'/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_13.root',
'/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_130.root')
)
|
[
"riccardo.manzoni@cern.ch"
] |
riccardo.manzoni@cern.ch
|
97c3730c522f14d3e70b194878b0d860135c6b52
|
def06466dadf32385b083615e46a07188ef841c2
|
/web_app/primes/primes/wsgi.py
|
4839f01dfbebfa726790474ac354f5d2b5730dc8
|
[] |
no_license
|
ChillarAnand/just-queue-it
|
ead51fa0fa14bca6276c452b32a8d4e382e37f95
|
c58a214507b429d8854a1049e4b5ed6377435a82
|
refs/heads/master
| 2020-05-23T14:05:38.511931
| 2015-02-19T21:42:34
| 2015-02-19T21:42:34
| 31,038,556
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,560
|
py
|
"""
WSGI config for primes project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from os.path import abspath, dirname
from sys import path
SITE_ROOT = dirname(dirname(abspath(__file__)))
path.append(SITE_ROOT)
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "jajaja.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "primes.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
[
"anand21nanda@gmail.com"
] |
anand21nanda@gmail.com
|
76365823d072d54826924eb954f54f08ee1178c8
|
616c3c02be31b9ae4d06bd7c5a8d4a2e7c446aa1
|
/401.二进制手表.py
|
c1394764a8ed4675d2bc74aff7690c1c59620be7
|
[] |
no_license
|
L1nwatch/leetcode-python
|
8b7c47c04ee9400d50d8b0764a544a0463df8f06
|
0484cbc3273ada25992c72105658cd67411c5d39
|
refs/heads/master
| 2023-01-11T14:53:15.339276
| 2023-01-11T05:24:43
| 2023-01-11T05:24:43
| 194,516,548
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 467
|
py
|
#
# @lc app=leetcode.cn id=401 lang=python3
#
# [401] Binary Watch
#
# @lc code=start
from typing import List
class Solution:
def readBinaryWatch(self, turnedOn: int) -> List[str]:
result = list()
for hour in range(12):
bin_hour_1 = bin(hour).count("1")
for minute in range(60):
if bin_hour_1 + bin(minute).count("1") == turnedOn:
result.append(f"{hour}:{minute:0>2d}")
return result
# @lc code=end
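# Example (illustrative): readBinaryWatch(1) returns
# ["0:01", "0:02", "0:04", "0:08", "0:16", "0:32", "1:00", "2:00", "4:00", "8:00"]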
|
[
"watch1602@gmail.com"
] |
watch1602@gmail.com
|
2d35ba558e65b2aa0a4c270411cd0a7207189d72
|
9cf434b6ee59ab22496ee031fb4ab145bbaff1a2
|
/tranque_v1.8.4_source/backend/src/targets/migrations/0025_threshold_kind.py
|
9da935043934aadd20fada38b72528d8345ff01b
|
[] |
no_license
|
oliverhernandezmoreno/SourcesOH
|
f2ff1a5e3377f0ac1fb8b3153d99d0ee703700b7
|
5d9ca5ab1caceafd4d11207139c9e56210156ef8
|
refs/heads/master
| 2023-01-05T02:51:25.172103
| 2020-08-27T14:39:34
| 2020-08-27T14:39:34
| 64,422,812
| 0
| 1
| null | 2022-12-30T17:25:10
| 2016-07-28T19:33:44
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 402
|
py
|
# Generated by Django 2.1 on 2019-06-04 19:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('targets', '0024_target_remote'),
]
operations = [
migrations.AddField(
model_name='threshold',
name='kind',
field=models.SlugField(blank=True, max_length=255, null=True),
),
]
|
[
"oliverhernandezmoreno@gmail.com"
] |
oliverhernandezmoreno@gmail.com
|
1180c2df653973dfeb4478f34ad3c39fd22cab39
|
eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7
|
/google/cloud/workflows/v1beta/workflows-v1beta-py/google/cloud/workflows_v1beta/types/__init__.py
|
66aec79fe2b77723f73afe591aafa1edbbb647c0
|
[
"Apache-2.0"
] |
permissive
|
Tryweirder/googleapis-gen
|
2e5daf46574c3af3d448f1177eaebe809100c346
|
45d8e9377379f9d1d4e166e80415a8c1737f284d
|
refs/heads/master
| 2023-04-05T06:30:04.726589
| 2021-04-13T23:35:20
| 2021-04-13T23:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,050
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .workflows import (
CreateWorkflowRequest,
DeleteWorkflowRequest,
GetWorkflowRequest,
ListWorkflowsRequest,
ListWorkflowsResponse,
OperationMetadata,
UpdateWorkflowRequest,
Workflow,
)
__all__ = (
'CreateWorkflowRequest',
'DeleteWorkflowRequest',
'GetWorkflowRequest',
'ListWorkflowsRequest',
'ListWorkflowsResponse',
'OperationMetadata',
'UpdateWorkflowRequest',
'Workflow',
)
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
03756a7acb99e8907d2bf21186f702c06e303a3b
|
731c136992f98cab61508b9e5661afbd491962b6
|
/Sort/Sort.py
|
2f1338d9504c5dc5d5304e321cc3d067484b1d45
|
[] |
no_license
|
yangze01/py_LeetCode
|
c311235dbe1053c68694aea04fe29296ccb3a6e2
|
2b7213d00e2e482379a2f160b0d8e267a7951599
|
refs/heads/master
| 2021-01-20T06:03:53.852486
| 2017-12-08T01:30:26
| 2017-12-08T01:30:26
| 101,479,228
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,980
|
py
|
#coding=utf8
import sys
"""
Algorithm review: the eight classic sorting algorithms
"""
def bubble_sort(list):
    """
    Bubble sort
    :param list:
    :return:
    """
    length = len(list)
    # outer pass
    for index in range(length):
        # inner pass
        for j in range(1, length - index):
            if list[j-1] > list[j]:
                # swap the two elements
                list[j-1], list[j] = list[j], list[j-1]
    return list
def bubble_sort_flag(list):
    """
    Improved bubble sort: if the list is already in order,
    skip further passes and return the result directly.
    :param list:
    :return:
    """
    length = len(list)
    for index in range(length):
        # early-exit flag
        flag = True
        for j in range(1, length - index):
            if list[j - 1] > list[j]:
                list[j - 1], list[j] = list[j], list[j - 1]
                flag = False
        if flag:
            return list
    return list
def selection_sort(list):
"""
    Selection sort: on each pass, find the smallest (or largest) element
    in the remaining sequence and place it at the start of the unsorted part.
:param list:
:return:
"""
n = len(list)
for i in range(0, n):
min_index = i
for j in range(i + 1, n):
if list[j] < list[min_index]:
min_index = j
list[min_index], list[i] = list[i], list[min_index]
return list
def insert_sort(list):
"""
插入排序,通过构建有序序列,对于未排序的数据,
在已排序序列中从后向前扫描,找到相应位置并插入。
步骤
1. 从第一个元素开始,该元素可以认为已经被排序
2. 取出下一个元素,在已经排序的序列中从后向前扫描
3. 如果该元素(已排序)大于新元素,将该元素移到下一位置
4. 重复步骤3, 直到找到已排序的元素小于或者等于新元素的位置
5. 将新元素插入到该位置后
6. 重复步骤2-5
:param list:
:return:
"""
n = len(list)
for i in range(1, n):
# 后一个元素跟前一个元素比较
# 如果比前一个小
if list[i] < list[i - 1]:
# 将这个数取出
temp = list[i]
# 保存下标
index = i
# 从后往前一次比较每个元素
for j in range(i - 1, -1, -1):
# 和比取出元素大的元素交换
if list[j] > temp:
list[j + 1] = list[j]
index = j
else:
break
# 插入元素
list[index] = temp
return list
def insert_sort2(lists):
"""
插入排序
:param lists:
:return:
"""
# 插入排序
count = len(lists)
# 每次遍历已经排好序的部分,生成结果。
for i in range(1, count):
# 记录当前元素
key = lists[i]
j = i - 1
# 从已经排好序的元素开始,遍历当前元素应该插入到哪一个
while j >= 0:
if lists[j] > key:
lists[j + 1] = lists[j]
lists[j] = key
j -= 1
return lists
def shell_sort(lists):
    """
    Shell sort: sort elements a fixed gap apart (skipping over
    equally spaced items), halving the gap each round until it is 1.
    :param lists:
    :return:
    """
    n = len(lists)
    # initial gap
    gap = round(n/2)
    while gap > 0:
        for i in range(gap, n):
            # insertion sort within each gap
            temp = lists[i]
            j = i
            # gapped insertion sort
            while j >= gap and lists[j - gap] > temp:
                lists[j] = lists[j - gap]
                j -= gap
            lists[j] = temp
        # compute the next gap
        gap = round(gap / 2)
    return lists
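# e.g. for a list of length 8 the gap sequence above is 4, 2, 1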
# Recursive merge sort
def merge_sort(lists):
    # a list of length <= 1 is considered sorted
    if len(lists) <= 1:
        return lists
    # split the list in half
    middle = len(lists) // 2
    left = merge_sort(lists[:middle])
    right = merge_sort(lists[middle:])
    # final merge
    return merge(left, right)
# merge two sorted lists
def merge(left, right):
    l, r = 0, 0
    result = []
    while l < len(left) and r < len(right):
        if left[l] < right[r]:
            result.append(left[l])
            l += 1
        else:
            result.append(right[r])
            r += 1
    result += left[l:]
    result += right[r:]
    return result
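# e.g. merge([1, 3], [2, 4]) -> [1, 2, 3, 4]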
# Iterative merge sort
def merge_sort2(lists):
length = len(lists)
step = 1
    # merge with step 1, 2, 4, 8, ..., doubling until the whole list is merged
while step <= length:
offset = step << 1
for index in range(0, length, offset):
merge2(lists, index, min(index+step, length-1), min(index+offset-1, length-1))
step = offset
def merge2(lists, head1, head2, tail2):
    # Merge two sorted ranges: [head1, tail1] and [head2, tail2]
tail1 = head2 - 1
start = head1
index = 0
tmp = [0] * (tail2-head1+1)
while head1 <= tail1 or head2 <= tail2:
if head1 > tail1:
tmp[index] = lists[head2]
elif head2 > tail2:
tmp[index] = lists[head1]
else:
if lists[head1] <= lists[head2]:
tmp[index] = lists[head1]
else:
tmp[index] = lists[head2]
if head1 <= tail1 and tmp[index] == lists[head1]:
head1 += 1
else:
head2 += 1
index += 1
for i in range(start, tail2 + 1):
lists[i] = tmp[i-start]
# Quick sort, recursive (in-place partitioning)
def quick_sort(lists, left, right):
if left >= right:
return lists
key = lists[left]
low = left
high = right
while left < right:
while left < right and lists[right] >= key:
right -= 1
lists[left] = lists[right]
while left < right and lists[left] <= key:
left += 1
lists[right] = lists[left]
lists[right] = key
quick_sort(lists, low, left - 1)
quick_sort(lists, left + 1, high)
return lists
# Quick sort (partition into lists)
def quick_sort2(lists):
less = []
pivotList = []
more = []
    # recursion base case
    if len(lists) <= 1:
        return lists
    else:
        # use the first value as the pivot
        pivot = lists[0]
        for i in lists:
            # values smaller than the pivot go into less
            if i < pivot:
                less.append(i)
            # values larger than the pivot go into more
            elif i > pivot:
                more.append(i)
            else:
                pivotList.append(i)
less = quick_sort2(less)
more = quick_sort2(more)
return less + pivotList + more
def adjust_heap(lists, i, size):
    lchild = 2 * i + 1  # index of i's left child
    rchild = 2 * i + 2  # index of i's right child
    max = i
    if i <= size/2:
        if lchild < size and lists[lchild] > lists[max]:
            max = lchild
        if rchild < size and lists[rchild] > lists[max]:
            max = rchild
        if max != i:
            lists[i], lists[max] = lists[max], lists[i]
            adjust_heap(lists, max, size)  # re-heapify so the subtree rooted at max remains a heap
def build_heap(lists, size):
for i in range(0, (int(size/2)))[::-1]:
adjust_heap(lists, i, size)
def heap_sort(lists):
size = len(lists)
build_heap(lists, size)
for i in range(0, size)[::-1]:
lists[0], lists[i] = lists[i], lists[0]
adjust_heap(lists, 0, i)
return lists
if __name__ == "__main__":
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("bubble_sort")
print(bubble_sort(lists))
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("bubble_sort2")
print(bubble_sort_flag(lists))
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("selection sort")
    print(selection_sort(lists))
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("insert sort")
print(insert_sort2(lists))
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("shell sort")
print(shell_sort(lists))
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("merge sort")
print(merge_sort(lists))
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("merge sort2")
merge_sort2(lists)
print(lists)
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("quick sort")
print(quick_sort(lists, 0, len(lists)-1))
lists = [7, 13, 3, 1, 5, 10, 2, 20]
print("heap sort")
print(heap_sort(lists))
|
[
"858848101@qq.com"
] |
858848101@qq.com
|
58270a7c262944cd188186aa67ab970c20b93094
|
7bb9f2e6e8993c6104c1109c1c2714e331c09ac2
|
/toolbox/workload/forms.py
|
e1b7346061cdb45ffd663c20b22b963dac2ebc2f
|
[] |
no_license
|
oinopion/toolbox
|
6a775156cb20660f2d92e1d825e4cbabc9df3be7
|
a8df57ee6f2343aaaa512703da74dae5fa3d4cfd
|
refs/heads/master
| 2021-01-19T18:32:54.484006
| 2011-12-22T15:00:48
| 2011-12-22T15:00:48
| 3,033,048
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 978
|
py
|
# encoding: utf-8
from django import forms
from django.forms import fields
from toolbox.workload.models import Assignment
from workload.grid import date_range_inclusive
class AssignmentForm(forms.ModelForm):
    beginning = fields.DateField(
widget=forms.DateInput(attrs={'class': 'date-picker'}))
end = fields.DateField(
widget=forms.DateInput(attrs={'class': 'date-picker'}))
next = fields.CharField(widget=forms.HiddenInput())
class Meta:
exclude = ['date']
model = Assignment
def save(self, commit=True):
        dates = date_range_inclusive(self.cleaned_data['beginning'],
self.cleaned_data['end'],
exclude_weekends=True)
for date in dates:
Assignment.objects.create(**{
'date': date,
'person': self.cleaned_data['person'],
'project': self.cleaned_data['project'],
})
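# Illustrative: with beginning = Monday and end = Friday, save() creates five
# Assignment rows, one per weekday, since date_range_inclusive excludes weekends.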
|
[
"tomek@hauru.eu"
] |
tomek@hauru.eu
|
c33f29d71bbf135ea10ec41aa87c6f4a64b32f7e
|
62179a165ec620ba967dbc20016e890978fbff50
|
/tests/torch/modules/seq2seq/seq2seq_base.py
|
ebe3e13913b31bd5beac08c8b2640c3364faf5eb
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/nncf
|
91fcf153a96f85da166aacb7a70ca4941e4ba4a4
|
c027c8b43c4865d46b8de01d8350dd338ec5a874
|
refs/heads/develop
| 2023-08-24T11:25:05.704499
| 2023-08-23T14:44:05
| 2023-08-23T14:44:05
| 263,687,600
| 558
| 157
|
Apache-2.0
| 2023-09-14T17:06:41
| 2020-05-13T16:41:05
|
Python
|
UTF-8
|
Python
| false
| false
| 3,173
|
py
|
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch import nn
from torch.nn.functional import log_softmax
PAD = 0
class Seq2Seq(nn.Module):
"""
Generic Seq2Seq module, with an encoder and a decoder.
"""
def __init__(self, encoder=None, decoder=None, batch_first=False):
"""
Constructor for the Seq2Seq module.
:param encoder: encoder module
:param decoder: decoder module
:param batch_first: if True the model uses (batch, seq, feature)
tensors, if false the model uses (seq, batch, feature) tensors
"""
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.batch_first = batch_first
def encode(self, inputs, lengths):
"""
Applies the encoder to inputs with a given input sequence lengths.
:param inputs: tensor with inputs (batch, seq_len) if 'batch_first'
else (seq_len, batch)
:param lengths: vector with sequence lengths (excluding padding)
"""
return self.encoder(inputs, lengths)
def decode(self, inputs, context, inference=False):
"""
Applies the decoder to inputs, given the context from the encoder.
:param inputs: tensor with inputs (batch, seq_len) if 'batch_first'
else (seq_len, batch)
:param context: context from the encoder
:param inference: if True inference mode, if False training mode
"""
return self.decoder(inputs, context, inference)
def generate(self, inputs, context, beam_size):
"""
Autoregressive generator, works with SequenceGenerator class.
Executes decoder (in inference mode), applies log_softmax and topK for
inference with beam search decoding.
:param inputs: tensor with inputs to the decoder
:param context: context from the encoder
:param beam_size: beam size for the generator
returns: (words, logprobs, scores, new_context)
words: indices of topK tokens
logprobs: log probabilities of topK tokens
scores: scores from the attention module (for coverage penalty)
new_context: new decoder context, includes new hidden states for
decoder RNN cells
"""
logits, scores, new_context = self.decode(inputs, context, True)
logprobs = log_softmax(logits, dim=-1)
logprobs, words = logprobs.topk(beam_size, dim=-1)
return words, logprobs, scores, new_context
def forward(self, input_encoder, input_enc_len, input_decoder):
raise NotImplementedError
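# Illustrative sketch (not part of the original file): the log_softmax/topk
# step used by generate(), shown on dummy logits with hypothetical shapes.
#   import torch
#   logits = torch.randn(2, 1, 10)               # (batch, seq, vocab)
#   logprobs = log_softmax(logits, dim=-1)
#   logprobs, words = logprobs.topk(5, dim=-1)   # beam_size == 5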
|
[
"noreply@github.com"
] |
openvinotoolkit.noreply@github.com
|
7083f94716d817a0f64bfe154b86ee5261c2109e
|
e17b0ad0ebeb361e5565eb3d12e717f296a7b878
|
/SheetAPI/config_example.py
|
a3fa30cc58a3bb13f0e1eee83397cd254f4f0c2e
|
[] |
no_license
|
easy-rpg/SheetAPI
|
94ea732083c3a7a82577e59e3a882a878772d6eb
|
5542197f8388eed761a15a79c6ccca4fd481ccba
|
refs/heads/master
| 2022-12-11T17:01:16.130002
| 2018-07-05T00:26:48
| 2018-07-05T00:26:48
| 131,898,341
| 1
| 0
| null | 2022-11-22T02:30:09
| 2018-05-02T19:44:34
|
Python
|
UTF-8
|
Python
| false
| false
| 231
|
py
|
# DB Heroku
# import dj_database_url
# DATABASES = {'default': dj_database_url.config(conn_max_age=600, ssl_require=True)}
# DB LOCAL
DB_HOST = "localhost"
DB_PORT = ""
DB_NAME = "DB_NAME"
DB_USER = "DB_USER"
DB_PASSWORD = ""
|
[
"rodrigondec@gmail.com"
] |
rodrigondec@gmail.com
|
40984c2fb2d800dd58b439a634f44d0ceae530a0
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-eihealth/huaweicloudsdkeihealth/v1/model/list_message_statistics_response.py
|
1e7816007f8524b86b1888cb87a5c5deb1613cd5
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,215
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListMessageStatisticsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'count': 'int'
}
attribute_map = {
'count': 'count'
}
def __init__(self, count=None):
"""ListMessageStatisticsResponse
The model defined in huaweicloud sdk
        :param count: total number of messages
:type count: int
"""
super(ListMessageStatisticsResponse, self).__init__()
self._count = None
self.discriminator = None
if count is not None:
self.count = count
@property
def count(self):
"""Gets the count of this ListMessageStatisticsResponse.
        Total number of messages.
:return: The count of this ListMessageStatisticsResponse.
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this ListMessageStatisticsResponse.
        Total number of messages.
:param count: The count of this ListMessageStatisticsResponse.
:type count: int
"""
self._count = count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListMessageStatisticsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
0667f97fb57c8c12e435d2f0e0d28df739385605
|
fcf3c983043273c4e57ac33330efaa0a9e5643a2
|
/model-optimizer/mo/front/mxnet/extractors/utils_test.py
|
070d5323122452347c77478d42a838fab10ae476
|
[
"Apache-2.0"
] |
permissive
|
p3tromyz0n/dldt
|
e7ab259848c90fdffd1395eaf5cf53ecd2b1e2f3
|
669bee86e580cbbc8ef40b440ab195ba2cbf5142
|
refs/heads/2018
| 2020-05-15T13:03:47.748654
| 2019-03-14T10:13:27
| 2019-03-14T10:13:27
| 158,445,061
| 0
| 1
|
Apache-2.0
| 2019-04-19T15:24:15
| 2018-11-20T20:07:50
|
C++
|
UTF-8
|
Python
| false
| false
| 6,599
|
py
|
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from unittest.mock import patch
import mxnet as mx
from mo.front.mxnet.extractors.utils import AttrDictionary
from mo.front.mxnet.extractors.utils import load_params
class TestAttrDictionary(unittest.TestCase):
def testBool(self):
attrs = {
"global_pool": "True"
}
attr_dict = AttrDictionary(attrs)
global_pool = attr_dict.bool("global_pool", False)
self.assertEqual(True, global_pool)
def testBoolAsDigits(self):
attrs = {
"global_pool": "1"
}
attr_dict = AttrDictionary(attrs)
global_pool = attr_dict.bool("global_pool", False)
self.assertEqual(True, global_pool)
def testBoolWithoutAttr(self):
attrs = {
"something": "1"
}
attr_dict = AttrDictionary(attrs)
global_pool = attr_dict.bool("global_pool", False)
self.assertEqual(False, global_pool)
def testStrAttr(self):
attrs = {
"something": "Val"
}
attr_dict = AttrDictionary(attrs)
attr = attr_dict.str("something", "Text")
self.assertEqual("Val", attr)
def testStrAttrWithoutAttr(self):
attrs = {
"something2": "Val"
}
attr_dict = AttrDictionary(attrs)
attr = attr_dict.str("something", "Text")
self.assertEqual("Text", attr)
def testFloatAttr(self):
attrs = {
"something": "0.5"
}
attr_dict = AttrDictionary(attrs)
attr = attr_dict.float("something", 0.1)
self.assertEqual(0.5, attr)
def testFloatWithoutAttr(self):
attrs = {
"something2": "0.5"
}
attr_dict = AttrDictionary(attrs)
attr = attr_dict.float("something", 0.1)
self.assertEqual(0.1, attr)
def testIntAttr(self):
attrs = {
"something": "5"
}
attr_dict = AttrDictionary(attrs)
attr = attr_dict.float("something", 1)
self.assertEqual(5, attr)
def testIntWithoutAttr(self):
attrs = {
"something2": "5"
}
attr_dict = AttrDictionary(attrs)
attr = attr_dict.float("something", 1)
self.assertEqual(1, attr)
def testTupleAttr(self):
attrs = {
"something": "(5,6,7)"
}
attr_dict = AttrDictionary(attrs)
a, b, c = attr_dict.tuple("something", int, (1, 2, 3))
self.assertEqual(5, a)
self.assertEqual(6, b)
self.assertEqual(7, c)
def testTupleWithoutAttr(self):
attrs = {
"something2": "(5,6,7)"
}
attr_dict = AttrDictionary(attrs)
a, b, c = attr_dict.tuple("something", int, (1, 2, 3))
self.assertEqual(1, a)
self.assertEqual(2, b)
self.assertEqual(3, c)
def testTupleWithEmptyTupleAttr(self):
attrs = {
"something2": "()"
}
attr_dict = AttrDictionary(attrs)
a, b = attr_dict.tuple("something", int, (2, 3))
self.assertEqual(2, a)
self.assertEqual(3, b)
def testTupleWithEmptyListAttr(self):
attrs = {
"something2": "[]"
}
attr_dict = AttrDictionary(attrs)
a, b = attr_dict.tuple("something", int, (2, 3))
self.assertEqual(2, a)
self.assertEqual(3, b)
def testListAttr(self):
attrs = {
"something": "5,6,7"
}
attr_dict = AttrDictionary(attrs)
l = attr_dict.list("something", int, [1, 2, 3])
self.assertEqual(5, l[0])
self.assertEqual(6, l[1])
self.assertEqual(7, l[2])
def testListWithoutAttr(self):
attrs = {
"something2": "5,6,7"
}
attr_dict = AttrDictionary(attrs)
l = attr_dict.list("something", int, [1, 2, 3])
self.assertEqual(1, l[0])
self.assertEqual(2, l[1])
self.assertEqual(3, l[2])
class TestUtils(unittest.TestCase):
@patch('mxnet.nd.load')
def test_load_symbol_nodes_from_params(self, mock_nd_load):
mock_nd_load.return_value = {'arg:conv0_weight': mx.nd.array([1, 2], dtype='float32'),
'arg:conv1_weight': mx.nd.array([2, 3], dtype='float32'),
'aux:bn_data_mean': mx.nd.array([5, 6], dtype='float32')}
model_params = load_params("model.params")
self.assertTrue('conv0_weight' in model_params._param_names)
self.assertTrue('conv1_weight' in model_params._param_names)
self.assertTrue('bn_data_mean' in model_params._aux_names)
self.assertEqual([1., 2.], model_params._arg_params['conv0_weight'].asnumpy().tolist())
self.assertEqual([2., 3.], model_params._arg_params['conv1_weight'].asnumpy().tolist())
self.assertEqual([5., 6.], model_params._aux_params['bn_data_mean'].asnumpy().tolist())
@patch('mxnet.nd.load')
def test_load_symbol_nodes_from_args_nd(self, mock_nd_load):
mock_nd_load.return_value = {'conv0_weight': mx.nd.array([1, 2], dtype='float32'),
'conv1_weight': mx.nd.array([2, 3], dtype='float32')}
model_params = load_params("args_model.nd", data_names=('data1', 'data2'))
self.assertTrue('conv0_weight' in model_params._param_names)
self.assertTrue('conv1_weight' in model_params._param_names)
self.assertEqual([1., 2.], model_params._arg_params['conv0_weight'].asnumpy().tolist())
self.assertEqual([2., 3.], model_params._arg_params['conv1_weight'].asnumpy().tolist())
@patch('mxnet.nd.load')
def test_load_symbol_nodes_from_auxs_nd(self, mock_nd_load):
mock_nd_load.return_value = {'bn_data_mean': mx.nd.array([5, 6], dtype='float32')}
model_params = load_params("auxs_model.nd")
self.assertTrue('bn_data_mean' in model_params._aux_names)
self.assertEqual([5., 6.], model_params._aux_params['bn_data_mean'].asnumpy().tolist())
|
[
"44090433+openvino-pushbot@users.noreply.github.com"
] |
44090433+openvino-pushbot@users.noreply.github.com
|
4bf01b7ae1c62134c913b6119a7902635486c910
|
f44c9ab8a25c5f4a2811fc1e77a59cdce2fe588c
|
/analysis/check_audio_problems.py
|
790ab272d5f3822830562109658a06f5fe559128
|
[] |
no_license
|
vejmelkam/StimPackC
|
645e1137ef057379971054778cf45f7a9d89ed07
|
b82dbbf267073017be3202996906fd0fe900e89e
|
refs/heads/master
| 2021-01-10T20:39:14.301366
| 2011-08-24T17:39:54
| 2011-08-24T17:39:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 583
|
py
|
#!/usr/bin/env python
import sys
# read lines from log file
f = open(sys.argv[1], "r")
lines = f.readlines()
f.close()
# find number of instance of "dropping buffer"
found = 0
for line in lines:
if string.find(line, "dropping buffer") >= 0:
found += 1
print("\n **** check audio problems script ****");
print("VLC log contains %d lines." % len(lines))
if found < 20:
print("Audio problems noted %d times, no problem for 4 videos." % found)
else:
print("Audio problems noted %d times !!! Check audio log and question subject." % found)
|
[
"devnull@localhost"
] |
devnull@localhost
|
cdc75150fd9e9b0bb84009d08bf0c00bb9a0f43b
|
05ac6b13a380f1b0ed0676afaae9f8467b86b4a9
|
/livegraph.py
|
d4bb9ed1fad2e85763f54554907e3f0591ba2853
|
[
"MIT"
] |
permissive
|
UncleEngineer/LiveGraph
|
fe6177473dca2bb16815dfb0f65dd3084b72c10e
|
825dc11663fe3dbbfde6a722bf9ec35adac1c7f2
|
refs/heads/main
| 2023-02-13T09:19:44.307744
| 2021-01-25T16:28:16
| 2021-01-25T16:28:16
| 332,809,674
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,419
|
py
|
"""
===============
Embedding in Tk
===============
"""
from tkinter import *
from tkinter import ttk
import random
import tkinter
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
GUI = Tk()
GUI.geometry('600x700')
GUI.wm_title("AutoUpdate Graph")
MF1 = Frame(GUI)
MF1.pack()
# toolbar = NavigationToolbar2Tk(canvas, GUI)
# toolbar.update()
# canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
#canvas.get_tk_widget().place(x=20,y=20)
#toolbar.pack_forget()
def UpdateData():
global y
global canvas
global cv
try:
cv.destroy()
except:
pass
# remove line
# create graph
fig = Figure(figsize=(6, 5), dpi=100)
t = [0,1,2,3,4]
y = []
for i in range(len(t)):
d = random.randint(30,70)
y.append(d)
label = ['A','B','C','D','E']
graph = fig.add_subplot(111)
graph.plot(t, y)
graph.axis([None, None, 0, 100])
    canvas = FigureCanvasTkAgg(fig, master=MF1)  # A tk.DrawingArea.
    canvas.draw()
    # keep a handle to the Tk widget so the next update can destroy it
    cv = canvas.get_tk_widget()
    cv.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
MF1.after(5000,UpdateData)
#button = ttk.Button(master=GUI, text="Update Data", command=UpdateData)
#button.pack(ipadx=20 , ipady=10 ,pady=20)
UpdateData()
GUI.mainloop()
|
[
"noreply@github.com"
] |
UncleEngineer.noreply@github.com
|
cd4b5d06ac6645f6260588192fe3ce2be88410b7
|
59bd9c968a3a31a73d17f252fe716a3eacdf7f4f
|
/portfolio/Python/scrapy/seapets/ebay_spider.py
|
60ab55266368702543f063870e4045f0adfb606e
|
[
"Apache-2.0"
] |
permissive
|
0--key/lib
|
113ff1e9cf75e446fa50eb065bc3bc36c090d636
|
a619938ea523e96ab9e676ace51f5a129e6612e6
|
refs/heads/master
| 2023-06-23T22:17:54.244257
| 2023-06-21T17:42:57
| 2023-06-21T17:42:57
| 23,730,551
| 3
| 5
| null | 2016-03-22T08:19:30
| 2014-09-06T08:46:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,675
|
py
|
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.url import urljoin_rfc
from scrapy.utils.response import get_base_url
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from product_spiders.fuzzywuzzy import process
from product_spiders.fuzzywuzzy import fuzz
HERE = os.path.abspath(os.path.dirname(__file__))
class EbaySpider(BaseSpider):
name = 'seapets-ebay.co.uk'
allowed_domains = ['ebay.co.uk']
start_urls = ['http://stores.ebay.co.uk/Nemos-Palace']
#def parse(self, response):
# hxs = HtmlXPathSelector(response)
# categories = hxs.select('//div[@class="lcat"]/ul[@class="lev1"]/li/a/@href').extract()
# for category in categories:
# url = urljoin_rfc(get_base_url(response), category)
# yield Request(url, callback=self.parse_products)
def parse(self, response):
hxs = HtmlXPathSelector(response)
products = hxs.select('//table[@class="grid"]/tr/td')
for product in products:
loader = ProductLoader(item=Product(), selector=product)
loader.add_xpath('name', 'table/tr/td/div[@class="ttl g-std"]/a/@title')
loader.add_xpath('url', 'table/tr/td/div[@class="ttl g-std"]/a/@href')
loader.add_xpath('price', 'table/tr/td/div/table/tr/td/span[@itemprop="price"]/text()')
yield loader.load_item()
next = hxs.select('//td[@class="next"]/a/@href').extract()
if next:
url = urljoin_rfc(get_base_url(response), next[0])
yield Request(url)
|
[
"a.s.kosinov@gmail.com"
] |
a.s.kosinov@gmail.com
|
e3de59ab0a628f70e1187295bc11caee29962f62
|
308e318d1fd56520b1cfe093a5436043c72703db
|
/medicalcase/urls.py
|
7fbff357f5082f6a7d17dabd49a02a808157e9fd
|
[] |
no_license
|
NicholasTurner23/360MedNet-1
|
b35e2b79712cd5568054e697298ad02c368f8853
|
fb3939031c455c62c889383f73611b5b6845d8dd
|
refs/heads/master
| 2021-06-18T09:57:32.656789
| 2017-06-17T22:33:32
| 2017-06-17T22:33:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
from django.conf.urls import url
from medicalcase import views as medicalcase_views
urlpatterns = [
url(r'^post/medical_case/$', medicalcase_views.MedicalCaseCreate.as_view(), name='medical-case'),
url(r'^medical_cases/$', medicalcase_views.MedicalCaseList.as_view(), name='medical_cases'),
url(r'^medical_case/(?P<pk>[0-9]+)/detail/$', medicalcase_views.MedicalCaseDetail.as_view(),
name='medical_case-detail'),
]
|
[
"faithnassiwa@gmail.com"
] |
faithnassiwa@gmail.com
|
24b8b0d128b1755bfce972e35b56b2635439d049
|
927eb86f9d2b0466f580c08ec84e6a13604ba6f8
|
/worldcupapp/views/media.py
|
7d48a519f7bb1ae79aa49c2624f70fec9e7f0476
|
[] |
no_license
|
by-Exist/piku_backend_api
|
61ee1aa0526d29d735f0fd8c0cf0a69d2a01abe4
|
5dfc4a3fc6cb842e2dc16d5af5b6fd7dea609b4f
|
refs/heads/main
| 2023-06-11T21:10:51.652924
| 2021-07-07T14:35:33
| 2021-07-07T14:35:33
| 338,810,443
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,041
|
py
|
from itertools import chain
from django.shortcuts import get_object_or_404
from django.utils.functional import cached_property
from worldcupapp.models.worldcup import Worldcup
from rest_framework import mixins, viewsets, response, status
from rest_framework.decorators import action
from drf_spectacular.utils import (
PolymorphicProxySerializer,
extend_schema_view,
extend_schema,
)
from drf_patchonly_mixin import mixins as dpm_mixins
from ..models import Media, TextMedia, ImageMedia, GifMedia, VideoMedia
from ..policys import MediaViewSetAccessPolicy
from ..serializers import (
GifMediaDetailSerializer,
GifMediaListSerializer,
ImageMediaDetailSerializer,
ImageMediaListSerializer,
TextMediaDetailSerializer,
TextMediaListSerializer,
VideoMediaDetailSerializer,
VideoMediaListSerializer,
MediaCountListSerializer,
)
class MediaViewSet(
mixins.ListModelMixin,
mixins.CreateModelMixin,
dpm_mixins.PatchOnlyMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet,
):
detail_serializer_class = {
"Text": TextMediaDetailSerializer,
"Image": ImageMediaDetailSerializer,
"Gif": GifMediaDetailSerializer,
"Video": VideoMediaDetailSerializer,
}
list_serializer_class = {
"Text": TextMediaListSerializer,
"Image": ImageMediaListSerializer,
"Gif": GifMediaListSerializer,
"Video": VideoMediaListSerializer,
}
permission_classes = [MediaViewSetAccessPolicy]
@cached_property
def parent_object(self):
return get_object_or_404(Worldcup, pk=self.kwargs["worldcup_pk"])
def get_queryset(self):
if self.queryset:
return self.queryset
media_type_model_mapping = {
"Text": TextMedia,
"Image": ImageMedia,
"Gif": GifMedia,
"Video": VideoMedia,
}
model_cls = media_type_model_mapping[self.parent_object.media_type]
self.queryset = model_cls.objects.select_related("worldcup").filter(
worldcup=self.parent_object
)
return self.queryset
def get_serializer_class(self):
if self.action == "counts":
return MediaCountListSerializer
if self.action in ("create", "list"):
return self.list_serializer_class[self.parent_object.media_type]
return self.detail_serializer_class[self.parent_object.media_type]
@action(methods=["patch"], detail=False)
def counts(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
medias = self.get_queryset()
for counts_data in serializer.validated_data["counts"]:
media_id = counts_data["media_id"]
if up_win_count := counts_data.get("up_win_count", None):
medias.get(pk=media_id).win_count_up(up_win_count)
if up_view_count := counts_data.get("up_view_count", None):
medias.get(pk=media_id).view_count_up(up_view_count)
if up_choice_count := counts_data.get("up_choice_count", None):
medias.get(pk=media_id).choice_count_up(up_choice_count)
Media.objects.bulk_update(
medias, ["win_count", "view_count", "choice_count"]
)
return response.Response(status=status.HTTP_204_NO_CONTENT)
return response.Response(
data=serializer.errors, status=status.HTTP_400_BAD_REQUEST
)
MediaListPolymorphicSerializer = PolymorphicProxySerializer(
component_name="MediaListPolymorphic",
serializers=[
TextMediaListSerializer,
ImageMediaListSerializer,
GifMediaListSerializer,
VideoMediaListSerializer,
],
resource_type_field_name=None,
)
MediaDetailPolymorphicSerializer = PolymorphicProxySerializer(
component_name="MediaDetailPolymorphic",
serializers=[
TextMediaDetailSerializer,
ImageMediaDetailSerializer,
GifMediaDetailSerializer,
VideoMediaDetailSerializer,
],
resource_type_field_name=None,
)
MediaViewSet = extend_schema_view(
list=extend_schema(
description="\n\n".join(
[
"## [ Description ]",
"- Worldcup's Media List",
"## [ Permission ]",
"- AllowAny",
]
),
responses=MediaListPolymorphicSerializer,
),
create=extend_schema(
description="\n\n".join(
[
"## [ Description ]",
"- Worldcup's Media Create",
"## [ Permission ]",
"- IsWorldcupCreator",
]
),
request=MediaListPolymorphicSerializer,
responses=MediaListPolymorphicSerializer,
),
partial_update=extend_schema(
description="\n\n".join(
[
"## [ Description ]",
"- Worldcup's Media Partial Update",
"## [ Permission ]",
"- IsWorldcupCreator",
]
),
request=MediaDetailPolymorphicSerializer,
responses=MediaDetailPolymorphicSerializer,
),
destroy=extend_schema(
description="\n\n".join(
[
"## [ Description ]",
"- Worldcup's Media Destroy",
"## [ Permission ]",
"- IsWorldcupCreator",
]
),
),
counts=extend_schema(
description="\n\n".join(
[
"## [ Description ]",
"- Media's counts Update",
"- 게임이 종료될 때 사용된 미디어들의 정보 업데이트에 사용",
"- media의 win_count, view_count, choice_count를 대상으로 함",
"## [ Permission ]",
"- AllowAny",
]
),
responses={
200: None,
400: None,
},
),
)(MediaViewSet)
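# A hypothetical request body for the custom `counts` action above, inferred
# from the keys the serializer data is read with (media_id / up_win_count /
# up_view_count / up_choice_count), not from the project's own docs:
#   PATCH .../worldcups/<worldcup_pk>/medias/counts/   (exact path depends on the router)
#   {"counts": [{"media_id": 3, "up_win_count": 1, "up_view_count": 2}]}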
|
[
"bolk9652@naver.com"
] |
bolk9652@naver.com
|
a712979f0746ffdb9d01e4e7639de181f610ecfc
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/apimanagement/v20210101preview/list_delegation_setting_secrets.py
|
7b3884925eda30d4b2d81d99584cc3666a53a128
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 2,469
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListDelegationSettingSecretsResult',
'AwaitableListDelegationSettingSecretsResult',
'list_delegation_setting_secrets',
]
@pulumi.output_type
class ListDelegationSettingSecretsResult:
"""
Client or app secret used in IdentityProviders, Aad, OpenID or OAuth.
"""
def __init__(__self__, validation_key=None):
if validation_key and not isinstance(validation_key, str):
raise TypeError("Expected argument 'validation_key' to be a str")
pulumi.set(__self__, "validation_key", validation_key)
@property
@pulumi.getter(name="validationKey")
def validation_key(self) -> Optional[str]:
"""
This is secret value of the validation key in portal settings.
"""
return pulumi.get(self, "validation_key")
class AwaitableListDelegationSettingSecretsResult(ListDelegationSettingSecretsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListDelegationSettingSecretsResult(
validation_key=self.validation_key)
def list_delegation_setting_secrets(resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListDelegationSettingSecretsResult:
"""
Client or app secret used in IdentityProviders, Aad, OpenID or OAuth.
:param str resource_group_name: The name of the resource group.
:param str service_name: The name of the API Management service.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:apimanagement/v20210101preview:listDelegationSettingSecrets', __args__, opts=opts, typ=ListDelegationSettingSecretsResult).value
return AwaitableListDelegationSettingSecretsResult(
validation_key=__ret__.validation_key)
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
224733db7bbbe943a5cdd5d14513e71863001123
|
37879f158886946a3328cb7c938b774eef6b12f4
|
/feature_engineering_pandas.py
|
003cf38f3ec684d66b08086075a253ee2016ccec
|
[
"MIT"
] |
permissive
|
beckernick/cml_rapids
|
82f73bb4a7a12783967e1392ab5dba0d4ca01fde
|
da29a412418ac5c5be038f6c96af0b926c57c1ea
|
refs/heads/main
| 2023-04-28T17:25:42.612687
| 2021-05-13T12:17:49
| 2021-05-13T12:17:49
| 367,154,418
| 0
| 0
|
MIT
| 2021-05-13T19:31:23
| 2021-05-13T19:31:23
| null |
UTF-8
|
Python
| false
| false
| 3,622
|
py
|
## Feature engineering with pandas (imported as dd to mirror the dask variant of this script)
import time
import pandas as dd
import pandas as pd
import numpy as np
from feature_engineering_2 import (
pos_cash, process_unified, process_bureau_and_balance,
process_previous_applications, installments_payments,
credit_card_balance
)
### Load Data
bureau_balance = dd.read_parquet('raw_data/bureau_balance.parquet')
bureau = dd.read_parquet('raw_data/bureau.parquet')
# behaviour data linked to prev as well as current loan
cc_balance = dd.read_parquet('raw_data/cc_balance.parquet')
payments = dd.read_parquet('raw_data/payments.parquet')
pc_balance = dd.read_parquet('raw_data/pc_balance.parquet')
prev = dd.read_parquet('raw_data/prev.parquet')
train = dd.read_parquet('raw_data/train.parquet')
test = dd.read_parquet('raw_data/test.parquet')
train_index = train.index
test_index = test.index
train_target = train['TARGET']
unified = dd.concat([train.drop('TARGET', axis=1), test])
# fix for the process functions not working with columns of type `category`
bureau_balance['STATUS'] = bureau_balance['STATUS'].astype('object')
bureau['CREDIT_ACTIVE'] = bureau['CREDIT_ACTIVE'].astype('object')
bureau['CREDIT_CURRENCY'] = bureau['CREDIT_CURRENCY'].astype('object')
prev['NAME_CONTRACT_STATUS'] = prev['NAME_CONTRACT_STATUS'].astype('object')
# need to split out the parquet writing
# also need to fix a UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
unified_feat = process_unified(unified, dd)
bureau_agg = process_bureau_and_balance(bureau, bureau_balance, dd)
prev_agg = process_previous_applications(prev, dd)
pos_agg = pos_cash(pc_balance, dd)
ins_agg = installments_payments(payments, dd)
cc_agg = credit_card_balance(cc_balance, dd)
unified_feat = unified_feat.join(bureau_agg, how='left', on='SK_ID_CURR') \
.join(prev_agg, how='left', on='SK_ID_CURR') \
.join(pos_agg, how='left', on='SK_ID_CURR') \
.join(ins_agg, how='left', on='SK_ID_CURR') \
.join(cc_agg, how='left', on='SK_ID_CURR')
# we can't use bool column types in xgb later on
bool_columns = [col for col in unified_feat.columns if (unified_feat[col].dtype in ['bool']) ]
unified_feat[bool_columns] = unified_feat[bool_columns].astype('int64')
# We will label encode for xgb later on
from sklearn.preprocessing import LabelEncoder
# label encode cats
label_encode_dict = {}
categorical = unified_feat.select_dtypes(include=pd.CategoricalDtype).columns
for column in categorical:
label_encode_dict[column] = LabelEncoder()
unified_feat[column] = label_encode_dict[column].fit_transform(unified_feat[column])
unified_feat[column] = unified_feat[column].astype('int64')
### Fix for Int64D
Int64D = unified_feat.select_dtypes(include=[pd.Int64Dtype]).columns
unified_feat[Int64D] = unified_feat[Int64D].fillna(0)
unified_feat[Int64D] = unified_feat[Int64D].astype('int64')
### fix unit8
uint8 = unified_feat.select_dtypes(include=['uint8']).columns
unified_feat[uint8] = unified_feat[uint8].astype('int64')
nan_columns = unified_feat.columns[unified_feat.isna().any()].tolist()
unified_feat.replace([np.inf, -np.inf], np.nan, inplace=True)
unified_feat[nan_columns] = unified_feat[nan_columns].fillna(0)
train_feats = unified_feat.loc[train_index].merge(train_target, how='left',
left_index=True, right_index=True)
test_feats = unified_feat.loc[test_index]
train_feats.to_parquet('data_eng/feats/train_feats.parquet')
test_feats.to_parquet('data_eng/feats/test_feats.parquet')
|
[
"bpl.law@gmail.com"
] |
bpl.law@gmail.com
|
5eae1492af790922bb806b1d1c75466db26ca638
|
1d22e0cc8db1ddbdab6c06a049ccc15f35dfff99
|
/hmm_class/hmm_classifier.py
|
ef78103fd4f6f7572a36a305ebe37019bd61ebd0
|
[] |
no_license
|
JiaxinYu/machine_learning_examples
|
59f37335407d9b9523a6879602ad3d58eac7da77
|
db49879ca5efd34e7d2ad6c3ddf1fb4854c24429
|
refs/heads/master
| 2020-06-11T07:24:29.871826
| 2016-11-27T17:54:19
| 2016-11-27T17:54:19
| 75,734,758
| 1
| 0
| null | 2016-12-06T13:39:27
| 2016-12-06T13:39:27
| null |
UTF-8
|
Python
| false
| false
| 2,841
|
py
|
# https://udemy.com/unsupervised-machine-learning-hidden-markov-models-in-python
# http://lazyprogrammer.me
# Demonstrate how HMMs can be used for classification.
import string
import numpy as np
import matplotlib.pyplot as plt
from hmmd_theano import HMM
from sklearn.utils import shuffle
from nltk import pos_tag, word_tokenize
class HMMClassifier:
def __init__(self):
pass
def fit(self, X, Y, V):
K = len(set(Y)) # number of classes - assume 0..K-1
self.models = []
self.priors = []
for k in xrange(K):
# gather all the training data for this class
thisX = [x for x, y in zip(X, Y) if y == k]
C = len(thisX)
self.priors.append(np.log(C))
hmm = HMM(5)
hmm.fit(thisX, V=V, p_cost=0.1, print_period=1, learning_rate=10e-5, max_iter=100)
self.models.append(hmm)
def score(self, X, Y):
N = len(Y)
correct = 0
for x, y in zip(X, Y):
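            # MAP classification: log-likelihood under each class HMM plus the
            # class log-prior; predict the argmax.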
lls = [hmm.log_likelihood(x) + prior for hmm, prior in zip(self.models, self.priors)]
p = np.argmax(lls)
if p == y:
correct += 1
return float(correct) / N
# def remove_punctuation(s):
# return s.translate(None, string.punctuation)
def get_tags(s):
tuples = pos_tag(word_tokenize(s))
return [y for x, y in tuples]
def get_data():
word2idx = {}
current_idx = 0
X = []
Y = []
for fn, label in zip(('robert_frost.txt', 'edgar_allan_poe.txt'), (0, 1)):
count = 0
for line in open(fn):
line = line.rstrip()
if line:
print line
# tokens = remove_punctuation(line.lower()).split()
tokens = get_tags(line)
if len(tokens) > 1:
# scan doesn't work nice here, technically could fix...
for token in tokens:
if token not in word2idx:
word2idx[token] = current_idx
current_idx += 1
sequence = np.array([word2idx[w] for w in tokens])
X.append(sequence)
Y.append(label)
count += 1
print count
if count >= 50:
break
print "Vocabulary:", word2idx.keys()
return X, Y, current_idx
def main():
X, Y, V = get_data()
# print "Finished loading data"
print "len(X):", len(X)
print "Vocabulary size:", V
X, Y = shuffle(X, Y)
N = 20 # number to test
Xtrain, Ytrain = X[:-N], Y[:-N]
Xtest, Ytest = X[-N:], Y[-N:]
model = HMMClassifier()
model.fit(Xtrain, Ytrain, V)
print "Score:", model.score(Xtest, Ytest)
if __name__ == '__main__':
main()
|
[
"sublime.balloon@gmail.com"
] |
sublime.balloon@gmail.com
|
18ab42c276337f57636ec03c57500e23dd33eeda
|
d57f6c045c7b07dd53ee80982005beb33450b64b
|
/migrations/versions/b75221b7534f_.py
|
80700eb2975f945de0b9aab80daaa6d3a076c042
|
[] |
no_license
|
gwynethbradbury/ouss_ball
|
7df0ccafd42bd8d6fd22816c71fbe9a6a852351a
|
1115fe316f7c1ee1407017a60a054b1f7291f331
|
refs/heads/master
| 2023-05-11T18:36:29.921936
| 2018-03-22T15:56:52
| 2018-03-22T15:56:52
| 122,100,136
| 1
| 0
| null | 2018-03-22T13:55:05
| 2018-02-19T17:58:55
|
PHP
|
UTF-8
|
Python
| false
| false
| 641
|
py
|
"""empty message
Revision ID: b75221b7534f
Revises: 57bc3837370a
Create Date: 2016-01-11 19:56:43.653390
"""
# revision identifiers, used by Alembic.
revision = 'b75221b7534f'
down_revision = '57bc3837370a'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('postage', sa.Column('paid', sa.Boolean(), nullable=False))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('postage', 'paid')
### end Alembic commands ###
|
[
"samuel.littley@toastwaffle.com"
] |
samuel.littley@toastwaffle.com
|
bd0fab02a5fbbadc2955432d86b4c0f514793a5d
|
1817aca734cda258cbbfd9e13fbf040d76824621
|
/aliyun-python-sdk-slb/aliyunsdkslb/request/v20140515/SetLogsDownloadStatusRequest.py
|
3f5de92cf81226eceacc5ace8c2ca2a158173dc2
|
[
"Apache-2.0"
] |
permissive
|
sdk-team/aliyun-openapi-python-sdk
|
4bd770718e70e31f19e1e322727c27ba74d9fb80
|
996cb07bfcf010fe3ab65daa73d26df2f3b6e97f
|
refs/heads/master
| 2022-08-04T13:11:56.729215
| 2022-07-25T10:01:10
| 2022-07-25T10:01:10
| 183,356,741
| 0
| 0
| null | 2019-04-25T04:33:24
| 2019-04-25T04:33:24
| null |
UTF-8
|
Python
| false
| false
| 2,308
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class SetLogsDownloadStatusRequest(RpcRequest):
def __init__(self):
		RpcRequest.__init__(self, 'Slb', '2014-05-15', 'SetLogsDownloadStatus','slb')
def get_access_key_id(self):
return self.get_query_params().get('access_key_id')
def set_access_key_id(self,access_key_id):
self.add_query_param('access_key_id',access_key_id)
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_LogsDownloadStatus(self):
return self.get_query_params().get('LogsDownloadStatus')
def set_LogsDownloadStatus(self,LogsDownloadStatus):
self.add_query_param('LogsDownloadStatus',LogsDownloadStatus)
def get_Tags(self):
return self.get_query_params().get('Tags')
def set_Tags(self,Tags):
self.add_query_param('Tags',Tags)
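# Minimal usage sketch (hypothetical values; assumes an aliyunsdkcore AcsClient
# named `client` configured elsewhere):
#   request = SetLogsDownloadStatusRequest()
#   request.set_LogsDownloadStatus('enable')
#   response = client.do_action_with_exception(request)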
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
59c3bd06e2e52ff8c563ba694f192343d83d345f
|
b76615ff745c6d66803506251c3d4109faf50802
|
/pyobjc-framework-SceneKit/PyObjCTest/test_scnmaterial.py
|
f2c6d6c21c5547c3bc9103160f5ceb299b9928c3
|
[
"MIT"
] |
permissive
|
danchr/pyobjc-git
|
6ef17e472f54251e283a0801ce29e9eff9c20ac0
|
62b787fddeb381184043c7ff136f1c480755ab69
|
refs/heads/master
| 2021-01-04T12:24:31.581750
| 2020-02-02T20:43:02
| 2020-02-02T20:43:02
| 240,537,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,832
|
py
|
from PyObjCTools.TestSupport import *
import objc
import sys
if os_level_key(os_release()) < os_level_key("10.12") or sys.maxsize >= 2 ** 32:
import SceneKit
class TestSCNMaterial(TestCase):
def testConstants(self):
self.assertIsInstance(SceneKit.SCNLightingModelPhong, unicode)
self.assertIsInstance(SceneKit.SCNLightingModelBlinn, unicode)
self.assertIsInstance(SceneKit.SCNLightingModelLambert, unicode)
self.assertIsInstance(SceneKit.SCNLightingModelConstant, unicode)
self.assertEqual(SceneKit.SCNFillModeFill, 0)
self.assertEqual(SceneKit.SCNFillModeLines, 1)
self.assertEqual(SceneKit.SCNCullBack, 0)
self.assertEqual(SceneKit.SCNCullFront, 1)
self.assertEqual(SceneKit.SCNTransparencyModeAOne, 0)
self.assertEqual(SceneKit.SCNTransparencyModeRGBZero, 1)
self.assertEqual(SceneKit.SCNTransparencyModeSingleLayer, 2)
self.assertEqual(SceneKit.SCNTransparencyModeDualLayer, 3)
self.assertEqual(
SceneKit.SCNTransparencyModeDefault, SceneKit.SCNTransparencyModeAOne
)
self.assertEqual(SceneKit.SCNBlendModeAlpha, 0)
self.assertEqual(SceneKit.SCNBlendModeAdd, 1)
self.assertEqual(SceneKit.SCNBlendModeSubtract, 2)
self.assertEqual(SceneKit.SCNBlendModeMultiply, 3)
self.assertEqual(SceneKit.SCNBlendModeScreen, 4)
self.assertEqual(SceneKit.SCNBlendModeReplace, 5)
self.assertEqual(SceneKit.SCNBlendModeMax, 6)
@min_os_level("10.12")
def testConstants10_12(self):
self.assertIsInstance(SceneKit.SCNLightingModelPhysicallyBased, unicode)
@min_os_level("10.15")
def testConstants10_15(self):
self.assertIsInstance(SceneKit.SCNLightingModelShadowOnly, unicode)
def testMethods(self):
self.assertResultIsBOOL(SceneKit.SCNMaterial.isLitPerPixel)
self.assertArgIsBOOL(SceneKit.SCNMaterial.setLitPerPixel_, 0)
self.assertResultIsBOOL(SceneKit.SCNMaterial.isDoubleSided)
self.assertArgIsBOOL(SceneKit.SCNMaterial.setDoubleSided_, 0)
self.assertResultIsBOOL(SceneKit.SCNMaterial.locksAmbientWithDiffuse)
self.assertArgIsBOOL(SceneKit.SCNMaterial.setLocksAmbientWithDiffuse_, 0)
self.assertResultIsBOOL(SceneKit.SCNMaterial.writesToDepthBuffer)
self.assertArgIsBOOL(SceneKit.SCNMaterial.setWritesToDepthBuffer_, 0)
@min_os_level("10.9")
def testMethods10_9(self):
self.assertResultIsBOOL(SceneKit.SCNMaterial.readsFromDepthBuffer)
self.assertArgIsBOOL(SceneKit.SCNMaterial.setReadsFromDepthBuffer_, 0)
if __name__ == "__main__":
main()
|
[
"ronaldoussoren@mac.com"
] |
ronaldoussoren@mac.com
|
8411f21b811eca560091444108d42f0dc1514fce
|
951a3c8d6ec3d4e5f0718b8e6c92348196e5ebbf
|
/mysite/polls/migrations/0003_remove_question_question_prompt.py
|
e82e0fccbdf3a513a36859ca9de862621ece514d
|
[] |
no_license
|
aspiringguru/learnDjango
|
6f3b178381cd8037f9c954e7cc49f68d6a8b3b4c
|
24ac82293b109ad36bb375e32983154b4de23470
|
refs/heads/master
| 2020-12-10T23:00:33.479558
| 2020-01-15T08:46:18
| 2020-01-15T08:46:18
| 233,736,009
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
# Generated by Django 2.2.9 on 2020-01-15 00:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('polls', '0002_question_question_prompt'),
]
operations = [
migrations.RemoveField(
model_name='question',
name='question_prompt',
),
]
|
[
"bmatthewtaylor@gmail.com"
] |
bmatthewtaylor@gmail.com
|
5859434341568411959a48e0941bf29a6dbeaeae
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_091/ch4_2020_09_04_14_40_54_928784.py
|
652261e221bca6774cbba41cd2b6e29cac4be123
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
def classifica_idade(idade):
    if idade <= 11:
        return 'crianca'
    if 12 <= idade <= 17:
        return 'adolescente'
    if idade >= 18:
        return 'adulto'
a = 13
b = classifica_idade(a)
print(b)
|
[
"you@example.com"
] |
you@example.com
|
512c76ab159a877dea30fe399f3220371dd2baf0
|
51de6a2a2ce8882ee6462cd1076c7b9675830531
|
/0x0F-python-object_relational_mapping/2-my_filter_states.py
|
20f1742598a0848dd05b4b932cf3a0fffab10e70
|
[] |
no_license
|
anamariaroman/holbertonschool-higher_level_programming
|
9b479c9b1484e4388ec0a4390cda81480626725a
|
5d75ccc35dfc92887d0f9a9e0b0773ed741d179e
|
refs/heads/master
| 2023-08-17T23:40:25.164128
| 2021-09-23T04:57:43
| 2021-09-23T04:57:43
| 361,869,257
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
#!/usr/bin/python3
"""
takes in an argument and displays all values in the
states table of hbtn_0e_0_usa where name matches the argument.
"""
import MySQLdb
from sys import argv
if __name__ == "__main__":
db = MySQLdb.connect(host="localhost", port=3306, user=argv[1],
passwd=argv[2], db=argv[3], charset="utf8")
cursor = db.cursor()
cursor.execute("SELECT * FROM states WHERE states.name = '{:s}' ORDER BY \
states.id ASC".format(argv[4]))
r = cursor.fetchall()
for row in r:
if row[1] == argv[4]:
print(row)
cursor.close()
db.close()
|
[
"2979@holbertonschool.com"
] |
2979@holbertonschool.com
|
96b772958a9c0a774904dcf77ee5a9f9143e17c7
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-4/2cb4a725b4cb9be160d194f7b47df6c98709ebfd-<create_connection_team_slave>-fix.py
|
d3c209e5c778414dddc980ca9daa3ffc050223ca
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 532
|
py
|
def create_connection_team_slave(self):
cmd = [self.nmcli_bin, 'connection', 'add', 'type', self.type, 'con-name']
if (self.conn_name is not None):
cmd.append(self.conn_name)
elif (self.ifname is not None):
cmd.append(self.ifname)
cmd.append('ifname')
if (self.ifname is not None):
cmd.append(self.ifname)
elif (self.conn_name is not None):
cmd.append(self.conn_name)
cmd.append('master')
if (self.conn_name is not None):
cmd.append(self.master)
return cmd
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
e1f6740a875c434bf2e70839f5493f69bb4e96d7
|
64b6364b2cea4e49cc1768e159ceb3fb438fc096
|
/src/metric_runner.py
|
dc4d64f00f77ed24aac17d9f471364a1a419b32d
|
[] |
no_license
|
nkartashov/4genome_tester
|
902828f2a4373df9888788d4cb98398700259e7b
|
547446b9f38ee69177d8a12bb171c1d2ae993cad
|
refs/heads/master
| 2016-09-06T01:08:24.565208
| 2015-06-04T22:55:17
| 2015-06-04T22:55:17
| 34,047,710
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,754
|
py
|
__author__ = 'nikita_kartashov'
from src.graph.statistics import get_distribution_metric, \
get_simple_paths_metric, \
get_bp_distance_metric, \
get_dcj_distance_metric, \
get_ca_metric, \
get_mca_metric, \
get_cumulative_metric_batch
from .metrics.metrics import Metrics
ANNOTATED_SINGLE_METRICS = (
# (get_distribution_metric, 'D'), # Distribution
# (get_simple_paths_metric, 'SP'), # Simple Paths
# (get_bp_distance_metric, 'S_BP'),
# (get_dcj_distance_metric, 'S_DCJ'),
(get_ca_metric, 'CA'),
(get_mca_metric, 'MCA'),
)
ANNOTATED_BATCH_METRICS = ((get_cumulative_metric_batch, 'MCA+'),)
METRICS = Metrics(ANNOTATED_SINGLE_METRICS, ANNOTATED_BATCH_METRICS)
A, B, C, D = 'A', 'B', 'C', 'D'
TOPOLOGIES = [((A, B), (C, D)),
((A, C), (B, D)),
((A, D), (C, B))]
# If we have m methods and n trees then function returns score matrix of m lines and n columns
# def run_metrics(breakpoint_graph):
# return (((metric(breakpoint_graph, topology), topology) for topology in TOPOLOGIES) for metric in METRICS)
def compare_metric_results(breakpoint_graph, right_tree):
metric_results = METRICS.run_metrics(breakpoint_graph, TOPOLOGIES)
def decide_if_right(scored_trees):
scored_trees = list(scored_trees)
min_score = min(scored_trees)[0]
trees_with_min_score = list(tree for score, tree in scored_trees if score == min_score)
return int(len(trees_with_min_score) == 1 and trees_with_min_score[0] == right_tree)
return (decide_if_right(score_tuple) for score_tuple in metric_results)
|
[
"snailandmail@gmail.com"
] |
snailandmail@gmail.com
|
36a11457b2ad103a18565c44f60b426d4dc20b3e
|
99d436394e47571160340c95d527ecadaae83541
|
/algorithms_questions/ch18_graph_theory/q45_1.py
|
0053a3fd5f07e6c424f2a633246622ae14a46a7f
|
[] |
no_license
|
LeeSeok-Jun/Algorithms
|
b47ba4de5580302e9e2399bcf85d245ebeb1b93d
|
0e8573bd03c50df3f89dd0ee9eed9cf8716ef8d8
|
refs/heads/main
| 2023-03-02T06:47:20.939235
| 2021-02-08T05:18:24
| 2021-02-08T05:18:24
| 299,840,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,090
|
py
|
"""
최종 순위 - 2회차
"""
# 풀이 제한 시간 : 60분
# 2020/12/31 11:10 ~ 11:31
# 실패 - 자료의 사용(data[i])에 실수, 큐에 처음 초기화를 안함
from collections import deque
"""
# Not needed in the topological sort algorithm.
def find_parent(parent, x):
if parent[x] != x:
parent[x] = find_parent(parent, parent[x])
return parent[x]
def union_parent(parent, a, b):
a = find_parent(parent, a)
b = find_parent(parent, b)
if a < b:
parent[b] = a
else:
parent[a] = b
"""
for tc in range(int(input())):
n = int(input())
parent = [0] * (n + 1)
for i in range(1, n+1):
parent[i] = i
indegree = [0] * (n+1)
data = list(map(int, input().split()))
graph = [[] for _ in range(n+1)]
    # Must use data[i] and data[j] here!
for i in range(n):
for j in range(i+1, n):
graph[data[j]].append(data[i])
indegree[data[i]] += 1
m = int(input())
for _ in range(m):
a, b = map(int, input().split())
if b not in graph[a]:
graph[b].remove(a)
indegree[a] -= 1
graph[a].append(b)
indegree[b] += 1
else:
graph[a].remove(b)
indegree[b] -= 1
graph[b].append(a)
indegree[a] += 1
cycle = False
certain = True
q = deque()
result = []
    # (The earlier attempt skipped seeding the queue with the initial nodes)
for i in range(1, n+1):
if indegree[i] == 0:
q.append(i)
for _ in range(n):
if len(q) == 0:
cycle = True
break
if len(q) >= 2:
certain = False
break
now = q.popleft()
result.append(now)
for i in graph[now]:
indegree[i] -= 1
if indegree[i] == 0:
q.append(i)
if cycle:
print("IMPOSSIBLE")
elif not certain:
print("?")
else:
for i in reversed(result):
print(i, end=" ")
print()
|
[
"seok9376@gmail.com"
] |
seok9376@gmail.com
|
96cdda7deaa7720cd3559f3d0b3e5accb90e9308
|
6c597d56ab500f8d0788b803fdfb9ab4dbb37a90
|
/openregistry/assets/claimrights/tests/transferring.py
|
29487148a1b9b1a3825f6e85e4ebbe8f092f72a2
|
[
"Apache-2.0"
] |
permissive
|
openprocurement/openregistry.assets.claimrights
|
1671e55313aa69b073d1662a0fe16a8bd604f4fd
|
8f8d59760da3b647730da9d56e656a6ef4d12302
|
refs/heads/master
| 2021-05-14T23:59:00.664485
| 2019-03-27T15:33:44
| 2019-03-27T15:33:44
| 104,233,542
| 0
| 10
|
Apache-2.0
| 2019-02-06T11:28:28
| 2017-09-20T15:27:44
|
Python
|
UTF-8
|
Python
| false
| false
| 549
|
py
|
# -*- coding: utf-8 -*-
import unittest
from openregistry.assets.claimrights.tests.base import AssetTransferWebTest
from openregistry.assets.core.tests.plugins.transferring.mixins import AssetOwnershipChangeTestCaseMixin
class AssetOwnershipChangeTest(AssetTransferWebTest,
AssetOwnershipChangeTestCaseMixin):
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(AssetOwnershipChangeTest))
return suite
if __name__ == "__main__":
unittest.main(defaultTest="suite")
|
[
"leitsius@gmail.com"
] |
leitsius@gmail.com
|
438a534b66b835b18edc0a542fc5499bae377670
|
ca23b411c8a046e98f64b81f6cba9e47783d2584
|
/cache_replacement/policy_learning/cache/main.py
|
5cbddf2a4c41057f1d91d6f6838f52f0665a237d
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
pdybczak/google-research
|
1fb370a6aa4820a42a5d417a1915687a00613f9c
|
0714e9a5a3934d922c0b9dd017943a8e511eb5bc
|
refs/heads/master
| 2023-03-05T23:16:11.246574
| 2021-01-04T11:30:28
| 2021-01-04T11:30:28
| 326,629,357
| 1
| 0
|
Apache-2.0
| 2021-02-01T12:39:09
| 2021-01-04T09:17:36
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,923
|
py
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: disable=line-too-long
r"""Runs cache simulation.
Example usage:
python3 -m cache_replacement.policy_learning.cache.main \
--experiment_base_dir=/tmp \
--experiment_name=sample_belady_llc \
--cache_configs=cache_replacement/policy_learning/cache/configs/default.json \
--cache_configs=cache_replacement/policy_learning/cache/configs/eviction_policy/belady.json \
--memtrace_file=cache_replacement/policy_learning/cache/traces/sample_trace.csv
Simulates a cache configured by the cache configs with Belady's as the
replacement policy on the sample trace.
"""
# pylint: enable=line-too-long
import os
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
import tqdm
from cache_replacement.policy_learning.cache import cache as cache_mod
from cache_replacement.policy_learning.cache import evict_trace as evict
from cache_replacement.policy_learning.cache import memtrace
from cache_replacement.policy_learning.common import config as cfg
from cache_replacement.policy_learning.common import utils
FLAGS = flags.FLAGS
flags.DEFINE_multi_string(
"cache_configs",
[
"cache_replacement/policy_learning/cache/configs/default.json", # pylint: disable=line-too-long
"cache_replacement/policy_learning/cache/configs/eviction_policy/lru.json" # pylint: disable=line-too-long
],
"List of config paths merged front to back for the cache.")
flags.DEFINE_multi_string(
"config_bindings", [],
("override config with key=value pairs "
"(e.g., eviction_policy.policy_type=greedy)"))
flags.DEFINE_string(
"experiment_base_dir", "/tmp/experiments",
"Base directory to store all experiments in. Should not frequently change.")
flags.DEFINE_string(
"experiment_name", "unnamed",
"All data related to this experiment is written to"
" experiment_base_dir/experiment_name.")
flags.DEFINE_string(
"memtrace_file",
"cache_replacement/policy_learning/cache/traces/omnetpp_train.csv",
"Memory trace file path to use.")
flags.DEFINE_integer(
"tb_freq", 10000, "Number of cache reads between tensorboard logs.")
flags.DEFINE_integer(
"warmup_period", int(2e3), "Number of cache reads before recording stats.")
flags.DEFINE_bool(
"force_overwrite", False,
("If true, overwrites directory at "
" experiment_base_dir/experiment_name if it exists."))
def log_scalar(tb_writer, key, value, step):
summary = tf.Summary(value=[tf.Summary.Value(tag=key, simple_value=value)])
tb_writer.add_summary(summary, step)
def main(_):
# Set up experiment directory
exp_dir = os.path.join(FLAGS.experiment_base_dir, FLAGS.experiment_name)
utils.create_experiment_directory(exp_dir, FLAGS.force_overwrite)
tensorboard_dir = os.path.join(exp_dir, "tensorboard")
tf.disable_eager_execution()
tb_writer = tf.summary.FileWriter(tensorboard_dir)
miss_trace_path = os.path.join(exp_dir, "misses.csv")
evict_trace_path = os.path.join(exp_dir, "evictions.txt")
cache_config = cfg.Config.from_files_and_bindings(
FLAGS.cache_configs, FLAGS.config_bindings)
with open(os.path.join(exp_dir, "cache_config.json"), "w") as f:
cache_config.to_file(f)
flags_config = cfg.Config({
"memtrace_file": FLAGS.memtrace_file,
"tb_freq": FLAGS.tb_freq,
"warmup_period": FLAGS.warmup_period,
})
with open(os.path.join(exp_dir, "flags.json"), "w") as f:
flags_config.to_file(f)
logging.info("Config: %s", str(cache_config))
logging.info("Flags: %s", str(flags_config))
cache_line_size = cache_config.get("cache_line_size")
with memtrace.MemoryTrace(
FLAGS.memtrace_file, cache_line_size=cache_line_size) as trace:
with memtrace.MemoryTraceWriter(miss_trace_path) as write_trace:
with evict.EvictionTrace(evict_trace_path, False) as evict_trace:
def write_to_eviction_trace(cache_access, eviction_decision):
evict_trace.write(
evict.EvictionEntry(cache_access, eviction_decision))
cache = cache_mod.Cache.from_config(cache_config, trace=trace)
# Warm up cache
for _ in tqdm.tqdm(range(FLAGS.warmup_period), desc="Warming up cache"):
pc, address = trace.next()
hit = cache.read(pc, address, [write_to_eviction_trace])
if not hit:
write_trace.write(pc, address)
if trace.done():
raise ValueError()
# Discard warm-up cache statistics
cache.hit_rate_statistic.reset()
num_reads = 0
with tqdm.tqdm(desc="Simulating cache on MemoryTrace") as pbar:
while not trace.done():
pc, address = trace.next()
hit = cache.read(pc, address, [write_to_eviction_trace])
if not hit:
write_trace.write(pc, address)
num_reads += 1
if num_reads % FLAGS.tb_freq == 0:
log_scalar(tb_writer, "cache_hit_rate",
cache.hit_rate_statistic.success_rate(), num_reads)
pbar.update(1)
log_scalar(tb_writer, "cache_hit_rate",
cache.hit_rate_statistic.success_rate(), num_reads)
# Force flush, otherwise last writes will be lost.
tb_writer.flush()
if __name__ == "__main__":
app.run(main)
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
deb56472c890832c3e7ee3dae8b4a62f9590c3d3
|
74863206d868c63d73ed927c5d4559fe4e2320fd
|
/week 5/wk 5 q 2.py
|
4e92daa6b065054e24c2e2d95ebeb2cbd758f5ac
|
[] |
no_license
|
Shubhanshu-Nishad/210-Coursework-Amanjit-S-Phull
|
e58a622b9b0bd2da3259f318944d1164c9f3fd93
|
01ed9eb426d3af180cb486503ab8bfcdf6694e90
|
refs/heads/master
| 2022-12-18T06:08:58.172949
| 2020-10-01T14:27:44
| 2020-10-01T14:27:44
| 300,308,089
| 1
| 0
| null | 2020-10-01T14:26:13
| 2020-10-01T14:26:12
| null |
UTF-8
|
Python
| false
| false
| 1,155
|
py
|
class Node(object):
def __init__(self, value):
self.value=value
self.next=None
self.prev=None
class List(object):
def __init__(self):
self.head=None
self.tail=None
def insert(self,n,x):
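        # Insert node x after node n; when n is None and the list is empty,
        # x becomes both head and tail.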
if n!=None:
x.next=n.next
n.next=x
x.prev=n
if x.next!=None:
x.next.prev=x
if self.head==None:
self.head=self.tail=x
x.prev=x.next=None
elif self.tail==n:
self.tail=x
def delete(self,n): #Remove pointers to an element
if n.prev != None:
n.prev.next = n.next
else:
self.head = n.next
if n.next != None:
n.next.prev = n.prev
else:
self.tail = n.prev
def display(self):
values=[]
n=self.head
while n!=None:
values.append(str(n.value))
n=n.next
print ("List: ",",".join(values))
if __name__ == '__main__':
l=List()
l.insert(None, Node(4))
l.insert(l.head,Node(6))
l.insert(l.head,Node(8))
l.delete(l.tail)
l.display()
|
[
"noreply@github.com"
] |
Shubhanshu-Nishad.noreply@github.com
|
1c7ed19f2aaacdb47b9e5eefd21dd227de5cb2ed
|
d024ccbb4cc04af3866a4db1ac1d8c1d7395d909
|
/boj/3040.py
|
7340db9c6af2e68f61e4fb313c8b4a7a0a8b412e
|
[] |
no_license
|
demetoir/ps-solved-code
|
ff0418dddd10f3b053c9b8d32af48027b10c8481
|
f4d4fd2183176b083f2287c9d89c6d5a1e983cc5
|
refs/heads/master
| 2022-10-14T20:11:34.581439
| 2020-06-12T11:24:11
| 2020-06-12T11:24:11
| 68,782,768
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
import itertools
l=[]
for i in range(9):l+=[input()]
for s in itertools.combinations(range(9),7):
if sum(l[i] for i in s)==100:
print "\n".join(str(l[i]) for i in s)
|
[
"wnsqlehlswk@gmail.com"
] |
wnsqlehlswk@gmail.com
|
b15b87aebf2cf07b8e065a527f31b2b55377fa13
|
d7ee76b7f1d6cd038982335792f15959a58a8395
|
/SWEA/4615. 재미있는 오셀로 게임.py
|
e557fb8ef44326abc668927b3051576baa6bd26d
|
[] |
no_license
|
min1378/-algorithm
|
1c5dea6b2f03e4d376275cfccbf11b240bc659d9
|
bfb720277160077a816deec21469a7e597c62d14
|
refs/heads/master
| 2021-08-02T06:54:10.478501
| 2021-07-31T14:03:01
| 2021-07-31T14:03:01
| 202,688,865
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,638
|
py
|
#import sys
from pprint import pprint
#sys.stdin = open('4615.txt', 'r')
# Wall check function
def isWall(x, y):
if x > N or x < 1 :
return True
if y > N or y < 1 :
return True
return False
# Collect the positions of stones whose color must be flipped.
def enermy_check(x, y, mode, color):
check_enermy = []
dx = [0, 1, 1, 1, 0, -1, -1, -1]
dy = [-1, -1, 0, 1, 1, 1, 0, -1]
while True:
        # Take one step in the given direction (mode).
        test_x = x+dx[mode]
        test_y = y+dy[mode]
        # Hit a wall: discard everything checked so far and return an empty list []
        if isWall(test_x, test_y) == True:
            return []
        # Found the same color: return the list of coordinates checked so far
        if data[test_y-1][test_x-1] == color:
            return check_enermy
        # Found a 0: the square is empty, so return an empty list []
        if data[test_y-1][test_x-1] == 0:
            return []
        # Otherwise record the coordinate in check_enermy.
        else :
            check_enermy.append([test_x, test_y])
        # Advance to the checked coordinate.
        x = test_x
        y = test_y
# Inspect the board after a move
def inspect(x, y, color):
    # Loop over the 8 direction modes.
    for mode in range(8):
        # Get the return value of enermy_check.
        result = enermy_check(x, y, mode, color)
        # If it is not an empty list
        if result != []:
            # Take each coordinate from result and recolor it in data.
            for check_x, check_y in result:
                data[check_y-1][check_x-1] = color
TC=int(input())
for test_case in range(1, TC+1):
N, M = map(int, input().split())
data = [[0]*N for _ in range(N)]
    # Black is 1, white is 2
data[N // 2 - 1][N // 2 - 1] = 2
data[N // 2 - 1][N // 2] = 1
data[N // 2][N // 2 - 1] = 1
data[N // 2][N // 2] = 2
check = [list(map(int, input().split())) for _ in range(M)]
while True:
if check == []:
break
        # Pop moves one by one from the front of check and place each stone.
x, y, color = check.pop(0)
data[y-1][x-1] = color
        # See how the board changes after the stone is placed.
inspect(x, y, color)
    # When the loop ends all stones have been placed, so count the black and white stones.
black = 0
white = 0
for line in data:
black += line.count(1)
white += line.count(2)
print("#{} {} {}".format(test_case, black, white))
|
[
"qwes123@naver.com"
] |
qwes123@naver.com
|
812b33798a282b1ce8b7d31e14999b7e5d629e07
|
9255068b7b45348a084555b8c413fd55a4b12013
|
/odfdo/link.py
|
43d15ef07e9f2067b7636723ff4a05076ec64545
|
[
"Apache-2.0"
] |
permissive
|
mat-m/odfdo
|
fdf9752f0273760deb59403f23dbc20eac3de753
|
a4a509a056517ecf91449e029b36fe9a8ffa8ed0
|
refs/heads/master
| 2020-03-18T05:04:16.263647
| 2018-05-21T21:46:13
| 2018-05-21T21:54:11
| 134,322,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,931
|
py
|
# Copyright 2018 Jérôme Dumonteil
# Copyright (c) 2009-2013 Ars Aperta, Itaapy, Pierlis, Talend.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Authors (odfdo project): jerome.dumonteil@gmail.com
# The odfdo project is a derivative work of the lpod-python project:
# https://github.com/lpod/lpod-python
# Authors: Hervé Cauwelier <herve@itaapy.com>
# Jerome Dumonteil <jerome.dumonteil@itaapy.com>
from .element import Element, register_element_class
from .paragraph_base import ParagraphBase
class Link(ParagraphBase):
"""Link class, <text:a> odf_element.
"""
_tag = 'text:a'
    _properties = (
        ('url', 'xlink:href'),
        ('name', 'office:name'),
        ('title', 'office:title'),
        ('target_frame', 'office:target-frame-name'),
        ('show', 'xlink:show'),
        ('visited_style', 'text:visited-style-name'),
        ('style', 'text:style-name'),
    )
def __init__(self,
url='',
name=None,
title=None,
text=None,
target_frame=None,
style=None,
visited_style=None,
**kwargs):
"""
Arguments:
url -- str
name -- str
title -- str
text -- str
target_frame -- '_self', '_blank', '_parent', '_top'
style -- string
visited_style -- string
return: Link
"""
super().__init__(**kwargs)
if self._do_init:
self.url = url
if name is not None:
self.name = name
if title is not None:
self.title = title
if text is not None:
self.text = text
if target_frame is not None:
self.target_frame = target_frame
# show can be: 'new' or 'replace'"
if target_frame == '_blank':
self.show = 'new'
else:
self.show = 'replace'
if style is not None:
self.style = style
if visited_style is not None:
self.visited_style = visited_style
Link._define_attribut_property()
register_element_class(Link)
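# Usage sketch (hypothetical): Link(url='https://example.com', text='Example')
# builds a <text:a xlink:href="..."> element ready to be appended to a
# paragraph; note that `show` defaults to 'replace' unless target_frame is '_blank'.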
|
[
"jerome.dumonteil@gmail.com"
] |
jerome.dumonteil@gmail.com
|
092653579244e4f4c095d89145e7b1090c29b97a
|
8ecd899a8558ad0a644ecefa28faf93e0710f6fb
|
/other_practices/JOI2009_ho2.py
|
bf6da9af81b0852342546b9a6414ba07ece8d743
|
[] |
no_license
|
yut-inoue/AtCoder_ABC
|
b93885547049788d452e86b442a4a9f5ee191b0e
|
3d2c4b2b2f8871c75f86040ad07ccd7736ad3dbe
|
refs/heads/master
| 2021-07-03T09:09:20.478613
| 2021-02-21T13:20:31
| 2021-02-21T13:20:31
| 227,140,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 298
|
py
|
import bisect
d = int(input())
n = int(input())
m = int(input())
dl = [int(input()) for _ in range(n-1)]
ml = [int(input()) for _ in range(m)]
dl.append(0)
dl.append(d)
dl.sort()
dis = 0
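# For each new location m, bisect_left finds the first existing point >= m,
# so the nearest existing point is either dl[ind] or dl[ind-1].
# e.g. with dl = [0, 4, 10] and m = 3: ind = 1, distance = min(|4-3|, |0-3|) = 1.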
for m in ml:
ind = bisect.bisect_left(dl, m)
dis += min(abs(dl[ind]-m), abs(dl[ind-1]-m))
print(dis)
|
[
"yinoue.1996787@gmail.com"
] |
yinoue.1996787@gmail.com
|
fdcfdfd429431291ef3a98faf19e4dc7d4ffcdb2
|
841c0df958129bef4ec456630203992a143c7dc7
|
/src/1/1297.py
|
8c9a783bb90ccd8c2f495c94b1b79838d0b82fc5
|
[
"MIT"
] |
permissive
|
xCrypt0r/Baekjoon
|
da404d3e2385c3278a1acd33ae175c2c1eb82e5e
|
7d858d557dbbde6603fe4e8af2891c2b0e1940c0
|
refs/heads/master
| 2022-12-25T18:36:35.344896
| 2021-11-22T20:01:41
| 2021-11-22T20:01:41
| 287,291,199
| 16
| 25
|
MIT
| 2022-12-13T05:03:49
| 2020-08-13T13:42:32
|
C++
|
UTF-8
|
Python
| false
| false
| 385
|
py
|
"""
1297. TV 크기
작성자: xCrypt0r
언어: Python 3
사용 메모리: 29,380 KB
소요 시간: 72 ms
해결 날짜: 2020년 9월 20일
"""
def main():
d, h, w = map(int, input().split())
hk = pow(d ** 2 * h ** 2 // (h ** 2 + w ** 2), 0.5)
wk = pow(d ** 2 * w ** 2 // (h ** 2 + w ** 2), 0.5)
print(f'{int(hk)} {int(wk)}')
if __name__ == '__main__':
main()
|
[
"fireintheholl@naver.com"
] |
fireintheholl@naver.com
|
9ef04a08bc10dea64e0d9e928d37a877bfa39cc1
|
603ed82854e5b67af76d9bbdf4d2183419c6167c
|
/pages/views.py
|
05b88e646d6daa37ff170c9d948d5fc2c442c219
|
[] |
no_license
|
IbrahimAlAzhar/Basic-CRUD
|
26a209433fefb3da38d742602e54abeff83daa8d
|
2e9d68537270fc72b44757b39eea845d78602902
|
refs/heads/master
| 2022-12-14T11:34:20.784724
| 2020-09-05T21:18:51
| 2020-09-05T21:18:51
| 293,155,575
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,068
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def home_view(request,*args, **kwargs):
print(args, kwargs)
print(request)
print(request.user)
# return HttpResponse("<h1>Hello world</h1>")
return render(request,"products/home.html", {})
def contact_view(request,*args, **kwargs):
# return HttpResponse("<h1>Contact page</h1>")
return render(request, "products/contact.html", {})
def about_view(request,*args, **kwargs):
print(request.user)
# return HttpResponse("<h1>Hello from the other side</h1>")
my_context = {
"my_text": "this is about us",
"my_name": "ibrahim al azhar",
"my_number": 123,
"my_list": [12,23,23,44,"abc","azhar"],
"my_html": "<h1>This one is html tag</h1>"
}
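    # Note: my_html will render escaped in the template unless it is passed
    # through the |safe filter, since Django autoescapes by default.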
return render(request, "products/about.html", my_context)
def social_view(request,*args, **kwargs):
# return HttpResponse("<h1>Social page</h1>")
return render(request, "products/social.html", {})
|
[
"ibrahimalazhar264@gmail.com"
] |
ibrahimalazhar264@gmail.com
|
ea7db6646783c4f5b7190aa6fb3fa228a8266c5b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03061/s160632113.py
|
8ebe292df9e3f2e8d6f104cfca93a5b226a41bb0
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 402
|
py
|
from fractions import gcd
N = int(input())
A = list(map(int, input().split()))
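# Strategy: prefix GCDs L and suffix GCDs R let us evaluate, for each i, the
# GCD of all elements except A[i] as gcd(L[i-1], R[N-i-2]); replacing A[i]
# with a multiple of that value makes it the achievable answer.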
L = [-1] * (N-1)
L[0] = A[0]
R = [-1] * (N-1)
R[0] = A[-1]
for i in range(1, N-1):
L[i] = gcd(L[i-1], A[i])
for i in range(1, N-1):
R[i] = gcd(R[i-1], A[-i-1])
ans = 0
for i in range(1, N-1):
tmp = gcd(L[i-1], R[N-i-2])
ans = max(ans, tmp)
ans = max(ans, L[N-2])
ans = max(ans, R[N-2])
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
92a05238afd3189143bdf1d508e8b2205b46dabe
|
917c0949dd410439e7f882e20a3fb744b7b4bd6e
|
/Pandas/obesity.py
|
bf7e8dceb6a5561f1b97151d830ba938469e350c
|
[
"MIT"
] |
permissive
|
daveaseeman/PyEng
|
229d01df85c2959b4333d5bd19ba15029b11ee38
|
31403a7f0e557456eeaad865295213cf27847bf9
|
refs/heads/master
| 2020-12-28T19:11:49.210811
| 2017-05-15T23:13:42
| 2017-05-15T23:13:42
| 43,885,548
| 0
| 0
| null | 2015-10-08T12:03:50
| 2015-10-08T12:03:49
| null |
UTF-8
|
Python
| false
| false
| 1,221
|
py
|
import pandas as pd
import matplotlib.pyplot as plt
data = pd.ExcelFile("Obes-phys-acti-diet-eng-2014-tab.xls")
print data.sheet_names
# Read section 7.1 from the Excel file
# Define the columns to be read
columns1 = ['year', 'total', 'males', 'females']
data_gender = data.parse(u'7.1', skiprows=4, skipfooter=14, names=columns1)
#print data_gender
# Remove the N/A from the data
data_gender.dropna(inplace = True)
#print data_gender
data_gender.set_index('year', inplace=True)
# Plot all
data_gender.plot()
plt.show()
# Read 2nd section, by age
data_age = data.parse(u'7.2', skiprows=4, skipfooter=14)
print data_age
# Rename unames to year
data_age.rename(columns={u'Unnamed: 0': u'Year'}, inplace=True)
# Drop empties and reset index
data_age.dropna(inplace=True)
data_age.set_index('Year', inplace=True)
#plot
data_age.plot()
plt.show()
# Plotting everything cause total to override everything. So drop it.
# Drop the total column and plot
data_age_minus_total = data_age.drop('Total', axis = 1)
data_age_minus_total.plot()
plt.show()
plt.close()
#Plot children vs adults
data_age['Under 16'].plot(label = "Under 16")
data_age['25-34'].plot(label = "25-34")
plt.legend(loc="upper right")
plt.show()
|
[
"a@a.com"
] |
a@a.com
|
976eab4c20ccc6d97267a0e261e856efb42bac17
|
9a393d5dae8147088b1c9b78987197c60a6618cf
|
/0828/모의2.py
|
5afbfdf130cc6d42c69e1a772ee5ab0f6d43cf74
|
[] |
no_license
|
bumbum9944/bumpycharm
|
5444440379f6d5142130bc8a7a4b69276f23f991
|
b487eb433d41ff0d2f6e1ca4f723225b114b96c0
|
refs/heads/master
| 2020-07-05T16:04:35.153231
| 2019-10-02T00:14:00
| 2019-10-02T00:14:00
| 202,693,662
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 785
|
py
|
def shuffle(cards, card_u, card_d, N):
X = list(range(N))
global cnt
if cards != card_u and cards != card_d:
if cnt > 5:
return -1
else:
cnt += 1
for x in X:
if x > N // 2:
x = x - N // 2
for change in range(N // 2 - 1 - x, N // 2 + x, 2):
cards[change], cards[change + 1] = cards[change + 1], cards[change]
return shuffle(cards, card_u, card_d, N)
else:
return cnt
T = int(input())
for tc in range(1, T+1):
cnt = 0
N = int(input())
cards = list(map(int, input().split()))
card_u = sorted(cards)
card_d = card_u[::-1]
ans = shuffle(cards, card_u, card_d, N)
print('#{} {}'.format(tc, ans))
|
[
"tong940526@gmail.com"
] |
tong940526@gmail.com
|
2438dc850e5d62d640bcdc86236a89bc67376373
|
1d60c5a7b8ce6277bff514e376f79848f706344c
|
/Data Engineer with Python/04. Writing Efficient Python Code/04. Basic pandas optimizations/08. Bringing it all together: Predict win percentage.py
|
cf12b3bebb30941ce6308446c58c8d8a439da8bb
|
[] |
no_license
|
DidiMilikina/DataCamp
|
338c6e6d3b4f5b6c541c1aba155a36e9ee24949d
|
3bf2cf3c1430190a7f8e54efda7d50a5fd66f244
|
refs/heads/master
| 2020-12-15T13:16:54.178967
| 2020-05-06T17:30:54
| 2020-05-06T17:30:54
| 235,113,616
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,790
|
py
|
'''
Bringing it all together: Predict win percentage
A pandas DataFrame (baseball_df) has been loaded into your session. For convenience, a dictionary describing each column within baseball_df has been printed into your console. You can reference these descriptions throughout the exercise.
You'd like to attempt to predict a team's win percentage for a given season by using the team's total runs scored in a season ('RS') and total runs allowed in a season ('RA') with the following function:
def predict_win_perc(RS, RA):
prediction = RS ** 2 / (RS ** 2 + RA ** 2)
return np.round(prediction, 2)
Let's compare the approaches you've learned to calculate a predicted win percentage for each season (or row) in your DataFrame.
Instructions 1/4
25 XP
1
Use a for loop and .itertuples() to predict the win percentage for each row of baseball_df with the predict_win_perc() function. Save each row's predicted win percentage as win_perc_pred and append each to the win_perc_preds_loop list.
2
Apply predict_win_perc() to each row of the baseball_df DataFrame using a lambda function. Save the predicted win percentage as win_perc_preds_apply.
3
Calculate the predicted win percentages by passing the underlying 'RS' and 'RA' arrays from baseball_df into predict_win_perc(). Save these predictions as win_perc_preds_np.
'''
# Solution 1
win_perc_preds_loop = []
# Use a loop and .itertuples() to collect each row's predicted win percentage
for row in baseball_df.itertuples():
runs_scored = row.RS
runs_allowed = row.RA
    win_perc_pred = predict_win_perc(runs_scored, runs_allowed)
win_perc_preds_loop.append(win_perc_pred)
# Solution 2
win_perc_preds_loop = []
# Use a loop and .itertuples() to collect each row's predicted win percentage
for row in baseball_df.itertuples():
runs_scored = row.RS
runs_allowed = row.RA
win_perc_pred = predict_win_perc(runs_scored, runs_allowed)
win_perc_preds_loop.append(win_perc_pred)
# Apply predict_win_perc to each row of the DataFrame
win_perc_preds_apply = baseball_df.apply(lambda row: predict_win_perc(row['RS'], row['RA']), axis=1)
# Solution 3
win_perc_preds_loop = []
# Use a loop and .itertuples() to collect each row's predicted win percentage
for row in baseball_df.itertuples():
runs_scored = row.RS
runs_allowed = row.RA
win_perc_pred = predict_win_perc(runs_scored, runs_allowed)
win_perc_preds_loop.append(win_perc_pred)
# Apply predict_win_perc to each row of the DataFrame
win_perc_preds_apply = baseball_df.apply(lambda row: predict_win_perc(row['RS'], row['RA']), axis=1)
# Calculate the win percentage predictions using NumPy arrays
win_perc_preds_np = predict_win_perc(baseball_df['RS'].values, baseball_df['RA'].values)
baseball_df['WP_preds'] = win_perc_preds_np
print(baseball_df.head())
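# The exercise assumes `baseball_df` and `predict_win_perc` are preloaded in
# the session. A self-contained sketch with toy data (illustrative values
# only) showing that the three approaches produce identical predictions:
import numpy as np
import pandas as pd

def predict_win_perc(RS, RA):
    prediction = RS ** 2 / (RS ** 2 + RA ** 2)
    return np.round(prediction, 2)

toy_df = pd.DataFrame({'RS': [745, 697, 855], 'RA': [668, 705, 760]})
loop_preds = [predict_win_perc(row.RS, row.RA) for row in toy_df.itertuples()]
apply_preds = toy_df.apply(lambda row: predict_win_perc(row['RS'], row['RA']), axis=1)
np_preds = predict_win_perc(toy_df['RS'].values, toy_df['RA'].values)
assert loop_preds == list(apply_preds) == list(np_preds)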
|
[
"didimilikina8@gmail.com"
] |
didimilikina8@gmail.com
|
3f126799ab9a40abdd2ebaae9d63469bf925c350
|
65381b8dffa1ade89746f6fc3a4979a7eb548d34
|
/analytic_structure/models/analytic_dimension.py
|
3e0c1f79cf94b69f49c82b31d834c963f9d7f218
|
[] |
no_license
|
ff4f/AISJ-13
|
a4240d1952c3854dd5b21a62cf7dbfdebb16fde5
|
a2f2183e0f753100842877efecc844bdc72f8bd4
|
refs/heads/master
| 2023-05-08T22:54:43.972954
| 2021-06-03T14:44:10
| 2021-06-03T14:48:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 967
|
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields
class AnalyticDimension(models.Model):
######################
# Private attributes #
######################
_name = "account.analytic.dimension"
###################
# Default methods #
###################
######################
# Fields declaration #
######################
name = fields.Char(string="Dimension Name",
required=True)
dependency_id = fields.Many2one(comodel_name="account.analytic.dimension",
string="Dependent On")
##############################
# Compute and search methods #
##############################
############################
# Constrains and onchanges #
############################
#########################
# CRUD method overrides #
#########################
##################
# Action methods #
##################
|
[
"LuisAngelMalaveMora@gmail.com"
] |
LuisAngelMalaveMora@gmail.com
|
aff1a5f925b9a5fb61aa23bc3c7204c9d0b2fdf8
|
98f730ec6a43d8be4a34b0f2a44a9d35989d2287
|
/tests/unit/entity/test_flow_file_entity.py
|
c96730ce6075f70da6c024829667d2c0880046c9
|
[] |
no_license
|
scottwr98/pynifi-client
|
9337a4f322536ee466d419a788b8b5948cdc62d7
|
013ac2ffa591284a0d6cbb9ed552681cc6f91165
|
refs/heads/master
| 2020-04-18T08:47:03.680749
| 2017-11-04T23:59:58
| 2017-11-04T23:59:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,335
|
py
|
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service. # noqa: E501
OpenAPI spec version: 1.4.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import pynifi_client
from pynifi_client.models.flow_file_entity import FlowFileEntity # noqa: E501
from pynifi_client.rest import ApiException
class TestFlowFileEntity(unittest.TestCase):
"""FlowFileEntity unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testFlowFileEntity(self):
"""Test FlowFileEntity"""
# FIXME: construct object with mandatory attributes with example values
# model = pynifi_client.models.flow_file_entity.FlowFileEntity() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"ajish@rootedinsights.com"
] |
ajish@rootedinsights.com
|
386b87b23a4abb72e8025a74ef4beb8cda822341
|
c2bcf42e04a1e2146b41b250ff14e62fddcdf589
|
/docs/examples/plot_gpr.py
|
b38412ecdc8bb8734c124690fb196f341c3f89ea
|
[
"Apache-2.0"
] |
permissive
|
onnx/sklearn-onnx
|
0f958e1c090572fbe11e15f95bec975d1780cf8d
|
895c3a76a315c7a6567a1a07a96dc658994ec16a
|
refs/heads/main
| 2023-08-18T18:49:25.164433
| 2023-08-17T09:52:31
| 2023-08-17T09:52:31
| 162,340,939
| 455
| 92
|
Apache-2.0
| 2023-08-31T16:04:13
| 2018-12-18T20:18:48
|
Python
|
UTF-8
|
Python
| false
| false
| 6,674
|
py
|
# SPDX-License-Identifier: Apache-2.0
"""
.. _l-gpr-example:
Discrepancies with GaussianProcessRegressor: use of double
===========================================================
The `GaussianProcessRegressor
<https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.
GaussianProcessRegressor.html>`_ involves
many matrix operations which may require double
precision. *sklearn-onnx* uses single floats by default
but for this particular model, it is better to use double.
Let's see how to create an ONNX file using doubles.
Train a model
+++++++++++++
A very basic example using *GaussianProcessRegressor*
on the diabetes dataset.
"""
import pprint
import numpy
import sklearn
from sklearn.datasets import load_diabetes
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import DotProduct, RBF
from sklearn.model_selection import train_test_split
import onnx
import onnxruntime as rt
import skl2onnx
from skl2onnx.common.data_types import FloatTensorType, DoubleTensorType
from skl2onnx import convert_sklearn
dataset = load_diabetes()
X, y = dataset.data, dataset.target
X_train, X_test, y_train, y_test = train_test_split(X, y)
gpr = GaussianProcessRegressor(DotProduct() + RBF(), alpha=1.0)
gpr.fit(X_train, y_train)
print(gpr)
###########################
# First attempt to convert a model into ONNX
# ++++++++++++++++++++++++++++++++++++++++++
#
# The documentation suggests the following way to
# convert a model into ONNX.
initial_type = [("X", FloatTensorType([None, X_train.shape[1]]))]
onx = convert_sklearn(gpr, initial_types=initial_type, target_opset=12)
sess = rt.InferenceSession(onx.SerializeToString())
try:
pred_onx = sess.run(None, {"X": X_test.astype(numpy.float32)})[0]
except RuntimeError as e:
print(str(e))
###########################
# Second attempt: variable dimensions
# +++++++++++++++++++++++++++++++++++
#
# Unfortunately, even though the conversion
# went well, the runtime fails to compute the prediction.
# The previous snippet of code imposes fixed dimensions
# on the input and therefore lets the runtime assume
# every node output also has fixed dimensions.
# And that's not the case for this model.
# We need to disable these checks by replacing
# the fixed dimensions with an empty value
# (see next line).
initial_type = [("X", FloatTensorType([None, None]))]
onx = convert_sklearn(gpr, initial_types=initial_type, target_opset=12)
sess = rt.InferenceSession(onx.SerializeToString())
pred_onx = sess.run(None, {"X": X_test.astype(numpy.float32)})[0]
pred_skl = gpr.predict(X_test)
print(pred_skl[:10])
print(pred_onx[0, :10])
###################################
# The differences seem quite significant.
# Let's confirm that by looking at the biggest
# differences.
diff = numpy.sort(numpy.abs(numpy.squeeze(pred_skl) - numpy.squeeze(pred_onx)))[-5:]
print(diff)
print("min(Y)-max(Y):", min(y_test), max(y_test))
###########################
# Third attempt: use of double
# ++++++++++++++++++++++++++++
#
# The model uses a couple of matrix computations
# on matrices whose coefficients have very different
# orders of magnitude. It is difficult to approximate
# the prediction made with scikit-learn if the converted
# model sticks to float. Double precision is needed.
#
# The previous code requires two changes. The first
# one indicates that inputs are now of type
# ``DoubleTensorType``. The second change
# is the extra parameter ``dtype=numpy.float64``,
# which tells the conversion function that every real
# constant matrix such as the trained coefficients
# will be dumped as doubles and not as floats anymore.
initial_type = [("X", DoubleTensorType([None, None]))]
onx64 = convert_sklearn(gpr, initial_types=initial_type, target_opset=12)
sess64 = rt.InferenceSession(onx64.SerializeToString())
pred_onx64 = sess64.run(None, {"X": X_test})[0]
print(pred_onx64[0, :10])
################################
# The new differences look much better.
diff = numpy.sort(numpy.abs(numpy.squeeze(pred_skl) - numpy.squeeze(pred_onx64)))[-5:]
print(diff)
print("min(Y)-max(Y):", min(y_test), max(y_test))
####################################
# Size increase
# +++++++++++++
#
# As a result, the ONNX model is almost twice as big
# because every coefficient is stored as a double
# and not as a float anymore.
size32 = len(onx.SerializeToString())
size64 = len(onx64.SerializeToString())
print("ONNX with floats:", size32)
print("ONNX with doubles:", size64)
#################################
# return_std=True
# +++++++++++++++
#
# `GaussianProcessRegressor <https://scikit-learn.org/stable/modules/
# generated/sklearn.gaussian_process.GaussianProcessRegressor.html>`_
# is one model which defines an additional parameter for the predict function.
# If called with ``return_std=True``, the class returns one more result,
# and that needs to be reflected in the generated ONNX graph.
# The converter needs to know that an extended graph is required.
# That's done through the option mechanism
# (see :ref:`l-conv-options`).
initial_type = [("X", DoubleTensorType([None, None]))]
options = {GaussianProcessRegressor: {"return_std": True}}
try:
onx64_std = convert_sklearn(
gpr, initial_types=initial_type, options=options, target_opset=12
)
except RuntimeError as e:
print(e)
######################################
# This error highlights the fact that *scikit-learn*
# computes internal variables on the first call to method predict.
# The converter needs them to be initialized by calling method
# predict at least once and then converting again.
gpr.predict(X_test[:1], return_std=True)
onx64_std = convert_sklearn(
gpr, initial_types=initial_type, options=options, target_opset=12
)
sess64_std = rt.InferenceSession(onx64_std.SerializeToString())
pred_onx64_std = sess64_std.run(None, {"X": X_test[:5]})
pprint.pprint(pred_onx64_std)
###############################
# Let's compare with *scikit-learn* prediction.
pprint.pprint(gpr.predict(X_test[:5], return_std=True))
#######################################
# It looks good. Let's run a more thorough check.
pred_onx64_std = sess64_std.run(None, {"X": X_test})
pred_std = gpr.predict(X_test, return_std=True)
diff = numpy.sort(
numpy.abs(numpy.squeeze(pred_onx64_std[1]) - numpy.squeeze(pred_std[1]))
)[-5:]
print(diff)
#################################
# There are some discrepancies but they seem reasonable.
#
# **Versions used for this example**
print("numpy:", numpy.__version__)
print("scikit-learn:", sklearn.__version__)
print("onnx: ", onnx.__version__)
print("onnxruntime: ", rt.__version__)
print("skl2onnx: ", skl2onnx.__version__)
|
[
"noreply@github.com"
] |
onnx.noreply@github.com
|
44465a4a6db8996eacce62966259ef8c47a0909e
|
1915774790a77a630c00e70738ac41a315f5a2cb
|
/doorscalc/migrations/0034_order.py
|
0f5f4e76a216a981d62729e85601dd332467b201
|
[] |
no_license
|
coconutcake/hajduktools
|
842948646d2e8d3368b4d420d73bba981d649d43
|
6f9e678a1168195d77d1163bc9145205d03bb141
|
refs/heads/master
| 2020-07-02T20:02:19.914649
| 2019-09-13T17:44:05
| 2019-09-13T17:44:05
| 201,648,138
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,456
|
py
|
# Generated by Django 2.1.11 on 2019-08-21 11:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('doorscalc', '0033_auto_20190821_0947'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('w', models.DecimalField(decimal_places=0, max_digits=3, verbose_name='Width')),
('h', models.DecimalField(decimal_places=0, max_digits=3, verbose_name='Height')),
('d', models.DecimalField(decimal_places=0, max_digits=3, verbose_name='Depth')),
                ('status', models.CharField(choices=[('Pending', 'Pending'), ('Accepted', 'Accepted'), ('Ordered', 'Ordered')], default='Pending', help_text='Status zamówienia', max_length=50, null=True, verbose_name='Status')),
('published_date', models.DateTimeField(blank=True, null=True)),
('door', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='doorscalc.Door', verbose_name='Type')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"contact@mign.pl"
] |
contact@mign.pl
|
c0fda12954d82dd3a44313c715b0d476d2c87363
|
e5eec1428da1d24d3e9b86f5723c51cd2ca636cd
|
/백준 삼성역량테스트기출/시험감독.py
|
4c3226fd88492be95fe560c0c9ef3c4b27668a7e
|
[] |
no_license
|
jamwomsoo/Algorithm_prac
|
3c36c381f59277721517d331a8f1640399d80c1d
|
8393f3cc2f950214c47f3cf0b2c1271791f115d0
|
refs/heads/master
| 2023-06-09T06:49:14.739255
| 2021-06-18T06:41:01
| 2021-06-18T06:41:01
| 325,227,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
n = int(input())
a_lst = list(map(int, input().split()))  # applicants per room
b, c = map(int, input().split())         # chief / assistant supervisor capacity
total = 0
for i in range(n):
    total += 1          # every room needs one chief supervisor
    a_lst[i] -= b
    if a_lst[i] > 0:    # leftover applicants are covered by assistants
        total += a_lst[i] // c
        if a_lst[i] % c > 0:
            total += 1  # one extra assistant for the remainder
print(total)
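# The `// c` plus remainder test above is just ceiling division. An
# equivalent helper (a sketch, not part of the original submission):
import math

def supervisors_needed(a, b, c):
    # one chief supervisor covers b applicants; each assistant covers c more
    return 1 + max(0, math.ceil((a - b) / c))

assert supervisors_needed(10, 3, 2) == 5  # 1 chief + ceil(7 / 2) assistants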
|
[
"41579282+jamwomsoo@users.noreply.github.com"
] |
41579282+jamwomsoo@users.noreply.github.com
|
a6681169fe270861ab20c12bb9dd080537671d0c
|
80ae9b5cfb45b6e9cf7873ef7c46e17e117e4019
|
/data/HackerRank-ProblemSolving/Is This a Binary Search Tree.py
|
731ae39593eed79cc53c99eb8fef64bfffb5dc12
|
[] |
no_license
|
Ritvik19/CodeBook
|
ef7764d89b790e902ede5802f36d5ca910d8a50e
|
2b4ed7938bbf156553d6ba5cba6216449528f0fc
|
refs/heads/master
| 2021-07-04T08:25:52.478719
| 2020-08-08T06:54:14
| 2020-08-08T06:54:14
| 138,744,302
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
""" Node is defined as
class node:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
"""
def check_binary_search_tree_(root):
return check_node(root, -1, 10001)
def check_node(node, Min, Max):
if not node:
return True
if Min < node.data < Max:
return check_node(node.left, Min, node.data) and check_node(node.right, node.data, Max)
return False
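# The hard-coded bounds -1 and 10001 mirror HackerRank's 0..10000 data range.
# A range-free variant (a sketch reusing check_node above) relies on
# infinities instead of problem-specific limits:
def check_binary_search_tree_unbounded(root):
    return check_node(root, float('-inf'), float('inf'))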
|
[
"rastogiritvik99@gmail.com"
] |
rastogiritvik99@gmail.com
|
cede73216293a8ce2fb462daf6702e71a3c0f983
|
51885da54b320351bfea42c7dd629f41985454cd
|
/abc023/d.py
|
44e56efe280652514dbc388ca3b19c414d04f3e6
|
[] |
no_license
|
mskt4440/AtCoder
|
dd266247205faeda468f911bff279a792eef5113
|
f22702e3932e129a13f0683e91e5cc1a0a99c8d5
|
refs/heads/master
| 2021-12-15T10:21:31.036601
| 2021-12-14T08:19:11
| 2021-12-14T08:19:11
| 185,161,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,354
|
py
|
#
# abc023 d
#
import sys
from io import StringIO
import unittest
import bisect
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_入力例1(self):
input = """4
5 6
12 4
14 7
21 2"""
output = """23"""
self.assertIO(input, output)
def test_入力例2(self):
input = """6
100 1
100 1
100 1
100 1
100 1
1 30"""
output = """105"""
self.assertIO(input, output)
def resolve():
N = int(input())
ok = 0
global H, S
H = []
S = []
for _ in range(N):
h, s = map(int, input().split())
H.append(h)
S.append(s)
ok = max(ok, h+s*(N-1))
ok -= 1
ng = max(H)-1
while abs(ok-ng) > 1:
mid = (ok+ng)//2
if isOK(mid):
ok = mid
else:
ng = mid
print(ok)
def isOK(x):
time = [(x-h)/s for (h, s) in zip(H, S)]
time.sort()
for i, t in enumerate(time):
if i > t:
return False
return True
if __name__ == "__main__":
# unittest.main()
resolve()
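# resolve() above is an instance of the "binary search on the answer"
# pattern: `ok` always holds a feasible value and `ng` an infeasible one,
# and the gap is halved until they are adjacent. A generic sketch
# (hypothetical helper, not part of the contest submission):
def binary_search_answer(ok, ng, is_ok):
    # invariant: is_ok(ok) is True and is_ok(ng) is False
    while abs(ok - ng) > 1:
        mid = (ok + ng) // 2
        if is_ok(mid):
            ok = mid
        else:
            ng = mid
    return ok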
|
[
"mskt4440@gmail.com"
] |
mskt4440@gmail.com
|
b550022c8996e1254ad04bbc6e68d43f9a20036d
|
8bbeb7b5721a9dbf40caa47a96e6961ceabb0128
|
/python3/745.Find Smallest Letter Greater Than Target(寻找比目标字母大的最小字母).py
|
a515ae5b4ea2c96f75d6260137b0d993b0a8432c
|
[
"MIT"
] |
permissive
|
lishulongVI/leetcode
|
bb5b75642f69dfaec0c2ee3e06369c715125b1ba
|
6731e128be0fd3c0bdfe885c1a409ac54b929597
|
refs/heads/master
| 2020-03-23T22:17:40.335970
| 2018-07-23T14:46:06
| 2018-07-23T14:46:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,576
|
py
|
"""
<p>
Given a list of sorted characters <code>letters</code> containing only lowercase letters, and given a target letter <code>target</code>, find the smallest element in the list that is larger than the given target.
</p><p>
Letters also wrap around. For example, if the target is <code>target = 'z'</code> and <code>letters = ['a', 'b']</code>, the answer is <code>'a'</code>.
</p>
<p><b>Examples:</b><br />
<pre>
<b>Input:</b>
letters = ["c", "f", "j"]
target = "a"
<b>Output:</b> "c"
<b>Input:</b>
letters = ["c", "f", "j"]
target = "c"
<b>Output:</b> "f"
<b>Input:</b>
letters = ["c", "f", "j"]
target = "d"
<b>Output:</b> "f"
<b>Input:</b>
letters = ["c", "f", "j"]
target = "g"
<b>Output:</b> "j"
<b>Input:</b>
letters = ["c", "f", "j"]
target = "j"
<b>Output:</b> "c"
<b>Input:</b>
letters = ["c", "f", "j"]
target = "k"
<b>Output:</b> "c"
</pre>
</p>
<p><b>Note:</b><br>
<ol>
<li><code>letters</code> has a length in range <code>[2, 10000]</code>.</li>
<li><code>letters</code> consists of lowercase letters, and contains at least 2 unique letters.</li>
<li><code>target</code> is a lowercase letter.</li>
</ol>
</p><p>给定一个只包含小写字母的有序数组<code>letters</code> 和一个目标字母 <code>target</code>,寻找有序数组里面比目标字母大的最小字母。</p>
<p>数组里字母的顺序是循环的。举个例子,如果目标字母<code>target = 'z'</code> 并且有序数组为 <code>letters = ['a', 'b']</code>,则答案返回 <code>'a'</code>。</p>
<p><strong>示例:</strong></p>
<pre>
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "a"
<strong>输出:</strong> "c"
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "c"
<strong>输出:</strong> "f"
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "d"
<strong>输出:</strong> "f"
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "g"
<strong>输出:</strong> "j"
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "j"
<strong>输出:</strong> "c"
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "k"
<strong>输出:</strong> "c"
</pre>
<p><strong>注:</strong></p>
<ol>
<li><code>letters</code>长度范围在<code>[2, 10000]</code>区间内。</li>
<li><code>letters</code> 仅由小写字母组成,最少包含两个不同的字母。</li>
<li>目标字母<code>target</code> 是一个小写字母。</li>
</ol>
"""
import bisect


class Solution:
    def nextGreatestLetter(self, letters, target):
        """
        :type letters: List[str]
        :type target: str
        :rtype: str
        """
        # Binary search for the first letter strictly greater than target;
        # wrap around to the first letter when target >= every letter.
        i = bisect.bisect_right(letters, target)
        return letters[i % len(letters)]
|
[
"lishulong@wecash.net"
] |
lishulong@wecash.net
|
10ba96abd7fbec0f39742d29991a6863ac7d558b
|
17c14b758959cdceec0dce8f783346fdeee8e111
|
/chap05_nlp/sequence_labeling/eng_model/main.py
|
9bc95c095d4eb0780ca8db2ad4280e23fd2c0801
|
[] |
no_license
|
yurimkoo/tensormsa_jupyter
|
b0a340119339936d347d12fbd88fb017599a0029
|
0e75784114ec6dc8ee7eff8094aef9cf37131a5c
|
refs/heads/master
| 2021-07-18T12:22:31.396433
| 2017-10-25T01:42:24
| 2017-10-25T01:42:24
| 109,469,220
| 1
| 0
| null | 2017-11-04T05:20:15
| 2017-11-04T05:20:15
| null |
UTF-8
|
Python
| false
| false
| 1,871
|
py
|
import os
from eng_model.data_utils import get_trimmed_glove_vectors, load_vocab, \
get_processing_word, CoNLLDataset
from eng_model.general_utils import get_logger
from eng_model.model import NERModel
from eng_model.config import config
try :
# directory for training outputs
if not os.path.exists(config.output_path):
os.makedirs(config.output_path)
# load vocabs
vocab_words = load_vocab(config.words_filename)
vocab_tags = load_vocab(config.tags_filename)
vocab_chars = load_vocab(config.chars_filename)
# get processing functions
processing_word = get_processing_word(vocab_words, vocab_chars,
lowercase=config.lowercase, chars=config.chars)
processing_tag = get_processing_word(vocab_tags, lowercase=False)
    # get pre-trained embeddings
embeddings = get_trimmed_glove_vectors(config.trimmed_filename)
# create dataset
dev = CoNLLDataset(config.dev_filename, processing_word,
processing_tag, config.max_iter)
test = CoNLLDataset(config.test_filename, processing_word,
processing_tag, config.max_iter)
train = CoNLLDataset(config.train_filename, processing_word,
processing_tag, config.max_iter)
# get logger
logger = get_logger(config.log_path)
# build model
model = NERModel(config, embeddings, ntags=len(vocab_tags),
nchars=len(vocab_chars), logger=logger)
model.build()
# train, evaluate and interact
model.train(train, dev, vocab_tags)
model.evaluate(test, vocab_tags)
model.predict(vocab_tags, processing_word, "Germany 's representative")
model.predict(vocab_tags, processing_word, "Germany")
model.predict(vocab_tags, processing_word, "Hello Germany 's representative")
except Exception as e :
raise Exception (e)
|
[
"tmddno1@naver.com"
] |
tmddno1@naver.com
|
2cbd45af7d26fd7efc079cde6e33ae3cf3e2f982
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_143/ch118_2020_03_29_04_35_07_099201.py
|
d8962c300590a574cb248be19c49e2d9ef558047
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
import math
def snell_descartes(n1, n2, o):
    # Snell's law: n1*sin(o1) = n2*sin(o2). Returns the refraction angle
    # in degrees (math.asin raises ValueError when no refraction exists).
    y = n1 / n2
    x = math.sin(math.radians(o))
    o2 = math.degrees(math.asin(x * y))
    return o2


def reflexao_total_interna(n1, n2, o):
    # Total internal reflection: light travels from a denser to a rarer
    # medium (n1 > n2) and sin(o) exceeds n2/n1, the sine of the critical angle.
    return n1 > n2 and math.sin(math.radians(o)) > n2 / n1
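# Worked example (illustrative values): light leaving glass (n ~ 1.5) into
# air (n ~ 1.0) at 45 degrees exceeds the critical angle
# asin(1.0 / 1.5) ~ 41.8 degrees, so total internal reflection occurs.
print(snell_descartes(1.0, 1.5, 45))         # air into glass: ~ 28.1 degrees
print(reflexao_total_interna(1.5, 1.0, 45))  # True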
|
[
"you@example.com"
] |
you@example.com
|
178910e4f15626f235806824e33a9222ee63e9b0
|
308953409e1a3b828ac49b7301c1e751cbf762cf
|
/suite_EETc 12/tst_Open_Import_Export/test.py
|
4453463efcc939e846f44d4a6859e0aa61a262cf
|
[] |
no_license
|
asthagaur1/danfoss-automation
|
4dcc7d8f000917b67e4d6f46ff862a525ddcbc5e
|
213a99d3375889cd0e0c801421a50e9fe6085879
|
refs/heads/main
| 2023-03-31T23:26:56.956107
| 2021-04-01T08:52:37
| 2021-04-01T08:52:37
| 353,627,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
def main():
    excel = r"C:\gitworkspace\KoolProg-TestAutomation\Master_Functions\Test_Automation\SourceCode\suite_EETc 12\shared\testdata\Open_Import_Export.xls"
    # Mapping with Global scripts for Function library and key action.
source(findFile("scripts", "Functions.py"))
source(findFile("scripts", "Actions.py"))
#source(findFile("scripts", "object_id.py"))
keyAction(excel)
|
[
"asthagaur@danfoss.com"
] |
asthagaur@danfoss.com
|
14b48bbbf62470ff68ffb9122f28308444f5f2f1
|
25873da962b0acdcf2c46b60695866d29008c11d
|
/src/programr/clients/events/console/config.py
|
16a2c9b254edf08455d0a327b7f522385af6cbbc
|
[] |
no_license
|
LombeC/program-r
|
79f81fa82a617f053ccde1115af3344369b1cfa5
|
a7eb6820696a2e5314d29f8d82aaad45a0dc0362
|
refs/heads/master
| 2022-12-01T14:40:40.208360
| 2020-08-10T21:10:30
| 2020-08-10T21:10:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,186
|
py
|
from programr.config.client.config import ClientConfigurationData
class ConsoleConfiguration(ClientConfigurationData):
def __init__(self):
super().__init__("console")
self._default_userid = "console"
self._prompt = ">>>"
@property
def default_userid(self):
return self._default_userid
@property
def prompt(self):
return self._prompt
def load_configuration(self, configuration_file, bot_root):
console = configuration_file.get_section(self.section_name)
if console is not None:
self._default_userid = configuration_file.get_option(console, "default_userid", missing_value="Console")
self._prompt = configuration_file.get_option(console, "prompt", missing_value=">>>")
super().load_configuration(configuration_file, console, bot_root)
def to_yaml(self, data, defaults=True):
if defaults is True:
data['default_userid'] = "console"
data['prompt'] = ">>>"
else:
data['default_userid'] = self._default_userid
data['prompt'] = self._prompt
super(ConsoleConfiguration, self).to_yaml(data, defaults)
|
[
"hilbert.cantor@gmail.com"
] |
hilbert.cantor@gmail.com
|
56c90b4716f1cc14341f23413d49aaa8b0682632
|
a9e3f3ad54ade49c19973707d2beb49f64490efd
|
/Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/bulk_email/tests/test_views.py
|
d2ec21c3ba6ac57f01f91d77bfab7dc4daf89163
|
[
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"MIT"
] |
permissive
|
luque/better-ways-of-thinking-about-software
|
8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d
|
5809eaca7079a15ee56b0b7fcfea425337046c97
|
refs/heads/master
| 2021-11-24T15:10:09.785252
| 2021-11-22T12:14:34
| 2021-11-22T12:14:34
| 163,850,454
| 3
| 1
|
MIT
| 2021-11-22T12:12:31
| 2019-01-02T14:21:30
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,247
|
py
|
"""
Test the bulk email opt out view.
"""
import ddt
import pytest
from django.http import Http404
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.urls import reverse
from common.djangoapps.student.tests.factories import UserFactory
from lms.djangoapps.bulk_email.models import Optout
from lms.djangoapps.bulk_email.views import opt_out_email_updates
from lms.djangoapps.discussion.notification_prefs.views import UsernameCipher
from openedx.core.lib.tests import attr
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@attr(shard=1)
@ddt.ddt
@override_settings(SECRET_KEY="test secret key")
class OptOutEmailUpdatesViewTest(ModuleStoreTestCase):
"""
Check the opt out email functionality.
"""
def setUp(self):
super().setUp()
self.user = UserFactory.create(username="testuser1", email='test@example.com')
self.course = CourseFactory.create(run='testcourse1', display_name='Test Course Title')
self.token = UsernameCipher.encrypt('testuser1')
self.request_factory = RequestFactory()
self.url = reverse('bulk_email_opt_out', args=[self.token, str(self.course.id)])
# Ensure we start with no opt-out records
assert Optout.objects.count() == 0
def test_opt_out_email_confirm(self):
"""
Ensure that the default GET view asks for confirmation.
"""
response = self.client.get(self.url)
self.assertContains(response, "confirm unsubscribe from")
assert Optout.objects.count() == 0
def test_opt_out_email_unsubscribe(self):
"""
Ensure that the POSTing "confirm" creates the opt-out record.
"""
response = self.client.post(self.url, {'unsubscribe': True})
self.assertContains(response, "You have successfully unsubscribed from")
assert Optout.objects.count() == 1
def test_opt_out_email_cancel(self):
"""
Ensure that the POSTing "cancel" does not create the opt-out record
"""
response = self.client.post(self.url)
self.assertContains(response, "You have not been unsubscribed from")
assert Optout.objects.count() == 0
@ddt.data(
("ZOMG INVALID BASE64 CHARS!!!", "base64url", False),
("Non-ASCII\xff".encode(), "base64url", False),
("D6L8Q01ztywqnr3coMOlq0C3DG05686lXX_1ArEd0ok", "base64url", False),
("AAAAAAAAAAA=", "initialization_vector", False),
("nMXVK7PdSlKPOovci-M7iqS09Ux8VoCNDJixLBmj", "aes", False),
("AAAAAAAAAAAAAAAAAAAAAMoazRI7ePLjEWXN1N7keLw=", "padding", False),
("AAAAAAAAAAAAAAAAAAAAACpyUxTGIrUjnpuUsNi7mAY=", "username", False),
("_KHGdCAUIToc4iaRGy7K57mNZiiXxO61qfKT08ExlY8=", "course", 'course-v1:testcourse'),
)
@ddt.unpack
def test_unsubscribe_invalid_token(self, token, message, course):
"""
Make sure that view returns 404 in case token is not valid
"""
request = self.request_factory.get("dummy")
with pytest.raises(Http404) as err:
opt_out_email_updates(request, token, course)
assert message in err
|
[
"rafael.luque@osoco.es"
] |
rafael.luque@osoco.es
|
bd548c6e28569374dce6cece185f426673c7f3d6
|
8d0eec5c051cf902df1ef004b537115b888fe5c6
|
/async_dev/generators_two_way.py
|
7483829ccf1ffe0d0ef3648065fd504c53c26ea0
|
[] |
no_license
|
MadhuV99/complete_py_course
|
494300225eef49470a92290f908c1d6f1296cb4f
|
ade2ac8c5722c45196b700d3ad99f37c9deb76d8
|
refs/heads/main
| 2023-02-24T06:57:57.441762
| 2021-02-04T03:49:58
| 2021-02-04T03:49:58
| 329,334,980
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
from collections import deque
# friends = ['Rolf', 'Jose', 'Charlie', 'Jen', 'Anna']
friends = deque(('Rolf', 'Jose', 'Charlie', 'Jen', 'Anna'))
def get_friend():
    yield from friends


def greet(g):
    while True:
        try:
            friend = next(g)  # pull the next name from the inner generator
            yield f'HELLO {friend}'
        except StopIteration:
            break  # inner generator exhausted; stop instead of spinning forever
friends_generator = get_friend()
g = greet(friends_generator)
print(next(g))
print(next(g))
|
[
"madhuvasudevan@yahoo.com"
] |
madhuvasudevan@yahoo.com
|
9b64afa65c9d6ded04f35a8e66d55c8a70318c62
|
d488f052805a87b5c4b124ca93494bc9b78620f7
|
/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/third_party/apis/serviceuser/v1/serviceuser_v1_client.py
|
81860d607ce811e4c113893404142a4427ea51cd
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
PacktPublishing/DevOps-Fundamentals
|
5ce1fc938db66b420691aa8106ecfb3f9ceb1ace
|
60597e831e08325c7e51e8557591917f7c417275
|
refs/heads/master
| 2023-02-02T04:48:15.346907
| 2023-01-30T08:33:35
| 2023-01-30T08:33:35
| 131,293,311
| 13
| 19
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,177
|
py
|
"""Generated client library for serviceuser version v1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.serviceuser.v1 import serviceuser_v1_messages as messages
class ServiceuserV1(base_api.BaseApiClient):
"""Generated client library for service serviceuser version v1."""
MESSAGES_MODULE = messages
BASE_URL = u'https://serviceuser.googleapis.com/'
_PACKAGE = u'serviceuser'
_SCOPES = [u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/cloud-platform.read-only', u'https://www.googleapis.com/auth/service.management']
_VERSION = u'v1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_CLIENT_CLASS_NAME = u'ServiceuserV1'
_URL_VERSION = u'v1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new serviceuser handle."""
url = url or self.BASE_URL
super(ServiceuserV1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.projects_services = self.ProjectsServicesService(self)
self.projects = self.ProjectsService(self)
self.services = self.ServicesService(self)
class ProjectsServicesService(base_api.BaseApiService):
"""Service class for the projects_services resource."""
_NAME = u'projects_services'
def __init__(self, client):
super(ServiceuserV1.ProjectsServicesService, self).__init__(client)
self._upload_configs = {
}
def Disable(self, request, global_params=None):
"""Disable a service so it can no longer be used with a.
project. This prevents unintended usage that may cause unexpected billing
charges or security leaks.
Operation<response: google.protobuf.Empty>
Args:
request: (ServiceuserProjectsServicesDisableRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Disable')
return self._RunMethod(
config, request, global_params=global_params)
Disable.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'serviceuser.projects.services.disable',
ordered_params=[u'projectsId', u'servicesId'],
path_params=[u'projectsId', u'servicesId'],
query_params=[],
relative_path=u'v1/projects/{projectsId}/services/{servicesId}:disable',
request_field=u'disableServiceRequest',
request_type_name=u'ServiceuserProjectsServicesDisableRequest',
response_type_name=u'Operation',
supports_download=False,
)
def Enable(self, request, global_params=None):
"""Enable a service so it can be used with a project.
See [Cloud Auth Guide](https://cloud.google.com/docs/authentication) for
more information.
Operation<response: google.protobuf.Empty>
Args:
request: (ServiceuserProjectsServicesEnableRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Enable')
return self._RunMethod(
config, request, global_params=global_params)
Enable.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'serviceuser.projects.services.enable',
ordered_params=[u'projectsId', u'servicesId'],
path_params=[u'projectsId', u'servicesId'],
query_params=[],
relative_path=u'v1/projects/{projectsId}/services/{servicesId}:enable',
request_field=u'enableServiceRequest',
request_type_name=u'ServiceuserProjectsServicesEnableRequest',
response_type_name=u'Operation',
supports_download=False,
)
def List(self, request, global_params=None):
"""List enabled services for the specified consumer.
Args:
request: (ServiceuserProjectsServicesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListEnabledServicesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'serviceuser.projects.services.list',
ordered_params=[u'projectsId'],
path_params=[u'projectsId'],
query_params=[u'pageSize', u'pageToken'],
relative_path=u'v1/projects/{projectsId}/services',
request_field='',
request_type_name=u'ServiceuserProjectsServicesListRequest',
response_type_name=u'ListEnabledServicesResponse',
supports_download=False,
)
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = u'projects'
def __init__(self, client):
super(ServiceuserV1.ProjectsService, self).__init__(client)
self._upload_configs = {
}
class ServicesService(base_api.BaseApiService):
"""Service class for the services resource."""
_NAME = u'services'
def __init__(self, client):
super(ServiceuserV1.ServicesService, self).__init__(client)
self._upload_configs = {
}
def Search(self, request, global_params=None):
"""Search available services.
When no filter is specified, returns all accessible services. For
authenticated users, also returns all services the calling user has
"servicemanagement.services.bind" permission for.
Args:
request: (ServiceuserServicesSearchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(SearchServicesResponse) The response message.
"""
config = self.GetMethodConfig('Search')
return self._RunMethod(
config, request, global_params=global_params)
Search.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'serviceuser.services.search',
ordered_params=[],
path_params=[],
query_params=[u'pageSize', u'pageToken'],
relative_path=u'v1/services:search',
request_field='',
request_type_name=u'ServiceuserServicesSearchRequest',
response_type_name=u'SearchServicesResponse',
supports_download=False,
)
|
[
"saneetk@packtpub.com"
] |
saneetk@packtpub.com
|
097bcb484e898145895118958d891df3c5377fe3
|
183e4126b2fdb9c4276a504ff3ace42f4fbcdb16
|
/I семестр/Програмування (Python)/Лабораторні/Братун 6305/Приклади/34/Ex26.py
|
4c5ba37cc50a32792e7f969423731ecf7a45162d
|
[] |
no_license
|
Computer-engineering-FICT/Computer-engineering-FICT
|
ab625e2ca421af8bcaff74f0d37ac1f7d363f203
|
80b64b43d2254e15338060aa4a6d946e8bd43424
|
refs/heads/master
| 2023-08-10T08:02:34.873229
| 2019-06-22T22:06:19
| 2019-06-22T22:06:19
| 193,206,403
| 3
| 0
| null | 2023-07-22T09:01:05
| 2019-06-22T07:41:22
|
HTML
|
UTF-8
|
Python
| false
| false
| 296
|
py
|
import re
p1 = re.compile(r"[0-9]+")
print(p1.findall("2012, 2013, 2014, 2015, 2016"))
p2 = re.compile(r"[a-z]+")
print(p2.findall("2012, 2013, 2014, 2015, 2016"))
t = r"[0-9]{3}-[0-9]{2}-[0-9]{2}"
p = re.compile(t)
print(p.findall("322-55-98"))
print(p.findall("322-55-98, 678-56-12"))
|
[
"mazanyan027@gmail.com"
] |
mazanyan027@gmail.com
|
fc1a2897b55e9c6109a9729b245562e9d13b8022
|
347c70d4851b568e03e83387f77ae81071ab739e
|
/older/rc-query-rest/tests/test_rest_query.py
|
5974c1291876236f288ae59b86951e2be8b4d673
|
[
"MIT"
] |
permissive
|
neetinkandhare/resilient-community-apps
|
59d276b5fb7a92872143ce2b94edd680738693ce
|
3ecdabe6bf2fc08f0f8e58cbe92553270d8da42f
|
refs/heads/master
| 2021-12-27T09:05:36.563404
| 2021-09-29T13:04:56
| 2021-09-29T13:04:56
| 159,804,866
| 1
| 0
|
MIT
| 2021-08-03T19:45:45
| 2018-11-30T10:07:32
|
Python
|
UTF-8
|
Python
| false
| false
| 2,446
|
py
|
"""System Integration Tests for REST Query component"""
from __future__ import print_function
import os.path
import pytest
from circuits.core.handlers import handler
data_dir = os.path.join(os.path.dirname(__file__), "rest_sample_data")
config_data = """[rest]
queue = rest
query_definitions_dir = %s
test_endpoint = http://httpbin.org/post
""" % (data_dir)
@pytest.mark.usefixtures("configure_resilient")
class TestRESTIntegrationTests:
""" System tests for the REST Query component """
# Appliance Configuration Requirements
destinations = ("rest",)
automatic_actions = {"Payload String Test": ("rest", "Incident",
({u"value": u"Payload Is String",
u"field_name": u"incident.name",
u"method": u"equals"},)),
"Payload Dict Test": ("rest", "Incident",
({u"value": u"Payload Is Dict",
u"field_name": u"incident.name",
u"method": u"equals"},))}
payload_testdata = [pytest.param("Payload Is String", "payload_string_test",
id="string_payload"),
pytest.param("Payload Is Dict", "payload_dict_test",
id="dict_payload")]
@pytest.mark.parametrize("inc_name,rule_name", payload_testdata)
def test_payload_string_or_dict(self, inc_name, rule_name, circuits_app, new_incident):
""" http-body is a string to render or a dict"""
# Incident data will be posted to HTTP Bin and then the incident name will be
# changed to the incident ID that was posted.
new_incident["name"] = inc_name
inc = circuits_app.app.action_component.rest_client().post("/incidents", new_incident)
event = circuits_app.watcher.wait(rule_name + "_success", timeout=10, channel='actions.rest')
assert event
pytest.wait_for(event, "complete", True)
event = circuits_app.watcher.wait("QueryEvent", timeout=10, channel='actions.rest')
assert event
pytest.wait_for(event, "complete", True)
updated_inc = circuits_app.app.action_component.rest_client().get("/incidents/%d" % inc["id"])
assert updated_inc["name"] == str(inc["id"])
|
[
"hpyle@us.ibm.com"
] |
hpyle@us.ibm.com
|
58abc4b1b7819ca83c47d829f036934ed54e49e7
|
bf7959048edc0005e04431a0864c719adc5ea9ea
|
/python版本/451-FrequencySort.py
|
def3b0ce4fd72584a4725058697bf09520d70677
|
[] |
no_license
|
Yohager/Leetcode
|
7c24f490cfa5fd8e3cdb09e5a2305a134a064a93
|
585af82ff2c2d534053f6886714406019ed0c7d1
|
refs/heads/master
| 2022-12-07T23:51:16.347174
| 2022-11-28T02:30:53
| 2022-11-28T02:30:53
| 178,201,848
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
import collections


class Solution:
    def frequencySort(self, s: str) -> str:
        # Sort characters by descending frequency and repeat each one.
        c = collections.Counter(s)
        ans = ''
        for ch, freq in c.most_common():
            ans += ch * freq
        return ans
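# Example (added for illustration): characters tied on frequency may come
# back in any order, e.g. "tree" can yield either "eert" or "eetr".
print(Solution().frequencySort("tree"))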
|
[
"guoyuhang0921@gmail.com"
] |
guoyuhang0921@gmail.com
|
1d1bce381708be4fc64b894ae43fcf0a22f2e34e
|
6ee9a46a95a504cf91eb5031b180f2d6c6cc9d98
|
/cut_rod.py
|
f4f900ef0683dad36b563fa62f8a127caac380dd
|
[] |
no_license
|
rohitmungre/dynamic_programming
|
8dc952f9f83e15a9b6eae8eef0e509da1c2add97
|
1d1f8036f5f6066bdc39436ace8132208466541e
|
refs/heads/master
| 2020-08-01T22:37:25.817167
| 2019-11-20T05:33:11
| 2019-11-20T05:33:11
| 211,140,758
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
rod = 7
sz = [1,2,3,4]
vl = [2,5,7,8]
def cut_rod_dp(sz, vl, rod, idx, memo):
    # Memoized variant: cache the best value for each (rod, idx) state.
    if rod <= 0 or idx < 0:
        return 0
    key = (rod, idx)
    if key in memo:
        return memo[key]
    tval = 0
    varr = []
    r = rod
    while r >= 0:
        # take piece sz[idx] zero or more times, then move to smaller sizes
        varr.append(tval + cut_rod_dp(sz, vl, r, idx - 1, memo))
        r -= sz[idx]
        tval += vl[idx]
    memo[key] = max(varr)
    return memo[key]
def cut_rod(sz, vl, rod, idx):
if rod<= 0:
return 0
if idx <0:
return 0
tval = 0
varr = []
while rod >= 0:
varr.append(tval+cut_rod(sz, vl, rod, idx-1))
rod = rod - sz[idx]
tval = tval + vl[idx]
return max(varr)
print(cut_rod_dp(sz, vl, rod, 3, {}))
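# Sanity check (added for illustration): the memoized and the plain
# recursive versions should agree on the sample data above.
assert cut_rod(sz, vl, rod, 3) == cut_rod_dp(sz, vl, rod, 3, {})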
|
[
"noreply@github.com"
] |
rohitmungre.noreply@github.com
|
b83ad2d4e1821a822a0a025c4c8ac3d98b9ceca2
|
e87aec694108cb1f76716260daf569bcb8091958
|
/fluo/db/backends/postgresql_psycopg2.py
|
0dc6fcb482eacb73871660aaf300340fe45c5048
|
[
"MIT"
] |
permissive
|
rsalmaso/django-fluo
|
a283b8f75769ac6e57fa321c607819899e0c31c8
|
340e3b4f9c1b4b09feccefb9b3ab2d26d59fac2b
|
refs/heads/master
| 2023-01-12T01:37:06.975318
| 2020-12-01T17:13:11
| 2020-12-01T17:13:11
| 48,948,936
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,233
|
py
|
# Copyright (C) 2007-2020, Raffaele Salmaso <raffaele@salmaso.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from .postgresql import Backend as Postgresql
__all__ = ["Backend"]
class Backend(Postgresql):
pass
|
[
"raffaele@salmaso.org"
] |
raffaele@salmaso.org
|
73bbab25409bb3a778ef3dd83a746c1a3afa4f41
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/dnsresolver/azure-mgmt-dnsresolver/generated_samples/forwarding_rule_patch.py
|
ec4f075536336909b5c46cae450b85e6328d0b0b
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,788
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.dnsresolver import DnsResolverManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-dnsresolver
# USAGE
python forwarding_rule_patch.py
Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = DnsResolverManagementClient(
credential=DefaultAzureCredential(),
subscription_id="abdd4249-9f34-4cc6-8e42-c2e32110603e",
)
response = client.forwarding_rules.update(
resource_group_name="sampleResourceGroup",
dns_forwarding_ruleset_name="sampleDnsForwardingRuleset",
forwarding_rule_name="sampleForwardingRule",
parameters={"properties": {"forwardingRuleState": "Disabled", "metadata": {"additionalProp2": "value2"}}},
)
print(response)
# x-ms-original-file: specification/dnsresolver/resource-manager/Microsoft.Network/stable/2022-07-01/examples/ForwardingRule_Patch.json
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
706c4a133f112d01c765c80eac0083d6d5e90652
|
1c6283303ceb883add8de4ee07c5ffcfc2e93fab
|
/Jinja2/lib/python3.7/site-packages/uhd_restpy/testplatform/sessions/ixnetwork/topology/rxsakpool_22340fe5cb5d81664cab595d3e6d08ef.py
|
8aea7fbb4b72c3d049aa51d15c50a9fa0db81919
|
[] |
no_license
|
pdobrinskiy/devcore
|
0f5b3dfc2f3bf1e44abd716f008a01c443e14f18
|
580c7df6f5db8c118990cf01bc2b986285b9718b
|
refs/heads/main
| 2023-07-29T20:28:49.035475
| 2021-09-14T10:02:16
| 2021-09-14T10:02:16
| 405,919,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,134
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class RxSakPool(Base):
"""Rx Channels configuration.
The RxSakPool class encapsulates a required rxSakPool resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'rxSakPool'
_SDM_ATT_MAP = {
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'Name': 'name',
'RxSak128': 'rxSak128',
'RxSak256': 'rxSak256',
'RxSalt': 'rxSalt',
'RxSsci': 'rxSsci',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(RxSakPool, self).__init__(parent, list_op)
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def RxSak128(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): 128 bit value of Secure Association Key with which DUT is expected to encrypt MACsec packets.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RxSak128']))
@property
def RxSak256(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): 256 bit value of Secure Association Key with which DUT is expected to encrypt MACsec packets.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RxSak256']))
@property
def RxSalt(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): 12 bytes Salt value for XPN cipher suites.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RxSalt']))
@property
def RxSsci(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): 4 bytes Short SCI for XPN cipher suites.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RxSsci']))
def update(self, Name=None):
# type: (str) -> RxSakPool
"""Updates rxSakPool resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def get_device_ids(self, PortNames=None, RxSak128=None, RxSak256=None, RxSalt=None, RxSsci=None):
"""Base class infrastructure that gets a list of rxSakPool device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- RxSak128 (str): optional regex of rxSak128
- RxSak256 (str): optional regex of rxSak256
- RxSalt (str): optional regex of rxSalt
- RxSsci (str): optional regex of rxSsci
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
|
[
"pdobrinskiy@yahoo.com"
] |
pdobrinskiy@yahoo.com
|