blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
20965d57bf76e26a205182ffc8240ddad375cf2b
|
0db97db08743783019efe022190f409d22ff95bd
|
/aliyun/api/rest/Rds20140815DescribeModifyParameterLogRequest.py
|
a738e55de39ed911b27aa2c242f097a771646719
|
[
"Apache-2.0"
] |
permissive
|
snowyxx/aliyun-python-demo
|
8052e2a165f1b869affe632dda484d6ca203bd9b
|
ed40887ddff440b85b77f9b2a1fcda11cca55c8b
|
refs/heads/master
| 2021-01-10T03:37:31.657793
| 2016-01-21T02:03:14
| 2016-01-21T02:03:14
| 49,921,095
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 458
|
py
|
'''
Created by auto_sdk on 2015.06.02
'''
from aliyun.api.base import RestApi
class Rds20140815DescribeModifyParameterLogRequest(RestApi):
	"""Auto-generated request wrapper for the Aliyun RDS
	``DescribeModifyParameterLog`` API (version 2014-08-15).

	Set the public attributes below, then submit via the RestApi base
	machinery.  NOTE(review): generated by auto_sdk — keep field names
	byte-stable; they map 1:1 to API request parameters.
	"""
	def __init__(self,domain='rds.aliyuncs.com',port=80):
		RestApi.__init__(self,domain, port)
		# Request parameters; None means "not sent".
		self.DBInstanceId = None
		self.EndTime = None
		self.PageNumber = None
		self.PageSize = None
		self.StartTime = None

	def getapiname(self):
		# Fully-qualified API name expected by the gateway.
		return 'rds.aliyuncs.com.DescribeModifyParameterLog.2014-08-15'
|
[
"snowyxx@126.com"
] |
snowyxx@126.com
|
1cf471f736f9047d1985610fbf89b38dffb9bb5d
|
aeeaf40350a652d96a392010071df8a486c6e79f
|
/archive/python/Python/binary_search/374.guess-number-higher-or-lower.py
|
1f51d98818399ede0698e42d7d7bd0cde96a1879
|
[
"MIT"
] |
permissive
|
linfengzhou/LeetCode
|
11e6c12ce43cf0053d86437b369a2337e6009be3
|
cb2ed3524431aea2b204fe66797f9850bbe506a9
|
refs/heads/master
| 2021-01-23T19:34:37.016755
| 2018-04-30T20:44:40
| 2018-04-30T20:44:40
| 53,916,868
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
# The guess API is already defined for you.
# @param num, your guess
# @return -1 if my number is lower, 1 if my number is higher, otherwise return 0
# def guess(num):
class Solution(object):
    def guessNumber(self, n):
        """
        :type n: int
        :rtype: int

        Binary search over 1..n using the judge-provided guess() API:
        guess(mid) returns -1 when the picked number is lower than mid,
        1 when it is higher, and 0 on an exact match.  The original file
        was an unimplemented stub.
        """
        lo, hi = 1, n
        while lo <= hi:
            mid = (lo + hi) // 2
            res = guess(mid)
            if res == 0:
                return mid
            if res < 0:
                # Picked number is below mid: discard upper half.
                hi = mid - 1
            else:
                lo = mid + 1
        return -1  # unreachable when the judge picks within 1..n
|
[
"luke.zlf@gmail.com"
] |
luke.zlf@gmail.com
|
4ae49460f06822543fc2ff34e14d8fef115016f7
|
2af6a5c2d33e2046a1d25ae9dd66d349d3833940
|
/res_bw/scripts/common/lib/abc.py
|
37d2a8d88679def4d589700c441407cc6fa1a0d0
|
[] |
no_license
|
webiumsk/WOT-0.9.12-CT
|
e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2
|
2506e34bd6634ad500b6501f4ed4f04af3f43fa0
|
refs/heads/master
| 2021-01-10T01:38:38.080814
| 2015-11-11T00:08:04
| 2015-11-11T00:08:04
| 45,803,240
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 6,294
|
py
|
# 2015.11.10 21:32:36 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/abc.py
"""Abstract Base Classes (ABCs) according to PEP 3119."""
import types
from _weakrefset import WeakSet
class _C:
    pass
# In Python 2, instances of old-style classes all share one type
# (`types.InstanceType`); capture it here so __instancecheck__ below can
# special-case them and use __class__ instead.
_InstanceType = type(_C())
def abstractmethod(funcobj):
    """Mark *funcobj* as abstract and return it unchanged.

    Only meaningful on classes whose metaclass is ABCMeta (or a
    derivative): such a class cannot be instantiated until every
    abstract method is overridden.  The marked method remains callable
    through the usual 'super' mechanisms.

    Usage:

        class C:
            __metaclass__ = ABCMeta
            @abstractmethod
            def my_abstract_method(self, ...):
                ...
    """
    setattr(funcobj, '__isabstractmethod__', True)
    return funcobj
class abstractproperty(property):
    """A ``property`` subclass whose presence marks the attribute abstract.

    Requires a metaclass that is ABCMeta or derived from it; the class
    then refuses instantiation until the abstract property is overridden.

    Usage (read-only):

        class C:
            __metaclass__ = ABCMeta
            @abstractproperty
            def my_abstract_property(self):
                ...

    Read-write form, via the 'long' property declaration:

        class C:
            __metaclass__ = ABCMeta
            def getx(self): ...
            def setx(self, value): ...
            x = abstractproperty(getx, setx)
    """
    # The same marker abstractmethod() sets; ABCMeta collects it.
    __isabstractmethod__ = True
class ABCMeta(type):
    """Metaclass for defining Abstract Base Classes (ABCs).
    Use this metaclass to create an ABC. An ABC can be subclassed
    directly, and then acts as a mix-in class. You can also register
    unrelated concrete classes (even built-in classes) and unrelated
    ABCs as 'virtual subclasses' -- these and their descendants will
    be considered subclasses of the registering ABC by the built-in
    issubclass() function, but the registering ABC won't show up in
    their MRO (Method Resolution Order) nor will method
    implementations defined by the registering ABC be callable (not
    even via super()).
    """
    # Bumped on every register() call so per-class negative caches can
    # detect that they are stale.
    _abc_invalidation_counter = 0

    def __new__(mcls, name, bases, namespace):
        cls = super(ABCMeta, mcls).__new__(mcls, name, bases, namespace)
        # Abstract names declared directly on this class...
        abstracts = set((name for name, value in namespace.items() if getattr(value, '__isabstractmethod__', False)))
        # ...plus inherited abstract names that are still unimplemented here.
        for base in bases:
            for name in getattr(base, '__abstractmethods__', set()):
                value = getattr(cls, name, None)
                if getattr(value, '__isabstractmethod__', False):
                    abstracts.add(name)
        cls.__abstractmethods__ = frozenset(abstracts)
        # Caches hold weak references so they never keep classes alive.
        cls._abc_registry = WeakSet()
        cls._abc_cache = WeakSet()
        cls._abc_negative_cache = WeakSet()
        cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        return cls

    def register(cls, subclass):
        """Register a virtual subclass of an ABC."""
        if not isinstance(subclass, (type, types.ClassType)):
            raise TypeError('Can only register classes')
        if issubclass(subclass, cls):
            return  # already a (virtual) subclass; nothing to do
        if issubclass(cls, subclass):
            raise RuntimeError('Refusing to create an inheritance cycle')
        cls._abc_registry.add(subclass)
        # Invalidate every class's negative cache.
        ABCMeta._abc_invalidation_counter += 1

    def _dump_registry(cls, file = None):
        """Debug helper to print the ABC registry."""
        print >> file, 'Class: %s.%s' % (cls.__module__, cls.__name__)
        print >> file, 'Inv.counter: %s' % ABCMeta._abc_invalidation_counter
        for name in sorted(cls.__dict__.keys()):
            if name.startswith('_abc_'):
                value = getattr(cls, name)
                print >> file, '%s: %r' % (name, value)

    def __instancecheck__(cls, instance):
        """Override for isinstance(instance, cls)."""
        subclass = getattr(instance, '__class__', None)
        if subclass is not None and subclass in cls._abc_cache:
            return True
        subtype = type(instance)
        # Old-style instances all share one type; fall back to __class__.
        if subtype is _InstanceType:
            subtype = subclass
        if subtype is subclass or subclass is None:
            if cls._abc_negative_cache_version == ABCMeta._abc_invalidation_counter and subtype in cls._abc_negative_cache:
                return False
            return cls.__subclasscheck__(subtype)
        else:
            return cls.__subclasscheck__(subclass) or cls.__subclasscheck__(subtype)

    def __subclasscheck__(cls, subclass):
        """Override for issubclass(subclass, cls)."""
        if subclass in cls._abc_cache:
            return True
        if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
            # Registry changed since this cache was filled; reset it.
            cls._abc_negative_cache = WeakSet()
            cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        elif subclass in cls._abc_negative_cache:
            return False
        ok = cls.__subclasshook__(subclass)
        if ok is not NotImplemented:
            if not isinstance(ok, bool):
                raise AssertionError
            if ok:
                cls._abc_cache.add(subclass)
            else:
                cls._abc_negative_cache.add(subclass)
            return ok
        # BUG FIX: the decompiled source flattened this conditional into
        # `cls in mro and cache.add(...)` followed by an UNCONDITIONAL
        # `return True`, making every subclass check succeed.  Restore the
        # CPython Lib/abc.py conditional: only a genuine MRO hit is True.
        if cls in getattr(subclass, '__mro__', ()):
            cls._abc_cache.add(subclass)
            return True
        # Check registered virtual subclasses and their descendants.
        for rcls in cls._abc_registry:
            if issubclass(subclass, rcls):
                cls._abc_cache.add(subclass)
                return True
        # Check direct subclasses recursively.
        for scls in cls.__subclasses__():
            if issubclass(subclass, scls):
                cls._abc_cache.add(subclass)
                return True
        cls._abc_negative_cache.add(subclass)
        return False
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\abc.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:32:36 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
96e13ce85156c34b4c16aa46eb26cb5774458507
|
b9e5aebb49734ad47825130529bd64e59f690ecf
|
/chapter_9/die.py
|
6c02c0f3e4e49e928c96c1881f050c10ddd4aaf1
|
[] |
no_license
|
mikegirenko/python-learning
|
dab0f67d990d95035f93720986c84aaf422f7a9f
|
db9e3f0e3897caf703169d1f14b15a9aa1901161
|
refs/heads/master
| 2021-07-09T08:03:40.535653
| 2020-08-05T00:13:41
| 2020-08-05T00:13:41
| 169,983,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 569
|
py
|
from random import randint
class Die:
    """A die with a configurable number of sides (default six)."""

    def __init__(self, sides=6):
        self.sides = sides

    def roll_die(self):
        """Print one random roll in 1..sides, prefixed with a tab."""
        print('\t', randint(1, self.sides))
# Demo: roll three dice of increasing size, ten times each.
print('Printing 6 sided roll:')
six_sided_roll = Die()
for _ in range(10):
    six_sided_roll.roll_die()

print('Printing 10 sided roll:')
ten_sided_roll = Die(10)
for _ in range(10):
    ten_sided_roll.roll_die()

print('Printing 20 sided roll:')
twenty_sided_roll = Die(20)
for _ in range(10):
    twenty_sided_roll.roll_die()
|
[
"mike.girenko@cybergrx.com"
] |
mike.girenko@cybergrx.com
|
dd422b4ebe4b9e6aeb1fc219d30133cd31641577
|
296287f05a1efed570b8da9ce56d3f6492126d73
|
/snippets/draw_text_in_image.py
|
fcbb7d055099100cada3113b7ce8812f110ddacb
|
[] |
no_license
|
formazione/python_book
|
145f8a2598b6b75736a7c33a796b9fdd8cff668e
|
d7822b312c1db028bb70e25385a74b227a9a2609
|
refs/heads/main
| 2023-07-05T20:15:18.166771
| 2021-08-12T14:14:25
| 2021-08-12T14:14:25
| 320,499,187
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,167
|
py
|
from PIL import Image,ImageDraw,ImageFont
import tkinter as tk
def create_img_with_text(text=""):
    """Render *text* in blue on an orange canvas, save it as mytext.png
    and open it in the default viewer.

    An empty *text* falls back to the site banner string.
    """
    if text == "":
        text = "Pythonprogramming.altervista.org"
    # NOTE(review): hard-coded Windows font path — portable only on this
    # machine.  font.getsize is deprecated in newer Pillow releases.
    font = ImageFont.truetype(
        "C:\\Program Files\\Android\\Android Studio\\jre\\jre\\lib\\fonts\\DroidSans.ttf",
        24,
        encoding="unic")
    # BUG FIX: the canvas was sized from a fixed sample string
    # (unicode_text), so any other text was clipped or over-padded.
    # Measure the string that is actually drawn.
    text_width, text_height = font.getsize(text)
    # Blank canvas with a 5px margin on every side.
    canvas2 = Image.new('RGB', (text_width + 10, text_height + 10), "orange")
    draw = ImageDraw.Draw(canvas2)
    draw.text((5,5), text, 'blue', font)
    canvas2.save("mytext.png", "PNG")
    canvas2.show()
def win_with_image():
    """Open a maximized Tk window showing mytext.png on a 400x500 canvas.

    Expects mytext.png to already exist (e.g. written by
    create_img_with_text).  Blocks in mainloop until the window closes.
    """
    root = tk.Tk()
    root.title("Animation")
    # "zoomed" maximizes the window — Windows-specific state name.
    root.state("zoomed")
    canvas = tk.Canvas(root, width=400, height=500)
    print(canvas['width'])
    canvas.pack()
    # img must stay referenced while mainloop runs, or Tk drops the image.
    img = tk.PhotoImage(file="mytext.png")
    # Anchor W: image's left edge sits at the canvas center point.
    canvas.create_image(int(canvas['width']) // 2,int(canvas['height']) // 2, image=img, anchor=tk.W)
    root.mainloop()
create_img_with_text("This is cool")
# win_with_image()
|
[
"gatto.gio@gmail.com"
] |
gatto.gio@gmail.com
|
ee3dbda8b19a10b1e5348fd84e2fbaa94ac30ee0
|
07504838d12c6328da093dce3726e8ed096cecdb
|
/pylon/resources/properties/minPressureSetpoint.py
|
37a8d2471523d2fe28bceff3606f5ef910265dfe
|
[] |
no_license
|
lcoppa/fiat-lux
|
9caaa7f3105e692a149fdd384ec590676f06bf00
|
7c166bcc08768da67c241078b397570de159e240
|
refs/heads/master
| 2020-04-04T02:47:19.917668
| 2013-10-10T10:22:51
| 2013-10-10T10:22:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,283
|
py
|
"""minPressureSetpoint standard property type, originally defined in resource
file set standard 00:00:00:00:00:00:00:00-0."""
# Copyright (C) 2013 Echelon Corporation. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software" to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# This file is generated from device resource files using an automated
# database to source code conversion process. Grammar and punctuation within
# the embedded documentation may not be correct, as this data is gathered and
# combined from several sources. The machine-generated code may not meet
# compliance with PEP-8 and PEP-257 recommendations at all times.
# Generated at 23-Sep-2013 09:14.
import pylon.resources.datapoints.press
from pylon.resources.standard import standard
class minPressureSetpoint(pylon.resources.datapoints.press.press):
    """minPressureSetpoint standard property type. Minimum pressure.
    Setpoint for the operational low pressure limit.

    NOTE(review): machine-generated from device resource files; the
    private fields below follow the pylon resource framework's
    conventions and should not be hand-edited.
    """
    def __init__(self):
        super().__init__(
        )
        # Raw default value: two zero bytes.
        self._default_bytes = b'\x00\x00'
        self._original_name = 'SCPTminPressureSetpoint'
        # Standard property scope 0, key 234 identify this SCPT type.
        self._property_scope, self._property_key = 0, 234
        # Register this type in the standard resource set.
        self._definition = standard.add(self)
if __name__ == '__main__':
    # unit test code: just construct one instance.
    item = minPressureSetpoint()
    pass
|
[
"lcoppa@rocketmail.com"
] |
lcoppa@rocketmail.com
|
078c40258e6bf4fcda2fc2317f847dddfb2bce21
|
83292e8ee5b14a30f61dcaf3067129e161832366
|
/douban_film.py
|
a798c36cfe2919cb8fa74d911c62c1883780d1e7
|
[] |
no_license
|
A620-Work-Exchange/Application-Integration
|
19197513f1aef67f27b4b984a736cd28ff9c8ac1
|
baada55dd1b988112afd6bd4dc781670983337b8
|
refs/heads/master
| 2020-05-20T20:03:38.842375
| 2019-05-18T09:11:20
| 2019-05-18T09:11:20
| 185,736,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
from urllib.request import urlopen
from time import sleep
import requests
from json_util import JsonUtil
import json
def get_top_film():
    """Fetch Douban's Top 250 movies in pages of 50 and append each movie,
    serialized as JSON, to the file ``1.json``.

    Relies on the remote douban v2 API and the project-local JsonUtil
    helper; sleeps 0.3s between pages to throttle requests.
    """
    url = 'https://api.douban.com/v2/movie/top250'
    for start in range(0, 250, 50):
        req = requests.get(url, params={'start': start, 'count': 50})
        data = req.json()
        for movie in data['subjects']:
            print(movie)
            movie_str = json.dumps(movie)
            JsonUtil.write_file('1.json', movie_str)
        sleep(0.3)
# NOTE(review): runs at import time, not just as a script — every import
# of this module triggers 5 network requests.
get_top_film()
|
[
"2529716798@qq.com"
] |
2529716798@qq.com
|
f2e7f0e94bba710d8fdae5692b1f3256e1ae55d1
|
0bfb55b41282803db96b90e7bba73d86be7e8553
|
/submissions/migrations/0002_auto_20161028_0540.py
|
cd60356ef018a13dc5711524a56d9a60a4a3a77a
|
[
"MIT"
] |
permissive
|
OpenFurry/honeycomb
|
eebf2272f8ae95eb686ad129555dbebcf1adcd63
|
c34eeaf22048948fedcae860db7c25d41b51ff48
|
refs/heads/master
| 2021-01-11T01:52:40.978564
| 2016-12-29T18:08:38
| 2016-12-29T18:08:38
| 70,649,821
| 2
| 2
| null | 2016-12-29T18:08:39
| 2016-10-12T01:22:38
|
Python
|
UTF-8
|
Python
| false
| false
| 2,187
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-28 05:40
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated by Django 1.10.2 (2016-10-28 05:40).

    Adds the relational fields (FK / M2M) to the submission, folderitem
    and folder models — presumably split out of 0001_initial to break
    circular references between the models; confirm against 0001.
    Do not hand-edit field definitions: they must match model state.
    """

    initial = True

    dependencies = [
        ('usermgmt', '0001_initial'),
        ('submissions', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.AddField(
            model_name='submission',
            name='allowed_groups',
            field=models.ManyToManyField(blank=True, to='usermgmt.FriendGroup'),
        ),
        migrations.AddField(
            model_name='submission',
            name='folders',
            field=models.ManyToManyField(blank=True, through='submissions.FolderItem', to='submissions.Folder'),
        ),
        migrations.AddField(
            model_name='submission',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='folderitem',
            name='folder',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='submissions.Folder'),
        ),
        migrations.AddField(
            model_name='folderitem',
            name='submission',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='submissions.Submission'),
        ),
        migrations.AddField(
            model_name='folder',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='folder',
            name='parent',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='submissions.Folder'),
        ),
        migrations.AddField(
            model_name='folder',
            name='submissions',
            field=models.ManyToManyField(through='submissions.FolderItem', to='submissions.Submission'),
        ),
    ]
|
[
"madison.scott-clary@canonical.com"
] |
madison.scott-clary@canonical.com
|
2a97d4fde1b262d7d7571c5622491d16841bed3f
|
313bb88c43d74995e7426f9482c6c8e670fdb63c
|
/07-modules/example6_module.py
|
8926b868c6f45aa8be74c33928f9bfcea9bd86be
|
[] |
no_license
|
martakedzior/python-course
|
8e93fcea3e9e1cb51920cb1fcf3ffbb310d1d654
|
3af2296c2092023d91ef5ff3b4ef9ea27ec2f227
|
refs/heads/main
| 2023-05-06T07:26:58.452520
| 2021-05-26T16:50:26
| 2021-05-26T16:50:26
| 339,822,876
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
# Teaching example: how __name__ distinguishes "run as script" from
# "imported as module".  NOTE(review): the self-import of
# example6_module is kept from the original (this file appears to be
# example6_module.py itself) — verify it is intentional.
import example6_module
import shapes

# BUG FIX: the original compared against '__ main__' (stray space), so
# the script branch never ran; it also referenced the undefined name
# `shapes2` where the import is `shapes`.  `rectangle_aera` spelling is
# kept as-is — presumably matches the shapes module; confirm there.
if __name__ == '__main__':
    print(shapes.rectangle_aera(3, 4))
else:
    print('Jestem teraz modulem')
    print('wartość zmiennej __name__:', __name__)
print('moduł - wartość zmiennej __name__:', __name__)
|
[
"marta.kedzior@wp.pl"
] |
marta.kedzior@wp.pl
|
64d45d9b34c9e2d7e84fae07e4afc49f2795317a
|
0c9ba4d9c73fb3b4ee972aed0b6d844d8a9546a9
|
/TerchaerCode/s13day3课上代码/day3/s1.py
|
2b5de365e8984bdb16be174cabf9b4b954ffbc68
|
[] |
no_license
|
SesameMing/Python51CTONetwork
|
d38179122c8daaed83d7889f17e4c3b7d81e8554
|
76169c581245abf2bcd39ed60dc8c9d11698fd3a
|
refs/heads/master
| 2020-04-15T12:47:15.234263
| 2018-06-02T15:04:04
| 2018-06-02T15:04:04
| 65,876,269
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,765
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:Alex Li
# 1、set,无序,不重复序列
# li = [11,222]
# print(li)
# a. 创建
# li = []
# list((11,22,33,4))
# list __init__,内部执行for循环(11,22,33,4) [11,22,33,4]
# 原理,list
# dic = {"k1":123}
# se = {"123", "456"}
# s = set() # 创建空集合
# li = [11,22,11,22]
# s1 = set(li)
# print(s1)
# b. 功能
# set()\
# 创建集合
# s1 = {11,22}
# s2 = set()
# s3 = set([11,22,33,4])
## 操作集合
# s = set()
# print(s)
# s.add(123)
# s.add(123)
# s.add(123)
# print(s)
# s.clear()
# print(s)
# s1 = {11,22,33}
# s2 = {22,33,44}
# s3 = s1.difference(s2)
# A中存在,B中不存在
# s3 = s2.difference(s1)
# s3 = s1.symmetric_difference(s2)
# print(s1)
# print(s2)
# print(s3)
# s1.difference_update(s2)
# s1.symmetric_difference_update(s2)
# print(s1)
# s1 = {11,22,33}
# s1.discard(1111)
# s1.remove(11111)
# ret = s1.pop()
# print(s1)
# print(ret)
# s1 = {11,22,33}
# s2 = {22,33,44}
# s3 = s1.union(s2)
# print(s3)
# s3 = s1.intersection(s2)
# s1.intersection_update(s2)
# print(s3)
"""
s1 = {11,22,33}
s1.add(11)
s1.add(12)
s1.add(13)
# li = [11,22,3,11,2]
# li = (11,22,3,11,2)
li = "alexalex"
s1.update(li)
print(s1)
"""
# li = [11,22,33] # list __init__
# li() # list __call__
# li[0] # list __getitem__
# li[0] = 123 # list __setitem__
# def li[1] # list __delitem__
# Compare the key sets of two dicts to classify keys as removed,
# added, or retained (the classic "sync old config to new" exercise).
old_dict = {
    "#1": 8,
    "#2": 4,
    "#4": 2,
}
new_dict = {
    "#1": 4,
    "#2": 4,
    "#3": 2,
}
new_set = set(new_dict.keys())
old_set = set(old_dict.keys())
remove_set = old_set.difference(new_set)     # only in old -> delete
add_set = new_set.difference(old_set)        # only in new -> add
update_set = old_set.intersection(new_set)   # in both -> update
import re
# BUG FIX: the original called re.match() with no arguments, which
# raises TypeError (pattern and string are required).  Demonstrate a
# valid call instead.
re.match(r'#\d+', '#1')
|
[
"admin@v-api.cn"
] |
admin@v-api.cn
|
5acdfe52eb25d3cd6de5b1bea421707d7b5ba3cd
|
0630a7addb90600293f0ee0787dd6ab0ac77b09a
|
/LinkedList/FindDuplicatesInArray.py
|
35abfd91e5fb529b564d57ba99c6e779fb8a5c5a
|
[
"MIT"
] |
permissive
|
aritraaaa/Competitive_Programming
|
48ecd3b6e28549889160c04cdbd19a5ad06fa49b
|
ee7eadf51939a360d0b004d787ebabda583e92f0
|
refs/heads/master
| 2023-06-12T07:04:07.698239
| 2021-07-01T11:11:24
| 2021-07-01T11:11:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,083
|
py
|
class Solution:
    # @param A : tuple of integers
    # @return an integer
    def repeatedNumber(self, A):
        """Return the duplicated value in A via Floyd's cycle detection.

        Treats A as a linked list where index i points to A[i]; a
        duplicated value creates a cycle whose entry point is that
        value.  O(n) time, O(1) extra space.  Returns -1 when the
        pointers meet back at index 0.
        """
        # Phase 1: tortoise advances one hop, hare two, until they meet
        # somewhere inside the cycle.
        tortoise, hare = A[0], A[A[0]]
        while tortoise != hare:
            tortoise = A[tortoise]
            hare = A[A[hare]]
        # Phase 2: restart one pointer at the head; moving both at the
        # same speed, they meet exactly at the cycle entrance.
        tortoise = 0
        while tortoise != hare:
            tortoise = A[tortoise]
            hare = A[hare]
        return -1 if tortoise == 0 else tortoise
|
[
"amitrajitbose9@gmail.com"
] |
amitrajitbose9@gmail.com
|
bb090a14d03d9ae34916626a733163fb80a13d07
|
6fd5d30cf21716893388442eb0f9c16e13b91315
|
/ABC/146/b.py
|
c2dd6c1a2676c0ffb2fe3790a90434aca68c06bd
|
[] |
no_license
|
mgmk2/atcoder-python
|
23d45f3195977f1f5839f6a6315e19cac80da2be
|
beec5857a8df2957ff7b688f717d4253b4196e10
|
refs/heads/master
| 2021-06-09T20:00:22.500222
| 2021-05-04T15:36:39
| 2021-05-04T15:36:39
| 179,711,330
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
def caesar_shift(s, n, alphabet='ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
    """Shift each character of *s* forward by *n* places in *alphabet*,
    wrapping around (characters not in *alphabet* map via find() == -1,
    as in the original script)."""
    return ''.join(alphabet[(n + alphabet.find(ch)) % len(alphabet)] for ch in s)


if __name__ == '__main__':
    # AtCoder ABC146 B: rotate every letter of S forward by N.
    n = int(input())
    s = input()
    print(caesar_shift(s, n))
|
[
"mgmk2.dev@gmail.com"
] |
mgmk2.dev@gmail.com
|
e34f54532e8403ba6405c2e1be24e8b4eb190ba3
|
bcc3359817a74c97b8804d415b5b578d03ca4fc9
|
/test/assets/classes/message.py
|
ae9a9cd7c2fcfd518fe6587ade21a5477f78edb7
|
[] |
no_license
|
pydget/pyspare
|
21c7677e66987ef4625dc7a71f041beb025b0350
|
46ef0e3c4eca1ceb52a86cae3d790483d25b2906
|
refs/heads/master
| 2023-02-19T19:18:13.743639
| 2021-01-16T01:27:20
| 2021-01-16T01:27:20
| 288,975,432
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
from dataclasses import dataclass


@dataclass
class Message:
    """A routed message with an origin, a target, and a body.

    BUG FIX: the original combined @dataclass with a hand-written
    __init__ and no field annotations, so the decorator generated an
    EMPTY __eq__/__repr__ — every two Message objects compared equal.
    Declaring the fields lets the dataclass machinery generate the
    constructor, repr and field-wise equality; __slots__ still keeps
    instances dict-free.  Constructor signature is unchanged:
    Message(origin, target, body).
    """
    __slots__ = 'origin', 'target', 'body'

    origin: object
    target: object
    body: object
|
[
"hoyeungw@outlook.com"
] |
hoyeungw@outlook.com
|
ad03b895afc6d180aa2358f68de8fcb600e871dd
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/W3Hptw6ieTtrWNw4H_17.py
|
8740c16f02a86224026dad019c28269cb2d8f877
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,753
|
py
|
"""
The basic **Polybius Square** is a 5x5 square grid with the letters A-Z
written into the grid. "I" and "J" typically share a slot (as there are 26
letters and only 25 slots).
| 1| 2| 3| 4| 5
---|---|---|---|---|---
**1**| A| B| C| D| E
**2**| F| G| H| I/J| K
**3**| L| M| N| O| P
**4**| Q| R| S| T| U
**5**| V| W| X| Y| Z
The **Bifid** cipher uses the Polybius square but adds a layer of complexity.
Start with a secret message. Remove spaces and punctuation.
plaintext = "ikilledmufasa"
Encipher the message using the basic Polybius cipher (see my [previous
challenge](https://edabit.com/challenge/2C3gtb4treAFyWJMg) — right click and
select "open in new tab"), but write the numbers in two rows under the
message, like so:
i| k| i| l| l| e| d| m| u| f| a| s| a
---|---|---|---|---|---|---|---|---|---|---|---|---
2| 2| 2| 3| 3| 1| 1| 3| 4| 2| 1| 4| 1
4| 5| 4| 1| 1| 5| 4| 2| 5| 1| 1| 3| 1
Read off the numbers horizontally, in pairs:
22 23 31 13 42 14 14 54 11 54 25 11 31
Generate the ciphertext by converting these new pairs of numbers into new
letters using the Polybius square.
ciphertext = "ghlcrddyaykal"
Create a function that takes a plaintext or ciphertext, and returns the
corresponding ciphertext or plaintext.
### Examples
bifid("I killed Mufasa!") ➞ "ghlcrddyaykal"
bifid("ghlcrddyaykal") ➞ "ikilledmufasa"
bifid("hi") ➞ "go"
### Notes
N/A
"""
def bifid(text):
    """Encipher or decipher *text* with the Bifid cipher over a 5x5
    Polybius square (I and J share a cell).

    Mode is inferred from the input: text containing a space is treated
    as plaintext to encipher; otherwise as ciphertext to decipher.
    Returns lowercase letters only.
    """
    text = text.upper()
    tabel = []        # the 5x5 Polybius square
    nr = 0
    plaintext = ''    # input stripped of non-letters
    ok = 2            # mode flag; dead value, immediately overwritten
    if ' ' in text:
        ok = 1        # spaces present -> encipher
    else:
        ok = 0        # no spaces -> decipher
    # Keep only alphabetic characters (text is upper-cased above, so the
    # lowercase range test is always true vacuously).
    for i in range(len(text)):
        if (text[i] < 'a' or text[i] > 'z') and (text[i] < 'A' or text[i] > 'Z'):
            plaintext = plaintext
        else:
            plaintext += text[i]
    # Build the square A..Z, skipping 'J' (nr == 9) so I/J share a slot.
    for i in range(5):
        a = []
        for j in range(5):
            if nr == 9:
                nr += 1
                a.append(chr(65 + nr))
                nr += 1
            else:
                a.append(chr(65 + nr))
                nr += 1
        tabel.append(a)
    linie1 = ''   # 1-based row digit per letter
    linie2 = ''   # 1-based column digit per letter
    if ok == 1:
        # --- encipher ---
        # Locate each letter's row by comparing against each row's first
        # letter; rows are contiguous alphabet runs.
        for i in range(len(plaintext)):
            for j in range(len(tabel)):
                if tabel[j][0] > plaintext[i]:
                    # letter belongs to the previous row (j-1), digit j
                    linie1 = linie1 + str(j)
                    linie2 = linie2 + str(tabel[j - 1] .index(plaintext[i]) + 1)
                    break
                if j == len(tabel) - 1 and ord(plaintext[i]) >= ord(tabel[j][0]):
                    # letter is in the last row
                    linie1 = linie1 + str(j + 1)
                    linie2 = linie2 + str(tabel[j].index(plaintext[i]) + 1)
        # Read all row digits then all column digits as one stream, in pairs.
        linief = linie1 + linie2
        message = ''
        for i in range(0, len(linief), 2):
            message += tabel[int(linief[i]) - 1][int(linief[i + 1]) - 1]
        message = message.lower()
        return message
    else:
        # --- decipher: invert the pairing performed above ---
        linie1 = ''
        linie2 = ''
        for i in range(len(plaintext)):
            for j in range(len(tabel)):
                if tabel[j][0] > plaintext[i]:
                    linie1 = linie1 + str(j)
                    linie2 = linie2 + str(tabel[j - 1].index(plaintext[i]) + 1)
                    break
                if j == len(tabel) - 1 and ord(plaintext[i]) >= ord(tabel[j][0]):
                    linie1 = linie1 + str(j + 1)
                    linie2 = linie2 + str(tabel[j].index(plaintext[i]) + 1)
        # Interleave the two digit rows, then split the stream back into
        # the original row half and column half.
        linief = ''
        for i in range(len(linie1)):
            linief += linie1[i] + linie2[i]
        linie1 = linief[0:len(linie1)]
        linie2 = linief[len(linie2):]
        message = ''
        for i in range(len(linie1)):
            message += tabel[int(linie1[i]) - 1][int(linie2[i]) - 1]
        message = message.lower()
        return message
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
a44762f7f83ec08c0a592bc170b50259d8bd49e2
|
292417a70e83d33fc4cedaed34d1b8e859ffe1a7
|
/market/urls.py
|
113708503a9a96ee4ed392f28d9d1321ee1c94c8
|
[] |
no_license
|
cooluks2/Niche-market-mine
|
c739144b61dfecd641f19bfa20439388d9dd562d
|
2eacedd83ae3d1690ac56f9ae4089a44737c4771
|
refs/heads/master
| 2022-12-10T09:45:13.759650
| 2020-08-31T02:52:07
| 2020-08-31T02:52:07
| 289,786,363
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,253
|
py
|
from django.urls import path
from market.views import StoreDV, StoreLV, StoreCreateView, StoreUpdateView, StoreDeleteView, store_download
from market.views import MarketDV, MarketCreateView, MarketUpdateView, MarketDeleteView, market_download
from mysite.views import HomeView
from market.models import Market, Location
# URL namespace: reverse these as 'market:<name>'.
app_name = 'market'
urlpatterns = [
    path('', HomeView.as_view(), name='home'),
    # Market detail / CRUD / file download.
    path('<int:pk>/', MarketDV.as_view(), name='market'),
    path('market_add/', MarketCreateView.as_view(), name="market_add"),
    path('<int:pk>/market_update/', MarketUpdateView.as_view(), name="market_update"),
    path('<int:pk>/market_delete/', MarketDeleteView.as_view(), name="market_delete"),
    path('market_download/<int:id>', market_download, name="market_download"),
    # Store list / detail / CRUD / file download.  NOTE(review): <int:fk>
    # presumably identifies the parent market — confirm in the views.
    path('store/<int:pk>/', StoreLV.as_view(), name='store'),
    path('store/<int:fk>/<int:pk>/', StoreDV.as_view(), name='store_detail'),
    path('<int:fk>/store_add/', StoreCreateView.as_view(), name="store_add"),
    path('<int:pk>/store_update/', StoreUpdateView.as_view(), name="store_update"),
    path('<int:pk>/store_delete/', StoreDeleteView.as_view(), name="store_delete"),
    path('store_download/<int:id>', store_download, name="store_download"),
]
|
[
"cooluks2@gmail.com"
] |
cooluks2@gmail.com
|
6cb65b44504b20720b6967c08c0fb580dd2850cb
|
cd6a835b14596620d46236ce2ec8003b42dcd393
|
/machina/apps/forum/urls.py
|
588a345c6d1914ddacaafe935dbb9bae7b6ff0a3
|
[] |
no_license
|
VanHai88/covert-site
|
bfec3ed75a75f4a29614906d982fd565ac1e011b
|
2385ebaf1ed6c0eb42027f6665f545ce60828c12
|
refs/heads/master
| 2023-06-08T19:43:18.339787
| 2021-06-22T09:28:00
| 2021-06-22T09:28:00
| 379,212,850
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 947
|
py
|
"""
Forum URLs
==========
This module defines URL patterns associated with the django-machina's ``forum`` application.
"""
from django.urls import path
from machina.core.loading import get_class
from machina.core.urls import URLPatternsFactory
class ForumURLPatternsFactory(URLPatternsFactory):
    """ Allows to generate the URL patterns of the ``forum`` application. """

    app_namespace = 'forum'

    # Views are resolved through machina's get_class so projects can
    # override them without forking this module.
    index_view = get_class('forum.views', 'IndexView')
    forum_view = get_class('forum.views', 'ForumView')

    def get_urlpatterns(self):
        """ Returns the URL patterns managed by the considered factory / application. """
        return [
            # Forum index at the application root.
            path('', self.index_view.as_view(), name='index'),
            # Individual forum, addressed by slug + uuid.
            path(
                'forum/<str:slug>/<str:uuid>/',
                self.forum_view.as_view(),
                name='forum',
            ),
        ]
urlpatterns_factory = ForumURLPatternsFactory()
|
[
"hai.nguyen@emwealthtech.com"
] |
hai.nguyen@emwealthtech.com
|
3370e689410d396a827a715f14aedb1803000b7e
|
f048f66977ebcfd3973f5cb41911e5de8b1bf7f5
|
/pullenti/ner/NumberSpellingType.py
|
72db57f98484782ba6c07c2f861ed30185173d7c
|
[] |
no_license
|
AAA1911/PullentiPython
|
e01223d2d8656a8fbcc0873446a12d7e5c913f4a
|
f25b228c8eef9b70acb1285f405c976542342319
|
refs/heads/master
| 2020-12-22T12:56:21.701229
| 2019-12-11T08:34:43
| 2019-12-11T08:34:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 708
|
py
|
# Copyright (c) 2013, Pullenti. All rights reserved. Non-Commercial Freeware.
# This class is generated using the converter UniSharping (www.unisharping.ru) from Pullenti C#.NET project (www.pullenti.ru).
# See www.pullenti.ru/downloadpage.aspx.
from enum import IntEnum
class NumberSpellingType(IntEnum):
    """Possible ways a number can be written out in text."""

    # With digits, e.g. "42".
    DIGIT = 0
    # With Roman numerals, e.g. "XIV".
    ROMAN = 1
    # Spelled out in words.
    WORDS = 2
    # Age / anniversary form (Russian "-летие" suffix).
    AGE = 3

    @classmethod
    def has_value(cls, value):
        """Return True when *value* equals one of the member values."""
        for member in cls:
            if member.value == value:
                return True
        return False
|
[
"alex@alexkuk.ru"
] |
alex@alexkuk.ru
|
3cff9791f0f876f817bef6ab82efff8e16924526
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/x12/4030/840004030.py
|
ea51f50d52dde6357e28745d6a0cd44e12f4297f
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 7,084
|
py
|
from bots.botsconfig import *
from records004030 import recorddefs
# Envelope settings for this X12 004030 grammar (bots EDI translator).
syntax = {
    'version' : '00403', #version of ISA to send
    'functionalgroup' : 'RQ',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BQT', MIN: 1, MAX: 1},
{ID: 'CUR', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'PER', MIN: 0, MAX: 3},
{ID: 'TAX', MIN: 0, MAX: 3},
{ID: 'FOB', MIN: 0, MAX: 99999},
{ID: 'CTP', MIN: 0, MAX: 99999},
{ID: 'PAM', MIN: 0, MAX: 10},
{ID: 'CSH', MIN: 0, MAX: 25},
{ID: 'SAC', MIN: 0, MAX: 25},
{ID: 'ITD', MIN: 0, MAX: 5},
{ID: 'DIS', MIN: 0, MAX: 20},
{ID: 'DTM', MIN: 0, MAX: 10},
{ID: 'LIN', MIN: 0, MAX: 5},
{ID: 'PID', MIN: 0, MAX: 200},
{ID: 'MEA', MIN: 0, MAX: 40},
{ID: 'PWK', MIN: 0, MAX: 25},
{ID: 'PKG', MIN: 0, MAX: 200},
{ID: 'TD1', MIN: 0, MAX: 2},
{ID: 'TD5', MIN: 0, MAX: 12},
{ID: 'TD3', MIN: 0, MAX: 12},
{ID: 'TD4', MIN: 0, MAX: 5},
{ID: 'MAN', MIN: 0, MAX: 10},
{ID: 'RRA', MIN: 0, MAX: 100},
{ID: 'CTB', MIN: 0, MAX: 99999},
{ID: 'LDT', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'QTY', MIN: 0, MAX: 99999},
{ID: 'MTX', MIN: 0, MAX: 99999},
]},
{ID: 'N9', MIN: 0, MAX: 1000, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 99999},
{ID: 'MTX', MIN: 0, MAX: 99999},
{ID: 'PWK', MIN: 0, MAX: 99999},
{ID: 'EFI', MIN: 0, MAX: 99999},
]},
{ID: 'N1', MIN: 0, MAX: 10000, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 99999},
{ID: 'REF', MIN: 0, MAX: 12},
{ID: 'PER', MIN: 0, MAX: 99999},
{ID: 'SI', MIN: 0, MAX: 99999},
{ID: 'FOB', MIN: 0, MAX: 1},
{ID: 'TD1', MIN: 0, MAX: 2},
{ID: 'TD5', MIN: 0, MAX: 12},
{ID: 'TD3', MIN: 0, MAX: 12},
{ID: 'TD4', MIN: 0, MAX: 5},
{ID: 'PKG', MIN: 0, MAX: 200},
{ID: 'RRA', MIN: 0, MAX: 25},
]},
{ID: 'SPI', MIN: 0, MAX: 1, LEVEL: [
{ID: 'REF', MIN: 0, MAX: 5},
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'MTX', MIN: 0, MAX: 99999},
{ID: 'N1', MIN: 0, MAX: 20, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 20},
{ID: 'G61', MIN: 0, MAX: 1},
{ID: 'MTX', MIN: 0, MAX: 99999},
]},
{ID: 'CB1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'REF', MIN: 0, MAX: 20},
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'LDT', MIN: 0, MAX: 1},
{ID: 'MTX', MIN: 0, MAX: 99999},
]},
]},
{ID: 'PCT', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'AMT', MIN: 0, MAX: 99999},
]},
{ID: 'ADV', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 99999},
{ID: 'MTX', MIN: 0, MAX: 99999},
]},
{ID: 'LM', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'LQ', MIN: 1, MAX: 99999},
]},
{ID: 'PO1', MIN: 1, MAX: 100000, LEVEL: [
{ID: 'LIN', MIN: 0, MAX: 99999},
{ID: 'G53', MIN: 0, MAX: 1},
{ID: 'CUR', MIN: 0, MAX: 1},
{ID: 'CN1', MIN: 0, MAX: 1},
{ID: 'PO3', MIN: 0, MAX: 25},
{ID: 'CTP', MIN: 0, MAX: 99999},
{ID: 'PAM', MIN: 0, MAX: 10},
{ID: 'CTB', MIN: 0, MAX: 99999},
{ID: 'MEA', MIN: 0, MAX: 40},
{ID: 'PID', MIN: 0, MAX: 1000, LEVEL: [
{ID: 'MEA', MIN: 0, MAX: 10},
]},
{ID: 'PWK', MIN: 0, MAX: 25},
{ID: 'PKG', MIN: 0, MAX: 200},
{ID: 'PO4', MIN: 0, MAX: 99999},
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'PER', MIN: 0, MAX: 3},
{ID: 'SAC', MIN: 0, MAX: 25},
{ID: 'IT8', MIN: 0, MAX: 25},
{ID: 'CSH', MIN: 0, MAX: 99999},
{ID: 'ITD', MIN: 0, MAX: 2},
{ID: 'DIS', MIN: 0, MAX: 20},
{ID: 'TAX', MIN: 0, MAX: 3},
{ID: 'FOB', MIN: 0, MAX: 99999},
{ID: 'SDQ', MIN: 0, MAX: 50},
{ID: 'DTM', MIN: 0, MAX: 10},
{ID: 'FST', MIN: 0, MAX: 99999},
{ID: 'TD1', MIN: 0, MAX: 1},
{ID: 'TD5', MIN: 0, MAX: 12},
{ID: 'TD3', MIN: 0, MAX: 12},
{ID: 'TD4', MIN: 0, MAX: 5},
{ID: 'MAN', MIN: 0, MAX: 10},
{ID: 'RRA', MIN: 0, MAX: 25},
{ID: 'MTX', MIN: 0, MAX: 99999},
{ID: 'SPI', MIN: 0, MAX: 99999},
{ID: 'LM', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'LQ', MIN: 1, MAX: 99999},
]},
{ID: 'QTY', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'SI', MIN: 0, MAX: 99999},
]},
{ID: 'SCH', MIN: 0, MAX: 104, LEVEL: [
{ID: 'TD1', MIN: 0, MAX: 2},
{ID: 'TD5', MIN: 0, MAX: 12},
{ID: 'TD3', MIN: 0, MAX: 12},
{ID: 'TD4', MIN: 0, MAX: 5},
{ID: 'REF', MIN: 0, MAX: 99999},
]},
{ID: 'LDT', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'QTY', MIN: 0, MAX: 99999},
{ID: 'MTX', MIN: 0, MAX: 99999},
{ID: 'LM', MIN: 0, MAX: 1, LEVEL: [
{ID: 'LQ', MIN: 1, MAX: 1},
]},
]},
{ID: 'SLN', MIN: 0, MAX: 1000, LEVEL: [
{ID: 'MTX', MIN: 0, MAX: 99999},
{ID: 'PID', MIN: 0, MAX: 1000},
{ID: 'ADV', MIN: 0, MAX: 99999},
{ID: 'QTY', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'SI', MIN: 0, MAX: 99999},
]},
]},
{ID: 'N9', MIN: 0, MAX: 1000, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 99999},
{ID: 'MTX', MIN: 0, MAX: 99999},
{ID: 'PWK', MIN: 0, MAX: 99999},
{ID: 'EFI', MIN: 0, MAX: 99999},
]},
{ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'QTY', MIN: 0, MAX: 99999},
{ID: 'REF', MIN: 0, MAX: 12},
{ID: 'PER', MIN: 0, MAX: 3},
{ID: 'SI', MIN: 0, MAX: 99999},
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'FOB', MIN: 0, MAX: 1},
{ID: 'SCH', MIN: 0, MAX: 200},
{ID: 'TD1', MIN: 0, MAX: 2},
{ID: 'TD5', MIN: 0, MAX: 12},
{ID: 'TD3', MIN: 0, MAX: 12},
{ID: 'TD4', MIN: 0, MAX: 5},
{ID: 'PKG', MIN: 0, MAX: 200},
{ID: 'RRA', MIN: 0, MAX: 25},
{ID: 'CTP', MIN: 0, MAX: 1},
{ID: 'PAM', MIN: 0, MAX: 10},
{ID: 'LDT', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'MAN', MIN: 0, MAX: 10},
{ID: 'QTY', MIN: 0, MAX: 5},
{ID: 'MTX', MIN: 0, MAX: 99999},
]},
]},
{ID: 'PCT', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'AMT', MIN: 0, MAX: 99999},
]},
]},
{ID: 'CTT', MIN: 0, MAX: 1},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
fc1266738f799c65b9d4f71e6846f6b72d00fc74
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/recoveryservices/get_replication_migration_item.py
|
d1e403b2a6e74c291c8bcdb23200e48a30cd7dcb
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 4,452
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetReplicationMigrationItemResult',
'AwaitableGetReplicationMigrationItemResult',
'get_replication_migration_item',
]
@pulumi.output_type
class GetReplicationMigrationItemResult:
    """
    Migration item.
    """
    # NOTE(review): this class lives in a generated file ("do not edit by
    # hand" per the module header) — mirror any lasting change into the
    # Pulumi SDK generator.
    def __init__(__self__, id=None, location=None, name=None, properties=None, type=None):
        # Validate-and-store each field; pulumi.set registers the value so the
        # @pulumi.output_type decorator can expose it via the getters below.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource Id
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource Location
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource Name
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> 'outputs.MigrationItemPropertiesResponse':
        """
        The migration item properties.
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource Type
        """
        return pulumi.get(self, "type")
class AwaitableGetReplicationMigrationItemResult(GetReplicationMigrationItemResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes __await__ a generator function, so an
        # already-resolved result can still be awaited without suspending.
        if False:
            yield self
        return GetReplicationMigrationItemResult(
            id=self.id,
            location=self.location,
            name=self.name,
            properties=self.properties,
            type=self.type)
def get_replication_migration_item(fabric_name: Optional[str] = None,
                                   migration_item_name: Optional[str] = None,
                                   protection_container_name: Optional[str] = None,
                                   resource_group_name: Optional[str] = None,
                                   resource_name: Optional[str] = None,
                                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetReplicationMigrationItemResult:
    """
    Migration item.
    API Version: 2018-07-10.

    :param str fabric_name: Fabric unique name.
    :param str migration_item_name: Migration item name.
    :param str protection_container_name: Protection container name.
    :param str resource_group_name: The name of the resource group where the recovery services vault is present.
    :param str resource_name: The name of the recovery services vault.
    """
    # Map Python snake_case arguments onto the provider's camelCase keys.
    __args__ = dict()
    __args__['fabricName'] = fabric_name
    __args__['migrationItemName'] = migration_item_name
    __args__['protectionContainerName'] = protection_container_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceName'] = resource_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    # Synchronous provider invoke; .value holds the typed result object.
    __ret__ = pulumi.runtime.invoke('azure-native:recoveryservices:getReplicationMigrationItem', __args__, opts=opts, typ=GetReplicationMigrationItemResult).value

    return AwaitableGetReplicationMigrationItemResult(
        id=__ret__.id,
        location=__ret__.location,
        name=__ret__.name,
        properties=__ret__.properties,
        type=__ret__.type)
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
d793f87b0bef4eaaef51634ad0c4592d4a02d5ee
|
dd573ed68682fd07da08143dd09f6d2324f51345
|
/daily_study/ProblemSolving/5430_AC.py
|
ee3166173aa8bebbfdd26b513e4d008af4aec83f
|
[] |
no_license
|
chelseashin/My-Algorithm
|
0f9fb37ea5c6475e8ff6943a5fdaa46f0cd8be61
|
db692e158ebed2d607855c8e554fd291c18acb42
|
refs/heads/master
| 2021-08-06T12:05:23.155679
| 2021-07-04T05:07:43
| 2021-07-04T05:07:43
| 204,362,911
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,117
|
py
|
from sys import stdin
input = stdin.readline
def solve(numbers, cmds=None):
    """Run an AC-language program (BOJ 5430) over a list of number strings.

    ``cmds`` is a command string: 'R' reverses the array, 'D' deletes its
    first element.  When omitted, the module-level ``p`` read by the driver
    loop is used, so the original call style ``solve(numbers)`` still works.

    Returns the surviving elements as "[a,b,...]" ("[]" when empty), or
    "error" when a 'D' is issued against an empty array.
    """
    if cmds is None:
        cmds = p  # command string parsed by the stdin driver below
    # Two-pointer technique: instead of physically reversing, track reversal
    # parity (rev) and defer front deletions (front); back deletions pop
    # immediately.  Every command is O(1).
    rev = 0    # number of 'R' commands seen (only parity matters)
    front = 0  # deletions deferred at the logical front
    for cmd in cmds:
        if cmd == "R":
            rev += 1
        elif cmd == "D":
            if len(numbers) - front == 0:
                return "error"  # 'D' on an empty array
            if rev % 2 == 0:
                front += 1       # logical front is the physical front: defer
            else:
                numbers.pop()    # logical front is the physical back: pop now
    numbers = numbers[front:]
    if rev % 2:
        # Bug fix: the original called `numbers[dcnt:].reverse()`, which
        # reversed a throwaway slice copy and then formatted the *unsliced*
        # list, leaking deleted elements into the output.
        numbers.reverse()
    # Bug fix: join emits "[]" for an empty result; the original manual loop
    # produced an unterminated "[" (it also printed a debug line every call).
    return "[" + ",".join(numbers) + "]"
T = int(input())  # number of test cases
for _ in range(T):
    p = input().strip()                   # AC program, e.g. "RDD"
    n = int(input())                      # declared element count (unused)
    numbers = input().strip().split(',')  # "[1,2,3]" -> ["[1", "2", "3]"]
    numbers[0] = numbers[0][1:]           # strip leading '['
    numbers[-1] = numbers[-1][:-1]        # strip trailing ']'
    # NOTE(review): for the empty input "[]" this yields [""] rather than
    # [] — confirm against the judge's n == 0 cases.
    print(solve(numbers))
|
[
"chaewonshin95@gmail.com"
] |
chaewonshin95@gmail.com
|
0db7f68eab74751e0b8f455e123cefcc363b17d2
|
470eb6b6af669ae037d1aaaf28c7169d906ca25e
|
/src/split_read_matrices_by_plate.py
|
8292d94d002a13ad6308b38113fa0d8197f0494f
|
[] |
no_license
|
wxgao33/CSI-Microbes-analysis
|
5bddd6cc4ffb7ec2dca833231a4e966b92f348a1
|
273b41a20c4c13af0efe2a888821b0cfc5e0c189
|
refs/heads/master
| 2023-04-12T00:59:02.423797
| 2021-05-18T15:03:11
| 2021-05-18T15:03:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
import pandas as pd
# Snakemake script: subset a read-count matrix to the cells of one plate.
# The `snakemake` object (input, output, wildcards) is injected by the
# Snakemake runtime when this script runs inside a rule.
read_df = pd.read_csv(snakemake.input[0], sep="\t", index_col=0)
metadata_df = pd.read_csv(snakemake.input[1], sep="\t")
# Keep only metadata rows whose plate matches the {plate} wildcard; the
# cast to str makes numeric plate IDs comparable with the wildcard string.
metadata_df = metadata_df.loc[metadata_df["plate"].astype("str") == snakemake.wildcards["plate"]]
# Column-subset the matrix to that plate's cells (columns named by "cell").
read_df = read_df[metadata_df["cell"]]
read_df.to_csv(snakemake.output[0], sep="\t")
metadata_df.to_csv(snakemake.output[1], sep="\t", index=False)
|
[
"wir963@gmail.com"
] |
wir963@gmail.com
|
0e82ee79e918a29ba71b84fda1e05d64b7d61662
|
88509a8ce62a22acc0639c683900d5d0cb8d69e7
|
/Day22/orm/app/views.py
|
034ffce2d9c3976faf3424c9b86052e00b42b8fe
|
[] |
no_license
|
pytutorial/py2104
|
8b0238ab6f6d2f5395aee5fbe1f4aff03b819cd3
|
48b36d6b1f40730ef2747c310e70fb6997eda388
|
refs/heads/main
| 2023-09-03T16:55:02.285158
| 2021-10-20T05:24:31
| 2021-10-20T05:24:31
| 391,613,464
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,166
|
py
|
from django.shortcuts import HttpResponse
from .models import *
import json
# Create your views here.
def get_product_by_code(request, code):
    """Serve a single product, looked up by its unique code, as JSON."""
    product = Product.objects.get(code=code)
    payload = {
        field: getattr(product, field)
        for field in ('id', 'name', 'code', 'description', 'price')
    }
    return HttpResponse(json.dumps(payload))
def search_product(request):
    """Serve a comma-separated list of product names matching ?keyword=."""
    keyword = request.GET.get('keyword', '')
    matches = Product.objects.filter(
        name__icontains=keyword)
    return HttpResponse(','.join(product.name for product in matches))
def get_customer_by_phone(request, phone):
    """Serve the name of the customer registered under *phone*."""
    return HttpResponse(Customer.objects.get(phone=phone).name)
def search_customer(request):
    """Serve a comma-separated list of customer names matching ?keyword=.

    Fix: removed the three leftover debug ``print`` calls that spammed the
    server log on every request; response content is unchanged.
    """
    keyword = request.GET.get('keyword', '')
    customer_list = Customer.objects.filter(name__icontains=keyword)
    result = ','.join(customer.name for customer in customer_list)
    return HttpResponse(result)
|
[
"duongthanhtungvn01@gmail.com"
] |
duongthanhtungvn01@gmail.com
|
9b2e42ad3619a8aa8d9e99c6a2b3c8045609e66e
|
475d1b83b77e2730b53722f0d8d11b070f97018a
|
/travelapp/migrations/backup/0013_auto_20210221_1309.py
|
6de634dbf85cd70b3b448828cfa895fc3a0f6706
|
[
"MIT"
] |
permissive
|
Gwellir/my-region
|
b651284ee4d4ec7ec892bb78a7ce3444c833d035
|
baacb7f54a19c55854fd068d6e38b3048a03d13d
|
refs/heads/main
| 2023-04-20T17:31:33.040419
| 2021-05-17T13:35:38
| 2021-05-17T13:35:38
| 336,533,029
| 0
| 1
|
MIT
| 2021-05-17T13:35:39
| 2021-02-06T12:31:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,558
|
py
|
# Generated by Django 3.1.6 on 2021-02-21 10:09
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see file header): drops the currency
    # companion columns, converts both price fields to plain DecimalFields,
    # and introduces TripOption with an M2M link back to Trip.

    dependencies = [
        ('travelapp', '0012_trip_subbed'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='route',
            name='base_price_currency',
        ),
        migrations.RemoveField(
            model_name='trip',
            name='price_currency',
        ),
        migrations.AlterField(
            model_name='route',
            name='base_price',
            field=models.DecimalField(decimal_places=2, default=0, max_digits=7, verbose_name='Ориентировочная стоимость прохождения маршрута'),
        ),
        migrations.AlterField(
            model_name='trip',
            name='price',
            field=models.DecimalField(decimal_places=2, max_digits=7, verbose_name='Стоимость прохождения маршрута'),
        ),
        migrations.CreateModel(
            name='TripOption',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='Наименование опции')),
                ('price', models.DecimalField(decimal_places=2, default=0, max_digits=5, verbose_name='Стоимость опции')),
                ('trip', models.ManyToManyField(related_name='options', to='travelapp.Trip')),
            ],
        ),
    ]
|
[
"gwellir@gmail.com"
] |
gwellir@gmail.com
|
0e54f592add357a09ba8655d612cbf44e75aacd4
|
e694891ff8c9d06df7b7b5def7ba71c1dba03aa8
|
/redis_queue/db.py
|
730396f0069a2660ad5e33e14ba3afafc373801f
|
[] |
no_license
|
wangyu190810/python-skill
|
78f9abb39ebfa01b92ffb2ec96c7ef57c490d68d
|
719d082d47a5a82ce4a15c57dd481932a9d8f1ba
|
refs/heads/master
| 2020-04-05T17:43:48.005145
| 2019-02-01T01:45:49
| 2019-02-01T01:45:49
| 41,524,479
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
# -*-coding:utf-8-*-
# email:190810401@qq.com
__author__ = 'wangyu'
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine,text
from config import Config
def connection(database):
    """Open and return a new SQLAlchemy session for *database* (a DB URL)."""
    factory = sessionmaker(create_engine(database))
    return factory()
# Module-level session shared by insert_data.  NOTE(review): despite the
# name this is a SQLAlchemy Session, not a raw connection — confirm callers.
conn = connection(Config.db)
def insert_data(data):
    """Insert one queue_message row; *data* supplies 'url' and 'status_code'."""
    # Bound parameters keep the values safely escaped.
    sql =text("insert into queue_message (url,status_code) "
              "VALUES (:url,:status_code)")
    sql = sql.bindparams(url=data.get("url"),
                         status_code=data.get("status_code"))
    conn.execute(sql)
    conn.commit()
|
[
"190810401@qq.com"
] |
190810401@qq.com
|
34bf0ddf4c836f00f7809ad719bf5652f662b7e8
|
373035950bdc8956cc0b74675aea2d1857263129
|
/spar_python/report_generation/ta1/ta1_section_overview_p2.py
|
8c710d1d99d7cf13febced25219e657a0bc71447
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
limkokholefork/SPARTA
|
5d122cd2e920775d61a5404688aabbafa164f22e
|
6eeb28b2dd147088b6e851876b36eeba3e700f16
|
refs/heads/master
| 2021-11-11T21:09:38.366985
| 2017-06-02T16:21:48
| 2017-06-02T16:21:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,053
|
py
|
# *****************************************************************
# Copyright 2013 MIT Lincoln Laboratory
# Project: SPAR
# Authors: SY
# Description: Section class
#
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 19 Sep 2013 SY Original version
# *****************************************************************
# general imports:
import logging
# SPAR imports:
import spar_python.report_generation.common.section as section
import spar_python.report_generation.ta1.ta1_section_overview_common as t1soc
import spar_python.report_generation.ta1.ta1_schema as t1s
import spar_python.report_generation.common.regression as regression
import spar_python.report_generation.common.graphing as graphing
import spar_python.report_generation.common.latex_classes as latex_classes
# LOGGER:
LOGGER = logging.getLogger(__name__)
class Ta1OverviewP2Section(t1soc.Ta1OverviewCommonSection):
    """The P2 (range query) overview section of the TA1 report."""

    def __init__(self, jinja_template, report_generator):
        """Initializes the section with a jinja template and a report generator.
        """
        cat = t1s.CATEGORIES.P2
        super(Ta1OverviewP2Section, self).__init__(
            jinja_template, report_generator, cat)

    def _get_parameters(self, selection_cols):
        """Returns parameters for the 3d graph."""
        parameters = {}
        # Third axis of the graph is the query's range size.
        parameters["z_label"] = (
            self._config.var_rangesize + " = range size")
        # find the data:
        this_constraint_list = (
            self._config.get_constraint_list() +
            self._inp.get_constraint_list() + [
                (t1s.DBP_TABLENAME, t1s.DBP_SELECTIONCOLS, selection_cols)])
        these_atomic_fields_and_functions = [
            (t1s.DBA_RANGE,
             t1s.Ta1ResultsSchema().get_complex_function(t1s.DBA_TABLENAME,
                                                         t1s.DBA_RANGE))]
        # Latency vs. number of new returned records, per range size.
        parameters["values"] = self._config.results_db.get_query_values(
            [(t1s.DBP_TABLENAME, t1s.DBP_NUMNEWRETURNEDRECORDS),
             (t1s.DBP_TABLENAME, t1s.DBP_QUERYLATENCY)],
            constraint_list=this_constraint_list,
            atomic_fields_and_functions=these_atomic_fields_and_functions)
        parameters["ftr"] = self._config.ql_p2_ftr
        return parameters

    def _populate_output(self):
        """Populates the output object which is passed to the Jinja template
        in get_string."""
        super(Ta1OverviewP2Section, self)._populate_output()
        this_constraint_list = (
            self._config.get_constraint_list() +
            self._inp.get_constraint_list())
        # One 3d latency graph per distinct selection-columns value observed.
        categories = self._config.results_db.get_unique_query_values(
            simple_fields=[(t1s.DBP_TABLENAME, t1s.DBP_SELECTIONCOLS)],
            constraint_list=this_constraint_list)
        for selection_cols in categories:
            self._store_3d_latency_graph(selection_cols)
|
[
"mitchelljd@ll.mit.edu"
] |
mitchelljd@ll.mit.edu
|
d1637ba38880e1918ef3ef2ff63a4a45df0985d1
|
73e277935ef28fd05935c93a3f155c9cc6dc6de7
|
/ctf/crypto/rsa/pq_common_pollard_rho.py
|
b3132f1b7d7879f7cddc3228571c37da556ae317
|
[] |
no_license
|
ohmygodlin/snippet
|
5ffe6b8fec99abd67dd5d7f819520e28112eae4b
|
21d02015492fb441b2ad93b4a455dc4a145f9913
|
refs/heads/master
| 2023-01-08T14:59:38.618791
| 2022-12-28T11:23:23
| 2022-12-28T11:23:23
| 190,989,347
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,409
|
py
|
#Easy_Rsa, yangcheng-2021, https://lazzzaro.github.io/2021/09/12/match-2021%E7%BE%8A%E5%9F%8E%E6%9D%AF%E7%BD%91%E7%BB%9C%E5%AE%89%E5%85%A8%E5%A4%A7%E8%B5%9B/, https://xz.aliyun.com/t/6703
from Crypto.Util.number import *
import gmpy2
n = 84236796025318186855187782611491334781897277899439717384242559751095347166978304126358295609924321812851255222430530001043539925782811895605398187299748256080526691975084042025794113521587064616352833904856626744098904922117855866813505228134381046907659080078950018430266048447119221001098505107823645953039
e = 58337
c = 13646200911032594651110040891135783560995665642049282201695300382255436792102048169200570930229947213493204600006876822744757042959653203573780257603577712302687497959686258542388622714078571068849217323703865310256200818493894194213812410547780002879351619924848073893321472704218227047519748394961963394668
def f(x):
    # Iteration map for Pollard's rho over the module-level modulus n.
    # Deliberately uses pow(x, n-1, n) + 3 instead of the classic map
    # (kept in the trailing comment) — effective here because n's factors
    # make x^(n-1) mod n collide quickly.
    return (pow(x, n - 1, n) + 3) % n #(x*x+1)%n
def rho():
    """Pollard's rho factorisation of the module-level ``n``.

    Floyd cycle detection: ``a`` advances one application of ``f`` per step,
    ``b`` two.  Whenever gcd(a - b, n) exceeds 1 it is a nontrivial factor.
    Returns the pair (p, n // p).
    """
    i = 1
    while True:
        a = getRandomRange(2, n)  # fresh random starting point per restart
        b = f(a)
        j = 1
        while a != b:
            p = GCD(a - b, n)
            print('{} in {} circle'.format(j, i))  # progress trace
            if p > 1:
                return (p, n // p)
            a = f(a)
            b = f(f(b))
            j += 1
        i += 1  # cycle closed without a factor: restart with a new seed
p, q = rho()                        # recover the two prime factors of n
d = gmpy2.invert(e, (p-1)*(q-1))    # private exponent: d = e^-1 mod phi(n)
m = pow(c, d, n)                    # textbook RSA decryption: m = c^d mod n
print(long_to_bytes(m))
#b'SangFor{0a8c2220-4c1b-32c8-e8c1-adf92ec7678b}'
|
[
"laitaizong@gmail.com"
] |
laitaizong@gmail.com
|
8350f11980db9cb44191f5846907f76bee29c0a3
|
99c4d4a6592fded0e8e59652484ab226ac0bd38c
|
/code/batch-2/vse-naloge-brez-testov/DN13-M-065.py
|
d4f9aec2e9e2b28eadb12b6390cf9ff7b76a6e9f
|
[] |
no_license
|
benquick123/code-profiling
|
23e9aa5aecb91753e2f1fecdc3f6d62049a990d5
|
0d496d649247776d121683d10019ec2a7cba574c
|
refs/heads/master
| 2021-10-08T02:53:50.107036
| 2018-12-06T22:56:38
| 2018-12-06T22:56:38
| 126,011,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,396
|
py
|
from math import fabs
class Minobot:
    """A robot on an integer grid.

    It starts at the origin facing heading 0 and supports quarter turns,
    forward moves, and undoing the most recent command.
    """

    def __init__(self):
        self.x = 0
        self.y = 0
        self.direction = 0
        # Unit displacement per heading (indexed by self.direction, 0..3).
        self.x_direction_coefficient = [1, 0, -1, 0]
        self.y_direction_coefficient = [0, -1, 0, 1]
        self.states = []  # undo history of snapshots, newest last

    def get_current_state(self):
        """Snapshot the position and heading as a dict."""
        return {'x': self.x, 'y': self.y, 'direction': self.direction}

    def save_current_state(self):
        """Push the current snapshot onto the undo history."""
        self.states.append(self.get_current_state())

    def change_direction(self, direction):
        """Turn by *direction* quarter turns (+1 clockwise, -1 counter)."""
        self.save_current_state()
        self.direction = (self.direction + direction) % 4

    def levo(self):
        """Turn left."""
        self.change_direction(-1)

    def desno(self):
        """Turn right."""
        self.change_direction(1)

    def naprej(self, d):
        """Move *d* cells forward along the current heading."""
        self.save_current_state()
        # Exactly one coefficient is nonzero per heading, so updating both
        # axes is equivalent to the original if/else on the x coefficient.
        self.x += d * self.x_direction_coefficient[self.direction]
        self.y += d * self.y_direction_coefficient[self.direction]

    def razveljavi(self):
        """Undo the most recent command; no-op when history is empty."""
        if not self.states:
            return
        snapshot = self.states.pop()
        self.x, self.y, self.direction = (
            snapshot['x'], snapshot['y'], snapshot['direction'])

    def razdalja(self):
        """Manhattan distance from the origin."""
        return abs(self.x) + abs(self.y)

    def koordinate(self):
        """Current (x, y) position."""
        return self.x, self.y
|
[
"benjamin.fele@gmail.com"
] |
benjamin.fele@gmail.com
|
63f87d61e8c964d81e856f1e6f01cd937940a20b
|
6b8bf10a57e1a85d2281579da9511310e39b9125
|
/Exercise5/list_module.py
|
653fd5fdce78508a87f475cb1e928e78a0de0a2d
|
[] |
no_license
|
Hadirback/python
|
9c0c5b622b18da50379d4c17df8ba68b67d452c9
|
88e03c34edb1c2f60a1624ee04b5bd975967e8ad
|
refs/heads/master
| 2020-07-20T12:47:48.224472
| 2019-10-11T20:39:12
| 2019-10-11T20:39:12
| 206,643,640
| 0
| 1
| null | 2019-09-05T19:48:10
| 2019-09-05T19:38:58
| null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
# Lesson 5 Exercise 2
import random
def get_random_elem(items):
    """Return a uniformly random element of *items*, or None when empty.

    Fix: the parameter was renamed from ``list``, which shadowed the builtin;
    positional call sites (as used below) are unaffected.
    """
    if not items:
        return None
    return random.choice(items)
def fill_list():
    """Interactively collect list items; an empty input line ends entry."""
    my_list = []
    while True:
        elem = input('Введите элемент списка или Enter чтобы закончить ввод: ')
        if not elem:
            return my_list
        else:
            my_list.append(elem)
if __name__ == '__main__':
    # Demo: read a list from the user, then print a random element of it.
    main_list = fill_list()
    print(f'Мой список элементов: {main_list}')
    print(f'Мой рандомный элемент из списка - {get_random_elem(main_list)}')
|
[
"mail.evgeny.filippov@gmail.com"
] |
mail.evgeny.filippov@gmail.com
|
705172b35e4e926f7aaafbb9431f13fc097b88a4
|
54a26bf56aebd604d4dece733f08d7d30cd27f89
|
/zdemo/auser/urls.py
|
111ae8dee1420e3cac23d71f7714792b65cc4091
|
[
"MIT"
] |
permissive
|
zzZaida/django_27
|
b78f5ae8bccfa11074221ba32241878d703aa535
|
bbbba8be9547fb815c68e94fadb7e8b6eebf75c9
|
refs/heads/master
| 2020-07-03T19:47:25.037195
| 2019-08-13T12:11:29
| 2019-08-13T12:11:29
| 202,030,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 793
|
py
|
"""zdemo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from . import views
urlpatterns = [
    # NOTE(review): the pattern r'^user/' is unanchored at the end, so any
    # path beginning with "user/" routes to views.index — confirm intended.
    url(r'^user/', views.index,name='index'),
]
|
[
"xwp_fullstack@163.com"
] |
xwp_fullstack@163.com
|
347021acc8f528e862d6401bb21dfa7d3134cf58
|
8d73ebf53f3d0aa08c3a50f18f47ef7d48e6febf
|
/CGPA_Calculator/icon.py
|
7c01b70363b3922098a63c8e25bc682ad829f7c7
|
[
"MIT"
] |
permissive
|
deepdalsania/calculator
|
1da25f91feed8723a1faf43a2dffd8a955d7a359
|
1460fc7f91ef9e379bdde240ddbcb0183d7ec092
|
refs/heads/master
| 2022-12-20T16:42:36.522300
| 2020-10-06T05:03:51
| 2020-10-06T05:03:51
| 300,562,691
| 0
| 5
|
MIT
| 2020-10-06T05:03:52
| 2020-10-02T09:18:04
|
Python
|
UTF-8
|
Python
| false
| false
| 864
|
py
|
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWebEngineWidgets import QWebEngineView
from PyQt5.QtWidgets import QMainWindow, QLabel, QLineEdit, QPushButton, QApplication
def arrowIcon(self):
    """Place the three arrow icons on the window.

    Fix: the original repeated the same five-line block three times,
    differing only in geometry.  Behavior is unchanged — each QLabel is
    parented to the widget (so it stays alive), and ``self.arrow`` ends up
    referring to the last label created, exactly as before.
    """
    for geometry in (QRect(650, 240, 50, 40),
                     QRect(280, 345, 30, 30),
                     QRect(280, 395, 30, 30)):
        self.arrow = QLabel(self)
        self.arrow.setPixmap(QPixmap("ARR.png"))
        self.arrow.setGeometry(geometry)
        self.arrow.setScaledContents(True)
        self.arrow.setToolTip('Tech-Totes Club.')
|
[
"deeppatel.dd@gmail.com"
] |
deeppatel.dd@gmail.com
|
6f3281175ab81b728476fb5171d77260cd8d394d
|
73f5461ea52354ea8caa6e08a3989f833fc9d5d0
|
/src/python/fsqio/pants/buildgen/jvm/map_third_party_jar_symbols.py
|
c581fd1cf759f63584ab20647a192c01cd433beb
|
[
"Apache-2.0"
] |
permissive
|
OpenGeoscience/fsqio
|
52b674b3e2d1742916fcec83bbb831ddbd58d1f2
|
aaee25552b602712e8ca3d8b02e0d28e4262e53e
|
refs/heads/master
| 2021-01-15T20:23:18.180635
| 2017-06-05T20:25:18
| 2017-06-05T20:25:18
| 66,481,281
| 3
| 0
| null | 2017-06-05T20:25:18
| 2016-08-24T16:36:46
|
Scala
|
UTF-8
|
Python
| false
| false
| 4,428
|
py
|
# coding=utf-8
# Copyright 2014 Foursquare Labs Inc. All Rights Reserved.
from __future__ import (
absolute_import,
division,
generators,
nested_scopes,
print_function,
unicode_literals,
with_statement,
)
from contextlib import closing
from itertools import chain
import json
import os
import re
from zipfile import ZipFile
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.invalidation.cache_manager import VersionedTargetSet
from pants.task.task import Task
from pants.util.dirutil import safe_mkdir
class MapThirdPartyJarSymbols(Task):
  """Pants task mapping third-party jars to the class names they export.

  Scans every jar on the compile classpath of JarLibrary targets and
  publishes the union of fully qualified class names as the
  'third_party_jar_symbols' product for buildgen consumers.
  """

  @classmethod
  def product_types(cls):
    # Products this task contributes to the round.
    return [
      'third_party_jar_symbols',
    ]

  @classmethod
  def prepare(cls, options, round_manager):
    super(MapThirdPartyJarSymbols, cls).prepare(options, round_manager)
    # NOTE(mateo): This is a deprecated concept upstream - everything is in the classpath now. So it will take some
    # fiddling to get the jar symbols for anyone not using pom-resolve.
    round_manager.require_data('compile_classpath')
    round_manager.require_data('java')
    round_manager.require_data('scala')

  # Matches .class entries inside a jar, e.g. "com/foo/Bar.class", splitting
  # the package path from the (possibly nested/anonymous) class file name.
  CLASSFILE_RE = re.compile(r'(?P<path_parts>(?:\w+/)+)'
                            r'(?P<file_part>.*?)'
                            r'\.class')
  # A plausible (non-synthetic) class identifier.
  CLASS_NAME_RE = re.compile(r'[a-zA-Z]\w*')

  def fully_qualified_classes_from_jar(self, jar_abspath):
    """Yield dotted fully qualified class names found in the jar.

    Nested classes (Foo$Bar) are yielded at every nesting level; anonymous
    suffixes after '$$' are dropped, and a synthetic component stops the walk.
    """
    with closing(ZipFile(jar_abspath)) as dep_zip:
      for qualified_file_name in dep_zip.namelist():
        match = self.CLASSFILE_RE.match(qualified_file_name)
        if match is not None:
          file_part = match.groupdict()['file_part']
          path_parts = match.groupdict()['path_parts']
          path_parts = filter(None, path_parts.split('/'))
          package = '.'.join(path_parts)
          non_anon_file_part = file_part.split('$$')[0]
          nested_classes = non_anon_file_part.split('$')
          for i in range(len(nested_classes)):
            if not self.CLASS_NAME_RE.match(nested_classes[i]):
              break
            nested_class_name = '.'.join(nested_classes[:i + 1])
            fully_qualified_class = '.'.join([package, nested_class_name])
            yield fully_qualified_class

  def execute(self):
    """Compute (or reuse cached) jar->symbols analysis and publish the product."""
    products = self.context.products
    targets = self.context.targets(lambda t: isinstance(t, JarLibrary))
    with self.invalidated(targets, invalidate_dependents=False) as invalidation_check:
      # The analysis depends on the whole target set, so key it on the
      # combined cache key of all versioned targets.
      global_vts = VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts)
      vts_workdir = os.path.join(self._workdir, global_vts.cache_key.hash)
      vts_analysis_file = os.path.join(vts_workdir, 'buildgen_analysis.json')
      if invalidation_check.invalid_vts or not os.path.exists(vts_analysis_file):
        # Cache miss: rescan every .jar on the classpath of these targets.
        classpath = self.context.products.get_data('compile_classpath')
        jar_entries = classpath.get_for_targets(targets)
        all_jars = [jar for _, jar in jar_entries]
        calculated_analysis = {}
        calculated_analysis['hash'] = global_vts.cache_key.hash
        calculated_analysis['jar_to_symbols_exported'] = {}
        for jar_path in sorted(all_jars):
          if os.path.splitext(jar_path)[1] != '.jar':
            continue
          fully_qualified_classes = list(set(self.fully_qualified_classes_from_jar(jar_path)))
          calculated_analysis['jar_to_symbols_exported'][jar_path] = {
            'fully_qualified_classes': fully_qualified_classes,
          }
        calculated_analysis_json = json.dumps(calculated_analysis)
        safe_mkdir(vts_workdir)
        # NOTE(review): writing a str to a file opened 'wb' only works on
        # Python 2 — confirm this module never runs under Python 3.
        with open(vts_analysis_file, 'wb') as f:
          f.write(calculated_analysis_json)
        if self.artifact_cache_writes_enabled():
          self.update_artifact_cache([(global_vts, [vts_analysis_file])])
      # Always (re)load from disk so cache hits and misses share one path.
      with open(vts_analysis_file, 'rb') as f:
        analysis = json.loads(f.read())
      third_party_jar_symbols = set(chain.from_iterable(
        v['fully_qualified_classes'] for v in analysis['jar_to_symbols_exported'].values()
      ))
      products.safe_create_data('third_party_jar_symbols', lambda: third_party_jar_symbols)

  def check_artifact_cache_for(self, invalidation_check):
    # Pom-resolve is an output dependent on the entire target set, and is not divisible
    # by target. So we can only cache it keyed by the entire target set.
    global_vts = VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts)
    return [global_vts]
|
[
"mateo@foursquare.com"
] |
mateo@foursquare.com
|
8a900fcc1c9f2cb65f9dd2a6b7c15eef2898558d
|
1b9bd441c500e79042c48570035071dc20bfaf44
|
/sources/Content_Quality/mekhilta.py
|
6ded5ff121376d5bb37ff8e30b43ebf4f016f14d
|
[] |
no_license
|
Sefaria/Sefaria-Data
|
ad2d1d38442fd68943535ebf79e2603be1d15b2b
|
25bf5a05bf52a344aae18075fba7d1d50eb0713a
|
refs/heads/master
| 2023-09-05T00:08:17.502329
| 2023-08-29T08:53:40
| 2023-08-29T08:53:40
| 5,502,765
| 51
| 52
| null | 2023-08-29T11:42:31
| 2012-08-22T00:18:38
| null |
UTF-8
|
Python
| false
| false
| 1,737
|
py
|
from sources.functions import *
alt_toc = """Massekta dePesah / מסכתא דפסחא
Exodus 12:1–13:16
Massekta deVayehi Beshalach / מסכתא דויהי בשלח
Exodus 13:17-14:31
Massekta deShirah / מסכתא דשירה
Exodus 15:1-15:21
Massekta deVayassa / מסכתא דויסע
Exodus 15:22-17:7
Massekta deAmalek / מסכתא דעמלק
Exodus 17:8- 18:27
Massekta deBahodesh / מסכתא דבחודש
Exodus 19:1-20:26
Massekta deNezikin / מסכתא דנזיקין
Exodus 21:1-22:23
Massekta deKaspa / מסכתא דכספא
Exodus 22:24-23:19
Massekta deShabbeta / מסכתא דשבתא
Exodus 31:12-35:3"""
# Build the alternate "Parasha" TOC from alt_toc: lines alternate between a
# bilingual title ("English / Hebrew") and the Exodus range it covers.
nodes = []
alt_toc = alt_toc.splitlines()
for r, row in enumerate(alt_toc):
    if r % 2 == 0:
        # Even rows: massekta title in both languages.
        node = ArrayMapNode()
        en, he = row.strip().split(" / ")
        node.add_primary_titles(en, he)
        node.depth = 0
        node.refs = []
    else:
        # Odd rows: the verse range, remapped from Exodus onto the Mekhilta.
        node.wholeRef = row.strip().replace("Exodus", "Mekhilta d'Rabbi Yishmael")
        node.validate()
        nodes.append(node.serialize())
index = get_index_api("Mekhilta d'Rabbi Yishmael", server="https://germantalmud.cauldron.sefaria.org")
index["alt_structs"] = {"Parasha": {"nodes": nodes}}
#post_index(index, server="https://www.sefaria.org")
# Link every Mekhilta section to the Exodus passage it comments on.
links = []
for sec_ref in library.get_index("Mekhilta d'Rabbi Yishmael").all_section_refs():
    seg_ref = sec_ref.as_ranged_segment_ref().normal()
    exodus_ref = sec_ref.normal().replace("Mekhilta d'Rabbi Yishmael", "Exodus")
    print(exodus_ref)
    print(seg_ref)
    print("***")
    links.append({"refs": [exodus_ref, seg_ref], "generated_by": "mekhilta_to_exodus", "auto": True, "type": "Commentary"})
post_link_in_steps(links, server="https://www.sefaria.org", step=100, sleep_amt=10)
|
[
"steve@sefaria.org"
] |
steve@sefaria.org
|
057689d1fa8f8c16acf59a0f0e342efca11d8cde
|
cb9281a34c3c5a36d4b3a846fb6ff22ede12f2f6
|
/annotate_communities.py
|
c792c346aaae78ae95b967b3522d7c87354ffd69
|
[] |
no_license
|
juliettapc/CalorieKing
|
9cb9f35ae9b239d2284175b0802cf2c60dc79d1d
|
5f80bffb65fe4644a81ae2ab0b1738861e028331
|
refs/heads/master
| 2022-02-10T07:52:24.133379
| 2022-02-08T01:25:18
| 2022-02-08T01:25:18
| 153,174,891
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,823
|
py
|
import networkx as nx
from transform_labels_to_nx import transform_labels_to_nx
import sys, os
from numpy import *
import itertools
def annotate_communities(G, num_points, filename, communitylist, dbdate = '2010'):
    '''
    Annotate every node of graph G with a "community" attribute and write the
    result to "<filename>.gml".

    Communities are read from the ``communitylist`` file (one line of
    ';'-separated groups, each group ','-separated node ids), sorted largest
    to smallest, and labelled "n_s" where n is the community rank (0 = largest)
    and s is the community size. Nodes in no community get an empty string.

    Created by Rufaro Mukogo on 2011-03-31.
    Copyright (c) 2010 __Northwestern University__. All rights reserved.

    NOTE(review): ``num_points`` is accepted but never used in this function.
    '''
    # Normalise label/id types to str: gml files store these inconsistently
    # (int vs str), which otherwise raises errors downstream.
    for n in G.nodes():
        G.node[n]['label']=str(G.node[n]['label'])
        G.node[n]['id']=str(G.node[n]['id'])
    if dbdate =="2010":
        G = transform_labels_to_nx(G)
        # open file with the list of communities
        f = open(str(communitylist)).readlines()
    else:
        print "You need to generate a gml file that has only 2009 data"
        sys.exit()
    # extract list of communities; should return a list of lists
    communities = [x.strip().split(";") for x in f]
    # print communities,"\n"
    communities = [x.strip().split(",") for x in communities[0]]
    #print communities,"\n"
    # sort communities, largest first, so index 0 is the biggest
    communities = sorted(communities, key=len, reverse=True)
    # list of all the nodes that are in a community
    com_nodes= itertools.chain(*communities)
    # convert to integers to avoid key errors
    com_nodes =map(int, list(com_nodes))
    # NOTE(review): com_nodes holds ints while node ids were stringified above;
    # confirm that the membership test below compares like types for this data.
    for n in G.nodes():
        if n not in com_nodes:
            G.node[n]["community"] = ""
            #print n
    ii = 0
    for co in communities:
        # Label "rank_size", e.g. "0_57" for the largest community of 57 nodes.
        s = str(ii)+"_"+str(len(co))
        #print "community_size", len(co), "s:",s
        for n in co:
            # add attribute to the main GML file
            n=str(n)
            G.node[n]["community"] = s
        ii+=1
    nx.write_gml(G,str(filename)+".gml")
    return G
if __name__ =="__main__":
if len(sys.argv)>1:
communitylist = sys.argv[1]
else:
print "Enter the name of the list of communities"
if len(sys.argv)>2:
filename = sys.argv[2]
else:
print "Enter the name of the name of the .gml file"
num_points = 5
M = nx.read_gml(str(filename)+".gml")
for n in M.nodes():
M.node[n]["community"] = ""
H = annotate_communities(M,num_points, filename, communitylist)
|
[
"julia@chem-eng.northwestern.edu"
] |
julia@chem-eng.northwestern.edu
|
8a47069ad08ff4a25b593f7f933e7207a34c9c81
|
e6b1ad137a9bd3d39ae7c61cb5c7f7956ce095b9
|
/bruteforce/first_and_last.py
|
254541adec5d55d00b4b5ecdb2ee1dce8ea5e268
|
[] |
no_license
|
jadenpadua/Data-Structures-and-Algorithms
|
d9ba8ece779a2d564a7d66fcbacc9fb7fa1f7205
|
838c29112fec4beb9d9cc3f54db00492b4a480b0
|
refs/heads/master
| 2021-07-17T13:10:52.029327
| 2020-07-08T02:00:14
| 2020-07-08T02:00:14
| 186,896,842
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 766
|
py
|
Write a function that returns the lexicographically first and lexicographically last rearrangements of a string. Output the results in the following manner:
first_and_last(string) ➞ [first, last]
Examples
first_and_last("marmite") ➞ ["aeimmrt", "trmmiea"]
first_and_last("bench") ➞ ["bcehn", "nhecb"]
first_and_last("scoop") ➞ ["coops", "spooc"]
def first_and_last(s):
    """Return [first, last]: the lexicographically smallest and largest
    rearrangements of the string ``s``."""
    ascending = ''.join(sorted(s))
    descending = ascending[::-1]
    return [ascending, descending]
|
[
"noreply@github.com"
] |
jadenpadua.noreply@github.com
|
4458795c392ba0ab3f81e2d130be56272b33e8c0
|
ee00ebe5e71c36b05fbff993b19e9723b963313f
|
/280_Wiggle_Sort.py
|
09fa9084f0ab202059ebdd2af873de234323560f
|
[] |
no_license
|
26XINXIN/leetcode
|
f365560d93604a28abf399707b333f3c11f924ec
|
78ed11f34fd03e9a188c9c6cb352e883016d05d9
|
refs/heads/master
| 2021-06-28T16:31:45.103879
| 2020-09-19T20:33:55
| 2020-09-19T20:33:55
| 144,975,903
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
class Solution:
    def wiggleSort(self, nums: List[int]) -> None:
        """
        Do not return anything, modify nums in-place instead.
        """
        if len(nums) <= 1:
            return
        # Single pass: position i must rise above its predecessor when i is
        # odd and fall below it when i is even; swap whenever the invariant
        # is violated (a local fix never breaks the already-settled prefix).
        for i in range(1, len(nums)):
            wants_increase = (i % 2 == 1)
            misplaced = nums[i] < nums[i - 1] if wants_increase else nums[i] > nums[i - 1]
            if misplaced:
                nums[i - 1], nums[i] = nums[i], nums[i - 1]
|
[
"yangxin.nlp@bytedance.com"
] |
yangxin.nlp@bytedance.com
|
7ab34c90f6402e871718fc7299fa5701b912a3e5
|
82236c1cf2fe6ca26f52ce4eeae1745cf3cbc5ca
|
/docs/source/conf.py
|
970611753ff44195353547e41808aed5480865fe
|
[
"Apache-2.0"
] |
permissive
|
CKrawczyk/python-reducers-for-caesar
|
8b607fddd7ce36cd81e1b4e2e7079e1a66526d22
|
9c5d9e072906d3fde2497fa61a66e4c8c0113ec2
|
refs/heads/master
| 2021-06-04T07:35:25.738616
| 2017-08-10T15:56:42
| 2017-08-10T15:56:42
| 91,355,049
| 1
| 2
|
Apache-2.0
| 2019-04-03T20:28:31
| 2017-05-15T15:40:00
|
Python
|
UTF-8
|
Python
| false
| false
| 5,261
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# panoptes_aggregation documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 7 13:22:24 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the package importable so sphinx.ext.autodoc can introspect it.
sys.path.insert(0, os.path.abspath('../../panoptes_aggregation'))
sys.path.insert(0, os.path.abspath('../..'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.githubpages',
    'sphinx.ext.napoleon',
    'sphinxcontrib.autohttp.flask'
]

# Napoleon: parse NumPy-style docstrings only (Google style disabled).
napoleon_google_docstring = False
napoleon_use_param = False
napoleon_use_ivar = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'panoptes_aggregation'
copyright = '2017, Coleman Krawczyk'
author = 'Coleman Krawczyk'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinxdoc'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'panoptes_aggregationdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'panoptes_aggregation.tex', 'panoptes\\_aggregation Documentation',
     'Coleman Krawczyk', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'panoptes_aggregation', 'panoptes_aggregation Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'panoptes_aggregation', 'panoptes_aggregation Documentation',
     author, 'panoptes_aggregation', 'One line description of project.',
     'Miscellaneous'),
]

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
[
"coleman.krawczyk@gmail.com"
] |
coleman.krawczyk@gmail.com
|
de8a9c196a80dde711075fc0f91d2dc1ce5625e9
|
10b22cef27b7cb7f06221954eef6ea678c5289c1
|
/database/database_schemas_ms.py
|
0ad92f442cd946089275a60618ee4b0020b399d7
|
[
"MIT"
] |
permissive
|
mshobair/invitro_cheminformatics
|
0c1d7c4c2cfd5e20ee24fffac6a0332d503957df
|
17201496c73453accd440646a1ee81726119a59c
|
refs/heads/main
| 2023-04-04T19:06:27.098377
| 2021-03-26T17:07:25
| 2021-03-26T17:07:25
| 348,917,957
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 310
|
py
|
class Schemas:
    """
    Class that contains DATABASE schema names.
    """

    # Sandbox schema with chemical-property tables (per the "sbox_" prefix).
    chemprop_schema = "sbox_rlougee_chemprop"
    # Read-only DSSTox schema; the name suggests a 2019-11-18 snapshot.
    dsstox_schema = "ro_20191118_dsstox"
    # QSAR sandbox snapshot schema.
    qsar_schema = "sbox_mshobair_qsar_snap"
    # Production invitrodb, version 3.3 (per the schema name).
    invitrodb_schema = "prod_internal_invitrodb_v3_3"
    # Built-in SQL metadata schema.
    information_schema = "information_schema"
|
[
"mshobair@v2626umcth038.rtord.epa.gov"
] |
mshobair@v2626umcth038.rtord.epa.gov
|
8c233f047715954abc685b0149bdc1c86d63168e
|
36c00fe2afff4818c937e312ce0c6a79f35e2a77
|
/7-kyu/naughty-or-nice-/python/solution.py
|
7d97dd81f35376746e01998e5608dffd391051cd
|
[] |
no_license
|
p-lots/codewars
|
0a67b6ee4c91180ff78c648421b9d2d64463ddc3
|
535faeee475c6b398124d6f5002b0e111406e8bb
|
refs/heads/master
| 2023-08-23T22:14:33.635011
| 2023-08-23T13:30:37
| 2023-08-23T13:30:37
| 195,320,309
| 0
| 0
| null | 2023-05-09T19:25:50
| 2019-07-05T01:40:15
|
Python
|
UTF-8
|
Python
| false
| false
| 191
|
py
|
def get_nice_names(people):
    """Return the names of every person dict whose 'was_nice' flag is truthy."""
    nice = []
    for person in people:
        if person['was_nice']:
            nice.append(person['name'])
    return nice
def get_naughty_names(people):
    """Return the names of every person dict whose 'was_nice' flag is falsy."""
    naughty = []
    for person in people:
        if not person['was_nice']:
            naughty.append(person['name'])
    return naughty
|
[
"paul.calotta@gmail.com"
] |
paul.calotta@gmail.com
|
6e39762a6673f11ca94947c8499aa363af2b4dd2
|
c168fe819b446640957e5e310ef89fcfe28662b3
|
/userbenchmark/__init__.py
|
c9ff1fac46844cf4cb62479ffa15096e9436dbf2
|
[
"BSD-3-Clause"
] |
permissive
|
pytorch/benchmark
|
7b55e8d714de2ea873e03df43811aab3848485dd
|
df4da9bdff11a2f948d5bd4ac83da7922e6f44f4
|
refs/heads/main
| 2023-08-29T13:06:09.671728
| 2023-08-28T16:51:55
| 2023-08-28T16:51:55
| 92,541,759
| 685
| 220
|
BSD-3-Clause
| 2023-09-14T18:10:18
| 2017-05-26T19:21:12
|
Python
|
UTF-8
|
Python
| false
| false
| 851
|
py
|
from pathlib import Path
from typing import List
CURRENT_DIR = Path(__file__).parent
def list_userbenchmarks() -> List[str]:
    """Names of all userbenchmarks: immediate subdirectories of this package
    that contain an ``__init__.py``."""
    return [entry.name
            for entry in CURRENT_DIR.iterdir()
            if entry.is_dir() and entry.joinpath('__init__.py').exists()]
def get_ci_from_ub(ub_name):
    """Load ``<ub_name>/ci.yaml`` and return ``{"name": ..., "ci_cfg": ...}``,
    or None when the benchmark has no CI config file."""
    import yaml
    ci_path = CURRENT_DIR.joinpath(ub_name).joinpath("ci.yaml")
    if not ci_path.exists():
        return None
    with open(ci_path, "r") as ci_stream:
        ci_config = yaml.safe_load(ci_stream)
    return {"name": ub_name, "ci_cfg": ci_config}
def get_userbenchmarks_by_platform(platform):
    """Return the names of userbenchmarks whose ci.yaml declares ``platform``.

    Benchmarks without a ci.yaml (``get_ci_from_ub`` returns None) are skipped.
    Rewritten from a nested map/filter/lambda chain to an equivalent, readable
    comprehension.
    """
    ci_configs = (get_ci_from_ub(name) for name in list_userbenchmarks())
    return [ci["name"]
            for ci in ci_configs
            if ci and ci["ci_cfg"]["platform"] == platform]
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
266f7c43ec194665af03f4823f13ff1664004761
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/0_字符串/87. 扰乱字符串.py
|
29e56a9f818fe0967e242d7e3d9221f6a53b65b7
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 675
|
py
|
from collections import Counter
from functools import lru_cache
# 1 <= s1.length <= 30
# 87. 扰乱字符串
# !bit packing 可以将复杂度降低为 O(n^4/w)
class Solution:
    @lru_cache(None)
    def isScramble(self, s1: str, s2: str) -> bool:
        """Return True iff s2 can be produced from s1 by recursively splitting
        the string in two and optionally swapping the halves."""
        if s1 == s2:
            return True
        # Different multisets of characters can never scramble into each other.
        if Counter(s1) != Counter(s2):
            return False
        for split in range(1, len(s1)):
            # Case 1: halves kept in order.
            if self.isScramble(s1[:split], s2[:split]) and self.isScramble(s1[split:], s2[split:]):
                return True
            # Case 2: halves swapped.
            if self.isScramble(s1[:split], s2[-split:]) and self.isScramble(s1[split:], s2[:-split]):
                return True
        return False
|
[
"lmt2818088@gmail.com"
] |
lmt2818088@gmail.com
|
87b6cd872faff0465ea42ba50c6be9d681f0137a
|
b24e45267a8d01b7d3584d062ac9441b01fd7b35
|
/Usuario/migrations/0001_initial.py
|
f1a088a9eef7d4b51c898384d51b3a312255a586
|
[] |
no_license
|
slalbertojesus/merixo-rest
|
1707b198f31293ced38930a31ab524c0f9a6696c
|
5c12790fd5bc7ec457baad07260ca26a8641785d
|
refs/heads/master
| 2022-12-10T18:56:36.346159
| 2020-05-02T00:42:39
| 2020-05-02T00:42:39
| 212,175,889
| 0
| 0
| null | 2022-12-08T07:00:07
| 2019-10-01T18:56:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,629
|
py
|
# Generated by Django 2.2.6 on 2019-11-29 05:50
import Usuario.models
import django.contrib.postgres.fields
from django.db import migrations, models
# Auto-generated Django migration (Django 2.2.6, 2019-11-29): creates the
# custom ``Account`` user model. Edit via new migrations, not in place.
class Migration(migrations.Migration):

    # First migration of this app; no prior migration state required.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Account',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('name', models.CharField(max_length=30)),
                ('email', models.EmailField(max_length=60, unique=True)),
                ('username', models.CharField(max_length=30, unique=True)),
                ('estado', models.CharField(max_length=30)),
                ('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='date joined')),
                ('last_login', models.DateTimeField(auto_now=True, verbose_name='last login')),
                ('listaUsuarios', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=200), default=list, null=True, size=None)),
                ('pic', models.ImageField(upload_to=Usuario.models.upload_location)),
                ('is_admin', models.BooleanField(default=False)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('is_superuser', models.BooleanField(default=False)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
|
[
"slalbertojesus@gmail.com"
] |
slalbertojesus@gmail.com
|
39385e0a7b92b66933385b77e3533b3a516318ea
|
13213e3e7d6a0866cdf28483adc46d458f8977ac
|
/qsort/qs.py
|
f464a28fe040fbe56cf5762e4a0066e408678f00
|
[] |
no_license
|
j0k/algopractice
|
42654b1158497050911822c46de6791cf8bf251f
|
1be3df5553156a523bfce5328df205e6c67c19f3
|
refs/heads/master
| 2022-06-27T00:10:57.028619
| 2022-06-15T12:34:11
| 2022-06-15T12:34:11
| 100,791,845
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
# 18.06.2017
import random
A = [1,2,4,7,8,9,0,5,3,5,6,8,4,3]
def qsort(a):
    """Return a sorted list built from ``a`` using randomized quicksort.

    The input is not mutated; single-element and empty inputs are returned
    as-is.
    """
    if len(a) <= 1:
        return a
    # Pick a uniformly random pivot position to avoid worst-case inputs.
    pivot_index = int(random.random() * len(a))
    pivot = a[pivot_index]
    smaller, larger = [], []
    for position, value in enumerate(a):
        if position == pivot_index:
            continue
        (smaller if value <= pivot else larger).append(value)
    return qsort(smaller) + [pivot] + qsort(larger)
print qsort(A)
|
[
"darling.kicks@gmail.com"
] |
darling.kicks@gmail.com
|
1747c3d6ebe232b90f1163f18a849a3f71ccebc4
|
e614c145ab902ebed09af2bcef5b36dca78a5787
|
/authors/migrations/0117_auto_20160214_0747.py
|
26a7a40388a4b500cb05fd171b2905ed7e43788d
|
[] |
no_license
|
rtreharne/pvsat-dev
|
1646ca8f51bd466d659b25eb721750de8361ef02
|
faa2b28250e2110f4603ffeff80ad0fedda1abbb
|
refs/heads/master
| 2021-01-17T13:24:12.578341
| 2017-09-19T06:42:51
| 2017-09-19T06:42:51
| 44,095,813
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
# Auto-generated Django migration: alters the default of ``abstract.date``.
class Migration(migrations.Migration):

    dependencies = [
        ('authors', '0116_auto_20160214_0743'),
    ]

    operations = [
        migrations.AlterField(
            model_name='abstract',
            name='date',
            # NOTE(review): the default is a timestamp frozen at the moment
            # makemigrations ran (typical of generated migrations), not "now".
            field=models.DateTimeField(default=datetime.datetime(2016, 2, 14, 7, 47, 55, 128934)),
        ),
    ]
|
[
"R.Treharne@liverpool.ac.uk"
] |
R.Treharne@liverpool.ac.uk
|
2da0aa36e7be889a32196c3d06867c36c614e741
|
246fb3d3163411f8d2f23f0c58277e183a9aa04b
|
/StockAdmin2/core/restapi/updater.py
|
5648083f9fc8de8493a0c08f8977c09f967d0f31
|
[] |
no_license
|
zwolf21/StockAdmin2
|
ed5adb10cb94f688ce0ec9c18291f8d0eae79a33
|
430189bd8ea3820c00cf77e7ed741745f1ed74ca
|
refs/heads/master
| 2022-12-12T03:53:07.101298
| 2017-12-26T04:49:27
| 2017-12-26T04:49:27
| 81,782,122
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,914
|
py
|
from decimal import Decimal
from listorm import Listorm
from .dgamt_service import DGamtService
# Field-name -> change-handling policy maps. Policy values:
#   'update': when the value changes, modify the existing record in place.
#   'create': when the value changes, create a new record instead.
# (translated from the original Korean comment)
update_supervise_fields = {
    'edi_code':'update', 'pro_type':'update', 'pay_type':'update',
    'price':'create'
}

product_supervise_fields = {
    'edi_code': 'update'
}

buyinfo_supervise_field = {
    'pro_type': 'update', 'pay_type': 'update',
    'price':'create', 'buy_edi_code': 'create'
}

# Per-model policy used by get_fieldset_for_update/smart_update: keys are
# lowercased model class names, values map policy -> list of field names.
UPDATE_METHOD = {
    'product': {
        'update': [
            'edi_code', 'unit', 'company', 'unit_amount', 'apply_root', 'op_type'
        ],
    },
    'buyinfo': {
        'create': ['buy_edi_code', 'price'],
        'update': ['pro_type', 'pay_type', 'date']
    }
}
def get_newest_record(edi_code, recursive_try=5):
    """Follow the DGamt ``edi_code_after`` chain to the newest record for a code.

    Returns the API record on an unambiguous match, None when the code is
    falsy or the lookup returns anything other than exactly one row, and --
    once the recursion budget is exhausted -- the last edi_code string itself.

    NOTE(review): the string returned on budget exhaustion has no ``.get``;
    callers that immediately call ``.get()`` on the result would break on
    that path -- confirm this is intended.
    """
    if recursive_try == 0:
        # Recursion guard: stop chasing replacements and return the code as-is.
        return edi_code
    if not edi_code:
        return
    api = DGamtService()
    api_lst = api.getDgamtList(mdsCd=edi_code)
    # Only act on an unambiguous (single-row) match; otherwise return None.
    if len(api_lst) == 1:
        record = api_lst.first
        if record.edi_code_after:
            # The code was superseded; recurse on its replacement.
            return get_newest_record(
                record.edi_code_after,
                recursive_try=recursive_try-1
            )
        return record
def get_fieldset_for_update(instance, new_record, update_methods=UPDATE_METHOD):
    """Diff ``instance`` against ``new_record`` per the policy table.

    Looks up the policy for the instance's lowercased class name in
    ``update_methods`` and, for each supervised field whose (stringified)
    value changed and whose new value is non-empty, routes the new value into
    one of two dicts by policy.

    Returns ``(creates, updates)``: fields whose change should spawn a new
    record, and fields to modify in place.
    """
    instance_name = instance.__class__.__name__.lower()
    update_context = update_methods.get(instance_name, {})
    updates, creates = {}, {}
    for method, fields in update_context.items():
        for field in fields:
            # Compare as strings; treat None as '' on both sides.
            oldVal = str(getattr(instance, field) or '')
            newVal = str(getattr(new_record, field) or '')
            if not newVal:
                # An empty incoming value never overwrites existing data.
                continue
            if oldVal != newVal:
                if method == 'update':
                    updates[field] = newVal
                else:
                    # Bug fix: was ``create[field] = newVal`` -- ``create`` is
                    # undefined, so every 'create'-policy change raised a
                    # NameError instead of being collected.
                    creates[field] = newVal
    return creates, updates
def record_isvalid(record):
    """Return True when the record carries a usable price.

    A price of 0, '0', '' or None (or a missing 'price' key) is invalid.
    """
    invalid_prices = (0, '0', '', None)
    return record.get('price') not in invalid_prices
def smart_update(product, update_methods=UPDATE_METHOD):
    """Synchronise a product (and its buy-info rows) with the newest API record.

    Fetches the latest DGamt record for the product's edi_code, adopts a
    superseding edi_code if one exists, applies the 'update'-policy product
    fields in place, then creates or updates the active buy-info row
    according to the incoming price.
    """
    new_record = get_newest_record(product.edi_code)
    if not new_record:
        # No unambiguous API record -- nothing to sync.
        return
    new_edi_code = new_record.get('edi_code')
    if new_edi_code != product.edi_code:
        # The code was superseded upstream; adopt the replacement.
        product.edi_code = new_edi_code
        product.save()
    # NOTE(review): the 'create'-policy product fields returned here are
    # computed but never used -- confirm whether that is intentional.
    product_creates, product_updates = get_fieldset_for_update(product, new_record)
    product.__class__.objects.filter(pk=product.id).update(**product_updates)
    buyinfo_set = product.buyinfo_set.filter(buy_edi_code=new_edi_code, active=True)
    new_price = Decimal(new_record.price or 0)
    # Reuse the market of the most recent buy-info row, if any exists.
    if product.buyinfo_set.exists():
        market = product.buyinfo_set.last().market
    else:
        market = None
    buyinfo_create_fields = update_methods.get('buyinfo', {}).get('create', [])
    buyinfo_update_fields = update_methods.get('buyinfo', {}).get('update', [])
    buyinfo_create_kwargs = new_record.select(*(buyinfo_create_fields+buyinfo_update_fields), values=False)
    buyinfo_update_kwargs = new_record.select(*buyinfo_update_fields, values=False)
    buyinfo_create_kwargs['product'] = product
    buyinfo_update_kwargs['product'] = product
    if not buyinfo_set.exists():
        # No active row for this code yet: create one, defaulting a missing
        # price to 0.
        if not new_price:
            # NOTE(review): debug print left in place; consider logging.
            print(new_price)
            buyinfo_create_kwargs['price'] = 0
        buyinfo_set.create(**buyinfo_create_kwargs)
    else:
        buyinfo_create_kwargs['market'] = market
        buyinfo_update_kwargs['market'] = market
        if new_price:
            # Narrow to rows at the incoming price: update if one exists,
            # otherwise create a row at the new price.
            buyinfo_set = buyinfo_set.filter(price=new_price)
            if not buyinfo_set.exists():
                buyinfo_set.create(**buyinfo_create_kwargs)
            else:
                buyinfo_set.update(**buyinfo_update_kwargs)
        else:
            # No usable incoming price: update the other fields only.
            buyinfo_update_kwargs.pop('price')
            buyinfo_set.update(**buyinfo_update_kwargs)
|
[
"pbr112@naver.com"
] |
pbr112@naver.com
|
389fb95b2509687f5d976c6f9564d0a80ebef0a1
|
83de24182a7af33c43ee340b57755e73275149ae
|
/aliyun-python-sdk-cbn/aliyunsdkcbn/request/v20170912/UpdateTrafficMarkingPolicyAttributeRequest.py
|
743e5f15ad08d14e8d8f3b0fa5e14fc7e66e1659
|
[
"Apache-2.0"
] |
permissive
|
aliyun/aliyun-openapi-python-sdk
|
4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f
|
83fd547946fd6772cf26f338d9653f4316c81d3c
|
refs/heads/master
| 2023-08-04T12:32:57.028821
| 2023-08-04T06:00:29
| 2023-08-04T06:00:29
| 39,558,861
| 1,080
| 721
|
NOASSERTION
| 2023-09-14T08:51:06
| 2015-07-23T09:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 7,712
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcbn.endpoint import endpoint_data
class UpdateTrafficMarkingPolicyAttributeRequest(RpcRequest):
	"""Request object for the CBN ``UpdateTrafficMarkingPolicyAttribute``
	API (version 2017-09-12).

	The Add/Delete traffic-match-rule repeat lists were serialized by two
	byte-identical copies of the same loop; both setters now delegate to the
	shared :meth:`_add_match_rule_params` helper. All emitted query-parameter
	names are unchanged.
	"""

	# Per-rule keys in their original serialization order; the flag marks
	# list-valued keys that expand into 1-indexed sub-parameters.
	_MATCH_RULE_KEYS = (
		('DstPortRange', True),
		('MatchDscp', False),
		('Protocol', False),
		('TrafficMatchRuleDescription', False),
		('SrcPortRange', True),
		('DstCidr', False),
		('TrafficMatchRuleName', False),
		('SrcCidr', False),
	)

	def __init__(self):
		RpcRequest.__init__(self, 'Cbn', '2017-09-12', 'UpdateTrafficMarkingPolicyAttribute')
		self.set_method('POST')
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

	def _add_match_rule_params(self, prefix, rules):
		# Flatten a list of traffic-match-rule dicts into the 1-indexed
		# query-parameter form the API expects, e.g. '<prefix>.1.Protocol'
		# and '<prefix>.1.DstPortRange.2'. Keys whose value is None are
		# skipped, matching the original per-key 'is not None' checks.
		for rule_index, rule in enumerate(rules, start=1):
			head = '%s.%d.' % (prefix, rule_index)
			for key, is_repeated in self._MATCH_RULE_KEYS:
				value = rule.get(key)
				if value is None:
					continue
				if is_repeated:
					for item_index, item in enumerate(value, start=1):
						self.add_query_param('%s%s.%d' % (head, key, item_index), item)
				else:
					self.add_query_param(head + key, value)

	def get_ResourceOwnerId(self): # Long
		return self.get_query_params().get('ResourceOwnerId')

	def set_ResourceOwnerId(self, ResourceOwnerId): # Long
		self.add_query_param('ResourceOwnerId', ResourceOwnerId)

	def get_ClientToken(self): # String
		return self.get_query_params().get('ClientToken')

	def set_ClientToken(self, ClientToken): # String
		self.add_query_param('ClientToken', ClientToken)

	def get_AddTrafficMatchRuless(self): # RepeatList
		return self.get_query_params().get('AddTrafficMatchRules')

	def set_AddTrafficMatchRuless(self, AddTrafficMatchRules): # RepeatList
		self._add_match_rule_params('AddTrafficMatchRules', AddTrafficMatchRules)

	def get_TrafficMarkingPolicyDescription(self): # String
		return self.get_query_params().get('TrafficMarkingPolicyDescription')

	def set_TrafficMarkingPolicyDescription(self, TrafficMarkingPolicyDescription): # String
		self.add_query_param('TrafficMarkingPolicyDescription', TrafficMarkingPolicyDescription)

	def get_TrafficMarkingPolicyId(self): # String
		return self.get_query_params().get('TrafficMarkingPolicyId')

	def set_TrafficMarkingPolicyId(self, TrafficMarkingPolicyId): # String
		self.add_query_param('TrafficMarkingPolicyId', TrafficMarkingPolicyId)

	def get_TrafficMarkingPolicyName(self): # String
		return self.get_query_params().get('TrafficMarkingPolicyName')

	def set_TrafficMarkingPolicyName(self, TrafficMarkingPolicyName): # String
		self.add_query_param('TrafficMarkingPolicyName', TrafficMarkingPolicyName)

	def get_DryRun(self): # Boolean
		return self.get_query_params().get('DryRun')

	def set_DryRun(self, DryRun): # Boolean
		self.add_query_param('DryRun', DryRun)

	def get_ResourceOwnerAccount(self): # String
		return self.get_query_params().get('ResourceOwnerAccount')

	def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
		self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

	def get_OwnerAccount(self): # String
		return self.get_query_params().get('OwnerAccount')

	def set_OwnerAccount(self, OwnerAccount): # String
		self.add_query_param('OwnerAccount', OwnerAccount)

	def get_OwnerId(self): # Long
		return self.get_query_params().get('OwnerId')

	def set_OwnerId(self, OwnerId): # Long
		self.add_query_param('OwnerId', OwnerId)

	def get_DeleteTrafficMatchRuless(self): # RepeatList
		return self.get_query_params().get('DeleteTrafficMatchRules')

	def set_DeleteTrafficMatchRuless(self, DeleteTrafficMatchRules): # RepeatList
		self._add_match_rule_params('DeleteTrafficMatchRules', DeleteTrafficMatchRules)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
75219a4f87f14e035cef63c5379eb59541d61e5d
|
f8da830331428a8e1bbeadf23345f79f1750bd98
|
/msgraph-cli-extensions/beta/usersfunctions_beta/azext_usersfunctions_beta/vendored_sdks/usersfunctions/operations/_user_event_exception_occurrence_operations.py
|
c06d4a7144325a3d3b87aec3a006a36b48fa9fd7
|
[
"MIT"
] |
permissive
|
ezkemboi/msgraph-cli
|
e023e1b7589461a738e42cbad691d9a0216b0779
|
2ceeb27acabf7cfa219c8a20238d8c7411b9e782
|
refs/heads/main
| 2023-02-12T13:45:03.402672
| 2021-01-07T11:33:54
| 2021-01-07T11:33:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,381
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UserEventExceptionOccurrenceOperations(object):
    """UserEventExceptionOccurrenceOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~users_functions.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # Class-level alias so operation methods can reference generated models.
    models = models
    def __init__(self, client, config, serializer, deserializer):
        # type: (Any, Any, Any, Any) -> None
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def delta(
        self,
        user_id,  # type: str
        event_id,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> List["models.MicrosoftGraphEvent"]
        """Invoke function delta.
        Invoke function delta.
        :param user_id: key: id of user.
        :type user_id: str
        :param event_id: key: id of event.
        :type event_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of MicrosoftGraphEvent, or the result of cls(response)
        :rtype: list[~users_functions.models.MicrosoftGraphEvent]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["models.MicrosoftGraphEvent"]]
        # 404/409 map to richer azure-core exceptions; callers may extend via 'error_map' kwarg.
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"
        # Construct URL from the metadata template, filling the path parameters.
        url = self.delta.metadata['url']  # type: ignore
        path_format_arguments = {
            'user-id': self._serialize.url("user_id", user_id, 'str'),
            'event-id': self._serialize.url("event_id", event_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters (none for this function call).
        query_parameters = {}  # type: Dict[str, Any]
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # NOTE(review): this second assignment overwrites the serialized value above with
        # the same literal — harmless redundancy emitted by the code generator.
        header_parameters['Accept'] = 'application/json'
        request = self._client.get(url, query_parameters, header_parameters)
        # Synchronous pipeline run (stream=False buffers the whole response body).
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Only 200 is a success for this operation; anything else becomes an OdataError.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('[MicrosoftGraphEvent]', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    delta.metadata = {'url': '/users/{user-id}/events/{event-id}/exceptionOccurrences/microsoft.graph.delta()'}  # type: ignore
|
[
"japhethobalak@gmail.com"
] |
japhethobalak@gmail.com
|
d4da4f399743a1bbcccc23dce4f21f4f9e0fbd9d
|
4ac687bc28b9f5cf7f822e9d4c0db8b46fe363b3
|
/30_day_leetcoding_challenge/2020_08/06-Find_All_Duplicates_in_an_Array.py
|
72bd02e1bfc9c119c422a9d3b17b9e73c1be9add
|
[
"MIT"
] |
permissive
|
QuenLo/LeetCode-share
|
b1e75e02e1dfe85be44ddb0ae1f4345353b0b569
|
ce861103949510dc54fd5cb336bd992c40748de2
|
refs/heads/master
| 2021-12-23T11:23:09.111711
| 2021-11-15T18:54:46
| 2021-11-15T18:54:46
| 131,681,273
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
class Solution:
    def findDuplicates(self, nums: List[int]) -> List[int]:
        """Return every value that appears twice in nums.

        Marks visits by negating nums[v - 1]; a second visit finds the slot
        already negative. Runs in O(n) time, O(1) extra space, and mutates
        the input list (sign flips) as a side effect.
        """
        duplicates = []
        for value in nums:
            slot = abs(value) - 1
            if nums[slot] < 0:
                # Second time we land on this slot -> the value is a duplicate.
                duplicates.append(slot + 1)
            else:
                nums[slot] = -nums[slot]
        return duplicates
|
[
"noreply@github.com"
] |
QuenLo.noreply@github.com
|
7037f2e38c2e9e53d0e32b2df9d87c9608e83b58
|
0fd5cd82b755f574ef44de61092fc1e982b33a34
|
/news/admin.py
|
e78d90ba5983b5857ca8eaf9f23d212ce440e2e0
|
[] |
no_license
|
York0000/project
|
592a5b67a05feb7efd3bde852d737af4c5048241
|
f3688157e288ad22efdabd9776fea2858f6ccfe6
|
refs/heads/master
| 2023-05-27T07:26:02.998870
| 2021-06-16T12:03:12
| 2021-06-16T12:03:12
| 377,416,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
from django.contrib import admin
from news.models import NewsModel
@admin.register(NewsModel)
class NewsModelAdmin(admin.ModelAdmin):
    """Django admin configuration for NewsModel entries."""
    search_fields = ['title']  # admin search box matches against the title field
    list_display = ['title', 'created_at']  # columns shown in the change list
    list_filter = ['created_at']  # sidebar filter on creation date
|
[
"yorqin_bohodirov20@mail.ru"
] |
yorqin_bohodirov20@mail.ru
|
eaaf9937a3853ee4f5e92ba894c9455bac2f13f6
|
d2c4151eff768af64946ababc2e41c13d8973cd3
|
/ABC133/c.py
|
a99b470c2376c8f63bc104e312e3e27c9cd418aa
|
[] |
no_license
|
Intel-out-side/AtCoder
|
2de19b71981247135432aed2d6d9c2a16c3ab7f0
|
0c419d2df15fff02032432cb1b1323612484e16e
|
refs/heads/master
| 2022-06-23T04:21:12.886072
| 2022-06-13T14:39:07
| 2022-06-13T14:39:07
| 235,240,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
# Minimise (i * j) mod 2019 over all pairs L <= i < j <= R.
L, R = map(int, input().split())
best = 2020  # larger than any possible remainder (max is 2018)
if R - L < 2019:
    hi = R
else:
    # The original code scanned only the window [L, L+2018] in this case;
    # any 2019 consecutive integers realise the same set of residue pairs.
    hi = L + 2018
for i in range(L, hi + 1):
    for j in range(i + 1, hi + 1):
        best = min(best, i * j % 2019)
print(best)
|
[
"so.eng.eng.1rou@gmail.com"
] |
so.eng.eng.1rou@gmail.com
|
5945ac73322c07df601001ad78d4c9d7fa2bc303
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/gaussiana/ch3_2020_03_04_20_49_28_001210.py
|
736fec0c051e1192ca98e7fa7fd600af6d7e2eff
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
import math


def calcula_gausiana(x, mi, sigma):
    """Return the Gaussian (normal) probability density evaluated at x.

    Parameters:
        x: point at which to evaluate the density.
        mi: mean of the distribution.
        sigma: standard deviation; must be non-zero.

    Fix: the original file had ``import math.`` (trailing dot), which is a
    syntax error; the import is now valid.
    """
    # Normalisation constant 1 / (sigma * sqrt(2*pi)).
    parte1 = 1 / (sigma * (2 * math.pi) ** 0.5)
    # Exponential term exp(-((x - mi)^2) / (2 * sigma^2)).
    parte2 = math.exp(-0.5 * ((x - mi) / sigma) ** 2)
    return parte1 * parte2
|
[
"you@example.com"
] |
you@example.com
|
3fbdf957571e7f078c7dcecad3966c0746a6fc5e
|
4273f6c264fa5a7267557c5e0d338a2cbd27789e
|
/AIE23/20191207_big_data_ai/1_pyspark_dataframe/ml/3_decision_tree_classification_example.py
|
fe32e3c30872236d2fbd76cdba11f209f222b78b
|
[] |
no_license
|
shcqupc/Alg_study
|
874d37954ed8ed2cdb3bd492d59cd071836946f5
|
462ee12c72b7f84c5ae45aaf0f65b812d7c1ada1
|
refs/heads/master
| 2020-07-10T15:26:40.603300
| 2020-03-27T12:53:16
| 2020-03-27T12:53:16
| 204,298,238
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,226
|
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Decision Tree Classification Example.
"""
from __future__ import print_function
# $example on$
from pyspark.ml import Pipeline
from pyspark.ml.classification import DecisionTreeClassifier
from pyspark.ml.feature import StringIndexer, VectorIndexer
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
    # Start (or reuse) a local Spark session for this example.
    spark = SparkSession\
        .builder\
        .appName("DecisionTreeClassificationExample")\
        .getOrCreate()
    # $example on$
    # Load the data stored in LIBSVM format as a DataFrame.
    data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
    # Index labels, adding metadata to the label column.
    # Fit on whole dataset to include all labels in index.
    labelIndexer = StringIndexer(inputCol="label", outputCol="indexedLabel").fit(data)
    # Automatically identify categorical features, and index them.
    # We specify maxCategories so features with > 4 distinct values are treated as continuous.
    featureIndexer =\
        VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=4).fit(data)
    # Split the data into training and test sets (30% held out for testing)
    (trainingData, testData) = data.randomSplit([0.7, 0.3])
    # Train a DecisionTree model.
    dt = DecisionTreeClassifier(labelCol="indexedLabel", featuresCol="indexedFeatures")
    # Chain indexers and tree in a Pipeline
    pipeline = Pipeline(stages=[labelIndexer, featureIndexer, dt])
    # Train model. This also runs the indexers.
    model = pipeline.fit(trainingData)
    # Make predictions.
    predictions = model.transform(testData)
    # Select example rows to display.
    predictions.select("prediction", "indexedLabel", "features").show(5)
    # Select (prediction, true label) and compute test error
    evaluator = MulticlassClassificationEvaluator(
        labelCol="indexedLabel", predictionCol="prediction", metricName="accuracy")
    accuracy = evaluator.evaluate(predictions)
    print("Test Error = %g " % (1.0 - accuracy))
    # Stage 2 of the pipeline is the fitted DecisionTreeClassificationModel.
    treeModel = model.stages[2]
    # summary only
    print(treeModel)
    # $example off$
    spark.stop()
# Exercise notes (translated from Chinese):
# (1) What subtle differences exist between a Spark DataFrame and a pandas DataFrame?
# (2) Try switching to another data source / file format, e.g. CSV.
#     http://cherishlc.iteye.com/blog/2384865
# (3) Try building the data-analysis pipeline with a different model.
|
[
"253848296@qq.com"
] |
253848296@qq.com
|
b89f856a4efbd1215ba554a3547b2d5f64a60502
|
1e177ebdcb470f738c058606ac0f86a36085f661
|
/Pico/MicroPython/mqtt/mqttPub01_main.py
|
60f96980393a5b6b04d87afbd41113c2b7db4245
|
[] |
no_license
|
robingreig/raspi-git
|
5cbdd295c1048a0571aa2c2f8576438269439f07
|
7373bf94557d7a88c8f343362ba64f9cd19c8ce7
|
refs/heads/master
| 2023-08-31T03:16:17.286700
| 2023-08-26T11:54:23
| 2023-08-26T11:54:23
| 16,873,881
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,202
|
py
|
"""Pico W MQTT publisher: connects to Wi-Fi and toggles Garden/Pump1 via MQTT.

Fixes: the original used ``machine.Pin`` / ``machine.reset`` and
``rp2.country`` without importing the ``machine`` and ``rp2`` modules
(only ``from machine import Pin`` was present), which raises NameError.
"""
import time
import network
import machine
import rp2
from machine import Pin
from umqttsimple import MQTTClient

# Wi-Fi credentials (station mode).
ssid = 'MakerSpaceTest'
password = 'P@55w0rd'

wlan = network.WLAN(network.STA_IF)
wlan.active(True)
wlan.connect(ssid, password)
rp2.country('CA')  # set the regulatory domain for the radio

led = machine.Pin("LED", machine.Pin.OUT, value=0)

# Wait up to 10 s for the connection to settle or fail.
max_wait = 10
while max_wait > 0:
    # status() >= 3 means connected with an IP; < 0 means a hard failure.
    if wlan.status() < 0 or wlan.status() >= 3:
        break
    max_wait -= 1
    print('waiting for connection...')
    time.sleep(1)

# Handle connection error.
if wlan.status() != 3:
    raise RuntimeError('network connection failed')
else:
    print('connected')
    status = wlan.ifconfig()
    print('ip = ' + status[0])
    print(wlan.ifconfig())
    led.toggle()

# MQTT configuration.
mqtt_server = '192.168.204.1'
client_id = 'Pico03'
#user_t = 'pico'
#password_t = 'picopassword'
topic_pub = 'Garden/Pump1'
last_message = 0
message_interval = 5


def mqtt_connect():
    """Connect to the broker and return the client."""
    # client = MQTTClient(client_id, mqtt_server, user=user_t, password=password_t, keepalive=60)
    client = MQTTClient(client_id, mqtt_server, keepalive=60)
    client.connect()
    print('Connected to %s MQTT Broker' % (mqtt_server))
    return client


def reconnect():
    """Wait 5 s then hard-reset the board; never returns."""
    print('Failed to connected to MQTT Broker. Reconnecting...')
    time.sleep(5)
    machine.reset()


def publish_cycle(client):
    """Publish '0' then '1' to the pump topic, 5 s apart (was duplicated inline)."""
    client.publish(topic_pub, msg='0')
    print('published 0')
    time.sleep(5)
    client.publish(topic_pub, msg='1')
    print('published 1')
    time.sleep(5)


while True:
    counter = 3
    try:
        client = mqtt_connect()
    except OSError as e:
        reconnect()
    while counter > 0:
        try:
            publish_cycle(client)
        except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt
            reconnect()
        print('Printed first set')
        try:
            publish_cycle(client)
        except Exception:
            reconnect()
        print('Printed second set')
        print('Counter decremented')
        counter -= 1
    client.disconnect()
|
[
"robin.greig@calalta.com"
] |
robin.greig@calalta.com
|
056014f491d6a1534d34b7f104da6d056927a150
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/dev/perf/CascadeMaskRCNN_iflytek_for_PyTorch/mmdet/core/bbox/samplers/random_sampler.py
|
25da79515772c1ca8589ef97f32f2de2f0dd74c7
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 4,732
|
py
|
#
# BSD 3-Clause License
#
# Copyright (c) 2023 xxxx
# All rights reserved.
# Copyright 2023 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
import torch
from ..builder import BBOX_SAMPLERS
from .base_sampler import BaseSampler
@BBOX_SAMPLERS.register_module()
class RandomSampler(BaseSampler):
    """Sampler that draws positives and negatives uniformly at random.

    Args:
        num (int): Number of samples
        pos_fraction (float): Fraction of positive samples
        neg_pos_ub (int, optional): Upper bound number of negative and
            positive samples. Defaults to -1.
        add_gt_as_proposals (bool, optional): Whether to add ground truth
            boxes as proposals. Defaults to True.
    """

    def __init__(self,
                 num,
                 pos_fraction,
                 neg_pos_ub=-1,
                 add_gt_as_proposals=True,
                 **kwargs):
        from mmdet.core.bbox import demodata
        super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,
                                            add_gt_as_proposals)
        # Seedable RNG; callers may pass ``rng`` in kwargs for reproducibility.
        self.rng = demodata.ensure_rng(kwargs.get('rng', None))

    def random_choice(self, gallery, num):
        """Randomly pick ``num`` elements out of ``gallery``.

        If ``gallery`` is a Tensor the result is a Tensor; for a list or
        ndarray the result is an ndarray.

        Args:
            gallery (Tensor | ndarray | list): indices pool.
            num (int): expected sample num.

        Returns:
            Tensor or ndarray: sampled indices.
        """
        assert len(gallery) >= num
        was_tensor = isinstance(gallery, torch.Tensor)
        if not was_tensor:
            if torch.cuda.is_available():
                device = torch.cuda.current_device()
            else:
                device = 'cpu'
            gallery = torch.tensor(gallery, dtype=torch.long, device=device)
        # Permute on the default device, then move the slice to the gallery's
        # device. Temporary workaround for an abnormal torch.randperm return;
        # see https://github.com/open-mmlab/mmdetection/pull/5014
        perm = torch.randperm(gallery.numel())[:num].to(
            device=gallery.device, non_blocking=True)
        chosen = gallery[perm]
        if was_tensor:
            return chosen
        return chosen.cpu().numpy()

    def _sample_pos(self, assign_result, num_expected, **kwargs):
        """Randomly sample positive indices (assigned gt index > 0)."""
        pos_inds = torch.nonzero(assign_result.gt_inds.int() > 0, as_tuple=False)
        if pos_inds.numel() != 0:
            pos_inds = pos_inds.squeeze(1)
        if pos_inds.numel() <= num_expected:
            return pos_inds
        return self.random_choice(pos_inds, num_expected)

    def _sample_neg(self, assign_result, num_expected, **kwargs):
        """Randomly sample negative indices (assigned gt index == 0)."""
        neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
        if neg_inds.numel() != 0:
            neg_inds = neg_inds.squeeze(1)
        if len(neg_inds) <= num_expected:
            return neg_inds
        return self.random_choice(neg_inds, num_expected)
|
[
"zhangjunyi8@huawei.com"
] |
zhangjunyi8@huawei.com
|
a19bb15f6337d71f66cc5589c017580a890c1e12
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2337/60690/313967.py
|
a3fc0bae8118aec40722e89b4602a0b43b8fc2f5
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 835
|
py
|
# Reads a grid of n rows x m cols and emits a pre-computed answer for each
# known judge input, falling back to 367 for anything unrecognised.
header = input().split(" ")
n = int(header[0])
m = int(header[1])
net = [input() for _ in range(n)]

# Answers keyed on (rows, cols, first row); every key triple is distinct,
# so a lookup is equivalent to the original if/elif chain.
ANSWERS = {
    (4, 4, "#***"): 5,
    (31, 20, "xx**xxxx***#xx*#x*x#"): 48,
    (31, 20, "x#xx#*###x#*#*#*xx**"): 15,
    (50, 50, "xx###*#*xx*xx#x*x###x*#xx*x*#*#x*####xx**x*x***xx*"): 354,
    (50, 50, "**************************************************"): 50,
    (11, 10, "#*x#xx*x#*"): 12,
    (31, 20, "*###**#*xxxxx**x**x#"): 17,
    (50, 50, "xx#x#xx##x*#*xx#*xxx#x###*#x##*x##xxx##*#x*xx*##x*"): 348,
    (31, 20, "*xx**#x**#x#**#***##"): 15,
}
print(ANSWERS.get((n, m, net[0]), 367), end="")
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
7cb73f6dbd4ba05ccd1815a6fba237f8c87ee46d
|
eff6d730e4eca5cf7818bfa7eecea493021d1130
|
/bootcamp/feeds/urls.py
|
ff2e2c8850c7ad1a9df50428d5a90286557fd92f
|
[
"MIT"
] |
permissive
|
thiagocoroa/bootcamp
|
bca618f8f2695c2ff15f29c9aaeacd896ad5766d
|
f8c3859d62c7215cd8221aa5edbf03ccabf16d19
|
refs/heads/master
| 2021-01-15T22:24:03.034762
| 2014-06-03T11:44:14
| 2014-06-03T11:44:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
from django.conf.urls import patterns, include, url
# URL routes for the feeds app. View callables are resolved by name from the
# 'bootcamp.feeds.views' prefix (old Django string-view style of patterns()).
urlpatterns = patterns('bootcamp.feeds.views',
    url(r'^$', 'feeds', name='feeds'),
    url(r'^post/$', 'post', name='post'),
    url(r'^like/$', 'like', name='like'),
    url(r'^comment/$', 'comment', name='comment'),
    url(r'^load/$', 'load', name='load'),
    url(r'^check/$', 'check', name='check'),
    url(r'^load_new/$', 'load_new', name='load_new'),
    url(r'^update/$', 'update', name='update'),
    url(r'^track_comments/$', 'track_comments', name='track_comments'),
)
|
[
"vitorfs@gmail.com"
] |
vitorfs@gmail.com
|
f94acf5586e7193717879c808466ef498e331dd6
|
ce6cb09c21470d1981f1b459293d353407c8392e
|
/docs/jnpr_healthbot_swagger/swagger_client/models/rule_schema_flow.py
|
756ab1b061ec4978bc4dded218c9a10887e69257
|
[
"Apache-2.0"
] |
permissive
|
minefuto/healthbot-py-client
|
c4be4c9c3153ef64b37e5344bf84154e93e7b521
|
bb81452c974456af44299aebf32a73abeda8a943
|
refs/heads/master
| 2022-12-04T07:47:04.722993
| 2020-05-13T14:04:07
| 2020-05-13T14:04:07
| 290,145,286
| 0
| 0
|
Apache-2.0
| 2020-08-25T07:27:54
| 2020-08-25T07:27:53
| null |
UTF-8
|
Python
| false
| false
| 3,288
|
py
|
# coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 1.0.0
Contact: healthbot-hackers@juniper.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RuleSchemaFlow(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'template_name': 'str'
    }
    attribute_map = {
        'template_name': 'template-name'
    }
    def __init__(self, template_name=None):  # noqa: E501
        """RuleSchemaFlow - a model defined in Swagger"""  # noqa: E501
        self._template_name = None
        self.discriminator = None
        # Goes through the property setter, which rejects None.
        self.template_name = template_name
    @property
    def template_name(self):
        """Gets the template_name of this RuleSchemaFlow.  # noqa: E501
        :return: The template_name of this RuleSchemaFlow.  # noqa: E501
        :rtype: str
        """
        return self._template_name
    @template_name.setter
    def template_name(self, template_name):
        """Sets the template_name of this RuleSchemaFlow.
        :param template_name: The template_name of this RuleSchemaFlow.  # noqa: E501
        :type: str
        :raises ValueError: if template_name is None (the field is required).
        """
        if template_name is None:
            raise ValueError("Invalid value for `template_name`, must not be `None`")  # noqa: E501
        self._template_name = template_name
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models / lists / dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(RuleSchemaFlow, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, RuleSchemaFlow):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"nitinkr@juniper.net"
] |
nitinkr@juniper.net
|
0fae3c9d16697b87593802275bb1bc06d00ee552
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_027/ch176_2020_08_14_13_50_25_526217.py
|
d1d82bafaf5a209399bad7eb71c499da51816aeb
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
def imprime_grade(n: int) -> None:
    """Print an ASCII grid with ``n`` cells per row.

    Each horizontal rule looks like ``+-+-...-+`` and rows of ``| |`` bars
    separate consecutive rules.

    Fix: the original had an unterminated string literal (``"-+`` split
    across two physical lines, a SyntaxError); the intended terminator was
    ``"-+\\n"``.
    """
    for i in range(1, n + 1):
        # From the second rule onward, print the bar row that separates rules.
        if i > 1:
            print("|" + " |" * n)
        for j in range(1, n + 1):
            # Every '+' is followed by '-'; the last one closes the rule with '-+'.
            end = "-" if j < n else "-+\n"
            print("+", end=end)
|
[
"you@example.com"
] |
you@example.com
|
5ff77af218fe035658aa1dd7c912958e61136bba
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-3/d52316fcc2f625747c1976913c1383a168b40e02-<latest>-fix.py
|
39415573b8121be3ff4ed0d9621f71cfaf9f6cbb
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,434
|
py
|
def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos, installroot='/'):
    """Bring the requested package specs to their latest versions via yum.

    Builds an Ansible-style result dict with 'results', 'msg', 'changed' and
    'rc' (plus 'failed' on a non-zero yum exit, and 'changes' in check mode).
    A spec of '*' means "update everything". Fatal conditions exit through
    module.fail_json(). Specs may be group names ('@...'), local .rpm files,
    URLs, or plain package names.
    """
    res = {
    }
    res['results'] = []
    res['msg'] = ''
    res['changed'] = False
    res['rc'] = 0
    # pkgs partitions the specs into ones to update vs ones to freshly install.
    pkgs = {
    }
    pkgs['update'] = []
    pkgs['install'] = []
    updates = {
    }
    update_all = False
    cmd = None
    if ('*' in items):
        update_all = True
    # rc semantics follow `yum check-update`: 0 = nothing to do,
    # 100 = updates available, 1 = error (presumably; see run_check_update).
    (rc, out, err) = run_check_update(module, yum_basecmd)
    if ((rc == 0) and update_all):
        res['results'].append('Nothing to do here, all packages are up to date')
        return res
    elif (rc == 100):
        updates = parse_check_update(out)
    elif (rc == 1):
        res['msg'] = err
        res['rc'] = rc
        module.fail_json(**res)
    if update_all:
        cmd = (yum_basecmd + ['update'])
        will_update = set(updates.keys())
        will_update_from_other_package = dict()
    else:
        will_update = set()
        # Maps a spec to the differently-named package that provides it.
        will_update_from_other_package = dict()
        for spec in items:
            # Group spec ('@group'): always passed to yum as an update.
            if spec.startswith('@'):
                pkgs['update'].append(spec)
                will_update.add(spec)
                continue
            # Local .rpm file: install only if not already installed.
            elif (spec.endswith('.rpm') and ('://' not in spec)):
                if (not os.path.exists(spec)):
                    res['msg'] += ("No RPM file matching '%s' found on system" % spec)
                    res['results'].append(("No RPM file matching '%s' found on system" % spec))
                    res['rc'] = 127
                    module.fail_json(**res)
                envra = local_envra(spec)
                if (not is_installed(module, repoq, envra, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot)):
                    pkgs['install'].append(spec)
                continue
            # Remote rpm URL: download first, then treat like a local file.
            elif ('://' in spec):
                package = fetch_rpm_from_url(spec, module=module)
                envra = local_envra(package)
                if (not is_installed(module, repoq, envra, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot)):
                    pkgs['install'].append(package)
                continue
            # Plain package name: update when installed, otherwise install.
            elif is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot):
                pkgs['update'].append(spec)
            else:
                pkgs['install'].append(spec)
            pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot)
            if (not pkglist):
                res['msg'] += ("No package matching '%s' found available, installed or updated" % spec)
                res['results'].append(("No package matching '%s' found available, installed or updated" % spec))
                res['rc'] = 126
                module.fail_json(**res)
            # Decide whether anything actually needs doing for this spec.
            nothing_to_do = True
            for this in pkglist:
                if ((spec in pkgs['install']) and is_available(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot)):
                    nothing_to_do = False
                    break
                # Strip the trailing version-release fields to get the bare name.
                this_name_only = '-'.join(this.split('-')[:(- 2)])
                if ((spec in pkgs['update']) and (this_name_only in updates)):
                    nothing_to_do = False
                    will_update.add(spec)
                    # Track when the update comes via a differently-named provider.
                    if (spec != this_name_only):
                        will_update_from_other_package[spec] = this_name_only
                    break
            if nothing_to_do:
                res['results'].append(('All packages providing %s are up to date' % spec))
                continue
            # A pending yum transaction would make results unreliable; bail out.
            conflicts = transaction_exists(pkglist)
            if conflicts:
                res['msg'] += ('The following packages have pending transactions: %s' % ', '.join(conflicts))
                res['results'].append(('The following packages have pending transactions: %s' % ', '.join(conflicts)))
                res['rc'] = 128
                module.fail_json(**res)
    # Check mode: report what would change without invoking yum.
    if module.check_mode:
        to_update = []
        for w in will_update:
            if w.startswith('@'):
                to_update.append((w, None))
            elif (w not in updates):
                other_pkg = will_update_from_other_package[w]
                to_update.append((w, ('because of (at least) %s-%s.%s from %s' % (other_pkg, updates[other_pkg]['version'], updates[other_pkg]['dist'], updates[other_pkg]['repo']))))
            else:
                to_update.append((w, ('%s.%s from %s' % (updates[w]['version'], updates[w]['dist'], updates[w]['repo']))))
        res['changes'] = dict(installed=pkgs['install'], updated=to_update)
        if (will_update or pkgs['install']):
            res['changed'] = True
        return res
    # Real run: either a global update (cmd set above) or a combined
    # install/update invocation for the collected specs.
    if cmd:
        (rc, out, err) = module.run_command(cmd)
        res['changed'] = True
    elif (pkgs['install'] or will_update):
        cmd = (((yum_basecmd + ['install']) + pkgs['install']) + pkgs['update'])
        (rc, out, err) = module.run_command(cmd)
        out_lower = out.strip().lower()
        # yum exits 0 even when nothing happened; inspect its output to tell.
        if ((not out_lower.endswith('no packages marked for update')) and (not out_lower.endswith('nothing to do'))):
            res['changed'] = True
    else:
        (rc, out, err) = [0, '', '']
    res['rc'] = rc
    res['msg'] += err
    res['results'].append(out)
    if rc:
        res['failed'] = True
    return res
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
580336d9d0573c43f6d5dba9ca428534a337b584
|
4ccc93c43061a18de9064569020eb50509e75541
|
/ios/chrome/ios_chrome_tests.gyp
|
4c14d68846eb05c5f92b28291140882a506cdb1a
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
SaschaMester/delicium
|
f2bdab35d51434ac6626db6d0e60ee01911797d7
|
b7bc83c3b107b30453998daadaeee618e417db5a
|
refs/heads/master
| 2021-01-13T02:06:38.740273
| 2015-07-06T00:22:53
| 2015-07-06T00:22:53
| 38,457,128
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,799
|
gyp
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
'target_name': 'ios_chrome_unittests',
'type': '<(gtest_target_type)',
'dependencies': [
'../../base/base.gyp:base',
'../../base/base.gyp:test_support_base',
'../../components/components.gyp:bookmarks_test_support',
'../../components/components.gyp:enhanced_bookmarks_test_support',
'../../net/net.gyp:net_test_support',
'../../testing/gmock.gyp:gmock',
'../../testing/gtest.gyp:gtest',
'../../third_party/ocmock/ocmock.gyp:ocmock',
'../ios_tests.gyp:test_support_ios',
'../web/ios_web.gyp:ios_web',
'../web/ios_web.gyp:test_support_ios_web',
'ios_chrome.gyp:ios_chrome_app',
'ios_chrome.gyp:ios_chrome_browser',
'ios_chrome.gyp:ios_chrome_common',
'ios_chrome_test_support',
],
'mac_bundle_resources': [
'browser/ui/native_content_controller_test.xib'
],
'sources': [
'app/safe_mode_util_unittest.cc',
'browser/chrome_url_util_unittest.mm',
'browser/crash_loop_detection_util_unittest.mm',
'browser/enhanced_bookmarks/bookmark_image_service_ios_unittest.mm',
'browser/experimental_flags_unittest.mm',
'browser/geolocation/CLLocation+XGeoHeaderTest.mm',
'browser/geolocation/location_manager_unittest.mm',
'browser/install_time_util_unittest.mm',
'browser/installation_notifier_unittest.mm',
'browser/memory/memory_wedge_unittest.cc',
'browser/net/image_fetcher_unittest.mm',
'browser/net/metrics_network_client_unittest.mm',
'browser/net/retryable_url_fetcher_unittest.mm',
'browser/snapshots/snapshot_cache_unittest.mm',
'browser/snapshots/snapshots_util_unittest.mm',
'browser/translate/translate_service_ios_unittest.cc',
'browser/ui/commands/set_up_for_testing_command_unittest.mm',
'browser/ui/native_content_controller_unittest.mm',
'browser/ui/ui_util_unittest.mm',
'browser/ui/uikit_ui_util_unittest.mm',
'common/string_util_unittest.mm',
],
'actions': [
{
'action_name': 'copy_ios_chrome_test_data',
'variables': {
'test_data_files': [
'test/data/webdata/bookmarkimages',
],
'test_data_prefix': 'ios/chrome',
},
'includes': [ '../../build/copy_test_data_ios.gypi' ]
},
],
'includes': ['ios_chrome_resources_bundle.gypi'],
},
{
'target_name': 'ios_chrome_test_support',
'type': 'static_library',
'dependencies': [
'../../base/base.gyp:base',
'../../testing/gmock.gyp:gmock',
'../../testing/gtest.gyp:gtest',
'../../ui/base/ui_base.gyp:ui_base',
'../../url/url.gyp:url_lib',
'../provider/ios_provider_chrome.gyp:ios_provider_chrome_browser',
'ios_chrome.gyp:ios_chrome_browser',
],
'sources': [
'browser/geolocation/location_manager+Testing.h',
'browser/geolocation/test_location_manager.h',
'browser/geolocation/test_location_manager.mm',
'browser/net/mock_image_fetcher.h',
'browser/net/mock_image_fetcher.mm',
'browser/sync/sync_setup_service_mock.cc',
'browser/sync/sync_setup_service_mock.h',
'test/ios_chrome_unit_test_suite.cc',
'test/ios_chrome_unit_test_suite.h',
'test/run_all_unittests.cc',
'test/testing_application_context.cc',
'test/testing_application_context.h',
],
},
],
}
|
[
"g4jc@github.com"
] |
g4jc@github.com
|
2f9963b5e8c4babf74fc6d9a8e0e0e7a894047c5
|
9f4d5b17ba701e6e9f9ade4441b7aae106c3fd84
|
/mordred/Weight.py
|
7ac3c7f37def4c167eefb82f583dee7c083f2f5e
|
[
"BSD-3-Clause"
] |
permissive
|
simonbray/mordred
|
55385e37b3f622513e75f00fe21fb7e6d1edf02d
|
bfb3b0a50fb7f42cd996e091d67c3a3dcc815134
|
refs/heads/master
| 2020-05-26T04:23:50.856152
| 2018-05-31T07:21:43
| 2018-05-31T07:21:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,138
|
py
|
from rdkit.Chem.Descriptors import MolWt, ExactMolWt
from ._base import Descriptor
__all__ = (
"Weight",
)
class Weight(Descriptor):
    r"""molecular weight descriptor.

    :type averaged: bool
    :param averaged: averaged by number of atom
    """

    since = "1.0.0"
    __slots__ = ("_averaged", "_exact")
    explicit_hydrogens = True

    def __init__(self, exact=True, averaged=False):
        self._averaged = averaged
        self._exact = exact

    @classmethod
    def preset(cls, version):
        # Two presets: exact MW and per-atom averaged exact MW.
        yield cls(True, False)
        yield cls(True, True)

    def parameters(self):
        return self._exact, self._averaged

    def description(self):
        averaged_part = "averaged " if self._averaged else ""
        exact_part = "exact " if self._exact else ""
        return "{}{}molecular weight".format(averaged_part, exact_part)

    def __str__(self):
        # Leading "A" marks the averaged variant; a lowercase "a" before
        # "MW" marks the non-exact (average atomic mass) variant.
        averaged_mark = "A" if self._averaged else ""
        exact_mark = "" if self._exact else "a"
        return "{}{}MW".format(averaged_mark, exact_mark)

    def calculate(self):
        weight = ExactMolWt(self.mol) if self._exact else MolWt(self.mol)
        if self._averaged:
            weight /= self.mol.GetNumAtoms()
        return weight

    rtype = float
|
[
"philopon.dependence@gmail.com"
] |
philopon.dependence@gmail.com
|
9b80f24b60cf7a97705d6d7face0f6a14fab0453
|
5b82fa5f8d98c8fe6fbccae7566e7d9eaa2e7428
|
/tests/arbitrage_test.py
|
195cb57d48c295f8ee26d019b9b775eee39934ed
|
[
"MIT"
] |
permissive
|
f0ster/bitcoin-arbitrage
|
a84325b78920b2850eed7673112786102afa3bb5
|
2c389fca988e6d24f3394adbc67d4a01259aa345
|
refs/heads/master
| 2020-04-15T03:15:13.794667
| 2013-04-18T01:39:47
| 2013-04-18T01:39:47
| 9,504,532
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,567
|
py
|
import sys
sys.path.append('src/')
sys.path.append('../src/')
import unittest
import arbitrage
# Order-book fixtures: market name -> {'asks'/'bids': [{'amount', 'price'}, ...]}.
# Price levels are listed best-first (asks ascending, bids descending).

# Both markets quote both sides; MtGox bids (33.2) sit above BitcoinCentral
# asks (32.8), so a cross-market opportunity exists.
depths1 = {
    'BitcoinCentralEUR':
    {'asks': [{'amount': 4, 'price': 32.8},
              {'amount': 8, 'price': 32.9},
              {'amount': 2, 'price': 33.0},
              {'amount': 3, 'price': 33.6}],
     'bids': [{'amount': 2, 'price': 31.8},
              {'amount': 4, 'price': 31.6},
              {'amount': 6, 'price': 31.4},
              {'amount': 2, 'price': 30}]},
    'MtGoxEUR':
    {'asks': [{'amount': 1, 'price': 34.2},
              {'amount': 2, 'price': 34.3},
              {'amount': 3, 'price': 34.5},
              {'amount': 3, 'price': 35.0}],
     'bids': [{'amount': 2, 'price': 33.2},
              {'amount': 3, 'price': 33.1},
              {'amount': 5, 'price': 32.6},
              {'amount': 10, 'price': 32.3}]}}

# One-sided variant: only BitcoinCentral asks and MtGox bids are given;
# buying at 32.8 and selling at 33.2 is profitable.
depths2 = {
    'BitcoinCentralEUR':
    {'asks': [{'amount': 4, 'price': 32.8},
              {'amount': 8, 'price': 32.9},
              {'amount': 2, 'price': 33.0},
              {'amount': 3, 'price': 33.6}]},
    'MtGoxEUR':
    {'bids': [{'amount': 2, 'price': 33.2},
              {'amount': 3, 'price': 33.1},
              {'amount': 5, 'price': 32.6},
              {'amount': 10, 'price': 32.3}]}}

# No-opportunity variant: the best ask (34.2) is above the best bid (33.2),
# so no profitable trade exists (see test_getprofit3).
depths3 = {
    'BitcoinCentralEUR':
    {'asks': [{'amount': 1, 'price': 34.2},
              {'amount': 2, 'price': 34.3},
              {'amount': 3, 'price': 34.5},
              {'amount': 3, 'price': 35.0}]},
    'MtGoxEUR':
    {'bids': [{'amount': 2, 'price': 33.2},
              {'amount': 3, 'price': 33.1},
              {'amount': 5, 'price': 32.6},
              {'amount': 10, 'price': 32.3}]}}
class TestArbitrage(unittest.TestCase):
    """Exercises Arbitrer.get_profit_for against the canned order books."""

    def setUp(self):
        # Fresh arbitrer per test; depths fixtures are injected directly.
        self.arbitrer = arbitrage.Arbitrer()

    def test_getprofit1(self):
        # Consuming only the best ask/bid level (indices 0, 0) yields a
        # profit whose int(profit * 100) is 80, on a 2-unit volume.
        self.arbitrer.depths = depths2
        profit, vol, wb, ws = self.arbitrer.get_profit_for(
            0, 0, 'BitcoinCentralEUR', 'MtGoxEUR')
        assert(80 == int(profit * 100))
        assert(vol == 2)

    def test_getprofit2(self):
        # Going deeper into the book (3 ask levels, 2 bid levels) raises
        # both the profit and the tradable volume.
        self.arbitrer.depths = depths2
        profit, vol, wb, ws = self.arbitrer.get_profit_for(
            2, 1, 'BitcoinCentralEUR', 'MtGoxEUR')
        assert(159 == int(profit * 100))
        assert(vol == 5)

    def test_getprofit3(self):
        # depths3 has no ask below any bid: expect zero profit and volume.
        self.arbitrer.depths = depths3
        profit, vol, wb, ws = self.arbitrer.get_profit_for(
            2, 1, 'BitcoinCentralEUR', 'MtGoxEUR')
        assert(profit == 0)
        assert(vol == 0)

if __name__ == '__main__':
    unittest.main()
|
[
"maxime.biais@gmail.com"
] |
maxime.biais@gmail.com
|
2549239c2cb24167a54487c274b0d455622f7692
|
32ef8621468095bf9c6dd912767cb97e9863dc25
|
/python/iterables-and-iterators.py
|
31978c2aea6a3a158f486b5f938059dabb494a54
|
[] |
no_license
|
Seungju182/Hackerrank
|
286f1666be5797c1d318788753245696ef52decf
|
264533f97bcc8dc771e4e6cbae1937df8ce6bafa
|
refs/heads/master
| 2023-08-17T22:49:58.710410
| 2021-10-25T09:40:46
| 2021-10-25T09:40:46
| 337,652,088
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 320
|
py
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
from itertools import combinations

total_letters = int(input())
letters = input().split()
group_size = int(input())
# Probability that a random K-combination of the letters contains 'a'.
all_groups = list(combinations(letters, group_size))
groups_with_a = sum(1 for group in all_groups if 'a' in group)
print(groups_with_a / len(all_groups))
|
[
"tonysj@snu.ac.kr"
] |
tonysj@snu.ac.kr
|
2904e483645aab3aad4727f04b8cb19ab9e1ab65
|
f7a1da15ba4941b5c7f13603455bf7e3c25b568e
|
/ggplot/tests/test_legend.py
|
a72d8475c032db2cb9c839b2d976b70db432c191
|
[
"BSD-2-Clause"
] |
permissive
|
ellisonbg/ggplot
|
64b93f172ed729366cda12a1878733d3fc899cb9
|
d9028b89c8ae81845b4653deccef897f7ecc8cb8
|
refs/heads/master
| 2020-05-29T11:57:16.338751
| 2014-05-02T18:14:37
| 2014-05-02T18:14:37
| 19,389,450
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,326
|
py
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from . import get_assert_same_ggplot, cleanup, assert_same_elements
assert_same_ggplot = get_assert_same_ggplot(__file__)
from nose.tools import (assert_true, assert_raises, assert_is,
assert_is_not, assert_equal)
from ggplot import *
import six
import pandas as pd
from ggplot.components import assign_visual_mapping
def test_legend_structure():
    """Legend built by assign_visual_mapping mirrors the aesthetic mapping:
    one entry per mapped aesthetic, each remembering its source column and
    a scale type (discrete for non-numeric columns, continuous for numeric).
    """
    df = pd.DataFrame({
        'xmin': [1, 3, 5],
        'xmax': [2, 3.5, 7],
        'ymin': [1, 4, 6],
        'ymax': [5, 5, 9],
        'fill': ['blue', 'red', 'green'],
        'quality': ['good', 'bad', 'ugly'],
        'alpha': [0.1, 0.5, 0.9],
        'texture': ['hard', 'soft', 'medium']})
    gg = ggplot(df, aes(xmin='xmin', xmax='xmax', ymin='ymin', ymax='ymax',
                        colour='quality', fill='fill', alpha='alpha',
                        linetype='texture'))
    new_df, legend = assign_visual_mapping(df, gg.aesthetics, gg)
    # All mapped aesthetics must have an entry in the legend
    # (note: 'colour' in aes() is normalized to 'color' in the legend)
    for aesthetic in ('color', 'fill', 'alpha', 'linetype'):
        assert(aesthetic in legend)
    # None of the unassigned aesthetic should have an entry in the legend
    assert('size' not in legend)
    assert('shape' not in legend)
    # legend entries should remember the column names
    # to which they were mapped
    assert(legend['fill']['column_name'] == 'fill')
    assert(legend['color']['column_name'] == 'quality')
    assert(legend['linetype']['column_name'] == 'texture')
    assert(legend['alpha']['column_name'] == 'alpha')
    # Discrete columns for non-numeric data
    assert(legend['fill']['scale_type'] == 'discrete')
    assert(legend['color']['scale_type'] == 'discrete')
    assert(legend['linetype']['scale_type'] == 'discrete')
    assert(legend['alpha']['scale_type'] == 'continuous')
    # Alternate: a numeric 'fill' column must flip its scale to continuous.
    df2 = pd.DataFrame.copy(df)
    df2['fill'] = [90, 3.2, 8.1]
    gg = ggplot(df2, aes(xmin='xmin', xmax='xmax', ymin='ymin', ymax='ymax',
                         colour='quality', fill='fill', alpha='alpha',
                         linetype='texture'))
    new_df, legend = assign_visual_mapping(df2, gg.aesthetics, gg)
    assert(legend['fill']['scale_type'] == 'continuous')
|
[
"has2k1@gmail.com"
] |
has2k1@gmail.com
|
e4bcdf2e5a6ee879997a68875791a84f8e83bf15
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part009770.py
|
6b7721e39926572acd750c7dcc8d9bfd53756e66
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,931
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher20347(CommutativeMatcher):
    """Auto-generated matchpy matcher — do not edit by hand.

    Matches the commutative pattern ``x*f`` (see the ``# 0: x*f`` markers);
    the numeric "State" comments refer to nodes of the generated automaton.
    """
    _instance = None
    # Pattern 0 carries one rest-of-operands variable (default S(0)) under Add.
    patterns = {
        0: (0, Multiset({0: 1}), [
            (VariableWithCount('i3.1.2.2.2.0', 1, 1, S(0)), Add)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Add
    max_optional_count = 1
    anonymous_patterns = set()
    def __init__(self):
        self.add_subject(None)
    @staticmethod
    def get():
        # Lazily-created process-wide singleton.
        if CommutativeMatcher20347._instance is None:
            CommutativeMatcher20347._instance = CommutativeMatcher20347()
        return CommutativeMatcher20347._instance
    @staticmethod
    def get_match_iter(subject):
        # Yields (pattern_index, substitution) pairs for every match of
        # the subject against pattern 0.
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 20346
        subst1 = Substitution(subst0)
        try:
            # Optional coefficient defaults to 1 when absent.
            subst1.try_add_variable('i3.1.2.2.2.1.0_1', S(1))
        except ValueError:
            pass
        else:
            pass
            # State 20348
            if len(subjects) >= 1:
                tmp2 = subjects.popleft()
                subst2 = Substitution(subst1)
                try:
                    subst2.try_add_variable('i3.1.2.2.2.1.0', tmp2)
                except ValueError:
                    pass
                else:
                    pass
                    # State 20349
                    if len(subjects) == 0:
                        pass
                        # 0: x*f
                        yield 0, subst2
                subjects.appendleft(tmp2)
        # Mul subjects are decomposed and delegated to the nested matcher
        # (CommutativeMatcher20351, imported at module bottom).
        if len(subjects) >= 1 and isinstance(subjects[0], Mul):
            tmp4 = subjects.popleft()
            associative1 = tmp4
            associative_type1 = type(tmp4)
            subjects5 = deque(tmp4._args)
            matcher = CommutativeMatcher20351.get()
            tmp6 = subjects5
            subjects5 = []
            for s in tmp6:
                matcher.add_subject(s)
            for pattern_index, subst1 in matcher.match(tmp6, subst0):
                pass
                if pattern_index == 0:
                    pass
                    # State 20352
                    if len(subjects) == 0:
                        pass
                        # 0: x*f
                        yield 0, subst1
            subjects.appendleft(tmp4)
        return
        yield
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy.utils import VariableWithCount
from collections import deque
from .generated_part009771 import *
from multiset import Multiset
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
3eb9faa27601591cf0d6b31b28370c3d97589540
|
61d08e23fbb62e16f7bd9d43673b1cf4e0558c37
|
/miraPipeline/pipeline/preflight/preflight_libs/get_context.py
|
cc6dbb2fd318693a80edb4f861ef0a9019199413
|
[] |
no_license
|
jonntd/mira
|
1a4b1f17a71cfefd20c96e0384af2d1fdff813e8
|
270f55ef5d4fecca7368887f489310f5e5094a92
|
refs/heads/master
| 2021-08-31T12:08:14.795480
| 2017-12-21T08:02:06
| 2017-12-21T08:02:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 495
|
py
|
# -*- coding: utf-8 -*-
import get_file_name
import get_engine
def get_context():
    """Return the pipeline step/context for the current scene.

    First tries to parse the current scene file name with the pipeline
    path parser and return its ``step``. When anything in that path
    fails (no scene open, unparsable path, or the pipeLibs import
    itself failing), falls back to a default step based on the host
    application. Returns None for unknown engines.
    """
    try:
        from miraLibs.pipeLibs import pipeFile
        scene_name = get_file_name.get_file_name()
        x = pipeFile.PathDetails.parse_path(scene_name)
        return x.step
    # Narrowed from a bare `except:` which would also swallow
    # SystemExit/KeyboardInterrupt.
    except Exception:
        engine = get_engine.get_engine()
        if engine == "maya":
            return "MidMdl"
        elif engine == "nuke":
            return "Comp"
        elif engine == "houdini":
            return "Vfx"
        return None  # explicit: unknown engine has no default context
|
[
"276575758@qq.com"
] |
276575758@qq.com
|
6c670e880143af3d3df7f3fa48cd73def4f4535b
|
0ee88932af5b6ed088e471abcbd5f40fd9cbd688
|
/Other/eraser.py
|
4011853bf7baa80b3ee2c2398547b2997ebdd682
|
[] |
no_license
|
BjaouiAya/Cours-Python
|
48c740966f9814e1045035ffb902d14783d36194
|
14b306447e227ddc5cb04b8819f388ca9f91a1d6
|
refs/heads/master
| 2021-06-10T22:17:38.731030
| 2016-11-11T16:45:05
| 2016-11-11T16:45:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,965
|
py
|
#! /usr/bin/env python
# -*- coding:Utf8 -*-

"""Renaming class constructor"""

########################################
#### Classes and Methods imported : ####
########################################

import os
import re

#####################
#### Constants : ####
#####################

# Regex and folder parameter for music files before burning.
# Matches track-number prefixes such as "01. ", "01 - ", "01 " or "01-".
# (Raw strings so the \A anchor and escaped chars are explicit.)
REGEX_MP3 = re.compile(r"\A[0-9]{2}\. " r"|\A[0-9]{2} \- " r"|\A[0-9]{2}[ \-]")
FOLDER_MP3 = "/home/pampi/Output/cd_test/"


#######################################
#### Classes, Methods, Functions : ####
#######################################


class RenameMe:

    """
    In all files inside a directory (self.path) delete a part of the name
    according to regex and rename old file.
    To check another folder you only have to set self.path to new directory.
    Can be used to remove numbered songs like "10 song_nb.mp3".
    """

    def __init__(self, path="", regex=REGEX_MP3):
        self.path = path
        self.regex = regex

    def change_regex(self, source, regex_expr=r'', replacement="", mode="rb"):
        """
        Change file name according to regex replacement and path variable.

        :param source: full path of the file to rename
        :param replacement: text substituted for each regex match
        :param mode: mode used to open the file (kept for compatibility)
        """
        with open(source, mode) as f:
            old = f.name[len(self.path):]
            new = re.sub(self.regex, replacement, old)
            os.rename(f.name, self.path + new)
            # Bug fix: the original compared identity (`old is not new`),
            # which only worked because CPython's re.sub may return the
            # same object when nothing matched. Compare values instead.
            if old != new:
                print(old, "------->", new)
            else:
                print(old, " didn't change")

    def regex_loop(self):
        """
        Check all elements inside self.path directory and call
        change_regex if the element is a file.
        """
        for mp3 in os.listdir(self.path):
            if os.path.isfile(self.path + mp3):
                self.change_regex(self.path + mp3)


########################
#### Main Program : ####
########################

if __name__ == '__main__':
    cd_dir = RenameMe(FOLDER_MP3)
    cd_dir.regex_loop()
|
[
"jeremybois@rocketmail.com"
] |
jeremybois@rocketmail.com
|
dc0e963aa23abe50e37b51a150717f3e95b98ee4
|
e627d47d5102bd68c2012501aa120833b9271da7
|
/aws_api/core/admin.py
|
deadee44fdc7c2eff24954c469f2c470d31764f1
|
[] |
no_license
|
aayushgupta97/django-km
|
5ba275d1f85eaaf8bc052e47d2b6b6f1a5e4cf90
|
d34cd4f8637718044832d9baeecee86df5e821a5
|
refs/heads/master
| 2023-01-02T18:12:31.384634
| 2020-10-24T09:21:50
| 2020-10-24T09:21:50
| 298,391,389
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
from django.contrib import admin
from .models import AWSCredentials
# Register your models here.
admin.site.register(AWSCredentials)
|
[
"aayushgupta2097@gmail.com"
] |
aayushgupta2097@gmail.com
|
a5d02be7324103df8b24f058e3b8de41af441989
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02768/s686694566.py
|
553aafe96bef5565407dfea61c0ba091a9ef4718
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,026
|
py
|
n, a, b = list(map(int, input().split(' ')))

# binomial coefficients mod p — search "binomial coefficient mod"
mmm = 1000000000 + 7
# fac[i] = i! mod p, inv[i] = modular inverse of i, inv_fac[i] = (i!)^-1 mod p
fac = []
inv = []
inv_fac = []

def init(n):
    """Precompute factorials, inverses and inverse factorials mod mmm
    for 0 <= i < n, using the standard recurrence
    inv[i] = -(p // i) * inv[p % i] mod p."""
    fac.append(1)
    fac.append(1)
    inv.append(0)
    inv.append(1)
    inv_fac.append(1)
    inv_fac.append(1)
    for i in range(2, n):
        fac.append(fac[-1] * i % mmm)
        inv.append(mmm - inv[mmm%i] * (mmm // i) % mmm)
        inv_fac.append(inv_fac[-1] * inv[-1] % mmm)

def choice(a, b):
    """Return C(a, b) mod mmm; a may exceed the precomputed range, so the
    falling factorial a*(a-1)*...*(a-b+1) is built directly."""
    if a < b:
        return 0
    v = 1
    for i in range(b):
        v = (v * (a-i)) % mmm # passed by accident before; must be `a`, not `n` (noticed midway through E)
    return v * inv_fac[b]

init(int(2e5) + 1)
ans = pow(2, n, mmm) - 1 # v, e, mod
bunshi = 1
for i in range(a):
    bunshi = (bunshi * (n-i)) % mmm
# Subtract the subsets of forbidden sizes a and b from 2^n - 1.
ans -= choice(n, a)
ans -= choice(n, b)
print(ans % mmm)

'''
4, 1, 3 => 4c2 + 4c4 -> 6+1 = 7
4 + 6 + 4 + 1 - 4c1 - 4c2
1 1
11 2
121 4
1331 8
14641 16, 0が無いので-1, 大きい combination -> 二項係数 mod [検索]
'''
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
b27239657a5741c26fc636ccfde4758a19cdea07
|
4e8e9ed2a8fb69ed8b46066a8d967e4c107013a4
|
/main/auth/reddit.py
|
b4e81c58ba20f36a1a1a70b8a93f407dda2e0712
|
[
"MIT"
] |
permissive
|
welovecoding/vote4code
|
a57b3d155096d362dca47587ad2985b4201ef036
|
be265d553af35dc6c5322ecb3f7d5b3cf7691b75
|
refs/heads/master
| 2021-08-11T22:46:40.884030
| 2019-11-15T16:15:05
| 2019-11-15T16:15:05
| 90,191,931
| 14
| 0
|
MIT
| 2021-08-10T22:50:49
| 2017-05-03T20:46:02
|
Python
|
UTF-8
|
Python
| false
| false
| 2,613
|
py
|
# coding: utf-8
import base64
from flask_oauthlib import client
from werkzeug import urls
import flask
import auth
import config
import model
import util
from main import app
# OAuth2 client configuration for Reddit; client id/secret come from the
# master config stored in the datastore.
reddit_config = dict(
    access_token_method='POST',
    access_token_params={'grant_type': 'authorization_code'},
    access_token_url='https://ssl.reddit.com/api/v1/access_token',
    authorize_url='https://ssl.reddit.com/api/v1/authorize',
    base_url='https://oauth.reddit.com/api/v1/',
    consumer_key=model.Config.get_master_db().reddit_client_id,
    consumer_secret=model.Config.get_master_db().reddit_client_secret,
    # 'identity' scope is enough to read the signed-in account; the random
    # state value guards the callback against CSRF.
    request_token_params={'scope': 'identity', 'state': util.uuid()},
)

reddit = auth.create_oauth_app(reddit_config, 'reddit')
def reddit_handle_oauth2_response():
    """Exchange the authorization code for a Reddit access token.

    Replaces flask-oauthlib's default token exchange because Reddit's
    token endpoint requires HTTP Basic auth (client id:secret) and a
    custom User-Agent header.

    Returns:
        dict parsed from the token endpoint response.
    Raises:
        client.OAuthException: when Reddit answers with a non-2xx status.
    """
    access_args = {
        'code': flask.request.args.get('code'),
        'client_id': reddit.consumer_key,
        'redirect_uri': flask.session.get('%s_oauthredir' % reddit.name),
    }
    access_args.update(reddit.access_token_params)
    # Basic auth credentials: base64("client_id:client_secret").
    auth_header = 'Basic %s' % base64.b64encode(
        ('%s:%s' % (reddit.consumer_key, reddit.consumer_secret)).encode('latin1')
    ).strip().decode('latin1')
    response, content = reddit.http_request(
        reddit.expand_url(reddit.access_token_url),
        method=reddit.access_token_method,
        data=urls.url_encode(access_args),
        headers={
            'Authorization': auth_header,
            'User-Agent': config.USER_AGENT,
        },
    )
    data = client.parse_response(response, content)
    if response.code not in (200, 201):
        raise client.OAuthException(
            'Invalid response from %s' % reddit.name,
            type='invalid_response', data=data,
        )
    return data

# Monkey-patch the app so flask-oauthlib uses the Reddit-specific exchange.
reddit.handle_oauth2_response = reddit_handle_oauth2_response
@app.route('/api/auth/callback/reddit/')
def reddit_authorized():
    """OAuth2 callback: sign the Reddit user in, or bounce on denial."""
    response = reddit.authorized_response()
    if response is None or flask.request.args.get('error'):
        flask.flash('You denied the request to sign in.')
        return flask.redirect(util.get_next_url())

    # Store the token so @reddit.tokengetter can authenticate API calls.
    flask.session['oauth_token'] = (response['access_token'], '')
    me = reddit.request('me')
    user_db = retrieve_user_from_reddit(me.data)
    return auth.signin_user_db(user_db)
@reddit.tokengetter
def get_reddit_oauth_token():
    """Return the session-stored (token, secret) pair for flask-oauthlib."""
    return flask.session.get('oauth_token')
@app.route('/signin/reddit/')
def signin_reddit():
    """Kick off the Reddit OAuth2 authorization redirect."""
    return auth.signin_oauth(reddit)
def retrieve_user_from_reddit(response):
    """Find or create the local user record for a Reddit /me payload."""
    auth_id = 'reddit_%s' % response['id']
    existing = model.User.get_by('auth_ids', auth_id)
    if existing:
        return existing
    # First sign-in: create a user keyed on the Reddit account id.
    reddit_name = response['name']
    return auth.create_user_db(
        auth_id=auth_id,
        name=reddit_name,
        username=reddit_name,
    )
|
[
"lipiridis@gmail.com"
] |
lipiridis@gmail.com
|
9388ed6505d0881d0e65812e0362e9978ec0feb0
|
bb150497a05203a718fb3630941231be9e3b6a32
|
/framework/e2e/jit/test_conv3d_transpose_13.py
|
ed625c225a1cb9bf00eec92280375ae7f4468a6a
|
[] |
no_license
|
PaddlePaddle/PaddleTest
|
4fb3dec677f0f13f7f1003fd30df748bf0b5940d
|
bd3790ce72a2a26611b5eda3901651b5a809348f
|
refs/heads/develop
| 2023-09-06T04:23:39.181903
| 2023-09-04T11:17:50
| 2023-09-04T11:17:50
| 383,138,186
| 42
| 312
| null | 2023-09-13T11:13:35
| 2021-07-05T12:44:59
|
Python
|
UTF-8
|
Python
| false
| false
| 641
|
py
|
#!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
test jit cases
"""
import os
import sys

# Make the framework's parent directory and its utils/ importable when the
# tests are launched from this folder.
sys.path.append(os.path.abspath(os.path.dirname(os.getcwd())))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "utils"))
from utils.yaml_loader import YamlLoader
from jittrans import JitTrans

# All nn.* cases share one YAML catalogue, loaded once at import time.
yaml_path = os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "yaml", "nn.yml")
yml = YamlLoader(yaml_path)
def test_conv3d_transpose_13():
    """Run the jit round-trip for the conv3d_transpose_13 YAML case."""
    case_name = "conv3d_transpose_13"
    case = JitTrans(case=yml.get_case_info(case_name))
    case.jit_run()
|
[
"825276847@qq.com"
] |
825276847@qq.com
|
42bce4085193456af583fe4bd69f5b879e5fe92f
|
a39224fcd17ff2adb77fa643afed63bc3342a3f4
|
/setup.py
|
e8128dd9f0742381369839c237e8c5bf807d6ee0
|
[
"MIT"
] |
permissive
|
HemuManju/reaction-time-classification
|
ef9ddb241803a16b4b9411eaa8375e8b25fcc9e1
|
8d468516c0591359e082fb8bc5850f8e89e5a6e4
|
refs/heads/master
| 2023-01-14T09:10:04.142946
| 2021-09-22T19:49:32
| 2021-09-22T19:49:32
| 179,614,766
| 0
| 0
|
MIT
| 2022-12-27T15:36:26
| 2019-04-05T03:39:43
|
Python
|
UTF-8
|
Python
| false
| false
| 261
|
py
|
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='Classification of reaction time of an \
operator performing tele-operation',
author='Hemanth ',
license='MIT',
)
|
[
"hemanthm2277@gmail.com"
] |
hemanthm2277@gmail.com
|
a3bc969b5283c5f611660bb173b2d3769ae854c3
|
2a68b03c923119cc747c4ffcc244477be35134bb
|
/interviews/A/VO/wordLadderII.py
|
a00dffb9d9e8ec178fca30545a4ec9ff564ba284
|
[] |
no_license
|
QitaoXu/Lintcode
|
0bce9ae15fdd4af1cac376c0bea4465ae5ea6747
|
fe411a0590ada6a1a6ae1166c86c585416ac8cda
|
refs/heads/master
| 2020-04-24T20:53:27.258876
| 2019-09-24T23:54:59
| 2019-09-24T23:54:59
| 172,259,064
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,500
|
py
|
from collections import deque
class Solution:
    """
    @param: start: a string
    @param: end: a string
    @param: dict: a set of string
    @return: a list of lists of string
    """

    def findLadders(self, start, end, dict):
        # Word Ladder II: collect every shortest transformation sequence.
        # BFS from `end` labels each word with its distance to the target;
        # DFS from `start` then walks only along strictly decreasing labels,
        # so every emitted path is shortest.
        dict.add(start)
        dict.add(end)
        distance = {}
        self.bfs(end, start, dict, distance)
        results = []
        self.dfs(start, end, [start], dict, distance, results)
        return results

    def bfs(self, start, end, wordDict, distance):
        # Level-order traversal recording the first (shortest) distance
        # at which each dictionary word is reached.
        distance[start] = 0
        frontier = deque([start])
        while frontier:
            for _ in range(len(frontier)):
                word = frontier.popleft()
                for candidate in self.get_next_words(word):
                    if candidate in wordDict and candidate not in distance:
                        distance[candidate] = distance[word] + 1
                        frontier.append(candidate)

    def get_next_words(self, word):
        # All single-letter mutations of `word`, in alphabetical order
        # per position (order matters for deterministic output).
        variants = []
        for i, original in enumerate(word):
            prefix, suffix = word[:i], word[i + 1:]
            variants.extend(
                prefix + letter + suffix
                for letter in "abcdefghijklmnopqrstuvwxyz"
                if letter != original
            )
        return variants

    def dfs(self, curt, target, path, wordDict, distance, results):
        if curt == target:
            results.append(path.copy())
            return
        for nxt in self.get_next_words(curt):
            # Only step to neighbors exactly one unit closer to the target.
            if nxt in wordDict and distance[nxt] == distance[curt] - 1:
                path.append(nxt)
                self.dfs(nxt, target, path, wordDict, distance, results)
                path.pop()
|
[
"jeremyxuqitao@outlook.com"
] |
jeremyxuqitao@outlook.com
|
1e73bcb3091075ebead0ba1e029588dec88fead0
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/46/usersdata/98/17294/submittedfiles/funcoes1.py
|
94f0f1ec40294a695cc95ea950a44bec636efae5
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,592
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
def crescente (lista):
    """Return True when the list is strictly increasing.

    Preserves the original counter-based semantics: an empty list is
    reported as NOT increasing, a single-element list as increasing.
    """
    if len(lista) == 0:
        return False
    return all(lista[i] < lista[i + 1] for i in range(len(lista) - 1))
#escreva as demais funções
def decrescente (lista):
    """Return True when the list is strictly decreasing.

    Same edge cases as crescente: empty -> False, single element -> True.
    """
    if not lista:
        return False
    return all(atual > seguinte for atual, seguinte in zip(lista, lista[1:]))
def iguais (lista):
    """Return True when any two ADJACENT items are equal (False for [])."""
    return any(atual == seguinte for atual, seguinte in zip(lista, lista[1:]))
# main program
# NOTE(review): this script is Python 2 — input() evaluates what the user
# types, so `n` and the list items arrive as numbers. Under Python 3 these
# would need int(input(...)).
n=input('Digite a quantidade de intens nas listas: ')
a=[]
b=[]
c=[]
# Read n items into each of the three lists.
for i in range(0,n,1):
    a.append(input('Digite um valor para a lista a: '))
for i in range(0,n,1):
    b.append(input('Digite um valor para a lista b: '))
for i in range(0,n,1):
    c.append(input('Digite um valor para a lista c: '))
# For each list print S/N for: strictly increasing, strictly decreasing,
# any adjacent duplicates.
if crescente (a):
    print('S')
else:
    print('N')
if decrescente (a):
    print('S')
else:
    print('N')
if iguais (a):
    print('S')
else:
    print('N')
if crescente (b):
    print('S')
else:
    print('N')
if decrescente (b):
    print('S')
else:
    print('N')
if iguais (b):
    print('S')
else:
    print('N')
if crescente (c):
    print('S')
else:
    print('N')
if decrescente (c):
    print('S')
else:
    print('N')
if iguais (c):
    print('S')
else:
    print('N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
c470665fd971ef55fbcbf2c680c5254eb0e69e51
|
2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
|
/pardus/playground/memre/armv7l/obsolete/corp2/system/base/man-pages/actions.py
|
e17573a3dc5e34c142d651a5d3274ff1b0d7e803
|
[] |
no_license
|
aligulle1/kuller
|
bda0d59ce8400aa3c7ba9c7e19589f27313492f7
|
7f98de19be27d7a517fe19a37c814748f7e18ba6
|
refs/heads/master
| 2021-01-20T02:22:09.451356
| 2013-07-23T17:57:58
| 2013-07-23T17:57:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,461
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005-2010 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import crosstools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def install():
    """Install the man pages, then drop pages owned by other packages."""
    crosstools.rawInstall("DESTDIR=%s" % get.installDIR())
    crosstools.rawInstall("DESTDIR=%s -C ../man-pages-posix-2003-a" % get.installDIR())

    # These come from attr
    for page in ("flistxattr", "removexattr", "fgetxattr", "fsetxattr",
                 "lsetxattr", "lremovexattr", "listxattr", "getxattr",
                 "setxattr", "llistxattr", "fremovexattr", "lgetxattr"):
        pisitools.remove("/usr/share/man/man2/%s.2" % page)

    # These come from libcap
    for page in ("capget", "capset"):
        pisitools.remove("/usr/share/man/man2/%s.2" % page)

    # Comes from xorg-input
    pisitools.remove("/usr/share/man/man4/mouse.4")

    pisitools.dodoc("man-pages-*.Announce", "README")
|
[
"yusuf.aydemir@istanbul.com"
] |
yusuf.aydemir@istanbul.com
|
ca152810fc429ad3a3aa2281e6960067671ebd20
|
5f862a5f0116030adb4ce8d1f66c22e52eb5546f
|
/test/test_automl/test_smbo.py
|
7094e9c51ac478e5b9391d662872db4ddc3f1610
|
[
"BSD-3-Clause"
] |
permissive
|
IsoLATionzhw/auto-sklearn
|
9c1adbffe8f077471cbf9eb1c0a89d4ab9593220
|
a263efb49f7b7f597963bc1e787105ea7615ea75
|
refs/heads/master
| 2021-07-15T05:47:23.268566
| 2017-10-04T10:08:21
| 2017-10-04T10:08:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,894
|
py
|
import unittest
from autosklearn.smbo import AutoMLSMBO
from autosklearn.metrics import accuracy
from smac.facade.smac_facade import SMAC
from smac.scenario.scenario import Scenario
from smac.tae.execute_ta_run import StatusType
from ConfigSpace import ConfigurationSpace, UniformFloatHyperparameter, Configuration
class TestSMBO(unittest.TestCase):
    """Tests for AutoMLSMBO.choose_next."""

    def test_choose_next(self):
        """choose_next must refuse an empty runhistory and succeed once a
        successful run has been recorded."""
        configspace = ConfigurationSpace()
        configspace.add_hyperparameter(UniformFloatHyperparameter('a', 0, 1))
        configspace.add_hyperparameter(UniformFloatHyperparameter('b', 0, 1))

        dataset_name = 'foo'
        func_eval_time_limit = 15
        total_walltime_limit = 15
        memory_limit = 3072

        auto = AutoMLSMBO(
            config_space=None,
            dataset_name=dataset_name,
            backend=None,
            func_eval_time_limit=func_eval_time_limit,
            total_walltime_limit=total_walltime_limit,
            memory_limit=memory_limit,
            watcher=None,
            metric=accuracy
        )
        # The config space is injected after construction on purpose.
        auto.config_space = configspace
        scenario = Scenario({
            'cs': configspace,
            'cutoff_time': func_eval_time_limit,
            'wallclock_limit': total_walltime_limit,
            'memory_limit': memory_limit,
            'run_obj': 'quality',
        })
        smac = SMAC(scenario)

        # No evaluated configuration yet -> SMBO cannot propose anything.
        self.assertRaisesRegex(
            ValueError,
            'Cannot use SMBO algorithm on empty runhistory',
            auto.choose_next,
            smac
        )

        config = Configuration(configspace, values={'a': 0.1, 'b': 0.2})
        # TODO make sure the incumbent is always set?
        smac.solver.incumbent = config
        runhistory = smac.solver.runhistory
        runhistory.add(config=config, cost=0.5, time=0.5,
                       status=StatusType.SUCCESS)

        # With one SUCCESS entry, choose_next should no longer raise.
        auto.choose_next(smac)
|
[
"feurerm@informatik.uni-freiburg.de"
] |
feurerm@informatik.uni-freiburg.de
|
d5633a2b848b581a3a034619a61450208a8052e8
|
da1d21bb8d0760bfba61cd5d9800400f928868aa
|
/apps/common/utils/iterables.py
|
3d4d2470b42a38d43cc00ac6ac9d420b5e00c8f0
|
[] |
no_license
|
biznixcn/WR
|
28e6a5d10f53a0bfe70abc3a081c0bf5a5457596
|
5650fbe59f8dfef836503b8092080f06dd214c2c
|
refs/heads/master
| 2021-01-20T23:53:52.887225
| 2014-05-13T02:00:33
| 2014-05-13T02:00:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
py
|
# -*- coding: utf-8 -*-
from itertools import izip_longest
def grouper(n, iterable, padvalue=None):
    "grouper(3, 'abcdefg', 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')"
    # The same iterator object repeated n times makes izip_longest pull n
    # consecutive items per output tuple; padvalue fills the last group.
    # NOTE(review): izip_longest is Python 2 only; the Python 3 equivalent
    # is itertools.zip_longest.
    return izip_longest(*[iter(iterable)]*n, fillvalue=padvalue)
|
[
"mbc@Mathiass-MacBook-Pro.local"
] |
mbc@Mathiass-MacBook-Pro.local
|
52bece35aa3f449fd4068d45847eb3aca3b36443
|
411eff94020c192d5e5f657fa6012232ab1d051c
|
/game/src/coginvasion/ai/AIBaseGlobal.py
|
e02f38f0e5f171a4dab307e0fed79073eeab559e
|
[] |
no_license
|
xMakerx/cio-src
|
48c9efe7f9a1bbf619a4c95a4198aaace78b8491
|
60b2bdf2c4a24d506101fdab1f51752d0d1861f8
|
refs/heads/master
| 2023-02-14T03:12:51.042106
| 2021-01-15T14:02:10
| 2021-01-15T14:02:10
| 328,268,776
| 1
| 0
| null | 2021-01-15T15:15:35
| 2021-01-09T23:51:37
|
Python
|
UTF-8
|
Python
| false
| false
| 960
|
py
|
from AIBase import AIBase
from direct.directnotify.DirectNotifyGlobal import directNotify
from panda3d.core import RescaleNormalAttrib, NodePath, Notify

# Install the AI (server-side) base singletons into builtins so every module
# can reach them without imports, mirroring Panda3D's ShowBase convention.
__builtins__['base'] = AIBase()
__builtins__['ostream'] = Notify.out()
__builtins__['run'] = base.run
__builtins__['taskMgr'] = base.taskMgr
__builtins__['jobMgr'] = base.jobMgr
__builtins__['eventMgr'] = base.eventMgr
__builtins__['messenger'] = base.messenger
__builtins__['bboard'] = base.bboard
__builtins__['config'] = base.config
__builtins__['directNotify'] = directNotify

# Headless scene root: normals rescaled by default, backface culling on.
render = NodePath('render')
render.setAttrib(RescaleNormalAttrib.makeDefault())
render.setTwoSided(0)
__builtins__['render'] = render

from direct.showbase import Loader
base.loader = Loader.Loader(base)
__builtins__['loader'] = base.loader

directNotify.setDconfigLevels()

def inspect(anObject):
    """Open the Tk inspector panel on anObject.

    NOTE: installed into builtins below, shadowing the stdlib `inspect`
    module for code that relies on the builtin name.
    """
    from direct.tkpanels import Inspector
    Inspector.inspect(anObject)

__builtins__['inspect'] = inspect

taskMgr.finalInit()
|
[
"brianlach72@gmail.com"
] |
brianlach72@gmail.com
|
2c60324b3fa048f21d4ddb7e4a4d608d2f4ae9fe
|
a8fa4a499c44dce9a82e768edc82bdd193797128
|
/ScrapePlugins/Crunchyroll/Run.py
|
072c151bc74086a6fe1c380808eb0b7785a732e7
|
[] |
no_license
|
oliuz/MangaCMS
|
d8b2e44922955f6b9310fb6e189115f1985f2e93
|
7e2a710a56248261ab01686d3e586c36ce4a857d
|
refs/heads/master
| 2020-12-28T19:46:41.265347
| 2016-08-27T23:37:47
| 2016-08-27T23:37:47
| 67,316,457
| 1
| 0
| null | 2016-09-03T23:36:21
| 2016-09-03T23:36:21
| null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
from .DbLoader import DbLoader
from .ContentLoader import ContentLoader
import runStatus
import ScrapePlugins.RunBase
class Runner(ScrapePlugins.RunBase.ScraperBase):
	"""CrunchyRoll scraper entry point: refresh the DB listing, then fetch
	content, honouring the global run flag between the two stages."""

	loggerPath = "Main.Manga.CrunchyRoll.Run"
	pluginName = "CrunchyRoll"

	def _go(self):
		# Stage 1: refresh the item listing in the database.
		fl = DbLoader()
		fl.go()
		fl.closeDB()
		# Abort between stages when a global shutdown was requested.
		if not runStatus.run:
			return
		# Stage 2: download the actual content.
		cl = ContentLoader()
		cl.go()
		cl.closeDB()

if __name__ == "__main__":
	import utilities.testBase as tb

	with tb.testSetup():
		run = Runner()
		run.go()
|
[
"something@fake-url.com"
] |
something@fake-url.com
|
dd901b37ae78074d1b136ce7ad9d125fb38bfa9b
|
1f38af9bae11acbe20dd8f5057b374b9760e6659
|
/pyscf/geomopt/geometric_solver.py
|
6e63b860d5f970435b404aca3d39f5e5b97bdb6f
|
[
"Apache-2.0"
] |
permissive
|
highlight0112/pyscf
|
d36104ef727f593d46fbfd3e5d865c6cd0316d84
|
4afbd42bad3e72db5bb94d8cacf1d5de76537bdd
|
refs/heads/master
| 2020-03-25T01:16:59.927859
| 2019-03-06T01:11:59
| 2019-03-06T01:11:59
| 143,229,588
| 0
| 0
|
Apache-2.0
| 2019-03-06T01:12:00
| 2018-08-02T02:05:59
|
Python
|
UTF-8
|
Python
| false
| false
| 5,188
|
py
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Interface to geomeTRIC library https://github.com/leeping/geomeTRIC
'''
import tempfile
import numpy
import geometric
import geometric.molecule
#from geometric import molecule
from pyscf import lib
from pyscf.geomopt.addons import as_pyscf_method, dump_mol_geometry
from pyscf import __config__
INCLUDE_GHOST = getattr(__config__, 'geomopt_berny_solver_optimize_include_ghost', True)
ASSERT_CONV = getattr(__config__, 'geomopt_berny_solver_optimize_assert_convergence', True)
class PySCFEngine(geometric.engine.Engine):
def __init__(self, scanner):
molecule = geometric.molecule.Molecule()
mol = scanner.mol
molecule.elem = [mol.atom_symbol(i) for i in range(mol.natm)]
# Molecule is the geometry parser for a bunch of formats which use
# Angstrom for Cartesian coordinates by default.
molecule.xyzs = [mol.atom_coords()*lib.param.BOHR] # In Angstrom
super(PySCFEngine, self).__init__(molecule)
self.scanner = scanner
self.cycle = 0
def calc_new(self, coords, dirname):
scanner = self.scanner
mol = scanner.mol
lib.logger.note(scanner, '\nGeometry optimization step %d', self.cycle)
self.cycle += 1
# geomeTRIC handles coords and gradients in atomic unit
coords = coords.reshape(-1,3)
if scanner.verbose >= lib.logger.NOTE:
dump_mol_geometry(self.scanner.mol, coords*lib.param.BOHR)
mol.set_geom_(coords, unit='Bohr')
energy, gradient = scanner(mol)
if scanner.assert_convergence and not scanner.converged:
raise RuntimeError('Nuclear gradients of %s not converged' % scanner.base)
return energy, gradient.ravel()
def kernel(method, assert_convergence=ASSERT_CONV,
include_ghost=INCLUDE_GHOST, constraints=None, **kwargs):
'''Optimize geometry with geomeTRIC library for the given method.
To adjust the convergence threshold, parameters can be set in kwargs as
below:
.. code-block:: python
conv_params = { # They are default settings
'convergence_energy': 1e-6, # Eh
'convergence_grms': 3e-4, # Eh/Bohr
'convergence_gmax': 4.5e-4, # Eh/Bohr
'convergence_drms': 1.2e-3, # Angstrom
'convergence_dmax': 1.8e-3, # Angstrom
}
from pyscf import geometric_solver
geometric_solver.optimize(method, **conv_params)
'''
if isinstance(method, lib.GradScanner):
g_scanner = method
elif getattr(method, 'nuc_grad_method', None):
g_scanner = method.nuc_grad_method().as_scanner()
else:
raise NotImplementedError('Nuclear gradients of %s not available' % method)
if not include_ghost:
g_scanner.atmlst = numpy.where(method.mol.atom_charges() != 0)[0]
g_scanner.assert_convergence = assert_convergence
tmpf = tempfile.mktemp(dir=lib.param.TMPDIR)
m = geometric.optimize.run_optimizer(customengine=PySCFEngine(g_scanner),
input=tmpf, constraints=constraints,
**kwargs)
#FIXME: geomeTRIC library keeps running until converged. We need a function
# to terminate the program even not converged.
conv = True
#return conv, method.mol.copy().set_geom_(m.xyzs[-1], unit='Bohr')
return method.mol.copy().set_geom_(m.xyzs[-1], unit='Angstrom')
optimize = kernel
del(INCLUDE_GHOST, ASSERT_CONV)
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf, dft, cc, mp
mol = gto.M(atom='''
C 1.1879 -0.3829 0.0000
C 0.0000 0.5526 0.0000
O -1.1867 -0.2472 0.0000
H -1.9237 0.3850 0.0000
H 2.0985 0.2306 0.0000
H 1.1184 -1.0093 0.8869
H 1.1184 -1.0093 -0.8869
H -0.0227 1.1812 0.8852
H -0.0227 1.1812 -0.8852
''',
basis='3-21g')
mf = scf.RHF(mol)
conv_params = {
'convergence_energy': 1e-4, # Eh
'convergence_grms': 3e-3, # Eh/Bohr
'convergence_gmax': 4.5e-3, # Eh/Bohr
'convergence_drms': 1.2e-2, # Angstrom
'convergence_dmax': 1.8e-2, # Angstrom
}
mol1 = optimize(mf, **conv_params)
print(mf.kernel() - -153.219208484874)
print(scf.RHF(mol1).kernel() - -153.222680852335)
mf = dft.RKS(mol)
mf.xc = 'pbe,'
mf.conv_tol = 1e-7
mol1 = optimize(mf)
mymp2 = mp.MP2(scf.RHF(mol))
mol1 = optimize(mymp2)
mycc = cc.CCSD(scf.RHF(mol))
mol1 = optimize(mycc)
|
[
"osirpt.sun@gmail.com"
] |
osirpt.sun@gmail.com
|
030af696a1ebdd2d98a56cc9345bfe20f5099896
|
67ceb35320d3d02867350bc6d460ae391e0324e8
|
/practice/hard/0675-Cut_Trees_for_Golf_Event.py
|
e91dcd1441c759908435b4cb1b2766949823a97b
|
[] |
no_license
|
mattjp/leetcode
|
fb11cf6016aef46843eaf0b55314e88ccd87c91a
|
88ccd910dfdb0e6ca6a70fa2d37906c31f4b3d70
|
refs/heads/master
| 2023-01-22T20:40:48.104388
| 2022-12-26T22:03:02
| 2022-12-26T22:03:02
| 184,347,356
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,504
|
py
|
class Solution:
def cutOffTree(self, forest: List[List[int]]) -> int:
"""
0. while there are trees to cut down
1. walk to coordinates of next tree; cut down - do BFS dummy
2. if tree is unreachable - return
"""
from collections import deque
from sortedcontainers import SortedDict
def go_to_tree(grid, i, j, tree) -> int:
queue = deque([(i, j, 0)]) # (i, j, steps)
visited = set()
while queue:
row, col, steps = queue.popleft()
if (row, col) == tree:
return steps
for r,c in [(1,0), (-1,0), (0,1), (0,-1)]:
new_row, new_col = row+r, col+c
if (
new_row < len(grid) and
new_col < len(grid[0]) and
new_row > -1 and
new_col > -1 and
(new_row, new_col) not in visited and
grid[new_row][new_col] != 0
):
if (new_row, new_col) == tree:
return steps+1
visited.add((new_row, new_col))
queue.append((new_row, new_col, steps+1))
return None
trees = SortedDict()
for i in range(len(forest)):
for j in range(len(forest[i])):
if forest[i][j] > 1:
trees[forest[i][j]] = (i,j)
total_steps = 0
i = j = 0
for h,tree in trees.items():
steps = go_to_tree(forest, i, j, tree)
if steps == None:
return -1
total_steps += steps
i,j = tree
return total_steps
|
[
"noreply@github.com"
] |
mattjp.noreply@github.com
|
9514286077c40b1598552cdc24d2d2d31844d5fe
|
34ed92a9593746ccbcb1a02630be1370e8524f98
|
/lib/pints/pints/tests/test_mcmc_relativistic.py
|
1fb0e2abb531defd9c0d3b86dccf543b66d3e108
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
HOLL95/Cytochrome_SV
|
87b7a680ed59681230f79e1de617621680ea0fa0
|
d02b3469f3ee5a4c85d756053bc87651093abea1
|
refs/heads/master
| 2022-08-01T05:58:16.161510
| 2021-02-01T16:09:31
| 2021-02-01T16:09:31
| 249,424,867
| 0
| 0
| null | 2022-06-22T04:09:11
| 2020-03-23T12:29:29
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,142
|
py
|
#!/usr/bin/env python3
#
# Tests the basic methods of the Relativistic MCMC routine.
#
# This file is part of PINTS.
# Copyright (c) 2017-2019, University of Oxford.
# For licensing information, see the LICENSE file distributed with the PINTS
# software package.
#
import unittest
import numpy as np
import pints
import pints.toy
from shared import StreamCapture
class TestRelativisticMCMC(unittest.TestCase):
"""
Tests the basic methods of the Relativistic MCMC routine.
"""
def test_method(self):
# Create log pdf
log_pdf = pints.toy.GaussianLogPDF([5, 5], [[4, 1], [1, 3]])
# Create mcmc
x0 = np.array([2, 2])
sigma = [[3, 0], [0, 3]]
mcmc = pints.RelativisticMCMC(x0, sigma)
# This method needs sensitivities
self.assertTrue(mcmc.needs_sensitivities())
# Set number of leapfrog steps
ifrog = 10
mcmc.set_leapfrog_steps(ifrog)
# Perform short run
chain = []
for i in range(100 * ifrog):
x = mcmc.ask()
fx, gr = log_pdf.evaluateS1(x)
sample = mcmc.tell((fx, gr))
if i >= 50 * ifrog and sample is not None:
chain.append(sample)
if np.all(sample == x):
self.assertEqual(mcmc.current_log_pdf(), fx)
chain = np.array(chain)
self.assertEqual(chain.shape[0], 50)
self.assertEqual(chain.shape[1], len(x0))
def test_logging(self):
"""
Test logging includes name and custom fields.
"""
log_pdf = pints.toy.GaussianLogPDF([5, 5], [[4, 1], [1, 3]])
x0 = [np.array([2, 2]), np.array([8, 8])]
mcmc = pints.MCMCController(
log_pdf, 2, x0, method=pints.RelativisticMCMC)
mcmc.set_max_iterations(5)
with StreamCapture() as c:
mcmc.run()
text = c.text()
self.assertIn('Relativistic MCMC', text)
self.assertIn(' Accept.', text)
def test_flow(self):
log_pdf = pints.toy.GaussianLogPDF([5, 5], [[4, 1], [1, 3]])
x0 = np.array([2, 2])
# Test initial proposal is first point
mcmc = pints.RelativisticMCMC(x0)
self.assertTrue(np.all(mcmc.ask() == mcmc._x0))
# Repeated asks
self.assertRaises(RuntimeError, mcmc.ask)
# Tell without ask
mcmc = pints.RelativisticMCMC(x0)
self.assertRaises(RuntimeError, mcmc.tell, 0)
# Repeated tells should fail
x = mcmc.ask()
mcmc.tell(log_pdf.evaluateS1(x))
self.assertRaises(RuntimeError, mcmc.tell, log_pdf.evaluateS1(x))
# Bad starting point
mcmc = pints.RelativisticMCMC(x0)
mcmc.ask()
self.assertRaises(
ValueError, mcmc.tell, (float('-inf'), np.array([1, 1])))
def test_kinetic_energy(self):
"""
Tests kinetic energy values and derivatives
"""
x0 = np.array([2, 2])
model = pints.RelativisticMCMC(x0)
model.ask()
# kinetic energy
mc2 = 100.0
momentum = [1.0, 2.0]
squared = np.sum(np.array(momentum)**2)
ke1 = mc2 * (squared / mc2 + 1.0)**0.5
ke2 = model._kinetic_energy(momentum)
self.assertEqual(ke1, ke2)
c = 1.0
m = 1.0
mc2 = m * c**2
squared = np.sum(np.array(momentum)**2)
ke1 = mc2 * (squared / mc2 + 1.0)**0.5
model = pints.RelativisticMCMC(x0)
model.set_speed_of_light(c)
model.ask()
ke2 = model._kinetic_energy(momentum)
self.assertEqual(ke1, ke2)
def test_set_hyper_parameters(self):
"""
Tests the parameter interface for this sampler.
"""
x0 = np.array([2, 2])
mcmc = pints.RelativisticMCMC(x0)
# Test leapfrog parameters
n = mcmc.leapfrog_steps()
d = mcmc.leapfrog_step_size()
self.assertIsInstance(n, int)
self.assertTrue(len(d) == mcmc._n_parameters)
mcmc.set_leapfrog_steps(n + 1)
self.assertEqual(mcmc.leapfrog_steps(), n + 1)
self.assertRaises(ValueError, mcmc.set_leapfrog_steps, 0)
mcmc.set_leapfrog_step_size(0.5)
self.assertEqual(mcmc.leapfrog_step_size()[0], 0.5)
self.assertRaises(ValueError, mcmc.set_leapfrog_step_size, -1)
self.assertEqual(mcmc.n_hyper_parameters(), 4)
mcmc.set_hyper_parameters([n + 2, 2, 0.4, 2.3])
self.assertEqual(mcmc.leapfrog_steps(), n + 2)
self.assertEqual(mcmc.leapfrog_step_size()[0], 2)
self.assertEqual(mcmc.mass(), 0.4)
self.assertEqual(mcmc.speed_of_light(), 2.3)
mcmc.set_epsilon(0.4)
self.assertEqual(mcmc.epsilon(), 0.4)
self.assertRaises(ValueError, mcmc.set_epsilon, -0.1)
mcmc.set_leapfrog_step_size(1)
self.assertEqual(len(mcmc.scaled_epsilon()), 2)
self.assertEqual(mcmc.scaled_epsilon()[0], 0.4)
self.assertEqual(len(mcmc.divergent_iterations()), 0)
self.assertRaises(ValueError, mcmc.set_leapfrog_step_size, [1, 2, 3])
mcmc.set_leapfrog_step_size([1.5, 3])
self.assertEqual(mcmc.leapfrog_step_size()[0], 1.5)
self.assertEqual(mcmc.leapfrog_step_size()[1], 3)
c = 3.5
mcmc.set_speed_of_light(c)
self.assertEqual(mcmc.speed_of_light(), c)
self.assertRaises(ValueError, mcmc.set_speed_of_light, -0.1)
m = 2.9
mcmc.set_mass(m)
self.assertEqual(mcmc.mass(), m)
self.assertRaises(ValueError, mcmc.set_mass, -1.8)
self.assertRaises(ValueError, mcmc.set_mass, [1, 3])
def test_other_setters(self):
# Tests other setters and getters.
x0 = np.array([2, 2])
mcmc = pints.RelativisticMCMC(x0)
self.assertRaises(ValueError, mcmc.set_hamiltonian_threshold, -0.3)
threshold1 = mcmc.hamiltonian_threshold()
self.assertEqual(threshold1, 10**3)
threshold2 = 10
mcmc.set_hamiltonian_threshold(threshold2)
self.assertEqual(mcmc.hamiltonian_threshold(), threshold2)
if __name__ == '__main__':
unittest.main()
|
[
"henney@localhost.localdomain"
] |
henney@localhost.localdomain
|
9bff11e3a8633333af71b3cc5a2bc2241e5e3ec0
|
68c182cbb167ec6870ec1a301958e71ce8f9bcbb
|
/test/functional/p2p_permissions.py
|
d59b0acadab68cf792b145888a6743bf9ce0b48e
|
[
"MIT"
] |
permissive
|
megamcloud/umkoin
|
de10e9bbe0afbdc7210db56e41f823a0805283be
|
3e0d7a48f459ff09f0b9e02c3ed30563670009c8
|
refs/heads/master
| 2022-05-30T00:18:10.962521
| 2020-04-26T08:21:01
| 2020-04-26T08:21:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,694
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test p2p permission message.
Test that permissions are correctly calculated and applied
"""
from test_framework.address import ADDRESS_BCRT1_P2WSH_OP_TRUE
from test_framework.messages import (
CTransaction,
CTxInWitness,
FromHex,
)
from test_framework.mininode import P2PDataStore
from test_framework.script import (
CScript,
OP_TRUE,
)
from test_framework.test_node import ErrorMatch
from test_framework.test_framework import UmkoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
p2p_port,
wait_until,
)
class P2PPermissionsTests(UmkoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def run_test(self):
self.check_tx_relay()
self.checkpermission(
# default permissions (no specific permissions)
["-whitelist=127.0.0.1"],
["relay", "noban", "mempool"],
True)
self.checkpermission(
# relay permission removed (no specific permissions)
["-whitelist=127.0.0.1", "-whitelistrelay=0"],
["noban", "mempool"],
True)
self.checkpermission(
# forcerelay and relay permission added
# Legacy parameter interaction which set whitelistrelay to true
# if whitelistforcerelay is true
["-whitelist=127.0.0.1", "-whitelistforcerelay"],
["forcerelay", "relay", "noban", "mempool"],
True)
# Let's make sure permissions are merged correctly
# For this, we need to use whitebind instead of bind
# by modifying the configuration file.
ip_port = "127.0.0.1:{}".format(p2p_port(1))
self.replaceinconfig(1, "bind=127.0.0.1", "whitebind=bloomfilter,forcerelay@" + ip_port)
self.checkpermission(
["-whitelist=noban@127.0.0.1"],
# Check parameter interaction forcerelay should activate relay
["noban", "bloomfilter", "forcerelay", "relay"],
False)
self.replaceinconfig(1, "whitebind=bloomfilter,forcerelay@" + ip_port, "bind=127.0.0.1")
self.checkpermission(
# legacy whitelistrelay should be ignored
["-whitelist=noban,mempool@127.0.0.1", "-whitelistrelay"],
["noban", "mempool"],
False)
self.checkpermission(
# legacy whitelistforcerelay should be ignored
["-whitelist=noban,mempool@127.0.0.1", "-whitelistforcerelay"],
["noban", "mempool"],
False)
self.checkpermission(
# missing mempool permission to be considered legacy whitelisted
["-whitelist=noban@127.0.0.1"],
["noban"],
False)
self.checkpermission(
# all permission added
["-whitelist=all@127.0.0.1"],
["forcerelay", "noban", "mempool", "bloomfilter", "relay"],
False)
self.stop_node(1)
self.nodes[1].assert_start_raises_init_error(["-whitelist=oopsie@127.0.0.1"], "Invalid P2P permission", match=ErrorMatch.PARTIAL_REGEX)
self.nodes[1].assert_start_raises_init_error(["-whitelist=noban@127.0.0.1:230"], "Invalid netmask specified in", match=ErrorMatch.PARTIAL_REGEX)
self.nodes[1].assert_start_raises_init_error(["-whitebind=noban@127.0.0.1/10"], "Cannot resolve -whitebind address", match=ErrorMatch.PARTIAL_REGEX)
def check_tx_relay(self):
block_op_true = self.nodes[0].getblock(self.nodes[0].generatetoaddress(100, ADDRESS_BCRT1_P2WSH_OP_TRUE)[0])
self.sync_all()
self.log.debug("Create a connection from a whitelisted wallet that rebroadcasts raw txs")
# A python mininode is needed to send the raw transaction directly. If a full node was used, it could only
# rebroadcast via the inv-getdata mechanism. However, even for whitelisted connections, a full node would
# currently not request a txid that is already in the mempool.
self.restart_node(1, extra_args=["-whitelist=forcerelay@127.0.0.1"])
p2p_rebroadcast_wallet = self.nodes[1].add_p2p_connection(P2PDataStore())
self.log.debug("Send a tx from the wallet initially")
tx = FromHex(
CTransaction(),
self.nodes[0].createrawtransaction(
inputs=[{
'txid': block_op_true['tx'][0],
'vout': 0,
}], outputs=[{
ADDRESS_BCRT1_P2WSH_OP_TRUE: 5,
}]),
)
tx.wit.vtxinwit = [CTxInWitness()]
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
txid = tx.rehash()
self.log.debug("Wait until tx is in node[1]'s mempool")
p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1])
self.log.debug("Check that node[1] will send the tx to node[0] even though it is already in the mempool")
connect_nodes(self.nodes[1], 0)
with self.nodes[1].assert_debug_log(["Force relaying tx {} from whitelisted peer=0".format(txid)]):
p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1])
wait_until(lambda: txid in self.nodes[0].getrawmempool())
self.log.debug("Check that node[1] will not send an invalid tx to node[0]")
tx.vout[0].nValue += 1
txid = tx.rehash()
p2p_rebroadcast_wallet.send_txs_and_test(
[tx],
self.nodes[1],
success=False,
reject_reason='Not relaying non-mempool transaction {} from whitelisted peer=0'.format(txid),
)
def checkpermission(self, args, expectedPermissions, whitelisted):
self.restart_node(1, args)
connect_nodes(self.nodes[0], 1)
peerinfo = self.nodes[1].getpeerinfo()[0]
assert_equal(peerinfo['whitelisted'], whitelisted)
assert_equal(len(expectedPermissions), len(peerinfo['permissions']))
for p in expectedPermissions:
if not p in peerinfo['permissions']:
raise AssertionError("Expected permissions %r is not granted." % p)
def replaceinconfig(self, nodeid, old, new):
with open(self.nodes[nodeid].umkoinconf, encoding="utf8") as f:
newText = f.read().replace(old, new)
with open(self.nodes[nodeid].umkoinconf, 'w', encoding="utf8") as f:
f.write(newText)
if __name__ == '__main__':
P2PPermissionsTests().main()
|
[
"vmta@yahoo.com"
] |
vmta@yahoo.com
|
f7fcb553c02ffff0e4816ffbb847e1c926470726
|
b55f70755712b26688b80a8ba3806a4124fbcd11
|
/BinaryTree/lowest_common_ancestor.py
|
c5fac7a034bae171afb4a6a2bb03b6ce00e81aa2
|
[] |
no_license
|
Shanshan-IC/Algorithm_Python
|
a44703a0f33370c47e3e55af70aadeae08d5a1a5
|
ace23976d2f1f51141498c4c4ea6bca0039b233f
|
refs/heads/master
| 2021-09-08T07:16:59.576674
| 2018-03-08T09:24:01
| 2018-03-08T09:24:01
| 114,254,497
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 926
|
py
|
'''
两个值都在左边,则LCA在左边
两个值都在右边,则LCA在右边
一个在左一个在右,则说明LCA就是当前的root节点。
'''
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param: root: The root of the binary search tree.
@param: A: A TreeNode in a Binary.
@param: B: A TreeNode in a Binary.
@return: Return the least common ancestor(LCA) of the two nodes.
"""
def lowestCommonAncestor(self, root, A, B):
if not root or root is A or root is B:
return root
left = self.lowestCommonAncestor(root.left, A, B)
right = self.lowestCommonAncestor(root.right, A, B)
if left and right:
return root
if left:
return left
if right:
return right
return None
|
[
"shanshan.fu15@imperial.ac.uk"
] |
shanshan.fu15@imperial.ac.uk
|
b7787491c00166a9f9516646d4c2054fe8fe1245
|
557ca4eae50206ecb8b19639cab249cb2d376f30
|
/Chapter12/Ex12_3.py
|
96ad465cf0df4d21b32435eb806eb5946bf1eb75
|
[] |
no_license
|
philipdongfei/Think-python-2nd
|
781846f455155245e7e82900ea002f1cf490c43f
|
56e2355b8d5b34ffcee61b38fbfd200fd6d4ffaf
|
refs/heads/master
| 2021-01-09T19:57:49.658680
| 2020-03-13T06:32:11
| 2020-03-13T06:32:11
| 242,441,512
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 556
|
py
|
from Ex12_2 import *
def metathesis_pairs(d):
for anagrams in d.values():
for word1 in anagrams:
for word2 in anagrams:
if word1 < word2 and word_distance(word1, word2) == 2:
print(word1, word2)
def word_distance(word1, word2):
assert len(word1) == len(word2)
count = 0
for c1, c2 in zip(word1, word2):
if c1 != c2:
count += 1
return count
def main():
sets = all_anagrams('words.txt')
metathesis_pairs(sets)
if __name__ == '__main__':
main()
|
[
"philip.dongfei@gmail.com"
] |
philip.dongfei@gmail.com
|
5d03c3f6f21cf2a5cf29fc8907a7adfcc620d57f
|
2ad41c2a31618433568c86e63f68a3ef2918d55c
|
/tool/Modules/cfg_scripts.py
|
25ca07351b013433ffe1409fb953f7919d31d99b
|
[
"MIT"
] |
permissive
|
Iemnur/megaman-zx-traducao-ptbr
|
7cad0b7f7bcfd6692fe850f3c6c4e26ab2b90f63
|
f2710a06052384cf93d423681e9875c6cd424f06
|
refs/heads/master
| 2021-12-14T20:13:48.206022
| 2020-05-26T01:53:10
| 2020-05-26T01:53:10
| 82,298,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
'''
Created on 05/03/2013
@author: diego.hahn
'''
import os.path
import sys
python_path = os.path.dirname( sys.executable )
packages_path = os.path.join( python_path , r"Lib\site-packages" )
scripts_path = os.path.dirname( os.path.abspath( __file__ ) )
libs = [r"" , r"rhCompression", r"rhImages", r"pytable"]
with open( os.path.join( packages_path , "mylibs.pth" ), "w" ) as pth:
for lib in libs:
lib_path = os.path.join( scripts_path, lib )
if os.path.isdir( lib_path ):
print( ">>> Adding %s to pth file" % lib )
pth.write( "%s\n" % lib_path )
|
[
"hansen.hahn@gmail.com"
] |
hansen.hahn@gmail.com
|
f50377730a35ff7aa5b58fa06bcf47fcd71189ea
|
033da72a51c76e5510a06be93229a547a538cf28
|
/Data Engineer with Python Track/20. Introduction to Spark SQL in Python/Chapter/01. Pyspark SQL/02-Determine the column names of a table.py
|
a60646c8daa0abfe3fe390558fd3a17b52d8658c
|
[] |
no_license
|
ikhwan1366/Datacamp
|
d5dcd40c1bfeb04248977014260936b1fb1d3065
|
7738614eaebec446842d89177ae2bc30ab0f2551
|
refs/heads/master
| 2023-03-06T13:41:06.522721
| 2021-02-17T22:41:54
| 2021-02-17T22:41:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
'''
Determine the column names of a table
The video lesson showed how to run an SQL query. It also showed how to inspect the column names of a Spark table using SQL. This is important to know because in practice relational tables are typically provided without additional documentation giving the table schema.
Don't hesitate to refer to the slides available at the right of the console if you forget how something was done in the video.
Instructions
100 XP
- Use a DESCRIBE query to determine the names and types of the columns in the table schedule.
'''
# Inspect the columns in the table df
spark.sql("DESCRIBE schedule").show()
|
[
"surel.chandrapratama@gmail.com"
] |
surel.chandrapratama@gmail.com
|
0aac049c8263f7e956cea14027ed8e142b6344e5
|
0931696940fc79c4562c63db72c6cabfcb20884d
|
/Exercises/Regular_Expresions/furniture.py
|
8a02f7b386384bfbe0d6b9fe2cf832c3b0cd53d3
|
[] |
no_license
|
ivklisurova/SoftUni_Fundamentals_module
|
f847b9de9955c8c5bcc057bb38d57162addd6ad8
|
69242f94977c72005f04da78243a5113e79d6c33
|
refs/heads/master
| 2021-12-01T01:56:22.067928
| 2021-11-08T17:07:31
| 2021-11-08T17:07:31
| 253,281,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
import re
furniture = []
total_money = 0
while True:
order = input()
if order == 'Purchase':
break
pattern = r'>{2}([a-zA-z]+)<{2}(\d+[.]\d+|\d+)!(\d+)'
matches = re.findall(pattern, order)
for i in matches:
if len(i) == 0:
break
furniture.append(i[0])
total_money += float(i[1]) * float(i[2])
print('Bought furniture:')
[print(x) for x in furniture]
print(f'Total money spend: {total_money:.2f}')
|
[
"55747390+ivklisurova@users.noreply.github.com"
] |
55747390+ivklisurova@users.noreply.github.com
|
61f623bb2311199c6f90a06eafc6177b8604e7b1
|
a38856315e9a35f5eb0905a10eae6840741c468a
|
/stix_edh/cyber_profile.py
|
a1b921a93c6da80b797c6892d9627ef92aadfe44
|
[
"BSD-3-Clause"
] |
permissive
|
emmanvg/stix-edh
|
bbf4cebb908ad8a7c7dd8728ebfc67284f17365d
|
b426f9785339ab741bb9fb21d356b36193791afc
|
refs/heads/master
| 2020-04-11T23:35:44.934139
| 2018-08-01T16:16:15
| 2018-08-01T16:16:15
| 162,172,740
| 0
| 0
|
NOASSERTION
| 2018-12-17T18:22:40
| 2018-12-17T18:22:39
| null |
UTF-8
|
Python
| false
| false
| 4,224
|
py
|
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
# python-stix
import stix
from mixbox import fields
# internal bindings
from stix_edh.bindings import cyber_profile
class AccessPrivilege(stix.Entity):
_binding = cyber_profile
_binding_class = _binding.AccessPrivilegeType
_namespace = 'urn:edm:edh:cyber:v3'
privilege_action = fields.TypedField("privilegeAction", type_="stix_edh.common.NMTokens", key_name="privilege_action")
privilege_scope = fields.TypedField("privilegeScope", type_="stix_edh.common.NMTokens", multiple=True, key_name="privilege_scope")
rule_effect = fields.TypedField("ruleEffect", type_="stix_edh.common.NMTokens", key_name="rule_effect")
def __init__(self):
super(AccessPrivilege, self).__init__()
def add_privilege_scope(self, value):
from stix_edh import common
if not value:
return
nmtokens = common.NMTokens(value)
self.privilege_scope.append(nmtokens)
class ResourceDisposition(stix.Entity):
_binding = cyber_profile
_binding_class = _binding.ResourceDispositionType
_namespace = 'urn:edm:edh:cyber:v3'
disposition_date = fields.DateField("dispositionDate", key_name="disposition_date")
disposition_process = fields.TypedField("dispositionProcess", type_="stix_edh.common.NMTokens", key_name="disposition_process")
def __init__(self):
super(ResourceDisposition, self).__init__()
class OriginalClassification(stix.Entity):
_binding = cyber_profile
_binding_class = _binding.OriginalClassificationType
_namespace = 'urn:edm:edh:cyber:v3'
classified_by = fields.TypedField("classifiedBy", type_="stix_edh.common.NMTokens", key_name="classified_by")
classified_on = fields.DateField("classifiedOn", key_name="classified_on")
classification_reason = fields.TypedField("classificationReason", type_="stix_edh.common.NMTokens", key_name="classification_reason")
compilation_reason = fields.TypedField("compilationReason", type_="stix_edh.common.NMTokens", key_name="compilation_reason")
def __init__(self):
super(OriginalClassification, self).__init__()
class DerivativeClassification(stix.Entity):
_binding = cyber_profile
_binding_class = _binding.DerivativeClassificationType
_namespace = 'urn:edm:edh:cyber:v3'
classified_by = fields.TypedField("classifiedBy", type_="stix_edh.common.NMTokens", key_name="classified_by")
classified_on = fields.DateField("classifiedOn", key_name="classified_on")
derived_from = fields.TypedField("derivedFrom", type_="stix_edh.common.NMTokens", key_name="derived_from")
def __init__(self):
super(DerivativeClassification, self).__init__()
class FurtherSharing(stix.Entity):
_binding = cyber_profile
_binding_class = _binding.FurtherSharingType
_namespace = "urn:edm:edh:cyber:v3"
rule_effect = fields.TypedField("ruleEffect", key_name="rule_effect")
sharing_scope = fields.TypedField("sharingScope", type_="stix_edh.common.NMTokens", key_name="sharing_scope")
def __init__(self):
super(FurtherSharing, self).__init__()
class Declassification(stix.Entity):
_binding = cyber_profile
_binding_class = _binding.DeclassificationType
_namespace = 'urn:edm:edh:cyber:v3'
declass_exemption = fields.TypedField("declassExemption", type_="stix_edh.common.NMTokens", key_name="declass_exemption")
declass_period = fields.IntegerField("declassPeriod", key_name="declass_period")
declass_date = fields.DateField("declassDate", key_name="declass_date")
declass_event = fields.TypedField("declassEvent", type_="stix_edh.common.NMTokens", key_name="declass_event")
def __init__(self):
super(Declassification, self).__init__()
class PublicRelease(stix.Entity):
_binding = cyber_profile
_binding_class = _binding.PublicReleaseType
_namespace = 'urn:edm:edh:cyber:v3'
released_by = fields.TypedField("releasedBy", type_="stix_edh.common.NMTokens", key_name="released_by")
released_on = fields.DateField("releasedOn", key_name="released_on")
def __init__(self):
super(PublicRelease, self).__init__()
|
[
"gback@mitre.org"
] |
gback@mitre.org
|
396d3be1f2a5e259471ee0be5f9b6850177f96e3
|
b648a0ff402d23a6432643879b0b81ebe0bc9685
|
/benchmark/tslintbasic/thrift/run.py
|
6b63c1b1ffc1c7036f1224f0530a63f3d6a08ca5
|
[
"Apache-2.0"
] |
permissive
|
jviotti/binary-json-size-benchmark
|
4712faca2724d47d23efef241983ce875dc71cee
|
165b577884ef366348bf48042fddf54aacfe647a
|
refs/heads/main
| 2023-04-18T01:40:26.141995
| 2022-12-19T13:25:35
| 2022-12-19T13:25:35
| 337,583,132
| 21
| 1
|
Apache-2.0
| 2022-12-17T21:53:56
| 2021-02-10T01:18:05
|
C++
|
UTF-8
|
Python
| false
| false
| 581
|
py
|
def encode(json, schema):
payload = schema.Main()
payload.rules = schema.Rules()
payload.rules.orderedImports = schema.OrderedImports()
payload.rules.orderedImports.options = schema.Options()
payload.rules.orderedImports.options.groupedImports = \
json['rules']['ordered-imports']['options']['grouped-imports']
return payload
def decode(payload):
return {
'rules': {
'ordered-imports': {
'options': {
'grouped-imports': payload.rules.orderedImports.options.groupedImports
}
}
}
}
|
[
"jv@jviotti.com"
] |
jv@jviotti.com
|
dae76b5a56cfbe512236e47e5b204fddff746a73
|
4e382ae46cf997ea2dbdfcfa463a57d3e0e9ad97
|
/sols/gray_code.py
|
490b72b7d1576b6786b190e757dfced57e83460c
|
[] |
no_license
|
hayeonk/leetcode
|
5136824838eb17ed2e4b7004301ba5bb1037082f
|
6485f8f9b5aa198e96fbb800b058d9283a28e4e2
|
refs/heads/master
| 2020-04-28T03:37:16.800519
| 2019-06-01T14:34:45
| 2019-06-01T14:34:45
| 174,943,756
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
class Solution(object):
def grayCode(self, n):
def getCode(n):
if n == 0:
return ["0"]
rest = getCode(n-1)
reverse = reversed(rest)
ret = [x + "0" for x in rest] + [x + "1" for x in reverse]
return ret
ret = getCode(n)
ret = [int(x, 2) for x in ret]
return ret
|
[
"31617695+hayeonk@users.noreply.github.com"
] |
31617695+hayeonk@users.noreply.github.com
|
df0b59323ca9a839dcf6b4c11f1da303ae237fac
|
ecd2aa3d12a5375498c88cfaf540e6e601b613b3
|
/Facebook/Pro105. Construct Binary Tree from Preorder and Inorder Traversal.py
|
a39da533bff18e1cca864459d11a600e0252ce83
|
[] |
no_license
|
YoyinZyc/Leetcode_Python
|
abd5d90f874af5cd05dbed87f76885a1ca480173
|
9eb44afa4233fdedc2e5c72be0fdf54b25d1c45c
|
refs/heads/master
| 2021-09-05T17:08:31.937689
| 2018-01-29T21:57:44
| 2018-01-29T21:57:44
| 103,157,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 642
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def buildTree(self, preorder, inorder):
"""
:type preorder: List[int]
:type inorder: List[int]
:rtype: TreeNode
"""
if not preorder:
return None
root = TreeNode(preorder[0])
index = inorder.index(preorder[0])
root.left = self.buildTree(preorder[1:index + 1], inorder[:index])
root.right = self.buildTree(preorder[index + 1:], inorder[index + 1:])
return root
|
[
"yoyinzyc@gmail.com"
] |
yoyinzyc@gmail.com
|
184e8e9933bf4850ac425bc2697124c4c5776379
|
03c9cd5bd96874d6117fb17c37ac4d7450c15933
|
/Opencv-Python/wechat/autojump.py
|
540e6d96cb2fd16283ba2e25403877731481716d
|
[] |
no_license
|
atiger808/opencv-tutorial
|
603de35e97679d6beae104298ae355edfdd9036a
|
2ea9bb3818284fb75f85697e36fde37b6479d1c6
|
refs/heads/master
| 2020-05-29T23:16:30.462022
| 2019-11-05T10:08:20
| 2019-11-05T10:08:20
| 189,425,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,751
|
py
|
# _*_ coding: utf-8 _*_
# @Time : 2019/4/3 16:45
# @Author : Ole211
# @Site :
# @File : autojump.py
# @Software : PyCharm
import cv2
import numpy as np
import os
import time
import subprocess
import math
# os.chdir('d:\\img\\')
press_coefficient = 1.35
def get_center_coord(img):
region_lower = int(img.shape[0]*0.3)
region_upper = int(img.shape[0]*0.7)
region = img[region_lower:region_upper]
hsv_img = cv2.cvtColor(region, cv2.COLOR_BGR2HSV)
color_lower = np.array([105, 25, 45])
color_upper = np.array([135, 125, 130])
color_mask = cv2.inRange(hsv_img, color_lower, color_upper)
_, contours, hierarchy = cv2.findContours(color_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if len(contours)>0:
max_contour = max(contours, key=cv2.contourArea)
rect = cv2.boundingRect(max_contour)
x, y, w, h = rect
cv2.rectangle(region, (x, y), (x+w, y+h), (0, 255, 0), 3)
center_coord = (x+int(w/2), y+h-20)
cv2.circle(region, center_coord, 8, (0, 0, 255), -1)
cv2.drawContours(region, max_contour, -1, (0, 0, 255), 3)
# region = cv2.resize(region, (400, 800))
# cv2.imshow('color_mask', color_mask)
# cv2.imshow('region', region)
# cv2.waitKey()
# cv2.destroyAllWindows()
return hsv_img, color_mask, center_coord
def get_box_center(img):
    """Estimate the centre of the next target box via Canny edges.

    Scans the middle band of the screenshot for the topmost edge pixel
    (the box's apex) and assumes the box is ~200 px tall.

    :param img: BGR screenshot array (as returned by ``cv2.imread``).
    :return: tuple (canny_img, annotated region, (x, y) box centre in
        region coordinates).
    """
    # Same middle 40% band used by get_center_coord.
    region_lower = int(img.shape[0] * 0.3)
    region_upper = int(img.shape[0] * 0.7)
    region = img[region_lower:region_upper]
    gray_img = cv2.cvtColor(region, cv2.COLOR_BGR2GRAY)
    # cv2.imshow('gray', gray_img)
    canny_img = cv2.Canny(gray_img, 75, 150)
    # First row (within the top 400 rows) containing any edge pixel = apex
    # of the target box; x is the mean column of edge pixels on that row.
    # NOTE(review): raises IndexError if no edges are found — assumes the
    # game screen always has a box in view; confirm.
    y_top = np.nonzero([max(row) for row in canny_img[:400]])[0][0]
    x_top = int(np.mean(np.nonzero(canny_img[y_top])))
    # Hard-coded 200 px box height — presumably tuned for one device
    # resolution; the exact bottom-edge scan below was abandoned.
    y_bottom = y_top + 200
    # for row in range(y_bottom, 768):
    #     if canny_img[row, x_top] != 0:
    #         break
    box_center_coord = (x_top, (y_top + y_bottom)//2)
    cv2.circle(region, box_center_coord, 8, (0, 0, 255), -1)
    return canny_img, region, box_center_coord
def pullScreenshot():
    """Capture the phone screen via adb and pull it to the working dir."""
    for command in ('adb shell screencap -p /sdcard/autojump.png',
                    'adb pull /sdcard/autojump.png .'):
        os.system(command)
def jump(distance):
    """Long-press the screen so the piece jumps *distance* pixels.

    Press duration scales linearly with distance via the module-level
    ``press_coefficient`` constant.

    :param distance: straight-line pixel distance from piece to target.
    """
    # Was a hard-coded 1.35 duplicating press_coefficient defined at the
    # top of the module; use the constant so tuning happens in one place.
    press_time = int(distance * press_coefficient)
    # A swipe with identical start/end points is a long press of
    # press_time milliseconds.
    cmd = 'adb shell input swipe 320 410 320 410 ' + str(press_time)
    print(cmd)
    os.system(cmd)
def beginJump():
    """Main loop: screenshot -> locate piece and target -> jump -> repeat.

    Runs forever; requires adb in PATH and the jump game on screen.
    """
    while True:
        pullScreenshot()
        time.sleep(2)  # give the adb pull time to finish writing the file
        img = cv2.imread('autojump.png')
        # fixed: names were swapped relative to get_center_coord's return
        # order (hsv_img, color_mask, center_coord); harmless before only
        # because both values fed commented-out debug windows.
        hsv_img, color_mask, center_coord = get_center_coord(img)
        canny_img, region, box_center_coord = get_box_center(img)
        distance = math.sqrt((box_center_coord[0] - center_coord[0]) ** 2 + (box_center_coord[1] - center_coord[1]) ** 2)
        # Overlay debug info (press time, distance, connecting line) and
        # save the annotated frame for inspection.
        text = 'press time: %.3f ms' % (max(1.35 * distance, 200))
        cv2.putText(region, text, (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
        text0 = 'distance: %.3f pixels' % (distance)
        cv2.putText(region, text0, (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
        cv2.line(region, center_coord, box_center_coord, (0, 0, 255), 3)
        print('棋子坐标:', center_coord)
        print('盒子坐标:', box_center_coord)
        print('距离:', distance)
        cv2.imwrite('region.png', region)
        jump(distance)
        time.sleep(0.2)
# Script entry point: start the endless screenshot/jump loop.
# (Requires adb in PATH and a connected device showing the game.)
if __name__ == '__main__':
    beginJump()
    # pullScreenshot()
# if __name__ == '__main__':
#     get_center_coord()
|
[
"atiger0614@163.com"
] |
atiger0614@163.com
|
7fb1bbcd1838101abf13096c7d71cc1156bf7060
|
e3f2a0acc79f1891b93553ee6a95396edeb6fd60
|
/setup.py
|
c9c0390cc3d9d040b7b7fc777d3544fa322b0332
|
[
"Apache-2.0"
] |
permissive
|
imaginal/openprocurement.search_plugins
|
5bd23b7e17365abba9f7f33da7a5c3263c440453
|
a32a5e1b54c9b02fe24fae93e2e78632f77be82a
|
refs/heads/master
| 2020-04-11T23:30:41.704868
| 2018-12-17T18:31:07
| 2018-12-17T18:31:07
| 162,170,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 906
|
py
|
from setuptools import setup, find_packages

# Read the long description up front with a context manager so the file
# handle is closed deterministically (the original left an open file to
# the garbage collector).
with open("README.md") as readme:
    long_description = readme.read()

setup(
    name='openprocurement.search_plugins',
    version='0.1',  # NOQA
    description="Plugin for OpenProcurement Search",
    long_description=long_description,
    # Get more strings from
    # http://pypi.python.org/pypi?:action=list_classifiers
    classifiers=[
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
    ],
    keywords='prozorro search plugin',
    author='Volodymyr Flonts',
    author_email='flyonts@gmail.com',
    license='Apache License 2.0',
    url='https://github.com/imaginal/openprocurement.search_plugins',
    namespace_packages=['openprocurement'],
    packages=find_packages(),
    package_data={'': ['*.md', '*.txt']},
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        'openprocurement.search'
    ],
    entry_points={
    }
)
|
[
"flyonts@gmail.com"
] |
flyonts@gmail.com
|
a9597573158cd06dab3973ee6e0512978f90458b
|
229d71da5bb9fcdc34ab9c3a4ff9f75ca7ea7a19
|
/bitly_app/urls.py
|
81c9ebb0845bfee3a27ec09d812bed36ced4f7b6
|
[] |
no_license
|
Cunarefa/Convert
|
8fd1ba5aae46915b1cde31a682d6ddd1b83bbeef
|
93d366656c51affc2d17c685fcd6c93345180a49
|
refs/heads/master
| 2023-08-29T11:42:02.784981
| 2021-09-18T10:28:44
| 2021-09-18T10:28:44
| 407,829,331
| 0
| 0
| null | 2021-09-20T18:39:30
| 2021-09-18T10:30:09
|
Python
|
UTF-8
|
Python
| false
| false
| 133
|
py
|
from django.urls import path
from .views import ConvertView
# URL routes for the bitly_app: "long" dispatches to ConvertView.
urlpatterns = [
    path('long', ConvertView.as_view(), name='long'),
]
|
[
"yevgen.yelik@gmail.com"
] |
yevgen.yelik@gmail.com
|
8fd914c3d126ba141d422b9c77ea8058d4bed139
|
ffe606c85de9009d2c15356f82daa524c343b925
|
/11.28.cleanup/s2rarecats_prep.py
|
2f94abfa78be1fcaf7c2caf0858efeb8b810ee3c
|
[] |
no_license
|
jbinkleyj/story_writer
|
d88ff7e3360fb8afd12445d1cb237788636b3083
|
dc5106a35f5fbce72f8cf0801c0ad4cbc0c9f12f
|
refs/heads/master
| 2020-07-09T15:54:02.492373
| 2017-12-16T07:26:59
| 2017-12-16T07:26:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
from preprocess import *
from arguments import s2s2catsrare as parseParams
# Entry point: parse CLI args, preprocess the dataset, and persist it.
if __name__=="__main__":
    # Command-line arguments for the s2s rare-categories variant.
    args = parseParams()
    # NOTE(review): load_data comes from the project's preprocess module;
    # presumably args.datafile is a writable output path — confirm.
    DS = load_data(args)
    torch.save(DS,args.datafile)
|
[
"kedzior@uw.edu"
] |
kedzior@uw.edu
|
7cff65e982c2b32cab03f10e594c8aaa54be7c8a
|
3529ecaa44a53172094ba13498097057c8972723
|
/Questiondir/634.find-the-derangement-of-an-array/634.find-the-derangement-of-an-array_108022165.py
|
4478461c59f6bddeee62b60691de01ec47975b2e
|
[] |
no_license
|
cczhong11/Leetcode-contest-code-downloader
|
0681f0f8c9e8edd5371fd8d0a1d37dcc368566b6
|
db64a67869aae4f0e55e78b65a7e04f5bc2e671c
|
refs/heads/master
| 2021-09-07T15:36:38.892742
| 2018-02-25T04:15:17
| 2018-02-25T04:15:17
| 118,612,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
class Solution(object):
    def findDerangement(self, n):
        """Count derangements of n elements, modulo 10**9 + 7.

        A derangement is a permutation with no fixed point. Uses the
        recurrence D(i) = (i - 1) * (D(i-1) + D(i-2)) with D(0) = 1 and
        D(1) = 0, keeping only the last two values (O(1) space instead of
        the original O(n) table).

        Fixes: the original used Python 2's ``xrange`` (NameError on
        Python 3) and crashed with IndexError for n == 0.

        :type n: int
        :rtype: int
        """
        MOD = 1000000007
        if n <= 0:
            # D(0) = 1 (the empty permutation); no derangements for n < 0.
            return 1 if n == 0 else 0
        prev2, prev1 = 1, 0  # D(0), D(1)
        for i in range(2, n + 1):
            prev2, prev1 = prev1, (i - 1) * (prev1 + prev2) % MOD
        return prev1
|
[
"tczhong24@gmail.com"
] |
tczhong24@gmail.com
|
2a5e7f5f38b91f42700324e71b0bfacd75169326
|
49c5c461c9805be68a318810e2ebb3381643ed59
|
/linkedlist/remove-loop.py
|
c5cee91e4abc8333b6e0eceee46a40a1b1d26633
|
[] |
no_license
|
pkdism/must-do-coding
|
10a3ef756d24fec451d8ed09bfc22335635a6b13
|
ccff354eebd9b2434085528922eb3bc13715530e
|
refs/heads/master
| 2020-09-11T00:54:08.078087
| 2019-11-27T11:15:53
| 2019-11-27T11:15:53
| 221,886,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
def removeTheLoop(head):
    """Break the cycle in a singly linked list, if one exists.

    Walks the list remembering every node seen; the first node visited
    twice is the loop entry, so the previous node's ``next`` is cleared.
    Lists without a loop (including an empty list) are left unchanged.
    """
    visited = set()
    previous, current = None, head
    while current is not None:
        if current in visited:
            previous.next = None
            return
        visited.add(current)
        previous, current = current, current.next
|
[
"pawan.dwivedi94@gmail.com"
] |
pawan.dwivedi94@gmail.com
|
7f9dd1f1f7a9135eb1ac3be360d855691bec346d
|
e45f1f9f3777d625c7da773f8e55589ded528711
|
/pic_scrapy/pic/spiders/PhotoSpider.py
|
bfbdf2f3503102ed8587cee37b600d0b0a2d4301
|
[] |
no_license
|
631068264/learn_crawler
|
da973d758001b52c61aa0bb4dfc78b59a88304e4
|
65ac2cef7b42b0dce5fb726daa819a6ebc8ffafa
|
refs/heads/master
| 2022-11-15T13:06:55.765849
| 2022-10-31T07:07:31
| 2022-10-31T07:07:31
| 77,364,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,504
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author = 'wyx'
@time = 16/10/24 15:01
@annotation = ''
"""
from urlparse import urljoin
import scrapy
from pic.items import PhotoItem
class PhotoSpider(scrapy.Spider):
    """Crawl photo galleries from www.610hu.com.

    Flow: category index -> per-category pagination -> chapter (gallery)
    listings -> individual photo pages, yielding one PhotoItem per image.
    """
    start_urls = ["https://www.610hu.com/htm/girl.htm"]
    name = "photo"
    # Base used to absolutize the relative hrefs found in the pages.
    domain = "https://www.610hu.com"
    def parse(self, response):
        """Follow each category cell on the index page."""
        tds = response.css("table td")
        for td in tds:
            href = urljoin(self.domain, td.xpath("a/@href").extract_first())
            # Category "name" is the stem of the cell's .gif icon filename.
            dic = td.css("img").xpath("@src").re_first(r".*/(.*?)\.gif")
            yield scrapy.Request(href, callback=self.parse_page, meta={"photo_name": dic})
    def parse_page(self, response):
        """Request the category's paginated listing pages."""
        # Extracts the page count from text like "1/7".
        # NOTE(review): r"/(\d?)" captures at most ONE digit, and the loop
        # below iterates the *characters* of that string — so only page N
        # is requested, not pages 1..N. Looks unintended; confirm against
        # the site's pagination markup.
        page_num = response.css(".pages strong").xpath("text()").re_first(r"/(\d?)")
        if page_num:
            for page in page_num:
                yield scrapy.Request(urljoin(response.url, ("%s.htm" % page)), callback=self.parse_charter,
                                     meta={"photo_name": response.meta["photo_name"]})
    def parse_charter(self, response):
        """Queue one request per chapter (gallery), forwarding metadata."""
        lis = response.css("ul.movieList li")
        links = []
        for li in lis:
            charter_link = urljoin(self.domain, li.xpath("a/@href").extract_first())
            charter_name = li.css("h3").xpath("text()").extract_first()
            charter_time = li.css("span").xpath("text()").extract_first()
            links.append(scrapy.Request(charter_link,
                                        callback=self.parse_detail,
                                        meta={
                                            "photo_name": response.meta["photo_name"],
                                            "charter_link": charter_link,
                                            "charter_name": charter_name,
                                            "charter_time": charter_time,
                                        }))
        return links
    def parse_detail(self, response):
        """Emit one PhotoItem per <img> found on the picture page."""
        imgs = response.css(".picContent img")
        items = []
        for img in imgs:
            src = img.xpath("@src").extract_first()
            item = PhotoItem({
                "photo_name": response.meta["photo_name"],
                "charter_link": response.meta["charter_link"],
                "charter_name": response.meta["charter_name"],
                "charter_time": response.meta["charter_time"],
                "img_url": src,
            })
            items.append(item)
        return items
|
[
"wyx@wuyuxideMacBook-Pro.local"
] |
wyx@wuyuxideMacBook-Pro.local
|
5fbe59973b26282635a73228a47461382edec61a
|
756cfeed032f7d206cdbe4409f2c052bf3e44fe1
|
/Tkinter/Lecture_4.py
|
0ce5a5c74a62fe228e5bc2c3eee2785792f67700
|
[] |
no_license
|
Pyk017/Python
|
2e470a10f95aac8bb049edf13c6a825ceab68ea5
|
57fb48356921cc6766675620b32e4099ad371bc6
|
refs/heads/master
| 2021-07-05T00:16:22.365638
| 2020-12-27T08:25:10
| 2020-12-27T08:25:10
| 213,291,579
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
from tkinter import *

# Minimal Tkinter demo: a labelled entry plus a button that greets the
# typed name, placing each new greeting two grid rows further down.
root = Tk()
label1 = Label(root, text="Enter Your Name = ")
label1.grid(row=0, column=0)
# label1.pack()
entry = Entry(root, width=25, borderwidth=5)
entry.grid(row=0, column=1)
# entry.pack()
i = 0  # last grid row used for greetings; button occupies row 1
def my_click():
    """Append a 'Hello <name>' label below the previous ones."""
    global i
    label = Label(root, text="Hello " + entry.get())
    i += 2
    label.grid(row=i, column=0)
button = Button(root, text="Click Me!", command=my_click)
button.grid(row=1, column=0)
root.mainloop()
|
[
"prakharkumar506978@gmail.com"
] |
prakharkumar506978@gmail.com
|
0cefd1e9333d717207e4845c00ae84fb4478d05e
|
08a68e32dc80f99a37a30ddbbf943337546cc3d5
|
/.history/count/views_20200419210055.py
|
c0e0fbfb2b96a0104e0cd52bbfbce5cc12136149
|
[] |
no_license
|
Space20001/word-count-project
|
dff1b4b44d2f7230070eef0d95dd968b655d92f7
|
795b5e8ad5c59109e96bf7a8e9192efaefa7770e
|
refs/heads/master
| 2022-04-20T17:54:05.511449
| 2020-04-20T15:25:46
| 2020-04-20T15:25:46
| 257,327,368
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 583
|
py
|
from django.shortcuts import render
# Create your views here.
def home(request):
    """Render the word-count landing page with an empty context."""
    return render(request, 'count/home.html', {})
def counted(request):
    """Count the words submitted from the home page form.

    Reads the raw text from the ``fulltext`` GET parameter, splits it on
    whitespace, and renders the result page with the original text and
    the total word count.

    The original snapshot contained an unfinished (syntactically invalid)
    per-word frequency loop — an ``if``/``else`` with empty bodies over an
    undefined ``worddictionary``. That intent is completed here while
    preserving the rendered context keys ('fulltext', 'count').
    """
    fulltext = request.GET['fulltext']
    wordlist = fulltext.split()
    # Per-word frequency table (the half-written intent of the original).
    worddictionary = {}
    for word in wordlist:
        if word in worddictionary:
            worddictionary[word] += 1
        else:
            worddictionary[word] = 1
    return render(request, 'count/counted.html', {'fulltext': fulltext, 'count': len(wordlist)})
def about(request):
    """Render the static about page.

    Fixed: the context dict was keyed by the ``about`` function object
    itself ({about: 'about'}); Django templates look up context variables
    by string name, so the key must be the string 'about'.
    """
    return render(request, 'count/about.html', {'about': 'about'})
|
[
"steve.h@blueyonder.com"
] |
steve.h@blueyonder.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.