| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 3 to 288) | content_id (string, len 40) | detected_licenses (list, len 0 to 112) | license_type (string, 2 classes) | repo_name (string, len 5 to 115) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, 684 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 147 classes) | src_encoding (string, 25 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 128 to 12.7k) | extension (string, 142 classes) | content (string, len 128 to 8.19k) | authors (list, len 1) | author_id (string, len 1 to 132) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
94547a2d5945424fe6d0f40934e69eedc44a5cba
|
86a017dd4c8d4d77c511cc598190aaa9dc0ae3e8
|
/data structure/mine_tree.py
|
50c7743ae9b425ddc25d7aea392e1c7f56c1aff7
|
[] |
no_license
|
sungguenja/studying
|
fd7459eb9faa6488d7b63bf3884a92513daf3c54
|
719f4dfbda211c34de2a0c8cf3b9d3001f29fcec
|
refs/heads/master
| 2023-08-17T13:46:44.343780
| 2023-08-10T11:55:15
| 2023-08-10T11:55:15
| 232,306,053
| 0
| 0
| null | 2022-12-16T10:53:26
| 2020-01-07T11:00:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,713
|
py
|
import mine_node
from collections import deque
def preorder(n):
if n is not None:
print(n.data)
preorder(n.left)
preorder(n.right)
def inorder(n):
if n is not None:
inorder(n.left)
print(n.data)
inorder(n.right)
def postorder(n):
if n is not None:
postorder(n.left)
postorder(n.right)
print(n.data)
def levelorder(n):
Que = deque()
Que.append(n)
while Que:
node = Que.popleft()
if node is not None:
print(node.data)
Que.append(node.left)
Que.append(node.right)
# Find the lowest common ancestor (LCA)
checked = [False]*21
depth = [0]*21
atree = [
[1,2],
[3,4],
[5,6],
[7,8],
[9,10,11],
[],
[],
[],
[12,13],
[14],
[15],
[],
[],
[16,17],
[18],
[19],
[],
[20],
[],
[],
[]
]
parent = [[] for i in range(21)]
log = 11
def dfs(x,dep):
checked[x] = True
depth[x] = dep
for i in atree[x]:
if checked[i]:
continue
parent[i].append(x)
dfs(i,dep+1)
def setParent():
dfs(0,0)
for i in range(20,-1,-1):
j = parent[i]
while len(j) > 0:
j = parent[j[0]]
if len(j) == 0:
break
parent[i].append(j[0])
setParent()
def setSameDepth(A,B):
while depth[A] > depth[B]:
A = parent[A][0]
while depth[A] < depth[B]:
B = parent[B][0]
return A,B
def findSameParent(A,B):
value1,value2 = setSameDepth(A,B)
while value1 != value2:
value1 = parent[value1][0]
value2 = parent[value2][0]
return value1
print(findSameParent(20,15))
|
[
"59605197+sungguenja@users.noreply.github.com"
] |
59605197+sungguenja@users.noreply.github.com
|
7f08de5fc9d940f912450c7c8a1c200a3d404b56
|
21f38f1a9f6d4edfa3b233697e17d86f30b168ce
|
/janeway/migrations/0002_add_releases_and_credits.py
|
8046d7841d026120c8fed38e386d74af867f80ff
|
[] |
no_license
|
m100bit/demozoo
|
3734d0126a6f0bd9ff98128a4350e40b22cdd8a3
|
93918da57e7cb96a0d2f724e5a876406d3477891
|
refs/heads/master
| 2023-03-14T05:34:20.323613
| 2023-01-27T23:10:32
| 2023-01-27T23:23:52
| 232,933,218
| 0
| 0
| null | 2020-01-10T00:32:53
| 2020-01-10T00:32:53
| null |
UTF-8
|
Python
| false
| false
| 2,068
|
py
|
# Generated by Django 1.11.8 on 2019-07-11 16:07
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('janeway', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Credit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('janeway_id', models.IntegerField()),
('category', models.CharField(max_length=50)),
('description', models.CharField(blank=True, max_length=255)),
('name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='credits', to='janeway.Name')),
],
),
migrations.CreateModel(
name='Release',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('janeway_id', models.IntegerField()),
('title', models.CharField(max_length=255)),
('supertype', models.CharField(choices=[(b'production', b'Production'), (b'graphics', b'Graphics'), (b'music', b'Music')], max_length=20)),
('author_names', models.ManyToManyField(related_name='authored_releases', to='janeway.Name')),
],
),
migrations.CreateModel(
name='ReleaseType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type_name', models.CharField(max_length=255)),
('release', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='types', to='janeway.Release')),
],
),
migrations.AddField(
model_name='credit',
name='release',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='credits', to='janeway.Release'),
),
]
|
[
"matthew@torchbox.com"
] |
matthew@torchbox.com
|
93cdae512cf458fe26263ccfd8795b2587eed450
|
e074be8c042a872c3c97abe6e01ccaad101f0564
|
/appr/models/kv/etcd/blob.py
|
17855f9cedc685bea9cfb1616fc8beca065f1a7e
|
[
"Apache-2.0"
] |
permissive
|
quay/appr
|
1a8f80a4a3fcaf92403cae3cba27a5fc29aeea8f
|
8e3dc3417c3b43eacb6ebe9543155b3586a75146
|
refs/heads/master
| 2022-07-13T22:59:24.023665
| 2022-07-05T20:54:56
| 2022-07-05T20:54:56
| 233,668,674
| 1
| 2
|
Apache-2.0
| 2022-07-05T20:54:56
| 2020-01-13T18:50:19
|
Python
|
UTF-8
|
Python
| false
| false
| 238
|
py
|
from __future__ import absolute_import, division, print_function
from appr.models.kv.blob_kv_base import BlobKvBase
from appr.models.kv.etcd.models_index import ModelsIndexEtcd
class Blob(BlobKvBase):
index_class = ModelsIndexEtcd
|
[
"2t.antoine@gmail.com"
] |
2t.antoine@gmail.com
|
68717c72dc0d4f2c33bd500983a9437f5e933a2b
|
b05b89e1f6378905bbb62e2a2bf2d4f8e3187932
|
/reverseLinkedListRecursive.py
|
99ef5fe1734f0dd94fb91ef0d068505c0e4d5a81
|
[
"MIT"
] |
permissive
|
anishmo99/Daily-Interview-Pro
|
c959cd336209132aebad67a409df685e654cfdfc
|
d8724e8feec558ab1882d22c9ca63b850b767753
|
refs/heads/master
| 2023-04-10T08:09:46.089227
| 2021-04-27T07:27:38
| 2021-04-27T07:27:38
| 269,157,996
| 1
| 1
|
MIT
| 2020-06-08T07:09:19
| 2020-06-03T17:57:21
|
C++
|
UTF-8
|
Python
| false
| false
| 471
|
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def reverseList(self, head: ListNode) -> ListNode:
if not head or not head.next:
return head
else:
new_head = self.reverseList(head.next)
new_tail = head.next
new_tail.next = head
head.next = None
return new_head
|
[
"ani10sh@gmail.com"
] |
ani10sh@gmail.com
|
9e567a091572b54744008d9eb043669ee4cda949
|
2aebde5f6ad67fbfed26427b5463036cc752eee4
|
/model/news_crawler/sina_news/sina_news_daily.py
|
0176b67554b8f743be47ea51d239af7657f4437d
|
[] |
no_license
|
L-S-G/duck_prophet
|
be46d4d8fc63eb9030f55947626ffe056415eb8c
|
23decf840d1431f0df1bcc8bd647c1ae01adec4e
|
refs/heads/master
| 2023-02-23T11:51:32.262971
| 2021-01-08T10:25:18
| 2021-01-08T10:25:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
# -*- coding:utf-8 -*-
import sys
from config.news_crawler.sina_news.sina_news_conf import entrance_list
import requests
import re
import Queue
reload(sys)
sys.setdefaultencoding('utf-8')
class SinaNewsDaily():
# Crawl Sina news articles daily
pass
|
[
"nankaizhl@gmail.com"
] |
nankaizhl@gmail.com
|
30fc81e3450db9c9effaf08cd8ee119875de164a
|
f5627a74bb6b8923b639fad71033b18c047cd32e
|
/telemetry/telemetry/internal/platform/tracing_agent/battor_tracing_agent.py
|
d973bb13b2066ad7063a3f8a711ebea404827206
|
[
"BSD-3-Clause"
] |
permissive
|
fanarm/catapult
|
49c02ffede981ef07b13069f5ce7483e17e4c3e8
|
26750ce5fe46882abb4e72dd488a1a08a2352b0f
|
refs/heads/master
| 2021-01-15T13:14:38.301954
| 2016-09-20T10:28:20
| 2016-09-20T10:28:20
| 66,614,707
| 0
| 0
| null | 2016-08-26T03:48:41
| 2016-08-26T03:48:40
| null |
UTF-8
|
Python
| false
| false
| 3,845
|
py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from battor import battor_error
from battor import battor_wrapper
from catapult_base import cloud_storage
from devil.android import battery_utils
from py_trace_event import trace_time
from telemetry.internal.platform import tracing_agent
from telemetry.internal.util import atexit_with_log
from telemetry.timeline import trace_data
def _ReenableChargingIfNeeded(battery):
if not battery.GetCharging():
battery.SetCharging(True)
logging.info('Charging status checked at exit.')
class BattOrTracingAgent(tracing_agent.TracingAgent):
"""A tracing agent for getting power data from a BattOr device.
BattOrTracingAgent allows Telemetry to issue high-level tracing commands
(StartTracing, StopTracing, RecordClockSyncMarker) to BattOrs, which are
high-frequency power monitors used for battery testing.
"""
def __init__(self, platform_backend):
super(BattOrTracingAgent, self).__init__(platform_backend)
self._platform_backend = platform_backend
android_device = (
platform_backend.device if platform_backend.GetOSName() == 'android'
else None)
self._battery = (
battery_utils.BatteryUtils(platform_backend.device)
if platform_backend.GetOSName() == 'android' else None)
self._battor = battor_wrapper.BattorWrapper(
platform_backend.GetOSName(), android_device=android_device,
serial_log_bucket=cloud_storage.TELEMETRY_OUTPUT)
@classmethod
def IsSupported(cls, platform_backend):
"""Returns True if BattOr tracing is available."""
if platform_backend.GetOSName() == 'android':
# TODO(rnephew): When we pass BattOr device map into Telemetry, change
# this to reflect that.
return battor_wrapper.IsBattOrConnected(
'android', android_device=platform_backend.device)
return battor_wrapper.IsBattOrConnected(platform_backend.GetOSName())
def StartAgentTracing(self, config, timeout):
"""Start tracing on the BattOr.
Args:
config: A TracingConfig instance.
timeout: number of seconds that this tracing agent should try to start
tracing until timing out.
Returns:
True if the tracing agent started successfully.
"""
if not config.enable_battor_trace:
return False
try:
if self._battery:
self._battery.SetCharging(False)
atexit_with_log.Register(_ReenableChargingIfNeeded, self._battery)
self._battor.StartShell()
self._battor.StartTracing()
return True
except battor_error.BattorError:
if self._battery:
self._battery.SetCharging(True)
raise
def StopAgentTracing(self):
"""Stops tracing on the BattOr."""
try:
self._battor.StopTracing()
finally:
if self._battery:
self._battery.SetCharging(True)
def SupportsExplicitClockSync(self):
return self._battor.SupportsExplicitClockSync()
def RecordClockSyncMarker(self, sync_id,
record_controller_clock_sync_marker_callback):
"""Records a clock sync marker in the BattOr trace.
Args:
sync_id: Unique id for sync event.
record_controller_clock_sync_marker_callback: Function that takes a sync
ID and a timestamp as arguments. This function typically will record the
tracing controller clock sync marker.
"""
timestamp = trace_time.Now()
self._battor.RecordClockSyncMarker(sync_id)
record_controller_clock_sync_marker_callback(sync_id, timestamp)
def CollectAgentTraceData(self, trace_data_builder, timeout=None):
data = self._battor.CollectTraceData(timeout=timeout)
trace_data_builder.SetTraceFor(trace_data.BATTOR_TRACE_PART, data)
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
9b7d7ff0fef6c8d28943e6e8b4c7ba57787ce5a9
|
ebc7607785e8bcd6825df9e8daccd38adc26ba7b
|
/python/baekjoon/2.algorithm/implementation/백준_웰컴.py
|
4ead3ab821977dbf627364ad7c5fb431c2ca1181
|
[] |
no_license
|
galid1/Algorithm
|
18d1b72b0d5225f99b193e8892d8b513a853d53a
|
5bd69e73332f4dd61656ccdecd59c40a2fedb4b2
|
refs/heads/master
| 2022-02-12T07:38:14.032073
| 2022-02-05T08:34:46
| 2022-02-05T08:34:46
| 179,923,655
| 3
| 0
| null | 2019-06-14T07:18:14
| 2019-04-07T05:49:06
|
Python
|
UTF-8
|
Python
| false
| false
| 141
|
py
|
# . . .
# | | _ | _. _ ._ _ _
# |/\|(/.|(_.(_)[ | )(/.
print('. . .')
print('| | _ | _. _ ._ _ _')
print('|/\|(/.|(_.(_)[ | )(/.')
|
[
"galid1@naver.com"
] |
galid1@naver.com
|
05a6dc50b44a0fa741f092b8ddad671c62f8a292
|
5ee05e771a97d465048c50a9f9490b8b0eb7c00d
|
/adminsavingcategory/urls.py
|
06811381e6f63dddde32d3c2e9e21b9a74276420
|
[] |
no_license
|
ninsgosai/budget
|
614bb01ef4a629d3a7e41205e653acadb23984fd
|
722ae7f957913422f1b77247cec7d3b06156ad43
|
refs/heads/master
| 2023-03-26T14:52:30.242868
| 2021-03-21T07:33:49
| 2021-03-21T07:33:49
| 349,928,494
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.adminsavingcategory, name='adminsavingcategory'),
path('addsavingcategory', views.addsavingcategory, name='addsavingcategory'),
path('delete_saving_category', views.delete_saving_category, name='delete_saving_category'),
path('editsavingcategory', views.editsavingcategory, name='editsavingcategory'),
path('updatesaving_category', views.updatesaving_category, name='updatesaving_category'),
path('update_income_category', views.update_income_category, name='update_income_category'),
path('incomecategory', views.incomecategory, name='incomecategory'),
path('addincomecategory', views.addincomecategory, name='addincomecategory'),
path('editincomecategory', views.editincomecategory, name='editincomecategory'),
path('delete_income_category', views.delete_income_category, name='delete_income_category'),
]
|
[
"69950933+ninad-goswamy@users.noreply.github.com"
] |
69950933+ninad-goswamy@users.noreply.github.com
|
03432933f40125fba98a10caa76b8817f552cba1
|
48a1c21f3b1596acb1e987b73677af65e9f9416a
|
/packagehashtable.py
|
8030cf73f09e9a08a4d646c5ba71761d9687cc63
|
[] |
no_license
|
earthafire/c950
|
fe982685d91453b157461e2eec385a6b16aeb990
|
af1116d6ab834a63c9496fa054385970f33b4c1f
|
refs/heads/master
| 2023-06-04T07:03:18.427522
| 2021-06-21T02:54:07
| 2021-06-21T02:54:07
| 354,667,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,800
|
py
|
class PackageHashTable:
# big O: O(1)
def __init__(self):
self.current_size = 25
self.filled_slots = 0
self.data = [None] * self.current_size # list of packages
def get_filled_slots(self):
return self.filled_slots
def get_hash(self, value):
return value % self.current_size
def is_full(self): # check if array is close to full
if self.filled_slots > self.current_size - 10:
return True
else:
return False
def double_table_capacity(self): # double size of current array
for x in range(0, self.current_size):
self.data.append(None)
self.current_size *= 2
# big O: O(1)
def add_package(self, new_package):
package_num = self.get_hash(new_package.package_id)
if self.data[package_num] is None:
self.filled_slots += 1 # keeps track of how many full slots in order to prevent filling it up
self.data[package_num] = new_package # set index to new package
if self.is_full(): # if the hashtable is getting full, add another 40 slots
self.double_table_capacity()
# big O(1) time to retrieve package
def get_package(self, int_id):
package = self.data[self.get_hash(int_id)]
if package is None:
print("No such package found!")
return package
# big O(1) time to edit package
def edit_package_status(self, int_key, status):
package_to_edit = self.get_package(int_key)
if package_to_edit is None:
print("No such package found!")
else:
package_to_edit.status = status
def print_all_packages_status(self):
for item in self.data:
if item is not None:
item.print()
|
[
"unconfigured@null.spigotmc.org"
] |
unconfigured@null.spigotmc.org
|
d335e8ebc38e765fc6771cbf64c0c0dfd9c8c353
|
b7dd07413c05a13207988535b755b7d28dbc5663
|
/Chapter_7/7-3_multiples_10.py
|
895c7529af5c2a587ee9e35165d9f19b3504417c
|
[] |
no_license
|
GrnTeaLatte/AlienInvasion
|
b671a87cd730c3d4b31a8e8d760d2d02d576cfb3
|
d60e8e65adb79e54a1e1c579825827355a7e85ea
|
refs/heads/main
| 2023-02-26T03:55:26.799446
| 2020-11-03T00:42:06
| 2020-11-03T00:42:06
| 336,111,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 186
|
py
|
number = input("Pick a number from 1 to 100.")
number = int(number)
if number %10 == 0:
print("This number is a multiple of ten.")
else:
print("This number is not a multiple of ten.")
|
[
"audreyfu14@gmail.com"
] |
audreyfu14@gmail.com
|
cdc568f6f8b729c0cb3256a847415ecb9daf8769
|
78e60a7d8a67ed76244004e8a3ed573fbf396e41
|
/samples/del_admin_role.py
|
ad52a172a1488dd2d1f632616ef30fbe40844ca8
|
[
"MIT"
] |
permissive
|
Crivez/apiclient-python
|
837a9f7cc0453ccd3121311adc7920b5fe6b3e33
|
860fc054f546152a101e29b1af388c381075ac47
|
refs/heads/master
| 2023-06-08T13:24:09.249704
| 2021-06-17T12:16:35
| 2021-06-17T12:16:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
from voximplant.apiclient import VoximplantAPI, VoximplantException
if __name__ == "__main__":
voxapi = VoximplantAPI("credentials.json")
# Delete the admin role.
ADMIN_ROLE_ID = 10
try:
res = voxapi.del_admin_role(admin_role_id=ADMIN_ROLE_ID)
print(res)
except VoximplantException as e:
print("Error: {}".format(e.message))
|
[
"andrey@voximplant.com"
] |
andrey@voximplant.com
|
ca9fd7db7af76cf7e25b5898992310a5733c1001
|
88e06bab1989c81a2dd649bb09b144fa7c958f89
|
/leet_simplify_path.py
|
4bcdad7a72949aab1ed4bd7e8b422c4511b91450
|
[] |
no_license
|
VaibhavD143/Coding
|
4499526b22ee4ef13f66c3abcea671c80a8f748a
|
5de3bae8891c7d174cbc847a37c3afb00dd28f0e
|
refs/heads/master
| 2023-08-06T21:56:44.934954
| 2021-10-09T18:31:29
| 2021-10-09T18:31:29
| 263,890,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
class Solution:
def simplifyPath(self, path: str) -> str:
ss = []
for s in path.split('/') :
if s and s != '.' :
if s == ".." :
if ss :
ss.pop()
else :
ss.append(s)
return '/' + '/'.join(ss)
|
[
"vaibhav.dodiya143vd@gmail.com"
] |
vaibhav.dodiya143vd@gmail.com
|
7565fccefc1f94b90892e24e8bc4c59ad706f8ce
|
601c2b26b115e59002fd0d105daef714aa6803a2
|
/Problem68.py
|
de6e4c370c5364a8f9fd371e752b2672507cabb7
|
[] |
no_license
|
madrury/euler
|
4be0f28edbfdf5a10de1b3f68336f73dcc73455d
|
04ad94fe67af09e48bd795a2cc9f229f7c752397
|
refs/heads/master
| 2021-01-17T07:40:46.930178
| 2018-01-21T23:49:21
| 2018-01-21T23:49:21
| 15,242,518
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,555
|
py
|
from itertools import permutations, chain
def magic_3_ring():
S = set([1, 2, 3, 4, 5, 6])
for a_0, a_1, a_2 in permutations(S, 3):
b_0 = b_1 = b_2 = c_0 = c_1 = c_2 = None
s = a_0 + a_1 + a_2
b_1 = a_2
for b_0 in S - set([a_0, a_1, a_2]):
b_2 = c_1 = s - b_1 - b_0
if b_2 in S - set([a_0, a_1, a_2]) and b_2 != b_0:
for c_0 in S - set([a_0, a_1, a_2, b_0, b_1, b_2]):
c_2 = s - c_1 - c_0
if c_2 == a_1 and a_0 < b_0 and a_0 < c_0:
yield (a_0, a_1, a_2, b_0, b_1, b_2, c_0, c_1, c_2)
def magic_5_ring():
S = set([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
for A in permutations(S, 3):
A = list(A)
B, C, D, E = [None]*3, [None]*3, [None]*3, [None]*3
s = sum(A)
B[1] = A[2]
for b_0 in S - set(A):
B[0] = b_0
B[2] = C[1] = s - B[1] - B[0]
if B[2] in S - set(A) and B[2] != B[0]:
for c_0 in S - (set(A) | set(B)):
C[0] = c_0
C[2] = D[1] = s - C[1] - C[0]
if C[2] in S - (set(A) | set(B)) and C[2] != C[0]:
for d_0 in S - (set(A) | set(B) | set(C)):
D[0] = d_0
D[2] = E[1] = s - D[0] - D[1]
if D[2] in S - (set(A) | set(B) | set(C)) and D[2] != D[0]:
for e_0 in S - (set(A) | set(B) | set(C) | set(D)):
E[0] = e_0
E[2] = s - E[1] - E[0]
if E[2] == A[1] and E[2] != E[0] and A[0] < B[0] and A[0] < C[0] and A[0] < D[0] and A[0] < E[0]:
yield (A, B, C, D, E)
for ring in magic_5_ring():
print int("".join([str(i) for i in list(chain.from_iterable(ring))]))
|
[
"matthew.drury.83@gmail.com"
] |
matthew.drury.83@gmail.com
|
47eb91749a631a8839cbadb2b1ae5950f56d3da1
|
2834e05cfb56e16fb4a20a46d713ee3e7393a30a
|
/mzitu_spider.py
|
0716848d851bd48611516e87812f40ec2161a5b2
|
[] |
no_license
|
jayhebe/Python_Data_Analysis
|
29680f1a13ca5756b8fed96c75f7bf4ee432851e
|
2bd39ce2688ef7fe4340810529f35799eff7de7a
|
refs/heads/master
| 2020-07-04T16:29:59.470195
| 2020-01-14T15:12:42
| 2020-01-14T15:12:42
| 202,339,479
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,246
|
py
|
from bs4 import BeautifulSoup
import requests
import os
import time
mzitu_folder_name = "mzitu_images"
mzitu_base_url = "https://www.mzitu.com"
mzitu_headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/77.0.3865.120 Safari/537.36",
"Request": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Referer": "https://www.mzitu.com"
}
def get_page_info(url):
page_res = requests.get(url, headers=mzitu_headers)
page_res.encoding = page_res.apparent_encoding
page_bs = BeautifulSoup(page_res.text, "html.parser")
page_info = page_bs.find("ul", id="pins").find_all("span", class_="")
page_next = page_bs.find("a", class_="next page-numbers")
return page_info, page_next
def get_pic_url(page_url):
pic_res = requests.get(page_url, headers=mzitu_headers)
pic_bs = BeautifulSoup(pic_res.text, "html.parser")
pic_url = pic_bs.find("div", class_="main-image").find("img")["src"]
pic_pages = pic_bs.find("div", class_="pagenavi").find_all("a")
pic_next_page = ""
for pic_page in pic_pages:
if "下一页" in pic_page.text:
pic_next_page = pic_page["href"]
return pic_url, pic_next_page
def get_file_name(file_extension):
file_name = str(time.time()).replace(".", "") + file_extension
return os.path.sep.join([os.getcwd(), mzitu_folder_name, file_name])
def download_pic(pic_url):
print("Downloading: {}".format(pic_url))
with open(get_file_name(".jpg"), "wb") as pic_fp:
pic_content = requests.get(pic_url, headers=mzitu_headers).content
pic_fp.write(pic_content)
mzitu_page_info, mzitu_page_next = get_page_info(mzitu_base_url)
while mzitu_page_next:
for mzitu_pic_span in mzitu_page_info:
mzitu_pic_next_page = mzitu_pic_span.find("a")["href"]
# print(mzitu_pic_link["href"])
print("Parsing page: {}".format(mzitu_pic_next_page))
while True:
mzitu_pic_url, mzitu_pic_next_page = get_pic_url(mzitu_pic_next_page)
download_pic(mzitu_pic_url)
if not mzitu_pic_next_page:
break
mzitu_page_info, mzitu_page_next = get_page_info(mzitu_page_next["href"])
|
[
"jayhebe1983@sina.com"
] |
jayhebe1983@sina.com
|
0e042a6a418f78275e29bb9fe263d90fa38a2b3c
|
cf59d92614a3505aeed9455482ef327572578228
|
/venv/lib/python3.6/site-packages/djoser/urls/authtoken.py
|
87ed8962570463caffb44de8795bf7e87591014d
|
[
"MIT"
] |
permissive
|
slarkjm0803/autobets
|
e1d1a3b00cf94ee90fd1fed7464431677b4f9e11
|
f92a5d999acaf5d7c83ca2768a260c2282eabbee
|
refs/heads/master
| 2020-09-23T21:40:46.057648
| 2019-11-29T11:42:37
| 2019-11-29T11:42:37
| 225,591,526
| 1
| 0
|
MIT
| 2019-12-03T10:22:21
| 2019-12-03T10:22:20
| null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
from django.conf.urls import url
from djoser import views
urlpatterns = [
url(
r'^token/create/$',
views.TokenCreateView.as_view(),
name='token-create'
),
url(
r'^token/destroy/$',
views.TokenDestroyView.as_view(),
name='token-destroy'
),
]
|
[
"mac@macs-MacBook-Pro.local"
] |
mac@macs-MacBook-Pro.local
|
95205781cf62e93ad3b0b25c8c12a11ba6a87ce3
|
462c56e7454c97e0541588b9be66a4e216ea20fd
|
/337.house-robber-iii.py
|
b7df910aa92e9bed203055651ced6ea30bbb2e43
|
[] |
no_license
|
LouisYLWang/leetcode_python
|
d5ac6289e33c5d027f248aa3e7dd66291354941c
|
2ecaeed38178819480388b5742bc2ea12009ae16
|
refs/heads/master
| 2020-05-27T08:38:48.532000
| 2019-12-28T07:08:57
| 2019-12-28T07:08:57
| 188,549,256
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,498
|
py
|
#
# @lc app=leetcode id=337 lang=python3
#
# [337] House Robber III
#
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
#
class Solution:
def rob(self, root: TreeNode) -> int:
def rob_sub(root):
if not root:
return [0,0]
left = rob_sub(root.left)
right = rob_sub(root.right)
return [max(left) + max(right), root.val + left[0] + right[0]]
return max(rob_sub(root))
# This method use recursion with memorization, but not performance well
'''class Solution:
rob_map = dict()
def rob(self, root: TreeNode) -> int:
def cur_out(root):
if root in self.rob_map:
if 0 in self.rob_map[root]:
return self.rob_map[root][0]
ans_i_i = 0
ans_o_o = 0
ans_i_o = 0
ans_o_i = 0
if root.left or root.right:
if root.left:
ans_i_i += cur_in(root.left)
ans_o_o += cur_out(root.left)
ans_i_o += cur_in(root.left)
ans_o_i += cur_out(root.left)
if root.right:
ans_i_i += cur_in(root.right)
ans_o_o += cur_out(root.right)
ans_o_i += cur_in(root.right)
ans_i_o += cur_out(root.right)
ans = max(ans_i_i, ans_o_o, ans_o_i, ans_i_o)
if root not in self.rob_map:
self.rob_map[root] = dict()
self.rob_map[root][0] = ans
return self.rob_map[root][0]
def cur_in(root):
if root in self.rob_map:
if 1 in self.rob_map[root]:
return self.rob_map[root][1]
ans = root.val
if root.left or root.right:
if root.left:
ans += cur_out(root.left)
if root.right:
ans += cur_out(root.right)
if root not in self.rob_map:
self.rob_map[root] = dict()
self.rob_map[root][1] = ans
return ans
if root:
return max(cur_out(root), cur_in(root))
return 0'''
|
[
"louis.yl.wang@outlook.com"
] |
louis.yl.wang@outlook.com
|
31356e0d99a19ee522cda865378a49b0ec1c5187
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startCirq1623.py
|
debc4e59f6ac2116d11df4140a57d981a774db94
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,252
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=5
# total number=60
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=3
c.append(cirq.H.on(input_qubit[1])) # number=4
c.append(cirq.H.on(input_qubit[2])) # number=5
c.append(cirq.H.on(input_qubit[3])) # number=6
c.append(cirq.H.on(input_qubit[0])) # number=38
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=39
c.append(cirq.H.on(input_qubit[0])) # number=40
c.append(cirq.H.on(input_qubit[0])) # number=49
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=50
c.append(cirq.H.on(input_qubit[0])) # number=51
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=52
c.append(cirq.Z.on(input_qubit[1])) # number=53
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=54
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=47
c.append(cirq.H.on(input_qubit[0])) # number=32
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=33
c.append(cirq.H.on(input_qubit[0])) # number=34
c.append(cirq.X.on(input_qubit[4])) # number=48
c.append(cirq.H.on(input_qubit[4])) # number=21
for i in range(2):
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[0])) # number=57
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=58
c.append(cirq.H.on(input_qubit[0])) # number=59
c.append(cirq.Z.on(input_qubit[3])) # number=42
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=43
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[3])) # number=44
c.append(cirq.H.on(input_qubit[0])) # number=17
c.append(cirq.H.on(input_qubit[1])) # number=18
c.append(cirq.H.on(input_qubit[2])) # number=19
c.append(cirq.H.on(input_qubit[3])) # number=20
c.append(cirq.X.on(input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[1])) # number=56
c.append(cirq.X.on(input_qubit[1])) # number=10
c.append(cirq.X.on(input_qubit[2])) # number=11
c.append(cirq.rx(-2.9845130209103035).on(input_qubit[4])) # number=55
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=35
c.append(cirq.X.on(input_qubit[3])) # number=36
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=37
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=24
c.append(cirq.X.on(input_qubit[0])) # number=25
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=26
c.append(cirq.X.on(input_qubit[1])) # number=14
c.append(cirq.X.on(input_qubit[2])) # number=15
c.append(cirq.X.on(input_qubit[3])) # number=16
c.append(cirq.X.on(input_qubit[1])) # number=22
c.append(cirq.X.on(input_qubit[1])) # number=23
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 5
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq1623.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
448d240cac3a1caba7534ece90ad949bfc2df05b
|
2342b8737b9ffeb9715158b8ec74a33c7a4947f6
|
/koku/providers/test/azure/test_client.py
|
c77e19be39011df1ea5a3260c8323920ed3ce2e7
|
[
"Apache-2.0"
] |
permissive
|
project-koku/koku
|
444d8df05da5416c9cee606c42481c99be45f13d
|
0416e5216eb1ec4b41c8dd4999adde218b1ab2e1
|
refs/heads/main
| 2023-08-20T11:30:17.510182
| 2023-08-17T18:27:30
| 2023-08-17T18:27:30
| 126,496,611
| 225
| 94
|
Apache-2.0
| 2023-09-14T17:38:08
| 2018-03-23T14:29:23
|
Python
|
UTF-8
|
Python
| false
| false
| 5,298
|
py
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Test Azure Client Class."""
import random
from unittest.mock import patch
from azure.identity import ClientSecretCredential
from azure.mgmt.costmanagement import CostManagementClient
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.storage import StorageManagementClient
from azure.storage.blob import BlobServiceClient
from django.test import TestCase
from faker import Faker
from providers.azure.client import AzureClientFactory
FAKE = Faker()
class AzureClientFactoryTestCase(TestCase):
"""Parent Class for AzureClientFactory test cases."""
def setUp(self):
"""Test case setup."""
self.clouds = ["china", "germany", "public", "usgov"]
@patch("providers.azure.client.ClientSecretCredential.get_token")
def test_constructor(self, mock_get_token):
"""Test that we can create an AzureClientFactory object."""
obj = AzureClientFactory(
subscription_id=FAKE.uuid4(),
tenant_id=FAKE.uuid4(),
client_id=FAKE.uuid4(),
client_secret=FAKE.word(),
cloud=random.choice(self.clouds),
)
self.assertTrue(isinstance(obj, AzureClientFactory))
@patch("providers.azure.client.ClientSecretCredential.get_token")
def test_costmanagement_client(self, mock_get_token):
"""Test the costmanagement_client property."""
obj = AzureClientFactory(
subscription_id=FAKE.uuid4(),
tenant_id=FAKE.uuid4(),
client_id=FAKE.uuid4(),
client_secret=FAKE.word(),
cloud=random.choice(self.clouds),
)
self.assertTrue(isinstance(obj.cost_management_client, CostManagementClient))
@patch("providers.azure.client.ClientSecretCredential.get_token")
def test_credentials(self, mock_get_token):
"""Test the credentials property."""
obj = AzureClientFactory(
subscription_id=FAKE.uuid4(),
tenant_id=FAKE.uuid4(),
client_id=FAKE.uuid4(),
client_secret=FAKE.word(),
cloud=random.choice(self.clouds),
)
self.assertTrue(isinstance(obj._credentials, ClientSecretCredential))
@patch("providers.azure.client.ClientSecretCredential.get_token")
def test_resource_client(self, mock_get_token):
"""Test the resource_client property."""
obj = AzureClientFactory(
subscription_id=FAKE.uuid4(),
tenant_id=FAKE.uuid4(),
client_id=FAKE.uuid4(),
client_secret=FAKE.word(),
cloud=random.choice(self.clouds),
)
self.assertTrue(isinstance(obj.resource_client, ResourceManagementClient))
@patch("providers.azure.client.ClientSecretCredential.get_token")
def test_storage_client(self, mock_get_token):
"""Test the storage_client property."""
obj = AzureClientFactory(
subscription_id=FAKE.uuid4(),
tenant_id=FAKE.uuid4(),
client_id=FAKE.uuid4(),
client_secret=FAKE.word(),
cloud=random.choice(self.clouds),
)
self.assertTrue(isinstance(obj.storage_client, StorageManagementClient))
@patch("providers.azure.client.ClientSecretCredential.get_token")
def test_subscription_id(self, mock_get_token):
"""Test the subscription_id property."""
subscription_id = FAKE.uuid4()
obj = AzureClientFactory(
subscription_id=subscription_id,
tenant_id=FAKE.uuid4(),
client_id=FAKE.uuid4(),
client_secret=FAKE.word(),
cloud=random.choice(self.clouds),
)
self.assertTrue(obj.subscription_id, subscription_id)
@patch("providers.azure.client.ClientSecretCredential.get_token")
def test_cloud_storage_account(self, mock_get_token):
"""Test the cloud_storage_account method."""
subscription_id = FAKE.uuid4()
resource_group_name = FAKE.word()
storage_account_name = FAKE.word()
obj = AzureClientFactory(
subscription_id=subscription_id,
tenant_id=FAKE.uuid4(),
client_id=FAKE.uuid4(),
client_secret=FAKE.word(),
cloud=random.choice(self.clouds),
)
with patch.object(StorageManagementClient, "storage_accounts", return_value=None):
cloud_account = obj.cloud_storage_account(resource_group_name, storage_account_name)
self.assertTrue(isinstance(cloud_account, BlobServiceClient))
@patch("providers.azure.client.ClientSecretCredential.get_token")
def test_scope_and_export_name(self, mock_get_token):
"""Test the scope and export_name properties."""
subscription_id = FAKE.uuid4()
scope = f"/subscriptions/{subscription_id}"
export_name = "cost_export"
obj = AzureClientFactory(
subscription_id=subscription_id,
tenant_id=FAKE.uuid4(),
client_id=FAKE.uuid4(),
client_secret=FAKE.word(),
cloud=random.choice(self.clouds),
scope=scope,
export_name=export_name,
)
self.assertTrue(obj.scope, scope)
self.assertTrue(obj.export_name, export_name)
|
[
"noreply@github.com"
] |
project-koku.noreply@github.com
|
2bd53cdf3cf4e912f272d060d6e07a9650c5bf45
|
7ba5ec9aa9ddca3f9b3384fc4457b0a865c2a0a1
|
/src/559.py
|
f063866e484fd03a1aa4d26b5e7b19a1d8e94cd2
|
[] |
no_license
|
ecurtin2/Project-Euler
|
71f79ee90a9abd0943421677d78a6c087419e500
|
79479da7a45b3ae67c0c7ea24da5f7d43c6f25d3
|
refs/heads/master
| 2021-03-19T14:52:57.045443
| 2018-04-12T22:05:37
| 2018-04-12T22:05:37
| 100,059,180
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
"""
An ascent of a column j in a matrix occurs if the value of column j is smaller than the value of column j+1 in all rows.
Let P(k, r, n) be the number of r x n matrices with the following properties:
The rows are permutations of {1, 2, 3, ... , n}.
Numbering the first column as 1, a column ascent occurs at column j<n if and only if j is not a multiple of k.
For example, P(1, 2, 3) = 19, P(2, 4, 6) = 65508751 and P(7, 5, 30) mod 1000000123 = 161858102.
Let $Q(n) = \displaystyle\sum_{k=1}^{n} P(k, n, n)$.
For example, Q(5) = 21879393751 and Q(50) mod 1000000123 = 819573537.
Find Q(50000) mod 1000000123.
"""
|
[
"ecurtin2@illinois.edu"
] |
ecurtin2@illinois.edu
|
2970d8227cccf50e65da0dda71c84de76531c099
|
e510a82771967d8677ccb77be7d8fe199970ec39
|
/setup.py
|
94c41706ab048c6d1b4f753918410c242eff4509
|
[] |
no_license
|
mbr/ragstoriches
|
4c1a7501a05f40aad13aa796105df5ce57beb9b9
|
aa11405673dfd307915e38110145cede72892804
|
refs/heads/master
| 2023-06-06T20:49:07.700820
| 2015-12-23T10:53:40
| 2015-12-23T10:53:40
| 8,688,096
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 825
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name='ragstoriches',
version='0.4.1dev',
description='Develop highly-concurrent web scrapers, easily.',
long_description=read('README.rst'),
author='Marc Brinkmann',
author_email='git@marcbrinkmann.de',
url='http://github.com/mbr/ragstoriches',
license='MIT',
install_requires=['gevent', 'logbook', 'requests', 'requests_cache',
'stuf', 'colorama', 'python-dateutil'],
packages=find_packages(exclude=['test']),
entry_points={
'console_scripts': [
'ragstoriches = ragstoriches.apps:run_scraper',
],
})
|
[
"git@marcbrinkmann.de"
] |
git@marcbrinkmann.de
|
847e7826459e7dffae6d5bcde0376c4b7086d6d5
|
186f694b65b43cd56e746ce8538e4f1edad6129e
|
/1on1/BFS/547-friendcircle.py
|
373d30f3c54ec2cd1aa12674be315d70fc5f22f8
|
[] |
no_license
|
zionhjs/algorithm_repo
|
287486e0173e68cfa9e535490004c952192a54db
|
26b4a770d5335abd738ae26c68d91f6af7b13749
|
refs/heads/master
| 2022-12-17T15:59:17.932490
| 2020-09-23T04:12:38
| 2020-09-23T04:12:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 205
|
py
|
class Solution:
def findCircleNum(self, M: List[List[int]]) -> int:
if not M:
return 0
for i in range(len(M)):
for j in range(len(M)):
if j > i:
|
[
"hjszion@gmail.com"
] |
hjszion@gmail.com
|
1681cf3956952c3b2add6ceb0f3ae7b5d6d21f62
|
dd31ec8f3f979b0339cf686ce9094def03ef003a
|
/myvenv/Lib/site-packages/pylint/reporters/json.py
|
be37b21102933ca7fe5d330945a71fcc8280180a
|
[
"MIT"
] |
permissive
|
rvmoura96/projeto-almoxarifado
|
872bb945b4057bdbf108776e2101e9966a23f4de
|
4ca5e5d00f449a940f7c601479bb3fe14c54f012
|
refs/heads/master
| 2022-11-11T07:45:33.475443
| 2017-11-21T21:13:19
| 2017-11-21T21:13:19
| 106,044,249
| 1
| 1
|
MIT
| 2022-10-26T05:02:32
| 2017-10-06T19:48:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,662
|
py
|
# Copyright (c) 2015-2016 Claudiu Popa <pcmanticore@gmail.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""JSON reporter"""
from __future__ import absolute_import, print_function
import cgi
import json
import sys
from pylint.interfaces import IReporter
from pylint.reporters import BaseReporter
class JSONReporter(BaseReporter):
"""Report messages and layouts in JSON."""
__implements__ = IReporter
name = 'json'
extension = 'json'
def __init__(self, output=sys.stdout):
BaseReporter.__init__(self, output)
self.messages = []
def handle_message(self, msg):
"""Manage message of different type and in the context of path."""
self.messages.append({
'type': msg.category,
'module': msg.module,
'obj': msg.obj,
'line': msg.line,
'column': msg.column,
'path': msg.path,
'symbol': msg.symbol,
# pylint: disable=deprecated-method; deprecated since 3.2.
'message': cgi.escape(msg.msg or ''),
})
def display_messages(self, layout):
"""Launch layouts display"""
if self.messages:
print(json.dumps(self.messages, indent=4), file=self.out)
def display_reports(self, layout): # pylint: disable=arguments-differ
"""Don't do nothing in this reporter."""
def _display(self, layout):
"""Don't do nothing."""
def register(linter):
"""Register the reporter classes with the linter."""
linter.register_reporter(JSONReporter)
|
[
"rvmoura.96@gmail.com"
] |
rvmoura.96@gmail.com
|
219305932842bb0a951cda0aab9e9783eb969379
|
f361126ee099303113b5ed3cc0e838bd01a9e41b
|
/Semana3/pattern.py
|
ce21f32f68915c1c4ac487f6d46600d7fa9a86c0
|
[] |
no_license
|
ju-c-lopes/Univesp_Algoritmos_II
|
e1ce5557d342ea75fe929cf7b207e633f9aa89cd
|
5d4eec368be91c18f0ae5c17d342e6eb0f1c79be
|
refs/heads/master
| 2023-06-05T11:09:25.415719
| 2021-07-07T22:26:53
| 2021-07-07T22:26:53
| 383,600,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
def pattern(n):
if n == 0:
print(n, end=' ')
else:
pattern(n-1)
print(n, end=' ')
pattern(n-1)
pattern(3)
|
[
"juliano.co.lopes@gmail.com"
] |
juliano.co.lopes@gmail.com
|
73766be87c861a771f5ae7e6784f2aa71dfca856
|
f954729a6941d5309f02865b5313b8524e9e6f53
|
/resources/genomes.py
|
d4d95ae8aa3d7389867fd4d1f1a5fe88474820de
|
[] |
no_license
|
bnbowman/NoAmpTools
|
5f597adec6f49ab8422f443dfdd234b7a9a1dd8d
|
b59800c675c764ba8b5aee734c3ed79e4ac8e9a5
|
refs/heads/master
| 2018-12-18T18:58:27.201236
| 2018-09-14T19:33:04
| 2018-09-14T19:34:23
| 107,060,977
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,330
|
py
|
#! /usr/bin/env python
from collections import defaultdict
import matplotlib.cm as cm
class Genome(object):
def labels(self):
return self._labels
def sizes(self):
return self._sizes
def size(self, key):
return self._sizes[key]
def targets(self):
return self._targets
def targetDictionary(self):
pass
def colors(self, N):
colors = {}
for i, n in enumerate( sorted(self._sizes.keys()) ):
color = cm.gist_rainbow(i%(N+1) / float(N))
colors[i] = color
colors[n] = color
return colors
class HG19(Genome):
_labels = ["chr{0}".format(l) for l in range(1,23) + ['M', 'X', 'Y']]
_sizes = {"chr1": 249250621, "chr2": 243199373, "chr3": 198022430, "chr4": 191154276,
"chr5": 180915260, "chr6": 171115067, "chr7": 159138663, "chr8": 146364022,
"chr9": 141213431, "chr10": 135534747, "chr11": 135006516, "chr12": 133851895,
"chr13": 115169878, "chr14": 107349540, "chr15": 102531392, "chr16": 90354753,
"chr17": 81195210, "chr18": 78077248, "chr19": 59128983, "chr20": 63025520,
"chr21": 48129895, "chr22": 51304566,
"chrM": 16571, "chrX": 155270560, "chrY": 59373566}
## Locus,ChrName,ChrIdx,GeneStart,RegionStart,RegionEnd,GeneEnd
_targets = [["HTT", "chr4", 4, 3075691, 3076603, 3076661, 3076815],
["FMR1", "chrX", 23, 146993123, 146993568, 146993629, 146994131],
["ALS", "chr9", 9, 27572985, 27573522, 27573541, 27574014],
["FUCHS", "chr18", 18, 53251995, 53253386, 53253458, 53253577],
["SCA10", "chr22", 22, 46190744, 46191234, 46191305, 46191756],
["EWINGS_Chr20", "chr20", 20, 21553989, 21556922, 21557001, 21557036],
["EWINGS_ChrX", "chrX", 23, 30325813, 30328875, 30328976, 30329062]]
def __init__(self):
# Update the size dictionary so we can index by index as well as name
for i, c in enumerate(self._labels):
self._sizes[i] = self._sizes[c]
def targetDictionary(self):
tDict = defaultdict(list)
for t in self.targets():
if t[2] <= 22:
target_tId = t[2]-1
else:
target_tId = t[2]
tDict[target_tId].append( t )
return tDict
class GRC38(Genome):
_labels = ["chr{0}".format(l) for l in range(1,23) + ['X', 'Y', 'M']]
_sizes = {"chr1": 248956422, "chr2": 242193529, "chr3": 198295559, "chr4": 190214555,
"chr5": 181538259, "chr6": 170805979, "chr7": 159345973, "chr8": 145138636,
"chr9": 138394717, "chr10": 133797422, "chr11": 135086622, "chr12": 133275309,
"chr13": 114364328, "chr14": 107043718, "chr15": 101991189, "chr16": 90338345,
"chr17": 83257441, "chr18": 80373285, "chr19": 58617616 , "chr20": 64444167,
"chr21": 46709983, "chr22": 50818468,
"chrX": 156040895, "chrY": 57227415, "chrM": 16569}
## Locus,ChrName,ChrIdx,GeneStart,RegionStart,RegionEnd,GeneEnd
_targets = [["HTT", "chr4", 3, 3072621, 3074866, 3074949, 3075351],
["ALS", "chr9", 8, 27571412, 27573474, 27573556, 27574248],
["FUCHS", "chr18", 17, 55584764, 55586145, 55586237, 55586346],
["EWINGS_Chr20", "chr20", 19, 21573351, 21576271, 21576374, 21576399],
["SCA10", "chr22", 21, 45793649, 45795344, 45795434, 45796093],
["EWINGS_ChrX", "chrX", 22, 30307696, 30310741, 30310899, 30310946],
["FMR1", "chrX", 22, 147911603, 147912040, 147912120, 147914564]]
def __init__(self):
# Update the size dictionary so we can index by index as well as name
for i, c in enumerate(self._labels):
self._sizes[i] = self._sizes[c]
def targetDictionary(self):
tDict = defaultdict(list)
for t in self.targets():
tDict[t[2]].append( t )
return tDict
def decodeGenome(genome):
"""
"""
if genome.lower() == "hg19":
return HG19()
elif genome.lower() == "grc38":
return GRC38()
else:
raise ValueError("Invalid genome: specified genome must be HG19 or GHC38")
|
[
"bbowman@pacificbiosciences.com"
] |
bbowman@pacificbiosciences.com
|
7e5a0a4b2e4b83568dafc54541a561cc3404b48e
|
04b71cef66a039196a2965dfab0ff56b0793fe32
|
/python/run/brian_2_xgb.py
|
231a8c822d29764c92ace11f0ede645217b21be4
|
[] |
no_license
|
gviejo/Prediction_xgb_head_direction
|
69d57b7d7a2f366f2c96a6e6e933d0978592718f
|
6fbe724f92c15afcc634c84383f57bbeedff7d24
|
refs/heads/master
| 2021-11-05T09:10:48.067439
| 2021-11-05T01:15:17
| 2021-11-05T01:15:17
| 93,687,438
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,386
|
py
|
import scipy.io
import sys,os
import numpy as np
from matplotlib.pyplot import *
import pandas as pd
import xgboost as xgb
store = pd.HDFStore("../data/spikes_brian.h5")
data = store['data']
store.close()
adn_neuron = [n for n in data.keys() if 'ADn' in n]
pos_neuron = [n for n in data.keys() if 'Pos' in n]
bin_size = 25
bin_length = 500
def extract_tree_threshold(trees):
n = len(trees.get_dump())
thr = {}
for t in xrange(n):
gv = xgb.to_graphviz(trees, num_trees=t)
body = gv.body
for i in xrange(len(body)):
for l in body[i].split('"'):
if 'f' in l and '<' in l:
tmp = l.split("<")
if thr.has_key(tmp[0]):
thr[tmp[0]].append(float(tmp[1]))
else:
thr[tmp[0]] = [float(tmp[1])]
for k in thr.iterkeys():
thr[k] = np.sort(np.array(thr[k]))
return thr
###########################################################################################
# create shifted spiking activity from -500 to 500 ms with index 0 to 40 (20 for t = 0 ms) for all ADn_neuron
# remove 20 points at the beginning and 20 points at the end
###########################################################################################
nb_bins = bin_length/bin_size
duration = len(data)
time_shifted = np.zeros(( duration-nb_bins, len(adn_neuron), nb_bins+1))
for n,i in zip(adn_neuron, range(len(adn_neuron))):
tmp = data[n]
for j in range(0,nb_bins+1):
# time_shifted[:,i,j] = tmp[40-j:duration-j]
time_shifted[:,i,j] = tmp[j:duration-nb_bins+j]
combination = {}
for k in pos_neuron:
combination[k] = { 'features' : adn_neuron,
'targets' : k,
}
#####################################################################
# LEARNING XGB
#####################################################################
bsts = {i:{} for i in combination.iterkeys()} # to keep the boosted tree
params = {'objective': "count:poisson", #for poisson output
'eval_metric': "logloss", #loglikelihood loss
'seed': 2925, #for reproducibility
'silent': 1,
'learning_rate': 0.1,
'min_child_weight': 2, 'n_estimators': 580,
'subsample': 0.6, 'max_depth': 5, 'gamma': 0.4}
num_round = 90
time_shifted = time_shifted.reshape(time_shifted.shape[0],time_shifted.shape[1]*time_shifted.shape[2])
for k in combination.keys():
print(k)
features = combination[k]['features']
targets = combination[k]['targets']
Yall = data[targets].values
# need to cut Yall
Yall = Yall[nb_bins//2:-nb_bins//2]
print(time_shifted.shape)
print(Yall.shape)
dtrain = xgb.DMatrix(time_shifted, label=Yall)
bst = xgb.train(params, dtrain, num_round)
bsts[k] = bst
#####################################################################
# EXTRACT TREE STRUCTURE
#####################################################################
thresholds = {}
for i in bsts.iterkeys():
thresholds[i] = extract_tree_threshold(bsts[i])
#####################################################################
# EXTRACT GAIN VALUE
#####################################################################
gain = {}
for i in bsts.iterkeys():
gain[i] = bsts[i].get_score(importance_type = 'gain')
#####################################################################
# CONVERT TO TIMING OF SPLIT POSITION
#####################################################################
time_count = np.zeros((len(pos_neuron), len(adn_neuron), nb_bins+1))
index = np.repeat(np.arange(len(adn_neuron)), nb_bins+1)
for n in thresholds.iterkeys():
splits = thresholds[n]
for s in splits.keys():
time_count[int(n.split(".")[1]), index[int(s[1:])], int(s[1:])%(nb_bins+1)] = len(splits[s])
time_count = time_count.sum(1)
gain_value = np.zeros((len(pos_neuron), len(adn_neuron), nb_bins+1))
index = np.repeat(np.arange(len(adn_neuron)), nb_bins+1)
for n in gain.iterkeys():
g = gain[n]
for s in g.keys():
gain_value[int(n.split(".")[1]), index[int(s[1:])], int(s[1:])%(nb_bins+1)] = g[s]
# gain_value = gain_value.reshape(len(pos_neuron)*len(adn_neuron), 41)
gain_value = gain_value.sum(1)
time = np.arange(-(bin_length/2), (bin_length/2)+bin_size, bin_size)
xgb_peaks = pd.DataFrame(index = time, data = (time_count*gain_value).transpose())
#####################################################################
# PLOT
#####################################################################
plot(time, xgb_peaks.mean(1))
title("XGB")
show()
|
[
"guillaume.viejo@gmail.com"
] |
guillaume.viejo@gmail.com
|
d86242cd87dd8323bb6214deb9dacb2b9f552040
|
2b3817fb9e4078e912fe1df2e964a68dcd48d053
|
/code/pgms/can-sum.py
|
021e88ad56ffd0af0b5efa8652465910059432a6
|
[
"MIT"
] |
permissive
|
souradeepta/PythonPractice
|
350a130b341efec7b22ebd061c3d89036603587f
|
fa956ca4b87a0eb92fee21fa78e59757ce665770
|
refs/heads/master
| 2023-08-08T03:46:01.238861
| 2021-09-23T02:37:13
| 2021-09-23T02:37:13
| 256,668,632
| 1
| 0
|
MIT
| 2021-09-22T18:54:17
| 2020-04-18T04:22:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,810
|
py
|
from typing import List
def canSumRepeat(target: int, input: List, memo: dict) -> bool:
"""Can the sum of input number lead to the target value
Args:
target (int): target
input (List): list of numbers
Returns:
bool: True or False
"""
if not input:
return None
if target == 0:
return True
if target < 0:
return False
if target in memo:
return memo[target]
for elem in input:
remainder = target - elem
if(canSumRepeat(remainder, input, memo)):
memo[target] = True
return True
memo[target] = False
return False
def canSumRepeatList(target: int, input: List, output: List, memo) -> List:
"""Can the sum of input number lead to the target value
Args:
target (int): target
input (List): list of numbers
output (List): list of numbers which sum to target
Returns:
List: List of elements
"""
if not input:
return None
if target == 0:
return output
if target < 0:
return None
if target in memo:
return memo[target]
for elem in input:
remainder = target - elem
output.append(elem)
if(canSumRepeatList(remainder, input, output, {})):
memo[target] = output
return output
memo[target] = None
return None
print(canSumRepeat(7, [2, 3], {})) # true
print(canSumRepeat(7, [5, 3, 4, 7], {})) # true
print(canSumRepeat(8, [2, 3, 5], {})) # false
print(canSumRepeat(300, [7, 14], {})) # true
print(canSumRepeatList(7, [2, 3], [], {})) # true
print(canSumRepeatList(7, [5, 3, 4, 7], [], {})) # true
print(canSumRepeatList(8, [2, 3, 5], [], {})) # false
print(canSumRepeatList(300, [7, 14], [], {})) # true
|
[
"sdb.svnit@gmail.com"
] |
sdb.svnit@gmail.com
|
3b7dbaa8d490c3da4756ccad3b0ccf36d205d1c9
|
a34ec07c3464369a88e68c9006fa1115f5b61e5f
|
/C_LinkList/Swap/L2_24_Swap_Nodes_in_Pairs.py
|
875e98e38bc4183aafaf3c32d8ccd3bd631735f4
|
[] |
no_license
|
824zzy/Leetcode
|
9220f2fb13e03d601d2b471b5cfa0c2364dbdf41
|
93b7f4448a366a709214c271a570c3399f5fc4d3
|
refs/heads/master
| 2023-06-27T02:53:51.812177
| 2023-06-16T16:25:39
| 2023-06-16T16:25:39
| 69,733,624
| 14
| 3
| null | 2022-05-25T06:48:38
| 2016-10-01T10:56:07
|
Python
|
UTF-8
|
Python
| false
| false
| 526
|
py
|
""" https://leetcode.com/problems/swap-nodes-in-pairs/
from dba: https://leetcode.com/problems/swap-nodes-in-pairs/discuss/984392/Python-O(n)-solution-explained
"""
from header import *
class Solution:
def swapPairs(self, head: Optional[ListNode]) -> Optional[ListNode]:
ans = pre = ListNode(next=head)
while pre.next and pre.next.next:
a = pre.next
b = a.next
# 132 -> 213
pre.next, b.next, a.next = b, a, b.next
pre = a
return ans.next
|
[
"zhengyuan.zhu@mavs.uta.edu"
] |
zhengyuan.zhu@mavs.uta.edu
|
ed329fe829585a042a59d6c01959272c9d3ee31e
|
cdc48931cb3adb62c5e4963e43aeaf3cbc5080c4
|
/Scripts/Read_omi_data.py
|
c977f0e3517d4bf8d1888816d3f96f6023bdb222
|
[] |
no_license
|
giovannilopez9808/SEDEMA_2000_2019
|
ce8f1955b7d0f760485e2a984f36e72141867a0f
|
857be19c0acd9587904107ecd470b94a6a7d93b3
|
refs/heads/main
| 2023-06-25T10:32:06.536548
| 2021-08-04T19:55:16
| 2021-08-04T19:55:16
| 382,178,614
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,969
|
py
|
import matplotlib.pyplot as plt
import pandas as pd
def date_format(data):
data["Date"] = data["Datetime"].str[0:4]+"-" + \
data["Datetime"].str[4:6]+"-"+data["Datetime"].str[6:8]
data["Date"] = pd.to_datetime(data["Date"])
data.index = data["Date"]
data = data.drop(["Date", "Datetime"], 1)
return data
def clean_data(data, columns):
for column in data.columns:
if not column in columns:
data = data.drop(column, 1)
return data
def obtain_data_in_period(data, date_i, date_f):
data = data[data.index >= date_i]
data = data[data.index <= date_f]
return data
def drop_data_useless(data, columns, limit):
for column in columns:
data = data[data[column] < limit]
return data
inputs = {
"path data": "../Data/",
"file data": "Data_OMI_",
"product": "OMUVB",
"skiprows": 50,
"UVI limit": 18,
"UVIcolumns": ["CSUVindex", "UVindex"],
"file results": "UVI_",
"day initial": "2005-01-01",
"day final": "2019-12-31",
}
data = pd.read_fwf(inputs["path data"]+inputs["file data"]+inputs["product"]+".dat",
skiprows=inputs["skiprows"])
data = date_format(data)
data = clean_data(data,
inputs["UVIcolumns"])
data = obtain_data_in_period(data,
inputs["day initial"],
inputs["day final"])
data = drop_data_useless(data,
inputs["UVIcolumns"],
inputs["UVI limit"])
print(data.max())
for uvicolumn in inputs["UVIcolumns"]:
print("Creando archivo {}".format(uvicolumn))
data_UVI = data[uvicolumn]
print(data_UVI.count())
data_UVI.to_csv("{}{}{}.csv".format(inputs["path data"],
inputs["file results"],
uvicolumn),
float_format='%.4f')
|
[
"giovannilopez9808@gmail.com"
] |
giovannilopez9808@gmail.com
|
7d382bee63d4ab632c33c235b382848e1693243a
|
09fd456a6552f42c124c148978289fae1af2d5c3
|
/Graph/733.py
|
5ae87bd51e724a76ced10a0220dd9ab5401483da
|
[] |
no_license
|
hoang-ng/LeetCode
|
60b4e68cbcf54cbe763d1f98a70f52e628ab32fb
|
5407c6d858bfa43325363503c31134e560522be3
|
refs/heads/master
| 2021-04-10T11:34:35.310374
| 2020-07-28T10:22:05
| 2020-07-28T10:22:05
| 248,932,393
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,084
|
py
|
# 733. Flood Fill
# An image is represented by a 2-D array of integers, each integer representing the pixel value of the image (from 0 to 65535).
# Given a coordinate (sr, sc) representing the starting pixel (row and column) of the flood fill, and a pixel value newColor, "flood fill" the image.
# To perform a "flood fill", consider the starting pixel, plus any pixels connected 4-directionally to the starting pixel of the same color as the starting pixel, plus any pixels connected 4-directionally to those pixels (also with the same color as the starting pixel), and so on. Replace the color of all of the aforementioned pixels with the newColor.
# At the end, return the modified image.
# Example 1:
# Input:
# image = [[1,1,1],[1,1,0],[1,0,1]]
# sr = 1, sc = 1, newColor = 2
# Output: [[2,2,2],[2,2,0],[2,0,1]]
# Explanation:
# From the center of the image (with position (sr, sc) = (1, 1)), all pixels connected
# by a path of the same color as the starting pixel are colored with the new color.
# Note the bottom corner is not colored 2, because it is not 4-directionally connected
# to the starting pixel.
# Note:
# The length of image and image[0] will be in the range [1, 50].
# The given starting pixel will satisfy 0 <= sr < image.length and 0 <= sc < image[0].length.
# The value of each color in image[i][j] and newColor will be an integer in [0, 65535].
class Solution(object):
def floodFill(self, image, sr, sc, newColor):
oldColor = image[sr][sc]
if oldColor != newColor:
self.dfs(image, sr, sc, oldColor, newColor)
return image
def dfs(self, image, i, j, oldColor, newColor):
if i < 0 or j < 0 or i >= len(image) or j >= len(image[i]) or image[i][j] != oldColor:
return
image[i][j] = newColor
self.dfs(image, i + 1, j, oldColor, newColor)
self.dfs(image, i - 1, j, oldColor, newColor)
self.dfs(image, i, j + 1, oldColor, newColor)
self.dfs(image, i, j - 1, oldColor, newColor)
sol = Solution()
sol.floodFill([[0,0,0],[0,1,1]], 1, 1, 1)
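# A quick, hedged check (added): the example from the problem statement above,
# printed so the expected output is easy to compare.
print(sol.floodFill([[1, 1, 1], [1, 1, 0], [1, 0, 1]], 1, 1, 2))
# expected: [[2, 2, 2], [2, 2, 0], [2, 0, 1]]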
|
[
"hoang2109@gmail.com"
] |
hoang2109@gmail.com
|
36c8aa5f1334f81fcdcf3cffaf6fe2f5836d7abe
|
951fc0da7384b961726999e5451a10e2783462c4
|
/script.module.exodusscrapers/lib/exodusscrapers/sources_placenta/en_placenta-1.7.8/ororo.py
|
da4be034f81d870663f7cf7dbc21eacd06fa0f60
|
[
"Beerware"
] |
permissive
|
vphuc81/MyRepository
|
eaf7b8531b2362f0e0de997a67b889bc114cd7c2
|
9bf8aca6de07fcd91bcec573f438f29e520eb87a
|
refs/heads/master
| 2022-01-02T15:07:35.821826
| 2021-12-24T05:57:58
| 2021-12-24T05:57:58
| 37,680,232
| 6
| 10
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,444
|
py
|
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Exodus
# Addon id: plugin.video.exodus
# Addon Provider: Exodus
import re,urlparse,json,base64
from resources.lib.modules import cache
from resources.lib.modules import control
from resources.lib.modules import client
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['ororo.tv']
self.base_link = 'https://ororo.tv'
self.moviesearch_link = '/api/v2/movies'
self.tvsearch_link = '/api/v2/shows'
self.movie_link = '/api/v2/movies/%s'
self.show_link = '/api/v2/shows/%s'
self.episode_link = '/api/v2/episodes/%s'
self.user = control.setting('ororo.user')
self.password = control.setting('ororo.pass')
self.headers = {
'Authorization': 'Basic %s' % base64.b64encode('%s:%s' % (self.user, self.password)),
'User-Agent': 'Exodus for Kodi'
}
def movie(self, imdb, title, localtitle, aliases, year):
try:
if (self.user == '' or self.password == ''): raise Exception()
url = cache.get(self.ororo_moviecache, 60, self.user)
url = [i[0] for i in url if imdb == i[1]][0]
url= self.movie_link % url
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
if (self.user == '' or self.password == ''): raise Exception()
url = cache.get(self.ororo_tvcache, 120, self.user)
url = [i[0] for i in url if imdb == i[1]][0]
url= self.show_link % url
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if (self.user == '' or self.password == ''): raise Exception()
if url == None: return
url = urlparse.urljoin(self.base_link, url)
r = client.request(url, headers=self.headers)
r = json.loads(r)['episodes']
r = [(str(i['id']), str(i['season']), str(i['number']), str(i['airdate'])) for i in r]
url = [i for i in r if season == '%01d' % int(i[1]) and episode == '%01d' % int(i[2])]
url += [i for i in r if premiered == i[3]]
url= self.episode_link % url[0][0]
return url
except:
return
def ororo_moviecache(self, user):
try:
url = urlparse.urljoin(self.base_link, self.moviesearch_link)
r = client.request(url, headers=self.headers)
r = json.loads(r)['movies']
r = [(str(i['id']), str(i['imdb_id'])) for i in r]
r = [(i[0], 'tt' + re.sub('[^0-9]', '', i[1])) for i in r]
return r
except:
return
def ororo_tvcache(self, user):
try:
url = urlparse.urljoin(self.base_link, self.tvsearch_link)
r = client.request(url, headers=self.headers)
r = json.loads(r)['shows']
r = [(str(i['id']), str(i['imdb_id'])) for i in r]
r = [(i[0], 'tt' + re.sub('[^0-9]', '', i[1])) for i in r]
return r
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
if (self.user == '' or self.password == ''): raise Exception()
url = urlparse.urljoin(self.base_link, url)
url = client.request(url, headers=self.headers)
url = json.loads(url)['url']
sources.append({'source': 'ororo', 'quality': 'HD', 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
return sources
except:
return sources
def resolve(self, url):
return url
|
[
"vinhphuc_81@yahoo.com"
] |
vinhphuc_81@yahoo.com
|
0e4ea3b20b875aa0d0f525b5688b469b1ae1cd07
|
242f1dafae18d3c597b51067e2a8622c600d6df2
|
/src/1300-1399/1389.create.target.array.in.given.order.py
|
fbf27d7eff98adccaa487419a2b2644f45f97e17
|
[] |
no_license
|
gyang274/leetcode
|
a873adaa083270eb05ddcdd3db225025533e0dfe
|
6043134736452a6f4704b62857d0aed2e9571164
|
refs/heads/master
| 2021-08-07T15:15:01.885679
| 2020-12-22T20:57:19
| 2020-12-22T20:57:19
| 233,179,192
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
from typing import List
class Solution:
def createTargetArray(self, nums: List[int], index: List[int]) -> List[int]:
ans = []
for i, x in zip(index, nums):
ans.insert(i, x)
return ans
if __name__ == '__main__':
solver = Solution()
cases = [
([1], [0]),
([0,1,2,3,4], [0,1,2,2,1]),
([1,2,3,4,0], [0,1,2,3,0]),
]
rslts = [solver.createTargetArray(nums, index) for nums, index in cases]
for cs, rs in zip(cases, rslts):
print(f"case: {cs} | solution: {rs}")
|
[
"gyang274@gmail.com"
] |
gyang274@gmail.com
|
7c7a13988c1414c47d5397c1e6f97e7be4e23afa
|
90b95ac525ee731ec5ba7d5da5c9038396ac4c3d
|
/zoom_data/migrations/0032_auto_20180129_1556.py
|
852054a2c4f744955572c6156d903d4e86f2e8a1
|
[] |
no_license
|
5klynna5/zoom_c
|
33364146915611917ae0e6e0fd49233370424929
|
59c39eece1dd0ad5e7e210f4f03d8bb64df44b98
|
refs/heads/master
| 2021-05-12T04:06:39.031130
| 2018-08-04T23:52:19
| 2018-08-04T23:52:19
| 117,153,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,542
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('zoom_data', '0031_auto_20180129_1151'),
]
operations = [
migrations.AlterField(
model_name='annual',
name='annual_income',
field=models.SmallIntegerField(help_text='in U.S. dollars', blank=True),
),
migrations.AlterField(
model_name='annual',
name='employment_status',
field=models.CharField(choices=[('Yes', 'YES'), ('No', 'NO')], null=True, max_length=3, blank=True),
),
migrations.AlterField(
model_name='annual',
name='student_status',
field=models.CharField(choices=[('Yes', 'YES'), ('No', 'NO')], null=True, max_length=3, blank=True),
),
migrations.AlterField(
model_name='contact',
name='contact_pref',
field=models.CharField(choices=[('Email', 'EMAIL'), ('Call', 'CALL'), ('Text', 'TEXT'), ('Mail', 'MAIL'), ('Facebook', 'FACEBOOK')], null=True, max_length=8, blank=True),
),
migrations.AlterField(
model_name='contact',
name='permission_call',
field=models.CharField(choices=[('Yes', 'YES'), ('No', 'NO')], null=True, max_length=3, blank=True),
),
migrations.AlterField(
model_name='contact',
name='permission_email',
field=models.CharField(choices=[('Yes', 'YES'), ('No', 'NO')], null=True, max_length=3, blank=True),
),
migrations.AlterField(
model_name='contact',
name='permission_facebook',
field=models.CharField(choices=[('Yes', 'YES'), ('No', 'NO')], null=True, max_length=3, blank=True),
),
migrations.AlterField(
model_name='contact',
name='permission_mail',
field=models.CharField(choices=[('Yes', 'YES'), ('No', 'NO')], null=True, max_length=3, blank=True),
),
migrations.AlterField(
model_name='contact',
name='permission_photo',
field=models.CharField(choices=[('Yes', 'YES'), ('No', 'NO')], null=True, max_length=3, blank=True),
),
migrations.AlterField(
model_name='contact',
name='permission_text',
field=models.CharField(choices=[('Yes', 'YES'), ('No', 'NO')], null=True, max_length=3, blank=True),
),
]
|
[
"anderson.kirsten.l@gmail.com"
] |
anderson.kirsten.l@gmail.com
|
10b42b9c096fb19446e043163c9cfb3ae6a2ed9d
|
098f80474295aa024657330b8f0813eca7d015c2
|
/UnrealPythonLibrary/PythonLibraries/PythonHelpers.py
|
9614830514ae6520ef42d4429675bed7ed05a765
|
[] |
no_license
|
sniler/UnrealScript
|
e4c4387caa6402a61b4bf0ba8952faf598e4464e
|
a4587d578366551b2470862f18b33c42439c5cdd
|
refs/heads/master
| 2023-04-01T18:45:37.803690
| 2021-04-13T11:25:13
| 2021-04-13T11:25:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 929
|
py
|
# unreal._ObjectBase
# https://api.unrealengine.com/INT/PythonAPI/class/_ObjectBase.html
import unreal
# object_to_cast: obj unreal.Object : The object you want to cast
# object_class: obj unreal.Class : The class you want to cast the object into
def cast(object_to_cast=None, object_class=None):
try:
return object_class.cast(object_to_cast)
except:
return None
# Cpp ########################################################################################################################################################################################
# Note: Also work using the command : help(unreal.StaticMesh)
# unreal_class: obj : The class you want to know the properties
# return: str List : The available properties (formatted the way you can directly use them to get their values)
def getAllProperties(unreal_class=None):
return unreal.CppLib.get_all_properties(unreal_class)
|
[
"timmyliang@tencent.com"
] |
timmyliang@tencent.com
|
ff61f9d12ceebe86532e622aaecb819f0c39eb8f
|
e7b7505c084e2c2608cbda472bc193d4a0153248
|
/LeetcodeNew/python/LC_573.py
|
a585295ef01ebf47f30590d98f2feb1f45f6657f
|
[] |
no_license
|
Taoge123/OptimizedLeetcode
|
8e5c1cd07904dfce1248bc3e3f960d2f48057a5d
|
3e50f6a936b98ad75c47d7c1719e69163c648235
|
refs/heads/master
| 2023-02-27T21:13:40.450089
| 2023-02-07T04:11:09
| 2023-02-07T04:11:09
| 170,044,224
| 9
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,263
|
py
|
"""
There's a tree, a squirrel, and several nuts. Positions are represented by the cells in a 2D grid. Your goal is to find the minimal distance for the squirrel to collect all the nuts and put them under the tree one by one. The squirrel can only take at most one nut at one time and can move in four directions - up, down, left and right, to the adjacent cell. The distance is represented by the number of moves.
Example 1:
Input:
Height : 5
Width : 7
Tree position : [2,2]
Squirrel : [4,4]
Nuts : [[3,0], [2,5]]
Output: 12
Explanation:
Note:
All given positions won't overlap.
The squirrel can take at most one nut at one time.
The given positions of nuts have no order.
Height and width are positive integers. 3 <= height * width <= 10,000.
The given positions contain at least one nut, only one tree and one squirrel.
"""
class Solution:
def minDistance(self, height: int, width: int, tree, squirrel, nuts) -> int:
total = 0
dis = float('-inf')
for nut in nuts:
total += self.distance(nut, tree) * 2
dis = max(dis, self.distance(nut, tree) - self.distance(nut, squirrel))
return total - dis
def distance(self, a, b):
return abs(a[0] - b[0]) + abs(a[1] - b[1])
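# A minimal usage sketch (hedged addition): reproduces the docstring example
# above (tree [2,2], squirrel [4,4], nuts [[3,0],[2,5]]) and should print 12.
if __name__ == '__main__':
    print(Solution().minDistance(5, 7, [2, 2], [4, 4], [[3, 0], [2, 5]]))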
|
[
"taocheng984@gmail.com"
] |
taocheng984@gmail.com
|
56113bdeea081fbc16893e7fdd670b1aea96fa36
|
4cdd73fe38027d41bda2959f940fc8a2a6c4ca78
|
/l10n_ve_islr_report/__openerp__.py
|
437bf359edf4cb159557ab769dca6e3d5272dd75
|
[] |
no_license
|
adrt271988/l10n_ve
|
af408fcc0bd2c87475beccd5ec92ee180d35a0d8
|
0a762490f4ee0a4257fb75dc5ea5607dec91d3bd
|
refs/heads/master
| 2020-04-05T14:04:14.374612
| 2016-09-05T22:19:54
| 2016-09-05T22:19:54
| 53,200,710
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,654
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Formato Comprobante ISLR Venezuela',
'category': 'Account',
'version': '1.0',
'description': """
Formato Comprobante ISLR Venezuela
====================================================
    * Qweb adaptation of the "Comprobante de Retención ISLR" (ISLR withholding voucher) report format
""",
'author': 'Alexander Rodriguez <adrt271988@gmail.com>',
'website': '',
'depends': ['l10n_ve_withholding_islr','report'],
'data': [
'report/islr_wh_doc_report.xml',
'account_report.xml',
],
'demo': [],
'test': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"adrt271988@gmail.com"
] |
adrt271988@gmail.com
|
f49db6663ec0211a03497fae9b0a7d5b4a7ae930
|
e5f1befb7c7ca0072747b33086fc6569a6befd01
|
/old/videos/02.py
|
18f9ca234fd808ffb6c7a2703c07f80bf5cdd3a6
|
[] |
no_license
|
nepomnyashchii/TestGit
|
ae08d8bb1b7d2ab9389a309fd1dc9e24729b019c
|
c7abf4ab08ee3c2f3ea1fb09a1938bff7a3e0e5c
|
refs/heads/master
| 2020-04-28T23:41:51.053547
| 2020-01-24T12:22:40
| 2020-01-24T12:22:40
| 175,666,093
| 0
| 1
| null | 2019-03-15T13:44:03
| 2019-03-14T17:08:58
| null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
import random
from random import randint
import math
# from random import *
from math import sqrt
from math import sqrt as my_sqrt
for element in range (10):
print(random.randint(1, 10))
#random object (randint function as method and 1,10 are arguments)
num =10
print(math.sqrt(num))
print(sqrt(num))
print(randint (1,10))
def sqrt():
print("my function")
# sqrt()
print(my_sqrt(25))
# STL # standard library of python
# PyPi
|
[
"nepomnyashchii@gmail.com"
] |
nepomnyashchii@gmail.com
|
abf24deb33aa929ed64e1f57511d697e2db26a85
|
e1436eb68e51dcd1becb7e0f8671b51eb4b23ec0
|
/desktop/kde/applications/parley/actions.py
|
4ecd82fc5604b7c01eeb26036ad05c7448cec256
|
[] |
no_license
|
SulinOS/SulinKDE
|
bef0ebbecafa6082ad7599f377c95573468827fb
|
9984e0f40a5a011e59d439a24856bde78deea1c2
|
refs/heads/master
| 2020-09-16T05:34:20.333558
| 2020-06-10T08:10:53
| 2020-06-10T08:10:53
| 223,669,603
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from inary.actionsapi import inarytools
from inary.actionsapi import kde
def setup():
kde.configure()
def build():
kde.make()
def install():
kde.install()
inarytools.dodoc("AUTHORS", "COPYING*", "TODO*")
|
[
"zaryob.dev@gmail.com"
] |
zaryob.dev@gmail.com
|
cf29657646e6dc10ca79da4d4d8f025b52a0bdd1
|
7416056e689dfc94391c4b108652cea02d59a31a
|
/reservation/migrations/0009_auto_20200128_0154.py
|
ee59b4e4dcefa53f8ef69cf34200507a3c15f18a
|
[] |
no_license
|
zshanabek/house-booking-app
|
0ea29fb8113671eb164ead8d335a986b850898a1
|
cca5225f40b8a055a2db78810258325f2ba7ded1
|
refs/heads/master
| 2022-11-28T00:20:12.789534
| 2020-08-14T09:16:40
| 2020-08-14T09:16:40
| 225,791,244
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 532
|
py
|
# Generated by Django 2.2.7 on 2020-01-27 19:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reservation', '0008_auto_20200125_2102'),
]
operations = [
migrations.AlterField(
model_name='reservation',
name='check_in',
field=models.DateField(),
),
migrations.AlterField(
model_name='reservation',
name='check_out',
field=models.DateField(),
),
]
|
[
"zshanabek@gmail.com"
] |
zshanabek@gmail.com
|
f662e13a92f620780fa576c53bfe5eaaf4dc40d3
|
5dfbfa153f22b3f58f8138f62edaeef30bad46d3
|
/ros_ws/build/baxter_examples/catkin_generated/pkg.develspace.context.pc.py
|
ce6228515fedb8113370e7fdd69d2285856a9049
|
[] |
no_license
|
adubredu/rascapp_robot
|
f09e67626bd5a617a569c9a049504285cecdee98
|
29ace46657dd3a0a6736e086ff09daa29e9cf10f
|
refs/heads/master
| 2022-01-19T07:52:58.511741
| 2019-04-01T19:22:48
| 2019-04-01T19:22:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 587
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/bill/bill_ros/ros_ws/devel/include".split(';') if "/home/bill/bill_ros/ros_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "rospy;xacro;actionlib;sensor_msgs;control_msgs;trajectory_msgs;cv_bridge;dynamic_reconfigure;baxter_core_msgs;baxter_interface".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "baxter_examples"
PROJECT_SPACE_DIR = "/home/bill/bill_ros/ros_ws/devel"
PROJECT_VERSION = "1.2.0"
|
[
"alphonsusbq436@gmail.com"
] |
alphonsusbq436@gmail.com
|
5b7b08cc64f27b668429b60c89c6dfff39a1be47
|
3e2ec14daf3e246334e175719bc38adcf15cee5a
|
/challenges/graphs/black_shapes.py
|
711d0422815e2bd4f14aaa3777a4b3ce9f1aaf6a
|
[
"CC0-1.0"
] |
permissive
|
lukasmartinelli/sharpen
|
a616ee981d81efb2c844c5106ce30bd97f36e034
|
6f314fc2aa17990ede04055e7c3ac9394a6c12c0
|
refs/heads/master
| 2021-01-20T12:11:25.452306
| 2019-06-08T21:06:12
| 2019-06-08T21:06:12
| 58,558,368
| 13
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,670
|
py
|
import collections
def adjacent_black_fields(matrix, row_idx, col_idx):
adjacent = [(row_idx + 1, col_idx), (row_idx - 1, col_idx),
(row_idx, col_idx + 1), (row_idx, col_idx - 1)]
def is_within_matrix(row_idx, col_idx):
row_count = len(matrix)
col_count = len(matrix[0])
return 0 <= row_idx < row_count and 0 <= col_idx < col_count
def is_black(row_idx, col_idx):
return matrix[row_idx][col_idx] == 'X'
return [f for f in adjacent if is_within_matrix(f[0], f[1]) and
is_black(f[0], f[1])]
def find_black_fields(matrix):
for row_idx, row in enumerate(matrix):
for col_idx, field in enumerate(row):
if field == 'X':
yield (row_idx, col_idx)
def count_black_shapes(matrix):
part_of_shape = {}
def is_part_of_shape(row_idx, col_idx):
return (row_idx, col_idx) in part_of_shape
def mark_shape(row_idx, col_idx):
part_of_shape[(row_idx, col_idx)] = True
for row_idx, col_idx in adjacent_black_fields(matrix, row_idx, col_idx):
if not is_part_of_shape(row_idx, col_idx):
mark_shape(row_idx, col_idx)
shape_count = 0
for row_idx, col_idx in find_black_fields(matrix):
if not is_part_of_shape(row_idx, col_idx):
shape_count += 1
mark_shape(row_idx, col_idx)
return shape_count
def test_single_black_shape():
matrix = ['XXX', 'XXX', 'XXX']
assert count_black_shapes(matrix) == 1
def test_multiple_black_shape():
matrix = ['OOOXOOO',
'OOXXOXO',
'OXOOOXO']
assert count_black_shapes(matrix) == 3
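# A small runner (hedged addition) so the checks above can be executed
# directly, without pytest.
if __name__ == '__main__':
    test_single_black_shape()
    test_multiple_black_shape()
    print("all shape checks passed")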
|
[
"me@lukasmartinelli.ch"
] |
me@lukasmartinelli.ch
|
83f67eb1126eb3952caf34740d621467f28863e0
|
3a19c1b17f553b6d54e5c345d550ca494c3593e1
|
/td1-problem22.py
|
4130c59d812ac8cfd5ae3a90bfb24b370ea9992d
|
[] |
no_license
|
mines-nancy-tcss5ac-2018/td1-TomLaville
|
95faf73aca9375fe7ba990043e9c371713524eaa
|
4967cda4594b7706d8edcdaf99a7945ea90ad8e3
|
refs/heads/master
| 2020-03-31T00:16:54.328127
| 2018-10-07T19:05:42
| 2018-10-07T19:05:42
| 151,733,523
| 0
| 0
| null | null | null | null |
IBM852
|
Python
| false
| false
| 876
|
py
|
values = ["\"","A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M","N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]
def scoreName(nom):
s = 0
for char in nom:
s+= values.index(char)
return s
def solve():
    noms = [] ## list holding all the names
    ## list used for the score
    ## convert the txt file into a list of names
f = open('p022_names.txt', 'r')
for l in f:
        noms += l.split(',') ## read the file and split the names on ','
        ## so the surrounding "" quotes are still there
    ## sort
noms_tries = sorted(noms, reverse = False)
    ## compute the score
score_tot = 0
for i in range(len(noms_tries)):
score_tot += (i+1)*scoreName(noms_tries[i])
return score_tot
print(solve())
|
[
"noreply@github.com"
] |
mines-nancy-tcss5ac-2018.noreply@github.com
|
7acffc09af312c4cf50348b8c11ac1c2f7a9299c
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/16/usersdata/134/7136/submittedfiles/triangulo.py
|
ed25ffd7bbcc72ababe8bfa8ba20892f52ef7ec7
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
a = input('Digite o valor de a:')
b = input('Digite o valor de b:')
c = input('Digite o valor de c:')
if a>=b>=c and a<(b+c) :
print ('S')
if (a**2)==((b**2)+(c**2)):
print
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
05947191f7a5ddb2a9ff5e8e0385d1616f07bd04
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/machinelearningservices/v20210301preview/get_job.py
|
e9a9f325635bdf8571c3b2144867f1b62d2a343b
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 4,080
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetJobResult',
'AwaitableGetJobResult',
'get_job',
]
@pulumi.output_type
class GetJobResult:
"""
Azure Resource Manager resource envelope.
"""
def __init__(__self__, id=None, name=None, properties=None, system_data=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> Any:
"""
Additional attributes of the entity.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
System data associated with resource provider
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetJobResult(GetJobResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetJobResult(
id=self.id,
name=self.name,
properties=self.properties,
system_data=self.system_data,
type=self.type)
def get_job(id: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetJobResult:
"""
Azure Resource Manager resource envelope.
:param str id: The name and identifier for the Job.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: Name of Azure Machine Learning workspace.
"""
__args__ = dict()
__args__['id'] = id
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20210301preview:getJob', __args__, opts=opts, typ=GetJobResult).value
return AwaitableGetJobResult(
id=__ret__.id,
name=__ret__.name,
properties=__ret__.properties,
system_data=__ret__.system_data,
type=__ret__.type)
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
69291f4bb7b082fcb63d8d0a0ab580ce63b63c2a
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/GX3pQxvbTJApWYgRJ_22.py
|
ea51d20994790e177a6d6bdea78c3a0e36e6bd7b
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,291
|
py
|
"""
A Kaprekar Number is a positive integer that is equal to a number formed by
first squaring, then splitting and summing its two lexicographical parts:
* If the quantity of digits of the squared number is even, the left and right parts will have the same length.
* If the quantity of digits of the squared number is odd, then the right part will be the longer half, with the left part being the shorter or equal to zero if the quantity of digits is equal to 1.
Given a positive integer `n` implement a function that returns `True` if it's
a Kaprekar number, and `False` if it's not.
### Examples
is_kaprekar(3) ➞ False
# n² = "9"
# Left + Right = 0 + 9 = 9 ➞ 9 != 3
is_kaprekar(5) ➞ False
# n² = "25"
# Left + Right = 2 + 5 = 7 ➞ 7 != 5
is_kaprekar(297) ➞ True
# n² = "88209"
# Left + Right = 88 + 209 = 297 ➞ 297 == 297
### Notes
Trivially, 0 and 1 are Kaprekar Numbers being the only two numbers equal to
their square. Any number formed only by digits equal to _9_ will always be a
Kaprekar Number.
"""
def is_kaprekar(n):
if n in [0,1]:
return True
test = str(n**2)
if len(test) == 1:
return False
left = test[:len(test)//2]
right = test[len(test)//2:]
return int(left) + int(right) == n
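# A brief, hedged check (added): the three examples from the docstring above.
if __name__ == '__main__':
    for _n, _expected in [(3, False), (5, False), (297, True)]:
        assert is_kaprekar(_n) is _expected, _n
    print("docstring examples hold")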
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
a56febed6885c0b35f0a30ddce4934f5b6836066
|
53396d12d606bebea71c149aed0150af7b17b6f5
|
/array/medium/221-maximal-square-1.py
|
b3885e1f68c33ae2c1dc19434984e2ec1137c8ff
|
[] |
no_license
|
superggn/myleetcode
|
4c623bd9ad3892d826df73ad3b2c122e08aaa9e9
|
40ca33aefbf0cf746a2d0b7e7f52643ae39591be
|
refs/heads/master
| 2023-02-02T11:06:35.163570
| 2020-12-19T10:36:45
| 2020-12-19T10:36:45
| 322,821,962
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 837
|
py
|
"""
dp
https://leetcode-cn.com/problems/maximal-square/solution/zui-da-zheng-fang-xing-by-leetcode-solution/
"""
from typing import List
class Solution:
def maximalSquare(self, matrix: List[List[str]]) -> int:
if len(matrix) == 0 or len(matrix[0]) == 0:
return 0
maxSide = 0
rows, columns = len(matrix), len(matrix[0])
dp = [[0] * columns for _ in range(rows)]
for i in range(rows):
for j in range(columns):
if matrix[i][j] == '1':
if i == 0 or j == 0:
dp[i][j] = 1
else:
dp[i][j] = min(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1]) + 1
maxSide = max(maxSide, dp[i][j])
maxSquare = maxSide * maxSide
return maxSquare
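# A short usage sketch (added): the grid below is the standard LeetCode 221
# sample, assumed here purely for illustration; the expected result is 4.
if __name__ == '__main__':
    _grid = [["1", "0", "1", "0", "0"],
             ["1", "0", "1", "1", "1"],
             ["1", "1", "1", "1", "1"],
             ["1", "0", "0", "1", "0"]]
    print(Solution().maximalSquare(_grid))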
|
[
"939401399@qq.com"
] |
939401399@qq.com
|
33dc808475234978f454c5997f1ea1bd3996a31e
|
05ace4491b97699333057e35f7e9225864f7130d
|
/dygraphsex/urls.py
|
689201cc230eb577bc30387699482d5ada5e4179
|
[] |
no_license
|
scott858/dajs
|
b6878123748f563550fa2f5e59b1d5dcd4fdcaa5
|
bc6b23d0e24be038e278490e34422d69b06d6543
|
refs/heads/master
| 2021-01-01T16:12:43.477340
| 2015-09-18T01:04:55
| 2015-09-18T01:04:55
| 41,645,401
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns(
'',
url(r'^$', views.example_app_view, name='main'),
url(r'^plot/$', views.plot_view, name='plot'),
)
|
[
"scott.lee@nucleusscientific.com"
] |
scott.lee@nucleusscientific.com
|
8de4c818ca766df5a345ae0b90065e5d770de5b1
|
bc4656f6f74911f114626538294e0e275105c703
|
/tests/dat/test_dar_packet.py
|
b02d91483b4179221c025a2cbc331f1950d03916
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
AdrianCano-01/spsdk
|
d8679ae58fc67c6369bceff4b31db658d9ad6bc4
|
4a31fb091f95fb035bc66241ee4e02dabb580072
|
refs/heads/master
| 2023-03-15T00:37:07.419191
| 2021-03-05T16:33:50
| 2021-03-05T16:33:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,839
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright 2020 NXP
#
# SPDX-License-Identifier: BSD-3-Clause
"""Tests with Debug Authentication Packet (DAR) Packet."""
import os
import pytest
import yaml
from spsdk.dat.dar_packet import DebugAuthenticateResponse
from spsdk.dat import DebugAuthenticationChallenge as DAC
from spsdk.dat.debug_credential import DebugCredential as DC
from spsdk.utils.misc import load_binary, use_working_directory
@pytest.mark.parametrize(
"yml_file_name, dac_bin_file, version, dck_key_file, expected_length",
[
('new_dck_rsa2048.yml', 'sample_dac.bin', '1.0', 'new_dck_2048.pem', 1200),
('new_dck_secp256.yml', 'sample_dac_ecc.bin', '2.0', 'new_dck_secp256r1.pem', 968)
]
)
def test_dar_packet_rsa(tmpdir, data_dir, yml_file_name, version, dck_key_file, expected_length, dac_bin_file):
with use_working_directory(data_dir):
dac_bytes = load_binary(os.path.join(data_dir, dac_bin_file))
with open(os.path.join(data_dir, yml_file_name), 'r') as f:
yaml_config = yaml.safe_load(f)
dc = DC.create_from_yaml_config(version=version, yaml_config=yaml_config)
dc.sign()
assert dc.VERSION == DAC.parse(dac_bytes).version, "Version of DC and DAC are different."
dar = DebugAuthenticateResponse.create(version=version, socc=dc.socc, dc=dc,
auth_beacon=0, dac=DAC.parse(dac_bytes),
dck=os.path.join(data_dir, dck_key_file))
dar_bytes = dar.export()
assert len(dar_bytes) == expected_length
assert isinstance(dar_bytes, bytes)
assert 'Authentication Beacon' in dar.info()
@pytest.mark.parametrize(
"yml_file_name, version, file_key, expected_length",
[
('new_dck_secp256_N4A.yml', '2.0', 'new_dck_secp256r1.pem', 316),
('new_dck_secp384_N4A.yml', '2.1', 'new_dck_secp384r1.pem', 444)
]
)
def test_dar_packet_4_analog_256(tmpdir, data_dir, yml_file_name, version, file_key, expected_length):
with use_working_directory(data_dir):
dac_bytes = load_binary(os.path.join(data_dir, 'sample_dac_analog.bin'))
with open(os.path.join(data_dir, yml_file_name), 'r') as f:
yaml_config = yaml.safe_load(f)
dc = DC.create_from_yaml_config(version=version, yaml_config=yaml_config)
dc.sign()
dar = DebugAuthenticateResponse.create(version=version, socc=dc.socc, dc=dc,
auth_beacon=0, dac=DAC.parse(dac_bytes),
dck=os.path.join(data_dir, file_key))
dar_bytes = dar.export()
assert len(dar_bytes) == expected_length
assert isinstance(dar_bytes, bytes)
assert 'Authentication Beacon' in dar.info()
|
[
"maria.wisniewska@nxp.com"
] |
maria.wisniewska@nxp.com
|
ef1d77c3ead5c963da4bda0a6758391542a24536
|
234b581de16f0eebfe3db5281d2920d50e3a3631
|
/src/com/dtmilano/android/adb/dumpsys.py
|
efd9d4962f5cb6e26555547c6475bd84fb789c06
|
[
"Apache-2.0"
] |
permissive
|
jili0503/AndroidViewClient
|
3d453884d68b508fe4d5d28f5bcea0db0cad6062
|
c1c38e6fa53dc09697eadb9c1670d6bef8587ab6
|
refs/heads/master
| 2020-03-06T21:07:33.744022
| 2018-03-22T04:15:50
| 2018-03-22T04:15:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,612
|
py
|
'''
Copyright (C) 2012-2018 Diego Torres Milano
Created on Dec 1, 2012
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: Diego Torres Milano
'''
from __future__ import print_function
import re
import sys
from _warnings import warn
__version__ = '14.0.0'
DEBUG = False
class Dumpsys:
FRAMESTATS = 'framestats'
GFXINFO = 'gfxinfo'
MEMINFO = 'meminfo'
RESET = 'reset'
ACTIVITIES = 'activities'
TOTAL = 'total'
VIEW_ROOT_IMPL = 'viewRootImpl'
VIEWS = 'views'
FLAGS = 0
INTENDED_VSYNC = 1
FRAME_COMPLETED = 13
def __init__(self, adbclient, subcommand, *args):
self.nativeHeap = -1
self.dalvikHeap = -1
self.total = 0
self.views = -1
self.activities = -1
self.appContexts = -1
self.viewRootImpl = -1
self.gfxProfileData = []
self.framestats = []
if args:
args_str = ' '.join(args)
else:
args_str = ''
if adbclient:
cmd = 'dumpsys ' + subcommand + (' ' + args_str if args_str else '')
self.parse(adbclient.shell(cmd), subcommand, *args)
else:
warn('No adbclient specified')
@staticmethod
def listSubCommands(adbclient):
return Dumpsys(adbclient, '-l')
@staticmethod
def meminfo(adbclient, args=None):
return Dumpsys(adbclient, Dumpsys.MEMINFO, args)
def get(self, name):
return getattr(self, name)
def parse(self, out, subcommand, *args):
if subcommand == Dumpsys.MEMINFO:
self.parseMeminfo(out)
elif subcommand == Dumpsys.GFXINFO:
if Dumpsys.RESET in args:
# Actually, reset does not need to parse anything
pass
elif Dumpsys.FRAMESTATS in args:
self.parseGfxinfoFramestats(out)
else:
self.parseGfxinfo(out)
        elif subcommand == '-l':
# list dumpsys subcommands
return out
else:
pass
def parseMeminfo(self, out):
m = re.search('Native Heap[ \t]*(\d+)', out, re.MULTILINE)
if m:
self.nativeHeap = int(m.group(1))
m = re.search('Dalvik Heap[ \t]*(\d+)', out, re.MULTILINE)
if m:
self.dalvikHeap = int(m.group(1))
m = re.search('Views:[ \t]*(\d+)', out, re.MULTILINE)
if m:
self.views = int(m.group(1))
m = re.search('Activities:[ \t]*(\d+)', out, re.MULTILINE)
if m:
self.activities = int(m.group(1))
m = re.search('AppContexts:[ \t]*(\d+)', out, re.MULTILINE)
if m:
self.appContexts = int(m.group(1))
m = re.search('ViewRootImpl:[ \t]*(\d+)', out, re.MULTILINE)
if m:
self.viewRootImpl = int(m.group(1))
m = re.search('TOTAL[ \t]*(\d+)', out, re.MULTILINE)
if m:
self.total = int(m.group(1))
else:
raise RuntimeError('Cannot find TOTAL in "' + out + '"')
def parseGfxinfo(self, out):
pass
def parseGfxinfoFramestats(self, out):
pd = '---PROFILEDATA---'
l = re.findall(r'%s.*?%s' % (pd, pd), out, re.DOTALL)
if l:
s = ''
for e in l:
if not e:
continue
sl = e.splitlines()
for s in sl:
if s == pd:
continue
pda = s.split(',')
if pda[Dumpsys.FLAGS] == 'Flags':
if pda[Dumpsys.INTENDED_VSYNC] != 'IntendedVsync' and pda[
Dumpsys.FRAME_COMPLETED] != 'FrameCompleted':
raise RuntimeError('Unsupported gfxinfo version')
continue
if pda[Dumpsys.FLAGS] == '0':
# Only keep lines with Flags=0
# If this is non-zero the row should be ignored, as the frame has been determined as being an
# outlier from normal performance, where it is expected that layout & draw take longer than
# 16ms.
# See https://developer.android.com/training/testing/performance.html#timing-info for details
# on format
if DEBUG:
print('pda={}'.format(pda), file=sys.stderr)
self.gfxProfileData.append(pda[:-1])
# All done! The total time spent working on this frame can be computed by doing
# FRAME_COMPLETED - INTENDED_VSYNC.
self.framestats.append(
(int(pda[Dumpsys.FRAME_COMPLETED]) - int(pda[Dumpsys.INTENDED_VSYNC])) / 10 ** 6)
else:
raise RuntimeError('No profile data found')
@staticmethod
def gfxinfo(adbclient, *args):
return Dumpsys(adbclient, Dumpsys.GFXINFO, *args)
@staticmethod
def resetGfxinfo(adbclient, pkg):
return Dumpsys(adbclient, Dumpsys.GFXINFO, pkg, Dumpsys.RESET)
|
[
"dtmilano@gmail.com"
] |
dtmilano@gmail.com
|
1e8d16b806a47a308758f7e5980e7257ffc52afe
|
a8c0867109974ff7586597fe2c58521277ab9d4d
|
/LC645.py
|
199b710d2450fe19fc854e94ce08adbfb1a3b352
|
[] |
no_license
|
Qiao-Liang/LeetCode
|
1491b01d2ddf11495fbc23a65bb6ecb74ac1cee2
|
dbdb227e12f329e4ca064b338f1fbdca42f3a848
|
refs/heads/master
| 2023-05-06T15:00:58.939626
| 2021-04-21T06:30:33
| 2021-04-21T06:30:33
| 82,885,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,222
|
py
|
class Solution(object):
def findErrorNums(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
xor = xor0 = xor1 = 0
for num in range(1, len(nums) + 1):
xor ^= num
for num in nums:
xor ^= num
rightmost_bit = xor & -xor
for num in range(1, len(nums) + 1):
if num & rightmost_bit:
xor1 ^= num
else:
xor0 ^= num
for num in nums:
if num & rightmost_bit:
xor1 ^= num
else:
xor0 ^= num
for num in nums:
if num == xor0:
return [xor0, xor1]
return [xor1, xor0]
# if not nums:
# return None
# stat = [0] * (len(nums) + 1)
# result = [0, 0]
# for n in nums:
# stat[n] += 1
# for idx in range(1, len(stat)):
# if stat[idx] == 0:
# result[1] = idx
# if stat[idx] == 2:
# result[0] = idx
# return result
sol = Solution()
nums = [1,2,2,4]
# nums = [1,3,3]
# nums = [8,7,3,5,3,6,1,4]
print(sol.findErrorNums(nums))
|
[
"qiaoliang@Qiaos-MacBook-Pro.local"
] |
qiaoliang@Qiaos-MacBook-Pro.local
|
70ba21f2461cde284f5558daadde2b7a79b1ce76
|
781e2692049e87a4256320c76e82a19be257a05d
|
/assignments/python/wc/src/442.py
|
0e9c48707a834d8dbb3976c1a62d17af127fe5a1
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 341
|
py
|
"""
wordcount
"""
def word_count(text):
"""
Return a dictionary of words and their
    word counts
:param text:
:return:
"""
words = {}
text = ' '.join(text.split())
for word in text.split(" "):
if word in words:
words[word] += 1
else:
words[word] = 1
return words
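# A tiny usage example (hedged addition): repeated words are counted and extra
# whitespace is collapsed by the join/split above.
if __name__ == '__main__':
    print(word_count("the quick brown fox  the lazy dog the"))
    # expected: {'the': 3, 'quick': 1, 'brown': 1, 'fox': 1, 'lazy': 1, 'dog': 1}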
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
b16e3e2cc0d11fee303d0c099065faad2ac767bd
|
9e549ee54faa8b037f90eac8ecb36f853e460e5e
|
/venv/lib/python3.6/site-packages/wtforms/ext/django/templatetags/wtforms.py
|
33a60e3feffb231cb81bf5a196b76d008f86ba12
|
[
"MIT"
] |
permissive
|
aitoehigie/britecore_flask
|
e8df68e71dd0eac980a7de8c0f20b5a5a16979fe
|
eef1873dbe6b2cc21f770bc6dec783007ae4493b
|
refs/heads/master
| 2022-12-09T22:07:45.930238
| 2019-05-15T04:10:37
| 2019-05-15T04:10:37
| 177,354,667
| 0
| 0
|
MIT
| 2022-12-08T04:54:09
| 2019-03-24T00:38:20
|
Python
|
UTF-8
|
Python
| false
| false
| 2,878
|
py
|
"""
Template tags for easy WTForms access in Django templates.
"""
from __future__ import unicode_literals
import re
from django import template
from django.conf import settings
from django.template import Variable
from ....compat import iteritems
register = template.Library()
class FormFieldNode(template.Node):
def __init__(self, field_var, html_attrs):
self.field_var = field_var
self.html_attrs = html_attrs
def render(self, context):
try:
if "." in self.field_var:
base, field_name = self.field_var.rsplit(".", 1)
field = getattr(Variable(base).resolve(context), field_name)
else:
field = context[self.field_var]
except (template.VariableDoesNotExist, KeyError, AttributeError):
return settings.TEMPLATE_STRING_IF_INVALID
h_attrs = {}
for k, v in iteritems(self.html_attrs):
try:
h_attrs[k] = v.resolve(context)
except template.VariableDoesNotExist:
h_attrs[k] = settings.TEMPLATE_STRING_IF_INVALID
return field(**h_attrs)
@register.tag(name="form_field")
def do_form_field(parser, token):
"""
Render a WTForms form field allowing optional HTML attributes.
Invocation looks like this:
{% form_field form.username class="big_text" onclick="alert('hello')" %}
where form.username is the path to the field value we want. Any number
of key="value" arguments are supported. Unquoted values are resolved as
template variables.
"""
parts = token.contents.split(" ", 2)
if len(parts) < 2:
error_text = '%r tag must have the form field name as the first value, followed by optional key="value" attributes.'
raise template.TemplateSyntaxError(error_text % parts[0])
html_attrs = {}
if len(parts) == 3:
raw_args = list(args_split(parts[2]))
if (len(raw_args) % 2) != 0:
raise template.TemplateSyntaxError(
"%r tag received the incorrect number of key=value arguments."
% parts[0]
)
for x in range(0, len(raw_args), 2):
html_attrs[str(raw_args[x])] = Variable(raw_args[x + 1])
return FormFieldNode(parts[1], html_attrs)
args_split_re = re.compile(
r"""("(?:[^"\\]*(?:\\.[^"\\]*)*)"|'(?:[^'\\]*(?:\\.[^'\\]*)*)'|[^\s=]+)"""
)
def args_split(text):
""" Split space-separated key=value arguments. Keeps quoted strings intact. """
for bit in args_split_re.finditer(text):
bit = bit.group(0)
if bit[0] == '"' and bit[-1] == '"':
yield '"' + bit[1:-1].replace('\\"', '"').replace("\\\\", "\\") + '"'
elif bit[0] == "'" and bit[-1] == "'":
yield "'" + bit[1:-1].replace("\\'", "'").replace("\\\\", "\\") + "'"
else:
yield bit
|
[
"aitoehigie@gmail.com"
] |
aitoehigie@gmail.com
|
6d306b1850726a9a38b7a78a8b1bdffe4758ef5c
|
b6dd7ffc68957f381ae27b9e2a324f555793f238
|
/part-1-basics/ch_10/write_message.py
|
ac1958c6899a46aaa6a517519f733af594560396
|
[] |
no_license
|
lopezjronald/Python-Crash-Course
|
0a1100a1888238053f4865f8987cbc023d159d38
|
b6add3fc70b0d09b4b5dab9b06a02be2ae94b9da
|
refs/heads/master
| 2022-12-26T21:31:37.286430
| 2020-09-30T04:12:22
| 2020-09-30T04:12:22
| 298,722,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 540
|
py
|
filename = 'guest.txt'
# with open(filename, 'w') as file_object:
# file_object.write("Guest List\n")
# with open(filename, 'a') as file_object:
# response = True
# while response:
# file_object.write(input("Please enter the name of your guest: "))
# file_object.write('\n')
# continue_app = input("Continue? ('q' to quit): ")
# if continue_app.lower() == 'q':
# response = False
with open('guest.txt') as file_object:
for content in file_object:
print(content.strip())
|
[
"lopez.j.ronald@gmail.com"
] |
lopez.j.ronald@gmail.com
|
87baa53650db2e62d3ac7b05f529fe8fc7792281
|
ec8fef96af2a6b6610d298637f05bcdfe67cba2b
|
/long_range_compare/multicut_solvers.py
|
74311cc71aac426c617f2c912f4507dab3e459ec
|
[] |
no_license
|
abailoni/longRangeAgglo
|
8b98aca75b17d177cb5e408460f95ff20f411aeb
|
260b452e106125722ae3824755584ce7bfd5b81c
|
refs/heads/master
| 2021-06-25T14:14:57.150233
| 2020-11-06T11:14:52
| 2020-11-06T11:14:52
| 150,707,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,522
|
py
|
import time
import numpy as np
import sys
# -------------------
# MULTICUT SOLVERS:
# -------------------
def solve_multicut(graph, edge_costs, p=None, solver_type="exact_solver",
proposal_generator_type='WS',
fusion_moves_kwargs=None,
proposal_gener_WS_kwargs=None,
proposal_gener_HC_kwargs=None,
KL_kwargs=None,
HC_kwargs=None):
"""
Accepted options:
:param solver_type: exact_solver, KL, HC, HC-KL, HC-KL-fusionMoves
:param proposal_generator_type: WS, HC
"""
if fusion_moves_kwargs is None:
fusion_moves_kwargs = {'numberOfIterations': 100, # Max number of iterations
'stopIfNoImprovement': 10, # If no improvements, I stop earlier
'numberOfThreads': 1 # Parallel solutions of the fusionMove
}
if proposal_gener_WS_kwargs is None:
proposal_gener_WS_kwargs = {'sigma': 2.0, # Amount of noise added
'numberOfSeeds': 0.009, # Fractions of nodes that are randomly selected as seeds
'seedingStrategie': "SEED_FROM_NEGATIVE"
}
if proposal_gener_HC_kwargs is None:
proposal_gener_HC_kwargs = {'sigma':1.5,
'weightStopCond':0.0,
'nodeNumStopCond':-1.0
}
if HC_kwargs is None:
HC_kwargs = {'weightStopCond': 0.0, # Stop aggl. when this weight is reached
'nodeNumStopCond': -1.0, # Stop aggl. when this nb. of nodes is found
'visitNth': 100 # How often to print
}
if KL_kwargs is None:
KL_kwargs = {'numberOfInnerIterations': sys.maxsize,
'numberOfOuterIterations': 100,
'epsilon': 1e-6
}
# Costs to the power of p:
if p is None or p==1:
p = 1
exp_costs = edge_costs.copy()
else:
neg_weights = edge_costs < 0.
exp_costs = np.abs(edge_costs)**p
exp_costs[neg_weights] *= -1
mc_obj = graph.MulticutObjective(graph=graph, weights=exp_costs)
tick = time.time()
if solver_type == "exact_solver":
log_visitor = mc_obj.loggingVisitor(verbose=True)
solverFactory = mc_obj.multicutIlpFactory()
solver = solverFactory.create(mc_obj)
final_node_labels = solver.optimize(visitor=log_visitor)
elif solver_type == "KL":
log_visitor = mc_obj.loggingVisitor(verbose=True)
solverFactory = mc_obj.kernighanLinFactory(**KL_kwargs)
solver = solverFactory.create(mc_obj)
final_node_labels = solver.optimize(visitor=log_visitor)
elif solver_type == "HC":
log_visitor = mc_obj.loggingVisitor(verbose=True, visitNth=100)
solverFactory = mc_obj.greedyAdditiveFactory(**HC_kwargs)
solver = solverFactory.create(mc_obj)
final_node_labels = solver.optimize(visitor=log_visitor)
elif solver_type == "HC-KL":
log_visitor = mc_obj.loggingVisitor(verbose=False)
solverFactory = mc_obj.greedyAdditiveFactory(**HC_kwargs)
solver = solverFactory.create(mc_obj)
node_labels = solver.optimize(visitor=log_visitor)
# 2. Use a second better warm-up solver to get a better solution:
log_visitor = mc_obj.loggingVisitor(verbose=True)
solverFactory = mc_obj.kernighanLinFactory(**KL_kwargs)
solver = solverFactory.create(mc_obj)
final_node_labels = solver.optimize(visitor=log_visitor, nodeLabels=node_labels)
elif solver_type == "HC-KL-fusionMoves":
log_visitor = mc_obj.loggingVisitor(verbose=False)
# 1. Initialize a warm-up solver and run optimization
solverFactory = mc_obj.greedyAdditiveFactory(**HC_kwargs)
solver = solverFactory.create(mc_obj)
node_labels = solver.optimize(visitor=log_visitor)
# 2. Use a second better warm-up solver to get a better solution:
log_visitor = mc_obj.loggingVisitor(verbose=True)
solverFactory = mc_obj.kernighanLinFactory(**KL_kwargs)
solver = solverFactory.create(mc_obj)
new_node_labels = solver.optimize(visitor=log_visitor, nodeLabels=node_labels)
        # 4. Run the fusionMoves solver
if proposal_generator_type == "WS":
pgen = mc_obj.watershedCcProposals(**proposal_gener_WS_kwargs)
elif proposal_generator_type == "HC":
pgen = mc_obj.greedyAdditiveCcProposals(**proposal_gener_HC_kwargs)
else:
raise ValueError("Passed type of proposal generator is not implemented")
# fsMoveSett = mc_obj.fusionMoveSettings(mc_obj.cgcFactory(doCutPhase=True, doGlueAndCutPhase=True, mincutFactory=None,
# multicutFactory=None,
# doBetterCutPhase=False, nodeNumStopCond=0.1, sizeRegularizer=1.0))
solverFactory = mc_obj.ccFusionMoveBasedFactory(proposalGenerator=pgen, **fusion_moves_kwargs)
solver = solverFactory.create(mc_obj)
final_node_labels = solver.optimize(visitor=log_visitor, nodeLabels=new_node_labels)
else:
raise ValueError("Passed type of solver is not implemented")
tock = time.time()
final_edge_labels = graph.nodesLabelsToEdgeLabels(final_node_labels)
energy = (edge_costs * final_edge_labels).sum()
return energy, final_node_labels, final_edge_labels, log_visitor, tock-tick
|
[
"bailoni.alberto@gmail.com"
] |
bailoni.alberto@gmail.com
|
d07b700b026672f3fe65d2438f6c08a22556f2df
|
34474048ec5c4850623cf0fea993b43de76fada4
|
/Tests/unittest/code_gen/tac_o1/local_chars.tac
|
e00797894280d102fd4d8b3938dc5cdb5c77ad11
|
[] |
no_license
|
imsure/C--
|
69a80e152936e31b14319ab16c2317d2cacc9165
|
9991e7135d6ebc8f6f08f46f37b82bfe353ec17f
|
refs/heads/master
| 2021-01-13T02:04:07.295401
| 2015-05-01T01:26:07
| 2015-05-01T01:26:07
| 30,732,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
tac
|
main:
Enter main 16
x = 'A'
y = 'B'
z = 'C'
Param x
Call print_int 1
_tstr0 = "\n"
Param _tstr0
Call print_string 1
Param y
Call print_int 1
_tstr1 = "\n"
Param _tstr1
Call print_string 1
Param z
Call print_int 1
_tstr2 = "\n"
Param _tstr2
Call print_string 1
Return
|
[
"imsure95@gmail.com"
] |
imsure95@gmail.com
|
7f91b5fa338e5d6f010bd2a91a2b4428dc2e61f6
|
c3db4c42360c47471635a97568bfc9c21bc14c06
|
/pdfmerge/migrations/0002_auto_20190616_1800.py
|
8708c2ce97d9ded4162bf6eb11e98c10d8063689
|
[
"MIT"
] |
permissive
|
rupin/pdfmerger
|
3ede9aa9f1f374eba9b1ea2c33b6920403a8f4ad
|
fee19523e88362d215f1a29cdab0d140f4c9385c
|
refs/heads/master
| 2020-04-07T20:37:56.821730
| 2019-07-18T16:58:01
| 2019-07-18T16:58:01
| 158,696,989
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,440
|
py
|
# Generated by Django 2.1.3 on 2019-06-16 12:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('pdfmerge', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='formfield',
name='field_page_number',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='formfield',
name='field_type',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='formfield',
name='field_x',
field=models.DecimalField(decimal_places=2, default=0, max_digits=6),
),
migrations.AddField(
model_name='formfield',
name='field_x_increment',
field=models.DecimalField(decimal_places=2, default=0, max_digits=6),
),
migrations.AddField(
model_name='formfield',
name='field_y',
field=models.DecimalField(decimal_places=2, default=0, max_digits=6),
),
migrations.AddField(
model_name='formfield',
name='fk_pdf_id',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='pdfmerge.PDFForm'),
),
migrations.AddField(
model_name='pdfform',
name='file_path',
field=models.FileField(default='', upload_to=''),
),
migrations.AddField(
model_name='pdfform',
name='pdf_name',
field=models.CharField(default='', max_length=100),
),
migrations.AddField(
model_name='pdfform',
name='pdf_type',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='userdata',
name='field_text',
field=models.CharField(default='', max_length=200),
),
migrations.AddField(
model_name='userdata',
name='field_type',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='userdata',
name='fk_user_id',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"rupin.chheda@gmail.com"
] |
rupin.chheda@gmail.com
|
3d6e519d8173a542b6493ac848758d22a09e11a6
|
c513008cacf5592e645e7da3652d90d12a11a988
|
/program/niuke-python/Sprial_2.py
|
3a59db5855032d2ff2e365c91cdbded357e65f20
|
[] |
no_license
|
PiKaChu-R/code-learn
|
f17cb5ad95d4e8b698320d23e472eb1687576bdc
|
b94814ac3c72da4c840758569005b7ac6589586a
|
refs/heads/master
| 2020-07-01T02:42:40.235753
| 2019-09-17T13:06:50
| 2019-09-17T13:06:50
| 201,021,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,116
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
@File : Sprial_2.py
@Time : 2019/04/22 17:09:47
@Author : R.
@Version : 2.0
@Contact : 827710637@qq.com
@Desc : None
'''
'''
Sprial.py: a workable implementation of the spiral layout
'''
# here put the import lib
import itertools
def spiral(init):
    status = itertools.cycle(['right', 'down', 'left', 'up'])  # cycles the direction state periodically
movemap = {
'right': (1, 0),
'down': (0, 1),
'left': (-1, 0),
'up': (0, -1),
}
    # initialise the 2-D grid
position_map = dict.fromkeys(
[(x, y) for x in range(init) for y in range(init)])
    # initialise the current position and direction
positon = (0, 0)
new_status = next(status)
for i in range(4*init+1, init * (init+4) + 1):
old_positon = positon
# print(list( zip(positon, movemap[new_status])))
# print('22')
# print(list(map(sum, zip(positon, movemap[new_status]))))
        # move according to the current direction
positon = tuple(map(sum, zip(positon, movemap[new_status])))
        # switch direction when moving out of range or onto a cell that already has a value
if (positon not in position_map) or (position_map[positon]):
new_status = next(status)
positon = tuple(map(sum, zip(old_positon, movemap[new_status])))
position_map[old_positon] = i
    # build the output
    print("When:init = {}".format(init))
    # print the first row
for i in range(1, init+1):
if i < init:
print("{}".format(i), end='\t')
else:
print("{}".format(i))
    # build the central spiral block
for i in range(init):
print("{}".format(4 * init - i), end='\t')
for j in range(init):
print((str(position_map[(j, i)])), end='\t')
print("{}".format(i + init + 1))
    # append the last row
    for i in range(init*3, init*2, -1):
        # print the last row
print("{}".format(i), end='\t')
if i == init:
print("{}".format(i))
if __name__ == "__main__":
    # the argument is the init value
spiral(3)
|
[
"827710637@qq.com"
] |
827710637@qq.com
|
672a44ccb8cf352f213782f10b1bd23f6a7814e5
|
c2e1b17001357f2c13f6b8287e2b6ee0956c955b
|
/sweetpea/metrics.py
|
18f7356dec88fa61c916c2db35377da7329030b3
|
[
"MIT"
] |
permissive
|
musslick/sweetpea-py
|
e0c9fec35c571fbf846808cbdeec58f68c405d4c
|
b0d9769025022936d57d71a501c9ab5f51b4a4ef
|
refs/heads/master
| 2023-03-21T01:50:51.045650
| 2021-03-24T19:37:18
| 2021-03-24T19:37:18
| 293,494,087
| 1
| 0
| null | 2020-09-07T10:20:12
| 2020-09-07T10:20:12
| null |
UTF-8
|
Python
| false
| false
| 1,005
|
py
|
import operator as op
from functools import reduce
from math import factorial
from typing import Dict
from sweetpea.blocks import Block
from sweetpea.constraints import ExactlyKInARow, AtMostKInARow
from sweetpea import __generate_cnf
"""
Given a block, this function will collect various metrics pertaining to the block
and return them in a dictionary.
"""
def collect_design_metrics(block: Block) -> Dict:
backend_request = block.build_backend_request()
dimacs_header = __generate_cnf(block).split('\n')[0].split(' ')
return {
'full_factor_count': len(block.design),
'crossing_factor_count': len(block.crossing),
'constraint_count': len(block.constraints),
'block_length': block.trials_per_sample(),
'block_length_factorial': factorial(block.trials_per_sample()),
'low_level_request_count': len(backend_request.ll_requests),
'cnf_total_variables': int(dimacs_header[2]),
'cnf_total_clauses': int(dimacs_header[3])
}
|
[
"drautb@gmail.com"
] |
drautb@gmail.com
|
1ec71b82893c3f64ed495e2bf6673385d2f01c5a
|
c7d91529db199322e39e54fe4051a75704ea843e
|
/chaper01_list/t1.12.py
|
52c5131518690d49becba7d34818301aad263a2f
|
[] |
no_license
|
2226171237/Algorithmpractice
|
fc786fd47aced5cd6d96c45f8e728c1e9d1160b7
|
837957ea22aa07ce28a6c23ea0419bd2011e1f88
|
refs/heads/master
| 2020-12-26T07:20:37.226443
| 2020-09-13T13:31:05
| 2020-09-13T13:31:05
| 237,431,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,342
|
py
|
#-*- coding=utf-8 -*-
'''
Given a sorted linked list in which every node is also the head of a sorted linked list,
implement flatten(), which flattens the structure into a single linked list that is still sorted.
'''
class LNode:
def __init__(self,x,right=None,down=None):
self._data=x
self.right=right
self.down=down
class LList:
def __init__(self,data=[]):
self.head=None
if data:
data=list(data)
for d in data:
self.push(d)
def is_empty(self):
return self.head is None
def push(self,x):
new_node=LNode(x) if not isinstance(x,LList) else x.head
if self.is_empty():
self.head=new_node
else:
if isinstance(x,LList):
node=self.head
while node.right:
node=node.right
node.right=new_node
else:
node=self.head
while node.down:
node=node.down
node.down=LNode(x)
def visit(self):
if self.is_empty():
return
print('head')
node=self.head
while node:
childnode = node
while childnode:
print(childnode._data,end='->')
childnode=childnode.down
print('end')
node=node.right
print('end')
def merge(self,a,b):
'''
        Merge two sorted lists, as in the merge step of merge sort.
:param a:
:param b:
:return:
'''
if a is None:
return b
if b is None:
return a
if a._data<b._data:
result=a
result.down=self.merge(a.down,b)
else:
result=b
result.down=self.merge(a,b.down)
return result
def flatten(self,head):
if head is None or head.right is None:
return head
head.right=self.flatten(head.right)
head=self.merge(head,head.right)
return head
if __name__ == '__main__':
L1=LList([3,6,8,31])
L2=LList([11,21])
L3=LList([15,22,50])
L4=LList([30,39,40,55])
L=LList([L1,L2,L3,L4])
L.visit()
head=L.flatten(L.head)
node=head
while node:
print(node._data,end='->')
node=node.down
|
[
"2226171237@qq.com"
] |
2226171237@qq.com
|
97b3a57cfd6020fc40c236296004864bb838d3b5
|
a00f703ac6561ac99066c7220075dd4a420bb3ff
|
/goiteens/models/post_manager.py
|
34947ba95b09a3a68bd37065db233d762cdb45c5
|
[] |
no_license
|
volodymyrhusak/goiteens_docker
|
9c3e56091218f3e7633dc5d94816959254f5c8ca
|
618722fce82e85fe13f8c60ee76216fbf13338a7
|
refs/heads/master
| 2022-12-22T02:01:43.589833
| 2018-03-14T20:55:26
| 2018-03-14T20:55:26
| 125,272,517
| 0
| 0
| null | 2022-11-04T19:17:52
| 2018-03-14T20:55:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,172
|
py
|
from models.model import PostModel ,UserModel ,CommentsModel
from models.base_manager import SNBaseManager
from models.user_manager import UserManager
class PostManager(SNBaseManager):
def __init__(self):
class_model = PostModel
super(PostManager, self).__init__(class_model)
def get_posts(self,user):
self.select().And([('user','=',user.object.id)]).run()
def save_post(self,form, user):
self.object.title = form.get('title', '')
self.object.photos = form.get('photos', '')
self.object.text = form.get('text', '')
self.object.user = user.object
self.save()
def _get_post_id(self, id):
self.select().And([('id', '=', str(id))]).run()
def add_comment(self,comment,user,post):
if not isinstance(post, PostModel):
post = self.get_post(post)
if not isinstance(user, UserModel):
user = UserManager().get_user(user)
comment_manager = SNBaseManager(CommentsModel)
comment_manager.object.text = comment
comment_manager.object.post = post
comment_manager.object.user = user
comment_manager.save()
|
[
"vovatrap@gmail.com"
] |
vovatrap@gmail.com
|
b6cf62297f6f5a18d8098fd663d50ceed6d2fb6a
|
2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
|
/pardus/playground/mnurolcay/2009/network/chat/gyachi/actions.py
|
27982a32850ab05a787bda82f499cce34b776942
|
[] |
no_license
|
aligulle1/kuller
|
bda0d59ce8400aa3c7ba9c7e19589f27313492f7
|
7f98de19be27d7a517fe19a37c814748f7e18ba6
|
refs/heads/master
| 2021-01-20T02:22:09.451356
| 2013-07-23T17:57:58
| 2013-07-23T17:57:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,444
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import get
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
def setup():
shelltools.system("sh autogen.sh")
autotools.configure("--enable-plugin_pulseaudio \
--enable-plugin_blowfish \
--enable-plugin_mcrypt \
--enable-gtkspell \
--enable-wine \
--enable-plugin_gpgme \
--disable-plugin_xmms \
--disable-rpath \
--disable-esd")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.insinto("/usr/share/pixmaps", "themes/gyachi-classic/gyach-icon_48.png", "gyachi.png")
pisitools.insinto("/usr/share/icons/hicolor/32x32/apps/gyachi", "themes/gyachi-classic/gyach-icon_32.png", "gyachi.png")
pisitools.insinto("/usr/share/icons/hicolor/48x48/apps/gyachi", "themes/gyachi-classic/gyach-icon_48.png", "gyachi.png")
pisitools.dodoc("ChangeLog", "VERSION", "doc/*.txt", "doc/txt/COPYING", "doc/txt/README", "doc/txt/webcams.txt", "doc/txt/gyachi-help-short.txt")
pisitools.dohtml("doc/html/*")
|
[
"yusuf.aydemir@istanbul.com"
] |
yusuf.aydemir@istanbul.com
|
32de413aeb5cf0e0c6a5e8ba621fb4d62f92ef03
|
e872e1136887cd6753ae292939e4656130c8f7d9
|
/api/urls.py
|
f68962bb645e1c84bfe889e90658576f6e1b915b
|
[
"MIT"
] |
permissive
|
florimondmanca/personal-api
|
92e9d3ba8e3b16466ba54f5e9ea0493030e9cf95
|
6300f965d3f51d1bf5f10cf1eb15d673bd627631
|
refs/heads/master
| 2020-03-22T14:48:06.087822
| 2019-11-16T15:31:05
| 2019-11-16T15:31:05
| 140,206,398
| 4
| 1
|
MIT
| 2019-10-21T19:24:44
| 2018-07-08T22:16:47
|
Python
|
UTF-8
|
Python
| false
| false
| 423
|
py
|
"""API URLs."""
from django.urls import path
from rest_framework.routers import DefaultRouter
import blog.views
from .views import obtain_auth_token
# Enable view names as 'api:...'
app_name = "api"
router = DefaultRouter()
# Blog endpoints
router.register("posts", blog.views.PostViewSet)
router.register("popular-tags", blog.views.PopularTagViewSet)
urlpatterns = router.urls + [path("login/", obtain_auth_token)]
|
[
"florimond.manca@gmail.com"
] |
florimond.manca@gmail.com
|
0071f6d25357b403c468cee9f14e317f1172b23f
|
a0488ed86f297f5f18864bf3f317dbed48b3b00d
|
/setup.py
|
1dafa036d3e90c0bcceec24674af2c2720c35ab2
|
[
"MIT"
] |
permissive
|
DamianArado/moya-techblog
|
7aefeea5bb74fa410e7cf896a83c0af0f4b0d25c
|
4f7d606b22773db40850b742945e83e328c63bb7
|
refs/heads/master
| 2021-12-22T02:53:40.535675
| 2017-10-09T14:07:59
| 2017-10-09T14:07:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
from setuptools import setup
VERSION = "0.1.0"
setup(
name='techblog',
version=VERSION,
description="Blog for Coders and Photographers",
zip_safe=False,
license="MIT",
author="Will McGugan",
author_email="willmcgugan@gmail.com",
url="https://github.com/moyaproject/moya-techblog",
entry_points={
"console_scripts": [
'techblog = techblog:main'
]
}
)
|
[
"willmcgugan@gmail.com"
] |
willmcgugan@gmail.com
|
1161c88b6f97450eb92ecddc96a9913d4b4cdca6
|
6609c26b4ed72c156104ce282c3cf88c6aac59f6
|
/chapter09/example02.py
|
65652c91aa68ad1a2e42702ecbcabdda16ed64fc
|
[
"MIT"
] |
permissive
|
yordanivh/intro_to_cs_w_python
|
4ab9dbbc2963b285b22cacb6648d1300fded18ce
|
eebbb8efd7ef0d07be9bc45b6b1e8f20737ce01a
|
refs/heads/master
| 2020-09-06T12:25:23.362118
| 2020-02-14T14:07:07
| 2020-02-14T14:07:07
| 220,423,698
| 0
| 0
|
MIT
| 2020-02-14T14:07:08
| 2019-11-08T08:41:25
|
Python
|
UTF-8
|
Python
| false
| false
| 179
|
py
|
#variable holding the last value of the loop
speed = 2
velocities = [0.0, 9.81, 19.62, 29.43]
for speed in velocities:
print('Metric',speed, 'm/sec')
print('Final:', speed)
|
[
"yordan@hashicorp.com"
] |
yordan@hashicorp.com
|
a193ad9d8cf0b7edb6fea29bdc621f469c12e0ba
|
9653d2c933c95f6a7e956751814a38a935fabf14
|
/source/code/menu_addFontGuideline.py
|
c211b008a1cac518efa7bbec3cc9d5b7c1ee2e90
|
[
"MIT"
] |
permissive
|
benkiel/guidetool
|
f98863c72920bbddc9fb355852a42c1e441f02ea
|
ee6f4fce8f472622ab20a3b09bf4594f5631be25
|
refs/heads/main
| 2023-06-18T21:39:24.269399
| 2021-07-15T00:41:33
| 2021-07-15T00:41:33
| 387,886,590
| 0
| 0
|
MIT
| 2021-07-20T18:54:21
| 2021-07-20T18:54:20
| null |
UTF-8
|
Python
| false
| false
| 690
|
py
|
import AppKit
from fontParts.world import CurrentGlyph
from mojo.UI import getDefault, CurrentGlyphWindow
from guideTool.guess import guessPositionAndAngleFromSelectedPoints
from guideTool.editor import GuidelineEditorController
def run():
glyph = CurrentGlyph()
if glyph is None:
return
font = glyph.font
editor = CurrentGlyphWindow()
data = guessPositionAndAngleFromSelectedPoints(glyph)
if data is None:
AppKit.NSBeep()
return
font.prepareUndo("Add Guide")
guideline = font.appendGuideline(**data)
font.performUndo()
GuidelineEditorController(guideline, glyph, editor.getGlyphView())
if __name__ == "__main__":
run()
|
[
"tal@typesupply.com"
] |
tal@typesupply.com
|
55975b35d008f8b4dddbc1c5c47ae99ff5a4998d
|
04e1c60ac7864a0bdcdd41026a2336b1ff699613
|
/model/ll.py
|
f5098ecc5ce3319e31d675292eb699f307ed938b
|
[] |
no_license
|
jianzhnie/RetinaNet_Pytorch
|
00ec318d2e57c6b646f193e4d3a066f9891762b3
|
03679766847757f28bb9410c31ddaf99adf524c8
|
refs/heads/master
| 2020-09-22T00:09:18.617565
| 2019-11-30T08:27:36
| 2019-11-30T08:27:36
| 224,981,680
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,015
|
py
|
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class FocalLoss(nn.Module):
def __init__(self, num_classes):
super(FocalLoss, self).__init__()
self.num_classes = num_classes
def _one_hot_embeding(self, labels):
"""Embeding labels to one-hot form.
Args:
labels(LongTensor): class labels
num_classes(int): number of classes
Returns:
encoded labels, sized[N, #classes]
"""
y = torch.eye(self.num_classes+1) # [D, D]
return y[labels] # [N, D]
def focal_loss(self, x, y):
"""Focal loss
Args:
x(tensor): size [N, D]
y(tensor): size [N, ]
Returns:
(tensor): focal loss
"""
alpha = 0.25
gamma = 2
t = self._one_hot_embeding(y.data.cpu()) # [N,21]
t = t[:, 1:] # exclude background
t = Variable(t).cuda() # [N,20]
logit = F.softmax(x)
logit = logit.clamp(1e-7, 1.-1e-7)
conf_loss_tmp = -1 * t.float() * torch.log(logit)
conf_loss_tmp = alpha * conf_loss_tmp * (1-logit)**gamma
conf_loss = conf_loss_tmp.sum()
return conf_loss
def forward(self, loc_preds, loc_targets, cls_preds, cls_targets):
"""Compute loss between (loc_preds, loc_targets) and (cls_preds, cls_targets).
Args:
loc_preds(tensor): predicted locations, sized [batch_size, #anchors, 4].
loc_targets(tensor): encoded target locations, sized [batch_size, #anchors, 4].
cls_preds(tensor): predicted class confidences, sized [batch_size, #anchors, #classes].
cls_targets(tensor): encoded target labels, sized [batch_size, #anchors].
Returns:
(tensor) loss = SmoothL1Loss(loc_preds, loc_targets) + FocalLoss(cls_preds, cls_targets).
"""
pos = cls_targets > 0 # [N,#anchors]
num_pos = pos.data.long().sum()
# loc_loss = SmoothL1Loss(pos_loc_preds, pos_loc_targets)
mask = pos.unsqueeze(2).expand_as(loc_preds) # [N,#anchors,4]
masked_loc_preds = loc_preds[mask].view(-1, 4) # [#pos,4]
masked_loc_targets = loc_targets[mask].view(-1, 4) # [#pos,4]
loc_loss = F.smooth_l1_loss(masked_loc_preds, masked_loc_targets, size_average=False)
# cls_loss = FocalLoss(loc_preds, loc_targets)
pos_neg = cls_targets > -1 # exclude ignored anchors
# num_pos_neg = pos_neg.data.long().sum()
mask = pos_neg.unsqueeze(2).expand_as(cls_preds)
masked_cls_preds = cls_preds[mask].view(-1, self.num_classes)
cls_loss = self.focal_loss(masked_cls_preds, cls_targets[pos_neg])
num_pos = max(1.0, num_pos.item())
print('loc_loss: %.3f | cls_loss: %.3f' % (loc_loss.item() / num_pos, cls_loss.item() / num_pos), end=' | ')
loss = loc_loss / num_pos + cls_loss / num_pos
return loss
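# --- Hedged usage sketch (appended; not part of the original repository) ----
# Shows the tensor shapes FocalLoss.forward expects.  The batch size, anchor
# count and random targets below are illustrative assumptions, and a CUDA
# device plus a PyTorch 1.x environment is assumed because focal_loss() calls
# .cuda() and uses the legacy size_average flag.
if __name__ == '__main__':
    if torch.cuda.is_available():
        criterion = FocalLoss(num_classes=20)
        loc_preds = torch.randn(2, 100, 4).cuda()
        loc_targets = torch.randn(2, 100, 4).cuda()
        cls_preds = torch.randn(2, 100, 20).cuda()
        cls_targets = torch.randint(-1, 21, (2, 100)).cuda()  # -1 marks ignored anchors
        print(criterion(loc_preds, loc_targets, cls_preds, cls_targets))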
|
[
"jianzhnie@126.com"
] |
jianzhnie@126.com
|
f0ff4f0567586c4dc9dd54b5497b4819f77b2378
|
855416c669f765e4cd0f5a749e82c112641a9e11
|
/Interest.blog-1.1/utils/public.py
|
9cdb4342017ac3c51968c900609d649d1726535b
|
[
"MIT"
] |
permissive
|
chenzhenpin/my_flask
|
a2f63422921d4b73c25a8e093ad09e6a48f8b568
|
0c101b7a1aa01283a0b8e3ef9b7555750ea03ecb
|
refs/heads/master
| 2022-11-28T20:16:35.854225
| 2018-11-27T16:04:29
| 2018-11-27T16:04:29
| 159,362,205
| 0
| 0
| null | 2022-11-22T01:37:36
| 2018-11-27T16:02:52
|
CSS
|
UTF-8
|
Python
| false
| false
| 5,254
|
py
|
# -*- coding:utf8 -*-
import requests
import hashlib
import datetime
import random
import upyun
from uuid import uuid4
from log import Syslog
from config import SSO, MYSQL, PLUGINS
from torndb import Connection
from flask import g
#Something public variable
md5 = lambda pwd:hashlib.md5(pwd).hexdigest()
today = lambda :datetime.datetime.now().strftime("%Y-%m-%d")
logger = Syslog.getLogger()
gen_requestId = lambda :str(uuid4())
gen_filename = lambda :"%s%s" %(datetime.datetime.now().strftime('%Y%m%d%H%M%S'), str(random.randrange(1000, 10000)))
def timeChange(timestring):
logger.debug("Change time, source time is %s" %timestring)
startedat = timestring.replace('T', ' ')[:19]
try:
dt = datetime.datetime.strptime(startedat, "%Y-%m-%d %H:%M:%S") + datetime.timedelta(hours=8)
res = dt.strftime("%Y-%m-%d %H:%M:%S")
except Exception, e:
logger.warn(e, exc_info=True)
else:
logger.debug("Change time, result time is %s" %res)
return res
def ParseMySQL(mysql, callback="dict"):
try:
protocol, dburl = mysql.split("://")
if "?" in mysql:
dbinfo, dbargs = dburl.split("?")
else:
dbinfo, dbargs = dburl, "charset=utf8&timezone=+8:00"
host,port,user,password,database = dbinfo.split(":")
charset, timezone = dbargs.split("&")[0].split("charset=")[-1] or "utf8", dbargs.split("&")[-1].split("timezone=")[-1] or "+8:00"
if callback in ("list", "tuple"):
return protocol,host,port,user,password,database,charset, timezone
else:
return {"Protocol": protocol, "Host": host, "Port": port, "Database": database, "User": user, "Password": password, "Charset": charset, "Timezone": timezone}
except Exception,e:
logger.warn(e, exc_info=True)
if callback in ("list", "tuple"):
return ()
else:
return {}
mysql = Connection(
host = "%s:%s" %(ParseMySQL(MYSQL).get('Host'), ParseMySQL(MYSQL).get('Port', 3306)),
database = ParseMySQL(MYSQL).get('Database'),
user = ParseMySQL(MYSQL).get('User'),
password = ParseMySQL(MYSQL).get('Password'),
time_zone= ParseMySQL(MYSQL).get('Timezone','+8:00'),
charset = ParseMySQL(MYSQL).get('Charset', 'utf8'),
connect_timeout=3,
max_idle_time=2)
def ClickMysqlWrite(data):
if isinstance(data, dict):
if data.get("agent") and data.get("method") in ("GET", "POST", "PUT", "DELETE", "OPTIONS"):
sql = "insert into clickLog set requestId=%s, url=%s, ip=%s, agent=%s, method=%s, status_code=%s, referer=%s"
try:
mysql.insert(sql, data.get("requestId"), data.get("url"), data.get("ip"), data.get("agent"), data.get("method"), data.get("status_code"), data.get("referer"))
except Exception, e:
logger.warn(e, exc_info=True)
def isLogged_in(cookie_str):
    ''' Determine from the cookie string whether the user is logged in '''
SSOURL = SSO.get("SSO.URL")
if cookie_str and not cookie_str == '..':
username, expires, sessionId = cookie_str.split('.')
#success = Requests(SSOURL+"/sso/").post(data={"username": username, "time": expires, "sessionId": sessionId}).get("success", False)
success = requests.post(SSOURL+"/sso/", data={"username": username, "time": expires, "sessionId": sessionId}, timeout=5, verify=False, headers={"User-Agent": SSO.get("SSO.PROJECT")}).json().get("success", False)
logger.info("check login request, cookie_str: %s, success:%s" %(cookie_str, success))
return success
else:
logger.info("Not Logged in")
return False
def chunks(arr, n):
"""arr是被分割的list,n是每个chunk中含n元素。"""
return [arr[i:i+n] for i in range(0, len(arr), n)]
def isAdmin(username):
AdminUsers = requests.get(g.apiurl + "/user/", params={"getadminuser": True}, timeout=5, verify=False, headers={"User-Agent": SSO.get("SSO.PROJECT")}).json().get("data")
if username in AdminUsers:
return True
return False
def UploadImage2Upyun(file, imgurl, kwargs=PLUGINS['UpYunStorage']):
""" Upload image to Upyun Cloud with Api """
logger.info({"UploadFile": file, "imgurl": imgurl, "kwargs": kwargs})
up = upyun.UpYun(kwargs.get("bucket"), username=kwargs.get("username"), password=kwargs.get("password"), secret=kwargs.get("secret"), timeout=kwargs.get("timeout", 10))
formkw = { 'allow-file-type': kwargs.get('allow-file-type', 'jpg,jpeg,png,gif') }
with open(file, "rb") as f:
res = up.put(imgurl, f, checksum=True, need_resume=True, form=True, **formkw)
return res
def BaiduActivePush(pushUrl, original=True, callUrl=PLUGINS['BaiduActivePush']['callUrl']):
"""百度主动推送(实时)接口提交链接"""
callUrl = callUrl + "&type=original" if original else callUrl
res = requests.post(url=callUrl, data=pushUrl, timeout=3, headers={"User-Agent": "BaiduActivePush/www.saintic.com"}).json()
logger.info("BaiduActivePush PushUrl is %s, Result is %s" % (pushUrl, res))
return res
|
[
"1595347682@qq.com"
] |
1595347682@qq.com
|
2a7d09228483b3b7c912600fea60bd9f300653f9
|
6b10d7a745b70d3b8533ea91b7bf1052e43b7d70
|
/Week 4/Admin_page/main/migrations/0002_auto_20180930_2229.py
|
9e5cf7c3c18d3c670ba02c13dfc5ff2f2fa3db2a
|
[] |
no_license
|
Ablay09/BFDjango
|
9701f6b1d36d54e6a2b511c57374e47ac0048d0e
|
c41423f5e86bad107769f518eeca2bfefd524919
|
refs/heads/master
| 2020-03-27T21:33:55.433951
| 2018-10-12T20:14:12
| 2018-10-12T20:14:12
| 147,101,564
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 712
|
py
|
# Generated by Django 2.1.1 on 2018-09-30 16:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='task',
name='created_date',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='task',
name='finished_date',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='task_list',
name='time',
field=models.DateTimeField(auto_now_add=True),
),
]
|
[
"ablay.yedilbayev@gmail.com"
] |
ablay.yedilbayev@gmail.com
|
1ba7ed3f3e6972ac5d2d780b49ba2a2b641a4ba6
|
d89290fd2ecc166287e065784ae290a516ca2cef
|
/archives/convert_framerates.py
|
83c83ead40072e2feef33d6b123b8c9ad4acad3f
|
[
"MIT"
] |
permissive
|
rec/bbcprc
|
adc85ca3fd7a7b303c83323c2bdf63d44654112f
|
d4f4fee5f5c0beaf9c23af6e1655dbb49f3912e3
|
refs/heads/main
| 2023-02-08T20:18:38.763943
| 2023-01-30T12:47:21
| 2023-01-30T12:51:38
| 130,397,044
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,066
|
py
|
from . import audio_io
from . import constants
from . import files
from scipy import signal
import json
import numpy as np
import os
ERROR = 'fp.getframerate() != constants.FRAME_RATE: 48000'
def get_framerate_error_files():
for f in sorted(files.with_suffix(constants.METADATA_DIR, '.json')):
if json.load(open(f)).get('error') == ERROR:
yield constants.source(os.path.basename(f)[:-5])
def resample_file(filename):
if True:
original = filename
filename = filename + '.48KHz'
else:
original = filename + '.48KHz'
os.rename(filename, original)
fp, frames = audio_io.read_frames_and_fp(original)
assert fp.getframerate() == 48000
samples = audio_io.from_frames(frames, fp.getnchannels())
resampled = np.stack([signal.resample_poly(s, 160, 147) for s in samples])
audio_io.write(filename, resampled)
print('Resampled to', filename)
if __name__ == '__main__':
# resample_file(list(get_framerate_error_files())[
for f in get_framerate_error_files():
print(f)
|
[
"tom@swirly.com"
] |
tom@swirly.com
|
c07b75d16c6a9d140e590d268b51632f6b7f93bc
|
c8e44c50bcc77c6ad5d95516dcec8dada7c284bd
|
/gidgethub/abc.py
|
444cf02043c0df46f44a1acc14c6f691ffc35f44
|
[
"Apache-2.0"
] |
permissive
|
Lukasa/gidgethub
|
495510e276cd34f7b9c37431f3b3b011d02b0795
|
6b3dc032f1fcdf0fbf23dfb061f11588798c1e7e
|
refs/heads/master
| 2021-01-18T16:04:54.012847
| 2017-03-08T00:51:29
| 2017-03-08T00:51:29
| 84,348,121
| 0
| 0
| null | 2017-03-08T17:31:09
| 2017-03-08T17:31:09
| null |
UTF-8
|
Python
| false
| false
| 4,473
|
py
|
"""Provide an abstract base class for easier requests."""
import abc
import datetime
import json
from typing import Any, AsyncIterable, Dict, Mapping, Optional, Tuple
from . import sansio
class GitHubAPI(abc.ABC):
"""Provide an idiomatic API for making calls to GitHub's API."""
def __init__(self, requester: str, *, oauth_token: str = None) -> None:
self.requester = requester
self.oauth_token = oauth_token
self.rate_limit: sansio.RateLimit = None
@abc.abstractmethod
async def _request(self, method: str, url: str,
headers: Mapping[str, str],
body: bytes = b'') -> Tuple[int, Mapping[str, str], bytes]:
"""Make an HTTP request."""
@abc.abstractmethod
async def _sleep(self, seconds: float) -> None:
"""Sleep for the specified number of seconds."""
async def _make_request(self, method: str, url: str,
url_vars: Dict[str, str], data: Any,
accept) -> Tuple[Any, str]:
"""Construct and make an HTTP request."""
# If the rate limit isn't known yet then assume there's enough quota.
if self.rate_limit is not None:
if self.rate_limit:
# Proactively assume this request is counted by GitHub so as to
# not have a race condition on the final request.
self.rate_limit.remaining -= 1
else:
# /rate_limit returns the current rate limit,
# but the assumption is an async application won't be making multi-threaded calls with
# the same oauth token so the last call will have set the rate_limit accurately.
now = datetime.datetime.now(datetime.timezone.utc)
wait = self.rate_limit.reset_datetime - now
await self._sleep(wait.total_seconds())
filled_url = sansio.format_url(url, url_vars)
request_headers = sansio.create_headers(self.requester, accept=accept,
oauth_token=self.oauth_token)
if data == "":
body = b""
request_headers["content-length"] = "0"
else:
charset = "utf-8"
body = json.dumps(data).encode(charset)
request_headers['content-type'] = f"application/json; charset={charset}"
request_headers['content-length'] = str(len(body))
response = await self._request(method, filled_url, request_headers, body)
data, self.rate_limit, more = sansio.decipher_response(*response)
return data, more
async def getitem(self, url: str, url_vars: Dict[str, str] = {}, *,
accept=sansio.accept_format()) -> Any:
"""Send a GET request for a single item to the specified endpoint."""
data, _ = await self._make_request("GET", url, url_vars, "", accept)
return data
async def getiter(self, url: str, url_vars: Dict[str, str] = {}, *,
accept: str = sansio.accept_format()) -> AsyncIterable[Any]:
"""Return an async iterable for all the items at a specified endpoint."""
data, more = await self._make_request("GET", url, url_vars, "", accept)
for item in data:
yield item
if more:
# `yield from` is not supported in coroutines.
async for item in self.getiter(more, url_vars, accept=accept):
yield item
async def post(self, url: str, url_vars: Dict[str, str] = {}, *,
data: Any, accept: str = sansio.accept_format()) -> Any:
data, _ = await self._make_request("POST", url, url_vars, data, accept)
return data
async def patch(self, url: str, url_vars: Dict[str, str] = {}, *,
data: Any, accept: str = sansio.accept_format()) -> Any:
data, _ = await self._make_request("PATCH", url, url_vars, data, accept)
return data
async def put(self, url: str, url_vars: Dict[str, str] = {}, *,
data: Any = "",
accept: str = sansio.accept_format()) -> Optional[Any]:
data, _ = await self._make_request("PUT", url, url_vars, data, accept)
return data
async def delete(self, url: str, url_vars: Dict[str, str] = {}, *,
accept: str = sansio.accept_format()) -> None:
await self._make_request("DELETE", url, url_vars, "", accept)
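# --- Hedged example (appended; not part of gidgethub itself) -----------------
# A minimal concrete subclass sketching how the two abstract hooks could be
# implemented on top of aiohttp.  The class name, the injected ClientSession
# and the "/rate_limit" endpoint in the usage comment are illustrative
# assumptions only.
import asyncio

import aiohttp


class AioHTTPGitHubAPI(GitHubAPI):

    def __init__(self, session: aiohttp.ClientSession, requester: str, *,
                 oauth_token: str = None) -> None:
        self._session = session
        super().__init__(requester, oauth_token=oauth_token)

    async def _request(self, method: str, url: str,
                       headers: Mapping[str, str],
                       body: bytes = b'') -> Tuple[int, Mapping[str, str], bytes]:
        # Delegate the raw HTTP round trip to aiohttp and return the triple
        # that _make_request() expects: status, headers, raw body.
        async with self._session.request(method, url, headers=dict(headers),
                                         data=body) as response:
            return response.status, response.headers, await response.read()

    async def _sleep(self, seconds: float) -> None:
        await asyncio.sleep(seconds)

# Typical use (inside an event loop):
#     async with aiohttp.ClientSession() as session:
#         gh = AioHTTPGitHubAPI(session, "example-requester")
#         data = await gh.getitem("/rate_limit")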
|
[
"brett@python.org"
] |
brett@python.org
|
56b4116fe0ba8840df5f463b6306c8fd733d774a
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02804/s286662068.py
|
0f0965086da8245027d2dedeb3044d4203c93284
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 622
|
py
|
n, k = map(int, input().split())
a = list(map(int, input().split()))
a.sort()
# precompute factorials and inverse factorials up to 100000 for nCk mod 10**9+7
mod = 10**9 + 7
facts = [1] * 100001
for i in range(0, 100000):
facts[i+1] = facts[i] * (i + 1) % mod
ifacts = [1] * 100001
ifacts[100000] = pow(facts[100000], mod - 2, mod)
for i in range(100000, 0, -1):
ifacts[i-1] = ifacts[i] * i % mod
def comb(n, k):
return facts[n] * ifacts[n-k] % mod * ifacts[k] % mod
ans = 0
for i in range(k-1, n):
# take k-1 from i
ans = (ans + a[i] * comb(i, k-1)) % mod
for i in range(0, n-k+1):
# take k-1 from n-i-1
ans = (ans - a[i] * comb(n-i-1, k-1)) % mod
print(ans)
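# Added note (not part of the original submission): ifacts[] stores modular
# inverse factorials obtained via Fermat's little theorem (pow(x, mod-2, mod)
# is the inverse of x modulo the prime 10**9+7), so comb(n, k) computes
# n! / (k! * (n-k)!) mod 10**9+7; e.g. comb(5, 2) evaluates to 10.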
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
63508df4d0df451b9d0818de91691ffcd18a74bb
|
f5a53f0f2770e4d7b3fdace83486452ddcc996e1
|
/env3/lib/python3.6/site-packages/django_rq/workers.py
|
b6f23d91a3623ea4116dcd4691f19e56d13e2a11
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
fireman0865/PingBox
|
35e8fc9966b51320d571b63967e352a134022128
|
0f00eaf88b88e9441fffd5173a1501e56c13db03
|
refs/heads/master
| 2023-01-20T07:55:59.433046
| 2020-03-15T13:36:31
| 2020-03-15T13:36:31
| 247,466,832
| 1
| 0
|
Apache-2.0
| 2022-12-26T21:30:32
| 2020-03-15T12:59:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,796
|
py
|
from rq import Worker
from rq.utils import import_attribute
from django.conf import settings
from .jobs import get_job_class
from .queues import get_queues
def get_exception_handlers():
"""
Custom exception handlers could be defined in settings.py:
RQ = {
'EXCEPTION_HANDLERS': ['path.to.handler'],
}
"""
from .settings import EXCEPTION_HANDLERS
return [import_attribute(path) for path in EXCEPTION_HANDLERS]
def get_worker_class(worker_class=None):
"""
Return worker class from RQ settings, otherwise return Worker.
If `worker_class` is not None, it is used as an override (can be
python import path as string).
"""
RQ = getattr(settings, 'RQ', {})
if worker_class is None:
worker_class = Worker
if 'WORKER_CLASS' in RQ:
worker_class = RQ.get('WORKER_CLASS')
if isinstance(worker_class, str):
worker_class = import_attribute(worker_class)
return worker_class
def get_worker(*queue_names, **kwargs):
"""
Returns a RQ worker for all queues or specified ones.
"""
job_class = get_job_class(kwargs.pop('job_class', None))
queue_class = kwargs.pop('queue_class', None)
queues = get_queues(*queue_names, **{'job_class': job_class,
'queue_class': queue_class})
# normalize queue_class to what get_queues returns
queue_class = queues[0].__class__
worker_class = get_worker_class(kwargs.pop('worker_class', None))
return worker_class(queues,
connection=queues[0].connection,
exception_handlers=get_exception_handlers() or None,
job_class=job_class,
queue_class=queue_class,
**kwargs)
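# Hedged usage note (added; not part of django_rq): inside a configured Django
# project this is typically invoked roughly as
#
#     worker = get_worker('default')
#     worker.work(burst=True)
#
# where the queue name 'default' and the burst flag are illustrative
# assumptions that depend on the project's RQ_QUEUES settings.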
|
[
"fireman0865@gmail.com"
] |
fireman0865@gmail.com
|
67790833485b01272a5d8f7ba10d549f6bc187e6
|
b7b8cac59c24c28efb3002f639865121d3b1f3e1
|
/hyperion/grid/yt3_wrappers.py
|
fe04e9f80b875a00a54282dfaeb5b72899b325bd
|
[
"BSD-2-Clause"
] |
permissive
|
koepferl/hyperion
|
51a461f3cde30faa6dc82f63803b659a831273d1
|
d43e1d06889e8b649038b85ef6721c64dd269a4e
|
refs/heads/master
| 2020-04-01T19:11:18.373471
| 2015-01-14T13:31:36
| 2015-03-30T15:38:08
| 34,328,089
| 0
| 0
| null | 2015-04-21T13:17:41
| 2015-04-21T13:17:40
| null |
UTF-8
|
Python
| false
| false
| 6,366
|
py
|
from __future__ import print_function, division
import numpy as np
def almost_equal(a, b):
return a / b < 1. + 1.e-4 and b / a < 1. + 1.e-4
def amr_grid_to_yt_stream(levels, dust_id=0):
# Try and guess the refinement ratio - if it is not constant, then
# we can't use yt
if len(levels) == 0 or len(levels[0].grids) == 0:
raise Exception("Need at least one level with one grid to convert to a yt object")
elif len(levels) == 1:
refine = 2
else:
dx = []
dy = []
dz = []
for ilevel, level in enumerate(levels):
for igrid, grid in enumerate(level.grids):
gdx = (grid.xmax - grid.xmin) / float(grid.nx)
gdy = (grid.ymax - grid.ymin) / float(grid.ny)
gdz = (grid.zmax - grid.zmin) / float(grid.nz)
if igrid == 0:
dx.append(gdx)
dy.append(gdy)
dz.append(gdz)
else:
if not almost_equal(dx[-1], gdx):
raise Exception("dx scale differs between grids in level %i (expected %g and got %g)" % (ilevel, dx[-1], gdx))
if not almost_equal(dy[-1], gdy):
raise Exception("dy scale differs between grids in level %i (expected %g and got %g)" % (ilevel, dy[-1], gdy))
if not almost_equal(dz[-1], gdz):
raise Exception("dz scale differs between grids in level %i (expected %g and got %g)" % (ilevel, dz[-1], gdz))
dx = np.array(dx)
dy = np.array(dy)
dz = np.array(dz)
refine_x = dx[:-1] / dx[1:]
refine_y = dy[:-1] / dy[1:]
refine_z = dz[:-1] / dz[1:]
for i in range(len(levels) - 1):
if abs(refine_x[i] - round(refine_x[i])) > 1.e-5:
raise Exception("refinement ratio is not an integer (%g)" % refine_x[i])
if abs(refine_y[i] - round(refine_y[i])) > 1.e-5:
raise Exception("refinement ratio is not an integer (%g)" % refine_y[i])
if abs(refine_z[i] - round(refine_z[i])) > 1.e-5:
raise Exception("refinement ratio is not an integer (%g)" % refine_z[i])
refine_x = np.round(refine_x).astype(int)
refine_y = np.round(refine_y).astype(int)
refine_z = np.round(refine_z).astype(int)
if not np.all(np.hstack([refine_x, refine_y, refine_z]) == refine_x[0]):
raise Exception("refinement ratio changes between levels and/or directions (x = %s, y = %s, z = %s)" % (str(refine_x), str(refine_y), str(refine_z)))
refine = int(refine_x[0])
# TODO: generalize this once yt supports a custom refinement factor
if refine != 2:
raise ValueError("load_amr_grid only supports refine=2")
xmin = ymin = zmin = +np.inf
xmax = ymax = zmax = -np.inf
grid_data = []
for ilevel, level in enumerate(levels):
for grid in level.grids:
grid_dict = {}
grid_dict['left_edge'] = [grid.zmin, grid.ymin, grid.xmin]
grid_dict['right_edge'] = [grid.zmax, grid.ymax, grid.xmax]
grid_dict['dimensions'] = [grid.nz, grid.ny, grid.nx]
grid_dict['level'] = ilevel
for field in grid.quantities:
grid_dict[('gas', field)] = grid.quantities[field][dust_id]
grid_data.append(grid_dict)
xmin = min(xmin, grid.xmin)
xmax = max(xmax, grid.xmax)
ymin = min(ymin, grid.ymin)
ymax = max(ymax, grid.ymax)
zmin = min(zmin, grid.zmin)
zmax = max(zmax, grid.zmax)
# Determine domain resolution
grid0 = levels[0].grids[0]
dx = (grid0.xmax - grid0.xmin) / float(grid0.nx)
nx = int(round((xmax - xmin) / dx))
dy = (grid0.ymax - grid0.ymin) / float(grid0.ny)
ny = int(round((ymax - ymin) / dy))
dz = (grid0.zmax - grid0.zmin) / float(grid0.nz)
nz = int(round((zmax - zmin) / dz))
domain_dimensions = np.array([nz, ny, nx])
bbox = np.array([[xmin, xmax], [ymin, ymax], [zmin, zmax]])
from yt.mods import load_amr_grids
spf = load_amr_grids(grid_data, domain_dimensions, bbox=bbox)
return spf
def find_order(refined):
"""
Find the index array to use to sort the ``refined`` and ``density`` arrays
to swap the xyz <-> zyx order.
"""
order = np.zeros(refined.shape)
if not refined[0]:
return [0]
def find_nested(i):
cells = [i]
for cell in range(8):
i += 1
if refined[i]:
parent = i
i, sub_cells = find_nested(i)
cells.append(sub_cells)
else:
cells.append(i)
cells = [cells[j] for j in [0,1,5,3,7,2,6,4,8]]
return i, np.hstack(cells)
return find_nested(0)[1]
def octree_grid_to_yt_stream(grid, dust_id=0):
order = find_order(grid.refined)
refined = grid.refined[order]
xmin = grid.x - grid.dx
xmax = grid.x + grid.dx
ymin = grid.y - grid.dy
ymax = grid.y + grid.dy
zmin = grid.z - grid.dz
zmax = grid.z + grid.dz
from yt.mods import load_octree
quantities = {}
for field in grid.quantities:
quantities[('gas', field)] = np.atleast_2d(grid.quantities[field][dust_id][order][~refined]).transpose()
bbox = np.array([[xmin, xmax], [ymin, ymax], [zmin, zmax]])
octree_mask = refined.astype(np.uint8) * 8
spf = load_octree(octree_mask=octree_mask,
data=quantities,
bbox=bbox,
over_refine_factor=0,
partial_coverage=0)
return spf
def cartesian_grid_to_yt_stream(grid, xmin, xmax, ymin, ymax, zmin, zmax, dust_id=0):
# TODO: only works for regular grids, need to catch non-uniform cases here
# Make data dict which should contain (array, unit) tuples
data = {}
for field in grid.quantities:
data[field] = (grid.quantities[field][dust_id], '')
# Load cartesian grid into yt
from yt.mods import load_uniform_grid
spf = load_uniform_grid(data=data,
domain_dimensions=np.array(grid.shape, dtype=np.int32),
bbox=np.array([(xmin, xmax), (ymin, ymax), (zmin, zmax)]))
return spf
|
[
"thomas.robitaille@gmail.com"
] |
thomas.robitaille@gmail.com
|
5db77f5c0ccc08e6f16c479642688f934e405b8b
|
6757339759559cc741178ed4236b449ff27df221
|
/chrZ_make_seq_for_ldhelmet.py
|
5833cc08ecd26ce0740ee512fb400e2178dbabf0
|
[] |
no_license
|
anandksrao/postdoc
|
18095f675cc5d67bc6e6a1b70bdc7dca29ac880d
|
a09e2d810bc4b562dc0e6de2999c063f5bd59cf8
|
refs/heads/master
| 2020-04-22T23:28:03.482966
| 2015-09-08T16:40:24
| 2015-09-08T16:40:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,695
|
py
|
import re
import glob
import sys
import subprocess
import os
import copy
import argparse
import gzip
import time
def get_vcf_var(vcf_file, min_allele):
site_freq = {}
vcf_f = gzip.open(vcf_file, 'r')
for l in vcf_f:
if not re.search('^#', l):
d = re.split('\t', l)
allele1 = len(re.findall('\s0[\/|\:]', l)) + len(re.findall('\/0', l))
allele2 = len(re.findall('\s1[\/|\:]', l)) + len(re.findall('\/1', l))
min_ac = min(allele1, allele2)
if min_ac <= min_allele:
site_freq[int(d[1])] = 1
vcf_f.close()
return site_freq
def parse_male_haps(hap_file, site_freq, sites_file, chr):
f = open(hap_file, 'r')
var = {}
sites = {}
for l in f:
d = re.split('\s+', l.rstrip())
if int(d[2]) not in site_freq:
if int(d[2]) in sites:
var.pop(int(d[2]), None)
else:
sites[int(d[2])] = 1
for ix, i in enumerate(d[5:len(d)]):
if ix not in var:
var[ix] = dict()
if i == '0':
var[ix][int(d[2]) - 1] = d[3]
else:
var[ix][int(d[2]) - 1] = d[4]
f.close()
sites_f = open(sites_file, 'w')
for site in sorted(var[0].keys()):
sites_f.write('%s,%s\n' % (chr, site + 1))
sites_f.close()
return var
def parse_female_haps(var, vcf_file):
f = gzip.open(vcf_file)
start_hap = max(var.keys()) + 1
for l in f:
if not re.search('#', l):
l = l.rstrip()
d = re.split('\t', l)
if (int(d[1]) - 1) in var[0]:
fem_gens = []
for geno in d[9:]:
if not re.search('\S\/', geno):
allele = re.search('^(\S)', geno).group(1)
fem_gens.append(allele)
for ix, gen in enumerate(fem_gens):
hapnum = start_hap + ix
if hapnum not in var:
var[hapnum] = {}
if gen == '.':
var[hapnum][int(d[1]) - 1] = 'N'
elif gen == '1':
var[hapnum][int(d[1]) - 1] = d[4]
elif gen == '0':
var[hapnum][int(d[1]) - 1] = d[3]
f.close()
return var
def get_chromosome(genome, chr):
outfile = genome + '_' + chr
subprocess.call('~/bin/samtools-0.1.19/samtools faidx %s %s > %s' % (genome, chr, outfile), shell=True)
out_f = open(outfile, 'r')
chromosome = ''
locus_name = out_f.next()
for l in out_f:
chromosome = chromosome + l.rstrip().upper()
out_f.close()
os.remove(outfile)
return list(chromosome)
def print_seq(var, chr_as_list, masked, out_file):
out_f = open(out_file, 'w')
for ind in var:
out_f.write('>haplo%s\n' % ind)
tmp_chr = list(chr_as_list)
for pos, base in enumerate(masked):
if base in ['4', '5', '6', '7']:
tmp_chr[pos] = 'N'
for pos in var[ind]:
tmp_chr[pos] = var[ind][pos]
for i in xrange(0, len(tmp_chr), 60):
out_f.write(''.join(tmp_chr[i:i+60]) + '\n')
out_f.close()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--sp", help="species for which to run analysis")
args = parser.parse_args()
sp = args.sp
if sp == 'ZF':
vcf_file = '/mnt/gluster/home/sonal.singhal1/ZF/after_vqsr/by_chr/all_vcf/for_shapeit/gatk.ug.finch19.chrZ.allfilters.recodedsex.recoded_biallelicSNPs.vcf.gz'
hap_file = '/mnt/gluster/home/sonal.singhal1/ZF/phasing/PIR_approach/finch19/chrZ_haplotypes.haps'
masked_genome = '/mnt/gluster/home/sonal.singhal1/ZF/masked_genome/ZF.masked_genome.repeat_masked.switch_masked.fa'
if sp == 'LTF':
vcf_file = '/mnt/gluster/home/sonal.singhal1/LTF/after_vqsr/by_chr/for_shapeit/gatk.ug.ltf.chrZ.allfilters.recodedsex.recoded_biallelicSNPs.vcf.gz'
hap_file = '/mnt/gluster/home/sonal.singhal1/%s/phasing/PIR_approach/chrZ_haplotypes.haps' % sp
masked_genome = '/mnt/gluster/home/sonal.singhal1/LTF/masked_genome/LTF.masked_genome.repeat_masked.fa'
out_file = '/mnt/gluster/home/sonal.singhal1/%s/analysis/LDhelmet/chrZ_haplotypes.fasta' % sp
site_file = '/mnt/gluster/home/sonal.singhal1/%s/analysis/LDhelmet/chrZ_sites.csv' % sp
genome = '/mnt/gluster/home/sonal.singhal1/reference/taeGut1_60.bamorder.fasta'
min_allele = 1
chr = 'chrZ'
site_freq = get_vcf_var(vcf_file, min_allele)
chr_as_list = get_chromosome(genome, chr)
masked = get_chromosome(masked_genome, chr)
var = parse_male_haps(hap_file, site_freq, site_file, chr)
var = parse_female_haps(var, vcf_file)
print_seq(var, chr_as_list, masked, out_file)
if __name__ == "__main__":
main()
|
[
"sonal.singhal1@gmail.com"
] |
sonal.singhal1@gmail.com
|
40bfd6b2d53f43ebd7039a79a1d0df64f193dd3e
|
f96636810509786bd7afdfb1580fd276b930ade1
|
/client/sendDiagPopup.py
|
cbc672534de873b006ab10258fe7a3a4006185e6
|
[] |
no_license
|
Bharathkumar-nb/SCSI-simulation
|
0b0d47fa2bce028e6214bf3e348c4be28cfaa118
|
c94041043793deaa1ac4a1298eca9685952ff1eb
|
refs/heads/master
| 2021-01-20T10:29:15.966464
| 2013-05-03T07:39:38
| 2013-05-03T07:39:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,734
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'send_diagnostic.ui'
#
# Created: Sat Apr 27 09:51:24 2013
# by: PyQt4 UI code generator 4.9.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_send_diagnostic(object):
def setupUi(self, send_diagnostic):
send_diagnostic.setObjectName(_fromUtf8("send_diagnostic"))
send_diagnostic.resize(632, 515)
# send_diagnostic = QtGui.QWidget(send_diagnostic)
# send_diagnostic.setObjectName(_fromUtf8("centralWidget"))
self.label = QtGui.QLabel(send_diagnostic)
self.label.setGeometry(QtCore.QRect(80, 40, 66, 17))
self.label.setText(_fromUtf8(""))
self.label.setObjectName(_fromUtf8("label"))
self.label_2 = QtGui.QLabel(send_diagnostic)
self.label_2.setGeometry(QtCore.QRect(60, 40, 121, 17))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.label_3 = QtGui.QLabel(send_diagnostic)
self.label_3.setGeometry(QtCore.QRect(70, 220, 111, 17))
self.label_3.setText(_fromUtf8(""))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.label_4 = QtGui.QLabel(send_diagnostic)
self.label_4.setGeometry(QtCore.QRect(60, 90, 211, 20))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.label_5 = QtGui.QLabel(send_diagnostic)
self.label_5.setGeometry(QtCore.QRect(60, 160, 211, 17))
self.label_5.setObjectName(_fromUtf8("label_5"))
self.pushButton = QtGui.QPushButton(send_diagnostic)
self.pushButton.setGeometry(QtCore.QRect(320, 370, 101, 27))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.lineEdit = QtGui.QLineEdit(send_diagnostic)
self.lineEdit.setGeometry(QtCore.QRect(350, 30, 111, 27))
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.lineEdit_2 = QtGui.QLineEdit(send_diagnostic)
self.lineEdit_2.setGeometry(QtCore.QRect(350, 150, 113, 27))
self.lineEdit_2.setObjectName(_fromUtf8("lineEdit_2"))
self.lineEdit_3 = QtGui.QLineEdit(send_diagnostic)
self.lineEdit_3.setGeometry(QtCore.QRect(350, 210, 113, 27))
self.lineEdit_3.setObjectName(_fromUtf8("lineEdit_3"))
self.lineEdit_4 = QtGui.QLineEdit(send_diagnostic)
self.lineEdit_4.setGeometry(QtCore.QRect(350, 90, 113, 27))
self.lineEdit_4.setObjectName(_fromUtf8("lineEdit_4"))
self.label_6 = QtGui.QLabel(send_diagnostic)
self.label_6.setGeometry(QtCore.QRect(60, 220, 121, 17))
self.label_6.setObjectName(_fromUtf8("label_6"))
self.label_7 = QtGui.QLabel(send_diagnostic)
self.label_7.setGeometry(QtCore.QRect(60, 240, 211, 17))
self.label_7.setObjectName(_fromUtf8("label_7"))
# send_diagnostic.setCentralWidget(send_diagnostic)
#self.menuBar = QtGui.QMenuBar(send_diagnostic)
# self.menuBar.setGeometry(QtCore.QRect(0, 0, 632, 25))
# self.menuBar.setObjectName(_fromUtf8("menuBar"))
# send_diagnostic.setMenuBar(self.menuBar)
# self.mainToolBar = QtGui.QToolBar(send_diagnostic)
#self.mainToolBar.setObjectName(_fromUtf8("mainToolBar"))
#send_diagnostic.addToolBar(QtCore.Qt.TopToolBarArea, self.mainToolBar)
#self.statusBar = QtGui.QStatusBar(send_diagnostic)
#self.statusBar.setObjectName(_fromUtf8("statusBar"))
#send_diagnostic.setStatusBar(self.statusBar)
self.retranslateUi(send_diagnostic)
QtCore.QMetaObject.connectSlotsByName(send_diagnostic)
def retranslateUi(self, send_diagnostic):
send_diagnostic.setWindowTitle(QtGui.QApplication.translate("send_diagnostic", "send_diagnostic", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("send_diagnostic", "Self Test Bit (0/1)", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("send_diagnostic", "DEVOFFL (Device Offline) (0/1)", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("send_diagnostic", "UNITOFFL (Unit Offline) (0/1)", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton.setText(QtGui.QApplication.translate("send_diagnostic", "OK", None, QtGui.QApplication.UnicodeUTF8))
self.label_6.setText(QtGui.QApplication.translate("send_diagnostic", "Self Test Code", None, QtGui.QApplication.UnicodeUTF8))
self.label_7.setText(QtGui.QApplication.translate("send_diagnostic", "(Value between 000 and 111)", None, QtGui.QApplication.UnicodeUTF8))
|
[
"bharathkmr.nb@gmail.com"
] |
bharathkmr.nb@gmail.com
|
3a23b26701ccfc73af6fef97637eab76dab9b738
|
0a43afbcba776ed8ada0fef5425b1507aa4d51c1
|
/smartbook/smartbook/web/migrations/0016_auto__del_ownercompanyname__add_ownercompany.py
|
b443dac4b4ee2cee94af23d6f41f3b7c34b366fb
|
[] |
no_license
|
geethusuresh/inventory-systems
|
c76d6d10429f483499594df8c8f34d780531f18c
|
fd4211d29042776fa47da92162cbbbe8220090cd
|
refs/heads/master
| 2021-01-02T08:51:31.278578
| 2014-09-28T07:35:54
| 2014-09-28T07:35:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,583
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'OwnerCompanyName'
db.delete_table(u'web_ownercompanyname')
# Adding model 'OwnerCompany'
db.create_table(u'web_ownercompany', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('company_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'web', ['OwnerCompany'])
def backwards(self, orm):
# Adding model 'OwnerCompanyName'
db.create_table(u'web_ownercompanyname', (
('company_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal(u'web', ['OwnerCompanyName'])
# Deleting model 'OwnerCompany'
db.delete_table(u'web_ownercompany')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'web.customer': {
'Meta': {'object_name': 'Customer'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'web.designation': {
'Meta': {'object_name': 'Designation'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
u'web.ownercompany': {
'Meta': {'object_name': 'OwnerCompany'},
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'web.staff': {
'Meta': {'object_name': 'Staff'},
'designation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Designation']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'web.transportationcompany': {
'Meta': {'object_name': 'TransportationCompany'},
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'web.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'district': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'email_id': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'house_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'land_line': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'pin': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'user_type': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
u'web.vendor': {
'Meta': {'object_name': 'Vendor'},
'contact_person': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['web']
|
[
"geethu@technomicssolutions.com"
] |
geethu@technomicssolutions.com
|
8b92d801b14bce7463cefb8954677a9b0694325a
|
415920616d5efccee4667126c4bb29f91f1d5321
|
/blood/donor/urls.py
|
f4eb33826ecb67e2f4cedf717f45045d210213b2
|
[] |
no_license
|
ManogaranArumugam/blood
|
6c779b3bfe308a95d52cb730be65b25cb2c3eda6
|
bb6ef86bfeaf67ed70eafa97dcb6b6c1da0c9f4f
|
refs/heads/master
| 2020-03-25T22:43:31.004539
| 2018-08-09T12:43:50
| 2018-08-09T12:43:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
from django.conf.urls import url
from blood.donor.views import DonorRegistrationView
urlpatterns = [
url(r'^$', DonorRegistrationView.as_view(), name='home'),
]
|
[
"palashpatidar51@gmail.com"
] |
palashpatidar51@gmail.com
|
6c340cf04b054bb75cdfb63b9fbdf78c10395714
|
fa93e53a9eee6cb476b8998d62067fce2fbcea13
|
/build/pal_statistics/catkin_generated/generate_cached_setup.py
|
91044b831264767383d53fe4fda54ceccbaa6122
|
[] |
no_license
|
oyetripathi/ROS_conclusion_project
|
2947ee2f575ddf05480dabc69cf8af3c2df53f73
|
01e71350437d57d8112b6cec298f89fc8291fb5f
|
refs/heads/master
| 2023-06-30T00:38:29.711137
| 2021-08-05T09:17:54
| 2021-08-05T09:17:54
| 392,716,311
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,355
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in '/home/sandeepan/tiago_public_ws/devel;/opt/ros/melodic'.split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/sandeepan/tiago_public_ws/devel/.private/pal_statistics/env.sh')
output_filename = '/home/sandeepan/tiago_public_ws/build/pal_statistics/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
# print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
[
"sandeepan.ghosh.ece20@itbhu.ac.in"
] |
sandeepan.ghosh.ece20@itbhu.ac.in
|
55565979bd9e9ceb8e3a4424587b82a4a4a0688a
|
b94ab99f9c1f8bbb99afd23e1bfcd2332060b4bd
|
/library/migrations/0012_auto_20170805_0851.py
|
050383e317e1d09ac7e3c45f3c1ea4e50db4dfca
|
[] |
no_license
|
georgecai904/bookshelf
|
e54ccae00d4ee48e91ca1564a425ba4586b52d93
|
0002207dc8ca586ce1127d3ea98bb53102d043df
|
refs/heads/master
| 2021-01-02T22:52:26.046535
| 2017-08-05T15:32:13
| 2017-08-05T15:32:13
| 99,409,971
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 597
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-05 08:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('library', '0011_auto_20170805_0850'),
]
operations = [
migrations.AlterField(
model_name='book',
name='pages',
field=models.IntegerField(blank=True),
),
migrations.AlterField(
model_name='book',
name='year',
field=models.IntegerField(blank=True),
),
]
|
[
"georgemail608@gmail.com"
] |
georgemail608@gmail.com
|
542650ec2698610417fd1074ecee715e3a7ecf4e
|
55d6de252e61c4b60688ebd8b1f637807acc1e7c
|
/sale_report/wizard/aged_customer_list.py
|
6bee3ea9ef1268815749ad4cfb0996f5175cc12b
|
[] |
no_license
|
mosadiqit/eerna_erp_uslbd
|
b707a1d49a4fce7c1543b63e0120e8f9b77b26ce
|
73e3994a9e32df7809d244eb6592513162ab7853
|
refs/heads/main
| 2023-06-30T14:53:04.837197
| 2021-08-04T11:30:46
| 2021-08-04T11:30:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,133
|
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
from odoo.tools import DEFAULT_SERVER_DATE_FORMAT as DATE_FORMAT
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT as DATETIME_FORMAT, BytesIO, xlsxwriter, base64
class AgedCustomerReportWizard(models.TransientModel):
_name = 'aged.customer.report.wizard'
company_id = fields.Many2one('res.company', string='Company', domain=lambda self:self._get_companies(),default=lambda self: self.env.user.company_id,required=True)
branch_ids = fields.Many2one( 'res.branch',string='Branch')
date_start = fields.Date(string='Start Date', required=True, default=fields.Date.today)
date_end = fields.Date(string='End Date', required=True, default=fields.Date.today)
def _get_companies(self):
query="""select * from res_company_users_rel where user_id={}""".format(self.env.user.id)
self._cr.execute(query=query)
allowed_companies=self._cr.fetchall()
allowed_company=[]
for company in allowed_companies:
allowed_company.append(company[0])
return [('id', 'in', allowed_company)]
def get_report(self):
data = {
'model': self._name,
'ids': self.ids,
'form': {
'date_start': self.date_start, 'date_end': self.date_end,'company_id':self.company_id.id, 'branch_id': self.branch_ids.id,
'branch_name': self.branch_ids.name,
},
}
# ref `module_name.report_id` as reference.
return self.env.ref('sale_report.aged_customer_list_report').report_action(
self, data=data)
class AreaWiseSalesReportView(models.AbstractModel):
"""
Abstract Model specially for report template.
_name = Use prefix `report.` along with `module_name.report_name`
"""
_name = 'report.sale_report.aged_customer_list_view'
@api.model
def _get_report_values(self, docids, data=None):
branch_id = data['form']['branch_id']
branch_name = data['form']['branch_name']
company_id=data['form']['company_id']
date_start = data['form']['date_start']
date_end = data['form']['date_end']
# query = """select (cast(date_trunc('month',current_date) as date)) startdate"""
# self._cr.execute(query=query)
# result = self._cr.fetchall()
# date_start = result[0]
#
# query = """select (cast(date_trunc('month',current_date)-INTERVAL '90 day' as date)) todate"""
# self._cr.execute(query=query)
# result_1 = self._cr.fetchall()
# date_end = result_1[0]
if branch_id:
branch_id = " m.branch_id = %s" % branch_id
else:
branch_id = "1=1"
if company_id:
company_id = " m.company_id = %s" % company_id
else:
company_id = "1=1"
query = """select distinct p.id,p.name,ca.area_name,max(m.date) as last_trans_date,sum(ml.debit) as debit,sum(ml.credit) as credit, (sum(debit)-sum(credit)) as Balance
from res_partner p
left join account_move m on m.partner_id=p.id
left join account_move_line ml on m.id=ml.move_id
left join customer_area_setup ca on ca.id=p.customer_area
where m.state='posted' --and
and p.id not in
(select distinct m.partner_id from account_move_line ml
left join account_move m on m.id=ml.move_id
where {} and m.date between '{}' and '{}' and {} and m.partner_id is not null)
group by p.id,p.name,ca.area_name
order by p.name,ca.area_name
""".format(branch_id, date_start, date_end, company_id)
self._cr.execute(query=query)
query_result = self._cr.fetchall()
return {
'date_start': date_start,
'date_end': date_end,
'branch': branch_name,
'idle_customer': query_result,
}
|
[
"ibrahimalazhar264@gmail.com"
] |
ibrahimalazhar264@gmail.com
|
4c0ac46f1d84771dc353dc57195fde968e23467e
|
630fe47bb5aa5e49b45ab101d87c2dd2c53d180f
|
/Bubble_soft_json.py
|
4670c4958ea3d09c64e4879c8e4c5913ea0222f9
|
[] |
no_license
|
shrivastava-himanshu/Leetcode_practice
|
467497a58d82ff3ae2569d5e610dc6f27a1f31d6
|
4c59799947c2b17bfd22ca2a08707ef85e84a913
|
refs/heads/main
| 2023-06-12T13:14:45.381839
| 2021-07-05T04:09:05
| 2021-07-05T04:09:05
| 367,546,005
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 856
|
py
|
def bubble_sort_json(elements,key='name'):
size = len(elements)
for i in range(size-1):
swapped = False
for j in range(size-1-i):
a = elements[j][key]
b = elements[j+1][key]
if a > b:
tmp = elements[j]
elements[j] = elements[j+1]
elements[j+1] = tmp
swapped = True
if not swapped:
break
if __name__ == '__main__':
elements = [
{'name': 'mona', 'transaction_amount': 1000, 'device': 'iphone-10'},
{'name': 'dhaval', 'transaction_amount': 400, 'device': 'google pixel'},
{'name': 'kathy', 'transaction_amount': 200, 'device': 'vivo'},
{'name': 'aamir', 'transaction_amount': 800, 'device': 'iphone-8'},
]
bubble_sort_json(elements,key='device')
print(elements)
|
[
"Himanshu.Shrivastava@vce.com"
] |
Himanshu.Shrivastava@vce.com
|
5b52140b650b1eaf1fa06ca3369752b6bba03eb9
|
ef786be9b7c7145d63797cb8c351780059996873
|
/watchlist_app/migrations/0001_initial.py
|
76d570588a7802ab729017622b43b470dcd0ec1d
|
[] |
no_license
|
nileshnagarwal/djangorest_course_sarda
|
31c27ab625139f632d1121296c981c108301de70
|
933d7b5330d7fda1b17c367d30cb903543eebb02
|
refs/heads/main
| 2023-07-17T11:49:43.562982
| 2021-08-27T07:19:57
| 2021-08-27T07:19:57
| 394,706,946
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 617
|
py
|
# Generated by Django 3.2.6 on 2021-08-10 07:41
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Movie',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.CharField(max_length=200)),
('active', models.BooleanField(default=True)),
],
),
]
|
[
"eternalshenron@gmail.com"
] |
eternalshenron@gmail.com
|
8aa8349d9f1213a364dd5a5713676193303f913c
|
159aed4755e47623d0aa7b652e178296be5c9604
|
/data/scripts/templates/object/draft_schematic/chemistry/shared_medpack_disease_action_a.py
|
20a75b8ac4bc4329cb51b260ec5349043fb09d4a
|
[
"MIT"
] |
permissive
|
anhstudios/swganh
|
fb67d42776864b1371e95f769f6864d0784061a3
|
41c519f6cdef5a1c68b369e760781652ece7fec9
|
refs/heads/develop
| 2020-12-24T16:15:31.813207
| 2016-03-08T03:54:32
| 2016-03-08T03:54:32
| 1,380,891
| 33
| 44
| null | 2016-03-08T03:54:32
| 2011-02-18T02:32:45
|
Python
|
UTF-8
|
Python
| false
| false
| 463
|
py
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/chemistry/shared_medpack_disease_action_a.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
[
"rwl3564@rit.edu"
] |
rwl3564@rit.edu
|
efaeae158142d783d0c4d3c5f624d9fbd08615b8
|
6f151b64427d47571ff8d02a24a98c9cbd8c68a5
|
/[leetcode-08]strings-to-integer-atoi.py
|
17245f772a1bae021abee1832c545d882e0b4cb2
|
[
"MIT"
] |
permissive
|
Menah3m/leetcode-Python
|
50c0a0e518274cfa9a5ce939c37c075ce226dd04
|
212cae16ae868e5f031d3aeb8f614c539c1a27f1
|
refs/heads/master
| 2021-04-02T03:24:35.855185
| 2020-12-15T09:39:03
| 2020-12-15T09:39:03
| 248,238,533
| 0
| 0
| null | 2020-12-15T09:39:05
| 2020-03-18T13:25:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,470
|
py
|
"""
Implement an atoi function that converts a string to an integer.
The function first discards as many leading whitespace characters as necessary until the first non-whitespace character is found. The conversion rules are:
If the first non-whitespace character is a plus or minus sign, combine it with as many consecutive digit characters as possible to form a signed integer.
If the first non-whitespace character is a digit, combine it directly with the consecutive digits that follow to form an integer.
The string may contain extra characters after the valid integer part; these are ignored and have no effect on the result.
Note: if the first non-whitespace character is not a valid integer character, or the string is empty or contains only whitespace, no conversion is performed.
Whenever no valid conversion can be performed, return 0.
Hints:
The only whitespace character in this problem is the space character ' '.
Assume the environment can only store 32-bit signed integers, whose range is [-2^31, 2^31 - 1]. If the value falls outside this range, return INT_MAX (2^31 - 1) or INT_MIN (-2^31).
Example 1:
Input: "42"
Output: 42
Example 2:
Input: "   -42"
Output: -42
Explanation: The first non-whitespace character is '-', a minus sign.
We combine it with as many consecutive digits as possible, which gives -42.
Example 3:
Input: "4193 with words"
Output: 4193
Explanation: Conversion stops at the digit '3', because the next character is not a digit.
Example 4:
Input: "words and 987"
Output: 0
Explanation: The first non-whitespace character is 'w', which is neither a digit nor a sign.
Therefore no valid conversion can be performed.
Example 5:
Input: "-91283472332"
Output: -2147483648
Explanation: The number "-91283472332" exceeds the 32-bit signed integer range.
Therefore INT_MIN (-2^31) is returned.
Source: LeetCode-08
Link: https://leetcode-cn.com/problems/string-to-integer-atoi
"""
class Solution:
    def myAtoi(self, str: str) -> int:
        # Drop leading whitespace.
        str = str.lstrip()
        # Empty string, or a first character that is neither a digit nor a sign: no conversion.
        if len(str) == 0 or (not str[0].isdigit() and str[0] not in ["-", "+"]):
            return 0
        # Keep the sign (or first digit), then greedily append consecutive digits.
        res, i = str[0], 1
        while i < len(str) and str[i].isdigit():
            res += str[i]
            i += 1
        try:
            # Convert and clamp to the 32-bit signed integer range.
            res = int(res)
            return min(max(res, -2**31), 2**31-1)
        except ValueError:
            # A lone '+' or '-' has no digits to convert.
            return 0
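# Hedged usage sketch (not part of the original submission): exercising the
# solution on the examples from the problem statement above.
if __name__ == '__main__':
    solver = Solution()
    for text in ["42", "   -42", "4193 with words", "words and 987", "-91283472332"]:
        print(text, '->', solver.myAtoi(text))  # expected: 42, -42, 4193, 0, -2147483648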
|
[
"568200065@qq.com"
] |
568200065@qq.com
|
d046514180a9e37274ac16c00eabafba5f77c479
|
9a9fb43d866dc8fd829211d2b47328ef1f5ed428
|
/PI_ROS_WORKSPACES/ros_catkin_ws/build_isolated/rosboost_cfg/catkin_generated/pkg.develspace.context.pc.py
|
72b73d066877678ca5a5c6ee1ed6b3c0bdd104c5
|
[] |
no_license
|
droter/auto_mow
|
326df42a54676079cac61fe63c40d5d04beb049b
|
3742cb2ef78bc06d2771ac4c679e5110909774f8
|
refs/heads/master
| 2022-05-19T20:18:33.409777
| 2020-04-29T00:42:24
| 2020-04-29T00:42:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rosboost_cfg"
PROJECT_SPACE_DIR = "/home/pi/ros_catkin_ws/devel_isolated/rosboost_cfg"
PROJECT_VERSION = "1.14.4"
|
[
"joshuatygert@gmail.com"
] |
joshuatygert@gmail.com
|
26f1f046fbcc3e826fb8fa1f586db82a5eadc742
|
aea02d626c10396c2220d5ee642cb9c279e5bc37
|
/migrations/versions/34c8e6e836da_email_column.py
|
d039f076f6691862d68521cdf0df979072f894e7
|
[
"MIT"
] |
permissive
|
Derrick-Nyongesa/Blog
|
5fb176575865a75a02658bc8622fed3b9e05c919
|
aff6b97aac958e6f626c934c57fffba1bb1f845d
|
refs/heads/main
| 2023-04-14T12:21:20.890964
| 2021-04-26T07:07:55
| 2021-04-26T07:07:55
| 360,806,456
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 795
|
py
|
"""email column
Revision ID: 34c8e6e836da
Revises: 4fb50df0a785
Create Date: 2021-04-23 14:54:17.658067
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '34c8e6e836da'
down_revision = '4fb50df0a785'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('email', sa.String(length=255), nullable=True))
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_column('users', 'email')
# ### end Alembic commands ###
|
[
"nyongesaderrick@gmail.com"
] |
nyongesaderrick@gmail.com
|
bd3b3d45311a3acf29007751bcf7d26209d85391
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part000492.py
|
7a98b03c8016f65306e467ddfb470f008a62508e
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,532
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher43335(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({0: 1, 1: 1}), [
(VariableWithCount('i2.2.1.2.2.2.0', 1, 1, S(0)), Add)
]),
1: (1, Multiset({2: 1}), [
(VariableWithCount('i2.2.1.2.2.2.0', 1, 1, S(0)), Add)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Add
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher43335._instance is None:
CommutativeMatcher43335._instance = CommutativeMatcher43335()
return CommutativeMatcher43335._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 43334
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.2.2.1.0', S(1))
except ValueError:
pass
else:
pass
# State 43336
if len(subjects) >= 1 and isinstance(subjects[0], Pow):
tmp2 = subjects.popleft()
subjects3 = deque(tmp2._args)
# State 43337
if len(subjects3) >= 1:
tmp4 = subjects3.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.2.2.1.1', tmp4)
except ValueError:
pass
else:
pass
# State 43338
if len(subjects3) >= 1 and subjects3[0] == Integer(2):
tmp6 = subjects3.popleft()
# State 43339
if len(subjects3) == 0:
pass
# State 43340
if len(subjects) == 0:
pass
# 0: g*x**2
yield 0, subst2
subjects3.appendleft(tmp6)
subjects3.appendleft(tmp4)
subjects.appendleft(tmp2)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.2.2.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 43348
if len(subjects) >= 1:
tmp8 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.2.2.1.1', tmp8)
except ValueError:
pass
else:
pass
# State 43349
if len(subjects) == 0:
pass
# 1: f*x
yield 1, subst2
subjects.appendleft(tmp8)
if len(subjects) >= 1:
tmp10 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.2.2.1.0', tmp10)
except ValueError:
pass
else:
pass
# State 55141
if len(subjects) == 0:
pass
# 2: x*f
yield 2, subst2
subjects.appendleft(tmp10)
if len(subjects) >= 1 and isinstance(subjects[0], Mul):
tmp12 = subjects.popleft()
associative1 = tmp12
associative_type1 = type(tmp12)
subjects13 = deque(tmp12._args)
matcher = CommutativeMatcher43342.get()
tmp14 = subjects13
subjects13 = []
for s in tmp14:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp14, subst0):
pass
if pattern_index == 0:
pass
# State 43347
if len(subjects) == 0:
pass
# 0: g*x**2
yield 0, subst1
if pattern_index == 1:
pass
# State 43350
if len(subjects) == 0:
pass
# 1: f*x
yield 1, subst1
if pattern_index == 2:
pass
# State 55142
if len(subjects) == 0:
pass
# 2: x*f
yield 2, subst1
subjects.appendleft(tmp12)
return
yield
from .generated_part000493 import *
from matchpy.matching.many_to_one import CommutativeMatcher
from collections import deque
from matchpy.utils import VariableWithCount
from multiset import Multiset
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
843d08c826f8a29b84e9e209cbd7cf14df5aad5d
|
bcc199a7e71b97af6fbfd916d5a0e537369c04d9
|
/acmicpc/solved/2448_Draw_Stars/solution.py
|
bd9a9474145925faf28b5b29de95d694d48808eb
|
[] |
no_license
|
sungminoh/algorithms
|
9c647e82472905a2c4e505c810b622b734d9d20d
|
1389a009a02e90e8700a7a00e0b7f797c129cdf4
|
refs/heads/master
| 2023-05-01T23:12:53.372060
| 2023-04-24T06:34:12
| 2023-04-24T06:34:12
| 87,406,513
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 496
|
py
|
import math
base = [' * ', ' * * ', '*****']
n = int(input())
def get_k(n):
return int(math.log(n/3., 2))
def concat1(a, b):
return [x[0]+' '+x[1] for x in zip(a, b)]
def concat2(a, b):
return [x[0]+x[1] for x in zip(a, b)]
def nth(i):
if i == 0:
return base
else:
child = nth(i-1)
space = [' '*(2**(i-1)) for x in range(len(child))]
return concat2(space, concat2(child, space)) + concat1(child, child)
print('\n'.join(nth(get_k(n))))
|
[
"smoh2044@gmail.com"
] |
smoh2044@gmail.com
|
d12b330c8a86dae7b2e3ff874faa4a0c84278ccd
|
d697c1d45e96bd440be9c17ab14243a5882b1f52
|
/hm/oop/Tool.py
|
bc86e8847c84a48bbaf2b70702747c4f2cdd6d05
|
[] |
no_license
|
ithjl521/python
|
9eeda2e60dda97ee36e8764c06400eb12818689f
|
f4fe50799501c483cb64445fd05ee0f30f56576c
|
refs/heads/master
| 2020-07-12T23:10:53.608276
| 2019-11-08T08:59:35
| 2019-11-08T08:59:35
| 204,931,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
class Tool(object):
count = 0
def __init__(self,name):
self.name = name
Tool.count += 1
# self.count += 1
tool1= Tool('futou')
tool2= Tool('maoie')
tool3= Tool('fuzi')
Tool.count = 99
print(Tool.count)
|
[
"it_hjl@163.com"
] |
it_hjl@163.com
|
a6223c3a60b16697d235aa1eeeb4a1c5dda89b26
|
5c254373f6725107931b68704436c2dbcd39d877
|
/data_utils/FS_utils/eval_map.py
|
ff3f00e38f9b4ccfb4a4a595343df50faa23d6c3
|
[
"MIT"
] |
permissive
|
JunLi-Galios/unsup_temp_embed_alternating
|
22330346094720ecba2e5af305febe586566b92f
|
1b054fd82aadcfe1aa219be17beb77c89efd974e
|
refs/heads/master
| 2023-03-21T04:06:16.044321
| 2021-03-20T06:06:06
| 2021-03-20T06:06:06
| 322,737,110
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,472
|
py
|
#!/usr/bin/env python
"""Eval level activity
"""
__author__ = 'Anna Kukleva'
__date__ = 'January 2019'
import os
from ute.utils.arg_pars import opt
import data_utils.FS_utils.update_argpars as fs_utils
fs_utils.update()
actions = ['add_dressing',
'add_oil',
'add_pepper',
'cut',
'mix_dressing',
'mix_ingredients',
'peel_cucumber',
'place',
'serve_salad_onto_plate']
eval = {}
eval['action_start'] = ['action_start']
eval['add_dressing'] = ['add_dressing']
eval['add_oil'] = ['add_oil']
eval['add_pepper'] = ['add_pepper']
eval['cut'] = ['cut_cucumber',
'cut_tomato',
'cut_cheese',
'cut_lettuce']
eval['mix_dressing'] = ['mix_dressing']
eval['mix_ingredients'] = ['mix_ingredients']
eval['peel_cucumber'] = ['peel_cucumber']
eval['place'] = ['place_cucumber_into_bowl',
'place_tomato_into_bowl',
'place_cheese_into_bowl',
'place_lettuce_into_bowl']
eval['serve_salad_onto_plate'] = ['serve_salad_onto_plate']
eval['null'] = ['add_salt',
'add_vinegar']
eval['action_end'] = ['action_end']
label2idx = {}
idx2label = {}
path = os.path.join(opt.dataset_root, opt.gt, 'mapping', 'mappingeval.txt')
with open(path, 'w') as f:
for idx, (high_act, mid_acts) in enumerate(eval.items()):
for mid_act in mid_acts:
f.write('%d %s\n' % (idx, mid_act))
|
[
"kuklevaanna@gmail.com"
] |
kuklevaanna@gmail.com
|
07b1dc0344a5647316639af1cc3e0d015e5e107f
|
7a5c9962ee40996a9f24f5493c715d5553052cf7
|
/jobs/apps.py
|
659d18b194f99c3b388bbaea0e799ca23b237f8c
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
dymaxionlabs/satlomas-back
|
da6355d1fc90a2e9d4a7795b4751e3ebe043ffa6
|
f4568f6535755fd4a2432ecc661a264872206c6c
|
refs/heads/master
| 2023-07-17T17:07:43.037314
| 2021-08-28T15:54:21
| 2021-08-28T15:54:21
| 262,424,687
| 0
| 0
| null | 2020-05-08T20:42:49
| 2020-05-08T20:42:48
| null |
UTF-8
|
Python
| false
| false
| 132
|
py
|
from django.apps import AppConfig
class JobsConfig(AppConfig):
name = 'jobs'
def ready(self):
import jobs.signals
|
[
"munshkr@gmail.com"
] |
munshkr@gmail.com
|
78befbcc094a1c019b67d6b56a7b35cf4e3d6b6b
|
a89c739589d0ee29ff6fff1a1508a426dfe4489a
|
/basics/assert.py
|
b7872ff4b51952a23919213ac3ad863af939f0fd
|
[] |
no_license
|
macabdul9/python-learning
|
107e3533998e3f373b804d6b59152fc41938604b
|
f0d5e0e37cbed3d846684be80f0f92e5cbb9ceb5
|
refs/heads/master
| 2020-04-27T04:31:47.907486
| 2020-03-05T16:48:53
| 2020-03-05T16:48:53
| 174,057,604
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 448
|
py
|
"""
@author : macab (macab@debian)
@file : assert
@created : Sunday Mar 17, 2019 00:28:58 IST
"""
'''
Python provides the assert statement to check if a given logical expression is true or false. Program execution proceeds
only if the expression is true and raises the AssertionError when it is false. The following code shows the usage of the
assert statement.It is much like an if-else
'''
x = int(input())
assert x >= 0
print(x)
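# Hedged extra sketch (not in the original snippet): a hypothetical helper that
# uses assert with a message, wrapped in try/except so the AssertionError can be
# observed instead of stopping the program.
def require_non_negative(value):
    assert value >= 0, "expected a non-negative value, got %r" % value
    return value
try:
    require_non_negative(-5)
except AssertionError as error:
    print("assertion failed:", error)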
|
[
"abdulwaheed1513@gmail.com"
] |
abdulwaheed1513@gmail.com
|
7a6c254cbc7e0b5d94437a6f0cb3061191327052
|
5942e3e75ef7dc22a67b04fb1f12e14658a2093d
|
/documentation_files/platform.py
|
d83912b2d5f5be349492150ecc6894802fff344d
|
[] |
no_license
|
the-factory/kdevelop-python
|
9e94d2a4d4906a31a4d2a8a08300766e02d41a59
|
1e91f2cb4c94d9455a2ee22fef13df680aeed1ab
|
refs/heads/master
| 2021-01-18T08:57:16.707711
| 2012-04-09T22:37:47
| 2012-04-09T22:37:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,818
|
py
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
""":synopsis: Retrieves as much platform identifying data as possible.
"""
def machine():
"""
Returns the machine type, e.g. ``'i386'``. An empty string is returned if the
value cannot be determined.
"""
pass
def node():
"""
Returns the computer's network name (may not be fully qualified!). An empty
string is returned if the value cannot be determined.
"""
pass
def platform(aliased=0,terse=0):
"""
Returns a single string identifying the underlying platform with as much useful
information as possible.
The output is intended to be *human readable* rather than machine parseable. It
may look different on different platforms and this is intended.
If *aliased* is true, the function will use aliases for various platforms that
report system names which differ from their common names, for example SunOS will
be reported as Solaris. The :func:`system_alias` function is used to implement
this.
Setting *terse* to true causes the function to return only the absolute minimum
information needed to identify the platform.
"""
pass
def processor():
"""
Returns the (real) processor name, e.g. ``'amdk6'``.
An empty string is returned if the value cannot be determined. Note that many
platforms do not provide this information or simply return the same value as for
:func:`machine`. NetBSD does this.
"""
pass
def python_build():
"""
Returns a tuple ``(buildno, builddate)`` stating the Python build number and
date as strings.
"""
pass
def python_compiler():
"""
Returns a string identifying the compiler used for compiling Python.
"""
pass
def python_branch():
"""
Returns a string identifying the Python implementation SCM branch.
"""
pass
def python_implementation():
"""
Returns a string identifying the Python implementation. Possible return values
are: 'CPython', 'IronPython', 'Jython', 'PyPy'.
"""
pass
def python_revision():
"""
Returns a string identifying the Python implementation SCM revision.
"""
pass
def python_version():
"""
Returns the Python version as string ``'major.minor.patchlevel'``
Note that unlike the Python ``sys.version``, the returned value will always
include the patchlevel (it defaults to 0).
"""
pass
def python_version_tuple():
"""
Returns the Python version as tuple ``(major, minor, patchlevel)`` of strings.
Note that unlike the Python ``sys.version``, the returned value will always
include the patchlevel (it defaults to ``'0'``).
"""
pass
def release():
"""
Returns the system's release, e.g. ``'2.2.0'`` or ``'NT'`` An empty string is
returned if the value cannot be determined.
"""
pass
def system():
"""
Returns the system/OS name, e.g. ``'Linux'``, ``'Windows'``, or ``'Java'``. An
empty string is returned if the value cannot be determined.
"""
pass
def system_alias(system,release,version):
"""
Returns ``(system, release, version)`` aliased to common marketing names used
for some systems. It also does some reordering of the information in some cases
where it would otherwise cause confusion.
"""
pass
def version():
"""
Returns the system's release version, e.g. ``'#3 on degas'``. An empty string is
returned if the value cannot be determined.
"""
pass
def uname():
"""
Fairly portable uname interface. Returns a tuple of strings ``(system, node,
release, version, machine, processor)`` identifying the underlying platform.
Note that unlike the :func:`os.uname` function this also returns possible
processor information as additional tuple entry.
Entries which cannot be determined are set to ``''``.
Java Platform
-------------
"""
pass
def java_ver(release='',vendor='',vminfo=('','',''),osinfo=('','','')):
"""
Version interface for Jython.
Returns a tuple ``(release, vendor, vminfo, osinfo)`` with *vminfo* being a
tuple ``(vm_name, vm_release, vm_vendor)`` and *osinfo* being a tuple
``(os_name, os_version, os_arch)``. Values which cannot be determined are set to
the defaults given as parameters (which all default to ``''``).
Windows Platform
----------------
"""
pass
def win32_ver(release='',version='',csd='',ptype=''):
"""
Get additional version information from the Windows Registry and return a tuple
``(version, csd, ptype)`` referring to version number, CSD level and OS type
(multi/single processor).
As a hint: *ptype* is ``'Uniprocessor Free'`` on single processor NT machines
and ``'Multiprocessor Free'`` on multi processor machines. The *'Free'* refers
to the OS version being free of debugging code. It could also state *'Checked'*
which means the OS version uses debugging code, i.e. code that checks arguments,
ranges, etc.
"""
pass
def popen(cmd,mode='r',bufsize=None):
"""
Portable :func:`popen` interface. Find a working popen implementation
preferring :func:`win32pipe.popen`. On Windows NT, :func:`win32pipe.popen`
should work; on Windows 9x it hangs due to bugs in the MS C library.
Mac OS Platform
---------------
"""
pass
def mac_ver(release='',versioninfo=('','',''),machine=''):
"""
Get Mac OS version information and return it as tuple ``(release, versioninfo,
machine)`` with *versioninfo* being a tuple ``(version, dev_stage,
non_release_version)``.
Entries which cannot be determined are set to ``''``. All tuple entries are
strings.
Documentation for the underlying :cfunc:`gestalt` API is available online at
http://www.rgaros.nl/gestalt/.
Unix Platforms
--------------
"""
pass
def dist(distname='',version='',id='',supported_dists=('SuSE','debian','redhat','mandrake')):
"""
This is an old version of the functionality now provided by
:func:`linux_distribution`. For new code, please use the
:func:`linux_distribution`.
The only difference between the two is that ``dist()`` always
returns the short name of the distribution taken from the
``supported_dists`` parameter.
"""
pass
def linux_distribution(distname='',version='',id='',supported_dists=('SuSE','debian','redhat','mandrake'),full_distribution_name=1):
"""
Tries to determine the name of the Linux OS distribution name.
``supported_dists`` may be given to define the set of Linux distributions to
look for. It defaults to a list of currently supported Linux distributions
identified by their release file name.
If ``full_distribution_name`` is true (default), the full distribution read
from the OS is returned. Otherwise the short name taken from
``supported_dists`` is used.
Returns a tuple ``(distname,version,id)`` which defaults to the args given as
parameters. ``id`` is the item in parentheses after the version number. It
is usually the version codename.
"""
pass
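# Hedged usage sketch (not part of the generated stubs): querying the real
# standard-library platform module that these stubs document.
if __name__ == '__main__':
    import platform as _platform
    print(_platform.system())           # e.g. 'Linux'
    print(_platform.release())          # kernel / OS release string
    print(_platform.python_version())   # always 'major.minor.patchlevel'
    print(_platform.uname())            # (system, node, release, version, machine, processor)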
|
[
"svenbrauch@googlemail.com"
] |
svenbrauch@googlemail.com
|
428bd78c26371e93841a86cf15ea344b9b336399
|
50d39f7a91047c7498714fd68958156320efdf5f
|
/cwr/grammar/record/writer_territory.py
|
f2dcc1c3454b7d7ebcad3fc23a15bd1c95df4167
|
[
"MIT"
] |
permissive
|
toddrimes/CWR-DataApi
|
a82784ec198e35ab311bf5576d31eefb9269939c
|
4d9f504d9032cf1aa1bd86db6efbe26042c6a6ae
|
refs/heads/master
| 2021-01-24T23:00:57.383927
| 2015-03-13T10:04:29
| 2015-03-13T10:04:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,830
|
py
|
# -*- coding: utf-8 -*-
from data.accessor import CWRConfiguration
from cwr.grammar.field import table as field_table
from cwr.grammar.field import special as field_special
from cwr.grammar.field import record as field_record
from cwr.grammar.field import society as field_society
from cwr.grammar.field import writer_territory as field_writer_territory
from cwr.interested_party import IPTerritoryRecord
"""
CWR Writer Territory of Control (SWT) records grammar.
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
# Acquires data sources
_config = CWRConfiguration()
"""
General fields.
"""
"""
Patterns.
"""
territory = field_special.lineStart + field_record.record_prefix(
_config.record_type(
'writer_territory'),
compulsory=True) + field_special.ip_n() + field_society.pr_share() + field_society.mr_share() + field_society.sr_share() + \
field_table.ie_indicator() + field_table.tis_code() + field_writer_territory.shares_change + field_writer_territory.sequence_n + field_special.lineEnd
"""
Parsing actions for the patterns.
"""
territory.setParseAction(lambda p: _to_writerterritory(p))
"""
Parsing methods.
These are the methods which transform nodes into instances of classes.
"""
def _to_writerterritory(parsed):
"""
Transforms the final parsing result into an IPTerritoryRecord instance.
:param parsed: result of parsing the Territory record
:return: an IPTerritoryRecord created from the parsed record
"""
return IPTerritoryRecord(parsed.record_type, parsed.transaction_sequence_n, parsed.record_sequence_n,
parsed.ip_n, parsed.ie_indicator, parsed.tis_code, parsed.sequence_n,
parsed.pr_share, parsed.mr_share, parsed.sr_share, parsed.shares_change)
|
[
"programming@wandrell.com"
] |
programming@wandrell.com
|
b4528282c5d0f3c4f595fde399e3016578457b11
|
b2ff5ac2ef633e41ecec6ff7baae4b89254bf151
|
/Hello_World/src/mainapp/profiles/migrations/0005_auto_20201015_1938.py
|
aa9304e6331f9e2da41aa37edffbcfac7c8524bf
|
[] |
no_license
|
r3bunker/Python-Projects
|
2bda2be348bc4e0aa530cadbf8c26a7f163bcd3f
|
e8742a9c5ed92424b5aeee0041e6e2267f26ccc6
|
refs/heads/master
| 2023-01-04T20:30:50.250655
| 2020-10-29T17:41:14
| 2020-10-29T17:41:14
| 300,012,282
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
py
|
# Generated by Django 3.1.2 on 2020-10-16 01:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0004_auto_20201015_1913'),
]
operations = [
migrations.AlterField(
model_name='profiles',
name='prefix',
field=models.CharField(choices=[('Mrs.', 'Mrs.'), ('Mr.', 'Mr.'), ('Ms.', 'Ms.')], default='', max_length=20),
),
]
|
[
"r3bunker@gmail.com"
] |
r3bunker@gmail.com
|
c0e8d2a4dd57f7f8365f36b333ff42431805e131
|
6300fcf67d4fcb5387a9f0f7370a8ffe8f4097d9
|
/AutoParts/Tests/account_auth/views/sign_out_test.py
|
958b55a8fb6505816eee2d92d73bf3c17334b56b
|
[] |
no_license
|
Borislav-source/Final-Project
|
e34ac1cbb71e3a32ed490361d3583c2e1e8bfbc9
|
501b258d103c2e1b8947451f4bdf750709d040fd
|
refs/heads/master
| 2023-07-17T15:03:19.390774
| 2021-09-01T14:06:09
| 2021-09-01T14:06:09
| 393,977,540
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 319
|
py
|
from django.urls import reverse
from Tests.base.tests import AutoPartsTestCase
class SignOutTest(AutoPartsTestCase):
def test_sign_out__if_user_is_logged_out(self):
self.client.force_login(self.user)
self.assertTrue(self.user)
self.client.logout()
        # after logout, the test client's session should no longer hold an authenticated user
        self.assertNotIn('_auth_user_id', self.client.session)
|
[
"tsv.borislav@gmail.com"
] |
tsv.borislav@gmail.com
|
11db259082bfba48cb8bd0c27e64e3d77bc28f6a
|
b87ea98bc166cade5c78d246aeb0e23c59183d56
|
/samples/openapi3/client/3_0_3_unit_test/python/unit_test_api/model/uniqueitems_false_validation.py
|
9a1260048babd250a0448eb1540cbf3fbb04e197
|
[
"Apache-2.0"
] |
permissive
|
holisticon/openapi-generator
|
88f8e6a3d7bc059c8f56563c87f6d473694d94e5
|
6a67551ea54a1aa9a49eb48ee26b4e9bb7fb1272
|
refs/heads/master
| 2023-05-12T02:55:19.037397
| 2023-04-14T08:31:59
| 2023-04-14T08:31:59
| 450,034,139
| 1
| 0
|
Apache-2.0
| 2022-01-20T09:34:14
| 2022-01-20T09:34:13
| null |
UTF-8
|
Python
| false
| false
| 1,499
|
py
|
# coding: utf-8
"""
openapi 3.0.3 sample spec
sample spec for testing openapi functionality, built from json schema tests for draft6 # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by: https://openapi-generator.tech
"""
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from unit_test_api import schemas # noqa: F401
class UniqueitemsFalseValidation(
schemas.AnyTypeSchema,
):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
class MetaOapg:
unique_items = False
def __new__(
cls,
*_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ],
_configuration: typing.Optional[schemas.Configuration] = None,
**kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
) -> 'UniqueitemsFalseValidation':
return super().__new__(
cls,
*_args,
_configuration=_configuration,
**kwargs,
)
|
[
"noreply@github.com"
] |
holisticon.noreply@github.com
|
ae132c35f9378954b35796886aca3491386db3b5
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/all-gists/5804612/snippet.py
|
365ec99e3c6e1802b17ec70aaac6ebe7524850f5
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398
| 2023-02-11T18:20:10
| 2023-02-11T18:20:10
| 119,861,038
| 76
| 19
| null | 2020-07-26T03:14:55
| 2018-02-01T16:19:24
|
Python
|
UTF-8
|
Python
| false
| false
| 4,333
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import curses
from math import floor
from datetime import datetime as date
from subprocess import Popen as popen
# Globals:
screen = curses.initscr()
last_width = 0
last_height = 0
alarm_hour = 0
alarm_minute = 0
alarm_state = False
alarm = None
glyph = {
'0': [" ##### ", " ## ## ", "## ## ", "## ## ", "## ## ", " ## ## ", " ##### "],
'1': [" ## ", " #### ", " ## ", " ## ", " ## ", " ## ", " ###### "],
'2': [" ####### ", "## ## ", " ## ", " ####### ", "## ", "## ", "######### "],
'3': [" ####### ", "## ## ", " ## ", " ####### ", " ## ", "## ## ", " ####### "],
'4': ["## ", "## ## ", "## ## ", "## ## ", "######### ", " ## ", " ## "],
'5': [" ######## ", " ## ", " ## ", " ####### ", " ## ", " ## ## ", " ###### "],
'6': [" ####### ", "## ## ", "## ", "######## ", "## ## ", "## ## ", " ####### "],
'7': [" ######## ", " ## ## ", " ## ", " ## ", " ## ", " ## ", " ## "],
'8': [" ####### ", "## ## ", "## ## ", " ####### ", "## ## ", "## ## ", " ####### "],
'9': [" ####### ", "## ## ", "## ## ", " ######## ", " ## ", "## ## ", " ####### "],
':': [" ", " ", " # ", " ", " # ", " ", " "]
}
def addstr(y, x, string, color):
try:
screen.addstr( origin_y + y, origin_x + x, string, color)
screen.refresh()
except: return
def print_time(now):
time_line = now.strftime("%I:%M:%S")
time_array = ["" for i in range(0,7)]
# Concat glyphs:
for char in time_line:
char_array = glyph[char]
for row in range(0, len(char_array)):
time_array[row] += char_array[row]
# Print glyphs:
for y in range(0, len(time_array)):
for x in range(0, len(time_array[y])):
char = time_array[y][x]
color = 1 if char == " " else 3
addstr( y, x, " ",
curses.color_pair(color))
# Add meridian:
addstr( 6, len(time_array[0]), now.strftime("%p"),
curses.color_pair(2) | curses.A_BOLD)
def print_date(now):
day_line = now.strftime("%A").center(11," ")
date_line = now.strftime("%B %d, %Y")
addstr(8, 0, day_line, curses.color_pair(3))
addstr(8, len(day_line) + 1, date_line, curses.color_pair(2) | curses.A_BOLD)
def print_alarm():
minute = alarm_minute
hour = alarm_hour - 12 if alarm_hour > 12 else (12 if not alarm_hour else alarm_hour)
meridian = "AM" if alarm_hour < 12 else "PM"
state = "ACT" if alarm_state else "OFF"
time = " %02d:%02d %s " % (hour, minute, meridian)
addstr(8, 46, state.center(5," "), curses.color_pair(3))
addstr(8, 52, " < ", curses.color_pair(3))
addstr(8, 55, time, curses.color_pair(2) | curses.A_BOLD)
addstr(8, 65, " > ", curses.color_pair(3))
def step_alarm(direction):
global alarm_minute, alarm_hour
alarm_minute = (30 if alarm_minute == 0 else 0)
if direction and alarm_minute == 0: alarm_hour = (alarm_hour + 1) % 24
elif not direction and alarm_minute == 30: alarm_hour = (alarm_hour - 1) % 24
def handle_mouse():
global alarm_state
(i, x, y, z, bstate) = curses.getmouse()
if y == origin_y + 8 and bstate == curses.BUTTON1_CLICKED:
if x > origin_x + 51 and x < origin_x + 55:
step_alarm(False)
if x > origin_x + 64 and x < origin_x + 68:
step_alarm(True)
if x > origin_x + 45 and x < origin_x + 51:
alarm_state = not alarm_state
# Setup
screen.keypad(1)
curses.curs_set(0)
curses.start_color()
curses.init_pair(1, 0, 0) # BB
curses.init_pair(2, 3, 0) # YB
curses.init_pair(3, 0, 3) # BY
curses.mousemask(curses.ALL_MOUSE_EVENTS)
curses.noecho()
curses.cbreak()
# Main
a = 0
while True:
width = screen.getmaxyx()[1]
height = screen.getmaxyx()[0]
origin_x = floor(width / 2) - 34
origin_y = floor(height / 2) - 4
now = date.now()
if width != last_width or height != last_height: screen.clear()
last_width = width
last_height = height
print_time(now)
print_date(now)
print_alarm()
if alarm_state and \
int(now.hour) == alarm_hour and \
int(now.minute) == alarm_minute and \
int(now.second) == 0:
pass
screen.timeout(30)
char = screen.getch()
if (char != -1):
if char == curses.KEY_MOUSE: handle_mouse()
elif char == 113: break
# Cleanup:
curses.endwin()
|
[
"gistshub@gmail.com"
] |
gistshub@gmail.com
|
d3ca685b9c2136a3c1c581a042146b3f9b4be186
|
4a0eb422dea8b3b911d56d4eae54137753cdefb0
|
/python-52-weeks/Device_classes/train_netmiko.py
|
aa215f6fdeb69e540b32f016fa3a73ddec8598dc
|
[] |
no_license
|
aramidetosin/python-netmon
|
a52c85bf124c051ec4ffe9a252501520d0f7bb39
|
de6f935bfcb8134e769eb2be81c8ebc0abd3df1d
|
refs/heads/master
| 2023-03-27T17:01:56.080756
| 2021-03-28T12:57:45
| 2021-03-28T12:57:45
| 332,056,355
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,577
|
py
|
import re
from netmiko import Netmiko
junos = {
"hostname": "192.168.1.229",
"username": "admin",
"password": "juniper1",
"device_type": "juniper",
}
connection = Netmiko(
host=junos["hostname"],
username=junos["username"],
password=junos["password"],
device_type=junos["device_type"],
)
show_hostname_output = connection.send_command("show system information")
show_uptime_output = connection.send_command("show system uptime")
show_serial_output = connection.send_command("show chassis hardware")
show_interface_output = connection.send_command("show interface terse")
print(show_hostname_output)
print(show_uptime_output)
print(show_serial_output)
print(show_interface_output)
def junos_get_information(show_hostname_output):
information = {}
pattern = re.compile(r"Model: (.*)")
model = pattern.search(show_hostname_output)
if model:
information['model'] = model.group(1)
else:
information['model'] = None
pattern = re.compile(r"Junos: (.*)")
model = pattern.search(show_hostname_output)
if model:
information['Version'] = model.group(1)
else:
information['Version'] = None
pattern = re.compile(r"Hostname: (.*)")
model = pattern.search(show_hostname_output)
if model:
information['Hostname'] = model.group(1)
else:
information['Hostname'] = None
return information
def junos_get_uptime_from_show(show_uptime_output):
re_junos_uptime = re.compile(r'System booted: .*\((\d{2}:\d{2}:\d{2}) ago\)')
junos_uptime_match = re_junos_uptime.search(show_uptime_output)
if junos_uptime_match:
uptime = junos_uptime_match.group(1)
uptime_split = uptime.split(":")
hours = int(uptime_split[0])
minutes = int(uptime_split[1])
seconds = int(uptime_split[2])
return hours * 3600 + minutes * 60 + seconds
def junos_get_serial_number(show_serial_output):
re_serial_number = re.compile(r"Chassis\s*(\w*)\s*")
serial_number_match = re_serial_number.search(show_serial_output)
if serial_number_match:
return serial_number_match.group(1)
print(junos_get_information(show_hostname_output))
print(junos_get_uptime_from_show(show_uptime_output))
print(junos_get_serial_number(show_serial_output))
line_show_interface_output = show_interface_output.splitlines()
interfaces = []
for line in line_show_interface_output:
xx = line.split(" ")[0]
if xx != "Interface" and xx != '':
if '.' not in xx:
interfaces.append(xx)
print(interfaces)
|
[
"aoluwatosin10@gmail.com"
] |
aoluwatosin10@gmail.com
|
ccad155c93d1dc3713bc931ca59362dda019cbe3
|
aaf306b4117027bd66dfdbac80f2147a9b48a455
|
/Day66-75/code/example01.py
|
b1fee7aa04cbe05762d013ada68b220f217fb53c
|
[] |
no_license
|
xiangsxuan/Python-100-Days
|
309da160fc4c85aa9699a0c522525e2b01e0421d
|
e86dece224b0a77103f6d6b734fecd9eef7dca97
|
refs/heads/master
| 2020-03-18T19:56:59.744032
| 2018-05-28T15:21:07
| 2018-05-28T15:21:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,467
|
py
|
from urllib.error import URLError
from urllib.request import urlopen
import re
import pymysql
import ssl
from pymysql import Error
def decode_page(page_bytes, charsets=('utf-8', )):
page_html = None
for charset in charsets:
try:
page_html = page_bytes.decode(charset)
break
except UnicodeDecodeError:
pass
# logging.error('Decode:', error)
return page_html
def get_page_html(seed_url, *, retry_times=3, charsets=('utf-8', )):
page_html = None
try:
page_html = decode_page(urlopen(seed_url).read(), charsets)
except URLError:
# logging.error('URL:', error)
if retry_times > 0:
return get_page_html(seed_url, retry_times=retry_times - 1,
charsets=charsets)
return page_html
def get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):
pattern_regex = re.compile(pattern_str, pattern_ignore_case)
return pattern_regex.findall(page_html) if page_html else []
def start_crawl(seed_url, match_pattern):
conn = pymysql.connect(host='localhost', port=3306,
database='crawler', user='root',
password='123456', charset='utf8')
try:
with conn.cursor() as cursor:
url_list = [seed_url]
while url_list:
current_url = url_list.pop(0)
page_html = get_page_html(current_url, charsets=('utf-8', 'gbk', 'gb2312'))
links_list = get_matched_parts(page_html, match_pattern)
url_list += links_list
param_list = []
for link in links_list:
page_html = get_page_html(link, charsets=('utf-8', 'gbk', 'gb2312'))
headings = get_matched_parts(page_html, r'<h1>(.*)<span')
if headings:
param_list.append((headings[0], link))
cursor.executemany('insert into tb_result values (default, %s, %s)',
param_list)
conn.commit()
except Error:
pass
# logging.error('SQL:', error)
finally:
conn.close()
def main():
ssl._create_default_https_context = ssl._create_unverified_context
start_crawl('http://sports.sohu.com/nba_a.shtml',
r'<a[^>]+test=a\s[^>]*href=["\'](.*?)["\']')
if __name__ == '__main__':
main()
|
[
"jackfrued@126.com"
] |
jackfrued@126.com
|
2dde9d6b6d7882fd2f8221971f5affeee6735fa2
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03072/s635930653.py
|
041f13523a4cc93f92b5e54f56ee442444872570
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
n = int(input())
h = list(map(int, input().split()))
max = h[0]
count = 0
for i in h:
if max <= i:
count += 1
max = i
else:
continue
print(count)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|