blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
20f7c68abb20697f37faf94315775c04cc2e88e0
|
e47bc9571c59b1c6e8aeb4231a286ab8577802d4
|
/easy/700-search-in-a-binary-search-tree.py
|
80810f66956e8a17a8d0435cfb5517b67981d3fe
|
[
"MIT"
] |
permissive
|
changmeng72/leecode_python3
|
d0176502dfaf3c8b455ec491c72979dd25b66b3e
|
8384f52f0dd74b06b1b6aefa277dde6a228ff5f3
|
refs/heads/main
| 2023-05-27T10:35:43.465283
| 2021-06-09T00:20:59
| 2021-06-09T00:20:59
| 375,127,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 899
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def searchBST(self, root: TreeNode, val: int) -> TreeNode:
        """Return the node of the BST rooted at *root* whose value equals
        *val*, or None if no such node exists.

        Iterative descent: the BST ordering lets us discard one subtree per
        step, so this runs in O(h) time and O(1) space.

        Fix: the original had an unreachable `if root == None: return None`
        inside `while root != None` — the loop condition already guarantees
        root is non-None there.
        """
        while root is not None and root.val != val:
            # Go left for smaller targets, right for larger ones.
            root = root.left if val < root.val else root.right
        # Either the matching node, or None when we fell off the tree.
        return root
"""
class Solution:
def searchBST(self, root: TreeNode, val: int) -> TreeNode:
if root==None:
return None
if root.val==val:
return root
if val< root.val:
return self.searchBST(root.left,val)
else:
return self.searchBST(root.right,val)
"""
|
[
"noreply@github.com"
] |
changmeng72.noreply@github.com
|
222d3a8b5fe6ead7ed774a31e3384f17c77c532b
|
e82b761f53d6a3ae023ee65a219eea38e66946a0
|
/All_In_One/addons/io_scs_tools/internals/persistent/initialization.py
|
7f4da8dcb5af84372c4c4687960620d3e72755b6
|
[] |
no_license
|
2434325680/Learnbgame
|
f3a050c28df588cbb3b14e1067a58221252e2e40
|
7b796d30dfd22b7706a93e4419ed913d18d29a44
|
refs/heads/master
| 2023-08-22T23:59:55.711050
| 2021-10-17T07:26:07
| 2021-10-17T07:26:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,303
|
py
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (C) 2013-2014: SCS Software
import bpy
import os
from bpy.app.handlers import persistent
from io_scs_tools.internals import preview_models as _preview_models
from io_scs_tools.internals.callbacks import open_gl as _open_gl_callback
from io_scs_tools.internals.callbacks import lighting_east_lock as _lighting_east_lock_callback
from io_scs_tools.internals.containers import config as _config_container
from io_scs_tools.internals.connections.wrappers import group as _connections_group_wrapper
from io_scs_tools.utils import get_scs_globals as _get_scs_globals
from io_scs_tools.utils import info as _info_utils
from io_scs_tools.utils.printout import lprint
@persistent
def initialise_scs_dict(scene):
    """Parts and Variants data initialisation (persistent).
    Things which this function does:
    1. copies all the settings to current world
    2. checks object identities
    3. updates shaders presets path and reloads them
    Cases when it should be run:
    1. Blender startup -> SCS tools needs to configured
    2. Opening .blend file -> because all the configs needs to be moved to current world
    3. addon reloading and enable/disable -> for SCS tools this is the same as opening Blender
    :param scene: Current Blender Scene
    :type scene: bpy.types.Scene
    """
    # SCREEN CHECK...
    # Only run once a real UI screen exists (skips background invocations).
    if bpy.context.screen:
        lprint("I Initialization of SCS scene, BT version: " + _info_utils.get_tools_version())
        # NOTE: covers: start-up, reload, enable/disable and it should be immediately removed
        # from handlers as soon as it's executed for the first time
        if initialise_scs_dict in bpy.app.handlers.scene_update_post:
            bpy.app.handlers.scene_update_post.remove(initialise_scs_dict)
        # INITIALIZE CUSTOM CONNECTIONS DRAWING SYSTEM
        _connections_group_wrapper.init()
        # release lock as user might saved blender file during engaged lock.
        # If that happens config lock property gets saved to blend file and if user opens that file again,
        # lock will be still engaged and no settings could be applied without releasing lock here.
        _config_container.release_config_lock()
        # USE SETTINGS FROM CONFIG...
        # NOTE: Reapplying the settings from config file to the currently opened Blender file datablock.
        # The thing is, that every Blend file holds its own copy of SCS Global Settings from the machine on which it got saved.
        # The SCS Global Settings needs to be overwritten upon each file load to reflect the settings from local config file,
        # but also upon every SCS Project Base Path change.
        _config_container.apply_settings()
        # GLOBAL PATH CHECK...
        # Warn (do not abort) when the configured project path is not a directory.
        if _get_scs_globals().scs_project_path != "":
            if not os.path.isdir(_get_scs_globals().scs_project_path):
                lprint("\nW The Project Path %r is NOT VALID!\n\tPLEASE SELECT A VALID PATH TO THE PROJECT BASE FOLDER.\n",
                       (_get_scs_globals().scs_project_path,))
        # CREATE PREVIEW MODEL LIBRARY
        _preview_models.init()
        # ADD DRAW HANDLERS
        _open_gl_callback.enable(mode=_get_scs_globals().drawing_mode)
        # ENABLE LIGHTING EAST LOCK HANDLER
        # Blender doesn't call update on properties when file is opened,
        # so in case lighting east was locked in saved blend file, we have to manually enable callback for it
        # On the other hand if user previously had east locked and now loaded the file without it,
        # again we have to manually disable callback.
        if _get_scs_globals().lighting_east_lock:
            _lighting_east_lock_callback.enable()
        else:
            _lighting_east_lock_callback.disable()
        # as last notify user if his Blender version is outdated
        if not _info_utils.is_blender_able_to_run_tools():
            message = "Your Blender version %s is outdated, all SCS Blender Tools functionalities were internally disabled.\n\t " \
                      "Please update Blender before continue, minimal required version for SCS Blender Tools is: %s!"
            message = message % (_info_utils.get_blender_version()[0], _info_utils.get_required_blender_version())
            # first report error with blender tools printing system
            lprint("E " + message)
            # then disable add-on as it's not usable in the case Blender is out-dated
            bpy.ops.wm.addon_disable('INVOKE_DEFAULT', module="io_scs_tools")
            # and as last show warning message in the form of popup menu for user to see info about outdated Blender
            # As we don't have access to our 3D view report operator anymore,
            # we have to register our ShowWarningMessage class back and invoke it.
            from io_scs_tools.operators.wm import ShowWarningMessage
            bpy.utils.register_class(ShowWarningMessage)
            bpy.ops.wm.show_warning_message('INVOKE_DEFAULT',
                                            is_modal=True,
                                            title="SCS Blender Tools Initialization Problem",
                                            message="\n\n" + message.replace("\t ", "") + "\n\n",  # some nasty formatting for better visibility
                                            width=580,  # this is minimal width to properly fit in given message
                                            height=bpy.context.window.height if bpy.context and bpy.context.window else 200)
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
7a9a6945f9f95e49f44ee1506c40205fac83cef9
|
e8274f167fd219ef78241ba8ea89e5d5875ed794
|
/cloud/nova/nova/db/sqlalchemy/migration.py
|
31c40234a4065b76995aba1d11758b1ed73855fc
|
[
"Apache-2.0"
] |
permissive
|
virt2x/folsomCloud
|
02db0147f7e0f2ab0375faf4f36ca08272084152
|
e6fd612dd77f35a72739cf4d4750e9795c0fa508
|
refs/heads/master
| 2021-01-01T17:26:28.405651
| 2013-10-17T12:36:04
| 2013-10-17T12:36:04
| 13,647,787
| 0
| 1
| null | 2020-07-24T08:25:22
| 2013-10-17T12:10:24
|
Python
|
UTF-8
|
Python
| false
| false
| 3,873
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import distutils.version as dist_version
import os
from nova.db import migration
from nova.db.sqlalchemy.session import get_engine
from nova import exception
from nova import flags
from nova.openstack.common import log as logging
import migrate
from migrate.versioning import util as migrate_util
import sqlalchemy
LOG = logging.getLogger(__name__)
@migrate_util.decorator
def patched_with_engine(f, *a, **kw):
    """Backported copy of sqlalchemy-migrate's ``with_engine`` decorator.

    Builds an engine from the url in ``a[0]``, injects it as ``kw['engine']``
    for the wrapped call, and guarantees the engine is disposed afterwards
    when it was constructed here (older migrate leaked these connections).
    """
    url = a[0]
    engine = migrate_util.construct_engine(url, **kw)
    try:
        kw['engine'] = engine
        return f(*a, **kw)
    finally:
        # Only dispose engines we created: if the caller passed an Engine
        # object directly (engine is url), its lifetime belongs to the caller.
        if isinstance(engine, migrate_util.Engine) and engine is not url:
            migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine)
            engine.dispose()
# TODO(jkoelker) When migrate 0.7.3 is released and nova depends
# on that version or higher, this can be removed
MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
if (not hasattr(migrate, '__version__') or
        dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
    # Monkeypatch older migrate releases so per-call engines get disposed.
    migrate_util.with_engine = patched_with_engine
# NOTE(jkoelker) Delay importing migrate until we are patched
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
# Lazily-built, module-level cache for the migrate Repository
# (populated by _find_migrate_repo()).
_REPOSITORY = None
def db_sync(version=None):
    """Migrate the database schema to *version* (latest when None).

    :param version: target schema version; accepts anything int() can parse
    :raises exception.NovaException: when *version* is not an integer
    :returns: result of migrate's upgrade/downgrade call
    """
    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.NovaException(_("version should be an integer"))

    current_version = db_version()
    repository = _find_migrate_repo()
    # Moving to "latest" or any higher version is an upgrade;
    # everything else is a downgrade.
    going_up = version is None or version > current_version
    if going_up:
        return versioning_api.upgrade(get_engine(), repository, version)
    return versioning_api.downgrade(get_engine(), repository, version)
def db_version():
    """Return the current schema version of the database.

    If the database is not yet under migrate's version control:
    an empty database is placed under control at INIT_VERSION and its
    version returned; a non-empty uncontrolled database is assumed to be
    pre-Essex and rejected.
    """
    repository = _find_migrate_repo()
    try:
        return versioning_api.db_version(get_engine(), repository)
    except versioning_exceptions.DatabaseNotControlledError:
        # Not version-controlled yet: inspect the schema to decide what to do.
        meta = sqlalchemy.MetaData()
        engine = get_engine()
        meta.reflect(bind=engine)
        tables = meta.tables
        if len(tables) == 0:
            # Brand-new, empty database: start version control at the base.
            db_version_control(migration.INIT_VERSION)
            return versioning_api.db_version(get_engine(), repository)
        else:
            # Some pre-Essex DB's may not be version controlled.
            # Require them to upgrade using Essex first.
            raise exception.NovaException(
                _("Upgrade DB using Essex release first."))
def db_version_control(version=None):
    """Place the database under migrate's version control.

    :param version: version to stamp the database with (None = repo default)
    :returns: the *version* argument, unchanged
    """
    repo = _find_migrate_repo()
    versioning_api.version_control(get_engine(), repo, version)
    return version
def _find_migrate_repo():
    """Get the path for the migrate repository."""
    global _REPOSITORY
    # The migrate repo lives next to this module, in ./migrate_repo.
    repo_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                             'migrate_repo')
    assert os.path.exists(repo_path)
    # Build the Repository once and cache it at module level.
    if _REPOSITORY is None:
        _REPOSITORY = Repository(repo_path)
    return _REPOSITORY
|
[
"quan.xu@intel.com"
] |
quan.xu@intel.com
|
533cc85f23f1baa1d29a5f2efd413bc56d46819a
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/bob/4b1ce8efbadf4ae08434623c951e8a03.py
|
23c859cb84bcc66a0a8d02897ece826b32d14a4c
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 589
|
py
|
def hey(question):
    """Return Bob's reply to *question*.

    Bob answers 'Fine. Be that way!' to silence (empty or whitespace-only
    input), 'Whoa, chill out!' to yelling (all-caps), 'Sure.' to anything
    ending in '?', and 'Whatever.' to everything else.
    """
    # Precedence matters: silence beats yelling, yelling beats questioning.
    if not question.strip():
        return 'Fine. Be that way!'
    if question.isupper():
        return 'Whoa, chill out!'
    if question[-1] == '?':
        return 'Sure.'
    return 'Whatever.'
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
2e218c49d71cdead57bbbf5de7b87264ce4b5167
|
58cea667b91271f8d61a70b3eff93ef56814d99a
|
/reservations/urls.py
|
c8433daa084bbe716861fd4932cf700c5f47ed5d
|
[] |
no_license
|
BKLemontea/AirBnB_CloneCoding
|
06e900d0c2f1735ebc539172569205956bab7b52
|
b88f9bb6d1bca6a10cb48206872c0a392ce436bf
|
refs/heads/master
| 2022-12-11T00:17:47.066796
| 2020-05-04T12:56:28
| 2020-05-04T12:56:28
| 235,279,197
| 0
| 0
| null | 2022-12-10T20:47:58
| 2020-01-21T07:21:38
|
Python
|
UTF-8
|
Python
| false
| false
| 333
|
py
|
from django.urls import path
from . import views

# URL namespace used when reversing, e.g. {% url "reservations:detail" %}.
app_name = "reservations"

urlpatterns = [
    # Create a reservation for <room> on a concrete <year>-<month>-<day> date.
    path("create/<int:room>/<int:year>-<int:month>-<int:day>/", views.create, name="create"),
    # Detail view for a single reservation by primary key.
    path("<int:pk>/", views.ReservationDetail.as_view(), name="detail"),
    # NOTE(review): <str:verb> selects an edit action; its accepted values
    # live in views.edit_reservation — confirm there.
    path("<int:pk>/<str:verb>/", views.edit_reservation, name="edit"),
]
|
[
"jyf1128@naver.com"
] |
jyf1128@naver.com
|
0dd093c2129194b810bfb2ebb4684574b589f6ef
|
f3df69d552c0749d054f77a1f739bb13c70b23c0
|
/Boundary Detection and Object Matching/p6.py
|
e4864c8a0f2b6bed08c7671778204230ac45e152
|
[] |
no_license
|
thomas-liao/Computer-Vision-Projects
|
a13e5b1122f7089c5ee49c60d87f9db7b0887aa7
|
66a6cf269dbcad23831f15dfb5695cc1c1f2084b
|
refs/heads/master
| 2021-10-24T01:14:03.387306
| 2019-03-21T06:57:27
| 2019-03-21T06:57:27
| 109,339,021
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,601
|
py
|
import math
import numpy as np
def p6(edge_image, edge_thresh):  # return [edge_thresh_image, hough_image]
    """Binarise *edge_image* at *edge_thresh* and build its Hough-line
    vote accumulator scaled to 0..255.

    :param edge_image: 2-D (row-major) sequence of edge magnitudes
    :param edge_thresh: cutoff; pixels >= cutoff count as edge points
    :returns: (ret_img, parameter_space) where ret_img is the 0/255
        thresholded image and parameter_space is a (2*rou_max) x 500 vote
        array over (rho, theta), theta spanning [-pi/2, pi/2].

    Fixes vs. original: removed the dead ``rou_points``/``rou_vector``
    locals (computed but never used) and guarded the final scaling against
    division by zero when no pixel passes the threshold.
    """
    height = len(edge_image)
    width = len(edge_image[0])
    theta_points = 500

    # Binarise: 255 where the magnitude reaches the threshold, else 0.
    ret_img = [[255 if edge_image[i][j] >= edge_thresh else 0
                for j in range(width)]
               for i in range(height)]

    # Largest possible rho is the image diagonal; rho indices are offset by
    # rou_max so negative rho values land in [0, 2*rou_max).
    rou_max = int(math.ceil(math.sqrt(width ** 2 + height ** 2)))
    # thetas: from -pi/2 to pi/2
    thetas = np.linspace(-math.pi / 2, math.pi / 2, theta_points)

    # Vote accumulator over (rho, theta).
    parameter_space = [[0 for _ in range(len(thetas))]
                       for _ in range(2 * rou_max)]
    for i in range(height):
        for j in range(width):
            if ret_img[i][j] == 0:
                continue
            for k in range(len(thetas)):
                rou = int(round(j * math.cos(thetas[k])
                                + i * math.sin(thetas[k])) + rou_max)
                parameter_space[rou][k] += 1

    # Scale parameter space to range 0 ~ 255. Skip entirely when there were
    # no votes (original raised ZeroDivisionError on an all-dark image).
    max_vote = max((v for row in parameter_space for v in row), default=0)
    if max_vote > 0:
        for i in range(2 * rou_max):
            for k in range(theta_points):
                parameter_space[i][k] = int(
                    math.floor(255.0 * parameter_space[i][k] / max_vote))
    return ret_img, parameter_space
|
[
"thomas.liao256@gmail.com"
] |
thomas.liao256@gmail.com
|
4f27d04ab2fceaec0820eec914370a8e5c241bbf
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/large_case_or_government/small_time.py
|
b02f0a7c20c3165a30a6c3a143c2d12f005cf4e1
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
#! /usr/bin/env python


def year(str_arg):
    """Echo *str_arg* to stdout."""
    print(str_arg)


def place(str_arg):
    """Print *str_arg* (via year) followed by a fixed marker line."""
    year(str_arg)
    print('know_person_by_different_point')


if __name__ == '__main__':
    place('go_long_place_into_high_eye')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
d34e57dc3fad50b488fcbad29d3b477d302341fd
|
e2348943fcb5ea22137187c23cd39f7138bed725
|
/practise/form_demo/forms.py
|
21eee2a72f75d43acb27ad3ce278652a542ef653
|
[] |
no_license
|
Gaurav41/Django-Practise
|
6bf8d7c58ad5624990aa8d707f72e504f778dbfa
|
36754e54d0ae2be86855620916cc9262e382e597
|
refs/heads/master
| 2023-07-26T06:51:38.002692
| 2021-09-07T14:19:29
| 2021-09-07T14:19:29
| 403,638,583
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,486
|
py
|
from django import forms
import django
from django.db.models.base import Model
from django.forms import fields, widgets
from django.forms.fields import BooleanField, CharField
from django.forms import ModelForm
from .models import Customer
# https://docs.djangoproject.com/en/3.2/topics/forms/
class NameForm(forms.Form):
    """Plain (non-model) demo form exercising the common field types."""

    your_name = forms.CharField(label='Your name', max_length=10)
    your_age = forms.IntegerField(label='Your age')
    # Rendered as a <textarea> instead of the default <input type="text">.
    message = forms.CharField(widget=forms.Textarea)
    email = forms.EmailField()
    # required=False makes the checkbox optional (unchecked submits validate).
    booleanField = forms.BooleanField(required=False)
class CustomerForm(ModelForm):
    """ModelForm over Customer exposing all model fields, with custom
    labels, error messages and widgets."""

    # Extra validator layered on top of the model's own field.
    password = CharField(min_length=8, required=False)

    class Meta:
        model = Customer
        fields = '__all__'
        # NOTE(review): 'yout' below is user-facing label text reproduced
        # verbatim (typo kept; changing it would alter rendered output).
        labels = {'first_name': 'Enter yout first name', 'last_name': 'Enter yout last name'}
        error_messages = {'first_name': {'required': 'Name required'},
                          'last_name': {'required': 'Last name required'},
                          'password': {'required': 'Password is required'}
                          }
        widgets = {'first_name': forms.TextInput,
                   'last_name': forms.TextInput(attrs={'id': 'ls_name', 'class': 'myclass',
                                                       'placeholder': 'Write your last name here', }),
                   'password': forms.PasswordInput()
                   }
|
[
"gauravpingale4@gmail.com"
] |
gauravpingale4@gmail.com
|
8a173bcb6121ee95083280933b1dad376b3685f5
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_cockatoo.py
|
44cd2b849cd52c15c8be04daf76e0becb8196798
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
#calss header
class _COCKATOO():
def __init__(self,):
self.name = "COCKATOO"
self.definitions = [u'an Australian bird with a decorative crest (= growth of feathers) on its head and a powerful beak']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
3d629e559a04bc09cbc9a4cf103d4b96bee693c3
|
ba4f68fb01aa32970dadea67cc8d039b4c0f6d9e
|
/python/armstrong_numbers.py
|
a4ac5f11afc0ef9619fc0db4c3370fac8ba44b30
|
[] |
no_license
|
campbellmarianna/Code-Challenges
|
12a7808563e36b1a2964f10ae64618c0be41b6c0
|
12e21c51665d81cf1ea94c2005f4f9d3584b66ec
|
refs/heads/master
| 2021-08-03T23:23:58.297437
| 2020-05-15T07:13:46
| 2020-05-15T07:13:46
| 168,234,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 914
|
py
|
# Exercism Problem: Armstrong Numbers
# Find out if a number is the sum of its own digits each raised to the power of
# number of digits
'''
Technical Interview Problem Solving Strategy
1. Generate reasonable test inputs
2. Understand the problem = Solve it!
a. Simplify the problem if needed
3. Find a pattern in your solution
4. Make a plan - Write pseudocode
5. Follow a plan - Write real code
6. Check your work - Test your code
'''
def is_armstrong_number(number):  # e.g. 153 == 1**3 + 5**3 + 3**3
    """Return True when *number* equals the sum of its digits each raised
    to the power of the digit count (an Armstrong/narcissistic number).

    Fixes vs. original: no longer shadows the builtin ``sum``; the manual
    accumulator loop and the ``if/else: return True/False`` tail collapse
    to a single comparison.
    """
    digits = str(number)
    power = len(digits)
    total = sum(int(d) ** power for d in digits)
    return total == number


if __name__ == '__main__':
    print(is_armstrong_number(153))
|
[
"mckd2018@gmail.com"
] |
mckd2018@gmail.com
|
753ec27efafeb634d3084d7139d39eb30acf94ec
|
bd696223aaf5404987df11832b4c17c916b9690f
|
/py_sample/django_rest_tutorial/snippets/permissions.py
|
6c42c1910d183fa5df19ddac3d74f8bee85a6473
|
[] |
no_license
|
wararaki718/scrapbox3
|
000a285477f25c1e8a4b6017b6ad06c76f173342
|
9be5dc879a33a1988d9f6611307c499eec125dc2
|
refs/heads/master
| 2023-06-16T08:46:32.879231
| 2021-07-17T14:12:54
| 2021-07-17T14:12:54
| 280,590,887
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
    """Object-level permission: anyone may read, only the owner may write."""

    def has_object_permission(self, request, view, obj):
        # Safe (read-only) HTTP methods are always allowed; mutating
        # methods require the requester to be the object's owner.
        return (request.method in permissions.SAFE_METHODS
                or obj.owner == request.user)
|
[
"ky7.ott.w@gmail.com"
] |
ky7.ott.w@gmail.com
|
3bb12ba94fcbec290decd82b09f9fec9e679e487
|
6cc37dfc44880f57823bb9523ea5f8206d5e3f22
|
/python_OOP/labs_and_homeworks/03_attributes_and_methods_exercise/gym_04/project/customer.py
|
ff5ecd94c9f32fe85c41758770709746349e3848
|
[] |
no_license
|
dimitar-daskalov/SoftUni-Courses
|
70d265936fd86712a7bfe0586ec6ebd1c7384f77
|
2054bc58ffb5f41ed86f5d7c98729b101c3b1368
|
refs/heads/main
| 2023-05-31T06:44:35.498399
| 2021-07-11T10:16:08
| 2021-07-11T10:16:08
| 322,896,365
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 489
|
py
|
class Customer:
    """Customer record whose ids come from a shared, auto-incrementing
    class-level counter."""

    # Last id handed out; shared across every instance of the class.
    id_count = 0

    def __init__(self, name: str, address: str, email: str):
        self.name = name
        self.address = address
        self.email = email
        # Each new customer consumes the next value of the shared counter.
        self.id = self.get_next_id()

    @staticmethod
    def get_next_id():
        """Advance the shared counter and return its new value."""
        Customer.id_count += 1
        return Customer.id_count

    def __repr__(self):
        return (f"Customer <{self.id}> {self.name}; "
                f"Address: {self.address}; Email: {self.email}")
|
[
"dimitar.daskalov22@gmail.com"
] |
dimitar.daskalov22@gmail.com
|
aa54ea0160cb8d5f0aa9751e2ec10309491e862a
|
f933e6a2c30003df771f902924880e5f531ba57f
|
/src/deltapy/communication/pyro/__init__.py
|
0af9dbc76e1753625c3cdf247208b8a21b8e7e44
|
[] |
no_license
|
hamed1361554/sportmagazine-server
|
861e624912ffc2e623b027e693111d4bcb10a709
|
a2ee333d2a4fe9821f3d24ee15d458f226ffcde5
|
refs/heads/master
| 2020-04-17T13:57:00.624899
| 2017-07-25T03:30:24
| 2017-07-25T03:30:24
| 67,815,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
'''
Created on Sep 15, 2009
@author: Abi.Mohammadi & Majid.Vesal
'''
from deltapy.packaging.package import Package
import deltapy.communication.services as communication
from deltapy.communication.pyro.factory import PyroFactory
class PyroPackage(Package):
    """Deltapy package that registers Pyro as a communication protocol."""

    def load(self):
        # Run base package load hooks first, then expose the Pyro factory
        # under the 'pyro' protocol key.
        Package.load(self)
        communication.register_factory('pyro', PyroFactory())

    def unload(self):
        # NOTE(review): the factory registered in load() is not unregistered
        # here — presumably handled by the communication layer; confirm.
        Package.unload(self)
|
[
"hamed.zekri@gmail.com"
] |
hamed.zekri@gmail.com
|
962b0aaba50613ca402feaf5fa33193831e19e07
|
8f2f83bc1381d4ce7fc968aec72fa400aae4155d
|
/api/network/requestmodels/__init__.py
|
4a07f17722a06027b304b663cfb2a276909b7f29
|
[
"MIT"
] |
permissive
|
nifrali/pyStratis
|
c855fb33be77064c9a741255e324003319a4789f
|
b1a80bf155b7941e9ef8fc2ea93fa1b08a0c4366
|
refs/heads/master
| 2023-06-20T16:02:30.863589
| 2021-07-01T19:24:18
| 2021-07-01T19:24:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
from .clearbannedrequest import ClearBannedRequest
from .disconnectpeerrequest import DisconnectPeerRequest
from .setbanrequest import SetBanRequest
|
[
"skaal@protonmail.com"
] |
skaal@protonmail.com
|
e62e850eb5cc7ca5d2fff2ccb3c73f00f13a4362
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/cirq_new/cirq_program/startCirq_Class575.py
|
d1049a69516cdaa111be622c5a515f85597de048
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,102
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=18
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
    """Assemble the hard-coded, auto-generated 4-qubit cirq circuit.

    :param n: qubit count — NOTE(review): unused; the gate list is fixed.
    :param input_qubit: indexable collection of at least 4 cirq qubits.
    :return: the assembled cirq.Circuit.
    """
    c = cirq.Circuit()  # circuit begin
    c.append(cirq.H.on(input_qubit[0]))  # number=1
    c.append(cirq.H.on(input_qubit[1]))  # number=2
    c.append(cirq.Y.on(input_qubit[2]))  # number=13
    c.append(cirq.H.on(input_qubit[1]))  # number=7
    c.append(cirq.H.on(input_qubit[2]))  # number=3
    c.append(cirq.H.on(input_qubit[3]))  # number=4
    # H-CZ-H sandwich on (3, 0): equivalent to a CNOT with control 3, target 0.
    c.append(cirq.H.on(input_qubit[0]))  # number=10
    c.append(cirq.CZ.on(input_qubit[3],input_qubit[0]))  # number=11
    c.append(cirq.H.on(input_qubit[0]))  # number=12
    c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0]))  # number=6
    # The back-to-back SWAP, Y and X pairs below are self-inverse and cancel;
    # kept verbatim as emitted by the generator.
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0]))  # number=8
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0]))  # number=9
    c.append(cirq.Y.on(input_qubit[1]))  # number=14
    c.append(cirq.Y.on(input_qubit[1]))  # number=15
    c.append(cirq.X.on(input_qubit[2]))  # number=16
    c.append(cirq.X.on(input_qubit[2]))  # number=17
    # circuit end
    return c
def bitstring(bits):
    """Concatenate an iterable of bit-like values into a digit string
    (each element passed through int() then str())."""
    pieces = [str(int(bit)) for bit in bits]
    return ''.join(pieces)
if __name__ == '__main__':
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count, input_qubits)
    # Compile the circuit for the Sycamore gate set before simulating.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    # NOTE(review): unused — no sampling happens below; confirm intent.
    circuit_sample_count = 2820
    # Exact simulation: final state vector of the compiled circuit.
    info = cirq.final_state_vector(circuit)
    qubits = round(log2(len(info)))
    # Probability of each computational-basis state, rounded to 3 decimals.
    frequencies = {
        np.binary_repr(i, qubits): round((info[i] * (info[i].conjugate())).real, 3)
        for i in range(2 ** qubits)
    }
    writefile = open("../data/startCirq_Class575.csv", "w+")
    print(format(frequencies), file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit, file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
03fada5c669da9504b6aa423e88fff759e2862bc
|
b17f269a24e98f6a28c54cf49569c305b4f1dac3
|
/src/nix_ray/__init__.py
|
50c69155f0044cf26776bd7a17a94faf26e40df9
|
[] |
no_license
|
qknight/nix-ray
|
981d41e6706b455e3e8c1c9572e595a654833617
|
08b0ca2d139abbf700c04483f2ec3046aa284a49
|
refs/heads/master
| 2016-09-10T17:35:25.964041
| 2013-07-27T10:21:55
| 2013-07-27T10:23:01
| 11,617,136
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 809
|
py
|
"""xin - prototypical unified cli for nix
"""
import tpv.cli
import tpv.pkg_resources
from plumbum import FG
from plumbum.cmd import ls, grep, wc, git
class NixRay(tpv.cli.Command):
    """nix-ray
    Assistant for debugging the phases of a nix expression
    """
    VERSION = 0

    # Setuptools entry-point group scanned for subcommands.
    entry_point_group = "nix_ray.commands"

    verbose = tpv.cli.Flag(["v", "verbose"],
                           help="If given, I will be very talkative")

    def __call__(self, filename=None):
        # Invoked with no subcommand: just print usage/help.
        self.help()
        # NOTE(review): experimentation leftovers below (plumbum pipelines
        # and a Python-2 print) — dead code, kept verbatim.
        # chain = ls['-la'] | grep['a'] | wc
        # print(chain)
        # chain & FG
        # if self.verbose:
        #     print "Yadda " * 200

    @tpv.cli.switch(['f', 'foo'], int)
    def foo(self, bar):
        """foomagic
        """
        self.bar = bar
        print(bar)


# Console-script entry point (plumbum/tpv convention).
app = NixRay.run
|
[
"flo@chaoflow.net"
] |
flo@chaoflow.net
|
800a07049a25c104919188f12cd07941255d2647
|
7e574c25506a7b4f68b873fa63d2d38f52c7c56e
|
/utils/correspondence_tools/correspondence_plotter.py
|
64ec2758a8b598aec99938a40c9ba5d79121d7f1
|
[] |
no_license
|
vcowwy/SuperGlue_paddle
|
9912e6dfda34dfc42cfb34d4e2de8eaed827ed15
|
4cd5280dbb228498e2b3c6df1f4621c320af47c7
|
refs/heads/master
| 2023-08-21T15:58:20.122712
| 2021-10-29T11:10:26
| 2021-10-29T11:10:26
| 420,937,959
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,466
|
py
|
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
def plot_correspondences(images, uv_a, uv_b, use_previous_plot=None,
                         circ_color='g', show=True):
    """Draw correspondence circles at *uv_a*/*uv_b* over a 2x2 image grid.

    :param images: four images ordered [rgb_a, rgb_b, depth_a, depth_b]
        (see plot_correspondences_direct below)
    :param uv_a: location(s) in image a — either one (u, v) scalar pair or a
        (list_of_u, list_of_v) pair, as distinguished by the isinstance check
    :param uv_b: same format as uv_a, for image b
    :param use_previous_plot: optional (fig, axes) from a previous call; when
        given, only the second column (the "b" images) is redrawn
    :param circ_color: matplotlib facecolor for the circles
    :param show: True -> show figure and return None; False -> return (fig, axes)
    """
    if use_previous_plot is None:
        fig, axes = plt.subplots(nrows=2, ncols=2)
    else:
        fig, axes = use_previous_plot[0], use_previous_plot[1]
    fig.set_figheight(10)
    fig.set_figwidth(15)
    # Grid layout: a | b on each row, so locations repeat per column.
    pixel_locs = [uv_a, uv_b, uv_a, uv_b]
    axes = axes.flat[0:]
    if use_previous_plot is not None:
        # Reuse path: restrict drawing to the right-hand column (indices 1, 3).
        axes = [axes[1], axes[3]]
        images = [images[1], images[3]]
        pixel_locs = [pixel_locs[1], pixel_locs[3]]
    for ax, img, pixel_loc in zip(axes[0:], images, pixel_locs):
        ax.set_aspect('equal')
        if isinstance(pixel_loc[0], int) or isinstance(pixel_loc[0], float):
            # Single correspondence: pixel_loc is one (u, v) pair.
            circ = Circle(pixel_loc, radius=10, facecolor=circ_color,
                          edgecolor='white', fill=True, linewidth=2.0, linestyle='solid')
            ax.add_patch(circ)
        else:
            # Multiple correspondences: pixel_loc is (u_list, v_list).
            for x, y in zip(pixel_loc[0], pixel_loc[1]):
                circ = Circle((x, y), radius=10, facecolor=circ_color,
                              edgecolor='white', fill=True, linewidth=2.0,
                              linestyle='solid')
                ax.add_patch(circ)
        ax.imshow(img)
    if show:
        plt.show()
        return None
    else:
        return fig, axes
def plot_correspondences_from_dir(log_dir, img_a, img_b, uv_a, uv_b,
    use_previous_plot=None, circ_color='g', show=True):
    """Load the RGB and depth images for *img_a*/*img_b* from *log_dir* and
    delegate to plot_correspondences."""
    prefix = log_dir + '/images/'
    # Order expected by plot_correspondences: rgb_a, rgb_b, depth_a, depth_b.
    filenames = [
        prefix + img_a + '_rgb.png',
        prefix + img_b + '_rgb.png',
        prefix + img_a + '_depth.png',
        prefix + img_b + '_depth.png',
    ]
    loaded = [mpimg.imread(name) for name in filenames]
    return plot_correspondences(loaded, uv_a, uv_b,
                                use_previous_plot=use_previous_plot,
                                circ_color=circ_color, show=show)
def plot_correspondences_direct(img_a_rgb, img_a_depth, img_b_rgb,
    img_b_depth, uv_a, uv_b, use_previous_plot=None, circ_color='g', show=True
    ):
    """Plot correspondences for already-loaded image arrays."""
    # Reorder into the layout plot_correspondences expects:
    # [rgb_a, rgb_b, depth_a, depth_b].
    image_grid = [img_a_rgb, img_b_rgb, img_a_depth, img_b_depth]
    return plot_correspondences(image_grid, uv_a, uv_b,
                                use_previous_plot=use_previous_plot,
                                circ_color=circ_color, show=show)
|
[
"1719870799@qq.com"
] |
1719870799@qq.com
|
7e3bfaabf0e7e46b0a25c2d41ce8a0a1e4281a74
|
925a067ff1473cf45ad8aa9cf99db4311a7799ed
|
/archive/get_data_modules_3.py
|
c9f61dd2cc18f30b1ac3542e9c449adb11015e7e
|
[] |
no_license
|
ORNL-Fusion/Collector-Probes
|
fd7250738e797befa06fad487e9d2498b61436a5
|
16e15a0d3dcaa8a88da25aaf3ea126e9eb2a5f96
|
refs/heads/master
| 2022-09-03T01:02:39.520659
| 2022-08-28T13:28:53
| 2022-08-28T13:28:53
| 95,914,293
| 1
| 3
| null | 2019-11-14T15:00:39
| 2017-06-30T18:34:29
|
Python
|
UTF-8
|
Python
| false
| false
| 7,404
|
py
|
import openpyxl as xl
from MDSplus import *
import sys
# Used to get location of .scn files.
from Tkinter import Tk
from tkFileDialog import askopenfilename
def get_RBS(tree, letter_probes, shot):
# Grab the RBS data.
print "\nLoading RBS Excel file... This may take a minute."
rbs_file = xl.load_workbook("RBS_excel_file.xlsx", data_only=True)
print "RBS Excel file loaded."
rbs_probe_list = rbs_file.get_sheet_names()
# Remove unecessary sheets.
rbs_probe_list.remove('refs')
rbs_probe_list.remove('RCX')
rbs_probe_list.remove('SUMMARY')
rbs_probe_list.remove('Sheet6')
# Check if RBS data available for the selected probes.
for letter_probe in letter_probes:
tmp_name = letter_probe + 'U' + str(shot)
if (tmp_name not in rbs_probe_list):
print 'RBS data not available for ' + tmp_name + '.'
tmp_name = letter_probe + 'D' + str(shot)
if (tmp_name not in rbs_probe_list):
print 'RBS data not available for ' + tmp_name + '.'
# Collect data from Excel sheet and put them into a signal.
for letter_probe in letter_probes:
for u_or_d in ['U', 'D']:
name = letter_probe + u_or_d + str(shot)
# Pass through if there isn't RBS data.
if (name not in rbs_probe_list): continue
print "Assigning RBS data to " + name + " probe..."
# Grab the corresponding RBS sheet.
sheet = rbs_file.get_sheet_by_name(name)
# Fill in run data, microcoul, w_counts and w_areal density.
count = 0
for row in 'BCDEFGHIJKLMNOPQRSTUV':
count = count + 1
if count < 10:
count_str = '0' + str(count)
else:
count_str = str(count)
# Run data.
rbs_cells = sheet[row + '2': row + '513']
rbs_vals = []
for index in range(0,512):
rbs_vals.append(rbs_cells[index][0].value)
# If "NoneType" (i.e. blank cell), skip over.
if (rbs_vals[0] is None):
print "Column " + row + " blank."
continue
path = '\\DP_PROBES::TOP.' + letter_probe + '.' + letter_probe + u_or_d + '.RBS.RUN' + count_str + ':' + 'SIGNAL'
my_node = tree.getNode(path)
#sig_expr = Data.compile("BUILD_SIGNAL($VALUE, BUILD_WITH_UNITS($1,'COUNTS'), \
# BUILD_WITH_UNITS($2,'CHANNEL'))", rbs_vals, range(1,513))
#my_node.putData(sig_expr)
raw = Int32Array(rbs_vals)
raw = raw.setUnits('Counts')
dim = Int32Array(range(1,513))
dim = dim.setUnits('Channel')
sig = Signal('$VALUE', raw, dim)
my_node.putData(sig)
# W Counts data.
wCount = sheet[row + '515'].value
path = '\\DP_PROBES::TOP.' + letter_probe + '.' + letter_probe + u_or_d + '.RBS.RUN' + count_str + ':' + 'w_counts'
my_node = tree.getNode(path)
wCount = Int32(wCount)
wCount = wCount.setUnits('Counts')
my_node.putData(wCount)
# Microcoulomb data.
microcol = sheet[row + '516'].value
path = '\\DP_PROBES::TOP.' + letter_probe + '.' + letter_probe + u_or_d + '.RBS.RUN' + count_str + ':' + 'microcol'
my_node = tree.getNode(path)
my_node.putData(microcol)
# W Areal Density
w_areal = sheet[row + '517'].value
w_areal_error = sheet[row + '518'].value
path = '\\DP_PROBES::TOP.' + letter_probe + '.' + letter_probe + u_or_d + '.RBS.RUN' + count_str + ':' + 'w_areal'
my_node = tree.getNode(path)
w_areal = Float64(w_areal)
w_areal = w_areal.setUnits('W/cm^2')
w_areal_error = Float64(w_areal_error)
w_areal - w_areal.setError(w_areal_error)
#expr = Data.compile("BUILD_WITH_UNITS(BUILD_WITH_ERROR($1, $2), 'W/cm^2')", w_areal, w_areal_error)
my_node.putData(w_areal)
# Location
loc = sheet[row + '525'].value
path = '\\DP_PROBES::TOP.' + letter_probe + '.' + letter_probe + u_or_d + '.RBS.RUN' + count_str + ':' + 'loc'
my_node = tree.getNode(path)
loc = Int32(loc)
loc = loc.setUnits('mm')
my_node.putData(loc)
def get_ICPMS(tree, letter_probes, shot):
# Ask user which probe data is being inserted for.
another = True
while (another == True):
while (True):
print "Which probe is ICP-MS data being added for? Please select from the following: \nAD, AU, BD, BU, CD, CU"
print "Enter 'q' to quit."
probe = raw_input("--> ")
if (probe == 'q'): break
elif probe not in ['AD', 'AU', 'BD', 'BU', 'CD', 'CU']:
print "Error: Incorrect entry. Please try again."
else: break
# Get the location of the ICPMS measurements for the samples.
if (probe == 'q'): break
locations = input("Enter in measured locations, separated by commas: ")
concentrations = input("Enter in concentrations used for this probe, separated by commas: ")
# Get the .scn files for each ppb at each location.
conc_files_all = []
for location in locations:
conc_files = []
for conc in concentrations:
print "Select .scn file for " + str(conc) + " ppb at " + str(location) + " mm..."
Tk().withdraw()
filename = askopenfilename()
conc_files.append(filename)
conc_files_all.append(conc_files)
# Get the standard used for this probe.
standards = []
print "Select the five standard .scn files used."
for value in range(1,6):
print "Standard " + str(value) + "..."
standards.append(askopenfilename())
# Start filling in the tree. Starting with the locations.
for number in range(1, len(locations)+1):
print "Adding data for location " + str(location[number-1])
path = '\\DP_PROBES::TOP.' + probe[0] + '.' + probe + '.ICPMS.LOC' + str(number) + ':POSITION'
my_node = tree.getNode(path)
my_node.putData(locations[number-1])
# Then fill in concentration values.
for sample in range(1, len(concentrations)+1):
path = '\\DP_PROBES::TOP.' + probe[0] + '.' + probe + '.ICPMS.LOC' + str(number) + '.SPECTRUM' + str(sample) + ':CONC'
my_node = tree.getNode(path)
my_node.putData(concentrations[sample-1])
# Then the .scn files.
for m in conc_files_all:
for n in m:
print "Adding file: " + str(n)
with open(n) as f:
content = f.readlines()
content = [x.strip() for x in content]
counts = [float(x) for x in content[4:len(content)-2]]
path = '\\DP_PROBES::TOP.' + probe[0] + '.' + probe + '.ICPMS.LOC' + str(number) + '.SPECTRUM' + str(sample) + ':DATA'
my_node = tree.getNode(path)
sig_expr = Data.compile("BUILD_SIGNAL($VALUE, BUILD_WITH_UNITS($1,'COUNTS'), \
BUILD_WITH_UNITS($2,'CHANNEL'))", counts, range(0,len(counts)))
my_node.putData(sig_expr)
# Then the standard .scn files.
count = 0
for m in standards:
count = count + 1
print "Adding standard: " + str(m)
with open(m) as f:
content = f.readlines()
content = [x.strip() for x in content]
counts = [float(x) for x in content[4:len(content)-2]]
path = '\\DP_PROBES::TOP.' + probe[0] + '.' + probe + '.ICPMS.LOC' + str(number) + '.STANDARDS.STANDARD' + str(count) + ':DATA'
my_node = tree.getNode(path)
sig_expr = Data.compile("BUILD_SIGNAL($VALUE, BUILD_WITH_UNITS($1,'COUNTS'), \
BUILD_WITH_UNITS($2,'CHANNEL'))", counts, range(0,len(counts)))
my_node.putData(sig_expr)
print ""
# Ask if user wants to select data for another probe.
print "Insert data for another probe (y/n)?"
answer = None
while (answer not in ['y', 'n']):
answer = raw_input("--> ")
if (answer == 'y'):
another = True
break
elif (answer == 'n'):
another = False
break
else:
print "Please answer (y/n)."
|
[
"shawnzamperini@gmail.com"
] |
shawnzamperini@gmail.com
|
d5a97378f8e2eb8a234eefd79c834da56ef3a6b1
|
79ee8affe61807c0d732418b74e8b21447e64342
|
/todo_list/migrations/0001_initial.py
|
342d94714a86342deab7d3b9b3fccad667a5f783
|
[] |
no_license
|
ayushkr07/todo_list_app
|
59e6b2ce804927bf65d910408f8c9e53052bc8d8
|
07fd51bbfd395e0a08126578046687b74c894dc4
|
refs/heads/master
| 2020-04-28T10:07:20.748492
| 2019-03-18T05:08:18
| 2019-03-18T05:08:18
| 175,191,927
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
# Generated by Django 2.1.7 on 2019-03-13 17:49
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='List',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('item', models.CharField(max_length=200)),
('completed', models.BooleanField(default=False)),
],
),
]
|
[
"aks010598@gmail.com"
] |
aks010598@gmail.com
|
fa648d0af3f80e130d85710fa227f2d494b2f82d
|
6206ad73052b5ff1b6690c225f000f9c31aa4ff7
|
/Code/Checking Existence of Edge Length Limited Paths.py
|
dbffe95d8d1cced22bb8ba46105d264463746769
|
[] |
no_license
|
mws19901118/Leetcode
|
7f9e3694cb8f0937d82b6e1e12127ce5073f4df0
|
752ac00bea40be1e3794d80aa7b2be58c0a548f6
|
refs/heads/master
| 2023-09-01T10:35:52.389899
| 2023-09-01T03:37:22
| 2023-09-01T03:37:22
| 21,467,719
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,517
|
py
|
class UnionFind: #Union find.
def __init__(self, x: int):
self.label = x
self.parent = []
def find(self) -> 'UnionFind':
if not self.parent:
return self
self.parent = self.parent.find()
return self.parent
def union(self, uf: 'UnionFind') -> None:
if self.find().label != uf.find().label:
self.find().parent = uf.find()
class Solution:
def distanceLimitedPathsExist(self, n: int, edgeList: List[List[int]], queries: List[List[int]]) -> List[bool]:
sortedQuries = sorted([(x, y, q, i) for i, (x, y, q) in enumerate(queries)], key = lambda x: x[2]) #Sort queires by limit in query in ascending order and keep the original order.
edgeList.sort(key = lambda x: x[2]) #Sort edges by weight in asceding order.
ufs = [UnionFind(i) for i in range(n)] #Create a union find for each node.
result = [False] * len(queries) #Initialize result.
index = 0 #Initialize the pointer traversing edge list.
for x, y, q, i in sortedQuries: #Traverse sortedQuries.
while index < len(edgeList) and edgeList[index][2] < q: #Traverse edge list while current edge has smaller weight than the query limit.
ufs[edgeList[index][0]].union(ufs[edgeList[index][1]]) #Union the 2 nodes of the edge.
index += 1
result[i] = ufs[x].find().label == ufs[y].find().label #There is a path if parent of x and parent of y has same label, and all the edges in path are smaller than query limit.
return result #Return result.
|
[
"noreply@github.com"
] |
mws19901118.noreply@github.com
|
f9aa8e3db126e5369da93cb33fa60275690f08dd
|
91cff2fb42de0f20d2acebf22266bfe185aba9f1
|
/build/pyrobot/robots/LoCoBot/locobot_navigation/base_navigation/catkin_generated/pkg.develspace.context.pc.py
|
0531b4bd8bb7d38a5866c291b0d3923dd1afdba1
|
[] |
no_license
|
Tiga002/PyRobot_V2
|
b98b47a6c2015715c150e3df6617f22783472350
|
a72373cee6cff1baab7e248b4b5ea5811a666cec
|
refs/heads/master
| 2023-01-05T07:08:29.072177
| 2019-12-20T10:15:36
| 2019-12-20T10:15:36
| 229,241,878
| 0
| 0
| null | 2023-01-04T13:43:05
| 2019-12-20T10:16:53
|
Common Lisp
|
UTF-8
|
Python
| false
| false
| 391
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "move_base".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "base_navigation"
PROJECT_SPACE_DIR = "/home/developer/low_cost_ws/devel"
PROJECT_VERSION = "0.0.0"
|
[
"tigaleung002@gmail.com"
] |
tigaleung002@gmail.com
|
6c702bd35453fddd8cd392dcb6b4206937b473df
|
17b70c6444ab0f2ca1bfd7315f70d9ce39d7064f
|
/blog/migrations/0005_replycomment.py
|
6c8fbc2cc4ec2c4091b343bcc229bfe40ebb3394
|
[] |
no_license
|
GannTrader/django_blog
|
7de873cdae9d561c8afdb03a80d658ebb625f189
|
53157c049e31b96cbd5f9bfc1b909062b6382313
|
refs/heads/master
| 2022-12-10T12:04:47.291119
| 2020-09-13T07:55:04
| 2020-09-13T07:55:04
| 295,065,089
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 986
|
py
|
# Generated by Django 2.2 on 2020-09-13 07:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0004_auto_20200913_1436'),
]
operations = [
migrations.CreateModel(
name='ReplyComment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=255)),
('email', models.EmailField(max_length=254)),
('reply', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('status', models.CharField(choices=[('active', 'active'), ('inactive', 'inactive')], default='inactive', max_length=255)),
('comment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Comment')),
],
),
]
|
[
"="
] |
=
|
bbde6b3bf2b4db5905785b96fe1034677c867327
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AlipayTradeRepaybillCreateModel.py
|
96df27422c98e20818899a35633bf9ef183f4a11
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,348
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.RepayTradeDetail import RepayTradeDetail
class AlipayTradeRepaybillCreateModel(object):
def __init__(self):
self._bill_amount = None
self._bill_product = None
self._out_bill_no = None
self._repay_expire_date = None
self._repay_trade_details = None
self._user_id = None
@property
def bill_amount(self):
return self._bill_amount
@bill_amount.setter
def bill_amount(self, value):
self._bill_amount = value
@property
def bill_product(self):
return self._bill_product
@bill_product.setter
def bill_product(self, value):
self._bill_product = value
@property
def out_bill_no(self):
return self._out_bill_no
@out_bill_no.setter
def out_bill_no(self, value):
self._out_bill_no = value
@property
def repay_expire_date(self):
return self._repay_expire_date
@repay_expire_date.setter
def repay_expire_date(self, value):
self._repay_expire_date = value
@property
def repay_trade_details(self):
return self._repay_trade_details
@repay_trade_details.setter
def repay_trade_details(self, value):
if isinstance(value, list):
self._repay_trade_details = list()
for i in value:
if isinstance(i, RepayTradeDetail):
self._repay_trade_details.append(i)
else:
self._repay_trade_details.append(RepayTradeDetail.from_alipay_dict(i))
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.bill_amount:
if hasattr(self.bill_amount, 'to_alipay_dict'):
params['bill_amount'] = self.bill_amount.to_alipay_dict()
else:
params['bill_amount'] = self.bill_amount
if self.bill_product:
if hasattr(self.bill_product, 'to_alipay_dict'):
params['bill_product'] = self.bill_product.to_alipay_dict()
else:
params['bill_product'] = self.bill_product
if self.out_bill_no:
if hasattr(self.out_bill_no, 'to_alipay_dict'):
params['out_bill_no'] = self.out_bill_no.to_alipay_dict()
else:
params['out_bill_no'] = self.out_bill_no
if self.repay_expire_date:
if hasattr(self.repay_expire_date, 'to_alipay_dict'):
params['repay_expire_date'] = self.repay_expire_date.to_alipay_dict()
else:
params['repay_expire_date'] = self.repay_expire_date
if self.repay_trade_details:
if isinstance(self.repay_trade_details, list):
for i in range(0, len(self.repay_trade_details)):
element = self.repay_trade_details[i]
if hasattr(element, 'to_alipay_dict'):
self.repay_trade_details[i] = element.to_alipay_dict()
if hasattr(self.repay_trade_details, 'to_alipay_dict'):
params['repay_trade_details'] = self.repay_trade_details.to_alipay_dict()
else:
params['repay_trade_details'] = self.repay_trade_details
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayTradeRepaybillCreateModel()
if 'bill_amount' in d:
o.bill_amount = d['bill_amount']
if 'bill_product' in d:
o.bill_product = d['bill_product']
if 'out_bill_no' in d:
o.out_bill_no = d['out_bill_no']
if 'repay_expire_date' in d:
o.repay_expire_date = d['repay_expire_date']
if 'repay_trade_details' in d:
o.repay_trade_details = d['repay_trade_details']
if 'user_id' in d:
o.user_id = d['user_id']
return o
|
[
"jiandong.jd@antfin.com"
] |
jiandong.jd@antfin.com
|
247193480074898ebc24c9ec752b45c047cab22c
|
4711f3c69992f6bd19eba1d5d201189539a8bcec
|
/market/urls.py
|
03608bd572e5e37968b2c2680beda9fc589a9969
|
[] |
no_license
|
ajithkjames/OpenMart
|
3d98ec380a8f2fbc24670fba29a5c83c331fd728
|
64bfe714e5a435f8a06a3509ef2213fda227924c
|
refs/heads/master
| 2021-07-05T07:50:30.443277
| 2017-09-28T12:19:56
| 2017-09-28T12:19:56
| 104,214,654
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
from django.conf.urls import url, include
from rest_framework import routers
from rest_framework.routers import DefaultRouter
from market.views import CategoryViewSet, AdvertisementViewSet
router = routers.DefaultRouter()
router.register(r'category', CategoryViewSet, 'categories')
router.register(r'advertisement', AdvertisementViewSet, 'advertisements')
urlpatterns = [
]
urlpatterns += router.urls
|
[
"ajith.kjames3@gmail.com"
] |
ajith.kjames3@gmail.com
|
a2d82a587ab3df76aa9288380a7ef8423865f5da
|
c83d0f00a67c4b418f1b4868ab18493fda109e8e
|
/tests/conftest.py
|
745be785e0cc4e64ce048d09a8c3446ae7d83f15
|
[] |
no_license
|
rshk-archive/datacat-poc-141007
|
b8d09f4fb88dec5bf7837244d7d7904a67e31030
|
eb91b0d039dc36ea264d75850cab4834831f042c
|
refs/heads/master
| 2020-06-04T19:54:36.398365
| 2014-10-08T15:28:16
| 2014-10-08T15:28:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,011
|
py
|
import os
import random
import time
from urlparse import urlparse
import shutil
import pytest
import py.path
TESTS_ROOT_DIR = py.path.local(__file__).dirpath()
TESTS_DATA_DIR = TESTS_ROOT_DIR.join('data')
POSTGRES_ENV_NAME = 'POSTGRES_URL'
def _celery_testing_conf():
return dict(
CELERY_BROKER_URL='redis://localhost:6399/0',
CELERY_RESULT_BACKEND='redis://localhost:6399/0',
CELERY_ACCEPT_CONTENT=['json', 'msgpack', 'yaml'],
CELERY_ALWAYS_EAGER=True,
)
def _celery_testing_conf_py():
return "\n".join("{0} = {1!r}".format(key, val)
for key, val in _celery_testing_conf().iteritems()) + "\n"
@pytest.fixture(scope='module')
def postgres_conf():
if POSTGRES_ENV_NAME not in os.environ:
raise RuntimeError(
"Missing configuration: the {0} environment variable is required"
" in order to be able to create a PostgreSQL database for running"
" tests. Please set it to something like: ``postgresql://"
"user:password@host:port/database``."
.format(POSTGRES_ENV_NAME))
url = urlparse(os.environ[POSTGRES_ENV_NAME])
return {
'database': url.path.split('/')[1],
'user': url.username,
'password': url.password,
'host': url.hostname,
'port': url.port or 5432,
}
@pytest.fixture(scope='module')
def postgres_admin_db(request, postgres_conf):
from datacat.db import connect
conn = connect(**postgres_conf)
request.addfinalizer(lambda: conn.close())
return conn
@pytest.fixture(scope='module')
def postgres_user_conf(request, postgres_conf):
from datacat.db import connect
conn = connect(**postgres_conf)
conn.autocommit = True
randomcode = random.randint(0, 999999)
name = 'dtctest_{0:06d}'.format(randomcode)
# Note: we need to use separate transactions to perform
# administrative activities such as creating/dropping databases
# and roles.
# For this reason, we need to set the connection isolation level
# to "autocommit"
with conn.cursor() as cur:
cur.execute("""
CREATE ROLE "{name}" LOGIN
PASSWORD %(password)s;
""".format(name=name), dict(password=name))
cur.execute("""
CREATE DATABASE "{name}"
WITH OWNER "{name}"
ENCODING = 'UTF-8';
""".format(name=name))
def cleanup():
conn.autocommit = True
with conn.cursor() as cur:
# Kill all connections to database first
cur.execute("""
SELECT pg_terminate_backend(pg_stat_activity.pid)
FROM pg_stat_activity
WHERE pg_stat_activity.datname = '{name}'
AND pid <> pg_backend_pid();
""".format(name=name))
cur.execute('DROP DATABASE "{name}";'.format(name=name))
cur.execute('DROP ROLE "{name}";'.format(name=name))
request.addfinalizer(cleanup)
conf = postgres_conf.copy()
conf['user'] = name
conf['password'] = name
conf['database'] = name
# HACK to create PostGIS extension, used by some plugins
_conf = postgres_conf.copy()
_conf['database'] = name
_conn = connect(**_conf)
_conn.autocommit = True
with _conn.cursor() as cur:
cur.execute("CREATE EXTENSION postgis;")
return conf
@pytest.fixture
def postgres_user_db(request, postgres_user_conf):
from datacat.db import connect
conn = connect(**postgres_user_conf)
conn.autocommit = False
request.addfinalizer(lambda: conn.close())
return conn
@pytest.fixture
def postgres_user_db_ac(request, postgres_user_conf):
"""User database with autocommit on"""
from datacat.db import connect
conn = connect(**postgres_user_conf)
conn.autocommit = True
request.addfinalizer(lambda: conn.close())
return conn
@pytest.fixture(scope='module')
def app_config(postgres_user_conf):
from flask.config import Config
from datacat.settings import testing
conf = Config('')
conf.from_object(testing)
conf['DATABASE'] = postgres_user_conf
return conf
@pytest.fixture(scope='module')
def configured_app(request, app_config):
from datacat.web.core import make_app
app_config.update(_celery_testing_conf())
app = make_app(app_config)
app.debug = True
return app
@pytest.yield_fixture(scope='module')
def configured_app_ctx(configured_app):
ctx = configured_app.app_context()
ctx.push()
yield configured_app
ctx.pop()
@pytest.fixture(scope='module')
def redis_instance(request):
import subprocess
import tempfile
tempdir = tempfile.mkdtemp()
command = ['redis-server', '--port', '6399']
proc = subprocess.Popen(command, cwd=tempdir)
def cleanup():
proc.terminate()
proc.wait()
shutil.rmtree(tempdir)
request.addfinalizer(cleanup)
time.sleep(1)
return ('localhost', 6399)
@pytest.fixture
def data_dir():
return TESTS_DATA_DIR
|
[
"redshadow@hackzine.org"
] |
redshadow@hackzine.org
|
2417d121fd937b274579e82c572a60f4e7013fbf
|
58f6127876282bc27d28bb8f7a21a1148a4ed474
|
/tests/test_main.py
|
7a16bcbbb78d86d3ee861e38a9dc9fc38dc84454
|
[
"MIT"
] |
permissive
|
yoursbest/simple_calculator
|
cb8374e3cb0216bcc39b988f62492586b43fcfad
|
da3b88a743789ed4668981f8533094a11a7dd808
|
refs/heads/master
| 2022-12-14T03:39:37.471959
| 2020-03-15T12:43:51
| 2020-09-09T09:43:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,965
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from simple_calculator.main import SimpleCalculator
def test_add_two_numbers():
calculator = SimpleCalculator()
result = calculator.add(4, 5)
assert result == 9
def test_add_three_numbers():
calculator = SimpleCalculator()
result = calculator.add(4, 5, 6)
assert result == 15
def test_add_many_numbers():
numbers = range(100)
calculator = SimpleCalculator()
result = calculator.add(*numbers)
assert result == 4950
def test_subtract_two_numbers():
calculator = SimpleCalculator()
result = calculator.sub(10, 3)
assert result == 7
def test_mul_two_numbers():
calculator = SimpleCalculator()
result = calculator.mul(6, 4)
assert result == 24
def test_mul_many_numbers():
numbers = range(1, 10)
calculator = SimpleCalculator()
result = calculator.mul(*numbers)
assert result == 362880
def test_div_two_numbers_float():
calculator = SimpleCalculator()
result = calculator.div(13, 2)
assert result == 6.5
def test_div_by_zero_returns_inf():
calculator = SimpleCalculator()
result = calculator.div(5, 0)
assert result == float('inf')
def test_mul_by_zero_raises_exception():
calculator = SimpleCalculator()
with pytest.raises(ValueError):
calculator.mul(3, 0)
def test_avg_correct_average():
calculator = SimpleCalculator()
result = calculator.avg([2, 5, 12, 98])
assert result == 29.25
def test_avg_removes_upper_outliers():
calculator = SimpleCalculator()
result = calculator.avg([2, 5, 12, 98], ut=90)
assert result == pytest.approx(6.333333)
def test_avg_removes_lower_outliers():
calculator = SimpleCalculator()
result = calculator.avg([2, 5, 12, 98], lt=10)
assert result == pytest.approx(55)
def test_avg_uppper_threshold_is_included():
calculator = SimpleCalculator()
result = calculator.avg([2, 5, 12, 98], ut=98)
assert result == 29.25
def test_avg_lower_threshold_is_included():
calculator = SimpleCalculator()
result = calculator.avg([2, 5, 12, 98], lt=2)
assert result == 29.25
def test_avg_empty_list():
calculator = SimpleCalculator()
result = calculator.avg([])
assert result == 0
def test_avg_manages_empty_list_after_outlier_removal():
calculator = SimpleCalculator()
result = calculator.avg([12, 98], lt=15, ut=90)
assert result == 0
def test_avg_manages_empty_list_before_outlier_removal():
calculator = SimpleCalculator()
result = calculator.avg([], lt=15, ut=90)
assert result == 0
def test_avg_manages_zero_value_lower_outlier():
calculator = SimpleCalculator()
result = calculator.avg([-1, 0, 1], lt=0)
assert result == 0.5
def test_avg_manages_zero_value_upper_outlier():
calculator = SimpleCalculator()
result = calculator.avg([-1, 0, 1], ut=0)
assert result == -0.5
|
[
"giordani.leonardo@gmail.com"
] |
giordani.leonardo@gmail.com
|
db10f1354db46d48e201db222bb29f36ff4b5d31
|
967c707b0e675968052006346b67147d59078e6b
|
/191.number-of-1-bits.py
|
29eb5d2ada559090b418b43794b80a53f749fd54
|
[] |
no_license
|
hahaliu/LeetCode-Python3
|
40330f8ee2c9613d75bd70eb77b41767893c2fd1
|
1caeab8dc1c2a7c1c2beee3f09ef4b953d276033
|
refs/heads/master
| 2020-04-23T19:09:49.958096
| 2018-10-16T07:49:49
| 2018-10-16T07:49:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
# ex2tron's blog:
# http://ex2tron.wang
# class Solution(object):
# def hammingWeight(self, n):
# """
# :type n: int
# :rtype: int
# """
# return (bin(n)[2:]).count('1')
# 别人的代码:
class Solution:
def hammingWeight(self, n):
result = 0
while n:
n &= n - 1
result += 1
return result
print(Solution().hammingWeight(11))
|
[
"ex2tron@foxmail.com"
] |
ex2tron@foxmail.com
|
d4f8c80b80c63ec2b8463a94cbfe57c292b2ddfa
|
3cd75f3d62911ba3d2114f95203e81d91be32877
|
/4day/Book07.py
|
1ef18971c44708423b3b1e89ca7e86ad3f998849
|
[] |
no_license
|
kukukuni/Python_ex
|
3667e2fe1db3a161d9e6acd5d8145a3e692f8e89
|
e56d10bbcf3dc33b4422462a5b3c2dedb082b8c3
|
refs/heads/master
| 2022-11-05T13:58:55.770984
| 2019-04-14T00:57:18
| 2019-04-14T00:57:18
| 181,244,073
| 0
| 1
| null | 2022-10-23T06:38:06
| 2019-04-14T00:50:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 879
|
py
|
# Book07.py
class Book :
def __init__(self,t,a,p):
self.__title__ = t # public 변수앞에__두개면 권한을 주겠다는 의미 뒤에 두개는 public
self.__author_ = a # private --> 메소드 우회접근 밖에서 외부접근 차단
self.__price = p # private --> 메소드 우회접근
self.category = '' # 방치 (public)
def pBook(self):
print(self.__title__+','+self.__author_+','+str(self.__price))
def setTitle(self,t): self.__title__ = t
def setAuthor(self,a): self.__author_ = a #__author_를 접근할 수 있는 메소드를 만듬.
# author를 보호하고 우회
def getAuthor(self): print(self.__author_)
def setPrice(self,p): self.__price =p
b1 = Book("파이썬","홍길동",30000)
b1.pBook()
b1.setAuthor("김연아"); b1.getAuthor()
|
[
"mxneyelo@gmail.com"
] |
mxneyelo@gmail.com
|
9abfe1720861b0f602574f7e87a97ac4aca860f8
|
206e2b8a6df8a2a9375fe188d74ffaa287484029
|
/poc/simple/publish.py
|
bc24442105aee93ed71a8f8ffe6afdbe5ea865ab
|
[] |
no_license
|
chaeplin/watchman
|
2af63eaf78f333b0e69e51063cc0cda8131e0e89
|
68e0893883a57bf8d703d7191e66f45407eccb75
|
refs/heads/master
| 2021-01-22T08:02:50.814892
| 2017-09-07T13:23:50
| 2017-09-07T13:23:50
| 81,871,983
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,914
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import io, os, sys
import time
import socket
import simplejson as json
import psutil
import paho.mqtt.client as mqtt
def get_load_average():
try:
raw_average = os.getloadavg()
load_average = { '1min': raw_average[0], '5min': raw_average[1], '15min': raw_average[2] }
return load_average
except:
return None
def get_cpu_percent():
try:
raw_percent = psutil.cpu_times_percent(interval=1, percpu=False)
cpu_percent = round(100 - raw_percent.idle, 1)
return cpu_percent
except:
return None
def get_virtual_memory():
try:
raw_vmem = psutil.virtual_memory()
vmem_usage = raw_vmem.percent
return vmem_usage
except:
return None
def get_disk_usage():
try:
raw_disk = psutil.disk_usage('/')
disk_usage = raw_disk.percent
return disk_usage
except:
return None
def get_process_list():
try:
process = []
for p in psutil.process_iter():
info = p.as_dict(attrs=["pid", "cmdline", "username", "memory_percent", "cpu_percent"])
info["cmdline"] = " ".join(info["cmdline"]).strip()
if len(info.get('cmdline', None)) > 0:
process.append(info)
return process
except:
return None
def on_connect(client, userdata, flags, rc):
print ("Connected with result code "+str(rc))
if __name__ == "__main__":
ipaddress = '10.10.10.10'
assert (len(ipaddress)) > 0, 'configure private address'
client = mqtt.Client()
client.on_connect = on_connect
client.connect('127.0.0.1', 1883, 10)
client.loop_start()
try:
while True:
epoch = int(time.time())
if epoch % 5 == 0:
hostname = socket.gethostname()
loadavg = get_load_average()
cpu = get_cpu_percent()
vmem = get_virtual_memory()
disk = get_disk_usage()
plist = get_process_list()
report = {
'hostname': hostname,
'ip': ipaddress,
'timestamp': epoch,
'loadavg': loadavg,
'cpu': cpu,
'vmem': vmem,
'disk': disk,
'plist': plist
}
print(json.dumps(report, sort_keys=True, indent=4, separators=(',', ': ')))
client.publish("host/" + ipaddress, json.dumps(report), 0, True)
time.sleep(1)
else:
time.sleep(0.8)
except Exception as e:
print(e.args[0])
sys.exit()
except KeyboardInterrupt:
sys.exit(1)
|
[
"chaeplin@gmail.com"
] |
chaeplin@gmail.com
|
5bb9f92064b542e3bf928e60a8cbbd04e449d861
|
66d7292253d2815ce80fa2abd09b898117426b81
|
/tests/conftest.py
|
47b87ddc5599cc0828724b9be3f28ddb4959da63
|
[
"MIT"
] |
permissive
|
ubergesundheit/brightsky
|
5801b13aa61e1f8cf2b3b1708858f932f353b3d2
|
e8aec199dd2ade8ed520de8d0602db604cf0647e
|
refs/heads/master
| 2021-05-19T10:13:03.706968
| 2020-03-31T15:01:53
| 2020-03-31T15:01:53
| 251,646,442
| 0
| 0
|
MIT
| 2020-03-31T15:28:55
| 2020-03-31T15:28:55
| null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
import os
from pathlib import Path
import pytest
@pytest.fixture
def data_dir():
return Path(os.path.dirname(__file__)) / 'data'
|
[
"jakobdm1@gmail.com"
] |
jakobdm1@gmail.com
|
659ca6564bbc1040e080989d9f0b099b3a9fa9e2
|
30278f51d61cda6cb2e7dc0d5e8ba71f63092285
|
/HELLOPYTHON/day02/myclass_01.py
|
7b57b4fdcd3cd92ee5e11193f409aae3733bb347
|
[] |
no_license
|
shywj05/HelloPython
|
a99097ffc177e40ea7469bff5009bf06fe566a35
|
82ec0cf9fd00545ddb3a9c81d4474132d3c24767
|
refs/heads/master
| 2023-06-27T19:40:22.259916
| 2021-08-03T00:55:29
| 2021-08-03T00:55:29
| 392,139,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 626
|
py
|
class Animal:
def __init__(self):
self.age = 1
def getOld(self):
self.age+=1
class Human(Animal):
def __init__(self):
super().__init__()
self.power_lang = 1
def learn_lang(self):
self.power_lang += 1
def pt(self,power):
self.power_lang += power
if __name__ == '__main__':
ani = Animal()
print(ani.age)
ani.getOld()
print(ani.age)
hum = Human()
print(hum.age)
hum.getOld()
print(hum.age)
hum.learn_lang()
print(hum.power_lang)
hum.pt(5)
print(hum.power_lang)
|
[
"shywj05@gmail.com"
] |
shywj05@gmail.com
|
71fd0e69a186260a53278340be66f03f80866095
|
7bdb0e12359162c5dd2bddc58d2ca1d234fb29d2
|
/trunk/playground/intern/2009/Pakito/.svn/text-base/pakito.py.svn-base
|
2973af6b4b23495fa94e8e661c2ea85598d13029
|
[] |
no_license
|
hitaf/Pardus-2011-Svn-
|
f40776b0bba87d473aac45001c4b946211cbc7bc
|
16df30ab9c6ce6c4896826814e34cfeadad1be09
|
refs/heads/master
| 2021-01-10T19:48:33.836038
| 2012-08-13T22:57:37
| 2012-08-13T22:57:37
| 5,401,998
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 974
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from kdecore import KApplication, KAboutData, KCmdLineArgs, KGlobal, KIcon
from qt import QObject, SIGNAL, SLOT
from pakito.gui.mainwindow import MainWindow
def I18N_NOOP(x):
return x
name = "Pakito"
version = "0.3"
mail = "gokcen.eraslan@gmail.com"
description = I18N_NOOP("A tool for accelerating package making process")
if __name__ == "__main__":
about = KAboutData(name.lower(), name, version, description, KAboutData.License_GPL_V2, "(C) Gökçen Eraslan 2007", None, None, mail)
about.addAuthor("Gökçen Eraslan", None, mail)
KCmdLineArgs.init(sys.argv, about)
app = KApplication()
programLogo = KGlobal.iconLoader().loadIcon("pisikga", KIcon.Desktop)
about.setProgramLogo(programLogo.convertToImage())
QObject.connect(app, SIGNAL("lastWindowClosed()"), app, SLOT("quit()"))
pac = MainWindow(None, name)
app.setMainWidget(pac)
pac.show()
app.exec_loop()
|
[
"fatih@dhcppc1.(none)"
] |
fatih@dhcppc1.(none)
|
|
68e70fbbe907d4bf87455154db59c78968f64354
|
bad62c2b0dfad33197db55b44efeec0bab405634
|
/sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2021_07_01/_configuration.py
|
16e7beaf5b875bc8a0fafc102999e9b2661a6327
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
test-repo-billy/azure-sdk-for-python
|
20c5a2486456e02456de17515704cb064ff19833
|
cece86a8548cb5f575e5419864d631673be0a244
|
refs/heads/master
| 2022-10-25T02:28:39.022559
| 2022-10-18T06:05:46
| 2022-10-18T06:05:46
| 182,325,031
| 0
| 0
|
MIT
| 2019-07-25T22:28:52
| 2019-04-19T20:59:15
|
Python
|
UTF-8
|
Python
| false
| false
| 3,501
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class IotHubClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for IotHubClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The subscription identifier. Required.
:type subscription_id: str
:keyword api_version: Api Version. Default value is "2021-07-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(self, credential: "TokenCredential", subscription_id: str, **kwargs: Any) -> None:
super(IotHubClientConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop("api_version", "2021-07-01") # type: str
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = api_version
self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
kwargs.setdefault("sdk_moniker", "mgmt-iothub/{}".format(VERSION))
self._configure(**kwargs)
def _configure(
self, **kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get("authentication_policy")
if self.credential and not self.authentication_policy:
self.authentication_policy = ARMChallengeAuthenticationPolicy(
self.credential, *self.credential_scopes, **kwargs
)
|
[
"noreply@github.com"
] |
test-repo-billy.noreply@github.com
|
0a15e5071a4cf5caaaaf7b34122dc43319feed79
|
105ee300f5d50be628b81f30550c070fdec04495
|
/neuripsconf/done/ma_crosstr_v504b.py
|
e05d06387669366b4437825d7e66fde6f88f7351
|
[] |
no_license
|
Myyyr/segmentation3D
|
9b12c08b6eee245cc93b8ba2d1ac932a349eb618
|
0bd33d7a4c24816f3ecb4089a7d96ceaf64f298b
|
refs/heads/main
| 2023-06-21T06:45:12.609911
| 2021-07-13T07:49:43
| 2021-07-13T07:49:43
| 309,314,249
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,341
|
py
|
# More Parameters (depth) to match with classical UNet number of parameters.
# n_parameters = 114557582
import os
from models.utils import get_scheduler
import torch.optim as optim
import alltrain.atlasUtils as atlasUtils
from PatchedMultiatlasDataset_v3 import *
from torch.utils.data import DataLoader
import torch
import torchio as tio
from models.mymod.cross_patch import CrossPatch3DTr
from utils.metrics import DC_and_CE_loss, MultipleOutputLoss2
from nnunet.utilities.nd_softmax import softmax_helper
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
# TRAINING NO CROSS
# BIGGER MODEL
class ExpConfig():
def __init__(self):
# ID and Name
self.id = '504b'
self.experiment_name = "ma_crosstr_v{}".format(self.id)
self.debug = False
# System
self.checkpointsBasePath = "./checkpoints/"
self.checkpointsBasePathMod = self.checkpointsBasePath + 'models/'
self.labelpath = '/local/DEEPLEARNING/MULTI_ATLAS/MULTI_ATLAS/nnUNet_preprocessed/Task017_BCV/nnUNetData_plans_v2.1_stage1/'
self.datapath = self.labelpath
self.input_shape = [512,512,256]
# self.filters = [16, 32, 64, 128]
self.filters = [64, 192, 448, 704]
d_model=self.filters[-1]
# skip_idx = [1,3,5,6]
# self.patch_size=(128,128,128)
self.patch_size=(192,192,48)
# n_layers=6
self.clip = False
self.patched = True
# GPU
self.gpu = '1'
os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu
# torch.backends.cudnn.benchmark = False
# Model
number_of_cross_heads = 1
number_of_self_heads = 8
number_of_self_layer = 1
self.n_classes = 14
self.net = CrossPatch3DTr(filters=self.filters,patch_size=[1,1,1],d_model=d_model,
n_classes=self.n_classes,n_cheads=number_of_cross_heads,
n_sheads=number_of_self_heads,bn=True,up_mode='deconv',
n_strans=number_of_self_layer, do_cross=True)
self.net.inference_apply_nonlin = softmax_helper
self.n_parameters = count_parameters(self.net)
print("N PARAMS : {}".format(self.n_parameters))
# self.model_path = './checkpoints/models/crosstr_big.pth'
# self.model_path = './checkpoints/models/300/mod.pth'
self.model_path = 'checkpoints/models/504/modlast.pt'
max_displacement = 5,5,5
deg = (0,5,10)
scales = 0
self.transform = tio.Compose([
tio.RandomElasticDeformation(max_displacement=max_displacement),
tio.RandomAffine(scales=scales, degrees=deg)
])
# Training
self.start_epoch = 1000
self.epoch = 2000
# self.loss = torch.nn.CrossEntropyLoss()
self.loss = DC_and_CE_loss({'batch_dice': True, 'smooth': 1e-5, 'do_bg': False}, {})
self.ds_scales = ((1, 1, 1), (0.5, 0.5, 0.5), (0.25, 0.25, 0.25))
################# Here we wrap the loss for deep supervision ############
# we need to know the number of outputs of the network
net_numpool = 4
# we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
# this gives higher resolution outputs more weight in the loss
weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
# we don't use the lowest 2 outputs. Normalize weights so that they sum to 1
mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)])
weights[~mask] = 0
weights = weights / weights.sum()
self.ds_loss_weights = weights
# now wrap the loss
self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
################# END ###################
self.batchsize = 2
self.lr_rate = 1e-3
# self.final_lr_rate = 1e-5
# self.optimizer = optim.Adam(self.net.parameters(), lr = self.lr_rate)
self.optimizer = optim.SGD(self.net.parameters(), lr = self.lr_rate, weight_decay=3e-6, momentum=0.99)
self.optimizer.zero_grad()
self.validate_every_k_epochs = 10
# self.decay = (self.lr_rate/self.final_lr_rate - 1)/self.epoch
self.lr_scheduler = get_scheduler(self.optimizer, "poly", self.lr_rate, max_epochs=self.epoch)
self.load_model(False)
# Other
self.classes_name = ['background','spleen','right kidney','left kidney','gallbladder','esophagus','liver','stomach','aorta','inferior vena cava','portal vein and splenic vein','pancreas','right adrenal gland','left adrenal gland']
def set_data(self, split = 0):
# Data
# print(self.ds_scales)s
self.trainDataset = PatchedMultiAtlasDataset(self, mode="train", n_iter=250, patch_size=self.patch_size, return_full_image=True, ds_scales=self.ds_scales, do_tr=False, return_pos=True)
self.testDataset = PatchedMultiAtlasDataset(self, mode="test", n_iter=1, patch_size=self.patch_size, return_full_image=True, ds_scales=None, do_tr=False, return_pos=True)
self.trainDataLoader = DataLoader(dataset=self.trainDataset, num_workers=1, batch_size=self.batchsize, shuffle=True)
self.testDataLoader = DataLoader(dataset=self.testDataset, num_workers=1, batch_size=1, shuffle=False)
def load_model(self, load_lr=True):
print('LOAD MODEL ...')
if not os.path.exists(self.model_path):
torch.save(self.net.state_dict(), self.model_path)
elif self.start_epoch == 0:
self.net.load_state_dict(torch.load(self.model_path))
else:
a = torch.load(self.model_path)
self.net.load_state_dict(a['net_state_dict'])
# self.optimizer = optim.Adam(self.net.parameters(), lr = self.lr_rate, weight_decay=0)
self.optimizer.load_state_dict(a['optimizer_state_dict'])
if load_lr:
self.lr_scheduler.load_state_dict(a['scheduler'])
def net_stats(self):
s = 0
for p in self.net.parameters():
if p.requires_grad:
s += p.sum()
print('Mean :', s.item()/self.n_parameters)
|
[
"loic.themyr@gmail.com"
] |
loic.themyr@gmail.com
|
a7db5c206101d67106804b107e212f2550467f76
|
6f1034b17b49f373a41ecf3a5a8923fb4948992b
|
/docs/user_guide/operation/scripts/examples/argus/extraction/jan/test scripts/PrepareForBlankNP10.py
|
53218ab06727a51bf390cd988a4e472692541d64
|
[
"Apache-2.0"
] |
permissive
|
NMGRL/pychron
|
a6ec1854488e74eb5d3ff53eee8537ecf98a6e2f
|
8cfc8085393ace2aee6b98d36bfd6fba0bcb41c6
|
refs/heads/main
| 2023-08-30T07:00:34.121528
| 2023-06-12T17:43:25
| 2023-06-12T17:43:25
| 14,438,041
| 38
| 28
|
Apache-2.0
| 2023-08-09T22:47:17
| 2013-11-15T23:46:10
|
Python
|
UTF-8
|
Python
| false
| false
| 531
|
py
|
def main():
info('Prepare for NP10 Blank analysis')
close(description='Jan Inlet')
open(description='Jan Ion Pump')
close(description='Microbone to Minibone')
open(description='Microbone to Turbo')
open(description='Microbone to Inlet Pipette')
close(description='Microbone to Getter NP-10C')
open(description='Microbone to Getter NP-10H')
close(description='CO2 Laser to Felix')
close(description='CO2 Laser to Jan')
close(description='Microbone to CO2 Laser')
sleep(20)
|
[
"jirhiker@gmail.com"
] |
jirhiker@gmail.com
|
984063d568aa192a28ddbfe589875bef2f13dddd
|
a939e018333a9ecd26ddc618f99835b7eb381686
|
/le_crawler/core/.svn/text-base/urlmd5_inserter.py.svn-base
|
a208af395d09a8d72da4a6707dcead850008a074
|
[] |
no_license
|
cash2one/crawl_youtube
|
bff5ba254001c2f31f770e55a4aca39bc54e45ee
|
0dc40186a1d89da2b00f29d4f4edfdc5470eb4fc
|
refs/heads/master
| 2021-01-16T22:30:17.800282
| 2016-02-18T11:50:09
| 2016-02-18T11:50:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,749
|
import memcache
import hashlib
import urllib
class UrlMd5Inserter:
def __init__(self, logger = None):
self._client = memcache.Client(['127.0.0.1:11211'])
self._logger = logger
self._miss_count = 0
self._has_send_message = False
def send_message(self):
for tel in ['13426031534', '18515029185', '15330025605']:
api = 'http://10.182.63.85:8799/warn_messages'
params = {}
params['m'] = 'insert md5 failed in lejian crawler.'
params['p'] = tel
params = urllib.urlencode(params)
urllib.urlopen("%s?%s" % (api, params))
def insert_urlmd5(self, url):
if not url:
return False
if not isinstance(url, basestring):
return False
md5_str = hashlib.md5(url).hexdigest()
if not self._client.get(md5_str):
if self._client.set(md5_str, url):
self._miss_count = 0
if self._logger:
self._logger.debug('insert %s %s' % (md5_str, url))
return True
else:
self._miss_count += 1
if not self._has_send_message and self._miss_count > 5:
self.send_message()
self._has_send_message = True
if self._miss_count < 5 or self._miss_count & 1023 == 0:
self._client = memcache.Client(['127.0.0.1:11211'])
if self._client.set(md5_str, url):
return True
if self._logger:
self._logger.error('insert url_md5 failed! %s' % url)
return False
else:
# if self._logger:
# self._logger.info('md5 %s already has, url = %s' % (md5_str, url))
return True
if __name__ == '__main__':
c = UrlMd5Inserter()
import sys
if len(sys.argv) == 1:
print 'need url param!'
else:
url = sys.argv[1]
c.insert_urlmd5(url)
|
[
"zjc0516@126.com"
] |
zjc0516@126.com
|
|
c4e30ab3b20858daa94874c13887eb46b306a2dd
|
c1bfadbc033efba287ad55a804e9d69d297c3bf2
|
/valohai_cli/yaml_wizard.py
|
044a3a8e52e639b7fe3f82e1cee9cc4721456238
|
[
"MIT"
] |
permissive
|
valohai/valohai-cli
|
16560b078d20a02c8cdc7388beeea9bebac4be7d
|
c57cc164e749fb77b622d629a5ad05b2685534bb
|
refs/heads/master
| 2023-08-31T14:04:26.979762
| 2023-08-22T12:54:51
| 2023-08-22T12:54:51
| 81,329,264
| 14
| 5
|
MIT
| 2023-09-11T13:35:04
| 2017-02-08T12:46:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,261
|
py
|
import codecs
import os
from typing import List
import click
import requests
import yaml
from valohai_cli.messages import error, success, warn
from valohai_cli.utils import find_scripts
from valohai_cli.utils.cli_utils import prompt_from_list
YAML_SKELLINGTON = """---
- step:
name: Execute {command}
image: {image}
command: {command}
#inputs:
# - name: example-input
# default: https://example.com/
#parameters:
# - name: example
# description: Example parameter
# type: integer
# default: 300
"""
def get_image_suggestions() -> List[dict]:
try:
resp = requests.get('https://raw.githubusercontent.com/valohai/images/master/images.v2.yaml')
resp.raise_for_status()
images = [
{
'name': image,
'description': info['description'],
}
for image, info
in yaml.safe_load(resp.content).items()
if info.get("isRecommended")
]
images.sort(key=lambda i: str(i.get('name')).lower())
return images
except Exception as exc:
warn(f'Could not load online image suggestions: {exc}')
return []
def yaml_wizard(directory: str) -> None:
while True:
command = choose_command(directory)
image = choose_image()
yaml = YAML_SKELLINGTON.format(
image=image,
command=command,
)
click.secho('Here\'s a preview of the Valohai.yaml file I\'m going to create.', fg='cyan')
print(yaml)
yaml_path = os.path.join(directory, 'valohai.yaml')
if not click.confirm(f'Write this to {click.style(yaml_path, bold=True)}?'): # pragma: no cover
click.echo('Okay, let\'s try again...')
continue
with codecs.open(yaml_path, 'w', 'UTF-8') as out_fp:
out_fp.write(yaml)
success(f'All done! Wrote {yaml_path}.')
break
def choose_image() -> str:
image_suggestions = get_image_suggestions()
click.echo(
'Now let\'s pick a Docker image to use with your code.\n' +
(
'Here are some recommended choices, but feel free to type in one of '
'your own from the ones available at https://hub.docker.com/'
if image_suggestions
else ''
)
)
while True:
image = prompt_from_list(
image_suggestions,
(
'Choose a number or enter a Docker image name.'
if image_suggestions else
'Enter a Docker image name.'
),
nonlist_validator=lambda s: s.strip()
)
image_name = str(image["name"]) if isinstance(image, dict) else str(image)
if click.confirm(f'Is {click.style(image_name, bold=True)} correct?'):
break
success(f'Great! Using {image_name}.')
return image_name
def choose_command(directory: str) -> str:
scripts = sorted(find_scripts(directory))
while True:
if scripts:
click.echo(
'We found these script files in this directory.\n'
'If any of them is the script file you\'d like to use for Valohai, type its number.\n'
'Otherwise, you can just type the command to run.'
)
command = prompt_from_list(
[
{'name': f'{interpreter} {script}'}
for (interpreter, script)
in scripts
],
'Choose a number or enter a command.',
nonlist_validator=lambda s: s.strip()
)
if isinstance(command, dict):
command = command['name']
else: # pragma: no cover
command = click.prompt(
'We couldn\'t find script files in this directory.\n'
'Please enter the command you\'d like to run in the Valohai platform.\n'
)
if not command: # pragma: no cover
error('Please try again.')
continue
if click.confirm(f'Is {click.style(command, bold=True)} correct?'):
break
success(f'Got it! Using {command} as the command.')
return str(command)
|
[
"akx@iki.fi"
] |
akx@iki.fi
|
3b98be268d96202ec11b07ae5bf7405d018285ef
|
4b322cc4a7d5ce099e99fd597e4c51caf1704d16
|
/packs/python_packs.py
|
82322c932fc0b2c9fa9cae3a507ae5cdf303635c
|
[] |
no_license
|
mjgpy3/umm_script
|
7037b8aa8d3052b0534cbf33760cb681f9549c29
|
2f7a6e6ac34c2d0ec58195dc1d2d0912cc168d28
|
refs/heads/master
| 2021-01-10T20:11:06.473971
| 2013-03-06T15:27:30
| 2013-03-06T15:27:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
#!/usr/bin/env python
# Created by Michael Gilliland
# Date: Fri Jan 25 16:47:44 EST 2013
#
#
from package_container import PackageContainer
packages = {'Sympy': 'python-sympy',
'Numpy': 'python-numpy',
'Scipy': 'python-scipy',
'Matplotlib': 'python-matplotlib',
'Spyder': 'spyder'}
container = PackageContainer("Python", 'python', packages)
|
[
"mjg.py3@gmail.com"
] |
mjg.py3@gmail.com
|
8962b5672f25b13d653dde89d99035c4d8090e95
|
2d5d13c4bdc64202a520f32e7d4a44bb75e2004f
|
/week-02/d02/sum.py
|
a6efc084acd3c1c5d52d0715fbd71556a40e3c3e
|
[] |
no_license
|
green-fox-academy/andrasnyarai
|
43b32d5cc4ad3792ef8d621328f9593fc9623e0b
|
19759a146ba2f63f1c3e4e51160e6111ca0ee9c3
|
refs/heads/master
| 2021-09-07T16:19:34.636119
| 2018-02-26T00:38:00
| 2018-02-26T00:38:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
# - Write a function called `sum` that sum all the numbers
# until the given parameter
def sum(x):
s = 0
for i in range(1,x+1):
s = s + i
return s
x = int(input())
print(sum(x))
|
[
"andrasnyarai@gmail.com"
] |
andrasnyarai@gmail.com
|
7684a374aa577e0696430e5dec1be15046f15e42
|
c3ad35192313827ae1064e9614182c182690d675
|
/3 - Two Pointers Algorithm/228. Middle of Linked List.py
|
f4319e7153443b8a090acb1b9ee6e7585b714d05
|
[] |
no_license
|
LingHsiLiu/Algorithm1
|
2614cb6e7d6d83e53081397153e0c7938a8c3196
|
817332c374cab5d2fa8d6abd1b27dbcad85656d5
|
refs/heads/master
| 2020-03-27T00:46:55.948804
| 2019-11-15T04:15:12
| 2019-11-15T04:15:12
| 145,656,542
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 725
|
py
|
# Find the middle node of a linked list.
# Example
# Given 1->2->3, return the node with value 2.
# Given 1->2, return the node with value 1.
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
"""
@param head: the head of linked list.
@return: a middle node of the linked list
"""
def middleNode(self, head):
# write your code here
if head is None:
return None
slow = head
fast = head.next
while fast is not None and fast.next is not None:
slow = slow.next
fast = fast.next.next
return slow
|
[
"noreply@github.com"
] |
LingHsiLiu.noreply@github.com
|
9c24e3ed3ce8a5a9fbed8e742abf78e1de21d5ce
|
e21f7d14e564d7fb921277a329ff078e86ad86a2
|
/2016/day_02/day_02.py
|
6e7b32d360e727f1672245c6798f5f753b46ca59
|
[] |
no_license
|
MrGallo/advent-of-code-solutions
|
31456a0718303cca6790cf1227831bcb14649e27
|
28e0331e663443ffa2638188437cc7e46d09f465
|
refs/heads/master
| 2022-07-07T08:49:30.460166
| 2020-12-17T17:22:24
| 2020-12-17T17:22:24
| 160,988,019
| 0
| 1
| null | 2022-06-21T22:26:19
| 2018-12-08T23:34:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,903
|
py
|
from typing import List
DIRECTIONS = {
"U": (0, -1),
"D": (0, 1),
"L": (-1, 0),
"R": (1, 0)
}
KEYPAD_WIDTH = 3
KEYPAD_HEIGHT = 3
KEYPAD = [
[i for i in range(1+KEYPAD_HEIGHT*j, KEYPAD_HEIGHT*j+KEYPAD_WIDTH+1)]
for j in range(KEYPAD_HEIGHT)
]
ACTUAL_KEYPAD = [
[None, None, "1", None, None],
[None, "2", "3", "4", None],
["5", "6", "7", "8", "9"],
[None, "A", "B", "C", None],
[None, None, "D", None, None]
]
def main() -> None:
with open("input.txt", "r") as f:
instructions = [list(line) for line in f.read().split("\n")]
print(part1(instructions)) # answer: 44558
print(part2(instructions)) # answer: 6BBAD
def part1(instructions: List[List[str]]) -> str:
passcode = ""
cursor_x, cursor_y = 1, 1
for digit_instruction in instructions:
for direction in digit_instruction:
dx, dy = DIRECTIONS[direction]
cursor_x = clamp(cursor_x+dx, 0, KEYPAD_WIDTH-1)
cursor_y = clamp(cursor_y+dy, 0, KEYPAD_HEIGHT-1)
passcode += str(KEYPAD[cursor_y][cursor_x])
return passcode
def part2(instructions: List[List[str]]) -> str:
passcode = ""
cursor_x, cursor_y = 0, 2
for digit_instruction in instructions:
for direction in digit_instruction:
dx, dy = DIRECTIONS[direction]
if cursor_y+dy < 0 or cursor_x+dx < 0: # fix list[-1] wrap-around
continue
try:
if ACTUAL_KEYPAD[cursor_y+dy][cursor_x+dx] is not None:
cursor_x += dx
cursor_y += dy
except IndexError:
continue
passcode += ACTUAL_KEYPAD[cursor_y][cursor_x]
return passcode
def clamp(n: int, a: int, b: int):
"""Clamp an integer (n) within the range of a to b inclusive"""
return min(max(n, a), b)
if __name__ == "__main__":
main()
|
[
"daniel.gallo@ycdsbk12.ca"
] |
daniel.gallo@ycdsbk12.ca
|
e4ce2491502b31086ded61177a25b8a7017a2484
|
bb79411e60fb06844f4d7cc3069e44caaac4d919
|
/asq/tests/test_data_record.py
|
85579f6af3b4bbbf38dacb6c7a0ebd60ccb1c75d
|
[
"MIT"
] |
permissive
|
geraintpalmer/ASQ
|
789bf37f7b51fd493fcb3ed10fabc3ad0ac2a904
|
4ff207317b201c96548bfa8263b6f04fcd4a546c
|
refs/heads/master
| 2021-01-10T11:20:06.865591
| 2015-12-14T16:00:15
| 2015-12-14T16:00:15
| 46,278,239
| 2
| 4
| null | 2015-12-14T16:00:15
| 2015-11-16T14:05:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,015
|
py
|
import unittest
import asq
class TestDataRecord(unittest.TestCase):
def test_init_method(self):
r = asq.DataRecord(2, 3, 2, 8, 1, 2)
self.assertEqual(r.arrival_date, 2)
self.assertEqual(r.wait, 0)
self.assertEqual(r.service_start_date, 2)
self.assertEqual(r.service_time, 3)
self.assertEqual(r.service_end_date, 5)
self.assertEqual(r.blocked, 3)
self.assertEqual(r.exit_date, 8)
self.assertEqual(r.node, 1)
self.assertEqual(r.customer_class, 2)
r = asq.DataRecord(5.7, 2.1, 8.2, 10.3, 1, 3)
self.assertEqual(r.arrival_date, 5.7)
self.assertEqual(round(r.wait, 1), 2.5)
self.assertEqual(r.service_start_date, 8.2)
self.assertEqual(r.service_time, 2.1)
self.assertEqual(round(r.service_end_date, 1), 10.3)
self.assertEqual(round(r.blocked, 1), 0.0)
self.assertEqual(r.exit_date, 10.3)
self.assertEqual(r.node, 1)
self.assertEqual(r.customer_class, 3)
|
[
"palmer.geraint@googlemail.com"
] |
palmer.geraint@googlemail.com
|
b885fe440bae9dafc2e7c60d43c6c4a58b12034d
|
947720511ee9a0cba09347042c3eadbb10f3e01e
|
/Results_plotting/semantic.py
|
c698e1bae4eea14a485974b0436e8568274b37ca
|
[] |
no_license
|
OMARI1988/language_and_vision
|
8737ee83043517f58e0cb36943b389c0bf617a04
|
527098c1d47ec959a24669d44effcd307b8309c2
|
refs/heads/master
| 2021-01-17T01:14:04.994195
| 2020-10-04T21:26:58
| 2020-10-04T21:26:58
| 31,417,989
| 0
| 1
| null | 2017-09-07T14:07:58
| 2015-02-27T12:18:42
|
Python
|
UTF-8
|
Python
| false
| false
| 4,482
|
py
|
# This is a ported version of a MATLAB example from the signal processing
# toolbox that showed some difference at one time between Matplotlib's and
# MATLAB's scaling of the PSD. This differs from psd_demo3.py in that
# this uses a complex signal, so we can see that complex PSD's work properly
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
"""
Simple demo of a horizontal bar chart.
"""
import matplotlib.pyplot as plt
plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
plt.subplots_adjust(hspace=0.45, wspace=1.3)
ax = plt.subplot(3, 1, 1)
opacity = 0.8
bar_width=1
# index = np.arange(3)
# plt.xticks(index, ('A', 'B', 'C'))
# plt.title('real-world dataset')
# ax.bar([0], [0.2], color="blue", width=bar_width, label="A- unsupervised", alpha=opacity, align="center")
# ax.bar([1], [0.75], color="red", width=bar_width, label="B- our approach", alpha=opacity, align="center")
# ax.bar([2], [0.99], color="green", width=bar_width, label="C- supervised", alpha=opacity, align="center")
# ax.legend(loc=2)
#
# ax = plt.subplot(1, 2, 2)
# plt.xticks(index, ('A', 'B', 'C'))
# plt.title('synthetic-world dataset')
# ax.bar([0], [0.2], color="blue", width=bar_width, label="A- unsupervised", alpha=opacity, align="center")
# ax.bar([1], [0.88], color="red", width=bar_width, label="B- our approach", alpha=opacity, align="center")
# ax.bar([2], [0.99], color="green", width=bar_width, label="C- supervised", alpha=opacity, align="center")
# ax.legend(loc=2)
# Example data
people = ('unsupervised', 'our-system', 'supervised')
y_pos = np.arange(len(people))
plt.barh([0], [32.9], align='center', height=1, alpha=0.9,color='orange')
plt.barh([1], [85.6], align='center', height=1, alpha=0.7, color="green")
plt.barh([2], [98.1], align='center', height=1, alpha=0.9, color=(.4,.3,1))
ax.text(32.9-4,0-.2,'32.9',size=16)
ax.text(85.6-4,1-.2,'85.6',size=16)
ax.text(98.1-4,2-.2,'98.1',size=16)
plt.xticks([0,20,40,60,80,100], ['0','20','40','60','80','100'], fontsize=20)
plt.yticks(y_pos, people, fontsize=20)
plt.title('Dukes (2013) dataset', fontsize=20)
plt.tick_params(axis='x', which='both', bottom='on', top='off', labelbottom='on')
plt.tick_params(axis='y', which='both', left='on', right='off', labelleft='on')
ax = plt.subplot(3, 1, 2)
people = ('unsupervised', 'our-system', 'supervised')
y_pos = np.arange(len(people))
plt.barh([0], [14], align='center', height=1, alpha=0.4)
plt.barh([1], [75], align='center', height=1, alpha=0.4, color="red")
plt.barh([2], [99], align='center', height=1, alpha=0.4, color="green")
plt.xticks([0,20,40,60,80,100], ['0','20','40','60','80','100'], fontsize=20)
plt.yticks(y_pos, people, fontsize=20)
plt.title('Jivko (2016) dataset', fontsize=20)
plt.tick_params(axis='x', which='both', bottom='on', top='off', labelbottom='on')
plt.tick_params(axis='y', which='both', left='on', right='off', labelleft='on')
ax = plt.subplot(3, 1, 3)
people = ('unsupervised', 'our-system', 'supervised')
y_pos = np.arange(len(people))
plt.barh([0], [39.8], align='center', height=1, alpha=0.9,color='orange')
plt.barh([1], [91.3], align='center', height=1, alpha=0.7, color="green")
plt.barh([2], [98.9], align='center', height=1, alpha=0.9, color=(.4,.3,1))
ax.text(31.2-4,0-.2,'39.8',size=16)
ax.text(81.5-4,1-.2,'91.3',size=16)
ax.text(97.4-4,2-.2,'98.9',size=16)
plt.xticks([0,20,40,60,80,100], ['0','20','40','60','80','100'], fontsize=20)
plt.yticks(y_pos, people, fontsize=20)
plt.title('Sinapov (2016) dataset', fontsize=20)
plt.tick_params(axis='x', which='both', bottom='on', top='off', labelbottom='on')
plt.tick_params(axis='y', which='both', left='on', right='off', labelleft='on')
ax = plt.subplot(3, 1, 3)
people = ('unsupervised', 'our-system', 'supervised')
y_pos = np.arange(len(people))
plt.barh([0], [31.2], align='center', height=1, alpha=0.9,color='orange')
plt.barh([1], [81.5], align='center', height=1, alpha=0.7, color="green")
plt.barh([2], [97.4], align='center', height=1, alpha=0.9, color=(.4,.3,1))
ax.text(31.2-4,0-.2,'31.2',size=16)
ax.text(81.5-4,1-.2,'81.5',size=16)
ax.text(97.4-4,2-.2,'97.4',size=16)
plt.xticks([0,20,40,60,80,100], ['0','20','40','60','80','100'], fontsize=20)
plt.yticks(y_pos, people, fontsize=20)
plt.title('Alomari (2016) dataset', fontsize=20)
plt.tick_params(axis='x', which='both', bottom='on', top='off', labelbottom='on')
plt.tick_params(axis='y', which='both', left='on', right='off', labelleft='on')
plt.show()
|
[
"omari.1988@gmail.com"
] |
omari.1988@gmail.com
|
6f99374339cac941da9a9510f04d46779fdb8a46
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/ACL_PyTorch/contrib/cv/tracking/SiamFC/utils.py
|
e5d77ea29fc4685c0728936c9cf7e966f078b806
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 5,910
|
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import cv2
import torch
def ToTensor(sample):
sample = sample.transpose(2, 0, 1)
C, H, W = sample.shape
sample = sample.reshape(1, C, H, W)
return torch.from_numpy(sample.astype(np.float32))
def get_center(x):
return (x - 1.) / 2.
# top-left bottom-right --> cx,cy,w,h
def xyxy2cxcywh(bbox):
return get_center(bbox[0] + bbox[2]), \
get_center(bbox[1] + bbox[3]), \
(bbox[2] - bbox[0]), \
(bbox[3] - bbox[1])
# model_sz=127, a picture is resized from original_sz to model_sz
def crop_and_pad(img, cx, cy, model_sz, original_sz, img_mean=None):
xmin = cx - original_sz // 2
xmax = cx + original_sz // 2
ymin = cy - original_sz // 2
ymax = cy + original_sz // 2
im_h, im_w, _ = img.shape
left = right = top = bottom = 0
if xmin < 0:
left = int(abs(xmin))
if xmax > im_w:
right = int(xmax - im_w)
if ymin < 0:
top = int(abs(ymin))
if ymax > im_h:
bottom = int(ymax - im_h)
xmin = int(max(0, xmin))
xmax = int(min(im_w, xmax))
ymin = int(max(0, ymin))
ymax = int(min(im_h, ymax))
im_patch = img[ymin:ymax, xmin:xmax]
if left != 0 or right != 0 or top != 0 or bottom != 0:
if img_mean is None:
img_mean = tuple(map(int, img.mean(axis=(0, 1))))
im_patch = cv2.copyMakeBorder(im_patch, top, bottom, left, right,
cv2.BORDER_CONSTANT, value=img_mean)
if model_sz != original_sz:
im_patch = cv2.resize(im_patch, (model_sz, model_sz))
return im_patch
# size_z=127
def get_exemplar_image(img, bbox, size_z, context_amount, img_mean=None):
cx, cy, w, h = xyxy2cxcywh(bbox)
wc_z = w + context_amount * (w + h)
hc_z = h + context_amount * (w + h)
s_z = np.sqrt(wc_z * hc_z)
scale_z = size_z / s_z # 0.75
exemplar_img = crop_and_pad(img, cx, cy, size_z, s_z, img_mean) # 127*127
return exemplar_img, scale_z, s_z
def get_pyramid_instance_image(img, center, size_x, size_x_scales, img_mean=None):
if img_mean is None:
img_mean = tuple(map(int, img.mean(axis=(0, 1))))
pyramid = [crop_and_pad(img, center[0], center[1], size_x, size_x_scale, img_mean)
for size_x_scale in size_x_scales]
return pyramid
def center_error(rects1, rects2):
    r"""Euclidean distance between the centres of paired rectangles.

    Args:
        rects1 (numpy.ndarray): N x 4 array of (left, top, width, height) boxes.
        rects2 (numpy.ndarray): N x 4 array of (left, top, width, height) boxes.
    """
    c1 = rects1[..., :2] + (rects1[..., 2:] - 1) / 2
    c2 = rects2[..., :2] + (rects2[..., 2:] - 1) / 2
    delta = c1 - c2
    return np.sqrt((delta ** 2).sum(axis=-1))
def rect_iou(rects1, rects2, bound=None):
    r"""Intersection over union.

    Args:
        rects1 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
            (left, top, width, height).
        rects2 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
            (left, top, width, height).
        bound (numpy.ndarray): A 4 dimensional array, denotes the bound
            (min_left, min_top, max_width, max_height) for ``rects1`` and ``rects2``.

    NOTE(review): when ``bound`` is given, the clipping below writes into
    ``rects1``/``rects2`` in place, mutating the caller's arrays.
    """
    assert rects1.shape == rects2.shape
    if bound is not None:
        # bounded rects1
        rects1[:, 0] = np.clip(rects1[:, 0], 0, bound[0])
        rects1[:, 1] = np.clip(rects1[:, 1], 0, bound[1])
        rects1[:, 2] = np.clip(rects1[:, 2], 0, bound[0] - rects1[:, 0])
        rects1[:, 3] = np.clip(rects1[:, 3], 0, bound[1] - rects1[:, 1])
        # bounded rects2
        rects2[:, 0] = np.clip(rects2[:, 0], 0, bound[0])
        rects2[:, 1] = np.clip(rects2[:, 1], 0, bound[1])
        rects2[:, 2] = np.clip(rects2[:, 2], 0, bound[0] - rects2[:, 0])
        rects2[:, 3] = np.clip(rects2[:, 3], 0, bound[1] - rects2[:, 1])
    rects_inter = _intersection(rects1, rects2)
    areas_inter = np.prod(rects_inter[..., 2:], axis=-1)
    areas1 = np.prod(rects1[..., 2:], axis=-1)
    areas2 = np.prod(rects2[..., 2:], axis=-1)
    areas_union = areas1 + areas2 - areas_inter
    # eps guards against division by zero when both boxes are degenerate.
    eps = np.finfo(float).eps
    ious = areas_inter / (areas_union + eps)
    ious = np.clip(ious, 0.0, 1.0)
    return ious
def _intersection(rects1, rects2):
r"""Rectangle intersection.
Args:
rects1 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
rects2 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
"""
assert rects1.shape == rects2.shape
x1 = np.maximum(rects1[..., 0], rects2[..., 0])
y1 = np.maximum(rects1[..., 1], rects2[..., 1])
x2 = np.minimum(rects1[..., 0] + rects1[..., 2],
rects2[..., 0] + rects2[..., 2])
y2 = np.minimum(rects1[..., 1] + rects1[..., 3],
rects2[..., 1] + rects2[..., 3])
w = np.maximum(x2 - x1, 0)
h = np.maximum(y2 - y1, 0)
return np.stack([x1, y1, w, h]).T
|
[
"liuzhuheng@huawei.com"
] |
liuzhuheng@huawei.com
|
d6d9862ce74535a49554e76cbdf4a1b1b3d1f9c1
|
ebd6d5c981b2a897e398ccb6be188cfbad734d8a
|
/experiments/testAllSimilarNets.py
|
14c815b8919ba4912a90256a4c9a18e52076b6e3
|
[] |
no_license
|
thodorisGeorgiou/transfer_learning_experiments
|
02a21b762e5e863dbcc595423955747f4dad245a
|
9180c4f2750d56863ea56a1d2d9db9efaf955f11
|
refs/heads/main
| 2023-03-19T04:15:49.287784
| 2021-03-04T11:37:27
| 2021-03-04T11:37:27
| 344,453,380
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,741
|
py
|
import os
import sys
import multiprocessing
# modelTypes = ["op", "cc"]
# modelTypes = ["vc", "ds", "op", "cc"]
# Model-type tags; only "vc" is exercised in the current configuration.
modelTypes = ["vc"]
numRuns = 4
# basePath = sys.argv[1]
# mType = sys.argv[2]
# Model type handed to testNetworksOnFlow.py (hard-coded instead of argv).
mType = "vc"
# if basePath[-1] != "/":
# exit("Path must end with a slash")
# gpu = sys.argv[1]
# releaseDirs = ["vc/1/","vc/2/","vc/3/","vc/4/"]
# numTrainExamples = ["500/forces/vc/", "1000/forces/vc/", "2000/forces/vc/", "4000/forces/vc/", "8000/forces/vc/"]
# numTrainExamples = ["500/forceFlow_forces/vc/", "1000/forceFlow_forces/vc/", "2000/forceFlow_forces/vc/", "4000/forceFlow_forces/vc/", "8000/forceFlow_forces/vc/"]
# numTrainExamples += ["500/force_forces/vc/", "1000/force_forces/vc/", "2000/force_forces/vc/", "4000/force_forces/vc/", "8000/force_forces/vc/"]
# numTrainExamples = ["500/flow_forces/vc/", "1000/flow_forces/vc/", "2000/flow_forces/vc/", "4000/flow_forces/vc/", "8000/flow_forces/vc/"]
# numTrainExamples += ["500/forceRecon_forces/vc/", "1000/forceRecon_forces/vc/", "2000/forceRecon_forces/vc/", "4000/forceRecon_forces/vc/", "8000/forceRecon_forces/vc/"]
# numTrainExamples += ["500/flowRecon_forces/vc/", "1000/flowRecon_forces/vc/", "2000/flowRecon_forces/vc/", "4000/flowRecon_forces/vc/", "8000/flowRecon_forces/vc/"]
# numTrainExamples += ["500/all_forces/vc/", "1000/all_forces/vc/", "2000/all_forces/vc/", "4000/all_forces/vc/", "8000/all_forces/vc/"]
# numTrainExamples = ["500_2/forces/vc/", "1000_2/forces/vc/"]
# numTrainExamples = ["500_2/force_forces/vc/", "1000_2/force_forces/vc/", "500_2/all_forces/vc/", "1000_2/all_forces/vc/", "500_2/forceFlow_forces/vc/", \
# "1000_2/forceFlow_forces/vc/", "500_2/forceRecon_forces/vc/", "1000_2/forceRecon_forces/vc/"]
# numTrainExamples = ["500_2/force_forces/vc/", "1000_2/force_forces/vc/", "500_2/all_forces/vc/", "1000_2/all_forces/vc/", "500_2/forceFlow_forces/vc/", \
# "1000_2/forceFlow_forces/vc/", "500_2/forceRecon_forces/vc/", "1000_2/forceRecon_forces/vc/", "500_2/flow_forces/vc/", "1000_2/flow_forces/vc/", \
# "500_2/flowRecon_forces/vc/", "1000_2/flowRecon_forces/vc/"]
# numTrainExamples = ["500/", "1000/", "2000/", "4000/", "8000/"]
# Training-set-size sub-directories to evaluate.
numTrainExamples = ["500_2/", "1000_2/"]
# paths = ["trainedFromScratchTrainSetSize/", "trainedFromCheckpointFullModelTrainSetSize/", "trainedFromCheckpointOnlyConvLayersTrainSetSize/"]
paths = ["trainedFromCheckpointOnlyConvLayersTrainSetSize/"]
subPaths = ["force_flow/vc/", "forceRecon_flow/vc/"]
# subPaths = ["flow/vc/", "flow_flow/vc/", "force_flow/vc/", "flowRecon_flow/vc/", "forceRecon_flow/vc/", "forceFlow_flow/vc/", "all_flow/vc/"]
# Independent repetitions of each experiment.
runs = ["1", "2", "3", "4"]
def runTest(relDir):
    """Invoke testNetworksOnFlow.py on one release directory.

    Directories that do not exist are skipped with a message. Uses the
    module-level ``mType`` as the model-type argument.
    """
    if not os.path.isdir(relDir):
        print(relDir + "\nNot there :/")
        return
    os.system('python3 testNetworksOnFlow.py ' + relDir + " " + mType)
# Enumerate every existing <path><examples><subPath><run>Release/ directory.
allDirs = [basePath+ntExamples+subPath+run+"Release/" for basePath in paths for ntExamples in numTrainExamples for subPath in subPaths for run in runs if os.path.isdir(basePath+ntExamples+subPath+run+"Release/")]
# allDirs = [basePath+run+"Release/" for run in runs]
# Pool of a single worker: directories are evaluated strictly one at a time.
p = multiprocessing.Pool(1)
res = p.map(runTest, allDirs)
p.close()
p.join()
# for mType in modelTypes:
# 	for run in range(numRuns):
# 		# relDir = basePath+mType+"/"+str(run+1)+"/"
# 		relDir = basePath+str(run+1)+"Release/"
# 		if not os.path.isdir(relDir):
# 			print(relDir)
# 			continue
# 		os.system('CUDA_VISIBLE_DEVICES='+gpu+' python3 testNetworks.py '+relDir+" "+mType)
# 		# os.system('python3 testNetworks.py '+relDir+" "+mType)
|
[
"thodorisgeorgiou65@gmail.com"
] |
thodorisgeorgiou65@gmail.com
|
9286d75d6f5700bd05546d605a80cf14630ff80f
|
b7126fb70f72fea0e7bba6fe2fef6925302ef07b
|
/tceh6/venv/bin/easy_install-3.6
|
86c4357459840c772a72c2796a316a5c70675a33
|
[] |
no_license
|
duk1edev/tceh
|
79cd909c5a6221a2ca77d342b917462345140faa
|
21649d42488883beb58d709f4a9d1a05c75d2900
|
refs/heads/master
| 2021-07-12T10:20:22.330005
| 2020-04-29T09:24:08
| 2020-04-29T09:24:08
| 239,434,484
| 0
| 0
| null | 2021-03-20T03:38:26
| 2020-02-10T05:25:33
|
Python
|
UTF-8
|
Python
| false
| false
| 434
|
6
|
#!/home/duk1e/code/tceh6/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.6'
# Auto-generated console-script shim: normalises argv[0] (strips the
# "-script.py"/".exe" suffix Windows launchers add) and dispatches to the
# easy_install entry point declared by setuptools.
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.6')()
    )
|
[
"duk1e.ptc.ua@yandex.ru"
] |
duk1e.ptc.ua@yandex.ru
|
53acb3b43aae15e5fd6faadb8b7a2ca68b87c566
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/294/78128/submittedfiles/testes.py
|
6763a618605abdfcedcb0473d77f3999150f2231
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
# -*- coding: utf-8 -*-
# Read an integer from stdin and report whether it is even (PAR) or odd (IMPAR).
numero = int(input('Digite o número: '))
if numero % 2 == 0:
    print('PAR')
else:
    print('IMPAR')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
fc8146540a4e5180187271bc7e25e4a7259440eb
|
5b78602dd41bb226bc97e0acc60d2ec7dff10da4
|
/Temur_online_magazini/django_project/store/serializer.py
|
12892efabc4dc35b77f33ed5459f7c556c514a04
|
[] |
no_license
|
developeryuldashev/Rest_Api_boshlang-ich
|
fac742823442d052930526b60dc613853e9a9773
|
a2f1b5cc104d53504a694d26ba8f492f0743e67e
|
refs/heads/main
| 2023-08-18T23:52:40.441378
| 2021-09-13T03:32:29
| 2021-09-13T03:32:29
| 393,358,090
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 765
|
py
|
from rest_framework import serializers
from .models import *
class OrderDetActions(serializers.Serializer):
    """Plain serializer carrying a short (max 5 chars) action keyword for order-detail operations."""
    action=serializers.CharField(max_length=5)
class CustomerSerializer(serializers.ModelSerializer):
    """Serializes every field of the Customer model."""
    class Meta:
        model=Customer
        fields='__all__'
class CategoriesSerializer(serializers.ModelSerializer):
    """Serializes every field of the Categories model."""
    class Meta:
        model=Categories
        fields='__all__'
class ProductsSerializer(serializers.ModelSerializer):
    """Serializes every field of the Products model."""
    class Meta:
        model=Products
        fields='__all__'
class OrdersSerializer(serializers.ModelSerializer):
    """Serializes every field of the Orders model."""
    class Meta:
        model=Orders
        fields='__all__'
class OrderDetailsSerializer(serializers.ModelSerializer):
    """Serializes every field of the Order_details model."""
    class Meta:
        model=Order_details
        fields='__all__'
|
[
"81365808+developeryuldashev@users.noreply.github.com"
] |
81365808+developeryuldashev@users.noreply.github.com
|
3a6bb59f575212600ec46dd373b02dbb5de0329e
|
83f443f454716d534eff57ef399f86aa9a267b20
|
/b1_cnn/basic/1_tf_low_api.py
|
51f9795d81d41463963b3c6e3fd595539420d139
|
[] |
no_license
|
Gilbert-Gb-Li/Artificial-Intelligence
|
fef400c9e767ba7e348e1f135164da1e13c04592
|
28247627eab50613c1a5bf67f70e979a0a9eecb2
|
refs/heads/master
| 2021-04-23T19:46:19.556837
| 2020-04-09T08:47:42
| 2020-04-09T08:47:42
| 249,986,422
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
# NOTE(review): instructional snippet demonstrating the low-level
# tf.nn.conv2d API. As written it does not run on its own: `input` shadows
# the Python builtin and is never assigned a tensor, so the .get_shape()
# call below fails -- a 4-D [B, H, W, C] tensor must be supplied.
import tensorflow as tf
'''
tf 底层api
----------
input, 输入的形式[B, H, W, C]
filter, 卷积核输入形式[B, H, C_in, C_out]
strides, 步长[B, H, W, C]
'''
c_out = 128
'''# 通过输入的数据获取shape'''
# (Get the shape [B, H, W, C] from the input tensor.)
b, h, w, c = input.get_shape()
'''
定义filter, 名为kernel;之后操作可以使用该名称提取变量
'''
# (Define the filter variable named "kernel"; later code can fetch it by name.)
filter = tf.get_variable('kernel', [3, 3, c, c_out])
tf.nn.conv2d(input,
             filter=filter,
             strides=[1, 2, 2, 1],
             padding='SAME',
             use_cudnn_on_gpu=False,  # whether to enable cuDNN GPU acceleration
             data_format='NHWC',  # NHWC == BHWC
             dilations=[1, 2, 2, 1],  # dilated (atrous) conv: zeros inserted in the kernel enlarge the receptive field without adding trainable parameters
             name=None)  # op name, shown in the TensorBoard graph
|
[
"gb.l@foxmail.com"
] |
gb.l@foxmail.com
|
093a07fb661b25f93b35aa5258dea95855fc25a2
|
3e0468eb7101281ff8133b2acd08b6f83f8953f9
|
/chap10/map的使用.py
|
48dae53d968733878f5da77b18466b818b9dfc27
|
[] |
no_license
|
etoy0328/python_base_knowledge
|
4e514f93b844a1e5d2a654267cf5ea295ae634e2
|
7db140e838939da1ddf9967f82fc78d109aa6362
|
refs/heads/master
| 2023-03-22T10:23:28.211691
| 2021-03-16T10:32:42
| 2021-03-16T10:32:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
# Demonstrate map(): add 2 to every age, then materialise the lazy result.
ages = [11, 13, 12, 50, 30, 33, 19]
older = map(lambda age: age + 2, ages)
print(older)  # a lazy map object, e.g. <map object at 0x...>
new_ages = list(older)
print(new_ages)
|
[
"1740636835@qq.com"
] |
1740636835@qq.com
|
83087236d2a91b777ad987ef68a4c78684b0c4aa
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit_QC344.py
|
1ace2c0f3a04e6d8f71e1bb60a1394826cc48796
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,589
|
py
|
# qubit number=3
# total number=12
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
# Output path prefix for the (currently commented-out) circuit drawing.
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
    """Build the auto-generated benchmark circuit on *n* qubits.

    NOTE(review): reads the module-level globals E, V, gamma and beta, and
    indexes input_qubit[3], so despite the parameter it effectively
    requires n >= 4 (it is called with n=4 below).
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    prog = QuantumCircuit(input_qubit)
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.y(input_qubit[3]) # number=5
    # QAOA-style cost layer: one controlled-phase per weighted edge.
    for edge in E:
        k = edge[0]
        l = edge[1]
        prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
        prog.p(gamma, k)
        prog.p(gamma, l)
    # Mixer layer: RX rotation on every vertex qubit.
    prog.rx(2 * beta, range(len(V)))
    prog.swap(input_qubit[1],input_qubit[0]) # number=6
    prog.swap(input_qubit[1],input_qubit[0]) # number=7
    prog.x(input_qubit[0]) # number=8
    prog.x(input_qubit[0]) # number=9
    prog.x(input_qubit[2]) # number=10
    prog.x(input_qubit[2]) # number=11
    # circuit end
    return prog
if __name__ == '__main__':
    n = 4
    V = np.arange(0, n, 1)
    E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
    G = nx.Graph()
    G.add_nodes_from(V)
    G.add_weighted_edges_from(E)
    # Grid-search gamma/beta over [0, pi) and keep the pair maximising F1.
    step_size = 0.1
    a_gamma = np.arange(0, np.pi, step_size)
    a_beta = np.arange(0, np.pi, step_size)
    a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
    F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
        1 + np.cos(4 * a_gamma) ** 2)
    result = np.where(F1 == np.amax(F1))
    a = list(zip(result[0], result[1]))[0]
    gamma = a[0] * step_size
    beta = a[1] * step_size
    prog = make_circuit(4)
    sample_shot =5600
    writefile = open("../data/startQiskit_QC344.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    # Requires stored IBMQ credentials and network access.
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = provider.get_backend("ibmq_5_yorktown")
    # Transpile against the mocked Yorktown topology, then run on hardware.
    circuit1 = transpile(prog, FakeYorktown())
    circuit1.measure_all()
    prog = circuit1
    info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
2669fd30072a0d9fbdbd1acd75c604560975a01b
|
bcf11ccd5ec986f461edcf874c2d12e2fad13e76
|
/junk/208.py
|
39cd778d1a04341e41199f5b9766962f166b8c6b
|
[] |
no_license
|
buxuele/algo_snippet
|
c1a8a0855e6c2a02f5045d21710baa7fa8b00d13
|
9ab71d523f5079b6d72aef11b09120fee047f66c
|
refs/heads/master
| 2021-05-18T06:45:56.111807
| 2021-02-21T17:13:31
| 2021-02-21T17:13:31
| 251,164,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
# Count the integers in [0, 10**7) whose decimal digits are all distinct.
# Brute force: a number qualifies when de-duplicating its digit string
# does not shrink it.
ret = sum(1 for i in range(10 ** 7) if len(str(i)) == len(set(str(i))))
print()
print("finally")
print(ret)
|
[
"baogebuxuele@163.com"
] |
baogebuxuele@163.com
|
0c634e6bbd42c0e27ca3bc801542af775ec860b2
|
cb94a4cdd7a9df17f9c6f1a03f8f4ff12c916cf3
|
/Programming_Foundtions_Algorithms/Ex_Files_Programming_Foundations_Algorithms/Exercise Files/6 Other Algorithms/Filter_start.py
|
106aa0ba317296d654bc0d6d427504adf4ebebf0
|
[] |
no_license
|
sedstan/LinkedIn-Learning-Python-Course
|
2b936d0f00703a6e66a872220ed47572123dc7fd
|
b4584218355bf07aa3d2939b950911eae67adb0b
|
refs/heads/master
| 2021-10-11T10:19:13.675662
| 2019-01-24T17:55:20
| 2019-01-24T17:55:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
# Use a hash table to filter out duplicate items.
# Define a set of items in which we want to reduce duplicates.
items = ["apple", "pear", "orange", "banana", "apple",
         "orange", "apple", "pear", "banana", "orange",
         "apple", "kiwi", "pear", "apple", "orange"]
# Record each item once as a dict key. dict.fromkeys does the loop in one
# call and avoids shadowing the builtin `filter`, which the original did.
seen = dict.fromkeys(items, 0)
# The unique items are exactly the keys of the hash table.
result = set(seen.keys())
print(result)
|
[
"sed@wearewhy.co.uk"
] |
sed@wearewhy.co.uk
|
d527bc01e6d4354042cdd1609447257fbca71a4f
|
978248bf0f275ae688f194593aa32c267832b2b6
|
/xlsxwriter/test/worksheet/test_worksheet07.py
|
8987c4dc09a18d9fefa1ba175fa8d598780e5d5f
|
[
"BSD-2-Clause-Views"
] |
permissive
|
satish1337/XlsxWriter
|
b0c216b91be1b74d6cac017a152023aa1d581de2
|
0ab9bdded4f750246c41a439f6a6cecaf9179030
|
refs/heads/master
| 2021-01-22T02:35:13.158752
| 2015-03-31T20:32:28
| 2015-03-31T20:32:28
| 33,300,989
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,651
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
from ...sharedstrings import SharedStringTable
class TestAssembleWorksheet(unittest.TestCase):
    """
    Test assembling a complete Worksheet file.
    """
    def test_assemble_xml_file(self):
        """Test writing a worksheet with formulas in cells."""
        self.maxDiff = None
        fh = StringIO()
        worksheet = Worksheet()
        worksheet._set_filehandle(fh)
        worksheet.str_table = SharedStringTable()
        worksheet.select()
        # Write some data and formulas.
        worksheet.write_number(0, 0, 1)
        worksheet.write_number(1, 0, 2)
        worksheet.write_formula(2, 2, '=A1+A2', None, 3)
        # Formula containing XML-special characters (<, &, >, quotes); the
        # expected output below checks they are escaped correctly.
        worksheet.write_formula(4, 1, """="<&>" & ";"" '\"""", None, """<&>;" '""")
        worksheet._assemble_xml_file()
        exp = _xml_to_list("""
                <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
                <worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
                  <dimension ref="A1:C5"/>
                  <sheetViews>
                    <sheetView tabSelected="1" workbookViewId="0"/>
                  </sheetViews>
                  <sheetFormatPr defaultRowHeight="15"/>
                  <sheetData>
                    <row r="1" spans="1:3">
                      <c r="A1">
                        <v>1</v>
                      </c>
                    </row>
                    <row r="2" spans="1:3">
                      <c r="A2">
                        <v>2</v>
                      </c>
                    </row>
                    <row r="3" spans="1:3">
                      <c r="C3">
                        <f>A1+A2</f>
                        <v>3</v>
                      </c>
                    </row>
                    <row r="5" spans="1:3">
                      <c r="B5" t="str">
                        <f>"&lt;&amp;&gt;" &amp; ";"" '"</f>
                        <v>&lt;&amp;&gt;;" '</v>
                      </c>
                    </row>
                  </sheetData>
                  <pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
                </worksheet>
                """)
        got = _xml_to_list(fh.getvalue())
        self.assertEqual(got, exp)
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
8c4037f2fd69e4f6614d648cbd35c997b2d1510e
|
536bce6ca78a9a151247b51acb8c375c9db7445f
|
/src/files/rw_csv.py
|
39ccf3e1f9bacec7c15e21e42533d71dca30dde3
|
[] |
no_license
|
clicianaldoni/aprimeronpython
|
57de34313f4fd2a0c69637fefd60b0fb5861f859
|
a917b62bec669765a238c4b310cc52b79c7df0c9
|
refs/heads/master
| 2023-01-28T18:02:31.175511
| 2023-01-23T08:14:57
| 2023-01-23T08:14:57
| 112,872,454
| 0
| 0
| null | 2017-12-02T19:55:40
| 2017-12-02T19:55:40
| null |
UTF-8
|
Python
| false
| false
| 734
|
py
|
import csv
import pprint

# Load budget.csv into a list of rows (each row a list of strings).
with open('budget.csv', 'r') as infile:
    table = [row for row in csv.reader(infile)]
pprint.pprint(table)

# Convert the numeric body of the table to floats, keeping the header row
# and the first (label) column as strings.
for row in table[1:]:
    for c in range(1, len(table[0])):
        row[c] = float(row[c])
pprint.pprint(table)

# Append a final row holding the sum of every numeric column.
totals = ['sum'] + [
    sum(row[c] for row in table[1:])
    for c in range(1, len(table[0]))
]
table.append(totals)
pprint.pprint(table)

# Write the augmented table back out.
with open('budget2.csv', 'w') as outfile:
    csv.writer(outfile).writerows(table)
|
[
"martin@rodvand.net"
] |
martin@rodvand.net
|
ab7df51266add97d209fa99b82f75cf4f1e0eae2
|
9063052d8e2c294efa3b501d42aef2ac59d84fa0
|
/영상처리/assignment3/b.py
|
10dce4c350900a30d1a1377b7d280efdc9e3c9c2
|
[] |
no_license
|
yes99/practice2020
|
ffe5502d23038eabea834ebc2b18ff724f849c4a
|
100ac281f4fe6d0f991213802fbd8524451f1ac2
|
refs/heads/master
| 2021-07-08T00:54:19.728874
| 2021-06-13T05:52:07
| 2021-06-13T05:52:07
| 245,789,109
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,707
|
py
|
import numpy as np
import cv2

# Manual histogram equalisation of a greyscale image.
# NOTE(review): the original read the image in colour, then unpacked
# img.shape into two values (fails on a 3-channel image) and indexed an
# undefined `histogram` array (only histogramR/G/B/Y existed), so it
# crashed. Reading greyscale and using one histogram fixes both.
img = cv2.imread('nine.png', cv2.IMREAD_GRAYSCALE)

# Show the untouched input for comparison.
cv2.imshow('image1', img)

# Per-intensity pixel counts (256 bins) and the equalisation lookup table.
histogram = np.zeros((256,), dtype=np.float64)

height, width = img.shape
print("height = ", height)
print("width = ", width)

# Count how many pixels fall into each intensity bin.
for i in range(width):
    for j in range(height):
        histogram[img[j, i]] += 1

# Cumulative histogram, normalised by the pixel count and stretched to
# the 0..255 output range.
cal = np.cumsum(histogram)
cal = np.round(cal / float(height * width) * 255)

# Remap every pixel through the equalisation table.
for i in range(width):
    for j in range(height):
        img[j, i] = int(cal[img[j, i]])

cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"yes950324@naver.com"
] |
yes950324@naver.com
|
328897e5bad2df1b8130ad7198c2b38636080d6b
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Keras_tensorflow/source/tensorflow/python/ops/image_grad.py
|
b6b61ab92ce0e82331d349d7615f3badd7b6399b
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,785
|
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains Gradient functions for image ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_image_ops
@ops.RegisterGradient("ResizeNearestNeighbor")
def _ResizeNearestNeighborGrad(op, grad):
  """The derivatives for nearest neighbor resizing.

  Args:
    op: The ResizeNearestNeighbor op.
    grad: The tensor representing the gradient w.r.t. the output.

  Returns:
    The gradients w.r.t. the input and the output.
  """
  image = op.inputs[0]
  # Use the statically known spatial shape when available; otherwise read
  # it at graph-run time.
  if image.get_shape()[1:3].is_fully_defined():
    image_shape = image.get_shape()[1:3]
  else:
    image_shape = array_ops.shape(image)[1:3]
  # pylint: disable=protected-access
  grads = gen_image_ops._resize_nearest_neighbor_grad(
      grad,
      image_shape,
      align_corners=op.get_attr("align_corners"))
  # pylint: enable=protected-access
  # None: the second input (the target size) has no gradient.
  return [grads, None]
@ops.RegisterGradient("ResizeBilinear")
def _ResizeBilinearGrad(op, grad):
  """The derivatives for bilinear resizing.

  Args:
    op: The ResizeBilinear op.
    grad: The tensor representing the gradient w.r.t. the output.

  Returns:
    The gradients w.r.t. the input.
  """
  # Only propagate to the image for float inputs; other dtypes get None.
  allowed_types = [dtypes.float32, dtypes.float64]
  grad0 = None
  if op.inputs[0].dtype in allowed_types:
    # pylint: disable=protected-access
    grad0 = gen_image_ops._resize_bilinear_grad(
        grad,
        op.inputs[0],
        align_corners=op.get_attr("align_corners"))
    # pylint: enable=protected-access
  # None: the second input (the target size) has no gradient.
  return [grad0, None]
@ops.RegisterGradient("CropAndResize")
def _CropAndResizeGrad(op, grad):
  """The derivatives for crop_and_resize.

  We back-propagate to the image only when the input image tensor has floating
  point dtype but we always back-propagate to the input boxes tensor.

  Args:
    op: The CropAndResize op.
    grad: The tensor representing the gradient w.r.t. the output.

  Returns:
    The gradients w.r.t. the input image, boxes, as well as the always-None
    gradients w.r.t. box_ind and crop_size.
  """
  image = op.inputs[0]
  # Prefer the statically known full shape; fall back to a runtime read.
  if image.get_shape().is_fully_defined():
    image_shape = image.get_shape().as_list()
  else:
    image_shape = array_ops.shape(image)
  allowed_types = [dtypes.float16, dtypes.float32, dtypes.float64]
  if op.inputs[0].dtype in allowed_types:
    # pylint: disable=protected-access
    grad0 = gen_image_ops.crop_and_resize_grad_image(grad,
                                                     op.inputs[1],
                                                     op.inputs[2],
                                                     image_shape,
                                                     T=op.get_attr("T"))
    # pylint: enable=protected-access
  else:
    grad0 = None
  # The boxes gradient is computed unconditionally.
  grad1 = gen_image_ops.crop_and_resize_grad_boxes(grad, op.inputs[0],
                                                   op.inputs[1], op.inputs[2])
  return [grad0, grad1, None, None]
|
[
"ryfeus@gmail.com"
] |
ryfeus@gmail.com
|
a0fd3dd7e8aad3c7028e7c5168f79cb4d2878e3e
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2285/60889/279158.py
|
c12b8b99d18f9cb48e74a03d5c66649041d29e36
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 951
|
py
|
# For each test case: read daily prices, find the local extrema (candidate
# buy/sell days), and print them as "(buy sell)" index pairs. If the series
# starts falling and no trade exists, print "没有利润" ("no profit").
numOfInput = int(input())
for i in range(numOfInput):
    days = int(input())
    profits = list(map(int,input().split(" ")))
    trade = []
    trade.append(0)
    # Interior days that are a strict local minimum or maximum.
    for j in range(1,days-1):
        if ((profits[j] < profits[j-1]) and (profits[j] < profits[j+1])) or ((profits[j] > profits[j-1]) and (profits[j] > profits[j+1])):
            trade.append(j)
    trade.append(days-1)
    if profits[0] > profits[1]:
        # Series opens falling: day 0 is a peak, so pairing starts at index 1.
        if len(trade) == 2:
            print("没有利润")
        else:
            j = 1
            while len(trade) > j+3:
                print("(" + str(trade[j]) + " " + str(trade[j+1]) + ")"+" ",end = "")
                j = j + 2
            print("(" + str(trade[j]) + " " + str(trade[j+1]) + ")")
    else:
        # Series opens rising: day 0 itself is the first buy point.
        j = 0
        while len(trade) > j+3:
            print("(" + str(trade[j]) + " " + str(trade[j+1]) + ")"+" ",end = "" )
            j = j + 2
        print("(" + str(trade[j]) + " " + str(trade[j+1]) + ")")
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
e2126a086747b41651adecffe157114a80e7dc03
|
8e657d3f3d94d84e1948c61a82d5fbffcf913348
|
/akivymd/uix/hintwidget.py
|
65895294556bbe496a714b74e305b01e50d2a51d
|
[
"MIT"
] |
permissive
|
quitegreensky/akivymd
|
169b4466b9cbc39e4d940c3d1a616f80528ab8f4
|
b2daddd2f58889859514286606f46a4af6f03828
|
refs/heads/master
| 2023-06-03T15:12:44.746360
| 2020-11-02T19:04:37
| 2020-11-02T19:04:37
| 273,256,286
| 51
| 16
|
MIT
| 2021-06-17T03:34:51
| 2020-06-18T14:12:55
|
Python
|
UTF-8
|
Python
| false
| false
| 7,234
|
py
|
from kivy.animation import Animation
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.lang.builder import Builder
from kivy.properties import (
BooleanProperty,
ListProperty,
NumericProperty,
OptionProperty,
StringProperty,
)
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
Builder.load_string(
"""
<AKHintWidgetItem>
pos: self.parent.pos
<AKHintWidget>:
FloatLayout:
pos: root.pos
size: root.size
FloatLayout:
id: _float_box
# pos: root._hintwidget_x, root._hintwidget_y
size_hint: None, None
size: root.hintwidget_width, root.hintwidget_height
opacity: root._hintwidget_alpha
"""
)
class AKHintWidgetItem(BoxLayout):
    """Marker container: children of this type are re-parented into the floating hint box (see AKHintWidget.add_widget)."""
    pass
class AKHintWidget(BoxLayout):
    """Widget that shows a floating hint box next to the touch/hover point.

    Children subclassing AKHintWidgetItem go inside the hint box; any other
    child is added to the widget itself. The box follows the pointer and is
    kept inside the widget's own bounds.
    """
    # Size of the floating hint box.
    hintwidget_width = NumericProperty("150dp")
    hintwidget_height = NumericProperty("150dp")
    # Fade in/out animation duration and easing.
    opacity_duration = NumericProperty(0.2)
    transition = StringProperty("out_quad")
    # Gap between the pointer position and the hint box.
    offset_x = NumericProperty("10dp")
    offset_y = NumericProperty("10dp")
    # Whether the hint follows touches or mouse hovering.
    show_mode = OptionProperty("touch", options=["touch", "hover"])
    # Preferred corner of the pointer at which the box appears
    # (tr/tl/br/bl = top-right, top-left, bottom-right, bottom-left).
    hintwidget_pos = OptionProperty("tr", options=["tr", "tl", "br", "bl"])
    auto_dismiss = BooleanProperty(True)
    # Mouse button that opens the hint in "touch" mode.
    open_button = OptionProperty("left", options=["left", "right"])
    # Delay in seconds before the hint becomes visible.
    show_delay = NumericProperty(0)
    # Internal: current hint-box position, opacity, and open/close state.
    _hintwidget_x = NumericProperty()
    _hintwidget_y = NumericProperty()
    _hintwidget_alpha = NumericProperty(0)
    _opac_anim_started = False
    _state = "close"
    def __init__(self, **kwargs):
        super(AKHintWidget, self).__init__(**kwargs)
        # Defer binding one frame so the kv ids exist.
        Clock.schedule_once(lambda x: self._update())
    def _update(self):
        """Wire hover tracking and keep _float_box glued to the hint position."""
        if self.show_mode == "hover":
            Window.bind(mouse_pos=self._show_hover)
        elif self.show_mode == "touch":
            Window.unbind(mouse_pos=self._show_hover)
        self.bind(_hintwidget_x=self.ids._float_box.setter("x"))
        self.bind(_hintwidget_y=self.ids._float_box.setter("y"))
    def _right_top_hint(self):
        return (self._hintwidget_x, self._hintwidget_y)
    def _show_hover(self, instance, pos):
        """Show the hint while the mouse is over the widget; hide otherwise."""
        if self.collide_point(pos[0], pos[1]):
            self._set_hintwidget_pos(pos)
            Clock.schedule_once(
                lambda x: self._change_opacity(1), self.show_delay
            )
            self._state = "open"
        else:
            self._change_opacity(0)
            self._state = "close"
    @property
    def state(self):
        # Read-only "open"/"close" state of the hint box.
        return self._state
    def _set_hintwidget_pos(self, pos):
        """Place the hint box at *pos*, flipping sides so it stays inside the widget."""
        space_x = self.hintwidget_width + self.offset_x
        space_y = self.hintwidget_height + self.offset_y
        image_top = self.y + self.height
        image_right = self.x + self.width
        image_left = self.x
        image_bottom = self.y
        # Candidate box extents for the preferred corner.
        if self.hintwidget_pos == "tr":
            mag_right = pos[0] + space_x
            mag_top = pos[1] + space_y
            mag_left = pos[0]
            mag_bottom = pos[1]
        if self.hintwidget_pos == "br":
            mag_right = pos[0] + space_x
            mag_top = pos[1]
            mag_left = pos[0]
            mag_bottom = pos[1] - space_y
        if self.hintwidget_pos in "tl":
            mag_right = pos[0]
            mag_top = pos[1] + space_y
            mag_left = pos[0] - space_x
            mag_bottom = pos[1]
        if self.hintwidget_pos in "bl":
            mag_right = pos[0]
            mag_top = pos[1]
            mag_left = pos[0] - space_x
            mag_bottom = pos[1] - space_y
        # ===============
        # Flip horizontally/vertically when the preferred side overflows.
        if mag_right >= image_right:
            self._hintwidget_x = pos[0] - self.offset_x - self.hintwidget_width
        elif mag_left <= image_left:
            self._hintwidget_x = pos[0] + self.offset_x
        else:
            if self.hintwidget_pos in ["tr", "br"]:
                self._hintwidget_x = pos[0] + self.offset_x
            elif self.hintwidget_pos in ["tl", "bl"]:
                self._hintwidget_x = (
                    pos[0] - self.offset_x - self.hintwidget_width
                )
        if mag_top >= image_top:
            self._hintwidget_y = pos[1] - self.offset_y - self.hintwidget_height
        elif mag_bottom <= image_bottom:
            self._hintwidget_y = pos[1] + self.offset_y
        else:
            if self.hintwidget_pos in ["tr", "tl"]:
                self._hintwidget_y = pos[1] + self.offset_y
            elif self.hintwidget_pos in ["bl", "br"]:
                self._hintwidget_y = (
                    pos[1] - self.offset_y - self.hintwidget_height
                )
        # ===============
        # Clamp when the pointer itself is outside the widget bounds.
        if pos[0] > image_right:
            self._hintwidget_x = image_right - space_x
        if pos[0] < image_left:
            self._hintwidget_x = image_left + self.offset_x
        if pos[1] > image_top:
            self._hintwidget_y = image_top - space_y
        if pos[1] < image_bottom:
            self._hintwidget_y = image_bottom + self.offset_y
    def _change_opacity(self, opacity):
        """Animate the hint box opacity, throttled to one animation at a time."""
        if not self._opac_anim_started:
            anim = Animation(
                _hintwidget_alpha=opacity,
                duration=self.opacity_duration,
                t=self.transition,
            )
            anim.start(self)
            self._opac_anim_started = True
            Clock.schedule_once(
                lambda x: self._allow_opac_animation(), self.opacity_duration
            )
    def _allow_opac_animation(self):
        self._opac_anim_started = False
    def on_touch_down(self, touch):
        # NOTE(review): if a closed hint is opened by a touch that lands on
        # the (invisible) _float_box, `opac` may never be assigned before
        # the scheduled lambda reads it -- worth verifying.
        pos = touch.pos
        if self.show_mode == "touch" and self.collide_point(pos[0], pos[1]):
            if self._state == "open" and not self.ids._float_box.collide_point(
                pos[0], pos[1]
            ):
                opac = 0
                self._state = "close"
            elif touch.button == self.open_button:
                if not self.ids._float_box.collide_point(pos[0], pos[1]):
                    self._set_hintwidget_pos(pos)
                    opac = 1
                    self._state = "open"
            else:
                return super().on_touch_down(touch)
            Clock.schedule_once(
                lambda x: self._change_opacity(opac), self.show_delay
            )
        return super().on_touch_down(touch)
    def on_touch_up(self, touch):
        """Hide the hint on release unless auto_dismiss is disabled while open."""
        pos = touch.pos
        if self.show_mode == "touch":
            if not self.auto_dismiss and self._state == "open":
                opac = 1
            else:
                opac = 0
                self._state = "close"
            Clock.schedule_once(
                lambda x: self._change_opacity(opac), self.opacity_duration
            )
        return super().on_touch_up(touch)
    def on_touch_move(self, touch):
        # Drag: keep the hint box following the finger.
        pos = touch.pos
        if self.show_mode == "touch":
            self._set_hintwidget_pos(pos)
        return super().on_touch_move(touch)
    def add_widget(self, widget, index=0, canvas=None):
        # AKHintWidgetItem children live inside the floating box; everything
        # else is added to the widget normally.
        if issubclass(widget.__class__, AKHintWidgetItem):
            self.ids._float_box.add_widget(widget)
        else:
            super().add_widget(widget, index=index, canvas=canvas)
|
[
"quitegreensky@gmail.com"
] |
quitegreensky@gmail.com
|
5791643f8785ad34b90e8a376555f5e409d40fb1
|
facbdbdadacd23f6c83d266116dc14744741070f
|
/Core_Python/Day-8/Part-2 Loops/22.py
|
fbe299c4b5770cba90a8d6c96f2804bda69c3b43
|
[] |
no_license
|
Yogesh-Singh-Gadwal/YSG_Python
|
51b6b53fe34567bf066b6e487c00da766b47ac6b
|
f0d6841e1f92d1d2b27d8ecdd332d40b49a5ca69
|
refs/heads/master
| 2023-06-06T04:40:12.004713
| 2021-07-06T19:59:26
| 2021-07-06T19:59:26
| 292,482,586
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 186
|
py
|
# Loops
# While
a = 0
while a < 10:
a += 1
if a > 7:
break
print('Micky ',a)
else:
print('Complete Loop Executes')
print('End Loop')
|
[
"noreply@github.com"
] |
Yogesh-Singh-Gadwal.noreply@github.com
|
c0ba75e7f47fa1c382e6c8d5e91fb1dc9615395f
|
4ec4bc10fb1fd02e56eb1763bde624a47e9b7f9e
|
/sf_dd/model/distracted_driver_configer.py
|
e5791d7eca1a61e28335e3d3ba5299853df5dde1
|
[] |
no_license
|
cooperoelrichs/gpml
|
d4d1682a7074964dfa749af3bcbe2aef16aaafbb
|
fd62908f86d66e03668d9c58850133d0c3af80de
|
refs/heads/master
| 2020-12-12T00:41:59.494739
| 2016-05-13T06:53:54
| 2016-05-13T06:53:54
| 53,009,310
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 912
|
py
|
from gpml.model import configer
def from_json(config_file, project_dir):
return configer.from_json(
DistractedDriverConfiger, config_file, project_dir)
class DistractedDriverConfiger(configer.ConfigerBase):
def __init__(self, config, project_dir):
super().__init__(config, project_dir)
self.nb_classes = config['nb_classes']
self.class_names = config['class_names']
self.driver_imgs_list = self.data_dir + config['driver_imgs_list']
self.sample_submission = self.data_dir + config['sample_submission']
self.image_dirs = self.add_dir_to_names(
config['image_dirs'], self.data_dir)
self.image_size = tuple(config['image_size'])
self.data_sets = self.add_dir_to_names(
config['data_sets'], self.data_dir)
self.image_lists = self.add_dir_to_names(
config['image_lists'], self.data_dir)
|
[
"c.oelrichs@gmail.com"
] |
c.oelrichs@gmail.com
|
c1d3a1b216b74fd355d314fbf664e2c9a2b0caaa
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_223/ch62_2020_04_27_13_36_33_208956.py
|
31d6e4a06a8eed9bb438bef4c0f69df2a33d789d
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
def pos_arroba(email):
i=0
while i < len(email)-1:
if email[i] == "@":
posicao = i
else:
i+=1
return posicao
|
[
"you@example.com"
] |
you@example.com
|
3f2c5e2b102f57bd098165ff8171c55a59f18753
|
9df1da438b59a3a53b186db8999b46e92cd44168
|
/src/awp_processing/check_param.py
|
3b84034f08b56ce834a4158fd2470ba5b62a92da
|
[
"MIT"
] |
permissive
|
hzfmer/awp_processing
|
a59b62ba6de5e1859ce26c4f66e64a863ce3fd2a
|
e0aa0040f2ddeb2356386aae9d9b4d77bd77f15f
|
refs/heads/master
| 2021-01-03T06:05:06.785556
| 2020-12-05T04:38:05
| 2020-12-05T04:38:46
| 239,953,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,858
|
py
|
import argparse
from collections.abc import Iterable
import numpy as np
from awp_processing import awp, read_params
from awp_processing.check import check_mesh_cont
from pathlib2 import Path
# !Check these cons in pmcl3d_cons.h in the source code
BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z = 2, 2, 4
nbit_float = 4
parser = argparse.ArgumentParser()
parser.add_argument("--model", default="", help="configuration file")
parser.add_argument("--conf_file", default="param.sh", help="configuration file")
parser.add_argument("--batch_file", default="run.lsf", help="batch script")
args = parser.parse_args()
C = awp.Scenario(model=args.model, conf_file=args.conf_file)
cfg = C.cfg
# Convert parameters to floats or integers
"""
for k, v in cfg.items():
if not isinstance(v, Iterable):
print(k, v, type(v))
if type(v) == str and v and v.isdigit():
cfg[k] = float(v) if "." in v else int(v)
else:
print(k, v, type(v[0]))
if not isinstance(v, str) and type(v[0]) == str and v[0].isdigit():
# is list
v = [float(x) if "." in v else int(x) for x in v ]
cfg[k] = v
"""
# output directories
assert Path(args.model, cfg.chkfile).parent.exists()
assert Path(args.model, cfg.out).exists()
# layers
assert len(cfg.z) == len(cfg.nbgx) == len(cfg.dh) == len(cfg.nsrc) == cfg.g
for i in range(cfg.g):
assert cfg.x[i] % cfg.px == 0 and cfg.x[i] // cfg.px % BLOCK_SIZE_X == 0, f"Layer-{i}: Mismatch in X"
assert cfg.y[i] % cfg.py == 0 and cfg.y[i] // cfg.py % BLOCK_SIZE_Y == 0, f"Layer-{i}: Mismatch in Y"
assert cfg.z[i] // cfg.pz % BLOCK_SIZE_Z == 0, f"Layer-{i}: Mismatch in Z"
if cfg.insrc != "":
assert Path(args.model, cfg.insrc + "_" + str(i)).exists(), f"Layer-{i}: Source does not exist"
assert Path(args.model, cfg.insrc + "_" + str(i)).stat().st_size == cfg.nsrc[i] * (cfg.nst * 6 + 3) * nbit_float, f"Layer-{i}: Mismatch in source size"
assert Path(args.model, cfg.invel + "_" + str(i)).exists(), f"Layer-{i}: Mesh does not exist"
assert Path(args.model, cfg.invel + "_" + str(i)).stat().st_size == cfg.x[i] * cfg.y[i] * cfg.z[i] * cfg.nvar * nbit_float, f"Layer-{i}: Mismatch of mesh size"
if i + 1 < cfg.g:
# Check consistency of adjcent meshes
check_mesh_cont(Path(args.model, cfg.invel + "_" + str(i)),
Path(args.model, cfg.invel + "_" + str(i + 1)),
cfg.x[i], cfg.y[i], cfg.z[i])
# Topography
if cfg.intopo:
file_topo = Path(args.model, cfg.intopo)
nx, ny, pad = np.fromfile(file_topo, dtype='int32', count=3)
assert nx == cfg.x[0] and ny == cfg.y[0], f"Mismatch topography domain size"
assert (nx + 2 * pad) * (ny + 2 * pad) * nbit_float == file_topo.stat().st_size, f"Topography size does not match parameters"
# Receivers
if cfg.recvfile:
assert Path(args.model, cfg.recvfile).parent.exists(), f"Receiver output directory does not exist"
assert cfg.recv_steps % (cfg.recv_stride * cfg.recv_cpu_buffer_size \
* cfg.recv_gpu_buffer_size * cfg.recv_num_writes) == 0, "Check divisibility of receiver writing"
assert cfg.recv_length <= len(cfg.recv_coords), f"More receivers required than given"
# Source files in Ossian's format
if cfg.sourcefile:
assert Path(args.model, cfg.sourcefile).parent.exists(), f"Source file doesn't exist"
assert cfg.src_steps % (cfg.src_stride * cfg.src_cpu_buffer_size \
* cfg.src_gpu_buffer_size * cfg.src_num_writes) == 0, f"Check divisibility of source reading"
assert cfg.src_length == len(cfg.src_coords), f"Mismatch number of sources"
for suf in ['xx', 'yy', 'zz', 'xy', 'xz', 'yz']:
assert cfg.src_length * cfg.src_steps * nbit_float == Path(args.model, cfg.src_file + "_" + suf).stat().st_size, f"Input source file size doesn't match"
|
[
"hzfmer94@gmail.com"
] |
hzfmer94@gmail.com
|
97635af1733de430d86b77509ac1a87fac4f87bf
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/inspections/PyTypeCheckerInspection/OverloadsAndPureStubInSamePyiScope/module.pyi
|
95eb5fcb6819b9f21f9e987cf4054e29cd662c7e
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 254
|
pyi
|
from typing import overload
if undefined:
def foo(p: str) -> str: pass
else:
@overload
def foo(p: int) -> int: pass
@overload
def foo(p: str, i: int) -> str: pass
def bar(p: str) -> str: pass
@overload
def bar(p: int) -> int: pass
|
[
"intellij-monorepo-bot-no-reply@jetbrains.com"
] |
intellij-monorepo-bot-no-reply@jetbrains.com
|
fd05edecb92c88140588f6bbeef383ef68594d40
|
9028b6983685a3ace074049fccf2b8c503b77de8
|
/PyStationB/libraries/StaticCharacterization/staticchar/datasets.py
|
d26f4310ed43f6fc533142473c6655b5a4829e65
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
mebristo/station-b-libraries
|
7f5517e5e77e6cdc54c03355804b8c0a4fcae65b
|
40bab526af6562653c42dbb32b174524c44ce2ba
|
refs/heads/main
| 2023-09-03T03:54:53.181082
| 2021-10-01T03:21:11
| 2021-10-01T03:21:11
| 412,871,835
| 0
| 0
|
MIT
| 2021-10-02T17:53:07
| 2021-10-02T17:53:06
| null |
UTF-8
|
Python
| false
| false
| 5,294
|
py
|
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
"""This is a submodule used to retrieve the example data sets.
Its API may frequently change and it should *not* be used in production.
Exports:
Dataset, which is essentially a dictionary of data frames
load_dataframes_from_directory, a function reading all data frames in a directory into a dictionary
"""
import logging
from os import PathLike
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
import pandas as pd
CONDITION_KEY = "_conditions"
def missing_directory_message(path: Path) -> Optional[str]: # pragma: no cover
path = path.absolute()
if path.is_dir():
return None
ancestor = path
while ancestor.parent != ancestor:
ancestor = ancestor.parent
if ancestor.is_dir():
break
return f"Dataset directory {path} not found (only found {ancestor})"
class Dataset(Dict[str, pd.DataFrame]):
"""A class representing a set of data frames in a given directory.
Methods:
__getitem__, so that the data can be accessed using ``dataset[key]`` syntax
items, so that one can iterate over pairs (key, data frame) as in ``dict.items()``
get_a_frame, gives a data frame, what is useful for illustratory purposes
"""
def __init__(self, path: PathLike) -> None:
self._path = Path(path)
assert self._path.is_dir(), missing_directory_message(self._path)
all_csvs = self._path.glob("*.csv")
frames = dict(map(load_dataframe, all_csvs))
if CONDITION_KEY in frames:
conditions = frames[CONDITION_KEY]
self.conditions = {idx: row.to_dict() for idx, row in conditions.set_index("SampleID").iterrows()}
frames.pop(CONDITION_KEY)
else:
self.conditions = {key: {} for key in frames.keys()}
super().__init__(frames)
self.check_conditions_coverage()
def check_conditions_coverage(self):
"""
Warn if the contents of the _conditions.csv file do not exactly match the data files in the folder.
"""
condition_keys = set(self.conditions.keys())
data_keys = set(self.keys())
n_condition_only_keys = len(condition_keys.difference(data_keys))
if n_condition_only_keys > 0: # pragma: no cover
logging.warning(
f"{self._path} has {n_condition_only_keys} rows in {CONDITION_KEY}.csv with no corresponding data file"
)
n_data_only_keys = len(data_keys.difference(condition_keys))
if n_data_only_keys > 0: # pragma: no cover
logging.warning(
f"{self._path} has {n_data_only_keys} data files with no corresponding row in {CONDITION_KEY}.csv"
)
def __repr__(self) -> str:
return f"{type(self).__name__}(path='{self._path}')"
def get_a_frame(self, index: int = 0) -> pd.DataFrame:
"""A utility function, returning a data frame at position `index` in lexicographical order of the keys."""
keys = sorted(self.keys())
key = keys[index]
return self[key]
def items_by_well(self) -> List[Tuple[Optional[str], str, pd.DataFrame]]:
"""
Returns a sorted list of tuples of the form (well_id, sample_id, data_frame), where well_id is the value
of the "Well" field in the conditions, or None if that is absent. The ordering is by well row (letter)
and column (number) if there are well IDs, otherwise alphabetically by sample ID.
"""
items = [(self.conditions[sample_id].get("Well", None), sample_id, value) for sample_id, value in self.items()]
def ordering_tuple(tup: Tuple[Optional[str], str, Any]) -> Tuple[str, int]:
well, sample_id, _ = tup
try:
return well[0], int(well[1:]) # type: ignore
except (ValueError, IndexError, TypeError):
return sample_id, 0
return sorted(items, key=ordering_tuple) # type: ignore
def plate_layout(self) -> Optional[Tuple[List[str], List[int]]]:
"""
Attempts to return the set of letters (row IDs) and numbers (column IDs) for the wells in the dataset,
or None if that fails (most likely because there are no wells defined).
"""
wells = set(self.conditions[sample_id].get("Well", None) for sample_id in self)
try: # pragma: no cover
well_letters = sorted(set(w[0] for w in wells))
well_numbers = sorted(set(int(w[1:]) for w in wells))
return well_letters, well_numbers
except (ValueError, IndexError, TypeError):
return None
def load_dataframe(csv_path: PathLike) -> Tuple[str, pd.DataFrame]:
"""Returns a tuple (name, data frame). Used to construct a data set by `load_dataframes_from_directory`.
See:
load_dataframes_from_directory
Dataset
"""
return Path(csv_path).stem, pd.read_csv(csv_path) # type: ignore # auto
|
[
"noreply@github.com"
] |
mebristo.noreply@github.com
|
da39ff45a23d0fed6921618b9cd8f936042f5398
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/p6uXeD7JC7cmxeD2Z_5.py
|
8144f3ce1a89da17cf533400d0f88447f632ef3e
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,040
|
py
|
"""
Abigail and Benson are playing Rock, Paper, Scissors.
Each game is represented by an array of length 2, where the first element
represents what Abigail played and the second element represents what Benson
played.
Given a sequence of games, determine who wins the most number of matches. If
they tie, output "Tie".
* R stands for Rock
* P stands for Paper
* S stands for Scissors
### Examples
calculate_score([["R", "P"], ["R", "S"], ["S", "P"]]) ➞ "Abigail"
# Benson wins the first game (Paper beats Rock).
# Abigail wins the second game (Rock beats Scissors).
# Abigail wins the third game (Scissors beats Paper).
# Abigail wins 2/3.
calculate_score([["R", "R"], ["S", "S"]]) ➞ "Tie"
calculate_score([["S", "R"], ["R", "S"], ["R", "R"]]) ➞ "Tie"
### Notes
N/A
"""
def calculate_score(games):
s = 0
for g in games:
if "".join(g) in "PRSP": s+=1
if "".join(g) in "PRSP"[::-1]: s-=1
if s>0: return "Abigail"
if s<0: return "Benson"
return "Tie"
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
f0c1d119d54e73da200cd04bdfebbe4fb2e8a65a
|
b3ac12dfbb8fa74500b406a0907337011d4aac72
|
/tests/util/benchmark_cost.py
|
2cd221f3761d43be40774d8ace02b880cdd844da
|
[
"Apache-2.0"
] |
permissive
|
chia-os/goldcoin-blockchain
|
ab62add5396b7734c11d3c37c41776994489d5e7
|
5c294688dbbe995ae1d4422803f6fcf3e1cc6077
|
refs/heads/main
| 2023-08-11T23:58:53.617051
| 2021-09-12T15:33:26
| 2021-09-12T15:33:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,088
|
py
|
import time
from secrets import token_bytes
from blspy import AugSchemeMPL, PrivateKey
from clvm_tools import binutils
from goldcoin.consensus.default_constants import DEFAULT_CONSTANTS
from goldcoin.types.blockchain_format.program import Program, INFINITE_COST
from goldcoin.types.condition_opcodes import ConditionOpcode
from goldcoin.types.condition_with_args import ConditionWithArgs
from goldcoin.util.ints import uint32
from tests.wallet_tools import WalletTool
from goldcoin.wallet.derive_keys import master_sk_to_wallet_sk
from goldcoin.wallet.puzzles.p2_delegated_puzzle import puzzle_for_pk
def float_to_str(f):
float_string = repr(f)
if "e" in float_string: # detect scientific notation
digits, exp_str = float_string.split("e")
digits = digits.replace(".", "").replace("-", "")
exp = int(exp_str)
zero_padding = "0" * (abs(int(exp)) - 1) # minus 1 for decimal point in the sci notation
sign = "-" if f < 0 else ""
if exp > 0:
float_string = "{}{}{}.0".format(sign, digits, zero_padding)
else:
float_string = "{}0.{}{}".format(sign, zero_padding, digits)
return float_string
def run_and_return_cost_time(chialisp):
start = time.time()
clvm_loop = "((c (q ((c (f (a)) (c (f (a)) (c (f (r (a))) (c (f (r (r (a))))"
" (q ()))))))) (c (q ((c (i (f (r (a))) (q (i (q 1) ((c (f (a)) (c (f (a))"
" (c (- (f (r (a))) (q 1)) (c (f (r (r (a)))) (q ()))))))"
" ((c (f (r (r (a)))) (q ()))))) (q (q ()))) (a)))) (a))))"
loop_program = Program.to(binutils.assemble(clvm_loop))
clvm_loop_solution = f"(1000 {chialisp})"
solution_program = Program.to(binutils.assemble(clvm_loop_solution))
cost, sexp = loop_program.run_with_cost(solution_program, INFINITE_COST)
end = time.time()
total_time = end - start
return cost, total_time
def get_cost_compared_to_addition(addition_cost, addition_time, other_time):
return (addition_cost * other_time) / addition_time
def benchmark_all_operators():
addition = "(+ (q 1000000000) (q 1000000000))"
substraction = "(- (q 1000000000) (q 1000000000))"
multiply = "(* (q 1000000000) (q 1000000000))"
greater = "(> (q 1000000000) (q 1000000000))"
equal = "(= (q 1000000000) (q 1000000000))"
if_clvm = "(i (= (q 1000000000) (q 1000000000)) (q 1000000000) (q 1000000000))"
sha256tree = "(sha256 (q 1000000000))"
pubkey_for_exp = "(pubkey_for_exp (q 1))"
point_add = "(point_add"
" (q 0x17f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb)"
" (q 0x17f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb))"
point_add_cost, point_add_time = run_and_return_cost_time(point_add)
addition_cost, addition_time = run_and_return_cost_time(addition)
substraction_cost, substraction_time = run_and_return_cost_time(substraction)
multiply_cost, multiply_time = run_and_return_cost_time(multiply)
greater_cost, greater_time = run_and_return_cost_time(greater)
equal_cost, equal_time = run_and_return_cost_time(equal)
if_cost, if_time = run_and_return_cost_time(if_clvm)
sha256tree_cost, sha256tree_time = run_and_return_cost_time(sha256tree)
pubkey_for_exp_cost, pubkey_for_exp_time = run_and_return_cost_time(pubkey_for_exp)
one_addition = 1
one_substraction = get_cost_compared_to_addition(addition_cost, addition_time, substraction_time) / addition_cost
one_multiply = get_cost_compared_to_addition(addition_cost, addition_time, multiply_time) / addition_cost
one_greater = get_cost_compared_to_addition(addition_cost, addition_time, greater_time) / addition_cost
one_equal = get_cost_compared_to_addition(addition_cost, addition_time, equal_time) / addition_cost
one_if = get_cost_compared_to_addition(addition_cost, addition_time, if_time) / addition_cost
one_sha256 = get_cost_compared_to_addition(addition_cost, addition_time, sha256tree_time) / addition_cost
one_pubkey_for_exp = (
get_cost_compared_to_addition(addition_cost, addition_time, pubkey_for_exp_time) / addition_cost
)
one_point_add = get_cost_compared_to_addition(addition_cost, addition_time, point_add_time) / addition_cost
print(f"cost of addition is: {one_addition}")
print(f"cost of one_substraction is: {one_substraction}")
print(f"cost of one_multiply is: {one_multiply}")
print(f"cost of one_greater is: {one_greater}")
print(f"cost of one_equal is: {one_equal}")
print(f"cost of one_if is: {one_if}")
print(f"cost of one_sha256 is: {one_sha256}")
print(f"cost of one_pubkey_for_exp is: {one_pubkey_for_exp}")
print(f"cost of one_point_add is: {one_point_add}")
if __name__ == "__main__":
"""
Naive way to calculate cost ratio between vByte and CLVM cost unit.
AggSig has assigned cost of 20vBytes, simple CLVM program is benchmarked against it.
"""
wallet_tool = WalletTool(DEFAULT_CONSTANTS)
benchmark_all_operators()
secret_key: PrivateKey = AugSchemeMPL.key_gen(bytes([2] * 32))
puzzles = []
solutions = []
private_keys = []
public_keys = []
for i in range(0, 1000):
private_key: PrivateKey = master_sk_to_wallet_sk(secret_key, uint32(i))
public_key = private_key.public_key()
solution = wallet_tool.make_solution(
{ConditionOpcode.ASSERT_MY_COIN_ID: [ConditionWithArgs(ConditionOpcode.ASSERT_MY_COIN_ID, [token_bytes()])]}
)
puzzle = puzzle_for_pk(bytes(public_key))
puzzles.append(puzzle)
solutions.append(solution)
private_keys.append(private_key)
public_keys.append(public_key)
# Run Puzzle 1000 times
puzzle_start = time.time()
clvm_cost = 0
for i in range(0, 1000):
cost_run, sexp = puzzles[i].run_with_cost(solutions[i], INFINITE_COST)
clvm_cost += cost_run
puzzle_end = time.time()
puzzle_time = puzzle_end - puzzle_start
print(f"Puzzle_time is: {puzzle_time}")
print(f"Puzzle cost sum is: {clvm_cost}")
private_key = master_sk_to_wallet_sk(secret_key, uint32(0))
public_key = private_key.get_g1()
message = token_bytes()
signature = AugSchemeMPL.sign(private_key, message)
pk_message_pair = (public_key, message)
# Run AggSig 1000 times
agg_sig_start = time.time()
agg_sig_cost = 0
for i in range(0, 1000):
valid = AugSchemeMPL.verify(public_key, message, signature)
assert valid
agg_sig_cost += 20
agg_sig_end = time.time()
agg_sig_time = agg_sig_end - agg_sig_start
print(f"Aggsig Cost: {agg_sig_cost}")
print(f"Aggsig time is: {agg_sig_time}")
# clvm_should_cost = agg_sig_cost * puzzle_time / agg_sig_time
clvm_should_cost = (agg_sig_cost * puzzle_time) / agg_sig_time
print(f"Puzzle should cost: {clvm_should_cost}")
constant = clvm_should_cost / clvm_cost
format = float_to_str(constant)
print(f"Constant factor: {format}")
print(f"CLVM RATIO MULTIPLIER: {1/constant}")
|
[
"faurepierre78@yahoo.com"
] |
faurepierre78@yahoo.com
|
14ec9f43cb1eeea48224b48dd0cbf8762a59731f
|
cb1ce85f80c9315d2bb0342badc0998f416839b2
|
/apps/childcount/commands/LangCommand.py
|
57ab33f976300e27f4c1760f7ed1a7379c8df070
|
[] |
no_license
|
katembu/move-it
|
eb609529d6527694aa1d1c9dbc972f70cd921e5d
|
86c44f5228811bdcba6fa609bf9d8c9d8e46263a
|
HEAD
| 2016-09-05T14:51:47.105500
| 2012-04-12T20:28:08
| 2012-04-12T20:28:08
| 4,008,730
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,883
|
py
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 coding=utf-8
# maintainer: rgaudin
from django.utils.translation import ugettext as _, activate
from childcount.commands import CCCommand
from childcount.models import Patient
from childcount.utils import authenticated
class LangCommand(CCCommand):
''' '''
KEYWORDS = {
'en': ['lang'],
'fr': ['lang'],
}
@authenticated
def process(self):
chw = self.message.persistant_connection.reporter.chw
# warn if no lang specified
if self.params.__len__() < 2:
self.message.respond(_(u"Your language preference is set " \
"to: %(lang)s. Change it by sending your " \
"new language preference code.") \
% {'lang': chw.language.upper()})
return True
newlang = self.params[1].strip()
if chw.language == newlang:
self.message.respond(_(u"Your language preference is already " \
"set to: %(lang)s.") \
% {'lang': chw.language.upper()})
return True
if newlang not in self.KEYWORDS:
self.message.respond(_(u"That language preference code " \
"(%(code)s) is not valid.") \
% {'code': newlang.upper()})
return True
oldlang = chw.language
chw.language = newlang
chw.save()
activate(chw.language)
self.message.respond(_(u"Your language preference has been changed " \
"from %(old)s to %(new)s. ") % \
{'old': oldlang.upper(), \
'new': chw.language.upper()})
return True
|
[
"rgaudin@gmail.com"
] |
rgaudin@gmail.com
|
5b7863a23b2e472cbe0ecf5c16042e25ba4016f6
|
f9f074c44b67a11d4630b5e1cc15e016e8d73cc8
|
/factory-ai-vision/EdgeSolution/modules/ModelManagerModule/app/downloader/tools/accuracy_checker/tests/test_reid_metrics.py
|
915fd137bf4441ef1b2f02defee828853bcd82fa
|
[
"MIT"
] |
permissive
|
Azure-Samples/azure-intelligent-edge-patterns
|
361694680c7e48d3761c5416175788355b684dcd
|
1d2f42cbf9f21157c1e1abf044b26160dfed5b16
|
refs/heads/master
| 2023-05-26T13:15:47.085088
| 2023-02-28T17:25:53
| 2023-02-28T17:25:53
| 186,706,933
| 193
| 164
|
MIT
| 2023-02-28T17:25:55
| 2019-05-14T22:02:41
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,410
|
py
|
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from accuracy_checker.metrics.reid import eval_cmc
class TestCMC:
def test_only_distance_matrix(self):
distance_matrix = np.array([
[0, 1, 2, 3, 4],
[1, 0, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[1, 2, 3, 4, 0]
])
m, n = distance_matrix.shape
result = eval_cmc(
distance_matrix,
query_ids=np.arange(m),
gallery_ids=np.arange(n),
query_cams=np.zeros(m).astype(np.int32),
gallery_cams=np.ones(n).astype(np.int32)
)
assert np.all(result[:5] == [0.6, 0.6, 0.8, 1.0, 1.0])
def test_duplicate_ids(self):
distance_matrix = np.array([
[0, 1, 2, 3],
[0, 1, 2, 3],
[0, 1, 2, 3],
[0, 1, 2, 3]
])
result = eval_cmc(
distance_matrix,
query_ids=np.array([0, 0, 1, 1]),
gallery_ids=np.array([0, 0, 1, 1]),
top_k=4,
gallery_cams=np.ones(distance_matrix.shape[1]).astype(np.int32),
query_cams=np.zeros(distance_matrix.shape[0]).astype(np.int32),
separate_camera_set=False,
single_gallery_shot=False
)
assert np.all(result == [0.5, 0.5, 1, 1])
def test_duplicate_cams(self):
distance_matrix = np.tile(np.arange(5), (5, 1))
result = eval_cmc(
distance_matrix,
query_ids=np.array([0, 0, 0, 1, 1]),
gallery_ids=np.array([0, 0, 0, 1, 1]),
query_cams=np.array([0, 0, 0, 0, 0]),
gallery_cams=np.array([0, 1, 1, 1, 1]),
top_k=5,
separate_camera_set=False,
single_gallery_shot=False
)
assert np.all(result == [0.6, 0.6, 0.6, 1, 1])
|
[
"waitingkuo0527@gmail.com"
] |
waitingkuo0527@gmail.com
|
c1d02968598f3beb707555c296286cadd368736f
|
64bf39b96a014b5d3f69b3311430185c64a7ff0e
|
/intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/t_systems_mms/icinga_director/plugins/modules/icinga_hostgroup.py
|
84adda4c8cdc3654cebc548d0afb7e85971573d0
|
[
"GPL-3.0-only",
"MIT"
] |
permissive
|
SimonFangCisco/dne-dna-code
|
7072eba7da0389e37507b7a2aa5f7d0c0735a220
|
2ea7d4f00212f502bc684ac257371ada73da1ca9
|
refs/heads/master
| 2023-03-10T23:10:31.392558
| 2021-02-25T15:04:36
| 2021-02-25T15:04:36
| 342,274,373
| 0
| 0
|
MIT
| 2021-02-25T14:39:22
| 2021-02-25T14:39:22
| null |
UTF-8
|
Python
| false
| false
| 3,623
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 T-Systems Multimedia Solutions GmbH
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
---
module: icinga_hostgroup
short_description: Manage hostgroups in Icinga2
description:
- Add or remove a hostgroup to Icinga2 through the director API.
author: Sebastian Gumprich (@rndmh3ro)
extends_documentation_fragment:
- ansible.builtin.url
- t_systems_mms.icinga_director.common_options
version_added: '1.0.0'
notes:
- This module supports check mode.
options:
state:
description:
- Apply feature state.
choices: [ "present", "absent" ]
default: present
type: str
object_name:
description:
- Icinga object name for this hostgroup.
aliases: ['name']
required: true
type: str
display_name:
description:
- An alternative display name for this group.
- If you wonder how this could be helpful just leave it blank.
type: str
assign_filter:
description:
- This allows you to configure an assignment filter.
- Please feel free to combine as many nested operators as you want.
type: str
"""
EXAMPLES = """
- name: Create hostgroup
t_systems_mms.icinga_director.icinga_hostgroup:
state: present
url: "{{ icinga_url }}"
url_username: "{{ icinga_user }}"
url_password: "{{ icinga_pass }}"
object_name: foohostgroup
display_name: foohostgroup
assign_filter: 'host.name="foohost"'
"""
RETURN = r""" # """
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import url_argument_spec
from ansible_collections.t_systems_mms.icinga_director.plugins.module_utils.icinga import (
Icinga2APIObject,
)
# ===========================================
# Module execution.
#
def main():
# use the predefined argument spec for url
argument_spec = url_argument_spec()
# add our own arguments
argument_spec.update(
state=dict(default="present", choices=["absent", "present"]),
url=dict(required=True),
object_name=dict(required=True, aliases=["name"]),
display_name=dict(required=False),
assign_filter=dict(required=False),
)
# Define the main module
module = AnsibleModule(
argument_spec=argument_spec, supports_check_mode=True
)
data = {
"object_name": module.params["object_name"],
"object_type": "object",
"display_name": module.params["display_name"],
"assign_filter": module.params["assign_filter"],
}
icinga_object = Icinga2APIObject(
module=module, path="/hostgroup", data=data
)
changed, diff = icinga_object.update(module.params["state"])
module.exit_json(
changed=changed,
diff=diff,
)
# import module snippets
if __name__ == "__main__":
main()
|
[
"sifang@cisco.com"
] |
sifang@cisco.com
|
e22152a4d7fccfed5a456ac24264e156583c5444
|
b57d337ddbe946c113b2228a0c167db787fd69a1
|
/scr/Spell390 - Remove Blindness Deafness.py
|
6dda7d4d7cf28406c2fe25e031c3232ca63bf139
|
[] |
no_license
|
aademchenko/ToEE
|
ebf6432a75538ae95803b61c6624e65b5cdc53a1
|
dcfd5d2de48b9d9031021d9e04819b309d71c59e
|
refs/heads/master
| 2020-04-06T13:56:27.443772
| 2018-11-14T09:35:57
| 2018-11-14T09:35:57
| 157,520,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,140
|
py
|
from toee import *
def OnBeginSpellCast( spell ):
print "Remove Blindness/Deafness OnBeginSpellCast"
print "spell.target_list=", spell.target_list
print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
game.particles( "sp-conjuration-conjure", spell.caster )
def OnSpellEffect ( spell ):
print "Remove Blindness/Deafness OnSpellEffect"
spell.duration = 0
target = spell.target_list[0]
## Solves Radial menu problem for Wands/NPCs
spell_arg = spell.spell_get_menu_arg( RADIAL_MENU_PARAM_MIN_SETTING )
if spell_arg != 1 and spell_arg != 2:
spell_arg = 2
game.particles( 'sp-Remove Blindness Deafness', target.obj )
if spell_arg == 1:
# apply remove blindness
target.obj.condition_add_with_args( 'sp-Remove Blindness', spell.id, spell.duration, 0 )
else:
# apply deafness
target.obj.condition_add_with_args( 'sp-Remove Deafness', spell.id, spell.duration, 0 )
spell.target_list.remove_target( target.obj )
spell.spell_end(spell.id)
def OnBeginRound( spell ):
print "Remove Blindness/Deafness OnBeginRound"
def OnEndSpellCast( spell ):
print "Remove Blindness/Deafness OnEndSpellCast"
|
[
"demchenko.recruitment@gmail.com"
] |
demchenko.recruitment@gmail.com
|
11c0ce16ec9d3be3a1ce94d97b03ab2c20e62f75
|
d996edcd595c565c5725a16286ce8d338af67246
|
/src/text_classification_benchmarks/bi_lstm/__init__.py
|
d1f2c6b6a9147402c96cf472d5bb51324ae16b49
|
[] |
no_license
|
preddy5/dltemplate
|
fbbfce7660c451495e255cf8d8437e4b4e207f9c
|
77b04b767cbd4914e0a3d3609c645e475aabcc43
|
refs/heads/master
| 2020-04-28T19:37:04.893001
| 2019-03-13T13:35:04
| 2019-03-13T13:35:04
| 175,517,056
| 1
| 1
| null | 2019-03-13T23:59:40
| 2019-03-13T23:59:39
| null |
UTF-8
|
Python
| false
| false
| 3,547
|
py
|
from argparse import ArgumentParser
from common.util import load_hyperparams, merge_dict
import csv
import os
from text_classification_benchmarks.bi_lstm.util import batch_iter, load_dataset, test, train
def run(constant_overwrites):
    """Train or evaluate the Bi-LSTM classifier.

    Loads hyperparams.yml (next to this file), overrides entries with
    `constant_overwrites` (CLI args), then either evaluates a checkpoint
    (constants['test'] truthy) and dumps predictions to CSV, or trains.
    """
    config_path = os.path.join(os.path.dirname(__file__), 'hyperparams.yml')
    constants = merge_dict(load_hyperparams(config_path), constant_overwrites)
    outdir = constants['outdir']
    run_dir = constants['run_dir']
    # load_dataset returns padded sequences plus their true lengths for both splits.
    x_train, y_train, train_lengths, x_val, y_val, val_lengths, max_length, vocab_size, classes = \
        load_dataset(outdir, dirname=constants['data_dir'], vocab_name=constants['vocab_name'])
    if constants['test']:
        print('\nTesting...')
        preds = test(x_val, y_val, val_lengths, constants['test_batch_size'], run_dir, constants['checkpoint'],
                     constants['model_name'])
        # Save all predictions
        with open(os.path.join(run_dir, 'predictions.csv'), 'w', encoding='utf-8', newline='') as f:
            csvwriter = csv.writer(f)
            csvwriter.writerow(['True class', 'Prediction'])
            for i in range(len(preds)):
                csvwriter.writerow([y_val[i], preds[i]])
            print('Predictions saved to {}'.format(os.path.join(run_dir, 'predictions.csv')))
    else:
        print('\nTraining...')
        # batch_iter yields shuffled (x, y, lengths) batches for n_epochs passes.
        train_data = batch_iter(x_train, y_train, train_lengths, constants['batch_size'], constants['n_epochs'])
        train(train_data, x_val, y_val, val_lengths, len(classes), vocab_size,
              constants['n_hidden'], constants['n_layers'],
              constants['l2_reg_lambda'], constants['learning_rate'],
              constants['decay_steps'], constants['decay_rate'],
              constants['keep_prob'], outdir, constants['num_checkpoint'],
              constants['evaluate_every_steps'], constants['save_every_steps'],
              constants['summaries_name'], constants['model_name'])
if __name__ == '__main__':
    # read args
    # Every flag maps onto a key in hyperparams.yml via `dest`; unspecified
    # flags stay None and are ignored by merge_dict in run().
    parser = ArgumentParser(description='Run Bi-LSTM Classifier')
    parser.add_argument('--epochs', dest='n_epochs', type=int, help='number epochs')
    parser.add_argument('--batch-size', dest='batch_size', type=int, help='batch size')
    parser.add_argument('--hidden-size', dest='n_hidden', type=int, help='number hidden layers')
    parser.add_argument('--embedding-size', dest='embedding_size', type=int, help='embedding size')
    parser.add_argument('--num-layers', dest='n_layers', type=int, help='number LSTM cells')
    parser.add_argument('--learning-rate', dest='learning_rate', type=float, help='learning rate')
    parser.add_argument('--outdir', dest='outdir', type=str, help='save directory')
    parser.add_argument('--rundir', dest='run_dir', type=str, help='run directory')
    parser.add_argument('--data-dir', dest='data_dir', type=str, help='relative path to data')
    parser.add_argument('--model-name', dest='model_name', type=str, help='model name')
    parser.add_argument('--vocab-name', dest='vocab_name', type=str, help='vocab name')
    parser.add_argument('--summaries-name', dest='summaries_name', type=str, help='summaries name')
    parser.add_argument('--checkpoint', dest='checkpoint', type=str,
                        help='restore the graph from this model checkpoint')
    parser.add_argument('--test', dest='test',
                        help='run eval on the test dataset using a fixed checkpoint', action='store_true')
    parser.set_defaults(test=False)
    args = parser.parse_args()
    # vars() turns the Namespace into the dict run() expects.
    run(vars(args))
|
[
"markmo@me.com"
] |
markmo@me.com
|
0058be8e27a678bd5d55c56eddcdf0e87555f32f
|
eef6f6e1074a105a4554c79a80d18d5507d5c886
|
/liir/nlp/ml/eval/ConfusionMatrix.py
|
92bda9c9b36c5ecbae53b136ba97c94fbc75a722
|
[] |
no_license
|
quynhdtn/DL
|
0dc09359fd0bb1280cd005f28c454126dc101dc4
|
017ea76a1f926e989581cd6c41f984c8651a5e0d
|
refs/heads/master
| 2021-01-10T04:51:06.354273
| 2015-11-12T12:48:57
| 2015-11-12T12:48:57
| 46,052,200
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,400
|
py
|
__author__ = 'quynhdo'
import numpy as np
class ConfusionMatrix:
    """Square confusion matrix with per-class precision/recall/F1.

    Rows index the true class, columns the predicted class.
    """

    def __init__(self, n_classes, class_names=None):
        """
        Args:
            n_classes: number of classes (matrix is n_classes x n_classes).
            class_names: optional list of labels (kept but not used in output).
        """
        self.n_classes = n_classes
        self.class_names = class_names
        # mat[true, predicted] = count
        self.mat = np.zeros((n_classes, n_classes), dtype='int')

    def addBatch(self, y_true, y_predicted):
        """Accumulate a batch of (true, predicted) label pairs."""
        assert len(y_true) == len(y_predicted)
        for t, p in zip(y_true, y_predicted):
            self.mat[t, p] += 1

    def __str__(self):
        # Tab-separated table: header row of class indices, then one row per true class.
        s = "\t"
        for idx in range(self.n_classes):
            s += str(idx) + "\t"
        s += "\n"
        for i in range(len(self.mat)):
            s += str(i) + "\t"
            for j in range(len(self.mat[i])):
                s += str(self.mat[i][j]) + "\t"
            s += "\n"
        return s

    def getScore(self):
        """Return an (n_classes, 3) np.matrix of [precision, recall, F1] in percent.

        Classes with no predictions / no instances / zero P+R score 0 instead
        of NaN or None. (Fixes the original implementation, which overwrote
        the whole F array inside the per-class loop -- possibly with None --
        and divided by zero for empty classes.)
        """
        num_instances = np.sum(self.mat, axis=1)
        predict = np.sum(self.mat, axis=0)
        correct = np.diag(self.mat).flatten()
        # 100.0 * first forces float arithmetic; maximum(..., 1) avoids 0/0
        # (the np.where mask then zeroes those entries anyway).
        p = np.where(predict > 0, 100.0 * correct / np.maximum(predict, 1), 0.0)
        r = np.where(num_instances > 0, 100.0 * correct / np.maximum(num_instances, 1), 0.0)
        f = np.zeros(len(p))
        nz = (p + r) > 0
        f[nz] = 2.0 * p[nz] * r[nz] / (p[nz] + r[nz])
        return np.matrix([p, r, f]).transpose()
if __name__ == "__main__":
    # Smoke demo: 3-class matrix from four (true, predicted) pairs.
    demo = ConfusionMatrix(3)
    demo.addBatch([1, 2, 1, 0], [2, 2, 0, 0])
    print(demo.__str__())
    print(demo.getScore())
|
[
"quynhdtn.hut@gmail.com"
] |
quynhdtn.hut@gmail.com
|
3c4a8df0cddd9fb678ac426f9645c2dd41ee0171
|
d7e642d34530db686f76e835c7594543adebfedc
|
/例子-1122-04.函数的参数传递.py
|
58bef1ce8a72d0c07e42a9c879d7e34f482b366c
|
[] |
no_license
|
blackplusy/python1119
|
929c61f0b896737d58b77eef50428002ec2f254f
|
546ef52f29a084fd6b1d3d8df78ff0a74a3dfd71
|
refs/heads/master
| 2020-04-07T06:42:56.901263
| 2019-02-26T02:17:54
| 2019-02-26T02:17:54
| 158,148,121
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
# Teaching script: demonstrates Python argument-passing styles by
# redefining `animal` several times (each def replaces the previous one).
def animal(pet1,pet2):
    print(pet1+' wang!'+pet2+' miao')
# Call the function with two positional arguments.
animal('dog','cat')
def animal(pet1,pet2):
    print(pet1+' wang!'+pet2+' miao')
# Keyword arguments can be passed in any order.
animal(pet2='cat',pet1='dog')
# `pet1` now has a default value, so a single positional argument suffices.
def animal(pet2,pet1='2ha'):
    print(pet1+' wang!'+pet2+' miao')
animal('bosi')
animal('pig','out man')
print('************************************')
# *args collects extra positional arguments into a tuple.
def test(x,y,*args):
    print(x,y,args)
test(1,2,'heygor','simida')
print('************************************')
# **args collects extra keyword arguments into a dict.
def test1(x,y,**args):
    print(x,y,args)
test1(1,2,a=9,b='heygor',c=300)
|
[
"noreply@github.com"
] |
blackplusy.noreply@github.com
|
0d983221abcf4857628f47481dd34d54c9271ec3
|
2bc21de1b3204c677d2912f24a82ba473d90bcf1
|
/Comprehensions/09. Bunker.py
|
2748268e79e762e41f5d8ba64de754b3cb2beea7
|
[
"MIT"
] |
permissive
|
milenpenev/Python_Advanced
|
24260bbdf5b6682157acb2d73f6f77d1cdba97b1
|
2f32012dd682fa9541bbf5fa155f6bdbcfa946de
|
refs/heads/main
| 2023-05-30T16:45:09.599304
| 2021-06-27T16:00:35
| 2021-06-27T16:00:35
| 351,788,193
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 858
|
py
|
# Read the category list and n item lines of the form
# "<category> - <name> - quantity:<q>;quality:<ql>", then report totals.
categories = input().split(", ")
n = int(input())
# One (initially empty) bucket per declared category.
inventory = {category: [] for category in categories}
for _ in range(n):
    category, item_name, quantity_quality = input().split(" - ")
    quantity, quality = quantity_quality.split(";")
    # Strip the "quantity:" / "quality:" prefixes, keep the numeric parts.
    quantity, quality = quantity.split(":")[1], quality.split(":")[1]
    quantity, quality = int(quantity), int(quality)
    inventory[category].append({"name": item_name, "quantity": quantity, "quality": quality})
# NOTE(review): the average divides summed quality by the number of
# *categories*, not the number of items -- presumably what the exercise asks.
total_items = sum([item["quantity"] for items in inventory.values() for item in items])
avg_quality = sum([item["quality"] for items in inventory.values() for item in items])/ len(categories)
print(f"Count of items: {total_items}")
print(f"Average quality: {avg_quality:.2f}")
print('\n'.join(f'{category} -> {", ".join(item["name"] for item in inventory[category])}' for category in categories))
|
[
"milennpenev@gmail.com"
] |
milennpenev@gmail.com
|
3c1d61bdcad5d0817c3e678f870318b7bb51cbd1
|
13d2fe21726a2a528b6eb165e6a5ebe13f08b9f2
|
/sqltext/lianxi17.py
|
2676fe262b34e540e075a06562a572641944c69e
|
[] |
no_license
|
123456thomas/python_learn
|
aa49e898b9ede549a3e1c376eae10f0f1c09ca7d
|
9891d2e988ebf8896360f60a24d61430e538bf3e
|
refs/heads/master
| 2020-04-16T10:01:55.245420
| 2019-01-13T10:30:13
| 2019-01-13T10:35:11
| 165,487,072
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
import pymysql
# One-off schema setup: create table `test2` with a foreign key to `test1`.
# NOTE(review): credentials are hard-coded; move to config/env for real use.
# The connection/cursor are never closed and there is no commit -- fine for
# DDL on MySQL (implicit commit), but a `with`/close would be cleaner.
con=pymysql.connect(host="localhost", user="root", password="361365",
                    database="mytest", port=3306)
cur=con.cursor()
# cur.execute("create table test1(Id int primary key,Name varchar(20) not null)")
cur.execute("create table test2(id int primary key,name varchar(20) not null,userid int, foreign key(userid) references test1(Id))")
|
[
"17625809083@sina.cn"
] |
17625809083@sina.cn
|
a1d7d3f60a17d5091571fd8669f336b136cab517
|
f6d7c30a7ed343e5fe4859ceaae1cc1965d904b7
|
/htdocs/submissions/a1d7d3f60a17d5091571fd8669f336b136cab517.py
|
aea45ae8c7ebf0de25e7c0894f3e9f43a33fa3e3
|
[] |
no_license
|
pycontest/pycontest.github.io
|
ed365ebafc5be5d610ff9d97001240289de697ad
|
606015cad16170014c41e335b1f69dc86250fb24
|
refs/heads/master
| 2021-01-10T04:47:46.713713
| 2016-02-01T11:03:46
| 2016-02-01T11:03:46
| 50,828,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
# Lookup tables for a 7-segment renderer (code-golf submission, Python 2:
# relies on the builtin `reduce`).
# `d` maps a segment code to one 3-character display cell; `k` packs, for
# digits 0-9, the codes of their top (k[0:10]), middle (k[10:20]) and
# bottom (k[20:30]) rows.
d = {'0': '   ', '1': ' _ ', '2': '  |', '3': ' _|', '4': '|_ ', '5': '|_|', '6': '| |'}
k = '101101111162335442555243235253'

def f(x, y):
    """Fold step: append digit character y's three rows onto accumulator x."""
    i = int(y)
    x[0] += d[k[i]]
    x[1] += d[k[i + 10]]
    x[2] += d[k[i + 20]]
    return x

def seven_seg(x):
    """Render digit string x as a 3-line seven-segment display string."""
    rows = reduce(f, x, ['', '', ''])
    return reduce(lambda acc, row: acc + row + '\n', rows, '')
|
[
"info@pycontest.net"
] |
info@pycontest.net
|
d370b6da42bc60d3d21c9bdde90a9441a4a77354
|
c33496682b760deac61fedecba3e82ce4e41dfde
|
/scripts/e259.py
|
d66c4c06b2e02569f32f6d438c8330a5424b6a19
|
[
"MIT"
] |
permissive
|
ferasalsaab/neuralnilm_prototype
|
c5e9cde02d475ac499b15fea62143e76adff07d0
|
2119292e7d5c8a137797ad3c9abf9f37e7f749af
|
refs/heads/master
| 2020-04-16T14:38:03.615279
| 2018-01-29T15:30:43
| 2018-01-29T15:30:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,253
|
py
|
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from lasagne.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
from copy import deepcopy
from math import sqrt
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e233
based on e131c but with:
* lag=32
* pool
e234
* init final layer and conv layer
235
no lag
236
should be exactly as 131c: no pool, no lag, no init for final and conv layer
237
putting the pool back
238
seems pooling hurts us! disable pooling.
enable lag = 32
239
BLSTM
lag = 20
240
LSTM not BLSTM
various lags
241
output is prediction
252
attempt to predict fdiff 1 sample ahead. Unfair?
253
regurgitate fdiff from 1 sample ago
254
lag of 10 time steps
255
lag of 5 time steps
257
slowly increasing lag
258
output is different appliances diff
259
start off just trying to regurgitate diff of aggregate
then swap to disaggregation (to diff)
"""
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[2500]*5,
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
# skip_probability=0.0,
n_seq_per_batch=50,
# subsample_target=5,
include_diff=True,
include_power=False,
clip_appliance_power=True,
target_is_prediction=True,
lag=1,
target_is_diff=True
)
def change_learning_rate(net, epoch, learning_rate):
    """Epoch callback: install a new Nesterov-momentum update rule with the
    given learning rate and recompile the net."""
    net.updates = partial(nesterov_momentum, learning_rate=learning_rate)
    net.compile()
def change_lag(net, epoch, lag):
    """Epoch callback: change the data source's target lag (in samples) and
    recompile the net so the new lag takes effect."""
    net.source.lag = lag
    net.compile()
from theano.ifelse import ifelse
import theano.tensor as T
THRESHOLD = 0
def scaled_cost(x, t):
    """Balanced MSE (Theano expression): mean squared error computed
    separately over targets above and below THRESHOLD, then averaged 50/50
    so sparse above-threshold samples are not swamped by the majority."""
    sq_error = (x - t) ** 2
    def mask_and_mean_sq_error(mask):
        # Mean over the masked elements; ifelse replaces the NaN produced by
        # .mean() on an empty selection with 0.
        masked_sq_error = sq_error[mask.nonzero()]
        mean = masked_sq_error.mean()
        mean = ifelse(T.isnan(mean), 0.0, mean)
        return mean
    above_thresh_mean = mask_and_mean_sq_error(t > THRESHOLD)
    below_thresh_mean = mask_and_mean_sq_error(t <= THRESHOLD)
    return (above_thresh_mean + below_thresh_mean) / 2.0
def new_source(net, epoch):
    """Layer-change callback: swap the self-prediction ('regurgitate diff')
    source for a real disaggregation source and switch the loss to
    scaled_cost (see the experiment notes in the module docstring)."""
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy['target_is_prediction'] = False
    net.source = RealApplianceSource(**source_dict_copy)
    net.generate_validation_data_and_set_shapes()
    net.loss_function = scaled_cost
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=mse,
updates=partial(nesterov_momentum, learning_rate=0.1),
layers_config=[
{
'type': LSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1.)
}
],
layer_changes={
1001: {
'remove_from': -3,
'callback': new_source,
'new_layers': [
{
'type': DenseLayer,
'num_units': 5,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(50)))
}
]
}
}
)
def exp_x(name):
    """Build the Net for experiment *name*.

    Restores the RealApplianceSource construction (it was commented out,
    leaving the module-level `source` undefined so every call raised
    NameError), appends the output DenseLayer sized to the source's number
    of outputs, and returns the constructed Net.
    """
    global source
    source = RealApplianceSource(**source_dict)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source
    ))
    # Final linear layer: one output unit per target appliance channel.
    net_dict_copy['layers_config'].append(
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': None,
            'W': Normal(std=(1 / sqrt(50)))
        }
    )
    return Net(**net_dict_copy)
def main():
    """Prepare and run experiment 'a'.

    Best-effort error handling: Ctrl-C aborts quietly; any other failure is
    printed so a batch of experiment scripts can keep going. The original
    duplicate TrainingError / Exception handlers (identical bodies) are
    merged -- TrainingError is an ordinary Exception subclass.
    """
    experiment = 'a'
    full_exp_name = NAME + experiment
    path = os.path.join(PATH, full_exp_name)
    print("***********************************")
    print("Preparing", full_exp_name, "...")
    try:
        net = exp_x(full_exp_name)
        run_experiment(net, path, epochs=None)
    except KeyboardInterrupt:
        return
    except Exception as exception:
        print("EXCEPTION:", exception)
if __name__ == "__main__":
main()
|
[
"jack-list@xlk.org.uk"
] |
jack-list@xlk.org.uk
|
31bc673f6d080c081b787817a51e382be8d91600
|
8e69eee9b474587925e22413717eb82e4b024360
|
/v2.5.7/otp/chat/TalkHandle.py
|
d5919401c995d3c50e5367a9d6249955c0aa44df
|
[
"MIT"
] |
permissive
|
TTOFFLINE-LEAK/ttoffline
|
afaef613c36dc3b70514ccee7030ba73c3b5045b
|
bb0e91704a755d34983e94288d50288e46b68380
|
refs/heads/master
| 2020-06-12T15:41:59.411795
| 2020-04-17T08:22:55
| 2020-04-17T08:22:55
| 194,348,185
| 5
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,749
|
py
|
from otp.avatar.AvatarHandle import AvatarHandle
class TalkHandle(AvatarHandle):
    """Lightweight stand-in for an avatar known only through chat traffic.

    Identity fields start unknown and are filled in lazily from whichever
    side (sender or receiver) of observed messages matches our avatarId.
    """

    def __init__(self, doId, message):
        self.avatarId = doId
        self.avatarName = None
        self.accountId = None
        self.accountName = None
        self.addMessageInfo(message)

    def getName(self):
        return self.avatarName

    def isUnderstandable(self):
        # Handles are never "friends", so their chat is not understandable.
        return False

    def isOnline(self):
        return False

    def addMessageInfo(self, message):
        """Fill in any still-unknown identity fields from this message."""
        if self.avatarId == message.getSenderAvatarId():
            name = message.getSenderAvatarName()
            acct_id = message.getSenderAccountId()
            acct_name = message.getSenderAccountName()
        elif self.avatarId == message.getReceiverAvatarId():
            name = message.getReceiverAvatarName()
            acct_id = message.getReceiverAccountId()
            acct_name = message.getReceiverAccountName()
        else:
            return
        # Only fill fields we do not already know.
        if not self.avatarName and name:
            self.avatarName = name
        if not self.accountId and acct_id:
            self.accountId = acct_id
        if not self.accountName and acct_name:
            self.accountName = acct_name

    def setTalkWhisper(self, fromAV, fromAC, avatarName, chat, mods, flags):
        """Scrub the incoming whisper text and hand it to the talk assistant."""
        cleaned, scrubbed = localAvatar.scrubTalk(chat, mods)
        base.talkAssistant.receiveWhisperTalk(
            fromAV, avatarName, fromAC, None,
            self.avatarId, self.getName(), cleaned, scrubbed)
|
[
"s0mberdemise@protonmail.com"
] |
s0mberdemise@protonmail.com
|
f4a0ac7b71c7b755a827b97478b5f834db35f4d6
|
39e647e9ec8524a1cee90ef15f37a3d3bbf8ac43
|
/poet/trunk/pythonLibs/Django-1.3/tests/regressiontests/file_uploads/views.py
|
f1cd8a65ea56a2d24fc625d24925f4d5b6483f61
|
[
"BSD-3-Clause"
] |
permissive
|
AgileAdaptiveTools/POETTools
|
85158f043e73b430c1d19a172b75e028a15c2018
|
60244865dd850a3e7346f9c6c3daf74ea1b02448
|
refs/heads/master
| 2021-01-18T14:46:08.025574
| 2013-01-28T19:18:11
| 2013-01-28T19:18:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,999
|
py
|
import os
from django.core.files.uploadedfile import UploadedFile
from django.http import HttpResponse, HttpResponseServerError
from django.utils import simplejson
from models import FileModel, UPLOAD_TO
from uploadhandler import QuotaUploadHandler, ErroringUploadHandler
from django.utils.hashcompat import sha_constructor
from tests import UNICODE_FILENAME
def file_upload_view(request):
    """
    Check that a file upload can be updated into the POST dictionary without
    going pear-shaped.
    """
    # Merge FILES into a mutable copy of POST so both can be inspected together.
    form_data = request.POST.copy()
    form_data.update(request.FILES)
    # Python 2 codebase: `unicode` is the builtin text type here.
    if isinstance(form_data.get('file_field'), UploadedFile) and isinstance(form_data['name'], unicode):
        # If a file is posted, the dummy client should only post the file name,
        # not the full path.
        if os.path.dirname(form_data['file_field'].name) != '':
            return HttpResponseServerError()
        return HttpResponse('')
    else:
        return HttpResponseServerError()
def file_upload_view_verify(request):
    """
    Use the sha digest hash to verify the uploaded contents.

    Each form field ``<key>`` is paired with ``<key>_hash``; the view
    recomputes the SHA of the submitted value and compares.
    """
    form_data = request.POST.copy()
    form_data.update(request.FILES)
    for key, value in form_data.items():
        # Skip the hash fields themselves and any field without a paired hash.
        if key.endswith('_hash'):
            continue
        if key + '_hash' not in form_data:
            continue
        submitted_hash = form_data[key + '_hash']
        # Hash file uploads by content, plain fields by their string value.
        if isinstance(value, UploadedFile):
            new_hash = sha_constructor(value.read()).hexdigest()
        else:
            new_hash = sha_constructor(value).hexdigest()
        if new_hash != submitted_hash:
            return HttpResponseServerError()
    # Adding large file to the database should succeed
    largefile = request.FILES['file_field2']
    obj = FileModel()
    obj.testfile.save(largefile.name, largefile)
    return HttpResponse('')
def file_upload_unicode_name(request):
    """
    Verify that a file whose name contains non-ASCII characters survives the
    upload and a round-trip through FileModel storage.
    """
    # Check to see if unicode name came through properly.
    if not request.FILES['file_unicode'].name.endswith(UNICODE_FILENAME):
        return HttpResponseServerError()
    response = None
    # Check to make sure the exotic characters are preserved even
    # through file save.
    uni_named_file = request.FILES['file_unicode']
    obj = FileModel.objects.create(testfile=uni_named_file)
    full_name = u'%s/%s' % (UPLOAD_TO, uni_named_file.name)
    if not os.path.exists(full_name):
        response = HttpResponseServerError()
    # Cleanup the object with its exotic file name immediately.
    # (shutil.rmtree used elsewhere in the tests to clean up the
    # upload directory has been seen to choke on unicode
    # filenames on Windows.)
    obj.delete()
    os.unlink(full_name)
    if response:
        return response
    else:
        return HttpResponse('')
def file_upload_echo(request):
    """
    Simple view to echo back info about uploaded files for tests.
    """
    # Map each upload field name to the uploaded file's name.
    names = {}
    for field, uploaded in request.FILES.items():
        names[field] = uploaded.name
    return HttpResponse(simplejson.dumps(names))
def file_upload_quota(request):
    """
    Dynamically add in an upload handler.
    """
    # The handler must be installed before request.FILES is first accessed.
    quota_handler = QuotaUploadHandler()
    request.upload_handlers.insert(0, quota_handler)
    return file_upload_echo(request)
def file_upload_quota_broken(request):
    """
    You can't change handlers after reading FILES; this view shouldn't work.
    """
    # Deliberately access FILES first (inside file_upload_echo); inserting a
    # handler afterwards is expected to fail -- the order here is the test.
    response = file_upload_echo(request)
    request.upload_handlers.insert(0, QuotaUploadHandler())
    return response
def file_upload_getlist_count(request):
    """
    Check the .getlist() function to ensure we receive the correct number of files.
    """
    counts = {}
    for field in request.FILES.keys():
        counts[field] = len(request.FILES.getlist(field))
    return HttpResponse(simplejson.dumps(counts))
def file_upload_errors(request):
    # Install a handler that raises while receiving data, to exercise error
    # propagation through the upload machinery.
    request.upload_handlers.insert(0, ErroringUploadHandler())
    return file_upload_echo(request)
|
[
"ssaltzman@mitre.org"
] |
ssaltzman@mitre.org
|
fdf3461a668928331ac2e97d93cf0b12b6516007
|
11f29fef10e684553a452eb40d4b3daa696b87fc
|
/Exercicios/III/questao02.py
|
2b93b4be707623021e68aa37a4e2bff58a932a4a
|
[] |
no_license
|
asferreir/PythonParaZumbis
|
49032f999e054513409d649c9ac0b45a05af5c5d
|
fc59061dd5c64c2c7f95adf2b0d76b5af329fbb2
|
refs/heads/master
| 2020-07-18T05:18:38.253478
| 2019-09-04T14:03:51
| 2019-09-04T14:03:51
| 206,184,479
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
"""
Exercise: read a username and a password, and reject a password equal to the
username -- show an error message and ask for both again until they differ.
(User-facing prompts are runtime strings and stay in Portuguese.)
"""
usuario = input("Informe o usuario: ")
senha = input("Informe a senha: ")
# Re-prompt until the password differs from the username.
while usuario == senha:
    print("Usuario e senha devem ser diferentes!")
    usuario = input("Informe o usuario: ")
    senha = input("Informe a senha: ")
|
[
"havour@gmail.com"
] |
havour@gmail.com
|
639dea54803f71834f720dc313e91386be35dbff
|
ef66e4d2f0ff31298c9ab93aa2c268ecf89311a6
|
/src/regex/accounts/models.py
|
9f14078df236f84e15839098be9357b07969aecc
|
[] |
no_license
|
Clarity-89/regex-it
|
dd8da6fe39374e1722d32943e4545d0ab95f31b6
|
94485198430bffc6aa14e4ed75dbfddb1c9efef9
|
refs/heads/master
| 2020-11-25T01:45:14.365413
| 2019-11-17T14:43:15
| 2019-11-17T14:43:15
| 228,435,557
| 0
| 0
| null | 2019-12-16T17:06:37
| 2019-12-16T17:06:36
| null |
UTF-8
|
Python
| false
| false
| 2,399
|
py
|
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
class UserManager(BaseUserManager):
    """Manager for the email-keyed User model below (no username field)."""
    use_in_migrations = True

    def _create_user(self, email, password, is_staff, is_superuser, **extra_fields):
        """
        Creates and saves a User with the given email and password.
        (Docstring fixed: there is no username on this model.)
        """
        now = timezone.now()
        email = self.normalize_email(email)
        user = self.model(email=email, is_staff=is_staff, is_active=True,
                          is_superuser=is_superuser, date_joined=now, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_user(self, email=None, password=None, **extra_fields):
        # Regular account: neither staff nor superuser.
        return self._create_user(email, password, False, False,
                                 **extra_fields)

    def create_superuser(self, email, password, **extra_fields):
        # Superuser account: both staff and superuser flags set.
        return self._create_user(email, password, True, True,
                                 **extra_fields)
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model that authenticates by unique email (USERNAME_FIELD)
    instead of a username."""
    first_name = models.CharField(_('first name'), max_length=150, blank=True)
    last_name = models.CharField(_('last name'), max_length=150, blank=True)
    email = models.EmailField(_('email address'), unique=True)
    is_staff = models.BooleanField(
        _('staff status'), default=False,
        help_text=_('Designates whether the user can log into this admin site.')
    )
    is_active = models.BooleanField(
        _('active'), default=True,
        help_text=_('Designates whether this user should be treated as '
                    'active. Unselect this instead of deleting accounts.')
    )
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)

    # Custom manager that creates users without a username.
    objects = UserManager()

    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []

    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')

    def get_full_name(self):
        """
        Returns the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()

    def get_short_name(self):
        "Returns the short name for the user."
        return self.first_name
|
[
"sergei@maykinmedia.nl"
] |
sergei@maykinmedia.nl
|
da40c898f3633e1f5c71c0b5368a096825cc4766
|
dc980bbd2bd6078f1e49004afcc710a89ed12565
|
/test/functional/rpc_createmultisig.py
|
673cce4ed55b3bc456e58c27cfee063395103752
|
[
"MIT"
] |
permissive
|
frankjardel/isocoin
|
c21408225399b33f941c303d0e66e0db264a03c2
|
36e3ce6d64839a37c45b6e17aedfb2238c3a5257
|
refs/heads/master
| 2020-03-28T10:11:59.484529
| 2019-07-17T17:06:11
| 2019-07-17T17:06:11
| 148,090,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,736
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test transaction signing using the signrawtransaction* RPCs."""
from test_framework.test_framework import IsocoinTestFramework
import decimal
class RpcCreateMultiSigTest(IsocoinTestFramework):
    """Functional test for createmultisig/addmultisigaddress: builds n-of-m
    multisig addresses for several key counts and address types, funds them,
    spends from them with incremental signing, and checks node balances."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 3

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def get_keys(self):
        """Generate self.nkeys keypairs on node1 and a payout address on node2."""
        node0, node1, node2 = self.nodes
        self.add = [node1.getnewaddress() for _ in range(self.nkeys)]
        self.pub = [node1.getaddressinfo(a)["pubkey"] for a in self.add]
        self.priv = [node1.dumpprivkey(a) for a in self.add]
        self.final = node2.getnewaddress()

    def run_test(self):
        node0,node1,node2 = self.nodes
        # 50 BTC each, rest will be 25 BTC each
        node0.generate(149)
        self.sync_all()
        # Running total of value moved to node2's payout address.
        self.moved = 0
        # Exercise every (nkeys, nsigs, address type) combination.
        for self.nkeys in [3,5]:
            for self.nsigs in [2,3]:
                for self.output_type in ["bech32", "p2sh-segwit", "legacy"]:
                    self.get_keys()
                    self.do_multisig()
        self.checkbalances()

    def checkbalances(self):
        """Mature outstanding coinbases and verify each node's final balance."""
        node0,node1,node2 = self.nodes
        node0.generate(100)
        self.sync_all()
        bal0 = node0.getbalance()
        bal1 = node1.getbalance()
        bal2 = node2.getbalance()
        height = node0.getblockchaininfo()["blocks"]
        assert 150 < height < 350
        # Block subsidy halves from 50 to 25 after block 149 on this chain.
        total = 149*50 + (height-149-100)*25
        assert bal1 == 0
        assert bal2 == self.moved
        assert bal0+bal1+bal2 == total

    def do_multisig(self):
        """Create one nsigs-of-nkeys multisig, fund it, then spend it to
        self.final by signing with nsigs keys in two steps."""
        node0,node1,node2 = self.nodes
        msig = node2.createmultisig(self.nsigs, self.pub, self.output_type)
        madd = msig["address"]
        mredeem = msig["redeemScript"]
        if self.output_type == 'bech32':
            assert madd[0:4] == "bcrt"  # actually a bech32 address
        # compare against addmultisigaddress
        msigw = node1.addmultisigaddress(self.nsigs, self.pub, None, self.output_type)
        maddw = msigw["address"]
        mredeemw = msigw["redeemScript"]
        # addmultisigiaddress and createmultisig work the same
        assert maddw == madd
        assert mredeemw == mredeem
        txid = node0.sendtoaddress(madd, 40)
        tx = node0.getrawtransaction(txid, True)
        # Locate the single output paying the multisig address.
        vout = [v["n"] for v in tx["vout"] if madd in v["scriptPubKey"].get("addresses",[])]
        assert len(vout) == 1
        vout = vout[0]
        scriptPubKey = tx["vout"][vout]["scriptPubKey"]["hex"]
        value = tx["vout"][vout]["value"]
        prevtxs = [{"txid": txid, "vout": vout, "scriptPubKey": scriptPubKey, "redeemScript": mredeem, "amount": value}]
        node0.generate(1)
        # Spend everything minus a fixed fee back to node2's payout address.
        outval = value - decimal.Decimal("0.00001000")
        rawtx = node2.createrawtransaction([{"txid": txid, "vout": vout}], [{self.final: outval}])
        # Sign with nsigs-1 keys first, then complete with the last key.
        rawtx2 = node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs-1], prevtxs)
        rawtx3 = node2.signrawtransactionwithkey(rawtx2["hex"], [self.priv[-1]], prevtxs)
        self.moved += outval
        tx = node0.sendrawtransaction(rawtx3["hex"], True)
        blk = node0.generate(1)[0]
        assert tx in node0.getblock(blk)["tx"]
        txinfo = node0.getrawtransaction(tx, True, blk)
        self.log.info("n/m=%d/%d %s size=%d vsize=%d weight=%d" % (self.nsigs, self.nkeys, self.output_type, txinfo["size"], txinfo["vsize"], txinfo["weight"]))
if __name__ == '__main__':
RpcCreateMultiSigTest().main()
|
[
"jardelfrank42@gmail.com"
] |
jardelfrank42@gmail.com
|
4c85376e7b44cc8dd485f66e1eeb81f4d0bc174a
|
3831421b5f4f294bf8f4089b1f617cfc82c2351a
|
/MyInte/SCRIPTS/assit/chwich.py
|
c90f41f1e1228d8bc2129ef4c70da712a840b91d
|
[] |
no_license
|
jesuel51/MyInte
|
6ce31b813c51e30780115f1a5efcafd8d264ae43
|
817a6df61cb77dedf0e4a586bd09906a4b175e96
|
refs/heads/master
| 2020-05-31T01:46:35.983688
| 2019-06-03T18:17:34
| 2019-06-03T18:17:34
| 190,056,391
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 791
|
py
|
# The core-region electron temperature (Te) from the solver is unreliable, so
# inside the pivot radius `pvt_i` we rebuild Te from the ion-temperature (Ti)
# profile shape, shifted down by the Ti-Te difference at the pivot point.
# NOTE(review): `root` is the enclosing framework's settings tree (OMFIT-style)
# -- confirm against the surrounding project.
nj = root['SETTINGS']['PHYSICS']['nj']
if root['SETTINGS']['PHYSICS']['chwich'][0] == 1:
    pvt_i = root['SETTINGS']['PHYSICS']['chwich'][1]
    # Grid index of the pivot radius on the nj-point profile.
    num = int(pvt_i * (nj - 1)) + 1
    diff_Tie = root['INPUTS']['ONETWOInput']['inone_pre']['namelis1']['tiin'][num] - root['INPUTS']['ONETWOInput']['inone_pre']['namelis1']['tein'][num]
    root['INPUTS']['ONETWOInput']['inone_pre']['namelis1']['tein'][0:num] = root['INPUTS']['ONETWOInput']['inone_pre']['namelis1']['tiin'][0:num] - diff_Tie
    root['INPUTS']['ONETWOInput']['inone_ss']['namelis1']['tein'][0:num] = root['INPUTS']['ONETWOInput']['inone_ss']['namelis1']['tiin'][0:num] - diff_Tie
|
[
"1018910084@qq.com"
] |
1018910084@qq.com
|
7df6658e806cae939fedc20b87c13b405766e5f2
|
ae76a9296e91c50f7394c22281aa20e82315d9fa
|
/transformer/translate.py
|
33020e7efbc6ec514288094d034f6f892742adae
|
[] |
no_license
|
PeterDing/mxnet-learning
|
3cdd56162417027e07d0a3e56001018038b3fafe
|
b3cab40e650d20ee183ede63fc8c6c4ea0d8d582
|
refs/heads/master
| 2022-10-31T18:27:05.035498
| 2018-06-20T02:59:21
| 2018-06-20T02:59:21
| 129,977,891
| 1
| 1
| null | 2022-10-27T16:49:44
| 2018-04-17T23:49:33
|
Python
|
UTF-8
|
Python
| false
| false
| 593
|
py
|
from mxnet import nd
from data import make_src_mask, make_trg_mask
MAX_LEN = 20
def translate(net, src, trg_vocab, s_pad, t_bos, t_eos, t_pad):
    """Greedy decode: grow the target sequence one token at a time until EOS
    or MAX_LEN tokens, re-running the transformer on each step.

    Args (assumed from usage -- confirm against data.py / the model):
        net: transformer taking (src, trg, src_mask, trg_mask).
        src: batched source token array (single sequence expected).
        trg_vocab: vocabulary with to_indices()/to_tokens().
        s_pad, t_bos, t_eos, t_pad: special-token ids (source pad, target
            begin/end-of-sequence, target pad).

    Returns:
        List of target tokens, starting with the BOS token.
    """
    src_mask = make_src_mask(src, s_pad)
    trg_list = [t_bos]
    for _ in range(MAX_LEN):
        trg = nd.array([trg_vocab.to_indices(trg_list)])
        trg_mask = make_trg_mask(trg, t_pad)
        pred = net(src, trg, src_mask, trg_mask)
        out = pred.argmax(-1)
        # NOTE(review): out[-1][0] takes the last position of batch row 0 --
        # assumes a (seq, batch)-like layout of argmax output; verify.
        next_idx = out[-1][0].asscalar()
        if next_idx == t_eos:
            break
        trg_list.append(int(next_idx))
    return trg_vocab.to_tokens(trg_list)
|
[
"dfhayst@gmail.com"
] |
dfhayst@gmail.com
|
da95f538ecc178c10e81d9ef871468fc6b733155
|
3700c716293b010b68f05f01099fa426449ddc42
|
/xirl/xirl/trainers/tcc.py
|
c18e3ab5171a9a099de96252834100a76ceb8337
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
riven314/google-research
|
31f3ed2635e092bc0f8c89b535e123ea731946d3
|
dd971143b85879fcac7c983b8d2b30d145e4cbdc
|
refs/heads/master
| 2023-06-14T02:03:12.475884
| 2021-07-10T18:47:33
| 2021-07-10T18:47:33
| 384,185,293
| 0
| 0
|
Apache-2.0
| 2021-07-15T03:16:47
| 2021-07-08T16:25:21
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,653
|
py
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TCC trainer."""
from typing import Dict, List, Union
from ml_collections import ConfigDict
import torch
from xirl.losses import compute_tcc_loss
from xirl.trainers.base import Trainer
BatchType = Dict[str, Union[torch.Tensor, List[str]]]
class TCCTrainer(Trainer):
  """A trainer for Temporal Cycle Consistency Learning [1].

  References:
    [1]: arxiv.org/abs/1904.07846
  """

  def __init__(
      self,
      model,
      optimizer,
      device,
      config,
  ):
    """Cache the TCC loss hyperparameters from `config` as attributes."""
    super().__init__(model, optimizer, device, config)
    self.normalize_embeddings = config.MODEL.NORMALIZE_EMBEDDINGS
    self.stochastic_matching = config.LOSS.TCC.STOCHASTIC_MATCHING
    self.loss_type = config.LOSS.TCC.LOSS_TYPE
    self.similarity_type = config.LOSS.TCC.SIMILARITY_TYPE
    self.cycle_length = config.LOSS.TCC.CYCLE_LENGTH
    self.temperature = config.LOSS.TCC.SOFTMAX_TEMPERATURE
    self.label_smoothing = config.LOSS.TCC.LABEL_SMOOTHING
    self.variance_lambda = config.LOSS.TCC.VARIANCE_LAMBDA
    self.huber_delta = config.LOSS.TCC.HUBER_DELTA
    self.normalize_indices = config.LOSS.TCC.NORMALIZE_INDICES

  def compute_loss(
      self,
      embs,
      batch,
  ):
    """Compute the TCC loss for a batch of frame embeddings.

    Args:
      embs: embedding tensor whose first two dims are (batch, num frames)
        -- inferred from the `embs.shape[:2]` unpack below.
      batch: dict providing "frame_idxs" and "video_len" tensors.
    """
    steps = batch["frame_idxs"].to(self._device)
    seq_lens = batch["video_len"].to(self._device)
    # Dynamically determine the number of cycles if using stochastic
    # matching.
    batch_size, num_cc_frames = embs.shape[:2]
    num_cycles = int(batch_size * num_cc_frames)
    return compute_tcc_loss(
        embs=embs,
        idxs=steps,
        seq_lens=seq_lens,
        stochastic_matching=self.stochastic_matching,
        normalize_embeddings=self.normalize_embeddings,
        loss_type=self.loss_type,
        similarity_type=self.similarity_type,
        num_cycles=num_cycles,
        cycle_length=self.cycle_length,
        temperature=self.temperature,
        label_smoothing=self.label_smoothing,
        variance_lambda=self.variance_lambda,
        huber_delta=self.huber_delta,
        normalize_indices=self.normalize_indices,
    )
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
305e474130d764368ac102f25f9dd530a1cb4a02
|
717558d6a075163294054bd5aea4ef3234df23ad
|
/models_nonconvex_simple/fuel.py
|
ca85bfc10f4aa8dbb9960f7e1dbcefd48cd8cb23
|
[
"MIT"
] |
permissive
|
RomeoV/pyomo-MINLP-benchmarking
|
1270766397fbc4e57ea1bd0c2285fb7edf64062d
|
996d2c8ee1cb9b03fe00c6246f52294337d8b92c
|
refs/heads/master
| 2021-07-11T17:54:25.284712
| 2020-08-13T23:43:14
| 2020-08-13T23:43:14
| 185,664,992
| 8
| 1
|
MIT
| 2019-05-10T19:07:05
| 2019-05-08T19:09:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,580
|
py
|
# MINLP written by GAMS Convert at 08/13/20 17:37:43
#
# Equation counts
# Total E G L N X C B
# 16 7 6 3 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 16 13 3 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 39 33 6 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.b1 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b3 = Var(within=Binary,bounds=(0,1),initialize=0)
m.x4 = Var(within=Reals,bounds=(None,None),initialize=100)
m.x5 = Var(within=Reals,bounds=(None,None),initialize=100)
m.x6 = Var(within=Reals,bounds=(None,None),initialize=100)
m.x7 = Var(within=Reals,bounds=(50,700),initialize=50)
m.x8 = Var(within=Reals,bounds=(50,700),initialize=50)
m.x9 = Var(within=Reals,bounds=(50,700),initialize=50)
m.x10 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x11 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x12 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x13 = Var(within=Reals,bounds=(0,4000),initialize=0)
m.x14 = Var(within=Reals,bounds=(0,4000),initialize=0)
m.x15 = Var(within=Reals,bounds=(2000,4000),initialize=2000)
m.obj = Objective(expr=0.0025*m.x7**2 + 6*m.x7 + 0.0025*m.x8**2 + 6*m.x8 + 0.0025*m.x9**2 + 6*m.x9
+ 900, sense=minimize)
m.c2 = Constraint(expr= - 100*m.b1 + m.x4 >= 0)
m.c3 = Constraint(expr= - 100*m.b2 + m.x5 >= 0)
m.c4 = Constraint(expr= - 100*m.b3 + m.x6 >= 0)
m.c5 = Constraint(expr= - 500*m.b1 + m.x4 <= 0)
m.c6 = Constraint(expr= - 500*m.b2 + m.x5 <= 0)
m.c7 = Constraint(expr= - 500*m.b3 + m.x6 <= 0)
m.c8 = Constraint(expr= m.x10 + m.x13 == 3500)
m.c9 = Constraint(expr= m.x11 - m.x13 + m.x14 == 500)
m.c10 = Constraint(expr= m.x12 - m.x14 + m.x15 == 500)
m.c11 = Constraint(expr= m.x4 + m.x7 >= 400)
m.c12 = Constraint(expr= m.x5 + m.x8 >= 900)
m.c13 = Constraint(expr= m.x6 + m.x9 >= 700)
m.c14 = Constraint(expr=-(0.005*m.x4**2 + m.x4) - 50*m.b1 + m.x10 == 0)
m.c15 = Constraint(expr=-(0.005*m.x5**2 + m.x5) - 50*m.b2 + m.x11 == 0)
m.c16 = Constraint(expr=-(0.005*m.x6**2 + m.x6) - 50*m.b3 + m.x12 == 0)
|
[
"peng_zedong@126.com"
] |
peng_zedong@126.com
|
1f026430b482740e673cdd71c0097e75438afb07
|
51353dc8d2dce1c392d3a39da16a1644692de8d5
|
/dictionaryAndSets/join_2.py
|
220edaa509e1e2b38d09d0d40a595016bf99f57f
|
[] |
no_license
|
aiqingr/python-lesson
|
64cf21b9f945e92a79414069f64f84d47aa7ee66
|
21aff567f6340ae74a923a82648e25bdaac554a7
|
refs/heads/master
| 2020-05-23T04:16:06.557785
| 2019-11-12T02:50:51
| 2019-11-12T02:50:51
| 58,403,899
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,917
|
py
|
# Modify the program so that the exits is a dictionary rather than a list,
# with the keys being the numbers of he locations and the values being
# dictionaries holding the exits (as they do at present). No change should
# be needed to the actual code.
#
# Once that is working, create another dictionary that contains words that
# players may use. These words will be the keys, and their values will be
# a single letter that the program can use to determine which way to go.
locations = {0: "you are sitting in froint of a computer learning python",
1: "You are sitting at the end of a road before a small brick building",
2: "You are at the top of a hill",
3: "You are inside a building, a small house for a small stream",
4: "You are in a valley beside a stream",
5: "You are in the forest"}
exits = {0: {"Q": 0},
1: {"W": 2, "E": 3, "N": 5, "S": 4, "Q": 0},
2: {"N": 5, "Q": 0},
3: {"W": 1, "Q": 0},
4: {"N": 1, "W": 2, "Q": 0},
5: {"W": 2, "S": 1, "Q": 0}}
vocabulary = {"QUIT": "Q",
"NORTH": "N",
"SOUTH": "S",
"EAST": "E",
"WEST": "W"}
# print(locations[0].split())
# print(locations[3].split())
loc = 1
while True:
availableExits = ", ".join(exits[loc].keys())
print(locations[loc])
if loc == 0:
break
direction = input("Available exists are " + availableExits).upper()
print()
# Parse the user input, using our vocabulary dictionary if necessary
if len(direction) > 1: # More than one letter
words = direction.split()
for word in words: # Does it contain a word we know
if word in vocabulary:
direction = vocabulary[word]
if direction in exits[loc]:
loc = exits[loc][direction]
else:
print("You cannot go in that direction")
|
[
"ntyaiqingr@gmail.com"
] |
ntyaiqingr@gmail.com
|
8491e029f5851120fca824478d2bb40ddbbd74a3
|
b2e5677ecd4c2c0bb2d091d3371d2815dd36ffd5
|
/examples/topicmod_lda.py
|
5213c25548fa03bacd15e40f7869f26a2697836a
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
WZBSocialScienceCenter/tmtoolkit
|
f573e3b4db7975106b63a108ed146e43b443c6d6
|
02990865ee896625d5cf540bf2b0dbc159bedf38
|
refs/heads/master
| 2023-05-10T23:26:37.265300
| 2023-05-03T09:35:04
| 2023-05-03T09:35:04
| 109,812,180
| 202
| 33
|
Apache-2.0
| 2022-01-10T12:17:43
| 2017-11-07T09:11:54
|
Python
|
UTF-8
|
Python
| false
| false
| 5,671
|
py
|
"""
An example for topic modeling with LDA with focus on the new plotting functions in `tmtoolkit.corpus.visualize` and
in `tmtoolkit.topicmod.visualize`.
This examples requires that you have installed tmtoolkit with the recommended set of packages plus "lda" and have
installed an English language model for spaCy:
pip install -U "tmtoolkit[recommended,lda]"
python -m tmtoolkit setup en
For more information, see the installation instructions: https://tmtoolkit.readthedocs.io/en/latest/install.html
.. codeauthor:: Markus Konrad <markus.konrad@wzb.eu>
"""
import os.path
import matplotlib.pyplot as plt
from tmtoolkit.utils import enable_logging, pickle_data, unpickle_file
from tmtoolkit.corpus import Corpus, lemmatize, to_lowercase, remove_punctuation, remove_common_tokens, \
remove_uncommon_tokens, filter_clean_tokens, print_summary, remove_documents_by_length, dtm, \
corpus_retokenize, save_corpus_to_picklefile, load_corpus_from_picklefile
from tmtoolkit.corpus.visualize import plot_doc_lengths_hist, plot_doc_frequencies_hist, plot_vocab_counts_hist, \
plot_ranked_vocab_counts, plot_num_sents_hist, plot_sent_lengths_hist, plot_num_sents_vs_sent_length, \
plot_token_lengths_hist
from tmtoolkit.topicmod.tm_lda import evaluate_topic_models # we're using lda for topic modeling
from tmtoolkit.topicmod.evaluate import results_by_parameter
from tmtoolkit.topicmod.model_io import print_ldamodel_topic_words
from tmtoolkit.topicmod.visualize import plot_eval_results, plot_topic_word_ranked_prob, plot_doc_topic_ranked_prob
#%%
enable_logging()
#%% loading the sample corpus (English news articles)
corp_picklefile = 'data/topicmod_lda_corpus.pickle'
if os.path.exists(corp_picklefile):
docs = load_corpus_from_picklefile(corp_picklefile)
else:
docs = Corpus.from_builtin_corpus('en-NewsArticles', max_workers=1.0)
save_corpus_to_picklefile(docs, corp_picklefile)
print_summary(docs)
#%% plot some corpus summary statistics
# you can copy those and also do the plotting also after corpus transformations in the next cell
# this shows you nicely how the transformations change the distribution of words in the corpus
fig, ax = plt.subplots()
plot_doc_lengths_hist(fig, ax, docs)
plt.show()
fig, ax = plt.subplots()
plot_vocab_counts_hist(fig, ax, docs)
plt.show()
fig, ax = plt.subplots()
plot_ranked_vocab_counts(fig, ax, docs, zipf=True)
plt.show()
fig, ax = plt.subplots()
plot_doc_frequencies_hist(fig, ax, docs)
plt.show()
fig, ax = plt.subplots()
plot_num_sents_hist(fig, ax, docs)
plt.show()
fig, ax = plt.subplots()
plot_sent_lengths_hist(fig, ax, docs)
plt.show()
fig, ax = plt.subplots()
plot_num_sents_vs_sent_length(fig, ax, docs)
plt.show()
fig, ax = plt.subplots()
plot_token_lengths_hist(fig, ax, docs)
plt.show()
#%% apply preprocessing pipeline
corp_preproc_picklefile = 'data/topicmod_lda_corpus_preprocessed.pickle'
if os.path.exists(corp_preproc_picklefile):
docs = load_corpus_from_picklefile(corp_preproc_picklefile)
else:
remove_punctuation(docs)
corpus_retokenize(docs)
lemmatize(docs)
to_lowercase(docs)
filter_clean_tokens(docs, remove_numbers=True)
remove_common_tokens(docs, df_threshold=0.90)
remove_uncommon_tokens(docs, df_threshold=0.05)
remove_documents_by_length(docs, '<', 30)
save_corpus_to_picklefile(docs, corp_preproc_picklefile)
print_summary(docs)
#%% generating the document-term matrix
dtm_picklefile = 'data/topicmod_lda_dtm.pickle'
if os.path.exists(dtm_picklefile):
bow_mat, doc_labels, vocab = unpickle_file(dtm_picklefile)
else:
bow_mat, doc_labels, vocab = dtm(docs, return_doc_labels=True, return_vocab=True)
pickle_data((bow_mat, doc_labels, vocab), dtm_picklefile)
#%% running the evaluation
eval_res_picklefile = 'data/topicmod_lda_eval_res.pickle'
if os.path.exists(dtm_picklefile):
eval_results = unpickle_file(eval_res_picklefile)
else:
const_params = {
'n_iter': 1500,
'eta': 0.3,
'random_state': 20220105 # to make results reproducible
}
var_params = [{'n_topics': k, 'alpha': 10.0/k}
for k in list(range(20, 101, 20)) + [125, 150, 175, 200, 250, 300]]
metrics = ['cao_juan_2009', 'arun_2010', 'coherence_mimno_2011', 'griffiths_2004']
eval_results = evaluate_topic_models(bow_mat,
varying_parameters=var_params,
constant_parameters=const_params,
return_models=True,
metric=metrics)
pickle_data(eval_results, eval_res_picklefile)
#%% plotting evaluation results
eval_by_topics = results_by_parameter(eval_results, 'n_topics')
plot_eval_results(eval_by_topics)
plt.show()
#%% selecting the model and printing the topics' most likely words
selected_model = dict(eval_by_topics)[200]['model']
print_ldamodel_topic_words(selected_model.topic_word_, vocab=vocab)
#%% investigating, how many "top words" sufficiently describe a topic
fig, ax = plt.subplots()
plot_topic_word_ranked_prob(fig, ax, selected_model.topic_word_, n=40, log_scale=False,
highlight=[4, 12, 32], alpha=0.025)
plt.show()
# -> about 5 to 10 words aggregate most of the probability per topic
#%% investigating, how many "top topics" sufficiently describe a document
fig, ax = plt.subplots()
plot_doc_topic_ranked_prob(fig, ax, selected_model.doc_topic_, n=40, log_scale=False, highlight=list(range(4)),
alpha=0.003)
plt.show()
# -> about 10 to 15 topics aggregate most of the probability per document
|
[
"markus.konrad@wzb.eu"
] |
markus.konrad@wzb.eu
|
5954ef4325137dc49b30e115d82e1d71e116ab1b
|
353f4a3ff46a3c6fbbb866598b8561edf6e71562
|
/server/dvaapp/migrations/0021_remove_video_query.py
|
7ff21bcf1a3f150060313affe1eddd34c09f0f06
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
MrGoogol/DeepVideoAnalytics
|
ab0f72179c62fd1b0a5bddea4a9b3970678790bd
|
e25e4bf6670fabd62fe86ad68ad03a854e22aed6
|
refs/heads/master
| 2021-05-14T12:25:48.355995
| 2018-01-05T08:59:24
| 2018-01-05T08:59:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-01 17:01
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dvaapp', '0020_retriever_last_built'),
]
operations = [
migrations.RemoveField(
model_name='video',
name='query',
),
]
|
[
"akshayubhat@gmail.com"
] |
akshayubhat@gmail.com
|
458d28e1f991f3dc25217deaa2b0ce53104ef827
|
b0e738d074574af920e63f453d3628f69ce1321f
|
/watch/migrations/0006_auto_20180318_0104.py
|
7f7ea38bf96d90f7be5917e405eec50cd11b4a33
|
[] |
no_license
|
vincentmuya/neighborhood-watch
|
b23d56a9c92cefc6b4da124f337b776a9cc0ada7
|
3744d6bd5f5e63bb6b47f2c34f728e05d7bc2362
|
refs/heads/master
| 2021-04-09T14:20:25.588959
| 2018-03-20T12:04:45
| 2018-03-20T12:04:45
| 125,489,861
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 675
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-17 22:04
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('watch', '0005_post_title'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='title',
),
migrations.AlterField(
model_name='post',
name='user',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"vincentmuya13@gmail.com"
] |
vincentmuya13@gmail.com
|
b1f778f8944c11ea9d756134cee1e718c3326499
|
f08bee97371d28256f5f669979ea5e8e88192be0
|
/mcm_scripts/get_requests.py
|
e14e9bba40485f312f214a85ce45d67e82c273d4
|
[] |
no_license
|
CMSSNU/GenGen
|
c6cd08aad9f6929860351f8bdde19250480d59e0
|
6cec337a19748468cc920412d9cfbfc56ce61b55
|
refs/heads/master
| 2020-04-09T17:13:09.850706
| 2019-09-20T06:57:06
| 2019-09-20T06:57:06
| 160,474,281
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,250
|
py
|
import sys
sys.path.append('/afs/cern.ch/cms/PPD/PdmV/tools/McM/')
from rest import McM
from json import dumps
mcm = McM(dev=False)
#mcm = McM(dev=True)
# Example to get ALL requesst which are member of a given campaign and are submitted
# It uses a generic search for specified columns: query='status=submitted'
# Queries can be combined: query='status=submitted&member_of_campaign=Summer12'
#campaign_requests = mcm.get('requests', query='member_of_campaign=Summer12&status=submitted')
#for request in campaign_requests:
# print(request['prepid'])
# Example to retrieve single request dictionary
# More methods are here:
# https://cms-pdmv.cern.ch/mcm/restapi/requests/
#single_request_prepid = 'TOP-Summer12-00368'
#single_request = mcm.get('requests', single_request_prepid, method='get')
#print('Single request "%s":\n%s' % (single_request_prepid, dumps(single_request, indent=4)))
# Example how to get multiple requests using range
requests_query = """
HIG-RunIIFall17wmLHEGS-02442 -> HIG-RunIIFall17wmLHEGS-02477
"""
range_of_requests = mcm.get_range_of_requests(requests_query)
print('Found %s requests' % (len(range_of_requests)))
for request in range_of_requests:
print(request['prepid'])
print(request['time_event'])
|
[
"d4space@gmail.com"
] |
d4space@gmail.com
|
6c63500bf53bf58a8c653ea3488de0da13613156
|
5a01497e7c29e2488b6a4cb0478405239375eb66
|
/apetools/pipes/pipeenums.py
|
7c0eb61e95b8a049b278cef0d146e8154fd7cb22
|
[
"Apache-2.0"
] |
permissive
|
russell-n/oldape
|
8b4d9e996181dc1c7175f72d75c6193443da591b
|
b4d1c77e1d611fe2b30768b42bdc7493afb0ea95
|
refs/heads/master
| 2021-05-30T20:02:18.895922
| 2016-03-27T04:38:18
| 2016-03-27T04:38:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
class PipeEnum(object):
"""
A holder of constants for the StoragePipe
"""
__slots__ = ()
start = "start"
pipe = "pipe"
sink = "sink"
# end class StoragePipeEnum
|
[
"necromuralist@google.com"
] |
necromuralist@google.com
|
27b9463ca0f70caf4d98d28f2dfba0b380af9223
|
dcee93ce4b9fcf0a7ffa6ea658c403ed1fc84043
|
/Meteor/src/thirdManage/migrations/0003_auto_20170807_1623.py
|
e0d6ee7dab4b082d10e23368f50d65c656128424
|
[] |
no_license
|
henryliuom/drv-study
|
3eed96eef58138003371011034562a15ebc16b79
|
dcab011bce0f34bcf50f8ab5601eb859a5a07cb7
|
refs/heads/master
| 2021-06-06T23:49:20.869907
| 2020-07-30T09:06:48
| 2020-07-30T09:06:48
| 95,858,929
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-07 08:23
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('thirdManage', '0002_auto_20170807_1622'),
]
operations = [
migrations.AlterModelTable(
name='paydnses',
table='classmate_paydnses',
),
]
|
[
"henry@techdog.com"
] |
henry@techdog.com
|
5de5f713afc9e6239000c77a67d43371491966bf
|
839f9c64c0c2c4178f2a8a0166fa4a0a3f649aac
|
/tilepack/check_toi.py
|
46b3abf0ccdabc0f276edefb5d094268a0122071
|
[
"MIT"
] |
permissive
|
anilkunchalaece/tilepacks
|
1ab70a0f8ba7886e5b29fbcb761072b9d4e61b0e
|
86008e85826e5b1f97ef22ba92152b40353557a7
|
refs/heads/master
| 2020-06-04T07:44:09.179732
| 2019-06-14T12:30:03
| 2019-06-14T12:30:03
| 191,929,548
| 0
| 0
|
MIT
| 2019-06-14T11:09:34
| 2019-06-14T11:09:34
| null |
UTF-8
|
Python
| false
| false
| 1,512
|
py
|
import mercantile
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument('min_lon',
type=float,
help='Bounding box minimum longitude/left')
parser.add_argument('min_lat',
type=float,
help='Bounding box minimum latitude/bottom')
parser.add_argument('max_lon',
type=float,
help='Bounding box maximum longitude/right')
parser.add_argument('max_lat',
type=float,
help='Bounding box maximum latitude/top')
parser.add_argument('min_zoom',
type=int,
help='The minimum zoom level to include')
parser.add_argument('max_zoom',
type=int,
help='The maximum zoom level to include')
args = parser.parse_args()
print("zoom\tmissing from toi\tin aoi")
for zoom in range(args.min_zoom, args.max_zoom + 1):
tiles_in_aoi = set([
'{}/{}/{}'.format(z, x, y)
for x, y, z in mercantile.tiles(
args.min_lon, args.min_lat, args.max_lon, args.max_lat,
[zoom]
)
])
with open('toi.z{}.txt'.format(zoom), 'r') as f:
tiles_in_toi = set([
l.strip()
for l in f.readlines()
])
print("{zoom:2d}\t{tiles_not_in_toi}\t{tiles_in_aoi}".format(
zoom=zoom,
tiles_not_in_toi=len(tiles_in_aoi - tiles_in_toi),
tiles_in_aoi=len(tiles_in_aoi),
))
if __name__ == '__main__':
main()
|
[
"ian.dees@gmail.com"
] |
ian.dees@gmail.com
|
24449c28191a51d1f60c35022f2c37ed430095fa
|
468e75c8b64b137621bcfb523f741b2cb791bf36
|
/GUI/component/navigation_bar.py
|
74855fdcd51b5dc1b3431d4efab8e5744b713ade
|
[
"MIT"
] |
permissive
|
acc-cosc-1336/cosc-1336-fall-2017-guilgrahl
|
dd0055b68c6c56041be2b33e56b13bd3a6a9cdc1
|
c1b6a8abf779db9e5242e84c176bb5e8fb1d97a6
|
refs/heads/master
| 2021-08-28T19:03:37.364375
| 2017-12-06T03:25:24
| 2017-12-06T03:25:24
| 103,597,807
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,129
|
py
|
from tkinter import LEFT
from tkinter.ttk import Frame, Button
class NavigationBar:
def __init__(self, parent, data_source):
Frame.__init__(self, parent)
self.data_source = data_source
self.init_form()
def init_form(self):
nextButton = Button(self, text="Next ", command=self.on_next)
updateButton = Button(self, text="Update ", command=self.on_update)
deleteButton = Button(self, text="Delete ", command=self.on_delete)
previousButton = Button(self, text="Previous ", command=self.on_previous)
searchButton = Button(self, text="Search ", command=self.master.on_search)
nextButton.pack(side=LEFT)
previousButton.pack(side=LEFT)
updateButton.pack(side=LEFT)
deleteButton.pack(side=LEFT)
searchButton.pack(side=LEFT)
def on_next(self):
self.data_source.next_record()
def on_update(self):
self.data_source.request_update()
def on_delete(self):
pass
def on_previous(self):
self.data_source.previous_record()
|
[
"noreply@github.com"
] |
acc-cosc-1336.noreply@github.com
|
9dc398861c6fc13e1e5d99fcf69fce298f153848
|
2670452749c6299386a33391f9fb5014db0203ec
|
/meraki/aio/api/mg_uplink_settings.py
|
313b63a05262ac239c8965b44326f42618827427
|
[
"MIT"
] |
permissive
|
npappin-wsu/dashboard-api-python
|
f9d3fc682b517e6bac437cd54101afd09b653274
|
5aedfc740f676fbf34e5f79269e8ece73421e3da
|
refs/heads/master
| 2020-06-28T17:49:44.911294
| 2020-04-14T04:27:38
| 2020-04-14T04:27:38
| 255,509,439
| 0
| 0
|
MIT
| 2020-04-14T04:24:55
| 2020-04-14T04:24:54
| null |
UTF-8
|
Python
| false
| false
| 1,524
|
py
|
class AsyncMGUplinkSettings:
def __init__(self, session):
super().__init__()
self._session = session
async def getNetworkCellularGatewaySettingsUplink(self, networkId: str):
"""
**Returns the uplink settings for your MG network.**
https://api.meraki.com/api_docs#returns-the-uplink-settings-for-your-mg-network
- networkId (string)
"""
metadata = {
'tags': ['MG uplink settings'],
'operation': 'getNetworkCellularGatewaySettingsUplink',
}
resource = f'/networks/{networkId}/cellularGateway/settings/uplink'
return await self._session.get(metadata, resource)
async def updateNetworkCellularGatewaySettingsUplink(self, networkId: str, **kwargs):
"""
**Updates the uplink settings for your MG network.**
https://api.meraki.com/api_docs#updates-the-uplink-settings-for-your-mg-network
- networkId (string)
- bandwidthLimits (object): The bandwidth settings for the 'cellular' uplink
"""
kwargs.update(locals())
metadata = {
'tags': ['MG uplink settings'],
'operation': 'updateNetworkCellularGatewaySettingsUplink',
}
resource = f'/networks/{networkId}/cellularGateway/settings/uplink'
body_params = ['bandwidthLimits']
payload = {k: v for (k, v) in kwargs.items() if k in body_params}
return await self._session.put(metadata, resource, payload)
|
[
"shiychen@cisco.com"
] |
shiychen@cisco.com
|
90cd41d7f119122d566e22cb587ce4a3c2472432
|
3cd4902b67de144d8e6f36335e125d0548d8cf97
|
/src/data/Places365Data.py
|
7f17280172b9488ad0942cad3e68ec562241adf9
|
[
"MIT"
] |
permissive
|
stefantaubert/imageclef-lifelog-2019
|
5d201c2a28f15f608b9b58b94ab2ecddb5201205
|
ad49dc79db98a163c5bc282fb179c0f7730546b3
|
refs/heads/master
| 2022-10-06T12:42:30.011610
| 2022-08-29T13:35:09
| 2022-08-29T13:35:09
| 196,553,184
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 749
|
py
|
from src.io.ReadingContext import ReadingContext
from src.data.DataBase import DataBase
from src.globals import vc_cat_cols
from src.globals import vc_cat_score_cols
name_places = "places365"
class Places365Data(DataBase):
def __init__(self, ctx: ReadingContext):
return super().__init__(ctx)
def get_name(self):
return name_places
def __unify__(self, word):
cat = word.replace('_', ' ').replace('/', ' ')
#cat = cat.replace("outdoor", "").replace("indoor", "").strip()
return cat
def __get_data_dict__(self):
return self.ctx.vc_dict()
def __get_label_columns__(self):
return vc_cat_cols
def __get_score_columns__(self):
return vc_cat_score_cols
|
[
"stefan.taubert@posteo.de"
] |
stefan.taubert@posteo.de
|
d147631fbb07f6f190098326828eba033083da6b
|
7c12b6487874b3db564e5c900be55fd9e050f765
|
/hilder_deal_price/start_youda.py
|
6f3fcbd388bcabfa3c0c14dbf6a235f36f6dbcff
|
[] |
no_license
|
pjkui/githubproject
|
9f1ea63bb12903d8a72e0ecb4aa6c8c02a7a45f8
|
808cb78fc3887f35bf838d77d62308fce9e6aa5d
|
refs/heads/master
| 2022-10-06T00:11:34.493886
| 2019-10-07T13:09:18
| 2019-10-07T13:09:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 605
|
py
|
from youda_res.youda import Record
from dbfread import DBF
from youda_res.youda_match_insert import YouData
if __name__ == '__main__':
# """
# 需要把cjxx_3.DBF文件放到相同路径下
# """
# table = DBF('cjxx_3.DBF', recfactory=Record, ignore_missing_memofile=True)
# for record in table:
# record.insert()
youda = YouData('友达')
# """
# 地址去除室号,小区名切分成list
# """
# youda.format()
# """
# 匹配城市区域小区名
# """
# youda.match()
# """
# 入43成交库
# """
youda.insert_43()
|
[
"1735429225@qq.com"
] |
1735429225@qq.com
|
1b94706b73f45a94cc41deb5aa795a074bcafd09
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/127/usersdata/172/35097/submittedfiles/ex11.py
|
ecaecb512a8ce7a4951ad5382e645ab5e88c079d
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
# -*- coding: utf-8 -*-
d1=int(input('Digite o dia da data 1'))
m1=int(input('Digite o mês da data 1'))
a1=int(input('Digite o ano da data 1'))
d2=int(input('Digite o dia da data 2'))
m2=int(input('Digite o mês da data 2'))
a2=int(input('Digite o ano da data 2'))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.