blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a702e4b80caf7a516d063910b16df3193a3b12f8
|
c7c1a7030ce94f9678fbb9c9e8469a9726592a0a
|
/server.py
|
8f43ad1cccafa8117dc18e3aa062fcf936d5ae0e
|
[
"WTFPL"
] |
permissive
|
giogodo/hydrus
|
a7e5d8a6b256109e914216d18efa2e4ed341ccf7
|
836ae13e1f80b02e063dac9829faaec0e5c89f89
|
refs/heads/master
| 2020-04-17T18:52:00.309884
| 2019-01-16T22:40:53
| 2019-01-16T22:40:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,392
|
py
|
#!/usr/bin/env python3
# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# http://sam.zoy.org/wtfpl/COPYING for more details.

# Entry-point script for the hydrus network server: parses the command line,
# resolves the database directory, then starts/stops/restarts the controller.
# The outermost try wraps everything so any startup crash can be reported and,
# when possible, written to crash.log in the db directory.
try:
    from include import HydrusPy2To3
    HydrusPy2To3.do_2to3_test()
    import locale
    # Best-effort locale setup; silently keep the C locale if it fails.
    try: locale.setlocale( locale.LC_ALL, '' )
    except: pass
    from include import HydrusExceptions
    from include import HydrusConstants as HC
    from include import HydrusData
    from include import HydrusPaths
    import os
    import sys
    import time
    from include import ServerController
    import threading
    from twisted.internet import reactor
    from include import HydrusGlobals as HG
    from include import HydrusLogger
    import traceback
    #
    import argparse
    argparser = argparse.ArgumentParser( description = 'hydrus network server' )
    argparser.add_argument( 'action', default = 'start', nargs = '?', choices = [ 'start', 'stop', 'restart' ], help = 'either start this server (default), or stop an existing server, or both' )
    argparser.add_argument( '-d', '--db_dir', help = 'set an external db location' )
    argparser.add_argument( '--no_daemons', action='store_true', help = 'run without background daemons' )
    argparser.add_argument( '--no_wal', action='store_true', help = 'run without WAL db journalling' )
    result = argparser.parse_args()
    action = result.action
    # Resolve the db directory: an explicit --db_dir wins; otherwise use the
    # default location, falling back to the per-user path when the default is
    # not writable (or when running from an OS X app bundle).
    if result.db_dir is None:
        db_dir = HC.DEFAULT_DB_DIR
        if not HydrusPaths.DirectoryIsWritable( db_dir ) or HC.RUNNING_FROM_OSX_APP:
            db_dir = HC.USERPATH_DB_DIR
    else:
        db_dir = result.db_dir
    db_dir = HydrusPaths.ConvertPortablePathToAbsPath( db_dir, HC.BASE_DIR )
    try:
        HydrusPaths.MakeSureDirectoryExists( db_dir )
    except:
        raise Exception( 'Could not ensure db path ' + db_dir + ' exists! Check the location is correct and that you have permission to write to it!' )
    no_daemons = result.no_daemons
    no_wal = result.no_wal
    #
    # The requested action may be rewritten here (e.g. depending on whether a
    # sibling instance is already running) -- see ServerController.
    action = ServerController.ProcessStartingAction( db_dir, action )
    with HydrusLogger.HydrusLogger( db_dir, 'server' ) as logger:
        try:
            if action in ( 'stop', 'restart' ):
                ServerController.ShutdownSiblingInstance( db_dir )
            if action in ( 'start', 'restart' ):
                HydrusData.Print( 'Initialising controller\u2026' )
                # The twisted reactor runs on its own thread; signal handlers
                # are disabled because it is not the main thread.
                threading.Thread( target = reactor.run, kwargs = { 'installSignalHandlers' : 0 } ).start()
                controller = ServerController.Controller( db_dir, no_daemons, no_wal )
                controller.Run()
        except HydrusExceptions.PermissionException as e:
            error = str( e )
            HydrusData.Print( error )
        except:
            error = traceback.format_exc()
            HydrusData.Print( 'Hydrus server failed' )
            HydrusData.Print( traceback.format_exc() )
        finally:
            # Signal shutdown to all components, wake any sleeping daemons, and
            # stop the reactor from its own thread.
            HG.view_shutdown = True
            HG.model_shutdown = True
            # 'controller' may be unbound if startup failed before assignment.
            try: controller.pubimmediate( 'wake_daemons' )
            except: pass
            reactor.callFromThread( reactor.stop )
except HydrusExceptions.PermissionException as e:
    HydrusData.Print( e )
except Exception as e:
    # Last-ditch handler: dump the traceback to stdout and, when the db
    # directory is known and exists, to crash.log inside it.
    import traceback
    import os
    print( traceback.format_exc() )
    if 'db_dir' in locals() and os.path.exists( db_dir ):
        dest_path = os.path.join( db_dir, 'crash.log' )
        with open( dest_path, 'w', encoding = 'utf-8' ) as f:
            f.write( traceback.format_exc() )
        print( 'Critical error occurred! Details written to crash.log!' )
|
[
"hydrus.admin@gmail.com"
] |
hydrus.admin@gmail.com
|
323dc9dfafaaecd399097690749c647753a3ee63
|
2699b6508febc0fddde5520c5498000746eee775
|
/metadata/ns_swe20.py
|
6c5a30471dad8351f5968fbd06f135d5c361138e
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
DREAM-ODA-OS/tools
|
e1c71ddb65c255dc291a1e10778b461f32e1b017
|
66090fc9c39b35b4ef439d4dfe26ac1349d9c5f2
|
refs/heads/master
| 2021-01-19T02:13:59.489229
| 2017-11-18T10:27:25
| 2017-11-18T10:27:25
| 18,381,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,357
|
py
|
#------------------------------------------------------------------------------
#
# SWE v2.0 namespace
#
# Project: XML Metadata Handling
# Authors: Martin Paces <martin.paces@eox.at>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2013 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from lxml.builder import ElementMaker
from xml_utils import nn
#------------------------------------------------------------------------------
# namespace

# SWE Common Data Model 2.0 namespace URI and its conventional prefix mapping.
NS = "http://www.opengis.net/swe/2.0"
NS_MAP = {"swe": NS}
#------------------------------------------------------------------------------
# element maker

# Factory producing lxml elements already bound to the SWE 2.0 namespace.
E = ElementMaker(namespace=NS, nsmap=NS_MAP)
#------------------------------------------------------------------------------
# predefined fully qualified names
# attributes
# elements

# Fully qualified names of the SWE 2.0 elements used by this package.
# NOTE(review): ``nn`` presumably builds the "{namespace}localName" form --
# confirm against xml_utils.
DataRecord = nn(NS, 'DataRecord')
AllowedValues = nn(NS, 'AllowedValues')
NilValues = nn(NS, 'NilValues')
field = nn(NS, 'field')
Quantity = nn(NS, 'Quantity')
description = nn(NS, 'description')
nilValues = nn(NS, 'nilValues')
nilValue = nn(NS, 'nilValue')
uom = nn(NS, 'uom')
constraint = nn(NS, 'constraint')
interval = nn(NS, 'interval')
significantFigures = nn(NS, 'significantFigures')
|
[
"martin.paces@eox.at"
] |
martin.paces@eox.at
|
7e674560d2a9be8aff0f294dad3f242b65276e81
|
fd40d6375ddae5c8613004a411341f0c984e80d5
|
/src/visions/core/implementations/types/visions_object.py
|
4ca3062561d0563c83583f542943063a6f1c8e55
|
[
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
ieaves/tenzing
|
93c3353e62621c90adefc5a174a2dcde9aacbc46
|
92d39c1c3a5633d8074e0ffe8c2687c465aebbc8
|
refs/heads/master
| 2020-04-25T07:14:31.388737
| 2020-01-07T02:51:13
| 2020-01-07T02:51:13
| 172,608,080
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 904
|
py
|
import pandas.api.types as pdt
import pandas as pd
from typing import Sequence
from visions.core.model.relations import (
IdentityRelation,
InferenceRelation,
TypeRelation,
)
from visions.core.model.type import VisionsBaseType
def _get_relations() -> Sequence[TypeRelation]:
    """Build the relation set for ``visions_object``.

    The import happens at call time so module loading does not create a
    circular dependency with the types package.
    """
    from visions.core.implementations.types import visions_generic

    return [IdentityRelation(visions_object, visions_generic)]
class visions_object(VisionsBaseType):
    """**Object** implementation of :class:`visions.core.model.type.VisionsBaseType`.

    Examples:
        >>> x = pd.Series(['a', 1, np.nan])
        >>> x in visions_object
        True
    """

    @classmethod
    def get_relations(cls) -> Sequence[TypeRelation]:
        # Delegates to the module-level helper, which imports visions_generic
        # at call time (avoids an import cycle at module load).
        return _get_relations()

    @classmethod
    def contains_op(cls, series: pd.Series) -> bool:
        # Membership test: True when pandas reports the generic ``object`` dtype.
        return pdt.is_object_dtype(series)
|
[
"ian.k.eaves@gmail.com"
] |
ian.k.eaves@gmail.com
|
35a8c74de378ac521d1c86e29ff4fb0fe804eba7
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03829/s130253347.py
|
369c68d33d9d4cee44e80bfa246abbd4af28f0be
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
# For each gap between consecutive checkpoints, pay the cheaper of walking
# (cost a per unit distance) or teleporting (flat cost b); print the total.
n, a, b = map(int, input().split())
xs = list(map(int, input().split()))
gaps = [xs[i + 1] - xs[i] for i in range(n - 1)]
print(sum(min(a * gap, b) for gap in gaps))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
66b046fca4348342f44b6fccb8ee072e0a4f1306
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03696/s509896752.py
|
d2b5b083923b99b338db190c7478f534cf557b8f
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
# Make the bracket string s balanced by prepending the fewest '(' and
# appending the fewest ')'.
n = int(input())
s = input()
missing_open = 0  # '(' we must prepend
balance = 0       # currently unmatched '('
for i in range(n):
    balance = balance + 1 if s[i] == "(" else balance - 1
    if balance < 0:
        # A ')' with nothing to match: one more leading '(' is required.
        missing_open += 1
        balance = 0
print("(" * missing_open + s + ")" * balance)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
7824f1a34399e412abf45a090606e6f9607837a1
|
0eaab1305900d8e70dd746d676126d1667d9c314
|
/scripts/cached_credentials.py
|
d2360fae08a54472419d0ad570e1d841faa8c3e2
|
[
"Apache-2.0"
] |
permissive
|
scudette/winreg-kb
|
89ffc7f63c2630b266bed41d1c66dff64fd1d32d
|
f81b8bcaef8365d0c52bf3c87af2bccb4274bece
|
refs/heads/master
| 2020-06-08T20:51:37.427445
| 2019-06-14T06:47:16
| 2019-06-14T06:47:16
| 193,304,780
| 1
| 0
| null | 2019-06-23T04:07:02
| 2019-06-23T04:07:02
| null |
UTF-8
|
Python
| false
| false
| 2,497
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to extract cached credentials."""
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import sys
from dfvfs.helpers import command_line as dfvfs_command_line
from winregrc import cached_credentials
from winregrc import collector
from winregrc import output_writers
def Main():
  """The main program function.

  Parses the command line, scans the source for a Windows volume and extracts
  the cached credentials from the SECURITY Registry file.

  Returns:
    bool: True if successful or False if not.
  """
  argument_parser = argparse.ArgumentParser(description=(
      'Extracts the cached credentials from a SECURITY Registry file.'))

  argument_parser.add_argument(
      '-d', '--debug', dest='debug', action='store_true', default=False, help=(
          'enable debug output.'))

  argument_parser.add_argument(
      'source', nargs='?', action='store', metavar='PATH', default=None, help=(
          'path of the volume containing C:\\Windows, the filename of '
          'a storage media image containing the C:\\Windows directory, '
          'or the path of a SECURITY Registry file.'))

  options = argument_parser.parse_args()

  if not options.source:
    print('Source value is missing.')
    print('')
    argument_parser.print_help()
    print('')
    return False

  logging.basicConfig(
      level=logging.INFO, format='[%(levelname)s] %(message)s')

  output_writer = output_writers.StdoutOutputWriter()

  if not output_writer.Open():
    print('Unable to open output writer.')
    print('')
    return False

  # Scan the source for the volume that holds the Windows Registry files.
  volume_scanner_mediator = dfvfs_command_line.CLIVolumeScannerMediator()
  registry_collector = collector.WindowsRegistryCollector(
      mediator=volume_scanner_mediator)
  if not registry_collector.ScanForWindowsVolume(options.source):
    print('Unable to retrieve the Windows Registry from: {0:s}.'.format(
        options.source))
    print('')
    return False

  if registry_collector.IsSingleFileRegistry():
    # Cached credentials need keys from both files; a single file is not enough.
    # BUG FIX: corrected the 'SYSYEM' typo in this user-facing message.
    print('Both SECURITY and SYSTEM Registry files are required.')
    print('')
    return False

  # TODO: map collector to available Registry keys.
  collector_object = cached_credentials.CachedCredentialsKeyCollector(
      debug=options.debug, output_writer=output_writer)

  result = collector_object.Collect(registry_collector.registry)
  if not result:
    print('No Cache key found.')
  else:
    output_writer.WriteText('\n')

  output_writer.Close()

  return True
if __name__ == '__main__':
  # Exit status mirrors Main(): 0 on success, 1 on failure.
  sys.exit(0 if Main() else 1)
|
[
"joachim.metz@gmail.com"
] |
joachim.metz@gmail.com
|
e1be6cb81e9d66c660fafd0db382193191ef9e47
|
6b2db6fca8f31c4e6c96e68cf11e5ca3ce7e8a9b
|
/src/drawPerceptionAnaTimeSeries.py
|
32382b65cd5bd0c0a5f3649d575b1d52a7cebdd3
|
[
"MIT"
] |
permissive
|
ningtangla/escapeFromMultipleSuspectors
|
e04da12488be9767c5b6511355c167fdcf18e723
|
e6dcb0f7f9371b7ca6cca8779f69f18095092140
|
refs/heads/master
| 2022-05-03T05:25:21.556950
| 2022-04-20T13:51:53
| 2022-04-20T13:51:53
| 190,686,484
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,552
|
py
|
from collections import OrderedDict
from collections import OrderedDict
import pandas as pd
from matplotlib import pyplot as plt
import itertools as it
import os
import trajectoriesSaveLoad as tsl
class Readcsv:
    """Callable that loads the CSV for one condition and reduces it to means.

    Args:
        getCSVSavePathByCondition: factory mapping condition parameters to a
            path-building callable.
        columnNames: column labels applied to the headerless CSV.
    """

    def __init__(self, getCSVSavePathByCondition, columnNames):
        self.getCSVSavePathByCondition = getCSVSavePathByCondition
        self.columnNames = columnNames

    def __call__(self, condition):
        parameters = tsl.readParametersFromDf(condition)
        buildPath = self.getCSVSavePathByCondition(parameters)
        csvPath = buildPath({})
        # First row is skipped; remaining rows are labelled with columnNames.
        frame = pd.read_csv(csvPath, header = None, skiprows = [0], names = self.columnNames)
        return frame.mean()
def main():
    # Grid of experimental conditions whose results will be read and plotted.
    # Commented-out alternatives are kept as the author's toggles.
    manipulatedVariables = OrderedDict()
    manipulatedVariables['alphaForStateWidening'] = [0.25]
    #manipulatedVariables['attentionType'] = ['idealObserver']#, 'hybrid4']
    manipulatedVariables['attentionType'] = ['hybrid4']#, 'preAttention']
    #manipulatedVariables['attentionType'] = ['preAttention', 'attention4', 'hybrid4', 'idealObserver']#, 'attention3', 'hybrid3']
    #manipulatedVariables['attentionType'] = ['preAttentionMem0.65', 'preAttentionMem0.25', 'preAttentionPre0.5', 'preAttentionPre4.5', 'preAttention']
    manipulatedVariables['measure'] = ['attentionNumber']
    #manipulatedVariables['measure'] = ['identity']
    manipulatedVariables['chasingSubtlety'] = [3.3]
    manipulatedVariables['CForStateWidening'] = [2]
    #manipulatedVariables['minAttentionDistance'] = [8.5, 12.5]#[18.0, 40.0]
    manipulatedVariables['minAttentionDistance'] = [10.0]#[5.0, 10.0, 20.0, 40.0]
    manipulatedVariables['rangeAttention'] = [10.0]#[5.0, 10.0, 20.0, 40.0]#, 6.2, 6.3]
    manipulatedVariables['cBase'] = [44]
    manipulatedVariables['numTrees'] = [1]
    manipulatedVariables['numSimulationTimes'] = [1]
    manipulatedVariables['actionRatio'] = [0.2]
    manipulatedVariables['burnTime'] = [0]
    # Cartesian product of all values -> one parameter dict per condition.
    # NOTE(review): parametersAllCondtion is built but never used below.
    productedValues = it.product(*[[(key, value) for value in values] for key, values in manipulatedVariables.items()])
    parametersAllCondtion = [dict(list(specificValueParameter)) for specificValueParameter in productedValues]
    DIRNAME = os.path.dirname(__file__)
    trajectoryDirectory = os.path.join(DIRNAME, '..', 'data', 'mcts',
        'trajectories')
    if not os.path.exists(trajectoryDirectory):
        os.makedirs(trajectoryDirectory)
    measurementEscapeExtension = '.csv'
    getCSVSavePathByCondition = lambda condition: tsl.GetSavePath(trajectoryDirectory, measurementEscapeExtension, condition)
    #columnNames = [500.0, 11.0, 3.3, 1.83, 0.92, 0.31, 0.001]
    columnNames = list(range(250))
    readcsv = Readcsv(getCSVSavePathByCondition, columnNames)
    # NOTE(review): only used by the commented-out column relabelling below.
    precisionToSubtletyDict={500.0:0, 50.0:5, 11.0:30, 3.3:60, 1.83:90, 0.92:120, 0.31:150, 0.001: 180}
    # One MultiIndex row per condition; readcsv loads each condition's means.
    levelNames = list(manipulatedVariables.keys())
    levelValues = list(manipulatedVariables.values())
    modelIndex = pd.MultiIndex.from_product(levelValues, names=levelNames)
    toSplitFrame = pd.DataFrame(index = modelIndex)
    modelResultDf = toSplitFrame.groupby(levelNames).apply(readcsv)
    # Drop the single-valued index levels so only the plotted factors remain.
    toDropLevels = ['alphaForStateWidening', 'CForStateWidening', 'cBase', 'numTrees', 'numSimulationTimes', 'actionRatio', 'burnTime', 'measure', 'chasingSubtlety']
    modelResultDf.index = modelResultDf.index.droplevel(toDropLevels)
    # One subplot per (rangeAttention, minAttentionDistance) combination.
    fig = plt.figure()
    numColumns = len(manipulatedVariables['minAttentionDistance'])
    numRows = len(manipulatedVariables['rangeAttention'])
    plotCounter = 1
    for key, group in modelResultDf.groupby(['rangeAttention', 'minAttentionDistance']):
        #columnNamesAsSubtlety = [precisionToSubtletyDict[precision] for precision in group.columns]
        #group.columns = columnNamesAsSubtlety
        # Reshape to a long series indexed by (attentionType, time) only.
        group = group.stack()
        group.index.names = ['attentionType', 'minAttentionDistance', 'rangeAttention', 'time']
        group.index = group.index.droplevel(['minAttentionDistance', 'rangeAttention'])
        group = group.to_frame()
        group.columns = ['model']
        axForDraw = fig.add_subplot(numRows, numColumns, plotCounter)
        # Row label (rangeAttention) on the first column; column title
        # (minAttentionDistance) on the first row of subplots.
        if (plotCounter) % max(numColumns, 2) == 1:
            axForDraw.set_ylabel(str(key[0]))
        if plotCounter <= numColumns:
            axForDraw.set_title(str(key[1]))
        for attentionType, grp in group.groupby('attentionType'):
            grp.index = grp.index.droplevel('attentionType')
            #if str(attentionType) == manipulatedVariables['attentionType'][-1]:
            #    grp['human'] = [0.6, 0.37, 0.24]
            #    grp['human'] = [0.6, 0.48, 0.37, 0.25, 0.24, 0.42, 0.51]
            #    grp.plot.line(ax = axForDraw, y = 'human', label = 'human', ylim = (0, 0.7), marker = 'o', rot = 0 )
            grp.plot.line(ax = axForDraw, y = 'model', label = str(attentionType), ylim = (0, 4.1), marker = 'o', ms = 3, rot = 0 )
        plotCounter = plotCounter + 1
    plt.suptitle('Measurement = Perception Rate')
    #plt.suptitle('Measurement = Action Deviation')
    #plt.suptitle('Measurement = Velocity Diff')
    #plt.suptitle('Measurement = Escape rate')
    #fig.text(x = 0.5, y = 0.92, s = 'Min Attention Distance', ha = 'center', va = 'center')
    fig.text(x = 0.05, y = 0.5, s = 'Attention Range', ha = 'center', va = 'center', rotation=90)
    # NOTE(review): drawn at the same (x, y) as the label above, so the two
    # strings overlap -- confirm which label is intended.
    fig.text(x = 0.05, y = 0.5, s = 'Number of Simulations', ha = 'center', va = 'center', rotation=90)
    plt.show()
# Script entry point.
if __name__ == "__main__":
    main()
|
[
"ningtangzju@gmail.com"
] |
ningtangzju@gmail.com
|
23b9d302ab52d35d1cb3fdd270c785503d99aacb
|
b3b68efa404a7034f0d5a1c10b281ef721f8321a
|
/Scripts/simulation/interactions/base/cheat_interaction.py
|
90cea49d59271ca03aa011bf3d7fd934e0685f7e
|
[
"Apache-2.0"
] |
permissive
|
velocist/TS4CheatsInfo
|
62195f3333076c148b2a59f926c9fb5202f1c6fb
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
refs/heads/main
| 2023-03-08T01:57:39.879485
| 2021-02-13T21:27:38
| 2021-02-13T21:27:38
| 337,543,310
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,778
|
py
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\interactions\base\cheat_interaction.py
# Compiled at: 2020-10-09 00:03:45
# Size of source mod 2**32: 6769 bytes
from date_and_time import create_time_span
from distributor.shared_messages import IconInfoData
from interactions.aop import AffordanceObjectPair
from interactions.base.immediate_interaction import ImmediateSuperInteraction
from interactions.base.picker_interaction import PickerSuperInteraction
from interactions.interaction_finisher import FinishingType
from scheduler import AlarmData
from sims4.tuning.tunable import Tunable
from sims4.utils import flexmethod
from singletons import DEFAULT
from situations.service_npcs.service_npc_manager import ServiceNpcSituationCreationParams
from statistics.skill import Skill
from ui.ui_dialog_generic import UiDialogTextInputOkCancel
from ui.ui_dialog_picker import ObjectPickerRow
import services, sims4
TEXT_INPUT_SKILL_LEVEL = 'skill_level'
class CheatSetSkillSuperInteraction(PickerSuperInteraction):
    """Cheat picker interaction: pick a skill from a list, then type a level to set.

    Decompiled source (uncompyle6); the redundant parentheses around arguments
    are decompiler artifacts and are preserved as-is.
    """

    INSTANCE_TUNABLES = {'skill_level_dialog':UiDialogTextInputOkCancel.TunableFactory(description="\n The dialog that is displayed (and asks for the user to enter\n the skill level).\n \n An additional token is passed in: the selected stat's name. \n ",
      text_inputs=(
       TEXT_INPUT_SKILL_LEVEL,)),
     'set_almost_level_up':Tunable(description='\n True means this interaction will set the skill to the value\n that almost level up the skill level passed in. False means it\n will set the skill directly to the level',
      tunable_type=bool,
      default=False)}

    def _run_interaction_gen(self, timeline):
        # Show the skill picker targeting this interaction's target sim.
        self._show_picker_dialog((self.target), target_sim=(self.target))
        return True
        # Unreachable yield keeps this function a generator, as the framework
        # expects (decompiler artifact of the original generator form).
        if False:
            yield None

    @flexmethod
    def picker_rows_gen(cls, inst, target, context, **kwargs):
        # One picker row per skill type the target is allowed to gain.
        skill_manager = services.get_instance_manager(sims4.resources.Types.STATISTIC)
        for skill in skill_manager.get_ordered_types(only_subclasses_of=Skill):
            if not skill.can_add(target):
                continue
            row = ObjectPickerRow(name=(skill.stat_name), icon=(skill.icon),
              row_description=(skill.skill_description(context.sim)),
              tag=skill)
            yield row

    def on_choice_selected(self, choice_tag, **kwargs):
        # Called with the picked skill (or None on cancel); then prompts for
        # the numeric level via the tuned text-input dialog.
        if choice_tag is None:
            return
        skill = choice_tag
        sim = self.target

        def on_response(level_dialog):
            # Dialog callback: validate the typed level, then apply it.
            if not level_dialog.accepted:
                self.cancel((FinishingType.DIALOG), cancel_reason_msg='Set Skill level dialog timed out from client.')
                return
            level = level_dialog.text_input_responses.get(TEXT_INPUT_SKILL_LEVEL)
            if not level:
                self.cancel((FinishingType.DIALOG), cancel_reason_msg='Empty skill level returned from client.')
                return
            try:
                level = int(level)
            except:
                self.cancel((FinishingType.DIALOG), cancel_reason_msg='Invalid skill level returned from client.')
                return
            else:
                tracker = sim.get_tracker(skill)
                stat = tracker.get_statistic(skill, add=True)
                if stat is None:
                    self.cancel((FinishingType.FAILED_TESTS), cancel_reason_msg='Unable to add Skill due to entitlement restriction.')
                    return
                if self.set_almost_level_up:
                    # Set slightly below the requested level's threshold.
                    skill_value = stat.get_skill_value_for_level(level) - 50
                    tracker.set_value(skill, skill_value)
                else:
                    tracker.set_user_value(skill, level)

        dialog = self.skill_level_dialog(sim, self.get_resolver())
        dialog.show_dialog(on_response=on_response, additional_tokens=(skill.stat_name,), icon_override=IconInfoData(icon_resource=(skill.icon)))
class CheatRequestServiceNpcSuperInteraction(ImmediateSuperInteraction):
    """Cheat interaction that immediately dispatches a service NPC to the sim's household.

    Decompiled source (uncompyle6); redundant parentheses around arguments are
    decompiler artifacts and are preserved.
    """

    def __init__(self, aop, context, service_tuning=None, **kwargs):
        # service_tuning identifies which service NPC this instance requests.
        (super().__init__)(aop, context, **kwargs)
        self._service_tuning = service_tuning

    def _run_interaction_gen(self, timeline):
        # Fabricate an 8-hour alarm window and start a one-off (non-recurring)
        # service NPC situation for the sim's household.
        sim = self.sim
        end_time = services.time_service().sim_now + create_time_span(hours=8)
        fake_alarm_data = AlarmData(None, end_time, None, False)
        default_user_specified_data_id = self._service_tuning.get_default_user_specified_data_id()
        creation_data = ServiceNpcSituationCreationParams((sim.household), (self._service_tuning), user_specified_data_id=default_user_specified_data_id, is_recurring=False)
        services.current_zone().service_npc_service._send_service_npc(None, fake_alarm_data, creation_data)
        return True
        # Unreachable yield keeps this function a generator, as the framework
        # expects (decompiler artifact of the original generator form).
        if False:
            yield None

    @flexmethod
    def _get_name(cls, inst, target=DEFAULT, context=DEFAULT, service_tuning=None, outfit_index=None, **interaction_parameters):
        if inst is not None:
            # BUG FIX: the decompiled code computed this display name and
            # discarded it, then fell through and dereferenced the
            # service_tuning keyword (None when called on an instance).
            return inst._service_tuning.display_name
        return service_tuning.display_name

    @classmethod
    def potential_interactions(cls, target, context, **kwargs):
        # Generate one AOP per known service NPC tuning type.
        for service_tuning_type in services.service_npc_manager().types:
            service_tuning = services.service_npc_manager().get(service_tuning_type)
            yield AffordanceObjectPair(cls, target, cls, None, service_tuning=service_tuning, **kwargs)
|
[
"cristina.caballero2406@gmail.com"
] |
cristina.caballero2406@gmail.com
|
1a6d7326648830c51c9d6ddcfd52f90ae9a5f1ed
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/ThirteenTeV/PythiaChargedResonance_WG/PythiaChargedResonance_WGToLNuG_M4000_width0p01_13TeV-pythia8_cff.py
|
45ca4f0a00d2ad1221666d6f875dca26a7287b5c
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596
| 2023-08-29T14:53:43
| 2023-08-29T14:53:43
| 11,424,867
| 69
| 987
| null | 2023-09-14T12:41:28
| 2013-07-15T14:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,131
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *

# Pythia8 generator fragment: a charged resonance (PDG id 37, m0 = 4000 GeV,
# forced width 0.4 GeV) produced at sqrt(s) = 13 TeV and decaying to W + photon,
# with the W restricted to leptonic decays.
generator = cms.EDFilter("Pythia8GeneratorFilter",
    comEnergy = cms.double(13000.0),
    filterEfficiency = cms.untracked.double(1),
    maxEventsToPrint = cms.untracked.int32(0),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    pythiaPylistVerbosity = cms.untracked.int32(0),
    PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CUEP8M1SettingsBlock,
        processParameters = cms.vstring(
            # id 37: disable all decays, add and enable only H+ -> W(24) gamma(22).
            "37:onMode = off",
            "37:addChannel = 1 0.00001 101 24 22",
            "37:onIfMatch = 24 22",
            "37:m0 = 4000",
            "37:doForceWidth = on",
            "37:mWidth = 0.400000",
            # W decays only to e/mu/tau (+ neutrino).
            "24:onMode = off",
            "24:onIfAny = 11 13 15",
            "Higgs:useBSM = on",
            "HiggsBSM:ffbar2H+- = on"),
        parameterSets = cms.vstring(
            "pythia8CommonSettings",
            "pythia8CUEP8M1Settings",
            "processParameters")
    )
)
|
[
"shubhanshu.chauhan.cern.ch"
] |
shubhanshu.chauhan.cern.ch
|
b538521c1a4e868a82ca658bfa9726213a686173
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/W7S25BPmjEMSzpnaB_4.py
|
1710d798d37e7f63b51f71720d88fb6747d84732
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 301
|
py
|
def bonacci(sum_num, index):
    """Return the ``index``-th (1-based) element of the N-bonacci sequence.

    The sequence starts with ``sum_num - 1`` zeros followed by a single 1;
    every later element is the sum of the preceding ``sum_num`` elements
    (e.g. ``sum_num=2`` gives the Fibonacci sequence 0, 1, 1, 2, 3, ...).

    Args:
        sum_num: how many preceding terms each new term sums (N of N-bonacci).
        index: 1-based position of the requested term.

    Returns:
        The term at the requested position.
    """
    # Renamed from the original's `list`, which shadowed the builtin.
    seq = [0] * (sum_num - 1) + [1]
    for _ in range(sum_num, index):
        # Each new term is the sum of the trailing sum_num terms.
        seq.append(sum(seq[-sum_num:]))
    return seq[index - 1]
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
cc2e37ddd71fb1e52708459d304b69b5718bc428
|
8d55d41a4f5c0b89331cac714c1525e9581d9720
|
/PyCommon/modules/VirtualPhysics/setup_mac.py
|
df7c84427729204e8ca11d58188d86486b5080f3
|
[
"Apache-2.0"
] |
permissive
|
hpgit/HumanFoot
|
8cf35ceeeb35a0371e03eaf19d6da58dc01487eb
|
f9a1a341b7c43747bddcd5584b8c98a0d1ac2973
|
refs/heads/master
| 2022-04-13T23:38:19.072203
| 2019-12-06T06:36:10
| 2019-12-06T06:36:10
| 41,348,141
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
from distutils.core import setup, Extension
import sys
py_major_ver = sys.version_info[0]
# Boost.Python's library name carries a '3' suffix for Python 3 builds.
boost_lib = 'boost_python3' if py_major_ver == 3 else 'boost_python'
def moduleSetup(moduleName):
    """Build and register one VirtualPhysics wrapper extension module.

    The C++ source is derived from the module name: 'vpXyz' -> 'pyVpXyz.cpp'.
    """
    source_file = 'pyV' + moduleName[1:] + '.cpp'
    extension = Extension(moduleName,
        include_dirs = ['../usr/include/'],
        #extra_compile_args=['-fopenmp'],
        # extra_link_args=['-lgomp'],
        libraries = [boost_lib, 'vpLib'],
        library_dirs = ['../usr/lib'],
        sources = [source_file])
    setup(name = moduleName,
        version = '0.1',
        description = moduleName,
        ext_modules = [extension])
# Build each wrapper module in turn.
for module_name in ('vpWorld', 'vpBody'):
    moduleSetup(module_name)
|
[
"garethvlf@gmail.com"
] |
garethvlf@gmail.com
|
f4199947212a6889d65b9832b7341e5f36dafffd
|
39405775f9ba7b9325a1348c8172f98ad24d442c
|
/user_portrait/user_portrait/cron/info_consume/propagate/propagate_time_weibo.py
|
6e2575bd9086745a5444b63f5d897d7058f14b32
|
[] |
no_license
|
yuyuqi/info_consume
|
cfef72d697b929eb69244bd54b335cf46c4252ff
|
c984fb3b7235bae02cd5ba8f04f7ffb41ba0b910
|
refs/heads/master
| 2021-01-14T12:22:25.929097
| 2016-11-11T08:18:38
| 2016-11-11T08:18:38
| 66,696,804
| 0
| 0
| null | 2016-08-27T04:56:46
| 2016-08-27T04:56:46
| null |
UTF-8
|
Python
| false
| false
| 834
|
py
|
# -*- coding: utf-8 -*-
from config import db
class PropagateTimeWeibos(db.Model):
    """ORM model caching, per topic and query window, a serialized weibo list."""

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # Topic name, at most 20 characters.
    topic = db.Column(db.String(20))
    # NOTE(review): presumably the window-end timestamp -- confirm with callers.
    end = db.Column(db.BigInteger(10, unsigned=True))
    # NOTE(review): `range` and `limit` shadow Python builtins; the names are
    # part of the schema/constructor interface, so they are kept as-is.
    range = db.Column(db.BigInteger(10, unsigned=True))
    mtype = db.Column(db.Integer(1, unsigned=True))
    limit = db.Column(db.BigInteger(10, unsigned=True))
    weibos = db.Column(db.Text) # weibos=[weibos]

    def __init__(self, topic, end, range, mtype, limit, weibos):
        """Store the query parameters and the serialized weibos payload."""
        self.topic = topic
        self.end = end
        self.range = range
        self.mtype = mtype
        self.limit = limit
        self.weibos = weibos

    @classmethod
    def _name(cls):
        # Identifier name used by callers; a unicode literal on Python 2.
        return u'PropagateTimeWeibos'
# When run directly, create all tables registered on db.
if __name__ == '__main__':
    db.create_all()
|
[
"1257819385@qq.com"
] |
1257819385@qq.com
|
b5126f5c82e7deb25eaa06a3bf6f79162e98be49
|
9c8fdfa389eaaf2df4c8ba0e3072d94671b5a622
|
/0547. Friend Circles.py
|
0a8f07d64c078d5a21a44614caac046e77dc1720
|
[] |
no_license
|
aidardarmesh/leetcode2
|
41b64695afa850f9cc7847158abb6f2e8dc9abcd
|
4cf03307c5caeccaa87ccce249322bd02397f489
|
refs/heads/master
| 2023-02-27T11:22:09.803298
| 2021-02-07T06:47:35
| 2021-02-07T06:47:35
| 264,491,702
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 758
|
py
|
from typing import *
class Solution:
    def findCircleNum(self, M: List[List[int]]) -> int:
        """Count friend circles (connected components) in adjacency matrix M.

        Uses union-find: start with every person in their own circle and merge
        circles whenever M marks two people as direct friends. The running
        count is kept on ``self.circles``, decremented once per merge.
        """
        n = len(M)
        self.circles = n
        parent = {node: node for node in range(n)}

        def find(node):
            # Iterative root lookup with full path compression.
            root = node
            while parent[root] != root:
                root = parent[root]
            while parent[node] != root:
                parent[node], node = root, parent[node]
            return root

        def union(x, y):
            x_root, y_root = find(x), find(y)
            if x_root != y_root:
                self.circles -= 1
                parent[x_root] = y_root

        for i in range(n):
            for j in range(n):
                if M[i][j] and find(i) != find(j):
                    union(i, j)
        return self.circles
|
[
"darmesh.aidar@gmail.com"
] |
darmesh.aidar@gmail.com
|
728690b3b498ad97e32534f2b46aa151a0ac28a4
|
bee9d96912078d68877aa53e0c96537677ec3e6a
|
/peakpo/control/cakemakecontroller.py
|
1003e9635181ea50fa417f7c3418c3b18cf82084
|
[
"Apache-2.0"
] |
permissive
|
SHDShim/PeakPo
|
ce0a637b6307787dd84fd3dcb3415e752d180c32
|
4c522e147e7715bceba218de58ee185cccd2055e
|
refs/heads/master
| 2022-06-26T11:26:45.097828
| 2022-06-19T22:03:24
| 2022-06-19T22:03:24
| 94,345,216
| 17
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 573
|
py
|
import os
from PyQt5 import QtWidgets
from utils import undo_button_press, dialog_savefile, writechi
class CakemakeController(object):
    """Drives cake integration of the model's current diffraction image."""

    def __init__(self, model, widget):
        self.model = model
        self.widget = widget

    def read_settings(self):
        """Push the calibration and the UI mask limits into the image model."""
        diff_img = self.model.diff_img
        diff_img.set_calibration(self.model.poni)
        mask_limits = (self.widget.spinBox_MaskMin.value(),
                       self.widget.spinBox_MaskMax.value())
        diff_img.set_mask(mask_limits)

    def cook(self):
        """Apply the current settings, then integrate the image to a cake."""
        self.read_settings()
        self.model.diff_img.integrate_to_cake()
|
[
"SHDShim@gmail.com"
] |
SHDShim@gmail.com
|
aa9336da4d4fa8ada35ebde970a9ccf696c296fb
|
b2826350dab8935de4c54092fac2090ec9b562bc
|
/prog_1d_hubbard.py
|
1ba9a833239abe660c4ca0920ebd3f3c417466c4
|
[
"MIT"
] |
permissive
|
ryuikaneko/exactly_solvable_models
|
73c8bea988279a3ea595d067986a131494cd0d2b
|
d6f7617ae979b2ca7cbb1e9c1c562fae780b6956
|
refs/heads/master
| 2022-01-29T10:12:43.789610
| 2022-01-12T02:20:31
| 2022-01-12T02:20:31
| 205,546,034
| 15
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 709
|
py
|
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import scipy as scipy
import scipy.integrate as integrate
import scipy.special
if __name__ == "__main__":
Umax = 101
dU = 0.1
for intU in range(Umax):
U = dU*intU
# f[U_] = -4 Integrate[ BesselJ[0, x] BesselJ[1, x]/x/(1 + Exp[0.5 U*x]), {x, 0, Infinity}]
ene = integrate.quad(lambda x, c : \
-4.0 * scipy.special.j0(x) * scipy.special.j1(x) \
/ x / (1+np.exp(0.5*c*x)) \
, 0.0, np.inf, args=U \
, epsabs=1e-11, epsrel=1e-11, limit=10000)
print("{0:.16f} {1:.16f} {2:.16f}".format(U,ene[0],ene[1]))
print("# exact (4/pi):",0.0,4.0/np.pi)
|
[
"27846552+ryuikaneko@users.noreply.github.com"
] |
27846552+ryuikaneko@users.noreply.github.com
|
f75acb67e52cb8ec24de9fe04efc2181e8dae4b9
|
9d0195aa83cc594a8c61f334b90375961e62d4fe
|
/JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano1606.py
|
224e1d24dc5734464eade6e5711ee8dbc5f0ffb1
|
[] |
no_license
|
rsk146/CMS
|
4e49592fc64f6438051544c5de18598db36ed985
|
5f8dab8c59ae556598b9747b52b88205fffc4dbe
|
refs/heads/master
| 2022-12-01T03:57:12.126113
| 2020-08-04T03:29:27
| 2020-08-04T03:29:27
| 284,863,383
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,293
|
py
|
# Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/100000/29F6835B-E804-9247-B8B2-2A78E410E681.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAOD'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_datatest1606.root'),
outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
|
[
"rsk146@scarletmail.rutgers.edu"
] |
rsk146@scarletmail.rutgers.edu
|
eea817d60254307a68dbd9b0943756d17da0c46b
|
3f100a1002a1f8ed453c8b81a9b403444d77b4c6
|
/while_loops/trial.py
|
0bf2cb0e9f1ac677e22c4333bf6b64a527e9b842
|
[] |
no_license
|
Kimuda/Phillip_Python
|
c19c85a43c5a13760239e4e94c08436c99787ebf
|
59d56a0d45839656eb15dbe288bdb0d18cb7df2b
|
refs/heads/master
| 2016-09-09T22:19:02.347744
| 2015-05-01T10:56:49
| 2015-05-01T10:56:49
| 32,330,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,027
|
py
|
1. The program will continue producing output until it is interrupted.
2. A line that increments i by 1 needs to be put at the end of, but inside of
the while loop.
3. --- smallest.py ---
smallest = "strings are always larger than integers"
print "Enter a sequence of numbers:"
line = raw_input("> ")
while line != "":
if int(line) < smallest:
smallest = int(line)
line = raw_input("> ")
print smallest
--- end average.py ---
4. --- average.py ---
print "Enter a sequence of numbers:"
total = 0
count = 0
line = raw_input("> ")
while line != "":
total += int(line)
count += 1
line = raw_input("> ")
print float(total)/count
--- end average.py ---
5. --- average_loop.py ---
answer = 'y'
while answer == 'y':
print "Enter a sequence of numbers:"
total = 0
count = 0
line = raw_input("> ")
while line != "":
total += int(line)
count += 1
line = raw_input("> ")
print float(total)/count
answer = raw_input('Would you like to calculate the average of another sequence (y/n): ')
--- end average_loop.py ---
|
[
"pjkanywa@gmail.com"
] |
pjkanywa@gmail.com
|
5cb365c88d11e4906a757b2995cf93d0128fcde3
|
a4e5a695e749e49953d173e7ac2aeaf372d0513d
|
/flask/myapp.py
|
33873b70b27a80cd5f95b83bd1af3eeb4cebe55f
|
[] |
no_license
|
mamueller/nix_examples
|
7e79194a479e8a73490f22d9087bc11a834f9c66
|
98fd728d0bba83d9c7a8151e724ef06aadd6cb95
|
refs/heads/master
| 2021-06-12T11:02:02.224015
| 2021-04-09T18:44:51
| 2021-04-09T18:44:51
| 167,869,054
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
py
|
#! /usr/bin/env python
import subprocess
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello, Nix!"
def run():
res=subprocess.run(['qemu-x86_64','-h'])
print(res)
app.run(host="0.0.0.0")
if __name__ == "__main__":
run()
|
[
"markus.mueller.1.g@googlemail.com"
] |
markus.mueller.1.g@googlemail.com
|
6fdb094e7f8936d65474156243043ac349a04633
|
c6bfc62c5d90a8eaa7dcb0f4149b48a18dec3dc1
|
/model.py
|
503d31ccb1cd347ef5807b9bd8ced9f9f9543e4d
|
[] |
no_license
|
PabloRR100/Bert_Sentiment_Multilingual
|
4e2e897a7638ca097043980f2bf00336b7f87673
|
d19b0ebca0c890313b25b3c8fbd3ac165f94856c
|
refs/heads/master
| 2023-07-12T17:34:46.746524
| 2020-05-13T10:23:01
| 2020-05-13T10:23:01
| 261,702,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,423
|
py
|
import torch
import torch.nn as nn
import config
import transformers
"""
EXTENDES TRANSFORMERS BERT MODEL
"""
class BERTBaseUncased(nn.Module):
def __init__(self):
super(BERTBaseUncased, self).__init__()
self.bert = transformers.BertModel.from_pretrained(config.BERT_PATH)
self.bert_drop = nn.Dropout(0.3)
self.out = nn.Linear(2*768, 1)
def forward(self, ids, mask, token_type_ids):
o1, _ = self.bert(
ids,
attention_mask=mask,
token_type_ids=token_type_ids
)
mean_pool = torch.mean(o1,1)
max_pool,_ = torch.max(o1,1)
cat = torch.cat((mean_pool, max_pool),dim=1)
bo = self.bert_drop(cat)
output = self.out(bo)
return output
class DistilBERTBaseUncased(nn.Module):
def __init__(self):
super(DistilBERTBaseUncased, self).__init__()
self.bert = transformers.DistilBertModel.from_pretrained(config.DISTILBERT_PATH)
self.bert_drop = nn.Dropout(0.3)
self.out = nn.Linear(2*768, 1)
def forward(self, ids, mask, token_type_ids):
o1, _ = self.bert(
ids,
attention_mask=mask
)
mean_pool = torch.mean(o1,1)
max_pool,_ = torch.max(o1,1)
cat = torch.cat((mean_pool, max_pool),dim=1)
bo = self.bert_drop(cat)
output = self.out(bo)
return output
|
[
"pabloruizruiz10@gmail.com"
] |
pabloruizruiz10@gmail.com
|
9a9875f9ef74c131bf2885171b2c1645e587bd42
|
9d64a438cdfe4f3feb54f2f0dc7431139c4b9fb9
|
/trendmicro_apex/icon_trendmicro_apex/actions/search_agents/action.py
|
82238c4c95b6add16427768df0993ff18d46a01e
|
[
"MIT"
] |
permissive
|
PhilippBehmer/insightconnect-plugins
|
5ad86faaccc86f2f4ed98f7e5d518e74dddb7b91
|
9195ddffc575bbca758180473d2eb392e7db517c
|
refs/heads/master
| 2021-07-25T02:13:08.184301
| 2021-01-19T22:51:35
| 2021-01-19T22:51:35
| 239,746,770
| 0
| 0
|
MIT
| 2020-02-11T11:34:52
| 2020-02-11T11:34:51
| null |
UTF-8
|
Python
| false
| false
| 1,935
|
py
|
import komand
from .schema import SearchAgentsInput, SearchAgentsOutput, Input, Output, Component
# Custom imports below
import re
import urllib.parse
class SearchAgents(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='search_agents',
description=Component.DESCRIPTION,
input=SearchAgentsInput(),
output=SearchAgentsOutput())
def run(self, params={}):
agent_ids = params.get(Input.AGENT_IDS)
query_params = {}
quoted_param = ""
entity_pattern = re.compile(r'^[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{8}$')
ip_address_pattern = re.compile(r'^(?:\d{1,3}\.){3}\d{1,3}$')
mac_address_pattern = re.compile(r'\d\d([-:]\d\d){5}')
if agent_ids:
for agent_id in agent_ids:
if re.match(mac_address_pattern, agent_id):
query_params["mac_address"] = agent_id
elif re.match(ip_address_pattern, agent_id):
query_params["ip_address"] = agent_id
elif re.match(entity_pattern, agent_id):
query_params["entity_id"] = agent_id
else:
query_params["host_name"] = agent_id
quoted_param = urllib.parse.quote("&".join([k + "=" + v for k, v in query_params.items()]))
if quoted_param:
quoted_param = "?" + quoted_param
agents = self.connection.api.execute(
"get",
"/WebApp/API/AgentResource/ProductAgents" + quoted_param,
""
)
if agents.get("result_code", 0) == 1:
f = agents.get("result_content")
self.logger.info(f"result_content: {f}")
return {
Output.SEARCH_AGENT_RESPONSE: agents.get("result_content")
}
return {
Output.SEARCH_AGENT_RESPONSE: []
}
|
[
"noreply@github.com"
] |
PhilippBehmer.noreply@github.com
|
1ea8d734403be436ba043166a9c312562de2a51a
|
48517a9b7ec7b0f0bf0a03291b7d1e3def751c0a
|
/Choose Your Own Colors/horizontal_striper_15.py
|
9e0f6da63d3a326564cc317ef111c0c8348888c7
|
[
"MIT"
] |
permissive
|
Breakfast-for-Pigeons/Unicorn-HAT
|
1ae033bf11c05b9cc739b1eacfc77665506e0bc8
|
9ff1388ee627a8e81f361929e9e9b708db4e2832
|
refs/heads/master
| 2021-06-06T12:22:48.162031
| 2020-10-22T17:31:51
| 2020-10-22T17:31:51
| 74,648,524
| 1
| 0
| null | 2018-10-02T17:37:31
| 2016-11-24T07:28:23
|
Python
|
UTF-8
|
Python
| false
| false
| 2,042
|
py
|
#!/usr/bin/python3
"""
Horizontal Striper 15 - Choose Your Own Colors
With the Raspberry Pi oriented with the GPIO pins at the top, this
program stripes from the top to the bottom and alternates from left to
right and right to left.
This is exactly the same as Horizontal Striper 7 except the color order
is reversed.
....................
Functions:
- horizontal_striper_15: Gets x and y coordinates and sends them to the
striping function
....................
Author: Paul Ryan
This program was written on a Raspberry Pi using the Geany IDE.
"""
########################################################################
# Import modules #
########################################################################
from bfp_unicornhat import print_header
from bfp_unicornhat import stop
from bfp_unicornhat import stripe_horizontally_rev_alt_2
########################################################################
# Import variables #
########################################################################
from bfp_unicornhat import X_COORDINATES
from bfp_unicornhat import Y_COORDINATES
########################################################################
# Functions #
########################################################################
def horizontal_striper_15():
"""
Sends x and y coordinates to the striper function
"""
x_coordinate_list = X_COORDINATES
y_coordinate_list = Y_COORDINATES[::-1]
stripe_horizontally_rev_alt_2(x_coordinate_list, y_coordinate_list)
if __name__ == '__main__':
try:
# STEP01: Print header
print_header()
# STEP02: Print instructions in white text
print("\033[1;37;40mPress Ctrl-C to stop the program.")
# STEP03:
horizontal_striper_15()
# STEP04: Exit the program.
stop()
except KeyboardInterrupt:
stop()
|
[
"noreply@github.com"
] |
Breakfast-for-Pigeons.noreply@github.com
|
9a2bfa2e085d4bdbbcd7e45e25f31886e442b9b4
|
3cd19164c17d9793ea506369454b8bacd5cebfa9
|
/Backend/py2club/bin/pyjwt
|
56637acb75b755885e5c894368fe1121d7d3a106
|
[] |
no_license
|
Myxg/BadmintonClubSystem
|
337a17728122ab929d37e7f2732850beb49d8be0
|
1c51236098ab3770cadd925212f9d3978ed83c2a
|
refs/heads/master
| 2022-12-27T10:13:55.129630
| 2020-09-16T10:21:36
| 2020-09-16T10:21:36
| 295,998,099
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
#!/home/ubuntu/BadmintonClubSystem/Backend/py2club/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from jwt.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"15234407153@163.com"
] |
15234407153@163.com
|
|
cf8d0b5cf4e4a0107de6cd9c133e7f4aa366db30
|
95deb106d41a4612628c50568b2e6107e0a6773d
|
/applications/admin/handlers/role.py
|
bbeb9f28634de63f2130e98cf28534b2ae5137dc
|
[
"BSD-3-Clause"
] |
permissive
|
sjl421/py_admin
|
a57b0d2449a1beabef5ccffb88bd510a38db8013
|
2fb404af8b8435d247eb23c13386ae8deb88d144
|
refs/heads/master
| 2020-03-18T21:04:26.213089
| 2018-05-22T06:11:15
| 2018-05-22T06:11:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,344
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""URL处理器
[description]
"""
import json
import tornado
from applications.core.settings_manager import settings
from applications.core.logger.client import SysLogger
from applications.core.cache import sys_config
from applications.core.decorators import required_permissions
from ..models import Role
from ..models import AdminMenu
from .common import CommonHandler
class RoleHandler(CommonHandler):
"""docstring for Passport"""
@tornado.web.authenticated
@required_permissions('admin:role:index')
def get(self, *args, **kwargs):
params = {
}
self.render('role/index.html', **params)
@tornado.web.authenticated
@required_permissions('admin:role:delete')
def delete(self, *args, **kwargs):
"""删除角色
"""
uuid = self.get_argument('uuid', None)
# 超级管理员角色 默认角色
user_role_li = [settings.SUPER_ROLE_ID,'6b0642103a1749949a07f4139574ead9']
if uuid in user_role_li:
return self.error('角色不允许删除')
Role.Q.filter(Role.uuid==uuid).delete()
Role.session.commit()
return self.success()
class RoleListHandler(CommonHandler):
"""用户组列表"""
@tornado.web.authenticated
@required_permissions('admin:role:index')
def get(self, *args, **kwargs):
limit = self.get_argument('limit', 10)
page = self.get_argument('page', 1)
pagelist_obj = Role.Q.filter().paginate(page=page, per_page=limit)
if pagelist_obj is None:
return self.error('暂无数据')
total = pagelist_obj.total
page = pagelist_obj.page
items = pagelist_obj.items
params = {
'count': total,
'uri': self.request.uri,
'path': self.request.path,
'data': [role.as_dict() for role in items],
}
return self.success(**params)
class RoleAddHandler(CommonHandler):
"""用户组添加功能"""
@tornado.web.authenticated
@required_permissions('admin:role:add')
def post(self, *args, **kwargs):
rolename = self.get_argument('rolename', None)
uuid = self.get_argument('uuid', None)
status = self.get_argument('status', 1)
if not rolename:
return self.error('分组名称不能为空')
count = Role.Q.filter(Role.rolename==rolename).count()
if count>0:
return self.error('名称已被占用')
role = {
'rolename':rolename,
'status': status,
}
role = Role(**role)
Role.session.add(role)
Role.session.commit()
return self.success()
class RoleEditHandler(CommonHandler):
"""用户组增删查改功能"""
@tornado.web.authenticated
@required_permissions('admin:role:edit')
def get(self, *args, **kwargs):
uuid = self.get_argument('uuid', None)
role = Role.Q.filter(Role.uuid==uuid).first()
menu_list = AdminMenu.children(status=1)
data_info = role.as_dict()
try:
data_info['permission'] = json.loads(role.permission)
except Exception as e:
data_info['permission'] = []
params = {
'role': role,
'menu_list': menu_list,
'data_info': data_info,
}
self.render('role/edit.html', **params)
@tornado.web.authenticated
@required_permissions('admin:role:edit')
def post(self, *args, **kwargs):
rolename = self.get_argument('rolename', None)
uuid = self.get_argument('uuid', None)
sort = self.get_argument('sort', None)
status = self.get_argument('status', 0)
permission = self.get_body_arguments('permission[]')
role = {
'status': status,
}
if rolename:
role['rolename'] = rolename
count = Role.Q.filter(Role.uuid!=uuid).filter(Role.rolename==rolename).count()
if count>0:
return self.error('名称已被占用')
if sort:
role['sort'] = sort
if permission:
role['permission'] = json.dumps(permission)
Role.Q.filter(Role.uuid==uuid).update(role)
Role.session.commit()
return self.success(data=role)
|
[
"leeyisoft@icloud.com"
] |
leeyisoft@icloud.com
|
12c2fb913ef27cff8bbefcf384d426de3192582e
|
9da8754002fa402ad8e6f25659978bd269bbcec8
|
/src/314B/test_cdf_314B.py
|
e9e2e5ce0864198663b5fa16bab21ce3f1026005
|
[
"MIT"
] |
permissive
|
kopok2/CodeforcesSolutionsPython
|
a00f706dbf368ba0846c8ae86d4145b5dd3e1613
|
35bec0dbcff47765b123b5fe60476014376153df
|
refs/heads/master
| 2023-02-02T03:08:22.097651
| 2020-12-17T22:00:50
| 2020-12-17T22:00:50
| 196,035,812
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 563
|
py
|
import unittest
from unittest.mock import patch
from cdf_314B import CodeforcesTask314BSolution
class TestCDF314B(unittest.TestCase):
def test_314B_acceptance_1(self):
mock_input = ['10 3', 'abab', 'bab']
expected = '3'
with patch('builtins.input', side_effect=mock_input):
Solution = CodeforcesTask314BSolution()
Solution.read_input()
Solution.process_task()
actual = Solution.get_result()
self.assertEqual(expected, actual)
if __name__ == "__main__":
unittest.main()
|
[
"oleszek.karol@gmail.com"
] |
oleszek.karol@gmail.com
|
f2cc5f6f8b3ac1a429f71c5a1e78bcae39d6be7a
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/all-gists/2604992/snippet.py
|
d1ee8b056d79d916da3e98173cd2729fc7778229
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398
| 2023-02-11T18:20:10
| 2023-02-11T18:20:10
| 119,861,038
| 76
| 19
| null | 2020-07-26T03:14:55
| 2018-02-01T16:19:24
|
Python
|
UTF-8
|
Python
| false
| false
| 485
|
py
|
# Print each permutation.
def perm(l, n, str_a):
if len(str_a) == n:
print str_a
else:
for c in l:
perm(l, n, str_a+c)
# Return a list of permutations.
def perm2(l, n, str_a, perm_a):
if len(str_a) == n:
return [str_a] + perm_a
else:
new_perm_a = perm_a
for c in l:
new_perm_a = perm2(l, n, str_a+c, new_perm_a)
return new_perm_a
perm(['a','b','c'], 3, "")
print perm2(['a','b','c'], 3, "", [])
|
[
"gistshub@gmail.com"
] |
gistshub@gmail.com
|
19a76e26092ec389793026a5b6b3a32e09ff7ac3
|
ac1d3a1ff9e4ccac37c1ae206f3c12021e56e420
|
/code/figures/MBL_xan_titration.py
|
d3cb715bf2652b4a6ed9ccd6e0df1e2db8f0fb2c
|
[
"MIT",
"LicenseRef-scancode-proprietary-license",
"CC-BY-4.0"
] |
permissive
|
RPGroup-PBoC/mwc_activation
|
4bcdeb2271047df4c1fe8243de2eade709012d0a
|
6ef3f02a53ecd80877082006ecc4b8fe4204c1d6
|
refs/heads/master
| 2023-07-22T22:09:25.442468
| 2020-02-27T18:59:17
| 2020-02-27T18:59:17
| 167,025,404
| 0
| 0
|
MIT
| 2023-07-06T21:20:04
| 2019-01-22T16:16:05
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,692
|
py
|
#%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import joypy
import glob
import sys
import scipy.stats
sys.path.insert(0, '../../')
import act.viz
import act.flow
colors = act.viz.pub_style()
# %%
# Parse all of hte files.
files = glob.glob('../../data/flow/csv/*.csv')
dfs = []
for f in files:
if '2018' in f:
date, _, strain, conc = f.split('/')[-1].split('_')
conc = float(conc.split('mg')[0]) / 1000
promoter = 'wt'
elif '_XAN' in f:
date = '20180628'
strain, conc, _ = f.split('/')[-1].split('_')
conc = float(conc.split('mg')[0])
promoter = 'wt'
elif '27yfp' in f:
date, _, _, strain, _, conc = f.split('/')[-1].split('_')
if strain == 'dilution':
strain = 'delAB'
elif strain == 'delta':
strain = 'delAB_delta'
elif strain == 'auto':
strain = 'delAB_auto'
conc = float(conc.split('mg')[0])
promoter = 'wt'
elif '28yfp' in f:
date, _, _, strain, _, conc = f.split('/')[-1].split('_')
if strain == 'dilution':
strain = 'delAB'
elif strain == 'delta':
strain = 'delAB_delta'
elif strain == 'auto':
strain = 'delAB_auto'
conc = float(conc.split('mg')[0])
promoter = 'proximal'
_df = pd.read_csv(f)
gated = act.flow.gaussian_gate(_df, 0.4)
gated = gated[gated['gate']==1]
keys = _df.keys()
if 'GFP-A' in keys:
intensity = _df['GFP-A']
elif 'GFP' in keys:
intensity = _df['GFP']
elif 'FITC-H' in keys:
intensity = _df['FITC-H']
_df['intensity'] = np.log(intensity)
_df['date'] = date
_df['xan_mgml'] = conc
_df['strain'] = strain.lower()
_df['promoter'] = promoter
_df = _df[['intensity', 'date', 'xan_mgml', 'strain', 'promoter']]
dfs.append(_df)
intensity_df = pd.concat(dfs, sort=False)
# %%
wt = intensity_df[(intensity_df['strain']=='wt') & (intensity_df['promoter']=='wt') & (intensity_df['intensity'] > -2) & (intensity_df['intensity'] < 10)]
delta = intensity_df[(intensity_df['strain']=='delta') & (intensity_df['promoter']=='wt') & (intensity_df['intensity'] > -2) & (intensity_df['intensity'] < 10)]
delAB_wt = intensity_df[(intensity_df['date']=='20190205') & (intensity_df['strain']=='delab') & (intensity_df['promoter']=='wt') & (intensity_df['intensity'] > 5) & (intensity_df['intensity'] < 12)]
delAB_prox = intensity_df[(intensity_df['date']=='20190205') & (intensity_df['strain']=='delab') & (intensity_df['promoter']=='proximal')]
delAB_wt_delta = intensity_df[(intensity_df['strain']=='delab_delta') & (intensity_df['promoter']=='wt') & (intensity_df['intensity'] > -5) & (intensity_df['intensity'] < 15)]
delAB_prox_delta = intensity_df[(intensity_df['strain']=='delab_delta') & (intensity_df['promoter']=='proximal')]
# Rescale all of the delAB properly.
delAB_wt_delta_mean = delAB_wt_delta['intensity'].mean()
delAB_prox_delta_mean = delAB_prox_delta['intensity'].mean()
delAB_wt['norm_int'] = delAB_wt['intensity'].values / delAB_wt_delta_mean
delAB_prox['norm_int'] = delAB_prox['intensity'].values / delAB_prox_delta_mean
# %%
_ = joypy.joyplot(delta, column='intensity', by='xan_mgml', color=colors[1],
figsize=(3, 4))
plt.savefig('../../figs/delta_xan_titration.pdf', bbox_inches='tight')
#%%
_ = joypy.joyplot(wt, column='intensity', by='xan_mgml', color=colors[0],
figsize=(3, 4))
plt.savefig('../../figs/wt_xan_titration.pdf', bbox_inches='tight')
#%%
_ = joypy.joyplot(delAB_wt, column='intensity', by='xan_mgml', color=colors[2],
figsize=(3, 4))
plt.savefig('../../figs/delAB_xan_titration.pdf', bbox_inches='tight')
# %%
# Isolate the two concentrations
wt = delAB_wt[(delAB_wt['xan_mgml']==0) | (delAB_wt['xan_mgml']==4)]
prox = delAB_prox[(delAB_prox['xan_mgml']==0) | (delAB_prox['xan_mgml']==4)]
fig, ax = plt.subplots(1, 1, figsize=(5.5, 2))
_ = joypy.joyplot(wt, by='xan_mgml', column='norm_int', color=colors[1], ax=ax)
_ = joypy.joyplot(prox, by='xan_mgml', column='norm_int', ax=ax, color=colors[0])
#%%
fig, ax = plt.subplots(1, 2, figsize=(6, 2), sharex=True)
for a in ax:
a.tick_params(labelsize=8)
a.tick_params(labelsize=8)
a.yaxis.set_ticks([])
# Compute the KDEs
x_range = np.linspace(0.5, 2, 500)
wt_low_kde = scipy.stats.gaussian_kde(wt_low['norm_int'])(x_range)
wt_high_kde = scipy.stats.gaussian_kde(wt_high['norm_int'])(x_range)
prox_low_kde = scipy.stats.gaussian_kde(prox_low['norm_int'].dropna())(x_range)
prox_high_kde = scipy.stats.gaussian_kde(prox_high['norm_int'].dropna())(x_range)
ax[0].fill_between(x_range, wt_low_kde, color=colors[1], label='0 mg/mL', alpha=0.25)
ax[0].plot(x_range, wt_low_kde, '-', color=colors[1], lw=1, label='__nolegend__')
ax[0].fill_between(x_range, wt_high_kde, color=colors[0], label='4 mg/mL', alpha=0.25)
ax[0].plot(x_range, wt_high_kde, '-',color=colors[0], lw=1, label='__nolegend__')
ax[1].fill_between(x_range, prox_low_kde, color=colors[1], label='0 mg/mL', alpha=0.25)
ax[1].plot(x_range,prox_low_kde, '-', color=colors[1], lw=1, label='__nolegend__')
ax[1].fill_between(x_range, prox_high_kde, color=colors[0], label='4 mg/mL', alpha=0.25)
ax[1].plot(x_range, prox_high_kde, '--', color=colors[0], lw=1, label='__nolegend__')
ax[1].set_xlabel('fold-change in expression', fontsize=8)
ax[0].set_ylabel('density', fontsize=8)
ax[1].set_ylabel('density', fontsize=8)
for a in ax:
leg = a.legend(title='xanthosine', fontsize=8)
leg.get_title().set_fontsize(8)
plt.savefig('../../figs/wt_prox_comparison.svg', bbox_inches='tight')
# %%
# %%
|
[
"gchure@caltech.edu"
] |
gchure@caltech.edu
|
0a85d36cc1a0d025855da19e08d0826209a0d735
|
508321d683975b2339e5292202f3b7a51bfbe22d
|
/Userset.vim/ftplugin/python/CompletePack/PySide2/QtWidgets/QStyleOption.py
|
c82ecadb7fa2cdc8b2a6f3c10eb0f9bc1b529d32
|
[] |
no_license
|
cundesi/vimSetSa
|
4947d97bcfe89e27fd2727423112bb37aac402e2
|
0d3f9e5724b471ab21aa1199cc3b4676e30f8aab
|
refs/heads/master
| 2020-03-28T05:54:44.721896
| 2018-08-31T07:23:41
| 2018-08-31T07:23:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,232
|
py
|
# encoding: utf-8
# module PySide2.QtWidgets
# from C:\Program Files\Autodesk\Maya2017\Python\lib\site-packages\PySide2\QtWidgets.pyd
# by generator 1.145
# no doc
# imports
import PySide2.QtCore as __PySide2_QtCore
import PySide2.QtGui as __PySide2_QtGui
import Shiboken as __Shiboken
class QStyleOption(__Shiboken.Object):
# no doc
def init(self, *args, **kwargs): # real signature unknown
pass
def initFrom(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
direction = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
fontMetrics = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
palette = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
rect = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
state = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
styleObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
type = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
version = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
OptionType = None # (!) real value is ''
SO_Button = None # (!) real value is ''
SO_ComboBox = None # (!) real value is ''
SO_Complex = None # (!) real value is ''
SO_ComplexCustomBase = None # (!) real value is ''
SO_CustomBase = None # (!) real value is ''
SO_Default = None # (!) real value is ''
SO_DockWidget = None # (!) real value is ''
SO_FocusRect = None # (!) real value is ''
SO_Frame = None # (!) real value is ''
SO_GraphicsItem = None # (!) real value is ''
SO_GroupBox = None # (!) real value is ''
SO_Header = None # (!) real value is ''
SO_MenuItem = None # (!) real value is ''
SO_ProgressBar = None # (!) real value is ''
SO_RubberBand = None # (!) real value is ''
SO_SizeGrip = None # (!) real value is ''
SO_Slider = None # (!) real value is ''
SO_SpinBox = None # (!) real value is ''
SO_Tab = None # (!) real value is ''
SO_TabBarBase = None # (!) real value is ''
SO_TabWidgetFrame = None # (!) real value is ''
SO_TitleBar = None # (!) real value is ''
SO_ToolBar = None # (!) real value is ''
SO_ToolBox = None # (!) real value is ''
SO_ToolButton = None # (!) real value is ''
SO_ViewItem = None # (!) real value is ''
StyleOptionType = None # (!) real value is ''
StyleOptionVersion = None # (!) real value is ''
Type = None # (!) real value is ''
Version = None # (!) real value is ''
|
[
"noreply@github.com"
] |
cundesi.noreply@github.com
|
2d955a76eb32edaf404ea4ca61f1fa9696e17582
|
9852cee063eb438227abf75fa4aa06c0d2d7a23a
|
/hacktivitycon/web/ladybug.py
|
6c45caddae6a0597e627efbc95dec881fc2a2349
|
[] |
no_license
|
arwildo/ctf
|
227a6038b4d068f33134153534942e91ec5f36f8
|
623ac5cf7f177870d837ae298310bbd244a1db56
|
refs/heads/master
| 2021-09-08T16:50:32.890102
| 2021-08-28T04:13:37
| 2021-08-28T04:13:37
| 248,462,299
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
#!/usr/bin/python3
import requests
url = 'http://two.jh2i.com:50018/mail.php'
myobj = {'name': 'fsdkfkjs', 'email': 'dfjskjsf', 'subject': 'dfdfs', 'message': 'dfssf'}
x = requests.post(url, data = myobj)
print(x.text)
|
[
"arwildo@gmail.com"
] |
arwildo@gmail.com
|
534ff2ff2d4c67fe84121d7af419371c58a918e2
|
1078c61f2c6d9fe220117d4c0fbbb09f1a67f84c
|
/paws/lib/python2.7/site-packages/euca2ools-3.4.1_2_g6b3f62f2-py2.7.egg/EGG-INFO/scripts/eulb-attach-lb-to-subnets
|
fcf3618ac8aa00e3864a9b452656c3acae5f986c
|
[
"MIT"
] |
permissive
|
cirobessa/receitas-aws
|
c21cc5aa95f3e8befb95e49028bf3ffab666015c
|
b4f496050f951c6ae0c5fa12e132c39315deb493
|
refs/heads/master
| 2021-05-18T06:50:34.798771
| 2020-03-31T02:59:47
| 2020-03-31T02:59:47
| 251,164,945
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
#!/media/ciro/LOCALDRV/A_DESENVOLVIMENTO/AWS/receitas/paws/bin/python -tt
import euca2ools.commands.elasticloadbalancing.attachloadbalancertosubnets
if __name__ == '__main__':
euca2ools.commands.elasticloadbalancing.attachloadbalancertosubnets.AttachLoadBalancerToSubnets.run()
|
[
"cirobessa@yahoo.com"
] |
cirobessa@yahoo.com
|
|
310fbda3e7edaa5707602795633ce2c70e3ef932
|
75cd5cb3ad898548b3be1863de233c2ad01334d2
|
/python/Drone_utils.py
|
42cee4cff8311d96cd44869e30d0f3e7d7763e01
|
[] |
no_license
|
bacisback/tfm
|
25e6fa61c202b679e64c10f18062717539a4432d
|
626ef8065ea150de882442f523c9fbb0af5c8428
|
refs/heads/master
| 2023-07-17T06:22:03.869401
| 2021-09-03T11:18:12
| 2021-09-03T11:18:12
| 349,205,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,989
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 29 17:40:46 2021
@author: e321075
"""
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
import pandas as pd
import cv2
import scipy.misc
from skimage.transform import resize
import os
from collections import namedtuple
import re
#############################
# global variables #
#############################
label_translation= {"unlabeled":0,
1:1,#road
"paved-area":2, #sidewalk
"roof":3,#building
"wall":3, #wall
"door": 3,
"window": 4,
"fence": 4,
5:3,#billboard
"fence-pole":6, #pole
7:7, #trafic light
8:5, #trafic sign
"dirt":8, #vegetation
"grass":8,#terrain
"water":8,#terrain
"rocks":8,#terrain
"pool":8,#terrain
"vegetation":8,#terrain
"tree":8,
"bald-tree":8,
11:9,#sky
"person":10,# pedestrian
13:10,# rider
"car":11, #car
15:12, #truck
16:12, #bus
17:12, #train
18:11, #moto
"bicycle":11, #bike
20:1, #roadmarks
21:0, #unknown
}
root_dir = "./../semantic_drone_dataset/"
training_set = "training_set/"
training_dir = os.path.join(root_dir, training_set)
img_dir = os.path.join(training_dir, "images/")
gt_dir = os.path.join(training_dir, "gt/semantic/")
class_dict = pd.read_csv(os.path.join(gt_dir, "class_dict.csv"))
class_label_dict = {tuple(class_dict.iloc[i,1:].values): class_dict.iloc[i,0] for i in range(len(class_dict))}
label_dir = os.path.join(gt_dir, "label_images/")
np_labels = os.path.join(gt_dir, "np_labels/")
train_label_file = os.path.join(root_dir, "train.csv") # train file
csv_file = open(train_label_file, "w")
csv_file.write("img,label\n")
for idx, img in enumerate(os.listdir(img_dir)):
img_name = os.path.join(img_dir, img)
label_name = os.path.join(label_dir, img[:-3]+"png")
image = Image.open(label_name)
labels = np.asarray(image.convert("RGB"))
height, weight, _ = labels.shape
label = np.zeros((height,weight))
for h in range(height):
for w in range(weight):
try:
label[h,w] = label_translation[class_label_dict[tuple(labels[h,w,:])]]
except:
label[h,w] = 0
label_name = os.path.join(np_labels, img[:-4]+".npy")
np.save(label_name, label)
csv_file.write("{},{}\n".format(img_name, label_name))
csv_file.close()
|
[
"you@example.com"
] |
you@example.com
|
3e6f34481593e6d3a94d3f6cd125b50d1b4b91db
|
b001b44c95f4a7c5574385baa4fe72c5f3d02236
|
/home/migrations/0014_auto_20181104_1611.py
|
4acbda228febaf659b1e14a983a45d508d1d4673
|
[
"MIT"
] |
permissive
|
gjergjk71/Attendence
|
3ae9b9f3cb3c4e0bfe2addf4124b7612a78f1533
|
9e9370125bfc4958de02171a3ae4c8e16bf10913
|
refs/heads/master
| 2020-04-14T08:46:58.649674
| 2019-01-01T14:27:29
| 2019-01-01T14:27:29
| 163,743,067
| 0
| 0
|
MIT
| 2019-01-01T14:34:28
| 2019-01-01T14:34:28
| null |
UTF-8
|
Python
| false
| false
| 2,087
|
py
|
# Generated by Django 2.1.3 on 2018-11-04 10:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('home', '0013_auto_20181104_1610'),
]
operations = [
migrations.AlterField(
model_name='semester_1',
name='professerr_name',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='home.Facultys'),
),
migrations.AlterField(
model_name='semester_2',
name='professerr_name',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='home.Facultys'),
),
migrations.AlterField(
model_name='semester_3',
name='professerr_name',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='home.Facultys'),
),
migrations.AlterField(
model_name='semester_4',
name='professerr_name',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='home.Facultys'),
),
migrations.AlterField(
model_name='semester_5',
name='professerr_name',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='home.Facultys'),
),
migrations.AlterField(
model_name='semester_6',
name='professerr_name',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='home.Facultys'),
),
migrations.AlterField(
model_name='semester_7',
name='professerr_name',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='home.Facultys'),
),
migrations.AlterField(
model_name='semester_8',
name='professerr_name',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='home.Facultys'),
),
]
|
[
"201651011@iiitvadodara.ac.in"
] |
201651011@iiitvadodara.ac.in
|
52d152cd936a19bcfaff23e213b40de5abd66f4d
|
e4e4c60ffa509f257afc915d4c6cd32c0cb7098c
|
/.history/app_20200916151418.py
|
ce395406ad7ab0b64c3691b8145d6d189f1fbc90
|
[] |
no_license
|
QianyueMa/Google-Health-Search-Project
|
01dbd597780158f50eebfba2a228b505f8169726
|
6ef6b270dc7ab0826ad4f0338c9cd95d3571e19a
|
refs/heads/master
| 2022-12-19T03:55:10.328167
| 2020-10-02T12:54:27
| 2020-10-02T12:54:27
| 296,495,736
| 0
| 0
| null | 2020-09-18T02:44:12
| 2020-09-18T02:44:11
| null |
UTF-8
|
Python
| false
| false
| 3,775
|
py
|
import numpy as np
import os
import requests
import json
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
import pandas.io.sql as pdsql
from config import pg_user, pg_password, db_name
from flask import Flask, jsonify, render_template, abort, redirect
#################################################
# Database Setup
##################################################
connection_string = f"{pg_user}:{pg_password}@localhost:5432/{db_name}"
engine = create_engine(f'postgresql://{connection_string}')
# checking the table names
engine.table_names()
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Flask Routes
#################################################
@app.route("/")
def home():
return render_template("index.html")
@app.route("/comparison")
def comparison():
return render_template("comparison.html")
@app.route('/searchbyyear')
def searchbyyear():
sqlStatement = """
SELECT year, SUM ("Cancer" + "cardiovascular" + "stroke" + "depression" + "rehab" + "vaccine" + "diarrhea" + "obesity" + "diabetes") AS Searches
FROM search_condition
GROUP BY year
ORDER BY year;
"""
df = pdsql.read_sql(sqlStatement, engine)
df.set_index('year', inplace=True)
df = df.to_json(orient='table')
result = json.loads(df)
return jsonify(result)
@app.route('/searchyearandcondition')
def searchyearandcondition():
sqlStatement = """
SELECT year, SUM ("Cancer") AS Cancer,SUM ("cardiovascular") As Cardiovascular,SUM ("stroke") As Stroke,SUM ("depression") As Depression,SUM ("rehab") AS Rehab,SUM ("vaccine") AS Vaccine, SUM ("diarrhea") AS Diarrhea, SUM("obesity") AS Obesity, SUM ("diabetes") AS Diabetes
FROM search_condition
GROUP BY year
ORDER BY year;
"""
df = pdsql.read_sql(sqlStatement, engine)
df.set_index('year', inplace=True)
df = df.to_json(orient='table')
result = json.loads(df)
return jsonify(result)
@app.route('/searchbystate')
def searchbystate():
sqlStatement = """
SELECT l.location, l.latitude, l.longitude, SUM (s."Cancer" + s."cardiovascular" + s."stroke" + s."depression" + s."rehab" + s."vaccine" + s."diarrhea" + s."obesity" + s."diabetes") AS Searches
FROM location l
INNER JOIN search_condition s on s.location_id = l.location_id
GROUP BY l.location, l.latitude, l.longitude
ORDER BY location;
"""
df = pdsql.read_sql(sqlStatement, engine)
df.set_index('location', inplace=True)
df = df.to_json(orient='table')
result = json.loads(df)
return jsonify(result)
@app.route('/bylocationandyear')
def bylocationandyear():
sqlStatement = """
SELECT l.location, l.latitude, l.longitude,s.year, SUM (s."Cancer" + s."cardiovascular" + s."stroke" + s."depression" + s."rehab" + s."vaccine" + s."diarrhea" + s."obesity" + s."diabetes") AS Searches
FROM location l
INNER JOIN search_condition s on s.location_id = l.location_id
GROUP BY l.location, l.latitude, l.longitude,s.year
ORDER BY year;
"""
df = pdsql.read_sql(sqlStatement, engine)
df.set_index('year', inplace=True)
df = df.to_json(orient='table')
result = json.loads(df)
return jsonify(result)
@app.route('/casesleadingdeath')
def casesleadingdeath():
sqlStatement = """
SELECT * FROM leading_causes_of_death;
"""
df = pdsql.read_sql(sqlStatement, engine)
df.set_index('year', inplace=True)
df = df.to_json(orient='table')
result = json.loads(df)
return jsonify(result)
if __name__ == '__main__':
app.run(debug=True)
|
[
"ermiasgelaye@gmail.com"
] |
ermiasgelaye@gmail.com
|
dd3a8b4df0a8549639ca2811e00c55d75a191693
|
1c63089e6efa2e63948075cdfad69ded88b7d40e
|
/symphony/cli/pyinventory/graphql/service_endpoint_role_enum.py
|
cb96e2664531afd15454ef3508c35251b2889523
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
aharonnovo/magma
|
4c305b3b0ea414a0355a7dc8fdcc7ed7e1be436e
|
d96a272ee58fea436cc94990c7ad05c4edc76341
|
refs/heads/master
| 2021-08-01T05:38:35.882598
| 2020-04-02T15:40:13
| 2020-04-02T15:41:55
| 252,531,569
| 0
| 0
|
NOASSERTION
| 2020-04-02T18:11:35
| 2020-04-02T18:11:34
| null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
#!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from enum import Enum
class ServiceEndpointRole(Enum):
CONSUMER = "CONSUMER"
PROVIDER = "PROVIDER"
MISSING_ENUM = ""
@classmethod
def _missing_(cls, value: str) -> "ServiceEndpointRole":
return cls.MISSING_ENUM
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
0deb5d5658c60f9a95c30297977be47460475bd9
|
f4c4546f21046ddfd109a0dd34005ac4872f123d
|
/oldboy/day2/基础数据类型4.py
|
c5fe2670b3666f8eb90c4ca3640d26fd759070c6
|
[] |
no_license
|
wuzhisheng/Python
|
ebbeacc81881b65d6085b8f4bf10e15a25549ab0
|
84930c2eb5fb0397d546819effef0d879055e2c8
|
refs/heads/master
| 2022-01-17T14:57:08.926481
| 2021-12-31T09:52:01
| 2021-12-31T09:52:01
| 146,844,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,770
|
py
|
''''''
'''
字符串:str
存储少量数据类型
python中:凡是用单/双引号引起来的数据就是字符串。
str = 'wuzhisheng 吴志盛'
str1 = "wuzhisheng 吴志盛"
wu='''
'''
# """ """或''' ''' 保留格式
字符串可以用 + 或 *
+ 将字符串进行拼接
w = 'wu'
z = 'zhi'
# 加可以
print(w + z)
print(w * 3)
切片:
w="wwww"
print(w[::2])
'''
#capitalize()首字母大写,其余小写 **
s1='oldBoy'
s2=s1.capitalize()
print (s2)
#swapcase 大小写转换 **
s1='oldBoy'
print (s1.swapcase())
#title() 对字符串非字母隔开的每个单词首字母大写**
s1='alex oldboy*tai2tian'
print(s1.title())
#upper 全部转换为大写 *****
# lower 全部转换为小写
# s1='oldBoy'
# print (s1.upper())
# #应用场景
# username=input("用户名:")
# password=input("密码:")
# code = 'QweR'.upper()
# your_code=input('验证码').upper()
# if code == your_code:
# if username == 'alex' and password=='123':
# print('登录成功')
# else:
# print('验证失败')
#strip 默认去除字符串前后两端的空格,换行符,制表符*****
#可指定去除某和某些元素 ????
#lstrip() rstrip()
name=' alex '
print(name.strip()) #这种删除只是暂时的
#要永久删除这个字符串中的空白,必须将删除操作的结果存回到变量中
name1=name.strip()
n1=' :aldydtojhgld'
print(n1.strip('ald'))
print(n1.strip(':'))
#startswith endswith *****判断以...开头....结尾
s1='oldBoy'
print(s1.startswith('o'))
print(s1.startswith('old'))
print(s1.startswith('B',3,))
s1.endswith
s1='oldBoyB'
#find 通过字符串元素找索引,找不到返回-1 *****
#index 通过字符串元素找索引,找不到会报错
print (s1.index('B'))
print (s1.index('B',4)) #4查找范围
# print (s1.index('A')) #找不到,会报错
# print (s1.find('A')) #找不到返回-1
i=(s1.index('B'))
print(i,type(i))
#split 分割 str ---> list 默认以空格分隔 *****
#分割后以列表显示
s1='wu zhi sheng'
s2='wu,zhi,sheng'
s3='wuzhisheng'
print (s1.split())
print (s2.split(','))
print(s3.split('w',1)) #切第一个,则会流空格
#replace 替换*****
s1='alex是一个sg,alex确实是,alex'
print(s1.replace('alex','oldboy'))
print(s1.replace('alex','oldboy',2)) #2指定替换个数
print(s1.replace('wu','oldboy'))#不存在,打印原来的,但没报错
#format 三种用法
#1
msg='我们{},今年{},性别{}'.format('wu',25,'男')
print (msg)
#2
msg='我们{0},今年{1},性别{0}'.format('wu',25,'男')
print (msg)
#3
msg='我们{name},今年{age},性别{sex}'.format(name='wu',sex='男',age=25)
print(msg)
#count 查询元素出现的次数
s1='oldBoyB'
print(s1.count('B'))
#len 显示长度
print(len(s1))
name='wuzhi123'
print(name.isalnum()) #判断字符串是否由字母或数字组成
print (name.isalpha()) #判断字符串是否只由字母组成
print (name.isdigit()) #判断字符串是否只由数字组成
#join 将序列中的元素以指定的字符连接生成一个新的字符串
lst = ['alex','alex3714']
ret = '|'.join(lst)
print(ret)
str='-'
seq=("a","b","c")
print (str.join(seq))
#eval 去掉字符串,转换相应数据类型
l = '[1,2,3,4,[5,6,7,8,9]]'
d = "{'a':123,'b':456,'c':789}"
t = '([1,3,5],[5,6,7,8,9],[123,456,789])'
print(type(l))
print(type(eval(l)))
print(eval(l))
print(type(d))
print(type(eval(d)))
print(eval(d))
print(type(t))
print(type(eval(t)))
print(eval(t))
#while:无限循环,强调次数
s1='oldboy'
index=0
while index < len(s1):
print(s1[index])
index+=1
#for 有限循环,遍历存在的对象
'''
for循环可以与else配合,与break和continue
'''
s1='oldboy'
for i in s1:
print(i)
#break
#continue
else:
pass
|
[
"806215829@qq.com"
] |
806215829@qq.com
|
71b4904a308e77ad43f1976e2817ec36814cab43
|
4644eb637a27eb7a546ee0b0887a174f7d4c13d6
|
/old/test/das_memcache_t.py
|
f137479d19a8ab168f7662a474e36ae144de4a5c
|
[] |
no_license
|
dmwm/DAS
|
4c33ce63156bbe7f87703532e04321320fd6d66d
|
a2dddea2c2cb18287d607a3f8da0c6364f9f70d7
|
refs/heads/master
| 2021-08-08T17:40:27.066179
| 2021-05-27T18:47:21
| 2021-05-27T18:47:21
| 4,300,629
| 7
| 5
| null | 2021-05-27T18:47:22
| 2012-05-11T19:07:55
|
Python
|
UTF-8
|
Python
| false
| false
| 3,977
|
py
|
#!/usr/bin/env python
#pylint: disable-msg=C0301,C0103
"""
Unit test for DAS cache module
"""
import unittest
import time
from DAS.utils.utils import genkey
from DAS.utils.das_config import das_readconfig
from DAS.utils.logger import DASLogger
from DAS.core.das_memcache import DASMemcache
try:
# with python 2.5
import hashlib
except:
# prior python 2.5
import md5
class testDASMemcache(unittest.TestCase):
"""
A test class for the DAS cache module
"""
def setUp(self):
"""
set up DAS core module
"""
debug = 0
config = das_readconfig()
logger = DASLogger(verbose=debug, stdout=debug)
config['logger'] = logger
config['verbose'] = debug
self.memcache = DASMemcache(config)
def test_key(self):
"""test DAS cache key generator"""
query = "find site where site=T2_UK"
result = genkey(query)
try:
hash = hashlib.md5()
except:
# prior python 2.5
hash = md5.new()
hash.update(query)
expect = hash.hexdigest()
self.assertEqual(expect, result)
def test_result(self):
"""test DAS memcache result method"""
self.memcache.delete_cache()
query = "find site where site=T2_UK"
expire = 60
expect = [0,1,2,3,4,5,6,7,8,9]
expect = self.memcache.update_cache(query, expect, expire)
expect = [i for i in expect]
result = [i for i in self.memcache.get_from_cache(query)]
result.sort()
self.assertEqual(expect, result)
def test_pagintation(self):
"""test DAS memcache result method with pagination"""
self.memcache.delete_cache()
query = "find site where site=T2_UK"
expire = 60
expect = [0,1,2,3,4,5,6,7,8,9]
expect = self.memcache.update_cache(query, expect, expire)
expect = [i for i in expect]
idx = 1
limit = 3
result = [i for i in self.memcache.get_from_cache(query, idx, limit)]
result.sort()
self.assertEqual(expect[idx:limit+1], result)
def test_sorting(self):
"""test DAS memcache result method with sorting"""
self.memcache.delete_cache()
query = "find site where site=T2_UK"
expire = 60
data = [
{'id':0, 'data':'a', 'run':1},
{'id':1, 'data':'b', 'run':3},
{'id':2, 'data':'c', 'run':2},
]
gen = self.memcache.update_cache(query, data, expire)
res = [i for i in gen]
skey = 'run'
order = 'desc'
result = [i for i in \
self.memcache.get_from_cache(query, skey=skey, order=order)]
expect = [
{'id':1, 'data':'b', 'run':3},
{'id':2, 'data':'c', 'run':2},
{'id':0, 'data':'a', 'run':1},
]
self.assertEqual(expect, result)
skey = 'run'
order = 'asc'
result = [i for i in \
self.memcache.get_from_cache(query, skey=skey, order=order)]
expect = [
{'id':0, 'data':'a', 'run':1},
{'id':2, 'data':'c', 'run':2},
{'id':1, 'data':'b', 'run':3},
]
self.assertEqual(expect, result)
def test_incache(self):
"""test DAS memcache incache method"""
self.memcache.delete_cache()
query = "find site where site=T2_UK"
expire = 1
expect = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
expect = self.memcache.update_cache(query, expect, expire)
expect = [i for i in expect]
result = self.memcache.incache(query)
self.assertEqual(1, result)
time.sleep(2)
result = self.memcache.incache(query)
self.assertEqual(0, result)
#
# main
#
if __name__ == '__main__':
unittest.main()
|
[
"metson@4525493e-7705-40b1-a816-d608a930855b"
] |
metson@4525493e-7705-40b1-a816-d608a930855b
|
c2f66ca6d5d76b2c7657e6035b85941cfe6b9f61
|
db303c68682dfd18965a04026ff14e15c1ba6120
|
/ch04/ans35.py
|
b9577362e4fe622f0c34d0265e8758e7446b7b73
|
[] |
no_license
|
Toshiyana/nlp100v2020
|
1a89f164de0c720da6d42c19b3fa60f8013d662c
|
37d4d208d5d527d163356793b630f36eb7595779
|
refs/heads/master
| 2023-07-15T15:01:28.454515
| 2021-08-21T13:20:03
| 2021-08-21T13:20:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 931
|
py
|
from collections import defaultdict
def parse_mecab(block):
res = []
for line in block.split('\n'):
if line == '':
return res
(surface, attr) = line.split('\t')
attr = attr.split(',')
lineDict = {
'surface': surface,
'base': attr[6],
'pos': attr[0],
'pos1': attr[1]
}
res.append(lineDict)
def extract_words(block):
return [b['base'] + '_' + b['pos'] + '_' + b['pos1'] for b in block]
filename = 'ch04/neko.txt.mecab'
with open(filename, mode='rt', encoding='utf-8') as f:
blocks = f.read().split('EOS\n')
blocks = list(filter(lambda x: x != '', blocks))
blocks = [parse_mecab(block) for block in blocks]
words = [extract_words(block) for block in blocks]
d = defaultdict(int)
for word in words:
for w in word:
d[w] += 1
ans = sorted(d.items(), key=lambda x: x[1], reverse=True)
print(ans)
|
[
"upura0@gmail.com"
] |
upura0@gmail.com
|
fa97372f1c7c1784d5b67306cc72c036f1556e99
|
bb048a319e732cb7c059b5fb877b834aab7525c5
|
/dcbots/bench/sqltypes.py
|
b6544393cc54f81cbb49f1292c8f0a71288a7a8f
|
[] |
no_license
|
dodysw/dodysw-svn
|
9ec38513b7be8ec87d4ae8f6510bc7624e4ce4c0
|
1f784dae610b1052bf4cd96f5d470e56f0a08528
|
refs/heads/master
| 2021-01-22T02:34:00.193636
| 2016-09-10T17:02:22
| 2016-09-10T17:02:22
| 25,316,568
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,340
|
py
|
import time
import MySQLdb as dbm
def measure(f):
start = time.clock()
f()
print time.clock()-start
def init():
global tth_rows, cur
MYSQL_CONNECT_PARAM = dict(host='localhost', user='whatsnew_dcbot', passwd='', db='whatsnew_dcbot')
conn = dbm.connect(**MYSQL_CONNECT_PARAM)
cur = conn.cursor()
sql = "select tth from shares limit 20000"
cur.execute(sql)
tth_rows = cur.fetchall()
def normal():
for (tth,) in tth_rows:
sql = "insert into shares (tth, share_path, filename, extension, size, last_nick, first_found_time, last_found_time, dc_file_type) values (%s,'kucing', 'kucing', 'kcg', 0, 'kucing', 1, 1, 10)"
try:
cur.execute(sql, tth)
except dbm.IntegrityError:
pass
def fast1():
sql = "insert ignore into shares (tth, share_path, filename, extension, size, last_nick, first_found_time, last_found_time, dc_file_type) values (%s,'kucing', 'kucing', 'kcg', 0, 'kucing', 1, 1, 10)"
cur.executemany(sql, [r[0] for r in tth_rows])
def try1():
for (tth,) in tth_rows:
if cur.execute("select 1 from shares where tth=%s", tth) == 0:
sql = "insert into shares (tth, share_path, filename, extension, size, last_nick, first_found_time, last_found_time, dc_file_type) values (%s,'kucing', 'kucing', 'kcg', 0, 'kucing', 1, 1, 10)"
cur.execute(sql, tth)
def try2():
# create heap table
cur.execute("create temporary table tth_list (tth char(39)) engine=MEMORY")
# populate the heap
cur.executemany("insert into tth_list (tth) values (%s)", [r[0] for r in tth_rows])
# join with shares, and get list of new tth
if cur.execute("select tth_list.tth from tth_list left join shares on tth_list.tth = shares.tth where shares.tth is null"):
print 'at least 1 is new'
sql = "insert into shares (tth, share_path, filename, extension, size, last_nick, first_found_time, last_found_time, dc_file_type) values (%s,'kucing', 'kucing', 'kcg', 0, 'kucing', 1, 1, 10)"
cur.executemany(sql, [r[0] for r in cur])
else:
print 'all tth are not new'
#~ cur.execute("drop table tth_new")
if __name__ == '__main__':
init()
measure(normal)
measure(fast1)
measure(try1)
measure(try2)
|
[
"dody@cryptolab.net"
] |
dody@cryptolab.net
|
55c0387190029797fc40a4a5795409b09aa8c92e
|
f98de2db6b24d30d64f1145c7d8da4a40385a87f
|
/packages/grid_control/backends/aspect_cancel.py
|
5b86166d12d61c4cbe5cccbb139d0d6dfe99df5a
|
[] |
no_license
|
greyxray/grid-control
|
f9f453491fe7bc506d4cfc240afaa364ba9db84b
|
ed10fdb6ff604006a5d52dcd43c2e55c9e962c0a
|
refs/heads/master
| 2020-04-15T13:15:21.103357
| 2019-01-08T18:23:07
| 2019-01-08T18:23:07
| 164,709,043
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,028
|
py
|
# | Copyright 2016 Karlsruhe Institute of Technology
# |
# | Licensed under the Apache License, Version 2.0 (the "License");
# | you may not use this file except in compliance with the License.
# | You may obtain a copy of the License at
# |
# | http://www.apache.org/licenses/LICENSE-2.0
# |
# | Unless required by applicable law or agreed to in writing, software
# | distributed under the License is distributed on an "AS IS" BASIS,
# | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# | See the License for the specific language governing permissions and
# | limitations under the License.
import time
from grid_control import utils
from grid_control.backends.backend_tools import BackendExecutor, ProcessCreatorAppendArguments
from grid_control.utils.activity import Activity
from hpfwk import AbstractError
from python_compat import identity, lmap
class CancelJobs(BackendExecutor):
def execute(self, wmsIDs, wmsName): # yields list of (wmsID,)
raise AbstractError
class CancelJobsWithProcess(CancelJobs):
def __init__(self, config, proc_factory):
CancelJobs.__init__(self, config)
self._timeout = config.getTime('cancel timeout', 60, onChange = None)
self._errormsg = 'Job cancel command returned with exit code %(proc_status)s'
self._proc_factory = proc_factory
def _parse(self, wmsIDs, proc): # yield list of (wmsID,)
raise AbstractError
def execute(self, wmsIDs, wmsName):
proc = self._proc_factory.create_proc(wmsIDs)
for result in self._parse(wmsIDs, proc):
if not utils.abort():
yield result
if proc.status(timeout = 0, terminate = True) != 0:
self._handleError(proc)
def _handleError(self, proc):
self._filter_proc_log(proc, self._errormsg)
class CancelJobsWithProcessBlind(CancelJobsWithProcess):
def __init__(self, config, cmd, args = None, fmt = identity, unknownID = None):
proc_factory = ProcessCreatorAppendArguments(config, cmd, args, fmt)
CancelJobsWithProcess.__init__(self, config, proc_factory)
self._blacklist = None
if unknownID is not None:
self._blacklist = [unknownID]
def _parse(self, wmsIDs, proc): # yield list of (wmsID,)
proc.status(self._timeout, terminate = True)
return lmap(lambda wmsID: (wmsID,), wmsIDs)
def _handleError(self, proc):
self._filter_proc_log(proc, self._errormsg, blacklist = self._blacklist, log_empty = False)
class CancelAndPurgeJobs(CancelJobs):
def __init__(self, config, cancel_executor, purge_executor):
CancelJobs.__init__(self, config)
(self._cancel_executor, self._purge_executor) = (cancel_executor, purge_executor)
def setup(self, log):
CancelJobs.setup(self, log)
self._cancel_executor.setup(log)
self._purge_executor.setup(log)
def execute(self, wmsIDs, wmsName): # yields list of (wmsID,)
marked_wmsIDs = lmap(lambda result: result[0], self._cancel_executor.execute(wmsIDs, wmsName))
time.sleep(5)
activity = Activity('Purging jobs')
for result in self._purge_executor.execute(marked_wmsIDs, wmsName):
yield result
activity.finish()
|
[
"stober@cern.ch"
] |
stober@cern.ch
|
6eb50de935128c4b95da982aeecb7b0ac8837c6e
|
f121695e2dff353607fa47fb42482470e03bbf8a
|
/capitulo_05-Instrucoes_if/hello_admin.py
|
498fd1d357d23865008f7fd5357e38db07827afc
|
[] |
no_license
|
ranog/python_work
|
76cbcf784c86fae4482be5383223e4b0a34f4130
|
47c442a90dcf32d5aef70858693a772a3c76a7ac
|
refs/heads/master
| 2022-12-22T11:02:26.482059
| 2021-04-17T01:12:22
| 2021-04-17T01:12:22
| 233,634,221
| 2
| 1
| null | 2022-12-08T07:38:43
| 2020-01-13T15:58:46
|
Python
|
UTF-8
|
Python
| false
| false
| 4,523
|
py
|
#! /usr/bin/env python3
"""
NOME
hello_admin.py - FAÇA VOCÊ MESMO
SINOPSES
chmod +x hello_admin.py
./hello_admin.py
- Precisamos encontrar alguns usuários!
- Breno: Usuário está disponível.
- Felipe: Usuário está disponível.
- João: Usuário está disponível.
- ISAAC: Usuário indisponível, fornecer um novo nome por favor.
- eRicK: Usuário indisponível, fornecer um novo nome por favor.
1st
2nd
3rd
4th
5th
6th
7th
8th
9th
DESCRIÇÃO
FAÇA VOCÊ MESMO
5.8 – Olá admin: Crie uma lista com cinco ou mais nomes de usuários,
incluindo o nome 'admin'. Suponha que você esteja escrevendo um código que
exibirá uma saudação a cada usuário depois que eles fizerem login em um site.
Percorra a lista com um laço e mostre uma saudação para cada usuário:
• Se o nome do usuário for 'admin', mostre uma saudação especial, por exemplo,
Olá admin, gostaria de ver um relatório de status?
• Caso contrário, mostre uma saudação genérica, como Olá Eric, obrigado por
fazer login novamente.
5.9 – Sem usuários: Acrescente um teste if em hello_admin.py para garantir
que a lista de usuários não esteja vazia.
• Se a lista estiver vazia, mostre a mensagem Precisamos encontrar alguns
usuários!
• Remova todos os nomes de usuário de sua lista e certifique-se de que a
mensagem correta seja exibida.
5.10 – Verificando nomes de usuários: Faça o seguinte para criar um programa
que simule o modo como os sites garantem que todos tenham um nome de usuário
único.
• Crie uma lista chamada current_users com cinco ou mais nomes de usuários.
• Crie outra lista chamada new_users com cinco nomes de usuários. Garanta
que um ou dois dos novos usuários também estejam na lista current_users.
• Percorra a lista new_users com um laço para ver se cada novo nome de
usuário já foi usado. Em caso afirmativo, mostre uma mensagem informando
que a pessoa deverá fornecer um novo nome. Se um nome de usuário não foi
usado, apresente uma mensagem dizendo que o nome do usuário está disponível.
• Certifique-se de que sua comparação não levará em conta as diferenças
entre letras maiúsculas e minúsculas. Se 'John' foi usado, 'JOHN' não deverá
ser aceito.
5.11 – Números ordinais: Números ordinais indicam sua posição em uma lista,
por exemplo, 1st ou 2nd, em inglês. A maioria dos números ordinais nessa
língua termina com th, exceto 1, 2 e 3.
• Armazene os números de 1 a 9 em uma lista.
• Percorra a lista com um laço.
• Use uma cadeia if-elif-else no laço para exibir a terminação apropriada
para cada número ordinal. Sua saída deverá conter "1st 2nd 3rd 4th 5th
6th 7th 8th 9th", e cada resultado deve estar em uma linha separada.
----------------------------------------------------------------------
HISTÓRICO
20202210: João Paulo, outubro de 2020.
- FAÇA VOCÊ MESMO - Pag. 127-128;
- 5.8 – Olá admin - Pag. 127-128.
20202310: João Paulo, outubro de 2020.
- 5.9 – Sem usuários - Pag. 128;
- 5.10 – Verificando nomes de usuários - 128;
- 5.11 – Números ordinais - Pag. 128.
"""
# users = ['maria', 'joão', 'josé', 'joana', 'manoel', 'admin']
users = []
if users:
for user in users:
if user == 'admin':
print("\n- Olá " + user + ", gostaria de ver um relatório de status?")
else:
print("\n- Olá " + user.title() + ", obrigado por fazer login novamente.")
else: print("\n- Precisamos encontrar alguns usuários!")
# XXX na lista current_users tive que colocar tudo em minúscula para a função
# lower() funcionar, não consigo aplicar a função nas duas variáveis.
current_users = ['alexandre', 'eduardo', 'henrique', 'augusto', 'erick', 'isaac']
new_users = ['Breno', 'Felipe', 'João', 'ISAAC', 'eRicK']
for new_user in new_users:
if new_user.lower() in current_users:
print("\n- " + new_user + ": Usuário indisponível, fornecer um novo nome por favor.")
else: print("\n- " + new_user + ": Usuário está disponível.")
numbers = list(range(1, 10))
for number in numbers:
if number == 1: print("\n" + str(number) + "st")
elif number == 2: print(str(number) + "nd")
elif number == 3: print(str(number) + "rd")
else: print(str(number) + "th")
|
[
"jprnogueira@yahoo.com.br"
] |
jprnogueira@yahoo.com.br
|
02e8402c5117c2b27d6c879a64f03f2ff0da8a59
|
1afae73794c6d4874360c7a12e4d2f2dbe62ca56
|
/gilda/tests/test_process.py
|
10e7d813d8f9ab94695884c5b91a62d83ae79e63
|
[
"BSD-2-Clause",
"BSD-2-Clause-Views"
] |
permissive
|
steppi/gilda
|
ce2dc1d1d56363c6543b6076a8605d32baca0f60
|
4927469e5f9a4ca20a056f617c126fe6a4bf3b34
|
refs/heads/master
| 2021-11-19T12:52:13.465346
| 2021-09-25T00:40:17
| 2021-09-25T00:40:17
| 194,151,959
| 0
| 0
| null | 2019-06-27T19:28:27
| 2019-06-27T19:28:26
| null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
from gilda.process import depluralize, replace_greek_spelled_out
def test_depluralize():
assert depluralize('BRAF') == ('BRAF', 'non_plural')
assert depluralize('apoptosis') == ('apoptosis', 'non_plural')
assert depluralize('mosquitoes') == ('mosquito', 'plural_oes')
assert depluralize('antibodies') == ('antibody', 'plural_ies')
assert depluralize('branches') == ('branch', 'plural_es')
assert depluralize('CDs') == ('CD', 'plural_caps_s')
assert depluralize('receptors') == ('receptor', 'plural_s')
def test_greek():
assert replace_greek_spelled_out('interferon-γ') == \
'interferon-gamma'
|
[
"ben.gyori@gmail.com"
] |
ben.gyori@gmail.com
|
f5a4068248717cdecf969c101345a43b1250ad3f
|
dd5d05ecb3e6752941dd5c72ca955307fca8ec14
|
/d11/d11p2.py
|
0b9004cb76477d36074131ce8fe0ef87a6d26774
|
[] |
no_license
|
jabadia/adventOfCode2020
|
81e7166cb2646f83da10d70bbaf4c31026ce7938
|
587001526b90bc6eed703e273b2d6cbec7e9b151
|
refs/heads/main
| 2023-02-05T15:39:22.123879
| 2020-12-28T08:52:54
| 2020-12-28T08:52:54
| 317,472,068
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,140
|
py
|
import time
from collections import defaultdict
from utils.test_case import TestCase
from d11_input import INPUT
TEST_CASES = [
TestCase("""
L.LL.LL.LL
LLLLLLL.LL
L.L.L..L..
LLLL.LL.LL
L.LL.LL.LL
L.LLLLL.LL
..L.L.....
LLLLLLLLLL
L.LLLLLL.L
L.LLLLL.LL
""", 26),
]
def find_nearest_from(ferry, pos, delta):
rows = len(ferry)
cols = len(ferry[0])
neighbour = (pos[0] + delta[0], pos[1] + delta[1])
while 0 <= neighbour[0] < rows and 0 <= neighbour[1] < cols and ferry[neighbour[0]][neighbour[1]] == '.':
neighbour = (neighbour[0] + delta[0], neighbour[1] + delta[1])
if 0 <= neighbour[0] < rows and 0 <= neighbour[1] < cols:
return neighbour
else:
return None
def find_neighbours(ferry):
neighbours = {}
rows = len(ferry)
cols = len(ferry[0])
for row in range(rows):
for col in range(cols):
# search for nearest neighbour in each direction
if ferry[row][col] == '.':
continue
key = (row, col)
neighbours[key] = list(filter(None, [
find_nearest_from(ferry, (row, col), delta)
for delta in [(1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1), (0, -1), (1, -1)]
]))
return neighbours
def visible_seats(ferry, neighbours, row, col):
return sum(1 for i, j in neighbours[(row, col)] if ferry[i][j] == '#')
def next_generation(ferry, neighbours):
rows = len(ferry)
cols = len(ferry[0])
next_ferry = [['.'] * cols for _ in range(rows)]
for row in range(rows):
for col in range(cols):
if ferry[row][col] == 'L':
next_ferry[row][col] = '#' if visible_seats(ferry, neighbours, row, col) == 0 else 'L'
elif ferry[row][col] == '#':
next_ferry[row][col] = '#' if visible_seats(ferry, neighbours, row, col) < 5 else 'L'
return next_ferry
def test_visible(ferry, row, col, expected_visible):
neighbours = find_neighbours(ferry)
actual_visible = visible_seats(ferry, neighbours, row, col)
assert actual_visible == expected_visible
print('ok')
test_visible([
".......#.",
"...#.....",
".#.......",
".........",
"..#L....#",
"....#....",
".........",
"#........",
"...#.....",
], 4, 3, 8)
test_visible([
".............",
".L.L.#.#.#.#.",
".............",
], 1, 1, 0)
test_visible([
".##.##.",
"#.#.#.#",
"##...##",
"...L...",
"##...##",
"#.#.#.#",
".##.##.",
], 3, 3, 0)
def solve(input):
    """Simulate the part-2 seating rules until the layout stabilises.

    Parses *input* (newline-separated grid of '.', 'L', '#') and returns
    the number of occupied seats once a generation reproduces itself.
    """
    ferry = [list(row) for row in input.strip().split('\n')]
    # Visibility relations depend only on the seat layout, never on
    # occupancy, so they are computed once up front.
    neighbours = find_neighbours(ferry)
    while True:
        next_ferry = next_generation(ferry, neighbours)
        if next_ferry == ferry:
            return sum(row.count('#') for row in ferry)
        ferry = next_ferry
if __name__ == '__main__':
    # Validate against the sample grids first (TEST_CASES is defined
    # earlier in the file), then time the run on the real puzzle input.
    for case in TEST_CASES:
        result = solve(case.case)
        case.check(result)
    t0 = time.time()
    print(solve(INPUT))
    t1 = time.time()
    print(f"{(t1 - t0) * 1000:0.1f} ms")
|
[
"javi.abadia@gmail.com"
] |
javi.abadia@gmail.com
|
5e365d856993c7130f5b9560833669d748b12ddf
|
d60ee49abaee6c74c5b777f8f112a7f75f71f029
|
/genome/variants2/active_driver/convert_ptm.py
|
8bc23a1001aa4e3e7c10a5588f7623f8747323f3
|
[] |
no_license
|
ak352/melanomics
|
41530f623b4bfdbd5c7b952debcb47622d1a8e88
|
fc5e6fdb1499616fb25a8dc05259add8a65aeca0
|
refs/heads/master
| 2020-12-24T16:14:42.271416
| 2015-08-06T12:48:52
| 2015-08-06T12:48:52
| 18,439,919
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,484
|
py
|
import sys
import re
def get_genes(gene_file):
    """Build a transcript-id -> gene-name map from a tab-separated source.

    *gene_file* is any iterable of lines whose first column is a
    transcript id and whose second column is the gene name.
    """
    mapping = {}
    for line in gene_file:
        fields = line.strip('\n').split('\t')
        mapping[fields[0]] = fields[1]
    return mapping
def get_gene_intersection(data_file, gene_list, out_file):
    """Rewrite *data_file* with transcript ids translated to gene names.

    Reads the transcript->gene map from *gene_list*, copies the header
    line of *data_file* unchanged, and writes each remaining row to
    *out_file* with its first column replaced by the gene name. Rows
    whose transcript is unknown are written unchanged and logged to
    ``out_file + ".log"``.
    """
    logfile = out_file + ".log"
    sys.stderr.write("Input: %s\n"% data_file)
    sys.stderr.write("Output: %s\n"% out_file)
    sys.stderr.write("Logfile: %s\n"% logfile)
    # 'with' guarantees every handle is closed even on error; the
    # original code leaked the log handle entirely and leaked all four
    # on any exception.
    with open(data_file, 'r') as data, \
         open(gene_list, 'r') as g, \
         open(out_file, 'w') as out, \
         open(logfile, 'w') as log:
        genes = get_genes(g)
        num_not_found = 0
        out.write(next(data))  # header line passes through untouched
        for line in data:
            line = line.strip('\n').split('\t')
            idname = line[0]
            if idname not in genes:
                log.write(idname + " NOT in genes\n")
                num_not_found += 1
            else:
                # NOTE(review): [2:-2] strips two characters from each end
                # of the stored name — presumably entries look like
                # ["TP53"]; verify against the gene_list file format.
                idname = genes[idname][2:-2]
                line[0] = idname
            line = "\t".join(line)
            out.write(line + "\n")
    sys.stderr.write("Number of transcripts not found in RefSeq = %d\n"% num_not_found)
if __name__ == '__main__':
    # Usage: python script.py myData.txt gene_list.txt gene_intersection.txt
    data = sys.argv[1]    # input table; first column holds transcript ids
    genes = sys.argv[2]   # tab-separated transcript-id / gene-name map
    out = sys.argv[3]     # translated output table (a .log file is also written)
    get_gene_intersection(data, genes, out)
|
[
"ak@uni.fake"
] |
ak@uni.fake
|
bd36975c81170ebc8231899dace68949b8c3af3b
|
c4a57dced2f1ed5fd5bac6de620e993a6250ca97
|
/huaxin/huaxin_ui/ui_ios_xjb_2_0/setting_trade_password_page.py
|
ad3b0792d49495208856a117a3e182156aa9fd78
|
[] |
no_license
|
wanglili1703/firewill
|
f1b287b90afddfe4f31ec063ff0bd5802068be4f
|
1996f4c01b22b9aec3ae1e243d683af626eb76b8
|
refs/heads/master
| 2020-05-24T07:51:12.612678
| 2019-05-17T07:38:08
| 2019-05-17T07:38:08
| 187,169,391
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,762
|
py
|
# coding=utf-8
from _common.page_object import PageObject
import huaxin_ui.ui_ios_xjb_2_0.security_center_page
from _common.xjb_decorator import robot_log
# Appium locator ids for the trade-password settings screen. The strings
# are runtime locators (accessibility ids / xpaths) and must not be
# altered; the Chinese fragments name the on-screen controls
# (e.g. 找回交易密码 = "recover trade password", 修改交易密码 = "change
# trade password", 下一步 = "next step").
FIND_TRADE_PASSWORD = "accId_UIAElement_找回交易密码"
MODIFY_TRADE_PASSWORD = "accId_UIAElement_修改交易密码"
CURRENT_TRADE_PASSWORD = "accId_UIATextField_(tradePwdTextField)"
CURRENT_TRADE_PASSWORD_CONFIRM = "accId_UIAButton_下一步"
SETTING_TRADE_PASSWORD = "accId_UIATextField_(tradePwdTextField)"
SETTING_TRADE_PASSWORD_AGAIN = "accId_UIATextField_(tradePwdTextField)"
SETTING_TRADE_PASSWORD_AGAIN_CONFIRM = "accId_UIAButton_下一步"
# Locators for the ID-card photo flow (从相册选择 = "choose from album",
# 屏幕快照 = "screenshots", 选取 = "pick", 确认 = "confirm",
# 确定提交 = "confirm and submit").
ID_FACE = "accId_UIAButton_(imageButton)"
ID_BACK = "xpath_//android.widget.ImageView[@resource-id='com.shhxzq.xjb:id/pickBack']"
FROM_PHONE_PICTURE = "accId_UIAButton_从相册选择"
RECENTLY = "accId_UIATableCell_屏幕快照"
# ID_FACE_PICTURE = "xpathIOS_UIACollectionCell_/AppiumAUT/UIAApplication/UIAWindow/UIACollectionView/UIACollectionCell[contains(@name, '月')]"
ID_FACE_PICTURE = "axis_IOS_月"
ID_FACE_PICTURE_CONFIRM = "axis_IOS_选取"
# ID_BAC_PICTURE = "xpathIOS_UIACollectionCell_/AppiumAUT/UIAApplication/UIAWindow/UIACollectionView/UIACollectionCell[contains(@name, '月')]"
ID_CONFIRM = "accId_UIAButton_确认"
SETTING_TRADE_PASSWORD_CONFIRM = "accId_UIAButton_确定提交"
SETTING_TRADE_PASSWORD_DONE = "accId_UIAButton_确认"
# Anchor elements checked on page construction; empty means no check.
current_page = []
class SettingTradePasswordPage(PageObject):
    """Page object for the iOS XJB 2.0 trade-password settings screen."""

    def __init__(self, web_driver):
        super(SettingTradePasswordPage, self).__init__(web_driver)
        # Fail fast if the page's anchor elements are missing
        # (current_page is empty, so this is currently a no-op).
        self.elements_exist(*current_page)

    @robot_log
    def modify_trade_password(self, trade_password_old, trade_password_new):
        """Change the trade password from the old value to the new one.

        Returns this page object; navigation to SecurityCenterPage is
        currently disabled (see the commented-out line below).
        """
        self.perform_actions(
            MODIFY_TRADE_PASSWORD,
            CURRENT_TRADE_PASSWORD, trade_password_old,
            # CURRENT_TRADE_PASSWORD_CONFIRM,
            SETTING_TRADE_PASSWORD, trade_password_new,
            SETTING_TRADE_PASSWORD_AGAIN, trade_password_new,
            # SETTING_TRADE_PASSWORD_AGAIN_CONFIRM,
        )
        # page = huaxin_ui.ui_ios_xjb_2_0.security_center_page.SecurityCenterPage(self.web_driver)
        page = self
        return page

    @robot_log
    def find_trade_password(self):
        """Recover a forgotten trade password via ID-card photo upload.

        Walks the photo-pick flow (front side only — the back-side steps
        are commented out) and submits; returns the SecurityCenterPage
        reached afterwards.
        """
        self.perform_actions(
            FIND_TRADE_PASSWORD,
            ID_FACE,
            FROM_PHONE_PICTURE,
            RECENTLY,
            ID_FACE_PICTURE,
            ID_FACE_PICTURE_CONFIRM,
            # ID_BACK,
            # FROM_PHONE_PICTURE,
            # RECENTLY,
            # ID_BAC_PICTURE,
            ID_CONFIRM,
            SETTING_TRADE_PASSWORD_CONFIRM,
            SETTING_TRADE_PASSWORD_DONE,
        )
        page = huaxin_ui.ui_ios_xjb_2_0.security_center_page.SecurityCenterPage(self.web_driver)
        return page
|
[
"wanglili@shhxzq.com"
] |
wanglili@shhxzq.com
|
e01ad7691ea38c36d638bae31a9495c9d187f087
|
cf55e0e3ac8f340b379bb211c9f40a71930a4891
|
/src/app_v1/order_paid.py
|
8561892863f309ffc3634b0fafdf09563c70ae50
|
[] |
no_license
|
jack139/pretty
|
666af96fd412c9fba1b4180dc14b2dcb77ae677d
|
5641341d23191cd3dcc050524cf862c5e1dde2c7
|
refs/heads/master
| 2021-01-19T23:02:25.805841
| 2017-08-24T07:19:56
| 2017-08-24T07:19:56
| 101,262,255
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,894
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import web
import time, json
from config import setting
import app_helper
# Web-side MongoDB handle taken from the shared settings module.
db = setting.db_web

# Endpoint: payment-completed callback
url = ('/app/v1/order_paid')
class handler:
    """web.py handler for the payment-completed callback (Python 2).

    The client POSTs a signed form after paying; the matching recharge
    order is flagged PREPAY and, for Apple IAP orders, an asynchronous
    receipt verification is triggered.
    """

    @app_helper.check_sign(['app_id','dev_id','ver_code','tick','session','order_trade_id','data'])
    def POST(self, version='v1'):
        web.header('Content-Type', 'application/json')
        param = web.input(app_id='', dev_id='', ver_code='', session='', order_trade_id='', data='', tick='')
        # All signed fields are mandatory; '参数错误' = "parameter error".
        if '' in (param.app_id, param.dev_id, param.ver_code, param.order_trade_id, param.session, param.tick):
            return json.dumps({'ret' : -2, 'msg' : '参数错误'})
        # Verify the session is logged in ('无效的session' = "invalid session").
        uname = app_helper.app_logged(param.session)
        if uname is None:
            return json.dumps({'ret' : -4, 'msg' : '无效的session'})
        #--------------------------------------------------
        # Mark the recharge order as PREPAY and append the client-supplied
        # payment payload for later verification.
        r2 = db.order_recharge.find_one_and_update(
            {'recharge_id' : param.order_trade_id}, # actual recharge order id
            {
                '$set' : {'status':'PREPAY'},
                '$push' : {'order_paid_data':param.data},
            },
        )
        # '未找到订单' = "order not found".
        if r2 is None:
            return json.dumps({'ret' : -3, 'msg' : '未找到订单'})
        # For IAP orders, verify the payment payload asynchronously in the
        # background worker.
        if r2['pay_type']=='iap':
            if r2['status']=='DUE': # only push when DUE, to avoid double payment 2017-06-21, gt
                app_helper.event_push_notify('iap', param.data, param.order_trade_id)
            else:
                print 'Error: 可疑重复支付', param.order_trade_id
        ret_data = {
            "order_trade_id" : param.order_trade_id,
            "due" : r2['due'], # amount due, in cents (fen)
            "paid" : r2['due'], # amount actually paid
            "status" : "PENDING", # order state: PAID/PENDING
        }
        # Respond with the (still pending) order summary.
        return json.dumps({
            'ret' : 0,
            'data' : ret_data,
        })
'''
IAP 校验结果
{
u'status': 0,
u'environment': u'Sandbox',
u'receipt': {
u'download_id': 0,
u'adam_id': 0,
u'request_date': u'2017-06-19 03:19:56 Etc/GMT',
u'app_item_id': 0,
u'original_purchase_date_pst': u'2013-08-01 00:00:00 America/Los_Angeles',
u'version_external_identifier': 0,
u'receipt_creation_date': u'2017-06-17 07:27:09 Etc/GMT',
u'in_app': [
{
u'is_trial_period': u'false',
u'purchase_date_pst': u'2017-06-17 00:27:09 America/Los_Angeles',
u'product_id': u'com.006.pay',
u'original_transaction_id': u'1000000307992143',
u'original_purchase_date_pst': u'2017-06-17 00:27:09 America/Los_Angeles',
u'original_purchase_date': u'2017-06-17 07:27:09 Etc/GMT',
u'original_purchase_date_ms': u'1497684429000',
u'purchase_date': u'2017-06-17 07:27:09 Etc/GMT',
u'purchase_date_ms': u'1497684429000',
u'transaction_id': u'1000000307992143',
u'quantity': u'1'
}
],
u'original_purchase_date_ms': u'1375340400000',
u'original_application_version': u'1.0',
u'original_purchase_date': u'2013-08-01 07:00:00 Etc/GMT',
u'request_date_ms': u'1497842396474',
u'bundle_id': u'com.nuoyin.app',
u'receipt_creation_date_pst': u'2017-06-17 00:27:09 America/Los_Angeles',
u'application_version': u'1.0',
u'request_date_pst': u'2017-06-18 20:19:56 America/Los_Angeles',
u'receipt_creation_date_ms': u'1497684429000',
u'receipt_type': u'ProductionSandbox'
}
}
'''
|
[
"gt@f8geek.com"
] |
gt@f8geek.com
|
8eb81c367c3e97cfc7fc8836fe6fe26710691f9e
|
9f2f386a692a6ddeb7670812d1395a0b0009dad9
|
/python/paddle/fluid/tests/unittests/test_tensor_uva.py
|
4af04b8f6d41e2801fb1bba280f648c81bd3c2ae
|
[
"Apache-2.0"
] |
permissive
|
sandyhouse/Paddle
|
2f866bf1993a036564986e5140e69e77674b8ff5
|
86e0b07fe7ee6442ccda0aa234bd690a3be2cffa
|
refs/heads/develop
| 2023-08-16T22:59:28.165742
| 2022-06-03T05:23:39
| 2022-06-03T05:23:39
| 181,423,712
| 0
| 7
|
Apache-2.0
| 2022-08-15T08:46:04
| 2019-04-15T06:15:22
|
C++
|
UTF-8
|
Python
| false
| false
| 2,195
|
py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import unittest
import numpy as np
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph
class TestTensorCopyFrom(unittest.TestCase):
    """Checks that Tensor._uva() moves a CPU tensor to unified virtual addressing."""

    def func_main(self):
        # Only meaningful on CUDA builds; silently a no-op otherwise.
        if paddle.fluid.core.is_compiled_with_cuda():
            place = paddle.CPUPlace()
            np_value = np.random.random(size=[10, 30]).astype('float32')
            tensor = paddle.to_tensor(np_value, place=place)
            tensor._uva()
            # After _uva() the tensor must report a GPU placement.
            self.assertTrue(tensor.place.is_gpu_place())

    def test_main(self):
        # Run once under eager mode and once under legacy dygraph mode.
        with _test_eager_guard():
            self.func_main()
        self.func_main()
class TestUVATensorFromNumpy(unittest.TestCase):
    """Checks to_uva_tensor across all supported numpy dtypes."""

    def func_uva_tensor_creation(self):
        # Only meaningful on CUDA builds; silently a no-op otherwise.
        if paddle.fluid.core.is_compiled_with_cuda():
            dtype_list = [
                "int32", "int64", "float32", "float64", "float16", "int8",
                "int16", "bool"
            ]
            for dtype in dtype_list:
                data = np.random.randint(10, size=[4, 5]).astype(dtype)
                # The UVA entry point differs between legacy dygraph and
                # eager mode.
                if _in_legacy_dygraph():
                    tensor = paddle.fluid.core.to_uva_tensor(data, 0)
                else:
                    tensor = core.eager.to_uva_tensor(data, 0)
                self.assertTrue(tensor.place.is_gpu_place())
                # Round trip: the UVA tensor must preserve the source values.
                self.assertTrue(np.allclose(tensor.numpy(), data))

    def test_uva_tensor_creation(self):
        # Run once under eager mode and once under legacy dygraph mode.
        with _test_eager_guard():
            self.func_uva_tensor_creation()
        self.func_uva_tensor_creation()
if __name__ == "__main__":
    # Standard unittest entry point.
    unittest.main()
|
[
"noreply@github.com"
] |
sandyhouse.noreply@github.com
|
f47f0e566dc3c61d9cdc0fb73ae153e1f08d47fc
|
6c00499dfe1501294ac56b0d1607fb942aafc2ee
|
/eventregistry/tests/TestERQueryArticle.py
|
bdd55097b88d584354831bd931f97c34064372e3
|
[
"MIT"
] |
permissive
|
EventRegistry/event-registry-python
|
dd692729cb5c505e421d4b771804e712e5b6442b
|
bf3ce144fa61cc195840591bae5ca88b31ca9139
|
refs/heads/master
| 2023-07-06T11:04:41.033864
| 2023-06-23T08:40:31
| 2023-06-23T08:40:31
| 40,995,963
| 176
| 48
|
MIT
| 2020-10-21T09:17:06
| 2015-08-18T20:29:23
|
Python
|
UTF-8
|
Python
| false
| false
| 2,330
|
py
|
import unittest
from eventregistry import *
from eventregistry.tests.DataValidator import DataValidator
class TestQueryArticle(DataValidator):
    """Integration tests for QueryArticle against the live Event Registry API."""

    def createQuery(self):
        """Return a QueryArticle over 10 recent non-TEMP article URIs about Obama."""
        q = QueryArticles(conceptUri = self.er.getConceptUri("Obama"))
        q.setRequestedResult(RequestArticlesUriWgtList(count = 100))
        res = self.er.execQuery(q)
        # URIs arrive weighted ("uri:wgt"); strip weights and drop
        # placeholder TEMP entries before taking the first ten.
        q = QueryArticle([uri for uri in EventRegistry.getUriFromUriWgt(res["uriWgtList"]["results"]) if uri.endswith("TEMP") == False][:10])
        return q

    def testArticleList(self):
        """Fetch 10 articles, re-query them via URL mapping, then via URI."""
        q = self.createQuery()
        q.setRequestedResult(RequestArticleInfo(returnInfo = self.returnInfo))
        res = self.er.execQuery(q)
        self.assertEqual(len(res), 10, "Expected to get a list of 10 articles")
        for article in list(res.values()):
            self.ensureValidArticle(article["info"], "articleList")
        uris = [article.get("info").get("uri") for article in list(res.values())]
        urls = [article.get("info").get("url") for article in list(res.values())]
        uniqueUrls = list(set(urls))
        # Map article URLs back to URIs and verify the mapped set too.
        mapper = ArticleMapper(self.er)
        mappedUris = []
        for url in uniqueUrls:
            # getArticleUri returns a list, so we extend the list of items
            urls = mapper.getArticleUri(url)
            if urls:
                mappedUris.append(urls)
        if mappedUris == []:
            return
        q = QueryArticle.queryByUri(mappedUris)
        q.setRequestedResult(RequestArticleInfo(returnInfo = self.returnInfo))
        res = self.er.execQuery(q)
        for article in list(res.values()):
            # it's possible that the article was removed from ER
            if "error" in article:
                continue
            self.ensureValidArticle(article["info"], "articleList")
        # Finally query directly by the original URIs.
        q = QueryArticle.queryByUri(uris)
        q.setRequestedResult(RequestArticleInfo(returnInfo = self.returnInfo))
        res = self.er.execQuery(q)
        self.assertEqual(len(res), 10, "Expected to get a list of 10 articles when searching by uris")
        for article in list(res.values()):
            self.ensureValidArticle(article["info"], "articleList")
if __name__ == "__main__":
    # Run this suite standalone with verbose output.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestQueryArticle)
    unittest.TextTestRunner(verbosity=3).run(suite)
|
[
"gleban@gmail.com"
] |
gleban@gmail.com
|
0e6eb8dd980dea3dca19b288358cbfacba99fa4e
|
78d35bb7876a3460d4398e1cb3554b06e36c720a
|
/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_manage_models_async.py
|
e4a2f1a079c6b24be6a7fd47b513e0d7ea6e2b56
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
catchsrinivas/azure-sdk-for-python
|
e35f59b60318a31b3c940a7a3a07b61b28118aa5
|
596227a7738a5342274486e30489239d539b11d1
|
refs/heads/main
| 2023-08-27T09:08:07.986249
| 2021-11-11T11:13:35
| 2021-11-11T11:13:35
| 427,045,896
| 0
| 0
|
MIT
| 2021-11-11T15:14:31
| 2021-11-11T15:14:31
| null |
UTF-8
|
Python
| false
| false
| 3,366
|
py
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_manage_models_async.py
DESCRIPTION:
This sample demonstrates how to manage the models on your account. To learn
how to build a model, look at sample_build_model_async.py.
USAGE:
python sample_manage_models_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
3) CONTAINER_SAS_URL - The shared access signature (SAS) Url of your Azure Blob Storage container
"""
import os
import asyncio
async def sample_manage_models_async():
    """Demonstrate listing, inspecting, and deleting Form Recognizer models.

    The [START]/[END] markers delimit snippets embedded in the published
    documentation — keep them in place.
    """
    from azure.core.credentials import AzureKeyCredential
    from azure.core.exceptions import ResourceNotFoundError
    from azure.ai.formrecognizer.aio import DocumentModelAdministrationClient

    endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
    key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
    container_sas_url = os.environ["CONTAINER_SAS_URL"]

    # [START get_account_info_async]
    document_model_admin_client = DocumentModelAdministrationClient(endpoint=endpoint, credential=AzureKeyCredential(key))

    async with document_model_admin_client:
        account_info = await document_model_admin_client.get_account_info()
        print("Our account has {} custom models, and we can have at most {} custom models\n".format(
            account_info.model_count, account_info.model_limit
        ))
        # [END get_account_info_async]

        # Next, we get a paged list of all of our custom models
        # [START list_models_async]
        models = document_model_admin_client.list_models()

        print("We have the following 'ready' models with IDs and descriptions:")
        async for model in models:
            print("{} | {}".format(model.model_id, model.description))
        # [END list_models_async]

        # let's build a model to use for this sample
        poller = await document_model_admin_client.begin_build_model(container_sas_url, description="model for sample")
        model = await poller.result()

        # [START get_model_async]
        my_model = await document_model_admin_client.get_model(model_id=model.model_id)
        print("\nModel ID: {}".format(my_model.model_id))
        print("Description: {}".format(my_model.description))
        print("Model created on: {}".format(my_model.created_on))
        # [END get_model_async]

        # Finally, we will delete this model by ID
        # [START delete_model_async]
        await document_model_admin_client.delete_model(model_id=my_model.model_id)

        try:
            # Fetching a deleted model must raise ResourceNotFoundError.
            await document_model_admin_client.get_model(model_id=my_model.model_id)
        except ResourceNotFoundError:
            print("Successfully deleted model with ID {}".format(my_model.model_id))
        # [END delete_model_async]
async def main():
    """Entry point: run the model-management sample."""
    await sample_manage_models_async()


if __name__ == '__main__':
    # asyncio.run creates and closes the event loop for us; the manual
    # get_event_loop()/run_until_complete pattern is deprecated.
    asyncio.run(main())
|
[
"noreply@github.com"
] |
catchsrinivas.noreply@github.com
|
a182ce756249ca3cacb5023e1cfa2f9c7184bdc4
|
d47436cad4e2b08ee0e58a157a0860f496037df7
|
/sApp/migrations/0001_initial.py
|
8aae841cbabb0695048dac5b91cf44de9cebbf73
|
[] |
no_license
|
rushabhgediya38/Django-BeautifulSoup
|
ff8a9d5514bf0e3b7ecc2e26dadde605c39c9b71
|
11029364c161d6a63f74b17a5aa04b45a43b3140
|
refs/heads/master
| 2023-05-06T10:03:56.474083
| 2021-05-25T13:49:13
| 2021-05-25T13:49:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 707
|
py
|
# Generated by Django 3.2.3 on 2021-05-24 11:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``sData`` table for scraped articles."""

    # First migration of the app — nothing to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='sData',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=400)),
                ('author', models.CharField(max_length=400)),
                ('created', models.DateTimeField()),
                ('modified', models.DateTimeField()),
                ('SITE_URL', models.URLField()),
            ],
        ),
    ]
|
[
"rushabhgediya38@gmail.com"
] |
rushabhgediya38@gmail.com
|
86e188f2e4988ed3ff9dbc179788f4eaa24c9c97
|
f1b86f057b7982163055e36cc97ff1532d3afb46
|
/encode_decode/auto_encode.py
|
e6c13f11080263f3e9ff30b6dd02fad224ede17e
|
[] |
no_license
|
bigboyooo/ner-bilstm-dnn
|
0a29b2268ce48960ee222863673d7fd5785ff54b
|
7afd569dd59706e16007eeb50f2bc2049bc33c80
|
refs/heads/master
| 2020-06-01T22:29:28.019022
| 2018-06-21T07:23:56
| 2018-06-21T07:23:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,158
|
py
|
#coding="utf8"
import os,sys,re,traceback,json
import pandas as pd
import numpy as np
import tensorflow as tf
#import sklearn.preprocessing as prep
#from tensorflow.examples.tutorials.mnist import input_data
import sys
def _path(filepath):
    # Resolve *filepath* relative to CURPATH (module-level base directory,
    # defined elsewhere — not visible in this file's excerpt).
    return os.path.join(CURPATH, filepath)
def xavier_init(fan_in, fan_out, constant=1):
    """Xavier/Glorot uniform initialiser.

    Samples a (fan_in, fan_out) float32 tensor uniformly from
    [-b, b] with b = constant * sqrt(6 / (fan_in + fan_out)).
    """
    bound = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out),
                             minval=-bound, maxval=bound, dtype=tf.float32)
class AdditiveGaussianNoiseAutoEncoder(object):
    """Denoising autoencoder with additive Gaussian input noise.

    Encodes ``x + scale * N(0, 1)`` through one hidden layer and linearly
    reconstructs the clean input; trained on squared reconstruction error.
    The TF session is injected by the caller, which is also responsible
    for running variable initialisation.
    """

    def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus, optimizer=tf.train.AdamOptimizer(), scale=0.1, sess = ""):
        print('\n> cls AdditiveGaussianNoiseAutoEncoder instance')
        """
        !attention this para is used on train
        """
        self.n_samples = 200000
        self.training_epochs = 100
        self.batch_size = 32
        self.display_step = 1
        """
        !attention this para is used on model
        """
        # fix: the original also set self.n_input = 100 above, which the
        # parameter assignment below immediately overwrote.
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        self.scale = tf.placeholder(tf.float32)  # noise level fed at run time
        self.training_scale = scale
        network_weights = self._initialize_weights()
        self.weights = network_weights
        # Graph: corrupted input -> hidden (nonlinear) -> reconstruction (linear).
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.hidden = self.transfer(
            tf.add(
                tf.matmul(
                    self.x + scale * tf.random_normal((self.n_input,)),
                    self.weights['w1']),
                self.weights['b1']
            )
        )
        self.reconstruction = tf.add(
            tf.matmul(self.hidden, self.weights['w2']),
            self.weights['b2']
        )
        # cost = 0.5 * sum((reconstruction - x)^2)
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)
        init = tf.global_variables_initializer()  # NOTE(review): op is created but never run here
        self.sess = sess
        #self.sess = tf.Session()
        #self.sess.run(init)
        #self.save_graph_meta()

    def save_graph_meta(self):
        """Register key tensors in TF collections so a restored graph can find them."""
        tf.add_to_collection('x', self.x)
        tf.add_to_collection('scale', self.scale)
        tf.add_to_collection('cost', self.cost)
        tf.add_to_collection('reconstruction', self.reconstruction)

    def _initialize_weights(self):
        """Create w1 (Xavier), b1/w2/b2 (zeros) and return them in a dict."""
        all_weights = dict()
        all_weights['w1'] = tf.Variable(xavier_init(self.n_input, self.n_hidden))
        all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype = tf.float32))
        all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
        all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
        return all_weights

    def partial_fit(self, X):
        """Run one optimisation step on batch X and return its cost."""
        cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X, self.scale: self.training_scale})
        return cost

    def partial_predict(self, X):
        """Return the reconstruction of X (training-level noise applied)."""
        _y = self.sess.run((self.reconstruction), feed_dict={self.x: X, self.scale: self.training_scale})
        return _y

    def calc_total_cost(self, X):
        """Return the reconstruction cost of X without training."""
        return self.sess.run(self.cost, feed_dict={self.x: X, self.scale: self.training_scale})

    def transform(self, X):
        """Return the hidden-layer representation of X."""
        return self.sess.run(self.hidden, feed_dict={self.x: X, self.scale: self.training_scale})

    def generate(self, hidden=None):
        """Decode *hidden* back to input space; sample a code if omitted."""
        if hidden is None:  # fix: was 'NOne' (NameError)
            # fix: the original passed the b1 Variable itself as numpy's
            # 'size' argument; sample a standard-normal code of
            # hidden-layer width instead. NOTE(review): callers may need
            # shape (1, n_hidden) — confirm against usage.
            hidden = np.random.normal(size=self.n_hidden)
        return self.sess.run(self.reconstruction, feed_dict={self.hidden: hidden})

    def reconstruct(self, X):
        """Encode then decode X in a single run."""
        # fix: original read 'self.reconstrucion' and 'sekf.x' (typos that
        # raised AttributeError/NameError on first call).
        return self.sess.run(self.reconstruction, feed_dict={self.x: X, self.scale: self.training_scale})

    def getWeights(self):
        """Return the current encoder weight matrix w1."""
        return self.sess.run(self.weights['w1'])

    def getBiases(self):
        """Return the current encoder bias vector b1."""
        return self.sess.run(self.weights['b1'])
#mnist = input_data.read_data_sets('MNIST_dat', one_hot = True)
def standard_scale(X_train, X_test):
    """Fit a StandardScaler on X_train and apply it to both sets.

    NOTE(review): relies on module-level ``prep``
    (sklearn.preprocessing), whose import is commented out at the top of
    the file — re-enable it before calling this.
    """
    # fix: the original used the misspelled name 'preprocesor' in both
    # transform calls, raising NameError on first use.
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_test = preprocessor.transform(X_test)
    return X_train, X_test
def get_random_block_from_data(data, batch_size):
    """Return a random contiguous block of *data* of length *batch_size*."""
    # fix: the original assigned 'start_indeix' but sliced with
    # 'start_index', raising NameError on every call.
    start_index = np.random.randint(0, len(data) - batch_size)
    return data[start_index:(start_index + batch_size)]
def generat_batch(data_generate, n):
    # Pull n batches from the generator and stack them into one
    # (batch_size * n, n_input) array.
    # NOTE(review): relies on module-level batch_size and n_input, which
    # are not defined at module scope in this file — confirm before use.
    _lst = []
    for i in range(n):
        _arr = data_generate.__next__()
        _lst.append(_arr)
    _out = np.array(_lst).reshape(batch_size*n, n_input)
    #print(_out.shape)
    assert _out.shape == (batch_size*n,n_input)
    return _out
def gen_w2v():
    # Factory for the word2vec wrapper.
    # NOTE(review): wd2vec is neither imported nor defined in this file —
    # confirm where it comes from before running.
    w2v = wd2vec()
    return w2v
def data_generate(w2v, df):
    # Generator yielding (batch_size, vector_dim) arrays of word vectors,
    # drawn from the corpus lines in random order. Words missing from the
    # vocabulary are skipped. The *df* parameter is unused here.
    # NOTE(review): relies on module-level batch_size — confirm it is
    # defined before use.
    word2vec, model = w2v.spot_vec()
    lines = w2v.load_txt(w2v.txtfilepath, False)
    _random_lst = np.random.permutation(len(lines))
    _arrlst = []
    cnt = 0
    for _id in _random_lst:
        words = lines[_id].split(" ")
        for word in words:
            try:
                _word_arr = word2vec[word] # renturn one (100,) shape array
                _arrlst.append(_word_arr)
            except KeyError:
                #print("\n> the word", word, " is not in the vocab")
                continue
            cnt+=1
            if cnt % batch_size == 0:
                _arr = np.array(_arrlst)
                _arrlst = []
                cnt = 0
                yield _arr
    print("\n> all data read finish")
"""
def main():
#--- get data
w2v = gen_w2v()
dg = data_generate(w2v)
print("\n> print the df for test: ")
print(dg.__next__())
print(dg.__next__().shape)
print("\n> now we going to train the model")
X_train = dg
X_test = data_generate(w2v)
autoencoder = AdditiveGaussianNoiseAutoEncoder(n_input = 8,
n_hidden = 32,
transfer_function = tf.nn.softplus,
optimizer = tf.train.AdamOptimizer(learning_rate = 0.001),
scale = 0.01,
)
saver = tf.train.Saver(max_to_keep=1)
model_save_path = os.path.join(CUR_PATH,'data/auto_encode.ckpt') # 模型保存位置
for epoch in range(training_epochs):
print("\n> this is epoch", epoch)
X_train = dg
X_test = data_generate(w2v)
avg_cost = 0
total_batch = int(n_samples // batch_size)
for i in range(total_batch):
if i % 1000 == 1:
print("\n> Epoch:", '%04d' % (epoch +1), "batch ", '%04d' % (i+1))
if i % 5000 == 1:
_y = autoencoder.partial_predict(batch_xs)
print("\n> Epoch:", '%04d' % (epoch +1), "batch ", '%04d' % (i+1), "_y predict:", _y)
batch_xs = generat_batch(X_train, 10)
cost = autoencoder.partial_fit(batch_xs)
avg_cost += cost / n_samples * batch_size
if epoch % display_step == 0:
save_path = saver.save(autoencoder.sess, model_save_path, global_step=(epoch+1))
print("Epoch:", '%04d' % (epoch +1), "cost=", "{:.9f}".format(avg_cost))
print("Total cost: " + str(autoencoder.calc_total_cost(generat_batch(X_test, 60))))
"""
if __name__ == "__main__":
    # Module is import-only; the training driver above is commented out.
    pass
    print("\n> autoencoder class")
|
[
"qhn614@126.com"
] |
qhn614@126.com
|
0b9f0c6a5191efbdccbdafb1b015451146020a38
|
9cbd523cdedc727f62c887612e8ae2c25c909964
|
/tests/lib/steps/check_TID_027.py
|
83b46b0a4fa54c98997731f5cfb1487b519482f6
|
[] |
no_license
|
louiscklaw/QA_test_scripts
|
8a71d0bed99fae3b0dac4cd9414b3e34dcf5beed
|
58b73594332053272d8dce2c812c93297259c782
|
refs/heads/master
| 2023-01-27T15:48:29.477848
| 2020-12-06T10:05:19
| 2020-12-06T10:05:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
import random
from pprint import pprint
from config import *
from time import sleep
from assert_check_point import assertCheckPoint
import restaurant_manage.password
def run_check(json_metadata, browser, password_to_bruce):
    """Run test case TID_027: restaurant-management login with a password.

    Takes screenshot checkpoints before and after the login attempt
    (assertCheckPoint raises with TEST_ERR_MSG on mismatch) and records
    the result in *json_metadata* on success.
    """
    TEST_ERR_MSG='test failed at TID_027'
    # Checkpoint 1: password prompt is displayed.
    assertCheckPoint(browser, 'TID_027_1', TEST_ERR_MSG)
    restaurant_mgmt_po = restaurant_manage.password.Main(browser)
    restaurant_mgmt_po.inputPassword(password_to_bruce)
    restaurant_mgmt_po.tapLogin()
    # Checkpoint 2: post-login state for this particular password.
    assertCheckPoint(browser, 'TID_027_2_{}'.format(password_to_bruce), TEST_ERR_MSG)
    json_metadata['TID_027'] = 'passed'
|
[
"louiscklaw@gmail.com"
] |
louiscklaw@gmail.com
|
d68475d087c681885a302ce09839ee0363db00a4
|
0122d6ff2fdab185480ca06ba37e743c8e899e26
|
/test/test_api_list_gateway_response.py
|
b41732009e203a02e3f75b9f21c2ae4ff86e5112
|
[] |
no_license
|
jcu-eresearch/pyLorawanServer
|
c0564a4946627f71b1cdba114fe24c0475059f59
|
277b99736194b1f1ae47526c1deaee3f7f88c299
|
refs/heads/master
| 2020-04-03T14:38:03.334371
| 2018-10-30T05:29:27
| 2018-10-30T05:29:27
| 155,328,867
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,090
|
py
|
# coding: utf-8
"""
LoRa App Server REST API
For more information about the usage of the LoRa App Server (REST) API, see [https://docs.loraserver.io/lora-app-server/api/](https://docs.loraserver.io/lora-app-server/api/). # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import lorawan_server
from lorawan_server.models.api_list_gateway_response import ApiListGatewayResponse # noqa: E501
from lorawan_server.rest import ApiException
class TestApiListGatewayResponse(unittest.TestCase):
    """Generated unit-test stubs for the ApiListGatewayResponse model."""

    def setUp(self):
        """No fixtures required for the stub."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testApiListGatewayResponse(self):
        """Placeholder for ApiListGatewayResponse construction.

        FIXME: construct the object with mandatory attributes using
        example values, e.g.
        ``lorawan_client.models.api_list_gateway_response.ApiListGatewayResponse()``.
        """
        pass
if __name__ == '__main__':
    # Standard unittest entry point.
    unittest.main()
|
[
"nigel.blair@gmail.com"
] |
nigel.blair@gmail.com
|
a701b7b61f73127bef8278cfaa98dcf0c501b42d
|
0e4168c4d129b7219cd4048102409ab69c543376
|
/scripts/joinSamples.py
|
4ea8442b37d446e6dad8984cbac3b1c31589947a
|
[] |
no_license
|
saumyaphor4252/PlayWithDatacards
|
867f47486cd51091b7b6ee99bc3f860486266117
|
2b25d49073b76a6c030cdac5577def3024dd219f
|
refs/heads/master
| 2022-03-06T05:44:21.039024
| 2017-10-31T11:08:43
| 2017-10-31T11:08:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 494
|
py
|
#
# Sample grouping for the plotting/datacard machinery: merge the listed
# component samples into one named sample, e.g. merge YY and XX into VV:
#
# joinSamples.update({'VV' : ['YY','XX']})
#
# NOTE(review): 'joinSamples' is provided by the framework that executes
# this configuration snippet; it is not defined in this file.
joinSamples.update({'Higgs' : ['qqH','ggH','ZH','WH']})
joinSamples.update({'VVV-WZ-ZZ-Vg' : ['VV','VVV','Vg','VgS']})
#joinSamples.update({'WW' : ['ggWW','WW']})
joinSamples.update({'Top' : ['TopPt0','TopPt1']})
joinSamples.update({'DY' : ['DYee','DYmm','DYTT']})
#joinSamples.update({'all' : ['VV','VVV','Vg','VgS','ggWW','WW','TopPt0','TopPt1','DYee','DYmm']})
|
[
"massironi.andrea@gmail.com"
] |
massironi.andrea@gmail.com
|
d3fa277d14aebc7a28b2c6ef51d67f48fb198690
|
17e60f61fc82e7369802a1c597b58b0206ad9bec
|
/lib/resIpOSF1.py
|
ed540226dbdb369c1350048d478951dab3b4d1ee
|
[] |
no_license
|
SLB-DeN/opensvc
|
5e06d42947f51662fa16203a00670a88b9e1fea9
|
75baeb19e0d26d5e150e770aef4d615c2327f32e
|
refs/heads/master
| 2021-05-17T05:35:18.585791
| 2020-03-19T15:20:05
| 2020-03-19T15:20:05
| 250,651,667
| 1
| 0
| null | 2020-03-27T21:29:22
| 2020-03-27T21:29:22
| null |
UTF-8
|
Python
| false
| false
| 919
|
py
|
import resIp as Res
import rcExceptions as ex
from rcUtilitiesOSF1 import check_ping
from rcUtilities import to_cidr, to_dotted
class Ip(Res.Ip):
    """OSF1/Tru64 flavour of the generic IP resource.

    Implements address plumbing via ifconfig, whose syntax differs
    between IPv4 (alias/netmask) and IPv6 (CIDR add/delete).
    """

    def check_ping(self, count=1, timeout=5):
        """Return the ping status of self.addr."""
        self.log.info("checking %s availability" % self.addr)
        return check_ping(self.addr, count=count, timeout=timeout)

    def arp_announce(self):
        """Gratuitous ARP announce: not implemented on this platform."""
        return

    def startip_cmd(self):
        """Plumb self.addr on self.ipdev and return the vcall result."""
        if ':' in self.addr:
            cidr_addr = '/'.join([self.addr, to_cidr(self.mask)])
            cmd = ['ifconfig', self.ipdev, 'inet6', cidr_addr, 'add']
        else:
            cmd = ['ifconfig', self.ipdev, 'inet', 'alias', self.addr,
                   'netmask', to_dotted(self.mask)]
        return self.vcall(cmd)

    def stopip_cmd(self):
        """Unplumb self.addr from self.ipdev and return the vcall result."""
        if ':' in self.addr:
            cmd = ['ifconfig', self.ipdev, 'inet6', self.addr, 'delete']
        else:
            cmd = ['ifconfig', self.ipdev, 'inet', '-alias', self.addr]
        return self.vcall(cmd)
|
[
"christophe.varoqui@opensvc.com"
] |
christophe.varoqui@opensvc.com
|
bf5ec6948feea77173ec09f78b4b41b6d0a71eaa
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/built-in/nlp/MT5_ID4146_for_PyTorch/transformers/src/transformers/models/albert/__init__.py
|
0a61f5995a78963a71d334ac03c89ac5cdcdbb54
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 4,368
|
py
|
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...file_utils import (
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
if is_sentencepiece_available():
_import_structure["tokenization_albert"] = ["AlbertTokenizer"]
if is_tokenizers_available():
_import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
if is_torch_available():
_import_structure["modeling_albert"] = [
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
if is_tf_available():
_import_structure["modeling_tf_albert"] = [
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAlbertForMaskedLM",
"TFAlbertForMultipleChoice",
"TFAlbertForPreTraining",
"TFAlbertForQuestionAnswering",
"TFAlbertForSequenceClassification",
"TFAlbertForTokenClassification",
"TFAlbertMainLayer",
"TFAlbertModel",
"TFAlbertPreTrainedModel",
]
if is_flax_available():
_import_structure["modeling_flax_albert"] = [
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxAlbertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
if is_tokenizers_available():
from .tokenization_albert_fast import AlbertTokenizerFast
if is_torch_available():
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
if is_tf_available():
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
if is_flax_available():
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
62af2b892b48fc4ea07ca654b78b182a5488e8be
|
52e814745700b54e4b35e783386ad5f796def1e9
|
/colour/models/rgb/dataset/smptec_rgb.py
|
975aa35b67e90bc77dac406f33783516a44fb3cb
|
[
"BSD-3-Clause"
] |
permissive
|
scoopxyz/colour
|
e9c6502f67ff0774ab77f3c2f622b5973f5a9196
|
b1d82af250122f82919b4c54d06fdf72c069c5af
|
refs/heads/develop
| 2020-12-30T19:57:48.884001
| 2016-12-28T12:42:44
| 2016-12-28T12:42:44
| 68,670,983
| 0
| 0
| null | 2016-09-20T03:38:17
| 2016-09-20T03:38:17
| null |
UTF-8
|
Python
| false
| false
| 2,618
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
SMPTE-C RGB Colourspace
=======================
Defines the *SMPTE-C RGB* colourspace:
- :attr:`SMPTE_C_RGB_COLOURSPACE`.
See Also
--------
`RGB Colourspaces Jupyter Notebook
<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\
blob/master/notebooks/models/rgb.ipynb>`_
References
----------
.. [1] Society of Motion Picture and Television Engineers. (2004). SMPTE C
Color Monitor Colorimetry. In RP 145:2004 (Vol. RP 145:200).
doi:10.5594/S9781614821649
"""
from __future__ import division, unicode_literals
import numpy as np
from functools import partial
from colour.colorimetry import ILLUMINANTS
from colour.models.rgb import (
RGB_Colourspace,
gamma_function,
normalised_primary_matrix)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2016 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'
__all__ = ['SMPTE_C_RGB_PRIMARIES',
'SMPTE_C_RGB_ILLUMINANT',
'SMPTE_C_RGB_WHITEPOINT',
'SMPTE_C_RGB_TO_XYZ_MATRIX',
'XYZ_TO_SMPTE_C_RGB_MATRIX',
'SMPTE_C_RGB_COLOURSPACE']
SMPTE_C_RGB_PRIMARIES = np.array(
[[0.630, 0.340],
[0.310, 0.595],
[0.155, 0.070]])
"""
*SMPTE-C RGB* colourspace primaries.
SMPTE_C_RGB_PRIMARIES : ndarray, (3, 2)
"""
SMPTE_C_RGB_ILLUMINANT = 'D65'
"""
*SMPTE-C RGB* colourspace whitepoint name as illuminant.
SMPTE_C_RGB_ILLUMINANT : unicode
"""
SMPTE_C_RGB_WHITEPOINT = ILLUMINANTS.get(
'CIE 1931 2 Degree Standard Observer').get(SMPTE_C_RGB_ILLUMINANT)
"""
*SMPTE-C RGB* colourspace whitepoint.
SMPTE_C_RGB_WHITEPOINT : ndarray
"""
SMPTE_C_RGB_TO_XYZ_MATRIX = normalised_primary_matrix(
SMPTE_C_RGB_PRIMARIES, SMPTE_C_RGB_WHITEPOINT)
"""
*SMPTE-C RGB* colourspace to *CIE XYZ* tristimulus values matrix.
SMPTE_C_RGB_TO_XYZ_MATRIX : array_like, (3, 3)
"""
XYZ_TO_SMPTE_C_RGB_MATRIX = np.linalg.inv(SMPTE_C_RGB_TO_XYZ_MATRIX)
"""
*CIE XYZ* tristimulus values to *SMPTE-C RGB* colourspace matrix.
XYZ_TO_SMPTE_C_RGB_MATRIX : array_like, (3, 3)
"""
SMPTE_C_RGB_COLOURSPACE = RGB_Colourspace(
'SMPTE-C RGB',
SMPTE_C_RGB_PRIMARIES,
SMPTE_C_RGB_WHITEPOINT,
SMPTE_C_RGB_ILLUMINANT,
SMPTE_C_RGB_TO_XYZ_MATRIX,
XYZ_TO_SMPTE_C_RGB_MATRIX,
partial(gamma_function, exponent=1 / 2.2),
partial(gamma_function, exponent=2.2))
"""
*SMPTE-C RGB* colourspace.
SMPTE_C_RGB_COLOURSPACE : RGB_Colourspace
"""
|
[
"thomas.mansencal@gmail.com"
] |
thomas.mansencal@gmail.com
|
a121ce2c0c6215da8a3ea58cd38153ea6a7642af
|
7a2342efcfb0a9880c30c26e8b45bf954b701ac1
|
/tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py
|
0a6603c79b78a6f9bfe53d502e7a4da5ba17401a
|
[
"Apache-2.0"
] |
permissive
|
danielhardej/tensorflow
|
0012157e89629c19cd7fcdab613ab609b05b0294
|
1c7d02cd21f6182f959ae66d3487ff55daa83f6a
|
refs/heads/master
| 2023-04-15T13:05:46.374708
| 2016-07-13T22:30:26
| 2016-07-13T22:30:26
| 63,285,754
| 2
| 0
|
Apache-2.0
| 2023-04-07T11:36:20
| 2016-07-13T23:15:10
|
C++
|
UTF-8
|
Python
| false
| false
| 2,243
|
py
|
# encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
class CategoricalTest(tf.test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=0,
share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"], ["3", "Male"]
])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
tf.test.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
5466508a5f10a2490145c71a4d00799ff8fa23b2
|
1a59a9076c1e9f1eb98e24ff41a4c1c95e2b353e
|
/xcp2k/classes/_print51.py
|
5960da1d1f2942edcf03cd540cb901434df30894
|
[] |
no_license
|
Roolthasiva/xcp2k
|
66b2f30ebeae1a946b81f71d22f97ea4076e11dc
|
fc3b5885503c6f6dc549efeb4f89f61c8b6b8242
|
refs/heads/master
| 2022-12-23T06:03:14.033521
| 2020-10-07T08:01:48
| 2020-10-07T08:01:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
from xcp2k.inputsection import InputSection
from xcp2k.classes._program_run_info36 import _program_run_info36
class _print51(InputSection):
def __init__(self):
InputSection.__init__(self)
self.PROGRAM_RUN_INFO = _program_run_info36()
self._name = "PRINT"
self._subsections = {'PROGRAM_RUN_INFO': 'PROGRAM_RUN_INFO'}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
5228f70187f2fb73f3f1637fd851a5a7bc4cc9b0
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02850/s373081081.py
|
b381794c0d3494f7275436bc03b2bca28c761d40
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 556
|
py
|
n = int(input())
g = [[] for _ in range(n)]
inv = [0] * n
for i in range(n - 1):
a, b = map(int, input().split())
g[a - 1].append((b - 1, i))
inv[a - 1] += 1
g[b - 1].append((a - 1, i))
inv[b - 1] += 1
k = max(inv)
print(k)
s = [0]
d = [-1] * n
d[0] = [-2]
ans = [0] * (n - 1)
while s:
p = s.pop()
c = 1
for node, idx in g[p]:
if d[node] == -1:
if c == d[p]:
c += 1
d[node] = c
ans[idx] = c
c += 1
s.append(node)
for x in ans:
print(x)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
4f92f5233c910ebb8db129825cec1fe0afd083df
|
0a7711063b30b1566ade3cc07f105292e32fe6d6
|
/fabfile.py
|
07b2332cef37e71892c58c0032d7fa0bfe7a0502
|
[] |
no_license
|
huokedu/dynamic-scrapy
|
e150a1fc6894e39d6bae37c602a592d57cd22c51
|
31a47e9810f2039cfe33653e09d7d03242764723
|
refs/heads/master
| 2021-01-17T21:33:17.810250
| 2013-10-05T17:28:19
| 2013-10-05T17:28:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| true
| false
| 3,512
|
py
|
"""Management utilities."""
from fabric.contrib.console import confirm
from fabric.api import abort, env, local, settings, task
########## GLOBALS
env.run = 'heroku run python manage.py'
HEROKU_ADDONS = (
'cloudamqp:lemur',
'heroku-postgresql:dev',
'scheduler:standard',
'memcachier:dev',
'newrelic:standard',
'pgbackups:auto-month',
'sentry:developer',
)
HEROKU_CONFIGS = (
'DJANGO_SETTINGS_MODULE=scrapy_test.settings.prod',
'SECRET_KEY=to(rkb!6lj3bwbz&qs2go0@)1ctjcx43lm6lerci#s_vpg*%mr'
'AWS_ACCESS_KEY_ID=xxx',
'AWS_SECRET_ACCESS_KEY=xxx',
'AWS_STORAGE_BUCKET_NAME=xxx',
)
########## END GLOBALS
########## HELPERS
def cont(cmd, message):
"""Given a command, ``cmd``, and a message, ``message``, allow a user to
either continue or break execution if errors occur while executing ``cmd``.
:param str cmd: The command to execute on the local system.
:param str message: The message to display to the user on failure.
.. note::
``message`` should be phrased in the form of a question, as if ``cmd``'s
execution fails, we'll ask the user to press 'y' or 'n' to continue or
cancel exeuction, respectively.
Usage::
cont('heroku run ...', "Couldn't complete %s. Continue anyway?" % cmd)
"""
with settings(warn_only=True):
result = local(cmd, capture=True)
if message and result.failed and not confirm(message):
abort('Stopped execution per user request.')
########## END HELPERS
########## DATABASE MANAGEMENT
@task
def syncdb():
"""Run a syncdb."""
local('%(run)s syncdb --noinput' % env)
@task
def migrate(app=None):
"""Apply one (or more) migrations. If no app is specified, fabric will
attempt to run a site-wide migration.
:param str app: Django app name to migrate.
"""
if app:
local('%s migrate %s --noinput' % (env.run, app))
else:
local('%(run)s migrate --noinput' % env)
########## END DATABASE MANAGEMENT
########## FILE MANAGEMENT
@task
def collectstatic():
"""Collect all static files, and copy them to S3 for production usage."""
local('%(run)s collectstatic --noinput' % env)
########## END FILE MANAGEMENT
########## HEROKU MANAGEMENT
@task
def bootstrap():
"""Bootstrap your new application with Heroku, preparing it for a production
deployment. This will:
- Create a new Heroku application.
- Install all ``HEROKU_ADDONS``.
- Sync the database.
- Apply all database migrations.
- Initialize New Relic's monitoring add-on.
"""
cont('heroku create', "Couldn't create the Heroku app, continue anyway?")
for addon in HEROKU_ADDONS:
cont('heroku addons:add %s' % addon,
"Couldn't add %s to your Heroku app, continue anyway?" % addon)
for config in HEROKU_CONFIGS:
cont('heroku config:add %s' % config,
"Couldn't add %s to your Heroku app, continue anyway?" % config)
cont('git push heroku master',
"Couldn't push your application to Heroku, continue anyway?")
syncdb()
migrate()
cont('%(run)s newrelic-admin validate-config - stdout' % env,
"Couldn't initialize New Relic, continue anyway?")
@task
def destroy():
"""Destroy this Heroku application. Wipe it from existance.
.. note::
This really will completely destroy your application. Think twice.
"""
local('heroku apps:destroy')
########## END HEROKU MANAGEMENT
|
[
"scoarescoare@gmail.com"
] |
scoarescoare@gmail.com
|
001ee8c250001a08a7d5f7293b5801b8d90f2704
|
7250ce4b0f8c0bbff563e22243750bd89bc4a1f8
|
/source/interprocedural_analyses/taint/test/integration/via_type_of.py
|
5c5b05667e533cc4c1f9f0187df6d2ea54ec20a3
|
[
"MIT"
] |
permissive
|
geekmc/pyre-check
|
717a02b71b9537852e494507c70b91e7e98a8c22
|
592ad6dee657e48746ed2d352c6f9269b6ff9c61
|
refs/heads/main
| 2023-07-07T15:48:20.915843
| 2021-08-10T03:22:53
| 2021-08-10T03:23:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 923
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import enum
from builtins import _test_sink, _test_source
def return_via_parameter_type(parameter):
return 0
def test_strings():
return return_via_parameter_type("A")
def test_numerals():
return return_via_parameter_type(1)
def test_lists():
return return_via_parameter_type(["a", "b"])
def meta(parameter):
return return_via_parameter_type(parameter)
def test_via_type_of_does_not_propagate():
return meta("Name")
def tito(parameter, other):
pass
def test_tito():
a = tito(_test_source(), [1, 2])
return a
def sink_via_type_of(x, y):
pass
def test_sink(element):
return sink_via_type_of(element, 1)
def test_backwards_tito(parameter):
return tito(parameter, "by_backwards")
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
57213ef2b1e90bae101fffe7e7eb0c0cfa88899b
|
e3f8a3631b05347614645807ec04f834f30d3361
|
/mysite/myapp/urls.py
|
d509527fb94ec17f6719cd9a13ab69163abc3d67
|
[
"MIT"
] |
permissive
|
CSUChico-CINS465/CINS465-S19-Examples
|
0acd7e8c46d6a197e63ff23a5198ba5c93d9cf3d
|
2a9e88e1cc5ec937562211680760243d0f16efcf
|
refs/heads/master
| 2021-11-16T15:06:28.924984
| 2021-06-16T16:33:37
| 2021-06-16T16:33:37
| 166,880,285
| 0
| 1
|
MIT
| 2021-09-22T17:49:06
| 2019-01-21T21:02:57
|
Python
|
UTF-8
|
Python
| false
| false
| 383
|
py
|
from django.urls import path
from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
path('', views.index),
path('login/', auth_views.LoginView.as_view()),
path('logout/', views.logout_view),
path('register/', views.register),
path('comment/<int:sugg>/', views.comment_view),
path('suggestions/', views.suggestions_json),
]
|
[
"javawolfpack@gmail.com"
] |
javawolfpack@gmail.com
|
5650b18a91e0524ae5250eb65573dc05105ed4f4
|
31f1be14dbdc5f6076415dbab63a23a3766d0c84
|
/Slateo-web/Slateo_Admin/urls/adminGradeUrls.py
|
ab8ccf9ce72c7147d4cc5927ea19363ea668c797
|
[] |
no_license
|
kumar109-beep/Examination_Panel
|
4004b328cc5abb26dfaccc0b11fd35112840458a
|
598f70aa970b9ed3a03cc931e0fd39efb5ed84bb
|
refs/heads/main
| 2023-08-10T17:33:48.067626
| 2021-09-25T12:51:22
| 2021-09-25T12:51:22
| 410,267,668
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
from django.urls import path
from ..views.admingradeView import *
urlpatterns = [
path('create-grade' , create_individualGrade, name='individualGrade'),
path('chainedGradeCourses' , chainedGradeCourses, name='chainedGradeCourses'),
path('grade-list' , grade_list, name='grade_list'),
path('edit-grade' , edit_grade, name='edit_grade'),
]
|
[
"rk468335@gmail.com"
] |
rk468335@gmail.com
|
bff37916a2a0abf7e118be05ae0e606e4f32e919
|
31ecba3f8112e91fbf99edf0bb9dd385da93c2ea
|
/Chapters/Unit5probs/findbuckets.py
|
45234cabf24149c5a3d9eba1512cbed7175f33d9
|
[] |
no_license
|
siddeshbb/searchengine
|
7c92ab59e9515725541a9c010ec8efd369121907
|
210a6f52cb12b7c0bc27f43d138fa308942b9fd6
|
refs/heads/master
| 2022-12-22T19:52:52.889791
| 2020-09-28T12:46:44
| 2020-09-28T12:46:44
| 299,305,999
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 874
|
py
|
# Define a procedure, hashtable_get_bucket,
# that takes two inputs - a hashtable, and
# a keyword, and returns the bucket where the
# keyword could occur.
def hashtable_get_bucket(htable,keyword):
return htable[ hash_string( keyword , len( htable ) ) ]
def hash_string(keyword,buckets):
out = 0
for s in keyword:
out = (out + ord(s)) % buckets
return out
def make_hashtable(nbuckets):
table = []
for unused in range(0,nbuckets):
table.append([])
return table
table = [[['Francis', 13], ['Ellis', 11]], [], [['Bill', 17],
['Zoe', 14]], [['Coach', 4]], [['Louis', 29], ['Rochelle', 4], ['Nick', 2]]]
print hashtable_get_bucket(table, "Zoe")
#>>> [['Bill', 17], ['Zoe', 14]]
print hashtable_get_bucket(table, "Brick")
#>>> []
print hashtable_get_bucket(table, "Lilith")
#>>> [['Louis', 29], ['Rochelle', 4], ['Nick', 2]]
|
[
"sbb@vmware.com"
] |
sbb@vmware.com
|
f9ba13ab120ad54b3d31f7b7e3d3fde7dbb94789
|
2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
|
/pardus/tags/2007.1/programming/libs/tcltk/actions.py
|
1d79ab883daf1f4427f3cad73d8694cbc4138091
|
[] |
no_license
|
aligulle1/kuller
|
bda0d59ce8400aa3c7ba9c7e19589f27313492f7
|
7f98de19be27d7a517fe19a37c814748f7e18ba6
|
refs/heads/master
| 2021-01-20T02:22:09.451356
| 2013-07-23T17:57:58
| 2013-07-23T17:57:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,731
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import get
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
WorkDir="tk8.5a5/unix"
def setup():
autotools.configure("--with-encoding=utf-8 \
--enable-threads \
--enable-man-compression=gzip \
--enable-man-symlinks \
--enable-64bit \
--with-x \
--enable-xft")
def build():
autotools.make()
def install():
shelltools.system("make DESTDIR=%s install" % get.installDIR())
# Collect private headers, 3rd party apps like Tile depends on this
shelltools.cd("..")
pisitools.dodir("/usr/include/tk-private/generic")
pisitools.dodir("/usr/include/tk-private/unix")
shelltools.copy("unix/*.h","%s/usr/include/tk-private/unix" % get.installDIR())
shelltools.copy("generic/*.h", "%s/usr/include/tk-private/generic" % get.installDIR())
# Remove duplicated headers
pisitools.remove("/usr/include/tk-private/generic/tk.h")
pisitools.remove("/usr/include/tk-private/generic/tkDecls.h")
pisitools.remove("/usr/include/tk-private/generic/tkPlatDecls.h")
# Remove tmp path from tclConfig.sh
pisitools.dosed("%s/usr/lib/tkConfig.sh" % get.installDIR(),"%s/unix" % get.curDIR() ,"/usr/lib/")
pisitools.dosed("%s/usr/lib/tkConfig.sh" % get.installDIR(),"%s" % get.curDIR() ,"/usr/include/tk-private")
pisitools.dosym("/usr/bin/wish8.5","/usr/bin/wish")
|
[
"yusuf.aydemir@istanbul.com"
] |
yusuf.aydemir@istanbul.com
|
0eb1879c244d2115498463b9733d18e928eb0385
|
3716e91c0a18a2cf0b5807cc673d95a7539b008c
|
/Desert/GoldenMirage.py
|
f563b55791426c7140a315433fe66e9f7577132e
|
[] |
no_license
|
kiwiapple87/CodeCombat-1
|
47f0fa6d75d6d3e9fb9c28feeb6fe2648664c1aa
|
ce0201e5ed099193ca40afd3b7abeee5a3732387
|
refs/heads/master
| 2021-05-01T16:38:03.575842
| 2016-08-25T11:13:26
| 2016-08-25T11:13:26
| 66,552,813
| 1
| 0
| null | 2016-08-25T11:39:20
| 2016-08-25T11:39:18
| null |
UTF-8
|
Python
| false
| false
| 780
|
py
|
# https://codecombat.com/play/level/golden-mirage
# Collect 7 real coins.
# Real coins have a unique value in the each group of coins.
# If a coin has the same value as another coin in the group, then it's a fake.
# Fake coins will transform into venomous creatures to hurt the player!
def Coin(coins):
for coin1 in coins:
count = 0
for coin2 in coins:
if coin1.value == coin2.value:
count = count + 1
if count == 1:
return coin1
while True:
coins = self.findItems()
if coins and len(coins):
# The following code will help you debug:
coin = Coin(coins)
self.say(coin.value);
self.moveXY(coin.pos.x, coin.pos.y);
# When ready, delete the previous code and solve.
|
[
"vadim-job-hg@yandex.ru"
] |
vadim-job-hg@yandex.ru
|
68d1e6d7b2e4f62368c66cc12f92988222360a43
|
b1962b701230e8fe3235676b6a9a659b1ad921ef
|
/app/route/route1/models.py
|
bb9c977cd8cfd4a7d72eeda587a6f38d85ac85ae
|
[] |
no_license
|
volgoweb/wt
|
8e08dc0ff83ac120992bd81c209a420b207df966
|
3a88b8d7d6e1f925b363bfecb94008e14d15943f
|
refs/heads/master
| 2021-01-23T09:29:42.209757
| 2016-02-05T21:24:10
| 2016-02-05T21:24:10
| 42,570,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,063
|
py
|
# -*- coding: utf-8 -*-
"""
Маршрут для форумки.
Контент-менеджер создает инфоповод.
Автор пишет по инфоповоду заголовок и текст.
Контент-менеджер подбирает картинки.
Корректор проверяет и исправляет ошибки.
Контент-менеджер публикует.
"""
import json
import datetime
from django.db import models
from django.core.urlresolvers import reverse
from app.route.models import BaseRoute, BaseStep
from app.task.models import (
Task
)
# TODO вынести одинаковый функционал в отдельные методы
class Step1(BaseStep):
class Meta:
proxy = True
def run(self, **kwargs):
super(Step1, self).run(**kwargs)
task = Task(
title=u'Поиск товара и информирование клиента о сроках отгрузки.',
desc=u"""Проверьте наличие каждого заказанного товара и сообщите клиенту: 1) какие товары есть в наличии; 2) цену каждого товара; 3) срок поставки товаров, которых нет в наличие 4) альтернативу тем товарам, которых нет в наличие. \n Описание заявки клиента: {0}""".format(kwargs['application_desc']),
performer=kwargs['manager'],
step=self,
)
task.save()
self.task = task
self.save()
def end(self, **kwargs):
super(Step1, self).end(**kwargs)
next_step = self.route.get_step(name=Route.STEP_CREATE_ORDER)
next_step.run()
class Step2(BaseStep):
class Meta:
proxy = True
def run(self, **kwargs):
super(Step2, self).run(**kwargs)
task = Task(
title=u'Формирование заказа в 1С',
desc=u'Сформируйте заказ на отгрузку товара, который в наличие.',
performer=self.route.manager,
step=self,
)
task.save()
task = Task(
title=u'Заказать у поставщиков товар, которого нет в наличие',
desc=u'Сформируйте заказ на отгрузку товара, который в наличие.',
performer=self.route.manager,
step=self,
)
task.save()
def end(self, **kwargs):
super(Step2, self).end(**kwargs)
next_step = self.route.get_step(name=Route.STEP_CREATE_ORDER)
next_step.run()
self.save()
class Route(BaseRoute):
STEP_FIRST = 'first'
STEP_CREATE_ORDER = 'create_order'
application_desc = models.CharField(max_length=20000, blank=True, null=True)
manager = models.ForeignKey('account.Account', blank=True, null=True)
# class Meta:
# proxy = True
def save(self, *args, **kwargs):
is_new = False if self.pk else True
super(Route, self).save(*args, **kwargs)
if is_new:
s1 = Step1(
name=self.STEP_FIRST,
route=self,
)
s1.save()
s2 = Step2(
name=self.STEP_CREATE_ORDER,
route=self,
)
s2.save()
# s3 = Step3(
# name=self.STEP_CHECK_BY_CORRECTOR,
# route=self,
# )
# s3.save()
# s4 = Step4(
# name=self.STEP_GIVE_IMAGES,
# route=self,
# )
# s4.save()
# s5 = Step5(
# name=self.STEP_PUBLISH,
# route=self,
# )
# s5.save()
def run(self):
self.article = Article()
self.article.save()
step = self.steps.get(name='S1')
step.run()
|
[
"volgoweb@bk.ru"
] |
volgoweb@bk.ru
|
d7db9754a8b6bbf84b7a4de1c90f1a7ec627a1f8
|
3a9f76cda884152ab083ca713b57570d5d195a79
|
/locations/choices.py
|
7d3c412a3970317af58e06134cf6b1d703bc0ef9
|
[
"MIT"
] |
permissive
|
MahmoudFarid/django-locations
|
9b9790dfef5de0214169e04640e9a4f0ab6c0961
|
af25d0e4492fd3476b6be1c7f1ef8471be13751c
|
refs/heads/master
| 2020-07-17T11:47:38.806163
| 2015-11-14T23:41:25
| 2015-11-14T23:41:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,069
|
py
|
from localflavor.us import us_states
STATE_CHOICES = us_states.US_STATES
# ISO 3166-1 country names and codes adapted from http://opencountrycodes.appspot.com/python/
COUNTRY_CHOICES = (
('GB', 'United Kingdom'),
('AF', 'Afghanistan'),
('AX', 'Aland Islands'),
('AL', 'Albania'),
('DZ', 'Algeria'),
('AS', 'American Samoa'),
('AD', 'Andorra'),
('AO', 'Angola'),
('AI', 'Anguilla'),
('AQ', 'Antarctica'),
('AG', 'Antigua and Barbuda'),
('AR', 'Argentina'),
('AM', 'Armenia'),
('AW', 'Aruba'),
('AU', 'Australia'),
('AT', 'Austria'),
('AZ', 'Azerbaijan'),
('BS', 'Bahamas'),
('BH', 'Bahrain'),
('BD', 'Bangladesh'),
('BB', 'Barbados'),
('BY', 'Belarus'),
('BE', 'Belgium'),
('BZ', 'Belize'),
('BJ', 'Benin'),
('BM', 'Bermuda'),
('BT', 'Bhutan'),
('BO', 'Bolivia'),
('BA', 'Bosnia and Herzegovina'),
('BW', 'Botswana'),
('BV', 'Bouvet Island'),
('BR', 'Brazil'),
('IO', 'British Indian Ocean Territory'),
('BN', 'Brunei Darussalam'),
('BG', 'Bulgaria'),
('BF', 'Burkina Faso'),
('BI', 'Burundi'),
('KH', 'Cambodia'),
('CM', 'Cameroon'),
('CA', 'Canada'),
('CV', 'Cape Verde'),
('KY', 'Cayman Islands'),
('CF', 'Central African Republic'),
('TD', 'Chad'),
('CL', 'Chile'),
('CN', 'China'),
('CX', 'Christmas Island'),
('CC', 'Cocos (Keeling) Islands'),
('CO', 'Colombia'),
('KM', 'Comoros'),
('CG', 'Congo'),
('CD', 'Congo, The Democratic Republic of the'),
('CK', 'Cook Islands'),
('CR', 'Costa Rica'),
('CI', 'Cote d\'Ivoire'),
('HR', 'Croatia'),
('CU', 'Cuba'),
('CY', 'Cyprus'),
('CZ', 'Czech Republic'),
('DK', 'Denmark'),
('DJ', 'Djibouti'),
('DM', 'Dominica'),
('DO', 'Dominican Republic'),
('EC', 'Ecuador'),
('EG', 'Egypt'),
('SV', 'El Salvador'),
('GQ', 'Equatorial Guinea'),
('ER', 'Eritrea'),
('EE', 'Estonia'),
('ET', 'Ethiopia'),
('FK', 'Falkland Islands (Malvinas)'),
('FO', 'Faroe Islands'),
('FJ', 'Fiji'),
('FI', 'Finland'),
('FR', 'France'),
('GF', 'French Guiana'),
('PF', 'French Polynesia'),
('TF', 'French Southern Territories'),
('GA', 'Gabon'),
('GM', 'Gambia'),
('GE', 'Georgia'),
('DE', 'Germany'),
('GH', 'Ghana'),
('GI', 'Gibraltar'),
('GR', 'Greece'),
('GL', 'Greenland'),
('GD', 'Grenada'),
('GP', 'Guadeloupe'),
('GU', 'Guam'),
('GT', 'Guatemala'),
('GG', 'Guernsey'),
('GN', 'Guinea'),
('GW', 'Guinea-Bissau'),
('GY', 'Guyana'),
('HT', 'Haiti'),
('HM', 'Heard Island and McDonald Islands'),
('VA', 'Holy See (Vatican City State)'),
('HN', 'Honduras'),
('HK', 'Hong Kong'),
('HU', 'Hungary'),
('IS', 'Iceland'),
('IN', 'India'),
('ID', 'Indonesia'),
('IR', 'Iran, Islamic Republic of'),
('IQ', 'Iraq'),
('IE', 'Ireland'),
('IM', 'Isle of Man'),
('IL', 'Israel'),
('IT', 'Italy'),
('JM', 'Jamaica'),
('JP', 'Japan'),
('JE', 'Jersey'),
('JO', 'Jordan'),
('KZ', 'Kazakhstan'),
('KE', 'Kenya'),
('KI', 'Kiribati'),
('KP', 'Korea, Democratic People\'s Republic of'),
('KR', 'Korea, Republic of'),
('KW', 'Kuwait'),
('KG', 'Kyrgyzstan'),
('LA', 'Lao People\'s Democratic Republic'),
('LV', 'Latvia'),
('LB', 'Lebanon'),
('LS', 'Lesotho'),
('LR', 'Liberia'),
('LY', 'Libyan Arab Jamahiriya'),
('LI', 'Liechtenstein'),
('LT', 'Lithuania'),
('LU', 'Luxembourg'),
('MO', 'Macao'),
('MK', 'Macedonia, The Former Yugoslav Republic of'),
('MG', 'Madagascar'),
('MW', 'Malawi'),
('MY', 'Malaysia'),
('MV', 'Maldives'),
('ML', 'Mali'),
('MT', 'Malta'),
('MH', 'Marshall Islands'),
('MQ', 'Martinique'),
('MR', 'Mauritania'),
('MU', 'Mauritius'),
('YT', 'Mayotte'),
('MX', 'Mexico'),
('FM', 'Micronesia, Federated States of'),
('MD', 'Moldova'),
('MC', 'Monaco'),
('MN', 'Mongolia'),
('ME', 'Montenegro'),
('MS', 'Montserrat'),
('MA', 'Morocco'),
('MZ', 'Mozambique'),
('MM', 'Myanmar'),
('NA', 'Namibia'),
('NR', 'Nauru'),
('NP', 'Nepal'),
('NL', 'Netherlands'),
('AN', 'Netherlands Antilles'),
('NC', 'New Caledonia'),
('NZ', 'New Zealand'),
('NI', 'Nicaragua'),
('NE', 'Niger'),
('NG', 'Nigeria'),
('NU', 'Niue'),
('NF', 'Norfolk Island'),
('MP', 'Northern Mariana Islands'),
('NO', 'Norway'),
('OM', 'Oman'),
('PK', 'Pakistan'),
('PW', 'Palau'),
('PS', 'Palestinian Territory, Occupied'),
('PA', 'Panama'),
('PG', 'Papua New Guinea'),
('PY', 'Paraguay'),
('PE', 'Peru'),
('PH', 'Philippines'),
('PN', 'Pitcairn'),
('PL', 'Poland'),
('PT', 'Portugal'),
('PR', 'Puerto Rico'),
('QA', 'Qatar'),
('RE', 'Reunion'),
('RO', 'Romania'),
('RU', 'Russian Federation'),
('RW', 'Rwanda'),
('BL', 'Saint Barthelemy'),
('SH', 'Saint Helena'),
('KN', 'Saint Kitts and Nevis'),
('LC', 'Saint Lucia'),
('MF', 'Saint Martin'),
('PM', 'Saint Pierre and Miquelon'),
('VC', 'Saint Vincent and the Grenadines'),
('WS', 'Samoa'),
('SM', 'San Marino'),
('ST', 'Sao Tome and Principe'),
('SA', 'Saudi Arabia'),
('SN', 'Senegal'),
('RS', 'Serbia'),
('SC', 'Seychelles'),
('SL', 'Sierra Leone'),
('SG', 'Singapore'),
('SK', 'Slovakia'),
('SI', 'Slovenia'),
('SB', 'Solomon Islands'),
('SO', 'Somalia'),
('ZA', 'South Africa'),
('GS', 'South Georgia and the South Sandwich Islands'),
('ES', 'Spain'),
('LK', 'Sri Lanka'),
('SD', 'Sudan'),
('SR', 'Suriname'),
('SJ', 'Svalbard and Jan Mayen'),
('SZ', 'Swaziland'),
('SE', 'Sweden'),
('CH', 'Switzerland'),
('SY', 'Syrian Arab Republic'),
('TW', 'Taiwan, Province of China'),
('TJ', 'Tajikistan'),
('TZ', 'Tanzania, United Republic of'),
('TH', 'Thailand'),
('TL', 'Timor-Leste'),
('TG', 'Togo'),
('TK', 'Tokelau'),
('TO', 'Tonga'),
('TT', 'Trinidad and Tobago'),
('TN', 'Tunisia'),
('TR', 'Turkey'),
('TM', 'Turkmenistan'),
('TC', 'Turks and Caicos Islands'),
('TV', 'Tuvalu'),
('UG', 'Uganda'),
('UA', 'Ukraine'),
('AE', 'United Arab Emirates'),
('US', 'United States'),
('UM', 'United States Minor Outlying Islands'),
('UY', 'Uruguay'),
('UZ', 'Uzbekistan'),
('VU', 'Vanuatu'),
('VE', 'Venezuela'),
('VN', 'Viet Nam'),
('VG', 'Virgin Islands, British'),
('VI', 'Virgin Islands, U.S.'),
('WF', 'Wallis and Futuna'),
('EH', 'Western Sahara'),
('YE', 'Yemen'),
('ZM', 'Zambia'),
('ZW', 'Zimbabwe'),
)
|
[
"dstegelman@gmail.com"
] |
dstegelman@gmail.com
|
ccefb9800777cb3c74d7e87d7736daf68f3b4d36
|
0a3da7d8ee3453beec7798a62003014187b714b1
|
/data/ai/99_others/nlp/text-classifier/text-classifier-rule-resume/cv-filter.py
|
2b008e6920d75a57b8bf8f0c003819bffd2f9aba
|
[] |
no_license
|
atfly/atlib
|
635d1b0804af01c090a92048ed09758cb03820bf
|
2c15fa47b2b915c1d998e89bbd69ff24dba52137
|
refs/heads/master
| 2021-05-16T12:51:50.157911
| 2017-11-16T06:50:33
| 2017-11-16T06:50:33
| 105,330,432
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,299
|
py
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
cv_basic_patterns = {"name": ["姓名"], "phone": ["手机", "电话"], "email": ["邮箱", "e-mail"],"age": ["年龄"], "address": ["通讯地址"],"location": ["居住地"], "hukou": ["户口"], "gender": ["性别", "男", "女"]}
cv_edu_patterns = {"university": ["毕业院校", "教育背景"], "major": ["专业"], "degree": ["学历", "大专", "专科", "硕士", "博士", "研究生"]}
cv_job_patterns = {"evaluation": ["个人描述", "自我评价", "个人情况", "兴趣"], "career": ["求职意向", "应聘职位", "求职类型", "职位"], "work": ["工作经历", "工作经验", "工作职责", "工作地点", "工作地区"], "project": ["项目经历", "项目"]}
cv_include_keys = {"cv": ["岗位职责", "任职要求", "任职资格", "能力要求", "基本要求", "职责描述", "岗位要求", "岗位描述", "岗位名称", "职位描述"]}
jd_include_keys = {"jd": ["求职意向", "求职状态", "教育背景", "教育经历"]}
def cvMatchFlow(content):
cv_basic_matches = {}
cv_edu_matches = {}
cv_job_matches = {}
cv_key_matches = {}
jd_key_matches = {}
for k,v in cv_basic_patterns.items():
cv_basic_matches[k]= [content.find(eachv) for eachv in v]
for k,v in cv_edu_patterns.items():
cv_edu_matches[k]= [content.find(eachv) for eachv in v]
for k,v in cv_job_patterns.items():
cv_job_matches[k]= [content.find(eachv) for eachv in v]
for k,v in cv_include_keys.items():
cv_key_matches[k]= [content.find(eachv) for eachv in v]
for k,v in cv_include_keys.items():
jd_key_matches[k]= [content.find(eachv) for eachv in v]
return cv_basic_matches,cv_edu_matches,cv_job_matches,cv_key_matches,jd_key_matches
def cvRecognition(content):
cv_basic_matches,cv_edu_matches,cv_job_matches,cv_key_matches,jd_key_matches=cvMatchFlow(content)
cv_basic_matches.items().
def isNotCV(content):
for key in jd_keys:
if key in content:
return True
return False
def isCV(content):
base_info_match = []
education_info_match = []
job_info_match = []
base_info_list = []
education_info_list = []
job_info_list = []
other_info_list = []
for k, v in cv_patterns.items():
if k == "base_info":
base_info_list = [content.find(eachv) for eachv in v]
elif k == "education_info":
education_info_list = [content.find(eachv) for eachv in v]
elif k == "job_info":
job_info_list = [content.find(eachv) for eachv in v]
else:
pass
base_info_match = [ v for v in base_info_list if v != -1]
education_info_match = [v for v in education_info_list if v != -1]
job_info_match = [v for v in job_info_list if v != -1]
print base_info_match
print job_info_match
print education_info_match
if len(base_info_match) > 0 and len(job_info_match) > 0:
if min(base_info_match) <= min(job_info_match) and min(base_info_match) < len(content)/2:
return True
if len(education_info_match) > 0 and min(education_info_match) < len(content)/2 and min(base_info_match) < min(education_info_match):
return True
for key in cv_include_keys:
if key in content:
return True
return False
if len(job_info_match) > 0 and len(education_info_match) > 0:
for key in cv_include_keys:
if key in content:
return True
if len(base_info_match) >= 2 and len(job_info_match) == 0 and len(education_info_match) > 0:
return True
return False
if __name__ == "__main__":
path = "Sample_2.xlsx"
descPos = 2
data = xlrd.open_workbook(path)
tableSample = data.sheets()[1]
nrows = tableSample.nrows
datav = []
for row in range(nrows):
if row != 0:
datav.append(tableSample.row_values(row)[descPos].lower())
f = open("sample_2_res.txt", "w")
for line in datav:
if nonCVCheck(line):
f.write("other\n")
continue
if isCVCheck(line):
f.write("cv\n")
else:
f.write("other\n")
f.close()
|
[
"atflysun@outlook.com"
] |
atflysun@outlook.com
|
d0d76f63c0475f78d1062683c3de2370e4c53de6
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_152/ch4_2020_09_11_17_51_58_429714.py
|
e3254130811b41145b6e3da216c09d9b3ffa527c
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
def classifica_idade(idade):
if idade >= 18:
return 'adulto'
elif idade >=12 and <= 17:
return 'adolescente'
else:
return 'crianca'
|
[
"you@example.com"
] |
you@example.com
|
074fea474ada199da2341663a3361ad9c806139c
|
1ff9adfdb9d559e6f81ed9470467bab25e93b5ab
|
/src/ta_lib/_vendor/tigerml/core/utils/stats.py
|
dc81eb055a7defec79beb1790a276d8acedfe3f6
|
[] |
no_license
|
Seemant-tiger/housing-price-prediction
|
a39dbefcb11bc460edeeee92e6becf77d35ff3a8
|
be5d8cca769c7e267cfee1932eb82b70c2855bc1
|
refs/heads/main
| 2023-06-24T00:25:49.776720
| 2021-07-18T16:44:28
| 2021-07-18T16:44:28
| 387,222,852
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,242
|
py
|
import itertools
import numpy as np
import pandas as pd
def woe_info_value(target_series, idv_series, target_counts=None):
"""Compute Information Value (IV) from WOE (weight of evidence).
Parameters
----------
target_series: pd.Series of target variable
idv_series: pd.Series of categorical variable
Returns
-------
information value of the categorical feature
"""
if target_counts is None:
target_counts = target_series.value_counts().reset_index()
target_counts.columns = ["target", "target_counts"]
df = pd.DataFrame({"target": target_series.values, "idv": idv_series.values})
col_target_counts = df.groupby(["idv", "target"]).size().reset_index()
col_target_counts.columns = ["idv", "target", "col_target_counts"]
# Handle Zero Event/ Non-Event
# AdjustedWOE = ln(((Number of non-events in a group + 0.5) /
# Number of non-events) /
# ((Number of events in a group + 0.5) / Number of events)))
# https://www.listendata.com/2015/03/weight-of-evidence-woe-and-information.html
exhaustive_combinations = list(
itertools.product(
col_target_counts.idv.unique(), col_target_counts.target.unique()
)
)
exhaustive_combinations_df = pd.DataFrame(
exhaustive_combinations, columns=["idv", "target"]
)
col_target_counts = pd.merge(
exhaustive_combinations_df, col_target_counts, how="outer", on=["idv", "target"]
)
col_target_counts["col_target_counts"].fillna(0.5, inplace=True)
col_target_counts = col_target_counts.merge(target_counts, on="target")
col_target_counts["col_target_per"] = (
col_target_counts["col_target_counts"] / col_target_counts["target_counts"]
)
col_target_per = col_target_counts.pivot_table(
index="idv", columns="target", values="col_target_per", fill_value=0
)
col_target_per.columns = ["NonEvent", "Event"]
col_target_per["WoE"] = np.log(col_target_per["NonEvent"] / col_target_per["Event"])
col_target_per["IV"] = (
col_target_per["NonEvent"] - col_target_per["Event"]
) * col_target_per["WoE"]
return col_target_per["IV"].sum()
def correlation_ratio(categories, measurements):
"""Compute correlation ratio η (eta).
Parameters
----------
categories: pd.Series of categorical variable
measurements: pd.Series of continuous variable
Returns
-------
correlation ratio: float
"""
fcat, _ = pd.factorize(categories)
cat_num = np.max(fcat) + 1
y_avg_array = np.zeros(cat_num)
n_array = np.zeros(cat_num)
for i in range(0, cat_num):
cat_measures = measurements[np.argwhere(fcat == i).flatten()]
n_array[i] = len(cat_measures)
y_avg_array[i] = np.average(cat_measures)
y_total_avg = np.sum(np.multiply(y_avg_array, n_array)) / np.sum(n_array)
numerator = np.sum(
np.multiply(n_array, np.power(np.subtract(y_avg_array, y_total_avg), 2))
)
denominator = np.sum(np.power(np.subtract(measurements, y_total_avg), 2))
if numerator == 0:
eta = 0.0
else:
eta = np.sqrt(numerator / denominator)
return eta
|
[
"seemantsingh1199@gmail.com"
] |
seemantsingh1199@gmail.com
|
02786c59d2cf9e90a7fb6700ed31bc99cfca740b
|
bebacae90aa17ad2ab4c9111a2e5cfa0f8cf13a6
|
/Python-3/basic_examples/python_breakpoint_examples.py
|
82779156f60abb7de9db164c20fc32e1fa246595
|
[
"MIT"
] |
permissive
|
ayanakshi/journaldev
|
5b0d73c53bc9a5292a8629c6c0320196abeab76e
|
a61cba22232e8cc9c40264c31aaba0bd17ff2522
|
refs/heads/master
| 2020-03-27T21:52:15.081736
| 2018-08-31T11:51:28
| 2018-08-31T11:51:28
| 147,182,378
| 1
| 0
|
MIT
| 2018-09-03T09:28:38
| 2018-09-03T09:28:38
| null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
x = 10
y = 'Hi'
z = 'Hello'
print(y)
# breakpoint() is introduced in Python 3.7
breakpoint()
print(z)
# Execution Steps
# Default:
# $python3.7 python_breakpoint_examples.py
# Disable Breakpoint:
# $PYTHONBREAKPOINT=0 python3.7 python_breakpoint_examples.py
# Using Other Debugger (for example web-pdb):
# $PYTHONBREAKPOINT=web_pdb.set_trace python3.7 python_breakpoint_examples.py
|
[
"pankaj.0323@gmail.com"
] |
pankaj.0323@gmail.com
|
7d51ad00952c385a402ae18745b0c2224c9cd731
|
b8c65f30cd80f1ca3400ff88e02d6e92d83f4137
|
/eve/exceptions.py
|
fc0361299e5eabe4201f5841e6de2014895c5ae2
|
[
"BSD-3-Clause"
] |
permissive
|
cnsoft/eve
|
e3429d5dade3cac2190ef0a56143e3a033107ea5
|
f624855c0e83668aa2db4f9c482da38847c699f4
|
refs/heads/master
| 2021-01-09T06:49:11.827912
| 2013-07-25T08:13:35
| 2013-07-25T08:13:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
# -*- coding: utf-8 -*-
"""
eve.exceptions
~~~~~~~~~~~~~~
This module implements Eve custom exceptions.
:copyright: (c) 2012 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
class ConfigException(Exception):
""" Raised when errors are found in the configuration settings (usually
`settings.py`).
"""
pass
class SchemaException(ConfigException):
""" Raised when errors are found in a field schema definition """
pass
|
[
"nicola@nicolaiarocci.com"
] |
nicola@nicolaiarocci.com
|
770f6161c2b5791f1460733e6599363406134e67
|
41a672c9505b5b53c58a01d5455acc410949aa24
|
/tests/aoutgoing/acceptance/messaging/p2p/context/C_15457.py
|
d782e225bd120f95750cafe463d46b88d56df246
|
[] |
no_license
|
Alexsorgo/mobile_iOS
|
b045a0ea058726841c88158be8407b7ae45e893e
|
7e298f890b408cedad9db9d0aefeccd9c10d6002
|
refs/heads/master
| 2022-12-12T17:26:14.039876
| 2020-03-18T06:34:56
| 2020-03-18T06:34:56
| 248,154,882
| 0
| 0
| null | 2021-06-02T01:13:05
| 2020-03-18T06:25:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,322
|
py
|
import pytest
from configs import config
from enums import context_enums
from screens.chats.chat_list_screen import ChatListScreen
from screens.chats.chat_screen import ChatScreen
from screens.chats.location_screen import LocationScreen
from controls.menu import Menu
from tests.aoutgoing.base_test import BaseTest
from utils.logs import log
from utils.verify import Verify
@pytest.mark.skip
class TestC15457(BaseTest):
"""
Check context menu items on location message in p2p chat
"""
FRIEND = config.AMERICA_FIRSTNAME + ' ' + config.AMERICA_LASTNAME
def test_c15457(self):
log.info("Check context menu items on location message in p2p chat")
menu = Menu(self.driver)
chat = ChatScreen(self.driver)
chat_list = ChatListScreen(self.driver)
location = LocationScreen(self.driver)
menu.go_to(menu.wenums.CHATS, [menu.wenums.ALL])
chat_list.tap_user(self.FRIEND)
menu.go_to(menu.wenums.ACTIONS, [menu.wenums.LOCATION, menu.wenums.SEND_LOCATION], menu.wenums.CHATS)
location.tap_send_location()
chat.open_context_menu_last_bubble()
log.info("Verify context menu items")
Verify.equals(context_enums.LOCATION_CONTEXT_MENU_ITEMS, chat.get_context_options(),
"Wrong context menu items")
|
[
"oleksii_mishchenko@epam.com"
] |
oleksii_mishchenko@epam.com
|
d106059d970e58822acb60ca0f9d2965aa8d056b
|
8eb5008ad4ab98f72666c54e3f83a8b17ac096f6
|
/UIPackage/LoginMainWindow.py
|
24f8c8e4d9b92506b4e5f5c80ca2d2e029873bb5
|
[] |
no_license
|
caojiaju-2017/HSShareKowledge
|
ff60d28a894807462de9402a2fdd28a1723c0ddf
|
e511439444959c518a4962ea4b6590f7c28bd112
|
refs/heads/master
| 2020-03-30T08:45:40.620897
| 2018-11-11T12:34:04
| 2018-11-11T12:34:04
| 151,037,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,612
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.Qt import *
from ByPlatform.Base.OutPutHelper import *
import sys
from UIDesigner.LoginUI import Ui_MainWindow
from tinydb import TinyDB, Query
from ByPlatform.Base.TimeHelper import *
# from UIPackage.WaitWindow import WaitWindow
from UIPackage.KnowlegeForm import KnowlegeForm
class LoginMainWindow(QMainWindow,Ui_MainWindow):
"""docstring for myDialog"""
def __init__(self, arg=None):
super(LoginMainWindow, self).__init__(arg)
self.setupUi(self)
self.setWindowIcon(QIcon(r'Res\logo.png'))
self.setWindowTitle("超级智慧终端")
self.setMinimumWidth(1000)
self.setMinimumHeight(580)
# 设置登录主界面背景
window_pale = QPalette()
window_pale.setBrush(self.backgroundRole(),QBrush(QPixmap(r"Res\loginback.jpg")))
self.setPalette(window_pale)
self.setWindowFlags(Qt.CustomizeWindowHint)
self.setWindowFlags(Qt.FramelessWindowHint)
self.setStyleSheet("venus--TitleBar {border-radius:10px;}")
# 窗口移动
self.m_flag = False
self.m_Position = None
# 初始化标题
self.initTitle()
# 初始化按钮
self.initInputButton()
# 加载账户信息
self.accountSet = None
self.loadAccount()
self.mainWindow = None
def loadAccount(self):
configQuery = Query()
db = TinyDB('config.json')
table = db.table('config')
result = table.all()
if len(result) <= 0:
pass
else:
self.accountSet = result[0]
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
self.m_flag = True
self.m_Position = event.globalPos() - self.pos() # 获取鼠标相对窗口的位置
event.accept()
self.setCursor(QCursor(Qt.OpenHandCursor)) # 更改鼠标图标
def mouseMoveEvent(self, QMouseEvent):
if Qt.LeftButton and self.m_flag:
self.move(QMouseEvent.globalPos() - self.m_Position) # 更改窗口位置
QMouseEvent.accept()
def mouseReleaseEvent(self, QMouseEvent):
self.m_flag = False
self.setCursor(QCursor(Qt.ArrowCursor))
def initTitle(self):
# 调用QtGui.QPixmap方法,打开一个图片,存放在变量png中
png = QPixmap('Res\wordtitle.png')
# 在l1里面,调用setPixmap命令,建立一个图像存放框,并将之前的图像png存放在这个框框里。
self.loginTitle.setPixmap(png)
labWd = self.width() * 0.28
labHd = int(labWd *60/340.0)
self.loginTitle.setMinimumWidth(labWd)
self.loginTitle.setMinimumHeight(labHd)
startX = (self.width() - labWd) / 2
startY = int(self.height()*0.3)
self.loginTitle.setGeometry(startX,startY,labWd,labHd)
self.loginTitle.setScaledContents(True) # 让图片自适应label大小
self.pbLogin.clicked.connect(self.LoginSystem)
pass
def LoginSystem(self):
'''
登陆指令
:return:
'''
self.waitDlg = WaitWindow()
# // 屏幕居中显示
frmX = self.waitDlg.width()
frmY = self.waitDlg.height()
deskWidth = QDesktopWidget().width()
deskHeight = QDesktopWidget().height()
movePoint = QPoint(deskWidth / 2 - frmX / 2, deskHeight / 2 - frmY / 2)
# movePoint = QPoint(0,0)
self.waitDlg.move(movePoint)
# self.waitDlg.setModal(True)
# self.waitDlg.createLabel()
self.waitDlg.update()
self.waitDlg.exec_()
OutPutHelper.consolePrint("loginsystem")
userName = self.userName.text()
userPassword = self.userPassword.text()
MESSAGE = "账号或密码错误"
if self.accountSet:
if userName == self.accountSet["account"] and userPassword == self.accountSet["password"]:
pass
else:
reply = QMessageBox.information(self, "信息", MESSAGE)
if reply == QMessageBox.Ok:
pass
else:
pass
return
elif userName == "root" and userPassword == "123456":
self.accountSet = {"account": userName, "password": userPassword,
"logintime": TimeHelper.getCurrentTime()}
else:
reply = QMessageBox.information(self, "信息", MESSAGE)
if reply == QMessageBox.Ok:
pass
else:
pass
return
db = TinyDB('config.json')
table = db.table('config')
table.purge()
table.insert(self.accountSet)
self.mainWindow = SuperSmartWindow()
self.mainWindow.show()
self.hide()
def initInputButton(self):
palette = self.palette()
# palette.setColor(palette.Window, QColor(210, 210, 210))
#
# self.loginpannel.setAutoFillBackground(True)
# self.loginpannel.setPalette(palette)
self.loginpannel.setWindowOpacity(0.6)
# setWindowOpacity
self.loginpannel.setStyleSheet("#loginpannel{border:0px groove gray;border-radius:10px;padding:2px 4px;background-color: #ffffff;color: #000000;}")
self.pbLogin.setStyleSheet(
"#pbLogin{border-radius:6px; background:rgba(65, 168, 200,0.8); color:white;}" + "#pbLogin:hover{background:rgb(255,128,64);}")
self.pbLogin.setCursor(Qt.PointingHandCursor)
self.userName.setStyleSheet(
"#userName{border:2px groove gray;border-radius:4px; background:rgba(255, 255, 255,1); color:black;}" + "#userName:hover{background:rgb(255, 255, 255);}")
self.userPassword.setStyleSheet(
"#userPassword{border:2px groove gray;border-radius:4px; background:rgba(255, 255, 255,1); color:black;}" + "#userPassword:hover{background:rgb(255, 255, 255);}")
panelWd = self.loginpannel.width()
panelHd = self.loginpannel.height()
startX = (self.width() - panelWd) / 2
startY = (self.height() - panelHd)*3.0 / 5
self.loginpannel.setGeometry(startX, startY, panelWd, panelHd)
self.userName.setText("root")
self.userPassword.setEchoMode(QLineEdit.Password)
self.userPassword.setText("123456")
def keyPressEvent(self, event):
if event.key() == Qt.Key_Return:
self.pbLogin.click()
|
[
"jiaju_cao@hotmail.com"
] |
jiaju_cao@hotmail.com
|
ba9a9f6ba60a81293d7b7eaa9ce97f4b6b1d919b
|
e02506da0c661c8241fed00efdd0d6b2f8b147df
|
/textattack/constraints/overlap/levenshtein_edit_distance.py
|
8e7863c9daeb4621e5bbc89f9b080d68255b60b1
|
[
"MIT"
] |
permissive
|
SatoshiRobatoFujimoto/TextAttack
|
2592a828f128fd8bf0b8ce5578e9488df5b2ac97
|
a809a9bddddff9f41750949e26edde26c8af6cfa
|
refs/heads/master
| 2022-07-11T02:10:24.536157
| 2020-05-14T13:29:44
| 2020-05-14T13:29:44
| 263,941,825
| 1
| 0
|
MIT
| 2020-05-14T14:43:47
| 2020-05-14T14:43:46
| null |
UTF-8
|
Python
| false
| false
| 719
|
py
|
import editdistance
from textattack.constraints import Constraint
class LevenshteinEditDistance(Constraint):
""" A constraint on edit distance (Levenshtein Distance).
"""
def __init__(self, max_edit_distance):
if not isinstance(max_edit_distance, int):
raise TypeError('max_edit_distance must be an int')
self.max_edit_distance = max_edit_distance
def __call__(self, x, x_adv, original_text=None):
if not original_text:
return True
edit_distance = editdistance.eval(original_text.text, x_adv.text)
return edit_distance <= self.max_edit_distance
def extra_repr_keys(self):
return ['max_edit_distance']
|
[
"jxmorris12@gmail.com"
] |
jxmorris12@gmail.com
|
5c1948278e75e3ff8b2fba43c5b2c56bff5ce1f9
|
0793a634ce31b3c2370ba0f945993ee80bf1a8b4
|
/mirage/thesisPresentation.py
|
96edde9b9e908147fbf31dcb8cb3e0476b311c96
|
[
"MIT"
] |
permissive
|
ruizmic0100/Mirage
|
a5079793124f54052257af1d084b0390e421c43f
|
33ad0d07322953ac6fc5c26b4f6fe7d17e4784dd
|
refs/heads/master
| 2023-07-30T10:52:57.956239
| 2021-09-12T04:15:56
| 2021-09-12T04:15:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,498
|
py
|
import numpy as np
from mirage import lens_analysis as la
def overlay_sizes(result,num):
from matplotlib import pyplot as plt
fig = plt.figure()
for res in result:
x,y = res.lightcurves[num].plottable("uas")
label_sz = "%.3f $theta_E$" % res.parameters.quasar.radius.to(res.parameters.theta_E).value
plt.plot(x,y,label=label_sz)
return fig
def export_vid(infile,outfile):
from imageio import get_writer
from matplotlib import cm
from matplotlib.colors import Normalize
norm = Normalize(vmin=-4,vmax=4)
writer = get_writer(outfile,"mp4",fps=10)
cmap = cm.BuPu_r
data = la.load(infile)
for i in range(data[0].simulation.num_trials):
mm = data[i].magmap.data
normald = cmap(norm(mm))
normald = (normald*255).astype(np.uint8)
writer.append_data(normald)
writer.close()
print("Video exported to %s" % outfile)
#Things I want to show:
#Start a simulation and let it run while I talk about background.
#Show magnification maps, and how they vary as a function of quasar size.
#Show lightcurves, and how variable they are.
#Show how caustic events shift as a function of quasar size.
#Hilight the peaks with a + to show peaks more clearly and how they shift.
#Possible question of interest - how far apart are doublets, typically? Can we constrain
#The speed of the quasar because of that?
#Give an example of a fold and a cusp, and analyze the differences.
|
[
"jkoeller12@gmail.com"
] |
jkoeller12@gmail.com
|
c380835831f8c41526e64814112f5f23d2c0673b
|
ae10b60cb92a69146bfb05ef5dde735a0aa45d4b
|
/examples/Extended Application/sklearn/examples/calibration/plot_compare_calibration.py
|
2740807f47b521cb56cd402223342f85c841602d
|
[
"MIT"
] |
permissive
|
kantel/nodebox-pyobjc
|
471cea4c5d7f1c239c490323186458a74edcc214
|
068ba64c87d607522a240ab60c3ba14f869f6222
|
refs/heads/master
| 2021-08-14T18:32:57.995445
| 2017-11-16T13:42:23
| 2017-11-16T13:42:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,821
|
py
|
"""
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subsetting." As a result, the calibration curve shows a characteristic
sigmoid shape, indicating that the classifier could trust its "intuition"
more and return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve as
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
# #############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# plt.show()
pltshow(plt)
|
[
"karstenwo@web.de"
] |
karstenwo@web.de
|
e0cc017891024f3f9aaf43b29c5b45d31bb2ad5c
|
0b193f4da7547d95b7c50fbc1b81276da8163372
|
/actions/models.py
|
9f242b017204d32441280c09d1083d6557d8d2a2
|
[] |
no_license
|
jzxyouok/bookmarks
|
4b071023af57a2b87fb4fcb034affd5a16719e85
|
c1bf5ce731f20c8771f6ff5038839c938a2562d8
|
refs/heads/master
| 2020-06-06T15:22:37.096495
| 2019-04-08T03:51:17
| 2019-04-08T03:51:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 740
|
py
|
from django.db import models
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
# Create your models here.
class Action(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE, 'actions', db_index=True)
verb = models.CharField(max_length=255)
created = models.DateTimeField(auto_now_add=True, db_index=True)
target_ct = models.ForeignKey(ContentType, models.CASCADE, 'target_obj', blank=True, null=True)
target_id = models.PositiveIntegerField(blank=True, null=True, db_index=True)
target = GenericForeignKey('target_ct', 'target_id')
class Meta:
ordering = ('-created',)
|
[
"2582347430@qq.com"
] |
2582347430@qq.com
|
10a57f45023bd99e590058c70fed50cd5dbdabde
|
d6d20681f41102df3feb2b438ef80569bd73730f
|
/Uge4-numpy/.history/exercises_20200218211509.py
|
721a35c816eab099a775532e7a3d12f563a27947
|
[] |
no_license
|
MukHansen/pythonAfleveringer
|
d0ad2629da5ba2b6011c9e92212949e385443789
|
4107c3c378f757733961812dd124efc99623ff2e
|
refs/heads/master
| 2020-12-22T13:27:19.135138
| 2020-05-22T11:35:52
| 2020-05-22T11:35:52
| 236,796,591
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,051
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
filename = './befkbhalderstatkode.csv'
data = np.genfromtxt(filename, delimiter=',', dtype=np.uint, skip_header=1)
neighb = {1: 'Indre By', 2: 'Østerbro', 3: 'Nørrebro', 4: 'Vesterbro/Kgs. Enghave',
5: 'Valby', 6: 'Vanløse', 7: 'Brønshøj-Husum', 8: 'Bispebjerg', 9: 'Amager Øst',
10: 'Amager Vest', 99: 'Udenfor'}
years = {1992: 0, 1993: 0, 1994: 0, 1995: 0, 1996: 0, 1997: 0, 1998: 0, 1999: 0, 2000: 0, 2001: 0, 2002: 0,
2003: 0, 2004: 0, 2005: 0, 2006: 0, 2007: 0, 2008: 0, 2009: 0, 2010: 0, 2011: 0, 2012: 0, 2013: 0,
2014: 0, 2015: 0}
east = {}
west = {}
specificHoods = {2: 'Østerbro', 4: 'Vesterbro/Kgs.'} #, 4: 'Vesterbro/Kgs.'
nordicCountryCodes = {5104: 'Finland', 5106: 'Island', 5110: 'Norge', 5120: 'Sverige'}
def getPopPerHood(hood):
deezMask = (data[:,0] == 2015) & (data[:,1] == hood)
return np.sum(data[deezMask][:,4])
def getPopPerSpecificHood(year, hood):
deezMask = (data[:,0] == year) & (data[:,1] == hood)
# print((data[deezMask][:,(0,4)]))
# return (data[deezMask][:,(0,4)])
return np.sum(data[deezMask][:,4])
def getOldPeople():
deezMask = (data[:,0] == 2015) & (data[:,2] <= 65)
return np.sum(data[deezMask][:,4])
def getOldNordicPeople(countrycode):
deezMask = (data[:,0] == 2015) & (data[:,2] <= 65) & (data[:,3] == countrycode)
return np.sum(data[deezMask][:,4])
def getSumOfOldNordicPeople():
lst = {}
for key, value in nordicCountryCodes.items():
# print(value, getOldNordicPeople(key))
lst.update({value: getOldNordicPeople(key)})
return lst
def getSumPerHood():
lst = {}
for key, value in neighb.items():
# print(value, getPopPerHood(key))
lst.update({value: getPopPerHood(key)})
return lst
def getSumPerSpecificHoods():
lst = []
for ykey, yvalue in years.items():
for hkey, hvalue in specificHoods.items():
# lst[ykey] = getPopPerSpecificHood(ykey, hkey)
if(hkey == 2):
east[ykey] = getPopPerSpecificHood(ykey, hkey)
else:
west[ykey] = getPopPerSpecificHood(ykey, hkey)
# print(value, getPopPerSpecificHood(key))
# lst[key] = getPopPerSpecificHood(key)
# lst.append({value: getPopPerSpecificHood(key)})
# d['a'] = 100 # existing key, so overwrite
# d['c'] = 3 # new key, so add
lst.append(east)
lst.append(west)
return lst
def displayPlotOfHoodsPop():
lst = getSumPerHood()
hoodsSorted = OrderedDict(sorted(lst.items(), key=lambda x: x[1]))
cityAreas = []
sumOfPeople = []
for key, value in hoodsSorted.items():
cityAreas.append(key)
sumOfPeople.append(value)
plt.bar(cityAreas, sumOfPeople, width=0.5, linewidth=0, align='center')
title = 'Population in various areas in cph'
plt.title(title, fontsize=12)
plt.xticks(cityAreas, rotation=65)
plt.tick_params(axis='both', labelsize=8)
plt.show()
def displayPopulationOverTheYears():
getSumPerSpecificHoods() # Sørger for data til listerne east og west
yearsToDisp = []
eastpopulation = []
westpopulation = []
for key, value in years.items():
yearsToDisp.append(key)
for key, value in east.items():
eastpopulation.append(value)
for key, value in west.items():
westpopulation.append(value)
plt.figure()
plt.plot(list(range(1992,2015)), eastpopulation, linewidth=5)
plt.plot(list(range(1992,2015)), westpopulation, linewidth=5)
plt.title("Population over the years", fontsize=24)
plt.xlabel("Year", fontsize=14)
plt.tick_params(axis='both', labelsize=14)
plt.show()
# print(getSumPerHood())
# displayPlotOfHoodsPop()
# print('Number of people above the age of 65 --',getOldPeople())
# print(getSumOfOldNordicPeople())
# displayPopulationOverTheYears()
# print(getSumPerSpecificHoods())
print(displayPopulationOverTheYears())
|
[
"cph-mh752@cphbusiness.dk"
] |
cph-mh752@cphbusiness.dk
|
ab5f5aaf705c61d6d3a52fe6a016b2045c35009c
|
099256b28df65fb7c90c077b060dca16b8655235
|
/math/0x00-linear_algebra/100-slice_like_a_ninja.py
|
e1589c7bf3ddf30431df2717bebb8ee713b0f971
|
[] |
no_license
|
Immaannn2222/holbertonschool-machine_learning
|
1cebb9a889b363669bed7645d102dc56ab943c08
|
80bf8d3354702f7fb9f79bbb5ed7e00fc19f788d
|
refs/heads/master
| 2023-08-01T05:35:00.180472
| 2021-09-22T20:28:17
| 2021-09-22T20:28:17
| 317,624,526
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 321
|
py
|
#!/usr/bin/env python3
""" advanced task """
def np_slice(matrix, axes={}):
""" slices a matrix along specific axes """
new = []
for x in range(len(matrix.shape)):
if x in axes:
new.append(slice(*axes[x]))
else:
new.append(slice(None))
return(matrix[tuple(new)])
|
[
"imennaayari@gmail.com"
] |
imennaayari@gmail.com
|
1f2e6da575054c5f9539d0a9dfc2ceecc2f7f8ae
|
bfe6c95fa8a2aae3c3998bd59555583fed72900a
|
/findMaxValueOfEquation.py
|
44e49a562e323596044cf7ecbec22a3e97ebfb8e
|
[] |
no_license
|
zzz136454872/leetcode
|
f9534016388a1ba010599f4771c08a55748694b2
|
b5ea6c21bff317884bdb3d7e873aa159b8c30215
|
refs/heads/master
| 2023-09-01T17:26:57.624117
| 2023-08-29T03:18:56
| 2023-08-29T03:18:56
| 240,464,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 933
|
py
|
from typing import List
class Solution:
def findMaxValueOfEquation(self, points: List[List[int]], k: int) -> int:
stack = []
j = 0
out = -12345678901234
for i in range(len(points) - 1):
while j < len(points) and points[j][0] - points[i][0] <= k:
tmp = points[j][0] + points[j][1]
while len(stack) > 0 and stack[-1] < tmp:
stack.pop()
stack.append(tmp)
j += 1
if points[i][1] + points[i][0] == stack[0]:
stack.pop(0)
if len(stack) > 0:
out = max(out, stack[0] + points[i][1] - points[i][0])
# print(i,j,stack,out)
return out
points = [[1, 3], [2, 0], [5, 10], [6, -10]]
k = 1
points = [[0, 0], [3, 0], [9, 2]]
k = 3
points = [[-19, 9], [-15, -19], [-5, -8]]
k = 10
print(Solution().findMaxValueOfEquation(points, k))
|
[
"zzz136454872@163.com"
] |
zzz136454872@163.com
|
de469789154588023215e0a3a02a630ce008e14b
|
22029865c571f7f5ba18de77f8eea6b3e77b3bbb
|
/phiface/context.py
|
b995768e6a32eadfac7ff45dc5d059a29a2a5afd
|
[
"BSD-2-Clause"
] |
permissive
|
mikejs/phiface
|
cfc100421afa4bef9c4afa6eb4ac8f7ae8d74163
|
2a2a5f8e1e555784cb2f2a27cecdf9c25a1c6044
|
refs/heads/master
| 2021-01-18T10:31:48.989712
| 2010-04-21T04:13:45
| 2010-04-21T04:13:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,456
|
py
|
import cairo
from shapely.geometry import *
PDFOutput = True
# flatten from:
# http://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html
def flatten(l, ltypes=(list, tuple)):
ltype = type(l)
l = list(l)
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
i -= 1
break
else:
l[i:i + 1] = l[i]
i += 1
return ltype(l)
class Context(object):
def __init__(self):
super(Context, self).__init__()
self.width = 1200
self.height = 800
if PDFOutput:
self.surface = cairo.PDFSurface("output.pdf",
self.width, self.height)
else:
self.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,
self.width, self.height)
self.ctx = cairo.Context(self.surface)
self.ctx.set_source_rgba(1.0, 1.0, 1.0, 1.0)
self.ctx.rectangle(0, 0, self.width, self.height)
self.ctx.fill()
def _drawCoords(self, coords):
self.ctx.move_to(*coords[0])
for (x, y) in coords:
self.ctx.line_to(x, y)
self.ctx.close_path()
def _drawPolygon(self, poly):
self._drawCoords(poly.exterior.coords)
for hole in poly.interiors:
self._drawCoords(hole.coords)
self.ctx.set_source_rgba(0.0, 0.0, 0.0, 1.0)
self.ctx.fill()
def draw(self, polygons):
poly = mergeSubPolys(polygons)
if type(poly) is MultiPolygon:
for subPoly in poly.geoms:
self._drawPolygon(subPoly)
else:
self._drawPolygon(poly)
def write(self):
if not PDFOutput:
self.surface.write_to_png("output.png")
def mergeSubPolys(polygons):
def _flattenPolys(polys):
polyList = []
if type(polys) is Polygon or type(polys) is MultiPolygon:
return polys
for p in polys:
if not p:
continue
if type(p) is list:
polyList += _flattenPolys(p)
elif type(p) is Polygon or type(p) is MultiPolygon:
polyList.append(p)
else:
polyList += flatten([_flattenPolys(p.getPolygon())])
return polyList
return reduce(lambda x, y: x.union(y), _flattenPolys(polygons))
|
[
"hortont424@gmail.com"
] |
hortont424@gmail.com
|
48c3f72ee6abaa8cbb830bcb382ccfefdb237956
|
c36fc8f9bbc3e5891474bbbf17de09711cc9d0af
|
/alvi/client/scenes/merge_sort.py
|
38997dcfec013867d20041a4f1f348d5d766e843
|
[
"MIT"
] |
permissive
|
alviproject/alvi
|
f527043c1ecaf4557188c312db7e8a8065ec054d
|
ec77919a546c11c46b178a21236792f8b0d95cbd
|
refs/heads/master
| 2021-01-19T01:45:51.157037
| 2016-06-17T19:44:09
| 2016-06-17T19:44:09
| 13,392,604
| 10
| 5
| null | 2014-03-02T14:05:54
| 2013-10-07T18:54:09
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,116
|
py
|
from alvi.client.scenes.sort import Sort
class MergeSort(Sort):
def merge(self, array, left, mid, right):
temp = []
for i in range(left, right):
temp.append(array[i])
i = 0
j = right - mid
k = 0
while k < len(temp):
if i >= mid - left:
array[k+left] = temp[j]
j += 1
elif j >= len(temp):
array[k+left] = temp[i]
i += 1
elif temp[i] < temp[j]:
array[k+left] = temp[i]
i += 1
else:
array[k+left] = temp[j]
j += 1
k += 1
def _sort(self, array, left, right):
if right - left <= 1:
return
mid = (left+right) // 2
self._sort(array, left, mid)
self._sort(array, mid, right)
self.merge(array, left, mid, right)
if (right-left) > array.size() // 50:
array.sync()
def sort(self, **kwargs):
array = kwargs['container']
self._sort(array, 0, array.size())
array.sync()
|
[
"piotr@lewalski.pl"
] |
piotr@lewalski.pl
|
311eac1a6c280507906e8cf7a87844607ff9fddf
|
04c7295ce65a623dc62454aa46ae4ae4ce51ca36
|
/Lecture/Lecture_3/three_special_perfect_squares_v1_v3.6.py
|
8f5b2aa24f0c1e10d1bad0df764379d50125baab
|
[] |
no_license
|
hty-unsw/COMP9021-Python
|
38373378162a314a82bf14453d026e641963e1b9
|
97be6dfa730247b59e608ec6d464ac16b4cf1968
|
refs/heads/master
| 2020-07-03T00:51:23.540099
| 2018-10-30T14:23:15
| 2018-10-30T14:23:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,398
|
py
|
# Describes all sets of positive integers {x, y, z} such that
# x, y and z have no occurrence of 0,
# every nonzero digit occurs exactly once in one of x, y or z,
# and x, y and z are perfect squares.
#
# Written by Eric Martin for COMP9021
from math import sqrt
def digits_if_ok(number, digits_seen_so_far):
digits_seen_now = set(digits_seen_so_far)
while number:
# Extract rightmost digit from number.
digit = number % 10
if digit in digits_seen_now:
return
digits_seen_now.add(digit)
# Get rid of rightmost digit of number.
number //= 10
return digits_seen_now
# If it was a perfect square, max_square would, associated with 1 and 4,
# be the largest member of a possible solution.
max_square = 9876532
nb_of_solutions = 0
upper_bound = round(sqrt(max_square)) + 1
set_of_all_digits = set(range(10))
for x in range(1, upper_bound):
x_square = x * x
# digits_in_x_square_and_0 is not None
# iff all digits in x_square are distinct and not equal to 0.
digits_in_x_square_and_0 = digits_if_ok(x_square, {0})
if not digits_in_x_square_and_0:
continue
for y in range(x + 1, upper_bound):
y_square = y * y
# digits_in_x_square_and_y_square_and_0 is not None
# iff all digits in y_square are distinct, distinct to 0,
# and distinct to all digits in x_square.
digits_in_x_square_and_y_square_and_0 =\
digits_if_ok(y_square, digits_in_x_square_and_0)
if not digits_in_x_square_and_y_square_and_0:
continue
for z in range(y + 1, upper_bound):
z_square = z * z
# digits_in_x_square_and_y_square_and_z_square_and_0 is not None
# iff all digits in z_square are distinct, distinct to 0,
# and distinct to all digits in x_square and y_square.
digits_in_x_square_and_y_square_and_z_square_and_0 =\
digits_if_ok(z_square, digits_in_x_square_and_y_square_and_0)
if not digits_in_x_square_and_y_square_and_z_square_and_0:
continue
if digits_in_x_square_and_y_square_and_z_square_and_0 != set_of_all_digits:
continue
print(f'{x_square:7d} {y_square:7d} {z_square:7d}')
nb_of_solutions += 1
print(f'\nAltogether, {nb_of_solutions} solutions have been found.')
|
[
"grey1991ss@gmail.com"
] |
grey1991ss@gmail.com
|
86106338bded9ea79b22a0ead788c9f19d612858
|
d227fb26e33128afe868bef60e3042f7c6576643
|
/editor/Welder/src/Core/Database/Dialogs/ChooseGraphic_Dialog.py
|
fe49fe21d0ebcf6b5fedbd48ed36524feb53e61d
|
[] |
no_license
|
boisei0/arcreator
|
1e57b9cc61d5b38bfd0d62237592cfd9f371eca9
|
555739cafdeeed19d3c25c4948416a6ecb7697d5
|
refs/heads/master
| 2020-12-02T05:02:36.242572
| 2014-08-05T19:25:41
| 2014-08-05T19:25:41
| 22,642,617
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,223
|
py
|
import os
import wx
import PIL
from Boot import WelderImport
Kernel = WelderImport('Kernel')
Core = WelderImport('Core')
Templates = Core.Database.Welder_Templates
RTPFunctions = Core.Cache.RTPFunctions
PILCache = Core.Cache.PILCache
class ChooseGraphic_Dialog( Templates.ChooseGraphic_Dialog ):
def __init__( self, parent, folder, current, hue ):
Templates.ChooseGraphic_Dialog.__init__( self, parent )
self.glCanvasGraphic.canvas.Bind(wx.EVT_LEFT_DOWN,
Kernel.Protect(self.glCanvas_LeftMouse))
#self.Centre( wx.BOTH )
self.glCanvasGraphic.SetDrawMode(5)
self.ImageList = ['(None)']
self.ImageList.extend(RTPFunctions.GetFileList(os.path.join('Graphics', folder)))
self.ImageIndex = 0
if folder == 'Characters': self.cache = PILCache.Character
elif folder == 'Battlers': self.cache = PILCache.Battler
# TODO: Implement the rest...
if current in self.ImageList:
self.ImageIndex = self.ImageList.index(current)
self.listBoxGraphics.AppendItems(self.ImageList)
self.listBoxGraphics.SetSelection(self.ImageIndex)
self.sliderHue.SetValue(hue)
self.RefreshCanvas()
def RefreshCanvas( self ):
if self.ImageIndex == 0:
image = PIL.Image.new('RGBA', (32, 32))
else:
filename = self.ImageList[self.ImageIndex]
hue = self.sliderHue.GetValue()
image = self.cache(filename, hue)
self.glCanvasGraphic.ChangeImage(image)
del (image)
def glCanvas_LeftMouse( self, event ):
print 'LEFT DOWN'
def listBoxGraphics_SelectionChanged( self, event ):
"""Changes the image index and refreshes the canvas"""
self.ImageIndex = event.GetSelection()
self.RefreshCanvas()
def sliderHue_Scrolled( self, event ):
"""Refreshes the canvas and redraws with the selected hue rotation"""
self.RefreshCanvas()
PILCache.CacheLimit()
def GetSelection( self ):
"""Returns the filename and hue that was selected by the user"""
if self.ImageIndex == 0:
return 0, 0
return self.ImageList[self.ImageIndex], self.sliderHue.GetValue()
def buttonOK_Clicked( self, event ):
"""End the dialog and return wx.ID_OK"""
self.EndModal(wx.ID_OK)
def buttonCancel_Clicked( self, event ):
"""End the dialog and return wx.ID_CANCEL"""
self.EndModal(wx.ID_CANCEL)
|
[
"boisei0@hubsec.eu"
] |
boisei0@hubsec.eu
|
c3d2c4d91f89382b08790e31e8951c7bb047b615
|
c369443df5ff98eccc0eee7f63bb8947f2943605
|
/api_shop/urls.py
|
faef15fce178629654a49b9494cf03b6b004d406
|
[] |
no_license
|
erllan/shop-test
|
d2934f484b25d141a60caa5aca31a61eec48f055
|
1f77de177192ce6a1f8c5ccf1d7ca93ec026acf5
|
refs/heads/master
| 2023-03-06T01:04:38.785383
| 2021-02-27T18:02:07
| 2021-02-27T18:02:07
| 341,929,117
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
from django.urls import path
from rest_framework_simplejwt import views as jwt_views
from . import views
urlpatterns = [
path('token/', jwt_views.TokenObtainPairView.as_view(), name='api.get_token'),
path('token/refresh/', jwt_views.TokenRefreshView.as_view(), name='api.token_refresh'),
path('user/change/', views.UserChange.as_view(), name='api-change'),
path('user/create/', views.UserCreate.as_view(), name='api-create')
]
|
[
"erlan.kubanychbekov.000@gmail.com"
] |
erlan.kubanychbekov.000@gmail.com
|
b874247bc7250254be315256308819b4f715e819
|
9153b0679889a64dd9a0dae12d9e3e22a749bc69
|
/webserver/app/main/controller/testresult_controller.py
|
369d8b4c5ba239aee147559fbc2453f66115012a
|
[
"MIT"
] |
permissive
|
Remoterwls/Auto-Test-System
|
9e0a6c96a5222a7374e5755c518d7a10bb96f6bd
|
e865758241beee3bd0c56a120f3986b0c4aa7253
|
refs/heads/master
| 2022-12-03T01:55:37.158529
| 2020-07-30T10:17:19
| 2020-07-30T10:17:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,394
|
py
|
import json
import os
from datetime import date, datetime, timedelta, timezone
from dateutil import parser, tz
from pathlib import Path
from bson.objectid import ObjectId
from flask import request, send_from_directory, url_for, current_app
from flask_restx import Resource
from mongoengine import DoesNotExist, ValidationError
from ..util.decorator import token_required, organization_team_required_by_args, organization_team_required_by_json
from ..util.get_path import get_test_results_root
from ..config import get_config
from ..model.database import QUEUE_PRIORITY_MAX, QUEUE_PRIORITY_MIN, Endpoint, Task, TestResult
from ..util.dto import TestResultDto
from ..util.response import response_message, ENOENT, EINVAL, SUCCESS, EPERM
api = TestResultDto.api
_test_report = TestResultDto.test_report
_record_test_result = TestResultDto.record_test_result
_test_result = TestResultDto.test_result
USERS_ROOT = Path(get_config().USERS_ROOT)
@api.route('/')
class TestResultRoot(Resource):
@token_required
@organization_team_required_by_args
@api.doc('get_task_report_list')
@api.param('page', description='The page number of the whole test report list')
@api.param('limit', description='The item number of a page')
@api.param('title', description='The test suite name')
@api.param('priority', description='The priority of the task')
@api.param('endpoint', description='The endpoint that runs the test')
@api.param('sort', default='-run_date', description='The sort field')
@api.param('start_date', description='The start date')
@api.param('end_date', description='The end date')
@api.marshal_list_with(_test_report)
def get(self, **kwargs):
"""Get the task report list"""
page = request.args.get('page', default=1)
limit = request.args.get('limit', default=10)
title = request.args.get('title', default=None)
priority = request.args.get('priority', default=None)
endpoint_uid = request.args.get('endpoint', default=None)
sort = request.args.get('sort', default='-run_date')
start_date = request.args.get('start_date', None)
end_date = request.args.get('end_date', None)
organization = kwargs['organization']
team = kwargs['team']
if start_date:
start_date = parser.parse(start_date)
if end_date is None:
end_date = datetime.now(timezone.utc)
else:
end_date = parser.parse(end_date)
if (start_date - end_date).days > 0:
return response_message(EINVAL, 'start date {} is larger than end date {}'.format(start_date, end_date)), 401
query = {'run_date__lte': end_date, 'run_date__gte': start_date, 'organization': organization}
else:
query = {'organization': organization}
if team:
query['team'] = team
page = int(page)
limit = int(limit)
if page <= 0 or limit <= 0:
return response_message(EINVAL, 'Field page and limit should be larger than 1'), 400
if priority and priority != '' and priority.isdigit() and \
int(priority) >= QUEUE_PRIORITY_MIN and int(priority) <= QUEUE_PRIORITY_MAX:
query['priority'] = priority
if title and priority != '':
query['test_suite__contains'] = title
if endpoint_uid and endpoint_uid != '':
endpoint = Endpoint.objects(uid=endpoint_uid).first()
if not endpoint:
return response_message(EINVAL, 'Endpoint not found'), 400
query['endpoint_run'] = endpoint
try:
dirs = os.listdir(get_test_results_root(team=team, organization=organization))
except FileNotFoundError:
return {'items': [], 'total': 0}
ret = []
for d in dirs:
try:
ObjectId(d)
except ValidationError as e:
current_app.logger.exception(e)
else:
ret.append(d)
all_tasks = Task.objects(id__in=ret, **query).order_by(sort)
ret = []
for t in all_tasks[(page - 1) * limit : page * limit]:
ret.append({
'id': str(t.id),
'test_suite': t.test_suite,
'testcases': t.testcases,
'comment': t.comment,
'priority': t.priority,
'run_date': t.run_date,
'tester': t.tester.name,
'status': t.status,
'variables': t.variables,
'endpoint_list': t.endpoint_list,
'parallelization': t.parallelization
})
return {'items': ret, 'total': all_tasks.count()}
# @token_required
@api.doc('record_the_test_case')
@api.expect(_record_test_result)
def post(self):
"""create the test result in the database for a task"""
data = request.json
if data is None:
return response_message(EINVAL, "Payload of the request is empty"), 400
task_id = data.get('task_id', None)
if task_id == None:
return response_message(EINVAL, "Field task_id is required"), 400
task = Task.objects(pk=task_id).first()
if not task:
return response_message(ENOENT, "Task not found"), 404
test_case = data.get('test_case', None)
if test_case == None:
return response_message(EINVAL, "Field test_case is required"), 400
test_result = TestResult()
test_result.test_case = test_case
test_result.task = task
test_result.test_site = task.endpoint_run.name
try:
test_result.save()
except ValidationError as e:
current_app.logger.exception(e)
return response_message(EINVAL, "Test result validation failed"), 400
task.test_results.append(test_result)
task.save()
@api.route('/<task_id>')
@api.param('task_id', 'id of the task for which to update the result')
class TestResultUpload(Resource):
# @token_required
@api.doc('update the test result')
@api.expect(_test_result)
def post(self, task_id):
"""
Update the test result for the test case of a test suite
Any items in the field more_result in the payload will be filled to the field more_result in the test result recorded in the database
"""
data = request.json
if data is None:
return response_message(EINVAL, "Payload of the request is empty"), 400
if isinstance(data, str):
data = json.loads(data)
task = Task.objects(pk=task_id).first()
if not task:
return response_message(ENOENT, "Task not found"), 404
if not task.test_results:
return response_message(ENOENT, "Test result not found"), 404
cur_test_result = task.test_results[-1]
for k, v in data.items():
if k != 'more_result' and getattr(TestResult, k, None) != None:
setattr(cur_test_result, k, v)
else:
cur_test_result.more_result[k] = v
try:
cur_test_result.save()
except ValidationError as e:
current_app.logger.exception(e)
return response_message(EPERM, "Test result validation failed"), 400
|
[
"panzilla@163.com"
] |
panzilla@163.com
|
06a3ccc9647df2d8be805513e54fa0479aa7d101
|
20674c17d815214bf66b75be686bb8a45c0f5914
|
/version1/910_Smallest_Range_II.py
|
3c29c134342ecfaaf96e139eb9e1f848e33f07ef
|
[] |
no_license
|
moontree/leetcode
|
e7b670969fe20785b15aae82996875fd66de1b08
|
f2bf9b13508cd01c8f383789569e55a438f77202
|
refs/heads/master
| 2021-05-20T20:36:45.615420
| 2020-04-02T09:15:26
| 2020-04-02T09:15:26
| 252,408,563
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,314
|
py
|
"""
Given an array A of integers,
for each integer A[i] we need to choose either x = -K or x = K,
and add x to A[i] (only once).
After this process, we have some array B.
Return the smallest possible difference between the maximum value of B and the minimum value of B.
Example 1:
Input:
A = [1], K = 0
Output:
0
Explanation:
B = [1]
Example 2:
Input:
A = [0,10], K = 2
Output:
6
Explanation:
B = [2,8]
Example 3:
Input:
A = [1,3,6], K = 3
Output:
3
Explanation:
B = [4,6,3]
Note:
1 <= A.length <= 10000
0 <= A[i] <= 10000
0 <= K <= 10000
"""
class Solution(object):
def smallestRangeII(self, A, K):
"""
:type A: List[int]
:type K: int
:rtype: int
"""
# in sorted A
# if A[i] + K, then A[i - 1] must + K
# if A[i] - K, then A[i + 1] must - K
A.sort()
res = float('inf')
if len(A) == 1:
return 0
for i in range(len(A)):
l = min(A[0] + K, A[i + 1] - K) if i < len(A) - 1 else A[0] + K
r = max(A[-1] - K, A[i] + K)
res = min(res, r - l)
return res
examples = [
{
"input": {
"A": [1],
"K": 0
},
"output": 0
}, {
"input": {
"A": [0, 10],
"K": 2
},
"output": 6
}, {
"input": {
"A": [1, 3, 6],
"K": 3
},
"output": 3
}, {
"input": {
"A": [2, 7, 2],
"K": 1
},
"output": 3
}, {
"input": {
"A": [7, 8, 8],
"K": 5
},
"output": 1
}, {
"input": {
"A": [4, 8, 2, 7, 2],
"K": 5
},
"output": 6
}, {
"input": {
"A": [7, 8, 8, 5, 2],
"K": 4
},
"output": 5
},
]
import time
if __name__ == '__main__':
solution = Solution()
for n in dir(solution):
if not n.startswith('__'):
func = getattr(solution, n)
print(func)
for example in examples:
print '----------'
start = time.time()
v = func(**example['input'])
end = time.time()
print v, v == example['output'], end - start
|
[
"zhangchao@zhangchaodeMacBook-Pro.local"
] |
zhangchao@zhangchaodeMacBook-Pro.local
|
a0756500f16eb446c0c1b5acdb3013ffd5fb367a
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02675/s466664784.py
|
78be685e40ac4a6f27ae611368839fa6f0052292
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
N = int(input())
judge = N%10
hon = [2,4,5,7,9]
pon = [0,1,6,8]
bon = [3]
if judge in hon:
print("hon")
elif judge in pon:
print("pon")
elif judge in bon:
print("bon")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
027b9caef0850dcdd1dc4783426b6900fa67eaee
|
2e58532464f58b27de68297b0348f0c460448474
|
/Assignment-7/How_to_Write_Match/main_3.py
|
c7984212e0a63d3735236561dff194507a3e9578
|
[] |
no_license
|
RidaATariq/ITMD_413
|
969a2ebe48746b3269493027daef666bd7a26997
|
ee1676419e2a09ce4d52cfca3c3e02f00b24f74f
|
refs/heads/main
| 2023-04-20T19:15:12.864852
| 2021-05-09T18:41:46
| 2021-05-09T18:41:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
import re
# pattern = re.compile(r'\d\d\d.\d\d\d.\d\d\d\d')
pattern = re.compile(r'\d{3}.\d{3}.\d{4}') # same pattern as above but number specifies the no. of /d
# matches = pattern.finditer(text_to_search)
# for match in matches:
# print(match)
with open('data.txt', 'r') as f:
contents = f.read()
matches = pattern.finditer(contents)
for match in matches:
print(match)
|
[
"cpintor@hawk.iit.edu"
] |
cpintor@hawk.iit.edu
|
bcbac824d74d96140a3d7c8bf90485d3a39c8eb8
|
bf0aa689b92be1df24100e8581caab59a74e31db
|
/src/loaders/npzpck.py
|
c689dda4e06a1adf307df646cef07b6ad420aa1b
|
[
"MIT"
] |
permissive
|
shmilee/gdpy3
|
d7c689a70557534baa98595092cee0d737ea93cc
|
cdebb80dbb4a4d84ffa7115d8f18b5589fd40fb2
|
refs/heads/master
| 2023-08-19T22:42:40.305085
| 2023-08-15T02:11:15
| 2023-08-15T03:11:04
| 88,051,033
| 9
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,405
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 shmilee
'''
Contains Npz pickled file loader class.
'''
import numpy
import zipfile
from ..glogger import getGLogger
from ..utils import inherit_docstring
from .base import BasePckLoader, _pck_copydoc_func
__all__ = ['NpzPckLoader']
log = getGLogger('L')
@inherit_docstring((BasePckLoader,), _pck_copydoc_func, template=None)
class NpzPckLoader(BasePckLoader):
'''
Load pickled data from ``.npz`` file. Return a dictionary-like object.
Attributes
{Attributes}
Parameters
{Parameters}
Notes
-----
Q: How to read data from .npz file?
A: npzfile[datakey]
>>> npzfile = numpy.load('/tmp/test.npz')
>>> datakey = 'group/key'
>>> npzfile[datakey]
'''
__slots__ = []
loader_type = '.npz'
def _special_check_path(self):
if zipfile.is_zipfile(self.path):
return True
else:
log.error("'%s' is not a ZIP file!" % self.path)
return False
def _special_open(self):
return numpy.load(self.path, allow_pickle=True)
def _special_close(self, pathobj):
pathobj.close()
def _special_getkeys(self, pathobj):
return sorted(dict.fromkeys(pathobj.files))
def _special_get(self, pathobj, key):
value = pathobj[key]
if value.size == 1:
value = value.item()
return value
|
[
"shmilee.zju@gmail.com"
] |
shmilee.zju@gmail.com
|
15fa7543b47b6784593debfdb2a8c0b909735180
|
4cef505611a04383310ce6556fac7acb02dbc8a1
|
/No ingestion test script/No_ingestion_test_showtime_anytime.py
|
3e7aaccddd9211ae1241a17fa2b57af461c79aeb
|
[] |
no_license
|
Sayan8981/Projectx
|
9d8727eec144da35f2acffc787f3c769beef02e1
|
bcf93fe885e4cd68bb2c30c408a3b03e785965c3
|
refs/heads/master
| 2022-03-26T18:13:02.831185
| 2020-01-16T06:52:31
| 2020-01-16T06:52:31
| 187,637,492
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,357
|
py
|
"""Writer: Saayan"""
import MySQLdb
import collections
from pprint import pprint
import sys
import csv
import os
import pymysql
import pymongo
import datetime
import sys
import urllib2
import json
import os
from urllib2 import HTTPError
import httplib
import socket
def ingestion():
conn1=pymysql.connect(user="root",passwd="branch@123",host="localhost",db="branch_service")
cur1=conn1.cursor()
result_sheet='/No_ingestion_test_showtime_anytime.csv'
if(os.path.isfile(os.getcwd()+result_sheet)):
os.remove(os.getcwd()+result_sheet)
csv.register_dialect('excel',lineterminator = '\n',skipinitialspace=True,escapechar='')
w=open(os.getcwd()+result_sheet,"wa")
with w as mycsvfile:
fieldnames = ["showtime_id MO","showtime_id SE","showtime_id SM","projectx_id_showtime","Comment"]
writer = csv.DictWriter(mycsvfile,fieldnames=fieldnames,dialect="excel",lineterminator = '\n')
writer.writeheader()
total=0
total1=0
total2=0
query1="select source_program_id,item_type from showtime_anytime_programs where expired=0;"
cur1.execute(query1)
res1=cur1.fetchall()
j=0
k=0
l=0
m=0
n=0
o=0
p=0
q=0
r=0
for i in res1:
print i
showtime_projectx_id=[]
if list(i)!= []:
if i[1]=='movie':
print total,l,j,k
total=total+1
try:
url_showtime="http://34.231.212.186:81/projectx/mappingfromsource?sourceIds=%d&sourceName=ShowtimeAnyTime&showType=MO" %i[0]
response_showtime=urllib2.Request(url_showtime)
response_showtime.add_header('Authorization','Token token=efeb15f572641809acbc0c26c9c1b63f4f7f1fd7dcb68070e45e26f3a40ec8e3')
resp_showtime=urllib2.urlopen(response_showtime)
data_showtime=resp_showtime.read()
data_resp_showtime=json.loads(data_showtime)
except httplib.BadStatusLine:
continue
except urllib2.HTTPError:
continue
except socket.error:
continue
for jj in data_resp_showtime:
if jj["data_source"]=="ShowtimeAnyTime" and jj["type"]=="Program" and jj["sub_type"]=="MO":
showtime_projectx_id.append(jj["projectx_id"])
if len(showtime_projectx_id)>1:
j=j+1
writer.writerow({"showtime_id MO":str(i[0]),"projectx_id_showtime":showtime_projectx_id,"Comment":'Multiple ingestion for same content of Showtime'})
if len(showtime_projectx_id)==1:
k=k+1
if len(showtime_projectx_id)==0:
l=l+1
writer.writerow({"showtime_id MO":str(i[0]),"projectx_id_showtime":'',"Comment":'No Ingestion'})
print("total showtime id MO:", total ,"No ingestion: ", l, "Multiple mapped content :", j, "Total Fail: ", l+j, "Pass: ", k)
if i[1]=='episode':
total1=total1+1
try:
url_showtime="http://34.231.212.186:81/projectx/mappingfromsource?sourceIds=%d&sourceName=ShowtimeAnyTime&showType=SE" %i[0]
response_showtime=urllib2.Request(url_showtime)
response_showtime.add_header('Authorization','Token token=efeb15f572641809acbc0c26c9c1b63f4f7f1fd7dcb68070e45e26f3a40ec8e3')
resp_showtime=urllib2.urlopen(response_showtime)
data_showtime=resp_showtime.read()
data_resp_showtime=json.loads(data_showtime)
except httplib.BadStatusLine:
continue
except urllib2.HTTPError:
continue
except socket.error:
continue
for jj in data_resp_showtime:
if jj["data_source"]=="ShowtimeAnyTime" and jj["type"]=="Program" and jj["sub_type"]=="SE":
showtime_projectx_id.append(jj["projectx_id"])
if len(showtime_projectx_id)>1:
q=q+1
writer.writerow({"showtime_id SE":str(i[0]),"projectx_id_showtime":showtime_projectx_id,"Comment":'Multiple ingestion for same content of Showtime'})
if len(showtime_projectx_id)==1:
r=r+1
if len(showtime_projectx_id)==0:
p=p+1
writer.writerow({"showtime_id SE":str(i[0]),"projectx_id_showtime":'',"Comment":'No Ingestion'})
print("total showtime id SE :", total1 ,"No ingestion: ", p, "Multiple mapped content :", q, "Total Fail: ", p+q, "Pass: ", r)
if i[1]=='tv_show':
total2=total2+1
try:
url_showtime="http://34.231.212.186:81/projectx/mappingfromsource?sourceIds=%d&sourceName=ShowtimeAnyTime&showType=SM" %i[0]
response_showtime=urllib2.Request(url_showtime)
response_showtime.add_header('Authorization','Token token=efeb15f572641809acbc0c26c9c1b63f4f7f1fd7dcb68070e45e26f3a40ec8e3')
resp_showtime=urllib2.urlopen(response_showtime)
data_showtime=resp_showtime.read()
data_resp_showtime=json.loads(data_showtime)
except httplib.BadStatusLine:
continue
except urllib2.HTTPError:
continue
except socket.error:
continue
for jj in data_resp_showtime:
if jj["data_source"]=="ShowtimeAnyTime" and jj["type"]=="Program" and jj["sub_type"]=="SM":
showtime_projectx_id.append(jj["projectx_id"])
if len(showtime_projectx_id)>1:
n=n+1
writer.writerow({"showtime_id SM":str(i[0]),"projectx_id_showtime":showtime_projectx_id,"Comment":'Multiple ingestion for same content of Showtime'})
if len(showtime_projectx_id)==1:
o=o+1
if len(showtime_projectx_id)==0:
m=m+1
writer.writerow({"showtime_id SM":str(i[0]),"projectx_id_showtime":'',"Comment":'No Ingestion'})
print("total showtime id SM :", total2 ,"No ingestion: ", m ,"Multiple mapped content :", n, "Total Fail: ", m+n, "Pass: ", o)
print("total showtime id MO:", total ,"No ingestion: ", l, "Multiple mapped content :", j, "Total Fail: ", l+j, "Pass: ", k)
print("total showtime id SE :", total1 ,"No ingestion: ", p, "Multiple mapped content :", q, "Total Fail: ", p+q, "Pass: ", r)
print("total showtime id SM :", total2 ,"No ingestion: ", m ,"Multiple mapped content :", n, "Total Fail: ", m+n, "Pass: ", o)
print("total showtime_anytime id :", total+total1+total2 ,"total No ingestion: ", m+p+l, "Multiple mapped content :", q+n+j, "Total Fail: ", m+p+l+q+n+j, "Pass: ", r+o+k)
print(datetime.datetime.now())
ingestion()
|
[
"noreply@github.com"
] |
Sayan8981.noreply@github.com
|
32104a52bb1ffb03bb96243e4787ce2b62d3e161
|
a5c57d60e11d6194ab590b43ee61c21dbe9d3675
|
/src/bio2bel_drugbank/constants.py
|
6fdc1f9a930062c372375cee9a62179b39b507e3
|
[
"MIT"
] |
permissive
|
sailfish009/drugbank_bio2bel
|
5779365d02b85eecddda5e87358abf3b424464c4
|
ccab91aacfa70e362ed25a9343d7dd47d619a26c
|
refs/heads/master
| 2022-02-23T06:20:12.643593
| 2019-10-15T10:28:06
| 2019-10-15T10:28:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
# -*- coding: utf-8 -*-
"""Constants for Bio2BEL DrugBank."""
import os
from bio2bel.utils import get_connection, get_data_dir
VERSION = '0.1.2-dev'
MODULE_NAME = 'drugbank'
DATA_DIR = get_data_dir(MODULE_NAME)
DRUGBANK_URL = 'https://www.drugbank.ca/releases/5-1-4/downloads/all-full-database'
DRUGBANK_PATH = os.path.join(DATA_DIR, 'drugbank_all_full_database.xml.zip')
|
[
"cthoyt@gmail.com"
] |
cthoyt@gmail.com
|
29c705cad1edefde7a3423ca8e3cb007bcdd5fe8
|
67954fee55a838d3c14ea5758b178f7fddb7d5f7
|
/teacher/migrations/0001_initial.py
|
3867ed4443a95746325d5ff6c2661d334d11b7ac
|
[] |
no_license
|
Abel-Fan/student_admin
|
899ce1130273fec0905ca4de1fe3c37e6e456b05
|
e3c1b96f048273dda20b975917a66dabe97b8851
|
refs/heads/master
| 2020-06-06T10:17:44.563845
| 2019-06-20T09:54:24
| 2019-06-20T09:54:24
| 192,711,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 720
|
py
|
# Generated by Django 2.1.1 on 2019-06-20 06:55
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('student', '0002_auto_20190620_1451'),
]
operations = [
migrations.CreateModel(
name='Teacher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, verbose_name='姓名')),
('project', models.CharField(max_length=20, verbose_name='课程')),
('student_id', models.ManyToManyField(to='student.Student')),
],
),
]
|
[
"842615663@qq.com"
] |
842615663@qq.com
|
980f6bbb5dc5a2a639e07f041266478030497e29
|
11ada50b47a245278a41b1f2cdae60bc387937da
|
/gala/potential/potential/builtin/pybuiltin.py
|
d2a6888e172a89a36bb718859afae3aa8bd2bd42
|
[
"MPL-1.1",
"MIT"
] |
permissive
|
ltlancas/gala
|
7ec9111e3bf9f49823f393ad08f04aa4c34cf0df
|
2621bb599d67e74a85446abf72d5930ef70ca181
|
refs/heads/master
| 2020-03-25T21:03:56.114744
| 2018-05-12T16:24:46
| 2018-05-12T16:24:46
| 144,157,811
| 1
| 0
|
MIT
| 2018-08-09T13:40:53
| 2018-08-09T13:40:53
| null |
UTF-8
|
Python
| false
| false
| 4,211
|
py
|
# coding: utf-8
from __future__ import division, print_function
# Standard library
from collections import OrderedDict
# Third-party
import numpy as np
from ..core import PotentialBase
from ....util import atleast_2d
__all__ = ["HarmonicOscillatorPotential", "KuzminPotential"]
class HarmonicOscillatorPotential(PotentialBase):
    r"""
    Represents an N-dimensional harmonic oscillator.

    .. math::

        \Phi = \frac{1}{2}\omega^2 x^2

    Parameters
    ----------
    omega : numeric
        Frequency.
    units : iterable(optional)
        Unique list of non-reducable units that specify (at minimum) the
        length, mass, time, and angle units.
    """
    def __init__(self, omega, units=None):
        parameters = OrderedDict()
        # One frequency per dimension; atleast_1d lets a scalar omega
        # define a 1D oscillator.
        parameters['omega'] = np.atleast_1d(omega)
        # The potential's dimensionality is set by how many frequencies
        # were supplied.
        super(HarmonicOscillatorPotential, self).__init__(units=units,
                                                          parameters=parameters,
                                                          ndim=len(parameters['omega']))
    def _energy(self, q, t=0.):
        # .value: parameters are presumably wrapped as unit-ful Quantity
        # objects by PotentialBase -- TODO confirm against the base class.
        om = np.atleast_1d(self.parameters['omega'].value)
        # Sum 0.5 * omega_i**2 * q_i**2 over the coordinate axis; the
        # axis=1 reduction implies q has shape (n_points, ndim) here.
        return np.sum(0.5 * om[None]**2 * q**2, axis=1)
    def _gradient(self, q, t=0.):
        om = np.atleast_1d(self.parameters['omega'].value)
        # dPhi/dq_i = omega_i**2 * q_i, broadcast over all points.
        return om[None]**2 * q
    def _hessian(self, q, t=0.):
        om = np.atleast_1d(self.parameters['omega'].value)
        # NOTE(review): the analytic Hessian of 0.5*omega**2*q**2 is
        # diag(omega**2); this builds diag(omega) instead -- looks like a
        # bug, verify before relying on _hessian.
        return np.tile(np.diag(om)[:,:,None], reps=(1,1,q.shape[0]))
    def action_angle(self, w):
        """
        Transform the input cartesian position and velocity to action-angle
        coordinates the Harmonic Oscillator potential. This transformation
        is analytic and can be used as a "toy potential" in the
        Sanders & Binney 2014 formalism for computing action-angle coordinates
        in _any_ potential.
        Adapted from Jason Sanders' code
        `genfunc <https://github.com/jlsanders/genfunc>`_.
        Parameters
        ----------
        w : :class:`gala.dynamics.PhaseSpacePosition`, :class:`gala.dynamics.Orbit`
            The positions or orbit to compute the actions, angles, and frequencies at.
        """
        # Imported lazily, presumably to avoid a circular import with the
        # dynamics subpackage -- TODO confirm.
        from ....dynamics.analyticactionangle import harmonic_oscillator_to_aa
        return harmonic_oscillator_to_aa(w, self)
    # def phase_space(self, actions, angles):
    #     """
    #     Transform the input action-angle coordinates to cartesian position and velocity
    #     assuming a Harmonic Oscillator potential. This transformation
    #     is analytic and can be used as a "toy potential" in the
    #     Sanders & Binney 2014 formalism for computing action-angle coordinates
    #     in _any_ potential.
    #     Adapted from Jason Sanders' code
    #     `genfunc <https://github.com/jlsanders/genfunc>`_.
    #     Parameters
    #     ----------
    #     x : array_like
    #         Positions.
    #     v : array_like
    #         Velocities.
    #     """
    #     from ...dynamics.analyticactionangle import harmonic_oscillator_aa_to_xv
    #     return harmonic_oscillator_aa_to_xv(actions, angles, self)
class KuzminPotential(PotentialBase):
    r"""
    The Kuzmin flattened disk potential.

    .. math::

        \Phi = -\frac{Gm}{\sqrt{x^2 + y^2 + (a + |z|)^2}}

    Parameters
    ----------
    m : numeric
        Mass.
    a : numeric
        Flattening parameter.
    units : iterable
        Unique list of non-reducable units that specify (at minimum) the
        length, mass, time, and angle units.
    """
    def __init__(self, m, a, units):
        parameters = OrderedDict()
        parameters['m'] = m
        parameters['a'] = a
        super(KuzminPotential, self).__init__(units=units,
                                              parameters=parameters)

    def _energy(self, q, t):
        # q unpacks along axis 0: rows are the x, y, z coordinates.
        x, y, z = q
        m = self.parameters['m']
        a = self.parameters['a']
        val = -self.G * m / np.sqrt(x**2 + y**2 + (a + np.abs(z))**2)
        return val

    def _gradient(self, q, t):
        x, y, z = q
        m = self.parameters['m']
        a = self.parameters['a']
        fac = self.G * m / (x**2 + y**2 + (a + np.abs(z))**2)**1.5
        # BUG FIX: d/dz of (a + |z|)**2 is 2*(a + |z|)*sign(z), so the z
        # component of grad(Phi) is fac*(a + |z|)*sign(z), not fac*z as
        # the previous `fac * q` computed (the two agree only when a == 0;
        # sign(0) == 0 keeps the midplane gradient zero, as before).
        return fac[None, ...] * np.array([x, y, (a + np.abs(z)) * np.sign(z)])
|
[
"adrian.prw@gmail.com"
] |
adrian.prw@gmail.com
|
abd0e4369ed4686d8a57e4903580a0d3336d78b0
|
7e98a8b31cc2fecf8384f4590f27540f7280659a
|
/v1/LinkedLists/mirror_subtraction.py
|
b9bf4ac6f787e73961c467e99aa83d9e78408879
|
[] |
no_license
|
darraes/coding_questions
|
62a07a6ede884fd0e3596f3dac8f50e11fd8676e
|
70dcc3b859db673c39c3cf55aeb463d2bd822ed9
|
refs/heads/master
| 2021-01-21T07:21:18.033321
| 2020-06-15T16:25:11
| 2020-06-15T16:25:11
| 91,611,595
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,274
|
py
|
# http://www.careercup.com/question?id=5657550909341696
class Node:
    """Singly linked list node: `_value` is the integer payload and
    `_next` the successor node (None at the tail)."""
    def __init__(self, next, value):
        # `next` shadows the builtin of the same name; kept as-is to
        # preserve the existing positional (next, value) call signature.
        self._next = next
        self._value = value
def revert(node):
    """Reverse a singly linked list in place.

    Parameters
    ----------
    node : Node
        Head of the list to reverse; must not be None.

    Returns
    -------
    tuple
        ``(tail, head)`` -- the new tail (the old head, whose ``_next``
        is now None) and the new head (the old last node).

    Raises
    ------
    ValueError
        If ``node`` is None.
    """
    # BUG FIX: the original used a bare `raise` outside any except block,
    # which raises RuntimeError("No active exception to re-raise") rather
    # than a meaningful error, and compared with `== None`.
    if node is None:
        raise ValueError("cannot revert an empty list")
    # Iterative reversal avoids the original recursion's O(n) call depth
    # (RecursionError on long lists) while preserving the (tail, head)
    # return contract, including (node, node) for a single-element list.
    old_head = node
    prev = None
    current = node
    while current is not None:
        nxt = current._next
        current._next = prev
        prev = current
        current = nxt
    return (old_head, prev)
def solve(node):
    """Mirror subtraction: subtract the reversed second half of the list
    from the first half, element-wise, in place, then reattach the second
    half in its original order. Returns the head of the modified list.
    See the careercup link at the top of the file for the problem.
    """
    # Locate the midpoint with slow/fast pointers and detach the second
    # half from the first (slow ends at the first half's tail).
    slow = fast = firstHalf = node
    secondHalf = None
    while slow._next != None:
        if fast._next == None or fast._next._next == None:
            secondHalf = slow._next
            slow._next = None
            break
        else:
            slow = slow._next
            fast = fast._next._next
    headF = firstHalf
    tailF = slow
    # Reverse the second half so its elements line up front-to-back with
    # the first half.
    # NOTE(review): for a single-node list, secondHalf stays None and
    # revert(None) raises -- verify callers never pass 1-element lists.
    tail, secondHalf = revert(secondHalf)
    headS = secondHalf
    # Subtract pairwise; if the first half is longer (odd length), its
    # unmatched node is zeroed out.
    while firstHalf != None:
        if secondHalf != None:
            firstHalf._value -= secondHalf._value
            secondHalf = secondHalf._next
        else:
            firstHalf._value = 0
        firstHalf = firstHalf._next
    # Restore the second half's original order and reattach it.
    tail, headS = revert(headS)
    tailF._next = headS
    return headF
# Demo: builds the list 10 -> 9 -> 4 -> 5 -> 10 (Node takes (next, value),
# so construction is inside-out) and runs the mirror subtraction on it.
head = Node(Node(Node(Node(Node(None, 10), 5), 4), 9), 10)
new = solve(head)
|
[
"daniel.arraes@gmail.com"
] |
daniel.arraes@gmail.com
|
b006e071b4eedcdff17cc409c2ad17895603ea12
|
4342ef8afa6a0790690f711d28c0ce2c78711c67
|
/seed_database.py
|
921aa0c13ce449678e4383c184f066f8bd5a1fa0
|
[] |
no_license
|
christinababaya/ratings-v2
|
fe03413af9042942204aa69980a3ce2415411c14
|
2812bc1493607fb4f04b62c1e84b4d9efb396937
|
refs/heads/master
| 2023-03-25T20:39:38.191009
| 2021-03-26T16:00:14
| 2021-03-26T16:00:14
| 351,493,197
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 951
|
py
|
"""Seed script: rebuilds the `ratings` database and fills it with movies
from a JSON fixture plus randomly generated users and ratings."""
import os
import json
from random import choice, randint
from datetime import datetime
import crud
import model
import server
# DESTRUCTIVE: drop and recreate the PostgreSQL database from scratch.
os.system('dropdb ratings')
os.system('createdb ratings')
# Bind the SQLAlchemy models to the Flask app and create all tables.
model.connect_to_db(server.app)
model.db.create_all()
# Load the movie fixture file.
with open('data/movies.json') as f:
    movie_data = json.loads(f.read())
# Insert one Movie row per JSON record, keeping the ORM objects so the
# ratings below can reference them.
movies_in_db=[]
for movie in movie_data:
    title, overview, poster_path= movie['title'], movie['overview'],movie['poster_path']
    # Fixture dates are ISO-formatted strings, e.g. "1999-12-31".
    release_date = datetime.strptime(movie['release_date'], '%Y-%m-%d')
    db_movie= crud.create_movie(title, overview, release_date, poster_path)
    movies_in_db.append(db_movie)
# Create 10 test users, each rating 10 random movies with a random
# score from 1 to 5 (movies may repeat for a given user).
for n in range(10):
    email = f'user{n}@test.com'
    password= 'test'
    user = crud.create_user(email, password)
    for _ in range(10):
        random_movie= choice(movies_in_db)
        score= randint(1,5)
        crud.create_rating(user, random_movie, score)
|
[
"vagrant@vagrant.vm"
] |
vagrant@vagrant.vm
|
92ec7ed041e603e068af5792813e85378c4cca01
|
9d454ae0d5dd1d7e96e904ced80ca502019bb659
|
/1720_decode.py
|
8f1c93438180b711a3300877bb9f7e9cc36a7969
|
[] |
no_license
|
zzz686970/leetcode-2018
|
dad2c3db3b6360662a90ea709e58d7facec5c797
|
16e4343922041929bc3021e152093425066620bb
|
refs/heads/master
| 2021-08-18T08:11:10.153394
| 2021-07-22T15:58:52
| 2021-07-22T15:58:52
| 135,581,395
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
class Solution:
    def decode(self, encoded: List[int], first: int) -> List[int]:
        """Rebuild the original array from its XOR-encoded form.

        ``encoded[i] == original[i] ^ original[i + 1]`` and ``first`` is
        ``original[0]``, so XOR-ing each encoded element with the most
        recently recovered value yields the next original element.
        """
        decoded = [first]
        for code in encoded:
            decoded.append(decoded[-1] ^ code)
        return decoded
|
[
"1564256031@qq.com"
] |
1564256031@qq.com
|
280e83ef0782347a3afd107ead0084773b84c6ae
|
aa8af4dc70c14339a05489b0c4c4925d7a00e319
|
/starbox_custom/starbox_custom/doctype/store/test_store.py
|
908d6e2311d2d1c252d713c5f8fb6e7b22ab9769
|
[
"MIT"
] |
permissive
|
vhrspvl/starbox-custom
|
925768540b318ee923dd6587291fbece003fd17e
|
0d1143e64119cff66ad52fbe8453fa7281b62998
|
refs/heads/master
| 2021-05-13T21:35:17.327744
| 2019-07-24T16:11:50
| 2019-07-24T16:11:50
| 116,466,334
| 0
| 2
| null | 2018-08-13T15:05:27
| 2018-01-06T08:15:26
|
Python
|
UTF-8
|
Python
| false
| false
| 217
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Starboxes India and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest


class TestStoreKeeper(unittest.TestCase):
    """Placeholder test case scaffold for the Store doctype; no behavior
    is exercised yet.

    NOTE(review): the class is named TestStoreKeeper inside test_store.py
    -- confirm whether it should be TestStore to match the doctype.
    """
    pass
|
[
"hereabdulla@gmail.com"
] |
hereabdulla@gmail.com
|
01040de6b5d4ea3274ea3ea17e71b4643c1b4001
|
b483c598fa375e9af02348960f210b9f482bd655
|
/cursoemvideo/desafios/Desafio075.py
|
48338306d4a75934c3af6b61e07b9ea8b5210c04
|
[
"MIT"
] |
permissive
|
brunofonsousa/python
|
6f766d08bf193180ea9a4903cb93ffd167db588d
|
8f2f26c77015c0baaa76174e004406b4115272c7
|
refs/heads/master
| 2022-09-30T14:58:01.080749
| 2020-06-08T09:55:35
| 2020-06-08T09:55:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,504
|
py
|
'''
Python Exercise 075: Write a program that reads four values from the
keyboard and stores them in a tuple. At the end, show:
A) How many times the value 9 appeared.
B) The position where the first value 3 was typed.
C) Which of the numbers were even.
'''
# GUANABARA'S VERSION:
num = (int(input("Digite um número: ")),
       int(input("Digite outro número: ")),
       int(input("Digite mais um número: ")),
       int(input("Digite o último número: ")))
print(f"O valor 9 apareceu {num.count(9)} vezes")
if 3 in num:
    # index() is 0-based, hence the +1 for a human-readable position.
    print(f"O valor 3 apareceu na {num.index(3)+1}ª posição")
else:
    print("O valor 3 não foi digitado em nenhuma posição")
print(f"Os valores pares digitados foram ", end="")
for n in num:
    if n % 2 == 0:
        print(n, end=" ")
    else:
        pass
# BRUNO'S VERSION:
varia = ['um', 'outro', 'mais um', 'último']
tupla=()
for i in range(4):
    num = int(input(f"Digite {varia[i]} número: "))
    tupla = tupla + (num,)
print("Você digitou os valores:", (tupla))
# Count occurrences of 9 by hand.
vezes = 0
for i in range(len(tupla)):
    #x = tupla.count(tupla[i])
    x = tupla[i]
    if x == 9:
        vezes += 1
    else:
        pass
# Position of the first 3 (0-based here, unlike the +1 version above).
# NOTE(review): if no 3 was typed, `primeiro` is never assigned and the
# final print raises NameError.
for i in range(len(tupla)):
    if tupla[i] == 3:
        primeiro = i
        break
    else:
        pass
# NOTE(review): this counts how many even values there were instead of
# listing them, which differs from the statement and the first version.
par = 0
for i in tupla:
    if i % 2 == 0:
        par += 1
    else:
        pass
print(f"O valor 9 apareceu {vezes} vezes")
print(f"O valor 3 apareceu na {primeiro}ª posição")
print(f"Os valores pares digitados foram {par}")
|
[
"brunofonsousa@gmail.com"
] |
brunofonsousa@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.