content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def list_dataset_contents(datasetName=None, nextToken=None, maxResults=None, scheduledOnOrAfter=None, scheduledBefore=None):
    """Lists information about data set contents that have been created.

    See also: AWS API Documentation.

    :type datasetName: string
    :param datasetName: [REQUIRED] The name of the data set whose contents
        information you want to list.
    :type nextToken: string
    :param nextToken: The token for the next set of results.
    :type maxResults: integer
    :param maxResults: The maximum number of results to return in this request.
    :type scheduledOnOrAfter: datetime
    :param scheduledOnOrAfter: A filter to limit results to those data set
        contents whose creation is scheduled on or after the given time.
        See the field triggers.schedule in the CreateDataset request. (timestamp)
    :type scheduledBefore: datetime
    :param scheduledBefore: A filter to limit results to those data set
        contents whose creation is scheduled before the given time.
        See the field triggers.schedule in the CreateDataset request. (timestamp)
    :rtype: dict

    Response syntax::

        {
            'datasetContentSummaries': [
                {
                    'version': 'string',
                    'status': {
                        'state': 'CREATING'|'SUCCEEDED'|'FAILED',
                        'reason': 'string'
                    },
                    'creationTime': datetime(2015, 1, 1),
                    'scheduleTime': datetime(2015, 1, 1),
                    'completionTime': datetime(2015, 1, 1)
                },
            ],
            'nextToken': 'string'
        }

    Response structure:
        datasetContentSummaries (list): summary information about data set
            contents that have been created, one dict per content version
            with its status (state/reason), creationTime, scheduleTime and
            completionTime.
        nextToken (string): token to retrieve the next set of results, or
            null if there are no more results.

    Exceptions:
        IoTAnalytics.Client.exceptions.InvalidRequestException
        IoTAnalytics.Client.exceptions.InternalFailureException
        IoTAnalytics.Client.exceptions.ServiceUnavailableException
        IoTAnalytics.Client.exceptions.ThrottlingException
        IoTAnalytics.Client.exceptions.ResourceNotFoundException
    """
    pass
def test_app_creation():
    """
    Test if a Flask-Philo_Core app is created properly
    """
    # Without a settings module the initialisation must fail loudly.
    with pytest.raises(ConfigurationError):
        init_app(__name__)
    # With the settings module injected into the environment it must succeed.
    env = {'FLASK_PHILO_SETTINGS_MODULE': 'config.settings'}
    with patch.dict(os.environ, env):
        app = init_app(__name__)
        assert app is not None
        assert app.name == __name__
def django_db_setup(django_db_setup, django_db_blocker):
    """Set up the database for tests that need it."""
    with django_db_blocker.unblock():
        # Create one known user with an auth token so API tests can
        # authenticate against a predictable account.
        account = User.objects.create_user(
            email='user_one@example.com',
            password='test_123',
        )
        Token.objects.create(user=account)
def get_sample_eclat(name):
    """Read a tweet sample from a sample file and return it in a format eclat
    can process.

    Each record consists of one whitespace-separated data line followed by
    8 lines that are skipped.  Column layout of a data line:
    0 = label, 3 = follower count, 4-11 = binary feature flags (named by the
    module-level ``cols``), 12 = sentiment code, 13 = topic.

    Args:
        name (str): path of the sample file.

    Returns:
        tuple: ``(X, Y)`` where X is a list of transactions (lists of string
        items) and Y is the list of integer labels.
    """
    # Follower-count buckets as (exclusive upper bound, item label).
    follower_buckets = (
        (50, '#followers: 0-49'),
        (100, '#followers: 50-99'),
        (500, '#followers: 100-499'),
        (1000, '#followers: 500-999'),
        (5000, '#followers: 1000-4999'),
        (10000, '#followers: 5000-9999'),
    )
    sentiments = {0: 'Sentiment: Negative', 1: 'Sentiment: Neutral'}
    X = []
    Y = []
    # Context manager closes the file even on parse errors (the original
    # implementation leaked the handle).
    with open(name) as sample_file:
        line = sample_file.readline()
        while line != '':
            row = line.split()
            Y.append(int(row[0]))
            x = []
            followers = int(row[3])  # convert once instead of per branch
            for bound, label in follower_buckets:
                if followers < bound:
                    x.append(label)
                    break
            else:
                x.append('#followers: 10000+')
            # Columns 4-11 are binary feature flags named in the global `cols`.
            for i in range(4, 12):
                if int(row[i]):
                    x.append(cols[i - 3])
            # Any code other than 0/1 counts as positive, as before.
            x.append(sentiments.get(int(row[12]), 'Sentiment: Positive'))
            x.append('Topic: ' + row[13])
            X.append(x)
            # Each record is followed by 8 lines we do not use.
            for _ in range(8):
                sample_file.readline()
            line = sample_file.readline()
    return X, Y
def test_object_names_tables(sdc_builder, sdc_executor, gcp, table_name):
    """
    Create data using Google BigQuery client with specific table names
    and then check if Google BigQuery origin receives them using wiretap.
    The pipeline looks like:
    google_bigquery >> wiretap
    """
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dataset_name = get_random_string(string.ascii_letters, 5)
    google_bigquery = pipeline_builder.add_stage('Google BigQuery', type='origin')
    query_str = f"SELECT * FROM `{dataset_name}`.`{table_name}` ORDER BY id"
    google_bigquery.set_attributes(query=query_str)
    wiretap = pipeline_builder.add_wiretap()
    google_bigquery >> wiretap.destination
    pipeline = pipeline_builder.build().configure_for_environment(gcp)
    sdc_executor.add_pipeline(pipeline)
    bigquery_client = gcp.bigquery_client
    # Bug fix: dataset_ref must be defined before entering the try block,
    # otherwise the finally clause raises NameError when Dataset() fails.
    dataset_ref = None
    try:
        dataset_ref = Dataset(bigquery_client.dataset(dataset_name))
        # Using Google bigquery client, create dataset, table and data inside table
        logger.info('Creating dataset %s using Google bigquery client ...', dataset_name)
        bigquery_client.create_dataset(dataset_ref)
        table = bigquery_client.create_table(Table(dataset_ref.table(table_name), schema=SCHEMA))
        errors = bigquery_client.insert_rows(table, ROWS_TO_INSERT)
        assert not errors, 'Errors found when creating data using bigquery client'
        # Start pipeline and verify correct rows are received.
        logger.info('Starting pipeline')
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        assert len(wiretap.output_records) == len(ROWS_TO_INSERT),\
            f'Expected {len(ROWS_TO_INSERT)} records, received {len(wiretap.output_records)}'
        rows_from_wiretap = get_rows_from_wiretap(wiretap)
        assert rows_from_wiretap == ROWS_TO_INSERT
    finally:
        # Always clean up the dataset if it was created (or even referenced).
        if dataset_ref is not None:
            bigquery_client.delete_dataset(dataset_ref, delete_contents=True)
def process_songs(spark, df, output_data):
    """Build the Songs dimension table from the raw songs data frame using
    Spark and store it in S3 as parquet partitioned by year and artist_id."""
    print('Processing songs...')
    # Explicit schema: implicit inference could pick wrong types for
    # sparsely populated columns.
    songs_schema = StructType([
        StructField('song_id', StringType(), nullable=False),
        StructField('title', StringType(), nullable=False),
        StructField('artist_id', StringType(), nullable=True),
        StructField('year', LongType(), nullable=True),
        StructField('duration', DoubleType(), nullable=True),
    ])
    # Clean-up: keep only rows that have a song_id and a title, one row per
    # song_id, restricted to the columns of the Songs table.
    cleaned = df.filter(col('song_id').isNotNull())
    cleaned = cleaned.filter(col('title').isNotNull())
    cleaned = cleaned.dropDuplicates(['song_id'])
    cleaned = cleaned.select('song_id', 'title', 'artist_id', 'year', 'duration')
    # Re-create the data frame from the cleaned RDD with the explicit schema.
    songs_table = spark.createDataFrame(cleaned.rdd, songs_schema)
    print('Writing songs_table data frame to parquet to S3')
    songs_table_path = output_data + 'tables/songs/songs.parquet'
    writer = songs_table.write.partitionBy('year', 'artist_id').mode('overwrite')
    writer.parquet(songs_table_path)
    print('Songs table has been created.')
def test_dmd_computation():
    """Check all variants of DMD on a toy problem."""
    # nose-style generator test: yield one check per (exact, total) combo.
    flags = (True, False)
    for exact in flags:
        for total in flags:
            yield check_kdmd_computation_simple, exact, total
def viz():
    """check queenbee is flying (smoke-test command that prints an ASCII bee)"""
    # The string below is emitted verbatim to the terminal.
    click.echo("""
    .' '. __
    viiiiiiiiiiiiizzzzzzzzz! . . . . (__\_
    . . . -{{_(|8)
    ' . . ' ' . . ' (__/
    """)
def get_model():
    """Download and unpack the pretrained NSynth WaveNet checkpoint.

    Fetches the checkpoint tarball from the Magenta download server via
    ``download_and_extract_tar``; returns nothing.  Destination directory
    is whatever that helper uses — presumably the working directory
    (TODO confirm against the helper's implementation).
    """
    download_and_extract_tar(
        'http://download.magenta.tensorflow.org/models/nsynth/wavenet-ckpt.tar')
def submission_storage_path(instance, filename):
    """Build the storage path for an uploaded submission file.

    The path encodes the submitting user's nick, the question level, the
    question's level id and a submission timestamp::

        submissions/<nick>/<level>/<level_id>/<HH:MMAM-MM-DD-YYYY>/<filename>

    Args:
        instance: submission model instance providing ``submission_user``
            and ``submission_question``.
        filename (str): original name of the uploaded file.

    Returns:
        str: the relative storage path.
    """
    parts = [
        'submissions',
        instance.submission_user.user_nick,
        str(instance.submission_question.question_level),
        str(instance.submission_question.question_level_id),
    ]
    string = '/'.join(parts)
    string += '/' + datetime.datetime.now().strftime("%I:%M%p-%m-%d-%Y")
    # Bug fix: the filename used to be glued directly onto the timestamp;
    # separate it with '/' so the timestamp acts as a directory level.
    string += '/' + filename
    return string
def distance_to_center(n):
    """Return Manhattan distance to center of spiral of length <n>."""
    gen = distances_to_center()
    # Discard the first n-1 distances; the n-th one is the answer.
    skipped = 0
    while skipped < n - 1:
        next(gen)
        skipped += 1
    return next(gen)
def detect_tachycardia(heart_rate, age):
    """Best-effort guess as to whether tachycardia is being exhibited.

    :param float heart_rate: heart rate in bpm
    :param int age: age of the user/patient
    :return bool: True when the heart rate exceeds the age-adjusted maximum
    """
    import logging as log
    log.debug("Checking for tachycardia.\n")
    # Age-adjusted maximum heart rate, rounded to two decimal places.
    max_rate = round(207 - (0.7 * age), 2)
    return heart_rate > max_rate
def test_seed_consistency(n, d, r, D_):
    """Seed consistency checker: the same seed must reproduce the same
    instance, and different seeds must produce different ones.  Could be
    made fuzzy by generating random values for the other parameters."""
    seed_a, seed_b = 25, 23857235
    arc_count = nb_arcs_from_density(n, d)
    built = []
    for seed in (seed_a, seed_a, seed_b):
        # Mirrors the construction currently done in the CLI.
        rng = random.Random(seed)
        draw = partial(random.Random.randint, a=D_[0], b=D_[1])
        built.append(build_instance(rng, n, arc_count, r, draw))
    first, second, third = built
    # Same seed: every component matches.
    assert first[0] == second[0] and first[1] == second[1] \
        and first[2] == second[2] and first[3] == second[3]
    # Different seeds: network, tree and distances all differ.
    assert first[0] != third[0] and first[1] != third[1] and first[2] != third[2]
async def _assert_preconditions_async(preconditions: List[List[Contract]],
                                      resolved_kwargs: Mapping[str, Any]) -> Optional[BaseException]:
    """Assert that the preconditions of an async function hold.

    ``preconditions`` is a list of groups: groups are OR-ed together
    ("require else" semantics — a subclass may weaken inherited
    preconditions), while the contracts inside a single group are AND-ed.

    :param preconditions: grouped contracts to check
    :param resolved_kwargs: resolved keyword arguments of the wrapped call
    :return: the violation error of the last failing group, or None if some
        group was fully satisfied
    """
    exception = None  # type: Optional[BaseException]
    # Assert the preconditions in groups. This is necessary to implement "require else" logic when a class
    # weakens the preconditions of its base class.
    for group in preconditions:
        exception = None
        for contract in group:
            assert exception is None, "No exception as long as pre-condition group is satisfiable."
            condition_kwargs = select_condition_kwargs(contract=contract, resolved_kwargs=resolved_kwargs)
            if inspect.iscoroutinefunction(contract.condition):
                # Coroutine functions must be awaited to obtain the result.
                check = await contract.condition(**condition_kwargs)
            else:
                check_or_coroutine = contract.condition(**condition_kwargs)
                # A sync callable may still return a coroutine (e.g. a lambda
                # wrapping an async call); await it in that case.
                if inspect.iscoroutine(check_or_coroutine):
                    check = await check_or_coroutine
                else:
                    check = check_or_coroutine
            if not_check(check=check, contract=contract):
                exception = _create_violation_error(contract=contract, resolved_kwargs=resolved_kwargs)
                break
        # The group of preconditions was satisfied, no need to check the other groups.
        if exception is None:
            break
    return exception
def moveGeneratorFromStrList (betaStringList, string_mode = True):
    """ Generate the final output of a move sequence as a list of dictionaries.

    Input example:
        ['F5-LH', 'F5-RH', 'E8-LH', 'H10-RH', 'E13-LH', 'I14-RH', 'E15-LH', 'G18-RH']
    One dictionary is produced per move; target holds run from the third
    hold in the sequence to the last hold.

    Each dictionary carries the information needed to evaluate grade /
    analyze style and is the basic building block of the route:
        TargetHoldString: e.g. "A10" (an (x, y) tuple when string_mode is False)
        TargetHoldHand: e.g. "RH" (0 for LH / 1 for RH when string_mode is False)
        TargetHoldScore: difficulty of holding the target hold with that hand
        RemainingHoldString / RemainingHoldHand / RemainingHoldScore:
            the hold still gripped by the other hand
        MovingHoldString / MovingHoldHand / MovingHoldScore:
            the hold the moving hand leaves
        dxdyMtoT: vector Target - moving hold (real hand travel of the move)
        dxdyRtoT: vector Target - remaining hold (hand spread after the move)
        FootPlacement: e.g. [0,0,0,0,1,1,0] -> foot holds exist in regions 4 and 5
        MoveSuccessRate: estimate of how easy this move is
    """
    # From list of strings to hand sequence and op sequence.
    handSequence = []
    handOperatorSequence = []
    xSequence = []
    ySequence = []
    for hold in betaStringList:
        # Split "F5-LH" into the coordinate part ("F5") and the hand op ("LH").
        characterAndNum = [re.findall(r'(\w+?)(\d+)', hold.split("-")[0])[0]]
        handOp = hold.split("-")[1]
        # Column letters A..K map to x = 0..10; rows are 1-based -> y = row - 1.
        alphabateList = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"]
        handOperatorSequence.append(handOp)
        xSequence.append(alphabateList.index(characterAndNum[0][0]) )
        ySequence.append(int(characterAndNum[0][1]) - 1)
    outputDictionaryList = []
    numOfMoves = len(handOperatorSequence) - 2 # calculate from the third hold to the end hold (no final match)
    # Loop over holds from the third one to the finish hold (rank 2..end).
    # In each iteration that hold is the move's target hold.
    for rank in range(2, len(handOperatorSequence)):
        # Renew a dictionary
        moveDictionary = {}
        # Define target hold
        targetHoldHand = handOperatorSequence[rank]
        coordinateOfTarget = (xSequence[rank], ySequence[rank])
        if string_mode == False:
            moveDictionary["TargetHoldString"] = coordinateOfTarget
            if targetHoldHand == "LH":
                moveDictionary["TargetHoldHand"] = 0 # LH ->0
            else: moveDictionary["TargetHoldHand"] = 1 # RH -> 1
        else:
            moveDictionary["TargetHoldString"] = coordinateToString(coordinateOfTarget)
            moveDictionary["TargetHoldHand"] = targetHoldHand
        # NOTE(review): holdScoreUseCordAndOp presumably looks up per-hand hold
        # difficulty from a table — confirm against its definition.
        moveDictionary["TargetHoldScore"] = holdScoreUseCordAndOp(coordinateOfTarget, targetHoldHand)
        # Define remaining hold
        listBeforeTargetHold = handOperatorSequence[0:rank]
        remainingHoldHand = oppositehand(targetHoldHand)
        # The remaining hold is the last hold gripped with the opposite hand
        # before the target; rindex/2 works because every op is 2 chars ("LH"/"RH").
        order = int(''.join(listBeforeTargetHold).rindex(remainingHoldHand)/2)
        coordinateOfRemaining = (xSequence[order], ySequence[order])
        if string_mode == False:
            moveDictionary["RemainingHoldString"] = coordinateOfRemaining
            moveDictionary["RemainingHoldHand"] = 1 - moveDictionary["TargetHoldHand"]
        else:
            moveDictionary["RemainingHoldString"] = coordinateToString(coordinateOfRemaining)
            moveDictionary["RemainingHoldHand"] = remainingHoldHand
        moveDictionary["RemainingHoldScore"] = holdScoreUseCordAndOp(coordinateOfRemaining, remainingHoldHand)
        moveDictionary["dxdyRtoT"] = (coordinateOfTarget[0] - coordinateOfRemaining[0], coordinateOfTarget[1] - coordinateOfRemaining[1])
        # Define moving hold: last hold gripped with the same hand as the target.
        movingHoldHand = targetHoldHand
        order = int(''.join(listBeforeTargetHold).rindex(movingHoldHand)/2)
        coordinateOfMoving = (xSequence[order], ySequence[order])
        if string_mode == False:
            moveDictionary["MovingHoldString"] = coordinateOfMoving
            moveDictionary["MovingHoldHand"] = moveDictionary["TargetHoldHand"]
        else:
            moveDictionary["MovingHoldString"] = coordinateToString(coordinateOfMoving)
            moveDictionary["MovingHoldHand"] = movingHoldHand
        moveDictionary["MovingHoldScore"] = holdScoreUseCordAndOp(coordinateOfMoving, movingHoldHand)
        moveDictionary["dxdyMtoT"] = (coordinateOfTarget[0] - coordinateOfMoving[0], coordinateOfTarget[1] - coordinateOfMoving[1])
        # Define the 7 foot-placement regions relative to the remaining hold.
        x0, y0 = int(coordinateOfRemaining[0]), int(coordinateOfRemaining[1])
        region0 = [(x,y) for x in range(x0 - 4, x0 - 1) for y in range(y0 - 3, y0 - 1)]
        region1 = [(x,y) for x in range(x0 - 1, x0 + 2) for y in range(y0 - 3, y0 - 1)]
        region2 = [(x,y) for x in range(x0 + 2, x0 + 5) for y in range(y0 - 3, y0 - 1)]
        region3 = [(x,y) for x in range(x0 - 5, x0 - 1) for y in range(y0 - 6, y0 - 3)]
        region4 = [(x,y) for x in range(x0 - 1, x0 + 2) for y in range(y0 - 6, y0 - 3)]
        region5 = [(x,y) for x in range(x0 + 2, x0 + 6) for y in range(y0 - 6, y0 - 3)]
        region6 = [(x,y) for x in range(x0 - 2, x0 + 3) for y in range(y0 - 9, y0 - 6)]
        # Check whether any hold of the route lies in each foot region.
        footholdList = [0] * 7
        regionList = [region0, region1, region2, region3, region4, region5, region6]
        for holdx, holdy in zip(xSequence, ySequence):
            for i in range(7):
                if (holdx, holdy) in regionList[i]:
                    footholdList[i] = 1
        # Deal with additional footholds below the board (y < 0): regions that
        # dip under the bottom edge are considered usable.
        if region1[0][1] < 0: # lowest hold in region1 below 0 -> extra footholds
            footholdList[0] = 1
            footholdList[1] = 1
            footholdList[2] = 1
        elif region4[0][1] < 0: # lowest hold in region4 below 0 -> extra footholds
            footholdList[3] = 1
            footholdList[4] = 1
            footholdList[5] = 1
        elif region6[0][1] < 0: # lowest hold in region6 below 0 -> extra footholds
            footholdList[6] = 1
        moveDictionary["FootPlacement"] = footholdList
        # Add the single-move success rate.
        if coordinateOfMoving == coordinateOfRemaining: ## If start from the match position
            pass # May need special consideration when match hand
        if targetHoldHand == "RH":
            scoreFromDistance = makeGaussian(coordinateOfTarget, 3, coordinateOfRemaining, "LH")
        if targetHoldHand == "LH":
            scoreFromDistance = makeGaussian(coordinateOfTarget, 3, coordinateOfRemaining, "RH")
        scoreFromfoot = 1
        # Penalise moves with no foot placement at all.
        if sum(footholdList) < 1: scoreFromfoot = 0.5
        moveSuccessRate = moveDictionary["RemainingHoldScore"] * moveDictionary["TargetHoldScore"] * scoreFromDistance * scoreFromfoot
        moveDictionary["MoveSuccessRate"] = moveSuccessRate
        # Finish fill in all components of a move
        outputDictionaryList.append(moveDictionary)
    return outputDictionaryList
def main_mix(args):
    """Render the final mix into a single bitstream file from an ordered
    track list using pydub.

    Computes the output file for the end-to-start feature matching mixing
    algorithm used in farmers manual's guest mix @sorbierd 2017.

    Args:
        args: namespace whose ``file`` attribute is the path of a Python
            snippet that defines a ``trk_seq`` list of (label, mp3-path)
            tuples.

    Side effects:
        Writes ``mix_fin.mp3`` (320k mp3 with id3 tags) to the working dir.
    """
    from pydub import AudioSegment
    assert type(args.file) is str, "main_mix assumes args.file to be singular string, not %s" % (type(args.file), )
    # Close the file handle deterministically (the original leaked it).
    with open(args.file, 'r') as f:
        trk_seq_raw = f.read()
    gv = {}
    lv = {}
    # SECURITY: this executes arbitrary Python read from args.file.  Only
    # point it at trusted, locally authored playlist files; consider moving
    # to a declarative format (e.g. JSON) instead of exec().
    code = compile(trk_seq_raw, "<string>", "exec")
    exec(code, gv, lv)
    trk_seq = lv['trk_seq']
    silence_thresh = -40
    print("trk_seq")
    for i, tup in enumerate(trk_seq):
        print("trk", i, tup[0], tup[1])
        if i < 1:
            # First track seeds the mix.
            print("just appending")
            mix = AudioSegment.from_mp3(tup[1])
            print(" mix dur", mix.duration_seconds)
            mix = mix.strip_silence(silence_len = 1500, silence_thresh = silence_thresh, padding = 60)
            print(" mix dur", mix.duration_seconds)
        else:
            # Subsequent tracks are crossfaded in with a random 1.5-3 s fade.
            xfadetime = np.random.randint(1500, 3000)
            print("appending with xfade = %s ms" % (xfadetime, ))
            trk_ = AudioSegment.from_mp3(tup[1])
            print("trk_ dur", trk_.duration_seconds)
            trk_ = trk_.strip_silence(silence_len = 1500, silence_thresh = silence_thresh, padding = 60)
            print("trk_ dur", trk_.duration_seconds)
            mix = mix.append(trk_, crossfade = xfadetime)
            print(" mix dur", mix.duration_seconds)
    mix_fin = mix.fade_in(1000).fade_out(1000)
    mix_fin.export(
        "mix_fin.mp3",
        format = "mp3",
        bitrate = '320k',
        tags={'artist': 'farmersmanual (DJ)', 'title': 'the mix', 'album': 'fm playlist selection for sorbie rd. @subcity radio', 'comments': 'This album is awesome!'})
def add_upstream_ids(table_a, id_a, table_b, id_b, upstream_ids_col):
    """Note upstream ids: add an integer[] column to ``table_a`` holding,
    per row, ids of upstream records taken from ``table_b``.

    The work is done per watershed group in parallel into a temp copy of
    ``table_a``, which then replaces the original.

    Args:
        table_a: qualified name ("schema.table") of the table to annotate.
        id_a: primary key column of table_a.
        table_b: qualified name of the table providing upstream records.
        id_b: id column of table_b aggregated into the array.
        upstream_ids_col: name of the integer[] column to (re)create.
    """
    db = pgdata.connect()
    schema_a, table_a = db.parse_table_name(table_a)
    schema_b, table_b = db.parse_table_name(table_b)
    # ensure that any existing values get removed
    db.execute(
        f"ALTER TABLE {schema_a}.{table_a} DROP COLUMN IF EXISTS {upstream_ids_col}"
    )
    # Build a fresh copy of table_a with the new array column; the SQL
    # script fills it group by group.
    temp_table = table_a + "_tmp"
    db[f"{schema_a}.{temp_table}"].drop()
    db.execute(f"CREATE TABLE {schema_a}.{temp_table} (LIKE {schema_a}.{table_a})")
    db.execute(
        f"ALTER TABLE {schema_a}.{temp_table} ADD COLUMN {upstream_ids_col} integer[]"
    )
    groups = sorted(
        [
            g[0]
            for g in db.query(
                f"SELECT DISTINCT watershed_group_code from {schema_a}.{table_a}"
            )
        ]
    )
    # Identifiers are injected via psycopg2.sql to avoid SQL injection.
    query = sql.SQL(read_file("sql/00_add_upstream_ids.sql")).format(
        schema_a=sql.Identifier(schema_a),
        schema_b=sql.Identifier(schema_b),
        temp_table=sql.Identifier(temp_table),
        table_a=sql.Identifier(table_a),
        table_b=sql.Identifier(table_b),
        id_a=sql.Identifier(id_a),
        id_b=sql.Identifier(id_b),
        upstr_ids_col=sql.Identifier(upstream_ids_col),
    )
    # run each group in parallel
    func = partial(execute_parallel, query)
    n_processes = multiprocessing.cpu_count() - 1
    pool = multiprocessing.Pool(processes=n_processes)
    pool.map(func, groups)
    pool.close()
    pool.join()
    # drop source table, rename new table, re-create indexes
    db[f"{schema_a}.{table_a}"].drop()
    db.execute(f"ALTER TABLE {schema_a}.{temp_table} RENAME TO {table_a}")
    create_indexes(f"{schema_a}.{table_a}")
    db.execute(f"ALTER TABLE {schema_a}.{table_a} ADD PRIMARY KEY ({id_a})")
    # VACUUM cannot run inside a transaction, hence the raw connection with
    # isolation level 0 (autocommit).
    conn = db.engine.raw_connection()
    conn.set_isolation_level(0)
    cur = conn.cursor()
    cur.execute(f"VACUUM ANALYZE {schema_a}.{table_a}")
    # NOTE(review): cursor/connection are left open here — consider closing
    # them once the VACUUM finishes.
def generateVtBar(row):
    """Generate a K-line (candlestick) bar from a raw data row.

    Args:
        row: mapping with 'symbol' ("code.exchange"), OHLCV fields, 'date'
            and 'time'.

    Returns:
        VtBarData: populated bar whose timestamp is shifted one minute
        earlier than the raw time.
    """
    bar = VtBarData()
    symbol, exchange = row['symbol'].split('.')
    bar.symbol = symbol
    bar.exchange = exchangeMapReverse[exchange]
    # Stock exchanges keep the exchange suffix in the vt symbol.
    if bar.exchange in ['SSE', 'SZSE']:
        bar.vtSymbol = '.'.join([bar.symbol, bar.exchange])
    else:
        bar.vtSymbol = bar.symbol
    bar.open = row['open']
    bar.high = row['high']
    bar.low = row['low']
    bar.close = row['close']
    bar.volume = row['volume']
    bar.date = str(row['date'])
    # Normalise time to 6 digits (HHMMSS), left-padded with zeros.
    bar.time = str(row['time']).rjust(6, '0')
    # Shift the bar's time one minute earlier.
    hour=bar.time[0:2]
    minute=bar.time[2:4]
    sec=bar.time[4:6]
    if minute=="00":
        # Crossing an hour boundary: 00 -> 59 of the previous hour;
        # mapping h == 0 to 24 wraps midnight back to 23h.
        minute="59"
        h = int(hour)
        if h == 0:
            h = 24
        hour=str(h-1).rjust(2,'0')
    else:
        minute=str(int(minute)-1).rjust(2,'0')
    bar.time=hour+minute+sec
    bar.datetime = datetime.strptime(' '.join([bar.date, bar.time]), '%Y%m%d %H%M%S')
    return bar
def index_with_links():
    """Handle the POST request issued by the link-shortening form.

    Reads the submitted link, stores it under a freshly generated short id
    and returns the shortened URL plus a CSRF tag for the template.

    Returns:
        dict: template context with ``short_link`` and ``csrf_tag``.
    """
    db = sqlite3.connect('link_shortner.db')
    try:
        link = request.forms.get('link')
        generated_id = gen_id()
        # Parameterized query — never interpolate user input into SQL.
        db.execute("INSERT INTO links values (?, ?)", (generated_id, link))
        db.commit()
    finally:
        # Close the connection even if the insert fails (the original
        # leaked it on any exception).
        db.close()
    host = app.config.get('info.hostname', 'localhost:8080')
    shortened = host + '/' + generated_id
    return dict(short_link=shortened, csrf_tag=csrf.csrf_tag())
def SinGAN_generate(Gs, Zs, reals, styles, NoiseAmp, opt, in_s=None, scale_v=1, scale_h=1, n=0, gen_start_scale=0, num_samples=10):
    """
    Generate images by running the SinGAN generator pyramid from scale ``n``
    upward, sampling ``num_samples`` images per scale.

    Args:
        Gs: list of trained generators, one per scale (coarse to fine).
        Zs: list of optimal/reconstruction noise maps per scale.
        reals: list of real images per scale (used for target spatial sizes).
        styles: per-scale style tensors fed to the generators.
        NoiseAmp: per-scale noise amplitudes.
        opt: options namespace (device, ker_size, num_layer, nc_z, mode, ...).
        in_s: optional starting image; defaults to zeros shaped like reals[0].
        scale_v, scale_h: vertical/horizontal scaling of the output size.
        n: index of the first scale to run.
        gen_start_scale: scales below this use the fixed Z_opt instead of
            fresh random noise (reconstruction rather than random sample).
        num_samples: number of images generated per scale.

    Returns:
        torch.Tensor: the last generated image at the finest scale, detached.
    """
    #if torch.is_tensor(in_s) == False:
    if in_s is None:
        in_s = torch.full(reals[0].shape, 0, device=opt.device)
    images_cur = []
    for G, Z_opt, noise_amp, style in zip(Gs, Zs, NoiseAmp, styles):
        # Padding that compensates the receptive field of the generator.
        pad1 = ((opt.ker_size - 1) * opt.num_layer) / 2
        m = nn.ZeroPad2d(int(pad1))
        # Noise spatial dims, unpadded, scaled by the requested output size.
        nzx = (Z_opt.shape[2] - pad1*2) * scale_v
        nzy = (Z_opt.shape[3] - pad1*2) * scale_h
        images_prev = images_cur
        images_cur = []
        m_style = m(style)
        for i in range(0,num_samples,1):
            if n == 0:
                # Coarsest scale: single-channel noise expanded to 3 channels.
                z_curr = generate_noise([1,nzx,nzy], device=opt.device)
                z_curr = z_curr.expand(1,3,z_curr.shape[2],z_curr.shape[3])
                z_curr = m(z_curr)
            else:
                z_curr = generate_noise([opt.nc_z,nzx,nzy], device=opt.device)
                z_curr = m(z_curr)
            if images_prev == []:
                I_prev = m(in_s)
            else:
                # Upscale the previous-scale sample and crop/pad it to match
                # the current noise map before feeding it to the generator.
                I_prev = images_prev[i]
                I_prev = imresize(I_prev,1/opt.scale_factor, opt)
                I_prev = I_prev[:, :, 0:round(scale_v * reals[n].shape[2]), 0:round(scale_h * reals[n].shape[3])]
                I_prev = m(I_prev)
                I_prev = I_prev[:,:,0:z_curr.shape[2],0:z_curr.shape[3]]
                I_prev = upsampling(I_prev,z_curr.shape[2],z_curr.shape[3])
            if n < gen_start_scale:
                # Below the start scale, reconstruct with the fixed noise.
                z_curr = Z_opt
            z_in = noise_amp*(z_curr)+I_prev
            I_curr = G(z_in.detach(),I_prev, m_style.detach())
            if n == len(reals)-1:
                # Finest scale: persist every sample to disk.
                if opt.mode == 'train':
                    dir2save = '%s/RandomSamples/%s/gen_start_scale=%d' % (opt.out, opt.content[:-4], gen_start_scale)
                else:
                    dir2save = generate_dir2save(opt)
                try:
                    os.makedirs(dir2save)
                except OSError:
                    pass
                plt.imsave('%s/%d.png' % (dir2save, i), convert_image_np(I_curr.detach()), vmin=0, vmax=1)
                #plt.imsave('%s/%d_%d.png' % (dir2save,i,n),functions.convert_image_np(I_curr.detach()), vmin=0, vmax=1)
                #plt.imsave('%s/in_s.png' % (dir2save), functions.convert_image_np(in_s), vmin=0,vmax=1)
            images_cur.append(I_curr)
        n += 1
    return I_curr.detach()
def divide_blend(img_x: np.ndarray, img_y: np.ndarray) -> np.ndarray:
    """
    Blend image x and y in 'divide' mode (vectorized).

    Per-pixel rules, matching the original loop implementation exactly:
      * x == 0:   255 where y is non-zero, else 0
      * x == 255: y unchanged
      * x == y:   255
      * else:     (y / x) * 255, truncated on conversion to uint8

    :param img_x: input grayscale image on top
    :param img_y: input grayscale image at bottom
    :return: blended uint8 image of the same shape
    """
    y = img_y.astype(float)
    # Safe element-wise divide: positions where x == 0 are overridden by the
    # first select condition below, so the zero placeholder never leaks out.
    ratio = np.divide(y, img_x.astype(float),
                      out=np.zeros_like(y), where=img_x != 0) * 255
    result = np.select(
        [img_x == 0, img_x == 255, img_x == img_y],
        [np.where(img_y != 0, 255, 0), y, 255],
        default=ratio,
    )
    # Same final conversion as the original (float -> uint8 truncation).
    return result.astype(np.uint8)
def table_to_bipartite_graph(
    table: Tabular,
    first_part_col: Hashable,
    second_part_col: Hashable,
    *,
    node_part_attr: str = "part",
    edge_weight_attr: str = "weight",
    first_part_data: Optional[RowDataSpec] = None,
    second_part_data: Optional[RowDataSpec] = None,
    first_part_name: Optional[Hashable] = None,
    second_part_name: Optional[Hashable] = None,
    disjoint_keys: bool = False,
) -> AnyGraph:
    """
    Function creating a bipartite graph from the given tabular data.

    Args:
        table (Iterable[Indexable] or pd.DataFrame): input tabular data. It can
            be a large variety of things as long as it is 1. iterable and 2.
            yields indexable values such as dicts or lists. This can for instance
            be a list of dicts, a csv.DictReader stream etc. It also supports
            pandas DataFrame if the library is installed.
        first_part_col (Hashable): the name of the column containing the
            value representing a node in the resulting graph's first part.
            It could be the index if your rows are lists or a key if your rows
            are dicts instead.
        second_part_col (Hashable): the name of the column containing the
            value representing a node in the resulting graph's second part.
            It could be the index if your rows are lists or a key if your rows
            are dicts instead.
        node_part_attr (str, optional): name of the node attribute containing
            the part it belongs to. Defaults to "part".
        edge_weight_attr (str, optional): name of the edge attribute containing
            its weight, i.e. the number of times it was found in the table.
            Defaults to "weight".
        first_part_data (Sequence or Callable, optional): sequence (i.e. list, tuple etc.)
            of columns from rows to keep as node attributes for the graph's first part.
            Can also be a function returning a dict of those attributes.
            Note that the first row containing a given node will take precedence over
            subsequent ones regarding data to include.
            Defaults to None.
        second_part_data (Sequence or Callable, optional): sequence (i.e. list, tuple etc.)
            of columns from rows to keep as node attributes for the graph's second part.
            Can also be a function returning a dict of those attributes.
            Note that the first row containing a given node will take precedence over
            subsequent ones regarding data to include.
            Defaults to None.
        first_part_name (Hashable, optional): can be given to rename the first part.
            Defaults to None.
        second_part_name (Hashable, optional): can be given to rename the second part.
            Defaults to None.
        disjoint_keys (bool, optional): set this to True as an optimization
            mechanism if you know your part keys are disjoint, i.e. if no
            value for `first_part_col` can also be found in `second_part_col`.
            If you enable this option wrongly, the result can be incorrect.
            Defaults to False.

    Returns:
        nx.AnyGraph: the bipartite graph.
    """
    if first_part_col == second_part_col:
        raise TypeError("first_part_col and second_part_col must be different")
    if first_part_name is None:
        first_part_name = first_part_col
    if second_part_name is None:
        second_part_name = second_part_col
    table = iterator_from_dataframe(table)
    graph = nx.Graph()
    # Maps (part column, label) pairs to compact incremental node ids so that
    # equal labels in different parts do not collide.
    node_id = IncrementalIdRegister[Tuple[Hashable, Any]]()
    for i, row in enumerate(table):
        try:
            label1 = row[first_part_col]
            label2 = row[second_part_col]
        except (IndexError, KeyError):
            raise TypeError(
                'row %i lacks the "%s" or the "%s" value'
                % (i, first_part_col, second_part_col)
            )
        if disjoint_keys:
            # Labels are guaranteed unique across parts: use them directly.
            n1 = label1
            n2 = label2
        else:
            # TODO: possibility to save lookups for sorted data
            n1 = node_id[first_part_col, label1]
            n2 = node_id[second_part_col, label2]
        if n1 not in graph:
            node_attr = {node_part_attr: first_part_name, "label": str(label1)}
            if first_part_data:
                node_attr.update(collect_row_data(first_part_data, row))
            graph.add_node(n1, **node_attr)
        if n2 not in graph:
            node_attr = {node_part_attr: second_part_name, "label": str(label2)}
            if second_part_data:
                node_attr.update(collect_row_data(second_part_data, row))
            graph.add_node(n2, **node_attr)
        # Repeated co-occurrences accumulate into the edge weight.
        if graph.has_edge(n1, n2):
            graph[n1][n2][edge_weight_attr] += 1
        else:
            edge_attr = {edge_weight_attr: 1}
            graph.add_edge(n1, n2, **edge_attr)
    return graph
def init_wavefunction(n_sites,bond_dim,**kwargs):
    """
    Initialize the coefficients of a wavefunction for L sites (0 to L-1),
    arrange them as a chain of tensor-network nodes, and right-canonicalize
    the chain via iterative SVD to obtain a Matrix Product State.

    Parameters
    ----------
    n_sites : int
        Number of sites.
    bond_dim : int
        Initial (maximum) bond dimension of the random tensors.

    kwargs
    ----------
    conserve_n : boolean
        True for conservation of number of particles.
    num_e : int
        Number of electrons.

    Returns
    -------
    mps : list of tn.Node
        Right-canonicalized, normalized Matrix Product State with bond
        edges already connected.
    """
    # t1 = time.time()
    # Edge naming: "n_l" is the physical (occupation) index of site l,
    # "i_l" the bond between sites l and l+1.  End sites are rank-2,
    # interior sites rank-3.
    mps = [ \
        tn.Node( block(2, bond_dim),axis_names=["n_0","i_0"] )] + \
        [tn.Node( block(2, bond_dim, bond_dim),axis_names=["n_{}".format(l),"i_{}".format(l-1),"i_{}".format(l)]) for l in range(1,n_sites-1)] + \
        [tn.Node( block(2, bond_dim),axis_names=["n_{}".format(n_sites-1),"i_{}".format(n_sites-2)] ) \
        ]
    #Right Canonicalize: sweep from the last site to the first, splitting
    #each node by SVD and absorbing U*S into the node to the left.
    for i in range(n_sites-1,0,-1):
        if i == n_sites-1:
            redges = [mps[i]["n_{}".format(i)]]
        else:
            redges = [mps[i]["i_{}".format(i)],mps[i]["n_{}".format(i)]]
        ledges = [mps[i]["i_{}".format(i-1)]]
        u,s,v,_ = tn.split_node_full_svd(mps[i], left_edges=ledges, right_edges=redges,\
            left_edge_name="d_{}".format(i-1), right_edge_name="i_{}".format(i-1),\
            # max_singular_values=bond_dim)
            max_truncation_err=1e-5)
        # Put the physical index first so the tensor layout is uniform.
        if i == n_sites-1:
            reord_edges=[v["n_{}".format(i)],v["i_{}".format(i-1)]]
        else:
            reord_edges=[v["n_{}".format(i)],v["i_{}".format(i-1)],v["i_{}".format(i)]]
        v.reorder_edges(reord_edges)
        # Contract U*S into the neighbour on the left; the end site is
        # rank-2, interior sites are rank-3, hence the two ncon patterns.
        if i == 1:
            mps[i-1].tensor = tn.ncon([mps[i-1].tensor, u.tensor, s.tensor],[(-1,'k'),('k','l'),('l',-2)])
        else:
            mps[i-1].tensor = tn.ncon([mps[i-1].tensor, u.tensor, s.tensor],[(-1,-2,'k'),('k','l'),('l',-3)])
        mps[i].tensor = v.tensor
    #connect edges to build mps
    connected_edges=[]
    conn=mps[0]["i_0"]^mps[1]["i_0"]
    connected_edges.append(conn)
    for k in range(1,n_sites-1):
        conn=mps[k]["i_{}".format(k)]^mps[k+1]["i_{}".format(k)]
        connected_edges.append(conn)
    # Normalize: with a right-canonical chain the full norm lives in the
    # first tensor.
    mod = np.linalg.norm(mps[0].tensor)
    mps[0].tensor /= mod
    # t2 = time.time()
    #print("MPS CONSTRUCTION TIME=",t2-t1)
    return mps
#NOW FOR SVD
def num_channels_to_num_groups(num_channels):
    """Return the number of groups for a GroupNorm layer with the given
    number of channels.

    The breakpoints below are hyperparameter choices, not derived values.

    Args:
        num_channels (int): Number of channels.

    Returns:
        int: Group count, one of 1, 2, 4, 8, 16 or 32.
    """
    # (upper bound exclusive, groups) pairs, in ascending order.
    breakpoints = ((8, 1), (32, 2), (64, 4), (128, 8), (256, 16))
    for upper, groups in breakpoints:
        if num_channels < upper:
            return groups
    return 32
def response_message(status, message, status_code):
    """
    Build a standard JSON API response.

    Returns a tuple of (jsonified body with "status" and "message" keys,
    HTTP status code) suitable for returning from a Flask view.
    """
    payload = {
        "status": status,
        "message": message
    }
    return jsonify(payload), status_code
def glDrawBuffers( baseOperation, n=None, bufs=None ):
    """glDrawBuffers( bufs ) -> bufs
    Wrapper will calculate n from dims of bufs if only
    one argument is provided...
    """
    # Single-argument form: the caller passed the buffer array in the
    # `n` slot, so shift it over and recompute the count below.
    if bufs is None:
        n, bufs = None, n
    bufs = arrays.GLenumArray.asArray( bufs )
    # Derive the buffer count from the array when not given explicitly.
    count = arrays.GLenumArray.arraySize( bufs ) if n is None else n
    return baseOperation( count, bufs )
def black_check(context):
    """Verify that the project sources are formatted per ``black``.

    Runs ``black --check`` (no reformatting) over the top-level modules,
    the test suite and the ``src`` tree via the given invoke context.
    """
    command = "black --check *.py tests src"
    context.run(command)
def compare_rendered(obj1, obj2):
    """
    Compare two folium map objects by their normalized rendered output.

    Returns True when the normalized renderings are equal, False otherwise.
    """
    left = normalize(obj1)
    right = normalize(obj2)
    return left == right
def handle_chosen_inline_result(bot, update, session, user):
    """Save the chosen inline result."""
    result = update.chosen_inline_result
    parts = result.result_id.split(':')
    # Results from banned users carry no "search_id:file_id" pair; ignore.
    if len(parts) < 2:
        return
    search_id, file_id = parts
    inline_query = session.query(InlineQuery).get(search_id)
    sticker = session.query(Sticker).get(file_id)
    # The user may have clicked a link in sticker set search instead of a
    # sticker; in that case there is nothing to record.
    if sticker is None:
        return
    inline_query.sticker_file_id = file_id
    usage = StickerUsage.get_or_create(session, inline_query.user, sticker)
    usage.usage_count += 1
async def _get_device_client_adapter(settings_object):
    """
    get a device client adapter for the given settings object
    """
    # Without either a device id or an id scope there is nothing to
    # identify the device, so no adapter can be created.
    has_identity = settings_object.device_id or settings_object.id_scope
    if not has_identity:
        return None
    adapter = adapters.create_adapter(settings_object.adapter_address, "device_client")
    adapter.device_id = settings_object.device_id
    return adapter
def is_reserved(word):
    """
    Determines if word is reserved

    :param word: String representing the variable
    :return: True if word is reserved and False otherwise
    """
    return word in ('define', 'define-struct')
def launch_from_openmpi(config: Union[str, Path, Config, Dict],
                        host: str,
                        port: int,
                        backend: str = 'nccl',
                        seed: int = 1024,
                        verbose: bool = True):
    """A wrapper for colossalai.launch for the OpenMPI launcher, reading
    rank and world size from the environment variables set by OpenMPI.

    :param config: Config file or config file path are both acceptable
    :type config: Union[str, dict, Config]
    :param host: The master address for distributed training
    :type host: str
    :param port: The master port for distributed training
    :type port: str
    :param backend: Backend for torch.distributed
    :type backend: str, optional
    :param seed: Specified random seed for every processes
    :type seed: int, optional
    :param verbose: Whether to print logs
    :type verbose: bool, optional
    """
    # OpenMPI exposes process placement via OMPI_COMM_WORLD_* variables;
    # a missing variable means we were not started under mpirun (KeyError).
    env = os.environ
    global_rank = int(env['OMPI_COMM_WORLD_RANK'])
    node_local_rank = int(env['OMPI_COMM_WORLD_LOCAL_RANK'])
    world_size = int(env['OMPI_COMM_WORLD_SIZE'])
    launch(config=config,
           local_rank=node_local_rank,
           rank=global_rank,
           world_size=world_size,
           host=host,
           port=port,
           backend=backend,
           seed=seed,
           verbose=verbose)
def test_client_index_year():
    """
    Test retrieving list of indices for a given year.
    """
    # 1994 is the earliest EDGAR year with a known, fixed index count.
    expected = 119
    index_list = openedgar.clients.edgar.list_index_by_year(1994)
    assert_equal(len(index_list), expected)
def verify_figure_hash(name, figure=None):
    """
    Verify that a figure's hash matches the named entry in the current
    hash library.

    If the library has no entry for ``name``, the figure's hash is stored
    under that name and the check succeeds.

    Parameters
    ----------
    name : string
        The identifier for the hash in the hash library
    figure : matplotlib.figure.Figure
        If None is specified, the current figure is used (as determined by
        matplotlib.pyplot.gcf())

    Returns
    -------
    out : bool
        False if the figure's hash does not match the named hash,
        otherwise True
    """
    if name in hash_library:
        return hash_library[name] == hash_figure(figure)
    # First sighting of this name: record the hash and report a match.
    hash_library[name] = hash_figure(figure)
    return True
def x_span_contains_y(x_spans, y_spans):
    """
    Return whether every span in ``y_spans`` is contained by some span
    in ``x_spans``.

    :param x_spans: iterable of (start, end) pairs acting as containers
    :param y_spans: iterable of (start, end) pairs that must each fit
        inside at least one x span
    :return: True when all y spans are covered (vacuously True for an
        empty ``y_spans``), False otherwise
    """
    # A y span (i, j) is covered when some x span (m, n) satisfies
    # m <= i and j <= n.
    return all(
        any(start >= m and end <= n for m, n in x_spans)
        for start, end in y_spans
    )
def test_cray_config_list(cli_runner):
    """ Test `cray init` for creating the default configuration """
    runner, cli, _ = cli_runner
    result = runner.invoke(cli, ['config', 'list', '--quiet'])
    assert result.exit_code == 0
    parsed = json.loads(result.output)
    configurations = parsed.get('configurations')
    assert configurations
    # Exactly one configuration must be flagged active.
    active_configs = [cfg for cfg in configurations if cfg['is_active']]
    assert len(active_configs) == 1
def parse_args():
    """Parse command-line arguments.

    :returns: argparse.Namespace with the parsed options (``config``)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-c',
        '--config',
        required=True,
        help='configuration file')
    return parser.parse_args()
def log(fn):
    """
    Logging decorator for REST method calls.

    Captures request/response metadata (via the module-level ``ctx``),
    measures the call duration, and writes one structured log record per
    call. On exception a record is still emitted -- as info for 2xx
    statuses, as error otherwise -- and the exception is re-raised.
    """
    def wrapped(self, *args):
        try:
            start = time()
            ret = fn(self, *args)
            duration = time() - start
            # Collect request context for the structured log record.
            logData = extractLogData(ctx)
            logData['duration'] = duration
            logData['httpCode'] = ctx.status
            logData['responseHeader'] = dumps(ctx.headers)
            # Message is empty on purpose: all content lives in `extra`.
            logger.info('', extra=logData)
            return ret
        except Exception:
            duration = time() - start
            logData = extractLogData(ctx)
            logData['duration'] = duration
            logData['httpCode'] = ctx.status
            logData['responseHeader'] = dumps(ctx.headers)
            # ctx.status is indexed by character, so it is presumably a
            # string such as '200 OK' -- TODO confirm against the web
            # framework in use.
            if ctx.status[0] == '2':
                logger.info('', extra=logData)
            else:
                logger.error('', extra=logData)
            raise
    return wrapped
def deal_line(text_str1, text_str2, para_bound=None):
    """Merge adjacent lines and split paragraphs.

    Divider lines are written directly to the module-level ``result_text``
    buffer. Otherwise ``text_str2`` is merged into ``text_str1`` (or
    ``text_str1`` is flushed first when a paragraph break is detected) and
    the pending, not-yet-flushed text is returned ("" when everything was
    written out).
    """
    global result_text
    text_str2 = text_str2.strip()
    len_text_str2 = len(text_str2)
    if len_text_str2 > 3 and len(set(text_str2)) == 1:  # handle divider lines made of one repeated char, e.g. "*****"
        st = list(set(text_str2))[0]
        # new_file.write(' ' + st * 24 + '\n')
        # Normalize the divider to a fixed width of 24 characters.
        result_text += HEAD_SPACE + st * 24 + '\n'
        return ""
    if len_text_str2 > 3 and str(text_str2[0:3]) == str(text_str2[-3:]):  # handle symmetric dividers like "***Text***"
        # new_file.write(' ' + text_str1 + '\n')
        # new_file.write(' ' + text_str2 + '\n')
        result_text += HEAD_SPACE + text_str1 + '\n'
        result_text += HEAD_SPACE + text_str2 + '\n'
        return ""
    else:
        if isparagraph_break(text_str1):
            # Flush the finished paragraph line and start a new one.
            # new_file.write(' ' + text_str1 + '\n')
            result_text += HEAD_SPACE + text_str1 + '\n'
            text_str1 = text_str2
        else:
            text_str1 += text_str2
        if para_bound:
            return split_paragraph(text_str1, para_bound)
        else:
            return text_str1
def interpreter_loop(namespace={}, debug=False, rpn_class=RPN, rpn_instance=None):
    """Run an interactive RPN read-eval-print session (Python 2 syntax).

    NOTE(review): ``namespace={}`` is a shared mutable default argument;
    every call without an explicit namespace reuses the same dict across
    calls. Consider ``namespace=None`` with an in-body default.
    """
    if rpn_instance is None:
        rpn_instance = rpn_class(namespace)
    while True:
        try:
            # Show the current stack/state before each prompt.
            print
            print rpn_instance
            words = raw_input('> ')
            rpn_instance.interpret_sequence(words.split(), filename='<stdin>')
        except KeyboardInterrupt:
            # Ctrl-C ends the session cleanly.
            print
            break
        except SystemExit:
            raise
        except Exception, msg:
            # In debug mode re-raise so the full traceback is visible.
            if debug: raise
            print "ERROR:", msg
def process_swiss(cfg, interpolation, country_mask, out, latname, lonname):
    """
    Process "Swiss National Emission Inventory" created by Meteotest Inc.

    For every (category, species) pair this reads the raw emission files,
    remaps them onto the COSMO grid using the precomputed ``interpolation``
    weights, converts units for the target model, and writes one output
    variable per pair via ``util.write_variable``. When
    ``cfg.add_total_emissions`` is set, per-species totals are accumulated
    and written as well.

    :param cfg: configuration object (grids, paths, species, categories)
    :param interpolation: mapping (lon, lat) -> [(x, y, weight), ...]
    :param country_mask: unused here; kept for interface compatibility
    :param out: open output dataset handle passed to ``util.write_variable``
    :param latname: name of the latitude dimension/variable in ``out``
    :param lonname: name of the longitude dimension/variable in ``out``
    """
    if cfg.add_total_emissions:
        total_flux = {}
        for var in cfg.species:
            var_name = cfg.in2out_species.get(var, var)
            total_flux[var_name] = np.zeros((cfg.cosmo_grid.ny, cfg.cosmo_grid.nx))

    unit = None  # set inside the loop; reused for the totals below
    for cat in cfg.categories:
        for var in cfg.species:
            print('Species', var, 'Category', cat)
            if cfg.inventory == 'swiss-cc':
                constfile = os.path.join(
                    cfg.input_path, "".join([cat, "10_", "*_kg.txt"])
                )
            elif cfg.inventory == 'swiss-art':
                constfile = os.path.join(
                    cfg.input_path, "".join(['e', cat, '15_', var, '*'])
                )

            # Sum all files matching the category/species pattern.
            emi = np.zeros((cfg.input_grid.nx, cfg.input_grid.ny))
            for filename in sorted(glob(constfile)):
                print(filename)
                emi += util.read_emi_from_file(filename)  # (lon, lat)

            # Redistribute each input cell onto the COSMO grid.
            out_var = np.zeros((cfg.cosmo_grid.ny, cfg.cosmo_grid.nx))
            for lon in range(np.shape(emi)[0]):
                for lat in range(np.shape(emi)[1]):
                    for (x, y, r) in interpolation[lon, lat]:
                        out_var[y, x] += emi[lon, lat] * r

            cosmo_area = 1.0 / cfg.cosmo_grid.gridcell_areas()
            # convert units
            if cfg.model == 'cosmo-ghg':
                # COSMO-GHG: kg.year-1.cell-1 to kg.m-2.s-1
                out_var *= cosmo_area.T / util.SEC_PER_YR
                unit = 'kg m-2 s-1'
            elif cfg.model == 'cosmo-art':
                # COSMO-ART: g.year-1.cell-1 to kg.h-1.cell-1
                out_var *= 1.0 / (24.0 * util.DAY_PER_YR) / 1000.0
                unit = 'kg h-1 cell-1'
            else:
                raise RuntimeError

            # only allow positive fluxes
            out_var[out_var < 0] = 0

            # write new or add to existing variable
            out_var_name = util.get_out_varname(var, cat, cfg)
            print('Write as variable:', out_var_name)
            util.write_variable(out, out_var, out_var_name, latname, lonname,
                                unit)

            if cfg.add_total_emissions:
                var_name = cfg.in2out_species.get(var, var)
                total_flux[var_name] += out_var

    # Calculate total emission/flux per species
    if cfg.add_total_emissions:
        for s in cfg.species:
            s = cfg.in2out_species.get(s, s)
            print('Write total emissions for variable:', s)
            # BUG FIX: write each total under its own species name `s`.
            # Previously the leftover loop variable `var_name` was passed,
            # so every species total overwrote the same output variable.
            util.write_variable(out, total_flux[s], s, latname, lonname,
                                unit)
def resized_image(image: np.ndarray, max_size: int) -> np.ndarray:
    """Resize an image so its longest side is at most ``max_size`` pixels.

    The image is returned unchanged when ``max_size`` is non-positive or
    already at least as large as the longest side; otherwise it is scaled
    down with area interpolation, preserving the aspect ratio.
    """
    height, width = image.shape[:2]
    longest = max(width, height)
    if max_size <= 0 or longest <= max_size:
        return image
    new_size = (width * max_size // longest, height * max_size // longest)
    return cv2.resize(image, dsize=new_size, interpolation=cv2.INTER_AREA)
def load_folder_list(args, ndict):
    """
    Load every run listed in ``ndict`` and concatenate the results.

    Args:
        args: parsed options forwarded to ``load_pickle_to_dataframe``
        ndict: iterable of "name_run" -> path entries

    Returns:
        pandas.DataFrame sorted by the "name_run" column.
    """
    frames = []
    for path in ndict:
        print("loading %s" % path)
        frames.append(load_pickle_to_dataframe(args, path))
    combined = pd.concat(frames).sort_values("name_run")
    banner = "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"
    print(banner)
    print("Datasets:")
    print("=========")
    # List the column index -> name mapping for quick reference.
    for idx, column in enumerate(combined.columns):
        print(f"{idx} -> {column}")
    print(banner)
    print()
    return combined
def show_categorical(df, target=None, sharey=False, figsize=(17, 2), ncols=5):
    """
    Display count plots of the categorical features of ``df``.

    Numeric columns and any columns listed in ``target`` are excluded.

    Parameters
    ----------
    df : pandas.DataFrame
        Data to inspect.
    target : list, optional
        Column names to exclude from plotting (e.g. the target labels).
    sharey : bool
        Share the y axis across plots in a row.
    figsize : tuple
        Size of each row of plots.
    ncols : int
        Plots per row; values <= 1 are reset to 5.
    """
    if target is None:
        target = []
    if ncols <= 1:
        ncols = 5
        print("Number of columns changed to {}".format(ncols))
    numerical = list(df.select_dtypes(include=[np.number]))
    categorical_f = [
        col for col in df if col not in numerical and col not in target
    ]
    if not categorical_f:
        print("There are no categorical variables")
        return
    full_ncols = ncols  # fixed row width, kept for index arithmetic
    nrows = math.ceil(len(categorical_f) / full_ncols)
    for row in range(nrows):
        start = row * full_ncols
        if row == nrows - 1 and len(categorical_f) % full_ncols == 1:
            # Special case: the last row holds a single plot.
            plt.subplots(ncols=1, figsize=figsize)
            sns.countplot(df[categorical_f[-1]].dropna())
        else:  # standard case
            row_ncols = full_ncols
            if row == nrows - 1 and len(categorical_f) % full_ncols != 0:
                row_ncols = len(categorical_f) % full_ncols  # shrink last row
            _, ax = plt.subplots(ncols=row_ncols, sharey=sharey, figsize=figsize)
            # BUG FIX: slice from row * full_ncols rather than the shrunken
            # row width, so the short last row shows the *remaining*
            # columns instead of re-plotting earlier ones.
            for idx, col_name in enumerate(categorical_f[start : start + row_ncols]):
                so = sorted({v for v in df[col_name].values if str(v) != 'nan'})
                axs = sns.countplot(df[col_name].dropna(), ax=ax[idx], order=so)
                if idx != 0:
                    axs.set(ylabel='')
def charge_initial():
    """
    Not currently in use. Reads a parking spot id from the request args
    and renders the Stripe charge page priced for that spot.
    """
    spot_id = int(request.args.get('id'))
    spot = AddressEntry.query.get(spot_id)
    return render_template(
        'users/charge_initial.html',
        key=stripe_keys['publishable_key'],
        price=spot.price,
    )
def read_csv_from_file(file):
    """
    Read CSV data from an open file handle and return a list of dicts.

    If the first row contains no integer-valued cell it is treated as a
    header row and its cells become the dict fieldnames; the following
    case-sensitive fields are expected by downstream code:

    - ``artist``
    - ``song``
    - ``submitter``
    - ``seed``

    Otherwise (an integer was found, so there is no header row) this
    hardcoded fieldname list is used and the first row is kept as data:

    - ``order``, ``seed``, ``submitter``, ``year``, ``song``, ``artist``,
      ``link``

    The delimiter is a tab when the first row contains one, else a comma.
    Extra fields are preserved untouched in the output.

    :returns: All parsed rows as a list of dicts (`csv.DictReader` style)
    """
    def _looks_like_int(cell):
        # Header detection heuristic: data rows contain at least one int.
        try:
            int(cell)
            return True
        except ValueError:
            return False

    data = list(file)
    delimiter = "\t" if "\t" in data[0] else ","
    first_row = next(csv.reader([data[0]], delimiter=delimiter))
    if any(_looks_like_int(cell) for cell in first_row):
        # No header row present: fall back to the hardcoded fieldnames.
        headers = ["order", "seed", "submitter", "year", "song", "artist", "link"]
    else:
        # Header row found: consume it and use its cells as fieldnames.
        data.pop(0)
        headers = first_row
    return list(csv.DictReader(data, fieldnames=headers, delimiter=delimiter))
def findOutliers(time, flux, gap=None,
                 threshold_sigma=4,
                 precision_days=0.0205,
                 maxClusterLen = 2
                 ):
    """
    Identify single point outliers.

    Preserves consecutive outliers, and those that are evenly spaced in
    time. This protects short duration transits.

    Inputs:
    ------------
    time, flux
        (1d numpy array) Input data. Flux should have mean (or median) value
        of zero. Units of time are assumed to be days.

    Optional Inputs
    ------------------
    precision_days
        (float) Points that are evenly spaced to within this precision
        are considered periodic, and not marked as outliers. Setting
        this to zero turns off the search of periodicity.
    threshold_sigma
        (float) Points more than this many sigma from zero are considered
        potential outliers.
    maxClusterLen
        (int) Outliers are not marked if they are part of
        a contiguous cluster at least this long.

    Returns:
    ------------
    A boolean array the same length as flux, True where a point is a
    single point outlier.

    Notes
    ----------
    `precision_days` should be set to a value comparable to the cadence time.
    For Kepler long cadence, this is 29.52 minutes = 0.0205 days.
    If `time` is not in units of days, set the value of precision in the
    same units.
    """
    assert not np.all(gap), "Can't find outliers if all data is gapped"
    if gap is None:
        gap = np.zeros_like(flux, dtype=bool)
    indices = np.zeros_like(gap)
    # Remove as much signal as possible from the data; differencing acts
    # as a crude high-pass filter so slow trends don't look like outliers.
    # fluxDetrended = medianDetrend(flux, 3)
    fluxDetrended = np.diff(flux)
    fluxDetrended = np.append(fluxDetrended, [0]) #Keep the length the same
    #debug()
    assert len(fluxDetrended) == len(flux)
    # Find outliers as too far away from the mean. The /sqrt(2) presumably
    # compensates for the variance inflation introduced by diff() -- verify.
    rms = robustStd(fluxDetrended[~gap])
    threshold_counts = threshold_sigma * rms / np.sqrt(2)
    rawOutliers = plateau(np.fabs(fluxDetrended), threshold_counts)
    if len(rawOutliers) == 0:
        return indices
    # Remove clusters of 2 or more consecutive outliers (`plateau` appears
    # to return (start, end) index pairs -- confirm against its docstring).
    #singleOutlierIndices = np.sort(outliers[(outliers[:,1] - outliers[:,0] <= 2)][:,0])
    # debug()
    span = rawOutliers[:,1] - rawOutliers[:,0]
    outliers = rawOutliers[span < maxClusterLen+2]
    for p1, p2 in outliers:
        indices[p1+1 :p2] = True
    # Check for periodicities in outliers; evenly spaced events are likely
    # real astrophysical signal (e.g. short transits), so unflag them.
    if precision_days > 0:
        notOutliers = findPeriodicOutliers(time, indices, precision_days)
        indices[notOutliers] = False
    return indices
def identify_denonavr_receivers():
    """
    Identify DenonAVR devices using SSDP and SCPD queries.

    Returns a list of dictionaries, one per discovered Denon AVR device,
    with keys "host", "modelName", "friendlyName", "presentationURL".
    """
    receivers = []
    # Broadcast SSDP, then probe every responder's SCPD description to
    # filter out non-Denon devices.
    for device in send_ssdp_broadcast():
        try:
            receiver = evaluate_scpd_xml(device["URL"])
        except ConnectionError:
            continue
        if receiver:
            receivers.append(receiver)
    return receivers
def get_index_settings(index):
    """Returns ES settings for this index"""
    response = get_es().indices.get_settings(index=index)
    # Missing index/settings keys fall back to an empty dict.
    return response.get(index, {}).get('settings', {})
def test_environ() -> None:
    """Test that the environ context manager applies and restores overrides."""
    baseline = dict(os.environ)
    overrides = {"TEST_PARAM": "override", "new_param": "value"}
    expected_during = {**baseline, **overrides}
    assert os.environ == baseline, "validate original value"
    with environ(overrides):
        assert os.environ == expected_during, "validate override"
        # Mutations inside the block must also be rolled back on exit.
        assert os.environ.pop("new_param") == "value"
    assert os.environ == baseline, "validate value returned to original"
def test_backend_validate_incorrect_package(endpoint_url, iam_auth, product):
    """
    Test POST /backend/validate with an incorrect product
    """
    # Deliberately corrupt the package dimensions so validation rewrites it.
    bad_product = copy.deepcopy(product)
    bad_product["package"]["height"] += 100
    url = "{}/backend/validate".format(endpoint_url)
    res = requests.post(
        url,
        auth=iam_auth(endpoint_url),
        json={"products": [bad_product]}
    )
    assert res.status_code == 200
    body = res.json()
    assert "products" in body
    assert len(body["products"]) == 1
    # The response should match the original (correct) product.
    compare_dict(body["products"][0], product)
def register(app: Flask) -> None:
    """
    Register all views blueprints on the application.

    :param app: Flask application.
    :return: None
    """
    # Imported inside the function, presumably to avoid a circular import
    # at module load time -- TODO confirm.
    from . import auth
    app.register_blueprint(auth.bp_auth)
def make_hashkey(seed):
    """
    Generate a string key by hashing.

    :param seed: any value; its str() form is encoded and hashed
    :return: 32-character hex MD5 digest
    """
    digest = hashlib.md5(six.b(str(seed)))
    return digest.hexdigest()
async def report(database, year, month, limit):
    """Build a monthly activity report.

    Runs five aggregate queries concurrently against ``database`` and
    returns a dict with match/player totals, the most active players,
    popular maps, and the ids of the longest matches for the given
    year/month. ``limit`` caps the per-list result counts.
    """
    matches_query = """
        select count(*) as count
        from matches
        where extract(year from played)=:year and extract(month from played)=:month
    """
    players_query = """
        select count(distinct players.user_id) as count
        from matches join players on matches.id=players.match_id
        where extract(year from played)=:year and extract(month from played)=:month
    """
    most_matches_query = """
        select players.user_id, players.platform_id, players.user_name, count(matches.id) as count
        from players join matches on players.match_id=matches.id
        where players.user_id != '' and
        extract(year from matches.played)=:year and extract(month from matches.played)=:month
        group by players.user_id, players.platform_id, players.user_name
        order by count(matches.id) desc
        limit :limit
    """
    popular_maps_query = """
        select map_name as name, count(map_name) as count
        from matches
        where extract(year from played)=:year and extract(month from played)=:month
        group by map_name
        order by count(map_name) desc
    """
    longest_matches_query = """
        select id
        from matches
        where extract(year from played)=:year and extract(month from played)=:month
        order by duration desc
        limit :limit
    """
    # Run all five aggregates concurrently; they are independent reads.
    total_matches, total_players, most_matches, popular_maps, longest_matches = await asyncio.gather(
        database.fetch_one(matches_query, values={'year': year, 'month': month}),
        database.fetch_one(players_query, values={'year': year, 'month': month}),
        database.fetch_all(most_matches_query, values={'limit': limit, 'year': year, 'month': month}),
        database.fetch_all(popular_maps_query, values={'year': year, 'month': month}),
        database.fetch_all(longest_matches_query, values={'limit': limit, 'year': year, 'month': month}),
    )
    return {
        'year': year,
        'month': month,
        'total_matches': total_matches['count'],
        'total_players': total_players['count'],
        # Ranks are 1-based and follow the query's descending match count.
        'most_matches': [dict(
            user=dict(id=m['user_id'], platform_id=m['platform_id'], name=m['user_name']),
            rank=i + 1,
            count=m['count']
        ) for i, m in enumerate(most_matches)],
        'popular_maps': compute_map_data(popular_maps)[:limit],
        'longest_match_ids': list(map(lambda m: m['id'], longest_matches))
    }
def compose_ntx_graph(input_file=None, delimiter=None, weighted=None):
    """
    This function creates a networkx graph from provided file

    :param input_file: Input file path
    :param delimiter: separator for the column of the input file
    :param weighted: Simple yes/no if the input file is weighted or not
    :return: networkx graph (exits the process on any failure)
    """
    # NOTE(review): `print` below is called with log_type/color keyword
    # arguments, so it is presumably a project logging wrapper shadowing
    # the builtin -- verify at the module's import section.
    # Check sanity status of input
    sanity_status = file_operations.sanity_check(input_file, delimiter, weighted)
    # Get data for weighted networkx graph
    file_is_weighted = file_operations.is_weighted(weighted)
    # Create a networkx graph from the edgelist
    if sanity_status == 1:
        if file_is_weighted:
            print('Creating Networkx weighted graph.....', log_type='info')
            try:
                ntx_graph = nx.read_weighted_edgelist(input_file, delimiter=delimiter, nodetype=int)
            except Exception as e:
                print('Can not create weighted networkx graph. ERROR: {}'.format(e), color='red', log_type='error')
                sys.exit(1)
        else:
            print('Creating Networkx unweighted graph.....', log_type='info')
            try:
                ntx_graph = nx.read_edgelist(input_file, delimiter=delimiter, nodetype=int)
            except Exception as e:
                print('Can not create unweighted networkx graph. ERROR: {}'.format(e), color='red', log_type='error')
                sys.exit(1)
        # Return graph
        return ntx_graph
    else:
        print('Sanity check failed!', log_type='error', color='red')
        sys.exit(1)
def test_algorithms():
    """Test detection/deblending/measurement algorithms if installed"""
    # Each comparison helper presumably skips itself when its optional
    # dependency (sep) is missing -- verify against the helpers.
    compare_sep()
    compare_sep_multiprocessing()
def dy3(vector, g, m1, m2, L1, L2):
    """
    Time derivative of y3 for the double pendulum.

    Abbreviations (computed by ``abbreviate``):
        M = m0 + m1
        S = sin(y1 - y2)
        C = cos(y1 - y2)
        s1 = sin(y1)
        s2 = sin(y2)

    Equation:
        y3' = g*[m2 * C * s2 - M * s1] - S*m2*[L1 * y3^2 * C + L2*y4^2]
              -------------------------------------------------------
                              L1*[M - m2*C^2]
    """
    y1, y2, y3, y4 = vector
    M, S, C, s1, s2 = abbreviate(m1, m2, y1, y2)
    # Split the equation into named pieces for readability.
    gravity_term = g * (m2 * C * s2 - M * s1)
    coupling_term = S * m2 * (L1 * C * y3 ** 2 + L2 * y4 ** 2)
    denominator = L1 * (M - m2 * C ** 2)
    return (gravity_term - coupling_term) / denominator
def plotCurve():
    """Plot enhancement vs intensity at a given wavelength.

    Sweeps the incident intensity, iterating the Mie calculation with a
    Kerr-type nonlinear index correction, and writes the converged field
    enhancement and its location to "Output/enh_I-shell.dat" (core-shell
    particle) and "Output/enh_I-solid.dat" (solid particle).
    """
    import setup as st
    from scipy.linalg import norm
    # Initialization
    # Kerr coefficient; presumably in SI units -- verify against setup.
    ksi = (-9.1+0.35j)*1e-19 #2e-19
    #aryIntensity = np.array([0.08, 0.18, 0.4, 0.8, 1.65, 8, ])# 1400 nm
    aryIntensity = np.linspace(0.08, 8, 100) # 780 nm
    #-------------------------------------------------------------------
    # Core-shell
    #-------------------------------------------------------------------
    oMie = Mie(st.nAu, st.nSiO2, omega=st.omegaIR, R=st.rs, \
        Ri=st.ri)
    with open("Output/enh_I-shell.dat", "w") as f:
        for i in aryIntensity:
            # Initial trial intensity
            IIR = i*1e12*Wpcm2
            enhEff = 1
            # Loop several times to get converged intensity and
            # nonlinear response (currently a single iteration).
            for j in range(1):
                # Calculate nonlinear response term
                n2 = ksi*283/oMie.getn(oMie.n)/oMie.getn(oMie.n).real
                nlr = n2*1e4/Wpcm2 * IIR * enhEff
                #print(n2)
                # Update parameters and calculate Mie coefficients
                oMie.I0 = IIR
                oMie.update(nlr=nlr)
                oMie.calMieCoefficients()
                # Calculate field enhancement just outside the surface.
                maxEnh, maxCoS = findMaxEnh(oMie, oMie.R+small, \
                    oMie.R+nm, 80*deg, 100*deg)
                # Get next improved intensity
                #IIR = i*1e12*Wpcm2 * maxEnh
                #enhEff = (enhEff + (\
                #    norm(oMie.EFieldS([oMie.R-small,0,0]))**2/oMie.I0+\
                #    2*norm(oMie.EFieldS([oMie.R-small,Pi/2,0]))**2/oMie.I0)\
                #    /3)/2
                #print(norm(oMie.EFieldS([oMie.R-small,0,0]))**2/oMie.I0, \
                #    norm(oMie.EFieldS([oMie.R-small,Pi/2,0]))**2/oMie.I0, \
                #    i)
                enhEff = (enhEff + norm(oMie.EFieldS([oMie.R-small,0,0]))**2/oMie.I0)/2
                print(enhEff, i)
            # Write converged results
            line = "{:f}\t{:f}\t{:f}\t{:f}\n".format(i, maxEnh, \
                maxCoS[0]/nm, maxCoS[1]/deg)
            f.write(line)
    #-------------------------------------------------------------------
    # Solid
    #-------------------------------------------------------------------
    oMie = Mie(st.nAu, st.nAu, omega=st.omegaIR, R=st.rs, \
        Ri=0, I0=st.IIR)
    with open("Output/enh_I-solid.dat", "w") as f:
        for i in aryIntensity:
            # Initial trial intensity
            IIR = i*1e12*Wpcm2
            enhEff = 1
            # Loop several times to get converged intensity and
            # nonlinear response (currently a single iteration).
            for j in range(1):
                # Calculate nonlinear response term
                n2 = ksi*283/oMie.getn(oMie.n)/oMie.getn(oMie.n).real
                nlr = n2*1e4/Wpcm2 * IIR * enhEff
                # Update parameters and calculate Mie coefficients
                oMie.I0 = IIR
                oMie.update(nlr=nlr)
                oMie.calMieCoefficients()
                # Calculate field enhancement
                maxEnh, maxCoS = findMaxEnh(oMie, oMie.R+small, \
                    oMie.R+nm, 80*deg, 100*deg)
                # Get next improved intensity
                #IIR = i*1e12*Wpcm2 * maxEnh
                enhEff = (enhEff + norm(oMie.EFieldS([oMie.R-small,0,0]))**2/oMie.I0)/2
                print(enhEff, i)
            # Write converged results
            line = "{:f}\t{:f}\t{:f}\t{:f}\n".format(i, maxEnh, \
                maxCoS[0]/nm, maxCoS[1]/deg)
            f.write(line)
def test_properties_facade_prefix_not_string():
    """
    Test setting up a facade with a prefix that is not a string.
    """
    # Arrange
    application_properties = ApplicationProperties()
    application_properties.load_from_dict({"property": "2"})

    # Act: an integer prefix must be rejected with a ValueError.
    raised_exception = None
    try:
        ApplicationPropertiesFacade(application_properties, 1)
        assert False, "Should have raised an exception by now."
    except ValueError as this_exception:
        raised_exception = this_exception

    # Assert
    assert raised_exception, "Expected exception was not raised."
    expected_message = "The property_prefix argument must be a string."
    assert (
        str(raised_exception) == expected_message
    ), "Expected message was not present in exception."
async def test_expired_token_exception(event_loop, v2_server):
    """Test that the correct exception is raised when the token is expired."""
    async with v2_server:
        # Make the mock server answer the auth check with 401 Unauthorized.
        v2_server.add(
            'api.simplisafe.com', '/v1/api/authCheck', 'get',
            aresponses.Response(text='', status=401))
        async with aiohttp.ClientSession(loop=event_loop) as websession:
            [system] = await get_systems(TEST_EMAIL, TEST_PASSWORD, websession)
            # Backdate the cached token expiry so the client treats it as
            # expired before issuing the request.
            system.account._access_token_expire = datetime.now() - timedelta(
                hours=1)
            with pytest.raises(TokenExpiredError):
                await system.account.request('get', 'api/authCheck')
def getType(resp: falcon.Response, class_type: str, method: str) -> Any:
    """Return the @type of object allowed for POST/PUT.

    Scans the supported operations of ``class_type`` in the API doc and,
    for the matching HTTP method, returns its expected type with the
    leading "vocab:" prefix stripped. Returns None when no operation
    matches.
    """
    hydra_class = get_doc(resp).parsed_classes[class_type]["class"]
    for operation in hydra_class.supportedOperation:
        if operation.method == method:
            # NOTE: replace (not split) is used on purpose -- with split,
            # extra 'vocab:' substrings would drop parts of the string.
            return operation.expects.replace("vocab:", "")
def handle_command(command, channel):
    """
    Receives commands directed at the bot and determines if they
    are valid commands. If so, then acts on the commands. If not,
    returns back what it needs for clarification.

    NOTE(review): Python 2 module (print statements below). Unmatched
    commands fall through to the AIML kernel ``k.respond``.
    """
    response = "Not sure what you mean. Use the *" + EXAMPLE_COMMAND + \
        "*"
    if command.startswith(EXAMPLE_COMMAND):
        response = "Sure...write some more code then I can do that!"
    elif command.startswith(HELP_COMMAND):
        try:
            response = "You need some assistance? I can perform many administrative tasks.\n"
            # response += "*look up* <KEYWORDS> - _Experimental_ _command_ to look up a topic. \n"
            # response += "*query parcel* <APN NO DASHES> - Returns the Assessor information of a given parcel. \n"
            # response += "*what is the house worth* - Returns the Assessor information of the house. \n"
            # response += "*search files for* <KEYWORD> - Queries the home network for files with a given keyword in the filename. \n"
            # response += "*search documents for* <KEYWORD> - Queries the home network for documents with a given keyword in the filename. \n"
            # response += "*search music for* <KEYWORD> - Queries the home network for music with a given keyword in the filename. \n"
            # response += "*pick a movie* - Chooses a movie at random from the known collection of movies. \n"
            # response += "*find movies near* <ZIPCODE> - Searches for movies playing n theaters within 5 miles of the given zipcode. \n"
            # response += "*pick a song* - Chooses a song at random from the known collection of movies. \n"
            # response += "*temperature* - Returns the temperature and weather reports for the house. \n"
            # response += "*who sings* <SONG TITLE> - Queries Gracenote for all songs with a title matching the search criteria. \n"
            # response += "*gif* <KEYWORDS> - Queries GIPHY for he first gif matching the search criteria. \n"
            response += "*test* - Performs a basic SRAI test of Sam's congnitive functions. "
        except:
            response = "My help system is damaged"
    # The blocks below are disabled features kept for reference.
    # elif command.startswith(SEARCHMUSIC_COMMAND):
    #     trucommand = command
    #     trucommand = trucommand.replace("search music for ", "")
    #     trucommand = trucommand.replace(" ", "")
    #     response = searchmusic(trucommand)
    # elif command.startswith(IDMUSIC_COMMAND):
    #     trucommand = command
    #     trucommand = trucommand.replace("who sings", "")
    #     response = idmusic(trucommand)
    # elif command.startswith(THEATERSEARCH_COMMAND):
    #     trucommand = command
    #     trucommand = trucommand.replace("find movies near ", "")
    #     response = searchtheater(trucommand)
    # elif command.startswith(NETFLIXSEARCH_COMMAND):
    #     trucommand = command
    #     trucommand = trucommand.replace("search netflix for ", "")
    #     response = searchnetflix(trucommand)
    # elif command.startswith(SEARCHFILES_COMMAND):
    #     trucommand = command
    #     trucommand = trucommand.replace("search files for ", "")
    #     trucommand = trucommand.replace(" ", "")
    #     response = searchfiles(trucommand)
    # elif command.startswith(SEARCHDOCS_COMMAND):
    #     trucommand = command
    #     trucommand = trucommand.replace("search documents for ", "")
    #     trucommand = trucommand.replace(" ", "")
    #     response = searchdocs(trucommand)
    # elif command.startswith(PICKMUSIC_COMMAND):
    #     response = pickmusic()
    # elif command.startswith(LOOKUP_COMMAND):
    #     try:
    #         trucommand = command
    #         trucommand = trucommand.replace("look up ", "")
    #         res = client.query(trucommand)
    #         response = "According to the Wolfram|Alpha super computer...\n"
    #         response += (next(res.results).text)
    #     except:
    #         response = "Unable to locate that topic on the Wolfram|Alpha super computer."
    # elif command.startswith(HEALTH_COMMAND):
    #     trucommand = command
    #     trucommand = trucommand.replace("health check", "")
    #     trucommand = trucommand.replace(" ", "")
    #     status = healthcheck(trucommand)
    #     if status == 'True':
    #         response = "The server is healthy"
    #     else:
    #         response = "The server is unhealthy"
    # elif command.startswith(PICKAMOVIE_COMMAND):
    #     response = pickamovie()
    # elif command.startswith(APN_COMMAND):
    #     trucommand = command
    #     trucommand = trucommand.replace("query parcel ", "")
    #     trucommand = trucommand.replace(" ", "")
    #     print trucommand
    #     response = lookupapn(trucommand)
    # elif command.startswith(GIPHY_COMMAND):
    #     trucommand = command
    #     trucommand = trucommand.replace("gif ", "")
    #     print trucommand
    #     response = giphyget(trucommand)
    # elif command.startswith(TELLSAM_COMMAND):
    #     trucommand = command
    #     trucommand = trucommand.replace("announce ", "")
    #     print trucommand
    #     response = tell_sam(trucommand)
    # elif command.startswith(HOMEVALUE_COMMAND):
    #     trucommand = command
    #     trucommand = trucommand.replace("what is the house worth", "")
    #     trucommand = trucommand.replace(" ", "")
    #     print trucommand
    #     response = homeapn()
    # elif command.startswith(TEMP_COMMAND):
    #     try:
    #         howhot = tempinfo()
    #         response = howhot
    #     except:
    #         response = "I was unable to connect to the house services"
    else:
        # Fall back to the AIML chat engine for free-form input.
        response = k.respond(command, "sam")
    try:
        slack_client.api_call("chat.postMessage", channel=channel,
                              text=response, as_user=True)
    except:
        print "Failed to send message"
        # Wait out a presumable rate limit / transient failure, then retry
        # once before giving up.
        sleep(30)
        try:
            slack_client.api_call("chat.postMessage", channel=channel,
                                  text=response, as_user=True)
        except:
            gethelp()
def do_server_remove_network_service(client, args):
    """ Remove network role from a system VM """
    # Build the action arguments; the port is only sent when positive.
    action_kwargs = {'proto': args.proto}
    if args.port and args.port > 0:
        action_kwargs['port'] = args.port
    roles = client.guests.perform_action(args.id, 'remove-network-service',
                                         **action_kwargs)
    print(roles)
def _patch_setuptools():
    """Patch ``setuptools`` to work around known issues.

    Known issues:

    * On some PyPy installs, ``setuptools.build_py.build_package_data()``
      expects ``convert_2to3_doctests`` to be set on instances of
      ``setuptools.dist.Distribution``, but it is unset. We handle this by
      setting it as a **class attribute** (rather than monkey-patching
      ``__init__`` to set it per-instance).
    """
    if _IS_PYPY:
        # Class-level default is inherited by every Distribution instance.
        setuptools.dist.Distribution.convert_2to3_doctests = []
def texWinToolCtx(*args, **kwargs):
    """
    This class creates a context for the View Tools track, dolly, and box zoom in the texture window.

    Flags:
    - alternateContext : ac (bool) [create,query]
        Set the ALT+MMB and ALT+SHIFT+MMB to refer to this context.
    - boxzoom : bz (bool) [create,query,edit]
        Perform Box Zoom
    - dolly : do (bool) [create,query,edit]
        Dollies the view
    - exists : ex (bool) [create]
        Returns true or false depending upon whether the specified object exists. Other flags are ignored.
    - history : ch (bool) [create]
        If this is a tool command, turn the construction history on for the tool in question.
    - image1 : i1 (unicode) [create,query,edit]
        First of three possible icons representing the tool associated with the context.
    - image2 : i2 (unicode) [create,query,edit]
        Second of three possible icons representing the tool associated with the context.
    - image3 : i3 (unicode) [create,query,edit]
        Third of three possible icons representing the tool associated with the context.
    - name : n (unicode) [create]
        If this is a tool command, name the tool appropriately.
    - toolName : tn (unicode) [create,query]
        Name of the specific tool to which this command refers.
    - track : tr (bool) [create,query,edit]
        Tracks the view. Flag can have multiple arguments, passed either as a tuple or a list.

    Derived from mel command `maya.cmds.texWinToolCtx`
    """
    # Auto-generated documentation stub; the real implementation is provided
    # by the wrapped Maya command at runtime.
    pass
def tokenize(source: str):
    """Parse the source of a program and yield a stream of ``Token`` objects.

    Tracks ``line`` and ``col`` (1-based) for every emitted token. Skips
    whitespace, ``/* ... */`` comments and ``// ...`` comments; recognizes,
    in order: keywords/operators from ``TOKEN_TYPES``, double-quoted strings
    ("Str"), single-quoted character literals ("Num"), identifiers ("Id")
    and digit runs ("Num"). Exits the process on an unrecognized token.
    """
    i = 0
    line = 1
    col = 1
    while i < len(source):
        if source[i].isspace():  # skip whitespace, tracking line/col
            while i < len(source) and source[i].isspace():
                if source[i] == "\n":
                    line += 1
                    col = 0
                i += 1
                col += 1
            continue
        if source.startswith("/*", i):  # skip multiline comment
            while i < len(source) and not source.startswith("*/", i):
                if source[i] == "\n":
                    line += 1
                    col = 0
                i += 1
                col += 1
            i += 2
            col += 2
            continue
        if source.startswith("//", i):  # skip single line comment
            while i < len(source) and source[i] != "\n":
                i += 1
            continue
        # NOTE(review): a keyword that is a prefix of an identifier (e.g.
        # "if" in "ifx") would match here before the identifier branch —
        # confirm TOKEN_TYPES is ordered/chosen to avoid this.
        for token in TOKEN_TYPES:  # parse keywords and operators
            if source.startswith(token, i):
                i += len(token)
                yield Token(token, token, line, col)
                col += len(token)
                break
        else:
            if source[i] == '"':  # parse strings
                i += 1
                start = i
                # BUG FIX: bound the scan so an unterminated string literal
                # no longer raises IndexError at end of input.
                while i < len(source) and source[i] != '"':
                    i += 1
                yield Token("Str", source[start:i], line, col)
                i += 1
                col += i - start + 1
            elif source[i] == "'":  # parse characters
                # NOTE(review): yields an int (ord), while digit literals
                # below yield the raw string — confirm consumers expect this.
                yield Token("Num", ord(source[i + 1 : i + 2]), line, col)
                i += 3
                col += 3
            elif source[i].isalpha():  # parse identifiers
                start = i
                # BUG FIX: the original condition
                # `i < len(source) and source[i].isalnum() or source[i] == "_"`
                # grouped as `(bounds and isalnum) or underscore`, so an
                # identifier ending at EOF indexed past the end of `source`.
                while i < len(source) and (source[i].isalnum() or source[i] == "_"):
                    i += 1
                yield Token("Id", source[start:i], line, col)
                col += i - start
            elif source[i].isdigit():  # parse numbers
                start = i
                while i < len(source) and source[i].isdigit():
                    i += 1
                yield Token("Num", source[start:i], line, col)
                col += i - start
            else:
                print(source[i : source.index(" ", i)])
                exit("unrecognized token")
def print_info(cbf_path):
    """Print out a load of data held in the CBF file.

    By no means a full list of the data contained in the file; mainly for
    debugging and development purposes. Prints:

    - The number of categories and the name of each category
    - The number of rows and columns and the name of each column
    - The type of each element in each row/column element.

    :param cbf_path: The path to the cbf file
    """
    # Open the file and position the handle on the first datablock.
    handle = pycbf.cbf_handle_struct()
    handle.read_file(cbf_path, pycbf.MSG_DIGEST)
    handle.rewind_datablock()
    handle.select_datablock(0)
    handle.rewind_category()
    # Walk every category in the datablock.
    for cat_index in range(handle.count_categories()):
        handle.select_category(cat_index)
        print("Category:", cat_index, handle.category_name())
        num_rows = handle.count_rows()
        num_cols = handle.count_columns()
        print("\tNum (rows, cols)", (num_rows, num_cols))
        # List the column names.
        handle.rewind_column()
        for col_index in range(num_cols):
            handle.select_column(col_index)
            print("\tColumn:", col_index, handle.column_name())
        # Dump each table element together with its stored type.
        for row_index in range(num_rows):
            handle.select_row(row_index)
            handle.rewind_column()
            print("\t\tRow:", row_index, handle.get_value())
            for col_index in range(num_cols):
                handle.select_column(col_index)
                type_of_value = handle.get_typeofvalue()
                if "dblq" in type_of_value:
                    value = handle.get_value()
                elif "text" in type_of_value:
                    # Re-indent multi-line text so it lines up in the dump.
                    value = "\n\t\t\t".join(handle.get_value().split("\n"))
                elif "word" in type_of_value:
                    value = handle.get_value()
                elif "sglq" in type_of_value:
                    value = handle.get_value()
                else:
                    value = "..."
                print("\t\tColumn", col_index, "Type:", type_of_value, value)
def full_data_numeric():
    """Build a small DataFrame of numeric test data (columns ``a``, ``c``, ``e``)."""
    return pd.DataFrame({
        'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
        'c': [1, 2, 3, 4, 4, 4, 7, 9, 9, 9],
        'e': list(range(1, 11)),
    })
def fix(x):
    """
    Normalize whitespace: collapse runs of spaces into single tabs, drop
    blank lines, and lstrip() each line. Makes it really easy to create BED
    files on the fly for testing and checking.
    """
    out_lines = []
    for raw_line in x.splitlines():
        stripped = raw_line.lstrip()
        # Preserve a deliberate trailing tab (split() would otherwise eat it).
        trailing = '\t' if stripped.endswith('\t') else ''
        if not stripped:
            continue
        out_lines.append('\t'.join(stripped.split()) + trailing + '\n')
    return ''.join(out_lines)
def get_collect_method(collect_method_name):
    """Look up and return the collect method registered under *collect_method_name*.

    :param collect_method_name: name of the collect method to resolve
    :raises RuntimeError: if no collect method with that name exists
    """
    try:
        return CollectMethod.get(name=collect_method_name)
    except ValueError as exc:
        # Chain the original ValueError so the underlying lookup failure is
        # preserved in tracebacks (the original raise discarded it).
        raise RuntimeError(
            f'Collect Method {collect_method_name} not found!') from exc
def TriangleBackwardSub(U, b):
    """C = TriangleBackwardSub(U,b)

    Solve the linear system UC = b and return the solution C.
    """
    return solve(U, b)
def test_binance_query_balances_unknown_asset(function_scope_binance):
    """Test that a binance balance query returning an unknown asset raises no
    exception and only generates a warning. Same for an unsupported asset."""
    binance = function_scope_binance

    def mocked_get(url):  # pylint: disable=unused-argument
        return MockResponse(200, BINANCE_BALANCES_RESPONSE)

    with patch.object(binance.session, 'get', side_effect=mocked_get):
        balances, msg = binance.query_balances()

    # After the query only the two known assets (BTC and ETH) remain.
    assert msg == ''
    assert len(balances) == 2
    assert balances[A_BTC]['amount'] == FVal('4723846.89208129')
    assert balances[A_ETH]['amount'] == FVal('4763368.68006011')

    # The unknown and unsupported assets each produced one warning.
    warnings = binance.msg_aggregator.consume_warnings()
    assert len(warnings) == 2
    assert 'unknown binance asset IDONTEXIST' in warnings[0]
    assert 'unsupported binance asset ETF' in warnings[1]
def test_sample_categorical_copy():
    """Check that sampled random-variable decisions survive a schedule copy"""
    trials = 100
    sch = tir.Schedule(elementwise, seed=42, debug_mask="all")
    candidates = [1, 2, 3, 4]
    probs = [0.1, 0.2, 0.3, 0.4]
    recorded = []
    for _ in range(trials):
        rv = sch.sample_categorical(candidates, probs)  # pylint: disable=invalid-name
        recorded.append((rv, sch.get(rv)))
    duplicate = sch.copy()
    # Every decision looked up through the copy must match the original.
    for rv, decision in recorded:  # pylint: disable=invalid-name
        assert int(decision) == int(duplicate.get(rv))
def main():
    """Build/modify AWS security groups for the configured VPC.

    Reads configuration from configs/ec2.yaml, connects to AWS in
    us-east-1, resolves the VPC by tag, derives the network topology
    (including the VPC's IPv6 allocation), then constructs an AwsSecGroup
    and calls modify(). Exits the process with -1 on any setup failure.
    """
    my_logfile = 'logs/awsbuild.log'
    my_region = 'us-east-1'
    #my_vpc = 'vpc-xxx'
    my_tag = 'momo-us-east-1'
    # setup logging: the same formatter feeds both a file handler and the console
    log_formatter = logging.Formatter("%(asctime)s %(filename)s %(name)s %(levelname)s %(message)s")
    root_logger = logging.getLogger()
    file_handler = logging.FileHandler(my_logfile)
    file_handler.setFormatter(log_formatter)
    root_logger.addHandler(file_handler)
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(log_formatter)
    root_logger.addHandler(console_handler)
    config = AwsConfig(cfgdir='configs',\
        cfgfile='ec2.yaml')
    conn = AwsConnector(credentials=config.settings['aws_cfg']['credentials'], region=my_region)
    aws_conn = conn.get_all_conn()
    if not aws_conn:
        print('error AwsConnector\n')
        sys.exit(-1)
    vpc_conn = AwsVPC(aws_conn=aws_conn, tag=my_tag)
    # NOTE(review): a freshly constructed object is normally truthy, so this
    # guard (and the AwsSecGroup one below) may never trigger — confirm these
    # classes define __bool__/__len__.
    if not vpc_conn:
        print('error AwsVPC\n')
        sys.exit(-1)
    vpc_id = vpc_conn.get_vpc_id()
    vpc_conn.set_vpc_resource(vpc_id=vpc_id)
    dc_ipv6 = config.settings['dc_cfg'][my_region]['dc_ipv6']
    networkv6, cidrv6 = vpc_conn.get_vpc_cidr(ip_type='v6', split_cidr=True)
    net_config = set_network_config(dc_cfg=config.settings['dc_cfg'][my_region])
    if not net_config:
        print('error set_network_config\n')
        sys.exit(-1)
    # set the network topoloy first (subnet size)
    net_config.update(config.settings['dc_cfg'][my_region])
    # add the config from the configuration file and the VPC ipv6 settings
    # (order matters: the second set_network_config call sees the merged dict)
    net_config.update(set_network_config(dc_cfg=net_config, \
        dc_cidr_v6=cidrv6, dc_network_v6=networkv6))
    sec_conn = AwsSecGroup(aws_conn=aws_conn, cfgs=config.get_settings(), \
            dc_cfg=net_config, vpc_id=vpc_id, ipv6=dc_ipv6 \
            )
    if not sec_conn:
        print('error\n')
        sys.exit(-1)
    # delete_old=False: existing security-group rules are kept, not pruned
    print('sec mod -> {}\n'.format(sec_conn.modify(delete_old=False)))
def petsc_to_stencil(x, Xh):
    """Convert a PETSc vector's numpy array to StencilVector or BlockVector format."""
    return array_to_stencil(x.array, Xh)
def index_internal_txs_task(self) -> Optional[int]:
    """
    Find and process internal txs for monitored addresses.

    :return: Number of traces processed, or ``None`` if another instance of
        this task already holds the lock (the ``LockError`` is suppressed
        and the function falls through without an explicit return)
    """
    # If another worker holds the lock, only_one_running_task raises
    # LockError; suppress() swallows it so this invocation exits quietly.
    with contextlib.suppress(LockError):
        with only_one_running_task(self):
            logger.info("Start indexing of internal txs")
            number_traces = InternalTxIndexerProvider().start()
            logger.info("Find internal txs task processed %d traces", number_traces)
            if number_traces:
                # Only queue the follow-up decoding task when new traces
                # were actually indexed.
                logger.info("Calling task to process decoded traces")
                process_decoded_internal_txs_task.delay()
            return number_traces
def make_lagrangian(func, equality_constraints):
    """Make a Lagrangian function from an objective function `func` and `equality_constraints`

    Args:
        func (callable): Unary callable with signature `f(x, *args, **kwargs)`
        equality_constraints (callable): Unary callable with signature `h(x, *args, **kwargs)`

    Returns:
        tuple: Triple of callables (init_multipliers, lagrangian, get_params)
    """
    def init_multipliers(params, *args, **kwargs):
        # Shape inference only — the constraints are not actually evaluated.
        constraint_shapes = jax.eval_shape(equality_constraints, params, *args, **kwargs)
        multipliers = tree_util.tree_map(
            lambda leaf: np.zeros(leaf.shape, leaf.dtype), constraint_shapes
        )
        return params, multipliers

    def lagrangian(params, multipliers, *args, **kwargs):
        constraints = equality_constraints(params, *args, **kwargs)
        return -func(params, *args, **kwargs) + math.pytree_dot(multipliers, constraints)

    def get_params(opt_state):
        return opt_state[0]

    return init_multipliers, lagrangian, get_params
def render_task(task: todotxt.Task, namespace: argparse.Namespace, level: int = 0) -> str:
    """Render one task, prefixed with a list bullet when nested."""
    if level:
        indent = (level - 1) * " " + "- "
    else:
        indent = ""
    body = colorize(reference(task, namespace), namespace)
    children = render_blocked_tasks(task, namespace, level)
    return indent + body + children
def ungroup(expr):
    """Helper to undo pyparsing's default grouping of And expressions,
    even if all but one are non-empty.
    """
    converter = TokenConverter(expr)
    return converter.addParseAction(lambda tokens: tokens[0])
def main():
    """Train an ensemble of MiniVGGNet models on CIFAR-10.

    For each of --num-models models: train with data augmentation for 40
    epochs, save the model to --models, and write a classification report
    plus a loss/accuracy plot to --output.
    """
    # construct the argument parse and parse the arguments
    args = argparse.ArgumentParser()
    args.add_argument("-o", "--output", required=True, help="path to output directory")
    args.add_argument("-m", "--models", required=True, help="path to output models directory")
    args.add_argument("-n", "--num-models", type=int, default=5, help="# of models to train")
    args = vars(args.parse_args())
    # load the training and testing data, then scale it into the range [0, 1]
    ((train_x, train_y), (test_x, test_y)) = cifar10.load_data()
    train_x = train_x.astype("float") / 255.0
    test_x = test_x.astype("float") / 255.0
    # convert the labels from integers to vectors
    label_binarizer = LabelBinarizer()
    train_y = label_binarizer.fit_transform(train_y)
    test_y = label_binarizer.transform(test_y)
    # initialize the label names for the CIFAR-10 dataset
    label_names = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
    # construct the image generator for data augmentation
    augmentation = ImageDataGenerator(
        rotation_range=10, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, fill_mode="nearest"
    )
    # loop over the number of models to train
    for i in np.arange(0, args["num_models"]):
        # initialize the optimizer and model
        print("[INFO] training model {}/{}".format(i + 1, args["num_models"]))
        opt = SGD(lr=0.01, decay=0.01 / 40, momentum=0.9, nesterov=True)
        model = MiniVGGNet.build(width=32, height=32, depth=3, classes=10)
        model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
        # train the network
        # NOTE(review): fit_generator and the history keys "acc"/"val_acc"
        # below assume an older Keras; newer versions use fit() and
        # "accuracy"/"val_accuracy" — confirm against the installed version.
        model_fit = model.fit_generator(
            augmentation.flow(train_x, train_y, batch_size=64),
            validation_data=(test_x, test_y),
            epochs=40,
            steps_per_epoch=len(train_x) // 64,
            verbose=1,
        )
        # save the model to disk
        path = [args["models"], "model_{}.model".format(i)]
        model.save(os.path.sep.join(path))
        # evaluate the network
        predictions = model.predict(test_x, batch_size=64)
        report = classification_report(test_y.argmax(axis=1), predictions.argmax(axis=1), target_names=label_names)
        # save the classification report to file
        path = [args["output"], "model_{}.txt".format(i)]
        f = open(os.path.sep.join(path), "w")
        f.write(report)
        f.close()
        # plot the training loss and accuracy
        path = [args["output"], "model_{}.png".format(i)]
        plt.style.use("ggplot")
        plt.figure()
        plt.plot(np.arange(0, 40), model_fit.history["loss"], label="train_loss")
        plt.plot(np.arange(0, 40), model_fit.history["val_loss"], label="val_loss")
        plt.plot(np.arange(0, 40), model_fit.history["acc"], label="train_acc")
        plt.plot(np.arange(0, 40), model_fit.history["val_acc"], label="val_acc")
        plt.title("Training Loss and Accuracy for model {}".format(i))
        plt.xlabel("Epoch #")
        plt.ylabel("Loss/Accuracy")
        plt.legend()
        plt.savefig(os.path.sep.join(path))
        plt.close()
def remove_tmp_directories():
    """
    Remove the temporary directories registered in the module-level
    ``tmp_directories`` list.

    Returns:
        True
    """
    import shutil  # local import: only this cleanup helper needs it
    for tmp_dir in tmp_directories:
        # BUG FIX: os.remove() only deletes files and raises on a directory;
        # shutil.rmtree() removes the whole directory tree as intended.
        shutil.rmtree(tmp_dir)
    return True
def get_actual_type(arg_type: Type, kind: int,
                    tuple_counter: List[int]) -> Type:
    """Return the type of an actual argument with the given kind.

    If the argument is a *arg, return the individual argument item.

    tuple_counter is a mutable one-element list used as a counter: when a
    tuple is passed as *arg, successive calls consume successive tuple
    items, and the counter records how many have been handed out so far.
    """
    if kind == nodes.ARG_STAR:
        if isinstance(arg_type, Instance):
            if arg_type.type.fullname() == 'builtins.list':
                # List *arg: each item has the list's element type.
                return arg_type.args[0]
            elif arg_type.args:
                # TODO try to map type arguments to Iterable
                return arg_type.args[0]
            else:
                # Non-generic instance: nothing better than Any.
                return AnyType()
        elif isinstance(arg_type, TupleType):
            # Get the next tuple item of a tuple *arg.
            tuplet = cast(TupleType, arg_type)
            tuple_counter[0] += 1
            return tuplet.items[tuple_counter[0] - 1]
        else:
            return AnyType()
    elif kind == nodes.ARG_STAR2:
        if isinstance(arg_type, Instance) and (
                (cast(Instance, arg_type)).type.fullname() == 'builtins.dict'):
            # Dict **arg. TODO more general (Mapping)
            # args[1] is the dict's value type — the type of each kwarg value.
            return (cast(Instance, arg_type)).args[1]
        else:
            return AnyType()
    else:
        # No translation for other kinds.
        return arg_type
def trigger_write_PART(code, args, text, raw):
    """
    ID: PART
    Description: Triggered when the bot writes a PART, so that we can unhook
    the database from that channel.
    Format: part <channel>
    """
    # BUG FIX: the original deleted code.chan[args[1]] twice; the second
    # delete always raised KeyError because the key was already removed.
    del code.chan[args[1]]
    del code.logs['channel'][args[1]]
def cmd():
    """
    run from cmd
    :return:
    """
    args = parser.parse_args()
    command = args.command
    # Dispatch table: each known sub-command maps to its handler; anything
    # else falls through to manage().
    handlers = {
        'init': lambda: init(args.folder),          # init workspace for gerapy
        'generate': lambda: generate(args.project),  # generate code from configuration
        'parse': lambda: parse(args),               # debug parse for project
    }
    handlers.get(command, manage)()
def test_qnn_legalize():
    """Test directly replacing an operator with a new one"""
    def before():
        # Graph containing a single qnn.requantize op.
        x = relay.var("x", shape=(1, 64, 56, 56), dtype='int8')
        y = relay.qnn.op.requantize(x,
                                    input_scale=1,
                                    input_zero_point=0,
                                    output_scale=1,
                                    output_zero_point=0,
                                    out_dtype='int8')
        y = relay.Function([x], y)
        return y
    # NOTE(review): registration is process-global and happens at test run
    # time; the level is set high, presumably so this legalization wins over
    # any default one — confirm relative priorities.
    @register_qnn_legalize("qnn.requantize", level=100)
    def legalize_qnn_requantize(attrs, inputs, types):
        # Rewrite requantize(x) as requantize(0 + x).
        data = inputs[0]
        data = relay.add(relay.const(0, 'int8'), data)
        y = relay.qnn.op.requantize(data,
                                    input_scale=1,
                                    input_zero_point=0,
                                    output_scale=1,
                                    output_zero_point=0,
                                    out_dtype='int8')
        return y
    def expected():
        # The graph that the registered legalization should produce.
        x = relay.var("x", shape=(1, 64, 56, 56), dtype='int8')
        y = relay.add(relay.const(0, 'int8'), x)
        z = relay.qnn.op.requantize(y,
                                    input_scale=1,
                                    input_zero_point=0,
                                    output_scale=1,
                                    output_zero_point=0,
                                    out_dtype='int8')
        z = relay.Function([x], z)
        return z
    a = before()
    # Check that Relay Legalize does not change the graph.
    a = run_opt_pass(a, relay.transform.Legalize())
    b = run_opt_pass(before(), transform.InferType())
    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
    # Check that QNN Legalize modifies the graph.
    a = run_opt_pass(a, relay.qnn.transform.Legalize())
    b = run_opt_pass(expected(), transform.InferType())
    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
def load(data, schema, yamlLoader=yaml.UnsafeLoader):
    """
    Loads the given data and validates it according to the schema provided.

    Data must be either JSON or YAML, it must be a dictionary, a path, or a string of JSON.
    Schema must be JSON, it must be a dictionary, a path, or a string of JSON.
    """
    # SECURITY NOTE: the default yamlLoader is yaml.UnsafeLoader, which can
    # construct arbitrary Python objects during YAML parsing. Never rely on
    # the default for untrusted input; pass yaml.SafeLoader explicitly.
    if isJson(data):
        return loadAndValidateJson(data, schema)
    # Anything that is not JSON is treated as YAML.
    return loadAndValidateYaml(data, schema, yamlLoader=yamlLoader)
def calculate_slice_rotations(im_stack: np.ndarray, max_rotation: float = 45) -> List[float]:
    """Calculate the rotation angle to align each slice so the
    object's long axis is aligned with the horizontal axis.

    Parameters
    ----------
    im_stack : np.ndarray
        A stack of images. The images should be binary
        or label images. The regions are found and processed
        with the scikit-image label and regionprops functions.
        The stack should have shape (z, y, x) for z images
        with shape (y, x).
    max_rotation : float
        The maximum allowed rotation between slices in degrees.
        If this value is exceeded, it is assumed that the
        opposite rotation was found and the angle is flipped by 180.
        The default value is 45.

    Returns
    -------
    rotations : List[float]
        The rotation for each slice in degrees.
    """
    # CLEANUP: the original also accumulated `previous_values` and
    # `rotations_raw`, neither of which was ever read — removed.
    rotations = []
    prev_rot = 0
    for i, im in enumerate(im_stack):
        rp = regionprops(im.astype(int))
        if len(rp) > 0:
            # regionprops orientation is in radians; convert and offset so a
            # horizontal long axis maps to 0 degrees.
            orientation = rp[0].orientation
            angle_in_degrees = orientation * (180 / np.pi) + 90
        else:
            # No labelled region in this slice: treat it as unrotated.
            angle_in_degrees = 0
        if i > 0:
            # Check if we should flip the rotation: a jump larger than
            # max_rotation means the opposite orientation was detected.
            if abs(prev_rot - angle_in_degrees) > max_rotation:
                angle_in_degrees = -1 * (180 - angle_in_degrees)
        prev_rot = angle_in_degrees
        rotations.append(angle_in_degrees)
    return rotations
def vocab_count(corpus, min_count=5, outdir='output', bindir=''):
    """Run GloVe's vocab_count binary over *corpus*, writing the vocab to *outdir*."""
    out_path = Path(outdir)
    out_path.mkdir(exist_ok=True)
    vocab_file = out_path / VOCAB_FNAME
    # Assemble the shell command with input/output redirection.
    command = (
        os.path.join(bindir, 'vocab_count')
        + f' -min-count {min_count}'
        + f' < {corpus} > {vocab_file}'
    )
    runcmd(command)
def calc_rest_interval(data):
    """
    SubTool for Investigate: after median_deviation filters through all the
    points, run entropy on the remaining non-rest points. This filters the
    close-but-possibly-still-rest points.

    Returns:
        (result, average, rest): points kept as activity, the median of the
        input data, and the points classified as rest.
    """
    # CLEANUP: the original also defined an unused `maximum` local and a
    # redundant `array = lst` alias — both removed.
    candidates, rest = median_deviation(data)
    average = median(data)
    base_entropy = entropy(candidates)
    result = []
    for point in candidates:
        # Swap this point for the median and re-measure entropy: a drop of
        # more than one fifth means the point behaves like a rest value.
        trial = list(candidates)
        trial.remove(point)
        trial.insert(0, average)
        if entropy(trial) < (base_entropy - base_entropy / 5):
            rest.append(point)
        else:
            result.append(point)
    return result, average, rest
def get_objanno(fin_anno, godag, namespace='all'):
    """Build and return the annotation object for the given annotation file."""
    full_name = get_anno_fullname(fin_anno)
    return get_objanno_factory(full_name, godag=godag, namespace=namespace)
def delete_library_block(usage_key, remove_from_parent=True):
    """
    Delete the specified block from this library (and any children it has).

    If the block's definition (OLX file) is within this same library as the
    usage key, both the definition and the usage will be deleted.

    If the usage points to a definition in a linked bundle, the usage will be
    deleted but the link and the linked bundle will be unaffected.

    If the block is in use by some other bundle that links to this one, that
    will not prevent deletion of the definition.

    remove_from_parent: modify the parent to remove the reference to this
    delete block. This should always be true except when this function
    calls itself recursively.

    Raises NotImplementedError when the block is not top-level and
    remove_from_parent is True (removing the child reference from the
    parent is not yet implemented).
    """
    def_key, lib_bundle = _lookup_usage_key(usage_key)
    # Create a draft:
    draft_uuid = get_or_create_bundle_draft(def_key.bundle_uuid, DRAFT_NAME).uuid
    # Does this block have a parent?
    if usage_key not in lib_bundle.get_top_level_usages() and remove_from_parent:
        # Yes: this is not a top-level block.
        # First need to modify the parent to remove this block as a child.
        raise NotImplementedError
    # Does this block have children?
    block = load_block(usage_key, user=None)
    if block.has_children:
        # Next, recursively call delete_library_block(...) on each child usage
        for child_usage in block.children:
            # Specify remove_from_parent=False to avoid unnecessary work to
            # modify this block's children list when deleting each child, since
            # we're going to delete this block anyways.
            delete_library_block(child_usage, remove_from_parent=False)
    # Delete the definition:
    if def_key.bundle_uuid == lib_bundle.bundle_uuid:
        # This definition is in the library, so delete it:
        path_prefix = lib_bundle.olx_prefix(def_key)
        for bundle_file in get_bundle_files(def_key.bundle_uuid, use_draft=DRAFT_NAME):
            if bundle_file.path.startswith(path_prefix):
                # Delete this file, within this definition's "folder"
                # (contents=None marks the file as deleted in the draft)
                write_draft_file(draft_uuid, bundle_file.path, contents=None)
    else:
        # The definition must be in a linked bundle, so we don't want to delete
        # it; just the <xblock-include /> in the parent, which was already
        # deleted above.
        pass
    # Clear the bundle cache so everyone sees the deleted block immediately:
    lib_bundle.cache.clear()
    LIBRARY_BLOCK_DELETED.send(sender=None, library_key=lib_bundle.library_key, usage_key=usage_key)
def create_table_descriptives(datasets):
    """Merge per-dataset descriptive tables into one frame indexed by dataset name."""
    frames = [pd.read_json(ds, orient="index") for ds in datasets]
    merged = pd.concat(frames, axis=0)
    merged.index.name = "dataset_name"
    return merged
def test_character_start():
    """
    Test that a password starting with a special character is rejected.
    """
    # IDIOM FIX: `== False` (flake8 E712) replaced with `not ...`.
    assert not validate_if("!éáíúópaodsipas")
from contextlib import contextmanager


# BUG FIX: the doctests use `with timeLimit(...)`, but the undecorated
# generator function has no __enter__/__exit__; @contextmanager is required
# for the documented usage to work. (NOTE(review): the decorator may have
# been lost in extraction — confirm against the original source.)
@contextmanager
def timeLimit(seconds: int) -> Generator[None, None, None]:
    """
    http://stackoverflow.com/a/601168

    Use to limit the execution time of a function. Raises an exception if the execution of the
    function takes more than the specified amount of time.

    :param seconds: maximum allowable time, in seconds

    >>> import time
    >>> with timeLimit(2):
    ...    time.sleep(1)

    >>> import time
    >>> with timeLimit(1):
    ...    time.sleep(2)
    Traceback (most recent call last):
        ...
    RuntimeError: Timed out
    """
    # noinspection PyUnusedLocal
    def signal_handler(signum: int, frame: Any) -> None:
        raise RuntimeError('Timed out')

    # SIGALRM fires after `seconds`; only available on POSIX systems.
    signal.signal(signal.SIGALRM, signal_handler)
    signal.alarm(seconds)
    try:
        yield
    finally:
        # Always cancel the pending alarm, even if the body raised.
        signal.alarm(0)
def test_ap_blacklist_all(dev, apdev, params):
    """Ensure we clear the blacklist if all visible APs reject"""
    # Both APs accept zero stations, so every association attempt is rejected.
    hapd0 = hostapd.add_ap(apdev[0], {"ssid": "test-open", "max_num_sta": "0"})
    hapd1 = hostapd.add_ap(apdev[1], {"ssid": "test-open", "max_num_sta": "0"})
    bss0 = hapd0.own_addr()
    bss1 = hapd1.own_addr()
    # Attempt (and fail) to connect to each AP in turn to populate the blacklist.
    dev[0].connect("test-open", key_mgmt="NONE", scan_freq="2412",
                   wait_connect=False, bssid=bss0)
    if not dev[0].wait_event(["CTRL-EVENT-AUTH-REJECT"], timeout=10):
        raise Exception("AP 0 didn't reject us")
    dev[0].request("REMOVE_NETWORK all")
    dev[0].dump_monitor()
    dev[0].connect("test-open", key_mgmt="NONE", scan_freq="2412",
                   wait_connect=False, bssid=bss1)
    if not dev[0].wait_event(["CTRL-EVENT-AUTH-REJECT"], timeout=10):
        raise Exception("AP 1 didn't reject us")
    blacklist = get_blacklist(dev[0])
    logger.info("blacklist: " + str(blacklist))
    dev[0].request("REMOVE_NETWORK all")
    dev[0].dump_monitor()
    # Now let AP 0 accept one station.
    hapd0.set("max_num_sta", "1")
    # All visible APs were blacklisted; we should clear the blacklist and find
    # the AP that now accepts us.
    dev[0].scan_for_bss(bss0, freq=2412)
    dev[0].connect("test-open", key_mgmt="NONE", scan_freq="2412", bssid=bss0)
def get_ngram_universe(sequence, n):
    """
    Compute the universe of possible ngrams of length *n* for a sequence.
    When n equals the sequence length, the result is the sequence universe.

    Example
    --------
    >>> sequence = [2,1,1,4,2,2,3,4,2,1,1]
    >>> ps.get_ngram_universe(sequence, 3)
    64
    """
    # With recurrence allowed the universe is k^n, where k is the alphabet
    # size (SSA pg 68).
    alphabet_size = len(set(sequence))
    if alphabet_size > 10 and n > 10:
        return 'really big'
    return alphabet_size ** n
def adjust_spines(ax, spines, points_outward=10):
    """
    Helps re-create the spartan style of Jean-luc Doumont's graphs:
    hides every spine not listed in *spines*, colours the kept ones gray,
    and pushes them outside the plot area by *points_outward* points.
    """
    for location, spine in ax.spines.items():
        if location in spines:
            spine.set_position(('outward', points_outward))
            #spine.set_smart_bounds(True)
            spine.set_color('gray')
        else:
            # hide spines that were not requested
            spine.set_color('none')
    # only draw ticks where a spine remains
    if 'left' in spines:
        ax.yaxis.set_ticks_position('left')
    else:
        ax.yaxis.set_ticks([])  # no yaxis ticks
    if 'bottom' in spines:
        ax.xaxis.set_ticks_position('bottom')
    else:
        ax.xaxis.set_ticks([])  # no xaxis ticks
def as_nested_dict(
    obj: Union[DictLike, Iterable[DictLike]], dct_class: type = DotDict
) -> Union[DictLike, Iterable[DictLike]]:
    """
    Recursively convert a dict-formatted object (and any nested dictionaries)
    into the provided dct_class.

    Args:
        - obj (Any): An object that is formatted as a `dict`
        - dct_class (type): the `dict` class to use (defaults to DotDict)

    Returns:
        - A `dct_class` representation of the object passed in
    ```
    """
    if isinstance(obj, (list, tuple, set)):
        converted = [as_nested_dict(item, dct_class) for item in obj]
        return type(obj)(converted)
    # `Box` handles its own recursion when converted with dict(); recursing
    # into its `__dict__` would pull out its "private" keys, so special-case
    # it before the generic dict branch.
    if isinstance(obj, Box):
        return dict(obj)
    if isinstance(obj, (dict, DotDict)):
        # DotDicts could have keys that shadow `update` and `items`, so we
        # take care to avoid accessing those keys here.
        source = getattr(obj, "__dict__", obj)
        return dct_class(
            {key: as_nested_dict(value, dct_class) for key, value in source.items()}
        )
    return obj
def twoindices_positive_up_to(n, m):
    """
    Build 2D integer indices on the grid [0, n] x [0, m]
    (each axis scanned from 0 to its bound, inclusive).

    :param n: inclusive upper bound of the first index (positive integer)
    :param m: inclusive upper bound of the second index (positive integer)
    :return: float array of shape ((n+1)*(m+1), 2) listing every index pair
    :raises ValueError: if n or m is not a positive integer
    """
    if not isinstance(n, int) or n <= 0:
        raise ValueError("%s is not a positive integer" % str(n))
    # BUG FIX: the original validated only n; m was used unchecked, so a bad
    # m produced a confusing numpy error (or garbage) instead of ValueError.
    if not isinstance(m, int) or m <= 0:
        raise ValueError("%s is not a positive integer" % str(m))
    nbpos_n = n + 1
    nbpos_m = m + 1
    # A complex step count makes mgrid produce nbpos points inclusive of both ends.
    gripos = np.mgrid[: n : nbpos_n * 1j, : m : nbpos_m * 1j]
    indices_pos = np.reshape(gripos.T, (nbpos_n * nbpos_m, 2))
    return indices_pos
def get_data_lists(data, MOT=False):
    """ Prepare rolo data for SORT

    Arguments:
        data: config of the following form:
            {
                'image_folder': data_folder + 'images/train/',
                'annot_folder': data_folder + 'annotations/train/',
                'detected_folder': data_folder + 'detected/train/',
                'sort_det_folder': data_folder + 'sort/train/'
            }
        MOT: if True, assume detections already exist in MOT format under
            sort_det_folder instead of converting the .npy detections

    Returns:
        video_annots: list of annotation file paths
        video_folders: list of video folder paths
        det_list: path name list of detected results
    """
    if not os.path.exists(data['image_folder']):
        raise IOError("Wrong image folder path:", data['image_folder'])
    else:
        print("Data folder:", data['image_folder'])
    if not os.path.exists(data['annot_folder']):
        raise IOError("Wrong annotation folder path:", data['annot_folder'])
    else:
        print("Annotations folder:", data['annot_folder'])
    # Get the annotations as a list: [video1ann.txt, video2ann.txt, video3ann.txt, ...]
    video_annots = sorted(glob.glob((data['annot_folder'] + "*")))
    sort_nicely(video_annots)
    if not os.path.exists(data['detected_folder']):
        os.makedirs(data['detected_folder'])
    else:
        print("Detected folder:", data['detected_folder'])
    # NOTE(review): when there are fewer detected files than annotation
    # files this exits the process after printing only the two counts —
    # consider raising an informative error instead.
    if len(glob.glob((data['detected_folder'] + "*"))) < len(video_annots):
        print(len(glob.glob((data['detected_folder'] + "*"))))
        print(len(video_annots))
        exit()
    video_folders_list = sorted(glob.glob((data['image_folder'] + '*')))
    sort_nicely(video_folders_list)
    # Run (or refresh) detection for every video before building the lists.
    detect_videos(video_annots, video_folders_list, data['detected_folder'])
    video_folders = []
    det_list = []
    for i, annot_path in enumerate(video_annots):
        video_name = splitext(basename(annot_path))[0]  # Get the file name from its full path
        video_folder = os.path.join(data['image_folder'], video_name)
        if not os.path.exists(video_folder):
            raise IOError("Video folder does not exit:", video_folder)
        video_folders.append(video_folder)
        detected_name = os.path.join(data['detected_folder'], video_name + '.npy')
        if not os.path.exists(detected_name):
            raise IOError("Detected file does not exit:", detected_name)
        if not os.path.exists(data['sort_det_folder']):
            os.makedirs(data['sort_det_folder'])
        if MOT:
            # Detections are assumed to already exist in MOT format.
            mot_det_path = data['sort_det_folder'] + video_name + '.txt'
        else:
            # Convert the .npy detections into the MOT box format for SORT.
            mot_det_path = change_box_format(detected_name, data['sort_det_folder'], video_name)
        det_list.append(mot_det_path)
    return video_annots, video_folders, det_list
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.