blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
32299d75b478e539707e32ef50bd264407775fda | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02796/s396415723.py | 183a9157222ec19c1853c4eb1252eb8dcabd5ca6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | rs = [(x - l, x + l) for x, l in (map(int, input().split()) for _ in range(int(input())))]
# Greedy interval scheduling: process intervals by increasing right endpoint
# and keep each one whose left endpoint does not overlap the last kept interval.
rs.sort(key=lambda interval: interval[1])
last = - 10 ** 9
ans = 0
for left, right in rs:
    # Keep this interval only if it starts at or after the previous end.
    if last <= left:
        ans += 1
        last = right
print(ans)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
122b76e57de2082a15a22ffe30f332ef29d31dd6 | 8245ecc361319340b5b196b76dc8cf1d5075c3b1 | /reservations/views.py | 6872d1de2dbff040b8a1412b6e1b63bdd5a01625 | [] | no_license | Korimse/airbnb_clone | bc267e384fc098f179387ba3153614c71f999edc | c20a82cb196ad9ad6b697cf874bca34b5461c87e | refs/heads/master | 2023-06-30T11:17:53.412006 | 2021-08-03T16:30:47 | 2021-08-03T16:30:47 | 391,269,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,313 | py | import datetime
from django.views.generic import View
from django.contrib import messages
from django.shortcuts import render, redirect, reverse
from django.http import Http404
from rooms import models as room_models
from reviews import forms as review_forms
from . import models
class CreateError(Exception):
    """Raised internally by the booking flow to signal that a reservation
    cannot be created (the requested day is already booked)."""
def create(request, room, year, month, day):
    """Create a one-night reservation for `room` starting on the given date.

    Control flow is EAFP: if the Room lookup fails, or the day is already
    booked (signalled by raising CreateError after a successful BookedDay
    lookup), the user is bounced to the home page with an error message.
    Only when BookedDay.DoesNotExist fires is the reservation created.
    """
    try:
        date_obj = datetime.datetime(year, month, day)
        room = room_models.Room.objects.get(pk=room)
        # If this lookup SUCCEEDS the day is taken, so we deliberately raise
        # CreateError to fall into the error branch below.
        models.BookedDay.objects.get(day=date_obj, reservation__room=room)
        raise CreateError()
    except (room_models.Room.DoesNotExist, CreateError):
        messages.error(request, "Can't Reserve That Room")
        return redirect(reverse("core:home"))
    except models.BookedDay.DoesNotExist:
        # Happy path: the day is free, book a single night.
        reservation = models.Reservation.objects.create(
            guest=request.user,
            room=room,
            check_in=date_obj,
            check_out=date_obj + datetime.timedelta(days=1),
        )
        return redirect(reverse("reservations:detail", kwargs={"pk": reservation.pk}))
class ReservationDetailView(View):
    """Show a reservation's detail page, visible only to its guest or the
    room's host; anyone else gets a 404."""

    def get(self, *args, **kwargs):
        reservation = models.Reservation.objects.get_or_none(pk=kwargs.get("pk"))
        # The requester must be either the guest or the host of the room.
        allowed = bool(reservation) and (
            reservation.guest == self.request.user
            or reservation.room.host == self.request.user
        )
        if not allowed:
            raise Http404()
        context = {
            "reservation": reservation,
            "form": review_forms.CreateReviewForm(),
        }
        return render(self.request, "reservations/detail.html", context)
def edit_reservation(request, pk, verb):
    """Confirm or cancel a reservation; only its guest or host may do so.

    Cancelling also deletes the BookedDay rows so the dates free up again.
    """
    booking = models.Reservation.objects.get_or_none(pk=pk)
    # Only the guest or the room's host may change the reservation.
    is_party = bool(booking) and (
        booking.guest == request.user or booking.room.host == request.user
    )
    if not is_party:
        raise Http404()
    if verb == "confirm":
        booking.status = models.Reservation.STATUS_CONFIRMED
    elif verb == "cancel":
        booking.status = models.Reservation.STATUS_CANCELED
        models.BookedDay.objects.filter(reservation=booking).delete()
    booking.save()
    messages.success(request, "Reservation Updated")
    return redirect(reverse("reservations:detail", kwargs={"pk": booking.pk}))
| [
"korimse@gmail.com"
] | korimse@gmail.com |
66af24b8e79c42a8dc7aa3ebdc1ace6b22534927 | f9e8733ed87858b12bfee6b70ccdddd6a616b60a | /73.py | 1b81f914bf328740233161f9aaa72772c4032d9f | [] | no_license | MajestyLee/leetcode_TopInterview | c1c9c923d3bf42cd4777bb2a2ccd21654a7c6dbb | 30b7d5acec716b7d754141835fc8bafe4411437e | refs/heads/master | 2020-04-01T12:19:20.837383 | 2018-11-06T02:13:44 | 2018-11-06T02:13:44 | 153,200,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,201 | py | '''
Given a m x n matrix, if an element is 0, set its entire row and column to 0. Do it in-place.
Example 1:
Input:
[
[1,1,1],
[1,0,1],
[1,1,1]
]
Output:
[
[1,0,1],
[0,0,0],
[1,0,1]
]
Example 2:
Input:
[
[0,1,2,0],
[3,4,5,2],
[1,3,1,5]
]
Output:
[
[0,0,0,0],
[0,4,5,0],
[0,3,1,0]
]
Follow up:
A straight forward solution using O(mn) space is probably a bad idea.
A simple improvement uses O(m + n) space, but still not the best solution.
Could you devise a constant space solution?
'''
class Solution(object):
    def setZeroes(self, matrix):
        """
        Zero out, in place, every row and column that contains a 0.

        Fixes over the previous version: rows are mutated in place instead of
        being replaced by brand-new list objects (which changed row identity),
        the redundant set->list conversions are gone, and an empty matrix no
        longer indexes matrix[0].

        :type matrix: List[List[int]]
        :rtype: None (matrix is modified in place)
        """
        if not matrix or not matrix[0]:
            return
        # First pass: record which rows/columns contain a zero (O(m+n) space).
        zero_rows = set()
        zero_cols = set()
        for i, row in enumerate(matrix):
            for j, val in enumerate(row):
                if val == 0:
                    zero_rows.add(i)
                    zero_cols.add(j)
        # Second pass: clear the marked rows and columns in place.
        for i, row in enumerate(matrix):
            for j in range(len(row)):
                if i in zero_rows or j in zero_cols:
                    row[j] = 0
"binjie_lee@163.com"
] | binjie_lee@163.com |
8171b639c10a5af7c9974b7827ca99ad7d23f1e9 | 9b93d0591edf01684254b460d26f56cc180eee24 | /assignment2/neural_lm.py | a2b235e061eaaf246dd339944f941501097747fb | [] | no_license | GuyTevet/nlp-tau | 5d9ad3da8722df395241123dccc187f4d9d2044f | bba13ec7ca8a24724c78ff37d269367bd9a03e69 | refs/heads/master | 2021-04-06T00:50:19.088880 | 2018-06-01T08:32:33 | 2018-06-01T08:32:33 | 125,262,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,558 | py | #!/usr/local/bin/python
import pandas as pd
import time
import os.path
from data_utils import utils as du
from numpy import *
from neural import *
from sgd import *
VOCAB_EMBEDDING_PATH = "data/lm/vocab.embeddings.glove.txt"
BATCH_SIZE = 50
NUM_OF_SGD_ITERATIONS = 40000
LEARNING_RATE = 0.3


def load_vocab_embeddings(path=VOCAB_EMBEDDING_PATH):
    """Read GloVe vectors from `path`.

    Each line holds a word followed by its 50-dimensional embedding; the
    word token itself is dropped. Returns a list of 50-float lists, one per
    vocabulary entry, in file order.
    """
    embeddings = []
    with open(path) as vec_file:
        for raw_line in vec_file:
            fields = raw_line.strip().split()
            vector = [float(field) for field in fields[1:]]
            assert len(vector) == 50
            embeddings.append(vector)
    return embeddings
def load_data_as_sentences(path, word_to_num):
    """
    Converts the training data to an array of integer arrays.

    args:
        path: string pointing to the training data
        word_to_num: A dictionary from string words to integers

    returns:
        A tuple (docs_data, S_data): the raw loaded documents and the same
        data converted to integer indices. Each entry of S_data is a
        sentence and each integer is a word.
    """
    # du is the project's data_utils module; load_dataset reads the raw
    # documents and docs_to_indices maps words to their integer ids.
    docs_data = du.load_dataset(path)
    S_data = du.docs_to_indices(docs_data, word_to_num)
    return docs_data, S_data
def convert_to_lm_dataset(S):
    """
    Takes a dataset that is a list of sentences as an array of integer arrays.
    Returns the dataset as a bigram prediction problem: for any word, predict
    the next word.

    IMPORTANT: we have two padding tokens at the beginning but since we are
    training a bigram model, only one will be used, so the first two
    positions of each sentence never appear as targets.

    Fix: the original used Python-2-only `xrange`; this version iterates
    with `zip` over slices, producing identical output on Python 2 and 3.
    """
    in_word_index, out_word_index = [], []
    for sentence in S:
        # Pair word i-1 with word i for every i >= 2.
        for prev_word, cur_word in zip(sentence[1:-1], sentence[2:]):
            in_word_index.append(prev_word)
            out_word_index.append(cur_word)
    return in_word_index, out_word_index
def shuffle_training_data(in_word_index, out_word_index):
    """Shuffle two parallel lists in unison, preserving their pairing.

    Fix: `zip` returns an iterator on Python 3, and `random.shuffle` needs a
    sequence, so the pairs are materialized with `list` first. Behavior on
    Python 2 (where zip already returned a list) is unchanged.
    """
    combined = list(zip(in_word_index, out_word_index))
    random.shuffle(combined)
    return zip(*combined)
def int_to_one_hot(number, dim):
    """Return a length-`dim` float vector that is 1.0 at index `number`
    and 0.0 everywhere else."""
    one_hot = np.zeros(dim)
    one_hot[number] = 1.0
    return one_hot
def lm_wrapper(in_word_index, out_word_index, num_to_word_embedding, dimensions, params):
    """Sample one random mini-batch and run forward/backward propagation.

    Returns (cost, grad), each averaged over BATCH_SIZE.

    Fix: removed the dead `np.zeros` preallocations of `data` and `labels`
    (both were unconditionally overwritten below), which also drops the
    unnecessary dependence on the module-level input_dim/output_dim globals.

    Note: reads BATCH_SIZE and the module-level `vocabsize`, and calls
    `forward_backward_prop` from the project's `neural` module.
    """
    # Sample BATCH_SIZE training pairs uniformly at random (with replacement).
    num_of_examples = len(in_word_index)
    random_indices = np.random.choice(num_of_examples, BATCH_SIZE)

    # Inputs: the embedding row of each sampled context word.
    data = np.array(num_to_word_embedding)[np.array(in_word_index)[random_indices]]

    # Targets: one-hot rows over the vocabulary.
    labels = np.array(out_word_index)[random_indices]
    one_hot_labels = np.zeros((BATCH_SIZE, vocabsize))
    one_hot_labels[np.arange(BATCH_SIZE), labels] = 1

    cost, grad = forward_backward_prop(data, one_hot_labels, params, dimensions)

    # Average the batch totals.
    cost /= BATCH_SIZE
    grad /= BATCH_SIZE
    return cost, grad
def eval_neural_lm(eval_data_path):
    """
    Evaluate perplexity (use dev set when tuning and test at the end).

    Reads several module-level globals set up in __main__: word_to_num,
    num_to_word_embedding, params and dimensions, plus `forward` from the
    project's neural module -- it can only be called after training.
    Perplexity is 2 ** (-mean log2 probability of the correct next word).
    """
    _, S_dev = load_data_as_sentences(eval_data_path, word_to_num)
    in_word_index, out_word_index = convert_to_lm_dataset(S_dev)
    assert len(in_word_index) == len(out_word_index)
    num_of_examples = len(in_word_index)

    perplexity = 0
    ### YOUR CODE HERE
    # Accumulate log2 P(next word | previous word) over every bigram.
    prob_log_sum = 0
    for in_word , label in zip(in_word_index,out_word_index):
        # forward returns the model's probability for the correct label.
        prob = forward(num_to_word_embedding[in_word],label,params,dimensions)
        prob_log_sum += np.log2(prob)
    perplexity = 2 ** (-prob_log_sum * 1. / num_of_examples)
    ### END YOUR CODE
    return perplexity
if __name__ == "__main__":
# Load the vocabulary
vocab = pd.read_table("data/lm/vocab.ptb.txt", header=None, sep="\s+",
index_col=0, names=['count', 'freq'], )
vocabsize = 2000
num_to_word = dict(enumerate(vocab.index[:vocabsize]))
num_to_word_embedding = load_vocab_embeddings()
word_to_num = du.invert_dict(num_to_word)
# Load the training data
_, S_train = load_data_as_sentences('data/lm/ptb-train.txt', word_to_num)
in_word_index, out_word_index = convert_to_lm_dataset(S_train)
assert len(in_word_index) == len(out_word_index)
num_of_examples = len(in_word_index)
random.seed(31415)
np.random.seed(9265)
in_word_index, out_word_index = shuffle_training_data(in_word_index, out_word_index)
startTime=time.time()
# Training should happen here
# Initialize parameters randomly
# Construct the params
input_dim = 50
hidden_dim = 50
output_dim = vocabsize
dimensions = [input_dim, hidden_dim, output_dim]
params = np.random.randn((input_dim + 1) * hidden_dim + (
hidden_dim + 1) * output_dim, )
print "#params: " + str(len(params))
print "#train examples: " + str(num_of_examples)
# run SGD
params = sgd(
lambda vec:lm_wrapper(in_word_index, out_word_index, num_to_word_embedding, dimensions, vec),
params, LEARNING_RATE, NUM_OF_SGD_ITERATIONS, None, True, 1000)
print "training took %d seconds" % (time.time() - startTime)
# Evaluate perplexity with dev-data
perplexity = eval_neural_lm('data/lm/ptb-dev.txt')
print "dev perplexity : " + str(perplexity)
# Evaluate perplexity with test-data (only at test time!)
if os.path.exists('data/lm/ptb-test.txt'):
perplexity = eval_neural_lm('datllllla/lm/ptb-test.txt')
print "test perplexity : " + str(perplexity)
else:
print "test perplexity will be evaluated only at test time!"
| [
"guy.tvt@gmail.com"
] | guy.tvt@gmail.com |
db1548e5b6ec0456ee71194444efa7d5cb1a68a9 | a2cd0076377f3b660d0d2ba8d3caf8382281492f | /hotel_channel_connector_wubook/tests/test_reservation_restriction_item_model.py | 3ad7283debf0d8f19a926c2bf5416c8e15ce0603 | [] | no_license | hootel/hootel | 9f465beaabc9a5da9eb3d7f8aa1d8e987631301f | d46d3de23b8bae756400d7be645893ed11583ee0 | refs/heads/11.0 | 2023-05-11T05:18:59.550231 | 2023-04-27T17:06:51 | 2023-04-27T17:06:51 | 123,324,791 | 33 | 35 | null | 2023-04-27T17:06:52 | 2018-02-28T18:24:23 | Python | UTF-8 | Python | false | false | 1,920 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2017 Solucións Aloxa S.L. <info@aloxa.eu>
# Alexandre Díaz <dev@redneboa.es>
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import timedelta
from openerp.tools import (
DEFAULT_SERVER_DATETIME_FORMAT,
DEFAULT_SERVER_DATE_FORMAT)
from odoo.addons.hotel import date_utils
from .common import TestHotelWubook
class TestReservationRestrictionItem(TestHotelWubook):
    """Tests for writing hotel.room.type.restriction.item records."""

    def test_write(self):
        """Writing min_stay and date_start on a restriction should persist."""
        # Pick a date 20 days in the future for the restriction start.
        now_utc_dt = date_utils.now()
        day_utc_dt = now_utc_dt + timedelta(days=20)
        day_utc_str = day_utc_dt.strftime(DEFAULT_SERVER_DATE_FORMAT)

        # Any existing restriction record from the fixtures will do.
        rest_item_obj = self.env['hotel.room.type.restriction.item']
        restriction = rest_item_obj.search([], limit=1)
        self.assertTrue(restriction, "Can't found restriction for test")
        restriction.write({
            'min_stay': 3,
            'date_start': day_utc_str
        })
        # Fix: the failure message previously said "Invalid Max Avail",
        # a copy/paste leftover -- this assertion checks min_stay.
        self.assertEqual(restriction.min_stay, 3, "Invalid Min Stay")
        self.assertEqual(restriction.date_start, day_utc_str, "Invalid Date")
| [
"noreply@github.com"
] | noreply@github.com |
286e6feb771656a50d1d552a5583577d2f990bc4 | ce816d60b9aa647951749f86e8ed3e2bedc12ba6 | /fuzzylink/duplicates.py | 62ae785afbb3b88cc122bf0514d941d1ba523d6a | [
"MIT"
] | permissive | tseastmond/fuzzylink | 7c2bb1cecd0d97b24ff4c329a069255c774cbfe8 | 5aee255ab31b5a387694d512451e95791925ccfb | refs/heads/master | 2023-04-19T06:26:42.615848 | 2021-05-11T17:15:11 | 2021-05-11T17:15:11 | 240,357,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,668 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from multiprocessing import Process, Manager
from os import kill
from random import shuffle
from time import time, sleep
import pandas as pd
from ._loop import _loop
from ._memory_check import _memory_check
from ._timer import _timer
###############################################################################
###############################################################################
###############################################################################
# Make a function for the actual matching.
def DeDup(full, idvar, exact, nomismatch=None, fuzzy=None, strthresh=0.9,
          numthresh=1, weight=1, allowmiss=False, disp=1, cores=1):
    '''
    A function to identify duplicates within a Pandas DataFrame.


    Parameters
    ----------
    full : Pandas DataFrame
        The full dataset in which we want to eliminate duplicates.
    idvar : str
        The column name of a column which uniquely identifies each row.
    exact : list
        A list of columns on which the algorithm matches exactly.
    nomismatch : list or None
        A list of columns requiring no discrepancy in nonmissing values, but
        it will allow a match between missing and a value. None means [].
    fuzzy : list or None
        A list of columns requiring a fuzzy match, with the threshold
        specified by strthresh or numthresh, depending on the column type.
        None means [].
    strthresh : float or dict
        The threshold for Jaro-Winkler score below which the rows are not a
        match. If a dictionary is passed, it must have one entry per fuzzy
        column specifying the threshold for each.
    numthresh : float
        The threshold for numeric variables absolute difference outside of
        which the rows are not a match.
    weight : float or dict
        The weight for each column to be applied in calculating the score.
    allowmiss : bool
        Allow a mismatch in fuzzy due to missing values.
    disp : int or None
        The number of seconds between each update of the printed output in the
        console. If None there will be no printed progress in the console.
    cores : int
        The number of process to run simultaneously.


    Returns
    -------
    A Pandas DataFrame with two columns, the first with a copy of "idvar" and
    the second with a set containing all of the values of "idvar" that match
    with the given row.
    '''
    # Fix for the mutable-default-argument pitfall: the defaults were `[]`,
    # which is shared across calls; use None sentinels instead.
    nomismatch = [] if nomismatch is None else nomismatch
    fuzzy = [] if fuzzy is None else fuzzy

    # Keep only relevant columns and copy the DataFrame.
    cols = list(set(exact + nomismatch + fuzzy + [idvar]))
    full = full[cols].copy()

    # Collapse all 'exact' columns into one dense integer block id: rows in
    # the same block agree on every exact column. Rows missing an exact
    # value are dropped since they cannot be blocked.
    full['__exact__'] = ''
    for col in exact:
        full[col] = full[col].fillna('').astype(str)
        full = full.loc[full[col] != '', :]
        full['__exact__'] += ',' + full[col]
        del full[col]

    full['__exact__'] = full['__exact__'].rank(method='dense', na_option='top').astype(int)

    # Keep only blocks with at least two rows (singletons have no duplicates).
    vals = list(full['__exact__'].value_counts().reset_index()\
        .query('index != "," and __exact__ > 1')['index'])

    # Split up the unique values into the number of designated cores.
    # Make a list of empty lists.
    splitvals = [[] for _ in range(cores)]

    # Round-robin the blocks over processes: the longest-running blocks are
    # ordered first, so this spreads them across workers.
    _temp = pd.DataFrame(vals, columns=['n']).reset_index()
    _temp['index'] = _temp['index'] % cores

    for num in range(cores):
        splitvals[num] = list(_temp.loc[_temp['index'] == num, 'n'])

    del _temp

    # Give extra values to the last processes as they will be done the fastest.
    for num in range(len(splitvals)):
        if num + 1 > len(splitvals)/2:
            break

        if len(splitvals[-1 - num]) < len(splitvals[num]):
            splitvals[-1 - num].append(splitvals[num][-1])
            del splitvals[num][-1]
        else:
            break

    # Set up the shared state used to communicate with the processes.
    manager = Manager()
    output = manager.dict()
    progress = manager.dict()
    processes = manager.list()

    # Start the memory watchdog; it flags 'end' in `output` when usage is high.
    mem = Process(target=_memory_check, args=(processes, output), daemon=True)
    mem.start()

    # Mark the start time.
    start_time = time()

    # Start the number of processes requested.
    all_procs = []
    full['__id__'] = full[idvar]

    for proc in range(1, cores+1):
        # Initialize the process.
        shuffle(splitvals[proc-1]) # This will give more acurate timing
        p = Process(target=_loop, args=(full.loc[full['__exact__']\
                                                 .isin(splitvals[proc-1])\
                                                 .copy()],
                                        splitvals[proc-1], [idvar, '__id__'],
                                        proc, output, progress, nomismatch,
                                        fuzzy, '', strthresh, numthresh,
                                        weight, allowmiss, None, True))
        p.start()

        print('\n\nStarted Process', proc)

        # Save the processes in a list.
        all_procs.append(p)
        processes.append(p.pid)

        # Break if memory is too high.
        if 'end' in output.keys():
            raise MemoryError('Memory Usage Too High, Exiting...')

        # Drop the associated values from full so as to not double down on the
        # memory usage.
        full = full.loc[~(full['__exact__'].isin(splitvals[proc-1])), :]

    # Start a process to track and print remaining time.
    if disp is not None:
        timer = Process(target=_timer,
                        args=(progress, cores, start_time, disp), daemon=True)
        # Fix: record the pid AFTER start() -- Process.pid is None until the
        # process has been started, so the old order stored None.
        timer.start()
        processes.append(timer.pid)

    # Wait for all processes to finish, make sure they all end in case of user
    # stopping the program.
    for p in all_procs:
        try:
            p.join()
        except KeyboardInterrupt:
            for proc in processes:
                try:
                    kill(proc, 9)
                    print('Killed', proc)
                except:
                    pass

            raise KeyboardInterrupt

    # Break if processes ended because memory was too high.
    if 'end' in output.keys():
        raise MemoryError('Memory Usage Too High, Exiting...')

    # Terminate the timer and memory check.
    if disp is not None:
        sleep(disp*2)
        timer.terminate()

    print('Cleaning up the output....')

    # Collect the output.
    matched = dict()
    for proc in range(1, cores+1):
        if len(output['matched{0}'.format(proc)]) > 0:
            matched.update(output['matched{0}'.format(proc)])

    # Make a DataFrame for the results and rename columns.
    matched = pd.DataFrame.from_dict(matched, orient='index').reset_index()
    if len(matched) > 0:
        matched.columns = [idvar, 'duplicates']

    # Get rows that originally were in their own block, so were not matched;
    # they trivially match only themselves.
    if len(full) > 0:
        temp = full.apply(lambda x: [x[idvar], set([x[idvar]])],\
                          axis=1, result_type='expand')
        temp.columns = [idvar, 'duplicates']
        # Fix: pd.concat replaces DataFrame.append, which was removed in
        # pandas 2.0; the result is identical.
        matched = pd.concat([matched, temp], ignore_index=True)

    # Return the DataFrame.
    return matched
| [
"teastmon@ucsd.edu"
] | teastmon@ucsd.edu |
90cda2194aab9ac1aa8576288440859c5c54fbbc | dd19ab77e237a80e32b25247b514b34bd99e22be | /scripts/build_seed2uniref_mappings_table.py | fb67bcf214f2634c92557ef09c94e337836eef84 | [] | no_license | aekazakov/seed2kegg | 54d65ab13c660a71c21dfd4dc1426fad9ecbb76c | b41aeda511d017eefe75231121c86d77fa796c55 | refs/heads/master | 2021-09-11T00:03:37.866608 | 2018-04-04T16:21:11 | 2018-04-04T16:21:11 | 125,553,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,906 | py | #!/usr/bin/python
import sys
import os.path
import sqlite3
import argparse
from context import seed2kegg
from seed2kegg import db_utils
from seed2kegg import seed_data_util
from seed2kegg import data_analysis
def get_args():
    """Parse command-line options for the SEED-to-UniRef mapping script.

    Prints the usage text and exits with status 1 when no arguments are
    supplied on the command line.
    """
    desc = '''This script builds a table of SEED genes mapped to UniRef.
    First, it compares hash values of SEED proteins with those of UniRef
    proteins and finds identical sequences. All found links are stored in the
    seed2uniref_mappings table.
    Second, it imports DIAMOND output from the search of SEED proteins
    in the UniRef DB and adds high homologies into the
    seed2uniref_mappings table.'''
    parser = argparse.ArgumentParser(description=desc)
    for option, helptext in (
        ('--seed_db', 'SEED sqlite DB path'),
        ('--uniref_db', 'UniRef sqlite DB path'),
        ('--diamond_out', 'Diamond output file path'),
    ):
        parser.add_argument(option, help=helptext)
    args = parser.parse_args()
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return args
def main():
    """Rebuild the seed2uniref_mappings table in the SEED sqlite database.

    Steps, in order: open the SEED DB and attach the UniRef DB, drop and
    recreate the mappings table, fill it from identical-hash matches and
    from DIAMOND homology hits, then commit and close.
    """
    args = get_args()

    # Open database (the UniRef DB is attached under the name 'uniref_proteins').
    conn = db_utils.connect_local_database(args.seed_db)
    c = conn.cursor()
    db_utils.attach_local_database(c, args.uniref_db, 'uniref_proteins')

    # Prepare database: drop must happen before create so the table is fresh.
    print ('Drop seed2uniref_mappings table...')
    seed_data_util.drop_seed2uniref_mappings_table(c)
    print ('Create seed2uniref_mappings table...')
    seed_data_util.create_seed2uniref_mappings_table(c)

    # Import data
    print ('Find genes with identical hashes...')
    data_analysis.find_seed2uniref_identical_mappings(c)
    print ('Get genes from DIAMOND output...')
    # 95.0 / 5 are the identity-percentage and hit-count cutoffs passed to
    # the loader -- confirm against seed_data_util before changing.
    seed_data_util.load_diamond_search_results(c,args.diamond_out, 95.0, 5)

    # Write changes and close database
    print ('Saving database...',end='')
    conn.commit()
    conn.close()
    print ('done.')
if __name__=='__main__':
main()
| [
"aekazakov@iseq.lbl.gov"
] | aekazakov@iseq.lbl.gov |
96ffb8e963ba918d639eb80f573bf228bf4480a2 | c0eb3d8b8f7f25ba822da61db89c8b80203ff3aa | /deps/edk2/BaseTools/Source/Python/GenFds/FdfParser.py | 83d3e1935e371e179e5dc73d615f5910060b5668 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | chanuei/LiBoot | 95ace2b8b239b706f51967cd7376aa5c68246ff8 | 096e8752e0a9145e6c70fe809b51ff87d7a90383 | refs/heads/master | 2021-01-14T09:42:15.816352 | 2015-08-21T16:08:03 | 2015-08-21T16:08:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194,375 | py | ## @file
# parse FDF file
#
# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import re
import Fd
import Region
import Fv
import AprioriSection
import FfsInfStatement
import FfsFileStatement
import VerSection
import UiSection
import FvImageSection
import DataSection
import DepexSection
import CompressSection
import GuidSection
import Capsule
import CapsuleData
import Rule
import RuleComplexFile
import RuleSimpleFile
import EfiSection
import Vtf
import ComponentStatement
import OptionRom
import OptRomInfStatement
import OptRomFileStatement
from GenFdsGlobalVariable import GenFdsGlobalVariable
from Common.BuildToolError import *
from Common import EdkLogger
from Common.Misc import PathClass
from Common.String import NormPath
import Common.GlobalData as GlobalData
from Common.Expression import *
from Common import GlobalData
from Common.String import ReplaceMacro
from Common.Misc import tdict
import re
import Common.LongFilePathOs as os
from Common.LongFilePathSupport import OpenLongFilePath as open
##define T_CHAR_SPACE ' '
##define T_CHAR_NULL '\0'
##define T_CHAR_CR '\r'
##define T_CHAR_TAB '\t'
##define T_CHAR_LF '\n'
##define T_CHAR_SLASH '/'
##define T_CHAR_BACKSLASH '\\'
##define T_CHAR_DOUBLE_QUOTE '\"'
##define T_CHAR_SINGLE_QUOTE '\''
##define T_CHAR_STAR '*'
##define T_CHAR_HASH '#'
(T_CHAR_SPACE, T_CHAR_NULL, T_CHAR_CR, T_CHAR_TAB, T_CHAR_LF, T_CHAR_SLASH, \
T_CHAR_BACKSLASH, T_CHAR_DOUBLE_QUOTE, T_CHAR_SINGLE_QUOTE, T_CHAR_STAR, T_CHAR_HASH) = \
(' ', '\0', '\r', '\t', '\n', '/', '\\', '\"', '\'', '*', '#')
SEPERATOR_TUPLE = ('=', '|', ',', '{', '}')
RegionSizePattern = re.compile("\s*(?P<base>(?:0x|0X)?[a-fA-F0-9]+)\s*\|\s*(?P<size>(?:0x|0X)?[a-fA-F0-9]+)\s*")
RegionSizeGuidPattern = re.compile("\s*(?P<base>\w+\.\w+)\s*\|\s*(?P<size>\w+\.\w+)\s*")
RegionOffsetPcdPattern = re.compile("\s*(?P<base>\w+\.\w+)\s*$")
ShortcutPcdPattern = re.compile("\s*\w+\s*=\s*(?P<value>(?:0x|0X)?[a-fA-F0-9]+)\s*\|\s*(?P<name>\w+\.\w+)\s*")
IncludeFileList = []

## Map a line in the flattened (include-expanded) buffer back to its source.
#
#  Walks the recorded include-file profiles in order. A line that falls
#  inside an inserted region is reported relative to the included file;
#  otherwise every fully-preceding inserted region's line count is
#  subtracted to recover the position within File.
#
def GetRealFileLine (File, Line):
    InsertedLines = 0
    for IncludedFile in IncludeFileList:
        RegionStart = IncludedFile.InsertStartLineNumber
        RegionEnd = RegionStart + IncludedFile.InsertAdjust + len(IncludedFile.FileLinesList)
        if RegionStart <= Line < RegionEnd:
            return (IncludedFile.FileName, Line - RegionStart + 1)
        if Line >= RegionEnd:
            InsertedLines += IncludedFile.InsertAdjust + len(IncludedFile.FileLinesList)

    return (File, Line - InsertedLines)
## The exception class that used to report error messages when parsing FDF
#
# Currently the "ToolName" is set to be "FDF Parser".
#
class Warning (Exception):
    """Parse-time error raised while reading an FDF file.

    Carries the original file name and line number (translated back through
    any !include expansion) plus the message; ToolName is fixed to
    'FdfParser'.
    """

    def __init__(self, Str, File = None, Line = None):
        # Map the position in the flattened buffer back to the original
        # (file, line) pair, accounting for spliced-in include files.
        RealFile, RealLine = GetRealFileLine(File, Line)
        self.FileName = RealFile
        self.LineNumber = RealLine
        self.Message = Str
        self.ToolName = 'FdfParser'

    def __str__(self):
        return self.Message
## The MACRO class that used to record macro value data when parsing include file
#
#
class MacroProfile :
    """Record of a macro definition encountered while parsing an include file."""

    def __init__(self, FileName, Line):
        self.FileName = FileName       # file in which the macro was defined
        self.DefinedAtLine = Line      # line number of the definition
        # Name/value are filled in later by the parser.
        self.MacroName = None
        self.MacroValue = None
## The Include file content class that used to record file data when parsing include file
#
# May raise Exception when opening file.
#
class IncludeFileProfile :
    """Cached contents of an !include'd file plus splice bookkeeping.

    Reads the whole file as raw lines on construction; an open/read failure
    is reported through EdkLogger. InsertStartLineNumber / InsertAdjust are
    filled in when the file is spliced into the parent buffer.
    """

    def __init__(self, FileName):
        self.FileName = FileName
        self.FileLinesList = []
        try:
            with open(FileName, "rb", 0) as IncludeFile:
                self.FileLinesList = IncludeFile.readlines()
        except:
            EdkLogger.error("FdfParser", FILE_OPEN_FAILURE, ExtraData=FileName)

        self.InsertStartLineNumber = None
        self.InsertAdjust = 0
## The FDF content class that used to record file data when parsing FDF
#
# May raise Exception when opening file.
#
class FileProfile :
    """Raw FDF file contents plus every artifact collected while parsing.

    Reads the whole file as raw lines on construction; an open/read failure
    is reported through EdkLogger. The dictionaries/lists below are filled
    in by FdfParser as the corresponding sections are parsed.
    """

    def __init__(self, FileName):
        self.FileLinesList = []
        try:
            with open(FileName, "rb", 0) as FdfFile:
                self.FileLinesList = FdfFile.readlines()
        except:
            EdkLogger.error("FdfParser", FILE_OPEN_FAILURE, ExtraData=FileName)

        self.PcdDict = {}
        self.InfList = []
        # ECC will use this Dict and List information
        self.PcdFileLineDict = {}
        self.InfFileLineList = []

        self.FdDict = {}
        self.FdNameNotSet = False     # True when an FD section had no name
        self.FvDict = {}
        self.CapsuleDict = {}
        self.VtfList = []
        self.RuleDict = {}
        self.OptRomDict = {}
        self.FmpPayloadDict = {}
## The syntax parser for FDF
#
# PreprocessFile method should be called prior to ParseFile
# CycleReferenceCheck method can detect cycles in FDF contents
#
# GetNext*** procedures mean these procedures will get next token first, then make judgement.
# Get*** procedures mean these procedures will make judgement on current token only.
#
class FdfParser:
## The constructor
#
# @param self The object pointer
# @param FileName The file that to be parsed
#
    def __init__(self, FileName):
        """Load FileName into a FileProfile and reset the parse cursor.

        Also registers this parser on GlobalData.gFdfParser and, when
        GenFdsGlobalVariable.WorkSpaceDir is unset, fills it from the
        WORKSPACE environment variable.
        """
        self.Profile = FileProfile(FileName)
        self.FileName = FileName
        # Cursor position: 1-based line number, 0-based offset within line.
        self.CurrentLineNumber = 1
        self.CurrentOffsetWithinLine = 0
        self.CurrentFdName = None
        self.CurrentFvName = None
        self.__Token = ""
        self.__SkippedChars = ""
        GlobalData.gFdfParser = self

        # Used to section info
        self.__CurSection = []
        # Key: [section name, UI name, arch]
        # Value: {MACRO_NAME : MACRO_VALUE}
        self.__MacroDict = tdict(True, 3)
        self.__PcdDict = {}
        # Regions of the buffer to blank out (e.g. failed !if branches).
        self.__WipeOffArea = []
        if GenFdsGlobalVariable.WorkSpaceDir == '':
            GenFdsGlobalVariable.WorkSpaceDir = os.getenv("WORKSPACE")
## __IsWhiteSpace() method
#
# Whether char at current FileBufferPos is whitespace
#
# @param self The object pointer
# @param Char The char to test
# @retval True The char is a kind of white space
# @retval False The char is NOT a kind of white space
#
def __IsWhiteSpace(self, Char):
if Char in (T_CHAR_NULL, T_CHAR_CR, T_CHAR_SPACE, T_CHAR_TAB, T_CHAR_LF):
return True
else:
return False
## __SkipWhiteSpace() method
#
# Skip white spaces from current char, return number of chars skipped
#
# @param self The object pointer
# @retval Count The number of chars skipped
#
def __SkipWhiteSpace(self):
Count = 0
while not self.__EndOfFile():
Count += 1
if self.__CurrentChar() in (T_CHAR_NULL, T_CHAR_CR, T_CHAR_LF, T_CHAR_SPACE, T_CHAR_TAB):
self.__SkippedChars += str(self.__CurrentChar())
self.__GetOneChar()
else:
Count = Count - 1
return Count
## __EndOfFile() method
#
# Judge current buffer pos is at file end
#
# @param self The object pointer
# @retval True Current File buffer position is at file end
# @retval False Current File buffer position is NOT at file end
#
def __EndOfFile(self):
NumberOfLines = len(self.Profile.FileLinesList)
SizeOfLastLine = len(self.Profile.FileLinesList[-1])
if self.CurrentLineNumber == NumberOfLines and self.CurrentOffsetWithinLine >= SizeOfLastLine - 1:
return True
elif self.CurrentLineNumber > NumberOfLines:
return True
else:
return False
## __EndOfLine() method
#
# Judge current buffer pos is at line end
#
# @param self The object pointer
# @retval True Current File buffer position is at line end
# @retval False Current File buffer position is NOT at line end
#
def __EndOfLine(self):
if self.CurrentLineNumber > len(self.Profile.FileLinesList):
return True
SizeOfCurrentLine = len(self.Profile.FileLinesList[self.CurrentLineNumber - 1])
if self.CurrentOffsetWithinLine >= SizeOfCurrentLine:
return True
else:
return False
## Rewind() method
#
# Reset file data buffer to the initial state
#
# @param self The object pointer
#
    def Rewind(self):
        """Reset the cursor to the first character of the buffer."""
        self.CurrentLineNumber = 1
        self.CurrentOffsetWithinLine = 0
## __UndoOneChar() method
#
# Go back one char in the file buffer
#
# @param self The object pointer
# @retval True Successfully go back one char
# @retval False Not able to go back one char as file beginning reached
#
def __UndoOneChar(self):
if self.CurrentLineNumber == 1 and self.CurrentOffsetWithinLine == 0:
return False
elif self.CurrentOffsetWithinLine == 0:
self.CurrentLineNumber -= 1
self.CurrentOffsetWithinLine = len(self.__CurrentLine()) - 1
else:
self.CurrentOffsetWithinLine -= 1
return True
## __GetOneChar() method
#
# Move forward one char in the file buffer
#
# @param self The object pointer
#
def __GetOneChar(self):
if self.CurrentOffsetWithinLine == len(self.Profile.FileLinesList[self.CurrentLineNumber - 1]) - 1:
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
else:
self.CurrentOffsetWithinLine += 1
## __CurrentChar() method
#
# Get the char pointed to by the file buffer pointer
#
# @param self The object pointer
# @retval Char Current char
#
def __CurrentChar(self):
return self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine]
## __NextChar() method
#
# Get the one char pass the char pointed to by the file buffer pointer
#
# @param self The object pointer
# @retval Char Next char
#
def __NextChar(self):
if self.CurrentOffsetWithinLine == len(self.Profile.FileLinesList[self.CurrentLineNumber - 1]) - 1:
return self.Profile.FileLinesList[self.CurrentLineNumber][0]
else:
return self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine + 1]
## __SetCurrentCharValue() method
#
# Modify the value of current char
#
# @param self The object pointer
# @param Value The new value of current char
#
def __SetCurrentCharValue(self, Value):
self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine] = Value
## __CurrentLine() method
#
# Get the list that contains current line contents
#
# @param self The object pointer
# @retval List current line contents
#
def __CurrentLine(self):
return self.Profile.FileLinesList[self.CurrentLineNumber - 1]
def __StringToList(self):
self.Profile.FileLinesList = [list(s) for s in self.Profile.FileLinesList]
self.Profile.FileLinesList[-1].append(' ')
    ## __ReplaceFragment() method
    #
    #   Wipe the buffer fragment between StartPos and EndPos (inclusive) by
    #   overwriting each char with Value (a space by default). Positions are
    #   (0-based row, column) pairs into Profile.FileLinesList, whose rows are
    #   char lists at this stage (see __StringToList). Line terminators
    #   ('\r'/'\n') are left untouched so line structure is preserved.
    #
    #   @param  self        The object pointer
    #   @param  StartPos    (row, col) of the first char to wipe
    #   @param  EndPos      (row, col) of the last char to wipe
    #   @param  Value       Replacement char, default is a space
    #
    def __ReplaceFragment(self, StartPos, EndPos, Value = ' '):
        # Fragment confined to one row: wipe the inclusive column range and return
        if StartPos[0] == EndPos[0]:
            Offset = StartPos[1]
            while Offset <= EndPos[1]:
                self.Profile.FileLinesList[StartPos[0]][Offset] = Value
                Offset += 1
            return
        # First row: wipe from the start column up to the line terminator
        Offset = StartPos[1]
        while self.Profile.FileLinesList[StartPos[0]][Offset] not in ('\r', '\n'):
            self.Profile.FileLinesList[StartPos[0]][Offset] = Value
            Offset += 1
        # NOTE(review): this loop starts at StartPos[0], so the first row is wiped
        # again from column 0 (including chars before StartPos[1]) — confirm this
        # full-row wipe of the start line is intended.
        Line = StartPos[0]
        while Line < EndPos[0]:
            Offset = 0
            while self.Profile.FileLinesList[Line][Offset] not in ('\r', '\n'):
                self.Profile.FileLinesList[Line][Offset] = Value
                Offset += 1
            Line += 1
        # Last row: wipe from column 0 through the end column
        Offset = 0
        while Offset <= EndPos[1]:
            self.Profile.FileLinesList[EndPos[0]][Offset] = Value
            Offset += 1
def __GetMacroName(self):
if not self.__GetNextToken():
raise Warning("expected Macro name", self.FileName, self.CurrentLineNumber)
MacroName = self.__Token
NotFlag = False
if MacroName.startswith('!'):
NotFlag = True
MacroName = MacroName[1:].strip()
if not MacroName.startswith('$(') or not MacroName.endswith(')'):
raise Warning("Macro name expected(Please use '$(%(Token)s)' if '%(Token)s' is a macro.)" % {"Token" : MacroName},
self.FileName, self.CurrentLineNumber)
MacroName = MacroName[2:-1]
return MacroName, NotFlag
def __SetMacroValue(self, Macro, Value):
if not self.__CurSection:
return
MacroDict = {}
if not self.__MacroDict[self.__CurSection[0], self.__CurSection[1], self.__CurSection[2]]:
self.__MacroDict[self.__CurSection[0], self.__CurSection[1], self.__CurSection[2]] = MacroDict
else:
MacroDict = self.__MacroDict[self.__CurSection[0], self.__CurSection[1], self.__CurSection[2]]
MacroDict[Macro] = Value
def __GetMacroValue(self, Macro):
# Highest priority
if Macro in GlobalData.gCommandLineDefines:
return GlobalData.gCommandLineDefines[Macro]
if Macro in GlobalData.gGlobalDefines:
return GlobalData.gGlobalDefines[Macro]
if self.__CurSection:
MacroDict = self.__MacroDict[
self.__CurSection[0],
self.__CurSection[1],
self.__CurSection[2]
]
if MacroDict and Macro in MacroDict:
return MacroDict[Macro]
# Lowest priority
if Macro in GlobalData.gPlatformDefines:
return GlobalData.gPlatformDefines[Macro]
return None
def __SectionHeaderParser(self, Section):
# [Defines]
# [FD.UiName]: use dummy instead if UI name is optional
# [FV.UiName]
# [Capsule.UiName]
# [Rule]: don't take rule section into account, macro is not allowed in this section
# [VTF.arch.UiName, arch]
# [OptionRom.DriverName]
self.__CurSection = []
Section = Section.strip()[1:-1].upper().replace(' ', '').strip('.')
ItemList = Section.split('.')
Item = ItemList[0]
if Item == '' or Item == 'RULE':
return
if Item == 'DEFINES':
self.__CurSection = ['COMMON', 'COMMON', 'COMMON']
elif Item == 'VTF' and len(ItemList) == 3:
UiName = ItemList[2]
Pos = UiName.find(',')
if Pos != -1:
UiName = UiName[:Pos]
self.__CurSection = ['VTF', UiName, ItemList[1]]
elif len(ItemList) > 1:
self.__CurSection = [ItemList[0], ItemList[1], 'COMMON']
elif len(ItemList) > 0:
self.__CurSection = [ItemList[0], 'DUMMY', 'COMMON']
    ## PreprocessFile() method
    #
    #   Preprocess file contents, replace comments with spaces.
    #   Handles three comment styles: // line comments, '#' line comments
    #   (ignored inside a double-quoted string), and /* */ block comments.
    #   Comment chars are overwritten with spaces so token positions and line
    #   numbers are preserved.
    #   In the end, rewind the file buffer pointer to the beginning
    #   BUGBUG: No !include statement processing contained in this procedure
    #   !include statement should be expanded at the same FileLinesList[CurrentLineNumber - 1]
    #
    #   @param  self        The object pointer
    #
    def PreprocessFile(self):
        self.Rewind()
        InComment = False
        DoubleSlashComment = False
        HashComment = False
        # HashComment in quoted string " " is ignored.
        InString = False
        while not self.__EndOfFile():
            # Toggle string state on every unquoted double quote
            if self.__CurrentChar() == T_CHAR_DOUBLE_QUOTE and not InComment:
                InString = not InString
            # meet new line, then no longer in a comment for // and '#'
            if self.__CurrentChar() == T_CHAR_LF:
                self.CurrentLineNumber += 1
                self.CurrentOffsetWithinLine = 0
                if InComment and DoubleSlashComment:
                    InComment = False
                    DoubleSlashComment = False
                if InComment and HashComment:
                    InComment = False
                    HashComment = False
            # check for */ comment end
            elif InComment and not DoubleSlashComment and not HashComment and self.__CurrentChar() == T_CHAR_STAR and self.__NextChar() == T_CHAR_SLASH:
                self.__SetCurrentCharValue(T_CHAR_SPACE)
                self.__GetOneChar()
                self.__SetCurrentCharValue(T_CHAR_SPACE)
                self.__GetOneChar()
                InComment = False
            # set comments to spaces
            elif InComment:
                self.__SetCurrentCharValue(T_CHAR_SPACE)
                self.__GetOneChar()
            # check for // comment
            elif self.__CurrentChar() == T_CHAR_SLASH and self.__NextChar() == T_CHAR_SLASH and not self.__EndOfLine():
                InComment = True
                DoubleSlashComment = True
            # check for '#' comment
            elif self.__CurrentChar() == T_CHAR_HASH and not self.__EndOfLine() and not InString:
                InComment = True
                HashComment = True
            # check for /* comment start
            elif self.__CurrentChar() == T_CHAR_SLASH and self.__NextChar() == T_CHAR_STAR:
                self.__SetCurrentCharValue( T_CHAR_SPACE)
                self.__GetOneChar()
                self.__SetCurrentCharValue( T_CHAR_SPACE)
                self.__GetOneChar()
                InComment = True
            else:
                self.__GetOneChar()
        # restore from ListOfList to ListOfString
        self.Profile.FileLinesList = ["".join(list) for list in self.Profile.FileLinesList]
        self.Rewind()
    ## PreprocessIncludeFile() method
    #
    #   Preprocess file contents, replace !include statements with file contents.
    #   The include path may start with $(WORKSPACE), $(ECP_SOURCE), $(EFI_SOURCE)
    #   or $(EDK_SOURCE); the file is searched for relative to the FDF file's
    #   directory, then the active platform's directory, then the workspace.
    #   The included lines are spliced into Profile.FileLinesList in place and
    #   the processed !include statement itself is commented out with '#'.
    #   In the end, rewind the file buffer pointer to the beginning
    #
    #   @param  self        The object pointer
    #
    def PreprocessIncludeFile(self):
        while self.__GetNextToken():
            if self.__Token == '!include':
                # Remember where the directive starts so it can be commented out later
                IncludeLine = self.CurrentLineNumber
                IncludeOffset = self.CurrentOffsetWithinLine - len('!include')
                if not self.__GetNextToken():
                    raise Warning("expected include file name", self.FileName, self.CurrentLineNumber)
                IncFileName = self.__Token
                __IncludeMacros = {}
                for Macro in ['WORKSPACE', 'ECP_SOURCE', 'EFI_SOURCE', 'EDK_SOURCE']:
                    MacroVal = self.__GetMacroValue(Macro)
                    if MacroVal:
                        __IncludeMacros[Macro] = MacroVal
                try:
                    IncludedFile = NormPath(ReplaceMacro(IncFileName, __IncludeMacros, RaiseError=True))
                except:
                    raise Warning("only these system environment variables are permitted to start the path of the included file: "
                                  "$(WORKSPACE), $(ECP_SOURCE), $(EFI_SOURCE), $(EDK_SOURCE)",
                                  self.FileName, self.CurrentLineNumber)
                #
                # First search the include file under the same directory as FDF file
                #
                IncludedFile1 = PathClass(IncludedFile, os.path.dirname(self.FileName))
                ErrorCode = IncludedFile1.Validate()[0]
                if ErrorCode != 0:
                    #
                    # Then search the include file under the same directory as DSC file
                    #
                    PlatformDir = ''
                    if GenFdsGlobalVariable.ActivePlatform:
                        PlatformDir = GenFdsGlobalVariable.ActivePlatform.Dir
                    elif GlobalData.gActivePlatform:
                        PlatformDir = GlobalData.gActivePlatform.MetaFile.Dir
                    IncludedFile1 = PathClass(IncludedFile, PlatformDir)
                    ErrorCode = IncludedFile1.Validate()[0]
                    if ErrorCode != 0:
                        #
                        # Also search file under the WORKSPACE directory
                        #
                        IncludedFile1 = PathClass(IncludedFile, GlobalData.gWorkspace)
                        ErrorCode = IncludedFile1.Validate()[0]
                        if ErrorCode != 0:
                            raise Warning("The include file does not exist under below directories: \n%s\n%s\n%s\n"%(os.path.dirname(self.FileName), PlatformDir, GlobalData.gWorkspace),
                                          self.FileName, self.CurrentLineNumber)
                IncFileProfile = IncludeFileProfile(IncludedFile1.Path)
                CurrentLine = self.CurrentLineNumber
                CurrentOffset = self.CurrentOffsetWithinLine
                # list index of the insertion, note that line number is 'CurrentLine + 1'
                InsertAtLine = CurrentLine
                IncFileProfile.InsertStartLineNumber = InsertAtLine + 1
                # deal with remaining portions after "!include filename", if exists.
                if self.__GetNextToken():
                    if self.CurrentLineNumber == CurrentLine:
                        # Split trailing text onto its own line so the include body
                        # can be inserted between the directive and that text
                        RemainingLine = self.__CurrentLine()[CurrentOffset:]
                        self.Profile.FileLinesList.insert(self.CurrentLineNumber, RemainingLine)
                        IncFileProfile.InsertAdjust += 1
                        self.CurrentLineNumber += 1
                        self.CurrentOffsetWithinLine = 0
                for Line in IncFileProfile.FileLinesList:
                    self.Profile.FileLinesList.insert(InsertAtLine, Line)
                    self.CurrentLineNumber += 1
                    InsertAtLine += 1
                IncludeFileList.append(IncFileProfile)
                # comment out the processed include file statement
                TempList = list(self.Profile.FileLinesList[IncludeLine - 1])
                TempList.insert(IncludeOffset, '#')
                self.Profile.FileLinesList[IncludeLine - 1] = ''.join(TempList)
        self.Rewind()
def __GetIfListCurrentItemStat(self, IfList):
if len(IfList) == 0:
return True
for Item in IfList:
if Item[1] == False:
return False
return True
    ## PreprocessConditionalStatement() method
    #
    #   Preprocess conditional statement.
    #   Walks the whole buffer handling section headers, macro replacement,
    #   DEFINE/SET statements and the !ifdef/!ifndef/!if/!elseif/!else/!endif
    #   directives. Inactive regions and processed directives are queued in
    #   self.__WipeOffArea to be blanked out later (see Preprocess).
    #   In the end, rewind the file buffer pointer to the beginning
    #
    #   @param  self        The object pointer
    #
    def PreprocessConditionalStatement(self):
        # IfList is a stack of if branches with elements of list [Pos, CondSatisfied, BranchDetermined]
        IfList = []
        RegionLayoutLine = 0
        ReplacedLine = -1
        while self.__GetNextToken():
            # Determine section name and the location dependent macro
            if self.__GetIfListCurrentItemStat(IfList):
                if self.__Token.startswith('['):
                    Header = self.__Token
                    if not self.__Token.endswith(']'):
                        self.__SkipToToken(']')
                        Header += self.__SkippedChars
                    if Header.find('$(') != -1:
                        raise Warning("macro cannot be used in section header", self.FileName, self.CurrentLineNumber)
                    self.__SectionHeaderParser(Header)
                    continue
                # Replace macros except in RULE section or out of section
                elif self.__CurSection and ReplacedLine != self.CurrentLineNumber:
                    ReplacedLine = self.CurrentLineNumber
                    self.__UndoToken()
                    CurLine = self.Profile.FileLinesList[ReplacedLine - 1]
                    PreIndex = 0
                    StartPos = CurLine.find('$(', PreIndex)
                    EndPos = CurLine.find(')', StartPos+2)
                    # Expand every $(MACRO) on the line; a macro expanding to
                    # another $(...) is rescanned from the same position
                    while StartPos != -1 and EndPos != -1 and self.__Token not in ['!ifdef', '!ifndef', '!if', '!elseif']:
                        MacroName = CurLine[StartPos+2 : EndPos]
                        MacorValue = self.__GetMacroValue(MacroName)
                        if MacorValue != None:
                            CurLine = CurLine.replace('$(' + MacroName + ')', MacorValue, 1)
                            if MacorValue.find('$(') != -1:
                                PreIndex = StartPos
                            else:
                                PreIndex = StartPos + len(MacorValue)
                        else:
                            PreIndex = EndPos + 1
                        StartPos = CurLine.find('$(', PreIndex)
                        EndPos = CurLine.find(')', StartPos+2)
                    self.Profile.FileLinesList[ReplacedLine - 1] = CurLine
                    continue
            if self.__Token == 'DEFINE':
                if self.__GetIfListCurrentItemStat(IfList):
                    if not self.__CurSection:
                        raise Warning("macro cannot be defined in Rule section or out of section", self.FileName, self.CurrentLineNumber)
                    DefineLine = self.CurrentLineNumber - 1
                    DefineOffset = self.CurrentOffsetWithinLine - len('DEFINE')
                    if not self.__GetNextToken():
                        raise Warning("expected Macro name", self.FileName, self.CurrentLineNumber)
                    Macro = self.__Token
                    if not self.__IsToken( "="):
                        raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
                    Value = self.__GetExpression()
                    self.__SetMacroValue(Macro, Value)
                    # The consumed DEFINE statement gets blanked out later
                    self.__WipeOffArea.append(((DefineLine, DefineOffset), (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
            elif self.__Token == 'SET':
                if not self.__GetIfListCurrentItemStat(IfList):
                    continue
                SetLine = self.CurrentLineNumber - 1
                SetOffset = self.CurrentOffsetWithinLine - len('SET')
                PcdPair = self.__GetNextPcdName()
                PcdName = "%s.%s" % (PcdPair[1], PcdPair[0])
                if not self.__IsToken( "="):
                    raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
                Value = self.__GetExpression()
                Value = self.__EvaluateConditional(Value, self.CurrentLineNumber, 'eval', True)
                self.__PcdDict[PcdName] = Value
                self.Profile.PcdDict[PcdPair] = Value
                FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
                self.Profile.PcdFileLineDict[PcdPair] = FileLineTuple
                self.__WipeOffArea.append(((SetLine, SetOffset), (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
            elif self.__Token in ('!ifdef', '!ifndef', '!if'):
                IfStartPos = (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len(self.__Token))
                IfList.append([IfStartPos, None, None])
                CondLabel = self.__Token
                Expression = self.__GetExpression()
                if CondLabel == '!if':
                    ConditionSatisfied = self.__EvaluateConditional(Expression, IfList[-1][0][0] + 1, 'eval')
                else:
                    # !ifdef/!ifndef only test macro existence
                    ConditionSatisfied = self.__EvaluateConditional(Expression, IfList[-1][0][0] + 1, 'in')
                    if CondLabel == '!ifndef':
                        ConditionSatisfied = not ConditionSatisfied
                BranchDetermined = ConditionSatisfied
                IfList[-1] = [IfList[-1][0], ConditionSatisfied, BranchDetermined]
                if ConditionSatisfied:
                    # Blank out only the directive itself; the branch body stays
                    self.__WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
            elif self.__Token in ('!elseif', '!else'):
                ElseStartPos = (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len(self.__Token))
                if len(IfList) <= 0:
                    raise Warning("Missing !if statement", self.FileName, self.CurrentLineNumber)
                if IfList[-1][1]:
                    # Previous branch was taken: this branch is dead
                    IfList[-1] = [ElseStartPos, False, True]
                    self.__WipeOffArea.append((ElseStartPos, (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
                else:
                    # Previous branch was not taken: wipe it and try this one
                    self.__WipeOffArea.append((IfList[-1][0], ElseStartPos))
                    IfList[-1] = [ElseStartPos, True, IfList[-1][2]]
                    if self.__Token == '!elseif':
                        Expression = self.__GetExpression()
                        ConditionSatisfied = self.__EvaluateConditional(Expression, IfList[-1][0][0] + 1, 'eval')
                        IfList[-1] = [IfList[-1][0], ConditionSatisfied, IfList[-1][2]]
                    if IfList[-1][1]:
                        if IfList[-1][2]:
                            # A previous branch already ran: deactivate this one
                            IfList[-1][1] = False
                        else:
                            IfList[-1][2] = True
                            self.__WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
            elif self.__Token == '!endif':
                if len(IfList) <= 0:
                    raise Warning("Missing !if statement", self.FileName, self.CurrentLineNumber)
                if IfList[-1][1]:
                    # Active branch: wipe just the !endif directive
                    self.__WipeOffArea.append(((self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len('!endif')), (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
                else:
                    # Inactive branch: wipe everything back to the branch start
                    self.__WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
                IfList.pop()
            elif not IfList:  # Don't use PCDs inside conditional directive
                if self.CurrentLineNumber <= RegionLayoutLine:
                    # Don't try the same line twice
                    continue
                SetPcd = ShortcutPcdPattern.match(self.Profile.FileLinesList[self.CurrentLineNumber - 1])
                if SetPcd:
                    self.__PcdDict[SetPcd.group('name')] = SetPcd.group('value')
                    RegionLayoutLine = self.CurrentLineNumber
                    continue
                RegionSize = RegionSizePattern.match(self.Profile.FileLinesList[self.CurrentLineNumber - 1])
                if not RegionSize:
                    RegionLayoutLine = self.CurrentLineNumber
                    continue
                # A region size line may be followed by a line naming the
                # base/size PCDs for that region
                RegionSizeGuid = RegionSizeGuidPattern.match(self.Profile.FileLinesList[self.CurrentLineNumber])
                if not RegionSizeGuid:
                    RegionLayoutLine = self.CurrentLineNumber + 1
                    continue
                self.__PcdDict[RegionSizeGuid.group('base')] = RegionSize.group('base')
                self.__PcdDict[RegionSizeGuid.group('size')] = RegionSize.group('size')
                RegionLayoutLine = self.CurrentLineNumber + 1
        if IfList:
            raise Warning("Missing !endif", self.FileName, self.CurrentLineNumber)
        self.Rewind()
def __CollectMacroPcd(self):
MacroDict = {}
# PCD macro
MacroDict.update(GlobalData.gPlatformPcds)
MacroDict.update(self.__PcdDict)
# Lowest priority
MacroDict.update(GlobalData.gPlatformDefines)
if self.__CurSection:
# Defines macro
ScopeMacro = self.__MacroDict['COMMON', 'COMMON', 'COMMON']
if ScopeMacro:
MacroDict.update(ScopeMacro)
# Section macro
ScopeMacro = self.__MacroDict[
self.__CurSection[0],
self.__CurSection[1],
self.__CurSection[2]
]
if ScopeMacro:
MacroDict.update(ScopeMacro)
MacroDict.update(GlobalData.gGlobalDefines)
MacroDict.update(GlobalData.gCommandLineDefines)
# Highest priority
return MacroDict
    ## __EvaluateConditional() method
    #
    #   Evaluate a conditional-directive expression against the collected
    #   macro/PCD dictionary.
    #   Op == 'eval' evaluates the expression with ValueExpression; any other
    #   Op performs a bare existence test (strip an optional $() wrapper, then
    #   check membership in the macro dictionary).
    #
    #   @param  self        The object pointer
    #   @param  Expression  Expression text to evaluate
    #   @param  Line        Line number used for diagnostics
    #   @param  Op          'eval' for full evaluation, anything else for 'in'
    #   @param  Value       Passed through to ValueExpression when truthy
    #   @retval Result      Evaluation result, or membership bool for 'in'
    #
    def __EvaluateConditional(self, Expression, Line, Op = None, Value = None):
        FileLineTuple = GetRealFileLine(self.FileName, Line)
        MacroPcdDict = self.__CollectMacroPcd()
        if Op == 'eval':
            try:
                if Value:
                    return ValueExpression(Expression, MacroPcdDict)(True)
                else:
                    return ValueExpression(Expression, MacroPcdDict)()
            except WrnExpression, Excpt:
                #
                # Catch expression evaluation warning here. We need to report
                # the precise number of line and return the evaluation result
                #
                EdkLogger.warn('Parser', "Suspicious expression: %s" % str(Excpt),
                                File=self.FileName, ExtraData=self.__CurrentLine(),
                                Line=Line)
                return Excpt.result
            except Exception, Excpt:
                # A 'Pcd' attribute on the exception identifies an unusable PCD
                if hasattr(Excpt, 'Pcd'):
                    if Excpt.Pcd in GlobalData.gPlatformOtherPcds:
                        Info = GlobalData.gPlatformOtherPcds[Excpt.Pcd]
                        raise Warning("Cannot use this PCD (%s) in an expression as"
                                      " it must be defined in a [PcdsFixedAtBuild] or [PcdsFeatureFlag] section"
                                      " of the DSC file (%s), and it is currently defined in this section:"
                                      " %s, line #: %d." % (Excpt.Pcd, GlobalData.gPlatformOtherPcds['DSCFILE'], Info[0], Info[1]),
                                      *FileLineTuple)
                    else:
                        raise Warning("PCD (%s) is not defined in DSC file (%s)" % (Excpt.Pcd, GlobalData.gPlatformOtherPcds['DSCFILE']),
                                      *FileLineTuple)
                else:
                    raise Warning(str(Excpt), *FileLineTuple)
        else:
            if Expression.startswith('$(') and Expression[-1] == ')':
                Expression = Expression[2:-1]
            return Expression in MacroPcdDict
## __IsToken() method
#
# Check whether input string is found from current char position along
# If found, the string value is put into self.__Token
#
# @param self The object pointer
# @param String The string to search
# @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
# @retval True Successfully find string, file buffer pointer moved forward
# @retval False Not able to find string, file buffer pointer not changed
#
def __IsToken(self, String, IgnoreCase = False):
self.__SkipWhiteSpace()
# Only consider the same line, no multi-line token allowed
StartPos = self.CurrentOffsetWithinLine
index = -1
if IgnoreCase:
index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].upper().find(String.upper())
else:
index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].find(String)
if index == 0:
self.CurrentOffsetWithinLine += len(String)
self.__Token = self.__CurrentLine()[StartPos : self.CurrentOffsetWithinLine]
return True
return False
## __IsKeyword() method
#
# Check whether input keyword is found from current char position along, whole word only!
# If found, the string value is put into self.__Token
#
# @param self The object pointer
# @param Keyword The string to search
# @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
# @retval True Successfully find string, file buffer pointer moved forward
# @retval False Not able to find string, file buffer pointer not changed
#
def __IsKeyword(self, KeyWord, IgnoreCase = False):
self.__SkipWhiteSpace()
# Only consider the same line, no multi-line token allowed
StartPos = self.CurrentOffsetWithinLine
index = -1
if IgnoreCase:
index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].upper().find(KeyWord.upper())
else:
index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].find(KeyWord)
if index == 0:
followingChar = self.__CurrentLine()[self.CurrentOffsetWithinLine + len(KeyWord)]
if not str(followingChar).isspace() and followingChar not in SEPERATOR_TUPLE:
return False
self.CurrentOffsetWithinLine += len(KeyWord)
self.__Token = self.__CurrentLine()[StartPos : self.CurrentOffsetWithinLine]
return True
return False
def __GetExpression(self):
Line = self.Profile.FileLinesList[self.CurrentLineNumber - 1]
Index = len(Line) - 1
while Line[Index] in ['\r', '\n']:
Index -= 1
ExpressionString = self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:Index+1]
self.CurrentOffsetWithinLine += len(ExpressionString)
ExpressionString = ExpressionString.strip()
return ExpressionString
## __GetNextWord() method
#
# Get next C name from file lines
# If found, the string value is put into self.__Token
#
# @param self The object pointer
# @retval True Successfully find a C name string, file buffer pointer moved forward
# @retval False Not able to find a C name string, file buffer pointer not changed
#
def __GetNextWord(self):
self.__SkipWhiteSpace()
if self.__EndOfFile():
return False
TempChar = self.__CurrentChar()
StartPos = self.CurrentOffsetWithinLine
if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') or TempChar == '_':
self.__GetOneChar()
while not self.__EndOfLine():
TempChar = self.__CurrentChar()
if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') \
or (TempChar >= '0' and TempChar <= '9') or TempChar == '_' or TempChar == '-':
self.__GetOneChar()
else:
break
self.__Token = self.__CurrentLine()[StartPos : self.CurrentOffsetWithinLine]
return True
return False
    ## __GetNextToken() method
    #
    #   Get next token unit before a seperator
    #   A token is either a run of non-space, non-separator chars, or a single
    #   separator char when the separator is the first char encountered.
    #   If found, the string value is put into self.__Token
    #
    #   @param  self        The object pointer
    #   @retval True        Successfully find a token unit, file buffer pointer moved forward
    #   @retval False       Not able to find a token unit, file buffer pointer not changed
    #
    def __GetNextToken(self):
        # Skip leading spaces, if exist.
        self.__SkipWhiteSpace()
        if self.__EndOfFile():
            return False
        # Record the token start position, the position of the first non-space char.
        StartPos = self.CurrentOffsetWithinLine
        StartLine = self.CurrentLineNumber
        while StartLine == self.CurrentLineNumber:
            TempChar = self.__CurrentChar()
            # Try to find the end char that is not a space and not in seperator tuple.
            # That is, when we got a space or any char in the tuple, we got the end of token.
            if not str(TempChar).isspace() and TempChar not in SEPERATOR_TUPLE:
                self.__GetOneChar()
            # if we happen to meet a seperator as the first char, we must proceed to get it.
            # That is, we get a token that is a seperator char. nomally it is the boundary of other tokens.
            elif StartPos == self.CurrentOffsetWithinLine and TempChar in SEPERATOR_TUPLE:
                self.__GetOneChar()
                break
            else:
                break
#        else:
#            return False
        # A token running to end of line wrapped the pointer to the next line;
        # clamp the end position back onto the start line in that case.
        EndPos = self.CurrentOffsetWithinLine
        if self.CurrentLineNumber != StartLine:
            EndPos = len(self.Profile.FileLinesList[StartLine-1])
        self.__Token = self.Profile.FileLinesList[StartLine-1][StartPos : EndPos]
        if StartPos != self.CurrentOffsetWithinLine:
            return True
        else:
            return False
    ## __GetNextOp() method
    #
    #   Get the next whitespace-delimited chunk on the current line (unlike
    #   __GetNextToken, separator chars do not end the chunk).
    #   If found, the string value is put into self.__Token
    #
    #   @param  self        The object pointer
    #   @retval True        Successfully find a chunk, file buffer pointer moved forward
    #   @retval False       Not able to find a chunk
    #
    def __GetNextOp(self):
        # Skip leading spaces, if exist.
        self.__SkipWhiteSpace()
        if self.__EndOfFile():
            return False
        # Record the token start position, the position of the first non-space char.
        StartPos = self.CurrentOffsetWithinLine
        while not self.__EndOfLine():
            TempChar = self.__CurrentChar()
            # Try to find the end char that is not a space
            if not str(TempChar).isspace():
                self.__GetOneChar()
            else:
                break
        # NOTE(review): this is a while/else — the else runs whenever the loop
        # exits without break, i.e. when the chunk reaches end of line. In that
        # case False is returned even though the pointer has already advanced
        # (position is not restored and self.__Token is not set) — confirm this
        # is the intended behavior for a token at end of line.
        else:
            return False
        if StartPos != self.CurrentOffsetWithinLine:
            self.__Token = self.__CurrentLine()[StartPos : self.CurrentOffsetWithinLine]
            return True
        else:
            return False
## __GetNextGuid() method
#
# Get next token unit before a seperator
# If found, the GUID string is put into self.__Token
#
# @param self The object pointer
# @retval True Successfully find a registry format GUID, file buffer pointer moved forward
# @retval False Not able to find a registry format GUID, file buffer pointer not changed
#
def __GetNextGuid(self):
if not self.__GetNextToken():
return False
p = re.compile('[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}')
if p.match(self.__Token) != None:
return True
else:
self.__UndoToken()
return False
    ## __UndoToken() method
    #
    #   Go back one token unit in file buffer
    #   First backs over any trailing whitespace, then walks backwards over
    #   the token chars until a whitespace/separator boundary or the line
    #   start is reached.
    #
    #   @param  self        The object pointer
    #
    def __UndoToken(self):
        self.__UndoOneChar()
        # Skip backwards over whitespace between tokens
        while self.__CurrentChar().isspace():
            if not self.__UndoOneChar():
                # Hit the file start: re-advance one char and stop
                self.__GetOneChar()
                return
        StartPos = self.CurrentOffsetWithinLine
        CurrentLine = self.CurrentLineNumber
        while CurrentLine == self.CurrentLineNumber:
            TempChar = self.__CurrentChar()
            # Try to find the end char that is not a space and not in seperator tuple.
            # That is, when we got a space or any char in the tuple, we got the end of token.
            if not str(TempChar).isspace() and not TempChar in SEPERATOR_TUPLE:
                if not self.__UndoOneChar():
                    return
            # if we happen to meet a seperator as the first char, we must proceed to get it.
            # That is, we get a token that is a seperator char. nomally it is the boundary of other tokens.
            elif StartPos == self.CurrentOffsetWithinLine and TempChar in SEPERATOR_TUPLE:
                return
            else:
                break
        # Overshot the token start by one char: step forward again
        self.__GetOneChar()
## __HexDigit() method
#
# Whether char input is a Hex data bit
#
# @param self The object pointer
# @param TempChar The char to test
# @retval True The char is a Hex data bit
# @retval False The char is NOT a Hex data bit
#
def __HexDigit(self, TempChar):
if (TempChar >= 'a' and TempChar <= 'f') or (TempChar >= 'A' and TempChar <= 'F') \
or (TempChar >= '0' and TempChar <= '9'):
return True
else:
return False
def __IsHex(self, HexStr):
if not HexStr.upper().startswith("0X"):
return False
if len(self.__Token) <= 2:
return False
charList = [c for c in HexStr[2 : ] if not self.__HexDigit( c)]
if len(charList) == 0:
return True
else:
return False
## __GetNextHexNumber() method
#
# Get next HEX data before a seperator
# If found, the HEX data is put into self.__Token
#
# @param self The object pointer
# @retval True Successfully find a HEX data, file buffer pointer moved forward
# @retval False Not able to find a HEX data, file buffer pointer not changed
#
def __GetNextHexNumber(self):
if not self.__GetNextToken():
return False
if self.__IsHex(self.__Token):
return True
else:
self.__UndoToken()
return False
## __GetNextDecimalNumber() method
#
# Get next decimal data before a seperator
# If found, the decimal data is put into self.__Token
#
# @param self The object pointer
# @retval True Successfully find a decimal data, file buffer pointer moved forward
# @retval False Not able to find a decimal data, file buffer pointer not changed
#
def __GetNextDecimalNumber(self):
if not self.__GetNextToken():
return False
if self.__Token.isdigit():
return True
else:
self.__UndoToken()
return False
## __GetNextPcdName() method
#
# Get next PCD token space C name and PCD C name pair before a seperator
# If found, the decimal data is put into self.__Token
#
# @param self The object pointer
# @retval Tuple PCD C name and PCD token space C name pair
#
def __GetNextPcdName(self):
if not self.__GetNextWord():
raise Warning("expected format of <PcdTokenSpaceCName>.<PcdCName>", self.FileName, self.CurrentLineNumber)
pcdTokenSpaceCName = self.__Token
if not self.__IsToken( "."):
raise Warning("expected format of <PcdTokenSpaceCName>.<PcdCName>", self.FileName, self.CurrentLineNumber)
if not self.__GetNextWord():
raise Warning("expected format of <PcdTokenSpaceCName>.<PcdCName>", self.FileName, self.CurrentLineNumber)
pcdCName = self.__Token
return (pcdCName, pcdTokenSpaceCName)
## __GetStringData() method
#
# Get string contents quoted in ""
# If found, the decimal data is put into self.__Token
#
# @param self The object pointer
# @retval True Successfully find a string data, file buffer pointer moved forward
# @retval False Not able to find a string data, file buffer pointer not changed
#
def __GetStringData(self):
if self.__Token.startswith("\"") or self.__Token.startswith("L\""):
self.__UndoToken()
self.__SkipToToken("\"")
currentLineNumber = self.CurrentLineNumber
if not self.__SkipToToken("\""):
raise Warning("Missing Quote \" for String", self.FileName, self.CurrentLineNumber)
if currentLineNumber != self.CurrentLineNumber:
raise Warning("Missing Quote \" for String", self.FileName, self.CurrentLineNumber)
self.__Token = self.__SkippedChars.rstrip('\"')
return True
elif self.__Token.startswith("\'") or self.__Token.startswith("L\'"):
self.__UndoToken()
self.__SkipToToken("\'")
currentLineNumber = self.CurrentLineNumber
if not self.__SkipToToken("\'"):
raise Warning("Missing Quote \' for String", self.FileName, self.CurrentLineNumber)
if currentLineNumber != self.CurrentLineNumber:
raise Warning("Missing Quote \' for String", self.FileName, self.CurrentLineNumber)
self.__Token = self.__SkippedChars.rstrip('\'')
return True
else:
return False
## __SkipToToken() method
#
# Search forward in file buffer for the string
# The skipped chars are put into self.__SkippedChars
#
# @param self The object pointer
# @param String The string to search
# @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
# @retval True Successfully find the string, file buffer pointer moved forward
# @retval False Not able to find the string, file buffer pointer not changed
#
def __SkipToToken(self, String, IgnoreCase = False):
StartPos = self.GetFileBufferPos()
self.__SkippedChars = ""
while not self.__EndOfFile():
index = -1
if IgnoreCase:
index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].upper().find(String.upper())
else:
index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].find(String)
if index == 0:
self.CurrentOffsetWithinLine += len(String)
self.__SkippedChars += String
return True
self.__SkippedChars += str(self.__CurrentChar())
self.__GetOneChar()
self.SetFileBufferPos( StartPos)
self.__SkippedChars = ""
return False
## GetFileBufferPos() method
#
# Return the tuple of current line and offset within the line
#
# @param self The object pointer
# @retval Tuple Line number and offset pair
#
def GetFileBufferPos(self):
return (self.CurrentLineNumber, self.CurrentOffsetWithinLine)
## SetFileBufferPos() method
#
# Restore the file buffer position
#
# @param self The object pointer
# @param Pos The new file buffer position
#
def SetFileBufferPos(self, Pos):
(self.CurrentLineNumber, self.CurrentOffsetWithinLine) = Pos
    ## Preprocess() method
    #
    #   Preprocess comment, conditional directive, include directive, replace macro.
    #   Runs the preprocessing pipeline in order: strip comments, expand
    #   !include files, strip comments again in the merged buffer, process
    #   conditional directives, then blank out the queued wipe-off areas.
    #   Each pass needs the buffer as char lists (__StringToList) and leaves
    #   it re-joined as strings.
    #   Exception will be raised if syntax error found
    #
    #   @param  self        The object pointer
    #
    def Preprocess(self):
        self.__StringToList()
        self.PreprocessFile()
        self.PreprocessIncludeFile()
        # Included file contents need the same comment stripping
        self.__StringToList()
        self.PreprocessFile()
        self.PreprocessConditionalStatement()
        self.__StringToList()
        # Blank out regions recorded by the conditional-statement pass
        for Pos in self.__WipeOffArea:
            self.__ReplaceFragment(Pos[0], Pos[1])
        self.Profile.FileLinesList = ["".join(list) for list in self.Profile.FileLinesList]
        while self.__GetDefines():
            pass
    ## ParseFile() method
    #
    #   Parse the file profile buffer to extract fd, fv ... information
    #   Sections must appear in order: FD, FV, FMP, Capsule, VTF, Rule,
    #   OptionRom. On a parse Warning, location details (line, column, and the
    #   remaining text on the line) are appended to the message before
    #   re-raising.
    #   Exception will be raised if syntax error found
    #
    #   @param  self        The object pointer
    #
    def ParseFile(self):
        try:
            self.Preprocess()
            while self.__GetFd():
                pass
            while self.__GetFv():
                pass
            while self.__GetFmp():
                pass
            while self.__GetCapsule():
                pass
            while self.__GetVtf():
                pass
            while self.__GetRule():
                pass
            while self.__GetOptionRom():
                pass
        # NOTE: Python 2 'except X, e' syntax; Warning here is the project's
        # parser warning class, not the builtin
        except Warning, X:
            self.__UndoToken()
            FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
            #'\n\tGot Token: \"%s\" from File %s\n' % (self.__Token, FileLineTuple[0]) + \
            X.Message += ' near line %d, column %d: %s' \
                % (FileLineTuple[1], self.CurrentOffsetWithinLine + 1, self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :].rstrip('\n').rstrip('\r'))
            raise
    ## __GetDefines() method
    #
    #   Get Defines section contents and store its data into AllMacrosList
    #   Consumes a [Defines] section header and its KEY = VALUE / SET entries,
    #   advancing the buffer pointer past them. Always returns False once the
    #   section (or a following section header) is reached, so the caller's
    #   'while self.__GetDefines(): pass' loop terminates.
    #
    #   @param  self        The object pointer
    #   @retval True        Successfully find a Defines
    #   @retval False       Not able to find a Defines
    #
    def __GetDefines(self):
        if not self.__GetNextToken():
            return False
        S = self.__Token.upper()
        if S.startswith("[") and not S.startswith("[DEFINES"):
            # Any other section header ends the [Defines] scan; unknown
            # headers are a sequence error
            if not S.startswith("[FD.") and not S.startswith("[FV.") and not S.startswith("[CAPSULE.") \
                and not S.startswith("[VTF.") and not S.startswith("[RULE.") and not S.startswith("[OPTIONROM."):
                raise Warning("Unknown section or section appear sequence error (The correct sequence should be [DEFINES], [FD.], [FV.], [Capsule.], [VTF.], [Rule.], [OptionRom.])", self.FileName, self.CurrentLineNumber)
            self.__UndoToken()
            return False
        self.__UndoToken()
        if not self.__IsToken("[DEFINES", True):
            FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
            #print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
            #    % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
            raise Warning("expected [DEFINES", self.FileName, self.CurrentLineNumber)
        if not self.__IsToken( "]"):
            raise Warning("expected ']'", self.FileName, self.CurrentLineNumber)
        while self.__GetNextWord():
            # handle the SET statement
            if self.__Token == 'SET':
                self.__UndoToken()
                self.__GetSetStatement(None)
                continue
            Macro = self.__Token
            if not self.__IsToken("="):
                raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
            if not self.__GetNextToken() or self.__Token.startswith('['):
                raise Warning("expected MACRO value", self.FileName, self.CurrentLineNumber)
            # NOTE(review): the parsed value is validated but then discarded —
            # presumably macros were already applied during Preprocess; confirm
            # whether recording Value here was ever intended.
            Value = self.__Token
        return False
    ## __GetFd() method
    #
    #   Get FD section contents and store its data into FD dictionary of self.Profile
    #
    #   @param  self        The object pointer
    #   @retval True        Successfully find a FD
    #   @retval False       Not able to find a FD
    #
    def __GetFd(self):
        if not self.__GetNextToken():
            return False
        S = self.__Token.upper()
        if S.startswith("[") and not S.startswith("[FD."):
            # Another known section header means no more [FD.] sections;
            # an unknown header is a hard error.
            if not S.startswith("[FV.") and not S.startswith('[FMPPAYLOAD.') and not S.startswith("[CAPSULE.") \
                and not S.startswith("[VTF.") and not S.startswith("[RULE.") and not S.startswith("[OPTIONROM."):
                raise Warning("Unknown section", self.FileName, self.CurrentLineNumber)
            self.__UndoToken()
            return False
        self.__UndoToken()
        if not self.__IsToken("[FD.", True):
            FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
            raise Warning("expected [FD.]", self.FileName, self.CurrentLineNumber)
        FdName = self.__GetUiName()
        if FdName == "":
            # Only the first FD may omit its name; it then inherits the
            # platform name so it can still be addressed.
            if len (self.Profile.FdDict) == 0:
                FdName = GenFdsGlobalVariable.PlatformName
                if FdName == "" and GlobalData.gActivePlatform:
                    FdName = GlobalData.gActivePlatform.PlatformName
                self.Profile.FdNameNotSet = True
            else:
                raise Warning("expected FdName in [FD.] section", self.FileName, self.CurrentLineNumber)
        self.CurrentFdName = FdName.upper()
        if self.CurrentFdName in self.Profile.FdDict:
            raise Warning("Unexpected the same FD name", self.FileName, self.CurrentLineNumber)
        if not self.__IsToken( "]"):
            raise Warning("expected ']'", self.FileName, self.CurrentLineNumber)
        FdObj = Fd.FD()
        FdObj.FdUiName = self.CurrentFdName
        self.Profile.FdDict[self.CurrentFdName] = FdObj
        # A nameless FD is only legal when it is the sole FD in the file.
        if len (self.Profile.FdDict) > 1 and self.Profile.FdNameNotSet:
            raise Warning("expected all FDs have their name", self.FileName, self.CurrentLineNumber)
        Status = self.__GetCreateFile(FdObj)
        if not Status:
            raise Warning("FD name error", self.FileName, self.CurrentLineNumber)
        while self.__GetTokenStatements(FdObj):
            pass
        # BaseAddress, Size and ErasePolarity are mandatory for an FD.
        for Attr in ("BaseAddress", "Size", "ErasePolarity"):
            if getattr(FdObj, Attr) == None:
                self.__GetNextToken()
                raise Warning("Keyword %s missing" % Attr, self.FileName, self.CurrentLineNumber)
        # Default block layout: one block covering the whole FD.
        if not FdObj.BlockSizeList:
            FdObj.BlockSizeList.append((1, FdObj.Size, None))
        self.__GetDefineStatements(FdObj)
        self.__GetSetStatements(FdObj)
        # At least one region layout is required; consume all of them.
        if not self.__GetRegionLayout(FdObj):
            raise Warning("expected region layout", self.FileName, self.CurrentLineNumber)
        while self.__GetRegionLayout(FdObj):
            pass
        return True
## __GetUiName() method
#
# Return the UI name of a section
#
# @param self The object pointer
# @retval FdName UI name
#
def __GetUiName(self):
Name = ""
if self.__GetNextWord():
Name = self.__Token
return Name
## __GetCreateFile() method
#
# Return the output file name of object
#
# @param self The object pointer
# @param Obj object whose data will be stored in file
# @retval FdName UI name
#
def __GetCreateFile(self, Obj):
if self.__IsKeyword( "CREATE_FILE"):
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected file name", self.FileName, self.CurrentLineNumber)
FileName = self.__Token
Obj.CreateFileName = FileName
return True
    ## __GetTokenStatements() method
    #
    #   Parse one FD token statement (BaseAddress, Size or ErasePolarity);
    #   falls through to block statements when none of those keywords match.
    #
    #   @param  self        The object pointer
    #   @param  Obj         for whom token statement is got
    #
    def __GetTokenStatements(self, Obj):
        if self.__IsKeyword( "BaseAddress"):
            if not self.__IsToken( "="):
                raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
            if not self.__GetNextHexNumber():
                raise Warning("expected Hex base address", self.FileName, self.CurrentLineNumber)
            Obj.BaseAddress = self.__Token
            # Optional "| PcdName": the base address is published through
            # the named PCD, and its source location is remembered.
            if self.__IsToken( "|"):
                pcdPair = self.__GetNextPcdName()
                Obj.BaseAddressPcd = pcdPair
                self.Profile.PcdDict[pcdPair] = Obj.BaseAddress
                FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
                self.Profile.PcdFileLineDict[pcdPair] = FileLineTuple
            return True
        if self.__IsKeyword( "Size"):
            if not self.__IsToken( "="):
                raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
            if not self.__GetNextHexNumber():
                raise Warning("expected Hex size", self.FileName, self.CurrentLineNumber)
            Size = self.__Token
            # Optional "| PcdName" publishing the size, as for BaseAddress.
            if self.__IsToken( "|"):
                pcdPair = self.__GetNextPcdName()
                Obj.SizePcd = pcdPair
                self.Profile.PcdDict[pcdPair] = Size
                FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
                self.Profile.PcdFileLineDict[pcdPair] = FileLineTuple
            # base 0: accepts 0x-prefixed hex as well as decimal text.
            Obj.Size = long(Size, 0)
            return True
        if self.__IsKeyword( "ErasePolarity"):
            if not self.__IsToken( "="):
                raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
            if not self.__GetNextToken():
                raise Warning("expected Erase Polarity", self.FileName, self.CurrentLineNumber)
            if self.__Token != "1" and self.__Token != "0":
                raise Warning("expected 1 or 0 Erase Polarity", self.FileName, self.CurrentLineNumber)
            Obj.ErasePolarity = self.__Token
            return True
        # None of the scalar keywords matched; try BlockSize statements.
        return self.__GetBlockStatements(Obj)
    ## __GetAddressStatements() method
    #
    #   Parse optional BsBaseAddress / RtBaseAddress statements onto Obj.
    #   NOTE(review): unlike the sibling __Get* parsers this method returns
    #   None, not True/False -- the original @retval doc was inaccurate.
    #
    #   @param  self        The object pointer
    #   @param  Obj         for whom address statement is got
    #
    def __GetAddressStatements(self, Obj):
        if self.__IsKeyword("BsBaseAddress"):
            if not self.__IsToken( "="):
                raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
            if not self.__GetNextDecimalNumber() and not self.__GetNextHexNumber():
                raise Warning("expected address", self.FileName, self.CurrentLineNumber)
            # base 0: accepts both decimal and 0x-prefixed hex spellings.
            BsAddress = long(self.__Token, 0)
            Obj.BsBaseAddress = BsAddress
        if self.__IsKeyword("RtBaseAddress"):
            if not self.__IsToken( "="):
                raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
            if not self.__GetNextDecimalNumber() and not self.__GetNextHexNumber():
                raise Warning("expected address", self.FileName, self.CurrentLineNumber)
            RtAddress = long(self.__Token, 0)
            Obj.RtBaseAddress = RtAddress
## __GetBlockStatements() method
#
# Get block statements
#
# @param self The object pointer
# @param Obj for whom block statement is got
#
def __GetBlockStatements(self, Obj):
IsBlock = False
while self.__GetBlockStatement(Obj):
IsBlock = True
Item = Obj.BlockSizeList[-1]
if Item[0] == None or Item[1] == None:
raise Warning("expected block statement", self.FileName, self.CurrentLineNumber)
return IsBlock
## __GetBlockStatement() method
#
# Get block statement
#
# @param self The object pointer
# @param Obj for whom block statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def __GetBlockStatement(self, Obj):
if not self.__IsKeyword( "BlockSize"):
return False
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextHexNumber() and not self.__GetNextDecimalNumber():
raise Warning("expected Hex or Integer block size", self.FileName, self.CurrentLineNumber)
BlockSize = self.__Token
BlockSizePcd = None
if self.__IsToken( "|"):
PcdPair = self.__GetNextPcdName()
BlockSizePcd = PcdPair
self.Profile.PcdDict[PcdPair] = BlockSize
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[PcdPair] = FileLineTuple
BlockSize = long(BlockSize, 0)
BlockNumber = None
if self.__IsKeyword( "NumBlocks"):
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextDecimalNumber() and not self.__GetNextHexNumber():
raise Warning("expected block numbers", self.FileName, self.CurrentLineNumber)
BlockNumber = long(self.__Token, 0)
Obj.BlockSizeList.append((BlockSize, BlockNumber, BlockSizePcd))
return True
## __GetDefineStatements() method
#
# Get define statements
#
# @param self The object pointer
# @param Obj for whom define statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def __GetDefineStatements(self, Obj):
while self.__GetDefineStatement( Obj):
pass
## __GetDefineStatement() method
#
# Get define statement
#
# @param self The object pointer
# @param Obj for whom define statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def __GetDefineStatement(self, Obj):
if self.__IsKeyword("DEFINE"):
self.__GetNextToken()
Macro = self.__Token
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected value", self.FileName, self.CurrentLineNumber)
Value = self.__Token
Macro = '$(' + Macro + ')'
Obj.DefineVarDict[Macro] = Value
return True
return False
## __GetSetStatements() method
#
# Get set statements
#
# @param self The object pointer
# @param Obj for whom set statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def __GetSetStatements(self, Obj):
while self.__GetSetStatement(Obj):
pass
## __GetSetStatement() method
#
# Get set statement
#
# @param self The object pointer
# @param Obj for whom set statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def __GetSetStatement(self, Obj):
if self.__IsKeyword("SET"):
PcdPair = self.__GetNextPcdName()
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
Value = self.__GetExpression()
Value = self.__EvaluateConditional(Value, self.CurrentLineNumber, 'eval', True)
if Obj:
Obj.SetVarDict[PcdPair] = Value
self.Profile.PcdDict[PcdPair] = Value
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[PcdPair] = FileLineTuple
return True
return False
## __CalcRegionExpr(self)
#
# Calculate expression for offset or size of a region
#
# @return: None if invalid expression
# Calculated number if successfully
#
def __CalcRegionExpr(self):
StartPos = self.GetFileBufferPos()
Expr = ''
PairCount = 0
while not self.__EndOfFile():
CurCh = self.__CurrentChar()
if CurCh == '(':
PairCount += 1
elif CurCh == ')':
PairCount -= 1
if CurCh in '|\r\n' and PairCount == 0:
break
Expr += CurCh
self.__GetOneChar()
try:
return long(
ValueExpression(Expr,
self.__CollectMacroPcd()
)(True),0)
except Exception:
self.SetFileBufferPos(StartPos)
return None
    ## __GetRegionLayout() method
    #
    #   Parse one "Offset|Size [PcdOffset[|PcdSize]] [region data]" layout
    #   line for an FD and append the resulting Region to Fd.RegionList.
    #
    #   @param  self        The object pointer
    #   @param  Fd          for whom region is got
    #   @retval True        Successfully find
    #   @retval False       Not able to find
    #
    def __GetRegionLayout(self, Fd):
        # A region must start with a computable offset expression; if the
        # text here is not one, there is no further region.
        Offset = self.__CalcRegionExpr()
        if Offset == None:
            return False
        RegionObj = Region.Region()
        RegionObj.Offset = Offset
        Fd.RegionList.append(RegionObj)
        if not self.__IsToken( "|"):
            raise Warning("expected '|'", self.FileName, self.CurrentLineNumber)
        Size = self.__CalcRegionExpr()
        if Size == None:
            raise Warning("expected Region Size", self.FileName, self.CurrentLineNumber)
        RegionObj.Size = Size
        if not self.__GetNextWord():
            return True
        if not self.__Token in ("SET", "FV", "FILE", "DATA", "CAPSULE"):
            #
            # If next token is a word which is not a valid FV type, it might be part of [PcdOffset[|PcdSize]]
            # Or it might be next region's offset described by an expression which starts with a PCD.
            #    PcdOffset[|PcdSize] or OffsetPcdExpression|Size
            #
            self.__UndoToken()
            IsRegionPcd = (RegionSizeGuidPattern.match(self.__CurrentLine()[self.CurrentOffsetWithinLine:]) or
                           RegionOffsetPcdPattern.match(self.__CurrentLine()[self.CurrentOffsetWithinLine:]))
            if IsRegionPcd:
                # Publish the region's absolute offset (FD base + offset)
                # through the named PCD and remember the source location.
                RegionObj.PcdOffset = self.__GetNextPcdName()
                self.Profile.PcdDict[RegionObj.PcdOffset] = "0x%08X" % (RegionObj.Offset + long(Fd.BaseAddress, 0))
                self.__PcdDict['%s.%s' % (RegionObj.PcdOffset[1], RegionObj.PcdOffset[0])] = "0x%x" % RegionObj.Offset
                FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
                self.Profile.PcdFileLineDict[RegionObj.PcdOffset] = FileLineTuple
                if self.__IsToken( "|"):
                    # Optional second PCD publishing the region size.
                    RegionObj.PcdSize = self.__GetNextPcdName()
                    self.Profile.PcdDict[RegionObj.PcdSize] = "0x%08X" % RegionObj.Size
                    self.__PcdDict['%s.%s' % (RegionObj.PcdSize[1], RegionObj.PcdSize[0])] = "0x%x" % RegionObj.Size
                    FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
                    self.Profile.PcdFileLineDict[RegionObj.PcdSize] = FileLineTuple
            if not self.__GetNextWord():
                return True
        if self.__Token == "SET":
            self.__UndoToken()
            self.__GetSetStatements( RegionObj)
            if not self.__GetNextWord():
                return True
        elif self.__Token == "FV":
            self.__UndoToken()
            self.__GetRegionFvType( RegionObj)
        elif self.__Token == "CAPSULE":
            self.__UndoToken()
            self.__GetRegionCapType( RegionObj)
        elif self.__Token == "FILE":
            self.__UndoToken()
            self.__GetRegionFileType( RegionObj)
        elif self.__Token == "DATA":
            self.__UndoToken()
            self.__GetRegionDataType( RegionObj)
        else:
            # Unrecognized word: assume it begins the next region's offset
            # expression and recurse; only fail when that also fails.
            self.__UndoToken()
            if self.__GetRegionLayout(Fd):
                return True
            raise Warning("A valid region type was not found. "
                          "Valid types are [SET, FV, CAPSULE, FILE, DATA]. This error occurred",
                          self.FileName, self.CurrentLineNumber)
        return True
## __GetRegionFvType() method
#
# Get region fv data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def __GetRegionFvType(self, RegionObj):
if not self.__IsKeyword( "FV"):
raise Warning("expected Keyword 'FV'", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected FV name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionType = "FV"
RegionObj.RegionDataList.append(self.__Token)
while self.__IsKeyword( "FV"):
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected FV name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionDataList.append(self.__Token)
## __GetRegionCapType() method
#
# Get region capsule data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def __GetRegionCapType(self, RegionObj):
if not self.__IsKeyword("CAPSULE"):
raise Warning("expected Keyword 'CAPSULE'", self.FileName, self.CurrentLineNumber)
if not self.__IsToken("="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected CAPSULE name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionType = "CAPSULE"
RegionObj.RegionDataList.append(self.__Token)
while self.__IsKeyword("CAPSULE"):
if not self.__IsToken("="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected CAPSULE name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionDataList.append(self.__Token)
## __GetRegionFileType() method
#
# Get region file data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def __GetRegionFileType(self, RegionObj):
if not self.__IsKeyword( "FILE"):
raise Warning("expected Keyword 'FILE'", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected File name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionType = "FILE"
RegionObj.RegionDataList.append( self.__Token)
while self.__IsKeyword( "FILE"):
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected FILE name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionDataList.append(self.__Token)
## __GetRegionDataType() method
#
# Get region array data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def __GetRegionDataType(self, RegionObj):
if not self.__IsKeyword( "DATA"):
raise Warning("expected Region Data type", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "{"):
raise Warning("expected '{'", self.FileName, self.CurrentLineNumber)
if not self.__GetNextHexNumber():
raise Warning("expected Hex byte", self.FileName, self.CurrentLineNumber)
if len(self.__Token) > 18:
raise Warning("Hex string can't be converted to a valid UINT64 value", self.FileName, self.CurrentLineNumber)
# convert hex string value to byte hex string array
AllString = self.__Token
AllStrLen = len (AllString)
DataString = ""
while AllStrLen > 4:
DataString = DataString + "0x" + AllString[AllStrLen - 2: AllStrLen] + ","
AllStrLen = AllStrLen - 2
DataString = DataString + AllString[:AllStrLen] + ","
# byte value array
if len (self.__Token) <= 4:
while self.__IsToken(","):
if not self.__GetNextHexNumber():
raise Warning("Invalid Hex number", self.FileName, self.CurrentLineNumber)
if len(self.__Token) > 4:
raise Warning("Hex byte(must be 2 digits) too long", self.FileName, self.CurrentLineNumber)
DataString += self.__Token
DataString += ","
if not self.__IsToken( "}"):
raise Warning("expected '}'", self.FileName, self.CurrentLineNumber)
DataString = DataString.rstrip(",")
RegionObj.RegionType = "DATA"
RegionObj.RegionDataList.append( DataString)
while self.__IsKeyword( "DATA"):
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "{"):
raise Warning("expected '{'", self.FileName, self.CurrentLineNumber)
if not self.__GetNextHexNumber():
raise Warning("expected Hex byte", self.FileName, self.CurrentLineNumber)
if len(self.__Token) > 18:
raise Warning("Hex string can't be converted to a valid UINT64 value", self.FileName, self.CurrentLineNumber)
# convert hex string value to byte hex string array
AllString = self.__Token
AllStrLen = len (AllString)
DataString = ""
while AllStrLen > 4:
DataString = DataString + "0x" + AllString[AllStrLen - 2: AllStrLen] + ","
AllStrLen = AllStrLen - 2
DataString = DataString + AllString[:AllStrLen] + ","
# byte value array
if len (self.__Token) <= 4:
while self.__IsToken(","):
if not self.__GetNextHexNumber():
raise Warning("Invalid Hex number", self.FileName, self.CurrentLineNumber)
if len(self.__Token) > 4:
raise Warning("Hex byte(must be 2 digits) too long", self.FileName, self.CurrentLineNumber)
DataString += self.__Token
DataString += ","
if not self.__IsToken( "}"):
raise Warning("expected '}'", self.FileName, self.CurrentLineNumber)
DataString = DataString.rstrip(",")
RegionObj.RegionDataList.append( DataString)
    ## __GetFv() method
    #
    #   Get FV section contents and store its data into FV dictionary of self.Profile
    #
    #   @param  self        The object pointer
    #   @retval True        Successfully find a FV
    #   @retval False       Not able to find a FV
    #
    def __GetFv(self):
        if not self.__GetNextToken():
            return False
        S = self.__Token.upper()
        if S.startswith("[") and not S.startswith("[FV."):
            # Another known section header ends the [FV.] scan; an unknown
            # header is a hard error.
            if not S.startswith('[FMPPAYLOAD.') and not S.startswith("[CAPSULE.") \
                and not S.startswith("[VTF.") and not S.startswith("[RULE.") and not S.startswith("[OPTIONROM."):
                raise Warning("Unknown section or section appear sequence error (The correct sequence should be [FD.], [FV.], [Capsule.], [VTF.], [Rule.], [OptionRom.])", self.FileName, self.CurrentLineNumber)
            self.__UndoToken()
            return False
        self.__UndoToken()
        if not self.__IsToken("[FV.", True):
            FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
            raise Warning("Unknown Keyword '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
        FvName = self.__GetUiName()
        self.CurrentFvName = FvName.upper()
        if not self.__IsToken( "]"):
            raise Warning("expected ']'", self.FileName, self.CurrentLineNumber)
        FvObj = Fv.FV()
        FvObj.UiFvName = self.CurrentFvName
        self.Profile.FvDict[self.CurrentFvName] = FvObj
        Status = self.__GetCreateFile(FvObj)
        if not Status:
            raise Warning("FV name error", self.FileName, self.CurrentLineNumber)
        self.__GetDefineStatements(FvObj)
        self.__GetAddressStatements(FvObj)
        FvObj.FvExtEntryTypeValue = []
        FvObj.FvExtEntryType = []
        FvObj.FvExtEntryData = []
        # FV header statements may appear in any order; keep trying each
        # parser until a full round matches nothing.
        while True:
            self.__GetSetStatements(FvObj)
            if not (self.__GetBlockStatement(FvObj) or self.__GetFvBaseAddress(FvObj) or
                self.__GetFvForceRebase(FvObj) or self.__GetFvAlignment(FvObj) or
                self.__GetFvAttributes(FvObj) or self.__GetFvNameGuid(FvObj) or
                self.__GetFvExtEntryStatement(FvObj) or self.__GetFvNameString(FvObj)):
                break
        # FvNameString requires an FvNameGuid to attach the string to.
        if FvObj.FvNameString == 'TRUE' and not FvObj.FvNameGuid:
            raise Warning("FvNameString found but FvNameGuid was not found", self.FileName, self.CurrentLineNumber)
        # Called twice -- presumably to allow one PEI and one DXE apriori
        # section per FV (TODO confirm against __GetAprioriSection).
        self.__GetAprioriSection(FvObj, FvObj.DefineVarDict.copy())
        self.__GetAprioriSection(FvObj, FvObj.DefineVarDict.copy())
        # Body: any mix of INF and FILE statements until neither matches.
        while True:
            isInf = self.__GetInfStatement(FvObj, MacroDict = FvObj.DefineVarDict.copy())
            isFile = self.__GetFileStatement(FvObj, MacroDict = FvObj.DefineVarDict.copy())
            if not isInf and not isFile:
                break
        return True
## __GetFvAlignment() method
#
# Get alignment for FV
#
# @param self The object pointer
# @param Obj for whom alignment is got
# @retval True Successfully find a alignment statement
# @retval False Not able to find a alignment statement
#
def __GetFvAlignment(self, Obj):
if not self.__IsKeyword( "FvAlignment"):
return False
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected alignment value", self.FileName, self.CurrentLineNumber)
if self.__Token.upper() not in ("1", "2", "4", "8", "16", "32", "64", "128", "256", "512", \
"1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", \
"1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", \
"1G", "2G"):
raise Warning("Unknown alignment value '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
Obj.FvAlignment = self.__Token
return True
## __GetFvBaseAddress() method
#
# Get BaseAddress for FV
#
# @param self The object pointer
# @param Obj for whom FvBaseAddress is got
# @retval True Successfully find a FvBaseAddress statement
# @retval False Not able to find a FvBaseAddress statement
#
def __GetFvBaseAddress(self, Obj):
if not self.__IsKeyword("FvBaseAddress"):
return False
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected FV base address value", self.FileName, self.CurrentLineNumber)
IsValidBaseAddrValue = re.compile('^0[x|X][0-9a-fA-F]+')
if not IsValidBaseAddrValue.match(self.__Token.upper()):
raise Warning("Unknown FV base address value '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
Obj.FvBaseAddress = self.__Token
return True
## __GetFvForceRebase() method
#
# Get FvForceRebase for FV
#
# @param self The object pointer
# @param Obj for whom FvForceRebase is got
# @retval True Successfully find a FvForceRebase statement
# @retval False Not able to find a FvForceRebase statement
#
def __GetFvForceRebase(self, Obj):
if not self.__IsKeyword("FvForceRebase"):
return False
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected FvForceRebase value", self.FileName, self.CurrentLineNumber)
if self.__Token.upper() not in ["TRUE", "FALSE", "0", "0X0", "0X00", "1", "0X1", "0X01"]:
raise Warning("Unknown FvForceRebase value '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
if self.__Token.upper() in ["TRUE", "1", "0X1", "0X01"]:
Obj.FvForceRebase = True
elif self.__Token.upper() in ["FALSE", "0", "0X0", "0X00"]:
Obj.FvForceRebase = False
else:
Obj.FvForceRebase = None
return True
## __GetFvAttributes() method
#
# Get attributes for FV
#
# @param self The object pointer
# @param Obj for whom attribute is got
# @retval None
#
def __GetFvAttributes(self, FvObj):
IsWordToken = False
while self.__GetNextWord():
IsWordToken = True
name = self.__Token
if name not in ("ERASE_POLARITY", "MEMORY_MAPPED", \
"STICKY_WRITE", "LOCK_CAP", "LOCK_STATUS", "WRITE_ENABLED_CAP", \
"WRITE_DISABLED_CAP", "WRITE_STATUS", "READ_ENABLED_CAP", \
"READ_DISABLED_CAP", "READ_STATUS", "READ_LOCK_CAP", \
"READ_LOCK_STATUS", "WRITE_LOCK_CAP", "WRITE_LOCK_STATUS", \
"WRITE_POLICY_RELIABLE", "WEAK_ALIGNMENT"):
self.__UndoToken()
return False
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken() or self.__Token.upper() not in ("TRUE", "FALSE", "1", "0"):
raise Warning("expected TRUE/FALSE (1/0)", self.FileName, self.CurrentLineNumber)
FvObj.FvAttributeDict[name] = self.__Token
return IsWordToken
## __GetFvNameGuid() method
#
# Get FV GUID for FV
#
# @param self The object pointer
# @param Obj for whom GUID is got
# @retval None
#
def __GetFvNameGuid(self, FvObj):
if not self.__IsKeyword( "FvNameGuid"):
return False
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextGuid():
raise Warning("expected FV GUID value", self.FileName, self.CurrentLineNumber)
FvObj.FvNameGuid = self.__Token
return True
def __GetFvNameString(self, FvObj):
if not self.__IsKeyword( "FvNameString"):
return False
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken() or self.__Token not in ('TRUE', 'FALSE'):
raise Warning("expected TRUE or FALSE for FvNameString", self.FileName, self.CurrentLineNumber)
FvObj.FvNameString = self.__Token
return True
    ## __GetFvExtEntryStatement() method
    #
    #   Parse one "FV_EXT_ENTRY TYPE = <num> { FILE = <path> | DATA = {...} }"
    #   statement; type, kind and payload are appended to the parallel lists
    #   FvExtEntryTypeValue / FvExtEntryType / FvExtEntryData on FvObj.
    #
    #   @param  self        The object pointer
    #   @param  FvObj       for whom the extension entry is got
    #   @retval             True when the statement was parsed, else False
    #
    def __GetFvExtEntryStatement(self, FvObj):
        if not self.__IsKeyword( "FV_EXT_ENTRY"):
            return False
        if not self.__IsKeyword ("TYPE"):
            raise Warning("expected 'TYPE'", self.FileName, self.CurrentLineNumber)
        if not self.__IsToken( "="):
            raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
        if not self.__GetNextHexNumber() and not self.__GetNextDecimalNumber():
            raise Warning("expected Hex FV extension entry type value At Line ", self.FileName, self.CurrentLineNumber)
        FvObj.FvExtEntryTypeValue += [self.__Token]
        if not self.__IsToken( "{"):
            raise Warning("expected '{'", self.FileName, self.CurrentLineNumber)
        if not self.__IsKeyword ("FILE") and not self.__IsKeyword ("DATA"):
            raise Warning("expected 'FILE' or 'DATA'", self.FileName, self.CurrentLineNumber)
        FvObj.FvExtEntryType += [self.__Token]
        if self.__Token == 'DATA':
            # Inline payload: "DATA = { <hex byte> [, <hex byte>]* }".
            if not self.__IsToken( "="):
                raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
            if not self.__IsToken( "{"):
                raise Warning("expected '{'", self.FileName, self.CurrentLineNumber)
            if not self.__GetNextHexNumber():
                raise Warning("expected Hex byte", self.FileName, self.CurrentLineNumber)
            # Each byte is at most "0xNN" -- four characters.
            if len(self.__Token) > 4:
                raise Warning("Hex byte(must be 2 digits) too long", self.FileName, self.CurrentLineNumber)
            DataString = self.__Token
            DataString += ","
            while self.__IsToken(","):
                if not self.__GetNextHexNumber():
                    raise Warning("Invalid Hex number", self.FileName, self.CurrentLineNumber)
                if len(self.__Token) > 4:
                    raise Warning("Hex byte(must be 2 digits) too long", self.FileName, self.CurrentLineNumber)
                DataString += self.__Token
                DataString += ","
            # Two closing braces: the inner data array and the outer entry.
            if not self.__IsToken( "}"):
                raise Warning("expected '}'", self.FileName, self.CurrentLineNumber)
            if not self.__IsToken( "}"):
                raise Warning("expected '}'", self.FileName, self.CurrentLineNumber)
            DataString = DataString.rstrip(",")
            FvObj.FvExtEntryData += [DataString]
        if self.__Token == 'FILE':
            # File payload: "FILE = <path>" inside the entry braces.
            if not self.__IsToken( "="):
                raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
            if not self.__GetNextToken():
                raise Warning("expected FV Extension Entry file path At Line ", self.FileName, self.CurrentLineNumber)
            FvObj.FvExtEntryData += [self.__Token]
            if not self.__IsToken( "}"):
                raise Warning("expected '}'", self.FileName, self.CurrentLineNumber)
        return True
## __GetAprioriSection() method
#
# Get token statements
#
# @param self The object pointer
# @param FvObj for whom apriori is got
# @param MacroDict dictionary used to replace macro
# @retval True Successfully find apriori statement
# @retval False Not able to find apriori statement
#
def __GetAprioriSection(self, FvObj, MacroDict = {}):
if not self.__IsKeyword( "APRIORI"):
return False
if not self.__IsKeyword("PEI") and not self.__IsKeyword("DXE"):
raise Warning("expected Apriori file type", self.FileName, self.CurrentLineNumber)
AprType = self.__Token
if not self.__IsToken( "{"):
raise Warning("expected '{'", self.FileName, self.CurrentLineNumber)
AprSectionObj = AprioriSection.AprioriSection()
AprSectionObj.AprioriType = AprType
self.__GetDefineStatements(AprSectionObj)
MacroDict.update(AprSectionObj.DefineVarDict)
while True:
IsInf = self.__GetInfStatement( AprSectionObj, MacroDict = MacroDict)
IsFile = self.__GetFileStatement( AprSectionObj)
if not IsInf and not IsFile:
break
if not self.__IsToken( "}"):
raise Warning("expected '}'", self.FileName, self.CurrentLineNumber)
FvObj.AprioriSectionList.append(AprSectionObj)
return True
    ## __GetInfStatement() method
    #
    #   Get INF statements
    #
    #   @param  self        The object pointer
    #   @param  Obj         for whom inf statement is got
    #   @param  ForCapsule  True when parsing inside a [Capsule.] section; the
    #                       INF is then wrapped in a CapsuleFfs container
    #   @param  MacroDict   dictionary used to replace macro (currently unused here)
    #   @retval True        Successfully find inf statement
    #   @retval False       Not able to find inf statement
    #
    def __GetInfStatement(self, Obj, ForCapsule = False, MacroDict = {}):
        if not self.__IsKeyword( "INF"):
            return False
        ffsInf = FfsInfStatement.FfsInfStatement()
        # Optional KeyString / FIXED / CHECKSUM / Align options precede the path.
        self.__GetInfOptions( ffsInf)
        if not self.__GetNextToken():
            raise Warning("expected INF file path", self.FileName, self.CurrentLineNumber)
        ffsInf.InfFileName = self.__Token
        # Record position for later error reporting.
        ffsInf.CurrentLineNum = self.CurrentLineNumber
        ffsInf.CurrentLineContent = self.__CurrentLine()
        #Replace $(SPACE) with real space
        ffsInf.InfFileName = ffsInf.InfFileName.replace('$(SPACE)', ' ')
        # Only validate the path when no unresolved $(MACRO) other than
        # $(WORKSPACE) remains — otherwise the path cannot be checked yet.
        if ffsInf.InfFileName.replace('$(WORKSPACE)', '').find('$') == -1:
            #do case sensitive check for file path
            ErrorCode, ErrorInfo = PathClass(NormPath(ffsInf.InfFileName), GenFdsGlobalVariable.WorkSpaceDir).Validate()
            if ErrorCode != 0:
                EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
        # Track each INF only once, together with its real file/line origin.
        if not ffsInf.InfFileName in self.Profile.InfList:
            self.Profile.InfList.append(ffsInf.InfFileName)
            FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
            self.Profile.InfFileLineList.append(FileLineTuple)
        # Optional "| RELOCS_STRIPPED" / "| RELOCS_RETAINED" relocation override.
        if self.__IsToken('|'):
            if self.__IsKeyword('RELOCS_STRIPPED'):
                ffsInf.KeepReloc = False
            elif self.__IsKeyword('RELOCS_RETAINED'):
                ffsInf.KeepReloc = True
            else:
                raise Warning("Unknown reloc strip flag '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
        if ForCapsule:
            # Capsule payload entries are wrapped so the capsule generator can
            # distinguish them from plain FV members.
            capsuleFfs = CapsuleData.CapsuleFfs()
            capsuleFfs.Ffs = ffsInf
            Obj.CapsuleDataList.append(capsuleFfs)
        else:
            Obj.FfsList.append(ffsInf)
        return True
    ## __GetInfOptions() method
    #
    #   Get optional attributes (FILE_GUID, RuleOverride, VERSION, UI, USE,
    #   KeyString list) that may precede an INF file path.
    #
    #   @param  self        The object pointer
    #   @param  FfsInfObj   for whom option is got
    #
    def __GetInfOptions(self, FfsInfObj):
        # FILE_GUID overrides the module GUID from the INF.
        if self.__IsKeyword("FILE_GUID"):
            if not self.__IsToken("="):
                raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
            if not self.__GetNextGuid():
                raise Warning("expected GUID value", self.FileName, self.CurrentLineNumber)
            FfsInfObj.OverrideGuid = self.__Token
        # RuleOverride selects a non-default [Rule.] section for this module.
        if self.__IsKeyword( "RuleOverride"):
            if not self.__IsToken( "="):
                raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
            if not self.__GetNextToken():
                raise Warning("expected Rule name", self.FileName, self.CurrentLineNumber)
            FfsInfObj.Rule = self.__Token
        if self.__IsKeyword( "VERSION"):
            if not self.__IsToken( "="):
                raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
            if not self.__GetNextToken():
                raise Warning("expected Version", self.FileName, self.CurrentLineNumber)
            # Quoted string -> literal version data; otherwise raw token.
            if self.__GetStringData():
                FfsInfObj.Version = self.__Token
        if self.__IsKeyword( "UI"):
            if not self.__IsToken( "="):
                raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
            if not self.__GetNextToken():
                raise Warning("expected UI name", self.FileName, self.CurrentLineNumber)
            if self.__GetStringData():
                FfsInfObj.Ui = self.__Token
        if self.__IsKeyword( "USE"):
            if not self.__IsToken( "="):
                raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
            if not self.__GetNextToken():
                raise Warning("expected ARCH name", self.FileName, self.CurrentLineNumber)
            FfsInfObj.UseArch = self.__Token
        # Optional comma-separated list of Target_Tag_Arch key strings.
        if self.__GetNextToken():
            p = re.compile(r'([a-zA-Z0-9\-]+|\$\(TARGET\)|\*)_([a-zA-Z0-9\-]+|\$\(TOOL_CHAIN_TAG\)|\*)_([a-zA-Z0-9\-]+|\$\(ARCH\))')
            # The full-span match check rejects tokens with trailing garbage.
            if p.match(self.__Token) and p.match(self.__Token).span()[1] == len(self.__Token):
                FfsInfObj.KeyStringList.append(self.__Token)
                if not self.__IsToken(","):
                    return
            else:
                # Not a key string: push the token back for the caller.
                self.__UndoToken()
                return

            while self.__GetNextToken():
                if not p.match(self.__Token):
                    raise Warning("expected KeyString \"Target_Tag_Arch\"", self.FileName, self.CurrentLineNumber)
                FfsInfObj.KeyStringList.append(self.__Token)

                if not self.__IsToken(","):
                    break
    ## __GetFileStatement() method
    #
    #   Get FILE statements
    #
    #   @param  self        The object pointer
    #   @param  Obj         for whom FILE statement is got
    #   @param  ForCapsule  True when parsing inside a [Capsule.] section
    #   @param  MacroDict   dictionary used to replace macro
    #   @retval True        Successfully find FILE statement
    #   @retval False       Not able to find FILE statement
    #
    def __GetFileStatement(self, Obj, ForCapsule = False, MacroDict = {}):
        if not self.__IsKeyword( "FILE"):
            return False

        if not self.__GetNextWord():
            raise Warning("expected FFS type", self.FileName, self.CurrentLineNumber)

        # "FILE DATA = ..." in a capsule is handled by __ParseRawFileStatement:
        # rewind both the 'DATA' word and the 'FILE' keyword and bow out.
        if ForCapsule and self.__Token == 'DATA':
            self.__UndoToken()
            self.__UndoToken()
            return False

        FfsFileObj = FfsFileStatement.FileStatement()
        FfsFileObj.FvFileType = self.__Token

        if not self.__IsToken( "="):
            raise Warning("expected '='", self.FileName, self.CurrentLineNumber)

        # The file name may be a literal GUID, a word, or a PCD(Guid.Name)
        # reference that is normalized into a synthetic token below.
        if not self.__GetNextGuid():
            if not self.__GetNextWord():
                raise Warning("expected File GUID", self.FileName, self.CurrentLineNumber)
            if self.__Token == 'PCD':
                if not self.__IsToken( "("):
                    raise Warning("expected '('", self.FileName, self.CurrentLineNumber)
                PcdPair = self.__GetNextPcdName()
                if not self.__IsToken( ")"):
                    raise Warning("expected ')'", self.FileName, self.CurrentLineNumber)
                self.__Token = 'PCD('+PcdPair[1]+'.'+PcdPair[0]+')'

        FfsFileObj.NameGuid = self.__Token

        # Parse options and the "{ ... }" body; pass a copy so the callee
        # cannot mutate our macro dictionary.
        self.__GetFilePart( FfsFileObj, MacroDict.copy())

        if ForCapsule:
            capsuleFfs = CapsuleData.CapsuleFfs()
            capsuleFfs.Ffs = FfsFileObj
            Obj.CapsuleDataList.append(capsuleFfs)
        else:
            Obj.FfsList.append(FfsFileObj)

        return True
## __FileCouldHaveRelocFlag() method
#
# Check whether reloc strip flag can be set for a file type.
#
# @param self The object pointer
# @param FileType The file type to check with
# @retval True This type could have relocation strip flag
# @retval False No way to have it
#
def __FileCouldHaveRelocFlag (self, FileType):
if FileType in ('SEC', 'PEI_CORE', 'PEIM', 'PEI_DXE_COMBO'):
return True
else:
return False
## __SectionCouldHaveRelocFlag() method
#
# Check whether reloc strip flag can be set for a section type.
#
# @param self The object pointer
# @param SectionType The section type to check with
# @retval True This type could have relocation strip flag
# @retval False No way to have it
#
def __SectionCouldHaveRelocFlag (self, SectionType):
if SectionType in ('TE', 'PE32'):
return True
else:
return False
    ## __GetFilePart() method
    #
    #   Get components for FILE statement
    #
    #   @param  self        The object pointer
    #   @param  FfsFileObj  for whom component is got
    #   @param  MacroDict   dictionary used to replace macro
    #
    def __GetFilePart(self, FfsFileObj, MacroDict = {}):
        self.__GetFileOpts( FfsFileObj)
        # A reloc strip flag may appear between the options and the '{', so
        # the brace is probed twice: once before and once after the flag.
        if not self.__IsToken("{"):
            if self.__IsKeyword('RELOCS_STRIPPED') or self.__IsKeyword('RELOCS_RETAINED'):
                if self.__FileCouldHaveRelocFlag(FfsFileObj.FvFileType):
                    if self.__Token == 'RELOCS_STRIPPED':
                        FfsFileObj.KeepReloc = False
                    else:
                        FfsFileObj.KeepReloc = True
                else:
                    # NOTE(review): message format jams the line number onto
                    # "flag%d" with no separator — confirm before changing.
                    raise Warning("File type %s could not have reloc strip flag%d" % (FfsFileObj.FvFileType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)

            if not self.__IsToken("{"):
                raise Warning("expected '{'", self.FileName, self.CurrentLineNumber)

        if not self.__GetNextToken():
            raise Warning("expected File name or section data", self.FileName, self.CurrentLineNumber)

        # The body is either a named FV, a named FD, section statements, or a
        # plain file path.
        if self.__Token == "FV":
            if not self.__IsToken( "="):
                raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
            if not self.__GetNextToken():
                raise Warning("expected FV name", self.FileName, self.CurrentLineNumber)
            FfsFileObj.FvName = self.__Token

        elif self.__Token == "FD":
            if not self.__IsToken( "="):
                raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
            if not self.__GetNextToken():
                raise Warning("expected FD name", self.FileName, self.CurrentLineNumber)
            FfsFileObj.FdName = self.__Token

        elif self.__Token in ("DEFINE", "APRIORI", "SECTION"):
            # Section-style body: push the keyword back and parse sections.
            self.__UndoToken()
            self.__GetSectionData( FfsFileObj, MacroDict)
        else:
            FfsFileObj.CurrentLineNum = self.CurrentLineNumber
            FfsFileObj.CurrentLineContent = self.__CurrentLine()
            FfsFileObj.FileName = self.__Token.replace('$(SPACE)', ' ')
            self.__VerifyFile(FfsFileObj.FileName)

        if not self.__IsToken( "}"):
            raise Warning("expected '}'", self.FileName, self.CurrentLineNumber)
    ## __GetFileOpts() method
    #
    #   Get options (KeyString list, FIXED, CHECKSUM, Align) for FILE statement
    #
    #   @param  self        The object pointer
    #   @param  FfsFileObj  for whom options is got
    #
    def __GetFileOpts(self, FfsFileObj):
        # Optional comma-separated Target_Tag_Arch key strings come first.
        if self.__GetNextToken():
            Pattern = re.compile(r'([a-zA-Z0-9\-]+|\$\(TARGET\)|\*)_([a-zA-Z0-9\-]+|\$\(TOOL_CHAIN_TAG\)|\*)_([a-zA-Z0-9\-]+|\$\(ARCH\)|\*)')
            if Pattern.match(self.__Token):
                FfsFileObj.KeyStringList.append(self.__Token)
                if self.__IsToken(","):
                    while self.__GetNextToken():
                        if not Pattern.match(self.__Token):
                            raise Warning("expected KeyString \"Target_Tag_Arch\"", self.FileName, self.CurrentLineNumber)
                        FfsFileObj.KeyStringList.append(self.__Token)

                        if not self.__IsToken(","):
                            break

            else:
                # Not a key string: return the token to the stream.
                self.__UndoToken()

        if self.__IsKeyword( "FIXED", True):
            FfsFileObj.Fixed = True

        if self.__IsKeyword( "CHECKSUM", True):
            FfsFileObj.CheckSum = True

        if self.__GetAlignment():
            if self.__Token not in ("Auto", "8", "16", "32", "64", "128", "512", "1K", "4K", "32K" ,"64K"):
                raise Warning("Incorrect alignment '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
            #For FFS, Auto is default option same to ""
            if not self.__Token == "Auto":
                FfsFileObj.Alignment = self.__Token
## __GetAlignment() method
#
# Return the alignment value
#
# @param self The object pointer
# @retval True Successfully find alignment
# @retval False Not able to find alignment
#
def __GetAlignment(self):
if self.__IsKeyword( "Align", True):
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected alignment value", self.FileName, self.CurrentLineNumber)
return True
return False
    ## __GetSectionData() method
    #
    #   Get section data for FILE statement
    #
    #   @param  self        The object pointer
    #   @param  FfsFileObj  for whom section is got
    #   @param  MacroDict   dictionary used to replace macro
    #
    def __GetSectionData(self, FfsFileObj, MacroDict = {}):
        Dict = {}
        Dict.update(MacroDict)

        self.__GetDefineStatements(FfsFileObj)

        Dict.update(FfsFileObj.DefineVarDict)
        # Up to two APRIORI sections (one PEI, one DXE) may appear, hence the
        # deliberate double call.
        self.__GetAprioriSection(FfsFileObj, Dict.copy())
        self.__GetAprioriSection(FfsFileObj, Dict.copy())

        # Consume leaf and encapsulation sections until neither matches.
        while True:
            IsLeafSection = self.__GetLeafSection(FfsFileObj, Dict)
            IsEncapSection = self.__GetEncapsulationSec(FfsFileObj)
            if not IsLeafSection and not IsEncapSection:
                break
## __GetLeafSection() method
#
# Get leaf section for Obj
#
# @param self The object pointer
# @param Obj for whom leaf section is got
# @param MacroDict dictionary used to replace macro
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def __GetLeafSection(self, Obj, MacroDict = {}):
OldPos = self.GetFileBufferPos()
if not self.__IsKeyword( "SECTION"):
if len(Obj.SectionList) == 0:
raise Warning("expected SECTION", self.FileName, self.CurrentLineNumber)
else:
return False
AlignValue = None
if self.__GetAlignment():
if self.__Token not in ("Auto", "8", "16", "32", "64", "128", "512", "1K", "4K", "32K" ,"64K"):
raise Warning("Incorrect alignment '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
AlignValue = self.__Token
BuildNum = None
if self.__IsKeyword( "BUILD_NUM"):
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected Build number value", self.FileName, self.CurrentLineNumber)
BuildNum = self.__Token
if self.__IsKeyword( "VERSION"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected version", self.FileName, self.CurrentLineNumber)
VerSectionObj = VerSection.VerSection()
VerSectionObj.Alignment = AlignValue
VerSectionObj.BuildNum = BuildNum
if self.__GetStringData():
VerSectionObj.StringData = self.__Token
else:
VerSectionObj.FileName = self.__Token
Obj.SectionList.append(VerSectionObj)
elif self.__IsKeyword( "UI"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected UI", self.FileName, self.CurrentLineNumber)
UiSectionObj = UiSection.UiSection()
UiSectionObj.Alignment = AlignValue
if self.__GetStringData():
UiSectionObj.StringData = self.__Token
else:
UiSectionObj.FileName = self.__Token
Obj.SectionList.append(UiSectionObj)
elif self.__IsKeyword( "FV_IMAGE"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected FV name or FV file path", self.FileName, self.CurrentLineNumber)
FvName = self.__Token
FvObj = None
if self.__IsToken( "{"):
FvObj = Fv.FV()
FvObj.UiFvName = FvName.upper()
self.__GetDefineStatements(FvObj)
MacroDict.update(FvObj.DefineVarDict)
self.__GetBlockStatement(FvObj)
self.__GetSetStatements(FvObj)
self.__GetFvAlignment(FvObj)
self.__GetFvAttributes(FvObj)
self.__GetAprioriSection(FvObj, MacroDict.copy())
self.__GetAprioriSection(FvObj, MacroDict.copy())
while True:
IsInf = self.__GetInfStatement(FvObj, MacroDict.copy())
IsFile = self.__GetFileStatement(FvObj, MacroDict.copy())
if not IsInf and not IsFile:
break
if not self.__IsToken( "}"):
raise Warning("expected '}'", self.FileName, self.CurrentLineNumber)
FvImageSectionObj = FvImageSection.FvImageSection()
FvImageSectionObj.Alignment = AlignValue
if FvObj != None:
FvImageSectionObj.Fv = FvObj
FvImageSectionObj.FvName = None
else:
FvImageSectionObj.FvName = FvName.upper()
FvImageSectionObj.FvFileName = FvName
Obj.SectionList.append(FvImageSectionObj)
elif self.__IsKeyword("PEI_DEPEX_EXP") or self.__IsKeyword("DXE_DEPEX_EXP") or self.__IsKeyword("SMM_DEPEX_EXP"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
DepexSectionObj = DepexSection.DepexSection()
DepexSectionObj.Alignment = AlignValue
DepexSectionObj.DepexType = self.__Token
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "{"):
raise Warning("expected '{'", self.FileName, self.CurrentLineNumber)
if not self.__SkipToToken( "}"):
raise Warning("expected Depex expression ending '}'", self.FileName, self.CurrentLineNumber)
DepexSectionObj.Expression = self.__SkippedChars.rstrip('}')
Obj.SectionList.append(DepexSectionObj)
else:
if not self.__GetNextWord():
raise Warning("expected section type", self.FileName, self.CurrentLineNumber)
# Encapsulation section appear, UndoToken and return
if self.__Token == "COMPRESS" or self.__Token == "GUIDED":
self.SetFileBufferPos(OldPos)
return False
if self.__Token not in ("COMPAT16", "PE32", "PIC", "TE", "FV_IMAGE", "RAW", "DXE_DEPEX",\
"UI", "VERSION", "PEI_DEPEX", "SUBTYPE_GUID", "SMM_DEPEX"):
raise Warning("Unknown section type '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
if AlignValue == 'Auto'and (not self.__Token == 'PE32') and (not self.__Token == 'TE'):
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
# DataSection
DataSectionObj = DataSection.DataSection()
DataSectionObj.Alignment = AlignValue
DataSectionObj.SecType = self.__Token
if self.__IsKeyword('RELOCS_STRIPPED') or self.__IsKeyword('RELOCS_RETAINED'):
if self.__FileCouldHaveRelocFlag(Obj.FvFileType) and self.__SectionCouldHaveRelocFlag(DataSectionObj.SecType):
if self.__Token == 'RELOCS_STRIPPED':
DataSectionObj.KeepReloc = False
else:
DataSectionObj.KeepReloc = True
else:
raise Warning("File type %s, section type %s, could not have reloc strip flag%d" % (Obj.FvFileType, DataSectionObj.SecType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if self.__IsToken("="):
if not self.__GetNextToken():
raise Warning("expected section file path", self.FileName, self.CurrentLineNumber)
DataSectionObj.SectFileName = self.__Token
self.__VerifyFile(DataSectionObj.SectFileName)
else:
if not self.__GetCglSection(DataSectionObj):
return False
Obj.SectionList.append(DataSectionObj)
return True
## __VerifyFile
#
# Check if file exists or not:
# If current phase if GenFds, the file must exist;
# If current phase is AutoGen and the file is not in $(OUTPUT_DIRECTORY), the file must exist
# @param FileName: File path to be verified.
#
def __VerifyFile(self, FileName):
if FileName.replace('$(WORKSPACE)', '').find('$') != -1:
return
if not GlobalData.gAutoGenPhase or not self.__GetMacroValue("OUTPUT_DIRECTORY") in FileName:
ErrorCode, ErrorInfo = PathClass(NormPath(FileName), GenFdsGlobalVariable.WorkSpaceDir).Validate()
if ErrorCode != 0:
EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
    ## __GetCglSection() method
    #
    #   Get compressed or GUIDed section for Obj
    #
    #   @param  self        The object pointer
    #   @param  Obj         for whom leaf section is got
    #   @param  AlignValue  alignment value for complex section
    #   @retval True        Successfully find section statement
    #   @retval False       Not able to find section statement
    #
    def __GetCglSection(self, Obj, AlignValue = None):
        if self.__IsKeyword( "COMPRESS"):
            # NOTE: local name shadows the 'type' builtin; kept for fidelity.
            type = "PI_STD"
            # Compression scheme defaults to PI_STD when not given explicitly.
            if self.__IsKeyword("PI_STD") or self.__IsKeyword("PI_NONE"):
                type = self.__Token

            if not self.__IsToken("{"):
                raise Warning("expected '{'", self.FileName, self.CurrentLineNumber)

            CompressSectionObj = CompressSection.CompressSection()
            CompressSectionObj.Alignment = AlignValue
            CompressSectionObj.CompType = type
            # Recursive sections...
            while True:
                IsLeafSection = self.__GetLeafSection(CompressSectionObj)
                IsEncapSection = self.__GetEncapsulationSec(CompressSectionObj)
                if not IsLeafSection and not IsEncapSection:
                    break


            if not self.__IsToken( "}"):
                raise Warning("expected '}'", self.FileName, self.CurrentLineNumber)
            Obj.SectionList.append(CompressSectionObj)

#            else:
#               raise Warning("Compress type not known")

            return True

        elif self.__IsKeyword( "GUIDED"):
            GuidValue = None
            if self.__GetNextGuid():
                GuidValue = self.__Token

            AttribDict = self.__GetGuidAttrib()
            if not self.__IsToken("{"):
                raise Warning("expected '{'", self.FileName, self.CurrentLineNumber)
            GuidSectionObj = GuidSection.GuidSection()
            GuidSectionObj.Alignment = AlignValue
            GuidSectionObj.NameGuid = GuidValue
            GuidSectionObj.SectionType = "GUIDED"
            GuidSectionObj.ProcessRequired = AttribDict["PROCESSING_REQUIRED"]
            GuidSectionObj.AuthStatusValid = AttribDict["AUTH_STATUS_VALID"]
            GuidSectionObj.ExtraHeaderSize = AttribDict["EXTRA_HEADER_SIZE"]
            # Recursive sections...
            while True:
                IsLeafSection = self.__GetLeafSection(GuidSectionObj)
                IsEncapSection = self.__GetEncapsulationSec(GuidSectionObj)
                if not IsLeafSection and not IsEncapSection:
                    break

            if not self.__IsToken( "}"):
                raise Warning("expected '}'", self.FileName, self.CurrentLineNumber)
            Obj.SectionList.append(GuidSectionObj)

            return True

        return False
## __GetGuidAttri() method
#
# Get attributes for GUID section
#
# @param self The object pointer
# @retval AttribDict Dictionary of key-value pair of section attributes
#
def __GetGuidAttrib(self):
AttribDict = {}
AttribDict["PROCESSING_REQUIRED"] = "NONE"
AttribDict["AUTH_STATUS_VALID"] = "NONE"
AttribDict["EXTRA_HEADER_SIZE"] = -1
while self.__IsKeyword("PROCESSING_REQUIRED") or self.__IsKeyword("AUTH_STATUS_VALID") \
or self.__IsKeyword("EXTRA_HEADER_SIZE"):
AttribKey = self.__Token
if not self.__IsToken("="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected TRUE(1)/FALSE(0)/Number", self.FileName, self.CurrentLineNumber)
elif AttribKey == "EXTRA_HEADER_SIZE":
Base = 10
if self.__Token[0:2].upper() == "0X":
Base = 16
try:
AttribDict[AttribKey] = int(self.__Token, Base)
continue
except ValueError:
raise Warning("expected Number", self.FileName, self.CurrentLineNumber)
elif self.__Token.upper() not in ("TRUE", "FALSE", "1", "0"):
raise Warning("expected TRUE/FALSE (1/0)", self.FileName, self.CurrentLineNumber)
AttribDict[AttribKey] = self.__Token
return AttribDict
## __GetEncapsulationSec() method
#
# Get encapsulation section for FILE
#
# @param self The object pointer
# @param FfsFile for whom section is got
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def __GetEncapsulationSec(self, FfsFileObj):
OldPos = self.GetFileBufferPos()
if not self.__IsKeyword( "SECTION"):
if len(FfsFileObj.SectionList) == 0:
raise Warning("expected SECTION", self.FileName, self.CurrentLineNumber)
else:
return False
AlignValue = None
if self.__GetAlignment():
if self.__Token not in ("8", "16", "32", "64", "128", "512", "1K", "4K", "32K" ,"64K"):
raise Warning("Incorrect alignment '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
AlignValue = self.__Token
if not self.__GetCglSection(FfsFileObj, AlignValue):
self.SetFileBufferPos(OldPos)
return False
else:
return True
    ## __GetFmp() method
    #
    #   Parse one [FmpPayload.<UiName>] section into self.Profile.FmpPayloadDict.
    #
    #   @param  self        The object pointer
    #   @retval True        Successfully parsed an FMP payload section
    #   @retval False       Next section is not an FMP payload section
    #
    def __GetFmp(self):
        if not self.__GetNextToken():
            return False
        S = self.__Token.upper()
        # Anything other than [FMPPAYLOAD.] must be one of the later section
        # kinds, otherwise the section ordering is wrong.
        if not S.startswith("[FMPPAYLOAD."):
            if not S.startswith("[CAPSULE.") and not S.startswith("[VTF.") and not S.startswith("[RULE.") and not S.startswith("[OPTIONROM."):
                raise Warning("Unknown section or section appear sequence error (The correct sequence should be [FD.], [FV.], [FmpPayload.], [Capsule.], [VTF.], [Rule.], [OptionRom.])", self.FileName, self.CurrentLineNumber)
            self.__UndoToken()
            return False

        self.__UndoToken()
        self.__SkipToToken("[FMPPAYLOAD.", True)
        FmpUiName = self.__GetUiName().upper()
        if FmpUiName in self.Profile.FmpPayloadDict:
            raise Warning("Duplicated FMP UI name found: %s" % FmpUiName, self.FileName, self.CurrentLineNumber)

        FmpData = CapsuleData.CapsulePayload()
        FmpData.UiName = FmpUiName
        if not self.__IsToken( "]"):
            raise Warning("expected ']'", self.FileName, self.CurrentLineNumber)
        if not self.__GetNextToken():
            raise Warning("The FMP payload section is empty!", self.FileName, self.CurrentLineNumber)
        # Each keyword may appear once; removal tracks what is still missing.
        FmpKeyList = ['IMAGE_HEADER_INIT_VERSION', 'IMAGE_TYPE_ID', 'IMAGE_INDEX', 'HARDWARE_INSTANCE']
        while self.__Token in FmpKeyList:
            Name = self.__Token
            FmpKeyList.remove(Name)
            if not self.__IsToken("="):
                raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
            if Name == 'IMAGE_TYPE_ID':
                if not self.__GetNextGuid():
                    raise Warning("expected GUID value for IMAGE_TYPE_ID", self.FileName, self.CurrentLineNumber)
                FmpData.ImageTypeId = self.__Token
            else:
                if not self.__GetNextToken():
                    raise Warning("expected value of %s" % Name, self.FileName, self.CurrentLineNumber)
                Value = self.__Token
                if Name == 'IMAGE_HEADER_INIT_VERSION':
                    FmpData.Version = Value
                elif Name == 'IMAGE_INDEX':
                    FmpData.ImageIndex = Value
                elif Name == 'HARDWARE_INSTANCE':
                    FmpData.HardwareInstance = Value
            if not self.__GetNextToken():
                break
        else:
            # while/else: runs when the loop ended because the token was not a
            # keyword (no break) — push the non-keyword token back.
            self.__UndoToken()

        if FmpKeyList:
            raise Warning("Missing keywords %s in FMP payload section" % ', '.join(FmpKeyList), self.FileName, self.CurrentLineNumber)
        # A mandatory image file, optionally followed by a vendor code file.
        ImageFile = self.__ParseRawFileStatement()
        if not ImageFile:
            raise Warning("Missing image file in FMP payload section", self.FileName, self.CurrentLineNumber)
        FmpData.ImageFile = ImageFile
        VendorCodeFile = self.__ParseRawFileStatement()
        if VendorCodeFile:
            FmpData.VendorCodeFile = VendorCodeFile
        self.Profile.FmpPayloadDict[FmpUiName] = FmpData
        return True
    ## __GetCapsule() method
    #
    #   Get capsule section contents and store its data into capsule list of self.Profile
    #
    #   @param  self        The object pointer
    #   @retval True        Successfully find a capsule
    #   @retval False       Not able to find a capsule
    #
    def __GetCapsule(self):
        if not self.__GetNextToken():
            return False

        S = self.__Token.upper()
        # A bracketed token that is not [CAPSULE.] must be one of the later
        # section kinds, otherwise the section ordering is wrong.
        if S.startswith("[") and not S.startswith("[CAPSULE."):
            if not S.startswith("[VTF.") and not S.startswith("[RULE.") and not S.startswith("[OPTIONROM."):
                raise Warning("Unknown section or section appear sequence error (The correct sequence should be [FD.], [FV.], [Capsule.], [VTF.], [Rule.], [OptionRom.])", self.FileName, self.CurrentLineNumber)
            self.__UndoToken()
            return False

        self.__UndoToken()
        if not self.__IsToken("[CAPSULE.", True):
            FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
            #print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
            #        % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
            raise Warning("expected [Capsule.]", self.FileName, self.CurrentLineNumber)

        CapsuleObj = Capsule.Capsule()

        CapsuleName = self.__GetUiName()
        if not CapsuleName:
            raise Warning("expected capsule name", self.FileName, self.CurrentLineNumber)

        CapsuleObj.UiCapsuleName = CapsuleName.upper()

        if not self.__IsToken( "]"):
            raise Warning("expected ']'", self.FileName, self.CurrentLineNumber)

        # Optional output file override for this capsule.
        if self.__IsKeyword("CREATE_FILE"):
            if not self.__IsToken( "="):
                raise Warning("expected '='", self.FileName, self.CurrentLineNumber)

            if not self.__GetNextToken():
                raise Warning("expected file name", self.FileName, self.CurrentLineNumber)

            CapsuleObj.CreateFile = self.__Token

        self.__GetCapsuleStatements(CapsuleObj)
        self.Profile.CapsuleDict[CapsuleObj.UiCapsuleName] = CapsuleObj
        return True
## __GetCapsuleStatements() method
#
# Get statements for capsule
#
# @param self The object pointer
# @param Obj for whom statements are got
#
def __GetCapsuleStatements(self, Obj):
self.__GetCapsuleTokens(Obj)
self.__GetDefineStatements(Obj)
self.__GetSetStatements(Obj)
self.__GetCapsuleData(Obj)
    ## __GetCapsuleTokens() method
    #
    #   Get token statements for capsule
    #
    #   @param  self    The object pointer
    #   @param  Obj     for whom token statements are got
    #
    def __GetCapsuleTokens(self, Obj):
        if not self.__GetNextToken():
            return False
        while self.__Token in ("CAPSULE_GUID", "CAPSULE_HEADER_SIZE", "CAPSULE_FLAGS", "OEM_CAPSULE_FLAGS", "CAPSULE_HEADER_INIT_VERSION"):
            Name = self.__Token.strip()
            if not self.__IsToken("="):
                raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
            if not self.__GetNextToken():
                raise Warning("expected value", self.FileName, self.CurrentLineNumber)
            # CAPSULE_FLAGS accepts a comma-separated list of known flags.
            if Name == 'CAPSULE_FLAGS':
                if not self.__Token in ("PersistAcrossReset", "PopulateSystemTable", "InitiateReset"):
                    raise Warning("expected PersistAcrossReset, PopulateSystemTable, or InitiateReset", self.FileName, self.CurrentLineNumber)
                Value = self.__Token.strip()
                while self.__IsToken(","):
                    Value += ','
                    if not self.__GetNextToken():
                        raise Warning("expected value", self.FileName, self.CurrentLineNumber)
                    if not self.__Token in ("PersistAcrossReset", "PopulateSystemTable", "InitiateReset"):
                        raise Warning("expected PersistAcrossReset, PopulateSystemTable, or InitiateReset", self.FileName, self.CurrentLineNumber)
                    Value += self.__Token.strip()
            elif Name == 'OEM_CAPSULE_FLAGS':
                Value = self.__Token.strip()
                if not Value.upper().startswith('0X'):
                    raise Warning("expected hex value between 0x0000 and 0xFFFF", self.FileName, self.CurrentLineNumber)
                try:
                    Value = int(Value, 0)
                except ValueError:
                    raise Warning("expected hex value between 0x0000 and 0xFFFF", self.FileName, self.CurrentLineNumber)
                if not 0x0000 <= Value <= 0xFFFF:
                    raise Warning("expected hex value between 0x0000 and 0xFFFF", self.FileName, self.CurrentLineNumber)
                # Deliberately store the original hex string, not the parsed
                # int — the int conversion above is validation only.
                Value = self.__Token.strip()
            else:
                Value = self.__Token.strip()
            Obj.TokensDict[Name] = Value
            if not self.__GetNextToken():
                return False
        # Token was not a capsule keyword: push it back for the next parser.
        self.__UndoToken()
## __GetCapsuleData() method
#
# Get capsule data for capsule
#
# @param self The object pointer
# @param Obj for whom capsule data are got
#
def __GetCapsuleData(self, Obj):
while True:
IsInf = self.__GetInfStatement(Obj, True)
IsFile = self.__GetFileStatement(Obj, True)
IsFv = self.__GetFvStatement(Obj)
IsFd = self.__GetFdStatement(Obj)
IsAnyFile = self.__GetAnyFileStatement(Obj)
IsAfile = self.__GetAfileStatement(Obj)
IsFmp = self.__GetFmpStatement(Obj)
if not (IsInf or IsFile or IsFv or IsFd or IsAnyFile or IsAfile or IsFmp):
break
## __GetFvStatement() method
#
# Get FV for capsule
#
# @param self The object pointer
# @param CapsuleObj for whom FV is got
# @retval True Successfully find a FV statement
# @retval False Not able to find a FV statement
#
def __GetFvStatement(self, CapsuleObj):
if not self.__IsKeyword("FV"):
return False
if not self.__IsToken("="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected FV name", self.FileName, self.CurrentLineNumber)
if self.__Token.upper() not in self.Profile.FvDict.keys():
raise Warning("FV name does not exist", self.FileName, self.CurrentLineNumber)
CapsuleFv = CapsuleData.CapsuleFv()
CapsuleFv.FvName = self.__Token
CapsuleObj.CapsuleDataList.append(CapsuleFv)
return True
## __GetFdStatement() method
#
# Get FD for capsule
#
# @param self The object pointer
# @param CapsuleObj for whom FD is got
# @retval True Successfully find a FD statement
# @retval False Not able to find a FD statement
#
def __GetFdStatement(self, CapsuleObj):
if not self.__IsKeyword("FD"):
return False
if not self.__IsToken("="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected FD name", self.FileName, self.CurrentLineNumber)
if self.__Token.upper() not in self.Profile.FdDict.keys():
raise Warning("FD name does not exist", self.FileName, self.CurrentLineNumber)
CapsuleFd = CapsuleData.CapsuleFd()
CapsuleFd.FdName = self.__Token
CapsuleObj.CapsuleDataList.append(CapsuleFd)
return True
def __GetFmpStatement(self, CapsuleObj):
if not self.__IsKeyword("FMP"):
return False
if not self.__IsKeyword("PAYLOAD"):
self.__UndoToken()
return False
if not self.__IsToken("="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected payload name after FMP PAYLOAD =", self.FileName, self.CurrentLineNumber)
Payload = self.__Token.upper()
if Payload not in self.Profile.FmpPayloadDict:
raise Warning("This FMP Payload does not exist: %s" % self.__Token, self.FileName, self.CurrentLineNumber)
CapsuleObj.FmpPayloadList.append(self.Profile.FmpPayloadDict[Payload])
return True
def __ParseRawFileStatement(self):
if not self.__IsKeyword("FILE"):
return None
if not self.__IsKeyword("DATA"):
self.__UndoToken()
return None
if not self.__IsToken("="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected File name", self.FileName, self.CurrentLineNumber)
AnyFileName = self.__Token
AnyFileName = GenFdsGlobalVariable.ReplaceWorkspaceMacro(AnyFileName)
if not os.path.exists(AnyFileName):
raise Warning("File %s not exists"%AnyFileName, self.FileName, self.CurrentLineNumber)
return AnyFileName
## __GetAnyFileStatement() method
#
# Get AnyFile for capsule
#
# @param self The object pointer
# @param CapsuleObj for whom AnyFile is got
# @retval True Successfully find a Anyfile statement
# @retval False Not able to find a AnyFile statement
#
def __GetAnyFileStatement(self, CapsuleObj):
AnyFileName = self.__ParseRawFileStatement()
if not AnyFileName:
return False
CapsuleAnyFile = CapsuleData.CapsuleAnyFile()
CapsuleAnyFile.FileName = AnyFileName
CapsuleObj.CapsuleDataList.append(CapsuleAnyFile)
return True
## __GetAfileStatement() method
#
# Get Afile for capsule
#
# @param self The object pointer
# @param CapsuleObj for whom Afile is got
# @retval True Successfully find a Afile statement
# @retval False Not able to find a Afile statement
#
def __GetAfileStatement(self, CapsuleObj):
if not self.__IsKeyword("APPEND"):
return False
if not self.__IsToken("="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected Afile name", self.FileName, self.CurrentLineNumber)
AfileName = self.__Token
AfileBaseName = os.path.basename(AfileName)
if os.path.splitext(AfileBaseName)[1] not in [".bin",".BIN",".Bin",".dat",".DAT",".Dat",".data",".DATA",".Data"]:
raise Warning('invalid binary file type, should be one of "bin","BIN","Bin","dat","DAT","Dat","data","DATA","Data"', \
self.FileName, self.CurrentLineNumber)
if not os.path.isabs(AfileName):
AfileName = GenFdsGlobalVariable.ReplaceWorkspaceMacro(AfileName)
self.__VerifyFile(AfileName)
else:
if not os.path.exists(AfileName):
raise Warning('%s does not exist' % AfileName, self.FileName, self.CurrentLineNumber)
else:
pass
CapsuleAfile = CapsuleData.CapsuleAfile()
CapsuleAfile.FileName = AfileName
CapsuleObj.CapsuleDataList.append(CapsuleAfile)
return True
## __GetRule() method
#
# Get Rule section contents and store its data into rule list of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a Rule
# @retval False Not able to find a Rule
#
    def __GetRule(self):
        """Parse one [Rule.Arch.ModuleType[.TemplateName]] section.

        The parsed rule object is stored in self.Profile.RuleDict under the
        key 'RULE.<ARCH>.<MODULETYPE>[.<TEMPLATENAME>]' (all upper case).

        @retval True   a Rule section was successfully parsed
        @retval False  the next token does not begin a Rule section
        @raises Warning  on any syntax error
        """
        if not self.__GetNextToken():
            return False
        S = self.__Token.upper()
        # A '[' token that is not [RULE. means the Rule area has ended; only
        # [OptionRom.] may legally follow it in the section ordering.
        if S.startswith("[") and not S.startswith("[RULE."):
            if not S.startswith("[OPTIONROM."):
                raise Warning("Unknown section or section appear sequence error (The correct sequence should be [FD.], [FV.], [Capsule.], [VTF.], [Rule.], [OptionRom.])", self.FileName, self.CurrentLineNumber)
            self.__UndoToken()
            return False
        self.__UndoToken()
        if not self.__IsToken("[Rule.", True):
            FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
            #print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
            #    % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
            raise Warning("expected [Rule.]", self.FileName, self.CurrentLineNumber)
        if not self.__SkipToToken("."):
            raise Warning("expected '.'", self.FileName, self.CurrentLineNumber)
        # First name component: architecture.
        Arch = self.__SkippedChars.rstrip(".")
        if Arch.upper() not in ("IA32", "X64", "IPF", "EBC", "ARM", "AARCH64", "COMMON"):
            raise Warning("Unknown Arch '%s'" % Arch, self.FileName, self.CurrentLineNumber)
        # Second name component: module type (validated by the helper).
        ModuleType = self.__GetModuleType()
        TemplateName = ""
        # Optional third name component: a template name.
        if self.__IsToken("."):
            if not self.__GetNextWord():
                raise Warning("expected template name", self.FileName, self.CurrentLineNumber)
            TemplateName = self.__Token
        if not self.__IsToken( "]"):
            raise Warning("expected ']'", self.FileName, self.CurrentLineNumber)
        # Parse the FILE statement body and tag it with the header parts.
        RuleObj = self.__GetRuleFileStatements()
        RuleObj.Arch = Arch.upper()
        RuleObj.ModuleType = ModuleType
        RuleObj.TemplateName = TemplateName
        if TemplateName == '' :
            self.Profile.RuleDict['RULE' + \
                              '.' + \
                              Arch.upper() + \
                              '.' + \
                              ModuleType.upper() ] = RuleObj
        else :
            self.Profile.RuleDict['RULE' + \
                              '.' + \
                              Arch.upper() + \
                              '.' + \
                              ModuleType.upper() + \
                              '.' + \
                              TemplateName.upper() ] = RuleObj
#        self.Profile.RuleList.append(rule)
        return True
## __GetModuleType() method
#
# Return the module type
#
# @param self The object pointer
# @retval string module type
#
def __GetModuleType(self):
if not self.__GetNextWord():
raise Warning("expected Module type", self.FileName, self.CurrentLineNumber)
if self.__Token.upper() not in ("SEC", "PEI_CORE", "PEIM", "DXE_CORE", \
"DXE_DRIVER", "DXE_SAL_DRIVER", \
"DXE_SMM_DRIVER", "DXE_RUNTIME_DRIVER", \
"UEFI_DRIVER", "UEFI_APPLICATION", "USER_DEFINED", "DEFAULT", "BASE", \
"SECURITY_CORE", "COMBINED_PEIM_DRIVER", "PIC_PEIM", "RELOCATABLE_PEIM", \
"PE32_PEIM", "BS_DRIVER", "RT_DRIVER", "SAL_RT_DRIVER", "APPLICATION", "ACPITABLE", "SMM_CORE"):
raise Warning("Unknown Module type '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
return self.__Token
## __GetFileExtension() method
#
# Return the file extension
#
# @param self The object pointer
# @retval string file name extension
#
def __GetFileExtension(self):
if not self.__IsToken("."):
raise Warning("expected '.'", self.FileName, self.CurrentLineNumber)
Ext = ""
if self.__GetNextToken():
Pattern = re.compile(r'([a-zA-Z][a-zA-Z0-9]*)')
if Pattern.match(self.__Token):
Ext = self.__Token
return '.' + Ext
else:
raise Warning("Unknown file extension '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
else:
raise Warning("expected file extension", self.FileName, self.CurrentLineNumber)
## __GetRuleFileStatement() method
#
# Get rule contents
#
# @param self The object pointer
# @retval Rule Rule object
#
    def __GetRuleFileStatements(self):
        """Parse the FILE statement body of a Rule section.

        Grammar (informal): FILE <FfsType> = <NameGuid> [RELOCS_* flag]
        [KeyString list] [Fixed] [CheckSum] [Alignment], followed either by a
        brace-delimited list of sections (complex rule) or a single leaf
        section (simple rule).

        @retval Rule  a RuleComplexFile or RuleSimpleFile object
        @raises Warning  on any syntax error
        """
        if not self.__IsKeyword("FILE"):
            raise Warning("expected FILE", self.FileName, self.CurrentLineNumber)
        if not self.__GetNextWord():
            raise Warning("expected FFS type", self.FileName, self.CurrentLineNumber)
        Type = self.__Token.strip().upper()
        if Type not in ("RAW", "FREEFORM", "SEC", "PEI_CORE", "PEIM",\
                             "PEI_DXE_COMBO", "DRIVER", "DXE_CORE", "APPLICATION", "FV_IMAGE", "SMM", "SMM_CORE"):
            raise Warning("Unknown FV type '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
        if not self.__IsToken("="):
            raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
        # File GUID: the $(NAMED_GUID) macro, a plain word, or PCD(Guid.Name)
        # which is normalized into the token 'PCD(<space>.<name>)'.
        if not self.__IsKeyword("$(NAMED_GUID)"):
            if not self.__GetNextWord():
                raise Warning("expected $(NAMED_GUID)", self.FileName, self.CurrentLineNumber)
            if self.__Token == 'PCD':
                if not self.__IsToken( "("):
                    raise Warning("expected '('", self.FileName, self.CurrentLineNumber)
                PcdPair = self.__GetNextPcdName()
                if not self.__IsToken( ")"):
                    raise Warning("expected ')'", self.FileName, self.CurrentLineNumber)
                self.__Token = 'PCD('+PcdPair[1]+'.'+PcdPair[0]+')'
        NameGuid = self.__Token
        # Optional relocation flag; only legal for file types accepted by
        # __FileCouldHaveRelocFlag.  None means "flag not given".
        KeepReloc = None
        if self.__IsKeyword('RELOCS_STRIPPED') or self.__IsKeyword('RELOCS_RETAINED'):
            if self.__FileCouldHaveRelocFlag(Type):
                if self.__Token == 'RELOCS_STRIPPED':
                    KeepReloc = False
                else:
                    KeepReloc = True
            else:
                raise Warning("File type %s could not have reloc strip flag%d" % (Type, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
        # Optional comma-separated list of Target_Tag_Arch key strings.
        KeyStringList = []
        if self.__GetNextToken():
            Pattern = re.compile(r'([a-zA-Z0-9\-]+|\$\(TARGET\)|\*)_([a-zA-Z0-9\-]+|\$\(TOOL_CHAIN_TAG\)|\*)_([a-zA-Z0-9\-]+|\$\(ARCH\)|\*)')
            if Pattern.match(self.__Token):
                KeyStringList.append(self.__Token)
                if self.__IsToken(","):
                    while self.__GetNextToken():
                        if not Pattern.match(self.__Token):
                            raise Warning("expected KeyString \"Target_Tag_Arch\"", self.FileName, self.CurrentLineNumber)
                        KeyStringList.append(self.__Token)
                        if not self.__IsToken(","):
                            break
            else:
                # Not a key string: push the token back for later parsing.
                self.__UndoToken()
        Fixed = False
        if self.__IsKeyword("Fixed", True):
            Fixed = True
        CheckSum = False
        if self.__IsKeyword("CheckSum", True):
            CheckSum = True
        AlignValue = ""
        if self.__GetAlignment():
            if self.__Token not in ("Auto", "8", "16", "32", "64", "128", "512", "1K", "4K", "32K" ,"64K"):
                raise Warning("Incorrect alignment '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
            #For FFS, Auto is default option same to ""
            if not self.__Token == "Auto":
                AlignValue = self.__Token
        if self.__IsToken("{"):
            # Complex file rule expected: brace-delimited section list.
            Rule = RuleComplexFile.RuleComplexFile()
            Rule.FvFileType = Type
            Rule.NameGuid = NameGuid
            Rule.Alignment = AlignValue
            Rule.CheckSum = CheckSum
            Rule.Fixed = Fixed
            Rule.KeyStringList = KeyStringList
            if KeepReloc != None:
                Rule.KeepReloc = KeepReloc
            # Consume encapsulation and leaf sections until neither matches.
            while True:
                IsEncapsulate = self.__GetRuleEncapsulationSection(Rule)
                IsLeaf = self.__GetEfiSection(Rule)
                if not IsEncapsulate and not IsLeaf:
                    break
            if not self.__IsToken("}"):
                raise Warning("expected '}'", self.FileName, self.CurrentLineNumber)
            return Rule
        else:
            # Simple file rule expected: one leaf section description.
            if not self.__GetNextWord():
                raise Warning("expected leaf section type", self.FileName, self.CurrentLineNumber)
            SectionName = self.__Token
            if SectionName not in ("COMPAT16", "PE32", "PIC", "TE", "FV_IMAGE", "RAW", "DXE_DEPEX",\
                                    "UI", "PEI_DEPEX", "VERSION", "SUBTYPE_GUID", "SMM_DEPEX"):
                raise Warning("Unknown leaf section name '%s'" % SectionName, self.FileName, self.CurrentLineNumber)
            if self.__IsKeyword("Fixed", True):
                Fixed = True
            if self.__IsKeyword("CheckSum", True):
                CheckSum = True
            SectAlignment = ""
            if self.__GetAlignment():
                if self.__Token not in ("Auto", "8", "16", "32", "64", "128", "512", "1K", "4K", "32K" ,"64K"):
                    raise Warning("Incorrect alignment '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
                # Auto alignment is only meaningful for executable sections.
                if self.__Token == 'Auto' and (not SectionName == 'PE32') and (not SectionName == 'TE'):
                    raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
                SectAlignment = self.__Token
            # Either '| .ext' or an explicit file name must follow.
            Ext = None
            if self.__IsToken('|'):
                Ext = self.__GetFileExtension()
            elif not self.__GetNextToken():
                raise Warning("expected File name", self.FileName, self.CurrentLineNumber)
            Rule = RuleSimpleFile.RuleSimpleFile()
            Rule.SectionType = SectionName
            Rule.FvFileType = Type
            Rule.NameGuid = NameGuid
            Rule.Alignment = AlignValue
            Rule.SectAlignment = SectAlignment
            Rule.CheckSum = CheckSum
            Rule.Fixed = Fixed
            Rule.KeyStringList = KeyStringList
            if KeepReloc != None:
                Rule.KeepReloc = KeepReloc
            Rule.FileExtension = Ext
            Rule.FileName = self.__Token
            return Rule
## __GetEfiSection() method
#
# Get section list for Rule
#
# @param self The object pointer
# @param Obj for whom section is got
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
    def __GetEfiSection(self, Obj):
        """Parse one leaf (or FV_IMAGE) section statement inside a Rule and
        append the resulting section object to Obj.SectionList.

        @param Obj  object whose SectionList receives the parsed section
        @retval True   a section statement was parsed
        @retval False  the next word is not a known section name (token undone)
        @raises Warning  on any syntax error
        """
        # NOTE(review): OldPos is recorded but never used in this method.
        OldPos = self.GetFileBufferPos()
        if not self.__GetNextWord():
            return False
        SectionName = self.__Token
        if SectionName not in ("COMPAT16", "PE32", "PIC", "TE", "FV_IMAGE", "RAW", "DXE_DEPEX",\
                               "UI", "VERSION", "PEI_DEPEX", "GUID", "SMM_DEPEX"):
            self.__UndoToken()
            return False
        if SectionName == "FV_IMAGE":
            FvImageSectionObj = FvImageSection.FvImageSection()
            # An optional repeated FV_IMAGE keyword may precede the body.
            if self.__IsKeyword("FV_IMAGE"):
                pass
            if self.__IsToken( "{"):
                # Inline FV definition enclosed in braces.
                FvObj = Fv.FV()
                self.__GetDefineStatements(FvObj)
                self.__GetBlockStatement(FvObj)
                self.__GetSetStatements(FvObj)
                self.__GetFvAlignment(FvObj)
                self.__GetFvAttributes(FvObj)
                # Called twice on purpose: an FV may carry two apriori
                # sections (e.g. one PEI and one DXE).
                self.__GetAprioriSection(FvObj)
                self.__GetAprioriSection(FvObj)
                while True:
                    IsInf = self.__GetInfStatement(FvObj)
                    IsFile = self.__GetFileStatement(FvObj)
                    if not IsInf and not IsFile:
                        break
                if not self.__IsToken( "}"):
                    raise Warning("expected '}'", self.FileName, self.CurrentLineNumber)
                FvImageSectionObj.Fv = FvObj
                FvImageSectionObj.FvName = None
            else:
                # Reference form: FV <type> [alignment] [| .ext | filename].
                if not self.__IsKeyword("FV"):
                    raise Warning("expected 'FV'", self.FileName, self.CurrentLineNumber)
                FvImageSectionObj.FvFileType = self.__Token
                if self.__GetAlignment():
                    if self.__Token not in ("8", "16", "32", "64", "128", "512", "1K", "4K", "32K" ,"64K"):
                        raise Warning("Incorrect alignment '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
                    FvImageSectionObj.Alignment = self.__Token
                if self.__IsToken('|'):
                    FvImageSectionObj.FvFileExtension = self.__GetFileExtension()
                elif self.__GetNextToken():
                    # A section keyword or '}' here means there was no file
                    # name; push the token back.
                    if self.__Token not in ("}", "COMPAT16", "PE32", "PIC", "TE", "FV_IMAGE", "RAW", "DXE_DEPEX",\
                               "UI", "VERSION", "PEI_DEPEX", "GUID", "SMM_DEPEX"):
                        FvImageSectionObj.FvFileName = self.__Token
                    else:
                        self.__UndoToken()
                else:
                    raise Warning("expected FV file name", self.FileName, self.CurrentLineNumber)
            Obj.SectionList.append(FvImageSectionObj)
            return True
        EfiSectionObj = EfiSection.EfiSection()
        EfiSectionObj.SectionType = SectionName
        if not self.__GetNextToken():
            raise Warning("expected file type", self.FileName, self.CurrentLineNumber)
        if self.__Token == "STRING":
            # STRING = "..." form; only some section types may carry it.
            if not self.__RuleSectionCouldHaveString(EfiSectionObj.SectionType):
                raise Warning("%s section could NOT have string data%d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
            if not self.__IsToken('='):
                raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
            if not self.__GetNextToken():
                raise Warning("expected Quoted String", self.FileName, self.CurrentLineNumber)
            if self.__GetStringData():
                EfiSectionObj.StringData = self.__Token
            if self.__IsKeyword("BUILD_NUM"):
                if not self.__RuleSectionCouldHaveBuildNum(EfiSectionObj.SectionType):
                    raise Warning("%s section could NOT have BUILD_NUM%d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
                if not self.__IsToken("="):
                    raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
                if not self.__GetNextToken():
                    raise Warning("expected Build number", self.FileName, self.CurrentLineNumber)
                EfiSectionObj.BuildNum = self.__Token
        else:
            # Otherwise the token is the section's file type; validate the
            # section-type/file-type pairing.
            EfiSectionObj.FileType = self.__Token
            self.__CheckRuleSectionFileType(EfiSectionObj.SectionType, EfiSectionObj.FileType)
        if self.__IsKeyword("Optional"):
            if not self.__RuleSectionCouldBeOptional(EfiSectionObj.SectionType):
                raise Warning("%s section could NOT be optional%d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
            EfiSectionObj.Optional = True
            if self.__IsKeyword("BUILD_NUM"):
                if not self.__RuleSectionCouldHaveBuildNum(EfiSectionObj.SectionType):
                    raise Warning("%s section could NOT have BUILD_NUM%d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
                if not self.__IsToken("="):
                    raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
                if not self.__GetNextToken():
                    raise Warning("expected Build number", self.FileName, self.CurrentLineNumber)
                EfiSectionObj.BuildNum = self.__Token
        if self.__GetAlignment():
            if self.__Token not in ("Auto", "8", "16", "32", "64", "128", "512", "1K", "4K", "32K" ,"64K"):
                raise Warning("Incorrect alignment '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
            # Auto alignment is only meaningful for executable sections.
            if self.__Token == 'Auto' and (not SectionName == 'PE32') and (not SectionName == 'TE'):
                raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
            EfiSectionObj.Alignment = self.__Token
        if self.__IsKeyword('RELOCS_STRIPPED') or self.__IsKeyword('RELOCS_RETAINED'):
            if self.__SectionCouldHaveRelocFlag(EfiSectionObj.SectionType):
                if self.__Token == 'RELOCS_STRIPPED':
                    EfiSectionObj.KeepReloc = False
                else:
                    EfiSectionObj.KeepReloc = True
                # The section-level flag must agree with the Rule-level flag.
                if Obj.KeepReloc != None and Obj.KeepReloc != EfiSectionObj.KeepReloc:
                    raise Warning("Section type %s has reloc strip flag conflict with Rule" % EfiSectionObj.SectionType, self.FileName, self.CurrentLineNumber)
            else:
                raise Warning("Section type %s could not have reloc strip flag" % EfiSectionObj.SectionType, self.FileName, self.CurrentLineNumber)
        if self.__IsToken('|'):
            EfiSectionObj.FileExtension = self.__GetFileExtension()
        elif self.__GetNextToken():
            if self.__Token not in ("}", "COMPAT16", "PE32", "PIC", "TE", "FV_IMAGE", "RAW", "DXE_DEPEX",\
                       "UI", "VERSION", "PEI_DEPEX", "GUID", "SMM_DEPEX"):
                # A PCD(Guid.Name) expression may stand in for the file name;
                # it is normalized into a single 'PCD(...)' token.
                if self.__Token.startswith('PCD'):
                    self.__UndoToken()
                    self.__GetNextWord()
                    if self.__Token == 'PCD':
                        if not self.__IsToken( "("):
                            raise Warning("expected '('", self.FileName, self.CurrentLineNumber)
                        PcdPair = self.__GetNextPcdName()
                        if not self.__IsToken( ")"):
                            raise Warning("expected ')'", self.FileName, self.CurrentLineNumber)
                        self.__Token = 'PCD('+PcdPair[1]+'.'+PcdPair[0]+')'
                EfiSectionObj.FileName = self.__Token
            else:
                self.__UndoToken()
        else:
            raise Warning("expected section file name", self.FileName, self.CurrentLineNumber)
        Obj.SectionList.append(EfiSectionObj)
        return True
## __RuleSectionCouldBeOptional() method
#
# Get whether a section could be optional
#
# @param self The object pointer
# @param SectionType The section type to check
# @retval True section could be optional
# @retval False section never optional
#
def __RuleSectionCouldBeOptional(self, SectionType):
if SectionType in ("DXE_DEPEX", "UI", "VERSION", "PEI_DEPEX", "RAW", "SMM_DEPEX"):
return True
else:
return False
## __RuleSectionCouldHaveBuildNum() method
#
# Get whether a section could have build number information
#
# @param self The object pointer
# @param SectionType The section type to check
# @retval True section could have build number information
# @retval False section never have build number information
#
def __RuleSectionCouldHaveBuildNum(self, SectionType):
if SectionType in ("VERSION"):
return True
else:
return False
## __RuleSectionCouldHaveString() method
#
# Get whether a section could have string
#
# @param self The object pointer
# @param SectionType The section type to check
# @retval True section could have string
# @retval False section never have string
#
def __RuleSectionCouldHaveString(self, SectionType):
if SectionType in ("UI", "VERSION"):
return True
else:
return False
## __CheckRuleSectionFileType() method
#
# Get whether a section matches a file type
#
# @param self The object pointer
# @param SectionType The section type to check
# @param FileType The file type to check
#
def __CheckRuleSectionFileType(self, SectionType, FileType):
if SectionType == "COMPAT16":
if FileType not in ("COMPAT16", "SEC_COMPAT16"):
raise Warning("Incorrect section file type '%s'" % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == "PE32":
if FileType not in ("PE32", "SEC_PE32"):
raise Warning("Incorrect section file type '%s'" % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == "PIC":
if FileType not in ("PIC", "PIC"):
raise Warning("Incorrect section file type '%s'" % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == "TE":
if FileType not in ("TE", "SEC_TE"):
raise Warning("Incorrect section file type '%s'" % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == "RAW":
if FileType not in ("BIN", "SEC_BIN", "RAW", "ASL", "ACPI"):
raise Warning("Incorrect section file type '%s'" % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == "DXE_DEPEX" or SectionType == "SMM_DEPEX":
if FileType not in ("DXE_DEPEX", "SEC_DXE_DEPEX", "SMM_DEPEX"):
raise Warning("Incorrect section file type '%s'" % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == "UI":
if FileType not in ("UI", "SEC_UI"):
raise Warning("Incorrect section file type '%s'" % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == "VERSION":
if FileType not in ("VERSION", "SEC_VERSION"):
raise Warning("Incorrect section file type '%s'" % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == "PEI_DEPEX":
if FileType not in ("PEI_DEPEX", "SEC_PEI_DEPEX"):
raise Warning("Incorrect section file type '%s'" % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == "GUID":
if FileType not in ("PE32", "SEC_GUID"):
raise Warning("Incorrect section file type '%s'" % FileType, self.FileName, self.CurrentLineNumber)
## __GetRuleEncapsulationSection() method
#
# Get encapsulation section for Rule
#
# @param self The object pointer
# @param Rule for whom section is got
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
    def __GetRuleEncapsulationSection(self, Rule):
        """Parse a COMPRESS or GUIDED encapsulation section inside a Rule and
        append it to Rule.SectionList.  Encapsulation sections nest, so this
        method recurses for inner sections.

        @param Rule  object whose SectionList receives the parsed section
        @retval True   an encapsulation section was parsed
        @retval False  neither COMPRESS nor GUIDED follows
        @raises Warning  on any syntax error
        """
        if self.__IsKeyword( "COMPRESS"):
            # Compression type defaults to PI_STD when not given explicitly.
            Type = "PI_STD"
            if self.__IsKeyword("PI_STD") or self.__IsKeyword("PI_NONE"):
                Type = self.__Token
            if not self.__IsToken("{"):
                raise Warning("expected '{'", self.FileName, self.CurrentLineNumber)
            CompressSectionObj = CompressSection.CompressSection()
            CompressSectionObj.CompType = Type
            # Recursive sections: consume nested encapsulations and leaf
            # sections until neither matches.
            while True:
                IsEncapsulate = self.__GetRuleEncapsulationSection(CompressSectionObj)
                IsLeaf = self.__GetEfiSection(CompressSectionObj)
                if not IsEncapsulate and not IsLeaf:
                    break
            if not self.__IsToken( "}"):
                raise Warning("expected '}'", self.FileName, self.CurrentLineNumber)
            Rule.SectionList.append(CompressSectionObj)
            return True
        elif self.__IsKeyword( "GUIDED"):
            # GUID may be given literally or via the $(NAMED_GUID) macro.
            GuidValue = None
            if self.__GetNextGuid():
                GuidValue = self.__Token
            if self.__IsKeyword( "$(NAMED_GUID)"):
                GuidValue = self.__Token
            AttribDict = self.__GetGuidAttrib()
            if not self.__IsToken("{"):
                raise Warning("expected '{'", self.FileName, self.CurrentLineNumber)
            GuidSectionObj = GuidSection.GuidSection()
            GuidSectionObj.NameGuid = GuidValue
            GuidSectionObj.SectionType = "GUIDED"
            GuidSectionObj.ProcessRequired = AttribDict["PROCESSING_REQUIRED"]
            GuidSectionObj.AuthStatusValid = AttribDict["AUTH_STATUS_VALID"]
            GuidSectionObj.ExtraHeaderSize = AttribDict["EXTRA_HEADER_SIZE"]
            # Nested sections inside the GUIDED wrapper.
            while True:
                IsEncapsulate = self.__GetRuleEncapsulationSection(GuidSectionObj)
                IsLeaf = self.__GetEfiSection(GuidSectionObj)
                if not IsEncapsulate and not IsLeaf:
                    break
            if not self.__IsToken( "}"):
                raise Warning("expected '}'", self.FileName, self.CurrentLineNumber)
            Rule.SectionList.append(GuidSectionObj)
            return True
        return False
## __GetVtf() method
#
# Get VTF section contents and store its data into VTF list of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a VTF
# @retval False Not able to find a VTF
#
    def __GetVtf(self):
        """Parse one [VTF.Arch.Name[,Arch]] section into self.Profile.VtfList.

        @retval True   a VTF section was parsed
        @retval False  the next token does not begin a VTF section
        @raises Warning  on any syntax error
        """
        if not self.__GetNextToken():
            return False
        S = self.__Token.upper()
        # A '[' token that is not [VTF. means the VTF area has ended; only
        # [Rule.] or [OptionRom.] may legally follow it.
        if S.startswith("[") and not S.startswith("[VTF."):
            if not S.startswith("[RULE.") and not S.startswith("[OPTIONROM."):
                raise Warning("Unknown section or section appear sequence error (The correct sequence should be [FD.], [FV.], [Capsule.], [VTF.], [Rule.], [OptionRom.])", self.FileName, self.CurrentLineNumber)
            self.__UndoToken()
            return False
        self.__UndoToken()
        if not self.__IsToken("[VTF.", True):
            FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
            #print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
            #    % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
            raise Warning("expected [VTF.]", self.FileName, self.CurrentLineNumber)
        if not self.__SkipToToken("."):
            raise Warning("expected '.'", self.FileName, self.CurrentLineNumber)
        # Architecture component of the section header.
        Arch = self.__SkippedChars.rstrip(".").upper()
        if Arch not in ("IA32", "X64", "IPF", "ARM", "AARCH64"):
            raise Warning("Unknown Arch '%s'" % Arch, self.FileName, self.CurrentLineNumber)
        if not self.__GetNextWord():
            raise Warning("expected VTF name", self.FileName, self.CurrentLineNumber)
        Name = self.__Token.upper()
        VtfObj = Vtf.Vtf()
        VtfObj.UiName = Name
        VtfObj.KeyArch = Arch
        # Optional second architecture after a comma.
        if self.__IsToken(","):
            if not self.__GetNextWord():
                raise Warning("expected Arch list", self.FileName, self.CurrentLineNumber)
            if self.__Token.upper() not in ("IA32", "X64", "IPF", "ARM", "AARCH64"):
                raise Warning("Unknown Arch '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
            VtfObj.ArchList = self.__Token.upper()
        if not self.__IsToken( "]"):
            raise Warning("expected ']'", self.FileName, self.CurrentLineNumber)
        if self.__IsKeyword("IA32_RST_BIN"):
            if not self.__IsToken("="):
                raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
            if not self.__GetNextToken():
                raise Warning("expected Reset file", self.FileName, self.CurrentLineNumber)
            VtfObj.ResetBin = self.__Token
            # Validate the path only when it contains no unexpanded macros
            # other than $(WORKSPACE).
            if VtfObj.ResetBin.replace('$(WORKSPACE)', '').find('$') == -1:
                #check for file path
                ErrorCode, ErrorInfo = PathClass(NormPath(VtfObj.ResetBin), GenFdsGlobalVariable.WorkSpaceDir).Validate()
                if ErrorCode != 0:
                    EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
        # Consume COMP_NAME component statements until none remain.
        while self.__GetComponentStatement(VtfObj):
            pass
        self.Profile.VtfList.append(VtfObj)
        return True
## __GetComponentStatement() method
#
# Get components in VTF
#
# @param self The object pointer
# @param VtfObj for whom component is got
# @retval True Successfully find a component
# @retval False Not able to find a component
#
    def __GetComponentStatement(self, VtfObj):
        """Parse one component statement of a VTF section and append it to
        VtfObj.ComponentStatementList.

        The keys are expected in this fixed order: COMP_NAME, COMP_LOC,
        COMP_TYPE, COMP_VER, COMP_CS, COMP_BIN, COMP_SYM, COMP_SIZE.

        @param VtfObj  VTF object receiving the component
        @retval True   a component statement was parsed
        @retval False  the next keyword is not COMP_NAME
        @raises Warning  on any syntax error
        """
        if not self.__IsKeyword("COMP_NAME"):
            return False
        if not self.__IsToken("="):
            raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
        if not self.__GetNextWord():
            raise Warning("expected Component Name", self.FileName, self.CurrentLineNumber)
        CompStatementObj = ComponentStatement.ComponentStatement()
        CompStatementObj.CompName = self.__Token
        if not self.__IsKeyword("COMP_LOC"):
            raise Warning("expected COMP_LOC", self.FileName, self.CurrentLineNumber)
        if not self.__IsToken("="):
            raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
        CompStatementObj.CompLoc = ""
        if self.__GetNextWord():
            CompStatementObj.CompLoc = self.__Token
            # Optional '|' plus a one-letter file position within the region.
            if self.__IsToken('|'):
                if not self.__GetNextWord():
                    raise Warning("Expected Region Name", self.FileName, self.CurrentLineNumber)
                if self.__Token not in ("F", "N", "S"): #, "H", "L", "PH", "PL"): not support
                    raise Warning("Unknown location type '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
                CompStatementObj.FilePos = self.__Token
        else:
            # No location word on this line: skip to the start of the next
            # line and leave CompLoc empty.
            self.CurrentLineNumber += 1
            self.CurrentOffsetWithinLine = 0
        if not self.__IsKeyword("COMP_TYPE"):
            raise Warning("expected COMP_TYPE", self.FileName, self.CurrentLineNumber)
        if not self.__IsToken("="):
            raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
        if not self.__GetNextToken():
            raise Warning("expected Component type", self.FileName, self.CurrentLineNumber)
        # COMP_TYPE is either a known name or a 1-2 digit hex literal
        # (0xN / 0xNN), checked digit-by-digit below.
        if self.__Token not in ("FIT", "PAL_B", "PAL_A", "OEM"):
            if not self.__Token.startswith("0x") or len(self.__Token) < 3 or len(self.__Token) > 4 or \
                not self.__HexDigit(self.__Token[2]) or not self.__HexDigit(self.__Token[-1]):
                # NOTE(review): message says "location type" although this
                # branch validates the component type -- looks like a
                # copy/paste; confirm before changing user-visible output.
                raise Warning("Unknown location type '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
        CompStatementObj.CompType = self.__Token
        if not self.__IsKeyword("COMP_VER"):
            raise Warning("expected COMP_VER", self.FileName, self.CurrentLineNumber)
        if not self.__IsToken("="):
            raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
        if not self.__GetNextToken():
            raise Warning("expected Component version", self.FileName, self.CurrentLineNumber)
        # Version is '-' or 'xx.yy' with 1-2 hex digits on each side.
        # NOTE(review): the pattern string is not a raw literal; '\.' is an
        # unrecognized escape that Python currently leaves as backslash-dot,
        # so the pattern works, but r'...' would be cleaner and future-proof.
        Pattern = re.compile('-$|[0-9a-fA-F]{1,2}\.[0-9a-fA-F]{1,2}$', re.DOTALL)
        if Pattern.match(self.__Token) == None:
            raise Warning("Unknown version format '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
        CompStatementObj.CompVer = self.__Token
        if not self.__IsKeyword("COMP_CS"):
            raise Warning("expected COMP_CS", self.FileName, self.CurrentLineNumber)
        if not self.__IsToken("="):
            raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
        if not self.__GetNextToken():
            raise Warning("expected Component CS", self.FileName, self.CurrentLineNumber)
        # Checksum flag is literally "1" or "0".
        if self.__Token not in ("1", "0"):
            raise Warning("Unknown Component CS '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
        CompStatementObj.CompCs = self.__Token
        if not self.__IsKeyword("COMP_BIN"):
            raise Warning("expected COMP_BIN", self.FileName, self.CurrentLineNumber)
        if not self.__IsToken("="):
            raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
        if not self.__GetNextToken():
            raise Warning("expected Component file", self.FileName, self.CurrentLineNumber)
        CompStatementObj.CompBin = self.__Token
        # Validate the path unless it is '-' or still contains macros other
        # than $(WORKSPACE).
        if CompStatementObj.CompBin != '-' and CompStatementObj.CompBin.replace('$(WORKSPACE)', '').find('$') == -1:
            #check for file path
            ErrorCode, ErrorInfo = PathClass(NormPath(CompStatementObj.CompBin), GenFdsGlobalVariable.WorkSpaceDir).Validate()
            if ErrorCode != 0:
                EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
        if not self.__IsKeyword("COMP_SYM"):
            raise Warning("expected COMP_SYM", self.FileName, self.CurrentLineNumber)
        if not self.__IsToken("="):
            raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
        if not self.__GetNextToken():
            raise Warning("expected Component symbol file", self.FileName, self.CurrentLineNumber)
        CompStatementObj.CompSym = self.__Token
        # Same path validation rule as COMP_BIN above.
        if CompStatementObj.CompSym != '-' and CompStatementObj.CompSym.replace('$(WORKSPACE)', '').find('$') == -1:
            #check for file path
            ErrorCode, ErrorInfo = PathClass(NormPath(CompStatementObj.CompSym), GenFdsGlobalVariable.WorkSpaceDir).Validate()
            if ErrorCode != 0:
                EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
        if not self.__IsKeyword("COMP_SIZE"):
            raise Warning("expected COMP_SIZE", self.FileName, self.CurrentLineNumber)
        if not self.__IsToken("="):
            raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
        # Size is '-', a decimal number, or a hex number.
        if self.__IsToken("-"):
            CompStatementObj.CompSize = self.__Token
        elif self.__GetNextDecimalNumber():
            CompStatementObj.CompSize = self.__Token
        elif self.__GetNextHexNumber():
            CompStatementObj.CompSize = self.__Token
        else:
            raise Warning("Unknown size '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
        VtfObj.ComponentStatementList.append(CompStatementObj)
        return True
## __GetOptionRom() method
#
# Get OptionROM section contents and store its data into OptionROM list of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a OptionROM
# @retval False Not able to find a OptionROM
#
def __GetOptionRom(self):
if not self.__GetNextToken():
return False
S = self.__Token.upper()
if S.startswith("[") and not S.startswith("[OPTIONROM."):
raise Warning("Unknown section or section appear sequence error (The correct sequence should be [FD.], [FV.], [Capsule.], [VTF.], [Rule.], [OptionRom.])", self.FileName, self.CurrentLineNumber)
self.__UndoToken()
if not self.__IsToken("[OptionRom.", True):
raise Warning("Unknown Keyword '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
OptRomName = self.__GetUiName()
if not self.__IsToken( "]"):
raise Warning("expected ']'", self.FileName, self.CurrentLineNumber)
OptRomObj = OptionRom.OPTIONROM()
OptRomObj.DriverName = OptRomName
self.Profile.OptRomDict[OptRomName] = OptRomObj
while True:
isInf = self.__GetOptRomInfStatement(OptRomObj)
isFile = self.__GetOptRomFileStatement(OptRomObj)
if not isInf and not isFile:
break
return True
## __GetOptRomInfStatement() method
#
# Get INF statements
#
# @param self The object pointer
# @param Obj for whom inf statement is got
# @retval True Successfully find inf statement
# @retval False Not able to find inf statement
#
def __GetOptRomInfStatement(self, Obj):
if not self.__IsKeyword( "INF"):
return False
ffsInf = OptRomInfStatement.OptRomInfStatement()
self.__GetInfOptions( ffsInf)
if not self.__GetNextToken():
raise Warning("expected INF file path", self.FileName, self.CurrentLineNumber)
ffsInf.InfFileName = self.__Token
if ffsInf.InfFileName.replace('$(WORKSPACE)', '').find('$') == -1:
#check for file path
ErrorCode, ErrorInfo = PathClass(NormPath(ffsInf.InfFileName), GenFdsGlobalVariable.WorkSpaceDir).Validate()
if ErrorCode != 0:
EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
if not ffsInf.InfFileName in self.Profile.InfList:
self.Profile.InfList.append(ffsInf.InfFileName)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.InfFileLineList.append(FileLineTuple)
self.__GetOptRomOverrides (ffsInf)
Obj.FfsList.append(ffsInf)
return True
## __GetOptRomOverrides() method
#
# Get overrides for OptROM INF & FILE
#
# @param self The object pointer
# @param FfsInfObj for whom overrides is got
#
    def __GetOptRomOverrides(self, Obj):
        """Parse an optional brace-delimited PCI override block for an
        OptionRom INF or FILE statement and attach it to Obj.OverrideAttribs.

        Recognized keys: PCI_VENDOR_ID, PCI_CLASS_CODE, PCI_DEVICE_ID and
        PCI_REVISION (hex numbers) plus PCI_COMPRESS (TRUE/FALSE).  When no
        '{' follows, Obj is left untouched.

        @param Obj  statement object receiving the OverrideAttribs
        @raises Warning  when a key's '=' or value is missing
        """
        if self.__IsToken('{'):
            Overrides = OptionRom.OverrideAttribs()
            # Keys may appear in any order and any number of times; the last
            # occurrence of a key wins.
            while True:
                if self.__IsKeyword( "PCI_VENDOR_ID"):
                    if not self.__IsToken( "="):
                        raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
                    if not self.__GetNextHexNumber():
                        raise Warning("expected Hex vendor id", self.FileName, self.CurrentLineNumber)
                    Overrides.PciVendorId = self.__Token
                    continue
                if self.__IsKeyword( "PCI_CLASS_CODE"):
                    if not self.__IsToken( "="):
                        raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
                    if not self.__GetNextHexNumber():
                        raise Warning("expected Hex class code", self.FileName, self.CurrentLineNumber)
                    Overrides.PciClassCode = self.__Token
                    continue
                if self.__IsKeyword( "PCI_DEVICE_ID"):
                    if not self.__IsToken( "="):
                        raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
                    if not self.__GetNextHexNumber():
                        raise Warning("expected Hex device id", self.FileName, self.CurrentLineNumber)
                    Overrides.PciDeviceId = self.__Token
                    continue
                if self.__IsKeyword( "PCI_REVISION"):
                    if not self.__IsToken( "="):
                        raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
                    if not self.__GetNextHexNumber():
                        raise Warning("expected Hex revision", self.FileName, self.CurrentLineNumber)
                    Overrides.PciRevision = self.__Token
                    continue
                if self.__IsKeyword( "PCI_COMPRESS"):
                    if not self.__IsToken( "="):
                        raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
                    if not self.__GetNextToken():
                        raise Warning("expected TRUE/FALSE for compress", self.FileName, self.CurrentLineNumber)
                    # Any value other than TRUE (case-insensitive) is False.
                    Overrides.NeedCompress = self.__Token.upper() == 'TRUE'
                    continue
                if self.__IsToken( "}"):
                    break
                else:
                    EdkLogger.error("FdfParser", FORMAT_INVALID, File=self.FileName, Line=self.CurrentLineNumber)
            Obj.OverrideAttribs = Overrides
## __GetOptRomFileStatement() method
#
# Get FILE statements
#
# @param self The object pointer
# @param Obj for whom FILE statement is got
# @retval True Successfully find FILE statement
# @retval False Not able to find FILE statement
#
def __GetOptRomFileStatement(self, Obj):
    """Parse one FILE statement inside an option-ROM section.

    @param self  The object pointer
    @param Obj   option-ROM object whose FfsList receives the parsed file
    @retval True   a FILE statement was found and consumed
    @retval False  the next token does not start a FILE statement
    """
    if not self.__IsKeyword( "FILE"):
        return False
    FfsFileObj = OptRomFileStatement.OptRomFileStatement()
    # The binary type keyword (EFI or BIN) is mandatory.
    if not self.__IsKeyword("EFI") and not self.__IsKeyword("BIN"):
        raise Warning("expected Binary type (EFI/BIN)", self.FileName, self.CurrentLineNumber)
    FfsFileObj.FileType = self.__Token
    if not self.__GetNextToken():
        raise Warning("expected File path", self.FileName, self.CurrentLineNumber)
    FfsFileObj.FileName = self.__Token
    if FfsFileObj.FileName.replace('$(WORKSPACE)', '').find('$') == -1:
        # check for file path, but only when the name contains no macro
        # other than $(WORKSPACE) (no '$' left after stripping it)
        ErrorCode, ErrorInfo = PathClass(NormPath(FfsFileObj.FileName), GenFdsGlobalVariable.WorkSpaceDir).Validate()
        if ErrorCode != 0:
            EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
    if FfsFileObj.FileType == 'EFI':
        # Only EFI images can carry PCI override attributes.
        self.__GetOptRomOverrides(FfsFileObj)
    Obj.FfsList.append(FfsFileObj)
    return True
## __GetCapInFd() method
#
# Get Cap list contained in FD
#
# @param self The object pointer
# @param FdName FD name
# @retval CapList List of Capsule in FD
#
def __GetCapInFd (self, FdName):
    """Return the upper-cased names of all capsules contained in the FD
    named ``FdName`` (capsule regions whose data is not a raw .cap file)."""
    Result = []
    Key = FdName.upper()
    if Key in self.Profile.FdDict.keys():
        for Region in self.Profile.FdDict[Key].RegionList:
            if Region.RegionType != 'CAPSULE':
                continue
            for RegionData in Region.RegionDataList:
                # Raw capsule files are referenced by path, not by name.
                if RegionData.endswith(".cap"):
                    continue
                if RegionData != None and RegionData.upper() not in Result:
                    Result.append(RegionData.upper())
    return Result
## __GetReferencedFdCapTuple() method
#
# Get FV and FD list referenced by a capsule image
#
# @param self The object pointer
# @param CapObj Capsule section to be searched
# @param RefFdList referenced FD by section
# @param RefFvList referenced FV by section
#
def __GetReferencedFdCapTuple(self, CapObj, RefFdList = None, RefFvList = None):
    """Collect the FD and FV names referenced by a capsule section.

    @param CapObj     Capsule section to be searched
    @param RefFdList  output list of referenced FD names (upper-cased)
    @param RefFvList  output list of referenced FV names (upper-cased)
    """
    # Use None sentinels instead of mutable defaults: a literal [] default
    # is created once and shared between calls, silently accumulating
    # entries across invocations.
    if RefFdList is None:
        RefFdList = []
    if RefFvList is None:
        RefFvList = []
    for CapsuleDataObj in CapObj.CapsuleDataList :
        if hasattr(CapsuleDataObj, 'FvName') and CapsuleDataObj.FvName != None and CapsuleDataObj.FvName.upper() not in RefFvList:
            RefFvList.append (CapsuleDataObj.FvName.upper())
        elif hasattr(CapsuleDataObj, 'FdName') and CapsuleDataObj.FdName != None and CapsuleDataObj.FdName.upper() not in RefFdList:
            RefFdList.append (CapsuleDataObj.FdName.upper())
        elif CapsuleDataObj.Ffs != None:
            if isinstance(CapsuleDataObj.Ffs, FfsFileStatement.FileStatement):
                if CapsuleDataObj.Ffs.FvName != None and CapsuleDataObj.Ffs.FvName.upper() not in RefFvList:
                    RefFvList.append(CapsuleDataObj.Ffs.FvName.upper())
                elif CapsuleDataObj.Ffs.FdName != None and CapsuleDataObj.Ffs.FdName.upper() not in RefFdList:
                    RefFdList.append(CapsuleDataObj.Ffs.FdName.upper())
                else:
                    self.__GetReferencedFdFvTupleFromSection(CapsuleDataObj.Ffs, RefFdList, RefFvList)
## __GetFvInFd() method
#
# Get FV list contained in FD
#
# @param self The object pointer
# @param FdName FD name
# @retval FvList list of FV in FD
#
def __GetFvInFd (self, FdName):
    """Return the upper-cased names of all FVs contained in the FD named
    ``FdName`` (FV regions whose data is not a raw .fv file)."""
    Result = []
    Key = FdName.upper()
    if Key in self.Profile.FdDict.keys():
        for Region in self.Profile.FdDict[Key].RegionList:
            if Region.RegionType != 'FV':
                continue
            for RegionData in Region.RegionDataList:
                # Raw firmware-volume files are referenced by path, not name.
                if RegionData.endswith(".fv"):
                    continue
                if RegionData != None and RegionData.upper() not in Result:
                    Result.append(RegionData.upper())
    return Result
## __GetReferencedFdFvTuple() method
#
# Get FD and FV list referenced by a FFS file
#
# @param self The object pointer
# @param FfsFile contains sections to be searched
# @param RefFdList referenced FD by section
# @param RefFvList referenced FV by section
#
def __GetReferencedFdFvTuple(self, FvObj, RefFdList = None, RefFvList = None):
    """Collect the FD and FV names referenced by the FFS files of an FV.

    @param FvObj      FV whose FFS files are searched
    @param RefFdList  output list of referenced FD names (upper-cased)
    @param RefFvList  output list of referenced FV names (upper-cased)
    """
    # None sentinels: mutable [] defaults are shared across calls and
    # would leak entries from one invocation into the next.
    if RefFdList is None:
        RefFdList = []
    if RefFvList is None:
        RefFvList = []
    for FfsObj in FvObj.FfsList:
        if isinstance(FfsObj, FfsFileStatement.FileStatement):
            if FfsObj.FvName != None and FfsObj.FvName.upper() not in RefFvList:
                RefFvList.append(FfsObj.FvName.upper())
            elif FfsObj.FdName != None and FfsObj.FdName.upper() not in RefFdList:
                RefFdList.append(FfsObj.FdName.upper())
        else:
            self.__GetReferencedFdFvTupleFromSection(FfsObj, RefFdList, RefFvList)
## __GetReferencedFdFvTupleFromSection() method
#
# Get FD and FV list referenced by a FFS section
#
# @param self The object pointer
# @param FfsFile contains sections to be searched
# @param FdList referenced FD by section
# @param FvList referenced FV by section
#
def __GetReferencedFdFvTupleFromSection(self, FfsFile, FdList = None, FvList = None):
    """Collect the FD and FV names referenced by the sections of one FFS
    file, descending into compressed and GUID-defined sections.

    @param FfsFile  FFS file whose section tree is searched
    @param FdList   output list of referenced FD names (upper-cased)
    @param FvList   output list of referenced FV names (upper-cased)
    """
    # None sentinels: mutable [] defaults are shared across calls and
    # would accumulate names from earlier invocations.
    if FdList is None:
        FdList = []
    if FvList is None:
        FvList = []
    SectionStack = []
    SectionStack.extend(FfsFile.SectionList)
    # Iterative depth-first walk of the section tree.
    while SectionStack != []:
        SectionObj = SectionStack.pop()
        if isinstance(SectionObj, FvImageSection.FvImageSection):
            if SectionObj.FvName != None and SectionObj.FvName.upper() not in FvList:
                FvList.append(SectionObj.FvName.upper())
            if SectionObj.Fv != None and SectionObj.Fv.UiFvName != None and SectionObj.Fv.UiFvName.upper() not in FvList:
                FvList.append(SectionObj.Fv.UiFvName.upper())
                self.__GetReferencedFdFvTuple(SectionObj.Fv, FdList, FvList)
        if isinstance(SectionObj, CompressSection.CompressSection) or isinstance(SectionObj, GuidSection.GuidSection):
            SectionStack.extend(SectionObj.SectionList)
## CycleReferenceCheck() method
#
# Check whether cycle reference exists in FDF
#
# @param self The object pointer
# @retval True cycle reference exists
# @retval False Not exists cycle reference
#
def CycleReferenceCheck(self):
    """Check whether a cycle reference exists in the parsed FDF.

    Walks the FV->FD and Capsule->FD containment graphs; as soon as a
    name re-appears on its own reference stack the accumulated chain is
    logged and True is returned.

    @retval True   a cycle reference exists
    @retval False  no cycle reference exists
    """
    #
    # Check the cycle between FV and FD image
    #
    MaxLength = len (self.Profile.FvDict)
    for FvName in self.Profile.FvDict.keys():
        LogStr = "\nCycle Reference Checking for FV: %s\n" % FvName
        RefFvStack = []
        RefFvStack.append(FvName)
        FdAnalyzedList = []
        Index = 0
        # Each FV is pushed at most once, so MaxLength iterations are
        # enough to reach a fixed point.
        while RefFvStack != [] and Index < MaxLength:
            Index = Index + 1
            FvNameFromStack = RefFvStack.pop()
            if FvNameFromStack.upper() in self.Profile.FvDict.keys():
                FvObj = self.Profile.FvDict[FvNameFromStack.upper()]
            else:
                continue
            RefFdList = []
            RefFvList = []
            self.__GetReferencedFdFvTuple(FvObj, RefFdList, RefFvList)
            for RefFdName in RefFdList:
                if RefFdName in FdAnalyzedList:
                    continue
                LogStr += "FV %s contains FD %s\n" % (FvNameFromStack, RefFdName)
                FvInFdList = self.__GetFvInFd(RefFdName)
                if FvInFdList != []:
                    for FvNameInFd in FvInFdList:
                        LogStr += "FD %s contains FV %s\n" % (RefFdName,FvNameInFd)
                        if FvNameInFd not in RefFvStack:
                            RefFvStack.append(FvNameInFd)
                        # Seeing the starting FV (or the FV being expanded)
                        # again means the containment chain is cyclic.
                        if FvName in RefFvStack or FvNameFromStack in RefFvStack:
                            EdkLogger.info(LogStr)
                            return True
                FdAnalyzedList.append(RefFdName)
            for RefFvName in RefFvList:
                LogStr += "FV %s contains FV %s\n" % (FvNameFromStack, RefFvName)
                if RefFvName not in RefFvStack:
                    RefFvStack.append(RefFvName)
                if FvName in RefFvStack or FvNameFromStack in RefFvStack:
                    EdkLogger.info(LogStr)
                    return True
    #
    # Check the cycle between Capsule and FD image
    #
    MaxLength = len (self.Profile.CapsuleDict)
    for CapName in self.Profile.CapsuleDict.keys():
        #
        # Capsule image to be checked.
        #
        LogStr = "\n\n\nCycle Reference Checking for Capsule: %s\n" % CapName
        RefCapStack = []
        RefCapStack.append(CapName)
        FdAnalyzedList = []
        FvAnalyzedList = []
        Index = 0
        while RefCapStack != [] and Index < MaxLength:
            Index = Index + 1
            CapNameFromStack = RefCapStack.pop()
            if CapNameFromStack.upper() in self.Profile.CapsuleDict.keys():
                CapObj = self.Profile.CapsuleDict[CapNameFromStack.upper()]
            else:
                continue
            RefFvList = []
            RefFdList = []
            self.__GetReferencedFdCapTuple(CapObj, RefFdList, RefFvList)
            FvListLength = 0
            FdListLength = 0
            # RefFvList/RefFdList can grow while being processed; iterate
            # until no new FV/FD references were appended.
            while FvListLength < len (RefFvList) or FdListLength < len (RefFdList):
                for RefFdName in RefFdList:
                    if RefFdName in FdAnalyzedList:
                        continue
                    LogStr += "Capsule %s contains FD %s\n" % (CapNameFromStack, RefFdName)
                    CapInFdList = self.__GetCapInFd(RefFdName)
                    if CapInFdList != []:
                        for CapNameInFd in CapInFdList:
                            LogStr += "FD %s contains Capsule %s\n" % (RefFdName,CapNameInFd)
                            if CapNameInFd not in RefCapStack:
                                RefCapStack.append(CapNameInFd)
                            if CapName in RefCapStack or CapNameFromStack in RefCapStack:
                                EdkLogger.info(LogStr)
                                return True
                    FvInFdList = self.__GetFvInFd(RefFdName)
                    if FvInFdList != []:
                        for FvNameInFd in FvInFdList:
                            LogStr += "FD %s contains FV %s\n" % (RefFdName,FvNameInFd)
                            if FvNameInFd not in RefFvList:
                                RefFvList.append(FvNameInFd)
                    FdAnalyzedList.append(RefFdName)
                #
                # the number of the parsed FV and FD image
                #
                FvListLength = len (RefFvList)
                FdListLength = len (RefFdList)
                for RefFvName in RefFvList:
                    if RefFvName in FvAnalyzedList:
                        continue
                    LogStr += "Capsule %s contains FV %s\n" % (CapNameFromStack, RefFvName)
                    if RefFvName.upper() in self.Profile.FvDict.keys():
                        FvObj = self.Profile.FvDict[RefFvName.upper()]
                    else:
                        continue
                    self.__GetReferencedFdFvTuple(FvObj, RefFdList, RefFvList)
                    FvAnalyzedList.append(RefFvName)
    return False
# Stand-alone entry point: parse the FDF file named on the command line
# and run the cycle-reference check.  NOTE: Python 2 syntax
# (``except E, v`` and print statements) -- this script does not run
# under Python 3.
if __name__ == "__main__":
    import sys
    try:
        test_file = sys.argv[1]
    except IndexError, v:
        print "Usage: %s filename" % sys.argv[0]
        sys.exit(1)

    parser = FdfParser(test_file)
    try:
        parser.ParseFile()
        parser.CycleReferenceCheck()
    except Warning, X:
        print str(X)
    else:
        print "Success!"
| [
"maheshkhanwalkar@gmail.com"
] | maheshkhanwalkar@gmail.com |
a026a57ad5eda7c9a365e0e346865752f3c49fce | 87020f242ca9087dfef226f5114ad9691313fddb | /search.py | 140f03c40c7ebf21714cbdb772ed0453ca6b5452 | [
"ISC"
] | permissive | tslight/dsa | 18ee40cf57aec88a6d97f172df6e49f296676050 | c59ff7c4e25908086ac7589d1a763fc311bdad50 | refs/heads/main | 2021-12-02T10:07:51.058416 | 2021-08-27T16:15:24 | 2021-08-27T16:15:24 | 398,352,193 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,995 | py | from argparse import ArgumentParser
import sys
import timeit
def get_args():
    """Build the command-line parser and return the parsed arguments.

    Exactly one search strategy (-b/-r/-s) must be selected; -m, -i and -n
    tune the generated list and the value searched for.
    """
    arg_parser = ArgumentParser(
        description='Search for an item in a list. ' +
        'Defaults to a sorted list of 10 million elements ' +
        'and searches for the last element.'
    )
    strategy = arg_parser.add_mutually_exclusive_group(required=True)
    strategy.add_argument('-b', '--binary', action='store_true',
                          help="Use binary search.")
    strategy.add_argument('-r', '--recursive', action='store_true',
                          help="Use recursive binary search.")
    strategy.add_argument('-s', '--simple', action='store_true',
                          help="Use simple search.")
    # The string default is converted through type=int by argparse.
    arg_parser.add_argument("-m", "--max", type=int, default='10_000_000',
                            help="Max number in range.")
    arg_parser.add_argument("-i", "--increment", type=int, default=1,
                            help="Number to increment each element in list by.")
    arg_parser.add_argument("-n", "--number", type=int, default=None,
                            help="Number to search for in list.")
    return arg_parser.parse_args()
def simple(mylist, item):
    """Linear search.

    Returns (found, steps) where steps is the number of elements examined.
    """
    for position, value in enumerate(mylist):
        if value == item:
            return (True, position + 1)
    return (False, len(mylist))
def binary(mylist, item):
    """Iterative binary search over a sorted list.

    Returns (found, steps) where steps counts the midpoint probes made.
    """
    lo = 0
    hi = len(mylist) - 1
    steps = 0
    while lo <= hi:
        mid = (lo + hi) // 2
        guess = mylist[mid]
        steps += 1
        if guess == item:
            return (True, steps)
        if guess > item:
            hi = mid - 1
        else:
            lo = mid + 1
    return (False, steps)
def recursive_binary(arr, element, count):
    """Recursive binary search over a sorted list.

    Returns (found, count) where count is the number of probes, including
    the ``count`` passed in by the caller.

    Bug fixed: the original recursed into ``arr[:mid - 1]``, which drops
    ``arr[mid - 1]`` from the search space -- elements in the lower half
    could be reported as missing, and the empty slice then indexed out of
    range.  The lower half is ``arr[:mid]``.
    """
    count += 1
    if not arr:
        # Search space exhausted without finding the element.
        return (False, count)
    mid = (len(arr) - 1) // 2
    if element == arr[mid]:
        return (True, count)
    if element > arr[mid]:
        return recursive_binary(arr[mid + 1:], element, count)
    return recursive_binary(arr[:mid], element, count)
def search():
    """Build the list, run the search selected on the command line and
    print a small report (type, result, steps taken)."""
    args = get_args()
    if args.number is None:
        # Default target: the last element of the generated list.
        args.number = args.max - 1
    mylist = list(range(0, args.max, args.increment))
    if args.binary:
        found, count = binary(mylist, args.number)
        search_type = "Binary"
    elif args.recursive:
        found, count = recursive_binary(mylist, args.number, 0)
        search_type = "Recursive Binary"
    else:
        found, count = simple(mylist, args.number)
        search_type = "Simple"
    print(
        "\nType: {} Search\n".format(search_type) +
        "Found: {}\n".format(str(found)) +
        "Number: {:,}\n".format(args.number) +
        "Length: {:,}\n".format(len(mylist)) +
        "Steps: {:,}".format(count)
    )
def main():
    """Time a single search() run and print the elapsed wall-clock time."""
    # timeit re-imports search from __main__, so this only works when the
    # module is executed as a script.
    speed = timeit.timeit(
        "search()", setup="from __main__ import search", number=1
    )
    print("Time: {} seconds\n".format(round(speed, 8)))


if __name__ == '__main__':
    main()
| [
"tslight@pm.com"
] | tslight@pm.com |
2ed787bbddbaa83abe0480f699b22342aef9d349 | 9355b7af3d03dfd7feeaf002b0a5c99b7d9678f0 | /cloudpathlib/local/__init__.py | c6a1ba1e7ca427e6d8b62c37eeb68dc51a596663 | [
"MIT"
] | permissive | rluta/cloudpathlib | 64780e574e533a8c128285d10ecfb79c5be30f19 | 80f7afdf85dfb4f3ad0406944a5d3cf28c727435 | refs/heads/master | 2023-06-10T01:46:32.670933 | 2021-07-05T18:29:47 | 2021-07-05T18:29:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | """This module implements "Local" classes that mimic their associated `cloudpathlib` non-local
counterparts but use the local filesystem in place of cloud storage. They can be used as drop-in
replacements, with the intent that you can use them as mock or monkepatch substitutes in your
tests. See ["Testing code that uses cloudpathlib"
](https://cloudpathlib.drivendata.org/testing_mocked_cloudpathlib/) for usage examples.
"""
from .implementations import (
local_azure_blob_implementation,
LocalAzureBlobClient,
LocalAzureBlobPath,
local_gs_implementation,
LocalGSClient,
LocalGSPath,
local_s3_implementation,
LocalS3Client,
LocalS3Path,
)
from .localclient import LocalClient
from .localpath import LocalPath
__all__ = [
"local_azure_blob_implementation",
"LocalAzureBlobClient",
"LocalAzureBlobPath",
"LocalClient",
"local_gs_implementation",
"LocalGSClient",
"LocalGSPath",
"LocalPath",
"local_s3_implementation",
"LocalS3Client",
"LocalS3Path",
]
| [
"noreply@github.com"
] | noreply@github.com |
59f898c24b7c31d0cbe76ef107a8a875644260fd | e4c6acac07427baf82b44c17198dab5b78b44fa7 | /warn/warn.py | 2e374b944fce69f2ab0ee357d7bfd5128807795a | [
"MIT"
] | permissive | scopatz/warn | bf1b33320031857233ee525fc56957a511eb2d37 | a528bca192856f3cbf81e5fb133b143cb247a789 | refs/heads/master | 2020-04-06T05:30:32.463854 | 2016-08-29T00:23:41 | 2016-08-29T00:23:41 | 67,258,506 | 0 | 0 | null | 2016-09-02T22:20:43 | 2016-09-02T22:20:43 | null | UTF-8 | Python | false | false | 8,594 | py | """ A module that replace the built-ins warning module wit a more flexible
interface.
"""
import warnings
import sys
import re
from warnings import (_is_internal_frame, _next_external_frame,
_filters_mutated, showwarning, defaultaction,
onceregistry)
# Keep a reference to the stdlib implementation before it is replaced.
wfmu = _filters_mutated
warnings._filters_version = 1


def _filters_mutated():
    # Bump the version counter consulted by new_warn_explicit() so that
    # per-module __warningregistry__ caches are invalidated whenever the
    # filter list changes.
    warnings._filters_version += 1


warnings._filters_mutated = _filters_mutated
def new_warn_explicit(message, category, filename, lineno,
                      module=None, registry=None, module_globals=None,
                      emit_module=None):
    """Low-level warning issuer, like warnings.warn_explicit(), extended
    with ``emit_module`` so that 6-field filters (see newfilterwarnings)
    can additionally match the module that *emits* the warning."""
    lineno = int(lineno)
    if module is None:
        module = filename or "<unknown>"
        if module[-3:].lower() == ".py":
            module = module[:-3] # XXX What about leading pathname?
    if registry is None:
        registry = {}
    # Reset the per-module registry when the filter list changed since it
    # was last populated.
    if registry.get('version', 0) != warnings._filters_version:
        registry.clear()
        registry['version'] = warnings._filters_version
    if isinstance(message, Warning):
        text = str(message)
        category = message.__class__
    else:
        text = message
        message = category(message)
    key = (text, category, lineno)
    # Quick test for common case
    if registry.get(key):
        return
    # Search the filters
    for item in warnings.filters:
        # Resolve proxy entries back to their stored 6-field tuples.
        item = _get_proxy_filter(item)
        if len(item) == 5:
            action, msg, cat, mod, ln = item
            emod = None
        else:
            action, msg, cat, mod, ln, emod = item
        if ((msg is None or msg.match(text)) and
            issubclass(category, cat) and
            (mod is None or mod.match(module)) and
            (emod is None or emod.match(emit_module)) and
            (ln == 0 or lineno == ln)):
            break
    else:
        action = defaultaction
    # Early exit actions
    if action == "ignore":
        registry[key] = 1
        return
    # Prime the linecache for formatting, in case the
    # "file" is actually in a zipfile or something.
    import linecache
    linecache.getlines(filename, module_globals)
    if action == "error":
        raise message
    # Other actions
    if action == "once":
        registry[key] = 1
        oncekey = (text, category)
        if onceregistry.get(oncekey):
            return
        onceregistry[oncekey] = 1
    elif action == "always":
        pass
    elif action == "module":
        registry[key] = 1
        altkey = (text, category, 0)
        if registry.get(altkey):
            return
        registry[altkey] = 1
    elif action == "default":
        registry[key] = 1
    elif action == "custom":
        pass
    else:
        # Unrecognized actions are errors
        raise RuntimeError(
            "Unrecognized action (%r) in warnings.filters:\n %s" %
            (action, item))
    if not callable(showwarning):
        raise TypeError("warnings.showwarning() must be set to a "
                        "function or method")
    # Print message and context
    showwarning(message, category, filename, lineno)
def _get_stack_frame(stacklevel):
    """Return the caller's frame ``stacklevel`` levels up, skipping frames
    that belong to the warnings machinery itself (mirrors the frame walk
    in CPython's warnings.warn).  Raises ValueError when the stack is not
    deep enough."""
    stacklevel = stacklevel + 1  # account for this helper's own frame
    if stacklevel <= 1 or _is_internal_frame(sys._getframe(1)):
        # If frame is too small to care or if the warning originated in
        # internal code, then do not try to hide any frames.
        frame = sys._getframe(stacklevel)
    else:
        frame = sys._getframe(1)
        # Look for one frame less since the above line starts us off.
        for x in range(stacklevel-1):
            frame = _next_external_frame(frame)
            if frame is None:
                raise ValueError
    return frame
def new_warn(message, category=None, stacklevel=1, emitstacklevel=1):
    """Issue a warning, or maybe ignore it or raise an exception.

    Like warnings.warn() but additionally resolves the module
    ``emitstacklevel`` frames up, which extended filters can match via
    their emodule pattern."""
    # Check if message is already a Warning object
    ####################
    ### Get category ###
    ####################
    if isinstance(message, Warning):
        category = message.__class__
    # Check category argument
    if category is None:
        category = UserWarning
    if not (isinstance(category, type) and issubclass(category, Warning)):
        raise TypeError("category must be a Warning subclass, "
                        "not '{:s}'".format(type(category).__name__))
    # Get context information
    try:
        frame = _get_stack_frame(stacklevel)
    except ValueError:
        globals = sys.__dict__
        lineno = 1
    else:
        globals = frame.f_globals
        lineno = frame.f_lineno
    # Resolve the module that *emits* the warning (may differ from the
    # module the warning is attributed to when emitstacklevel != stacklevel).
    try:
        eframe = _get_stack_frame(emitstacklevel)
    except ValueError:
        eglobals = sys.__dict__
    else:
        eglobals = eframe.f_globals
    if '__name__' in eglobals:
        emodule = eglobals['__name__']
    else:
        emodule = "<string>"
    ####################
    ### Get Module   ###
    ####################
    if '__name__' in globals:
        module = globals['__name__']
    else:
        module = "<string>"
    ####################
    ### Get Filename ###
    ####################
    filename = globals.get('__file__')
    if filename:
        fnl = filename.lower()
        if fnl.endswith(".pyc"):
            filename = filename[:-1]
    else:
        if module == "__main__":
            try:
                filename = sys.argv[0]
            except AttributeError:
                # embedded interpreters don't have sys.argv, see bug #839151
                filename = '__main__'
        if not filename:
            filename = module
    registry = globals.setdefault("__warningregistry__", {})
    new_warn_explicit(message, category, filename, lineno, module, registry,
                      globals, emit_module=emodule)
# Extended (6-field) filter tuples, keyed by the integer id stored in
# their stand-in entry on warnings.filters.
_proxy_map = {}
# Case-insensitive pattern that matches any string; used as the message
# and module patterns of proxy placeholder entries.
re_matchall = re.compile('', re.I)


# Placeholder category marking proxy entries in warnings.filters.
class ProxyWarning(Warning): pass # NOQA
def _set_proxy_filter(warningstuple):
    """Register ``warningstuple`` for insertion into warnings.filters.

    Standard 5-field tuples are returned unchanged; longer (extended)
    tuples are stored in ``_proxy_map`` and replaced by a 5-field
    ProxyWarning placeholder that the stdlib machinery can hold.
    """
    if len(warningstuple) <= 5:
        return warningstuple
    key = len(_proxy_map) + 1
    _proxy_map[key] = warningstuple
    return ('custom', re_matchall, ProxyWarning, re_matchall, key)
def _get_proxy_filter(warningstuple):
    """Resolve a filter entry: ProxyWarning placeholders are mapped back
    to the extended tuple stored in ``_proxy_map``; anything else is
    returned unchanged."""
    if warningstuple[2] == ProxyWarning:
        return _proxy_map[warningstuple[4]]
    return warningstuple
def newfilterwarnings(action, message="", category=Warning, module="", lineno=0,
                      append=False, emodule=""):
    """Insert an entry into the list of warnings filters (at the front).

    'action' -- one of "error", "ignore", "always", "default", "module",
                or "once"
    'message' -- a regex that the warning message must match
    'category' -- a class that the warning must be a subclass of
    'module' -- a regex that the module name must match
    'lineno' -- an integer line number, 0 matches all warnings
    'append' -- if true, append to the list of filters
    'emodule' -- a regex that the emitting module must match; a non-empty
                 value creates an extended 6-field filter stored behind a
                 ProxyWarning placeholder (see _set_proxy_filter)
    """
    assert action in ("error", "ignore", "always", "default", "module",
                      "once"), "invalid action: %r" % (action,)
    assert isinstance(message, str), "message must be a string"
    assert isinstance(category, type), "category must be a class"
    assert issubclass(category, Warning), "category must be a Warning subclass"
    assert isinstance(module, str), "module must be a string"
    assert isinstance(lineno, int) and lineno >= 0, \
           "lineno must be an int >= 0"
    if emodule:
        # Extended filter: the stdlib cannot hold 6-field tuples, so this
        # one is registered through the proxy table below.
        item = (action, re.compile(message, re.I), category,
                re.compile(module), lineno, re.compile(emodule, ))
    else:
        item = (action, re.compile(message, re.I), category,
                re.compile(module), lineno)
    if append:
        warnings.filters.append(_set_proxy_filter(item))
    else:
        warnings.filters.insert(0, _set_proxy_filter(item))
    warnings._filters_mutated()
class Patch:
    """Re-entrant patcher that installs this module's warn/warn_explicit/
    filterwarnings implementations into the stdlib ``warnings`` module.

    Can be invoked directly (``patch()``) or used as a context manager;
    nested uses are counted and the originals are restored when the last
    context exits.
    """

    def __init__(self):
        self._enter = 0

    def __call__(self):
        if not self._enter:
            # First activation: save the originals so they can be restored.
            self._warn_explicit = warnings.warn_explicit
            self._warn = warnings.warn
            self._filterwarnings = warnings.filterwarnings
            warnings.warn_explicit = new_warn_explicit
            warnings.warn = new_warn
            warnings.filterwarnings = newfilterwarnings
        self._enter += 1

    def __enter__(self):
        return self.__call__()

    def __exit__(self, exc_type, exc_value, traceback):
        # The context-manager protocol passes three exception arguments;
        # the original ``__exit__(self)`` signature raised TypeError on
        # every exit and never restored the patched functions.
        self._enter -= 1
        if not self._enter:
            # Last exit: restore the saved stdlib implementations.
            warnings.warn_explicit = self._warn_explicit
            warnings.warn = self._warn
            warnings.filterwarnings = self._filterwarnings
        return False


# Module-level singleton used by callers of this module.
patch = Patch()
| [
"bussonniermatthias@gmail.com"
] | bussonniermatthias@gmail.com |
dfbe4793da8123e0dc4a52d5d76275ed55a387fe | e7d598e7b56f6cc2ddfb193d201b2bc9c06c6856 | /rehab/models/pathology.py | be73eb29ca10117785b4bce3f0fab04430b434d4 | [
"ISC"
] | permissive | RehabForAll/RehabProtocol | c13ed7c9facc899ffedadf846e0b4637ae52d8ef | f8a0a846ba3afedcd17671b077da5665310b7137 | refs/heads/main | 2023-08-03T17:02:27.846893 | 2021-09-29T17:39:46 | 2021-09-29T17:39:46 | 384,837,176 | 0 | 0 | null | 2021-09-29T17:39:47 | 2021-07-11T01:24:35 | Python | UTF-8 | Python | false | false | 947 | py | import json
import uuid
from dataclasses import dataclass, asdict, field
from functools import cached_property
from typing import List
@dataclass
class Pathology:
    """
    Represents a pathology: a named condition with a description and an
    ordered list of rehabilitation phases.
    """
    name: str
    description: str
    phases: List[str] = field(default_factory=list)
    # default_factory (not default=) so every instance gets a fresh UUID.
    # The original ``field(default=str(uuid.uuid4()))`` evaluated the UUID
    # once at class-definition time, giving all instances the same id.
    id: str = field(default_factory=lambda: str(uuid.uuid4()))

    def to_json(self):
        """Serialize this pathology to a JSON string."""
        return json.dumps(asdict(self))

    @classmethod
    def from_json_file(cls, file_name: str):
        """Load a Pathology from a JSON file ('.json' appended if missing)."""
        if not file_name.endswith('.json'):
            file_name += '.json'
        with open(file_name, 'r') as in_file:
            json_file = json.load(in_file)
        # parse file
        return cls(**json_file)

    def add_phase(self, phase):
        """Append a phase to the end of the protocol."""
        self.phases.append(phase)

    def remove_phase(self, phase_number):
        """Remove the 1-based ``phase_number``-th phase."""
        self.phases.pop(phase_number - 1)

    def get_phases(self):
        """Return the list of phases."""
        return self.phases
| [
"gabe.cohen@hey.com"
] | gabe.cohen@hey.com |
9cf43674f5ae4b67b40e064a3e7906ca50fe7a6a | 99df94cb8fbbec6311a26ffd6146f98656a2e1c4 | /app/core/proyectos/migrations/0001_initial.py | a3c70612bcb7ae32362830615f90e83db1370394 | [] | no_license | luis114/Python | e9e36251c5b2cccaec499ac30df97286129bbae9 | 27879e6974a5e58d66ab9db4cfbb2bca49bbd963 | refs/heads/master | 2022-09-25T05:38:09.358209 | 2020-06-04T00:53:39 | 2020-06-04T00:53:39 | 269,226,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,797 | py | # Generated by Django 2.2.12 on 2020-05-14 00:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the proyectos app (auto-generated by Django
    2.2.12): creates the CateProyecto, TipoDocu, Proyecto and Documento
    tables."""

    initial = True

    dependencies = [
        # Documento.idUsuario references userdata.DatosUser.
        ('userdata', '0003_auto_20200513_1531'),
    ]

    operations = [
        migrations.CreateModel(
            name='CateProyecto',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('idCategoriaProyecto', models.CharField(max_length=20, verbose_name='Identificador')),
                ('lenguaje', models.CharField(max_length=200, verbose_name='Lenguaje')),
                ('motorBd', models.CharField(max_length=200, verbose_name='Motor De Base De Datos')),
                ('arquitectura', models.CharField(max_length=200, verbose_name='Arquitectura')),
            ],
            options={
                'verbose_name': 'Datos de Categoria',
                'verbose_name_plural': 'Categoria',
            },
        ),
        migrations.CreateModel(
            name='TipoDocu',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('idTipoDocumento', models.CharField(max_length=200, verbose_name='Tipo Documento')),
                ('nomDocumento', models.CharField(max_length=200, verbose_name='Documento')),
            ],
            options={
                'verbose_name': 'Tipo De Documento',
                'verbose_name_plural': 'Documento',
            },
        ),
        # Proyecto references CateProyecto created above.
        migrations.CreateModel(
            name='Proyecto',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nomProyecto', models.CharField(max_length=200, null=True, verbose_name='Nombre de Proyecto')),
                ('desProyecto', models.TextField(max_length=200, null=True, verbose_name='Descripcion de Proyecto')),
                ('imgProyecto', models.ImageField(max_length=200, null=True, upload_to='', verbose_name='Imagen de Proyecto')),
                ('fechaInicial', models.DateTimeField(auto_now=True)),
                ('fechaFinal', models.DateTimeField(auto_now=True)),
                ('urlReposotorio', models.CharField(max_length=200, null=True, verbose_name='Url Repositorio')),
                ('estProyecto', models.CharField(max_length=200, null=True, verbose_name='Estado del Proyecto')),
                ('idCategoriaProyecto', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='proyectos.CateProyecto')),
            ],
            options={
                'verbose_name': 'Datos del Proyecto',
                'verbose_name_plural': 'Proyecto',
            },
        ),
        # Documento links a file to its project, document type and user.
        migrations.CreateModel(
            name='Documento',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nomDocumento', models.CharField(max_length=200, null=True, verbose_name='Nombre de Documento')),
                ('pathDocument', models.CharField(max_length=200, null=True, verbose_name='Path Documento')),
                ('idProyectoa', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='proyectos.Proyecto')),
                ('idTipoDocumento', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='proyectos.TipoDocu')),
                ('idUsuario', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='userdata.DatosUser')),
            ],
            options={
                'verbose_name': 'Datos del Documento',
                'verbose_name_plural': 'Documento',
            },
        ),
    ]
| [
"64228042+luis114@users.noreply.github.com"
] | 64228042+luis114@users.noreply.github.com |
1e29bf8238137d9c10cafa4a39b30bda313da58b | e4aa334316c940f07cbf87176be53c7cb8703345 | /08_pr.py | cf20f3e7f201b59596c4e8ec470a807f2c028389 | [] | no_license | kumarsaurav20/pythonchapters | f6a9d0a04278f57dabe4bffc1ad91b46f2694ee4 | a45627aabbec22f7f5e6df107d3fe19a846bd33e | refs/heads/main | 2023-07-05T22:48:13.618485 | 2021-08-08T06:17:51 | 2021-08-08T06:17:51 | 386,627,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | # Check that a tuple cannot be changed in pyhton
t = (2, 4, 5, 8)
# Tuples are immutable: this item assignment raises
# "TypeError: 'tuple' object does not support item assignment",
# so execution never reaches the print below.
t[0] = 0
print(t) | [
"noreply@github.com"
] | noreply@github.com |
c73e52136d599822c89c4bad081b5f3c2bd8bf63 | 9425d564cb8c19f0213697ba24eaa512dabd24a4 | /contour.py | e5f64dab769811df855574fce54e3ca04435c920 | [] | no_license | linzhibo/auto_rectangle_detection | bf557fadedb9ebc51843021779d2d68e578e20ef | 5b17e614ba6cfd70d09e91eaef5b44524276e4ad | refs/heads/master | 2020-03-15T03:41:02.377770 | 2018-05-03T05:58:16 | 2018-05-03T05:58:16 | 131,948,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,035 | py |
import cv2
import numpy as np
import sys
import os
import numpy as np
from matplotlib import pyplot as plt
def angle_cos(p0, p1, p2):
    """Return |cos| of the angle at vertex p1 formed by points p0 and p2."""
    v1 = (p0 - p1).astype('float')
    v2 = (p2 - p1).astype('float')
    norm_product = np.sqrt(np.dot(v1, v1) * np.dot(v2, v2))
    return abs(np.dot(v1, v2) / norm_product)
def find_squares(vid):
    """Find convex quadrilateral contours ("squares") in a BGR image.

    The image is blurred, then each channel is binarized at several
    thresholds (plus a Canny pass at threshold 0); contours that simplify
    to 4 convex points with near-right angles and area > 1000 px are kept.
    NOTE: uses Python 2 ``xrange`` and the 3-value cv2.findContours return
    of OpenCV 3.x.
    """
    vid = cv2.GaussianBlur(vid, (5, 5), 0)
    squares = []
    for gray in cv2.split(vid):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                # Threshold 0: use edge detection instead of binarization.
                edge = cv2.Canny(gray, 0, 50, apertureSize=5)
                edge = cv2.dilate(edge, None)
            else:
                retval, edge = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            edge, contours, hierarchy = cv2.findContours(edge, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                # Simplify the contour to within 2% of its perimeter.
                cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    # Largest |cos| over the four corners; < 0.1 means all
                    # angles are close to 90 degrees.
                    max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
                    if max_cos < 0.1:
                        squares.append(cnt)
    return squares
def biggest_square(squares):
    """Return the square in ``squares`` with the largest surface();
    on ties the earliest one wins."""
    best = squares[0]
    for candidate in squares[1:]:
        if surface(candidate) > surface(best):
            best = candidate
    return best
def surface(square):
    """Approximate area of a quadrilateral given as four [x, y] corners,
    using the truncated lengths of two adjacent sides."""
    dx1 = square[2][0] - square[1][0]
    dy1 = square[2][1] - square[1][1]
    dx2 = square[1][0] - square[0][0]
    dy2 = square[1][1] - square[0][1]
    side_a = np.sqrt(dx1 * dx1 + dy1 * dy1)
    side_b = np.sqrt(dx2 * dx2 + dy2 * dy2)
    return int(side_a) * int(side_b)
def transformation(img, squares):
    """Perspective-warp ``img`` so the biggest detected square becomes an
    axis-aligned a x b rectangle; returns the warped image."""
    sq = biggest_square(squares)
    # Pixel lengths of two adjacent sides of the quadrilateral.
    a1 = sq[2][0] - sq[1][0]
    a2 = sq[2][1] - sq[1][1]
    a = int(np.sqrt(a1*a1 + a2*a2))
    b1 = sq[1][0] - sq[0][0]
    b2 = sq[1][1] - sq[0][1]
    b = int(np.sqrt(b1*b1 + b2*b2))
    # Source corners (reordered) and their destination rectangle corners.
    pts1 = np.float32([[sq[0][0],sq[0][1]],[sq[3][0],sq[3][1]],[sq[1][0],sq[1][1]],[sq[2][0],sq[2][1]]])
    pts2 = np.float32([[0,0],[a,0],[0,b],[a,b]])
    M = cv2.getPerspectiveTransform(pts1,pts2)
    dst = cv2.warpPerspective(img,M,(a,b))
    return dst
# Batch-processing locations (retained for callers/scripts that use them).
filepath = './data'
dstpath = './corrected'

# Load the test image and keep a pristine copy for the perspective
# correction: cv2.drawContours draws on its argument in place, and the
# original ``frame_2 = frame`` was only an alias, so the "corrected"
# output also carried the green contour overlay drawn for display.
frame = cv2.imread('88.jpg')
frame_2 = frame.copy()

# Detect candidate rectangles, draw them on the display image and
# perspective-correct the untouched copy.
squares = find_squares(frame)
dst = cv2.drawContours(frame, squares, -1, (0, 255, 0), 3 )
dst_cor = transformation(frame_2, squares)

# Show the annotated input next to the rectified output.
plt.subplot(121),plt.imshow(dst),plt.title('Input')
plt.subplot(122),plt.imshow(dst_cor),plt.title('Output')
plt.show()
| [
"zhibo98@hotmail.com"
] | zhibo98@hotmail.com |
324787c4111ae0c7fbd6537e0453983a3e004533 | 0d9cfecca5ced625e3544b987e1da3648458fd0b | /Python/scripts/match_standalone.py | 6e402e0d267d8c0c2562d1c39650610c44f63744 | [
"MIT"
] | permissive | CandisZhao/SSF | 34168de32f08a9ef0ccdf6c25df093b542f26cc2 | 133ed0518ed6b54bdcb781d78bc1fcd531211d8d | refs/heads/master | 2020-12-30T15:41:35.327364 | 2017-11-16T07:28:51 | 2017-11-16T07:28:51 | 91,160,242 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,162 | py | #!/usr/bin/env python
# vim:ts=4 sw=4 sts=4 et:
"""\
%prog [options] reference_complexes predicted_complexes
Calculates matching scores between a set of reference and predicted complexes.
The input files must contain the reference and the predicted complexes, one
complex per line.
"""
from __future__ import division
import optparse
import sys
import os
from mwmatching import maxWeightMatching
from textwrap import dedent
__author__ = "Tamas Nepusz <nepusz@hal.elte.hu>"
###########################################################################
def canonical_protein_name(name):
"""Returns the canonical name of a protein by performing a few simple
transformations on the name."""
return name.strip().upper()
def is_numeric(x):
    """Returns whether the given value can be interpreted as a number."""
    try:
        float(x)
    except (TypeError, ValueError):
        # Catch only conversion failures; the original bare ``except``
        # also swallowed KeyboardInterrupt/SystemExit.
        return False
    return True
def matching_score(set1, set2):
    """Calculates the matching score between two sets (e.g., a cluster and
    a complex) using the approach of Bader et al, 2001:
    |A intersect B|^2 / (|A| * |B|)."""
    overlap = len(set1.intersection(set2))
    return overlap * overlap / (float(len(set1)) * len(set2))
###########################################################################
def accuracy(reference, predicted):
return (clusteringwise_sensitivity(reference, predicted) * \
positive_predictive_value(reference, predicted)) ** 0.5
def clusteringwise_sensitivity(reference, predicted):
    """Clustering-wise sensitivity (Sn): for every reference complex take
    its best overlap with any predicted cluster, and divide the summed
    overlaps by the summed complex sizes.  Returns 0 for empty complexes."""
    total_size = 0.
    total_best = 0.
    for cmplx in reference:
        total_size += len(cmplx)
        total_best += max(len(cmplx.intersection(cluster)) for cluster in predicted)
    if total_size == 0.:
        return 0.
    return total_best / total_size
def clusteringwise_separation(reference, predicted):
    """Clustering-wise separation: the geometric mean of the average
    complex-wise and the average cluster-wise separation values."""
    intersections = {}
    # marginal_sums[0][i]: total overlap of cluster i with all complexes;
    # marginal_sums[1][j]: total overlap of complex j with all clusters.
    marginal_sums = [0.] * len(predicted), [0.] * len(reference)
    for i, cluster in enumerate(predicted):
        for j, complex in enumerate(reference):
            isect = len(cluster.intersection(complex))
            if isect > 0:
                intersections[i, j] = isect
                marginal_sums[0][i] += isect
                marginal_sums[1][j] += isect
    separations_complex = [0.] * len(reference)
    separations_cluster = [0.] * len(predicted)
    for i, cluster in enumerate(predicted):
        s = marginal_sums[0][i]
        for j, complex in enumerate(reference):
            isect = intersections.get((i, j), 0)
            if isect == 0:
                continue
            # Product of the row-wise and column-wise relative overlaps of
            # the (cluster i, complex j) pair.
            val = float(isect * isect) / (s * marginal_sums[1][j])
            separations_complex[j] += val
            separations_cluster[i] += val
    avg_sep_complex = sum(separations_complex) / len(separations_complex)
    avg_sep_cluster = sum(separations_cluster) / len(separations_cluster)
    return (avg_sep_complex * avg_sep_cluster) ** 0.5
def fraction_matched(reference, predicted, score_threshold=0.25):
    """Return the fraction of reference complexes that are matched by at
    least one predicted cluster with matching_score() above
    *score_threshold*.

    The division is forced to be a true (floating point) division: under
    Python 2 the original ``result / len(reference)`` was integer
    division and floored the result to 0 or 1.
    """
    result = 0
    for c1 in reference:
        for c2 in predicted:
            if matching_score(c1, c2) > score_threshold:
                result += 1
                break
    return result / float(len(reference))
def maximum_matching_ratio(reference, predicted, score_threshold=0.25):
    """Maximum matching ratio (MMR).

    Builds a bipartite graph between reference complexes and predicted
    clusters weighted by matching_score(), keeps only the edges above
    *score_threshold*, and returns the total weight of the maximum
    weight matching divided by the number of reference complexes.
    """
    scores = {}
    n = len(reference)
    for id1, c1 in enumerate(reference):
        for id2, c2 in enumerate(predicted):
            score = matching_score(c1, c2)
            if score <= score_threshold:
                continue
            # Predicted cluster vertices are offset by n so the two
            # sides of the bipartite graph do not collide.
            scores[id1, id2 + n] = score
    # dict.items() works on both Python 2 and 3; the original
    # .iteritems() was Python 2 only.  Also renamed the local away from
    # the builtin name ``input``.
    edges = [(v1, v2, w) for (v1, v2), w in scores.items()]
    mates = maxWeightMatching(edges)
    score = sum(scores[i, mate] for i, mate in enumerate(mates) if i < mate)
    return score / n
def positive_predictive_value(reference, predicted):
    """Cluster-wise positive predictive value (PPV): for each predicted
    cluster, its best overlap with any reference complex, summed over
    clusters and divided by the summed overlaps with all complexes."""
    best_total = 0.0
    overlap_total = 0.0
    for cluster in predicted:
        overlaps = [len(cluster & ref_set) for ref_set in reference]
        # The appended zero keeps max() well defined when *reference* is
        # empty and does not change the sum.
        overlaps.append(0.)
        best_total += max(overlaps)
        overlap_total += sum(overlaps)
    if overlap_total == 0.0:
        return 0.0
    return best_total / overlap_total
def precision(reference, predicted, score_threshold=0.25):
    """Return the fraction of predicted clusters that match at least one
    reference complex with matching_score() above *score_threshold*.

    True division is forced explicitly; under Python 2 the original
    ``result / len(predicted)`` was integer division and floored the
    result to 0 or 1.
    """
    result = 0
    for c1 in predicted:
        for c2 in reference:
            if matching_score(c1, c2) > score_threshold:
                result += 1
                break
    return result / float(len(predicted))
def recall(reference, predicted, score_threshold=0.25):
    """Return the fraction of reference complexes that match at least
    one predicted cluster with matching_score() above
    *score_threshold*."""
    matched = 0.0
    for ref_set in reference:
        if any(matching_score(ref_set, cluster) > score_threshold
               for cluster in predicted):
            matched += 1.0
    return matched / len(reference)
def f_measure(reference, predicted, score_threshold=0.25):
    """Return the harmonic mean (F-measure) of precision() and recall()
    at the given *score_threshold*.

    Returns 0.0 when both precision and recall are zero instead of
    raising ZeroDivisionError as the original did.
    """
    p = precision(reference, predicted, score_threshold)
    r = recall(reference, predicted, score_threshold)
    if p + r == 0:
        return 0.0
    return 2 * p * r / (p + r)
###########################################################################
class MatchApplication(object):
    """Command line application that compares a predicted clustering
    against a set of reference complexes with several quality measures.

    Expects two positional arguments: the reference complex file and the
    predicted cluster file, each containing one complex per line as
    whitespace separated protein names.  NOTE: the class uses Python 2
    print statements and is not Python 3 compatible as written.
    """
    def __init__(self):
        # Name -> callable map of the supported quality measures; the
        # -m/--measure command line option selects among these keys.
        self.measures = dict(
            precision=precision,
            recall=recall,
            f_measure=f_measure,
            frac=fraction_matched,
            acc=accuracy,
            mmr=maximum_matching_ratio
        )
        self.parser = self.create_parser()
    def create_parser(self):
        """Build and return the optparse parser for the CLI.

        The usage string is taken from the module docstring.
        """
        parser = optparse.OptionParser(usage=dedent(sys.modules[__name__].__doc__).strip())
        parser.add_option("-m", "--measure", action="append", dest="measures", default=[],
            metavar="MEASURE", help="calculate the quality measure given by MEASURE. "
            "Possible values are: %s. May be given multiple times." %
            ", ".join(sorted(self.measures.keys())))
        parser.add_option("-n", "--network", metavar="FILE", dest="network",
            help="read the PPI network from FILE and assume that only these complexes "
            "were known to the clustering algorithm")
        parser.add_option("-q", "--quiet", action="store_true", dest="quiet",
            default=False, help="be quiet")
        return parser
    def log(self, msg):
        """Print *msg* to stderr unless --quiet was given."""
        if self.options.quiet:
            return
        print >>sys.stderr, msg
    def read_complexes(self, fname, known_proteins=None, strictness=0.5,
            min_size=3, max_size=100):
        """Read complexes from *fname* (one complex per line).

        If *known_proteins* is given, each complex is restricted to the
        known proteins and kept only when the restricted set still
        covers at least *strictness* of the original complex, has at
        least *min_size* and at most *max_size* members.  Exact
        duplicate complexes are removed, keeping the first occurrence.
        """
        result = []
        for line in open(fname):
            ps = set(canonical_protein_name(x) for x in line.strip().split() if x)
            if known_proteins is not None:
                isect = ps.intersection(known_proteins)
                # Drop complexes that lost too many members or are too
                # small/large after restriction to the known proteins.
                if len(isect) < max(min_size, len(ps) * strictness):
                    continue
                if len(isect) > max_size:
                    continue
                ps = isect
            result.append(ps)
        # O(n^2) duplicate elimination; the first copy of each complex
        # survives.
        to_delete = set()
        for idx, cluster in enumerate(result):
            for idx2, cluster2 in enumerate(result):
                if idx == idx2 or idx2 in to_delete:
                    continue
                if cluster == cluster2:
                    to_delete.add(idx2)
        result = [r for i, r in enumerate(result) if i not in to_delete]
        return result
    def read_network(self, fname):
        """Return the set of canonical protein names occurring in the
        PPI network file *fname*; numeric tokens (edge weights) are
        skipped."""
        known_proteins = set()
        for line in open(fname):
            parts = [canonical_protein_name(part) for part in line.strip().split()
                    if not is_numeric(part)]
            known_proteins.update(parts)
        return known_proteins
    def run(self):
        """Parse the command line, compute the requested measures and
        print them; returns a process exit code (0 on success)."""
        self.options, self.args = self.parser.parse_args()
        if len(self.args) != 2:
            self.parser.print_help()
            return 1
        if not self.options.measures:
            # No -m given: compute every known measure.
            self.options.measures = sorted(self.measures.keys())
        if self.options.network:
            known_proteins = self.read_network(self.options.network)
            self.log("%d known proteins found in network" % len(known_proteins))
        else:
            known_proteins = None
        reference_complexes = self.read_complexes(self.args[0], known_proteins)
        predicted_complexes = self.read_complexes(self.args[1])
        self.log("%d reference complexes, %d predicted complexes" %
                (len(reference_complexes), len(predicted_complexes)))
        # Dump the (filtered) reference complexes to temp.txt and run
        # the external overlapping-NMI binary on it; its output goes
        # straight to stdout.  NOTE(review): relies on the onmi binary
        # being present at this relative path.
        fout = open("temp.txt","w")
        for complex in reference_complexes:
            string = " ".join(complex) + "\n"
            fout.write(string)
        fout.close()
        command = "./Overlapping-NMI-master/onmi temp.txt "+ self.args[1]
        os.system(command)
        for measure in self.options.measures:
            if measure not in self.measures:
                self.log("Ignoring unknown measure: %s" % measure)
                continue
            result = self.measures[measure](reference_complexes, predicted_complexes)
            print "%s = %.4f" %(measure,result)
            # print result
        return 0
def main():
    """Entry point: run the MatchApplication and return its exit code."""
    return MatchApplication().run()
# Allow use both as an importable module and as a script.
if __name__ == "__main__":
    sys.exit(main())
| [
"can.zhao1114@hotmail.com"
] | can.zhao1114@hotmail.com |
b91382be96010e2e1aefacdcb707ef46b39f8400 | 3ca6302ebdc0e47d5d462435ad24a2886cfa5063 | /64.py | 5ce1111db6333ea60c31349788bb7a2df4797496 | [] | no_license | Sem31/PythonBasics | 3859276820d484025d6c3d8f9efaf131b8626da8 | d9bfd520b67056a3cbb747f7a4b71fe55871c082 | refs/heads/master | 2020-04-24T19:09:48.608293 | 2019-02-23T10:56:26 | 2019-02-23T10:56:26 | 172,203,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | #create csv file by this programs
import csv
with open("example1.csv",'w')as obj:
field = ["name","salary"]
writer = csv.DictWriter(obj, fieldnames=field)
writer.writeheader()
writer.writerow({'name':'bob','salary':10000})
writer.writerow({'name':'sem','salary':40000})
writer.writerow({'name':'kamlesh','salary':30000})
writer.writerow({'name':'vishal','salary':50000}) | [
"semprajapat31@gmail.com"
] | semprajapat31@gmail.com |
9fea6a7e73c6e6d9c9615de59079f3c61274895b | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/tourn.py | b96981b870a4794e10ed270201c3a0c1ab925f9b | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 26 | py | ii = [('MereHHB3.py', 13)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
09df1cd07c077f07a91816f38d1c8539b004786e | d5579bf9341d8ae1edcbf3cce1b55ddb582be954 | /test/integration/sagemaker/test_distributed_operations.py | 8fdb943993445ee7cafc9d265d15363805d9919b | [
"Apache-2.0"
] | permissive | bearpelican/sagemaker-fastai-container | ad4759ecdc4cda4a50f63810edb2feda144d72d0 | 32173721a5bbdd0c9009001a778343884d3a117c | refs/heads/master | 2020-04-07T06:30:42.041402 | 2018-11-19T01:13:21 | 2018-11-19T01:13:21 | 158,138,610 | 0 | 0 | Apache-2.0 | 2018-11-18T23:53:36 | 2018-11-18T23:53:36 | null | UTF-8 | Python | false | false | 2,380 | py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import pytest
from test.integration import dist_operations_path
from test.integration.sagemaker.estimator import PytorchTestEstimator
from test.integration.sagemaker.timeout import timeout
@pytest.mark.skip_gpu
def test_dist_operations_cpu(sagemaker_session, ecr_image, instance_type, dist_cpu_backend):
instance_type = instance_type or 'ml.c4.xlarge'
_test_dist_operations(sagemaker_session, ecr_image, instance_type, dist_cpu_backend)
@pytest.mark.skip_cpu
def test_dist_operations_gpu(sagemaker_session, instance_type, ecr_image, dist_gpu_backend):
instance_type = instance_type or 'ml.p2.xlarge'
_test_dist_operations(sagemaker_session, ecr_image, instance_type, dist_gpu_backend)
@pytest.mark.skip_cpu
def test_dist_operations_multi_gpu(sagemaker_session, ecr_image, dist_gpu_backend):
instance_type = 'ml.p2.8xlarge'
_test_dist_operations(sagemaker_session, ecr_image, instance_type, dist_gpu_backend, 1)
def _test_dist_operations(sagemaker_session, ecr_image, instance_type, dist_backend, train_instance_count=3):
with timeout(minutes=8):
pytorch = PytorchTestEstimator(entry_point=dist_operations_path, role='SageMakerRole',
train_instance_count=train_instance_count, train_instance_type=instance_type,
sagemaker_session=sagemaker_session, docker_image_uri=ecr_image,
hyperparameters={'backend': dist_backend})
pytorch.sagemaker_session.default_bucket()
fake_input = pytorch.sagemaker_session.upload_data(path=dist_operations_path,
key_prefix='pytorch/distributed_operations')
pytorch.fit({'required_argument': fake_input})
| [
"mmcclean@amazon.com"
] | mmcclean@amazon.com |
d7a7dd541bc436358194e21d56e22cccd5a27ae9 | 5178f5aa20a857f8744fb959e8b246079c800c65 | /02_oop/tr/src/23_list/list_tr2.py | 1c3806bcda6b62f6a939b84a1cd64d558d120ece | [] | no_license | murayama333/python2020 | 4c3f35a0d78426c96f0fbaed335f9a63227205da | 8afe367b8b42fcf9489fff1da1866e88f3af3b33 | refs/heads/master | 2021-05-19T04:03:46.295906 | 2021-03-09T22:23:58 | 2021-03-09T22:23:58 | 251,520,131 | 0 | 3 | null | 2020-10-26T01:20:09 | 2020-03-31T06:35:18 | Python | UTF-8 | Python | false | false | 99 | py | my_list = ["a", "b", "c", "d", "e"]
my_list.reverse()
print("".join([e.upper() for e in my_list]))
| [
"murayama333@gmail.com"
] | murayama333@gmail.com |
621a71a66735f63280d746b371e164c3f0f864c3 | b52732e4f54c9f04d10378b05e8cb4575a7c1122 | /gams_parser/gams_parser.py | 699d7bda3ee656a93bc461b0e1ffe72a01050a01 | [] | no_license | anderson-optimization/gams-parser | 759344ded12d45ec10115d3517f564855f506007 | 3786206a34d6a6ae08d9525965ddc21df0beefe1 | refs/heads/master | 2020-05-02T23:26:49.920945 | 2019-09-17T21:23:12 | 2019-09-17T21:23:12 | 178,280,296 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,784 | py |
from lark import Lark
import os
from .tree_injector import TreeInjector
from .tree_to_model import TreeToModel
import logging
logger = logging.getLogger('gams_parser')
dirname = os.path.dirname(__file__)
grammar_gams = os.path.join(dirname, 'grammar/gams.lark')
grammar_ao_inject = os.path.join(dirname, 'grammar/ao_inject.lark')
with open(grammar_gams,'r') as in_file:
text=in_file.read()
lark_gams = Lark(text,propagate_positions=True)
with open(grammar_ao_inject,'r') as in_file:
text=in_file.read()
lark_ao_inject = Lark(text)
def scrub_meta(obj, bad=["_meta","meta"]):
if isinstance(obj, dict):
for k in obj.keys():
if k in bad:
del obj[k]
else:
scrub_meta(obj[k], bad)
elif isinstance(obj, list):
for i in reversed(range(len(obj))):
if obj[i] in bad:
del obj[i]
else:
scrub_meta(obj[i], bad)
else:
# neither a dict nor a list, do nothing
pass
class GamsParser():
def __init__(self,file):
if isinstance(file,str):
self.file = open(file,'r')
else:
self.file = file
self.text=self.file.read()
def inject(self,context=None,run=None,data=None):
logger.debug("GamsParser : Inject : 1. Parse")
self.text+="\n"
pt= lark_ao_inject.parse(self.text)
#print("Parse Tree")
#print(pt.pretty())
logger.debug("GamsParser : Inject : 2. Transform")
TI=TreeInjector(context,data=data)
new_model,inject_map=TI.transform(pt)
return new_model,inject_map
def parse(self):
return lark_gams.parse(self.text)
def transform(self):
parse_tree=lark_gams.parse(self.text)
model=TreeToModel().transform(parse_tree)
model.cross_reference()
model.reference_lines(self.text)
return model
| [
"ericjanderson4@gmail.com"
] | ericjanderson4@gmail.com |
7fb65c504197305fd3cdcc370807cdcc1aa103a4 | f79061c27f0f1c718d2cfdea5d5281c5a6eeaaf7 | /platforms/typescript.py | 29f5c56cb6398b0653d9d96cd8c5268fe2335409 | [
"Unlicense"
] | permissive | jt28828/fontawesome-enum-generator | 09bec3297ab192ee88bc7b66be5d1ab62b70e0c6 | 30b27885f3c3dadb5b17af5033b4c57169dda8f4 | refs/heads/master | 2023-02-15T12:42:24.242274 | 2020-07-15T12:14:45 | 2020-07-15T12:14:45 | 279,809,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | from typing import Dict, TextIO
from utils.file import write_enum
from utils.string import to_pascal_case
def read_template() -> str:
template_file: TextIO = open("./file_templates/typescript-enum.ts", "r")
return template_file.read()
def as_enum_row(key: str, json: Dict) -> str:
enum_name = to_pascal_case(key)
return f" {enum_name} = \"&#x{json[key]['unicode']};\",\n"
def as_typescript_enum(icon_json: Dict):
enum_template = read_template()
enum_rows = ""
for key in icon_json:
enum_rows += as_enum_row(key, icon_json)
updated_enum = enum_template.replace("<<Contents>>", enum_rows)
write_enum(updated_enum, "fontawesome-codes.ts")
| [
"jt28828@gmail.com"
] | jt28828@gmail.com |
d6be0f7331fa6b064a004a0259e1d74da5084b2b | aa3f0f41cf53b71ac6d070025a5b2150ff6a30ca | /main.py | 23d5c89f8c56bf49e6225b028963807e90793572 | [] | no_license | camilled671/diaz-camill-set-up | bccb73576953fa4f3f9e22ee8c8fcda0040dc7f6 | 664707947544f404560c1e6590340271c35ceca5 | refs/heads/master | 2022-12-28T18:41:20.157834 | 2020-10-19T17:48:12 | 2020-10-19T17:48:12 | 305,466,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | def on_forever():
pass
forever(on_forever)
light.show_animation(light.rainbowAnimation, 500) | [
"72838912+camilled671@users.noreply.github.com"
] | 72838912+camilled671@users.noreply.github.com |
89b1f16b7c44389e677d7b3500ffc2e6d9da37f5 | 9600a6178957e9e37d073dde4f65690796eb6dff | /.venv/bin/symilar | 2a8796f197c6cab58958b756f733b316bbbeaf8d | [] | no_license | gregoryswedeen/IOTActiveNoiseCancelling | 96ee30e9adc0f473a969607929235227944ea26b | ee20de6c1f3a056c817d3a437fbc9d70f5bfd6dd | refs/heads/master | 2020-04-10T10:34:30.133340 | 2018-12-08T19:19:50 | 2018-12-08T19:19:50 | 160,970,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | #!/Users/greg/Desktop/IOT/.venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_symilar
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run_symilar())
| [
"greg@Gregs-MacBook-Pro.local"
] | greg@Gregs-MacBook-Pro.local | |
b144b7816aafcab541d963b588d1e7bdc49ea124 | 0f17dce7c6f8c659b5dcdcf5d0f34942aaaf55af | /bilinear_interpolation.py | 8a53740e5483155c201ba7060b5829f17da12fa0 | [
"MIT"
] | permissive | HelinXu/NA_project1 | 99516d01ea55510cb8e0832b7730160349fe6517 | 5a62b12832819d39a2872669ab9fee077c9d1d8f | refs/heads/main | 2023-08-23T22:24:15.893537 | 2021-10-15T04:15:04 | 2021-10-15T04:15:04 | 417,161,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,467 | py | # NA project 1_1
# Auther: Helin Xu
import cv2
import numpy as np
import math
from math import pi as PI
def read_img():
img = cv2.imread('./img/qr-polar.png')
return img
def bilinear_interpolate_v1(img, length):
'''
version 1
length must be aligned to the original image.
'''
output_img = np.zeros((length, length, 3))
for y in range(length):
for x in range(length):
_x = x * math.cos(y * 2 * PI / length) + length
_y = -x * math.sin(y * 2 * PI / length) + length
x1 = int(_x) # lower bound for float x
y1 = int(_y) # lower bound for float y
x2 = int(_x) + 1 # upper bound for float x
y2 = int(_y) + 1 # upper bound for float y
rgb11 = img[y1][x1]
rgb12 = img[y1][x2]
rgb21 = img[y2][x1]
rgb22 = img[y2][x2]
rgb = rgb11 * (x2 - _x) * (y2 - _y) +\
rgb12 * (x2 - _x) * (_y - y1) +\
rgb21 * (_x - x1) * (y2 - _y) +\
rgb22 * (_x - x1) * (_y - y1)
output_img[y][x] = rgb
return output_img
def bilinear_interpolate(img, N):
'''
version 2
output lenght can be arbitrary number N.
Input:
- img: original image
- N: output width & height
Output:
- output_img: numpy array (N*N*3)
'''
output_img = np.zeros((N, N, 3))
length = img.shape[0]
for x in range(N):
x_norm = x / N * 2 * PI
for y in range(N):
y_norm = y / N * 2 * PI
_x = length * (x_norm * math.cos(y_norm) + 2 * PI) / (4 * PI)
_y = length * (-x_norm * math.sin(y_norm) + 2 * PI) / (4 * PI)
x1 = int(_x) # lower bound for float x
y1 = int(_y) # lower bound for float y
x2 = int(_x) + 1 # upper bound for float x
y2 = int(_y) + 1 # upper bound for float y
rgb11 = img[y1][x1]
rgb12 = img[y1][x2]
rgb21 = img[y2][x1]
rgb22 = img[y2][x2]
rgb = rgb11 * (x2 - _x) * (y2 - _y) +\
rgb12 * (x2 - _x) * (_y - y1) +\
rgb21 * (_x - x1) * (y2 - _y) +\
rgb22 * (_x - x1) * (_y - y1)
output_img[y][x] = rgb
return output_img
if __name__ == '__main__':
polar_img = read_img()
output_img = bilinear_interpolate(polar_img, 200)
cv2.imwrite('./img/qr-code.png', output_img)
| [
"xhl19@mails.tsinghua.edu.cn"
] | xhl19@mails.tsinghua.edu.cn |
d9868dd88cf02a964c6d945a91add7bd02582f0b | 8dd003be19ec906c114420b362677414b513be43 | /tutorial_07/exercise7_1.py | 7eb40e4cde5e4fc12db13585adc12346a630740a | [] | no_license | kneureither/compMBP | c14cb85961f3f07fe9ac6368a0acf72abbd655d0 | 7108a16b85bab6ac7500bc572938a2b0efe96247 | refs/heads/main | 2023-06-10T16:49:37.675120 | 2021-06-18T12:45:37 | 2021-06-18T12:45:37 | 360,791,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,027 | py | import scipy
import numpy as np
import matplotlib.pyplot as plt
from compMBP.tutorial_05 import ed
from matplotlib import cm
import functools
def task_decorator(func):
def inner(*args, **kwargs):
print('--'*30)
print(func.__name__)
func(*args, **kwargs)
return inner
class MatrixProductStates():
def __init__(self, L=14, J=1, g=1.5):
self.L = L
self.J = J
self.g = g
def initialize(self):
sx_list = ed.gen_sx_list(L)
sz_list = ed.gen_sz_list(L)
self.H = ed.gen_hamiltonian(sx_list, sz_list, g=g, J=J)
E0, self.psi0 = self.get_ground_state()
self.chimax_exact = 2 ** (self.L // 2)
def get_ground_state(self):
E, vecs = scipy.sparse.linalg.eigsh(self.H, which='SA')
psi0 = vecs[:, 0]
return E, psi0
def product(self, Mns):
product = Mns[0][:, 0, :]
for Mn_idx in range(1, len(Mns)):
product = product @ Mns[Mn_idx][:, 0, :]
print(product)
product = Mns[0][:, 1, :]
for Mn_idx in range(1, len(Mns)):
product = product @ Mns[Mn_idx][:, 0, :]
print(product)
def get_product(self, Mns):
psi = 0
for j in range(2):
for i in range(2):
product = Mns[0][:, j, :]
for Mn_idx in range(1, len(Mns)):
print(product)
product = np.matmul(product, Mns[Mn_idx][:, i, :])
psi += product
return psi
def print_number_of_floats(self, list_of_arrays):
number_floats = sum([array.size for array in list_of_arrays])
print('Number of floats:', number_floats)
@task_decorator
def task1a(self):
E, psi0 = self.get_ground_state()
@task_decorator
def task1b(self):
E, psi0 = self.get_ground_state()
Mns = self.compress(psi0, self.L, 2 ** (self.L // 2))
print([Mn.shape for Mn in Mns])
self.print_number_of_floats(Mns)
@task_decorator
def task1c(self):
E, psi0 = self.get_ground_state()
print(psi0.shape)
Mns = self.compress(psi0, self.L, 10)
print([Mn.shape for Mn in Mns])
self.print_number_of_floats(Mns)
def overlap(self, product_state_1, product_state_2=None):
"""
1) Contract left (bra and ket)
2) Contract right (bra and ket)
3) Cotract left with right
:param product_state_1:
:param product_state_2:
:return:
"""
if product_state_2 is None:
product_state_2 = product_state_1
left = np.tensordot(product_state_1[0], product_state_2[0].conj(), ((0, 1), (0, 1)))
for Mn_1, Mn_2 in zip(product_state_1[1:], product_state_2[1:]):
right = np.tensordot(Mn_1, Mn_2.conj(), (1, 1))
left = np.tensordot(left, right, ((0, 1), (0, 2)))
return left[0, 0]
@staticmethod
def overlap2(bra, ket=None):
"""
More efficient:
1) ket to left
2) (ket and left) to bra
:param bra:
:param ket:
:return:
"""
if ket is None:
ket = bra
braket = np.ones((1, 1))
for bra_i, ket_i in zip(bra, ket):
ket_i = np.tensordot(braket, ket_i, (1, 0))
braket = np.tensordot(bra_i, ket_i.conj(), ((0, 1), (0, 1)))
return braket.item()
@task_decorator
def task1d(self):
E, psi0 = self.get_ground_state()
Mns_ex = self.compress(psi0, self.L, 256)
Mns_compr = self.compress(psi0, self.L, 10)
overlap_exact_exact = self.overlap2(Mns_ex)
print('overlap of psi_exact with itself: ', overlap_exact_exact)
overlap_exact_compr = self.overlap2(Mns_ex, Mns_compr)
print('overlap of psi_exact with psi_compr: ', overlap_exact_compr)
@task_decorator
def task1f(self):
psi = np.zeros(int(2 ** self.L))
psi[0] = 1
mps = self.compress(psi, self.L, self.chimax_exact)
E, psi0 = self.get_ground_state()
mps0 = self.compress(psi0, self.L, self.chimax_exact)
overlap_up = self.overlap2(mps0, mps)
print('overlap of all up and groundstate:',overlap_up)
def compress(self, psi, L, chimax):
Mns = []
for n in range(L):
psi = psi.reshape(-1, 2 ** (L - n) // 2)
M_n, lambda_n, psitilde = scipy.linalg.svd(psi, full_matrices=False, lapack_driver='gesvd')
keep = np.argsort(lambda_n)[:: -1][: chimax]
M_n = M_n[:, keep]
lambda_ = lambda_n[keep]
psitilde = psitilde[keep, :]
M_n = M_n.reshape(-1, 2, M_n.shape[1])
Mns.append(M_n)
psi = lambda_[:, np.newaxis] * psitilde[:, :]
return Mns
if __name__ == '__main__':
part1 = MatrixProductStates(L=14, J=1, g=1.5)
part1.initialize()
part1.task1a()
part1.task1b()
part1.task1c()
part1.task1d()
part1.task1f()
| [
"o.kuijpers@appliednanolayers.com"
] | o.kuijpers@appliednanolayers.com |
0b76eb3152ea21f50c238690516d9c4b250c8271 | 1c9a0b4b5fd25c73f99bd9839973ca652077d078 | /fotogram/migrations/0002_auto_20200227_1244.py | aaf779808dd2cd6c492c12e4e2a2dc894a1e9143 | [] | no_license | vthakur-1/fotogrammysite | eebc2a7ebe2a2d3133153ecc046b3de32b77dbb6 | 14be1758a170fcd70be555fd918e5c40277427e2 | refs/heads/master | 2021-01-16T04:26:51.472003 | 2020-03-03T09:17:11 | 2020-03-03T09:17:11 | 242,975,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | # Generated by Django 2.2.9 on 2020-02-27 12:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fotogram', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='image',
field=models.ImageField(upload_to='photos'),
),
]
| [
"vthakur@bestpeeers.com"
] | vthakur@bestpeeers.com |
003e4fca3eace8c6be7169624bfe843f9b977a21 | 605a4d0c4fd3beb3a1c290317bea56ab2a86f999 | /users/views.py | 444b75024b1fe9b1e24205e41f052d9bfaeb1e0b | [] | no_license | Nikolai586/hw05_final | eda54ca105e4babb7b5a8154c99fdff019d2faa1 | 9e9f0c1738c03c60946b4447d6c2295351049b6d | refs/heads/master | 2023-02-15T17:04:12.743697 | 2021-01-13T12:50:32 | 2021-01-13T12:50:32 | 254,456,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import CreateView
from .forms import CreationForm
class SignUp(CreateView):
form_class = CreationForm
success_url = "/auth/login/"
template_name = "signup.html"
| [
"n.chistoprudov@yandex.ru"
] | n.chistoprudov@yandex.ru |
5dcce59288ba49c09d94cfea168bf68303a22c56 | cd02ebba78037ecc06c77fa3c7f1ac942613d330 | /UR5_Move/build/universal_robot/ur_description/catkin_generated/pkg.installspace.context.pc.py | cb37c28f0a2cde3b23c13fd45d1ea0d1e9f6d4ef | [] | no_license | tianxiang84/ROS_Project | 8e63cf2dad1fc0aa7db8d1ede0eb370be3b92d0a | 7b1efe121fde034fcf5e377877c0106f248e4d5c | refs/heads/master | 2021-01-24T13:29:05.231000 | 2018-02-27T19:17:00 | 2018-02-27T19:17:00 | 123,176,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "ur_description"
PROJECT_SPACE_DIR = "/home/sdrwcs/ROS_Projects/UR5_Move/install"
PROJECT_VERSION = "1.2.1"
| [
"su.tianxiang@gmail.com"
] | su.tianxiang@gmail.com |
5c7defef90a65bb7f61aeef89e351f7402713492 | 62bcb903ed04041e9b896f6e18efca93dbc20fa6 | /medium/79.py | c26c1bc3810757d26a6cb4d4835ebf77e29e250d | [] | no_license | moxi43/leetcode-to-the-moon | 41022dbb5f5640eeaf39acbc852401a1f6b7cfed | b3ff69e9cb6139668496f1258bb7690769e762e9 | refs/heads/main | 2023-08-25T20:43:03.623753 | 2021-10-11T04:32:08 | 2021-10-11T04:32:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | # Link to the problem: https://leetcode.com/problems/word-search/
class Solution:
def exist(self, board: List[List[str]], word: str) -> bool:
for i in range(len(board)):
for j in range(len(board[0])):
if board[i][j] == word[0] and self.traverse(board, i, j, word):
return True
return False
def traverse(self, board, i, j, word):
if len(word) == 0:
return True
if i < 0 or i >= len(board) or j < 0 or j >= len(board[0]) or board[i][j] != word[0]:
return False
match = False
original_value = board[i][j]
board[i][j] = "_"
match = self.traverse(board, i+1, j, word[1:]) or self.traverse(board, i-1, j, word[1:]) or self.traverse(board, i, j+1, word[1:]) or self.traverse(board, i, j-1, word[1:])
board[i][j] = original_value
return match | [
"vostretsov0505@gmail.com"
] | vostretsov0505@gmail.com |
32f32d142cd5b87ef07bfe78179e6dbe85a6fe01 | a368d89f996f035f02e8b2f0b5ab62e4738b311b | /level1/lesson3.py | 660599626f422afdb3473d92a1399d4e458ef3e7 | [] | no_license | iiinjoy/algorithms_python_lessons | e95934b50734508b95eca67249ffc66979e154c1 | 16ac0db57de8d12b4920e433b734c84f4264b301 | refs/heads/master | 2021-01-15T01:45:00.203631 | 2020-11-20T14:59:17 | 2020-11-20T14:59:17 | 242,836,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,367 | py | #!/usr/bin/env python3
import ctypes
class DynArray:
    """Dynamic array backed by a raw ctypes ``py_object`` buffer.

    Grows by doubling on append/insert and shrinks by a factor of 1.5
    on delete once occupancy drops below half the capacity (never below
    a capacity of 16).
    """
    def __init__(self):
        # count: number of stored items; capacity: allocated slots.
        self.count = 0
        self.capacity = 16
        self.array = self.make_array(self.capacity)
    def __len__(self):
        """Return the number of stored items."""
        return self.count
    def make_array(self, new_capacity):
        """Allocate a fresh ctypes buffer of *new_capacity* object slots."""
        return (new_capacity * ctypes.py_object)()
    def __getitem__(self, i):
        """Return the item at index *i* (no negative-index support)."""
        if i < 0 or i >= self.count:
            raise IndexError('Index is out of bounds')
        return self.array[i]
    def resize(self, new_capacity):
        """Reallocate the buffer to *new_capacity* and copy items over."""
        new_array = self.make_array(new_capacity)
        for i in range(self.count):
            new_array[i] = self.array[i]
        self.array = new_array
        self.capacity = new_capacity
    def append(self, itm):
        """Append *itm*, doubling the capacity when the buffer is full."""
        if self.count == self.capacity:
            self.resize(2*self.capacity)
        self.array[self.count] = itm
        self.count += 1
    def insert(self, i, itm):
        """Insert *itm* at index *i*, shifting later items right.

        When the buffer is full, growth and shifting are fused into a
        single copy pass into the new, doubled buffer.
        """
        if i < 0 or i > self.count:
            raise IndexError('Index is out of bounds')
        if self.count == self.capacity:
            new_capacity = 2*self.capacity
            new_array = self.make_array(new_capacity)
            # Copy back-to-front, leaving slot i open for the new item.
            for j in range(self.count, 0, -1):
                if j <= i:
                    new_array[j-1] = self.array[j-1]
                else:
                    new_array[j] = self.array[j-1]
            new_array[i] = itm
            self.array = new_array
            self.capacity = new_capacity
        else:
            # Shift the tail right in place, then drop the item in.
            for j in range(self.count, i, -1):
                self.array[j] = self.array[j-1]
            self.array[i] = itm
        self.count += 1
    def delete(self, i):
        """Delete the item at index *i*, shifting later items left.

        Shrinks the buffer (capacity / 1.5, floor 16) when occupancy
        falls below half the capacity; the shrink and the shift are
        fused into one copy pass.
        """
        if i < 0 or i >= self.count:
            raise IndexError('Index is out of bounds')
        if (self.capacity > 16) and (self.count-1 < self.capacity/2):
            new_capacity = int(self.capacity/1.5)
            if new_capacity < 16:
                new_capacity = 16
            new_array = self.make_array(new_capacity)
            # Copy everything except slot i into the smaller buffer.
            for j in range(self.count-1):
                if j < i:
                    new_array[j] = self.array[j]
                else:
                    new_array[j] = self.array[j+1]
            self.array = new_array
            self.capacity = new_capacity
        else:
            for j in range(i, self.count-1):
                self.array[j] = self.array[j+1]
        self.count -= 1
| [
"iiinjoy@gmail.com"
] | iiinjoy@gmail.com |
194ec17dc4782d8db8ac1bd6cb988904879d5cd0 | f64adace83544d6653a1025cc6ded9c46be269f3 | /week1/week1-1.py | b569e07d9ccf77725776702d1d3175444864fa95 | [] | no_license | yved/python_lesson2 | f7626edae91085aed27ea8838ee4852ea77bda14 | 7170cf8ae85bf50a993efcff3dc901ed3148b03c | refs/heads/master | 2020-04-29T11:28:20.423368 | 2019-03-08T13:06:03 | 2019-03-08T13:06:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | # #商管程式設計二第一周上課內容 #five littile duck #function
#版本一
def over_mother():
print("Over the hills and far away")
print("Mother duck said quack quack quack")
def n_duck(num):
print("%s litte ducks went out day" % num)
def only_n(num):
print("But only %s little ducks came back" % num)
# n_duck("Five")
# over_mother()
# only_n("Four")
# print()
# n_duck("Four")
# over_mother()
# only_n("Three")
# print()
# n_duck("Three")
# over_mother()
# only_n("Two")
# print()
# print("end")
#版本二 更精簡
def sing_unit(num1,num2):
n_duck(num1)
over_mother()
only_n(num2)
sing_unit("Five","Four")
print()
sing_unit("Four","Three")
print()
sing_unit("Three","Two") | [
"n0975116268@gmail.com"
] | n0975116268@gmail.com |
fb2be3e6b7ee3d9b312742dd57afeede7ca2bac3 | a654ff745707f986643719e34ccc279dd7639368 | /EmailFetcher/manage.py | 0ce7d610b43c20c4bb543bd193e7e201bda7b64f | [] | no_license | Slammy-akash/Email-Fetcher-Web-App | 8936c6553f2d0114389c92b51f63086d2f426ecf | a6ddaf12d18dd73cddf2d31f94dac5cbe732ad75 | refs/heads/master | 2023-07-20T16:21:24.166181 | 2021-09-01T16:25:48 | 2021-09-01T16:25:48 | 402,082,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'EmailFetcher.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"akashlonkarmail@gmail.com"
] | akashlonkarmail@gmail.com |
a02991bb550d24cd031f5ec463f1626b4f85959b | 53787e5cc1f2c9112c79cf5f6b07fc7961419c08 | /scripts/decoder_performance_plots.py | 5d74e05757f706095a5d8577605e0ac07039b691 | [] | no_license | sanjayankur31/BillingsEtAl2014_GCL_SpikingSimulationAndAnalysis | a1ccbf0ffa295eef11da515ff0de9b83d3c9dee2 | 52f95f6425c3d732bca2362c6fcd7c7e6987eee6 | refs/heads/master | 2021-05-28T03:45:49.352361 | 2015-01-09T11:25:33 | 2015-01-09T11:25:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,655 | py | import numpy as np
import matplotlib
matplotlib.rc('font', family='Helvetica', size=18)
from matplotlib import pyplot as plt
linewidth = 1.5
file_extension='eps'
training_size = np.loadtxt('../../data/data_training_size.csv', delimiter=',')
training_size_mi_poor = np.loadtxt('../../data/data_training_size_mi_f0.1.csv', delimiter=',')
training_size_mi_good = np.loadtxt('../../data/data_training_size_mi_f0.5.csv', delimiter=',')
fig, ax = plt.subplots(figsize=(3,1.75))
ax_poor = ax.twinx()
ax_poor.plot(training_size, training_size_mi_poor, linewidth=linewidth, label='qe', c='#848484')
ax.plot(training_size, training_size_mi_good, linewidth=linewidth, label='qe', c='k')
ax.locator_params(tight=True,)
ax_poor.locator_params(axis='y', tight=True, nbins=3)
ax.locator_params(axis='y', tight=True)
ax.set_xticks([20, 50, 80])
ax.set_yticks([9.4, 9.8])
for tl in ax_poor.get_yticklabels():
tl.set_color('#848484')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_xlabel('Training set size')
ax.set_ylabel('MI (bits)')
ax.set_zorder(ax_poor.get_zorder()+1)
ax.patch.set_visible(False)
ax_poor.patch.set_visible(True)
fig.savefig('training_size.'+file_extension)
testing_size = np.loadtxt('../../data/data_testing_size.csv', delimiter=',')
testing_size_mi_plugin_poor = np.loadtxt('../../data/data_testing_size_mi_plugin_f0.1.csv', delimiter=',')
testing_size_mi_plugin_good = np.loadtxt('../../data/data_testing_size_mi_plugin_f0.5.csv', delimiter=',')
testing_size_mi_qe_poor = np.loadtxt('../../data/data_testing_size_mi_qe_f0.1.csv', delimiter=',')
testing_size_mi_qe_good = np.loadtxt('../../data/data_testing_size_mi_qe_f0.5.csv', delimiter=',')
fig, ax = plt.subplots(figsize=(3,1.75))
ax_poor = ax.twinx()
ax_poor.plot(testing_size, testing_size_mi_plugin_poor, linewidth=linewidth, label='qe', c='#FF8000')
ax_poor.plot(testing_size, testing_size_mi_qe_poor, linewidth=linewidth, label='qe', c='#848484')#A4A4A4
ax.plot(testing_size, testing_size_mi_plugin_good, linewidth=linewidth, label='qe', c='r')
ax.plot(testing_size, testing_size_mi_qe_good, linewidth=linewidth, label='qe', c='k')
ax_poor.locator_params(axis='y', tight=False, nbins=3)
for tl in ax_poor.get_yticklabels():
tl.set_color('#848484')
ax.locator_params(tight=True)
ax.locator_params(axis='y', tight=False, nbins=3)
ax.set_xticks([20, 50, 80])
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_xlabel('Testing set size')
ax.set_ylabel('MI (bits)')
ax.set_zorder(ax_poor.get_zorder()+1)
ax.patch.set_visible(False)
ax_poor.patch.set_visible(True)
fig.savefig('testing_size.'+file_extension)
plt.show()
| [
"e.piasini@ucl.ac.uk"
] | e.piasini@ucl.ac.uk |
9d43309417fafb43f5aaf4c0c7e0890f85b8e219 | b5df682b69682dbb2ba954e28b0fbd76b41ba465 | /OWL/migrations/0001_initial.py | e6cff32955b5ac15fb0df187bcdc482babfdecaf | [] | no_license | nlittlepoole/Bernie-s-Army | 4c61a665def4f0e29d38fc2a3f50d43d8f2e3382 | 9504ebc5269e2431c7c40f8bd1e4cf1b395a9af0 | refs/heads/master | 2021-01-11T10:47:40.352664 | 2015-05-19T02:23:23 | 2015-05-19T02:23:23 | 35,443,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration for the OWL app: creates the Soldier table.

    No dependencies -- this is the first migration of the app.
    """

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Soldier',
            fields=[
                # Auto-incrementing primary key (Django's default id column).
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('phone', models.CharField(max_length=15)),
                ('carrier', models.CharField(max_length=100)),
                ('join_date', models.DateTimeField(verbose_name=b'date joined')),
                ('zip_code', models.IntegerField(default=0)),
            ],
        ),
    ]
| [
"nl2418@columbia.edu"
] | nl2418@columbia.edu |
bdebc2ca24df7c0c89118d81e345933c293f4e6b | 3320082ef45c39e62157c487052758f0b4a86c33 | /app/TaskAPISchedulerEngine/TaskAPISchedulerEngine/wsgi.py | 2c6491c47b9ead6cabfdaa12f7f6f15d12b016ce | [] | no_license | art-bug/TaskAPIScheduler | f24e6fb8d7e783d6f3ce9eb083d8c62380480e9c | 5cdf9b9dad54b67b68b34bc71ebf4eff40fcccc3 | refs/heads/main | 2023-01-12T16:28:21.096985 | 2020-11-16T06:50:22 | 2020-11-16T06:50:22 | 313,212,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | """
WSGI config for TaskAPISchedulerEngine project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'TaskAPISchedulerEngine.settings')

# Module-level WSGI callable picked up by servers (gunicorn/uwsgi/mod_wsgi).
application = get_wsgi_application()
| [
"artbag94@gmail.com"
] | artbag94@gmail.com |
59dff948764973fa5020813450cbf72ec9b819a3 | 7e6ecf52e90d618ebb2df7be864e1370543540a8 | /30/loginhash/myhash/views.py | dc6e2736ba65cff3214430745b84cbdd7c615e0a | [] | no_license | rangai/draft_0 | 76be571d40043b98c9faa830a5924b4886e9d975 | bf7ff892b167ebae0ad9378f9aebd0b3cf3c8e48 | refs/heads/main | 2023-08-27T03:58:49.876422 | 2021-11-03T07:22:31 | 2021-11-03T07:22:31 | 397,608,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | import hashlib
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from myhash.models import Myhash
from myhash.forms import MyhashForm
def top(request):
    """Render the landing page listing every stored Myhash record."""
    records = Myhash.objects.all()
    return render(request, "myhash/top.html", {"myhash": records})
@login_required
def hash_new(request):
    """Create a new Myhash entry (login required).

    GET builds an empty form; a valid POST saves the record -- storing the
    SHA-256 hex digest of ``msg`` in ``hsh`` -- and redirects to the top
    page; an invalid POST keeps the bound form so its errors are shown by
    the shared ``return render(...)`` that follows.
    """
    if request.method == 'POST':
        form = MyhashForm(request.POST)
        if form.is_valid():
            myhash = form.save(commit=False)
            # Persist the SHA-256 digest of the submitted message.
            myhash.hsh = hashlib.sha256(myhash.msg.encode()).hexdigest()
            myhash.save()
            return redirect(top)
    else:
        form = MyhashForm()
return render(request, "myhash/myhash_new.html", {'form':form}) | [
"fibo.112358132134@gmail.com"
] | fibo.112358132134@gmail.com |
d04d69a830c7cfa1d28c20023309327dad01a9dd | a20f588629b8d27655b3e47ccb517c3e7fdb1c80 | /tests/test_fitsmanager.py | 924d73e9cb0da045ab2461895e4000423d0525c9 | [
"MIT"
] | permissive | lgrcia/prose | 52d114f5efff539bcbf2a418de2ca03ef2c12518 | 1c90ff6ebef671b2e74f30643656d1528ed45190 | refs/heads/main | 2023-08-03T08:50:24.294406 | 2023-07-21T15:25:47 | 2023-07-21T15:25:47 | 259,437,330 | 45 | 8 | MIT | 2023-08-30T00:10:15 | 2020-04-27T19:56:37 | Python | UTF-8 | Python | false | false | 530 | py | from astropy.io.fits import Header
from prose import FitsManager, Image, Telescope
def test_empty_header(tmp_path):
    """A default Image (empty header) can be written and indexed."""
    blank = Image()
    blank.writeto(tmp_path / "test.fits")
    FitsManager(tmp_path)
def test_custom_fm(tmp_path):
    """A Telescope with a custom filter keyword is honored by FitsManager."""
    kw = "FILT"
    expected = "test_filter"
    image = Image()
    image.header[kw] = expected
    image.writeto(tmp_path / "test.fits")
    fm = FitsManager(tmp_path, telescope=Telescope(keyword_filter=kw))
    assert fm.observations().iloc[0]["filter"] == expected
| [
"lionel_garcia@live.fr"
] | lionel_garcia@live.fr |
a6a6984813486278c4dc89f5e5201d922504d0eb | fcaa66bb55cb96342fc673e88363337fac95a184 | /MovieApp/migrations/0004_auto_20210610_1948.py | c749cbeeb8d6a5a28e0b123163bcbbfbf191e942 | [] | no_license | rushabhgediya38/MovieTicketBooking | 3f0ab4fbea6011c47968ae0d50a42d8bacf4ffdb | beeb59d671d96418c0959ed072f4ffcf517a1b0c | refs/heads/main | 2023-05-14T05:55:09.176174 | 2021-06-13T15:02:42 | 2021-06-13T15:02:42 | 375,613,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | # Generated by Django 3.2.4 on 2021-06-10 14:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the M_lang language lookup table and links Movie to it (M2M)."""

    dependencies = [
        ('MovieApp', '0003_images'),
    ]

    operations = [
        migrations.CreateModel(
            name='M_lang',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('language', models.CharField(max_length=256)),
            ],
        ),
        # Many-to-many: a movie can be available in several languages.
        migrations.AddField(
            model_name='movie',
            name='M_lang',
            field=models.ManyToManyField(to='MovieApp.M_lang'),
        ),
    ]
| [
"rushabhgediya38@gmail.com"
] | rushabhgediya38@gmail.com |
e2315618261019a8385b4bde0fe248eef4ec9cf6 | 6fc75fec537f4b8211c358649dc63d71940a6f34 | /app/core/admin.py | 0e15594ccfb4a231476e867860aa69c2a6e051ff | [
"MIT"
] | permissive | henryvalbuena/recipe-api | a8d9e0c05112f0651409b48608106efe2e1bf2ab | 60e8a4e4f3c576ef1bd12740083fc0f6c22f4bc3 | refs/heads/master | 2020-07-25T06:41:14.415226 | 2019-09-18T23:33:34 | 2019-09-18T23:33:34 | 208,199,348 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.utils.translation import gettext as _
from core import models
# Register your models here.
class UserAdmin(BaseUserAdmin):
    """Admin configuration for the custom email-based User model."""

    # Stable default ordering and the columns shown on the change list.
    ordering = ['id']
    list_display = ['email', 'name']
    # Field layout for the change (edit) page; section titles are
    # translatable via gettext (_).
    fieldsets = (
        (None, {'fields': ('email', 'password')}),
        (_('Personal Info'), {'fields': ('name',)}),
        (
            _('Permissions'),
            {'fields': ('is_active', 'is_staff', 'is_superuser')}
        ),
        (_('Important dates'), {'fields': ('last_login',)})
    )
    # Field layout for the "add user" page; password1/password2 drive
    # Django's password-confirmation check.
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('email', 'password1', 'password2')
        }),
    )
# Expose the custom user admin plus the recipe-domain models in the admin.
admin.site.register(models.User, UserAdmin)
admin.site.register(models.Tag)
admin.site.register(models.Ingredient)
admin.site.register(models.Recipe)
| [
"dev.henryvm@gmail.com"
] | dev.henryvm@gmail.com |
a3ec93a7b13f164f0ce903611bb8dd6f8cd55385 | a67961da00957467c6f79e119e3faa6072970c0b | /Assets/Scripts/Weapons/Weapon.py | 0e9a097a027e395d4f8de4b0675e0ca4fd3fc62f | [] | no_license | westtr-gv/tercio | d10300ae5336d99e34334e8991b16ab02078ea9a | bf11775f83f8bf4f1a97e84322afc8b5a88f6572 | refs/heads/master | 2020-12-15T18:11:15.438290 | 2020-02-24T13:08:40 | 2020-02-24T13:08:40 | 235,206,248 | 0 | 0 | null | 2020-02-12T19:28:22 | 2020-01-20T21:54:59 | Python | UTF-8 | Python | false | false | 314 | py | """
This script will be inherited by each type of weapon
(Melee and Ranged), and hold the basic information
that is relevant to any kind of weapon
"""
import pygame
from LeagueEngine import *
class Weapon(DUGameObject):
    """Base class for every weapon type (Melee and Ranged).

    Holds the basic stats shared by all weapons; subclasses are expected
    to override them.
    """

    def __init__(self):
        # NOTE(review): super().__init__() is never invoked -- confirm that
        # DUGameObject requires no initialization of its own.
        # Base damage dealt per hit.
        self.damage = 1
self.attackSpeed = 2 | [
"17kisern@gmail.com"
] | 17kisern@gmail.com |
43171e67ff9e36899ce8b565c03eaac899555a02 | b7f3edb5b7c62174bed808079c3b21fb9ea51d52 | /components/policy/tools/PRESUBMIT.py | 8d6bc1a9cc400f3e06219f8a9d4ecd123cddc991 | [
"BSD-3-Clause"
] | permissive | otcshare/chromium-src | 26a7372773b53b236784c51677c566dc0ad839e4 | 64bee65c921db7e78e25d08f1e98da2668b57be5 | refs/heads/webml | 2023-03-21T03:20:15.377034 | 2020-11-16T01:40:14 | 2020-11-16T01:40:14 | 209,262,645 | 18 | 21 | BSD-3-Clause | 2023-03-23T06:20:07 | 2019-09-18T08:52:07 | null | UTF-8 | Python | false | false | 847 | py | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
def RunOtherPresubmit(function_name, input_api, output_api):
  """Runs `function_name` from components/policy/resources/PRESUBMIT.py.

  That PRESUBMIT file is read from the repository root, executed in a
  fresh namespace, and the requested check is then invoked with the same
  (input_api, output_api) pair.
  """
  resources_presubmit = (input_api.change.RepositoryRoot() +
                         '/components/policy/resources/PRESUBMIT.py')
  namespace = {}
  exec (input_api.ReadFile(resources_presubmit), namespace)
  return namespace[function_name](input_api, output_api)
def CheckChangeOnUpload(input_api, output_api):
  """Upload-time hook: defers to the resources PRESUBMIT's check."""
  return RunOtherPresubmit('CheckChangeOnUpload', input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
  """Commit-time hook: defers to the resources PRESUBMIT's check."""
  return RunOtherPresubmit('CheckChangeOnCommit', input_api, output_api)
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
1b00196825631a6f44decdbf3560208ff455bf28 | d354d2da2a6bd47aa0f545a0bf351e982882ea4c | /setup.py | 03aded87728f2e3159fcc416da43efee5d4887cd | [
"MIT"
] | permissive | acodebreaker/pywsd | 27dffb27a0961dbe5d09e71cc4f18e3dba10bfdf | ec8dd4bead6108e04250591d1732afcc9b0fb1bb | refs/heads/master | 2021-01-18T01:40:48.909216 | 2014-11-24T07:25:17 | 2014-11-24T07:25:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | #!/usr/bin/env python -*- coding: utf-8 -*-
#
# Python Word Sense Disambiguation (pyWSD)
#
# Copyright (C) 2014 alvations
# URL:
# For license information, see LICENSE.md
from distutils.core import setup
# Package metadata for distribution.
# NOTE(review): distutils is deprecated (removed in Python 3.12);
# consider migrating to setuptools.
setup(
    name='pywsd',
    version='0.1',
    # Importable package directories shipped with the distribution.
    packages=['pywsd',],
    long_description='Python Implementations of Word Sense Disambiguation (WSD) technologies',
) | [
"alvations@gmail.com"
] | alvations@gmail.com |
04d11322b67cf04250275997e9f96e08b6189c9e | 2e09fd737be7dadf9d89da654ad87b617244c838 | /migrations/versions/ff4b9656bd74_.py | cbe6236df7cb7308693031d497ad323b4fa4dae8 | [] | no_license | ahmadalthamer/fyyur | 6392cf011d6aa984fd7da1233500bd7796fe7695 | 45b0b4a5579397549547626b4fdfdf02b86e0fcb | refs/heads/master | 2022-06-21T09:22:06.661163 | 2020-05-14T23:38:00 | 2020-05-14T23:38:00 | 261,534,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | """empty message
Revision ID: ff4b9656bd74
Revises: fe0e9b829e71
Create Date: 2020-05-12 03:48:11.041139
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
# These link the migration into Alembic's history chain; do not edit.
revision = 'ff4b9656bd74'
down_revision = 'fe0e9b829e71'
branch_labels = None
depends_on = None
def upgrade():
    """Replace Show.start_time with a non-nullable Show.date column."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): adding a NOT NULL column without a server_default will
    # fail if the Show table already has rows, and dropping start_time
    # discards its data.
    op.add_column('Show', sa.Column('date', sa.DateTime(), nullable=False))
    op.drop_column('Show', 'start_time')
    # ### end Alembic commands ###
def downgrade():
    """Revert: restore Show.start_time (TIMESTAMP) and drop Show.date."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): the same NOT NULL caveat applies on the way down, and
    # values lost by upgrade() cannot be recovered.
    op.add_column('Show', sa.Column('start_time', postgresql.TIMESTAMP(), autoincrement=False, nullable=False))
    op.drop_column('Show', 'date')
    # ### end Alembic commands ###
| [
"ahmad.althamer@hotmail.com"
] | ahmad.althamer@hotmail.com |
12743efafbf2db040d55e3bac3802bdfe66aef1f | 835aedfce2b3d84d81861420fed5f801831b3b4f | /Prototype/Packet_Summary_Questions.py | ebb8c2ab5ed5c89ea4ff5cc7325da957c162db53 | [] | no_license | alifa2try/Mirai_Bot_Scanner_Summation_Prototype | 74d6dfbd5123b5727bf13ea350dff93aa0a23ad0 | 380b248c7eb8240c3bb178404261be8e11560116 | refs/heads/master | 2020-07-09T05:12:23.348776 | 2019-01-23T19:36:23 | 2019-01-23T19:36:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,113 | py | ## Student:
## Charles V. Frank Jr.
## charles.frank@trojans.dsu.edu
##
## University:
## Dakota State University
##
## Date:
## August 15, 2018
## -------------------------------------------------------------------------------------------
## Module:
## Answer_Research_Questions.py
## -------------------------------------------------------------------------------------------
## Purpose:
## This module will answer the dissertation research questions.
##
##
## Bot Scanning dataset from:
## https://www.impactcybertrust.org/dataset_view?idDataset=740
## -------------------------------------------------------------------------------------------
##
## Functions:
##
## can_the_bots_and_potential_new_bot_victims_be_idenitied - answer research questions one and two
##
## is_it_possible_to_monitor_bot_scanning_over_time - answer research question three
##
import BotScannerResults as bsr
##
## First and second Research Questions
##
##
## Main
##
if __name__ == "__main__":
    # Summarize bot-scanner pcap runtimes over the study window
    # (start/end passed as "YYYY-MM-DD" strings).
    bsr.pcapruntime_summary("2016-06-01", "2017-03-31")
| [
"noreply@github.com"
] | noreply@github.com |
f9cb989c8163b4dc1f891d8bb5dd73e77e8fcd38 | 06d9fc57608b775fcd3944f29ea9ae0d9143a710 | /code/exercises/07_DataStructures/ex_07_07_global.py | 0aaa1f30e4e6c668e757f99e3af864faf21914b2 | [
"Apache-2.0"
] | permissive | chiachang100/learn-to-code-with-python | bbde94d48566e962888286aea902befc8481a464 | fe16115cb3be612d5abd8ffdbd6a14a37d6b4d52 | refs/heads/master | 2022-03-25T11:15:18.820484 | 2019-11-21T01:27:01 | 2019-11-21T01:27:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | #!/usr/bin/python
# Filename: ex_global.py
# Tutorial: the `global` statement lets a function rebind a module-level name.
x = 50


def func():
    """Print the module-level x, then rebind it to 2 via `global`."""
    global x
    print('x is ', x)
    x = 2
    # Because of `global x`, the assignment above changed the *global* x;
    # the previous message wrongly described it as "local".
    print('Changed global x to ', x)


func()
print('Value of x is ', x)
| [
"chiachang100@gmail.com"
] | chiachang100@gmail.com |
96f93bf467f72e4c91c462af4e93915455cbeeb7 | 4d19ee7c0d80b6cc94b543c12b1f74bfee8dab4d | /Python1/python1/nested.py | e13db486bf10b2decc630195978ca6540e7651ab | [
"MIT"
] | permissive | ceeblet/OST_PythonCertificationTrack | df8d640b95ca0d1f7d45960f46253b899291c880 | 042e0ce964bc88b3f4132dcbd7e06c5f504eae34 | refs/heads/master | 2016-09-05T19:21:26.887824 | 2015-03-17T14:25:50 | 2015-03-17T14:25:50 | 31,505,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | #!/usr/local/bin/python3
""" Nested exception handling """
def divide(a, b):
    """ Return the quotient a / b.

    Prints a separator line and the operands first; on ZeroDivisionError
    or TypeError a diagnostic is printed and the exception is re-raised
    for the caller to handle.
    """
    print("=" * 20)
    print("a: ", a, "/ b: ", b)
    try:
        quotient = a / b
    except (TypeError, ZeroDivisionError):
        print("Something went wrong!")
        raise
    return quotient
if __name__ == "__main__":
    # Exercise divide() with a bad type, a zero divisor, and a valid pair.
    cases = ((1, "string"), (2, 0), (123, 4))
    for numerator, denominator in cases:
        try:
            print(divide(numerator, denominator))
        except Exception as problem:
            print("Problem: {0}".format(problem))
| [
"cbrown@navinet.net"
] | cbrown@navinet.net |
a73c5d4bb559659bf09d6c522ea1b8a91e58cc67 | db88c14390c0edaf096e1e9f1c02a4a799f8d3a4 | /scripts/mospy_handle.py | 4ff3c46c312a1673c28b6e582644773ea273ceaa | [] | no_license | themiyan/MosfireDRP_Themiyan | 91b85597255f9f74e866617205ee70fe34f89379 | 84e61dcd4b8d926e0d7fc6a4f73edb3f045f6a6a | refs/heads/master | 2021-01-10T02:31:43.602593 | 2017-02-20T23:33:54 | 2017-02-20T23:33:54 | 54,149,912 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,565 | py | #!/usr/local/bin/python
'''
MOSFIRE 'handle' command:
(c) npk - Dec 2013
'''
import MOSFIRE
import MOSFIRE.IO as IO
import os
import numpy as np
import pyfits as pf
import sys
import glob
# Require at least one file/glob argument: invoked as `mospy handle <files>`.
if len(sys.argv) < 3:
    print '''Usage: mospy handle [target]'''
    sys.exit()

# Expand every command-line argument as a glob into the working file list.
files = []
for i in range(1, len(sys.argv)):
    files.extend(glob.iglob(sys.argv[i]))
# masks[maskname][date][filter][frame_type] -> list of file names
# (or, for Offset_* keys, list of (file name, integration time) tuples).
masks = {}
for fname in files:

    try:
        header = MOSFIRE.IO.readheader(fname)
    except IOError, err:
        print "Couldn't IO %s" % fname
        continue
    except:
        print "%s is unreadable" % fname
        continue

    # Concatenate the two-letter names of whichever arc lamps were on.
    lamps = ""
    try:
        if header["pwstata7"] == 1:
            lamps += header["pwloca7"][0:2]
        if header["pwstata8"] == 1:
            lamps += header["pwloca8"][0:2]
    except KeyError:
        lamps = "???"

    header.update("lamps", lamps)

    try:
        if header["aborted"]:
            header.update("object", "ABORTED")
    except:
        print "Missing header file in: %s" % fname

    # Print a one-line summary; fall back to a shorter format, else skip.
    try:
        print "%(datafile)12s %(object)40s %(truitime)6.1f s %(maskname)35s %(lamps)3s %(filter)4s %(mgtname)7s" % (header)
    except:
        try:
            print "%(datafile)12s %(object)25s %(truitime)6.1f s %(lamps)3s %(filter)6s %(mgtname)7s" % (header)
        except:
            print "%s Skipped" % fname
            continue

    datafile = header['datafile'] + '.fits'
    maskname = str(header['maskname'])
    target = str(header['targname'])
    filter = header['filter']
    yr,mn,dy = IO.fname_to_date_tuple(datafile)
    date = str(yr)+mn+str(dy)
    object = header['object']
    itime = header['truitime']
    grating_turret = header['mgtname']

    if object.find("MIRA") == -1:
        mira = False
    else:
        mira = True

    # NOTE(review): `is not 'mirror'` is an identity comparison against a
    # string literal, not equality -- almost certainly meant `!= 'mirror'`.
    if header['MGTNAME'] is not 'mirror':
        mira = False

    # A maskname suffixed " (align)" marks an alignment frame.
    if maskname.find(" (align)") == -1:
        align = False
    else:
        maskname = maskname.replace(" (align)", "")
        align = True

    if maskname.find('LONGSLIT') != -1:
        print "longslit file"
        align = False

    if maskname.find('long2pos') != -1:
        if grating_turret != 'mirror':
            align = False

    empty_files = {'Align': [], 'Ne': [], 'Ar': [], 'Flat': [], 'FlatThermal': [],
        'Dark': [], 'Aborted': [], 'Image': [], 'MIRA': [], 'Unknown': []}

    if maskname not in masks:
        masks[maskname] = {date: {filter: empty_files}}

    if date not in masks[maskname]:
        masks[maskname][date] = {filter: empty_files}

    if filter not in masks[maskname][date]:
        masks[maskname][date][filter] = empty_files

    offset = 'Offset_' + str(header['YOFFSET'])

    if (maskname.find('long2pos') != -1 and align is False) or maskname.find('LONGSLIT') != -1:
        # if the target name contains a /, replace it with _
        target_name = target.replace("/","_")
        # if the target name contains a space, remove it
        # NOTE(review): this re-derives from `target`, silently discarding
        # the "/" substitution above -- probably meant target_name.replace.
        target_name = target.replace(" ","")
        # add a posC and posA to the offset names
        position = ''
        if header['XOFFSET']>0:
            position = 'PosC'
        if header['XOFFSET']<0:
            position = 'PosA'
        offset = offset+'_'+str(target_name)
        # NOTE(review): `is not ''` is another identity test; use `!= ''`.
        if position is not '':
            offset = offset+'_'+position

    # Classify the frame into exactly one bucket.
    if mira:
        masks[maskname][date][filter]['MIRA'].append(fname)
    elif align:
        masks[maskname][date][filter]['Align'].append(fname)
    elif 'Ne' in header['lamps']:
        masks[maskname][date][filter]['Ne'].append(fname)
    elif 'Ar' in header['lamps']:
        masks[maskname][date][filter]['Ar'].append(fname)
    elif header['ABORTED']:
        masks[maskname][date][filter]['Aborted'].append(fname)
    elif header['FILTER'] == 'Dark':
        masks[maskname][date][filter]['Dark'].append(fname)
    elif header['FLATSPEC'] == 1:
        masks[maskname][date][filter]['Flat'].append(fname)
    elif object.find("Flat:") != -1 and ( object.find("lamps off") != -1 or object.find("Flat:Off")) != -1 :
        masks[maskname][date][filter]['FlatThermal'].append(fname)
    elif header['mgtname'] == 'mirror':
        masks[maskname][date][filter]['Image'].append(fname)
    elif offset != 0:
        # NOTE(review): `offset` is always a non-empty string here, so this
        # branch always fires and the final `else` is unreachable.
        print "offset is now:"+str(offset)
        if offset in masks[maskname][date][filter]:
            masks[maskname][date][filter][offset].append((fname, itime))
            print "adding file to existing offset file"
        else:
            masks[maskname][date][filter][offset] = [(fname, itime)]
            print "creating new offset file"
    else:
        masks[maskname][date][filter]['Unknown'].append(fname)
##### Now handle mask dictionary
def descriptive_blurb():
    '''Return a "# Created by USER on DATE" comment line for list files.'''
    import getpass
    import time
    return "# Created by '%s' on %s\n" % (getpass.getuser(), time.asctime())
# Write out the list of files in filepath
# list = ['/path/to/mYYmmDD_####.fits' ...]
# filepath is absolute path to the file name to write to
#
# Result, is a file called filepath is written with
# fits files in the list.
def handle_file_list(output_file, files):
    '''Write a list of paths to MOSFIRE file to output_file.'''

    if os.path.isfile(output_file):
        print "%s: already exists, skipping" % output_file
        # NOTE(review): despite the message, nothing is skipped -- `pass`
        # falls through and the file is overwritten below; a `return` here
        # is probably what was intended.
        pass

    print "\t", output_file
    f = open(output_file, "w")
    f.write(descriptive_blurb())

    if len(files) == 0:
        f.close()
        return

    # Entries are either plain paths or (path, itime) pairs; `picker`
    # extracts the path in both cases.
    picker = lambda x: x
    if len(files[0]) == 2: picker = lambda x: x[0]

    # Identify unique path to files:
    paths = [os.path.dirname(picker(file)) for file in files]
    paths = list(set(paths))

    if len(paths) == 1:
        # All files share one directory: write it once, store basenames.
        path_to_all = paths[0]
        converter = os.path.basename
        f.write("%s # Abs. path to files [optional]\n" % path_to_all)
    else:
        converter = lambda x: x

    for path in files:
        # (path, itime) pairs carry the integration time as a comment.
        if len(path) == 2: to_write = "%s # %s s\n" % (converter(path[0]), path[1])
        else: to_write = "%s\n" % converter(path)

        f.write("%s" % to_write)

    f.close()
def handle_date_and_filter(mask, date, filter, mask_info):
    '''Write one <frame type>.txt file list under mask/date/filter/.'''
    out_dir = os.path.join(mask, date, filter)
    try:
        os.makedirs(out_dir)
    except OSError:
        # Directory already exists.
        pass

    for frame_type in mask_info.keys():
        out_name = os.path.join(out_dir, frame_type + ".txt")
        handle_file_list(out_name, mask_info[frame_type])
# Walk the nested masks[maskname][date][filter] dictionary and emit the
# per-combination file lists.
for mask in masks.keys():
    for date in masks[mask].keys():
        for filter in masks[mask][date].keys():
            handle_date_and_filter(mask, date, filter, masks[mask][date][filter])
| [
"themiyananayakkara@gmail.com"
] | themiyananayakkara@gmail.com |
93dffeb1d6be0528a53c287b319ac6dabad50101 | c09fa8f3c25b738b6ac5e7b9f474dff97d4b3e6e | /src/handler/weixin_vaild.py | 960b8949dff01e938a3ec4a68c933a511b6e7781 | [] | no_license | xu-yongliang/wechatDevelop | 084079819560550a9f49cb5d9fc3c70525e36c9a | 025bb40f3f532b451b4fb308c5d9b2bf5b294343 | refs/heads/master | 2021-06-06T23:42:28.915954 | 2016-10-09T06:26:59 | 2016-10-09T06:26:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,397 | py | #!/usr/bin/python
# encoding:utf-8
from wechat_sdk import WechatConf, WechatBasic
from baseHandler import BaseHandler
from helper.process import ProcessBody
# Extends BaseHandler and implements the do_action hook.
class WeixinVaild(BaseHandler):
    """Handler for the WeChat server-verification / message-callback URL.

    NOTE(review): the token/appid/appsecret are hard-coded and committed
    here -- move them to configuration and rotate the exposed secret.
    """

    def do_action(self):
        # Wrap the WeChat SDK so signature checking, message parsing and
        # reply building all go through one object.
        conf = WechatConf(
            token='xuyung',
            appid='wxffc2efc83cd3cac8',
            appsecret='6619da74b175a079677e505cbb6fe9dc',
            encrypt_mode='normal'
        )
        wechat = WechatBasic(conf=conf)
        signature = self.get_argument('signature', '')
        if signature:
            timestamp = self.get_argument('timestamp', '')
            nonce = self.get_argument('nonce', '')
            if wechat.check_signature(signature, timestamp, nonce):
                if self.get_argument('echostr', ''):
                    # Server-URL verification handshake: echo `echostr` back.
                    self.result = self.get_argument('echostr', '')
                    return
                else:
                    # Signed message push: parse the body, build the reply.
                    wechat.parse_data(self.request.body)
                    process = ProcessBody(wechat)
                    self.result = process.get_result()
            else:
                self.result = 'error'
        else:
            # No signature supplied: treat the body as a message anyway.
            wechat.parse_data(self.request.body)
            process = ProcessBody(wechat)
            self.result = process.get_result()
| [
"xuyongliang@haizhi.com"
] | xuyongliang@haizhi.com |
ae03b937573efae64738241bb1ef7d3cdce0ca08 | c38389bc55bb1021d5af3e234bb509c322d9d574 | /djproject/urls.py | 4fae2b0de8a02299469c9d75d91f3f247e55f0e2 | [] | no_license | kcemenike/djproject | bb22c546776f714be3ea41510cae8c26947428a7 | a6c61977afb30ae1ab0f066fa0e5cd3171922eb2 | refs/heads/master | 2020-04-13T19:14:12.473587 | 2019-01-08T19:58:55 | 2019-01-08T19:58:55 | 163,396,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 891 | py | """djproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from cd_library.views import current_datetime
# Project URL routes: admin site, the pastebin app's URLconf, and a single
# function view rendering the current time.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('pastebin/', include('pastebin.urls')),
    path('time/', current_datetime)
]
| [
"kc@KCEKALI.lab"
] | kc@KCEKALI.lab |
c7e85775629e15d592aad6dc1b386daa9b3152fc | 524591f2c4f760bc01c12fea3061833847a4ff9a | /arm/opt/ros/kinetic/lib/python2.7/dist-packages/sensor_msgs/msg/_PointCloud2.py | 63495f1677ff10df73ab8c86a47cfff83c7bb456 | [
"BSD-3-Clause"
] | permissive | Roboy/roboy_plexus | 6f78d45c52055d97159fd4d0ca8e0f32f1fbd07e | 1f3039edd24c059459563cb81d194326fe824905 | refs/heads/roboy3 | 2023-03-10T15:01:34.703853 | 2021-08-16T13:42:54 | 2021-08-16T13:42:54 | 101,666,005 | 2 | 4 | BSD-3-Clause | 2022-10-22T13:43:45 | 2017-08-28T16:53:52 | C++ | UTF-8 | Python | false | false | 13,149 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from sensor_msgs/PointCloud2.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import sensor_msgs.msg
import std_msgs.msg
class PointCloud2(genpy.Message):
_md5sum = "1158d486dd51d683ce2f1be655c3c181"
_type = "sensor_msgs/PointCloud2"
_has_header = True #flag to mark the presence of a Header object
_full_text = """# This message holds a collection of N-dimensional points, which may
# contain additional information such as normals, intensity, etc. The
# point data is stored as a binary blob, its layout described by the
# contents of the "fields" array.
# The point cloud data may be organized 2d (image-like) or 1d
# (unordered). Point clouds organized as 2d images may be produced by
# camera depth sensors such as stereo or time-of-flight.
# Time of sensor data acquisition, and the coordinate frame ID (for 3d
# points).
Header header
# 2D structure of the point cloud. If the cloud is unordered, height is
# 1 and width is the length of the point cloud.
uint32 height
uint32 width
# Describes the channels and their layout in the binary data blob.
PointField[] fields
bool is_bigendian # Is this data bigendian?
uint32 point_step # Length of a point in bytes
uint32 row_step # Length of a row in bytes
uint8[] data # Actual point data, size is (row_step*height)
bool is_dense # True if there are no invalid points
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: sensor_msgs/PointField
# This message holds the description of one point entry in the
# PointCloud2 message format.
uint8 INT8 = 1
uint8 UINT8 = 2
uint8 INT16 = 3
uint8 UINT16 = 4
uint8 INT32 = 5
uint8 UINT32 = 6
uint8 FLOAT32 = 7
uint8 FLOAT64 = 8
string name # Name of field
uint32 offset # Offset from start of point struct
uint8 datatype # Datatype enumeration, see above
uint32 count # How many elements in the field
"""
__slots__ = ['header','height','width','fields','is_bigendian','point_step','row_step','data','is_dense']
_slot_types = ['std_msgs/Header','uint32','uint32','sensor_msgs/PointField[]','bool','uint32','uint32','uint8[]','bool']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,height,width,fields,is_bigendian,point_step,row_step,data,is_dense
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(PointCloud2, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.height is None:
self.height = 0
if self.width is None:
self.width = 0
if self.fields is None:
self.fields = []
if self.is_bigendian is None:
self.is_bigendian = False
if self.point_step is None:
self.point_step = 0
if self.row_step is None:
self.row_step = 0
if self.data is None:
self.data = b''
if self.is_dense is None:
self.is_dense = False
else:
self.header = std_msgs.msg.Header()
self.height = 0
self.width = 0
self.fields = []
self.is_bigendian = False
self.point_step = 0
self.row_step = 0
self.data = b''
self.is_dense = False
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.height, _x.width))
length = len(self.fields)
buff.write(_struct_I.pack(length))
for val1 in self.fields:
_x = val1.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1
buff.write(_get_struct_IBI().pack(_x.offset, _x.datatype, _x.count))
_x = self
buff.write(_get_struct_B2I().pack(_x.is_bigendian, _x.point_step, _x.row_step))
_x = self.data
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_B().pack(self.is_dense))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.fields is None:
self.fields = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.fields = []
for i in range(0, length):
val1 = sensor_msgs.msg.PointField()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.name = str[start:end].decode('utf-8')
else:
val1.name = str[start:end]
_x = val1
start = end
end += 9
(_x.offset, _x.datatype, _x.count,) = _get_struct_IBI().unpack(str[start:end])
self.fields.append(val1)
_x = self
start = end
end += 9
(_x.is_bigendian, _x.point_step, _x.row_step,) = _get_struct_B2I().unpack(str[start:end])
self.is_bigendian = bool(self.is_bigendian)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.data = str[start:end]
start = end
end += 1
(self.is_dense,) = _get_struct_B().unpack(str[start:end])
self.is_dense = bool(self.is_dense)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.height, _x.width))
length = len(self.fields)
buff.write(_struct_I.pack(length))
for val1 in self.fields:
_x = val1.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1
buff.write(_get_struct_IBI().pack(_x.offset, _x.datatype, _x.count))
_x = self
buff.write(_get_struct_B2I().pack(_x.is_bigendian, _x.point_step, _x.row_step))
_x = self.data
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_B().pack(self.is_dense))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  # NOTE(review): generated counterpart of serialize_numpy; `str` intentionally
  # shadows the builtin (genpy convention) — the parameter is the raw byte array.
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.fields is None:
        self.fields = None
      end = 0
      # header: 3 x uint32, then length-prefixed frame_id string
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])
      # fields: uint32 count, then (name, offset, datatype, count) per entry
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.fields = []
      for i in range(0, length):
        val1 = sensor_msgs.msg.PointField()
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
          val1.name = str[start:end].decode('utf-8')
        else:
          val1.name = str[start:end]
        _x = val1
        start = end
        end += 9
        (_x.offset, _x.datatype, _x.count,) = _get_struct_IBI().unpack(str[start:end])
        self.fields.append(val1)
      _x = self
      start = end
      end += 9
      (_x.is_bigendian, _x.point_step, _x.row_step,) = _get_struct_B2I().unpack(str[start:end])
      self.is_bigendian = bool(self.is_bigendian)
      # point data: length-prefixed byte blob, then the is_dense flag
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      self.data = str[start:end]
      start = end
      end += 1
      (self.is_dense,) = _get_struct_B().unpack(str[start:end])
      self.is_dense = bool(self.is_dense)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
    """Return the Struct instance genpy shares for plain '<I' (uint32) values."""
    return _struct_I
_struct_IBI = None
def _get_struct_IBI():
    """Lazily build and cache the little-endian Struct for '<IBI'."""
    global _struct_IBI
    cached = _struct_IBI
    if cached is None:
        cached = struct.Struct("<IBI")
        _struct_IBI = cached
    return cached
_struct_3I = None
def _get_struct_3I():
    """Lazily build and cache the little-endian Struct for '<3I'."""
    global _struct_3I
    cached = _struct_3I
    if cached is None:
        cached = struct.Struct("<3I")
        _struct_3I = cached
    return cached
_struct_B = None
def _get_struct_B():
    """Lazily build and cache the little-endian Struct for '<B'."""
    global _struct_B
    cached = _struct_B
    if cached is None:
        cached = struct.Struct("<B")
        _struct_B = cached
    return cached
_struct_2I = None
def _get_struct_2I():
    """Lazily build and cache the little-endian Struct for '<2I'."""
    global _struct_2I
    cached = _struct_2I
    if cached is None:
        cached = struct.Struct("<2I")
        _struct_2I = cached
    return cached
_struct_B2I = None
def _get_struct_B2I():
    """Lazily build and cache the little-endian Struct for '<B2I'."""
    global _struct_B2I
    cached = _struct_B2I
    if cached is None:
        cached = struct.Struct("<B2I")
        _struct_B2I = cached
    return cached
| [
"simon.trendel@tum.de"
] | simon.trendel@tum.de |
baf02ed9910963e5ed29164ba414f88415d59e00 | ae5bdb32f5ae61f422e537222601e0fe4f86739c | /py2app_tests/argv_app/setup.py | 432f60efe00ec2498ecebe46d1699b3bb23c06bb | [
"MIT",
"Python-2.0"
] | permissive | acclivity/py2app | beeefa84eaeaa40edfcbed25d4edb500ddd60a61 | a3dafb2c559dc9be78ebe1c44887820f9451806c | refs/heads/master | 2021-03-26T09:11:01.176301 | 2020-03-16T22:25:26 | 2020-03-16T22:25:26 | 247,691,716 | 0 | 0 | NOASSERTION | 2020-03-16T12:04:10 | 2020-03-16T12:04:09 | null | UTF-8 | Python | false | false | 148 | py | from setuptools import setup
# py2app bundle configuration: one app entry point with argv emulation enabled.
setup(
    name='BasicApp',
    app=['main.py'],
    options={'py2app': {'argv_emulation': True}},
)
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
44b0f8140acf2ee96f151344629234de62c648f2 | 3e5150447a2c90c26354500f1df9660ef35c990b | /classes/str/.rstrip() | 2409992c6e0c6d0f2ce555b6bc34fe5f619190e3 | [] | no_license | kilirobbs/python-fiddle | 8d6417ebff9d6530e713b6724f8416da86c24c65 | 9c2f320bd2391433288cd4971c2993f1dd5ff464 | refs/heads/master | 2016-09-11T03:56:39.808358 | 2013-03-19T19:26:19 | 2013-03-19T19:26:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | #!/usr/bin/env python
# Demonstrates str.rstrip(): trailing spaces and newlines are removed.
# NOTE(review): Python 2 print-statement syntax — will not run under Python 3.
print "1 ".rstrip()
print "1\n".rstrip()
"cancerhermit@gmail.com"
] | cancerhermit@gmail.com | |
d4dd8d3af20c272a50ffd0226634bd7465a6f2ee | 6ff12f51b9a1b9f751cec3df21813803d2455f1e | /tools/link_graph_generator.py | cf503594caed97072da3912f1fad3b5706416592 | [] | no_license | prdx/PoliteScrapper | 5d40089bb399c3d08fb848355b73cdc530c8327c | e84a49fa197e484361d2e69421b32fd4240c884c | refs/heads/master | 2020-03-23T18:36:21.340544 | 2018-08-01T21:48:14 | 2018-08-01T21:48:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,554 | py | from bs4 import BeautifulSoup
import os
import pickle
# Crawled pages to process: every *.xml file in the current working directory.
files = [f for f in os.listdir('.') if os.path.isfile(f) and f.endswith(".xml")]
# url -> list of outgoing link urls (filled by generate_outlink_file)
outlinks = {}
# url -> list of referring urls (derived from `outlinks` by generate_inlink_file)
inlinks = {}
def generate_outlink_file():
    """Parse every crawled XML page and pickle the url -> outlinks mapping.

    Files that fail to parse are renamed with a ``.fail`` suffix so they are
    skipped on the next run.
    """
    print("Generating outlinks file ...")
    # Generate outlinks
    for xml_file in files:
        try:
            with open(xml_file, "rb") as handle:
                parsed = BeautifulSoup(handle, "lxml")
                page_url = parsed.id.value.text
                outlinks[page_url] = parsed.outlinks.value.text.split(",")
        except Exception as err:
            print("Error processing: " + xml_file)
            print(err)
            os.rename(xml_file, xml_file + ".fail")
    # Dump the outlinks
    with open("../output/outlinks.p", "wb") as sink:
        pickle.dump(outlinks, sink, protocol=pickle.HIGHEST_PROTOCOL)
    print("Done generating outlinks file ...")
    print("Outlinks size: " + str(len(outlinks)) + " urls")
def generate_inlink_file():
    """Invert the outlink map into url -> referrers and pickle the result."""
    print("Generating inlinks file ...")
    # Generate inlinks
    for source_url in outlinks:
        for target_url in outlinks[source_url]:
            try:
                inlinks[target_url].append(source_url)
            except KeyError:
                # first referrer seen for this target
                inlinks[target_url] = [source_url]
            except Exception as err:
                print("Error processing: " + source_url)
                print(err)
    # Dump the inlinks
    with open("../output/inlinks.p", "wb") as sink:
        pickle.dump(inlinks, sink, protocol=pickle.HIGHEST_PROTOCOL)
    print("Inlinks size: " + str(len(inlinks)) + " urls")
    print("Done inlinks file ...")
# Build the outlink map first; the inlink map is derived from it.
generate_outlink_file()
generate_inlink_file()
| [
"astungkara.project@gmail.com"
] | astungkara.project@gmail.com |
838027b05c4975fc5f55b86184077144347a1bad | 4f21e3301c1a8699745528177b3210b4f1a1f1d5 | /week10/project2/library/settings.py | 4dbdb1bf1faf2c3a9ac45fabe288d8e6aa05c0ca | [] | no_license | ndina/webdev2019 | 7fd0250b662b378d55e24e931f82d0b2538d63a5 | eae4808e2f0bfcdd5a366fd4692c041b96faaa0b | refs/heads/master | 2020-05-03T22:05:12.392913 | 2019-05-04T02:46:56 | 2019-05-04T02:46:56 | 167,550,783 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,688 | py | """
Django settings for library project.
Generated by 'django-admin startproject' using Django 1.8.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository and DEBUG is on —
# acceptable for local development only; override both before any deployment.
SECRET_KEY = 'u2iir6bmw(y%pu*23y%sm1u#8y#o7_qchko#=r*_rtqy_-ge+e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'library.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'library.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# Local SQLite database stored next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
"dbom12360@gmail.com"
] | dbom12360@gmail.com |
580c4532e3607bbc2d2b818f089db145db04139b | 8ef1439e1a44ae3ac83498facb5dcb48f48e13da | /venv/Scripts/pip3-script.py | f2c54b69d77f1a73551beee61141d4f238898c5a | [] | no_license | myfoxsay/Spiders | 1c1ad138d83e0d0257b0f73bfa250207227e4475 | c50a7b49f65d1e19ef8a2064fb6162fee454442b | refs/heads/master | 2021-06-28T12:24:42.410054 | 2021-01-07T07:00:54 | 2021-01-07T07:00:54 | 196,186,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | #!C:\Users\ym10266\PycharmProjects\untitled1\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
# Generated setuptools console-script shim: normalizes argv[0] and dispatches
# to pip's "pip3" console_scripts entry point. Do not edit by hand.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Drop the Windows launcher suffix ("-script.py(w)" / ".exe") from argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
    )
| [
"myfoxsay@163.com"
] | myfoxsay@163.com |
855520507be67c41ec69ec8579c87dda07b2ebf2 | eead3da885b3a36c5704e314c2610eaa64b21528 | /去除链表重复节点.py | 327faa8543b6a1dbba150035ea83c101f4328a0e | [] | no_license | Rango94/leetcode | 8ecd708a5ad552bd184b432f0ec0647df8de2304 | 11696cb03aa81dbea95b4c61aff08f70e4f1a81f | refs/heads/master | 2020-03-23T18:59:45.591397 | 2018-08-27T01:50:21 | 2018-08-27T01:50:21 | 141,947,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,017 | py | class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def print(self):
p=self
while p!=None:
print(p.val)
p=p.next
class Solution:
    def deleteDuplication(self, pHead):
        """Collapse runs of equal adjacent values in a sorted linked list.

        Keeps exactly one node per value and returns the head of the list.
        Returns None for an empty list.

        Fixes in this revision:
        - the original wrote ``out.nex`` (typo) so result nodes were never
          linked and only the first value survived;
        - the last node was silently dropped;
        - a trailing run of duplicates crashed with AttributeError
          (``nex`` became None inside the inner loop).
        The list is now deduplicated in place by unlinking duplicate nodes,
        which also avoids allocating a parallel copy.
        """
        if pHead is None:
            return None
        node = pHead
        while node.next is not None:
            if node.next.val == node.val:
                # skip the duplicate neighbour
                node.next = node.next.next
            else:
                node = node.next
        return pHead
# Build the sorted test list 1->2->3->3->4->4->5 and print the deduplicated result.
a1=ListNode(1)
a2=ListNode(2)
a3=ListNode(3)
a4=ListNode(3)
a5=ListNode(4)
a6=ListNode(4)
a7=ListNode(5)
a1.next=a2
a2.next=a3
a3.next=a4
a4.next=a5
a5.next=a6
a6.next=a7
s=Solution()
s.deleteDuplication(a1).print()
| [
"wangnz1994@163.com"
] | wangnz1994@163.com |
f52d578b0c5ce04a6f71e3ad5469a8314254b093 | ce12ef4b16535da475f14ec0d974750fcc7c2377 | /ordersapp/views.py | fa7daf3fb894319be2061639cffd12e9169b495a | [] | no_license | IlyaSaveliev/geekshop | 6336ea431b458621a36f90d6ab9f91ce7e1c4d91 | 51d714ce105aeb0ce3102d2e845fb53aa4581eca | refs/heads/Master | 2023-04-16T05:40:44.812694 | 2021-04-25T11:43:22 | 2021-04-25T11:43:22 | 343,256,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,858 | py | from django.db import transaction
from django.db.models.signals import pre_save, pre_delete
from django.dispatch import receiver
from django.forms import inlineformset_factory
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import render, get_object_or_404
from django.urls import reverse_lazy, reverse
from django.views.generic import ListView, CreateView, DetailView, UpdateView, DeleteView
from ordersapp.models import Order
from ordersapp.forms import OrderForm, OrderItemForm, OrderItem
from basketapp.models import Basket
from mainapp.models import Product
class OrderListView(ListView):
    """List only the orders that belong to the requesting user."""
    model = Order
    def get_queryset(self):
        # Restrict the default queryset to the current user's orders.
        return Order.objects.filter(user=self.request.user)
class OrderCreateView(CreateView):
    """Create an order for the current user, seeding items from their basket."""
    model = Order
    # form_class = OrderForm
    fields = []
    success_url = reverse_lazy('order:orders_list')
    def get_context_data(self, **kwargs):
        # Attach an OrderItem inline formset; on GET it is pre-filled from the basket.
        data = super().get_context_data(**kwargs)
        OrderFormSet = inlineformset_factory(Order, OrderItem, form=OrderItemForm, extra=1)
        if self.request.POST:
            formset = OrderFormSet(self.request.POST)
        else:
            basket_items = Basket.objects.filter(user=self.request.user)
            if len(basket_items):
                # One extra form per basket row, initialised with the product data.
                OrderFormSet = inlineformset_factory(Order, OrderItem, form=OrderItemForm, extra=len(basket_items))
                formset = OrderFormSet()
                for num, form in enumerate(formset.forms):
                    form.initial['product'] = basket_items[num].product
                    form.initial['quantity'] = basket_items[num].quantity
                    form.initial['price'] = basket_items[num].product.price
                # basket_items.delete()
            else:
                formset = OrderFormSet()
        data['orderitems'] = formset
        return data
    def form_valid(self, form):
        # Save the order and its items atomically; the basket is emptied in the
        # same transaction so stock adjustments stay consistent.
        context = self.get_context_data()
        orderitems = context['orderitems']
        with transaction.atomic():
            Basket.get_items(self.request.user).delete()
            form.instance.user = self.request.user
            self.object = form.save()
            if orderitems.is_valid():
                orderitems.instance = self.object
                orderitems.save()
        # Discard an order that ended up with zero total cost.
        if self.object.get_total_cost() == 0:
            self.object.delete()
        return super().form_valid(form)
class OrderDetailView(DetailView):
    """Read-only view of a single order."""
    model = Order
class OrderUpdateView(UpdateView):
    """Edit an existing order together with its inline item formset."""
    model = Order
    # form_class = OrderForm
    fields = []
    success_url = reverse_lazy('order:orders_list')
    def get_context_data(self, **kwargs):
        # Bind the OrderItem formset to the order being edited.
        data = super().get_context_data(**kwargs)
        OrderFormSet = inlineformset_factory(Order, OrderItem, form=OrderItemForm, extra=1)
        if self.request.POST:
            formset = OrderFormSet(self.request.POST, instance=self.object)
        else:
            formset = OrderFormSet(instance=self.object)
            for form in formset.forms:
                if form.instance.pk:
                    # Pre-fill the display price from the stored product.
                    form.initial['price'] = form.instance.product.price
        data['orderitems'] = formset
        return data
    def form_valid(self, form):
        # Persist order + items atomically, mirroring OrderCreateView.form_valid.
        context = self.get_context_data()
        orderitems = context['orderitems']
        with transaction.atomic():
            self.object = form.save()
            if orderitems.is_valid():
                orderitems.instance = self.object
                orderitems.save()
        # Discard an order that ended up with zero total cost.
        if self.object.get_total_cost() == 0:
            self.object.delete()
        return super().form_valid(form)
class OrderDeleteView(DeleteView):
    """Confirm-and-delete view for an order."""
    model = Order
    success_url = reverse_lazy('order:orders_list')
def order_forming_complete(request, pk):
    """Mark order `pk` as sent to processing and return to the order list.

    NOTE(review): no ownership/permission check here — presumably access is
    restricted elsewhere (URL conf / auth middleware); confirm before exposing.
    """
    order = get_object_or_404(Order, pk=pk)
    order.status = Order.SENT_TO_PROCEED
    order.save()
    return HttpResponseRedirect(reverse('order:orders_list'))
@receiver(pre_save, sender=Basket)
@receiver(pre_save, sender=OrderItem)
def product_quantity_update_save(sender, update_fields, instance, **kwargs):
    """Keep product stock in sync when a basket/order item is created or changed.

    Bug fix: the original condition ``update_fields is 'quantity' or 'product'``
    was a tautology (an ``is`` comparison against a string literal OR-ed with a
    truthy string), so it always ran. The common pre_save case passes
    ``update_fields=None`` — we keep adjusting stock then, and only skip when a
    save explicitly limits ``update_fields`` to unrelated fields.
    """
    if update_fields is None or {'quantity', 'product'} & set(update_fields):
        if instance.pk:
            # Re-saving an existing item: adjust stock by the quantity delta.
            instance.product.quantity -= instance.quantity - sender.get_item(instance.pk).quantity
        else:
            # New item: reserve the full quantity.
            instance.product.quantity -= instance.quantity
        instance.product.save()
@receiver(pre_delete, sender=Basket)
@receiver(pre_delete, sender=OrderItem)
def product_quantity_update_delete(sender, instance, **kwargs):
    """Return the reserved quantity to product stock when an item is deleted."""
    instance.product.quantity += instance.quantity
    instance.product.save()
def get_product_price(request, pk):
    """AJAX endpoint: return ``{'price': <product price>}`` for product `pk`.

    Responds with a price of 0 when the product does not exist; non-AJAX
    requests fall through (implicit None), matching the original behaviour.
    """
    if request.is_ajax():
        product = Product.objects.filter(pk=int(pk)).first()
        price = product.price if product else 0
        return JsonResponse({'price': price})
| [
"parazit.ololo@gmail.com"
] | parazit.ololo@gmail.com |
d0c3fb8162dec8bff5a63f4bba4da756f7c890e6 | 1c45f9b803bca4b022e53a6128c7937286d397a0 | /pythontest/0110/functionex1.py | 7bbac6857e840f4f5dcce3f4a45b0f9d1ba7f8d9 | [] | no_license | yangyohan123/python | 2860e185b581e1b0d16fbdbbd69932154d004e06 | fc643d54542b15d32bb1319f223412694929b88f | refs/heads/master | 2021-01-01T14:18:59.395282 | 2020-02-09T17:34:40 | 2020-02-09T17:34:40 | 239,317,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | '''
Created on 2020. 1. 10.
@author: GDJ-19
함수 사용하기
'''
def coffee_machine(button):
print()
print('#1 뜨거운 물 준비')
print('#2 종이컵 준비')
if button==1 :
print("#3 보통커피를 탄다")
elif button == 2:
print("#3 설탕커피를 탄다")
elif button == 3:
print("#3 블랙커피를 탄다")
else :
print("#3 커피 종류 없음")
print("#4 물을 붓는다.")
coffee = int(input("커피 종류를 입력하세요(1:보통,2:설탕,3:블랙)"))
coffee_machine(coffee) | [
"yangyohan123@daum.net"
] | yangyohan123@daum.net |
364ca9a98f0bed65c764fca9306fca1df99f3df8 | 5c12a874f7a4c01d233b028bb2332214d7d4919f | /activity/migrations/0002_auto_20191107_1203.py | 732eae609155059d6742805f68f7f80a587bd454 | [] | no_license | cocogels/marketing | 9cc9535ac04ac008c9882ed5087a33eb201fbeeb | 41e3d4959cf2c07a1d8f00821e5a4ad829419ba4 | refs/heads/master | 2020-07-06T06:09:13.356059 | 2019-11-08T09:04:08 | 2019-11-08T09:04:08 | 202,916,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,563 | py | # Generated by Django 2.2.1 on 2019-11-07 04:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_fsm
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('activity', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='activity',
name='approver',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user_approver', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='activity',
name='budget',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='activity',
name='flyer_ihe',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='activity',
name='flyer_nc',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='activity',
name='flyer_shs',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='activity',
name='standy',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='activity',
name='tarpaulin',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='activity',
name='end_date',
field=models.DateField(),
),
migrations.AlterField(
model_name='activity',
name='start_date',
field=models.DateField(),
),
migrations.AlterField(
model_name='activity',
name='status',
field=django_fsm.FSMIntegerField(choices=[(0, 'created'), (1, 'pending 1 '), (2, 'apppoved 1'), (3, 'revised 1'), (4, 'pending 2'), (5, 'apppoved 2'), (6, 'revised 2'), (7, 'pending 3'), (8, 'apppoved 3'), (9, 'revised 3'), (10, 'rejected'), (11, 'cancelled')], default=0, protected=True),
),
migrations.AlterField(
model_name='activity',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_request', to=settings.AUTH_USER_MODEL),
),
]
| [
"laracasangelo008@gmail.com"
] | laracasangelo008@gmail.com |
0a852681577cdebfbc2733b9e17da56aaebb4186 | b0d898be0456049180333dec8c216f1752b0ddf3 | /0215_5.py | f4aab87ee519f122997ae11fe5fe5059f0fb0376 | [] | no_license | mason890/python | c2517a034acd13d4b3e2c9d2b6f2758d25b3780e | 473f2032d4ee90cb9757adebfd2b45c7e6a430d6 | refs/heads/master | 2020-12-08T11:33:45.080873 | 2020-02-22T07:49:30 | 2020-02-22T07:49:30 | 232,972,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | #추상 클래스
#추상 메소드를 가진 클래스
#주로 최상위 부모클래스가 해당
#객체를 만들수 없다.
class Animal :
def __init__(self, name):
self.name = name
def sound(self):
print('소리가 납니다.')
class Dog(Animal) :
def __init__(self,name):
super().__init__(name)
def sound(self):
print('멍멍')
def smell(self):
print('냄새 맡는다.')
class Cat(Animal) :
def __init__(self,name):
super().__init__(name)
def sound(self):
print('야웅')
def jump(self):
print('점프한다.')
class Cow(Animal) :
def __init__(self,name):
super().__init__(name)
def sound(self):
print('음메')
def milk(self):
print('우유만든다.')
class Mouse(Animal) :
def __init__(self,name):
super().__init__(name)
def sound(self):
print('찍찍')
def smell(self):
print('달린다.')
stl = []
# from abc import *
# class animal(metaclass=ABCMeta):
# def __init__(self,name):
# self.name = name
# @abstractmethod
# def sound(self):
# pass | [
"noreply@github.com"
] | noreply@github.com |
83ae553fb14325ec70a9a373c9e33ad74c4c1844 | e997e95cdda60b898d2da30a4ce2f27119532a3f | /main.py | f3f6430cc0e3e25deaf00ab80bc65d942573a046 | [] | no_license | dperezc21/guane-intern-fastapi | 23ea8c9f540a060d03bc34cd8b68a8db58477e80 | 42d2b1b2cf4881d811dc24a9d00490bbec091967 | refs/heads/master | 2023-05-08T03:19:34.009368 | 2021-05-28T02:40:56 | 2021-05-28T02:40:56 | 370,084,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,299 | py |
from fastapi import FastAPI
from database import db
from crud.crud_dog import *
from crud.crud_user import *
from schemas import ModelDogs, ModelUser
from auth import *
from fastapi import Depends
from fastapi.security import OAuth2PasswordRequestForm
from models import *
app = FastAPI()
@app.on_event('startup')
def startup():
if db.is_closed():
db.connect()
db.create_tables([Dog, User])
@app.on_event('shutdown')
def shutdown():
if not db.is_closed():
db.close()
@app.post("/api/users")
async def create_users(id:str, name:str, last_name:str, email:str):
return create_user(id, name, last_name, email)
@app.get("/api/users/{id}")
async def get_user_id(id:str):
user=get_user(id)
if not user:
return "Usuario no encontrado"
else:
return user
@app.delete("/api/users/{id}")
async def delete_users(id:str):
return delete_user(id)
@app.put("/api/users")
async def update_user(user:ModelUser):
return update_user_(user)
@app.post("/api/dogs")
async def create_dogs(id:str, name:str, is_adopted:bool, id_user:str, form_data:OAuth2PasswordRequestForm = Depends()):
user = getUser(fake_users_db, form_data.username, form_data.password)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail ="Usuario o contraseña incorrecto",
headers = {"WWW-Authenticate": "Bearer"},
)
token = create_access_token(
data={"password" : user["password"],
"sub" : user["username"]}
)
if get_current_user(token):
return create_dog(id_user, id,name, get_dog_image_api(), is_adopted)
@app.get("/api/dogs")
async def dogs():
result = get_list_dogs()
return list(result)
@app.get("/api/dogs/is_adopted")
async def dog_is_adopted():
adopted = get_dog_is_adopted()
if not adopted:
return "no existe"
return list(adopted)
@app.get("/api/dogs/{name}")
async def get_dog_for_name(name:str):
dog = get_dog(name)
if not dog:
return "No existe registro"
return list(dog)
@app.delete("/api/dogs/{name}")
async def eliminar_dog(name:str):
return delete_dog(name)
@app.put("/api/dogs")
async def update_dog(dog:ModelDogs):
return update_dog(dog)
| [
"davierperez11.2@gmail.com"
] | davierperez11.2@gmail.com |
9e2a924be8d259560a72db6497c863403bfb61b4 | 8cb68e9e7b920c7795f23b8fb11d27de77fe6888 | /tox/tests/test_z_cmdline.py | b28888066c205307d439a32cfbe3bcf4f3845f85 | [
"MIT",
"Apache-2.0"
] | permissive | maxalbert/circleci-python-sandbox | fdc0d8acd4e80793b68eb4cc295d60ce32ee9fcc | a4c07c809e05e7f34dfbf7a449c60070d1856e6a | refs/heads/master | 2021-01-17T21:56:58.803742 | 2016-02-19T19:58:53 | 2016-02-19T20:08:20 | 52,112,609 | 0 | 0 | null | 2016-02-19T19:53:31 | 2016-02-19T19:53:31 | null | UTF-8 | Python | false | false | 20,991 | py | import tox
import py
import pytest
from tox._pytestplugin import ReportExpectMock
try:
import json
except ImportError:
import simplejson as json
pytest_plugins = "pytester"
from tox.session import Session
from tox.config import parseconfig
def test_report_protocol(newconfig):
    # Use a no-op Popen stand-in: the test only checks that creating/updating a
    # venv drives the expected report hooks ("using", "logpopen").
    config = newconfig([], """
            [testenv:mypython]
            deps=xy
    """)
    class Popen:
        def __init__(self, *args, **kwargs):
            pass
        def communicate(self):
            return "", ""
        def wait(self):
            pass
    session = Session(config, popen=Popen,
                      Report=ReportExpectMock)
    report = session.report
    report.expect("using")
    venv = session.getvenv("mypython")
    venv.update()
    report.expect("logpopen")
def test__resolve_pkg(tmpdir, mocksession):
    """_resolve_pkg: missing dir/dependency raise; the highest stable version wins."""
    distshare = tmpdir.join("distshare")
    spec = distshare.join("pkg123-*")
    # The string-expression form of raises() is deprecated (removed in newer
    # pytest); use the context-manager form instead.
    with pytest.raises(tox.exception.MissingDirectory):
        mocksession._resolve_pkg(spec)
    distshare.ensure(dir=1)
    with pytest.raises(tox.exception.MissingDependency):
        mocksession._resolve_pkg(spec)
    distshare.ensure("pkg123-1.3.5.zip")
    p = distshare.ensure("pkg123-1.4.5.zip")
    mocksession.report.clear()
    result = mocksession._resolve_pkg(spec)
    assert result == p
    mocksession.report.expect("info", "determin*pkg123*")
    # A newer dev version triggers a warning but must not be selected.
    distshare.ensure("pkg123-1.4.7dev.zip")
    mocksession._clearmocks()
    result = mocksession._resolve_pkg(spec)
    mocksession.report.expect("warning", "*1.4.7*")
    assert result == p
    mocksession._clearmocks()
    # A pre-release of the same version must not shadow the stable artifact.
    distshare.ensure("pkg123-1.4.5a1.tar.gz")
    result = mocksession._resolve_pkg(spec)
    assert result == p
def test__resolve_pkg_doubledash(tmpdir, mocksession):
    # Package names containing a dash must still resolve, and a pre-release
    # must not shadow the stable artifact.
    distshare = tmpdir.join("distshare")
    p = distshare.ensure("pkg-mine-1.3.0.zip")
    res = mocksession._resolve_pkg(distshare.join("pkg-mine*"))
    assert res == p
    distshare.ensure("pkg-mine-1.3.0a1.zip")
    res = mocksession._resolve_pkg(distshare.join("pkg-mine*"))
    assert res == p
class TestSession:
    """Integration tests for tox.session.Session: sdist building, distshare
    copying, popen logging, summary output and venv caching."""
    def test_make_sdist(self, initproj):
        # Building twice reuses the same path; a stale/modified sdist is rebuilt.
        initproj("example123-0.5", filedefs={
            'tests': {'test_hello.py': "def test_hello(): pass"},
            'tox.ini': '''
            '''
        })
        config = parseconfig([])
        session = Session(config)
        sdist = session.get_installpkg_path()
        assert sdist.check()
        assert sdist.ext == ".zip"
        assert sdist == config.distdir.join(sdist.basename)
        sdist2 = session.get_installpkg_path()
        assert sdist2 == sdist
        sdist.write("hello")
        assert sdist.stat().size < 10
        sdist_new = Session(config).get_installpkg_path()
        assert sdist_new == sdist
        assert sdist_new.stat().size > 10
    def test_make_sdist_distshare(self, tmpdir, initproj):
        # The built sdist must be copied byte-for-byte into the distshare dir.
        distshare = tmpdir.join("distshare")
        initproj("example123-0.6", filedefs={
            'tests': {'test_hello.py': "def test_hello(): pass"},
            'tox.ini': '''
            [tox]
            distshare=%s
            ''' % distshare
        })
        config = parseconfig([])
        session = Session(config)
        sdist = session.get_installpkg_path()
        assert sdist.check()
        assert sdist.ext == ".zip"
        assert sdist == config.distdir.join(sdist.basename)
        sdist_share = config.distshare.join(sdist.basename)
        assert sdist_share.check()
        assert sdist_share.read("rb") == sdist.read("rb"), (sdist_share, sdist)
    def test_log_pcall(self, mocksession):
        # Subprocess output must be captured into a log file under logdir.
        mocksession.config.logdir.ensure(dir=1)
        assert not mocksession.config.logdir.listdir()
        action = mocksession.newaction(None, "something")
        action.popen(["echo", ])
        match = mocksession.report.getnext("logpopen")
        assert match[1].outpath.relto(mocksession.config.logdir)
        assert match[1].shell is False
    def test_summary_status(self, initproj, capfd):
        # The summary line reflects per-env failure/success status.
        initproj("logexample123-0.5", filedefs={
            'tests': {'test_hello.py': "def test_hello(): pass"},
            'tox.ini': '''
            [testenv:hello]
            [testenv:world]
            '''
        })
        config = parseconfig([])
        session = Session(config)
        envs = session.venvlist
        assert len(envs) == 2
        env1, env2 = envs
        env1.status = "FAIL XYZ"
        assert env1.status
        env2.status = 0
        assert not env2.status
        session._summary()
        out, err = capfd.readouterr()
        exp = "%s: FAIL XYZ" % env1.envconfig.envname
        assert exp in out
        exp = "%s: commands succeeded" % env2.envconfig.envname
        assert exp in out
    def test_getvenv(self, initproj, capfd):
        # getvenv caches per-name and raises LookupError for unknown envs.
        initproj("logexample123-0.5", filedefs={
            'tests': {'test_hello.py': "def test_hello(): pass"},
            'tox.ini': '''
            [testenv:hello]
            [testenv:world]
            '''
        })
        config = parseconfig([])
        session = Session(config)
        venv1 = session.getvenv("hello")
        venv2 = session.getvenv("hello")
        assert venv1 is venv2
        venv1 = session.getvenv("world")
        venv2 = session.getvenv("world")
        assert venv1 is venv2
        pytest.raises(LookupError, lambda: session.getvenv("qwe"))
# not sure we want this option ATM
def XXX_test_package(cmd, initproj):
    # Disabled test (XXX_ prefix keeps pytest from collecting it): would check
    # a standalone "tox package" command.
    initproj("myproj-0.6", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'MANIFEST.in': """
            include doc
            include myproj
            """,
        'tox.ini': ''
    })
    result = cmd.run("tox", "package")
    assert not result.ret
    result.stdout.fnmatch_lines([
        "*created sdist package at*",
    ])
def test_minversion(cmd, initproj):
    # A tox.ini requiring a newer tox must abort with a clear error.
    initproj("interp123-0.5", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'tox.ini': '''
        [tox]
        minversion = 6.0
        '''
    })
    result = cmd.run("tox", "-v")
    result.stdout.fnmatch_lines([
        "*ERROR*tox version is * required is at least 6.0*"
    ])
    assert result.ret
def test_run_custom_install_command_error(cmd, initproj):
    # A non-executable install_command must surface an invocation error.
    initproj("interp123-0.5", filedefs={
        'tox.ini': '''
        [testenv]
        install_command=./tox.ini {opts} {packages}
        '''
    })
    result = cmd.run("tox")
    result.stdout.fnmatch_lines([
        "ERROR: invocation failed (errno *), args: ['*/tox.ini*",
    ])
    assert result.ret
def test_unknown_interpreter_and_env(cmd, initproj):
    # Unknown basepython fails the run; -e with an unknown env name also errors.
    initproj("interp123-0.5", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'tox.ini': '''
        [testenv:python]
        basepython=xyz_unknown_interpreter
        [testenv]
        changedir=tests
        '''
    })
    result = cmd.run("tox")
    assert result.ret
    result.stdout.fnmatch_lines([
        "*ERROR*InterpreterNotFound*xyz_unknown_interpreter*",
    ])
    result = cmd.run("tox", "-exyz")
    assert result.ret
    result.stdout.fnmatch_lines([
        "*ERROR*unknown*",
    ])
def test_unknown_interpreter(cmd, initproj):
    # An interpreter that cannot be found must fail the run with a clear error.
    initproj("interp123-0.5", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'tox.ini': '''
        [testenv:python]
        basepython=xyz_unknown_interpreter
        [testenv]
        changedir=tests
        '''
    })
    result = cmd.run("tox")
    assert result.ret
    result.stdout.fnmatch_lines([
        "*ERROR*InterpreterNotFound*xyz_unknown_interpreter*",
    ])
def test_skip_platform_mismatch(cmd, initproj):
    # An env whose platform regex does not match is skipped, not failed.
    initproj("interp123-0.5", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'tox.ini': '''
        [testenv]
        changedir=tests
        platform=x123
        '''
    })
    result = cmd.run("tox")
    assert not result.ret
    result.stdout.fnmatch_lines("""
        SKIPPED*platform mismatch*
    """)
def test_skip_unknown_interpreter(cmd, initproj):
    # With --skip-missing-interpreters the env is skipped instead of failing.
    initproj("interp123-0.5", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'tox.ini': '''
        [testenv:python]
        basepython=xyz_unknown_interpreter
        [testenv]
        changedir=tests
        '''
    })
    result = cmd.run("tox", "--skip-missing-interpreters")
    assert not result.ret
    result.stdout.fnmatch_lines([
        "*SKIPPED*InterpreterNotFound*xyz_unknown_interpreter*",
    ])
def test_unknown_dep(cmd, initproj):
    # An uninstallable dependency must fail the run with a clear error.
    initproj("dep123-0.7", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'tox.ini': '''
        [testenv]
        deps=qweqwe123
        changedir=tests
        '''
    })
    result = cmd.run("tox", )
    assert result.ret
    result.stdout.fnmatch_lines([
        "*ERROR*could not install*qweqwe123*",
    ])
def test_unknown_environment(cmd, initproj):
    # Requesting an undefined env via -e must produce an error.
    initproj("env123-0.7", filedefs={
        'tox.ini': ''
    })
    result = cmd.run("tox", "-e", "qpwoei")
    assert result.ret
    result.stdout.fnmatch_lines([
        "*ERROR*unknown*environment*qpwoei*",
    ])
def test_skip_sdist(cmd, initproj):
    # With skipsdist=True a broken setup.py must not matter at all.
    initproj("pkg123-0.7", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'setup.py': """
            syntax error
        """,
        'tox.ini': '''
        [tox]
        skipsdist=True
        [testenv]
        commands=python -c "print('done')"
        '''
    })
    result = cmd.run("tox", )
    assert result.ret == 0
def test_minimal_setup_py_empty(cmd, initproj):
    # A completely empty setup.py must be reported as such.
    initproj("pkg123-0.7", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'setup.py': """
        """,
        'tox.ini': ''
    })
    result = cmd.run("tox", )
    assert result.ret == 1
    result.stdout.fnmatch_lines([
        "*ERROR*empty*",
    ])
def test_minimal_setup_py_comment_only(cmd, initproj):
    # A setup.py consisting only of a comment counts as empty.
    initproj("pkg123-0.7", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'setup.py': """\n# some comment
        """,
        'tox.ini': ''
    })
    result = cmd.run("tox", )
    assert result.ret == 1
    result.stdout.fnmatch_lines([
        "*ERROR*empty*",
    ])
def test_minimal_setup_py_non_functional(cmd, initproj):
    # A setup.py that imports but never calls setup() must be flagged.
    initproj("pkg123-0.7", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'setup.py': """
        import sys
        """,
        'tox.ini': ''
    })
    result = cmd.run("tox", )
    assert result.ret == 1
    result.stdout.fnmatch_lines([
        "*ERROR*check setup.py*",
    ])
def test_sdist_fails(cmd, initproj):
    # A setup.py with a syntax error must fail packaging with a FAIL summary.
    initproj("pkg123-0.7", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'setup.py': """
            syntax error
        """,
        'tox.ini': '',
    })
    result = cmd.run("tox", )
    assert result.ret
    result.stdout.fnmatch_lines([
        "*FAIL*could not package project*",
    ])
def test_package_install_fails(cmd, initproj):
    """Packaging succeeds but installing must fail on the bogus install_requires dep."""
    initproj("pkg123-0.7", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'setup.py': """
            from setuptools import setup
            setup(
                name='pkg123',
                description='pkg123 project',
                version='0.7',
                license='MIT',
                platforms=['unix', 'win32'],
                packages=['pkg123',],
                install_requires=['qweqwe123'],
            )
        """,
        'tox.ini': '',
    })
    result = cmd.run("tox", )
    assert result.ret
    result.stdout.fnmatch_lines([
        "*InvocationError*",
    ])
class TestToxRun:
    """End-to-end tox runs against a generated example project with one passing test."""
    @pytest.fixture
    def example123(self, initproj):
        """Create the example123 project: a single py.test test plus a junitxml-producing tox.ini."""
        initproj("example123-0.5", filedefs={
            'tests': {
                'test_hello.py': """
                    def test_hello(pytestconfig):
                        pass
                """,
            },
            'tox.ini': '''
                [testenv]
                changedir=tests
                commands= py.test --basetemp={envtmpdir} \
                          --junitxml=junit-{envname}.xml
                deps=pytest
            '''
        })
    def test_toxuone_env(self, cmd, example123):
        """Both a plain run and an explicit -epython run succeed and report one passed test."""
        result = cmd.run("tox")
        assert not result.ret
        result.stdout.fnmatch_lines([
            "*junit-python.xml*",
            "*1 passed*",
        ])
        result = cmd.run("tox", "-epython", )
        assert not result.ret
        result.stdout.fnmatch_lines([
            "*1 passed*",
            "*summary*",
            "*python: commands succeeded"
        ])
    def test_different_config_cwd(self, cmd, example123, monkeypatch):
        """tox -c with a relative ini path works from a different working directory."""
        # see that things work with a different CWD
        monkeypatch.chdir(cmd.tmpdir)
        result = cmd.run("tox", "-c", "example123/tox.ini")
        assert not result.ret
        result.stdout.fnmatch_lines([
            "*1 passed*",
            "*summary*",
            "*python: commands succeeded"
        ])
    def test_json(self, cmd, example123):
        """--result-json writes a well-formed report even when the test run fails."""
        # see that tests can also fail and retcode is correct
        testfile = py.path.local("tests").join("test_hello.py")
        assert testfile.check()
        testfile.write("def test_fail(): assert 0")
        jsonpath = cmd.tmpdir.join("res.json")
        result = cmd.run("tox", "--result-json", jsonpath)
        assert result.ret == 1
        data = json.load(jsonpath.open("r"))
        verify_json_report_format(data)
        result.stdout.fnmatch_lines([
            "*1 failed*",
            "*summary*",
            "*python: *failed*",
        ])
def test_develop(initproj, cmd):
    """--develop must skip building an sdist entirely."""
    initproj("example123", filedefs={'tox.ini': """
    """})
    result = cmd.run("tox", "-vv", "--develop")
    assert not result.ret
    assert "sdist-make" not in result.stdout.str()
def test_usedevelop(initproj, cmd):
    """usedevelop=True in tox.ini must skip building an sdist."""
    initproj("example123", filedefs={'tox.ini': """
        [testenv]
        usedevelop=True
    """})
    result = cmd.run("tox", "-vv")
    assert not result.ret
    assert "sdist-make" not in result.stdout.str()
def test_usedevelop_mixed(initproj, cmd):
    """sdist is built only when at least one selected env has usedevelop=False."""
    initproj("example123", filedefs={'tox.ini': """
        [testenv:devenv]
        usedevelop=True
        [testenv:nondev]
        usedevelop=False
    """})
    # running only 'devenv' should not do sdist
    result = cmd.run("tox", "-vv", "-e", "devenv")
    assert not result.ret
    assert "sdist-make" not in result.stdout.str()
    # running all envs should do sdist
    result = cmd.run("tox", "-vv")
    assert not result.ret
    assert "sdist-make" in result.stdout.str()
def test_test_usedevelop(cmd, initproj):
    """Full workflow with usedevelop=True: run, re-run one env, run from another CWD, then fail."""
    initproj("example123-0.5", filedefs={
        'tests': {
            'test_hello.py': """
                def test_hello(pytestconfig):
                    pass
            """,
        },
        'tox.ini': '''
            [testenv]
            usedevelop=True
            changedir=tests
            commands=
                py.test --basetemp={envtmpdir} --junitxml=junit-{envname}.xml []
            deps=pytest
        '''
    })
    result = cmd.run("tox", "-v")
    assert not result.ret
    result.stdout.fnmatch_lines([
        "*junit-python.xml*",
        "*1 passed*",
    ])
    # develop-install mode must never build an sdist
    assert "sdist-make" not in result.stdout.str()
    result = cmd.run("tox", "-epython", )
    assert not result.ret
    result.stdout.fnmatch_lines([
        "*1 passed*",
        "*summary*",
        "*python: commands succeeded"
    ])
    # see that things work with a different CWD
    old = cmd.tmpdir.chdir()
    result = cmd.run("tox", "-c", "example123/tox.ini")
    assert not result.ret
    result.stdout.fnmatch_lines([
        "*1 passed*",
        "*summary*",
        "*python: commands succeeded"
    ])
    old.chdir()
    # see that tests can also fail and retcode is correct
    testfile = py.path.local("tests").join("test_hello.py")
    assert testfile.check()
    testfile.write("def test_fail(): assert 0")
    result = cmd.run("tox", )
    assert result.ret
    result.stdout.fnmatch_lines([
        "*1 failed*",
        "*summary*",
        "*python: *failed*",
    ])
def test_test_piphelp(initproj, cmd):
    """`pip -h` must be runnable as a testenv command in every configured env."""
    initproj("example123", filedefs={'tox.ini': """
        # content of: tox.ini
        [testenv]
        commands=pip -h
        [testenv:py26]
        basepython=python
        [testenv:py27]
        basepython=python
    """})
    result = cmd.run("tox")
    assert not result.ret
def test_notest(initproj, cmd):
    """--notest skips commands on the first run and reuses the venv on the second."""
    initproj("example123", filedefs={'tox.ini': """
        # content of: tox.ini
        [testenv:py26]
        basepython=python
    """})
    result = cmd.run("tox", "-v", "--notest")
    assert not result.ret
    result.stdout.fnmatch_lines([
        "*summary*",
        "*py26*skipped tests*",
    ])
    result = cmd.run("tox", "-v", "--notest", "-epy26")
    assert not result.ret
    result.stdout.fnmatch_lines([
        "*py26*reusing*",
    ])
def test_PYC(initproj, cmd, monkeypatch):
    """venv creation must still succeed when bytecode writing is disabled via the environment."""
    initproj("example123", filedefs={'tox.ini': ''})
    # Bug fix: the variable CPython honors is PYTHONDONTWRITEBYTECODE; the
    # previous name "PYTHONDOWNWRITEBYTECODE" was a typo and had no effect,
    # so the test never exercised the intended condition.
    monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
    result = cmd.run("tox", "-v", "--notest")
    assert not result.ret
    result.stdout.fnmatch_lines([
        "*create*",
    ])
def test_env_VIRTUALENV_PYTHON(initproj, cmd, monkeypatch):
    """A bogus VIRTUALENV_PYTHON in the environment must not break venv creation."""
    initproj("example123", filedefs={'tox.ini': ''})
    monkeypatch.setenv("VIRTUALENV_PYTHON", '/FOO')
    result = cmd.run("tox", "-v", "--notest")
    assert not result.ret, result.stdout.lines
    result.stdout.fnmatch_lines([
        "*create*",
    ])
def test_sdistonly(initproj, cmd):
    """--sdistonly builds the package but must not create any virtualenv."""
    initproj("example123", filedefs={'tox.ini': """
    """})
    result = cmd.run("tox", "-v", "--sdistonly")
    assert not result.ret
    result.stdout.fnmatch_lines([
        "*sdist-make*setup.py*",
    ])
    assert "-mvirtualenv" not in result.stdout.str()
def test_separate_sdist_no_sdistfile(cmd, initproj):
    """--sdistonly with a distshare dir must drop exactly one sdist zip there."""
    distshare = cmd.tmpdir.join("distshare")
    initproj(("pkg123-foo", "0.7"), filedefs={
        'tox.ini': """
            [tox]
            distshare=%s
        """ % distshare
    })
    result = cmd.run("tox", "--sdistonly")
    assert not result.ret
    l = distshare.listdir()
    assert len(l) == 1
    sdistfile = l[0]
    assert 'pkg123-foo-0.7.zip' in str(sdistfile)
def test_separate_sdist(cmd, initproj):
    """An sdist published to distshare must be picked up via sdistsrc on a later run."""
    distshare = cmd.tmpdir.join("distshare")
    initproj("pkg123-0.7", filedefs={
        'tox.ini': """
            [tox]
            distshare=%s
            sdistsrc={distshare}/pkg123-0.7.zip
        """ % distshare
    })
    result = cmd.run("tox", "--sdistonly")
    assert not result.ret
    l = distshare.listdir()
    assert len(l) == 1
    sdistfile = l[0]
    result = cmd.run("tox", "-v", "--notest")
    assert not result.ret
    result.stdout.fnmatch_lines([
        "*inst*%s*" % sdistfile,
    ])
def test_sdist_latest(tmpdir, newconfig):
    """A wildcard sdistsrc must resolve to the newest matching version (1.4.5 over 1.4.5a1)."""
    distshare = tmpdir.join("distshare")
    config = newconfig([], """
        [tox]
        distshare=%s
        sdistsrc={distshare}/pkg123-*
    """ % distshare)
    p = distshare.ensure("pkg123-1.4.5.zip")
    distshare.ensure("pkg123-1.4.5a1.zip")
    session = Session(config)
    sdist_path = session.get_installpkg_path()
    assert sdist_path == p
def test_installpkg(tmpdir, newconfig):
    """--installpkg must make the given archive the package to install."""
    p = tmpdir.ensure("pkg123-1.0.zip")
    config = newconfig(["--installpkg=%s" % p], "")
    session = Session(config)
    sdist_path = session.get_installpkg_path()
    assert sdist_path == p
@pytest.mark.xfail("sys.platform == 'win32' and sys.version_info < (2,6)",
                   reason="test needs better impl")
def test_envsitepackagesdir(cmd, initproj):
    """{envsitepackagesdir} must expand to the env's site-packages directory in commands."""
    initproj("pkg512-0.0.5", filedefs={
        'tox.ini': """
        [testenv]
        commands=
            python -c "print(r'X:{envsitepackagesdir}')"
        """})
    result = cmd.run("tox")
    assert result.ret == 0
    result.stdout.fnmatch_lines("""
        X:*tox*site-packages*
    """)
def verify_json_report_format(data, testenvs=True):
    """Assert the structural invariants of a tox --result-json report dict.

    Checks the report/tox version fields and, when ``testenvs`` is True, that
    every env's setup/test command entries carry output and a retcode, and
    that non-GLOB envs record installed packages and Python interpreter info.
    """
    assert data["reportversion"] == "1"
    assert data["toxversion"] == tox.__version__
    if testenvs:
        for envname, envdata in data["testenvs"].items():
            for commandtype in ("setup", "test"):
                if commandtype not in envdata:
                    continue
                for command in envdata[commandtype]:
                    assert command["output"]
                    # NOTE(review): this requires a truthy retcode field; a
                    # retcode recorded as integer 0 would fail here — confirm
                    # retcodes are serialized as strings by the reporter.
                    assert command["retcode"]
            if envname != "GLOB":
                assert isinstance(envdata["installed_packages"], list)
                pyinfo = envdata["python"]
                assert isinstance(pyinfo["version_info"], list)
                assert pyinfo["version"]
                assert pyinfo["executable"]
| [
"smlstvnh@gmail.com"
] | smlstvnh@gmail.com |
bfbdafd3b48ff0f5e1b4d217edb4c347cc630969 | d7ac596181d48395ee16f2ba74ce3ed6a959c369 | /test.py | 1dbfbb57c7b273b125e6cd0042f6917878057716 | [] | no_license | bloggermark/Test-Repo | 5eba24e84d73bceaaf16410ea857d0cf2e1284e2 | 87afa26eadd55f5378bd53e192a86dd10969c20a | refs/heads/main | 2023-06-12T07:27:50.985894 | 2021-07-10T16:41:03 | 2021-07-10T16:41:03 | 384,736,324 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36 | py | print('hello world!')
print('test') | [
"bloggermark@sbcglobal.net"
] | bloggermark@sbcglobal.net |
d9c577b60a03d5b7cc46cfe1a2e2d0040acd95f9 | 7d4273077ca64cc1311b2d9fe9148f192cc91ce9 | /V2prizeTheBestUsers.py | ba2025cc0aa5a9497cd290a17d04b9d458df978b | [] | no_license | mateuszgajewski-python/PyPi | a7016cf6723b41df0d67e2afa15b914698811e45 | 057a16dda7fc79a317caac0c9fac44e3ef98436f | refs/heads/master | 2023-03-05T06:47:28.477825 | 2021-02-14T13:53:21 | 2021-02-14T13:53:21 | 337,395,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,545 | py | import requests
import json
# Download the full todo list from the JSONPlaceholder demo API.
r = requests.get("https://jsonplaceholder.typicode.com/todos")
downloadList = r.json()
def how_many_true(list_, key, keyWithBoolean):
    """Count, per distinct value of ``key``, how many records have ``keyWithBoolean`` == True.

    :param list_: list of dict records
    :param key: record field to group by (e.g. "userId")
    :param keyWithBoolean: record field holding the boolean flag
    :return: dict mapping each key value to its count of True records
    """
    dictionary = {}
    for record in list_:
        # Deliberately keeps the original strict `== True` comparison so that
        # truthy-but-not-True values (e.g. "yes") are still excluded.
        if record[keyWithBoolean] == True:
            # Bug fix: the old try/except used a bare `except:`, which hid
            # every error (not just the missing-key case). dict.get gives the
            # same increment-or-initialize behavior without masking anything.
            dictionary[record[key]] = dictionary.get(record[key], 0) + 1
    return dictionary
# Tally completed todos per user. (The original script recomputes this same
# value again a few lines below, so this first call is redundant.)
userIdWithPoints = how_many_true(downloadList, "userId", "completed")
def keys_with_top_values(dictionary):
    """Return every key whose value equals the maximum value in the dict.

    :param dictionary: mapping of keys to comparable values
    :return: list of keys sharing the maximum value; [] for an empty dict
    """
    # Robustness fix: max() on an empty sequence raises ValueError, so guard
    # the empty-dict case explicitly instead of crashing.
    if not dictionary:
        return []
    max_value = max(dictionary.values())
    return [key for key, value in dictionary.items() if value == max_value]
# Tally completed todos per user, then pick the user id(s) with the highest tally.
userIdWithPoints = how_many_true(downloadList, "userId", "completed")
theBestUsers = keys_with_top_values(userIdWithPoints)
print(theBestUsers)
def tail_of_address_ampersand(myList, key="id"):
    """Build the query-string tail "key=v1&key=v2&..." for the given values.

    :param myList: values to encode (stringified with str())
    :param key: query parameter name repeated for every value
    :return: joined string without a trailing ampersand; "" for an empty list
    """
    # Idiom fix: str.join replaces the manual index-counting loop that tracked
    # whether to append a trailing "&"; behavior is identical, including the
    # empty-list case which yields "".
    return "&".join("{0}={1}".format(key, x) for x in myList)
# Approach 2 ("sposob 2"): one HTTP request per winning user id.
for theBestUser in theBestUsers:
    r = requests.get("https://jsonplaceholder.typicode.com/users/" + str(theBestUser))
    user = r.json()
    print("Ciasteczko dostaje użytkownik o imieniu", user["name"])
# Approach 3 ("sposob 3"): a single request filtered with repeated id= query params.
r = requests.get("https://jsonplaceholder.typicode.com/users?" + tail_of_address_ampersand(theBestUsers))
users = r.json()
for user in users:
    print ("Ciasteczko dostaje użytkownik o imieniu", user["name"])
| [
"mateuszgajewski@gmail.com"
] | mateuszgajewski@gmail.com |
e3d781a3f7d2d498cb5c6001e32a838461a0daa6 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_054/ch2_2020_09_16_11_34_55_516156.py | cb769d528b4f741eaac3317840c0153eb23c362a | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | # Está função é para calcular a velocidade média
def velocidade_media (d,t):
velocidade_media = d / t
return velocidade_média
| [
"you@example.com"
] | you@example.com |
d477f39fa7d688c996c72f8e5f53aa273d7ae6d0 | 916f8962b18272b62fdccd5b2d9ec4b2b2d4d533 | /Thalamus_wmSCrsFC/nosmooth_adding_YBK_postMelodic_znorm_glm.py | f873477c1245feee3ac8ed36ab7059fef434132f | [] | no_license | fahaxiki1234/ThalamoCortical_YBK | 284b84f67fdcc525e3b0c65cc13f0068463fce5d | 63fee7dea60ba94d04ef9f55cd2b220bb63836a6 | refs/heads/master | 2023-03-16T04:22:02.865421 | 2020-05-22T04:21:46 | 2020-05-22T04:21:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,416 | py | import re
import sys
import os
from os.path import join, basename, isfile, isdir
import argparse
import nibabel as nb
import numpy as np
from scipy import stats
def zStats (side, hemi,downsample, subject):
    """Z-normalize dual-regression stage-1 outputs and run stage-2 fsl_glm fits.

    For each thresholded z-stat image in the group melodic stats directory:
    z-scores the subject's stage-1 regression matrix for that image and the
    subject's melodic_IC stage-1 matrix, then feeds each to ``fsl_glm``
    against the subject's reconstructed tractography matrix, masked by the
    hemisphere's thalamus ROI. Results are written to <melodicDir>/glm_out;
    any output file that already exists is skipped.

    :param side: 'left' or 'right' (used in the melodic dir and subject paths)
    :param hemi: 'lh' or 'rh' (selects the thalamus mask file)
    :param downsample: suffix such as '_ds3', or 'none' for no downsampling
    :param subject: subject directory name
    """
    melodicDir='tica_results/mICA_HCvsFEP_{side}{downsample}_nosmooth//dim0'.format(side=side, downsample=downsample)
    mmthreshDir = join(melodicDir, 'stats')
    # every thresh_zstat* image gets its own stage-1/stage-2 pass
    mmthreshImgss = [join(mmthreshDir, x) for x in os.listdir(mmthreshDir) if x.startswith('thresh_zstat')]
    for mmthreshImgLoc in mmthreshImgss:
        mmthreshImgs = basename(mmthreshImgLoc)
        mmthreshImg = mmthreshImgs.split('.')[0]
        glm_dir = join(melodicDir, 'glm_out')
        stage_1_output = join(glm_dir, 'znorm_{subject}_stage1_{mmthreshImg}').format(subject=subject, mmthreshImg=mmthreshImg)
        if not os.path.isfile(stage_1_output):
            print('\tz-normalizing mmthrehsolded individual IC stage 1 output')
            fsl_reg_out = join(glm_dir, '{subject}_stage1_{mmthreshImg}'.format(subject=subject, mmthreshImg=mmthreshImg))
            fsl_glm_mat = np.loadtxt(fsl_reg_out)
            # z-score normalization of the stage-1 regression matrix
            fsl_glm_mat_z = stats.zscore(fsl_glm_mat)
            np.savetxt(stage_1_output, fsl_glm_mat_z)
        else:
            print('\tz-normalized mmthresholded individual IC stage 1 output')
        melodicIC_on_thal = join(glm_dir, '{subject}_stage1').format(subject=subject)
        znorm_melodicIC_on_thal = join(glm_dir, 'znorm_{subject}_stage1').format(subject=subject)
        if not os.path.isfile(znorm_melodicIC_on_thal):
            print('\tz-normalizing melodic_IC stage 1 output')
            glm_mat = np.loadtxt(melodicIC_on_thal)
            glm_mat_z = stats.zscore(glm_mat)
            np.savetxt(znorm_melodicIC_on_thal, glm_mat_z)
        else:
            print('\tz-normalized melodic_IC stage 1 output')
        stage_2_out = join(glm_dir, 'znorm_{subject}_stage2_{mmthreshImg}.nii.gz'.format(subject=subject, mmthreshImg=mmthreshImg))
        if not os.path.isfile(stage_2_out):
            print('\trunning stage 2 with z-normalized mmthresholded individual IC')
            # NOTE(review): the glob-style subject path is passed straight to
            # fsl_glm via the shell; it relies on shell expansion of YB*.
            subject_map = '{subject}/YB*/{side}/fdt_matrix2_reconstructed{downsample}.nii.gz'.format(side=side, subject=subject, downsample=downsample)
            stage_1_maps = [join(glm_dir, x) for x in os.listdir(glm_dir) if x.startswith('znorm_{subject}_stage1_{mmthreshImg}'.format(subject = subject, mmthreshImg=mmthreshImg))]
            # NOTE(review): every iteration writes to the same stage_2_out
            # path, so with multiple matching stage-1 maps only the last
            # fsl_glm result survives — confirm this is intended.
            for stage_1_map in stage_1_maps:
                command = 'fsl_glm -i {subject_map} -d {stage_1_map} -m /Volume/CCNC_W1_2T/Thalamus_SCrsFC_20171026/{hemi}_thalamus_HOSC_60{downsample}.nii.gz -o {stage_2_out}'.format(subject_map = subject_map, stage_1_map = stage_1_map, hemi = hemi, downsample = downsample, stage_2_out = stage_2_out)
                os.popen(command).read()
        else:
            print('\tcompleted stage 2 with z-normalized mmthresholded individual IC')
        stage_2_znorm_melodicIC = join(glm_dir, 'znorm_{subject}_stage2.nii.gz'.format(subject=subject))
        if not os.path.isfile(stage_2_znorm_melodicIC):
            print('\trunning stage 2 with z-normalized melodic_IC')
            subject_map = '{subject}/YB*/{side}/fdt_matrix2_reconstructed{downsample}.nii.gz'.format(side=side, subject=subject, downsample=downsample)
            stage2_mat = join(glm_dir, 'znorm_{subject}_stage1').format(subject=subject)
            command = 'fsl_glm -i {subject_map} -d {stage2_mat} -m /Volume/CCNC_W1_2T/Thalamus_SCrsFC_20171026/{hemi}_thalamus_HOSC_60{downsample}.nii.gz -o {stage_2_znorm_melodicIC}'.format(subject_map = subject_map, stage2_mat=stage2_mat, stage_2_znorm_melodicIC = stage_2_znorm_melodicIC, hemi = hemi, downsample = downsample)
            os.popen(command).read()
        else:
            print('\tcompleted stage 2 with z-normalized melodic_IC')
# Command-line entry point: all four options take a single value (nargs=1),
# hence the [0] indexing when calling zStats below.
if __name__== "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--side', '-side', nargs=1, help = 'left or right', type=str)
    parser.add_argument('--hemi', '-hemi', nargs=1, help = 'lh or rh', type=str)
    parser.add_argument('--downsample', '-ds', nargs=1, help = 'e.g., _ds3; none if no downsampling', type=str)
    parser.add_argument('--subject', '-subj', nargs=1, type=str)
    args = parser.parse_args()
    #get_hemi = lambda x: 'lh' if x == 'left' else 'rh'
    #hemi = get_hemi(args.side)
    zStats(args.side[0], args.hemi[0], args.downsample[0], args.subject[0])
| [
"noreply@github.com"
] | noreply@github.com |
cdc14ab92541df567c2da2c71eab1580caecb1c9 | 73dadaa1c10ba149cf42fe3600edee9565b2e418 | /pythonBasicsHandsOn.py | 92dfdf4b8dab6dd981c4aea7320a988e9e35c5e3 | [] | no_license | Sankarb475/Python_Learning | 078826e5087bf1c6d2e18e9176af1bad3e345eb1 | 1b0d929a66f99b86bfd41590c1ce8781385223a0 | refs/heads/master | 2022-06-14T07:45:10.053327 | 2022-06-09T05:11:44 | 2022-06-09T05:11:44 | 167,215,150 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,865 | py | # Shallow Copy and Deep Copy
# When you use "=" to create a copy of an object, It only creates a new variable that shares the reference of the original object.
a = [1,2,3,4]
b = a
a.append(5)
a[2] = 100
print(a,b)
=> [1, 2, 100, 4, 5] [1, 2, 100, 4, 5]
-- Shallow copy creates a copy of t
import copy
a = [1,2,3,4]
b = copy.copy(a)
b.append(5)
print(a,b)
-- [1, 2, 3, 4] [1, 2, 3, 4, 5]
import copy
a = [1,2,3,4]
b = copy.copy(a)
b[0] = 100
print(a,b)
-- [1, 2, 3, 4] [100, 2, 3, 4]
import copy
a = [[1],[2],[3],[4]]
b = copy.copy(a)
a.append(5)
a[0][0] = 100
print(a,b)
-- [[100], [2], [3], [4], 5] [[100], [2], [3], [4]]
-- Deep Copy
it creates a completely new object with the elements of the existing object and they have no relation at all.
import copy
old_list = [[1, 1, 1], [2, 2, 2], [3, 3, 3]]
new_list = copy.deepcopy(old_list)
old_list[1][0] = 'BB'
print("Old list:", old_list)
print("New list:", new_list)
Old list: [[1, 1, 1], ['BB', 2, 2], [3, 3, 3]]
New list: [[1, 1, 1], [2, 2, 2], [3, 3, 3]]
================================================================================
# Python *args and **kwargs
The special syntax **kwargs in function definitions in Python is used to pass a keyword, variable-length argument list.
def myFun(**kwargs):
for key, value in kwargs.items():
print ("%s == %s" %(key, value))
# Driver code
myFun(first ='Geeks', mid ='for', last='Geeks')
-- The special syntax *args in function definitions in Python is used to pass a variable number of positional arguments to a function.
You can't pass a keyword parameter through *args.
def myFun(*argv):
for arg in argv:
print (arg)
myFun('Hello', 'Welcome', 'to', 'GeeksforGeeks')
# Python Decorators
-- A design pattern to in python - takes in a function, adds some functionality and returns it.
-- This is also called metaprogramming because a part of the program tries to modify another part of the program at compile time
def make_pretty(func):
def inner():
print("I got decorated")
func()
return inner
def ordinary():
print("I am ordinary")
>>> ordinary()
I am ordinary
>>> # let's decorate this ordinary function
>>> pretty = make_pretty(ordinary)
>>> pretty()
I got decorated
I am ordinary
#Python Serialization
===============================================
Pickling is the process whereby a Python object hierarchy is converted into a byte stream (usually not human readable) to be written to a file,
this is also known as Serialization. Unpickling is the reverse operation, whereby a byte stream is converted back into a working Python object hierarchy.
import pickle
# serializes
pickle.dump()
#deserializes
pickle.load()
'''to run python in command prompt, use "python", (windows :considering you have set up environment variable)
The interactive prompt runs code and echoes results as you go, but it doesn’t save your code in a file
'''
# enumerate() in python ==> it will give you the index numbers while iterating
>>> for n,i in enumerate(arr):
... print(n,i)
...
0 6
1 4
2 2
3 1
4 3
5 5
6 7
>>> arr
[6, 4, 2, 1, 3, 5, 7]
#to get current working directory
>>> import os
>>> os.getcwd()
'/Users/sankar.biswas'
#changing current direcctory
>>> os.chdir('/Users/sankar.biswas/Desktop/Python/coding')
>>> os.getcwd()
'/Users/sankar.biswas/Desktop/Python/coding'
# to run a python script from command prompt
python file1.py
#saving the output in a file
python script1.py > saveit.txt
# "dir" - you can use it to fetch a list of all the names available inside a module
>>> import sys
>>> dir(sys)
['__breakpointhook__', '__displayhook__', '__doc__', '__excepthook__', '__interactivehook__', '__loader__', '__name__', '__package__', '__spec__', '__stderr__',
'__stdin__', '__stdout__', '_clear_type_cache', '_current_frames', '_debugmallocstats', '_framework', '_getframe', '_git',
'_home', '_xoptions', 'abiflags', 'api_version', 'argv', 'base_exec_prefix', 'base_prefix', 'breakpointhook', 'builtin_module_names',
'byteorder', 'call_tracing', 'callstats', 'copyright', 'displayhook', 'dont_write_bytecode', 'exc_info', 'excepthook', 'exec_prefix', 'executable', 'exit',
'flags', 'float_info', 'float_repr_style', 'get_asyncgen_hooks', 'get_coroutine_origin_tracking_depth', 'get_coroutine_wrapper',
'getallocatedblocks', 'getcheckinterval', 'getdefaultencoding', 'getdlopenflags', 'getfilesystemencodeerrors', 'getfilesystemencoding',
'getprofile', 'getrecursionlimit', 'getrefcount', 'getsizeof', 'getswitchinterval', 'gettrace', 'hash_info', 'hexversion', 'implementation', 'int_info', 'intern', 'is_finalizing', 'maxsize', 'maxunicode',
'meta_path', 'modules', 'path', 'path_hooks', 'path_importer_cache', 'platform', 'prefix', 'ps1', 'ps2', 'set_asyncgen_hooks',
'set_coroutine_origin_tracking_depth', 'set_coroutine_wrapper', 'setcheckinterval', 'setdlopenflags', 'setprofile', 'setrecursionlimit',
'setswitchinterval', 'settrace', 'stderr', 'stdin', 'stdout', 'thread_info', 'version', 'version_info', 'warnoptions']
# the " exec(open('module.py').read())" built-in function call is another way to launch files from the interactive prompt without having to import and later reload
#you can also find out the functions you can apply on a variable using "dir"
>>> a = 234
>>> dir(a)
['__abs__', '__add__', '__and__', '__bool__', '__ceil__', '__class__', '__delattr__', '__dir__', '__divmod__', '__doc__',
'__eq__', '__float__', '__floor__', '__floordiv__', '__format__', '__ge__', '__getattribute__', '__getnewargs__', '__gt__',
'__hash__', '__index__', '__init__', '__init_subclass__', '__int__', '__invert__', '__le__', '__lshift__', '__lt__', '__mod__'
, '__mul__', '__ne__', '__neg__', '__new__', '__or__', '__pos__', '__pow__', '__radd__', '__rand__', '__rdivmod__', '__reduce__',
'__reduce_ex__', '__repr__', '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__', '__ror__', '__round__', '__rpow__', '__rrshift__',
'__rshift__', '__rsub__', '__rtruediv__', '__rxor__', '__setattr__', '__sizeof__', '__str__', '__sub__', '__subclasshook__', '__truediv__',
'__trunc__', '__xor__', 'bit_length', 'conjugate', 'denominator', 'from_bytes', 'imag', 'numerator', 'real', 'to_bytes']
# this will help you get a knowledge on the functionality of the function, dial 'q' to escape
>>> help(a.__abs__)
# Pattern Matching
>>> match = re.match('Hello[ \t]*(.*)world', 'Hello Python world')
>>> match
<re.Match object; span=(0, 18), match='Hello Python world'>
>>> match.group(1)
'Python '
>>> match = re.match('[/:](.*)[/:](.*)[/:](.*)', '/usr/home:lumberjack')
>>> match.groups()
('usr', 'home', 'lumberjack')
>>> re.split('[/:]', '/usr/home/lumberjack')
['', 'usr', 'home', 'lumberjack']
#List Operations
>>> L = [123, 'spam', 1.23]
>>> len(L)
3
>>> L*2
[123, 'spam', 1.23, 123, 'spam', 1.23]
>>> L[:]
[123, 'spam', 1.23]
>>> L[2:]
[1.23]
>>> L[:-1]
[123, 'spam']
>>> L.append(23)
[123, 'spam', 1.23, 23]
>>> L.pop(2)
1.23
>>> L
[123, 'spam', 23]
>>> list = [1,23,4,56,33,656,564]
>>> list.sort()
>>> list
[1, 4, 23, 33, 56, 564, 656]
#adding multiple elements to an existing list
>>> L
[123, 'abc', 1.23, {}]
>>> L.extend([5,6,7])
>>> L
[123, 'abc', 1.23, {}, 5, 6, 7]
#deleting all the elements
>>> L.clear()
>>> L
[]
#deleting a single element by index
>>> L = [123, 'abc', 1.23, {}]
>>> del L[0]
>>> L
['abc', 1.23, {}]
#selecting a partcular column from a 2D list
>>> list2D = [[1,2,3],[4,5,6],[7,8,9]]
>>> list2D[1][2]
6
>>> col2 = [row[1] for row in list2D] #Give me row[1] (2nd element) for each row in matrix M, in a new list.
>>> col2
[2, 5, 8]
>>> M
['bb', 'aa', 'cc']
>>> M.sort()
>>> M
['aa', 'bb', 'cc']
>>> [row[1] for row in M if row[1] % 2 == 0] #Filter out odd items
[2, 8]
#diagonal matrix
>>> diag = [M[i][i] for i in [0, 1, 2]] >>> diag
[1, 5, 9]
# Repeat characters in a string
>>> doubles = [c * 2 for c in 'spam'] >>> doubles
['ss', 'pp', 'aa', 'mm']
>>> list(range(4))
[0, 1, 2, 3]
>>> a = list(range(-6,7,2))
>>> a
[-6, -4, -2, 0, 2, 4, 6]
>>> [[x ** 2, x **3] for x in range(4)]
[[0, 0], [1, 1], [4, 8], [9, 27]]
>>> [[x, x / 2, x * 2] for x in range(-6, 7, 2) if x > 0]
[[2, 1.0, 4], [4, 2.0, 8], [6, 3.0, 12]]
>>> [[x, int(x / 2), x * 2] for x in range(-6, 7, 2) if x > 0]
[[2, 1, 4], [4, 2, 8], [6, 3, 12]]
>>> G = (sum(row) for row in M)
>>> G
<generator object <genexpr> at 0x105b29408>
>>> next(G)
6
>>> next(G)
15
>>> next(G)
24
'''Dictionaries :: Dictionaries, the only mapping type (not a sequence) in Python’s core objects set, are also mutable '''
>>> D = {}
>>> type(D)
<class 'dict'>
>>> D = {'food': 'Spam', 'quantity': 4, 'color': 'pink'}
>>> D
{'food': 'Spam', 'quantity': 4, 'color': 'pink'}
#using dict to define a dictionary
>>> bob1 = dict(name='Bob', job='dev', age=40)
>>> bob1
{'age': 40, 'name': 'Bob', 'job': 'dev'}
#zipping way to define dictionary
>>> bob2 = dict(zip(['name', 'job', 'age'], ['Bob', 'dev', 40]))
>>> bob2
{'name': 'Bob', 'job': 'dev', 'age': 40}
#Complex nesting of different types in python - one of the advantage of using python, complex nesting is easy to implement
>>> rec = {'name': {'first': 'Bob', 'last': 'Smith'}, 'jobs': ['dev', 'mgr'], 'age': 40.5}
>>> rec['jobs'][1]
'mgr'
>>> rec['name']['last']
'Smith'
>>> rec['jobs'].append('support')
>>> rec
{'name': {'first': 'Bob', 'last': 'Smith'}, 'jobs': ['dev', 'mgr', 'support'], 'age': 40.5}
# In Python, we lose the last reference to an object by rebinding its variable to something else:
>>> rec = 0
#Python has a feature known as garbage collection that cleans up unused memory as your program runs and frees you from having to manage such details in your code.
>>> D = {'a': 1, 'b': 2, 'c': 3}
#so now, what ".get" does is it will select the data with the key 'x' in dictionary D, if it doesnyt find it, it will return 0
>>> value = D.get('x', 0)
>>> value
0
#Sorting Keys: for Loops
>>> sorted(D)
['a', 'b', 'c']
>>> Ks = list(D.keys())
>>> Ks
['a', 'c', 'b']
>>> Ks.sort()
>>> Ks
['a', 'b', 'c']
#Tuples :: tuples are sequences, like lists, but they are immutable. Functionally, they’re used to represent fixed collections of items.
>>> T = (1, 2, 3, 4, 5)
>>> len(T)
5
>>> T + (5,6)
(1, 2, 3, 4, 5, 5, 6)
>>> T
(1, 2, 3, 4, 5)
>>> T[0]
1
>>> T.index(4)
3
>>> T.count(4)
1
#tuples provide a sort of integrity constraint
#String slicing, so the last number is the gap of skipping, that is 1,3,5,... will be skipped
>>> S = "I a m s a d"
>>> S[::2]
'Iamsad'
#the third index if given negative will reverse the selection
>>> S[::-2]
'dasmaI'
>>> S
'I evol being alone'
>>> S[5:1:-1]
'love'
>>>
>>> S[::-1]
'enola gnieb love I'
#converting whatever we have into string
>>> repr(42)
'42'
#converting into ASCII
>>> ord('A')
65
#converting integer to binary
>>> bin(13)
'0b1101'
#converting binary to integer
>>> int('1101', 2)
13
| [
"noreply@github.com"
] | noreply@github.com |
94e03f267155e84ef42e255bed470c54f6626f90 | 2881903801698749889648a36b0d8dc51a6e59a0 | /cannabis_reports/tests/test_apis_dispensaries.py | b1d83aec5fca76bad68f430d4303e23be54ec6a0 | [
"MIT",
"Python-2.0"
] | permissive | lordlala/python-cannabis-reports | 139149681315ab6521c7fb8775209c8a3eff87a9 | 6b64cecd881247eee23f9d2223f6d364bafad937 | refs/heads/master | 2021-12-30T11:42:15.183290 | 2018-02-08T21:06:53 | 2018-02-08T21:06:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,854 | py | # -*- coding: utf-8 -*-
# Copyright 2017-TODAY LasLabs Inc.
# License MIT (https://opensource.org/licenses/MIT).
from .api_common import recorder
from .api_entity import ApiEntityAbstract
from ..models.dispensary import Dispensary
class TestApisDispensaries(ApiEntityAbstract):
    """Tests the Dispensaries API endpoint."""
    # API slug of the fixture dispensary exercised by every cassette below.
    UID = 'ca/san-francisco/grass-roots'
    def setUp(self):
        """Point ``self.endpoint`` at the Dispensaries API before each test."""
        super(TestApisDispensaries, self).setUp()
        self.endpoint = self.api.Dispensaries
    @recorder.use_cassette()
    def test_apis_dispensaries_list(self):
        """It should parse the response and return the proper object."""
        self._test_apis_objects_list(Dispensary)
    @recorder.use_cassette()
    def test_apis_dispensaries_get(self):
        """It should return the dispensary matching the given name."""
        self._test_apis_objects_get('Grass Roots')
    @recorder.use_cassette()
    def test_apis_dispensaries_get_extracts(self):
        """It should return the extracts for a dispensary."""
        self._test_apis_objects_get_extracts()
    @recorder.use_cassette()
    def test_apis_dispensaries_get_strains(self):
        """It should return the strains for a dispensary."""
        self._test_apis_objects_get_strains()
    @recorder.use_cassette()
    def test_apis_dispensaries_get_edibles(self):
        """It should return the edibles for a dispensary."""
        self._test_apis_objects_get_edibles()
    @recorder.use_cassette()
    def test_apis_dispensaries_get_products(self):
        """It should return the products for a dispensary."""
        self._test_apis_objects_get_products()
    @recorder.use_cassette()
    def test_apis_dispensaries_get_available(self):
        """It should not be implemented for this endpoint."""
        with self.assertRaises(NotImplementedError):
            self._test_apis_objects_get_available()
| [
"dave@laslabs.com"
] | dave@laslabs.com |
f72a7e1fb1b3ae77d80542b0a582a4170c564ef7 | 440149cfffd932d6f86e4660f0934443bd483c9a | /Pathfinding Visualizer Project/a_star.py | 06ebaf790b9a949616352e27c19ec703aa3bd02f | [] | no_license | siddhantgpt99/cv-projets | 72176d19487ccd5a0cde3dff8d4767a217e19c5a | 559246966bad4b3c7c0b1992aee2151239515e3b | refs/heads/master | 2022-11-27T11:41:37.441504 | 2020-08-09T17:49:35 | 2020-08-09T17:49:35 | 286,029,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,308 | py | from queues import PriorityQueue
def heuristic(tile1, tile2):
    """Manhattan (taxicab) distance between the grid positions of two tiles.

    :param tile1: Tile with ``r``/``c`` grid coordinates
    :param tile2: Tile with ``r``/``c`` grid coordinates
    :return: int distance
    """
    row_delta = tile1.r - tile2.r
    col_delta = tile1.c - tile2.c
    return abs(row_delta) + abs(col_delta)
def reconstruct_path(came_from, start, end):
    """Rebuild the start-to-end path from the ``came_from`` parent map.

    :param came_from: dict mapping each tile to the tile it was reached from
    :param start: Tile the search began at
    :param end: Tile the search reached
    :return: list of tiles from ``start`` to ``end`` inclusive
    """
    # Bug fix: the original seeded the list with `end`, appended after every
    # parent step (so `start` was appended inside the loop), and then appended
    # `start` once more afterwards — duplicating the start tile in the result.
    current = end
    path = []
    while current != start:
        path.append(current)
        current = came_from[current]
    path.append(start)
    path.reverse()
    return path
def a_star(start, end):
    """Run an A* search from ``start`` to ``end`` over the tiles' neighbour graph.

    Frontier ordering comes from the PriorityQueue in queues.py; every tile
    popped from the frontier is marked via its ``visit()`` method.

    :param start: Tile to search from
    :param end: Tile to reach
    :return: tuple ``(came_from, cost_so_far, success, has_been_next)`` —
             ``came_from`` maps each reached tile to its parent tile,
             ``cost_so_far`` maps each reached tile to its best-known cost,
             ``success`` is True when ``end`` was popped from the frontier,
             and ``has_been_next`` lists every tile ever considered as a
             candidate successor, in first-seen order.
    """
    frontier = PriorityQueue()
    frontier.put(start, 0)
    came_from = {start: None}
    cost_so_far = {start: 0}
    considered = []
    found = False
    while not frontier.empty():
        node = frontier.pop()
        node.visit()
        if node == end:
            print("A* Pathfinder, successful.")
            found = True
            break
        for neighbor in node.neighbours:
            if neighbor not in considered:
                considered.append(neighbor)
            tentative = cost_so_far[node] + neighbor.weight
            if neighbor not in cost_so_far or tentative < cost_so_far[neighbor]:
                cost_so_far[neighbor] = tentative
                # priority = path cost so far + Manhattan estimate to the goal
                frontier.put(neighbor, tentative + heuristic(end, neighbor))
                came_from[neighbor] = node
    return came_from, cost_so_far, found, considered
| [
"noreply@github.com"
] | noreply@github.com |
b100c782ba41017bc03dda7b492a6fc56956b54c | 96ff8afa4e440a5adba69a2f3438c05ccbc9cf15 | /core/plotter/ts_plot.py | 94e225e7e6b5ad7e2a02448a9527e69c84dabd00 | [] | no_license | StockLiM/SLiM | 8e05cc36d5f30a3bce0ba92d462b23f39cbc6212 | 0d4977d3457a7645a90a77e025615148ce29e594 | refs/heads/master | 2021-01-21T15:22:59.247618 | 2017-06-30T03:57:16 | 2017-06-30T03:57:16 | 89,649,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,874 | py | import pandas as pd
import matplotlib.pylab as plt
import numpy as np
import os
from datetime import datetime
## sma plot
def plot_sma_symbol(symbol, date, price_from = 'Close', short_sma = True, long_sma = False):
print "plot sma on {0}".format(symbol)
fn = 'data/price/{0}/{1}.csv'.format(date, symbol)
if not os.path.exists(fn):
print "price file not exists"
return
data = pd.read_csv(fn)
price = np.array(data[price_from])
ts = price[::-1]
# d = range(len(ts))
d = map(lambda x: datetime.strptime(x, "%d-%b-%y"), list(data.iloc[:,0])[::-1])
path_ = 'data/price/{0}'.format(date)
if not os.path.exists(path_):
os.mkdir(path_)
fn_short = "data/plot/{0}/{0}_short_sma_{1}_{2}.png".format(date, symbol, price_from)
fn_long = "data/plot/{0}/{0}_long_sma_{1}_{2}.png".format(date, symbol, price_from)
if short_sma and not os.path.exists(fn_short):
plt.clf()
sma_10 = pd.rolling_mean(ts, window = 10)
sma_20 = pd.rolling_mean(ts, window = 20)
sma_50 = pd.rolling_mean(ts, window = 50)
l1, = plt.plot(d, ts, color = 'black', label = 'price')
l2, = plt.plot(d, sma_10, color = 'blue', label = 'sma 10')
l3, = plt.plot(d, sma_20, color = 'red', label = 'sma 20')
l4, = plt.plot(d, sma_50, color = 'green', label = 'sma 50')
x1,x2,y1,y2 = plt.axis()
plt.axis((x1, x2 + 15, y1, y2))
plt.legend([l1, l2, l3, l4], loc = 0)
plt.title("short sma(price: {2}) plot for {0} on {1}".format(symbol, date, price_from))
plt.savefig(fn_short)
# return
if long_sma and not os.path.exists(fn_long):
plt.clf()
sma_10 = pd.rolling_mean(ts, window = 50)
sma_20 = pd.rolling_mean(ts, window = 200)
l1, = plt.plot(d, ts, color = 'black', label = 'price')
l2, = plt.plot(d, sma_10, color = 'blue', label = 'sma 50')
l3, = plt.plot(d, sma_20, color = 'red', label = 'sma 200')
x1,x2,y1,y2 = plt.axis()
plt.axis((x1, x2 + 15, y1, y2))
plt.legend(loc = 0)
plt.title("long sma(price: {2}) plot for {0} on {1}".format(symbol, date, price_from))
plt.savefig(fn_long)
# return
## ewma plot
def plot_ewma_symbol(symbol, date, price_from, short_ewma = True, long_ewma = False, reverse = True, slot = None):
    """Plot exponentially-weighted moving averages (EWMA) for one ticker.

    Reads data/price/<date>/<symbol>.csv and saves PNG charts under
    data/plot/<date>/.  Existing output files are not regenerated.

    :param symbol: ticker symbol
    :param date: sub-directory name of the price dump
    :param price_from: CSV column to plot
    :param short_ewma: plot EWMA 10/20/50 overlay
    :param long_ewma: plot EWMA 50/200 overlay
    :param reverse: True for newest-first CSVs ('%d-%b-%y' dates); False to
        slice rows [slot[0]:slot[1]] of an oldest-first CSV ('%Y-%m-%d' dates)
    :param slot: (start, end) row slice; required when reverse is False --
        TODO confirm callers always supply it in that case
    """
    print "plot ewma on {0}".format(symbol)
    fn = 'data/price/{0}/{1}.csv'.format(date, symbol)
    if not os.path.exists(fn):
        print "price file not exists"
        return
    data = pd.read_csv(fn)
    price = np.array(data[price_from])
    if reverse:
        # Newest-first dump: reverse to chronological order.
        ts = price[::-1]
        # d = range(len(ts))
        d = map(lambda x: datetime.strptime(x, "%d-%b-%y"), list(data.iloc[:,0])[::-1])
        fn_short = "data/plot/{0}/{0}_short_ewma_{1}_{2}.png".format(date, symbol, price_from)
        fn_long = "data/plot/{0}/{0}_long_ewma_{1}_{2}.png".format(date, symbol, price_from)
    else:
        # Oldest-first dump: plot only the requested row window.
        ts = price[slot[0]:slot[1]]
        d = map(lambda x: datetime.strptime(x, "%Y-%m-%d"), list(data.iloc[:,0][slot[0]:slot[1]]))
        fn_short = "data/plot/{0}/{0}_short_ewma_{1}_{2}_{3}_{4}.png".format(date, symbol, price_from, slot[0], slot[1])
        fn_long = "data/plot/{0}/{0}_long_ewma_{1}_{2}.png".format(date, symbol, price_from)
    path_ = 'data/plot/{0}'.format(date)
    if not os.path.exists(path_):
        os.mkdir(path_)
    if short_ewma and not os.path.exists(fn_short):
        plt.clf()
        # NOTE: pd.ewma is the pre-0.18 pandas API (modern: Series.ewm(...).mean()).
        ewma_10 = pd.ewma(ts, span = 10, adjust = False)
        ewma_20 = pd.ewma(ts, span = 20, adjust = False)
        ewma_50 = pd.ewma(ts, span = 50, adjust = False)
        l1, = plt.plot(d, ts, color = 'black', label = 'price')
        l2, = plt.plot(d, ewma_10, color = 'blue', label = 'ewma 10')
        l3, = plt.plot(d, ewma_20, color = 'red', label = 'ewma 20')
        l4, = plt.plot(d, ewma_50, color = 'green', label = 'ewma 50')
        x1,x2,y1,y2 = plt.axis()
        plt.axis((x1, x2 + 15, y1, y2))  # widen x-axis so the legend fits
        plt.legend(loc = 0)
        plt.title("short ewma(price: {2}) plot for {0} on {1}".format(symbol, date, price_from))
        plt.savefig(fn_short)
        # return
    if long_ewma and not os.path.exists(fn_long):
        plt.clf()
        ewma_50 = pd.ewma(ts, span = 50, adjust = False)
        ewma_200 = pd.ewma(ts, span = 200, adjust = False)
        l1, = plt.plot(d, ts, color = 'black', label = 'price')
        l2, = plt.plot(d, ewma_50, color = 'blue', label = 'ewma 50')
        l3, = plt.plot(d, ewma_200, color = 'red', label = 'ewma 200')
        x1,x2,y1,y2 = plt.axis()
        plt.axis((x1, x2 + 15, y1, y2))
        plt.legend(loc = 0)
        plt.title("long ewma(price: {2}) plot for {0} on {1}".format(symbol, date, price_from))
        plt.savefig(fn_long)
def plot_ma_symbol_list(symbol_source, date, price_from):
    """Generate an EWMA chart for every ticker of a watch list.

    symbol_source names a CSV under data/symbol/ (e.g. 'tech', 'health')
    whose 'Symbol' column lists the tickers to plot.
    """
    symbols = pd.read_csv('data/symbol/{0}.csv'.format(symbol_source))['Symbol']
    for sym in symbols:
        plot_ewma_symbol(sym, date, price_from)
def img2html_symbol_list(symbol_source, date, type_, price_from):
    """Write a plain HTML report embedding the generated plot images.

    type_ selects which chart family to embed:
    short_sma, short_ewma, long_sma or long_ewma.
    """
    symbols = pd.read_csv('data/symbol/{0}.csv'.format(symbol_source))['Symbol']
    title = '{0}_{1}_{2}_{3}'.format(symbol_source, type_, price_from, date)
    fn = 'report/{0}.html'.format(title)
    # Assemble the document in memory, then write it in a single call.
    parts = ['<!DOCTYPE html>\n']
    parts.append('<html><head><title>{0}</title></head><body><center>\n'.format(title))
    for sym in symbols:
        parts.append('<h2>{0}</h2>\n'.format(sym))
        parts.append('<img src="../data/plot/{0}/{0}_{1}_{2}_{3}.png">\n'.format(date, type_, sym, price_from))
    parts.append('</center></body></html>')
    with open(fn, 'w') as f:
        f.write(''.join(parts))
    print("report saving to {0}".format(fn))
def plot_volume_symbol(symbol, date):
    """Plot a bar chart of the most recent 90 days of trading volume.

    Reads data/price/<date>/<symbol>.csv (newest row first) and saves the
    figure under data/plot/<date>/; skips when the PNG already exists.
    """
    print "plot volume on {0}".format(symbol)
    fn = 'data/price/{0}/{1}.csv'.format(date, symbol)
    if not os.path.exists(fn):
        print "price file not exists"
        return
    window = 90  # number of most-recent trading days to show
    data = pd.read_csv(fn)
    vol = np.array(data['Volume'][:window])
    ts = vol[::-1]  # reverse newest-first rows to chronological order
    d = map(lambda x: datetime.strptime(x, "%d-%b-%y"), list(data.iloc[:window,0])[::-1])
    path_ = 'data/plot/{0}'.format(date)
    if not os.path.exists(path_):
        os.mkdir(path_)
    fn = "data/plot/{0}/{0}_{1}_volume.png".format(date, symbol)
    if not os.path.exists(fn):
        plt.clf()
        plt.bar(d, ts, color = 'b', label = 'volume')
        x1,x2,y1,y2 = plt.axis()
        plt.axis((x1, x2 + 15, y1, y2))  # widen x-axis so the legend fits
        plt.legend(loc = 0)
        plt.title("Volume plot for {0} on {1} (Recent {2} days)".format(symbol, date, window))
        plt.savefig(fn)
if __name__ == '__main__':
    # Ad-hoc driver: plot a windowed EWMA chart for one ticker from the
    # 'history' price dump.  The commented-out calls are earlier entry points
    # kept for reference.
    symbol = 'GOOG'
    # date = '05_18'
    # date = datetime.now().strftime("%m_%d") # today
    # plot_sma_symbol(symbol = symbol, date = date, short_sma = True, long_sma = True)
    plot_ewma_symbol(symbol = symbol, date = 'history', price_from = 'Close', reverse = False, slot = [200,700])
    # plot_ma_symbol_list('watch', date)
    # plot_ma_symbol_list('bull_0406', date)
    # img2html_symbol_list('big_bull_0406', date, type_ = 'short_ewma')
    # img2html_symbol_list('hold', date, type_ = 'short_ewma')
    # plot_volume_symbol(symbol, date)
| [
"yejing1991@hotmail.com"
] | yejing1991@hotmail.com |
de991516b7cdac9ff957510dfda568d7ebbe7241 | b6e10cf6a4c7498093a78665d4c03a3cfbabd25b | /code.py | 6163544367a522bbeae538a62de0ced93fb3fbf4 | [
"Apache-2.0"
] | permissive | prashantm2812/StudentsPerformance | dbbbb733f9d00ff207ca42f06884b4b3157d8635 | 394779f7e64051476a1dc7b03f09c5f8fd8e7d1f | refs/heads/master | 2020-04-10T21:11:12.587199 | 2018-12-11T06:55:02 | 2018-12-11T06:55:02 | 161,290,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,015 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 10 22:09:03 2018
@author: Prashant Maheshwari
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
dataset = pd.read_csv('StudentsPerformance.csv')
X = dataset[['gender', 'race/ethnicity', 'parental level of education', 'lunch', 'test preparation course']]
Y = dataset[['math score', 'reading score', 'writing score']]
X = pd.get_dummies(X, columns = ['gender', 'race/ethnicity', 'parental level of education', 'lunch', 'test preparation course'])
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.10, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
sc1 = StandardScaler()
Y_train = sc1.fit_transform(Y_train)
Y_test = sc1.transform(Y_test)
import keras
from keras.models import Sequential
from keras.layers import Dense
def create_model():
model = Sequential()
model.add(Dense(units = 17, kernel_initializer = 'uniform', activation = 'relu', input_dim = 17))
model.add(Dense(units = 9, kernel_initializer = 'uniform', activation = 'relu'))
model.add(Dense(units = 3, kernel_initializer = 'uniform', activation = 'linear'))
model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
return model
seed = 7
np.random.seed(seed)
model = KerasClassifier(build_fn = create_model, epochs = 100, batch_size = 5, verbose = 50)
from sklearn.model_selection import GridSearchCV as gscv
batch_size = [32, 64 ,100]
epochs = [25, 50, 100, 200, 150]
param_grid = dict(batch_size=batch_size, epochs=epochs)
grid = gscv(estimator=model, param_grid=param_grid, verbose = 60, n_jobs= -1)
grid_search = grid.fit(X_train, Y_train)
grid_search.best_score_#0.7499999933772616
grid_search.best_params_#{'batch_size': 100, 'epochs': 200}
| [
"noreply@github.com"
] | noreply@github.com |
c0e29612bc1ab99f21ed31d148930eda30c512c3 | 2a67dc681af4c4b9ef7a8e18c2ff75377dc5b44f | /aws.ses.EventDestination.sns-destination-python/__main__.py | d32f60d788281d4b38651670141a088b90714d15 | [] | no_license | ehubbard/templates-aws | e323b693a18234defe6bd56ffcc64095dc58e3a1 | 2ae2e7a5d05490078017fed6d132dcdde1f21c63 | refs/heads/master | 2022-11-17T13:53:14.531872 | 2020-07-10T21:56:27 | 2020-07-10T21:56:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | import pulumi
import pulumi_aws as aws
sns = aws.ses.EventDestination("sns",
configuration_set_name=aws_ses_configuration_set["example"]["name"],
enabled=True,
matching_types=[
"bounce",
"send",
],
sns_destination={
"topic_arn": aws_sns_topic["example"]["arn"],
})
| [
"jvp@justinvp.com"
] | jvp@justinvp.com |
b4f54871d6f20147ee3d122fda8a109d09514286 | 5549b8152781830be13147be7330e904a2f96d4b | /lagom_example/lagom_example/wsgi.py | 084584e914c059c8f2a4482d9178aec1d87c01b2 | [] | no_license | meadsteve/lagom-django-example | a00670131ccb2673986bc27a753238e227880fb5 | 9ac58b9cd423effed53ea96d3bf2c0391bd56064 | refs/heads/master | 2021-12-29T06:04:26.067144 | 2021-09-15T08:03:20 | 2021-09-15T08:03:20 | 238,651,005 | 0 | 0 | null | 2021-09-22T18:32:53 | 2020-02-06T09:24:35 | Python | UTF-8 | Python | false | false | 403 | py | """
WSGI config for lagom_example project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Default to the project's settings module when the environment does not
# already specify one (e.g. when imported directly by a WSGI server).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lagom_example.settings')
# Module-level WSGI callable that servers such as gunicorn/uWSGI import.
application = get_wsgi_application()
| [
"meadsteve@gmail.com"
] | meadsteve@gmail.com |
6a3d435f81dfff1dfcaaa011dedf56f129b984c3 | a6fafaabc803f4292ddaa0c50ba88941f5200be4 | /gobotany/core/distribution_places.py | 1f8ec958aa111e14559d624c0617d2ebfb4ee04d | [] | no_license | biofsoares/gobotany-app | cc0f164f1ab12b3177b03626170853e5731008ff | edd9ce53b1f2a92d7f4a0bbf05e4bd88942a6c6d | refs/heads/master | 2021-01-16T21:43:01.388365 | 2015-12-30T19:55:08 | 2015-12-30T19:55:08 | 51,067,589 | 1 | 0 | null | 2016-02-04T10:08:24 | 2016-02-04T10:08:24 | null | UTF-8 | Python | false | false | 2,933 | py | # List all the places where distribution data are tracked, at both
# the state or province and county levels.
#
# This is used by the Admin interface during the bulk addition of a set
# of distribution records for a new plant name.
#
# Format: state or province abbreviation, county (optional)
DISTRIBUTION_PLACES = [
('AB', ''),
('AK', ''),
('AL', ''),
('AR', ''),
('AZ', ''),
('BC', ''),
('CA', ''),
('CO', ''),
('CT', ''),
('CT', 'Fairfield'),
('CT', 'Hartford'),
('CT', 'Litchfield'),
('CT', 'Middlesex'),
('CT', 'New Haven'),
('CT', 'New London'),
('CT', 'Tolland'),
('CT', 'Windham'),
('DE', ''),
('FL', ''),
('GA', ''),
('HI', ''),
('IA', ''),
('ID', ''),
('IL', ''),
('IN', ''),
('KS', ''),
('KY', ''),
('LA', ''),
('MA', ''),
('MA', 'Barnstable'),
('MA', 'Berkshire'),
('MA', 'Bristol'),
('MA', 'Dukes'),
('MA', 'Essex'),
('MA', 'Franklin'),
('MA', 'Hampden'),
('MA', 'Hampshire'),
('MA', 'Middlesex'),
('MA', 'Nantucket'),
('MA', 'Norfolk'),
('MA', 'Plymouth'),
('MA', 'Suffolk'),
('MA', 'Worcester'),
('MB', ''),
('MD', ''),
('ME', ''),
('ME', 'Androscoggin'),
('ME', 'Aroostook'),
('ME', 'Cumberland'),
('ME', 'Franklin'),
('ME', 'Hancock'),
('ME', 'Kennebec'),
('ME', 'Knox'),
('ME', 'Lincoln'),
('ME', 'Oxford'),
('ME', 'Penobscot'),
('ME', 'Piscataquis'),
('ME', 'Sagadahoc'),
('ME', 'Somerset'),
('ME', 'Waldo'),
('ME', 'Washington'),
('ME', 'York'),
('MI', ''),
('MN', ''),
('MO', ''),
('MS', ''),
('MT', ''),
('NB', ''),
('NC', ''),
('ND', ''),
('NE', ''),
('NH', ''),
('NH', 'Belknap'),
('NH', 'Caroll'),
('NH', 'Cheshire'),
('NH', 'Coos'),
('NH', 'Grafton'),
('NH', 'Hillsborough'),
('NH', 'Merrimack'),
('NH', 'Rockingham'),
('NH', 'Strafford'),
('NH', 'Sullivan'),
('NJ', ''),
('NL', ''),
('NM', ''),
('NS', ''),
('NT', ''),
('NU', ''),
('NV', ''),
('NY', ''),
('OH', ''),
('OK', ''),
('ON', ''),
('OR', ''),
('PA', ''),
('PE', ''),
('QC', ''),
('RI', ''),
('RI', 'Bristol'),
('RI', 'Kent'),
('RI', 'Newport'),
('RI', 'Providence'),
('RI', 'Washington'),
('SC', ''),
('SD', ''),
('SK', ''),
('TN', ''),
('TX', ''),
('UT', ''),
('VA', ''),
('VT', ''),
('VT', 'Addison'),
('VT', 'Bennington'),
('VT', 'Caledonia'),
('VT', 'Chittenden'),
('VT', 'Essex'),
('VT', 'Franklin'),
('VT', 'Grand Isle'),
('VT', 'Lamoille'),
('VT', 'Orange'),
('VT', 'Orleans'),
('VT', 'Rutland'),
('VT', 'Washington'),
('VT', 'Windham'),
('VT', 'Windsor'),
('WA', ''),
('WI', ''),
('WV', ''),
('WY', ''),
('YT', ''),
] | [
"jnga@users.noreply.github.com"
] | jnga@users.noreply.github.com |
aa08416cec64433ef17052cae24d44ab961b544f | 2d9a17e2b896d2f6a90913a4ba02d41f0ede5dd0 | /_gsinfo/qiyecxb-ct/qycxb_spider.py | c629ad7e0a0c3154e2b6a4ad7ae34b42379b6c08 | [] | no_license | wolfwhoami/xxxxx | 1cf2ed2c8ed78048d87cccf2953ca86c0871a783 | 670787ec71127bc05c1645cc3d8ef7c3a91fe84b | refs/heads/master | 2020-03-30T00:44:55.864817 | 2016-12-16T01:45:03 | 2016-12-16T01:45:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,218 | py | #!/usr/bin/env python
# -*- coding:utf8 -*-
import os
import sys
sys.path.append(sys.path[0]+"/..")
print sys.path
import time
from spider.spider import Spider, AccountErrors
import re
from spider.savebin import BinSaver
import random
import threading
import traceback
import spider.util
from spider.savebin import FileSaver
from qycxb_AES import CCIQ_AES
# Input lines already processed (seeded from beijing_already_detail1.txt).
filter_name = set()
# Company names already stored, used to de-duplicate results by name.
bloom = set()
class QycxbSpider(Spider):
    """
    Query full company details from basic company info via the
    qianzhan.com mobile API (proxy example: 121.40.186.237:18889:ipin:helloipin).

    Input: all_company_list.txt, one Python-dict literal per line with
    oc_name / oc_code / oc_area keys.  Results and failures are appended to
    the beijing_* output files; progress is de-duplicated via the
    module-level filter_name / bloom sets.
    """
    def __init__(self):
        self._can_use_proxy_num = 0
        self.is_debug = False
        if self.is_debug:
            # Debug mode: single worker thread, fixed proxy in req_all().
            Spider.__init__(self, 1)
            #self.proxy_error_cnt = 0
        else:
            # One worker thread per proxy; worker tid indexes proxies_dict.
            self.proxies_dict = []
            self.read_proxy("../../_ct_proxy/proxy_all_filter.txt")
            Spider.__init__(self, len(self.proxies_dict))
        self._aes_ = CCIQ_AES()
        # successful query results
        self.query_success = FileSaver("beijing_query_detail1.txt")
        # failed queries
        self.query_failure = FileSaver("beijing_query_detail_failure1.txt")
        # inputs already crawled
        self.already_cname_list = FileSaver("beijing_already_detail1.txt")
        # seed the already-crawled set from disk
        self.init_cname()
        # Pre-captured encrypted "extJson" request payloads, one per user agent.
        self.extJsons = ["Hoi6oX70l9whauZmjq8jVAmoe3UspXXhX9mPG+KAeqs1rKZVr/uapICH92P/Crryt63u28aP4QP665AzcT/jN5Go1o3bvwMvVIkuN9e60k6WI2pVFBrwZMvxwW6BnQukSzDSlyPvEhgpR5DIHQEV6C51hMgp4Zc3OkTSsyezAm4=",
                         "ctlCXDvoyaH2pCIArrgvXp7zrZTzpz2Q5rukh+aWvupEFABw6P2AvbmaN+HJ7IZgDJ/kgBkJt/rLppSGitYCPKGR2IGv6OXZsrJGgbRB3G3Ac4K8xpX3aMB5s8Ci2a/YpTpioZxAvptqJsQUCoNn0tLCOVM4XxMJQWbrErkOcl4=",
                         "ctlCXDvoyaH2pCIArrgvXp7zrZTzpz2Q5rukh+aWvupEFABw6P2AvbmaN+HJ7IZgDJ/kgBkJt/rLppSGitYCPKGR2IGv6OXZsrJGgbRB3G1U2wdOlL49/aDwt3NZNp4TGa5iBFpYLm69F/6PPFoXIR/Aw5p48//8OgZFpddDUwQ="]
        self.user_agents = ["=CCIQ/2.0.1 (iPhone; iOS 9.1; Scale/2.00)",
                            "=CCIQ/2.0.1 (iPhone; iOS 8.1.3; Scale/2.00)",
                            "=CCIQ/2.0.1 (iPhone; iOS 8.4; Scale/2.00)"]
        self.is_first = True
        self.init_time = 0

    def req_all(self, url, encryptedJson, retry=0):
        """POST an AES-encrypted payload to the API with a random UA/extJson
        pair; retries up to 3 times on connection failure and throttles
        between requests."""
        number = random.randrange(0, 3, 1)  # pick matching UA + extJson
        self.select_user_agent(self.user_agents[number])
        param = spider.util.utf8str({"encryptedJson":self._aes_.encrypt(spider.util.utf8str(encryptedJson)), "extJson":self.extJsons[number]})
        param = param.replace('/', "\/")  # API expects escaped slashes in the JSON body
        if self.is_first:
            self.init_time = time.time()
            print '初始化时间',self.init_time
            self.is_first = False
        if self.is_debug:
            res = self.request_url(url, headers={"Content-Type": "application/json"}, data=param, proxies={'http': 'http://ipin:helloipin@192.168.1.45:3428', 'https': 'https://ipin:helloipin@192.168.1.45:3428'})
            #res = self.request_url(url, headers={"Content-Type": "application/json"}, data=param, proxies={'http': 'http://137.135.166.225:8120', 'https': 'https://137.135.166.225:8120'})
        else:
            # Each worker thread uses its own proxy.
            res = self.request_url(url, headers={"Content-Type": "application/json"}, data=param, proxies=self.proxies_dict[self.get_tid()])
        if res is None:
            if retry < 3:
                time.sleep(3)
                return self.req_all(url, encryptedJson, retry=(retry+1))
            else:
                return None
        if res.code == 200:
            # Long randomized delay after a success to avoid rate limiting.
            time.sleep(random.randrange(30, 50, 1))
        else:
            time.sleep(5)
        return res

    def init_cname(self):
        """Load previously crawled input lines into the filter_name set."""
        with open("beijing_already_detail1.txt", "r") as f:
            for line in f:
                filter_name.add(line.strip())

    def wait_q_breakable(self):
        """Block until all job queues drain and no worker is running;
        returns False early if every worker has exited."""
        lt = 0
        while True:
            if not self.job_queue.empty() or not self.job_queue2.empty() or not self.job_queue3.empty():
                time.sleep(5)
            if time.time() < lt + 1 and self._running_count == 0:
                return True
            time.sleep(2)
            lt = time.time()
            if self._worker_count == 0:
                return False

    def dispatch(self):
        """Queue one job per not-yet-crawled line of all_company_list.txt,
        then wait for the queues to drain and signal shutdown."""
        with open("all_company_list.txt", "r") as f:
            cnt = 0
            for line in f:
                line = line.strip()
                cnt += 1
                if line in filter_name:
                    #print cnt, "already spider!!!"
                    continue
                job = {"line": line, "cnt": cnt, "retry": 1}
                self.add_job(job, True)
        self.wait_q_breakable()
        self.add_job(None, True)  # sentinel: tells workers to stop

    def record_spider(self, line, cname):
        """
        Mark this input line as crawled -- counted regardless of whether the
        query succeeded or failed.
        """
        filter_name.add(line)
        self.already_cname_list.append(line)
        bloom.add(cname)

    def run_job(self, jobid):
        """Worker entry point: unpack the queued job and fetch the detail."""
        line = jobid.get("line")
        cnt = jobid.get("cnt")
        retry = jobid.get("retry")
        self.get_detail(line, cnt, retry)
        #time.sleep(random.randrange(5, 11, 1))

    def get_detail(self, line, cnt, retry):
        """Fetch the combined company detail, then enrich it with
        shareholder, investment and branch data, and persist the result.

        Re-queues itself on retryable HTTP errors; raises NoAccountError to
        kill the worker when its proxy looks dead."""
        tid = self.get_tid()
        param = None
        try:
            # Each input line is a Python-dict literal (NOTE(review): eval of
            # file content -- safe only because the input file is self-produced).
            param = eval(line)
        except Exception as err:
            print 'tid=%d --- cnt=%d --- data is not json, return'%(tid, cnt)
            self.record_spider(line, 'UNKNOW')
            return
        cname = param['oc_name']
        if cname in bloom:
            # Name already stored: fall back to the alternate query name.
            cname = param['query_name']
            if cname in bloom:
                print 'query_name:%s aleready crawler...' % cname
                return
        ccode = param['oc_code']
        carea = param['oc_area']
        url = "http://appsvc.qiye.qianzhan.com/OrgCompany.svc/orgcompany/combine/detail"
        encryptedJson = {
            "bl_oc_code" : ccode,#code, #"71526726X"
            "v1" : "QZOrgV005",
            "isDirect" : "0",
            "bl_oc_name" : cname,#cname, #"腾讯科技"
            "bl_oc_area" : carea #area #"4403"
        }
        res = self.req_all(url, encryptedJson)
        res_code = 0
        if res is None:
            # Connection-level failure: retry, and give up on this proxy
            # after 10 consecutive failures.
            if self.get_fail_cnt(1, 'failcount-none') < 10:
                self.re_add_job({'line': line,'cnt': cnt, 'retry': retry})
                print "tid=%d --- cnt=%d --- cname=%s --- retry=%d --- res.code=%d " % (tid, cnt, cname, retry, res_code)
                return
            else:
                self.re_add_job({'line': line, 'cnt': cnt, 'retry': (retry+1)})
                self._can_use_proxy_num -= 1
                raise AccountErrors.NoAccountError("Maybe the proxy invalid,failcount-none = [ %d ]" % self.get_fail_cnt(0, 'failcount-none'))
        else:
            setattr(self._curltls, 'failcount-none', 0)
        res_code = res.code
        if (res_code >= 400 and res_code < 500) or res_code == 202 :
            # Client error / throttled: re-queue and track per-proxy failures.
            self.re_add_job({'line': line,'cnt': cnt, 'retry': (retry+1)})
            print "tid=%d --- cnt=%d --- cname=%s --- retry=%d --- res.code=%d " % (tid, cnt, cname, retry, res_code)
            if self.get_fail_cnt(1, 'failcount-400') > 5:
                self._can_use_proxy_num -= 1
                raise AccountErrors.NoAccountError("Maybe the proxy invalid,failcount-400 = [ %d ]" % self.get_fail_cnt(0, 'failcount-400'))
            return
        else:
            setattr(self._curltls, 'failcount-400', 0)
        if res_code >= 500:
            # Server error: re-queue with linear backoff.
            self.re_add_job({'line': line, 'cnt': cnt, 'retry': (retry+1)})
            print "tid=%d --- cnt=%d --- cname=%s --- retry=%d --- res.code=%d " % (tid, cnt, cname, retry, res_code)
            time.sleep(retry*2)
            return
        elif res_code == 200:
            try:
                c = eval(res.text)['c']  # 'c' carries the AES-encrypted payload
            except Exception as err:
                print "tid=%d --- cnt=%d --- cname=%s --- retry=%d --- res.code=%d res.text exception " % (tid, cnt, cname, retry, res_code)#, "\n", res.text
                #param["error_type"] = "res_text_error"
                #self.query_failure.append(spider.util.utf8str(param))
                #self.record_spider(line, cname)
                self.re_add_job({'line': line, 'cnt': cnt, 'retry': retry})
                return
            if len(c) == 0:
                print "tid=%d --- cnt=%d --- cname=%s --- retry=%d --- res.code=%d --- exception 'C' IS NULL" % (tid, cnt, cname, retry, res_code)
                param["error_type"] = "c=0"
                self.query_failure.append(spider.util.utf8str(param))
                self.record_spider(line, cname)
                return
            # Response payload is AES-encrypted with a fixed key.
            result = CCIQ_AES("BF1856A312580D41256311147089E1CC").decrypt(c)
            try:
                detail = eval(result)
            except Exception as err:
                print "tid=%d --- cnt=%d --- cname=%s --- retry=%d --- res.code=%d --- exception result:%s" % (tid, cnt, cname, retry, res_code, result)
                param["error_type"] = "result_error"
                self.query_failure.append(spider.util.utf8str(param))
                self.record_spider(line, cname)
                return
            # shareholder info
            listGD = self.get_gd(carea, ccode, cname)
            if listGD is not None:
                #print "tid=", tid, " listGD=", spider.util.utf8str(listGD)
                detail['listGD'] = listGD['listGD']
            # investment info
            list_inversted = self.get_inversted(cname)
            if list_inversted is not None:
                #print "tid=", tid, " list_inversted=", spider.util.utf8str(list_inversted)
                detail['inversted'] = list_inversted['inversted']
            # branch-office info
            branch = []
            list_branch = self.get_branch(cname, list_branch=branch)
            if list_branch is not None:
                #print "tid=", tid, " list_branch=", spider.util.utf8str(list_branch)
                detail['Branch'] = list_branch #['Branch']
            self.query_success.append(spider.util.utf8str(detail))
            self.record_spider(line, cname)
            print "tid=%d --- cnt=%d --- cname=%s --- retry=%d --- res.code=%d @@@ success:\n %s" % (tid, cnt, cname, retry, res_code, spider.util.utf8str(detail))
        else:
            param["error_type"] = "unknown_error:%d" % res_code
            self.query_failure.append(spider.util.utf8str(param))
            self.record_spider(line, cname)
            print "tid=%d --- cnt=%d --- cname=%s --- retry=%d --- res.code=%d --- exception UNKNOW ERROR" % (tid, cnt, cname, retry, res_code)
            return

    def get_gd(self, area, code, cname, retry=0):
        """
        Fetch shareholder info for one company; retries up to 5 times with
        linear backoff, returns the decrypted dict or None.
        """
        url = "http://appsvc.qiye.qianzhan.com/OrgCompany.svc/orgcompany/gd/detail"
        encryptedJson = {
            "bl_oc_area" : area,
            "v1" : "QZOrgV005",
            "bl_oc_code" : code
        }
        res = self.req_all(url, encryptedJson)
        if res is None:
            return None
        if res.code == 200:
            try:
                c = eval(res.text)['c']
                if len(c) == 0:
                    print "get_gd --- cname=%s --- retry=%d --- reason:len(c)=0" % (cname, retry)
                    return None
                result = CCIQ_AES("BF1856A312580D41256311147089E1CC").decrypt(c)
                return eval(result)
            except Exception as err:
                print "get_gd --- cname=%s --- retry=%d --- reason:%s" % (cname, retry, err)
                if retry < 5:
                    retry += 1
                    time.sleep(retry*1.5)
                    return self.get_gd(area, code, cname, retry=retry)
                else:
                    return None
        else:
            print "get_gd --- cname=%s --- retry=%d --- res.code=%d" % (cname, retry, res.code)
            if retry < 5:
                retry += 1
                time.sleep(retry*1.5)
                return self.get_gd(area, code, cname, retry=retry)
            else:
                return None

    def get_inversted(self, cname, retry=0):
        """
        Fetch outbound-investment info for one company; same retry scheme
        as get_gd().
        """
        url = "http://appsvc.qiye.qianzhan.com/OrgCompany.svc/orgcompany/map/invesment"
        encryptedJson = {
            "input" : cname,
            "v1" : "QZOrgV005"
        }
        res = self.req_all(url, encryptedJson)
        if res is None:
            return None
        if res.code == 200:
            try:
                c = eval(res.text)['c']
                if len(c) == 0:
                    print "get_inversted --- cname=%s --- retry=%d --- reason:len(c)=0" % (cname, retry)
                    return None
                result = CCIQ_AES("BF1856A312580D41256311147089E1CC").decrypt(c)
                return eval(result)
            except Exception as err:
                print "get_inversted --- cname=%s --- retry=%d --- reason:%s" % (cname, retry, err)
                if retry < 5:
                    retry += 1
                    time.sleep(retry*1.5)
                    return self.get_inversted(cname, retry=retry)
                else:
                    return None
        else:
            print "get_inversted --- cname=%s --- retry=%d --- res.code=%d" % (cname, retry, res.code)
            if retry < 5:
                retry += 1
                time.sleep(retry*1.5)
                return self.get_inversted(cname, retry=retry)
            else:
                return None

    def get_branch(self,cname, now_page=1, list_branch=[], retry=0):
        """
        Fetch branch-office info, paging through up to 4 pages of 10 rows.
        NOTE(review): mutable default list_branch=[] is the classic Python
        pitfall -- results accumulate across calls that omit the argument;
        the only caller (get_detail) passes a fresh list, so it currently
        works.  Also, the "Branch is NULL" print below has 4 %-placeholders
        but only 3 arguments and would raise TypeError if that path is hit.
        """
        url = "http://appsvc.qiye.qianzhan.com/OrgCompany.svc/orgcompany/branch/select/page"
        encryptedJson = {
            "companyName" : cname,
            "v1" : "QZOrgV005",
            "page" : now_page,
            "pagesize" : "10"
        }
        res = self.req_all(url, encryptedJson)
        if res is None:
            return None
        if res.code == 200:
            try:
                c = eval(res.text)['c']
                if len(c) == 0:
                    print "get_branch --- cname=%s --- retry=%d --- reason:len(c)=0" % (cname, retry)
                    return None
                result = CCIQ_AES("BF1856A312580D41256311147089E1CC").decrypt(c)
                temp = eval(result)
                if temp is not None:
                    for t in temp['Branch']:
                        list_branch.append(t)
                    if len(temp['Branch']) == 10:
                        # Full page: there may be more; stop after page 4.
                        if now_page > 3:
                            return list_branch
                        now_page += 1
                        print cname, "翻页 -----------------------------------> now_page", now_page
                        return self.get_branch(cname, now_page=now_page, list_branch=list_branch, retry=retry)
                    else:
                        return list_branch
                else:
                    print "get_branch --- cname=%s --- retry=%d --- now_page=%d --- res.code=%d --- Branch is NULL" % (cname, retry, now_page)
                    return None
            except Exception as err:
                print "get_branch --- cname=%s --- retry=%d --- reason:%s" % (cname, retry, err)
                if retry < 5:
                    retry += 1
                    time.sleep(retry*1.5)
                    return self.get_branch(cname, now_page=now_page, list_branch=list_branch, retry=retry)
                else:
                    return None
        else:
            print "get_branch --- cname=%s --- retry=%d --- res.code=%d" % (cname, retry, res.code)
            if retry < 5:
                retry += 1
                time.sleep(retry*1.5)
                return self.get_branch(cname, now_page=now_page, list_branch=list_branch, retry=retry)
            else:
                return None

    def get_fail_cnt(self, addv , type):
        """Per-thread failure counter stored on the thread-local object;
        addv increments the named counter, 0 just reads it."""
        fc = getattr(self._curltls, type, 0)
        if (addv):
            fc += addv
        setattr(self._curltls, type, fc)
        return fc

    def event_handler(self, evt, msg, **kwargs):
        """Send a notification mail when the whole crawl finishes."""
        if evt == 'DONE':
            msg += '企业查询宝APP公司详情detail查询已经停止...'
            spider.util.sendmail('chentao@ipin.com', '%s DONE' % sys.argv[0], msg)

    def read_proxy(self,fn):
        """Load proxy definitions (one per line) into proxies_dict."""
        with open(fn, 'r') as f:
            for line in f:
                line = line.strip()
                self._match_proxy(line)
        print " loaded [ %d ] proxis " % len(self.proxies_dict)

    def _match_proxy(self,line):
        """Parse 'host:port[:user[:password]]' and append requests-style
        proxy dicts for http and https."""
        m = re.match('([0-9.]+):(\d+):([a-z0-9]+):([a-z0-9._-]+)$', line, re.I)
        m1 = re.match('([0-9.]+):(\d+):([a-z0-9]+)$', line, re.I)
        if m:
            # host:port:user:password -> user:password@host:port
            prstr = '%s:%s@%s:%s' % (m.group(3), m.group(4), m.group(1), m.group(2))
            proxies = {'http': 'http://' + prstr, 'https': 'https://' + prstr}
        elif m1:
            # host:port:user (no password) -- NOTE(review): the user part is
            # dropped here; confirm that is intentional.
            prstr = '%s:%s' % (m1.group(1), m1.group(2))
            proxies = {'http': 'http://' + prstr, 'https': 'https://' + prstr}
        else:
            proxies = {'http': 'http://' + line, 'https': 'https://' + line}
        self.proxies_dict.append(proxies)
if __name__ == "__main__":
s = QycxbSpider()
s.run()
#s.get_branch("江苏武进建工集团有限公司")
| [
"jianghao@ipin.com"
] | jianghao@ipin.com |
5cb9c51015c50cab850bea8216889f5c99c937d9 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2_neat/16_0_2_Jormungandr_Revenge_of_the_pancakes.py | d9925b4d479f3e794bba1c134eedd620908d2b23 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,096 | py | #!/usr/bin/env python
__author__ = 'Bill'
def check_pancakes(n):
"""(check_pancakes):
function to test for all face up
:param n: the pancakes string
"""
for ch in n:
if ch == '-':
return False
return True
def flip_pancakes(n):
"""(flip_pancakes):
function to flip pancakes
:param n: the pancakes string
"""
n = list(n)
dict = {'+':'-', '-':'+'}
first = n[0]
i = 0
for ch in n:
if ch != first:
break
i += 1
for j in xrange(i):
n[j] = dict[first]
n = "".join(n)
return n
from misc import input_, output_
num_cases, cases = input_('B-large.in')
Results = []
for case in cases:
case = case.rstrip('\n')
i = 0
face_up = check_pancakes(case)
if face_up == True:
Results.append(i)
else:
while check_pancakes(case) == False:
case = flip_pancakes(case)
i += 1
Results.append(i)
output_(Results, 'Revenge_of_the_pancakes_large.out') | [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
5265e0967e113d081e9131c0f58e4a14c2139144 | 7b9c1e0e6c40804dc11090b6b2d9b4c61fce3f12 | /FIB_HERITAGE_METHOD.py | 4a6f632a061ba42bc6892fb6c74cc294e2d1cfdc | [] | no_license | denizcetiner/rosalindpractice | 7474a07f0018c2582b3fbb8db2f686bceb687ac7 | 4dfe56e7b7af1680380c4f7a1048c174a6c405b7 | refs/heads/master | 2020-03-19T16:40:34.277493 | 2018-10-08T17:38:01 | 2018-10-08T17:38:01 | 136,724,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,286 | py | from graphviz import Digraph
class RabbitPairMonth:
life_expectancy = -1
give_birth_to = 3
id_counter = 1
maturity_month = 1
def __init__(self, id, birth_time, current_month):
self.id = id
self.birth_time = birth_time
self.current_month = current_month
self.children = []
self.older = None
def __str__(self):
return "ID={0}, M={1}, B={2}".format(self.id, self.current_month, self.birth_time)
@staticmethod
def get_next_id() -> int:
result = RabbitPairMonth.id_counter
RabbitPairMonth.id_counter += 1
return result
def is_pair_dying(self) -> bool:
return (self.current_month - self.birth_time) == RabbitPairMonth.life_expectancy - 1
def is_mature(self) -> bool:
return (self.current_month - self.birth_time) >= RabbitPairMonth.maturity_month
def give_birth(self):
if self.is_mature():
for i in range(RabbitPairMonth.give_birth_to):
id_for_child = RabbitPairMonth.get_next_id()
child_pair = RabbitPairMonth(id_for_child, self.current_month + 1, self.current_month + 1)
self.children.append(child_pair)
def grow_older(self):
alive_next_month = not self.is_pair_dying()
if alive_next_month:
self.older = RabbitPairMonth(self.id, self.birth_time, self.current_month + 1)
def create_next_month(self):
self.give_birth()
self.grow_older()
def create_wabbits_heritage(adam_and_eve: 'RabbitPairMonth', observe_for_months: int):
rabbits_heap_tree = [adam_and_eve]
index_rabbits_heap_tree = 0
while index_rabbits_heap_tree < len(rabbits_heap_tree):
current_pair = rabbits_heap_tree[index_rabbits_heap_tree]
current_month = current_pair.current_month
if current_month > observe_for_months:
break
current_pair.create_next_month()
rabbits_heap_tree.extend(current_pair.children)
if current_pair.older is not None:
rabbits_heap_tree.append(current_pair.older)
index_rabbits_heap_tree += 1
return rabbits_heap_tree
def create_graph(heap_tree: []):
dot = Digraph(comment='Wabbits Breeding By Month')
dot.attr(rankdir="LR")
i = 0
root = heap_tree[i]
current = root
while current.older is not None or len(current.children) > 0:
if current.older is not None:
dot.edge(str(current), str(current.older))
for child in current.children:
dot.edge(str(current), str(child))
i += 1
if i >= len(heap_tree):
break
else:
current = heap_tree[i]
dot_source = dot.source
def run(input="5 3", maturity_in_months=1):
params = input.split()
months = int(params[0])
gives_birth_to_pairs = int(params[1])
starting_month = 1
adam_and_eve = RabbitPairMonth(RabbitPairMonth.get_next_id(), starting_month, starting_month)
wabbits_heritage = create_wabbits_heritage(adam_and_eve, months)
create_graph(wabbits_heritage)
count_of_months = 0
for pair_month in wabbits_heritage:
if pair_month.current_month == months:
count_of_months += 1
print(count_of_months)
return count_of_months | [
"deniz.cetiner94@gmail.com"
] | deniz.cetiner94@gmail.com |
5cde4d44f65f3c97fcdfd7ef7b717207957f7006 | b25a4e997a74dfe036d8beeff39d03bac526af94 | /weibov/spiders/stock.py | 51c82cd291ee7b6cb86caa46a471186f15603083 | [] | no_license | lianghongjie/weibo | 0ca2dc73cb4ac8a79f3ce99d37f8209db46eb08e | f5327ba1070f506ac40d3dd8d69c8d99fd1a7630 | refs/heads/master | 2020-03-13T11:58:47.343020 | 2018-04-26T06:51:19 | 2018-04-26T06:51:19 | 131,110,509 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,820 | py | # -*- coding: utf-8 -*-
import scrapy
from ..util.create_url import GeneratorUrl, ARTICLE
import json
from datetime import datetime
from scrapy.http import Request
from ..items import StockItem, UserItem
import time
import os
class StockSpider(scrapy.Spider):
    """Crawl Weibo search results for each stock listed in test/have.csv,
    yielding per-post StockItem and per-author UserItem records."""
    name = 'stock'

    def start_requests(self):
        """Read test/have.csv ("company(...)…,ticker" per line) and schedule
        one request per generated search URL."""
        file_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'test/have.csv')
        # Read everything up front so the file handle is not held open
        # across the generator's yields.
        with open(file_path, 'r') as fl:
            lines = fl.readlines()
        gen_url = GeneratorUrl()
        for line in lines:
            fields = line.strip("\n").split(",")
            company = fields[0].split("(")[0]  # drop the parenthesised suffix
            ticker = fields[-1]
            for url in gen_url.get_stock_urls(company=company, ticker=ticker):
                time.sleep(4)  # crude rate limiting between requests
                yield Request(url=url, callback=self.parse,
                              meta={'company': company, 'ticker': ticker,
                                    'proxy': 'http://127.0.0.1:3128'})

    def parse(self, response):
        """Parse one search-result JSON page into StockItem/UserItem pairs.

        Returns a flat list alternating post items and their authors, or
        None when the response has no result cards.
        """
        json_data = json.loads(response.text)
        try:
            cards = json_data["cards"]
        except KeyError:
            return
        if not len(cards):  # was json_data["cards"].__len__() — use len()
            return
        user_groups = json_data["cards"][-1]["card_group"]
        items = []
        for user_card in user_groups:
            time_flag = datetime.now()

            stock_item = StockItem()
            user_mblog = user_card["mblog"]
            stock_item["attitudes_count"] = user_mblog.get("attitudes_count", 0)  # likes
            stock_item["comments_count"] = user_mblog.get("comments_count", 0)  # comments
            stock_item["reposts_count"] = user_mblog.get("reposts_count", 0)  # reposts
            stock_item["created_at"] = user_mblog.get("created_at", "")  # post creation time
            stock_item["crawler_time"] = time_flag.strftime('%Y-%m-%d-%H')  # crawl time
            stock_item["reads_count"] = user_mblog.get("reads_count", 0)  # read count
            stock_item["stock_name"] = response.meta['company']  # stock name
            stock_item["content"] = user_mblog.get("text", "")  # post body
            stock_item["source"] = 'stock'
            blog_id = user_mblog.get("id", 0)  # blog id
            stock_item["article_url"] = GeneratorUrl().get_article_url(ARTICLE, id=blog_id)  # article URL

            user_item = UserItem()
            user = user_card["mblog"]["user"]
            user_item["name"] = user.get("name", "")
            user_item["description"] = user.get("description", "")  # profile description
            user_item["user_create_at"] = user.get("created_at", "")  # account creation date
            user_item["credit_score"] = user.get("credit_score", "")  # credit score
            user_item["followers_count"] = user.get("followers_count", 0)  # follower count
            user_item["gender"] = user.get("gender", "")  # gender ("m")
            user_item["geo_enabled"] = user.get("geo_enabled", "")  # labelled "verified?" upstream — confirm semantics
            user_id = user.get("id", "")
            user_item["user_id"] = user_id  # user id
            stock_item['user_id'] = user_id
            user_item["location"] = user.get("location", "")  # user city
            user_item["friends_count"] = user.get("friends_count", 0)  # followees
            user_item["verified_level"] = user.get("verified_level", "")  # verification level
            user_item["statuses_count"] = user.get("statuses_count", 0)  # total posts
            user_item["crawler_time"] = time_flag.strftime('%Y-%m-%d-%H')

            stock_id = response.meta['ticker']
            stock_item['stock_id'] = stock_id
            user_item['source'] = 'find_by_code'

            items.append(stock_item)
            items.append(user_item)
        return items
return items
| [
"abc650301032@qq.com"
] | abc650301032@qq.com |
3b9e253f10e41baad365199b92279151f3fb20da | 07452949bbfe246b970af1f8c8cec969414b0026 | /tests/test_relabel.py | 8eacbe707bdebce7e7dd29f18a5e21a67521d846 | [
"MIT"
] | permissive | monocongo/cvdata | f1e8649e43cb4ab0ca639f22d61a6e5a30bc74a2 | aca1d5d129d7fb0b2be0c5763c9363bad66696a5 | refs/heads/master | 2023-08-18T05:20:30.155204 | 2023-08-03T14:34:29 | 2023-08-03T14:34:29 | 221,716,830 | 18 | 8 | MIT | 2023-08-03T14:34:31 | 2019-11-14T14:30:14 | Python | UTF-8 | Python | false | false | 6,562 | py | import logging
from xml.etree import ElementTree
import pytest
from cvdata import relabel
from assert_utils import elements_equal, text_files_equal
# ------------------------------------------------------------------------------
# disable logging messages
logging.disable(logging.CRITICAL)  # silence all library log output while the tests run
# ------------------------------------------------------------------------------
@pytest.mark.usefixtures(
    "data_dir",
)
def test_relabel_darknet(
        data_dir,
):
    """
    Test for the cvdata.relabel.relabel_darknet() function

    :param data_dir: temporary directory into which test files will be loaded
    """

    darknet_file_name = "darknet_1.txt"
    darknet_file_path = str(data_dir.join(darknet_file_name))

    # confirm that a relabeling won't occur if the old value is not present
    relabel.relabel_darknet(darknet_file_path, 58, 59)
    expected_darknet_file_name = "expected_darknet_1.txt"
    expected_darknet_file_path = str(data_dir.join(expected_darknet_file_name))
    assert text_files_equal(
        darknet_file_path,
        expected_darknet_file_path,
    )

    # confirm that relabeling occurred as expected
    relabel.relabel_darknet(darknet_file_path, 3, 2)
    expected_darknet_file_name = "expected_darknet_2.txt"
    expected_darknet_file_path = str(data_dir.join(expected_darknet_file_name))
    assert text_files_equal(
        darknet_file_path,
        expected_darknet_file_path,
    )

    # confirm that various invalid arguments raise an error
    #
    # BUG FIX: the original put many calls inside a single pytest.raises
    # block; once the first statement raises, the block exits and the
    # remaining calls are never executed. Each call now gets its own
    # raises context.
    # NOTE(review): the exception types for the previously-unreached calls
    # are assumed from the original grouping — confirm against the
    # validation logic in cvdata.relabel.
    type_error_args = [
        (darknet_file_path, None, 0),
        (darknet_file_path, 0, None),
        (1, 0, 1),
        (None, 1, 0),
        ("/not/present", 0, 1),
        (1.0, "strings won't work", 0),
        (darknet_file_path, 1, "strings won't work"),
        (darknet_file_path, 1.0, 0),
        (darknet_file_path, 2, 1.0),
        (darknet_file_path, True, 0),
        (darknet_file_path, 1, True),
    ]
    for args in type_error_args:
        with pytest.raises(TypeError):
            relabel.relabel_darknet(*args)

    value_error_args = [
        (darknet_file_path, -5, 1),
        (darknet_file_path, 1, -4),
    ]
    for args in value_error_args:
        with pytest.raises(ValueError):
            relabel.relabel_darknet(*args)
# ------------------------------------------------------------------------------
@pytest.mark.usefixtures(
    "data_dir",
)
def test_relabel_kitti(
        data_dir,
):
    """
    Test for the cvdata.relabel.relabel_kitti() function

    :param data_dir: temporary directory into which test files will be loaded
    """

    kitti_file_name = "kitti_1.txt"
    kitti_file_path = str(data_dir.join(kitti_file_name))

    # confirm that a relabeling won't occur if the old value is not present
    relabel.relabel_kitti(kitti_file_path, "NOT_PRESENT", "NOT_USED")
    expected_kitti_file_name = "expected_kitti_1.txt"
    expected_kitti_file_path = str(data_dir.join(expected_kitti_file_name))
    assert text_files_equal(
        kitti_file_path,
        expected_kitti_file_path,
    )

    # confirm that relabeling occurred as expected
    relabel.relabel_kitti(kitti_file_path, "pistol", "firearm")
    expected_kitti_file_name = "expected_kitti_2.txt"
    expected_kitti_file_path = str(data_dir.join(expected_kitti_file_name))
    assert text_files_equal(
        kitti_file_path,
        expected_kitti_file_path,
    )

    # confirm that various invalid arguments raise an error
    #
    # BUG FIX: only the first statement inside a pytest.raises block ever
    # runs — the raised exception exits the block and the remaining calls
    # were silently skipped. One raises context per invalid call.
    # NOTE(review): ValueError is assumed for every call, matching the
    # original grouping — confirm against cvdata.relabel's validation.
    invalid_args = [
        (None, "don't care", "don't care"),
        (kitti_file_path, None, "don't care"),
        (kitti_file_path, "don't care", None),
        ("/not/present", "don't care", "don't care"),
        (1, "don't care", "don't care"),
        (1.0, "don't care", "don't care"),
        (kitti_file_path, 1, "don't care"),
        (kitti_file_path, 1.0, "don't care"),
        (kitti_file_path, True, "don't care"),
        (kitti_file_path, "don't care", 1),
        (kitti_file_path, "don't care", 1.0),
        (kitti_file_path, "don't care", True),
    ]
    for args in invalid_args:
        with pytest.raises(ValueError):
            relabel.relabel_kitti(*args)
# ------------------------------------------------------------------------------
@pytest.mark.usefixtures(
    "data_dir",
)
def test_relabel_pascal(
        data_dir,
):
    """
    Test for the cvdata.relabel.relabel_pascal() function

    :param data_dir: temporary directory into which test files will be loaded
    """

    pascal_file_name = "pascal_1.xml"
    pascal_file_path = str(data_dir.join(pascal_file_name))

    # confirm that a relabeling won't occur if the old value is not present
    etree_before_relabel = ElementTree.parse(pascal_file_path)
    relabel.relabel_pascal(pascal_file_path, "NOT_PRESENT", "NOT_USED")
    etree_after_relabel = ElementTree.parse(pascal_file_path)
    assert elements_equal(
        etree_before_relabel.getroot(),
        etree_after_relabel.getroot(),
    )

    # confirm that relabeling occurred as expected
    relabel.relabel_pascal(pascal_file_path, "pistol", "firearm")
    etree_after_relabel = ElementTree.parse(pascal_file_path)
    expected_pascal_file_name = "expected_pascal_1.xml"
    expected_pascal_file_path = str(data_dir.join(expected_pascal_file_name))
    etree_expected_after_relabel = ElementTree.parse(expected_pascal_file_path)
    assert elements_equal(
        etree_expected_after_relabel.getroot(),
        etree_after_relabel.getroot(),
    )

    # confirm that various invalid arguments raise an error
    #
    # BUG FIX: only the first statement inside a pytest.raises block ever
    # runs — the remaining invalid calls were silently skipped. One raises
    # context per invalid call.
    # NOTE(review): ValueError is assumed for every call, matching the
    # original grouping — confirm against cvdata.relabel's validation.
    invalid_args = [
        (None, "don't care", "don't care"),
        (pascal_file_path, None, "don't care"),
        (pascal_file_path, "don't care", None),
        ("/not/present", "don't care", "don't care"),
        (1, "don't care", "don't care"),
        (1.0, "don't care", "don't care"),
        (pascal_file_path, 1, "don't care"),
        (pascal_file_path, 1.0, "don't care"),
        (pascal_file_path, True, "don't care"),
        (pascal_file_path, "don't care", 1),
        (pascal_file_path, "don't care", 1.0),
        (pascal_file_path, "don't care", True),
    ]
    for args in invalid_args:
        with pytest.raises(ValueError):
            relabel.relabel_pascal(*args)
| [
"monocongo@gmail.com"
] | monocongo@gmail.com |
6606ff39a891b68aee9bfe862e48704e6ed6a975 | cc08f7a8d284b38975a777b416e1abfa33fc3a2d | /calcs.py | 8a2cf0fae4b3ef8be855c9b5ebd05811e50b3d39 | [] | no_license | EgorOs/osinkin_hw6 | 431d1af9a43767f56f55668db10d255477ca66a4 | f6b4008f4e04f54d83e7680b3d757709a6923d28 | refs/heads/master | 2020-03-26T16:30:31.311553 | 2018-09-04T19:50:01 | 2018-09-04T19:50:01 | 145,106,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,993 | py | #!/usr/bin/env python3
from enum import Enum
from string import ascii_lowercase
class Token:
    """One lexical unit of an expression: an operand or an operator.

    Attributes:
        ch: the raw character (operators/variables) or the integer value
            (numeric operands).
        priority: operator precedence (0 for operands, 4 for brackets).
        calculable: False for variables (lowercase letters), True otherwise.
    """

    # Operator precedence table; anything absent (digits, letters) is 0.
    _PRIORITIES = {'+': 1, '-': 1, '*': 2, '/': 2, '^': 3, '(': 4, ')': 4}

    def __init__(self, ch):
        self.ch = ch
        self.priority = self._PRIORITIES.get(ch, 0)
        self.calculable = ch not in set(ascii_lowercase)
def tokenize(expression):
    """Convert an expression string into a list of positioned Tokens.

    Consecutive digit characters are merged into a single integer-valued
    token; every other character becomes its own token. Each token gets a
    sequential `pos` index.
    """
    tokens = []
    pending_number = None
    next_pos = 0

    def flush_number():
        # Emit the accumulated multi-digit number (if any) as one token.
        nonlocal pending_number, next_pos
        if pending_number is not None:
            number_token = Token(pending_number)
            number_token.pos = next_pos
            tokens.append(number_token)
            next_pos += 1
            pending_number = None

    for ch in expression:
        if ch.isdigit():
            pending_number = (0 if pending_number is None else pending_number) * 10 + int(ch)
        else:
            flush_number()
            symbol_token = Token(ch)
            symbol_token.pos = next_pos
            tokens.append(symbol_token)
            next_pos += 1
    flush_number()
    return tokens
def represent_as_tree(tokens: list) -> dict:
    """Group operator tokens into a priority tree for validation.

    Each operator's effective priority is its own priority plus a bracket
    bias (4 per nesting level). Operators are visited from highest to
    lowest effective priority; adjacent operators of equal priority are
    merged into one "node" (a list of the surrounding tokens). Returns
    {} for unbalanced brackets or an invalid single token; a single valid
    operand is returned as {1: [tokens]}.
    """
    priority_list = []
    bias = 0
    # Pass 1: record (position, effective priority) for every operator.
    for pos, token in enumerate(tokens):
        if token.ch == '(':
            bias += token.priority
        elif token.ch == ')':
            bias -= token.priority
        elif token.priority > 0:
            # find node with highest priority
            priority_list.append((pos, token.priority + bias))
    order = sorted(priority_list, key=lambda x: x[1], reverse=True)
    if bias != 0:
        # brackets are not balanced
        return {}
    if len(tokens) == 1:
        # valid one-symbol expression
        if tokens[0].ch in set(ascii_lowercase) or str(tokens[0].ch).isdigit():
            priority_tree = {}
            priority_tree[1] = [tokens]
            return priority_tree
        else:
            return {}
    priority_tree = {}
    nodes_init_pos = {}
    prev_pos, prev_priority = None, None
    # Pass 2: build nodes of surrounding tokens, merging neighbours that
    # share the same effective priority.
    for pos, priority in order:
        if prev_pos is not None and prev_priority is not None:
            if abs(prev_pos - pos) <= 2 and prev_priority == priority:
                # expand node: this operator adjoins the previous one
                if prev_pos < pos:
                    pos_l, pos_r = pos + 0, pos + 2
                else:
                    pos_l, pos_r = pos - 1, pos + 1
                priority_tree[priority][-1] += [t for t in
                                                tokens[pos_l: pos_r]]
            elif not priority_tree.get(priority):
                # new key: first node at this priority level
                nodes_init_pos[priority] = [pos]
                if prev_pos < pos:
                    pos_l, pos_r = pos - 1, pos + 2
                else:
                    pos_l, pos_r = pos - 1, pos + 2
                pos_l = 0 if pos_l < 0 else pos_l
                # NOTE(review): `pos_l > len(tokens)` looks like it was
                # meant to be `pos_r > len(tokens)` — confirm intent.
                pos_r = len(tokens) if pos_l > len(tokens) else pos_r
                priority_tree[priority] = [
                    [t for t in tokens[pos_l: pos_r]]]
            else:
                # new node with same priority (non-adjacent operator)
                nodes_init_pos[priority].append(pos)
                pos_l = 0 if pos - 1 < 0 else pos - 1
                pos_r = len(tokens) if pos + 2 > len(
                    tokens) else pos + 2
                priority_tree[priority].append(
                    [t for t in tokens[pos_l: pos_r]])
            prev_pos = pos
            prev_priority = priority
        else:
            # create initial node around the highest-priority operator
            nodes_init_pos[priority] = [pos]
            pos_l = 0 if pos - 1 < 0 else pos - 1
            pos_r = len(tokens) if pos + 2 > len(tokens) else pos + 2
            priority_tree[priority] = [
                [t for t in tokens[pos_l: pos_r]]]
            prev_pos = pos
            prev_priority = priority
    return priority_tree
def to_postfix(opcodes):
    """Translate an infix character list to postfix (RPN).

    Shunting-yard variant driven by an action table indexed by
    (stack top, incoming symbol). '|' is the sentinel marking both the
    bottom of the stack and the end of the input. Operands go straight
    to the output; operators are pushed/popped per the table.

    Actions: 1 push incoming, 2 pop stack top to output, 3 drop the
    matching '(', 4 finished, anything else -> invalid input.
    """
    # Equivalent to the original Symbol/Action enum pair, flattened to a
    # plain nested dict (rows = stack top, columns = incoming symbol).
    ACTIONS = {
        '|': {'|': 4, '-': 1, '+': 1, '*': 1, '^': 1, '/': 1, '(': 1, ')': 5},
        '+': {'|': 2, '-': 2, '+': 2, '*': 1, '^': 1, '/': 1, '(': 1, ')': 2},
        '-': {'|': 2, '-': 2, '+': 2, '*': 1, '^': 1, '/': 1, '(': 1, ')': 2},
        '*': {'|': 2, '-': 2, '+': 2, '*': 2, '^': 1, '/': 2, '(': 1, ')': 2},
        '^': {'|': 2, '-': 2, '+': 2, '*': 2, '^': 1, '/': 2, '(': 1, ')': 2},
        '/': {'|': 2, '-': 2, '+': 2, '*': 2, '^': 2, '/': 2, '(': 1, ')': 2},
        '(': {'|': 5, '-': 1, '+': 1, '*': 1, '^': 1, '/': 1, '(': 1, ')': 3},
    }

    stream = list(opcodes) + ['|']
    if stream[0] == '+':
        # A leading unary plus is meaningless — discard it.
        stream.pop(0)
    output = []
    stack = ['|']
    index = 0
    while True:
        sym = stream[index]
        if sym in set(ascii_lowercase) or sym.isdigit():
            output.append(sym)
            index += 1
            continue
        action = ACTIONS[stack[-1]][sym]
        if action == 1:
            stack.append(sym)
            index += 1
        elif action == 2:
            output.append(stack.pop(-1))
        elif action == 3:
            stack.pop(-1)
            index += 1
        elif action == 4:
            break
        else:
            raise Exception('invalid input string', stream)
    return output
class Calculator:
    """Holds an infix expression with derived token-list and priority-tree
    views, postfix conversion (`str()`), and syntax validation.

    Assigning to `tokens` re-tokenizes the value; assigning to `tree`
    rebuilds the priority tree — both via descriptors, so the derived
    views always track `opcodes`.
    """

    class TokenListDescriptor:
        """Descriptor: setting stores the tokenized form of the value."""

        def __init__(self, name):
            self.name = name

        def __get__(self, instance, owner):
            # Falls back to the descriptor object itself when unset.
            return getattr(instance, self.name, self)

        def __set__(self, instance, expression):
            # Re-tokenize on every assignment.
            setattr(instance, self.name, tokenize(expression))

    class CalcTreeDescriptor:
        """Descriptor: setting stores the priority tree of the tokens."""

        def __init__(self, name):
            self.name = name

        def __get__(self, instance, owner):
            return getattr(instance, self.name, self)

        def __set__(self, instance, tokens):
            setattr(instance, self.name, represent_as_tree(tokens))

    # Tokens and tree are calculated each time new value is set
    tokens = TokenListDescriptor('__tokens')
    tree = CalcTreeDescriptor('__tree')

    def __init__(self, opcodes: list, operators=None):
        """
        :param opcodes: expression as a string or list of single characters
        :param operators: optional objects with a .process(opcodes) method,
            applied by optimise()
        """
        if isinstance(opcodes, str):
            opcodes = list(opcodes)
        self.opcodes = opcodes
        self.operators = operators if operators is not None else []
        # Descriptor assignments: tokenize, then build the priority tree.
        self.tokens = opcodes
        self.tree = self.tokens

    def __str__(self) -> str:
        """Return the expression converted to postfix (RPN) notation."""
        lst_postfix = to_postfix(self.opcodes)
        return ''.join(lst_postfix)

    def optimise(self):
        """Run each configured operator over the opcodes in turn."""
        for operator in self.operators:
            # Updating tokens and tree
            self.opcodes = operator.process(self.opcodes)
            self.tokens = self.opcodes
            self.tree = self.tokens

    def validate(self) -> bool:
        """Return True if the expression is syntactically valid."""

        def check_unary_operators(tokens: list) -> bool:
            # Node of +/- tokens: operators and operands must alternate,
            # and the node may not end with an operator.
            prev_priority = None
            if tokens[-1].priority == 1:
                # unary operation can not end with +/-
                return False
            for token in tokens:
                if prev_priority is not None:
                    if token.priority != prev_priority:
                        pass
                    else:
                        return False
                    prev_priority = token.priority
                else:
                    prev_priority = token.priority
            return True

        def check_binary_operators(tokens: list) -> bool:
            # Binary node: needs operand-operator-operand shape; the first
            # and last tokens must be operands or brackets.
            if len(tokens) < 3:
                return False
            prev_priority = None
            prev_token = None
            if tokens[-1].priority not in (0, 4):
                return False
            if tokens[0].priority not in (0, 4):
                return False
            for token in tokens:
                if prev_priority is not None:
                    # literal-zero divisor guard (numeric tokens carry an
                    # int in .ch, so the comparison with 0 is intentional)
                    if prev_token.ch == '/' and token.ch == 0:
                        return False
                    if token.priority != prev_priority:
                        pass
                    else:
                        return False
                    prev_priority = token.priority
                    prev_token = token
                else:
                    prev_priority = token.priority
                    prev_token = token
            return True

        def check_binary_offset(opcodes: list) -> bool:
            # Ensure that binary operators are placed inbetween operands
            tokens = tokenize(opcodes)
            # Drop unary +/- and brackets; what remains must alternate
            # operand (0) / binary operator (2 or 3).
            lst = [t.priority for t in tokens if t.priority not in (1, 4) ]
            prev_priority = lst[0]
            if prev_priority != 0:
                return False
            positioning = [prev_priority]
            for i in range(1, len(lst)):
                curr_priority = lst[i]
                if curr_priority != prev_priority:
                    positioning.append(curr_priority)
                    prev_priority = curr_priority
            expected = (0,)
            for p in positioning:
                if p not in expected:
                    return False
                elif expected == (0,):
                    expected = (2, 3)
                elif expected == (2, 3):
                    expected = (0,)
            if expected == (2, 3):
                return True
            else:
                return False

        tree = self.tree
        opcodes = self.opcodes
        if not tree:
            return False
        if len(opcodes) == 1:
            # represent_as_tree returns only valid token
            # for a single operand scenario
            return True
        # Validate every node of the priority tree.
        for key in tree.keys():
            nodes = tree[key]
            for node in nodes:
                # ignore brackets
                clean_node = [t for t in node if t.priority != 4]
                operation_priority = max(list({t.priority for t in clean_node}))
                if operation_priority == 1:
                    if not check_unary_operators(node):
                        return False
                else:
                    if not check_binary_operators(node):
                        return False
        if not check_binary_offset(opcodes):
            return False
        return True
def validate_test():
    """Self-check: Calculator.validate() against hand-labelled expressions."""
    cases = [
        ('a+2', True),
        ('a-(-2)', True),
        ('a+2-', False),
        ('a+(2+(3+5)', False),
        ('a^2', True),
        ('a^(-2)', True),
        ('-a-2', True),
        ('6/0', False),
        ('a/(b-b)', True),
        ('+a', True),
        ('^a', False),
        ('a^', False),
        ('a^-b', False),
        ('a^+b', False),
        ('a^b', True),
        ('^-b', False),
        ('+b/(0+0)', True),
        ('+b/(0)', True),  # or should this case be considered as False?
        ('1', True),
        ('-(-a)', True),
        ('-((-a))', True),
        ('-(-(-a))', True),
        ('-(*(-a))', False),
        ('-((-a))/', False),
        ('-(-5)', True),
        ('-(a+b)+c-(-d)', True),
        ('-(-(a+b))', True),
    ]
    for expression, expected in cases:
        actual = Calculator(list(expression)).validate()
        if actual != expected:
            print('Error in case for "{}". Actual "{}", expected {}'
                  .format(expression, actual, expected))
def str_test():
    """Self-check: str(Calculator) yields the expected postfix string."""
    cases = [
        ("a", "a"),
        ("-a", "a-"),
        ("(a*(b/c)+((d-f)/k))", "abc/*df-k/+"),
        ("(a)", "a"),
        ("a*(b+c)", "abc+*"),
        ("(a*(b/c)+((d-f)/k))*(h*(g-r))", "abc/*df-k/+hgr-**"),
        ("(x*y)/(j*z)+g", "xy*jz*/g+"),
        ("a-(b+c)", "abc+-"),
        ("a/(b+c)", "abc+/"),
        ("a^(b+c)", "abc+^"),
        ("a^b^c", "abc^^"),
        ("a^(b^c)", "abc^^"),
        ("(a^b)^c", "ab^c^"),
        ("a*b^c", "abc^*"),
        ("(a*b)^c", "ab*c^"),
    ]
    for expression, expected in cases:
        calculator = Calculator(list(expression))
        if str(calculator) != expected:
            print('Error in case for "{}". Actual "{}", expected {}'
                  .format(expression, calculator, expected))
# Run the validation self-checks when the module is executed/imported.
validate_test()
# str_test()
| [
"egoros97@yandex.ru"
] | egoros97@yandex.ru |
ca1da7d4907bfadb4fcbc08eda69b0a43a6152f6 | 637316e4f60b0b4288dd67c4018a7ed89d57d25f | /53. kolkoIkrzyzyk.py | dfcabd88acfda9abd132c91a4db64e72a246fac0 | [] | no_license | wojtez/KomputerSwiatBookPython | 37ff0473b7ed79cb8d531855851175c9d8c3324c | 84d63e1936265b7553e13c76affef20848ebcefd | refs/heads/master | 2022-10-15T12:45:13.508291 | 2020-06-08T23:51:27 | 2020-06-08T23:51:27 | 269,219,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,293 | py | # Program wykorzystjący moduł turtle do tworzenia popularnej gry kółko i krzyżyk
from turtle import *
import math
# side length of one board cell, in turtle steps
bok = 80
# occupancy table: pole[column][row] becomes True once the cell holds a mark
pole = [[False, False, False], [False, False, False], [False, False, False]]
# Draw one board cell as a square
def kwadrat():
    """Draw a single square cell with side length `bok`."""
    for _ in range(4):
        fd(bok)
        left(90)
# Draw the 3x3 playing board
def plansza():
    """Draw the 3x3 grid of cells that forms the board."""
    for _row in range(3):
        # draw the three cells of this row
        for _col in range(3):
            pd()        # pen down: draw over the existing line
            kwadrat()
            pu()        # pen up: move without drawing
            fd(bok)
        # move the cursor back and up to start the next row
        bk(3*bok)
        left(90)
        fd(bok)
        right(90)
# Draw a cross mark
def krzyzyk(a, b):  # a = column, b = row
    """Draw an X centered in board cell (a, b)."""
    pu()
    # jump to the cell centre
    setx(a * bok + bok/2)
    sety(b * bok + bok/2)
    pd()
    left(45)
    # four quarter-length strokes out and back, rotated 90° each time
    for _ in range(4):
        fd(bok/4)
        bk(bok/4)
        left(90)
    right(45)
    pu()
# Draw a circle mark
def kolko(a, b):
    """Draw an O centered in board cell (a, b)."""
    pu()
    setx(a * bok + bok/2)
    # Start below the centre by the circle's radius. The circle is drawn
    # as 36 chords of 3 steps each (circumference 2*pi*r = 108), which
    # gives a radius of 54/pi.
    sety(b * bok + bok/2 - 54/math.pi)
    # alternatively the built-in could be used:
    # sety(circle(50))
    pd()
    for _ in range(36):
        fd(3)
        left(10)
czyj_ruch = "x"  # whose turn it is: "x" (cross) or "0" (circle)
# Place the current player's mark and switch turns
def postaw(a, b):
    """If cell (a, b) is free, draw the mark of the player to move there."""
    global czyj_ruch
    global pole
    # the occupancy table prevents placing x or o in the same cell twice
    if pole[a][b]:
        return
    pole[a][b] = True
    if czyj_ruch == "x":
        krzyzyk(a, b)
        czyj_ruch = "0"
    elif czyj_ruch == "0":
        kolko(a, b)
        czyj_ruch = "x"
# Draw the board, then play a scripted sequence of moves (repeated
# postaw(1, 2) calls are no-ops thanks to the occupancy table), and
# finally wait for Enter before closing.
plansza()
postaw(1, 2)
postaw(0, 1)
postaw(1, 2)
postaw(1, 2)
postaw(1, 2)
postaw(1, 2)
postaw(1, 2)
postaw(1, 2)
postaw(1, 2)
postaw(0, 2)
postaw(1, 0)
postaw(2, 2)
input("Push enter button to finnish game.")
| [
"kozlowski.woj@outlook.com"
] | kozlowski.woj@outlook.com |
aa103ea582f1fe1dccda82638cc5841b408a0c7a | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /988/988.smallest-string-starting-from-leaf.233252752.Accepted.leetcode.py | 22432d1b1812c2fa9c180ef407130c342025bc17 | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | class Solution(object):
def smallestFromLeaf(self, root):
self.result = "~"
def dfs(node, A):
if node:
A.append(chr(node.val + ord('a')))
if not node.left and not node.right:
self.result = min(self.result, "".join(reversed(A)))
dfs(node.left, A)
dfs(node.right, A)
A.pop()
dfs(root, [])
return self.result
| [
"huangyingw@gmail.com"
] | huangyingw@gmail.com |
0a12281b7c925eb8277884979410f9ea73ef9d93 | 1f2d8f8cef89facd60e56c6e4c59e881c1cdbe5d | /hw1/code/main.py | 35a4390a0b00ebe8f3f3839320e38ec5f0cd8275 | [] | no_license | TerminalWitchcraft/CV-HW | abf66e4cad887c9c4d13c24e59dea4f99c6d5bb1 | 7df4daffe16d2c06161f0cf77655d988d369f51d | refs/heads/master | 2022-01-09T21:26:45.794794 | 2019-05-05T02:30:06 | 2019-05-05T02:30:06 | 172,445,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,096 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : b1.py
# Author : Hitesh Paul <hp1293@gmail.com>
# Date : 24.02.2019
# Last Modified Date: 28.02.2019
# Last Modified By : Hitesh Paul <hp1293@gmail.com>
import os
import random
import numpy as np
import plotly
import plotly.graph_objs as go
import plotly.figure_factory as ff
import plotly.io as pio
from collections import defaultdict, Counter, OrderedDict
from PIL import Image
# Load the image
# RGB color strings per channel index; -1 is the grayscale fallback used
# when plotting single-band data.
COLORMAP = {
    -1: "rgb(99,99,99)",
    0: "rgb(215,48,39)",
    1: "rgb(102,189,99)",
    2: "rgb(43,130,189)"
}
# Human-readable trace names, keyed identically to COLORMAP
NAMEMAP = {
    -1: "Gray",
    0: "Red",
    1: "Green",
    2: "Blue"
}
def get_info(im):
    """Print band names, per-band extrema and dimensions of a PIL image."""
    bands = im.getbands()
    print("The image has following bands: ", bands)
    # min/max intensity per band
    print(im.getextrema())
    print("The width of the image is: ", im.width)
    print("The height of the image is: ", im.height)
def plot(data, filename, title, titlex, titley, modes,
         auto_open=True, gray=False, save=False):
    """
    Render a grouped bar chart for the given channel data.

    :param data: sequence of (x, y) pairs indexed by channel mode, so it
        must be keyed consistently with `modes` at the call site
    :param filename: base name (no extension) for the saved html/image
    :param title: chart title
    :param titlex: x-axis label
    :param titley: y-axis label
    :param modes: channel indices to draw (0=R, 1=G, 2=B)
    :param auto_open: open the generated html in a browser
    :param gray: draw every trace in gray instead of its channel color
    :param save: additionally export a static jpeg under charts/
    """
    # exist_ok avoids the check-then-create race of the original
    os.makedirs("html", exist_ok=True)
    traces = []
    for mode in modes:  # direct iteration instead of range(len(modes))
        color = COLORMAP[-1] if gray else COLORMAP[mode]
        name = NAMEMAP[-1] if gray else NAMEMAP[mode]
        traces.append(go.Bar(x=data[mode][0], y=data[mode][1],
                             marker={"line": {"color": color}, "color": color},
                             name=name))
    layout = go.Layout(title=title,
                       xaxis=dict(title=titlex),
                       yaxis=dict(title=titley))
    fig = go.Figure(data=traces, layout=layout)
    plotly.offline.plot(fig, auto_open=auto_open, filename="html/" + filename + ".html", image_filename="images/" + filename)
    if save:
        os.makedirs("charts", exist_ok=True)
        pio.write_image(fig, 'charts/' + filename + ".jpeg", width=1366, height=768, scale=2)
def histogram(im, mode=(0,), denom=1, cummulate=False):
    """
    Compute per-band intensity histograms (optionally cumulative).

    :param im: PIL-style image exposing getdata(band)
    :param mode: band indices (0=R, 1=G, 2=B); a tuple default avoids the
        mutable-default-argument pitfall (callers may still pass lists)
    :param denom: divisor applied to every count (e.g. pixel count -> pdf)
    :param cummulate: if True, y[i] is the running sum of scaled counts
        for intensities 0..i
    :return: list of (x, y) pairs, one per requested band, where x is
        [0..255] and y the (scaled) counts
    """
    ret_data = []
    for key in mode:
        band = np.array(im.getdata(key))
        counts = Counter(band)  # intensity -> pixel count
        x = list(range(256))
        y = []
        running = 0
        for i in x:
            if cummulate:
                # Running sum replaces the original O(256^2) inner loop
                # that re-summed 0..i for every i; the additions happen
                # in the same order, so float results are identical.
                running += counts[i] / denom
                y.append(running)
            else:
                y.append(counts[i] / denom)
        ret_data.append((x, y))
    return ret_data
def normalize(grey_im, mode=[0], gray=True):
    """
    Normalize the histogram according to Leibnitz rule

    Histogram equalization: each output pixel is the (shifted, scaled)
    cumulative count of its input intensity.

    :param grey_im: single-band PIL image
    :param mode: band indices forwarded to histogram(); only mode[0]'s
        cumulative counts are used
    :param gray: unused here — presumably kept for API symmetry with the
        plotting helpers; confirm before removing
    :return: equalized uint8 PIL image

    NOTE(review): the formula mixes cumulative pixel COUNTS (y[x], up to
    width*height) with pixel INTENSITY extrema (np.nanmin/np.amax of the
    image) — verify this matches the intended equalization transform.
    """
    arr = np.array(grey_im)
    denom = grey_im.width * grey_im.height
    data = histogram(grey_im, mode=mode, denom=1, cummulate=True)
    # y[v] = number of pixels with intensity <= v
    y = data[0][1]
    # First method
    vf = np.vectorize(lambda x: ((y[x] - np.nanmin(arr) ) / (denom - np.nanmin(arr))) * np.amax(arr) )
    # ff = np.vectorize(lambda x: np.nanmax(arr) * y[x])
    norm_im = vf(arr)
    norm_im = Image.fromarray(norm_im.astype(np.uint8))
    # norm_im.show()
    return norm_im
def pdf(im, mode=[0]):
    """
    Probability mass of each intensity: the histogram scaled by the
    total pixel count.
    """
    total_pixels = im.width * im.height
    return histogram(im, mode, denom=total_pixels)
def cdf(im, mode=[0]):
    """
    Cumulative distribution of intensities, scaled by the total pixel
    count (built from the running histogram).
    """
    total_pixels = im.width * im.height
    return histogram(im, mode, denom=total_pixels, cummulate=True)
def grayscale(img):
    """
    Convert an RGB image to 8-bit grayscale using ITU-R 601 luma weights
    (0.3 R + 0.59 G + 0.11 B).
    """
    channels = np.array(img)
    luma = (0.3 * channels[:, :, 0]
            + 0.59 * channels[:, :, 1]
            + 0.11 * channels[:, :, 2])
    grey_im = Image.fromarray(luma.astype(np.uint8))
    # grey_im.show()
    return grey_im
def main(filename):
    """Part 1 of the assignment: for the given image, plot per-channel
    histograms, then the grayscale histogram, pdf, cdf, and the
    (twice-)normalized histogram/cdf, saving everything under images/,
    html/ and charts/.

    :param filename: path to the input image (extension assumed 4 chars
        long including the dot, e.g. ".png")
    """
    if not os.path.exists("images"):
        os.mkdir("images")
    im = Image.open(filename)
    get_info(im)
    # strip the extension; reused as the base name for all outputs
    filename = filename[:len(filename)-4]
    save = True
    # Part 1 of assignment
    hist_data = histogram(im, [0,1,2])
    plot(hist_data, filename=filename+"_hist_r", title="Plot of distribution for Red",
         titlex="Intensity Values (0-255)",
         titley="Number of Pixels",
         modes=[0], auto_open=False, save=save)
    plot(hist_data, filename=filename+"_hist_g", title="Plot of distribution for Green",
         titlex="Intensity Values (0-255)",
         titley="Number of Pixels",
         modes=[1], auto_open=False, save=save)
    plot(hist_data, filename=filename+"_hist_b", title="Plot of distribution for Blue",
         titlex="Intensity Values (0-255)",
         titley="Number of Pixels",
         modes=[2], auto_open=False, save=save)
    plot(hist_data, filename=filename+"_hist", title="Plot of distribution for channels[Click on the legend to isolate traces]",
         titlex="Intensity Values (0-255)",
         titley="Number of Pixels",
         modes=[0,1,2], auto_open=False, save=save)
    # Next, create Grayscale images
    grey_im = grayscale(im)
    grey_im.save("images/" + filename + "_grey.jpeg")
    hist_data_gray = histogram(grey_im, [0])
    plot(hist_data_gray, filename=filename+"_hist_gray", title="Plot of distribution for Gray channel",
         titlex="Intensity Values (0-255)",
         titley="Number of Pixels",
         modes=[0], auto_open=False, save=save, gray=True)
    # Next, plot the pdf
    pdf_grey = pdf(grey_im, [0])
    plot(pdf_grey, filename=filename+"_pdf_grey", title="Plot of probability distribution function",
         titlex="Intensity Values (0-255)",
         titley="Probability",
         modes=[0], auto_open=False, save=save, gray=True)
    # Plot the cdf
    cdf_grey = cdf(grey_im, [0])
    plot(cdf_grey, filename=filename+"_cdf_grey", title="Plot of Cummulative distribution function",
         titlex="Intensity Values (0-255)",
         titley="Cummulative",
         modes=[0], auto_open=False, save=save, gray=True)
    # Plot the normalized histogram (equalization applied twice)
    norm_im = normalize(grey_im)
    norm_im = normalize(norm_im)
    norm_im.save("images/" + filename + "_norm.jpeg")
    hist_grey_norm = histogram(norm_im)
    plot(hist_grey_norm, filename=filename+"_norm_hist_gray", title="Plot of Normalized histogram",
         titlex="Intensity Values (0-255)",
         titley="Cummulative",
         modes=[0], auto_open=False, save=save, gray=True)
    cdf_grey_norm = cdf(norm_im, [0])
    plot(cdf_grey_norm, filename=filename+"_norm_cdf_grey", title="Plot of Cummulative distribution function",
         titlex="Intensity Values (0-255)",
         titley="Cummulative",
         modes=[0], auto_open=False, save=save, gray=True)
def manual_threshold(im, threshold):
    """
    Return a new binary image: pixels above `threshold` become 255,
    the rest 0. The input image is not modified.
    """
    pixels = np.array(im)
    binary = np.where(pixels > threshold, 255, 0).astype(pixels.dtype)
    return Image.fromarray(binary)
def threshold(filenames, thresholds):
    """
    Second part of the assignment: binarize each image at several fixed
    thresholds and save the results under images/.
    """
    for filename in filenames:
        base = filename[:len(filename) - 4]  # drop the 4-char extension
        for level in thresholds:
            source = Image.open(filename)
            binarized = manual_threshold(source, level)
            binarized.save("images/" + base + "_t_{}.png".format(str(level)))
def otsu(filenames):
    """
    Second part of the assignment: Otsu's automatic threshold detection.

    For each image, finds the threshold k maximizing the between-class
    variance of the intensity distribution, binarizes the image at k, and
    saves the result plus diagnostic plots.

    Removed from the original: a dead `arr = np.array(im); arr += 1` pair
    and an unused within-loop total mean `ut` — neither affected the
    result.
    """
    for filename in filenames:
        im = Image.open(filename)
        hist_data = pdf(im)
        # Keep only intensities that actually occur (probability > 0)
        hist_temp = {}
        for key, value in zip(hist_data[0][0], hist_data[0][1]):
            if value > 0:
                hist_temp[key] = value
        optim_k = 0
        max_b = 0.0
        x = []
        y = []
        for k in range(0, 256):
            # Class 0: intensities <= k; class 1: intensities > k
            c0 = [v for v in range(k + 1) if v in hist_temp]
            c1 = [v for v in range(k + 1, 256) if v in hist_temp]
            if not c0 or not c1:
                continue  # one class empty -> variance undefined
            omega0 = sum(hist_temp[v] for v in c0)  # class probabilities
            omega1 = sum(hist_temp[v] for v in c1)
            u0 = sum(i * hist_temp[i] for i in c0) / omega0  # class means
            u1 = sum(i * hist_temp[i] for i in c1) / omega1
            # Between-class variance (Otsu's criterion)
            var = (omega0 * omega1) * ((u0 - u1) * (u0 - u1))
            x.append(k)
            y.append(var)
            if var > max_b:
                max_b = var
                optim_k = k
        print("The best k for {} is: ".format(filename), optim_k)
        print("The max variance for {} is: ".format(filename), max_b)
        im1 = manual_threshold(im, optim_k)
        im1.save("images/" + filename[: len(filename) - 4] + "_otsu.jpeg")
        hist_data_gray = histogram(im1, [0])
        plot(hist_data_gray, filename=filename[: len(filename) - 4]+"_hist_gray", title="Plot of distribution for Gray channel",
             titlex="Intensity Values (0-255)",
             titley="Number of Pixels",
             modes=[0], auto_open=False, save=True, gray=True)
        plot([(x,y)], filename=filename[: len(filename) - 4] + "_otsu", title="Plot of variance with respect to intensity level",
             titlex="Intensity Values (0-255)",
             titley="Variance",
             modes=[0], auto_open=False, save=True, gray=True)
def prepare():
    """Delete output directories left by previous runs so results start
    clean. Missing directories are ignored."""
    import shutil
    for directory in ("images", "charts", "html"):
        try:
            shutil.rmtree(directory)
        except OSError:
            # Directory absent (first run) or not removable — best-effort
            # cleanup; the bare `except:` of the original also hid
            # unrelated errors like KeyboardInterrupt.
            pass
prepare()
files = ["b2_a.png", "b2_b.png", "b2_c.png"]
main("b1.png")
threshold(files, [80, 125, 230])
otsu(files)
| [
"git@hiteshpaul.com"
] | git@hiteshpaul.com |
a254002a4d58e94c7726ba1e04f330ff133f6aca | 488f92b110b94cc7d4e3c2fea7ffd244acc6fb23 | /Lab 05 - Loopy Lab/lab_05.py | a130bbb17c393ec4ac18d0fd6a7239befa4c2db7 | [] | no_license | miguelsh410/sanchez-miguel-arcade-games-work | cc4cb6efef40b3fcba079c599a8850207e669653 | 1b777721953cb054a0f714262885750f0c85b04a | refs/heads/master | 2022-05-03T16:16:21.339268 | 2019-07-11T02:17:57 | 2019-07-11T02:17:57 | 196,303,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,433 | py | import arcade
def draw_section_outlines():
    """Outline the eight 300x300 sections, bottom row first."""
    for center_y in (150, 450):
        for center_x in (150, 450, 750, 1050):
            arcade.draw_rectangle_outline(center_x, center_y, 300, 300,
                                          arcade.color.BLACK)
def draw_section_1():
    """Section 1: a solid 30x30 grid of white dots."""
    for r in range(30):
        cy = 296 - r * 10
        for c in range(30):
            arcade.draw_rectangle_filled(5 + c * 10, cy, 5, 5,
                                         arcade.color.WHITE)
def draw_section_2():
    """Section 2: vertical stripes -- even columns white, odd columns black."""
    for r in range(30):
        dot_y = 296 - r * 10
        for c in range(30):
            color = arcade.color.WHITE if c % 2 == 0 else arcade.color.BLACK
            arcade.draw_rectangle_filled(305 + c * 10, dot_y, 5, 5, color)
def draw_section_3():
    """Section 3: horizontal stripes -- even rows black, odd rows white."""
    for r in range(30):
        dot_y = 296 - r * 10
        stripe = arcade.color.BLACK if r % 2 == 0 else arcade.color.WHITE
        for c in range(30):
            arcade.draw_rectangle_filled(605 + c * 10, dot_y, 5, 5, stripe)
def draw_section_4():
    """Section 4: black grid with white dots where the row is odd and the
    column is even.

    Fix: the x origin was 906, one pixel off from the pattern used by every
    other section (dots start 5 px inside each 300 px box: 5, 305, 605, 905).
    """
    for row in range(30):
        for column in range(30):
            x = 905 + (column * 10)
            y = 296 - (row * 10)
            if (row + 1) % 2 == 0 and column % 2 == 0:
                arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.WHITE)
            else:
                arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.BLACK)
def draw_section_5():
    """Section 5 (top-left): white triangle growing from the right edge."""
    for r in range(30):
        dot_y = 596 - r * 10
        for c in range(r):
            arcade.draw_rectangle_filled(5 + (29 - c) * 10, dot_y, 5, 5, arcade.color.WHITE)
def draw_section_6():
    """Section 6: white triangle growing from the left edge (r+1 dots per row)."""
    for r in range(30):
        dot_y = 596 - r * 10
        for c in range(r + 1):
            arcade.draw_rectangle_filled(305 + c * 10, dot_y, 5, 5, arcade.color.WHITE)
def draw_section_7():
    """Section 7: white triangle shrinking from the left edge (30-r dots per row)."""
    for r in range(30):
        dot_y = 596 - r * 10
        for c in range(30 - r):
            arcade.draw_rectangle_filled(605 + c * 10, dot_y, 5, 5, arcade.color.WHITE)
def draw_section_8():
    """Section 8: white triangle shrinking against the right edge."""
    for r in range(30):
        dot_y = 596 - r * 10
        for c in range(30 - r):
            arcade.draw_rectangle_filled(905 + (29 - c) * 10, dot_y, 5, 5, arcade.color.WHITE)
def main():
    """Open the window, render every section once, then hand control to arcade."""
    arcade.open_window(1200, 600, "Lab 05 - Loopy Lab")
    arcade.set_background_color(arcade.color.AIR_FORCE_BLUE)
    arcade.start_render()
    # Grid outlines first, then each section's dot pattern in order.
    draw_section_outlines()
    for section in (draw_section_1, draw_section_2, draw_section_3,
                    draw_section_4, draw_section_5, draw_section_6,
                    draw_section_7, draw_section_8):
        section()
    arcade.finish_render()
    arcade.run()
main() | [
"miguel_sh410@hotmail.com"
] | miguel_sh410@hotmail.com |
8c1b6a3a004b909f82de6758f41f54206c8bda08 | b8c8973b8af953628d0418bccc8a1a0c6ac54895 | /datasets.py | de1606d5cd8ca04dba92ff559ac4376378d1a9a5 | [
"MIT"
] | permissive | Mjacks3/AutoEncoder_Research | b61aa5590c34b0e3f54195761b6b398f6dfe9ce6 | d92bfdfc76f30173db5dc7d880ee1516cb72ba86 | refs/heads/master | 2023-01-11T10:55:38.715465 | 2020-02-14T04:46:34 | 2020-02-14T04:46:34 | 220,724,300 | 1 | 0 | MIT | 2019-12-15T19:36:19 | 2019-11-10T01:05:23 | Python | UTF-8 | Python | false | false | 1,609 | py |
import os

import numpy as np
def load_other_batch_data(data_path='/home/mjacks3/monize/tahdith/datasets/train'):
    """Load every '*embedding' file under `data_path` into one flat float array.

    Each embedding file lives in a sibling directory named after the file
    minus its 14-character '.txt.embedding' suffix.  The first and last line
    of each file are skipped (embedding dump header/footer); the first token
    of each remaining line (the label) is dropped.

    Returns:
        (features, 0) -- features is a flattened 1-D numpy float array of
        all embedding values (the empty list if nothing was found).

    Bug fixes: numpy was used without being imported, the file handle was
    never closed, and the per-row astype(float) wrote the values back into
    a string array (a silent no-op) -- the features are now real floats.
    """
    full_x = []
    for _root, _dirs, files in os.walk(data_path):
        for fname in files:
            if 'embedding' not in fname:
                continue
            fpath = os.path.join(data_path, fname[:-14], fname)
            with open(fpath) as fh:
                lines = fh.readlines()[1:-1]
            rows = [line.split() for line in lines]
            # drop the leading label column, cast the rest once
            x = np.asarray(rows)[:, 1:].astype(float)
            full_x = np.append(full_x, x)
    return full_x, 0
def load_other_data(data_path='/home/mjacks3/monize/tahdith/datasets/train/Java.git/Java.git.txt.embedding'):
    """Load a single embedding file for demo training / testing.

    The first and last line are skipped (dump header/footer).

    Returns:
        x: 2-D float array of embedding vectors (one row per token)
        y: 1-D array of the labels from the first column

    Bug fix: the per-row astype(float) loop assigned the cast values back
    into a string array, silently converting them back to strings; the
    feature columns are now cast once into a real float array.
    """
    print(data_path)
    with open(data_path) as fh:
        lines = fh.readlines()[1:-1]
    data = np.asarray([line.split() for line in lines])
    y = data[:, 0]
    x = data[:, 1:].astype(float)
    return x, y
def load_data(dataset):
    """Load a batch dataset and flatten every sample into a 1-D feature row."""
    feats, labels = load_other_batch_data(dataset)
    flattened = feats.reshape([feats.shape[0], -1])
    return flattened, labels
"""
if __name__ == "__main__":
x, y = load_other_batch_data()
print(x)
x, y = load_other_data('/home/mjacks3/monize/tahdith/datasets/train/Java.git/Java.git.txt.embedding')
#print (x)
"""
| [
"mjacks3@umbc.edu"
] | mjacks3@umbc.edu |
88c931e22e34976270911349be0c9c290864141e | 64045f22ad137eb96c8a54c50b6da42948ceba89 | /pygcn/trainer.py | c2aa0a43570158d2065b1d56d3ec8a64fd20fe23 | [
"MIT"
] | permissive | Haicang/pygcn | ab73b7e2305f9f21987e8f3c8100ff570bcfb425 | 608785e4ef18ee2d579ac9d60bda6110e97d877c | refs/heads/master | 2021-07-24T03:39:19.087499 | 2021-07-02T13:44:44 | 2021-07-02T13:44:44 | 233,192,191 | 0 | 0 | null | 2020-01-11T07:15:15 | 2020-01-11T07:15:15 | null | UTF-8 | Python | false | false | 5,964 | py | import abc
import time
import numpy as np
import torch
import torch.nn as nn
from .utils import *
__all__ = [
'GNNsTrainer'
]
class BaseTrainer(abc.ABC):
    """Abstract base for training loops.

    Bundles model, loss criterion and optimizer with common options
    (epoch count, optional early stopper, accuracy/log hooks).
    Subclasses implement `fit` and `score`.
    """
    def __init__(self,
                 model: nn.Module,
                 criterion,
                 optimizer,
                 epochs,
                 stopper=None,
                 acc_fn=accuracy,
                 log_fn=None,
                 verbose=True):
        # criterion: loss callable applied to (outputs, labels)
        # stopper: optional early-stopping helper exposing step()/load_checkpoint()
        # acc_fn: metric callable (defaults to `accuracy` from .utils)
        # log_fn: custom logging hook (subclasses currently raise if set)
        self.model = model
        self.criterion = criterion
        self.optimizer = optimizer
        self.epochs = epochs
        self.stopper = stopper
        self.acc_fn = acc_fn
        self.log_fn = log_fn
        self.verbose = verbose
    @property
    def has_stopper(self):
        # True when an early-stopping helper was supplied.
        return self.stopper is not None
    @staticmethod
    def _check_inputs(feats) -> list:
        # Normalize a single input (e.g. one tensor) into a list of model args.
        if not isinstance(feats, list):
            return [feats]
        return feats
    @abc.abstractmethod
    def fit(self, *args, **kwargs):
        pass
    @abc.abstractmethod
    def score(self, *args):
        pass
class GNNsTrainer(BaseTrainer):
    """
    Trainer for GNN models. The transductive semi-supervised training of GNNs
    differs from the fully supervised training of MLP-like models: the whole
    graph is forwarded every epoch and boolean node masks select which nodes
    contribute to the loss/metrics.
    """
    def __init__(self,
                 model: nn.Module,
                 criterion,
                 optimizer,
                 epochs,
                 stopper=None,
                 acc_fn=accuracy,
                 log_fn=None,
                 verbose=True):
        super().__init__(model, criterion, optimizer, epochs, stopper, acc_fn, log_fn, verbose)
        # Set to True by fit() when a validation mask is supplied.
        self.validation = False
    @staticmethod
    def _check_mask_types(masks: list):
        # All masks must be boolean torch tensors (node selectors).
        for m in masks:
            assert isinstance(m, torch.Tensor), 'Type Error'
            assert m.dtype == torch.bool
    def score(self, feats: list, labels: torch.Tensor, mask: torch.Tensor):
        """Evaluate the model on the nodes selected by `mask`.

        Returns (accuracy, loss_value) with gradients disabled.
        """
        feats = self._check_inputs(feats)
        model = self.model
        criterion = self.criterion
        model.eval()
        with torch.no_grad():
            outputs = model(*feats)
            acc = self.acc_fn(outputs[mask], labels[mask])
            loss = criterion(outputs[mask], labels[mask])
        return acc, loss.item()
    def _simple_fit(self, feats, labels, train_mask):
        """Train without validation / early stopping.

        Parameters
        ----------
        feats : torch.Tensor or list
        labels : torch.Tensor
        train_mask : torch.Tensor
            (boolean node mask)
        Returns
        -------
        out : None
        """
        feats = self._check_inputs(feats)
        model: nn.Module = self.model
        criterion = self.criterion
        optimizer = self.optimizer
        dur = []
        t_start = time.time()
        for epoch in range(self.epochs):
            model.train()
            # skip the first epochs when timing to avoid warm-up noise
            if epoch >= 3:
                t0 = time.time()
            optimizer.zero_grad()
            output = model(*feats)
            loss = criterion(output[train_mask], labels[train_mask])
            loss.backward()
            optimizer.step()
            if epoch >= 3:
                dur.append(time.time() - t0)
            if self.verbose:
                if self.log_fn is None:
                    acc, train_loss = self.score(feats, labels, train_mask)
                    print('Epoch {:5d} | Time(s) {:.4f} | Loss {:.4f} | Acc {:.4f}'.format(
                        epoch, np.mean(dur), train_loss, acc
                    ))
                else:
                    raise NotImplementedError
        print("Total time elapsed: {:.4f}s".format(time.time() - t_start))
        self.model = model
    def _fit_with_val(self, feats, labels, train_mask, val_mask):
        # Train with validation scoring each epoch; the optional stopper
        # monitors validation accuracy and can end training early, after
        # which the best checkpoint is restored.
        feats = self._check_inputs(feats)
        model: nn.Module = self.model
        criterion = self.criterion
        optimizer = self.optimizer
        stopper = self.stopper
        dur = []
        t_start = time.time()
        for epoch in range(self.epochs):
            model.train()
            if epoch >= 3:
                t0 = time.time()
            optimizer.zero_grad()
            output = model(*feats)
            loss = criterion(output[train_mask], labels[train_mask])
            loss.backward()
            optimizer.step()
            if epoch >= 3:
                dur.append(time.time() - t0)
            with torch.no_grad():
                train_acc, train_loss = self.score(feats, labels, train_mask)
                val_acc, val_loss = self.score(feats, labels, val_mask)
            if stopper is not None:
                if stopper.step(val_acc, model):
                    break
            if self.verbose:
                if self.log_fn is None:
                    print("Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | TrainAcc {:.4f} |"
                          " ValLoss {:.4f} | ValAcc {:.4f}".
                          format(epoch, np.mean(dur), train_loss, train_acc, val_loss, val_acc))
                else:
                    raise NotImplementedError
        print("Total time elapsed: {:.4f}s".format(time.time() - t_start))
        if stopper is not None:
            model.load_state_dict(stopper.load_checkpoint())
        self.model = model
    def fit(self, feats, labels, masks):
        """
        Parameters
        ----------
        feats : list
        labels : torch.Tensor
        masks : torch.Tensor or list[torch.Tensor]
            One mask (train only) or two masks (train, validation).
        Returns
        -------
        out : None
        """
        masks = self._check_inputs(masks)
        self._check_mask_types(masks)
        if len(masks) == 2:
            train_mask, val_mask = masks
            self.validation = True
            self._fit_with_val(feats, labels, train_mask, val_mask)
        elif len(masks) == 1:
            train_mask = masks[0]
            self.validation = False
            self._simple_fit(feats, labels, train_mask)
        else:
            raise TypeError('Something wrong with type of `masks`.')
| [
"aseaboat@outlook.com"
] | aseaboat@outlook.com |
abf9533351f909d1812af5ffd5eba0092ce9247b | 587eed32179cdbf10b4626a3a238d5923af7459c | /ml/xgboost_regression.py | 095d47f0a9a69aa09b7adbbaac64d948e3f6c9c8 | [] | no_license | shushanxingzhe/python_learning | fad57fcf607efe81f769d276a6c4390726c5f1bb | 7fe26ad612790328edab1627e6c1c144a21d79fc | refs/heads/master | 2022-09-12T15:11:50.377981 | 2022-08-04T05:43:51 | 2022-08-04T05:43:51 | 96,225,151 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | import xgboost
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
# Training data: 20 points with x in [5, 10] and targets y = 0.2*x^2 + 3.
X_train = np.linspace(5, 10, 20)
X_train = X_train.reshape((-1, 1))
y_train = 0.2 * X_train ** 2 + 3
# Test data uses x in [15, 20] -- entirely OUTSIDE the training range,
# so the R^2 scores below compare how each booster extrapolates.
X_test = np.linspace(15, 20, 20)
X_test = X_test.reshape((-1, 1))
y_test = 0.2 * X_test ** 2 + 3
# Default (tree) booster fitted and scored on the out-of-range test set.
model = xgboost.XGBRegressor()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(r2_score(y_test, y_pred))
# Left panel: training points, tree-booster predictions, true curve.
plt.subplot(1, 2, 1)
plt.scatter(X_train, y_train)
plt.scatter(X_test, y_pred)
plt.plot(X_test, y_test)
# Same experiment with the linear booster for comparison.
model = xgboost.XGBRegressor(booster='gblinear')
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(r2_score(y_test, y_pred))
# Right panel: training points, linear-booster predictions, true curve.
plt.subplot(1, 2, 2)
plt.scatter(X_train, y_train)
plt.scatter(X_test, y_pred)
plt.plot(X_test, y_test)
plt.show()
| [
"shushanxingzhe@126.com"
] | shushanxingzhe@126.com |
cd331c1a35d97ba039cafc1cf3303a4e88b1f523 | bf3e0c392b198dee8ca2c0d192ba335634e49841 | /utilities/templatetags/recurse.py | d0ca3c4038a6c30fd862e63a8d4412b245fb192a | [] | no_license | swolfod/TOS_Promotions | 88b6b652a2697d1f72591c1d24ee0387b564b834 | b856a69f50be4d1339591d2314c04f0186ab8023 | refs/heads/master | 2020-06-14T10:54:44.850828 | 2016-12-19T15:10:48 | 2016-12-19T15:10:48 | 75,193,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,590 | py | ###############################################################################
# Recurse template tag for Django v1.1
# Copyright (C) 2008 Lucas Murray
# http://www.undefinedfire.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
###############################################################################
from django import template
register = template.Library()
class RecurseNode(template.Node):
    """Template node that renders a tree recursively.

    For each item in the resolved parent list it pushes a context with
    `level`, the named item, and its `children`, renders the optional
    {% child %} / {% loop %} sections, and recurses into the children.
    """
    def __init__(self, var, name, condition, child, nodeList):
        # var: FilterExpression yielding the top-level items
        # name: context variable name for the current item
        # condition: optional FilterExpression gating each item
        # child: FilterExpression yielding an item's children
        # nodeList: parsed sections keyed 'child'/'loop'/'endloop'/'endrecurse'
        self.var = var
        self.name = name
        self.condition = condition
        self.child = child
        self.nodeList = nodeList
    def __repr__(self):
        return '<RecurseNode>'
    def renderCallback(self, context, vals, level):
        """Render one level of the tree; `level` starts at 1."""
        output = []
        # NOTE(review): this bare except coerces any non-sized value into a
        # one-element list, but it also hides unrelated errors -- confirm
        # whether catching TypeError alone would suffice.
        try:
            if len(vals):
                pass
        except:
            vals = [vals]
        if len(vals):
            if 'loop' in self.nodeList:
                output.append(self.nodeList['loop'].render(context))
            for val in vals:
                context.push()
                context['level'] = level
                context[self.name] = val
                # Skip items that fail the optional `if` condition.
                if not self.condition or self.condition.resolve(context):
                    children = self.child.resolve(context)
                    context['children'] = children
                    if 'child' in self.nodeList:
                        output.append(self.nodeList['child'].render(context))
                    if children:
                        output.append(self.renderCallback(context, children, level + 1))
                output.append(self.nodeList['endloop'].render(context))
                context.pop()
        output.append(self.nodeList['endrecurse'].render(context))
        return ''.join(output)
    def render(self, context):
        vals = self.var.resolve(context)
        output = self.renderCallback(context, vals, 1)
        return output
@register.tag
def recurse(parser, token):
    """Parse {% recurse [childVar] with [parents] as [parent] (if [cond]) %}.

    Collects the optional {% child %} and {% loop %}...{% endloop %}
    sections up to {% endrecurse %} and returns a RecurseNode.

    Bug fixes vs. the original:
    * ``raise (ExceptionClass, "msg")`` raises a tuple, which is itself a
      TypeError on Python 3 -- use ``raise ExceptionClass("msg")``.
    * the validation expression had an operator-precedence bug: with six
      bits the 'with'/'as' keywords were never checked at all.
    """
    bits = list(token.split_contents())
    length_ok = len(bits) == 6 or (len(bits) == 8 and bits[6] == 'if')
    if not (length_ok and bits[2] == 'with' and bits[4] == 'as'):
        raise template.TemplateSyntaxError(
            "Invalid tag syntax, expected '{% recurse [childVar] with [parents] as [parent] (if [condition]) %}'")
    child = parser.compile_filter(bits[1])
    var = parser.compile_filter(bits[3])
    name = bits[5]
    condition = parser.compile_filter(bits[7]) if len(bits) == 8 else None
    nodeList = {}
    # Collect up to four sections; 'endrecurse' terminates the tag body.
    while len(nodeList) < 4:
        temp = parser.parse(('child', 'loop', 'endloop', 'endrecurse'))
        tag = parser.tokens[0].contents
        if tag == 'endloop' and 'loop' not in nodeList:
            raise template.TemplateSyntaxError(
                "Invalid tag syntax, '{% loop %}' should be followed by '{% endloop %}'")
        nodeList[tag] = temp
        parser.delete_first_token()
        if tag == 'endrecurse':
            break
    if "loop" in nodeList and "endloop" not in nodeList:
        raise template.TemplateSyntaxError(
            "Invalid tag syntax, '{% loop %}' should be followed by '{% endloop %}'")
return RecurseNode(var, name, condition, child, nodeList) | [
"swolfod@gmail.com"
] | swolfod@gmail.com |
37bd37efa7285ccccba19d5b872543c17d244ff7 | a9d6606ac831df233ed5b9a8f0757c2596a72405 | /pubcrawl/migrations/0001_initial.py | 54405199e3e1e946b6e2cca3b9c6b005b0aa4765 | [] | no_license | AAGlasgow/pubcrawler | a753ec295cb14a1e292abbe0d691a3263f453c82 | 6a23c8f0c41b34a7ff977d6e777889b73aeb4226 | refs/heads/master | 2016-09-06T00:32:31.758251 | 2015-03-27T17:09:18 | 2015-03-27T17:09:18 | 32,093,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,648 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for the pubcrawl app.

    Auto-generated by Django; creates Category, Crawl, Crawl_Pub (the
    ordered crawl<->pub join table), Page, Pub, Review and UserProfile,
    then wires the Crawl_Pub->Pub foreign key.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=128)),
                ('views', models.IntegerField(default=0)),
                ('likes', models.IntegerField(default=0)),
                ('slug', models.SlugField(unique=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Crawl',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('score', models.IntegerField(default=0)),
                ('name', models.CharField(max_length=128)),
                ('drink', models.BooleanField(default=False)),
                ('drinkDescription', models.CharField(max_length=500)),
                ('costume', models.BooleanField(default=False)),
                ('costumeDescription', models.CharField(max_length=500)),
                ('description', models.CharField(max_length=500)),
                ('picture', models.ImageField(upload_to=b'crawl_images', blank=True)),
                ('dateTime', models.DateTimeField(auto_now=True)),
                ('slug', models.SlugField(unique=True)),
                ('creator', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Crawl_Pub',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('position', models.IntegerField()),
                ('crawl', models.ForeignKey(to='pubcrawl.Crawl')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Page',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=128)),
                ('url', models.URLField()),
                ('views', models.IntegerField(default=0)),
                ('category', models.ForeignKey(to='pubcrawl.Category')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Pub',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('placeID', models.CharField(unique=True, max_length=256)),
                ('name', models.CharField(max_length=128)),
                ('slug', models.SlugField(unique=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Review',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('liked', models.BooleanField(default=False)),
                ('text', models.CharField(max_length=750)),
                ('dateTime', models.DateTimeField(auto_now=True)),
                ('crawl', models.ForeignKey(to='pubcrawl.Crawl')),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('website', models.URLField(blank=True)),
                ('picture', models.ImageField(upload_to=b'profile_images', blank=True)),
                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Added separately because Pub is created after Crawl_Pub above.
        migrations.AddField(
            model_name='crawl_pub',
            name='pub',
            field=models.ForeignKey(to='pubcrawl.Pub'),
            preserve_default=True,
        ),
    ]
| [
"2079884F@student.gla.ac.uk"
] | 2079884F@student.gla.ac.uk |
d6d8a0a8e4bd901a25c210a72278a027d0e701ee | ac7c31d271ece050a5598aaf520d2f5f08c75425 | /FA/q_algorithm.py | 5d3906fcecf21e97d8218eeb7ee7f98da7768741 | [] | no_license | srujanpanuganti/Dots-and-Boxes-Reinforcement-Learning | 2ed33439e2fd40eef4353da30771aff7318c0e69 | c1461d2c8e238d114690ab185c545b7e7ba35950 | refs/heads/master | 2022-07-17T06:31:05.876226 | 2020-05-12T04:15:00 | 2020-05-12T04:15:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,436 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 29 14:09:53 2019
@author: Srujan Panuganti
"""
import numpy as np
#import itertools
from operator import add
import random
import copy
from approximation import Estimator
class q_learn:
    """Tabular Q-learning agent with a function-approximation Estimator
    kept in sync on every update.

    States are integers 0..2**total_actions - 1; each action is a one-hot
    bitmask 2**j.  q_table maps state -> ndarray of [action, q_value] rows.
    """
    def __init__(self,
                 total_actions,
                 estimator:Estimator,
                 learning_rate = 0.6,
                 discount_factor = 0.7,
                 epsilon=0.6
                 ):
        self.estimator = estimator
        self.total_actions = total_actions
        #self.all_states = list(itertools.product([0, 1], repeat=total_actions))
        #self.all_actions = []
        #for i in self.all_states:
        #    if sum(i) == 1:
        #        self.all_actions.append(i)
        self.number_of_states = 2** (self.total_actions)
        # Actions are one-hot bitmasks: 1, 2, 4, ..., 2**(n-1).
        self.all_actions = [ 2**j for j in range(0,self.total_actions)]
        self.all_actions_arr = np.asarray([ 2**j for j in range(0,self.total_actions)]).reshape(self.total_actions,1).astype(int)
        self.zeros_action = np.zeros([self.total_actions,1]).astype(int)
        # Two-column table: column 0 = action bitmask, column 1 = q-value.
        self.all_actions_with_q_vals = np.hstack((self.all_actions_arr,self.zeros_action))
        self.lin_space = list(range(0,self.total_actions))
        # Maps an action bitmask to its row index in a state's q-table.
        self.action_index = {}
        for key,val in zip(self.all_actions,self.lin_space ):
            self.action_index[key] = val
# =============================================================================
#         self.all_actions = list(range(1,self.total_actions+1))
#         self.all_actions_arr = np.asarray(range(1,self.total_actions+1)).reshape(self.total_actions,1).astype(int)
#         self.zeros_action = np.zeros([self.total_actions,1]).astype(int)
#
#         self.all_actions_with_q_vals = np.hstack((self.all_actions_arr,self.zeros_action))
#
# =============================================================================
        self.all_states = list(range(0,self.number_of_states))
        # NOTE(review): dict.fromkeys stores the SAME ndarray object for
        # every state, so a q-update on one state is visible in all states;
        # a per-state copy was probably intended -- confirm.
        self.q_table = dict.fromkeys(self.all_states,self.all_actions_with_q_vals)
        #self.current_action = self.all_actions[0]
        self.learning_rate = learning_rate
        self.discount_factor = discount_factor
        self.epsilon = epsilon
    def compute_q_value(self, old_q_val,reward,max_q):
        # Standard Q-learning update: Q += lr * (reward + gamma*maxQ' - Q).
        new_q_val = old_q_val + self.learning_rate* (reward + self.discount_factor* max_q - old_q_val)
        return new_q_val
    def maximum_q(self, current_state, possible_actions):
        """Return (max_q, best_action, best_action_index) over possible_actions."""
        max_q = 0
        index = 0
        max_i = 0
        optimal_action = 0
        #print('here',possible_actions)
        for act in possible_actions:
            #print('curr st',current_state,'act',act-1)
            #print('what the prob',self.q_table[current_state][act-1])
            if self.q_table[current_state][self.action_index[act]][1] >= max_q:
                # NOTE(review): 'max_q = max_q' is a no-op, so max_q always
                # stays 0; it likely should record the table value -- confirm.
                max_q = max_q
                optimal_action = act
                max_i = index
            index +=1
        #print('papa')
        return max_q,optimal_action,max_i
# =============================================================================
#
#         for act in possible_actions:
#             if self.q_table[str([current_state,act])] >= max_q:
#                 max_q = max_q
#                 optimal_action = act
#
#                 max_i = index
#             index +=1
#         return max_q,optimal_action,max_i
#
# =============================================================================
# =============================================================================
#     def q_update(self, old_state_action, current_state ,reward, possible_actions):
#         ### obtaining the old_q value
#         #previous_state = old_state_action[0]
#         q_value = self.q_table[str(old_state_action)]
#
#         # =============================================================================
#         #         if not possible_actions:
#         #             self.q_table[str(old_state_action)] = new_q_val
#         #
#         # =============================================================================
#         max_q,optimal_action,max_i = self.maximum_q(current_state, possible_actions)
#
#         ### computing the new q-value to be updated into the q_table
#         new_q_val = self.compute_q_value(q_value,reward,max_q)
#         ### updating the q_value to the q_table
#         self.q_table[str(old_state_action)] = new_q_val
#
#         return optimal_action
#
# =============================================================================
    def q_update(self, old_state_action, current_state ,reward):#, possible_actions):
        """Apply one Q-learning update for (state, action) = old_state_action
        given the observed reward, mirror it into the estimator, and return
        the greedy action for current_state."""
        ### obtaining the old_q value
        #previous_state = old_state_action[0]
        q_value = self.q_table[old_state_action[0]][self.action_index[old_state_action[1]]][1]
        #q_value = self.q_table[old_state_action[0]][old_state_action[1]][1]
        #print('all_', self.all_actions)
        max_q,optimal_action,max_i = self.maximum_q(current_state, self.all_actions)
        ### computing the new q-value to be updated into the q_table
        new_q_val = self.compute_q_value(q_value,reward,max_q)
        ### updating the q_value to the q_table
        self.q_table[old_state_action[0]][self.action_index[old_state_action[1]]][1] = new_q_val
        #self.q_table[old_state_action[0]][old_state_action[1]][1] = new_q_val
        # Keep the function approximator in sync with the tabular value.
        self.estimator.update(old_state_action[0],self.action_index[old_state_action[1]],new_q_val)
        return optimal_action
    def epsilon_greedy(self, current_state, possible_actions):
        """Pick the greedy action with probability (1 - epsilon), otherwise
        a random action from possible_actions."""
        pos_act = copy.deepcopy(possible_actions)
        max_q, optimal_action, max_i = self.maximum_q(current_state, pos_act)
        #print('posss',pos_act)
        #print(optimal_action)
        #print(max_i)
        if random.random() >= self.epsilon:
            action = optimal_action
            #print('opt action',action)
        else:
            if max_i > 1:
                # NOTE(review): np.delete returns a new array and its result
                # is discarded, so the greedy action is NOT excluded from the
                # random draw; pos_act.pop(max_i) was probably intended.
                # Also 'max_i > 1' may have been meant as len(pos_act) > 1 --
                # confirm.
                np.delete(pos_act,max_i)
                action = random.choice(pos_act)
            else:
                action = random.choice(pos_act)
        return action
| [
"noreply@github.com"
] | noreply@github.com |
f058f908a45f09325dc4edc777257215ab53d73b | f6dd0381ea30cacb9555ecee772dcca721a04310 | /python/src/cintegrallib/algo/nn/opt.py | bba45245492f0974c5d7ca3d1723bc4b794604c4 | [] | no_license | careychou/cintegral | 4b305afde1e1569496b0d238bac3653280bac156 | ede6a5bdd93c7509ce9e13f673e2eef5c8db21dc | refs/heads/master | 2023-04-08T22:29:00.522208 | 2021-04-16T11:48:00 | 2021-04-16T11:48:00 | 343,204,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | import tensorflow as tf
import numpy as np
###
# utility functions for differential optimization from tensorflow
# website
#
# newton root finding
# Param:
# grads1: dy/dx
# grads2: hessian matrix d2y/dx2
def newton_root(grads1, grads2):
    """Compute a regularized Newton step H^{-1} g.

    grads1 is the gradient dy/dx and grads2 the Hessian d2y/dx2; the step
    is solved from (H + eps*I) dx = g and returned in grads1's shape.
    """
    original_shape = grads1.shape
    n_params = tf.reduce_prod(original_shape)
    g = tf.reshape(grads1, [n_params, 1])
    h = tf.reshape(grads2, [n_params, n_params])
    # Small ridge term keeps the solve numerically stable.
    eps = 1e-3
    ridge = tf.eye(h.numpy().shape[0]) * eps
    # Newton iteration: x(k+1) = x(k) - f'(x)/f''(x); this returns the step.
    step = tf.linalg.solve(h + ridge, g)
    return tf.reshape(step, original_shape)
| [
"carey.h.chou@gmail.com"
] | carey.h.chou@gmail.com |
a14d3e665336b521fc8262c6e0e47e6396190015 | 90ec04a55a3c3ae13ce7b30f866d2fe16b7f552e | /curso-basico-python/Tema2/Ejercicios/listas/respuestas.py | f75caaeaf47aa6b620cf5c82f0feb1f4b1423ed5 | [] | no_license | syscurso/CursosTwitch | 7c58926d46d135b384db1d50cf1a48198e1c3c15 | 7bcbded082e343b661d907313a49902125ad0973 | refs/heads/main | 2023-03-06T23:33:49.359254 | 2021-02-24T14:06:00 | 2021-02-24T14:06:00 | 336,535,642 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,085 | py |
# The list we have available for the exercises
lista = [5, 30, 17, 4, 24]
# EXERCISE 1
# Create a variable that stores the length of the list and print it
longitud = len(lista)
# EXERCISE 2
# Create a variable that stores the last value of the list and print it
# Both approaches below are correct
ultimoVal = lista[longitud-1]
ultimoVal1 = lista[-1]
# EXERCISE 3
# Append a number to the list, then print it to verify
lista.append(4)
# EXERCISE 4
# Remove the last value of the list, then print it to verify
lista.pop()
# EXERCISE 5
# Sort the list, then print it to verify
lista.sort()
# BONUS EXERCISE
# Knowing that we play as 'X' and it is our turn...
# Add your X to this board to win the game!!!
matriz = [['X',' ','O'],
          ['O','X',' '],
          [' ',' ',' ']]
# Accepted, since this is the approach that was explained.
matriz[2].insert(2, 'X')
# Also correct (explained today before starting)
matriz[2][2] = 'X'
# Print the board so it displays correctly in the console.
print(matriz[0])
print(matriz[1])
print(matriz[2]) | [
"syscurso@gmail.com"
] | syscurso@gmail.com |
3ae0ed5ed98be35ed46d8dc9ee81969373ac43d8 | 8240a0c8817a63f529344fcf4dbca80ca2545729 | /report/controllers/pool.py | 92b603dd881fea40d4c19a11f0867914c4158cc6 | [] | no_license | BrokenEagle/PythonScripts | 6e218b7c19bbe1f051b2e0e2681ecc720def2fba | 350a82720e95ad662faeb50b3a85904e9980f4dc | refs/heads/master | 2022-10-11T22:01:04.220628 | 2020-06-14T01:41:03 | 2020-06-14T01:52:52 | 66,891,053 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,663 | py | #REPORT/CONTROLLERS/POOL.PY
#LOCAL IMPORTS
from danbooru import GetPageUrl,GetSearchUrl,IDPageLoop
from misc import DebugPrint,IsAddItem,IsRemoveItem,GetAddItem,GetRemoveItem,IsOrderChange
#MODULE IMPORTS
from ..logical.reportcontroller import ReportController
from ..logical.users import UserReportData
from ..logical.common import GetTotalColumn,GetCreateColumn
#Functions
def UpdatePoolData(userid,userdict,currversiondata,priorversiondata):
    """Classify one pool version against its predecessor and bump the
    per-user counters in userdict[userid].

    Counter slots: 1=create, 2=name, 3=description, 4=add, 5=obsolete add,
    6=remove, 7=obsolete remove, 8=order, 9=active toggle, 10=other.
    """
    dirty = 0
    # No prior version means this edit created the pool.
    if len(priorversiondata)==0:
        DebugPrint("Create")
        userdict[userid][1] += 1
        return
    priorversiondata = priorversiondata.pop()
    postpoollist = currversiondata['post_ids']
    prepoollist = priorversiondata['post_ids'] #page crossing will cause failure here
    if currversiondata['name_changed']:
        DebugPrint("Name change")
        dirty = 1
        userdict[userid][2] += 1
    if currversiondata['description_changed']:
        DebugPrint("Description change")
        dirty = 1
        userdict[userid][3] += 1
    if IsAddItem(prepoollist,postpoollist):
        DebugPrint("Add post")
        dirty = 1
        userdict[userid][4] += 1
        obsolete = GetObsoleteAdd(currversiondata,GetAddItem(prepoollist,postpoollist))
        # NOTE(review): the add branch bumps slot 5 by 1 while the remove
        # branch below adds the full obsolete count to slot 7 -- confirm
        # whether this asymmetry is intentional.
        if obsolete > 0:
            DebugPrint("Obsolete Add")
            userdict[userid][5] += 1
    if IsRemoveItem(prepoollist,postpoollist):
        DebugPrint("Remove post")
        dirty = 1
        userdict[userid][6] += 1
        obsolete = GetObsoleteRemove(currversiondata,GetRemoveItem(prepoollist,postpoollist))
        if obsolete > 0:
            DebugPrint("Obsolete Remove")
            userdict[userid][7] += obsolete
    if IsOrderChange(prepoollist,postpoollist):
        DebugPrint("Order change")
        dirty = 1
        userdict[userid][8] += 1
    if currversiondata['is_active'] != priorversiondata['is_active']:
        DebugPrint("Active change")
        dirty = 1
        userdict[userid][9] += 1
    # Nothing matched: count as an uncategorized edit.
    if dirty == 0:
        DebugPrint("Other")
        userdict[userid][10] += 1
def GetObsoleteAdd(currversion,postlist):
    """Count how many posts added in this version were later removed again
    (i.e. the add turned out to be obsolete)."""
    start_urls = [GetPageUrl(currversion['id'], above=True)]
    search_urls = [GetSearchUrl('pool_id', currversion['pool_id'])]
    state = {'postlist': [postlist], 'obsolete': [0]}
    IDPageLoop('pool_versions', 100, GetObsoleteAddIterator, search_urls, state, start_urls, reverselist=True)
    return state['obsolete'][0]
def GetObsoleteAddIterator(poolver,postlist,obsolete):
    """Per-version callback: count tracked posts missing from this version."""
    current_ids = poolver['post_ids']
    remaining = postlist[0]
    # Walk indices high-to-low so pops don't shift unvisited entries.
    for idx in reversed(range(len(remaining))):
        if remaining[idx] not in current_ids:
            obsolete[0] += 1
            remaining.pop(idx)
    postlist[0] = remaining
    # -1 tells IDPageLoop to stop once every post is accounted for.
    return -1 if len(remaining) == 0 else 0
def GetObsoleteRemove(currversion,postlist):
    """Count how many posts removed in this version were later re-added
    (i.e. the remove turned out to be obsolete)."""
    start_urls = [GetPageUrl(currversion['id'], above=True)]
    search_urls = [GetSearchUrl('pool_id', currversion['pool_id'])]
    state = {'postlist': [postlist], 'obsolete': [0]}
    IDPageLoop('pool_versions', 100, GetObsoleteRemoveIterator, search_urls, state, start_urls, reverselist=True)
    return state['obsolete'][0]
def GetObsoleteRemoveIterator(poolver,postlist,obsolete):
    """Per-version callback: count tracked posts that reappear in this version."""
    current_ids = poolver['post_ids']
    remaining = postlist[0]
    # Walk indices high-to-low so pops don't shift unvisited entries.
    for idx in reversed(range(len(remaining))):
        if remaining[idx] in current_ids:
            obsolete[0] += 1
            remaining.pop(idx)
    postlist[0] = remaining
    # -1 tells IDPageLoop to stop once every post is accounted for.
    return -1 if len(remaining) == 0 else 0
def pooltransform(userdict,**kwargs):
    """Merge the add/remove and obsolete-add/remove counters into
    'n, (m)' display strings for the DText report table."""
    merged = {}
    for uid, row in userdict.items():
        add_remove = '%s, (%s)' % (row[4], row[6])
        obsolete_add_remove = '%s, (%s)' % (row[5], row[7])
        merged[uid] = row[:4] + [add_remove] + [obsolete_add_remove] + row[8:]
    return merged
#Report variables
# Identifier and display strings for the pool report.
reportname = 'pool'
dtexttitle = "Pool Details"
dtextheaders = ['Username','Total','Create','Name','Descr','Add/ Rem','Obs Add/ Rem','Order','Active','Other']
csvheaders = ['total','name','descr changed','create','add','remove','obsolete add','obsolete remove','order','active','other']
# Per-column transforms for the DText table (None = use the value as-is).
transformfuncs = [GetTotalColumn,GetCreateColumn,None,None,None,None,None,None,None]
dtexttransform = pooltransform
# Number of counter slots tracked per user beyond the user id.
extracolumns = 10
tablecutoffs = [[40]]
reporthandler = UserReportData.InitializeUserReportData(UpdatePoolData)
#Controller variables
# Danbooru API query parameters for iterating pool versions.
startvalue = 0
querylimit = 10
versioned = True
timestamp = 'updated_at'
userid = 'updater_id'
controller = 'pool_versions'
createuserid = 'creator_id'
createcontroller = 'pools'
lookupid = 'pool_id'
typehandler = ReportController.InitializeReportController([reporthandler])
| [
"BrokenEagle98@yahoo.com"
] | BrokenEagle98@yahoo.com |
4f57e86ebf5519b9db20e98fde471633fc6e4b12 | 488d56a0a2c100777dfe55ea9ea75f461256f8bc | /allmax/wsgi.py | 2b7f6384c59140cb1829569dfa88061aaedfa8c4 | [] | no_license | namor-katz/allmax | e2214e31082e7c9472f99632b1b0a34e386d0ca3 | 14f4c887daced8d548fed62b47d895f3ba8712b2 | refs/heads/master | 2020-07-01T22:38:43.287530 | 2019-08-08T19:46:53 | 2019-08-08T19:46:53 | 201,326,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
WSGI config for allmax project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings unless the environment overrides it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'allmax.settings')
# Module-level WSGI callable imported by application servers.
application = get_wsgi_application()
| [
"namor925@gmail.com"
] | namor925@gmail.com |
89e743033bdaf9bc5884b7c64680abb041b596a8 | 202c8826e2a32f9414b653e1ea12ebc09c7a8039 | /mysql_db.py | d730e2b3bc3debf008d7e337dbc21a58b241cfea | [] | no_license | alexsumsher/mysql_connector_pooler | 22727ac4bd9f9667c9b02152851abad0d8215fbe | f92f549ea14d58a86a668d2c3b26f4fb3527450f | refs/heads/master | 2022-10-23T18:28:49.832336 | 2022-10-12T02:43:02 | 2022-10-12T02:43:02 | 145,304,551 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,820 | py | # -*- coding:utf-8
"""
MySQL Database Connection.
change to fit for poolo class.
last update: 20171229.
update: com_con,增加flexiable模式的pool
20180103: com_con, __enter__ with elock(thread.Lock) for thread safe
201803:new arg for execute_db: get_ins_id(false), weather return last insert id
"""
import MySQLdb
import logging
import time
# for com_con
from random import random as rdm
from threading import Lock
logging.basicConfig(level=logging.DEBUG, format='(%(funcName)-10s) %(message)s')
class server_info:
server_info = {
'host': '127.0.0.1',
'port': 3306,
'user': 'dbuser',
'password': 'dbpassword',
'database': 'dbname',
'charset': 'utf8'
}
def __init__(self, **configure):
self.__dict__ = server_info.server_info
if 'host' in configure:
self.host = configure['host']
if 'port' in configure:
self.port = configure['port']
if 'user' in configure:
self.user = configure['user']
if 'passwd' in configure:
self.passwd = configure['passwd']
if 'db' in configure:
self.db = configure['db']
def __getitem__(self, item):
return self.__dict__.get(item)
def __setitem__(self, n, v):
if n in self.__dict__:
self.__dict__[n] = v
@property
def con_string(self):
cmdstr = 'mysql -h %s -u %s -p%s' % (self.host, self.user, self.passwd)
if self.port:
cmdstr += ' -P %s' % self.port
if self.db:
cmdstr += ' -D %s' % self.db
return cmdstr
@property
def info(self):
return self.__dict__
class mdb_mysql(object):
@classmethod
def add_prop(cls, pname, defv=None):
if pname not in cls.__dict__:
cls.__dict__[pname] = defv
def __init__(self, server_args, **extargs):
self.server_info = server_args.info if isinstance(server_args, server_info) else server_args
self.dbname = self.server_info['db']
self.conn = None
self.last_cmd = ''
if self.connect_db() != 1:
return None
self._mark = extargs.get('mark', 0)
# ready=0, closed=-1, using=1
self.stat = 0
def __getitem__(self, query_string):
cmd_arr = query_string.split(';')
if len(cmd_arr) > 1:
return self.do_sequence(cmd_arr)
else:
operation = query_string.split(' ')[0]
if operation.lower() in ('insert', 'update', 'delete'):
return self.execute_db(query_string)
else:
rt = self.query_db(query_string)
return rt[0] if rt and len(rt) == 1 else rt
# @property
# def lastInsertID(self):
# return self.conn.insert_id()
#
@property
def mark(self):
return self._mark
@mark.setter
def mark(self, mark):
self._mark = mark
@property
def lastcmd(self):
return self.last_cmd
def assign_connection(self, con):
if self.check_conn(con) < 2:
print 'not a correct connection pass in!'
return 0
else:
self.conn = con
return 1
def connect_db(self):
if self.conn:
constat = self.check_conn()
if constat == 2:
print 'connect exists!'
return 1
elif constat == 1:
self.conn.close()
try:
self.conn = MySQLdb.connect(*[], **self.server_info)
self.conn.autocommit(True)
except MySQLdb.DatabaseError as err:
print 'Error on connecting to DB Server: %s' % err
return 0
if self.check_conn >=2:
return 1
else:
return 0
def get_cur(self):
try:
return self.conn.cursor()
except:
if self.connect_db() == 1:
return self.conn.cursor()
else:
raise ValueError('not get cursor!')
def change_db(self, dbname):
try:
self.conn.select_db(dbname)
except MySQLdb.MySQLError as err:
print 'MySQL Server Error: %s' % err
return 0
self.dbname = dbname
self.server_info['db'] = dbname
return 1
def ready(self):
"""
work with opooler: if opooler->down==close connection; on opooler i_get(wait->work), do nothing until using
it with execute_db or querydb, the execute method would auto connect to server first...
stat: -1(no work), 0(wait/idel), 1(working/using/binded);
set to working mode.
"""
if self.check_conn() == 2 or self.connect_db() == 1:
self.stat = 1
else:
self.stat = -1
return self.stat
def idle(self):
if self.check_conn() == 2 or self.connect_db() == 1:
self.stat = 0
else:
self.stat = -1
return self.stat
def close(self):
# if conn is exists and usable, conn.__str__():<_mysql.connection open to '192.168.1.191' at 11888f8>
# if conn is exists and closed, conn.__str__():<_mysql.connection closed at 3201fc8>
if self.conn and self.conn.__str__().split()[1] == 'open':
try:
self.conn.close()
except MySQLdb.MySQLError as err:
print 'MySQL Server Error: %s' % err
self.conn = None
return 1
def check_conn(self, con=None):
"""
return -1 if not a MySQLdb.Connection, 0 if is a connection but closed,
1 if opened but not connect to a correct server, 2 if server ok!
"""
target = con if con else self.conn
i = -1
if isinstance(target, MySQLdb.connections.Connection):
i += 1
if target.__str__().split()[1] == 'open':
i += 1
i += 1 if target.get_host_info().split()[0] == self.server_info['host'] else 0
return i
def __enter__(self):
self.execute_db('START TRANSACTION')
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.execute_db('COMMIT')
def execute_db(self, sqlcmd, args=None, cur=None, get_ins_id=False):
# for debug:
print sqlcmd
# 在执行之前可能需要做一些cursor的处理,故可以先get_cur然后传入cur的方式来执行execute_db
if self.conn is None:
if self.connect_db() == 0:
print 'SQL Server error on connect! Can not query!'
return 0
vcur = cur or self.get_cur()
rt = 0
try:
rt = vcur.execute(sqlcmd, args)
except MySQLdb.ProgrammingError:
self.conn.store_result()
self.conn.next_result()
rt = vcur.execute(sqlcmd, args)
except MySQLdb.OperationalError:
logging.warning('Server error! Cannot execute SQL statement!')
if not cur:
# if cur, execute invoked by query, return false
return -1
else:
self.close()
if self.connect_db() != 1:
logging.warning('Server Down!')
return None
else:
vcur = self.get_cur()
rt = vcur.execute(sqlcmd, args)
finally:
if rt and get_ins_id:
vcur.execute('SELECT LAST_INSERT_ID()')
rt = vcur.fetchone()[0]
self.last_cmd = sqlcmd
# iid = 0
# if rt and sqlcmd[:6].lower == 'insert':
# iid = vcur.execute('SELECT LAST_INSERT_ID();')
# rrt = iid,rt
# else:
# rrt = rt
if not cur:
vcur.close()
return rt
def us_execute_db(self, sqlcmd, args=None):
logging.warning('unsafe execute db!')
cur = self.get_cur()
rt = 0
try:
cur.execute('SET SQL_SAFE_UPDATES=0;')
self.last_cmd = sqlcmd
except MySQLdb.ProgrammingError:
self.conn.store_result()
self.conn.next_result()
cur.execute('SET SQL_SAFE_UPDATES=0;')
rt = cur.execute(sqlcmd, args)
except MySQLdb.OperationalError:
logging.warning('Server error! connection is closed!retry!')
self.close()
if self.connect_db() != 1:
return None
else:
cur = self.get_cur()
cur.execute('SET SQL_SAFE_UPDATES=1;')
rt = cur.execute(sqlcmd, args)
except MySQLdb.Error:
logging.warning('Server error! Cannot execute SQL statement!')
self.close()
return 0
finally:
cur.execute('SET SQL_SAFE_UPDATES=1;')
cur.close()
return rt
# 查询的结果是一个tuple,所以需要其中第一个参数时(无论one是否true)都应该用[0]
# fetchall的结果是由tuple组成的list,one=True表示读取第一个tuple
# query+tuple查询方式:cursor.execute('select * from user where name=? and pwd=?', ('abc', '123456'))
def query_db(self, query, args=None, incur=None, one=False):
if incur:
cur = incur
else:
cur = self.get_cur()
rt = 0
rv = None
rt = self.execute_db(query, args=args, cur=cur)
# if con may down, reconnect and query again.
if rt == -1 and self.check_conn < 2:
self.close()
if self.connect_db() == 1:
cur = self.get_cur()
rt = self.execute_db(query, args=args, cur=cur)
else:
return None
if rt:
rv = cur.fetchone() if one else cur.fetchall()
if not incur:
cur.close()
# return rv[0] if rv and len(rv) == 1 else rv
return rv
# handling for huge number of insert
def huge_insert(self, table, colums, values, seq_len=1000):
assert len(colums) == len(values[0])
orign_command = 'INSERT INTO {0} ({1}) VALUES %s'.format(table, ','.join(colums))
value_str = ''
length = len(values)
loop_count = length / seq_len + (1 if length % seq_len > 0 else 0)
counter = 0
cur = self.get_cur()
for loop in xrange(loop_count):
cc = 0
for line in values:
cc += 1
line_str = '(%s),' % ','.join(map(lambda x: '"%s"' % x if isinstance(x, (str, unicode)) else str(x), line))
value_str += line_str
sql_command = (orign_command % value_str)[:-1]
if cur.execute(sql_command):
counter += cc
else:
print 'Error: huge_insert: on loop==%d' % loop
cur.close()
return '%s/%s' % (counter, length)
# from v1.0: removed transaction functons
def execute_many(self, sqlstr, args):
"""insert into table (col1, col2...) values (%s, %s...), [(var1-1, var1-2...), (var2-1, var2-2)...]"""
if self.conn is None:
if self.connect_db() == 0:
raise MySQLdb.MySQLError
cur = self.get_cur()
rt = 1
try:
cur.executemany(sqlstr, args)
except MySQLdb.MySQLError:
logging.error('not success on execute_many')
rt = 0
finally:
cur.close()
return rt
def do_sequence(self, sql_seq, ignore=False):
# batch execute a list of sql command
if self.conn is None:
if self.connect_db() == 0:
logging.error('can not connect to database!')
return 0
cur = self.get_cur()
rts = []
sql_seq = sql_seq if isinstance(sql_seq, (list,tuple)) else sql_seq.split(';')
for sql_cmd in sql_seq:
rt = 0
try:
rt = cur.execute(sql_cmd)
except MySQLdb.OperationalError:
logging.warning('Server error! Cannot execute SQL statement!')
self.close()
if self.connect_db() != 1:
logging.warning('Server error! Cannot connect to server!')
return 0
else:
cur = self.get_cur()
rt = cur.execute(sql_cmd)
except MySQLdb.MySQLError:
logging.warning('mysql error on command: %s' % sql_cmd)
if not ignore:
cur.close()
return 0
else:
rts.append(rt)
continue
if rt and sql_cmd[:6].lower() == 'select':
rtv = cur.fetchall()
rts.append(rtv if len(rtv)>1 else rtv[0])
else:
rts.append(rt)
cur.close()
return rts
def do_transaction(self, sql_seq, ignore=False):
"""transaction mode"""
if self.conn is None:
if self.connect_db() == 0:
logging.error('can not connect to database!')
return 0
cur = self.get_cur()
cur.execute('BEGIN')
rts = []
for sql_cmd in sql_seq:
rt = 0
try:
rt = cur.execute(sql_cmd)
except MySQLdb.MySQLError:
logging.warning('mysql error on command: %s' % sql_cmd)
if not ignore:
self.conn.rollback()
cur.close()
return 0
else:
self.conn.store_result()
self.conn.next_result()
rts.append(rt)
continue
if rt and sql_cmd[:6].lower() == 'select':
rtv = cur.fetchall()
rts.append(rtv if len(rtv)>1 else rtv[0])
else:
rts.append(rt)
self.conn.commit()
cur.close()
return rts
def do_sql_script(self, sql_scr_str):
if self.conn is None:
if self.connect_db() == 0:
raise MySQLdb.MySQLError('can not connect to database file')
self.conn.executescript(sql_scr_str)
return 1
def last_insid(self):
return self.conn.query_db('SELECT LAST_INSERT_ID();', one=True)
class com_con(object):
length = 10
dead_len = 30
recover_time = 60
# w is a mark for pool
w = 'pool'
@classmethod
def set_deadlen(cls, dlen=0):
if dlen > cls.dead_len:
cls.dead_len = dlen
return dlen
else:
return cls.dead_len
# -1: not inited; 1: working; 0:shutdown
def __init__(self, sql_obj, con_info, length=0, atonce=True, flexible=False):
# if flexible, work with lenth and dead_length, and self.c->last time work mode, if add new con is set to 1, if kick set to -1:
# when take: if overlen create new con, if over deadlen error
# when kick: if finger > len and last time is kick, will not append to conlist for reuse, just remove it(on the other hand, if the last time action is still create new con, which means the pool may still works under busy mode)
self.length = length or self.__class__.length
if flexible:
if self.length < self.__class__.dead_len:
self.dead_len = self.__class__.dead_len
self.__take_kick = self.__take_kick_2
self.c = 0
else:
logging.warning('length is bigger than dead_len, will not work in flexible mode!')
self.dead_len = 0
self.__take_kick = self.__take_kick_1
else:
self.dead_len = 0
self.__take_kick = self.__take_kick_1
self.sqo = sql_obj
self.cif = con_info
self.finger = 0
self.ilock = Lock()
self.elock = Lock()
self.status = -1
self.conlist = []
self.staticlist = []
self.curcon = None
self.recover_time = self.__class__.recover_time
self.w = 'pool'
if atonce:
self.ilock.acquire()
self.__inilist(sql_obj, con_info)
self.ilock.release()
def __getitem__(self, sqlcmds):
con = self.__take_kick()
if con:
try:
rt = con[sqlcmds]
except:
rt = None
finally:
self.__take_kick(con)
return rt
else:
raise RuntimeError('NO connection to take, with current finger=%s' % self.finger)
def __inilist(self, sql_obj, con_info):
if self.status > 0:
return
if len(self.conlist) > 0:
for i in xrange(len(self.conlist)):
self.conlist.pop().close()
if len(self.staticlist) > 0:
for i in xrange(len(self.staticlist)):
self.staticlist.pop().close()
if self.status == -1:
self.staticlist = [None] * self.length
for i in xrange(self.length):
con = sql_obj(con_info, mark=0)
self.conlist.append(con)
self.staticlist[i] = con
time.sleep(0.05)
elif self.status == 0:
i = 0
for con in self.staticlist:
t = con.connect_db()
if t == 0:
self.conlist.remove(con)
self.conlist.append(sql_obj(con_info, mark=0))
del con
i += 1
if i < self.length:
self.conlist.extend([sql_obj(con_info, mark=0) for x in xrange(self.length - i)])
self.staticlist = [None] * self.length
for t in xrange(len(self.conlist)):
self.staticlist[t] = self.conlist[t]
self.status = 1
return self.status
def shutdown(self):
self.ilock.acquire()
for con in self.staticlist:
con.close()
time.sleep(0.05)
self.status = 0
self.ilock.release()
def __batch_recovery(self):
ctime = int(time.time())
for con in self.staticlist:
if ctime - con.mark >= self.recover_time:
con.mark = 0
self.conlist.append(con)
self.finger -= 1
return self.finger
def __str__(self):
return 'status: %s\tfinger: %s\t; usage: %s/%s' % (self.status, self.finger, len(self.staticlist), len(self.conlist))
def __take_kick_2(self, con=None):
logging.debug('status: %s\tfinger: %s\t; usage: %s/%s' % (self.status, self.finger, len(self.staticlist), len(self.conlist)))
# work on flexible mode
def newcon():
ncon = self.sqo(self.cif, mark=0)
if ncon:
# create con and direct use, so it's no need to append to conlist
self.staticlist.append(ncon)
self.finger += 1
self.c = 1
return ncon
else:
return None
self.ilock.acquire()
if con:
if self.finger > self.length and self.c < 0:
self.staticlist.remove(con)
del con
else:
self.conlist.append(con)
con.mark = 0
self.c = -1
self.finger -= 1
self.ilock.release()
return self.finger
if self.status == 0:
if self.__inilist(self.sqo, self.cif) != 1:
self.ilock.release()
raise RuntimeError('Cannot Initial the Pool!')
elif self.status == -1:
self.conlist = []
self.staticlist = []
con = newcon()
self.ilock.release()
if con:
self.status = 1
return con
else:
return RuntimeError('Not able to inital the pool!')
if self.finger >= self.dead_len:
if self.__batch_recovery() >= self.dead_len:
self.ilock.release()
raise RuntimeError('Work on flexible Mode and over dead_len!')
elif self.finger >= self.length:
con = newcon()
self.ilock.release()
return con
con = self.conlist.pop(0)
self.finger += 1
self.c = 1
self.ilock.release()
return con
def __take_kick_1(self, con=None):
logging.debug('status: %s\tfinger: %s\t; usage: %s/%s' % (self.status, self.finger, len(self.staticlist), len(self.conlist)))
self.ilock.acquire()
if con:
self.conlist.append(con)
con.mark = 0
self.finger -= 1
self.ilock.release()
return self.finger
if self.status <= 0:
if self.__inilist(self.sqo, self.cif) != 1:
self.ilock.release()
raise RuntimeError('Cannot Initial the Pool!')
if self.finger >= self.length:
self.__batch_recovery()
if self.finger >= self.length:
for i in xrange(5):
time.sleep(round(rdm(),1))
if self.finger < self.length:
break
if i >= 4:
self.ilock.release()
return None
con = self.conlist.pop(0)
self.finger += 1
self.ilock.release()
return con
def free(self):
con = self.__take_kick()
if con:
con.mark = int(time.time())
return con
else:
return None
def release(self, con=None):
if con:
return self.__take_kick(con)
else:
return self.__batch_recovery()
def __enter__(self):
self.elock.acquire()
if self.curcon is None:
logging.info('con for with is still None, create it!')
self.curcon = self.sqo(self.cif, mark=0)
else:
self.curcon.ready()
return self.curcon
def __exit__(self, exc_type, exc_val, exc_tb):
self.elock.release()
def execute_db(self, cmd, get_ins_id=False):
con = self.__take_kick()
if con:
try:
rt = con.execute_db(cmd, get_ins_id=get_ins_id)
except:
rt = None
finally:
self.__take_kick(con)
return rt
else:
raise RuntimeError('NO connection to take, with current finger=%s' % self.finger)
def us_execute_db(self, sqlcmd):
con = self.__take_kick()
if con:
try:
rt = con.us_execute_db(cmd)
except:
rt = None
finally:
self.__take_kick(con)
return rt
else:
raise RuntimeError('NO connection to take, with current finger=%s' % self.finger)
def query_db(self, cmd, one=False):
con = self.__take_kick()
if con:
try:
rt = con.query_db(cmd, one=one)
except:
rt = None
finally:
self.__take_kick(con)
return rt
else:
raise RuntimeError('NO connection to take, with current finger=%s' % self.finger)
def do_sequence(self, sql_seq, ignore=False):
con = self.__take_kick()
if con:
try:
rt = con.do_sequence(sql_seq, ignore=ignore)
except:
rt = None
finally:
self.__take_kick(con)
return rt
else:
raise RuntimeError('NO connection to take, with current finger=%s' % self.finger)
class Qer(object):
sel_limit = 20
_con_pool = None
@classmethod
def set_con_pool(cls, con_pool):
if hasattr(con_pool, 'w') and con_pool.w == 'pool':
cls.con_pool = cls._con_pool = con_pool
else:
raise ValueError('NOT A CON-POOL!')
# if set the __con_pool for Qer, then if there was not set con_pool for subclass, witch will point to Qer's
# for common usage, we should not use __con_pool but _con_pool
# if new instance with a con key_value, then set the class con_pool to it
def __new__(cls, *args, **kwargs):
if 'con' in kwargs and hasattr(kwargs['con'], 'w') and kwargs['con'].w == 'pool':
cls.con_pool = cls._con_pool = kwargs['con']
return super(Qer, cls).__new__(cls)
# a) class new_qer_1(Qer), ins_of_new_qer_1() => ins_of_new_qer_1/new_qer_1 ._con_pool == Qer._con_pool
# b) class new_qer_2(Qer), ins_of_new_qer_2(con=new_con_pool) => ins_of_new_qer_2/new_qer_2 ._con_pool == new_con_pool
# remark: __init__ with self.con = self.__class__._con_pool for quit using con
def __init__(self, con=None):
self.con = self.__class__._con_pool
@staticmethod
def __filter_dict(orign_data, colns=None, onlyexists=False, defv=None, mode='dict'):
# check a input dict/request.args&form for required keys, and covert to string/dict.
defv = defv if isinstance(defv, dict) else {'defv': str(defv)} if defv is not None else {'defv': ''}
defv_defv = defv.get('defv', '')
outs = dict()
colns = colns if isinstance(colns, (list, tuple)) else colns.split(',') if isinstance(colns, str) else orign_data.keys()
real_colns = []
for _ in colns:
v = orign_data.get(_)
if v is None:
if onlyexists:
continue
else:
v = defv.get(_, defv_defv)
real_colns.append(_)
outs[_] = v if v.isdigit() else '"%s"' % v
if mode == 'dict':
return outs
elif mode == 'pstr':
vstr = ''
for _ in real_colns:
vstr += '%s=%s,' % (_, outs[_])
return vstr[:-1]
elif mode == '2str':
nstr = ''
vstr = ''
for _ in real_colns:
nstr += _ + ','
vstr += outs[_] + ','
return nstr[:-1],vstr[:-1]
@staticmethod
def __dict2str(from_dict, array=None, mode=0):
if mode == 0:
vstr = ''
for _ in array if array else form_dict.iterkeys():
v = from_dict[_]
vstr += '%s=%s,' % (_, v if v.isdigit() else '"%s"' % v)
return vstr[:-1]
else:
nstr = ''
vstr = ''
for _ in array if array else form_dict.iterkeys():
v = from_dict[_]
nstr += _ + ','
vstr += '%s,' % v if v.isdigit() else '"%s",' % v
return nstr[:-1],vstr[:-1]
@staticmethod
def __dict_list_4json(datalist, fieldlist, one=False):
"""[{key-1-1:val-1-1,key-1-2:val-1-2,...},{key-2-1:val-2-1,key-2-2:val-2-2,...}]"""
# datalist: (1,2,3...), ((1,2,3..), (4,5,6...),...); v2.1
if isinstance(fieldlist, str):
fieldlist = fieldlist.split(',')
if not datalist or not isinstance(fieldlist, (list,tuple)):
print 'not a correct input!'
return None
slen = len(fieldlist)
def t2d(t):
c = 0
d = {}
for _ in fieldlist:
x = t[c]
d[_] = x if isinstance(x, (unicode, str, int)) else str(x)
c += 1
return d
def d2d(D):
d = {}
for _ in fieldlist:
d[_] = D[_]
return d
def o2d(o):
d = {}
for _ in fieldlist:
d[_] = o.__dict__[_]
return d
slen = len(fieldlist)
if isinstance(datalist[0], (str, unicode, int)):
if slen != len(datalist):
raise ValueError("length of item miss match!")
if one:
return t2d(datalist)
else:
datalist = [datalist]
out_list = list()
for line in datalist:
if isinstance(line, tuple):
out_list.append(t2d(line))
elif isinstance(line, dict) or hasattr(line, '__getitem__'):
out_list.append(d2d(line))
elif hasattr(line, '__dict__'):
out_list.append(o2d(line))
return out_list
if __name__ == '__main__':
server = server_info().info
testdbc = mdb_mysql(server)
if testdbc.connect_db() == 1:
print 'YES'
else:
print 'NO'
print testdbc['show tables;']
testdbc.close()
| [
"ersummer@126.com"
] | ersummer@126.com |
3e975642797d10a8c17cd8f6be381c06081d0be0 | 6ff5e37147419b5ed5ee43a6654bf2bc8d6f5779 | /maximum subarray.py | 12ac9e4f14710a4a0d248c29fc80aa7f5d9fc197 | [] | no_license | ashageorgesj/LeetCode | 982fcdf5265ce98c02a27a7b55ff4273c4875dcd | afb8c8fbfa3f3a08d70c4fa12ab82bcb601bbb2d | refs/heads/master | 2021-05-21T04:19:45.314378 | 2021-02-05T07:13:58 | 2021-02-05T07:13:58 | 252,539,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | # Using Kadane's algorithm
class Solution:
def maxSubArray(self, nums: List[int]) -> int:
total = 0
maxVal = 0
if all([num < 0 for num in nums]):
return max(nums)
for i in range(0,len(nums)):
total += nums[i]
if total <= 0:
total = 0
if total > maxVal:
maxVal = total
return maxVal | [
"ashageor@gmail.com"
] | ashageor@gmail.com |
7e718881b9c46f43e2cc9329438179cd7fbc6988 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/CISCO-CDSTV-FSI-MIB.py | cb16a25f5a07fa321bae26f7dbc8039adcc9a510 | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 6,231 | py | #
# PySNMP MIB module CISCO-CDSTV-FSI-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-CDSTV-FSI-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:35:42 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
CiscoURLString, = mibBuilder.importSymbols("CISCO-TC", "CiscoURLString")
InetAddressType, InetPortNumber, InetAddress = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressType", "InetPortNumber", "InetAddress")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
TimeTicks, Counter32, Integer32, ObjectIdentity, MibIdentifier, iso, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, ModuleIdentity, Counter64, Unsigned32, Bits, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "Counter32", "Integer32", "ObjectIdentity", "MibIdentifier", "iso", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "ModuleIdentity", "Counter64", "Unsigned32", "Bits", "Gauge32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
ciscoCdstvFsiMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 735))
ciscoCdstvFsiMIB.setRevisions(('2010-05-10 00:00',))
if mibBuilder.loadTexts: ciscoCdstvFsiMIB.setLastUpdated('201005100000Z')
if mibBuilder.loadTexts: ciscoCdstvFsiMIB.setOrganization('Cisco Systems, Inc.')
ciscoCdstvFsiMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 735, 0))
ciscoCdstvFsiMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 735, 1))
ciscoCdstvFsiMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 735, 2))
ciscoCdstvFsiMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 735, 2, 1))
cdstvFsiIpAddressType = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 735, 1, 1), InetAddressType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdstvFsiIpAddressType.setStatus('current')
cdstvFsiIpAddress = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 735, 1, 2), InetAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdstvFsiIpAddress.setStatus('current')
cdstvFsiServerPort = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 735, 1, 3), InetPortNumber()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdstvFsiServerPort.setStatus('current')
cdstvFsiFtpClientPort = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 735, 1, 4), InetPortNumber()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdstvFsiFtpClientPort.setStatus('current')
cdstvFsiFtpOutServerPort = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 735, 1, 5), InetPortNumber()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdstvFsiFtpOutServerPort.setStatus('current')
cdstvFsiFtpOutLoginTTL = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 735, 1, 6), Unsigned32()).setUnits('hops').setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdstvFsiFtpOutLoginTTL.setStatus('current')
cdstvFsiLogLevel = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 735, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("off", 1), ("low", 2), ("high", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdstvFsiLogLevel.setStatus('current')
cdstvFsiContentRootPath = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 735, 1, 8), SnmpAdminString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdstvFsiContentRootPath.setStatus('current')
cdstvFsiAsyncCallbackURL = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 735, 1, 9), CiscoURLString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdstvFsiAsyncCallbackURL.setStatus('current')
ciscoCdstvFsiMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 735, 2, 2))
ciscoCdstvFsiMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 735, 2, 1, 1)).setObjects(("CISCO-CDSTV-FSI-MIB", "ciscoCdstvFsiMIBMainObjectGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoCdstvFsiMIBCompliance = ciscoCdstvFsiMIBCompliance.setStatus('current')
ciscoCdstvFsiMIBMainObjectGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 735, 2, 2, 1)).setObjects(("CISCO-CDSTV-FSI-MIB", "cdstvFsiIpAddress"), ("CISCO-CDSTV-FSI-MIB", "cdstvFsiServerPort"), ("CISCO-CDSTV-FSI-MIB", "cdstvFsiFtpClientPort"), ("CISCO-CDSTV-FSI-MIB", "cdstvFsiFtpOutServerPort"), ("CISCO-CDSTV-FSI-MIB", "cdstvFsiFtpOutLoginTTL"), ("CISCO-CDSTV-FSI-MIB", "cdstvFsiLogLevel"), ("CISCO-CDSTV-FSI-MIB", "cdstvFsiContentRootPath"), ("CISCO-CDSTV-FSI-MIB", "cdstvFsiAsyncCallbackURL"), ("CISCO-CDSTV-FSI-MIB", "cdstvFsiIpAddressType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoCdstvFsiMIBMainObjectGroup = ciscoCdstvFsiMIBMainObjectGroup.setStatus('current')
mibBuilder.exportSymbols("CISCO-CDSTV-FSI-MIB", ciscoCdstvFsiMIBConform=ciscoCdstvFsiMIBConform, PYSNMP_MODULE_ID=ciscoCdstvFsiMIB, cdstvFsiFtpClientPort=cdstvFsiFtpClientPort, ciscoCdstvFsiMIBObjects=ciscoCdstvFsiMIBObjects, cdstvFsiIpAddress=cdstvFsiIpAddress, cdstvFsiServerPort=cdstvFsiServerPort, cdstvFsiContentRootPath=cdstvFsiContentRootPath, ciscoCdstvFsiMIB=ciscoCdstvFsiMIB, ciscoCdstvFsiMIBCompliance=ciscoCdstvFsiMIBCompliance, ciscoCdstvFsiMIBMainObjectGroup=ciscoCdstvFsiMIBMainObjectGroup, cdstvFsiIpAddressType=cdstvFsiIpAddressType, ciscoCdstvFsiMIBCompliances=ciscoCdstvFsiMIBCompliances, ciscoCdstvFsiMIBNotifs=ciscoCdstvFsiMIBNotifs, cdstvFsiAsyncCallbackURL=cdstvFsiAsyncCallbackURL, cdstvFsiFtpOutServerPort=cdstvFsiFtpOutServerPort, cdstvFsiFtpOutLoginTTL=cdstvFsiFtpOutLoginTTL, cdstvFsiLogLevel=cdstvFsiLogLevel, ciscoCdstvFsiMIBGroups=ciscoCdstvFsiMIBGroups)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
88fe37658f8407f12b873c62b2a21869341c3261 | d282be07f3ef398817be3878510b67dd5845b249 | /bin/redditerm | 30cf5858b44045add2dd109a817c1072cbf7febd | [] | no_license | videah/dotfiles | c15e5d6ca999e11d68c43378fb849c5db21fb85d | 8c6c7631d3856cf0a90224e2e20052105a4e84a2 | refs/heads/master | 2021-06-01T02:03:59.417210 | 2016-06-22T22:40:57 | 2016-06-22T22:40:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,558 | #!/usr/bin/env python3
#
# redditerm Version 0.2
#
# This code is licensed under the MIT Open Source License.
#
# Copyright (c) 2015 Ruairidh Carmichael - ruairidhcarmichael@live.co.uk
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import urllib.request, json, sys
# Has the user inputted a username, if not, quit.
try:
user = sys.argv[1]
except:
print("Error: No username provided. Please enter a username as an argument.")
sys.exit()
# Grab the users 'about.json', make sure to set the user agent.
url = "http://www.reddit.com/user/{username}/about.json".format(username = user)
req = urllib.request.Request(url, data = None, headers = {'User-Agent': 'redditerm'})
# If we can't get the users info, give up and output an error.
try:
response = urllib.request.urlopen(req);
except:
print("There was an error loading the account '{username}'. Are you sure it exists?".format(username = user))
sys.exit()
# Read the JSON and decode it.
rawdata = response.read()
data = json.loads(rawdata.decode())
# Construct the terminal output.
output = """
███▄▄▄ ▄█▀▀▀█
▐█ ▀█▌ █▌
▐█ ▀███▀
█▌ {red}Username:{clear} {name}
▄▄██▀▀▀▀▀▀▀▀▀▀██▄▄
▄█▀▀███▀▀ ▀▀██▀▀▀█▄ {red}Link Karma:{clear} {link_karma}
▐█ █▀ {red}▄▄▄ ▄▄▄{clear} ▀█▄ ▐█
███ {red}█████ █████{clear} ███▀ {red}Comment Karma:{clear} {comment_karma}
█▌ {red}▀███▀ ▀███▀{clear} █
▐█ ▐█
▀█ ▄▄ ▄▄ ▄█
▀█▄ ▀▀█████▀▀ ▄▄█▀
▀██▄▄▄ ▄▄▄▄█▀▀
▀▀▀▀▀▀▀▀▀▀
"""
output = output.format(
red = "\x1b[1;31m", # ANSI Color Codes.
clear = "\x1b[0m",
name = data['data']['name'], # Max reddit username length is 20, so we should be fine.
link_karma = str(data['data']['link_karma']),
comment_karma = str(data['data']['comment_karma'])
)
# Finally, display said output.
print(output) | [
"ruairidhcarmichael@live.co.uk"
] | ruairidhcarmichael@live.co.uk | |
2e9c01aa40d1ab7c497942dbffcc0fa4a3ba2b26 | 4b73dc6b31bd2e25957c70677dc6fd0135a1a43b | /03_Avanzado/Unidad_06/test/add2_register_peewe.py.py | 39ca7af97f070da147425d5db3d1a2fdd422c67a | [] | no_license | stick2yourmind/Python | 557aae4b8422104ec244dcd45123b1c0b4ed2d7a | 9629add21493bea0173fa6ed548bedb18e73fa32 | refs/heads/master | 2022-11-17T10:00:53.622658 | 2020-07-13T01:21:49 | 2020-07-13T01:21:49 | 272,263,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,466 | py | import random
from peewee import *
import datetime
import mysql.connector
my_sql_host_default = "localhost"
my_sql_user_default = "root"
my_sql_pass_default = ""
my_sql_db_default = "catalogueDB"
my_sql_table_default = "producto"
columns_name_list = ["titulo", "fecha", "descripcion", "estado", "objeto"]
my_sql_db = my_sql_db_default
my_sql_port = 3306
my_sql_host = my_sql_host_default
my_sql_user = my_sql_user_default
my_sql_pass = my_sql_pass_default
def create_db_my_sql(my_sql_host, my_sql_user, my_sql_pass, my_sql_db):
    """Create the MySQL database ``my_sql_db`` if it does not already exist.

    Args:
        my_sql_host: MySQL server host name.
        my_sql_user: user name used to connect.
        my_sql_pass: password for that user.
        my_sql_db: name of the database to create.

    Returns:
        1 on success, -1 on failure (the original error-code contract is
        kept for backward compatibility with existing callers).
    """
    aux = -1
    my_db = None
    try:
        my_db = mysql.connector.connect(
            host=my_sql_host,
            user=my_sql_user,
            passwd=my_sql_pass)
        my_cursor = my_db.cursor()
        # Database names cannot be bound as query parameters, so the name is
        # interpolated directly -- callers must pass a trusted identifier.
        my_cursor.execute("CREATE DATABASE IF NOT EXISTS " + my_sql_db)
        my_db.commit()
        my_cursor.close()
        aux = 1
        print("create_db_my_sql: DB has been created")
    except mysql.connector.Error as err:
        # Previously a bare ``except:`` that swallowed every exception
        # (even KeyboardInterrupt) and hid the cause; narrowed to connector
        # errors and the reason is now reported.
        print("create_db_my_sql: Error:", err)
    finally:
        # Always release the connection, even when an error occurred
        # (the original leaked it on failure).
        if my_db is not None:
            my_db.close()
    return aux
def create_table_orm(my_sql_db, my_sql_host, my_sql_port, my_sql_user, my_sql_pass, columns_name_list):
    """Create the ORM-managed ``regitem3`` table in the given MySQL database.

    Args:
        my_sql_db: database name (must already exist).
        my_sql_host: MySQL server host name.
        my_sql_port: MySQL server port.
        my_sql_user: user name used to connect.
        my_sql_pass: password for that user.
        columns_name_list: five column names, in order: title, date,
            description, status, object.

    Returns:
        1 on success, -1 on failure (original error-code contract kept).
    """
    aux = -1
    try:
        db = MySQLDatabase(my_sql_db, host=my_sql_host, port=my_sql_port,
                           user=my_sql_user,
                           passwd=my_sql_pass)

        class Catalogue(Model):
            # Abstract base binding every derived model to ``db``.
            class Meta:
                database = db

        class RegItem3(Catalogue):
            # id column is not required due to peewee auto generates it and
            # assigns an AutoField (auto-incrementing primary key) to it.
            # NOTE: writing through locals() inside a class body to pick the
            # field name at runtime is CPython-specific behaviour.
            locals()[columns_name_list[0]] = TextField()
            # Bug fix: the default used to be ``datetime.datetime.now()``
            # (evaluated once at class creation, so every row shared the
            # import-time timestamp). Passing the callable makes peewee
            # invoke it per row.
            locals()[columns_name_list[1]] = DateTimeField(default=datetime.datetime.now)
            locals()[columns_name_list[2]] = TextField()
            locals()[columns_name_list[3]] = BooleanField(default=True)
            locals()[columns_name_list[4]] = TextField()

        db.connect()
        db.create_tables([RegItem3])
        db.close()
        aux = 1
        print("create_table_orm: Table has been created")
    except Exception as err:
        # Previously a bare ``except:``; narrowed and the cause is reported.
        print("create_table_orm: Error:", err)
    return aux
# Make sure the database and the ORM-managed table exist before inserting.
create_db_my_sql(my_sql_host, my_sql_user, my_sql_pass, my_sql_db)
create_table_orm(my_sql_db, my_sql_host, my_sql_port, my_sql_user, my_sql_pass , columns_name_list)
# Module-level handle to the same database the helpers above targeted.
db = MySQLDatabase(my_sql_db, host=my_sql_host, port=my_sql_port,
                   user=my_sql_user,
                   passwd=my_sql_pass)
class Catalogue(Model):
    # Abstract base binding every derived model to ``db``.
    class Meta:
        database = db
class RegItem3(Catalogue):
    # NOTE(review): this re-declares the model that create_table_orm() built
    # internally; peewee derives the table name from the class name, so both
    # map to the same table. Fields are injected through locals(), which is
    # CPython-specific behaviour inside a class body.
    locals()[columns_name_list[0]] = TextField()
    # NOTE(review): default=datetime.datetime.now() is evaluated once at
    # class-creation time, so every row gets the import-time timestamp --
    # passing the callable datetime.datetime.now was probably intended.
    locals()[columns_name_list[1]] = DateTimeField(default=datetime.datetime.now())
    locals()[columns_name_list[2]] = TextField()
    locals()[columns_name_list[3]] = BooleanField(default=True)
    locals()[columns_name_list[4]] = TextField()
    # Fixed label returned by __str__ below.
    aux = "regItem3"
    def __str__(self):
        # Human-readable name for the model/instance.
        return self.aux
db.connect()
# Values to persist. Index 0 would be an explicit id (unused -- peewee
# auto-assigns the primary key) and index 2 the date (unused -- now() is
# assigned directly below instead).
columns_value_list = ["5", "titulo", None, "descripcion", "estado", "objeto"]
# NOTE(review): rebinding the class name to an instance shadows the model
# class for the rest of the module; a distinct variable name would be safer.
RegItem3 = RegItem3()
RegItem3.titulo = columns_value_list[1]
RegItem3.fecha = datetime.datetime.now()
RegItem3.descripcion = columns_value_list[3]
RegItem3.estado = columns_value_list[4]
RegItem3.objeto = columns_value_list[5]
print(RegItem3)  # prints the fixed label from __str__
RegItem3.save()  # INSERTs the row; peewee fills in the auto primary key
db.close()
# There is another way to update registers, by using both update() and execute(). Check pdf at unit 1 to check how to
# implement that solution.
"saravia.jonathan.m@gmail.com"
] | saravia.jonathan.m@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.