| python_code | repo_name | file_path |
|---|---|---|
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO consolidate with fcn_mip/score_ensemble_outputs.py
from typing import Mapping
import xarray
import xskillscore
import numpy as np
from contextlib import contextmanager
@contextmanager
def properscoring_with_cupy():
    """A context manager that makes properscoring compatible with cupy arrays.

    Not thread safe.
    """
    import properscoring
    import cupy

    # using cupy is 3x faster
    properscoring._crps.np = cupy
    old_score = properscoring._crps._crps_ensemble_core
    properscoring._crps._crps_ensemble_core = (
        properscoring._crps._crps_ensemble_vectorized
    )
    try:
        yield
    finally:
        # restore the patched module state even if the body raises
        properscoring._crps.np = np
        properscoring._crps._crps_ensemble_core = old_score
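# Example (hypothetical sketch, not part of the original module): compute CRPS
# on cupy arrays while the patch is active. Assumes `obs` (shape [n]) and
# `ens` (shape [n, n_members]) are cupy arrays.
#
#     import properscoring
#     with properscoring_with_cupy():
#         crps = properscoring.crps_ensemble(obs, ens)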
def global_average(x, lat):
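    """Cosine-latitude weighted mean over the lat and lon dimensions."""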
cos_lat = np.cos(np.deg2rad(lat))
return x.weighted(cos_lat).mean(dim=["lat", "lon"])
def score_ensemble(
ensemble: xarray.DataArray, obs: xarray.DataArray, lat: xarray.DataArray
) -> Mapping[str, xarray.DataArray]:
"""Set of standardized scores for ensembles
Includes:
- crps
- mse of ensemble mean
- variance about ensemble mean
- MSE of the first ensemble member
"""
out = {}
num = (ensemble.mean("ensemble") - obs) ** 2
out["MSE_mean"] = global_average(num.load(), lat)
num = ensemble.var("ensemble", ddof=1)
out["variance"] = global_average(num.load(), lat)
if 0 in ensemble.ensemble:
num = (ensemble.sel(ensemble=0) - obs) ** 2
out["MSE_det"] = global_average(num.load(), lat)
crps = xskillscore.crps_ensemble(
obs,
ensemble,
issorted=False,
member_dim="ensemble",
dim=(),
keep_attrs=True,
)
out["crps"] = global_average(crps.load(), lat)
    # get the ensemble size in a potentially time-dependent manner;
    # for compatibility it needs to have the same dims as the other metrics
out["ensemble_size"] = ensemble.count("ensemble").isel(lat=0, lon=0).load()
return out
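# Example (hypothetical sketch): score a small synthetic ensemble, assuming
# numpy is imported as np.
#
#     lat = xarray.DataArray(np.linspace(-88, 88, 19), dims=["lat"], name="lat")
#     ens = xarray.DataArray(
#         np.random.randn(4, 19, 36),
#         coords={"ensemble": [0, 1, 2, 3], "lat": lat},
#         dims=["ensemble", "lat", "lon"],
#     )
#     obs = ens.isel(ensemble=0, drop=True)
#     scores = score_ensemble(ens, obs, lat=lat)
#     float(scores["crps"])  # scalar, cosine-weighted global-mean CRPS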
|
earth2mip-main
|
earth2mip/xarray/metrics.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
earth2mip-main
|
earth2mip/xarray/__init__.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xarray
try:
import cupy
except ImportError:
cupy = None
def to_cupy(ds):
    """Convert a Dataset or DataArray to use cupy.

    Assumes that dask is not used.
    """
    if cupy is None:
        raise ImportError("cupy is required by to_cupy but could not be imported.")
if isinstance(ds, xarray.DataArray):
# TODO make this name unique
name = "23vd89a"
return to_cupy(ds.to_dataset(name=name))[name]
variables = {}
for v in ds.variables:
try:
arr = cupy.asarray(ds[v])
        except ValueError as e:
            raise ValueError(
                f"{v} cannot be converted to cupy. Original exception: {e}."
            ) from e
else:
variables[v] = xarray.Variable(ds[v].dims, arr)
return ds._replace_with_new_dims(
variables, coord_names=list(ds.coords), indexes=ds.indexes
)
def to_np(arr):
return xarray.DataArray(
arr.data.get(), coords=arr.coords, dims=arr.dims, attrs=arr.attrs
)
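# Example (hypothetical sketch): round-trip a DataArray through the GPU,
# assuming numpy is imported as np and cupy is available.
#
#     da = xarray.DataArray(np.random.randn(4, 8), dims=["lat", "lon"])
#     da_gpu = to_cupy(da)    # data now lives on the GPU as a cupy ndarray
#     da_cpu = to_np(da_gpu)  # .get() copies the data back to host memory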
def concat_dict(d, key_names=(), concat_dim="key"):
"""concat a dictionary of xarray objects"""
arrays = []
for key, arr in d.items():
coords = {}
for name, value in zip(key_names, key):
coords[name] = xarray.Variable([concat_dim], [value])
arr = arr.expand_dims(concat_dim)
arr = arr.assign_coords(coords)
arrays.append(arr)
return xarray.concat(arrays, dim=concat_dim)
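# Example (hypothetical sketch): combine per-(model, year) results into a
# single object indexed along a new "key" dimension.
#
#     d = {
#         ("pangu", 2018): xarray.DataArray([1.0, 2.0], dims=["x"]),
#         ("dlwp", 2019): xarray.DataArray([3.0, 4.0], dims=["x"]),
#     }
#     combined = concat_dict(d, key_names=("model", "year"))
#     # combined has dims ("key", "x") plus "model" and "year" coordinates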
|
earth2mip-main
|
earth2mip/xarray/utils.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import sys
import pathlib
root = pathlib.Path(__file__).parent
modulus = root.parent / "third_party" / "modulus"
sys.path.insert(0, root.parent.as_posix())
sys.path.insert(0, modulus.as_posix())
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = "Earth2 MIP"
copyright = "2023, NVIDIA"
author = "NVIDIA"
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon"]
templates_path = ["_templates"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = "sphinx_book_theme"
html_static_path = ["_static"]
|
earth2mip-main
|
docs/conf.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# %%
import os
import numpy as np
import datetime
# Set number of GPUs to use to 1
os.environ["WORLD_SIZE"] = "1"
# Set model registry as a local folder
model_registry = os.path.join(os.path.dirname(os.path.realpath(os.getcwd())), "models")
os.makedirs(model_registry, exist_ok=True)
os.environ["MODEL_REGISTRY"] = model_registry
import earth2mip.networks.pangu as pangu
from earth2mip import (
registry,
inference_ensemble,
)
from earth2mip.initial_conditions import cds
from os.path import dirname, abspath, join
# %% Load model package and data source
print("Loading pangu model, this can take a bit")
package = registry.get_model("pangu")
# Load just the 24 hour model (also supports 6 hour)
inferencer = pangu.load_single_model(package, time_step_hours=24)
data_source = cds.DataSource(inferencer.in_channel_names)
time = datetime.datetime(2018, 1, 1)
# %% Run inference
ds = inference_ensemble.run_basic_inference(
    inferencer,
n=12,
data_source=data_source,
time=time,
)
print(ds)
# %% Post-process
import matplotlib.pyplot as plt
from scipy.signal import periodogram
output = f"{dirname(dirname(abspath(__file__)))}/outputs/workflows"
os.makedirs(output, exist_ok=True)
arr = ds.sel(channel="u200").values
f, pw = periodogram(arr, axis=-1, fs=1)
pw = pw.mean(axis=(1, 2))
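# pw now has shape (time, n_freq): periodogram keeps the leading axes of the
# selected field, and axes 1 and 2 (assumed here to be the batch and latitude
# axes) are averaged out, leaving one zonal power spectrum per lead time.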
lead = ds.time - ds.time[0]
# fraction of the total forecast window; used to index the colormap
frac = lead / (ds.time[-1] - ds.time[0])
cm = plt.cm.get_cmap("viridis")
for k in range(ds.sizes["time"]):
day = (ds.time[k] - ds.time[0]) / np.timedelta64(1, "D")
day = day.item()
    plt.loglog(f, pw[k], color=cm(frac[k]), label=day)
plt.legend()
plt.ylim(bottom=1e-8)
plt.grid()
plt.savefig(join(output, "u200_spectra_pangu24.png"), bbox_inches="tight")
# %%
day = (ds.time - ds.time[0]) / np.timedelta64(1, "D")
plt.semilogy(day, pw[:, 100:].mean(-1), "o-")
plt.savefig(join(output, "u200_high_wave_pangu24.png"), bbox_inches="tight")
|
earth2mip-main
|
examples/workflows/pangu_24.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# %%
import os
import datetime
import xarray as xr
# Set number of GPUs to use to 1
os.environ["WORLD_SIZE"] = "1"
# Set model registry as a local folder
model_registry = os.path.join(os.path.dirname(os.path.realpath(os.getcwd())), "models")
os.makedirs(model_registry, exist_ok=True)
os.environ["MODEL_REGISTRY"] = model_registry
import earth2mip.networks.dlwp as dlwp
from earth2mip import (
registry,
inference_ensemble,
)
from earth2mip.initial_conditions import cds
from modulus.distributed import DistributedManager
from os.path import dirname, abspath, join
# %% Load model package and data source
device = DistributedManager().device
print(f"Loading dlwp model onto {device}, this can take a bit")
package = registry.get_model("dlwp")
inferencer = dlwp.load(package, device=device)
cds_data_source = cds.DataSource(inferencer.in_channel_names)
# Stack two data-sources together for double timestep inputs
time = datetime.datetime(2018, 1, 1)
ds1 = cds_data_source[time]
ds2 = cds_data_source[time - datetime.timedelta(hours=6)]
ds = xr.concat([ds2, ds1], dim="time")
data_source = {time: ds}
# %% Run inference
ds = inference_ensemble.run_basic_inference(
inferencer,
n=12,
data_source=data_source,
time=time,
)
print(ds)
# %% Post-process
import matplotlib.pyplot as plt
from scipy.signal import periodogram
output = f"{dirname(dirname(abspath(__file__)))}/outputs/workflows"
os.makedirs(output, exist_ok=True)
arr = ds.sel(channel="t2m").values
fig, axs = plt.subplots(1, 13, figsize=(13 * 5, 5))
for i in range(13):
axs[i].imshow(arr[i, 0])
plt.savefig(join(output, "t2m_field_dlwp.png"), bbox_inches="tight")
|
earth2mip-main
|
examples/workflows/dlwp.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# %%
import os
import numpy as np
import datetime
# Set number of GPUs to use to 1
os.environ["WORLD_SIZE"] = "1"
# Set model registry as a local folder
model_registry = os.path.join(os.path.dirname(os.path.realpath(os.getcwd())), "models")
os.makedirs(model_registry, exist_ok=True)
os.environ["MODEL_REGISTRY"] = model_registry
import earth2mip.networks.fcnv2_sm as fcnv2
from earth2mip import registry, inference_ensemble
from earth2mip.initial_conditions import cds
from modulus.distributed import DistributedManager
from os.path import dirname, abspath, join
# %% Load model package and data source
device = DistributedManager().device
print(f"Loading FCNv2 small model onto {device}, this can take a bit")
package = registry.get_model("fcnv2_sm")
sfno_inference_model = fcnv2.load(package, device=device)
data_source = cds.DataSource(sfno_inference_model.in_channel_names)
output = "path/"
time = datetime.datetime(2018, 1, 1)
ds = inference_ensemble.run_basic_inference(
sfno_inference_model,
n=1,
data_source=data_source,
time=time,
)
# %% Post-process
import matplotlib.pyplot as plt
from scipy.signal import periodogram
output = f"{dirname(dirname(abspath(__file__)))}/outputs/workflows"
os.makedirs(output, exist_ok=True)
arr = ds.sel(channel="u200").values
f, pw = periodogram(arr, axis=-1, fs=1)
pw = pw.mean(axis=(1, 2))
lead = ds.time - ds.time[0]
# fraction of the total forecast window; used to index the colormap
frac = lead / (ds.time[-1] - ds.time[0])
cm = plt.cm.get_cmap("viridis")
for k in range(ds.sizes["time"]):
day = (ds.time[k] - ds.time[0]) / np.timedelta64(1, "D")
day = day.item()
    plt.loglog(f, pw[k], color=cm(frac[k]), label=day)
plt.legend()
plt.ylim(bottom=1e-8)
plt.grid()
plt.savefig(join(output, "u200_spectra_fcnv2sm.png"), bbox_inches="tight")
# %%
day = (ds.time - ds.time[0]) / np.timedelta64(1, "D")
plt.semilogy(day, pw[:, 100:].mean(-1), "o-")
plt.savefig(join(output, "u200_high_wave_fcnv2sm.png"), bbox_inches="tight")
|
earth2mip-main
|
examples/workflows/fcnv2_sm.py
|
#!/usr/bin/python
#
# Copyright 2016-2018 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
"""Fuzzing helper, creates and uses corpus/crash directories.
fuzzer.py <fuzzer> <extra fuzzer arguments>
"""
import os
import subprocess
import sys
FUZZER = sys.argv[1]
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
CORPORA_DIR = os.path.abspath(os.path.join(THIS_DIR, "corpora"))
FUZZER_DIR = os.path.abspath(os.path.join(CORPORA_DIR, FUZZER))
if not os.path.isdir(FUZZER_DIR):
os.mkdir(FUZZER_DIR)
corpora = []
def _create(d):
dd = os.path.abspath(os.path.join(CORPORA_DIR, d))
if not os.path.isdir(dd):
os.mkdir(dd)
corpora.append(dd)
def _add(d):
dd = os.path.abspath(os.path.join(CORPORA_DIR, d))
if os.path.isdir(dd):
corpora.append(dd)
def main():
_create(FUZZER)
_create(FUZZER + "-crash")
_add(FUZZER + "-seed")
cmd = ([os.path.abspath(os.path.join(THIS_DIR, FUZZER))] + sys.argv[2:]
+ ["-artifact_prefix=" + corpora[1] + "/"] + corpora)
print(" ".join(cmd))
subprocess.call(cmd)
if __name__ == "__main__":
main()
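# Example: invoking "python fuzzer.py client -max_total_time=60" (with a
# hypothetical fuzzer binary named "client" next to this script) creates
# corpora/client and corpora/client-crash, picks up corpora/client-seed if it
# exists, and then runs roughly:
#
#     ./client -max_total_time=60 -artifact_prefix=<abs>/corpora/client-crash/ \
#         <abs>/corpora/client <abs>/corpora/client-crash <abs>/corpora/client-seed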
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/CryptoPkg/Library/OpensslLib/openssl/fuzz/helper.py
|
# @file Edk2ToolsBuild.py
# Invocable class that builds the basetool c files.
#
# Supports VS2017, VS2019, and GCC5
##
# Copyright (c) Microsoft Corporation
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import sys
import logging
import argparse
import multiprocessing
from edk2toolext import edk2_logging
from edk2toolext.environment import self_describing_environment
from edk2toolext.base_abstract_invocable import BaseAbstractInvocable
from edk2toollib.utility_functions import RunCmd
from edk2toollib.windows.locate_tools import QueryVcVariables
class Edk2ToolsBuild(BaseAbstractInvocable):
def ParseCommandLineOptions(self):
''' parse arguments '''
ParserObj = argparse.ArgumentParser()
ParserObj.add_argument("-t", "--tool_chain_tag", dest="tct", default="VS2017",
help="Set the toolchain used to compile the build tools")
args = ParserObj.parse_args()
self.tool_chain_tag = args.tct
def GetWorkspaceRoot(self):
''' Return the workspace root for initializing the SDE '''
        # this is the basetools dir...not the traditional EDK2 workspace root
return os.path.dirname(os.path.abspath(__file__))
def GetActiveScopes(self):
''' return tuple containing scopes that should be active for this process '''
# for now don't use scopes
return ('global',)
def GetLoggingLevel(self, loggerType):
''' Get the logging level for a given type (return Logging.Level)
base == lowest logging level supported
con == Screen logging
txt == plain text file logging
md == markdown file logging
'''
if(loggerType == "con"):
return logging.ERROR
else:
return logging.DEBUG
def GetLoggingFolderRelativeToRoot(self):
''' Return a path to folder for log files '''
return "BaseToolsBuild"
def GetVerifyCheckRequired(self):
''' Will call self_describing_environment.VerifyEnvironment if this returns True '''
return True
def GetLoggingFileName(self, loggerType):
''' Get the logging file name for the type.
Return None if the logger shouldn't be created
base == lowest logging level supported
con == Screen logging
txt == plain text file logging
md == markdown file logging
'''
return "BASETOOLS_BUILD"
def WritePathEnvFile(self, OutputDir):
''' Write a PyTool path env file for future PyTool based edk2 builds'''
content = '''##
# Set shell variable EDK_TOOLS_BIN to this folder
#
# Autogenerated by Edk2ToolsBuild.py
#
# Copyright (c), Microsoft Corporation
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
{
"id": "You-Built-BaseTools",
"scope": "edk2-build",
"flags": ["set_shell_var", "set_path"],
"var_name": "EDK_TOOLS_BIN"
}
'''
with open(os.path.join(OutputDir, "basetoolsbin_path_env.yaml"), "w") as f:
f.write(content)
def Go(self):
logging.info("Running Python version: " + str(sys.version_info))
(build_env, shell_env) = self_describing_environment.BootstrapEnvironment(
self.GetWorkspaceRoot(), self.GetActiveScopes())
        # Bind our current execution environment into the shell vars.
ph = os.path.dirname(sys.executable)
if " " in ph:
ph = '"' + ph + '"'
shell_env.set_shell_var("PYTHON_HOME", ph)
# PYTHON_COMMAND is required to be set for using edk2 python builds.
pc = sys.executable
if " " in pc:
pc = '"' + pc + '"'
shell_env.set_shell_var("PYTHON_COMMAND", pc)
if self.tool_chain_tag.lower().startswith("vs"):
            # Update environment with required VC vars.
interesting_keys = ["ExtensionSdkDir", "INCLUDE", "LIB"]
interesting_keys.extend(
["LIBPATH", "Path", "UniversalCRTSdkDir", "UCRTVersion", "WindowsLibPath", "WindowsSdkBinPath"])
interesting_keys.extend(
["WindowsSdkDir", "WindowsSdkVerBinPath", "WindowsSDKVersion", "VCToolsInstallDir"])
vc_vars = QueryVcVariables(
interesting_keys, 'x86', vs_version=self.tool_chain_tag.lower())
for key in vc_vars.keys():
logging.debug(f"Var - {key} = {vc_vars[key]}")
if key.lower() == 'path':
shell_env.set_path(vc_vars[key])
else:
shell_env.set_shell_var(key, vc_vars[key])
self.OutputDir = os.path.join(
shell_env.get_shell_var("EDK_TOOLS_PATH"), "Bin", "Win32")
# compiled tools need to be added to path because antlr is referenced
shell_env.insert_path(self.OutputDir)
# Actually build the tools.
ret = RunCmd('nmake.exe', None,
workingdir=shell_env.get_shell_var("EDK_TOOLS_PATH"))
if ret != 0:
raise Exception("Failed to build.")
self.WritePathEnvFile(self.OutputDir)
return ret
elif self.tool_chain_tag.lower().startswith("gcc"):
cpu_count = self.GetCpuThreads()
ret = RunCmd("make", f"-C . -j {cpu_count}", workingdir=shell_env.get_shell_var("EDK_TOOLS_PATH"))
if ret != 0:
raise Exception("Failed to build.")
self.OutputDir = os.path.join(
shell_env.get_shell_var("EDK_TOOLS_PATH"), "Source", "C", "bin")
self.WritePathEnvFile(self.OutputDir)
return ret
logging.critical("Tool Chain not supported")
return -1
    def GetCpuThreads(self) -> int:
        ''' Return the number of cpus. If error return 1 '''
        cpus = 1
        try:
            cpus = multiprocessing.cpu_count()
        except NotImplementedError:
            # cpu_count() is not implemented on some platforms;
            # handle the error by just doing a single-proc build.
            pass
        return cpus
def main():
Edk2ToolsBuild().Invoke()
if __name__ == "__main__":
main()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Edk2ToolsBuild.py
|
## @file
#
#
# Copyright (c) 2009 - 2014, Apple Inc. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
import sys
import locale
if sys.platform == "darwin" and sys.version_info[0] < 3:
DefaultLocal = locale.getdefaultlocale()[1]
if DefaultLocal is None:
DefaultLocal = 'UTF8'
sys.setdefaultencoding(DefaultLocal)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/sitecustomize.py
|
## @file
# generate capsule
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from .GenFdsGlobalVariable import GenFdsGlobalVariable, FindExtendTool
from CommonDataClass.FdfClass import CapsuleClassObject
import Common.LongFilePathOs as os
from io import BytesIO
from Common.Misc import SaveFileOnChange, PackGUID
import uuid
from struct import pack
from Common import EdkLogger
from Common.BuildToolError import GENFDS_ERROR
from Common.DataType import TAB_LINE_BREAK
WIN_CERT_REVISION = 0x0200
WIN_CERT_TYPE_EFI_GUID = 0x0EF1
EFI_CERT_TYPE_PKCS7_GUID = uuid.UUID('{4aafd29d-68df-49ee-8aa9-347d375665a7}')
EFI_CERT_TYPE_RSA2048_SHA256_GUID = uuid.UUID('{a7717414-c616-4977-9420-844712a735bf}')
## create an inf file that describes what goes into the capsule, then call GenFv to generate the capsule
#
#
class Capsule (CapsuleClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
CapsuleClassObject.__init__(self)
# For GenFv
self.BlockSize = None
# For GenFv
self.BlockNum = None
self.CapsuleName = None
## Generate FMP capsule
#
# @retval string Generated Capsule file path
#
def GenFmpCapsule(self):
#
# Generate capsule header
# typedef struct {
# EFI_GUID CapsuleGuid;
# UINT32 HeaderSize;
# UINT32 Flags;
# UINT32 CapsuleImageSize;
# } EFI_CAPSULE_HEADER;
#
Header = BytesIO()
#
# Use FMP capsule GUID: 6DCBD5ED-E82D-4C44-BDA1-7194199AD92A
#
Header.write(PackGUID('6DCBD5ED-E82D-4C44-BDA1-7194199AD92A'.split('-')))
HdrSize = 0
if 'CAPSULE_HEADER_SIZE' in self.TokensDict:
Header.write(pack('=I', int(self.TokensDict['CAPSULE_HEADER_SIZE'], 16)))
HdrSize = int(self.TokensDict['CAPSULE_HEADER_SIZE'], 16)
else:
Header.write(pack('=I', 0x20))
HdrSize = 0x20
Flags = 0
if 'CAPSULE_FLAGS' in self.TokensDict:
for flag in self.TokensDict['CAPSULE_FLAGS'].split(','):
flag = flag.strip()
if flag == 'PopulateSystemTable':
Flags |= 0x00010000 | 0x00020000
elif flag == 'PersistAcrossReset':
Flags |= 0x00010000
elif flag == 'InitiateReset':
Flags |= 0x00040000
Header.write(pack('=I', Flags))
#
# typedef struct {
# UINT32 Version;
# UINT16 EmbeddedDriverCount;
# UINT16 PayloadItemCount;
# // UINT64 ItemOffsetList[];
# } EFI_FIRMWARE_MANAGEMENT_CAPSULE_HEADER;
#
FwMgrHdr = BytesIO()
if 'CAPSULE_HEADER_INIT_VERSION' in self.TokensDict:
FwMgrHdr.write(pack('=I', int(self.TokensDict['CAPSULE_HEADER_INIT_VERSION'], 16)))
else:
FwMgrHdr.write(pack('=I', 0x00000001))
FwMgrHdr.write(pack('=HH', len(self.CapsuleDataList), len(self.FmpPayloadList)))
FwMgrHdrSize = 4+2+2+8*(len(self.CapsuleDataList)+len(self.FmpPayloadList))
#
# typedef struct _WIN_CERTIFICATE {
# UINT32 dwLength;
# UINT16 wRevision;
# UINT16 wCertificateType;
# //UINT8 bCertificate[ANYSIZE_ARRAY];
# } WIN_CERTIFICATE;
#
# typedef struct _WIN_CERTIFICATE_UEFI_GUID {
# WIN_CERTIFICATE Hdr;
# EFI_GUID CertType;
# //UINT8 CertData[ANYSIZE_ARRAY];
# } WIN_CERTIFICATE_UEFI_GUID;
#
# typedef struct {
# UINT64 MonotonicCount;
# WIN_CERTIFICATE_UEFI_GUID AuthInfo;
# } EFI_FIRMWARE_IMAGE_AUTHENTICATION;
#
# typedef struct _EFI_CERT_BLOCK_RSA_2048_SHA256 {
# EFI_GUID HashType;
# UINT8 PublicKey[256];
# UINT8 Signature[256];
# } EFI_CERT_BLOCK_RSA_2048_SHA256;
#
PreSize = FwMgrHdrSize
Content = BytesIO()
for driver in self.CapsuleDataList:
FileName = driver.GenCapsuleSubItem()
FwMgrHdr.write(pack('=Q', PreSize))
PreSize += os.path.getsize(FileName)
File = open(FileName, 'rb')
Content.write(File.read())
File.close()
for fmp in self.FmpPayloadList:
if fmp.Existed:
FwMgrHdr.write(pack('=Q', PreSize))
PreSize += len(fmp.Buffer)
Content.write(fmp.Buffer)
continue
if fmp.ImageFile:
for Obj in fmp.ImageFile:
fmp.ImageFile = Obj.GenCapsuleSubItem()
if fmp.VendorCodeFile:
for Obj in fmp.VendorCodeFile:
fmp.VendorCodeFile = Obj.GenCapsuleSubItem()
if fmp.Certificate_Guid:
ExternalTool, ExternalOption = FindExtendTool([], GenFdsGlobalVariable.ArchList, fmp.Certificate_Guid)
CmdOption = ''
CapInputFile = fmp.ImageFile
if not os.path.isabs(fmp.ImageFile):
CapInputFile = os.path.join(GenFdsGlobalVariable.WorkSpaceDir, fmp.ImageFile)
CapOutputTmp = os.path.join(GenFdsGlobalVariable.FvDir, self.UiCapsuleName) + '.tmp'
if ExternalTool is None:
EdkLogger.error("GenFds", GENFDS_ERROR, "No tool found with GUID %s" % fmp.Certificate_Guid)
else:
CmdOption += ExternalTool
if ExternalOption:
CmdOption = CmdOption + ' ' + ExternalOption
CmdOption += ' -e ' + ' --monotonic-count ' + str(fmp.MonotonicCount) + ' -o ' + CapOutputTmp + ' ' + CapInputFile
CmdList = CmdOption.split()
GenFdsGlobalVariable.CallExternalTool(CmdList, "Failed to generate FMP auth capsule")
if uuid.UUID(fmp.Certificate_Guid) == EFI_CERT_TYPE_PKCS7_GUID:
dwLength = 4 + 2 + 2 + 16 + os.path.getsize(CapOutputTmp) - os.path.getsize(CapInputFile)
else:
dwLength = 4 + 2 + 2 + 16 + 16 + 256 + 256
fmp.ImageFile = CapOutputTmp
AuthData = [fmp.MonotonicCount, dwLength, WIN_CERT_REVISION, WIN_CERT_TYPE_EFI_GUID, fmp.Certificate_Guid]
fmp.Buffer = fmp.GenCapsuleSubItem(AuthData)
else:
fmp.Buffer = fmp.GenCapsuleSubItem()
FwMgrHdr.write(pack('=Q', PreSize))
PreSize += len(fmp.Buffer)
Content.write(fmp.Buffer)
BodySize = len(FwMgrHdr.getvalue()) + len(Content.getvalue())
Header.write(pack('=I', HdrSize + BodySize))
#
# The real capsule header structure is 28 bytes
#
Header.write(b'\x00'*(HdrSize-28))
Header.write(FwMgrHdr.getvalue())
Header.write(Content.getvalue())
#
# Generate FMP capsule file
#
CapOutputFile = os.path.join(GenFdsGlobalVariable.FvDir, self.UiCapsuleName) + '.Cap'
SaveFileOnChange(CapOutputFile, Header.getvalue(), True)
return CapOutputFile
## Generate capsule
#
# @param self The object pointer
# @retval string Generated Capsule file path
#
def GenCapsule(self):
if self.UiCapsuleName.upper() + 'cap' in GenFdsGlobalVariable.ImageBinDict:
return GenFdsGlobalVariable.ImageBinDict[self.UiCapsuleName.upper() + 'cap']
GenFdsGlobalVariable.InfLogger( "\nGenerate %s Capsule" %self.UiCapsuleName)
if ('CAPSULE_GUID' in self.TokensDict and
uuid.UUID(self.TokensDict['CAPSULE_GUID']) == uuid.UUID('6DCBD5ED-E82D-4C44-BDA1-7194199AD92A')):
return self.GenFmpCapsule()
CapInfFile = self.GenCapInf()
CapInfFile.append("[files]" + TAB_LINE_BREAK)
CapFileList = []
for CapsuleDataObj in self.CapsuleDataList:
CapsuleDataObj.CapsuleName = self.CapsuleName
FileName = CapsuleDataObj.GenCapsuleSubItem()
CapsuleDataObj.CapsuleName = None
CapFileList.append(FileName)
CapInfFile.append("EFI_FILE_NAME = " + \
FileName + \
TAB_LINE_BREAK)
SaveFileOnChange(self.CapInfFileName, ''.join(CapInfFile), False)
#
# Call GenFv tool to generate capsule
#
CapOutputFile = os.path.join(GenFdsGlobalVariable.FvDir, self.UiCapsuleName)
CapOutputFile = CapOutputFile + '.Cap'
GenFdsGlobalVariable.GenerateFirmwareVolume(
CapOutputFile,
[self.CapInfFileName],
Capsule=True,
FfsList=CapFileList
)
GenFdsGlobalVariable.VerboseLogger( "\nGenerate %s Capsule Successfully" %self.UiCapsuleName)
GenFdsGlobalVariable.SharpCounter = 0
GenFdsGlobalVariable.ImageBinDict[self.UiCapsuleName.upper() + 'cap'] = CapOutputFile
return CapOutputFile
## Generate inf file for capsule
#
# @param self The object pointer
# @retval file inf file object
#
def GenCapInf(self):
self.CapInfFileName = os.path.join(GenFdsGlobalVariable.FvDir,
self.UiCapsuleName + "_Cap" + '.inf')
CapInfFile = []
CapInfFile.append("[options]" + TAB_LINE_BREAK)
for Item in self.TokensDict:
CapInfFile.append("EFI_" + \
Item + \
' = ' + \
self.TokensDict[Item] + \
TAB_LINE_BREAK)
return CapInfFile
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/Capsule.py
|
## @file
# process FFS generation
#
# Copyright (c) 2007-2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from Common.DataType import *
# mapping between FILE type in FDF and file type for GenFfs
FdfFvFileTypeToFileType = {
SUP_MODULE_SEC : 'EFI_FV_FILETYPE_SECURITY_CORE',
SUP_MODULE_PEI_CORE : 'EFI_FV_FILETYPE_PEI_CORE',
SUP_MODULE_PEIM : 'EFI_FV_FILETYPE_PEIM',
SUP_MODULE_DXE_CORE : 'EFI_FV_FILETYPE_DXE_CORE',
'FREEFORM' : 'EFI_FV_FILETYPE_FREEFORM',
'DRIVER' : 'EFI_FV_FILETYPE_DRIVER',
'APPLICATION' : 'EFI_FV_FILETYPE_APPLICATION',
'FV_IMAGE' : 'EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE',
'RAW' : 'EFI_FV_FILETYPE_RAW',
'PEI_DXE_COMBO' : 'EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER',
'SMM' : 'EFI_FV_FILETYPE_SMM',
SUP_MODULE_SMM_CORE : 'EFI_FV_FILETYPE_SMM_CORE',
SUP_MODULE_MM_STANDALONE : 'EFI_FV_FILETYPE_MM_STANDALONE',
SUP_MODULE_MM_CORE_STANDALONE : 'EFI_FV_FILETYPE_MM_CORE_STANDALONE'
}
# mapping between section type in FDF and file suffix
SectionSuffix = {
BINARY_FILE_TYPE_PE32 : '.pe32',
BINARY_FILE_TYPE_PIC : '.pic',
BINARY_FILE_TYPE_TE : '.te',
BINARY_FILE_TYPE_DXE_DEPEX : '.dpx',
'VERSION' : '.ver',
BINARY_FILE_TYPE_UI : '.ui',
'COMPAT16' : '.com16',
'RAW' : '.raw',
'FREEFORM_SUBTYPE_GUID': '.guid',
'SUBTYPE_GUID' : '.guid',
'FV_IMAGE' : 'fv.sec',
'COMPRESS' : '.com',
'GUIDED' : '.guided',
BINARY_FILE_TYPE_PEI_DEPEX : '.dpx',
BINARY_FILE_TYPE_SMM_DEPEX : '.dpx'
}
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/Ffs.py
|
## @file
# process depex section generation
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from . import Section
from .GenFdsGlobalVariable import GenFdsGlobalVariable
import Common.LongFilePathOs as os
from CommonDataClass.FdfClass import DepexSectionClassObject
from AutoGen.GenDepex import DependencyExpression
from Common import EdkLogger
from Common.BuildToolError import *
from Common.Misc import PathClass
from Common.DataType import *
## generate data section
#
#
class DepexSection (DepexSectionClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
DepexSectionClassObject.__init__(self)
def __FindGuidValue(self, CName):
for Arch in GenFdsGlobalVariable.ArchList:
PkgList = GenFdsGlobalVariable.WorkSpace.GetPackageList(GenFdsGlobalVariable.ActivePlatform,
Arch,
GenFdsGlobalVariable.TargetName,
GenFdsGlobalVariable.ToolChainTag)
for Inf in GenFdsGlobalVariable.FdfParser.Profile.InfList:
ModuleData = GenFdsGlobalVariable.WorkSpace.BuildObject[
PathClass(Inf, GenFdsGlobalVariable.WorkSpaceDir),
Arch,
GenFdsGlobalVariable.TargetName,
GenFdsGlobalVariable.ToolChainTag
]
for Pkg in ModuleData.Packages:
if Pkg not in PkgList:
PkgList.append(Pkg)
for PkgDb in PkgList:
if CName in PkgDb.Ppis:
return PkgDb.Ppis[CName]
if CName in PkgDb.Protocols:
return PkgDb.Protocols[CName]
if CName in PkgDb.Guids:
return PkgDb.Guids[CName]
return None
    ## GenSection() method
    #
    #   Generate depex section
#
# @param self The object pointer
# @param OutputPath Where to place output file
# @param ModuleName Which module this section belongs to
# @param SecNum Index of section
# @param KeyStringList Filter for inputs of section generation
# @param FfsInf FfsInfStatement object that contains this section data
# @param Dict dictionary contains macro and its value
# @retval tuple (Generated file name list, section alignment)
#
def GenSection(self, OutputPath, ModuleName, SecNum, keyStringList, FfsFile = None, Dict = None, IsMakefile = False):
        if not self.ExpressionProcessed:
self.Expression = self.Expression.replace("\n", " ").replace("\r", " ")
ExpList = self.Expression.split()
for Exp in ExpList:
if Exp.upper() not in ('AND', 'OR', 'NOT', 'TRUE', 'FALSE', 'SOR', 'BEFORE', 'AFTER', 'END'):
GuidStr = self.__FindGuidValue(Exp)
if GuidStr is None:
EdkLogger.error("GenFds", RESOURCE_NOT_AVAILABLE,
"Depex GUID %s could not be found in build DB! (ModuleName: %s)" % (Exp, ModuleName))
self.Expression = self.Expression.replace(Exp, GuidStr)
self.Expression = self.Expression.strip()
self.ExpressionProcessed = True
if self.DepexType == 'PEI_DEPEX_EXP':
ModuleType = SUP_MODULE_PEIM
SecType = BINARY_FILE_TYPE_PEI_DEPEX
elif self.DepexType == 'DXE_DEPEX_EXP':
ModuleType = SUP_MODULE_DXE_DRIVER
SecType = BINARY_FILE_TYPE_DXE_DEPEX
elif self.DepexType == 'SMM_DEPEX_EXP':
ModuleType = SUP_MODULE_DXE_SMM_DRIVER
SecType = BINARY_FILE_TYPE_SMM_DEPEX
else:
EdkLogger.error("GenFds", FORMAT_INVALID,
"Depex type %s is not valid for module %s" % (self.DepexType, ModuleName))
InputFile = os.path.join (OutputPath, ModuleName + SUP_MODULE_SEC + SecNum + '.depex')
InputFile = os.path.normpath(InputFile)
Depex = DependencyExpression(self.Expression, ModuleType)
Depex.Generate(InputFile)
OutputFile = os.path.join (OutputPath, ModuleName + SUP_MODULE_SEC + SecNum + '.dpx')
OutputFile = os.path.normpath(OutputFile)
GenFdsGlobalVariable.GenerateSection(OutputFile, [InputFile], Section.Section.SectionType.get (SecType), IsMakefile=IsMakefile)
return [OutputFile], self.Alignment
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/DepexSection.py
|
## @file
# generate capsule
#
# Copyright (c) 2007-2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from io import BytesIO
from struct import pack
import os
from Common.Misc import SaveFileOnChange
import uuid
## base class for capsule data
#
#
class CapsuleData:
## The constructor
#
# @param self The object pointer
def __init__(self):
pass
## generate capsule data
#
# @param self The object pointer
def GenCapsuleSubItem(self):
pass
## FFS class for capsule data
#
#
class CapsuleFfs (CapsuleData):
## The constructor
#
# @param self The object pointer
#
def __init__(self) :
self.Ffs = None
self.FvName = None
## generate FFS capsule data
#
# @param self The object pointer
# @retval string Generated file name
#
def GenCapsuleSubItem(self):
FfsFile = self.Ffs.GenFfs()
return FfsFile
## FV class for capsule data
#
#
class CapsuleFv (CapsuleData):
## The constructor
#
# @param self The object pointer
#
def __init__(self) :
self.Ffs = None
self.FvName = None
self.CapsuleName = None
## generate FV capsule data
#
# @param self The object pointer
# @retval string Generated file name
#
def GenCapsuleSubItem(self):
if self.FvName.find('.fv') == -1:
if self.FvName.upper() in GenFdsGlobalVariable.FdfParser.Profile.FvDict:
FvObj = GenFdsGlobalVariable.FdfParser.Profile.FvDict[self.FvName.upper()]
FdBuffer = BytesIO()
FvObj.CapsuleName = self.CapsuleName
FvFile = FvObj.AddToBuffer(FdBuffer)
FvObj.CapsuleName = None
FdBuffer.close()
return FvFile
else:
FvFile = GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.FvName)
return FvFile
## FD class for capsule data
#
#
class CapsuleFd (CapsuleData):
## The constructor
#
# @param self The object pointer
#
def __init__(self) :
self.Ffs = None
self.FdName = None
self.CapsuleName = None
## generate FD capsule data
#
# @param self The object pointer
# @retval string Generated file name
#
def GenCapsuleSubItem(self):
if self.FdName.find('.fd') == -1:
if self.FdName.upper() in GenFdsGlobalVariable.FdfParser.Profile.FdDict:
FdObj = GenFdsGlobalVariable.FdfParser.Profile.FdDict[self.FdName.upper()]
FdFile = FdObj.GenFd()
return FdFile
else:
FdFile = GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.FdName)
return FdFile
## AnyFile class for capsule data
#
#
class CapsuleAnyFile (CapsuleData):
## The constructor
#
# @param self The object pointer
#
def __init__(self) :
self.Ffs = None
self.FileName = None
## generate AnyFile capsule data
#
# @param self The object pointer
# @retval string Generated file name
#
def GenCapsuleSubItem(self):
return self.FileName
## Afile class for capsule data
#
#
class CapsuleAfile (CapsuleData):
## The constructor
#
# @param self The object pointer
#
def __init__(self) :
self.Ffs = None
self.FileName = None
## generate Afile capsule data
#
# @param self The object pointer
# @retval string Generated file name
#
def GenCapsuleSubItem(self):
return self.FileName
class CapsulePayload(CapsuleData):
'''Generate payload file, the header is defined below:
#pragma pack(1)
typedef struct {
UINT32 Version;
EFI_GUID UpdateImageTypeId;
UINT8 UpdateImageIndex;
UINT8 reserved_bytes[3];
UINT32 UpdateImageSize;
UINT32 UpdateVendorCodeSize;
UINT64 UpdateHardwareInstance; //Introduced in v2
} EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER;
'''
def __init__(self):
self.UiName = None
self.Version = None
self.ImageTypeId = None
self.ImageIndex = None
self.HardwareInstance = None
self.ImageFile = []
self.VendorCodeFile = []
self.Certificate_Guid = None
self.MonotonicCount = None
self.Existed = False
self.Buffer = None
    def GenCapsuleSubItem(self, AuthData=None):
        # avoid a shared mutable default argument
        if AuthData is None:
            AuthData = []
if not self.Version:
self.Version = '0x00000002'
if not self.ImageIndex:
self.ImageIndex = '0x1'
if not self.HardwareInstance:
self.HardwareInstance = '0x0'
ImageFileSize = os.path.getsize(self.ImageFile)
if AuthData:
            # ImageFileSize needs to include the full authentication info size,
            # from the first byte of MonotonicCount to the last byte of the certificate.
            # The 32 bytes are MonotonicCount, dwLength, wRevision, wCertificateType and CertType.
ImageFileSize += 32
VendorFileSize = 0
if self.VendorCodeFile:
VendorFileSize = os.path.getsize(self.VendorCodeFile)
#
# Fill structure
#
Guid = self.ImageTypeId.split('-')
Buffer = pack('=ILHHBBBBBBBBBBBBIIQ',
int(self.Version, 16),
int(Guid[0], 16),
int(Guid[1], 16),
int(Guid[2], 16),
int(Guid[3][-4:-2], 16),
int(Guid[3][-2:], 16),
int(Guid[4][-12:-10], 16),
int(Guid[4][-10:-8], 16),
int(Guid[4][-8:-6], 16),
int(Guid[4][-6:-4], 16),
int(Guid[4][-4:-2], 16),
int(Guid[4][-2:], 16),
int(self.ImageIndex, 16),
0,
0,
0,
ImageFileSize,
VendorFileSize,
int(self.HardwareInstance, 16)
)
if AuthData:
Buffer += pack('QIHH', AuthData[0], AuthData[1], AuthData[2], AuthData[3])
Buffer += uuid.UUID(AuthData[4]).bytes_le
#
# Append file content to the structure
#
ImageFile = open(self.ImageFile, 'rb')
Buffer += ImageFile.read()
ImageFile.close()
if self.VendorCodeFile:
VendorFile = open(self.VendorCodeFile, 'rb')
Buffer += VendorFile.read()
VendorFile.close()
self.Existed = True
return Buffer
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/CapsuleData.py
|
## @file
# process compress section generation
#
# Copyright (c) 2007 - 2017, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from .Ffs import SectionSuffix
from . import Section
import subprocess
import Common.LongFilePathOs as os
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from CommonDataClass.FdfClass import CompressSectionClassObject
from Common.DataType import *
## generate compress section
#
#
class CompressSection (CompressSectionClassObject) :
## compress types: PI standard and non PI standard
CompTypeDict = {
'PI_STD' : 'PI_STD',
'PI_NONE' : 'PI_NONE'
}
## The constructor
#
# @param self The object pointer
#
def __init__(self):
CompressSectionClassObject.__init__(self)
## GenSection() method
#
# Generate compressed section
#
# @param self The object pointer
# @param OutputPath Where to place output file
# @param ModuleName Which module this section belongs to
# @param SecNum Index of section
# @param KeyStringList Filter for inputs of section generation
# @param FfsInf FfsInfStatement object that contains this section data
# @param Dict dictionary contains macro and its value
# @retval tuple (Generated file name, section alignment)
#
def GenSection(self, OutputPath, ModuleName, SecNum, KeyStringList, FfsInf = None, Dict = None, IsMakefile = False):
if FfsInf is not None:
self.CompType = FfsInf.__ExtendMacro__(self.CompType)
self.Alignment = FfsInf.__ExtendMacro__(self.Alignment)
SectFiles = tuple()
SectAlign = []
Index = 0
MaxAlign = None
if Dict is None:
Dict = {}
for Sect in self.SectionList:
Index = Index + 1
SecIndex = '%s.%d' %(SecNum, Index)
ReturnSectList, AlignValue = Sect.GenSection(OutputPath, ModuleName, SecIndex, KeyStringList, FfsInf, Dict, IsMakefile=IsMakefile)
if AlignValue is not None:
if MaxAlign is None:
MaxAlign = AlignValue
if GenFdsGlobalVariable.GetAlignment (AlignValue) > GenFdsGlobalVariable.GetAlignment (MaxAlign):
MaxAlign = AlignValue
if ReturnSectList != []:
if AlignValue is None:
AlignValue = "1"
for FileData in ReturnSectList:
SectFiles += (FileData,)
SectAlign.append(AlignValue)
OutputFile = OutputPath + \
os.sep + \
ModuleName + \
SUP_MODULE_SEC + \
SecNum + \
SectionSuffix['COMPRESS']
OutputFile = os.path.normpath(OutputFile)
DummyFile = OutputFile + '.dummy'
GenFdsGlobalVariable.GenerateSection(DummyFile, SectFiles, InputAlign=SectAlign, IsMakefile=IsMakefile)
GenFdsGlobalVariable.GenerateSection(OutputFile, [DummyFile], Section.Section.SectionType['COMPRESS'],
CompressionType=self.CompTypeDict[self.CompType], IsMakefile=IsMakefile)
OutputFileList = []
OutputFileList.append(OutputFile)
return OutputFileList, self.Alignment
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/CompressSection.py
|
## @file
# process FD Region generation
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from struct import *
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from io import BytesIO
import string
import Common.LongFilePathOs as os
from stat import *
from Common import EdkLogger
from Common.BuildToolError import *
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
from Common.DataType import BINARY_FILE_TYPE_FV
## generate Region
#
#
class Region(object):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
self.Offset = None # The begin position of the Region
self.Size = None # The Size of the Region
self.PcdOffset = None
self.PcdSize = None
self.SetVarDict = {}
self.RegionType = None
self.RegionDataList = []
## PadBuffer()
#
# Add padding bytes to the Buffer
#
# @param Buffer The buffer the generated region data will be put
# in
# @param ErasePolarity Flash erase polarity
# @param Size Number of padding bytes requested
#
def PadBuffer(self, Buffer, ErasePolarity, Size):
if Size > 0:
if (ErasePolarity == '1') :
PadByte = pack('B', 0xFF)
else:
PadByte = pack('B', 0)
for i in range(0, Size):
Buffer.write(PadByte)
## AddToBuffer()
#
# Add region data to the Buffer
#
# @param self The object pointer
# @param Buffer The buffer generated region data will be put
# @param BaseAddress base address of region
# @param BlockSize block size of region
# @param BlockNum How many blocks in region
# @param ErasePolarity Flash erase polarity
# @param MacroDict macro value pair
# @retval string Generated FV file path
#
def AddToBuffer(self, Buffer, BaseAddress, BlockSizeList, ErasePolarity, ImageBinDict, MacroDict=None, Flag=False):
Size = self.Size
if MacroDict is None:
MacroDict = {}
if not Flag:
GenFdsGlobalVariable.InfLogger('\nGenerate Region at Offset 0x%X' % self.Offset)
GenFdsGlobalVariable.InfLogger(" Region Size = 0x%X" % Size)
GenFdsGlobalVariable.SharpCounter = 0
if Flag and (self.RegionType != BINARY_FILE_TYPE_FV):
return
if self.RegionType == BINARY_FILE_TYPE_FV:
#
# Get Fv from FvDict
#
self.FvAddress = int(BaseAddress, 16) + self.Offset
FvBaseAddress = '0x%X' % self.FvAddress
FvOffset = 0
for RegionData in self.RegionDataList:
FileName = None
if RegionData.endswith(".fv"):
RegionData = GenFdsGlobalVariable.MacroExtend(RegionData, MacroDict)
if not Flag:
GenFdsGlobalVariable.InfLogger(' Region FV File Name = .fv : %s' % RegionData)
if RegionData[1] != ':' :
RegionData = mws.join (GenFdsGlobalVariable.WorkSpaceDir, RegionData)
if not os.path.exists(RegionData):
EdkLogger.error("GenFds", FILE_NOT_FOUND, ExtraData=RegionData)
FileName = RegionData
elif RegionData.upper() + 'fv' in ImageBinDict:
if not Flag:
GenFdsGlobalVariable.InfLogger(' Region Name = FV')
FileName = ImageBinDict[RegionData.upper() + 'fv']
else:
#
# Generate FvImage.
#
FvObj = None
if RegionData.upper() in GenFdsGlobalVariable.FdfParser.Profile.FvDict:
FvObj = GenFdsGlobalVariable.FdfParser.Profile.FvDict[RegionData.upper()]
if FvObj is not None :
if not Flag:
GenFdsGlobalVariable.InfLogger(' Region Name = FV')
#
# Call GenFv tool
#
self.BlockInfoOfRegion(BlockSizeList, FvObj)
self.FvAddress = self.FvAddress + FvOffset
FvAlignValue = GenFdsGlobalVariable.GetAlignment(FvObj.FvAlignment)
if self.FvAddress % FvAlignValue != 0:
EdkLogger.error("GenFds", GENFDS_ERROR,
"FV (%s) is NOT %s Aligned!" % (FvObj.UiFvName, FvObj.FvAlignment))
FvBuffer = BytesIO()
FvBaseAddress = '0x%X' % self.FvAddress
BlockSize = None
BlockNum = None
FvObj.AddToBuffer(FvBuffer, FvBaseAddress, BlockSize, BlockNum, ErasePolarity, Flag=Flag)
if Flag:
continue
FvBufferLen = len(FvBuffer.getvalue())
if FvBufferLen > Size:
FvBuffer.close()
EdkLogger.error("GenFds", GENFDS_ERROR,
"Size of FV (%s) is larger than Region Size 0x%X specified." % (RegionData, Size))
#
# Put the generated image into FD buffer.
#
Buffer.write(FvBuffer.getvalue())
FvBuffer.close()
FvOffset = FvOffset + FvBufferLen
Size = Size - FvBufferLen
continue
else:
EdkLogger.error("GenFds", GENFDS_ERROR, "FV (%s) is NOT described in FDF file!" % (RegionData))
#
# Add the exist Fv image into FD buffer
#
if not Flag:
if FileName is not None:
FileLength = os.stat(FileName)[ST_SIZE]
if FileLength > Size:
EdkLogger.error("GenFds", GENFDS_ERROR,
"Size of FV File (%s) is larger than Region Size 0x%X specified." \
% (RegionData, Size))
BinFile = open(FileName, 'rb')
Buffer.write(BinFile.read())
BinFile.close()
Size = Size - FileLength
#
# Pad the left buffer
#
if not Flag:
self.PadBuffer(Buffer, ErasePolarity, Size)
if self.RegionType == 'CAPSULE':
#
# Get Capsule from Capsule Dict
#
for RegionData in self.RegionDataList:
if RegionData.endswith(".cap"):
RegionData = GenFdsGlobalVariable.MacroExtend(RegionData, MacroDict)
GenFdsGlobalVariable.InfLogger(' Region CAPSULE Image Name = .cap : %s' % RegionData)
if RegionData[1] != ':' :
RegionData = mws.join (GenFdsGlobalVariable.WorkSpaceDir, RegionData)
if not os.path.exists(RegionData):
EdkLogger.error("GenFds", FILE_NOT_FOUND, ExtraData=RegionData)
FileName = RegionData
elif RegionData.upper() + 'cap' in ImageBinDict:
GenFdsGlobalVariable.InfLogger(' Region Name = CAPSULE')
FileName = ImageBinDict[RegionData.upper() + 'cap']
else:
#
# Generate Capsule image and Put it into FD buffer
#
CapsuleObj = None
if RegionData.upper() in GenFdsGlobalVariable.FdfParser.Profile.CapsuleDict:
CapsuleObj = GenFdsGlobalVariable.FdfParser.Profile.CapsuleDict[RegionData.upper()]
if CapsuleObj is not None :
CapsuleObj.CapsuleName = RegionData.upper()
GenFdsGlobalVariable.InfLogger(' Region Name = CAPSULE')
#
# Call GenFv tool to generate Capsule Image
#
FileName = CapsuleObj.GenCapsule()
CapsuleObj.CapsuleName = None
else:
EdkLogger.error("GenFds", GENFDS_ERROR, "Capsule (%s) is NOT described in FDF file!" % (RegionData))
#
# Add the capsule image into FD buffer
#
FileLength = os.stat(FileName)[ST_SIZE]
if FileLength > Size:
EdkLogger.error("GenFds", GENFDS_ERROR,
"Size 0x%X of Capsule File (%s) is larger than Region Size 0x%X specified." \
% (FileLength, RegionData, Size))
BinFile = open(FileName, 'rb')
Buffer.write(BinFile.read())
BinFile.close()
Size = Size - FileLength
#
# Pad the left buffer
#
self.PadBuffer(Buffer, ErasePolarity, Size)
if self.RegionType in ('FILE', 'INF'):
for RegionData in self.RegionDataList:
if self.RegionType == 'INF':
RegionData.__InfParse__(None)
if len(RegionData.BinFileList) != 1:
EdkLogger.error('GenFds', GENFDS_ERROR, 'INF in FD region can only contain one binary: %s' % RegionData)
File = RegionData.BinFileList[0]
RegionData = RegionData.PatchEfiFile(File.Path, File.Type)
else:
RegionData = GenFdsGlobalVariable.MacroExtend(RegionData, MacroDict)
if RegionData[1] != ':' :
RegionData = mws.join (GenFdsGlobalVariable.WorkSpaceDir, RegionData)
if not os.path.exists(RegionData):
EdkLogger.error("GenFds", FILE_NOT_FOUND, ExtraData=RegionData)
#
# Add the file image into FD buffer
#
FileLength = os.stat(RegionData)[ST_SIZE]
if FileLength > Size:
EdkLogger.error("GenFds", GENFDS_ERROR,
"Size of File (%s) is larger than Region Size 0x%X specified." \
% (RegionData, Size))
GenFdsGlobalVariable.InfLogger(' Region File Name = %s' % RegionData)
BinFile = open(RegionData, 'rb')
Buffer.write(BinFile.read())
BinFile.close()
Size = Size - FileLength
#
# Pad the left buffer
#
self.PadBuffer(Buffer, ErasePolarity, Size)
if self.RegionType == 'DATA' :
GenFdsGlobalVariable.InfLogger(' Region Name = DATA')
DataSize = 0
for RegionData in self.RegionDataList:
Data = RegionData.split(',')
DataSize = DataSize + len(Data)
if DataSize > Size:
EdkLogger.error("GenFds", GENFDS_ERROR, "Size of DATA is larger than Region Size ")
else:
for item in Data :
Buffer.write(pack('B', int(item, 16)))
Size = Size - DataSize
#
# Pad the left buffer
#
self.PadBuffer(Buffer, ErasePolarity, Size)
if self.RegionType is None:
GenFdsGlobalVariable.InfLogger(' Region Name = None')
self.PadBuffer(Buffer, ErasePolarity, Size)
    ## BlockInfoOfRegion()
#
# @param BlockSizeList List of block information
# @param FvObj The object for FV
#
def BlockInfoOfRegion(self, BlockSizeList, FvObj):
Start = 0
End = 0
        RemainingSize = self.Size
ExpectedList = []
for (BlockSize, BlockNum, pcd) in BlockSizeList:
End = Start + BlockSize * BlockNum
# region not started yet
if self.Offset >= End:
Start = End
continue
# region located in current blocks
else:
# region ended within current blocks
if self.Offset + self.Size <= End:
                    ExpectedList.append((BlockSize, (RemainingSize + BlockSize - 1) // BlockSize))
break
# region not ended yet
else:
# region not started in middle of current blocks
if self.Offset <= Start:
UsedBlockNum = BlockNum
# region started in middle of current blocks
else:
UsedBlockNum = (End - self.Offset) // BlockSize
Start = End
ExpectedList.append((BlockSize, UsedBlockNum))
                    RemainingSize -= BlockSize * UsedBlockNum
if FvObj.BlockSizeList == []:
FvObj.BlockSizeList = ExpectedList
else:
# first check whether FvObj.BlockSizeList items have only "BlockSize" or "NumBlocks",
# if so, use ExpectedList
for Item in FvObj.BlockSizeList:
if Item[0] is None or Item[1] is None:
FvObj.BlockSizeList = ExpectedList
break
# make sure region size is no smaller than the summed block size in FV
Sum = 0
for Item in FvObj.BlockSizeList:
Sum += Item[0] * Item[1]
if self.Size < Sum:
EdkLogger.error("GenFds", GENFDS_ERROR, "Total Size of FV %s 0x%x is larger than Region Size 0x%x "
% (FvObj.UiFvName, Sum, self.Size))
# check whether the BlockStatements in FV section is appropriate
ExpectedListData = ''
for Item in ExpectedList:
ExpectedListData += "BlockSize = 0x%x\n\tNumBlocks = 0x%x\n\t" % Item
Index = 0
for Item in FvObj.BlockSizeList:
if Item[0] != ExpectedList[Index][0]:
EdkLogger.error("GenFds", GENFDS_ERROR, "BlockStatements of FV %s are not align with FD's, suggested FV BlockStatement"
% FvObj.UiFvName, ExtraData=ExpectedListData)
elif Item[1] != ExpectedList[Index][1]:
if (Item[1] < ExpectedList[Index][1]) and (Index == len(FvObj.BlockSizeList) - 1):
                    break
else:
EdkLogger.error("GenFds", GENFDS_ERROR, "BlockStatements of FV %s are not align with FD's, suggested FV BlockStatement"
% FvObj.UiFvName, ExtraData=ExpectedListData)
else:
Index += 1
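        # Worked example (hypothetical numbers): for a region with
        # Offset = 0x1000 and Size = 0x3000 in an FD whose BlockSizeList is
        # [(0x1000, 8, pcd)], the first loop above sees End = 0x8000, the
        # region starts and ends inside those blocks, and ExpectedList becomes
        # [(0x1000, 3)]: three 4 KiB blocks exactly cover the region.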
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/Region.py
|
## @file
# process FV image section generation
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from . import Section
from io import BytesIO
from .Ffs import SectionSuffix
import subprocess
from .GenFdsGlobalVariable import GenFdsGlobalVariable
import Common.LongFilePathOs as os
from CommonDataClass.FdfClass import FvImageSectionClassObject
from Common.MultipleWorkspace import MultipleWorkspace as mws
from Common import EdkLogger
from Common.BuildToolError import *
from Common.DataType import *
## generate FV image section
#
#
class FvImageSection(FvImageSectionClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
FvImageSectionClassObject.__init__(self)
## GenSection() method
#
# Generate FV image section
#
# @param self The object pointer
# @param OutputPath Where to place output file
# @param ModuleName Which module this section belongs to
# @param SecNum Index of section
# @param KeyStringList Filter for inputs of section generation
# @param FfsInf FfsInfStatement object that contains this section data
# @param Dict dictionary contains macro and its value
# @retval tuple (Generated file name, section alignment)
#
def GenSection(self, OutputPath, ModuleName, SecNum, KeyStringList, FfsInf = None, Dict = None, IsMakefile = False):
OutputFileList = []
if Dict is None:
Dict = {}
if self.FvFileType is not None:
FileList, IsSect = Section.Section.GetFileList(FfsInf, self.FvFileType, self.FvFileExtension)
if IsSect :
return FileList, self.Alignment
Num = SecNum
MaxFvAlignment = 0
for FvFileName in FileList:
FvAlignmentValue = 0
if os.path.isfile(FvFileName):
FvFileObj = open (FvFileName, 'rb')
FvFileObj.seek(0)
                    # PI FvHeader is 0x48 bytes
FvHeaderBuffer = FvFileObj.read(0x48)
# FV alignment position.
if isinstance(FvHeaderBuffer[0x2E], str):
FvAlignmentValue = 1 << (ord(FvHeaderBuffer[0x2E]) & 0x1F)
else:
FvAlignmentValue = 1 << (FvHeaderBuffer[0x2E] & 0x1F)
FvFileObj.close()
if FvAlignmentValue > MaxFvAlignment:
MaxFvAlignment = FvAlignmentValue
OutputFile = os.path.join(OutputPath, ModuleName + SUP_MODULE_SEC + Num + SectionSuffix.get("FV_IMAGE"))
GenFdsGlobalVariable.GenerateSection(OutputFile, [FvFileName], 'EFI_SECTION_FIRMWARE_VOLUME_IMAGE', IsMakefile=IsMakefile)
OutputFileList.append(OutputFile)
# MaxFvAlignment is larger than or equal to 1K
if MaxFvAlignment >= 0x400:
if MaxFvAlignment >= 0x100000:
#The max alignment supported by FFS is 16M.
if MaxFvAlignment >= 0x1000000:
self.Alignment = "16M"
else:
self.Alignment = str(MaxFvAlignment // 0x100000) + "M"
else:
self.Alignment = str (MaxFvAlignment // 0x400) + "K"
else:
# MaxFvAlignment is less than 1K
self.Alignment = str (MaxFvAlignment)
return OutputFileList, self.Alignment
#
# Generate Fv
#
if self.FvName is not None:
Buffer = BytesIO()
Fv = GenFdsGlobalVariable.FdfParser.Profile.FvDict.get(self.FvName)
if Fv is not None:
self.Fv = Fv
if not self.FvAddr and self.Fv.BaseAddress:
self.FvAddr = self.Fv.BaseAddress
FvFileName = Fv.AddToBuffer(Buffer, self.FvAddr, MacroDict = Dict, Flag=IsMakefile)
if Fv.FvAlignment is not None:
if self.Alignment is None:
self.Alignment = Fv.FvAlignment
else:
if GenFdsGlobalVariable.GetAlignment (Fv.FvAlignment) > GenFdsGlobalVariable.GetAlignment (self.Alignment):
self.Alignment = Fv.FvAlignment
else:
if self.FvFileName is not None:
FvFileName = GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.FvFileName)
if os.path.isfile(FvFileName):
FvFileObj = open (FvFileName, 'rb')
FvFileObj.seek(0)
                    # PI FvHeader is 0x48 bytes
FvHeaderBuffer = FvFileObj.read(0x48)
# FV alignment position.
if isinstance(FvHeaderBuffer[0x2E], str):
FvAlignmentValue = 1 << (ord(FvHeaderBuffer[0x2E]) & 0x1F)
else:
FvAlignmentValue = 1 << (FvHeaderBuffer[0x2E] & 0x1F)
# FvAlignmentValue is larger than or equal to 1K
if FvAlignmentValue >= 0x400:
if FvAlignmentValue >= 0x100000:
#The max alignment supported by FFS is 16M.
if FvAlignmentValue >= 0x1000000:
self.Alignment = "16M"
else:
self.Alignment = str(FvAlignmentValue // 0x100000) + "M"
else:
self.Alignment = str (FvAlignmentValue // 0x400) + "K"
else:
# FvAlignmentValue is less than 1K
self.Alignment = str (FvAlignmentValue)
FvFileObj.close()
else:
if len (mws.getPkgPath()) == 0:
EdkLogger.error("GenFds", FILE_NOT_FOUND, "%s is not found in WORKSPACE: %s" % self.FvFileName, GenFdsGlobalVariable.WorkSpaceDir)
else:
EdkLogger.error("GenFds", FILE_NOT_FOUND, "%s is not found in packages path:\n\t%s" % (self.FvFileName, '\n\t'.join(mws.getPkgPath())))
else:
EdkLogger.error("GenFds", GENFDS_ERROR, "FvImageSection Failed! %s NOT found in FDF" % self.FvName)
#
# Prepare the parameter of GenSection
#
OutputFile = os.path.join(OutputPath, ModuleName + SUP_MODULE_SEC + SecNum + SectionSuffix.get("FV_IMAGE"))
GenFdsGlobalVariable.GenerateSection(OutputFile, [FvFileName], 'EFI_SECTION_FIRMWARE_VOLUME_IMAGE', IsMakefile=IsMakefile)
OutputFileList.append(OutputFile)
return OutputFileList, self.Alignment
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/FvImageSection.py
|
## @file
# process OptionROM generation
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
import Common.LongFilePathOs as os
import subprocess
from . import OptRomInfStatement
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from CommonDataClass.FdfClass import OptionRomClassObject
from Common.Misc import SaveFileOnChange
from Common import EdkLogger
from Common.BuildToolError import *
##
#
#
class OPTIONROM (OptionRomClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self, Name = ""):
OptionRomClassObject.__init__(self)
self.DriverName = Name
## AddToBuffer()
#
# Generate Option ROM
#
# @param self The object pointer
    # @param Buffer The buffer into which the generated OptROM data will be put
# @retval string Generated OptROM file path
#
def AddToBuffer (self, Buffer, Flag=False) :
if not Flag:
GenFdsGlobalVariable.InfLogger( "\nGenerating %s Option ROM ..." %self.DriverName)
EfiFileList = []
BinFileList = []
# Process Modules in FfsList
for FfsFile in self.FfsList :
if isinstance(FfsFile, OptRomInfStatement.OptRomInfStatement):
FilePathNameList = FfsFile.GenFfs(IsMakefile=Flag)
if len(FilePathNameList) == 0:
EdkLogger.error("GenFds", GENFDS_ERROR, "Module %s not produce .efi files, so NO file could be put into option ROM." % (FfsFile.InfFileName))
if FfsFile.OverrideAttribs is None:
EfiFileList.extend(FilePathNameList)
else:
FileName = os.path.basename(FilePathNameList[0])
TmpOutputDir = os.path.join(GenFdsGlobalVariable.FvDir, self.DriverName, FfsFile.CurrentArch)
if not os.path.exists(TmpOutputDir) :
os.makedirs(TmpOutputDir)
TmpOutputFile = os.path.join(TmpOutputDir, FileName+'.tmp')
GenFdsGlobalVariable.GenerateOptionRom(TmpOutputFile,
FilePathNameList,
[],
FfsFile.OverrideAttribs.NeedCompress,
FfsFile.OverrideAttribs.PciClassCode,
FfsFile.OverrideAttribs.PciRevision,
FfsFile.OverrideAttribs.PciDeviceId,
FfsFile.OverrideAttribs.PciVendorId,
IsMakefile = Flag)
BinFileList.append(TmpOutputFile)
else:
FilePathName = FfsFile.GenFfs(IsMakefile=Flag)
if FfsFile.OverrideAttribs is not None:
FileName = os.path.basename(FilePathName)
TmpOutputDir = os.path.join(GenFdsGlobalVariable.FvDir, self.DriverName, FfsFile.CurrentArch)
if not os.path.exists(TmpOutputDir) :
os.makedirs(TmpOutputDir)
TmpOutputFile = os.path.join(TmpOutputDir, FileName+'.tmp')
GenFdsGlobalVariable.GenerateOptionRom(TmpOutputFile,
[FilePathName],
[],
FfsFile.OverrideAttribs.NeedCompress,
FfsFile.OverrideAttribs.PciClassCode,
FfsFile.OverrideAttribs.PciRevision,
FfsFile.OverrideAttribs.PciDeviceId,
FfsFile.OverrideAttribs.PciVendorId,
IsMakefile=Flag)
BinFileList.append(TmpOutputFile)
else:
if FfsFile.FileType == 'EFI':
EfiFileList.append(FilePathName)
else:
BinFileList.append(FilePathName)
#
# Call EfiRom tool
#
OutputFile = os.path.join(GenFdsGlobalVariable.FvDir, self.DriverName)
OutputFile = OutputFile + '.rom'
GenFdsGlobalVariable.GenerateOptionRom(
OutputFile,
EfiFileList,
BinFileList,
IsMakefile=Flag)
if not Flag:
            GenFdsGlobalVariable.InfLogger( "\nGenerated %s Option ROM successfully" %self.DriverName)
GenFdsGlobalVariable.SharpCounter = 0
return OutputFile
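# Illustrative flow (names are hypothetical): an OPTIONROM built from an FDF
# [OptionRom.MyRomDriver] section collects the .efi images produced by its INF
# statements into EfiFileList, pre-built binaries into BinFileList, and hands
# both lists to the EfiRom tool, yielding <FV_DIR>/MyRomDriver.rom.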
class OverrideAttribs:
## The constructor
#
# @param self The object pointer
#
def __init__(self):
self.PciVendorId = None
self.PciClassCode = None
self.PciDeviceId = None
self.PciRevision = None
self.NeedCompress = None
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/OptionRom.py
|
## @file
# parse FDF file
#
# Copyright (c) 2007 - 2021, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2015, Hewlett Packard Enterprise Development, L.P.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import print_function
from __future__ import absolute_import
from re import compile, DOTALL
from string import hexdigits
from uuid import UUID
from Common.BuildToolError import *
from Common import EdkLogger
from Common.Misc import PathClass, tdict, ProcessDuplicatedInf, GuidStructureStringToGuidString
from Common.StringUtils import NormPath, ReplaceMacro
from Common import GlobalData
from Common.Expression import *
from Common.DataType import *
from Common.MultipleWorkspace import MultipleWorkspace as mws
import Common.LongFilePathOs as os
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.RangeExpression import RangeExpression
from collections import OrderedDict
from .Fd import FD
from .Region import Region
from .Fv import FV
from .AprioriSection import AprioriSection
from .FfsInfStatement import FfsInfStatement
from .FfsFileStatement import FileStatement
from .VerSection import VerSection
from .UiSection import UiSection
from .FvImageSection import FvImageSection
from .DataSection import DataSection
from .DepexSection import DepexSection
from .CompressSection import CompressSection
from .GuidSection import GuidSection
from .SubTypeGuidSection import SubTypeGuidSection
from .Capsule import EFI_CERT_TYPE_PKCS7_GUID, EFI_CERT_TYPE_RSA2048_SHA256_GUID, Capsule
from .CapsuleData import CapsuleFfs, CapsulePayload, CapsuleFv, CapsuleFd, CapsuleAnyFile, CapsuleAfile
from .RuleComplexFile import RuleComplexFile
from .RuleSimpleFile import RuleSimpleFile
from .EfiSection import EfiSection
from .OptionRom import OPTIONROM
from .OptRomInfStatement import OptRomInfStatement, OverrideAttribs
from .OptRomFileStatement import OptRomFileStatement
from .GenFdsGlobalVariable import GenFdsGlobalVariable
T_CHAR_CR = '\r'
T_CHAR_TAB = '\t'
T_CHAR_DOUBLE_QUOTE = '\"'
T_CHAR_SINGLE_QUOTE = '\''
T_CHAR_BRACE_R = '}'
SEPARATORS = {TAB_EQUAL_SPLIT, TAB_VALUE_SPLIT, TAB_COMMA_SPLIT, '{', T_CHAR_BRACE_R}
ALIGNMENTS = {"Auto", "8", "16", "32", "64", "128", "512", "1K", "4K", "32K", "64K", "128K",
"256K", "512K", "1M", "2M", "4M", "8M", "16M"}
ALIGNMENT_NOAUTO = ALIGNMENTS - {"Auto"}
CR_LB_SET = {T_CHAR_CR, TAB_LINE_BREAK}
RegionSizePattern = compile(r"\s*(?P<base>(?:0x|0X)?[a-fA-F0-9]+)\s*\|\s*(?P<size>(?:0x|0X)?[a-fA-F0-9]+)\s*")
RegionSizeGuidPattern = compile(r"\s*(?P<base>\w+\.\w+[\.\w\[\]]*)\s*\|\s*(?P<size>\w+\.\w+[\.\w\[\]]*)\s*")
RegionOffsetPcdPattern = compile(r"\s*(?P<base>\w+\.\w+[\.\w\[\]]*)\s*$")
ShortcutPcdPattern = compile(r"\s*\w+\s*=\s*(?P<value>(?:0x|0X)?[a-fA-F0-9]+)\s*\|\s*(?P<name>\w+\.\w+)\s*")
BaseAddrValuePattern = compile('^0[xX][0-9a-fA-F]+')
FileExtensionPattern = compile(r'([a-zA-Z][a-zA-Z0-9]*)')
TokenFindPattern = compile(r'([a-zA-Z0-9\-]+|\$\(TARGET\)|\*)_([a-zA-Z0-9\-]+|\$\(TOOL_CHAIN_TAG\)|\*)_([a-zA-Z0-9\-]+|\$\(ARCH\)|\*)')
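# Illustrative strings matched by the patterns above (example values, not from
# the source):
#   RegionSizePattern     : "0x100000 | 0x200000"            (offset | size)
#   RegionSizeGuidPattern : "gGuid.PcdBase | gGuid.PcdSize"
#   RegionOffsetPcdPattern: "gGuid.PcdOffset"
#   ShortcutPcdPattern    : "BaseAddress = 0xFF800000 | gGuid.PcdFdBase"
#   TokenFindPattern      : "DEBUG_GCC5_X64" or "*_*_IA32"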
AllIncludeFileList = []
# Get the closest parent
def GetParentAtLine (Line):
for Profile in AllIncludeFileList:
if Profile.IsLineInFile(Line):
return Profile
return None
# Check include loop
def IsValidInclude (File, Line):
for Profile in AllIncludeFileList:
if Profile.IsLineInFile(Line) and Profile.FileName == File:
return False
return True
def GetRealFileLine (File, Line):
InsertedLines = 0
for Profile in AllIncludeFileList:
if Profile.IsLineInFile(Line):
return Profile.GetLineInFile(Line)
elif Line >= Profile.InsertStartLineNumber and Profile.Level == 1:
InsertedLines += Profile.GetTotalLines()
return (File, Line - InsertedLines)
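# Worked example (hypothetical numbers): if a level-1 include file was spliced
# in starting at expanded line 6 and occupies 10 lines (lines 6-15), a token at
# expanded line 20 lies past that block, so GetRealFileLine returns
# (File, 20 - 10), i.e. line 10 of the top-level file.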
## The exception class that is used to report error messages when parsing the FDF
#
# Currently the "ToolName" is set to be "FdfParser".
#
class Warning (Exception):
## The constructor
#
# @param self The object pointer
# @param Str The message to record
# @param File The FDF name
# @param Line The Line number that error occurs
#
def __init__(self, Str, File = None, Line = None):
FileLineTuple = GetRealFileLine(File, Line)
self.FileName = FileLineTuple[0]
self.LineNumber = FileLineTuple[1]
self.OriginalLineNumber = Line
self.Message = Str
self.ToolName = 'FdfParser'
def __str__(self):
return self.Message
# helper functions to facilitate consistency in warnings
# each function is for a different common warning
@staticmethod
def Expected(Str, File, Line):
return Warning("expected {}".format(Str), File, Line)
@staticmethod
def ExpectedEquals(File, Line):
return Warning.Expected("'='", File, Line)
@staticmethod
def ExpectedCurlyOpen(File, Line):
return Warning.Expected("'{'", File, Line)
@staticmethod
def ExpectedCurlyClose(File, Line):
return Warning.Expected("'}'", File, Line)
@staticmethod
def ExpectedBracketClose(File, Line):
return Warning.Expected("']'", File, Line)
## The include file content class that is used to record file data when parsing an include file
#
# May raise Exception when opening file.
#
class IncludeFileProfile:
## The constructor
#
# @param self The object pointer
# @param FileName The file that to be parsed
#
def __init__(self, FileName):
self.FileName = FileName
self.FileLinesList = []
try:
with open(FileName, "r") as fsock:
self.FileLinesList = fsock.readlines()
for index, line in enumerate(self.FileLinesList):
if not line.endswith(TAB_LINE_BREAK):
self.FileLinesList[index] += TAB_LINE_BREAK
except:
EdkLogger.error("FdfParser", FILE_OPEN_FAILURE, ExtraData=FileName)
self.InsertStartLineNumber = None
self.InsertAdjust = 0
self.IncludeFileList = []
self.Level = 1 # first level include file
def GetTotalLines(self):
TotalLines = self.InsertAdjust + len(self.FileLinesList)
for Profile in self.IncludeFileList:
TotalLines += Profile.GetTotalLines()
return TotalLines
def IsLineInFile(self, Line):
if Line >= self.InsertStartLineNumber and Line < self.InsertStartLineNumber + self.GetTotalLines():
return True
return False
def GetLineInFile(self, Line):
if not self.IsLineInFile (Line):
return (self.FileName, -1)
InsertedLines = self.InsertStartLineNumber
for Profile in self.IncludeFileList:
if Profile.IsLineInFile(Line):
return Profile.GetLineInFile(Line)
elif Line >= Profile.InsertStartLineNumber:
InsertedLines += Profile.GetTotalLines()
return (self.FileName, Line - InsertedLines + 1)
## The FDF content class that is used to record file data when parsing the FDF
#
# May raise Exception when opening file.
#
class FileProfile:
## The constructor
#
# @param self The object pointer
# @param FileName The file that to be parsed
#
def __init__(self, FileName):
self.FileLinesList = []
try:
with open(FileName, "r") as fsock:
self.FileLinesList = fsock.readlines()
except:
EdkLogger.error("FdfParser", FILE_OPEN_FAILURE, ExtraData=FileName)
self.FileName = FileName
self.PcdDict = OrderedDict()
self.PcdLocalDict = OrderedDict()
self.InfList = []
self.InfDict = {'ArchTBD':[]}
# ECC will use this Dict and List information
self.PcdFileLineDict = {}
self.InfFileLineList = []
self.FdDict = {}
self.FdNameNotSet = False
self.FvDict = {}
self.CapsuleDict = {}
self.RuleDict = {}
self.OptRomDict = {}
self.FmpPayloadDict = {}
## The syntax parser for FDF
#
# PreprocessFile method should be called prior to ParseFile
# CycleReferenceCheck method can detect cycles in FDF contents
#
# GetNext*** procedures fetch the next token first and then make a judgement on it.
# Get*** procedures make a judgement on the current token only.
#
class FdfParser:
## The constructor
#
# @param self The object pointer
# @param FileName The file that to be parsed
#
def __init__(self, FileName):
self.Profile = FileProfile(FileName)
self.FileName = FileName
self.CurrentLineNumber = 1
self.CurrentOffsetWithinLine = 0
self.CurrentFdName = None
self.CurrentFvName = None
self._Token = ""
self._SkippedChars = ""
GlobalData.gFdfParser = self
        # Current section info
self._CurSection = []
# Key: [section name, UI name, arch]
# Value: {MACRO_NAME: MACRO_VALUE}
self._MacroDict = tdict(True, 3)
self._PcdDict = OrderedDict()
self._WipeOffArea = []
if GenFdsGlobalVariable.WorkSpaceDir == '':
GenFdsGlobalVariable.WorkSpaceDir = os.getenv("WORKSPACE")
## _SkipWhiteSpace() method
#
# Skip white spaces from current char.
#
# @param self The object pointer
#
def _SkipWhiteSpace(self):
while not self._EndOfFile():
if self._CurrentChar() in {TAB_PRINTCHAR_NUL, T_CHAR_CR, TAB_LINE_BREAK, TAB_SPACE_SPLIT, T_CHAR_TAB}:
self._SkippedChars += str(self._CurrentChar())
self._GetOneChar()
else:
return
return
## _EndOfFile() method
#
    # Judge whether the current buffer position is at the end of the file
#
# @param self The object pointer
# @retval True Current File buffer position is at file end
# @retval False Current File buffer position is NOT at file end
#
def _EndOfFile(self):
NumberOfLines = len(self.Profile.FileLinesList)
SizeOfLastLine = len(self.Profile.FileLinesList[-1])
if self.CurrentLineNumber == NumberOfLines and self.CurrentOffsetWithinLine >= SizeOfLastLine - 1:
return True
if self.CurrentLineNumber > NumberOfLines:
return True
return False
## _EndOfLine() method
#
    # Judge whether the current buffer position is at the end of the line
#
# @param self The object pointer
# @retval True Current File buffer position is at line end
# @retval False Current File buffer position is NOT at line end
#
def _EndOfLine(self):
if self.CurrentLineNumber > len(self.Profile.FileLinesList):
return True
SizeOfCurrentLine = len(self.Profile.FileLinesList[self.CurrentLineNumber - 1])
if self.CurrentOffsetWithinLine >= SizeOfCurrentLine:
return True
return False
## Rewind() method
#
# Reset file data buffer to the initial state
#
# @param self The object pointer
# @param DestLine Optional new destination line number.
# @param DestOffset Optional new destination offset.
#
def Rewind(self, DestLine = 1, DestOffset = 0):
self.CurrentLineNumber = DestLine
self.CurrentOffsetWithinLine = DestOffset
## _UndoOneChar() method
#
# Go back one char in the file buffer
#
# @param self The object pointer
# @retval True Successfully go back one char
# @retval False Not able to go back one char as file beginning reached
#
def _UndoOneChar(self):
if self.CurrentLineNumber == 1 and self.CurrentOffsetWithinLine == 0:
return False
elif self.CurrentOffsetWithinLine == 0:
self.CurrentLineNumber -= 1
self.CurrentOffsetWithinLine = len(self._CurrentLine()) - 1
else:
self.CurrentOffsetWithinLine -= 1
return True
## _GetOneChar() method
#
# Move forward one char in the file buffer
#
# @param self The object pointer
#
def _GetOneChar(self):
if self.CurrentOffsetWithinLine == len(self.Profile.FileLinesList[self.CurrentLineNumber - 1]) - 1:
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
else:
self.CurrentOffsetWithinLine += 1
## _CurrentChar() method
#
# Get the char pointed to by the file buffer pointer
#
# @param self The object pointer
# @retval Char Current char
#
def _CurrentChar(self):
return self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine]
## _NextChar() method
#
    # Get the char one past the char pointed to by the file buffer pointer
#
# @param self The object pointer
# @retval Char Next char
#
def _NextChar(self):
if self.CurrentOffsetWithinLine == len(self.Profile.FileLinesList[self.CurrentLineNumber - 1]) - 1:
return self.Profile.FileLinesList[self.CurrentLineNumber][0]
return self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine + 1]
## _SetCurrentCharValue() method
#
# Modify the value of current char
#
# @param self The object pointer
# @param Value The new value of current char
#
def _SetCurrentCharValue(self, Value):
self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine] = Value
## _CurrentLine() method
#
# Get the list that contains current line contents
#
# @param self The object pointer
# @retval List current line contents
#
def _CurrentLine(self):
return self.Profile.FileLinesList[self.CurrentLineNumber - 1]
def _StringToList(self):
self.Profile.FileLinesList = [list(s) for s in self.Profile.FileLinesList]
if not self.Profile.FileLinesList:
EdkLogger.error('FdfParser', FILE_READ_FAILURE, 'The file is empty!', File=self.FileName)
self.Profile.FileLinesList[-1].append(' ')
def _ReplaceFragment(self, StartPos, EndPos, Value = ' '):
if StartPos[0] == EndPos[0]:
Offset = StartPos[1]
while Offset <= EndPos[1]:
self.Profile.FileLinesList[StartPos[0]][Offset] = Value
Offset += 1
return
Offset = StartPos[1]
while self.Profile.FileLinesList[StartPos[0]][Offset] not in CR_LB_SET:
self.Profile.FileLinesList[StartPos[0]][Offset] = Value
Offset += 1
Line = StartPos[0]
while Line < EndPos[0]:
Offset = 0
while self.Profile.FileLinesList[Line][Offset] not in CR_LB_SET:
self.Profile.FileLinesList[Line][Offset] = Value
Offset += 1
Line += 1
Offset = 0
while Offset <= EndPos[1]:
self.Profile.FileLinesList[EndPos[0]][Offset] = Value
Offset += 1
def _SetMacroValue(self, Macro, Value):
if not self._CurSection:
return
MacroDict = {}
if not self._MacroDict[self._CurSection[0], self._CurSection[1], self._CurSection[2]]:
self._MacroDict[self._CurSection[0], self._CurSection[1], self._CurSection[2]] = MacroDict
else:
MacroDict = self._MacroDict[self._CurSection[0], self._CurSection[1], self._CurSection[2]]
MacroDict[Macro] = Value
def _GetMacroValue(self, Macro):
# Highest priority
if Macro in GlobalData.gCommandLineDefines:
return GlobalData.gCommandLineDefines[Macro]
if Macro in GlobalData.gGlobalDefines:
return GlobalData.gGlobalDefines[Macro]
if self._CurSection:
MacroDict = self._MacroDict[
self._CurSection[0],
self._CurSection[1],
self._CurSection[2]
]
if MacroDict and Macro in MacroDict:
return MacroDict[Macro]
# Lowest priority
if Macro in GlobalData.gPlatformDefines:
return GlobalData.gPlatformDefines[Macro]
return None
def _SectionHeaderParser(self, Section):
# [Defines]
        # [FD.UiName]: a dummy name is used when the optional UI name is absent
# [FV.UiName]
# [Capsule.UiName]
# [Rule]: don't take rule section into account, macro is not allowed in this section
# [OptionRom.DriverName]
self._CurSection = []
Section = Section.strip()[1:-1].upper().replace(' ', '').strip(TAB_SPLIT)
ItemList = Section.split(TAB_SPLIT)
Item = ItemList[0]
if Item == '' or Item == 'RULE':
return
if Item == TAB_COMMON_DEFINES.upper():
self._CurSection = [TAB_COMMON, TAB_COMMON, TAB_COMMON]
elif len(ItemList) > 1:
self._CurSection = [ItemList[0], ItemList[1], TAB_COMMON]
elif len(ItemList) > 0:
self._CurSection = [ItemList[0], 'DUMMY', TAB_COMMON]
## PreprocessFile() method
#
# Preprocess file contents, replace comments with spaces.
# In the end, rewind the file buffer pointer to the beginning
    # BUGBUG: No !include statement processing is done in this procedure;
    # an !include statement should be expanded into the same FileLinesList[CurrentLineNumber - 1]
#
# @param self The object pointer
#
def PreprocessFile(self):
self.Rewind()
InComment = False
DoubleSlashComment = False
HashComment = False
# HashComment in quoted string " " is ignored.
InString = False
while not self._EndOfFile():
if self._CurrentChar() == T_CHAR_DOUBLE_QUOTE and not InComment:
InString = not InString
# meet new line, then no longer in a comment for // and '#'
if self._CurrentChar() == TAB_LINE_BREAK:
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
if InComment and DoubleSlashComment:
InComment = False
DoubleSlashComment = False
if InComment and HashComment:
InComment = False
HashComment = False
# check for */ comment end
elif InComment and not DoubleSlashComment and not HashComment and self._CurrentChar() == TAB_STAR and self._NextChar() == TAB_BACK_SLASH:
self._SetCurrentCharValue(TAB_SPACE_SPLIT)
self._GetOneChar()
self._SetCurrentCharValue(TAB_SPACE_SPLIT)
self._GetOneChar()
InComment = False
# set comments to spaces
elif InComment:
self._SetCurrentCharValue(TAB_SPACE_SPLIT)
self._GetOneChar()
# check for // comment
elif self._CurrentChar() == TAB_BACK_SLASH and self._NextChar() == TAB_BACK_SLASH and not self._EndOfLine():
InComment = True
DoubleSlashComment = True
# check for '#' comment
elif self._CurrentChar() == TAB_COMMENT_SPLIT and not self._EndOfLine() and not InString:
InComment = True
HashComment = True
# check for /* comment start
elif self._CurrentChar() == TAB_BACK_SLASH and self._NextChar() == TAB_STAR:
self._SetCurrentCharValue(TAB_SPACE_SPLIT)
self._GetOneChar()
self._SetCurrentCharValue(TAB_SPACE_SPLIT)
self._GetOneChar()
InComment = True
else:
self._GetOneChar()
# restore from ListOfList to ListOfString
self.Profile.FileLinesList = ["".join(list) for list in self.Profile.FileLinesList]
self.Rewind()
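    # Illustrative effect of PreprocessFile (example line, not from the
    # source): in
    #   BaseAddress = 0x1000  # tail comment
    # the '#' and everything after it are overwritten with spaces, so the line
    # count and the column positions of the surviving tokens are preserved.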
## PreprocessIncludeFile() method
#
# Preprocess file contents, replace !include statements with file contents.
# In the end, rewind the file buffer pointer to the beginning
#
# @param self The object pointer
#
def PreprocessIncludeFile(self):
# nested include support
Processed = False
MacroDict = {}
while self._GetNextToken():
if self._Token == TAB_DEFINE:
if not self._GetNextToken():
raise Warning.Expected("Macro name", self.FileName, self.CurrentLineNumber)
Macro = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
Value = self._GetExpression()
MacroDict[Macro] = Value
elif self._Token == TAB_INCLUDE:
Processed = True
IncludeLine = self.CurrentLineNumber
IncludeOffset = self.CurrentOffsetWithinLine - len(TAB_INCLUDE)
if not self._GetNextToken():
raise Warning.Expected("include file name", self.FileName, self.CurrentLineNumber)
IncFileName = self._Token
PreIndex = 0
StartPos = IncFileName.find('$(', PreIndex)
EndPos = IncFileName.find(')', StartPos+2)
while StartPos != -1 and EndPos != -1:
Macro = IncFileName[StartPos+2: EndPos]
MacroVal = self._GetMacroValue(Macro)
if not MacroVal:
if Macro in MacroDict:
MacroVal = MacroDict[Macro]
if MacroVal is not None:
IncFileName = IncFileName.replace('$(' + Macro + ')', MacroVal, 1)
if MacroVal.find('$(') != -1:
PreIndex = StartPos
else:
PreIndex = StartPos + len(MacroVal)
else:
raise Warning("The Macro %s is not defined" %Macro, self.FileName, self.CurrentLineNumber)
StartPos = IncFileName.find('$(', PreIndex)
EndPos = IncFileName.find(')', StartPos+2)
IncludedFile = NormPath(IncFileName)
#
# First search the include file under the same directory as FDF file
#
IncludedFile1 = PathClass(IncludedFile, os.path.dirname(self.FileName))
ErrorCode = IncludedFile1.Validate()[0]
if ErrorCode != 0:
#
# Then search the include file under the same directory as DSC file
#
PlatformDir = ''
if GenFdsGlobalVariable.ActivePlatform:
PlatformDir = GenFdsGlobalVariable.ActivePlatform.Dir
elif GlobalData.gActivePlatform:
PlatformDir = GlobalData.gActivePlatform.MetaFile.Dir
IncludedFile1 = PathClass(IncludedFile, PlatformDir)
ErrorCode = IncludedFile1.Validate()[0]
if ErrorCode != 0:
#
# Also search file under the WORKSPACE directory
#
IncludedFile1 = PathClass(IncludedFile, GlobalData.gWorkspace)
ErrorCode = IncludedFile1.Validate()[0]
if ErrorCode != 0:
raise Warning("The include file does not exist under below directories: \n%s\n%s\n%s\n"%(os.path.dirname(self.FileName), PlatformDir, GlobalData.gWorkspace),
self.FileName, self.CurrentLineNumber)
if not IsValidInclude (IncludedFile1.Path, self.CurrentLineNumber):
raise Warning("The include file {0} is causing a include loop.\n".format (IncludedFile1.Path), self.FileName, self.CurrentLineNumber)
IncFileProfile = IncludeFileProfile(IncludedFile1.Path)
CurrentLine = self.CurrentLineNumber
CurrentOffset = self.CurrentOffsetWithinLine
# list index of the insertion, note that line number is 'CurrentLine + 1'
InsertAtLine = CurrentLine
ParentProfile = GetParentAtLine (CurrentLine)
if ParentProfile is not None:
ParentProfile.IncludeFileList.insert(0, IncFileProfile)
IncFileProfile.Level = ParentProfile.Level + 1
IncFileProfile.InsertStartLineNumber = InsertAtLine + 1
                # Deal with any remaining portion after "!include filename" on the same line.
if self._GetNextToken():
if self.CurrentLineNumber == CurrentLine:
RemainingLine = self._CurrentLine()[CurrentOffset:]
self.Profile.FileLinesList.insert(self.CurrentLineNumber, RemainingLine)
IncFileProfile.InsertAdjust += 1
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
for Line in IncFileProfile.FileLinesList:
self.Profile.FileLinesList.insert(InsertAtLine, Line)
self.CurrentLineNumber += 1
InsertAtLine += 1
                # Insert at the front (reverse order) so the most recently included file is checked first when locating errors.
AllIncludeFileList.insert(0, IncFileProfile)
# comment out the processed include file statement
TempList = list(self.Profile.FileLinesList[IncludeLine - 1])
TempList.insert(IncludeOffset, TAB_COMMENT_SPLIT)
self.Profile.FileLinesList[IncludeLine - 1] = ''.join(TempList)
if Processed: # Nested and back-to-back support
self.Rewind(DestLine = IncFileProfile.InsertStartLineNumber - 1)
Processed = False
# Preprocess done.
self.Rewind()
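    # Illustrative expansion (hypothetical file name): the statement
    #   !include $(PLATFORM_DIR)/CommonDefines.fdf
    # has its macros resolved, is searched for under the FDF directory, the
    # DSC directory, and then $(WORKSPACE); the included lines are spliced into
    # FileLinesList right after the !include line, and the statement itself is
    # commented out with a leading '#'.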
@staticmethod
def _GetIfListCurrentItemStat(IfList):
if len(IfList) == 0:
return True
for Item in IfList:
if Item[1] == False:
return False
return True
## PreprocessConditionalStatement() method
#
# Preprocess conditional statement.
# In the end, rewind the file buffer pointer to the beginning
#
# @param self The object pointer
#
def PreprocessConditionalStatement(self):
# IfList is a stack of if branches with elements of list [Pos, CondSatisfied, BranchDetermined]
IfList = []
RegionLayoutLine = 0
ReplacedLine = -1
while self._GetNextToken():
# Determine section name and the location dependent macro
if self._GetIfListCurrentItemStat(IfList):
if self._Token.startswith(TAB_SECTION_START):
Header = self._Token
if not self._Token.endswith(TAB_SECTION_END):
self._SkipToToken(TAB_SECTION_END)
Header += self._SkippedChars
if Header.find('$(') != -1:
raise Warning("macro cannot be used in section header", self.FileName, self.CurrentLineNumber)
self._SectionHeaderParser(Header)
continue
                # Replace macros, except in the RULE section or outside of any section
elif self._CurSection and ReplacedLine != self.CurrentLineNumber:
ReplacedLine = self.CurrentLineNumber
self._UndoToken()
CurLine = self.Profile.FileLinesList[ReplacedLine - 1]
PreIndex = 0
StartPos = CurLine.find('$(', PreIndex)
EndPos = CurLine.find(')', StartPos+2)
while StartPos != -1 and EndPos != -1 and self._Token not in {TAB_IF_DEF, TAB_IF_N_DEF, TAB_IF, TAB_ELSE_IF}:
MacroName = CurLine[StartPos+2: EndPos]
MacroValue = self._GetMacroValue(MacroName)
if MacroValue is not None:
CurLine = CurLine.replace('$(' + MacroName + ')', MacroValue, 1)
if MacroValue.find('$(') != -1:
PreIndex = StartPos
else:
PreIndex = StartPos + len(MacroValue)
else:
PreIndex = EndPos + 1
StartPos = CurLine.find('$(', PreIndex)
EndPos = CurLine.find(')', StartPos+2)
self.Profile.FileLinesList[ReplacedLine - 1] = CurLine
continue
if self._Token == TAB_DEFINE:
if self._GetIfListCurrentItemStat(IfList):
if not self._CurSection:
raise Warning("macro cannot be defined in Rule section or out of section", self.FileName, self.CurrentLineNumber)
DefineLine = self.CurrentLineNumber - 1
DefineOffset = self.CurrentOffsetWithinLine - len(TAB_DEFINE)
if not self._GetNextToken():
raise Warning.Expected("Macro name", self.FileName, self.CurrentLineNumber)
Macro = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
Value = self._GetExpression()
self._SetMacroValue(Macro, Value)
self._WipeOffArea.append(((DefineLine, DefineOffset), (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
elif self._Token == 'SET':
if not self._GetIfListCurrentItemStat(IfList):
continue
SetLine = self.CurrentLineNumber - 1
SetOffset = self.CurrentOffsetWithinLine - len('SET')
PcdPair = self._GetNextPcdSettings()
PcdName = "%s.%s" % (PcdPair[1], PcdPair[0])
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
Value = self._GetExpression()
Value = self._EvaluateConditional(Value, self.CurrentLineNumber, 'eval', True)
self._PcdDict[PcdName] = Value
self.Profile.PcdDict[PcdPair] = Value
self.SetPcdLocalation(PcdPair)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[PcdPair] = FileLineTuple
self._WipeOffArea.append(((SetLine, SetOffset), (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
elif self._Token in {TAB_IF_DEF, TAB_IF_N_DEF, TAB_IF}:
IfStartPos = (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len(self._Token))
IfList.append([IfStartPos, None, None])
CondLabel = self._Token
Expression = self._GetExpression()
if CondLabel == TAB_IF:
ConditionSatisfied = self._EvaluateConditional(Expression, IfList[-1][0][0] + 1, 'eval')
else:
ConditionSatisfied = self._EvaluateConditional(Expression, IfList[-1][0][0] + 1, 'in')
if CondLabel == TAB_IF_N_DEF:
ConditionSatisfied = not ConditionSatisfied
BranchDetermined = ConditionSatisfied
IfList[-1] = [IfList[-1][0], ConditionSatisfied, BranchDetermined]
if ConditionSatisfied:
self._WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
elif self._Token in {TAB_ELSE_IF, TAB_ELSE}:
ElseStartPos = (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len(self._Token))
if len(IfList) <= 0:
raise Warning("Missing !if statement", self.FileName, self.CurrentLineNumber)
if IfList[-1][1]:
IfList[-1] = [ElseStartPos, False, True]
self._WipeOffArea.append((ElseStartPos, (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
else:
self._WipeOffArea.append((IfList[-1][0], ElseStartPos))
IfList[-1] = [ElseStartPos, True, IfList[-1][2]]
if self._Token == TAB_ELSE_IF:
Expression = self._GetExpression()
ConditionSatisfied = self._EvaluateConditional(Expression, IfList[-1][0][0] + 1, 'eval')
IfList[-1] = [IfList[-1][0], ConditionSatisfied, IfList[-1][2]]
if IfList[-1][1]:
if IfList[-1][2]:
IfList[-1][1] = False
else:
IfList[-1][2] = True
self._WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
elif self._Token == '!endif':
if len(IfList) <= 0:
raise Warning("Missing !if statement", self.FileName, self.CurrentLineNumber)
if IfList[-1][1]:
self._WipeOffArea.append(((self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len('!endif')), (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
else:
self._WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
IfList.pop()
elif not IfList: # Don't use PCDs inside conditional directive
if self.CurrentLineNumber <= RegionLayoutLine:
# Don't try the same line twice
continue
SetPcd = ShortcutPcdPattern.match(self.Profile.FileLinesList[self.CurrentLineNumber - 1])
if SetPcd:
self._PcdDict[SetPcd.group('name')] = SetPcd.group('value')
RegionLayoutLine = self.CurrentLineNumber
continue
RegionSize = RegionSizePattern.match(self.Profile.FileLinesList[self.CurrentLineNumber - 1])
if not RegionSize:
RegionLayoutLine = self.CurrentLineNumber
continue
RegionSizeGuid = RegionSizeGuidPattern.match(self.Profile.FileLinesList[self.CurrentLineNumber])
if not RegionSizeGuid:
RegionLayoutLine = self.CurrentLineNumber + 1
continue
self._PcdDict[RegionSizeGuid.group('base')] = RegionSize.group('base')
self._PcdDict[RegionSizeGuid.group('size')] = RegionSize.group('size')
RegionLayoutLine = self.CurrentLineNumber + 1
if IfList:
raise Warning("Missing !endif", self.FileName, self.CurrentLineNumber)
self.Rewind()
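    # Illustrative wipe (hypothetical macro): given
    #   !if $(SECURE_BOOT_ENABLE) == TRUE
    #     <lines A>
    #   !else
    #     <lines B>
    #   !endif
    # with the condition false, the directives and <lines A> are recorded in
    # self._WipeOffArea and later blanked out, while <lines B> keep their
    # original line numbers.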
def _CollectMacroPcd(self):
MacroDict = {}
# PCD macro
MacroDict.update(GlobalData.gPlatformPcds)
MacroDict.update(self._PcdDict)
# Lowest priority
MacroDict.update(GlobalData.gPlatformDefines)
if self._CurSection:
# Defines macro
ScopeMacro = self._MacroDict[TAB_COMMON, TAB_COMMON, TAB_COMMON]
if ScopeMacro:
MacroDict.update(ScopeMacro)
# Section macro
ScopeMacro = self._MacroDict[
self._CurSection[0],
self._CurSection[1],
self._CurSection[2]
]
if ScopeMacro:
MacroDict.update(ScopeMacro)
MacroDict.update(GlobalData.gGlobalDefines)
MacroDict.update(GlobalData.gCommandLineDefines)
for Item in GlobalData.BuildOptionPcd:
if isinstance(Item, tuple):
continue
PcdName, TmpValue = Item.split(TAB_EQUAL_SPLIT)
TmpValue = BuildOptionValue(TmpValue, {})
MacroDict[PcdName.strip()] = TmpValue
# Highest priority
return MacroDict
def _EvaluateConditional(self, Expression, Line, Op = None, Value = None):
MacroPcdDict = self._CollectMacroPcd()
if Op == 'eval':
try:
if Value:
return ValueExpression(Expression, MacroPcdDict)(True)
else:
return ValueExpression(Expression, MacroPcdDict)()
except WrnExpression as Excpt:
#
# Catch expression evaluation warning here. We need to report
                # the precise line number and return the evaluation result
#
EdkLogger.warn('Parser', "Suspicious expression: %s" % str(Excpt),
File=self.FileName, ExtraData=self._CurrentLine(),
Line=Line)
return Excpt.result
except Exception as Excpt:
if hasattr(Excpt, 'Pcd'):
if Excpt.Pcd in GlobalData.gPlatformOtherPcds:
Info = GlobalData.gPlatformOtherPcds[Excpt.Pcd]
raise Warning("Cannot use this PCD (%s) in an expression as"
" it must be defined in a [PcdsFixedAtBuild] or [PcdsFeatureFlag] section"
" of the DSC file (%s), and it is currently defined in this section:"
" %s, line #: %d." % (Excpt.Pcd, GlobalData.gPlatformOtherPcds['DSCFILE'], Info[0], Info[1]),
self.FileName, Line)
else:
raise Warning("PCD (%s) is not defined in DSC file (%s)" % (Excpt.Pcd, GlobalData.gPlatformOtherPcds['DSCFILE']),
self.FileName, Line)
else:
raise Warning(str(Excpt), self.FileName, Line)
else:
if Expression.startswith('$(') and Expression[-1] == ')':
Expression = Expression[2:-1]
return Expression in MacroPcdDict
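    # Illustrative calls (hypothetical macro names):
    #   _EvaluateConditional('$(TARGET) == "DEBUG"', Line, Op='eval')
    #       evaluates the expression through ValueExpression;
    #   _EvaluateConditional('$(FEATURE_MACRO)', Line, Op='in')
    #       is the !ifdef/!ifndef path and only checks membership in the
    #       collected macro/PCD dictionary.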
## _IsToken() method
#
    # Check whether the input string is found starting at the current char position.
    # If found, the string value is put into self._Token.
#
# @param self The object pointer
# @param String The string to search
# @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
# @retval True Successfully find string, file buffer pointer moved forward
# @retval False Not able to find string, file buffer pointer not changed
#
def _IsToken(self, String, IgnoreCase = False):
self._SkipWhiteSpace()
# Only consider the same line, no multi-line token allowed
StartPos = self.CurrentOffsetWithinLine
index = -1
if IgnoreCase:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].upper().find(String.upper())
else:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].find(String)
if index == 0:
self.CurrentOffsetWithinLine += len(String)
self._Token = self._CurrentLine()[StartPos: self.CurrentOffsetWithinLine]
return True
return False
## _IsKeyword() method
#
    # Check whether the input keyword is found starting at the current char position; whole word only!
    # If found, the string value is put into self._Token.
#
# @param self The object pointer
# @param Keyword The string to search
# @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
# @retval True Successfully find string, file buffer pointer moved forward
# @retval False Not able to find string, file buffer pointer not changed
#
def _IsKeyword(self, KeyWord, IgnoreCase = False):
self._SkipWhiteSpace()
# Only consider the same line, no multi-line token allowed
StartPos = self.CurrentOffsetWithinLine
index = -1
if IgnoreCase:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].upper().find(KeyWord.upper())
else:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].find(KeyWord)
if index == 0:
followingChar = self._CurrentLine()[self.CurrentOffsetWithinLine + len(KeyWord)]
if not str(followingChar).isspace() and followingChar not in SEPARATORS:
return False
self.CurrentOffsetWithinLine += len(KeyWord)
self._Token = self._CurrentLine()[StartPos: self.CurrentOffsetWithinLine]
return True
return False
def _GetExpression(self):
Line = self.Profile.FileLinesList[self.CurrentLineNumber - 1]
Index = len(Line) - 1
while Line[Index] in CR_LB_SET:
Index -= 1
ExpressionString = self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:Index+1]
self.CurrentOffsetWithinLine += len(ExpressionString)
ExpressionString = ExpressionString.strip()
return ExpressionString
## _GetNextWord() method
#
# Get next C name from file lines
# If found, the string value is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a C name string, file buffer pointer moved forward
# @retval False Not able to find a C name string, file buffer pointer not changed
#
def _GetNextWord(self):
self._SkipWhiteSpace()
if self._EndOfFile():
return False
TempChar = self._CurrentChar()
StartPos = self.CurrentOffsetWithinLine
if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') or TempChar == '_':
self._GetOneChar()
while not self._EndOfLine():
TempChar = self._CurrentChar()
if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') \
or (TempChar >= '0' and TempChar <= '9') or TempChar == '_' or TempChar == '-':
self._GetOneChar()
else:
break
self._Token = self._CurrentLine()[StartPos: self.CurrentOffsetWithinLine]
return True
return False
def _GetNextPcdWord(self):
self._SkipWhiteSpace()
if self._EndOfFile():
return False
TempChar = self._CurrentChar()
StartPos = self.CurrentOffsetWithinLine
if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') or TempChar == '_' or TempChar == TAB_SECTION_START or TempChar == TAB_SECTION_END:
self._GetOneChar()
while not self._EndOfLine():
TempChar = self._CurrentChar()
if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') \
or (TempChar >= '0' and TempChar <= '9') or TempChar == '_' or TempChar == '-' or TempChar == TAB_SECTION_START or TempChar == TAB_SECTION_END:
self._GetOneChar()
else:
break
self._Token = self._CurrentLine()[StartPos: self.CurrentOffsetWithinLine]
return True
return False
## _GetNextToken() method
#
# Get next token unit before a separator
# If found, the string value is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a token unit, file buffer pointer moved forward
# @retval False Not able to find a token unit, file buffer pointer not changed
#
def _GetNextToken(self):
# Skip leading spaces, if exist.
self._SkipWhiteSpace()
if self._EndOfFile():
return False
# Record the token start position, the position of the first non-space char.
StartPos = self.CurrentOffsetWithinLine
StartLine = self.CurrentLineNumber
while StartLine == self.CurrentLineNumber:
TempChar = self._CurrentChar()
            # Try to find the end char: a space or any char in the separator set.
            # That is, when we hit a space or any char in SEPARATORS, we have reached the end of the token.
            if not str(TempChar).isspace() and TempChar not in SEPARATORS:
                self._GetOneChar()
            # If we happen to meet a separator as the first char, we must proceed to consume it.
            # That is, we return a token that is the separator char itself; normally it is the boundary of other tokens.
elif StartPos == self.CurrentOffsetWithinLine and TempChar in SEPARATORS:
self._GetOneChar()
break
else:
break
# else:
# return False
EndPos = self.CurrentOffsetWithinLine
if self.CurrentLineNumber != StartLine:
EndPos = len(self.Profile.FileLinesList[StartLine-1])
self._Token = self.Profile.FileLinesList[StartLine-1][StartPos: EndPos]
if self._Token.lower() in {TAB_IF, TAB_END_IF, TAB_ELSE_IF, TAB_ELSE, TAB_IF_DEF, TAB_IF_N_DEF, TAB_ERROR, TAB_INCLUDE}:
self._Token = self._Token.lower()
if StartPos != self.CurrentOffsetWithinLine:
return True
else:
return False
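    # Illustrative tokenization (example line, not from the source): the line
    #   BaseAddress = 0x1000|gGuid.PcdAddr
    # yields the successive tokens "BaseAddress", "=", "0x1000", "|" and
    # "gGuid.PcdAddr", because '=' and '|' are in SEPARATORS and a leading
    # separator is returned as a one-char token of its own.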
## _GetNextGuid() method
#
# Get next token unit before a separator
# If found, the GUID string is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a registry format GUID, file buffer pointer moved forward
# @retval False Not able to find a registry format GUID, file buffer pointer not changed
#
def _GetNextGuid(self):
if not self._GetNextToken():
return False
if GlobalData.gGuidPattern.match(self._Token) is not None:
return True
elif self._Token in GlobalData.gGuidDict:
return True
else:
self._UndoToken()
return False
@staticmethod
def _Verify(Name, Value, Scope):
# value verification only applies to numeric values.
if Scope not in TAB_PCD_NUMERIC_TYPES:
return
ValueNumber = 0
try:
ValueNumber = int(Value, 0)
except:
EdkLogger.error("FdfParser", FORMAT_INVALID, "The value is not valid dec or hex number for %s." % Name)
if ValueNumber < 0:
EdkLogger.error("FdfParser", FORMAT_INVALID, "The value can't be set to negative value for %s." % Name)
if ValueNumber > MAX_VAL_TYPE[Scope]:
EdkLogger.error("FdfParser", FORMAT_INVALID, "Too large value for %s." % Name)
return True
## _UndoToken() method
#
# Go back one token unit in file buffer
#
# @param self The object pointer
#
def _UndoToken(self):
self._UndoOneChar()
while self._CurrentChar().isspace():
if not self._UndoOneChar():
self._GetOneChar()
return
StartPos = self.CurrentOffsetWithinLine
CurrentLine = self.CurrentLineNumber
while CurrentLine == self.CurrentLineNumber:
TempChar = self._CurrentChar()
            # Try to find the start of the token: stop at a space or any char in the separator set.
            # That is, when we hit a space or any char in SEPARATORS, we have passed the start of the token.
            if not str(TempChar).isspace() and TempChar not in SEPARATORS:
                if not self._UndoOneChar():
                    return
            # If we happen to meet a separator as the first char, the token is that separator char itself;
            # normally it is the boundary of other tokens.
elif StartPos == self.CurrentOffsetWithinLine and TempChar in SEPARATORS:
return
else:
break
self._GetOneChar()
## _GetNextHexNumber() method
#
# Get next HEX data before a separator
# If found, the HEX data is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a HEX data, file buffer pointer moved forward
# @retval False Not able to find a HEX data, file buffer pointer not changed
#
def _GetNextHexNumber(self):
if not self._GetNextToken():
return False
if GlobalData.gHexPatternAll.match(self._Token):
return True
else:
self._UndoToken()
return False
## _GetNextDecimalNumber() method
#
# Get next decimal data before a separator
# If found, the decimal data is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a decimal data, file buffer pointer moved forward
# @retval False Not able to find a decimal data, file buffer pointer not changed
#
def _GetNextDecimalNumber(self):
if not self._GetNextToken():
return False
if self._Token.isdigit():
return True
else:
self._UndoToken()
return False
def _GetNextPcdSettings(self):
if not self._GetNextWord():
raise Warning.Expected("<PcdTokenSpaceCName>", self.FileName, self.CurrentLineNumber)
pcdTokenSpaceCName = self._Token
if not self._IsToken(TAB_SPLIT):
raise Warning.Expected(".", self.FileName, self.CurrentLineNumber)
if not self._GetNextWord():
raise Warning.Expected("<PcdCName>", self.FileName, self.CurrentLineNumber)
pcdCName = self._Token
Fields = []
while self._IsToken(TAB_SPLIT):
if not self._GetNextPcdWord():
raise Warning.Expected("Pcd Fields", self.FileName, self.CurrentLineNumber)
Fields.append(self._Token)
        return (pcdCName, pcdTokenSpaceCName, TAB_SPLIT.join(Fields))
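    # Illustrative parse (hypothetical PCD): the input
    #   gEfiMyTokenSpaceGuid.PcdMyFeature.Field[0]
    # returns ("PcdMyFeature", "gEfiMyTokenSpaceGuid", "Field[0]").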
## _GetStringData() method
#
# Get string contents quoted in ""
    # If found, the string data is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a string data, file buffer pointer moved forward
# @retval False Not able to find a string data, file buffer pointer not changed
#
def _GetStringData(self):
QuoteToUse = None
if self._Token.startswith(T_CHAR_DOUBLE_QUOTE) or self._Token.startswith("L\""):
QuoteToUse = T_CHAR_DOUBLE_QUOTE
elif self._Token.startswith(T_CHAR_SINGLE_QUOTE) or self._Token.startswith("L\'"):
QuoteToUse = T_CHAR_SINGLE_QUOTE
else:
return False
self._UndoToken()
self._SkipToToken(QuoteToUse)
currentLineNumber = self.CurrentLineNumber
if not self._SkipToToken(QuoteToUse):
raise Warning(QuoteToUse, self.FileName, self.CurrentLineNumber)
if currentLineNumber != self.CurrentLineNumber:
raise Warning(QuoteToUse, self.FileName, self.CurrentLineNumber)
self._Token = self._SkippedChars.rstrip(QuoteToUse)
return True
## _SkipToToken() method
#
# Search forward in file buffer for the string
# The skipped chars are put into self._SkippedChars
#
# @param self The object pointer
# @param String The string to search
# @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
# @retval True Successfully find the string, file buffer pointer moved forward
# @retval False Not able to find the string, file buffer pointer not changed
#
def _SkipToToken(self, String, IgnoreCase = False):
StartPos = self.GetFileBufferPos()
self._SkippedChars = ""
while not self._EndOfFile():
index = -1
if IgnoreCase:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].upper().find(String.upper())
else:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].find(String)
if index == 0:
self.CurrentOffsetWithinLine += len(String)
self._SkippedChars += String
return True
self._SkippedChars += str(self._CurrentChar())
self._GetOneChar()
self.SetFileBufferPos(StartPos)
self._SkippedChars = ""
return False
## GetFileBufferPos() method
#
# Return the tuple of current line and offset within the line
#
# @param self The object pointer
# @retval Tuple Line number and offset pair
#
def GetFileBufferPos(self):
return (self.CurrentLineNumber, self.CurrentOffsetWithinLine)
## SetFileBufferPos() method
#
# Restore the file buffer position
#
# @param self The object pointer
# @param Pos The new file buffer position
#
def SetFileBufferPos(self, Pos):
(self.CurrentLineNumber, self.CurrentOffsetWithinLine) = Pos
## Preprocess() method
#
    # Preprocess comments, conditional directives, include directives, and macro replacement.
# Exception will be raised if syntax error found
#
# @param self The object pointer
#
def Preprocess(self):
self._StringToList()
self.PreprocessFile()
self.PreprocessIncludeFile()
self._StringToList()
self.PreprocessFile()
self.PreprocessConditionalStatement()
self._StringToList()
for Pos in self._WipeOffArea:
self._ReplaceFragment(Pos[0], Pos[1])
self.Profile.FileLinesList = ["".join(list) for list in self.Profile.FileLinesList]
while self._GetDefines():
pass
## ParseFile() method
#
# Parse the file profile buffer to extract fd, fv ... information
# Exception will be raised if syntax error found
#
# @param self The object pointer
#
def ParseFile(self):
try:
self.Preprocess()
self._GetError()
#
# Keep processing sections of the FDF until no new sections or a syntax error is found
#
while self._GetFd() or self._GetFv() or self._GetFmp() or self._GetCapsule() or self._GetRule() or self._GetOptionRom():
pass
except Warning as X:
self._UndoToken()
#'\n\tGot Token: \"%s\" from File %s\n' % (self._Token, FileLineTuple[0]) + \
# At this point, the closest parent would be the included file itself
Profile = GetParentAtLine(X.OriginalLineNumber)
if Profile is not None:
X.Message += ' near line %d, column %d: %s' \
% (X.LineNumber, 0, Profile.FileLinesList[X.LineNumber-1])
else:
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
X.Message += ' near line %d, column %d: %s' \
% (FileLineTuple[1], self.CurrentOffsetWithinLine + 1, self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:].rstrip(TAB_LINE_BREAK).rstrip(T_CHAR_CR))
raise
## SectionParser() method
#
# Parse the file section info
# Exception will be raised if syntax error found
#
# @param self The object pointer
# @param section The section string
def SectionParser(self, section):
S = section.upper()
if not S.startswith("[DEFINES") and not S.startswith("[FD.") and not S.startswith("[FV.") and not S.startswith("[CAPSULE.") \
and not S.startswith("[RULE.") and not S.startswith("[OPTIONROM.") and not S.startswith('[FMPPAYLOAD.'):
raise Warning("Unknown section or section appear sequence error (The correct sequence should be [DEFINES], [FD.], [FV.], [Capsule.], [Rule.], [OptionRom.], [FMPPAYLOAD.])", self.FileName, self.CurrentLineNumber)
## _GetDefines() method
#
# Get Defines section contents and store its data into AllMacrosList
#
# @param self The object pointer
# @retval True Successfully find a Defines
# @retval False Not able to find a Defines
#
def _GetDefines(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[DEFINES"):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[DEFINES", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
#print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
# % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning.Expected("[DEFINES", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
while self._GetNextWord():
# handle the SET statement
if self._Token == 'SET':
self._UndoToken()
self._GetSetStatement(None)
continue
Macro = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken() or self._Token.startswith(TAB_SECTION_START):
raise Warning.Expected("MACRO value", self.FileName, self.CurrentLineNumber)
Value = self._Token
return False
    ## _GetError() method
    def _GetError(self):
        # Save the current position
CurrentLine = self.CurrentLineNumber
CurrentOffset = self.CurrentOffsetWithinLine
while self._GetNextToken():
if self._Token == TAB_ERROR:
EdkLogger.error('FdfParser', ERROR_STATEMENT, self._CurrentLine().replace(TAB_ERROR, '', 1), File=self.FileName, Line=self.CurrentLineNumber)
self.CurrentLineNumber = CurrentLine
self.CurrentOffsetWithinLine = CurrentOffset
## _GetFd() method
#
# Get FD section contents and store its data into FD dictionary of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a FD
# @retval False Not able to find a FD
#
def _GetFd(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[FD."):
if not S.startswith("[FV.") and not S.startswith('[FMPPAYLOAD.') and not S.startswith("[CAPSULE.") \
and not S.startswith("[RULE.") and not S.startswith("[OPTIONROM."):
raise Warning("Unknown section", self.FileName, self.CurrentLineNumber)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[FD.", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
#print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
# % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning.Expected("[FD.]", self.FileName, self.CurrentLineNumber)
FdName = self._GetUiName()
if FdName == "":
if len (self.Profile.FdDict) == 0:
FdName = GenFdsGlobalVariable.PlatformName
if FdName == "" and GlobalData.gActivePlatform:
FdName = GlobalData.gActivePlatform.PlatformName
self.Profile.FdNameNotSet = True
else:
raise Warning.Expected("FdName in [FD.] section", self.FileName, self.CurrentLineNumber)
self.CurrentFdName = FdName.upper()
if self.CurrentFdName in self.Profile.FdDict:
raise Warning("Unexpected the same FD name", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
FdObj = FD()
FdObj.FdUiName = self.CurrentFdName
self.Profile.FdDict[self.CurrentFdName] = FdObj
if len (self.Profile.FdDict) > 1 and self.Profile.FdNameNotSet:
raise Warning.Expected("all FDs have their name", self.FileName, self.CurrentLineNumber)
Status = self._GetCreateFile(FdObj)
if not Status:
raise Warning("FD name error", self.FileName, self.CurrentLineNumber)
while self._GetTokenStatements(FdObj):
pass
for Attr in ("BaseAddress", "Size", "ErasePolarity"):
if getattr(FdObj, Attr) is None:
self._GetNextToken()
raise Warning("Keyword %s missing" % Attr, self.FileName, self.CurrentLineNumber)
if not FdObj.BlockSizeList:
FdObj.BlockSizeList.append((1, FdObj.Size, None))
self._GetDefineStatements(FdObj)
self._GetSetStatements(FdObj)
if not self._GetRegionLayout(FdObj):
raise Warning.Expected("region layout", self.FileName, self.CurrentLineNumber)
while self._GetRegionLayout(FdObj):
pass
return True
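    # Illustrative [FD.] section consumed by _GetFd (hypothetical names and
    # values):
    #   [FD.MyPlatformFd]
    #   BaseAddress   = 0xFF800000 | gMyTokenSpaceGuid.PcdFdBaseAddress
    #   Size          = 0x00800000 | gMyTokenSpaceGuid.PcdFdSize
    #   ErasePolarity = 1
    #   BlockSize     = 0x1000
    #   NumBlocks     = 0x800
    # followed by one or more region layout lines.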
## _GetUiName() method
#
# Return the UI name of a section
#
# @param self The object pointer
# @retval FdName UI name
#
def _GetUiName(self):
Name = ""
if self._GetNextWord():
Name = self._Token
return Name
## _GetCreateFile() method
#
# Return the output file name of object
#
# @param self The object pointer
# @param Obj object whose data will be stored in file
# @retval FdName UI name
#
def _GetCreateFile(self, Obj):
if self._IsKeyword("CREATE_FILE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("file name", self.FileName, self.CurrentLineNumber)
FileName = self._Token
Obj.CreateFileName = FileName
return True
def SetPcdLocalation(self,pcdpair):
self.Profile.PcdLocalDict[pcdpair] = (self.Profile.FileName,self.CurrentLineNumber)
## _GetTokenStatements() method
#
# Get token statements
#
# @param self The object pointer
# @param Obj for whom token statement is got
#
def _GetTokenStatements(self, Obj):
if self._IsKeyword("BaseAddress"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex base address", self.FileName, self.CurrentLineNumber)
Obj.BaseAddress = self._Token
if self._IsToken(TAB_VALUE_SPLIT):
pcdPair = self._GetNextPcdSettings()
Obj.BaseAddressPcd = pcdPair
self.Profile.PcdDict[pcdPair] = Obj.BaseAddress
self.SetPcdLocalation(pcdPair)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[pcdPair] = FileLineTuple
return True
if self._IsKeyword("Size"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex size", self.FileName, self.CurrentLineNumber)
Size = self._Token
if self._IsToken(TAB_VALUE_SPLIT):
pcdPair = self._GetNextPcdSettings()
Obj.SizePcd = pcdPair
self.Profile.PcdDict[pcdPair] = Size
self.SetPcdLocalation(pcdPair)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[pcdPair] = FileLineTuple
Obj.Size = int(Size, 0)
return True
if self._IsKeyword("ErasePolarity"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Erase Polarity", self.FileName, self.CurrentLineNumber)
if not self._Token in {"1", "0"}:
raise Warning.Expected("1 or 0 Erase Polarity", self.FileName, self.CurrentLineNumber)
Obj.ErasePolarity = self._Token
return True
return self._GetBlockStatements(Obj)
## _GetAddressStatements() method
#
# Get address statements
#
# @param self The object pointer
# @param Obj for whom address statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetAddressStatements(self, Obj):
if self._IsKeyword("BsBaseAddress"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextDecimalNumber() and not self._GetNextHexNumber():
raise Warning.Expected("address", self.FileName, self.CurrentLineNumber)
BsAddress = int(self._Token, 0)
Obj.BsBaseAddress = BsAddress
if self._IsKeyword("RtBaseAddress"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextDecimalNumber() and not self._GetNextHexNumber():
raise Warning.Expected("address", self.FileName, self.CurrentLineNumber)
RtAddress = int(self._Token, 0)
Obj.RtBaseAddress = RtAddress
## _GetBlockStatements() method
#
# Get block statements
#
# @param self The object pointer
# @param Obj for whom block statement is got
#
def _GetBlockStatements(self, Obj):
IsBlock = False
while self._GetBlockStatement(Obj):
IsBlock = True
Item = Obj.BlockSizeList[-1]
if Item[0] is None or Item[1] is None:
raise Warning.Expected("block statement", self.FileName, self.CurrentLineNumber)
return IsBlock
## _GetBlockStatement() method
#
# Get block statement
#
# @param self The object pointer
# @param Obj for whom block statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetBlockStatement(self, Obj):
if not self._IsKeyword("BlockSize"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber() and not self._GetNextDecimalNumber():
raise Warning.Expected("Hex or Integer block size", self.FileName, self.CurrentLineNumber)
BlockSize = self._Token
BlockSizePcd = None
if self._IsToken(TAB_VALUE_SPLIT):
PcdPair = self._GetNextPcdSettings()
BlockSizePcd = PcdPair
self.Profile.PcdDict[PcdPair] = BlockSize
self.SetPcdLocalation(PcdPair)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[PcdPair] = FileLineTuple
BlockSize = int(BlockSize, 0)
BlockNumber = None
if self._IsKeyword("NumBlocks"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextDecimalNumber() and not self._GetNextHexNumber():
raise Warning.Expected("block numbers", self.FileName, self.CurrentLineNumber)
BlockNumber = int(self._Token, 0)
Obj.BlockSizeList.append((BlockSize, BlockNumber, BlockSizePcd))
return True
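# Illustrative block statement consumed by _GetBlockStatement() (hypothetical values):
#   BlockSize = 0x1000|gHypotheticalTokenSpaceGuid.PcdFlashBlockSize
#   NumBlocks = 0x800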
## _GetDefineStatements() method
#
# Get define statements
#
# @param self The object pointer
# @param Obj for whom define statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetDefineStatements(self, Obj):
while self._GetDefineStatement(Obj):
pass
## _GetDefineStatement() method
#
# Get define statement
#
# @param self The object pointer
# @param Obj for whom define statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetDefineStatement(self, Obj):
if self._IsKeyword(TAB_DEFINE):
self._GetNextToken()
Macro = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("value", self.FileName, self.CurrentLineNumber)
Value = self._Token
Macro = '$(' + Macro + ')'
Obj.DefineVarDict[Macro] = Value
return True
return False
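# Illustrative define statement consumed by _GetDefineStatement(); the macro can be
# referenced later as $(FLASH_BASE) (hypothetical name and value):
#   DEFINE FLASH_BASE = 0xFF800000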
## _GetSetStatements() method
#
# Get set statements
#
# @param self The object pointer
# @param Obj for whom set statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetSetStatements(self, Obj):
while self._GetSetStatement(Obj):
pass
## _GetSetStatement() method
#
# Get set statement
#
# @param self The object pointer
# @param Obj for whom set statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetSetStatement(self, Obj):
if self._IsKeyword("SET"):
PcdPair = self._GetNextPcdSettings()
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
Value = self._GetExpression()
Value = self._EvaluateConditional(Value, self.CurrentLineNumber, 'eval', True)
if Obj:
Obj.SetVarDict[PcdPair] = Value
self.Profile.PcdDict[PcdPair] = Value
self.SetPcdLocalation(PcdPair)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[PcdPair] = FileLineTuple
return True
return False
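# Illustrative set statement consumed by _GetSetStatement() (hypothetical PCD):
#   SET gHypotheticalTokenSpaceGuid.PcdFlashSize = 0x00800000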
## _CalcRegionExpr(self)
#
# Calculate expression for offset or size of a region
#
# @return: None if invalid expression
# Calculated number if successfully
#
def _CalcRegionExpr(self):
StartPos = self.GetFileBufferPos()
Expr = ''
PairCount = 0
while not self._EndOfFile():
CurCh = self._CurrentChar()
if CurCh == '(':
PairCount += 1
elif CurCh == ')':
PairCount -= 1
if CurCh in '|\r\n' and PairCount == 0:
break
Expr += CurCh
self._GetOneChar()
try:
return int(
ValueExpression(Expr,
self._CollectMacroPcd()
)(True), 0)
except Exception:
self.SetFileBufferPos(StartPos)
return None
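# _CalcRegionExpr() accepts arithmetic over macros and PCDs as well as plain numbers,
# so a region line such as (hypothetical values):
#   $(FLASH_BASE) + 0x1000|0x3000
# evaluates "$(FLASH_BASE) + 0x1000" as the offset before the '|' separator.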
## _GetRegionLayout() method
#
# Get region layout for FD
#
# @param self The object pointer
# @param theFd for whom region is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetRegionLayout(self, theFd):
Offset = self._CalcRegionExpr()
if Offset is None:
return False
RegionObj = Region()
RegionObj.Offset = Offset
theFd.RegionList.append(RegionObj)
if not self._IsToken(TAB_VALUE_SPLIT):
raise Warning.Expected("'|'", self.FileName, self.CurrentLineNumber)
Size = self._CalcRegionExpr()
if Size is None:
raise Warning.Expected("Region Size", self.FileName, self.CurrentLineNumber)
RegionObj.Size = Size
if not self._GetNextWord():
return True
if not self._Token in {"SET", BINARY_FILE_TYPE_FV, "FILE", "DATA", "CAPSULE", "INF"}:
#
# If next token is a word which is not a valid FV type, it might be part of [PcdOffset[|PcdSize]]
# Or it might be next region's offset described by an expression which starts with a PCD.
# PcdOffset[|PcdSize] or OffsetPcdExpression|Size
#
self._UndoToken()
IsRegionPcd = (RegionSizeGuidPattern.match(self._CurrentLine()[self.CurrentOffsetWithinLine:]) or
RegionOffsetPcdPattern.match(self._CurrentLine()[self.CurrentOffsetWithinLine:]))
if IsRegionPcd:
RegionObj.PcdOffset = self._GetNextPcdSettings()
self.Profile.PcdDict[RegionObj.PcdOffset] = "0x%08X" % (RegionObj.Offset + int(theFd.BaseAddress, 0))
self.SetPcdLocalation(RegionObj.PcdOffset)
self._PcdDict['%s.%s' % (RegionObj.PcdOffset[1], RegionObj.PcdOffset[0])] = "0x%x" % RegionObj.Offset
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[RegionObj.PcdOffset] = FileLineTuple
if self._IsToken(TAB_VALUE_SPLIT):
RegionObj.PcdSize = self._GetNextPcdSettings()
self.Profile.PcdDict[RegionObj.PcdSize] = "0x%08X" % RegionObj.Size
self.SetPcdLocalation(RegionObj.PcdSize)
self._PcdDict['%s.%s' % (RegionObj.PcdSize[1], RegionObj.PcdSize[0])] = "0x%x" % RegionObj.Size
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[RegionObj.PcdSize] = FileLineTuple
if not self._GetNextWord():
return True
if self._Token == "SET":
self._UndoToken()
self._GetSetStatements(RegionObj)
if not self._GetNextWord():
return True
elif self._Token == BINARY_FILE_TYPE_FV:
self._UndoToken()
self._GetRegionFvType(RegionObj)
elif self._Token == "CAPSULE":
self._UndoToken()
self._GetRegionCapType(RegionObj)
elif self._Token == "FILE":
self._UndoToken()
self._GetRegionFileType(RegionObj)
elif self._Token == "INF":
self._UndoToken()
RegionObj.RegionType = "INF"
while self._IsKeyword("INF"):
self._UndoToken()
ffsInf = self._ParseInfStatement()
if not ffsInf:
break
RegionObj.RegionDataList.append(ffsInf)
elif self._Token == "DATA":
self._UndoToken()
self._GetRegionDataType(RegionObj)
else:
self._UndoToken()
if self._GetRegionLayout(theFd):
return True
raise Warning("A valid region type was not found. "
"Valid types are [SET, FV, CAPSULE, FILE, DATA, INF]. This error occurred",
self.FileName, self.CurrentLineNumber)
return True
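# Illustrative FD region consumed by _GetRegionLayout(): offset|size, an optional
# PcdOffset|PcdSize pair, then the region data (hypothetical names):
#   0x00000000|0x00040000
#   gHypotheticalTokenSpaceGuid.PcdRecoveryBase|gHypotheticalTokenSpaceGuid.PcdRecoverySize
#   FV = FVRECOVERY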
## _GetRegionFvType() method
#
# Get region fv data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def _GetRegionFvType(self, RegionObj):
if not self._IsKeyword(BINARY_FILE_TYPE_FV):
raise Warning.Expected("'FV'", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionType = BINARY_FILE_TYPE_FV
RegionObj.RegionDataList.append((self._Token).upper())
while self._IsKeyword(BINARY_FILE_TYPE_FV):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionDataList.append((self._Token).upper())
## _GetRegionCapType() method
#
# Get region capsule data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def _GetRegionCapType(self, RegionObj):
if not self._IsKeyword("CAPSULE"):
raise Warning.Expected("'CAPSULE'", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("CAPSULE name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionType = "CAPSULE"
RegionObj.RegionDataList.append(self._Token)
while self._IsKeyword("CAPSULE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("CAPSULE name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionDataList.append(self._Token)
## _GetRegionFileType() method
#
# Get region file data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def _GetRegionFileType(self, RegionObj):
if not self._IsKeyword("FILE"):
raise Warning.Expected("'FILE'", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("File name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionType = "FILE"
RegionObj.RegionDataList.append(self._Token)
while self._IsKeyword("FILE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FILE name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionDataList.append(self._Token)
## _GetRegionDataType() method
#
# Get region array data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def _GetRegionDataType(self, RegionObj):
if not self._IsKeyword("DATA"):
raise Warning.Expected("Region Data type", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex byte", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 18:
raise Warning("Hex string can't be converted to a valid UINT64 value", self.FileName, self.CurrentLineNumber)
# convert hex string value to byte hex string array
AllString = self._Token
AllStrLen = len (AllString)
DataString = ""
while AllStrLen > 4:
DataString = DataString + "0x" + AllString[AllStrLen - 2: AllStrLen] + TAB_COMMA_SPLIT
AllStrLen = AllStrLen - 2
DataString = DataString + AllString[:AllStrLen] + TAB_COMMA_SPLIT
# byte value array
if len (self._Token) <= 4:
while self._IsToken(TAB_COMMA_SPLIT):
if not self._GetNextHexNumber():
raise Warning("Invalid Hex number", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 4:
raise Warning("Hex byte(must be 2 digits) too long", self.FileName, self.CurrentLineNumber)
DataString += self._Token
DataString += TAB_COMMA_SPLIT
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
DataString = DataString.rstrip(TAB_COMMA_SPLIT)
RegionObj.RegionType = "DATA"
RegionObj.RegionDataList.append(DataString)
while self._IsKeyword("DATA"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex byte", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 18:
raise Warning("Hex string can't be converted to a valid UINT64 value", self.FileName, self.CurrentLineNumber)
# convert hex string value to byte hex string array
AllString = self._Token
AllStrLen = len (AllString)
DataString = ""
while AllStrLen > 4:
DataString = DataString + "0x" + AllString[AllStrLen - 2: AllStrLen] + TAB_COMMA_SPLIT
AllStrLen = AllStrLen - 2
DataString = DataString + AllString[:AllStrLen] + TAB_COMMA_SPLIT
# byte value array
if len (self._Token) <= 4:
while self._IsToken(TAB_COMMA_SPLIT):
if not self._GetNextHexNumber():
raise Warning("Invalid Hex number", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 4:
raise Warning("Hex byte(must be 2 digits) too long", self.FileName, self.CurrentLineNumber)
DataString += self._Token
DataString += TAB_COMMA_SPLIT
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
DataString = DataString.rstrip(TAB_COMMA_SPLIT)
RegionObj.RegionDataList.append(DataString)
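# Illustrative DATA region consumed by _GetRegionDataType(); a leading value wider
# than one byte is split into little-endian byte order (hypothetical bytes):
#   DATA = {0x5A, 0xA5, 0x00, 0xFF}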
## _GetFv() method
#
# Get FV section contents and store its data into FV dictionary of self.Profile
#
# @param self The object pointer
# @retval True Successfully find an FV
# @retval False Not able to find an FV
#
def _GetFv(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[FV."):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[FV.", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
raise Warning("Unknown Keyword '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
FvName = self._GetUiName()
self.CurrentFvName = FvName.upper()
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
FvObj = FV(Name=self.CurrentFvName)
self.Profile.FvDict[self.CurrentFvName] = FvObj
Status = self._GetCreateFile(FvObj)
if not Status:
raise Warning("FV name error", self.FileName, self.CurrentLineNumber)
self._GetDefineStatements(FvObj)
self._GetAddressStatements(FvObj)
while True:
self._GetSetStatements(FvObj)
if not (self._GetBlockStatement(FvObj) or self._GetFvBaseAddress(FvObj) or
self._GetFvForceRebase(FvObj) or self._GetFvAlignment(FvObj) or
self._GetFvAttributes(FvObj) or self._GetFvNameGuid(FvObj) or
self._GetFvExtEntryStatement(FvObj) or self._GetFvNameString(FvObj)):
break
if FvObj.FvNameString == 'TRUE' and not FvObj.FvNameGuid:
raise Warning("FvNameString found but FvNameGuid was not found", self.FileName, self.CurrentLineNumber)
self._GetAprioriSection(FvObj)
self._GetAprioriSection(FvObj)
while True:
isInf = self._GetInfStatement(FvObj)
isFile = self._GetFileStatement(FvObj)
if not isInf and not isFile:
break
return True
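# Illustrative [FV.*] section skeleton consumed by _GetFv() (hypothetical content):
#   [FV.FVMAIN]
#   FvAlignment    = 16
#   ERASE_POLARITY = 1
#   BlockSize      = 0x1000
#   APRIORI PEI { INF HypotheticalPkg/PeimFoo/PeimFoo.inf }
#   INF HypotheticalPkg/Bar/Bar.inf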
## _GetFvAlignment() method
#
# Get alignment for FV
#
# @param self The object pointer
# @param Obj for whom alignment is got
# @retval True Successfully find an alignment statement
# @retval False Not able to find an alignment statement
#
def _GetFvAlignment(self, Obj):
if not self._IsKeyword("FvAlignment"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("alignment value", self.FileName, self.CurrentLineNumber)
if self._Token.upper() not in {"1", "2", "4", "8", "16", "32", "64", "128", "256", "512", \
"1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", \
"1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", \
"1G", "2G"}:
raise Warning("Unknown alignment value '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
Obj.FvAlignment = self._Token
return True
## _GetFvBaseAddress() method
#
# Get BaseAddress for FV
#
# @param self The object pointer
# @param Obj for whom FvBaseAddress is got
# @retval True Successfully find an FvBaseAddress statement
# @retval False Not able to find an FvBaseAddress statement
#
def _GetFvBaseAddress(self, Obj):
if not self._IsKeyword("FvBaseAddress"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV base address value", self.FileName, self.CurrentLineNumber)
if not BaseAddrValuePattern.match(self._Token.upper()):
raise Warning("Unknown FV base address value '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
Obj.FvBaseAddress = self._Token
return True
## _GetFvForceRebase() method
#
# Get FvForceRebase for FV
#
# @param self The object pointer
# @param Obj for whom FvForceRebase is got
# @retval True Successfully find an FvForceRebase statement
# @retval False Not able to find an FvForceRebase statement
#
def _GetFvForceRebase(self, Obj):
if not self._IsKeyword("FvForceRebase"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FvForceRebase value", self.FileName, self.CurrentLineNumber)
if self._Token.upper() not in {"TRUE", "FALSE", "0", "0X0", "0X00", "1", "0X1", "0X01"}:
raise Warning("Unknown FvForceRebase value '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
if self._Token.upper() in {"TRUE", "1", "0X1", "0X01"}:
Obj.FvForceRebase = True
elif self._Token.upper() in {"FALSE", "0", "0X0", "0X00"}:
Obj.FvForceRebase = False
else:
Obj.FvForceRebase = None
return True
## _GetFvAttributes() method
#
# Get attributes for FV
#
# @param self The object pointer
# @param FvObj for whom attributes are got
# @retval True/False Whether at least one FV attribute was parsed
#
def _GetFvAttributes(self, FvObj):
IsWordToken = False
while self._GetNextWord():
IsWordToken = True
name = self._Token
if name not in {"ERASE_POLARITY", "MEMORY_MAPPED", \
"STICKY_WRITE", "LOCK_CAP", "LOCK_STATUS", "WRITE_ENABLED_CAP", \
"WRITE_DISABLED_CAP", "WRITE_STATUS", "READ_ENABLED_CAP", \
"READ_DISABLED_CAP", "READ_STATUS", "READ_LOCK_CAP", \
"READ_LOCK_STATUS", "WRITE_LOCK_CAP", "WRITE_LOCK_STATUS", \
"WRITE_POLICY_RELIABLE", "WEAK_ALIGNMENT", "FvUsedSizeEnable"}:
self._UndoToken()
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken() or self._Token.upper() not in {"TRUE", "FALSE", "1", "0"}:
raise Warning.Expected("TRUE/FALSE (1/0)", self.FileName, self.CurrentLineNumber)
FvObj.FvAttributeDict[name] = self._Token
return IsWordToken
## _GetFvNameGuid() method
#
# Get FV GUID for FV
#
# @param self The object pointer
# @param FvObj for whom GUID is got
# @retval True/False Whether an FvNameGuid statement was found
#
def _GetFvNameGuid(self, FvObj):
if not self._IsKeyword("FvNameGuid"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextGuid():
raise Warning.Expected("GUID value", self.FileName, self.CurrentLineNumber)
if self._Token in GlobalData.gGuidDict:
self._Token = GuidStructureStringToGuidString(GlobalData.gGuidDict[self._Token]).upper()
FvObj.FvNameGuid = self._Token
return True
def _GetFvNameString(self, FvObj):
if not self._IsKeyword("FvNameString"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken() or self._Token.upper() not in {'TRUE', 'FALSE'}:
raise Warning.Expected("TRUE or FALSE for FvNameString", self.FileName, self.CurrentLineNumber)
FvObj.FvNameString = self._Token
return True
def _GetFvExtEntryStatement(self, FvObj):
if not (self._IsKeyword("FV_EXT_ENTRY") or self._IsKeyword("FV_EXT_ENTRY_TYPE")):
return False
if not self._IsKeyword ("TYPE"):
raise Warning.Expected("'TYPE'", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber() and not self._GetNextDecimalNumber():
raise Warning.Expected("Hex FV extension entry type value At Line ", self.FileName, self.CurrentLineNumber)
FvObj.FvExtEntryTypeValue.append(self._Token)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._IsKeyword("FILE") and not self._IsKeyword("DATA"):
raise Warning.Expected("'FILE' or 'DATA'", self.FileName, self.CurrentLineNumber)
FvObj.FvExtEntryType.append(self._Token)
if self._Token == 'DATA':
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex byte", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 4:
raise Warning("Hex byte(must be 2 digits) too long", self.FileName, self.CurrentLineNumber)
DataString = self._Token
DataString += TAB_COMMA_SPLIT
while self._IsToken(TAB_COMMA_SPLIT):
if not self._GetNextHexNumber():
raise Warning("Invalid Hex number", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 4:
raise Warning("Hex byte(must be 2 digits) too long", self.FileName, self.CurrentLineNumber)
DataString += self._Token
DataString += TAB_COMMA_SPLIT
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
DataString = DataString.rstrip(TAB_COMMA_SPLIT)
FvObj.FvExtEntryData.append(DataString)
if self._Token == 'FILE':
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV Extension Entry file path At Line ", self.FileName, self.CurrentLineNumber)
FvObj.FvExtEntryData.append(self._Token)
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
return True
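# Illustrative FV extension entries consumed above (hypothetical type values and path):
#   FV_EXT_ENTRY TYPE = 0x01 { DATA = {0xAA, 0xBB} }
#   FV_EXT_ENTRY TYPE = 0x02 { FILE = HypotheticalPkg/ExtEntry.bin }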
## _GetAprioriSection() method
#
# Get token statements
#
# @param self The object pointer
# @param FvObj for whom apriori is got
# @retval True Successfully find apriori statement
# @retval False Not able to find apriori statement
#
def _GetAprioriSection(self, FvObj):
if not self._IsKeyword("APRIORI"):
return False
if not self._IsKeyword("PEI") and not self._IsKeyword("DXE"):
raise Warning.Expected("Apriori file type", self.FileName, self.CurrentLineNumber)
AprType = self._Token
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
AprSectionObj = AprioriSection()
AprSectionObj.AprioriType = AprType
self._GetDefineStatements(AprSectionObj)
while True:
IsInf = self._GetInfStatement(AprSectionObj)
IsFile = self._GetFileStatement(AprSectionObj)
if not IsInf and not IsFile:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
FvObj.AprioriSectionList.append(AprSectionObj)
return True
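# Illustrative apriori section consumed by _GetAprioriSection() (hypothetical path):
#   APRIORI PEI {
#     INF HypotheticalPkg/PeimFoo/PeimFoo.inf
#   }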
def _ParseInfStatement(self):
if not self._IsKeyword("INF"):
return None
ffsInf = FfsInfStatement()
self._GetInfOptions(ffsInf)
if not self._GetNextToken():
raise Warning.Expected("INF file path", self.FileName, self.CurrentLineNumber)
ffsInf.InfFileName = self._Token
if not ffsInf.InfFileName.endswith('.inf'):
raise Warning.Expected(".inf file path", self.FileName, self.CurrentLineNumber)
ffsInf.CurrentLineNum = self.CurrentLineNumber
ffsInf.CurrentLineContent = self._CurrentLine()
# Replace $(SPACE) with a real space character
ffsInf.InfFileName = ffsInf.InfFileName.replace('$(SPACE)', ' ')
if ffsInf.InfFileName.replace(TAB_WORKSPACE, '').find('$') == -1:
#do case sensitive check for file path
ErrorCode, ErrorInfo = PathClass(NormPath(ffsInf.InfFileName), GenFdsGlobalVariable.WorkSpaceDir).Validate()
if ErrorCode != 0:
EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
NewFileName = ffsInf.InfFileName
if ffsInf.OverrideGuid:
NewFileName = ProcessDuplicatedInf(PathClass(ffsInf.InfFileName,GenFdsGlobalVariable.WorkSpaceDir), ffsInf.OverrideGuid, GenFdsGlobalVariable.WorkSpaceDir).Path
if NewFileName not in self.Profile.InfList:
self.Profile.InfList.append(NewFileName)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.InfFileLineList.append(FileLineTuple)
if ffsInf.UseArch:
if ffsInf.UseArch not in self.Profile.InfDict:
self.Profile.InfDict[ffsInf.UseArch] = [ffsInf.InfFileName]
else:
self.Profile.InfDict[ffsInf.UseArch].append(ffsInf.InfFileName)
else:
self.Profile.InfDict['ArchTBD'].append(ffsInf.InfFileName)
if self._IsToken(TAB_VALUE_SPLIT):
if self._IsKeyword('RELOCS_STRIPPED'):
ffsInf.KeepReloc = False
elif self._IsKeyword('RELOCS_RETAINED'):
ffsInf.KeepReloc = True
else:
raise Warning("Unknown reloc strip flag '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
return ffsInf
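# Illustrative INF statements consumed by _ParseInfStatement() (hypothetical paths
# and rule name):
#   INF HypotheticalPkg/Driver/Driver.inf
#   INF RuleOverride = HYPOTHETICAL_RULE HypotheticalPkg/Driver/Driver.inf
#   INF HypotheticalPkg/Driver/Driver.inf | RELOCS_STRIPPED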
## _GetInfStatement() method
#
# Get INF statements
#
# @param self The object pointer
# @param Obj for whom inf statement is got
# @retval True Successfully find inf statement
# @retval False Not able to find inf statement
#
def _GetInfStatement(self, Obj, ForCapsule=False):
ffsInf = self._ParseInfStatement()
if not ffsInf:
return False
if ForCapsule:
myCapsuleFfs = CapsuleFfs()
myCapsuleFfs.Ffs = ffsInf
Obj.CapsuleDataList.append(myCapsuleFfs)
else:
Obj.FfsList.append(ffsInf)
return True
## _GetInfOptions() method
#
# Get options for INF
#
# @param self The object pointer
# @param FfsInfObj for whom option is got
#
def _GetInfOptions(self, FfsInfObj):
if self._IsKeyword("FILE_GUID"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextGuid():
raise Warning.Expected("GUID value", self.FileName, self.CurrentLineNumber)
if self._Token in GlobalData.gGuidDict:
self._Token = GuidStructureStringToGuidString(GlobalData.gGuidDict[self._Token]).upper()
FfsInfObj.OverrideGuid = self._Token
if self._IsKeyword("RuleOverride"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Rule name", self.FileName, self.CurrentLineNumber)
FfsInfObj.Rule = self._Token
if self._IsKeyword("VERSION"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Version", self.FileName, self.CurrentLineNumber)
if self._GetStringData():
FfsInfObj.Version = self._Token
if self._IsKeyword(BINARY_FILE_TYPE_UI):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("UI name", self.FileName, self.CurrentLineNumber)
if self._GetStringData():
FfsInfObj.Ui = self._Token
if self._IsKeyword("USE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("ARCH name", self.FileName, self.CurrentLineNumber)
FfsInfObj.UseArch = self._Token
if self._GetNextToken():
p = compile(r'([a-zA-Z0-9\-]+|\$\(TARGET\)|\*)_([a-zA-Z0-9\-]+|\$\(TOOL_CHAIN_TAG\)|\*)_([a-zA-Z0-9\-]+|\$\(ARCH\))')
if p.match(self._Token) and p.match(self._Token).span()[1] == len(self._Token):
FfsInfObj.KeyStringList.append(self._Token)
if not self._IsToken(TAB_COMMA_SPLIT):
return
else:
self._UndoToken()
return
while self._GetNextToken():
if not p.match(self._Token):
raise Warning.Expected("KeyString \"Target_Tag_Arch\"", self.FileName, self.CurrentLineNumber)
FfsInfObj.KeyStringList.append(self._Token)
if not self._IsToken(TAB_COMMA_SPLIT):
break
## _GetFileStatement() method
#
# Get FILE statements
#
# @param self The object pointer
# @param Obj for whom FILE statement is got
# @retval True Successfully find FILE statement
# @retval False Not able to find FILE statement
#
def _GetFileStatement(self, Obj, ForCapsule = False):
if not self._IsKeyword("FILE"):
return False
if not self._GetNextWord():
raise Warning.Expected("FFS type", self.FileName, self.CurrentLineNumber)
if ForCapsule and self._Token == 'DATA':
self._UndoToken()
self._UndoToken()
return False
FfsFileObj = FileStatement()
FfsFileObj.FvFileType = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextGuid():
if not self._GetNextWord():
raise Warning.Expected("File GUID", self.FileName, self.CurrentLineNumber)
if self._Token == 'PCD':
if not self._IsToken("("):
raise Warning.Expected("'('", self.FileName, self.CurrentLineNumber)
PcdPair = self._GetNextPcdSettings()
if not self._IsToken(")"):
raise Warning.Expected("')'", self.FileName, self.CurrentLineNumber)
self._Token = 'PCD('+PcdPair[1]+TAB_SPLIT+PcdPair[0]+')'
if self._Token in GlobalData.gGuidDict:
self._Token = GuidStructureStringToGuidString(GlobalData.gGuidDict[self._Token]).upper()
FfsFileObj.NameGuid = self._Token
self._GetFilePart(FfsFileObj)
if ForCapsule:
capsuleFfs = CapsuleFfs()
capsuleFfs.Ffs = FfsFileObj
Obj.CapsuleDataList.append(capsuleFfs)
else:
Obj.FfsList.append(FfsFileObj)
return True
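# Illustrative FILE statement consumed by _GetFileStatement() (hypothetical GUID and path):
#   FILE DRIVER = 12345678-1234-1234-1234-123456789ABC {
#     SECTION PE32 = HypotheticalPkg/Driver/Driver.efi
#   }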
## _FileCouldHaveRelocFlag() method
#
# Check whether reloc strip flag can be set for a file type.
#
# @param FileType The file type to check with
# @retval True This type could have relocation strip flag
# @retval False No way to have it
#
@staticmethod
def _FileCouldHaveRelocFlag (FileType):
if FileType in {SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM, SUP_MODULE_MM_CORE_STANDALONE, 'PEI_DXE_COMBO'}:
return True
else:
return False
## _SectionCouldHaveRelocFlag() method
#
# Check whether reloc strip flag can be set for a section type.
#
# @param SectionType The section type to check with
# @retval True This type could have relocation strip flag
# @retval False No way to have it
#
@staticmethod
def _SectionCouldHaveRelocFlag (SectionType):
if SectionType in {BINARY_FILE_TYPE_TE, BINARY_FILE_TYPE_PE32}:
return True
else:
return False
## _GetFilePart() method
#
# Get components for FILE statement
#
# @param self The object pointer
# @param FfsFileObj for whom component is got
#
def _GetFilePart(self, FfsFileObj):
self._GetFileOpts(FfsFileObj)
if not self._IsToken("{"):
if self._IsKeyword('RELOCS_STRIPPED') or self._IsKeyword('RELOCS_RETAINED'):
if self._FileCouldHaveRelocFlag(FfsFileObj.FvFileType):
if self._Token == 'RELOCS_STRIPPED':
FfsFileObj.KeepReloc = False
else:
FfsFileObj.KeepReloc = True
else:
raise Warning("File type %s could not have reloc strip flag%d" % (FfsFileObj.FvFileType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("File name or section data", self.FileName, self.CurrentLineNumber)
if self._Token == BINARY_FILE_TYPE_FV:
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV name", self.FileName, self.CurrentLineNumber)
FfsFileObj.FvName = self._Token
elif self._Token == "FD":
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FD name", self.FileName, self.CurrentLineNumber)
FfsFileObj.FdName = self._Token
elif self._Token in {TAB_DEFINE, "APRIORI", "SECTION"}:
self._UndoToken()
self._GetSectionData(FfsFileObj)
elif hasattr(FfsFileObj, 'FvFileType') and FfsFileObj.FvFileType == 'RAW':
self._UndoToken()
self._GetRAWData(FfsFileObj)
else:
FfsFileObj.CurrentLineNum = self.CurrentLineNumber
FfsFileObj.CurrentLineContent = self._CurrentLine()
FfsFileObj.FileName = self._Token.replace('$(SPACE)', ' ')
self._VerifyFile(FfsFileObj.FileName)
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
## _GetRAWData() method
#
# Get RAW data for FILE statement
#
# @param self The object pointer
# @param FfsFileObj for whom section is got
#
def _GetRAWData(self, FfsFileObj):
FfsFileObj.FileName = []
FfsFileObj.SubAlignment = []
while True:
AlignValue = None
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
# For FFS, "Auto" is the default alignment and is treated the same as ""
if self._Token != "Auto":
AlignValue = self._Token
if not self._GetNextToken():
raise Warning.Expected("Filename value", self.FileName, self.CurrentLineNumber)
FileName = self._Token.replace('$(SPACE)', ' ')
if FileName == T_CHAR_BRACE_R:
self._UndoToken()
raise Warning.Expected("Filename value", self.FileName, self.CurrentLineNumber)
self._VerifyFile(FileName)
File = PathClass(NormPath(FileName), GenFdsGlobalVariable.WorkSpaceDir)
FfsFileObj.FileName.append(File.Path)
FfsFileObj.SubAlignment.append(AlignValue)
if self._IsToken(T_CHAR_BRACE_R):
self._UndoToken()
break
if len(FfsFileObj.SubAlignment) == 1:
FfsFileObj.SubAlignment = FfsFileObj.SubAlignment[0]
if len(FfsFileObj.FileName) == 1:
FfsFileObj.FileName = FfsFileObj.FileName[0]
## _GetFileOpts() method
#
# Get options for FILE statement
#
# @param self The object pointer
# @param FfsFileObj for whom options is got
#
def _GetFileOpts(self, FfsFileObj):
if self._GetNextToken():
if TokenFindPattern.match(self._Token):
FfsFileObj.KeyStringList.append(self._Token)
if self._IsToken(TAB_COMMA_SPLIT):
while self._GetNextToken():
if not TokenFindPattern.match(self._Token):
raise Warning.Expected("KeyString \"Target_Tag_Arch\"", self.FileName, self.CurrentLineNumber)
FfsFileObj.KeyStringList.append(self._Token)
if not self._IsToken(TAB_COMMA_SPLIT):
break
else:
self._UndoToken()
if self._IsKeyword("FIXED", True):
FfsFileObj.Fixed = True
if self._IsKeyword("CHECKSUM", True):
FfsFileObj.CheckSum = True
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
# For FFS, "Auto" is the default alignment and is treated the same as ""
if self._Token != "Auto":
FfsFileObj.Alignment = self._Token
## _GetAlignment() method
#
# Return the alignment value
#
# @param self The object pointer
# @retval True Successfully find alignment
# @retval False Not able to find alignment
#
def _GetAlignment(self):
if self._IsKeyword("Align", True):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("alignment value", self.FileName, self.CurrentLineNumber)
return True
return False
## _GetSectionData() method
#
# Get section data for FILE statement
#
# @param self The object pointer
# @param FfsFileObj for whom section is got
#
def _GetSectionData(self, FfsFileObj):
self._GetDefineStatements(FfsFileObj)
while True:
IsLeafSection = self._GetLeafSection(FfsFileObj)
IsEncapSection = self._GetEncapsulationSec(FfsFileObj)
if not IsLeafSection and not IsEncapSection:
break
## _GetLeafSection() method
#
# Get leaf section for Obj
#
# @param self The object pointer
# @param Obj for whom leaf section is got
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def _GetLeafSection(self, Obj):
OldPos = self.GetFileBufferPos()
if not self._IsKeyword("SECTION"):
if len(Obj.SectionList) == 0:
raise Warning.Expected("SECTION", self.FileName, self.CurrentLineNumber)
else:
return False
AlignValue = None
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
AlignValue = self._Token
BuildNum = None
if self._IsKeyword("BUILD_NUM"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Build number value", self.FileName, self.CurrentLineNumber)
BuildNum = self._Token
if self._IsKeyword("VERSION"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("version", self.FileName, self.CurrentLineNumber)
VerSectionObj = VerSection()
VerSectionObj.Alignment = AlignValue
VerSectionObj.BuildNum = BuildNum
if self._GetStringData():
VerSectionObj.StringData = self._Token
else:
VerSectionObj.FileName = self._Token
Obj.SectionList.append(VerSectionObj)
elif self._IsKeyword(BINARY_FILE_TYPE_UI):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("UI", self.FileName, self.CurrentLineNumber)
UiSectionObj = UiSection()
UiSectionObj.Alignment = AlignValue
if self._GetStringData():
UiSectionObj.StringData = self._Token
else:
UiSectionObj.FileName = self._Token
Obj.SectionList.append(UiSectionObj)
elif self._IsKeyword("FV_IMAGE"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV name or FV file path", self.FileName, self.CurrentLineNumber)
FvName = self._Token
FvObj = None
if self._IsToken("{"):
FvObj = FV()
FvObj.UiFvName = FvName.upper()
self._GetDefineStatements(FvObj)
self._GetBlockStatement(FvObj)
self._GetSetStatements(FvObj)
self._GetFvAlignment(FvObj)
self._GetFvAttributes(FvObj)
while True:
IsInf = self._GetInfStatement(FvObj)
IsFile = self._GetFileStatement(FvObj)
if not IsInf and not IsFile:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
FvImageSectionObj = FvImageSection()
FvImageSectionObj.Alignment = AlignValue
if FvObj is not None:
FvImageSectionObj.Fv = FvObj
FvImageSectionObj.FvName = None
else:
FvImageSectionObj.FvName = FvName.upper()
FvImageSectionObj.FvFileName = FvName
Obj.SectionList.append(FvImageSectionObj)
elif self._IsKeyword("PEI_DEPEX_EXP") or self._IsKeyword("DXE_DEPEX_EXP") or self._IsKeyword("SMM_DEPEX_EXP"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
DepexSectionObj = DepexSection()
DepexSectionObj.Alignment = AlignValue
DepexSectionObj.DepexType = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._SkipToToken(T_CHAR_BRACE_R):
raise Warning.Expected("Depex expression ending '}'", self.FileName, self.CurrentLineNumber)
DepexSectionObj.Expression = self._SkippedChars.rstrip(T_CHAR_BRACE_R)
Obj.SectionList.append(DepexSectionObj)
elif self._IsKeyword("SUBTYPE_GUID"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
SubTypeGuidValue = None
if not self._GetNextGuid():
raise Warning.Expected("GUID", self.FileName, self.CurrentLineNumber)
else:
SubTypeGuidValue = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("section file path", self.FileName, self.CurrentLineNumber)
FileName = self._Token
SubTypeGuidSectionObj = SubTypeGuidSection()
SubTypeGuidSectionObj.Alignment = AlignValue
SubTypeGuidSectionObj.SubTypeGuid = SubTypeGuidValue
SubTypeGuidSectionObj.SectFileName = FileName
Obj.SectionList.append(SubTypeGuidSectionObj)
else:
if not self._GetNextWord():
raise Warning.Expected("section type", self.FileName, self.CurrentLineNumber)
# Encapsulation section appear, UndoToken and return
if self._Token == "COMPRESS" or self._Token == "GUIDED":
self.SetFileBufferPos(OldPos)
return False
if self._Token not in {"COMPAT16", BINARY_FILE_TYPE_PE32, BINARY_FILE_TYPE_PIC, BINARY_FILE_TYPE_TE, "FV_IMAGE", "RAW", BINARY_FILE_TYPE_DXE_DEPEX,\
BINARY_FILE_TYPE_UI, "VERSION", BINARY_FILE_TYPE_PEI_DEPEX, "SUBTYPE_GUID", BINARY_FILE_TYPE_SMM_DEPEX}:
raise Warning("Unknown section type '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
if AlignValue == 'Auto' and self._Token not in {BINARY_FILE_TYPE_PE32, BINARY_FILE_TYPE_TE}:
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
# DataSection
DataSectionObj = DataSection()
DataSectionObj.Alignment = AlignValue
DataSectionObj.SecType = self._Token
if self._IsKeyword('RELOCS_STRIPPED') or self._IsKeyword('RELOCS_RETAINED'):
if self._FileCouldHaveRelocFlag(Obj.FvFileType) and self._SectionCouldHaveRelocFlag(DataSectionObj.SecType):
if self._Token == 'RELOCS_STRIPPED':
DataSectionObj.KeepReloc = False
else:
DataSectionObj.KeepReloc = True
else:
raise Warning("File type %s, section type %s, could not have reloc strip flag%d" % (Obj.FvFileType, DataSectionObj.SecType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if self._IsToken(TAB_EQUAL_SPLIT):
if not self._GetNextToken():
raise Warning.Expected("section file path", self.FileName, self.CurrentLineNumber)
DataSectionObj.SectFileName = self._Token
self._VerifyFile(DataSectionObj.SectFileName)
else:
if not self._GetCglSection(DataSectionObj):
return False
Obj.SectionList.append(DataSectionObj)
return True
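# Illustrative leaf sections consumed by _GetLeafSection() (hypothetical values):
#   SECTION Align = 16 PE32 = HypotheticalPkg/Driver/Driver.efi
#   SECTION UI = "Hypothetical Driver"
#   SECTION BUILD_NUM = 1 VERSION = "1.0"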
## _VerifyFile
#
# Check whether the file exists:
# If the current phase is GenFds, the file must exist;
# If the current phase is AutoGen and the file is not in $(OUTPUT_DIRECTORY), the file must exist
# @param FileName: File path to be verified.
#
def _VerifyFile(self, FileName):
if FileName.replace(TAB_WORKSPACE, '').find('$') != -1:
return
if not GlobalData.gAutoGenPhase or not self._GetMacroValue(TAB_DSC_DEFINES_OUTPUT_DIRECTORY) in FileName:
ErrorCode, ErrorInfo = PathClass(NormPath(FileName), GenFdsGlobalVariable.WorkSpaceDir).Validate()
if ErrorCode != 0:
EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
## _GetCglSection() method
#
# Get compressed or GUIDed section for Obj
#
# @param self The object pointer
# @param Obj for whom leaf section is got
# @param AlignValue alignment value for complex section
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def _GetCglSection(self, Obj, AlignValue = None):
if self._IsKeyword("COMPRESS"):
type = "PI_STD"
if self._IsKeyword("PI_STD") or self._IsKeyword("PI_NONE"):
type = self._Token
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
CompressSectionObj = CompressSection()
CompressSectionObj.Alignment = AlignValue
CompressSectionObj.CompType = type
# Recursive sections...
while True:
IsLeafSection = self._GetLeafSection(CompressSectionObj)
IsEncapSection = self._GetEncapsulationSec(CompressSectionObj)
if not IsLeafSection and not IsEncapSection:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
Obj.SectionList.append(CompressSectionObj)
return True
elif self._IsKeyword("GUIDED"):
GuidValue = None
if self._GetNextGuid():
if self._Token in GlobalData.gGuidDict:
self._Token = GuidStructureStringToGuidString(GlobalData.gGuidDict[self._Token]).upper()
GuidValue = self._Token
AttribDict = self._GetGuidAttrib()
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
GuidSectionObj = GuidSection()
GuidSectionObj.Alignment = AlignValue
GuidSectionObj.NameGuid = GuidValue
GuidSectionObj.SectionType = "GUIDED"
GuidSectionObj.ProcessRequired = AttribDict["PROCESSING_REQUIRED"]
GuidSectionObj.AuthStatusValid = AttribDict["AUTH_STATUS_VALID"]
GuidSectionObj.ExtraHeaderSize = AttribDict["EXTRA_HEADER_SIZE"]
# Recursive sections...
while True:
IsLeafSection = self._GetLeafSection(GuidSectionObj)
IsEncapSection = self._GetEncapsulationSec(GuidSectionObj)
if not IsLeafSection and not IsEncapSection:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
Obj.SectionList.append(GuidSectionObj)
return True
return False
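# Illustrative encapsulation sections consumed by _GetCglSection() (hypothetical
# GUID and paths):
#   SECTION COMPRESS PI_STD {
#     SECTION PE32 = HypotheticalPkg/Driver/Driver.efi
#   }
#   SECTION GUIDED 12345678-1234-1234-1234-123456789ABC PROCESSING_REQUIRED = TRUE {
#     SECTION RAW = HypotheticalPkg/Blob.bin
#   }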
## _GetGuidAttrib() method
#
# Get attributes for GUID section
#
# @param self The object pointer
# @retval AttribDict Dictionary of key-value pair of section attributes
#
def _GetGuidAttrib(self):
AttribDict = {}
AttribDict["PROCESSING_REQUIRED"] = "NONE"
AttribDict["AUTH_STATUS_VALID"] = "NONE"
AttribDict["EXTRA_HEADER_SIZE"] = -1
while self._IsKeyword("PROCESSING_REQUIRED") or self._IsKeyword("AUTH_STATUS_VALID") \
or self._IsKeyword("EXTRA_HEADER_SIZE"):
AttribKey = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("TRUE(1)/FALSE(0)/Number", self.FileName, self.CurrentLineNumber)
elif AttribKey == "EXTRA_HEADER_SIZE":
Base = 10
if self._Token[0:2].upper() == "0X":
Base = 16
try:
AttribDict[AttribKey] = int(self._Token, Base)
continue
except ValueError:
raise Warning.Expected("Number", self.FileName, self.CurrentLineNumber)
elif self._Token.upper() not in {"TRUE", "FALSE", "1", "0"}:
raise Warning.Expected("TRUE/FALSE (1/0)", self.FileName, self.CurrentLineNumber)
AttribDict[AttribKey] = self._Token
return AttribDict
## _GetEncapsulationSec() method
#
# Get encapsulation section for FILE
#
# @param self The object pointer
# @param FfsFile for whom section is got
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def _GetEncapsulationSec(self, FfsFileObj):
OldPos = self.GetFileBufferPos()
if not self._IsKeyword("SECTION"):
if len(FfsFileObj.SectionList) == 0:
raise Warning.Expected("SECTION", self.FileName, self.CurrentLineNumber)
else:
return False
AlignValue = None
if self._GetAlignment():
if self._Token not in ALIGNMENT_NOAUTO:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
AlignValue = self._Token
if not self._GetCglSection(FfsFileObj, AlignValue):
self.SetFileBufferPos(OldPos)
return False
else:
return True
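## _GetFmp() method
#
# Get [FmpPayload.] section contents and store them into self.Profile.FmpPayloadDict
#
# @param self The object pointer
# @retval True Successfully find an FMP payload section
# @retval False Not able to find an FMP payload section
#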
def _GetFmp(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[FMPPAYLOAD."):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
self._SkipToToken("[FMPPAYLOAD.", True)
FmpUiName = self._GetUiName().upper()
if FmpUiName in self.Profile.FmpPayloadDict:
raise Warning("Duplicated FMP UI name found: %s" % FmpUiName, self.FileName, self.CurrentLineNumber)
FmpData = CapsulePayload()
FmpData.UiName = FmpUiName
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning("The FMP payload section is empty!", self.FileName, self.CurrentLineNumber)
FmpKeyList = ['IMAGE_HEADER_INIT_VERSION', 'IMAGE_TYPE_ID', 'IMAGE_INDEX', 'HARDWARE_INSTANCE', 'CERTIFICATE_GUID', 'MONOTONIC_COUNT']
while self._Token in FmpKeyList:
Name = self._Token
FmpKeyList.remove(Name)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if Name == 'IMAGE_TYPE_ID':
if not self._GetNextGuid():
raise Warning.Expected("GUID value for IMAGE_TYPE_ID.", self.FileName, self.CurrentLineNumber)
FmpData.ImageTypeId = self._Token
elif Name == 'CERTIFICATE_GUID':
if not self._GetNextGuid():
raise Warning.Expected("GUID value for CERTIFICATE_GUID.", self.FileName, self.CurrentLineNumber)
FmpData.Certificate_Guid = self._Token
if UUID(FmpData.Certificate_Guid) != EFI_CERT_TYPE_RSA2048_SHA256_GUID and UUID(FmpData.Certificate_Guid) != EFI_CERT_TYPE_PKCS7_GUID:
raise Warning("Only support EFI_CERT_TYPE_RSA2048_SHA256_GUID or EFI_CERT_TYPE_PKCS7_GUID for CERTIFICATE_GUID.", self.FileName, self.CurrentLineNumber)
else:
if not self._GetNextToken():
raise Warning.Expected("value of %s" % Name, self.FileName, self.CurrentLineNumber)
Value = self._Token
if Name == 'IMAGE_HEADER_INIT_VERSION':
if FdfParser._Verify(Name, Value, 'UINT8'):
FmpData.Version = Value
elif Name == 'IMAGE_INDEX':
if FdfParser._Verify(Name, Value, 'UINT8'):
FmpData.ImageIndex = Value
elif Name == 'HARDWARE_INSTANCE':
if FdfParser._Verify(Name, Value, 'UINT8'):
FmpData.HardwareInstance = Value
elif Name == 'MONOTONIC_COUNT':
if FdfParser._Verify(Name, Value, 'UINT64'):
FmpData.MonotonicCount = Value
if FmpData.MonotonicCount.upper().startswith('0X'):
FmpData.MonotonicCount = int(FmpData.MonotonicCount, 16)
else:
FmpData.MonotonicCount = int(FmpData.MonotonicCount)
if not self._GetNextToken():
break
else:
self._UndoToken()
if (FmpData.MonotonicCount and not FmpData.Certificate_Guid) or (not FmpData.MonotonicCount and FmpData.Certificate_Guid):
EdkLogger.error("FdfParser", FORMAT_INVALID, "CERTIFICATE_GUID and MONOTONIC_COUNT must be work as a pair.")
# IMAGE_TYPE_ID is the only required item
if FmpKeyList and 'IMAGE_TYPE_ID' in FmpKeyList:
raise Warning("Missing required 'IMAGE_TYPE_ID' in FMP payload section.", self.FileName, self.CurrentLineNumber)
# get the Image file and Vendor code file
self._GetFMPCapsuleData(FmpData)
if not FmpData.ImageFile:
raise Warning("Missing image file in FMP payload section.", self.FileName, self.CurrentLineNumber)
# check that at most one vendor code file is present
if len(FmpData.VendorCodeFile) > 1:
raise Warning("At most one vendor code file is allowed per FMP payload section.", self.FileName, self.CurrentLineNumber)
self.Profile.FmpPayloadDict[FmpUiName] = FmpData
return True
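# Illustrative [FmpPayload.*] section consumed by _GetFmp() (hypothetical values):
#   [FmpPayload.HypotheticalFirmware]
#   IMAGE_HEADER_INIT_VERSION = 0x02
#   IMAGE_TYPE_ID             = 12345678-1234-1234-1234-123456789ABC
#   IMAGE_INDEX               = 0x1
#   HARDWARE_INSTANCE         = 0x0
#   FILE DATA = HypotheticalPkg/Payload.bin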
## _GetCapsule() method
#
# Get capsule section contents and store its data into capsule list of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a capsule
# @retval False Not able to find a capsule
#
def _GetCapsule(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[CAPSULE."):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[CAPSULE.", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
raise Warning.Expected("[Capsule.]", self.FileName, self.CurrentLineNumber)
CapsuleObj = Capsule()
CapsuleName = self._GetUiName()
if not CapsuleName:
raise Warning.Expected("capsule name", self.FileName, self.CurrentLineNumber)
CapsuleObj.UiCapsuleName = CapsuleName.upper()
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
if self._IsKeyword("CREATE_FILE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("file name", self.FileName, self.CurrentLineNumber)
CapsuleObj.CreateFile = self._Token
self._GetCapsuleStatements(CapsuleObj)
self.Profile.CapsuleDict[CapsuleObj.UiCapsuleName] = CapsuleObj
return True
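# Illustrative [Capsule.*] section consumed by _GetCapsule() (hypothetical values):
#   [Capsule.HypotheticalCapsule]
#   CAPSULE_GUID  = 11111111-2222-3333-4444-555555555555
#   CAPSULE_FLAGS = PersistAcrossReset,InitiateReset
#   FMP_PAYLOAD   = HypotheticalFirmware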
## _GetCapsuleStatements() method
#
# Get statements for capsule
#
# @param self The object pointer
# @param Obj for whom statements are got
#
def _GetCapsuleStatements(self, Obj):
self._GetCapsuleTokens(Obj)
self._GetDefineStatements(Obj)
self._GetSetStatements(Obj)
self._GetCapsuleData(Obj)
## _GetCapsuleTokens() method
#
# Get token statements for capsule
#
# @param self The object pointer
# @param Obj for whom token statements are got
#
def _GetCapsuleTokens(self, Obj):
if not self._GetNextToken():
return False
while self._Token in {"CAPSULE_GUID", "CAPSULE_HEADER_SIZE", "CAPSULE_FLAGS", "OEM_CAPSULE_FLAGS", "CAPSULE_HEADER_INIT_VERSION"}:
Name = self._Token.strip()
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("value", self.FileName, self.CurrentLineNumber)
if Name == 'CAPSULE_FLAGS':
if not self._Token in {"PersistAcrossReset", "PopulateSystemTable", "InitiateReset"}:
raise Warning.Expected("PersistAcrossReset, PopulateSystemTable, or InitiateReset", self.FileName, self.CurrentLineNumber)
Value = self._Token.strip()
while self._IsToken(TAB_COMMA_SPLIT):
Value += TAB_COMMA_SPLIT
if not self._GetNextToken():
raise Warning.Expected("value", self.FileName, self.CurrentLineNumber)
if not self._Token in {"PersistAcrossReset", "PopulateSystemTable", "InitiateReset"}:
raise Warning.Expected("PersistAcrossReset, PopulateSystemTable, or InitiateReset", self.FileName, self.CurrentLineNumber)
Value += self._Token.strip()
elif Name == 'OEM_CAPSULE_FLAGS':
Value = self._Token.strip()
if not Value.upper().startswith('0X'):
raise Warning.Expected("hex value starting with 0x", self.FileName, self.CurrentLineNumber)
try:
Value = int(Value, 0)
except ValueError:
raise Warning.Expected("a valid hex value", self.FileName, self.CurrentLineNumber)
if not 0x0000 <= Value <= 0xFFFF:
raise Warning.Expected("hex value between 0x0000 and 0xFFFF", self.FileName, self.CurrentLineNumber)
Value = self._Token.strip()    # after range validation, keep the original string form
else:
Value = self._Token.strip()
Obj.TokensDict[Name] = Value
if not self._GetNextToken():
return False
self._UndoToken()
## _GetCapsuleData() method
#
# Get capsule data for capsule
#
# @param self The object pointer
# @param Obj for whom capsule data are got
#
def _GetCapsuleData(self, Obj):
while True:
IsInf = self._GetInfStatement(Obj, True)
IsFile = self._GetFileStatement(Obj, True)
IsFv = self._GetFvStatement(Obj)
IsFd = self._GetFdStatement(Obj)
IsAnyFile = self._GetAnyFileStatement(Obj)
IsAfile = self._GetAfileStatement(Obj)
IsFmp = self._GetFmpStatement(Obj)
if not (IsInf or IsFile or IsFv or IsFd or IsAnyFile or IsAfile or IsFmp):
break
## _GetFMPCapsuleData() method
#
# Get capsule data for FMP capsule
#
# @param self The object pointer
# @param Obj for whom capsule data are got
#
def _GetFMPCapsuleData(self, Obj):
while True:
IsFv = self._GetFvStatement(Obj, True)
IsFd = self._GetFdStatement(Obj, True)
IsAnyFile = self._GetAnyFileStatement(Obj, True)
if not (IsFv or IsFd or IsAnyFile):
break
## _GetFvStatement() method
#
# Get FV for capsule
#
# @param self The object pointer
# @param CapsuleObj for whom FV is got
# @retval True Successfully find an FV statement
# @retval False Not able to find an FV statement
#
def _GetFvStatement(self, CapsuleObj, FMPCapsule = False):
if not self._IsKeyword(BINARY_FILE_TYPE_FV):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV name", self.FileName, self.CurrentLineNumber)
if self._Token.upper() not in self.Profile.FvDict:
raise Warning("FV name does not exist", self.FileName, self.CurrentLineNumber)
myCapsuleFv = CapsuleFv()
myCapsuleFv.FvName = self._Token
if FMPCapsule:
if not CapsuleObj.ImageFile:
CapsuleObj.ImageFile.append(myCapsuleFv)
else:
CapsuleObj.VendorCodeFile.append(myCapsuleFv)
else:
CapsuleObj.CapsuleDataList.append(myCapsuleFv)
return True
## _GetFdStatement() method
#
# Get FD for capsule
#
# @param self The object pointer
# @param CapsuleObj for whom FD is got
# @retval True Successfully find an FD statement
# @retval False Not able to find an FD statement
#
def _GetFdStatement(self, CapsuleObj, FMPCapsule = False):
if not self._IsKeyword("FD"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FD name", self.FileName, self.CurrentLineNumber)
if self._Token.upper() not in self.Profile.FdDict:
raise Warning("FD name does not exist", self.FileName, self.CurrentLineNumber)
myCapsuleFd = CapsuleFd()
myCapsuleFd.FdName = self._Token
if FMPCapsule:
if not CapsuleObj.ImageFile:
CapsuleObj.ImageFile.append(myCapsuleFd)
else:
CapsuleObj.VendorCodeFile.append(myCapsuleFd)
else:
CapsuleObj.CapsuleDataList.append(myCapsuleFd)
return True
def _GetFmpStatement(self, CapsuleObj):
if not self._IsKeyword("FMP_PAYLOAD"):
if not self._IsKeyword("FMP"):
return False
if not self._IsKeyword("PAYLOAD"):
self._UndoToken()
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("payload name after FMP_PAYLOAD =", self.FileName, self.CurrentLineNumber)
Payload = self._Token.upper()
if Payload not in self.Profile.FmpPayloadDict:
raise Warning("This FMP Payload does not exist: %s" % self._Token, self.FileName, self.CurrentLineNumber)
CapsuleObj.FmpPayloadList.append(self.Profile.FmpPayloadDict[Payload])
return True
def _ParseRawFileStatement(self):
if not self._IsKeyword("FILE"):
return None
if not self._IsKeyword("DATA"):
self._UndoToken()
return None
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("File name", self.FileName, self.CurrentLineNumber)
AnyFileName = self._Token
self._VerifyFile(AnyFileName)
if not os.path.isabs(AnyFileName):
AnyFileName = mws.join(GenFdsGlobalVariable.WorkSpaceDir, AnyFileName)
return AnyFileName
## _GetAnyFileStatement() method
#
# Get AnyFile for capsule
#
# @param self The object pointer
# @param CapsuleObj for whom AnyFile is got
# @retval True Successfully find an AnyFile statement
# @retval False Not able to find an AnyFile statement
#
def _GetAnyFileStatement(self, CapsuleObj, FMPCapsule = False):
AnyFileName = self._ParseRawFileStatement()
if not AnyFileName:
return False
myCapsuleAnyFile = CapsuleAnyFile()
myCapsuleAnyFile.FileName = AnyFileName
if FMPCapsule:
if not CapsuleObj.ImageFile:
CapsuleObj.ImageFile.append(myCapsuleAnyFile)
else:
CapsuleObj.VendorCodeFile.append(myCapsuleAnyFile)
else:
CapsuleObj.CapsuleDataList.append(myCapsuleAnyFile)
return True
## _GetAfileStatement() method
#
# Get Afile for capsule
#
# @param self The object pointer
# @param CapsuleObj for whom Afile is got
# @retval True Successfully find an Afile statement
# @retval False Not able to find an Afile statement
#
def _GetAfileStatement(self, CapsuleObj):
if not self._IsKeyword("APPEND"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Afile name", self.FileName, self.CurrentLineNumber)
AfileName = self._Token
AfileBaseName = os.path.basename(AfileName)
if os.path.splitext(AfileBaseName)[1] not in {".bin", ".BIN", ".Bin", ".dat", ".DAT", ".Dat", ".data", ".DATA", ".Data"}:
raise Warning('invalid binary file type, should be one of "bin", "BIN", "Bin", "dat", "DAT", "Dat", "data", "DATA", "Data"', \
self.FileName, self.CurrentLineNumber)
if not os.path.isabs(AfileName):
AfileName = GenFdsGlobalVariable.ReplaceWorkspaceMacro(AfileName)
self._VerifyFile(AfileName)
else:
if not os.path.exists(AfileName):
raise Warning('%s does not exist' % AfileName, self.FileName, self.CurrentLineNumber)
myCapsuleAfile = CapsuleAfile()
myCapsuleAfile.FileName = AfileName
CapsuleObj.CapsuleDataList.append(myCapsuleAfile)
return True
## _GetRule() method
#
# Get Rule section contents and store its data into rule list of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a Rule
# @retval False Not able to find a Rule
#
def _GetRule(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[RULE."):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[Rule.", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
#print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
# % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning.Expected("[Rule.]", self.FileName, self.CurrentLineNumber)
if not self._SkipToToken(TAB_SPLIT):
raise Warning.Expected("'.'", self.FileName, self.CurrentLineNumber)
Arch = self._SkippedChars.rstrip(TAB_SPLIT)
ModuleType = self._GetModuleType()
TemplateName = ""
if self._IsToken(TAB_SPLIT):
if not self._GetNextWord():
raise Warning.Expected("template name", self.FileName, self.CurrentLineNumber)
TemplateName = self._Token
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
RuleObj = self._GetRuleFileStatements()
RuleObj.Arch = Arch.upper()
RuleObj.ModuleType = ModuleType
RuleObj.TemplateName = TemplateName
if TemplateName == '':
self.Profile.RuleDict['RULE' + \
TAB_SPLIT + \
Arch.upper() + \
TAB_SPLIT + \
ModuleType.upper() ] = RuleObj
else:
self.Profile.RuleDict['RULE' + \
TAB_SPLIT + \
Arch.upper() + \
TAB_SPLIT + \
ModuleType.upper() + \
TAB_SPLIT + \
TemplateName.upper() ] = RuleObj
return True
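## Illustrative only: for a hypothetical section header
#   [Rule.Common.DXE_DRIVER.MyTemplate]
# the code above stores the parsed rule under the key
#   'RULE.COMMON.DXE_DRIVER.MYTEMPLATE'
# (or 'RULE.COMMON.DXE_DRIVER' when no template name is given).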
## _GetModuleType() method
#
# Return the module type
#
# @param self The object pointer
# @retval string module type
#
def _GetModuleType(self):
if not self._GetNextWord():
raise Warning.Expected("Module type", self.FileName, self.CurrentLineNumber)
if self._Token.upper() not in {
SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM,
SUP_MODULE_DXE_CORE, SUP_MODULE_DXE_DRIVER,
SUP_MODULE_DXE_SAL_DRIVER, SUP_MODULE_DXE_SMM_DRIVER,
SUP_MODULE_DXE_RUNTIME_DRIVER, SUP_MODULE_UEFI_DRIVER,
SUP_MODULE_UEFI_APPLICATION, SUP_MODULE_USER_DEFINED, SUP_MODULE_HOST_APPLICATION,
TAB_DEFAULT, SUP_MODULE_BASE,
EDK_COMPONENT_TYPE_SECURITY_CORE,
EDK_COMPONENT_TYPE_COMBINED_PEIM_DRIVER,
EDK_COMPONENT_TYPE_PIC_PEIM,
EDK_COMPONENT_TYPE_RELOCATABLE_PEIM, "PE32_PEIM",
EDK_COMPONENT_TYPE_BS_DRIVER, EDK_COMPONENT_TYPE_RT_DRIVER,
EDK_COMPONENT_TYPE_SAL_RT_DRIVER,
EDK_COMPONENT_TYPE_APPLICATION, "ACPITABLE",
SUP_MODULE_SMM_CORE, SUP_MODULE_MM_STANDALONE,
SUP_MODULE_MM_CORE_STANDALONE}:
raise Warning("Unknown Module type '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
return self._Token
## _GetFileExtension() method
#
# Return the file extension
#
# @param self The object pointer
# @retval string file name extension
#
def _GetFileExtension(self):
if not self._IsToken(TAB_SPLIT):
raise Warning.Expected("'.'", self.FileName, self.CurrentLineNumber)
Ext = ""
if self._GetNextToken():
if FileExtensionPattern.match(self._Token):
Ext = self._Token
return TAB_SPLIT + Ext
else:
raise Warning("Unknown file extension '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
else:
raise Warning.Expected("file extension", self.FileName, self.CurrentLineNumber)
## _GetRuleFileStatement() method
#
# Get rule contents
#
# @param self The object pointer
# @retval Rule Rule object
#
def _GetRuleFileStatements(self):
if not self._IsKeyword("FILE"):
raise Warning.Expected("FILE", self.FileName, self.CurrentLineNumber)
if not self._GetNextWord():
raise Warning.Expected("FFS type", self.FileName, self.CurrentLineNumber)
Type = self._Token.strip().upper()
if Type not in {"RAW", "FREEFORM", SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM,
"PEI_DXE_COMBO", "DRIVER", SUP_MODULE_DXE_CORE, EDK_COMPONENT_TYPE_APPLICATION,
"FV_IMAGE", "SMM", SUP_MODULE_SMM_CORE, SUP_MODULE_MM_STANDALONE,
SUP_MODULE_MM_CORE_STANDALONE}:
raise Warning("Unknown FV type '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._IsKeyword("$(NAMED_GUID)"):
if not self._GetNextWord():
NamedGuid = self._CurrentLine()[self.CurrentOffsetWithinLine:].split()[0].strip()
if GlobalData.gGuidPatternEnd.match(NamedGuid):
self.CurrentOffsetWithinLine += len(NamedGuid)
self._Token = NamedGuid
else:
raise Warning.Expected("$(NAMED_GUID)", self.FileName, self.CurrentLineNumber)
if self._Token == 'PCD':
if not self._IsToken("("):
raise Warning.Expected("'('", self.FileName, self.CurrentLineNumber)
PcdPair = self._GetNextPcdSettings()
if not self._IsToken(")"):
raise Warning.Expected("')'", self.FileName, self.CurrentLineNumber)
self._Token = 'PCD('+PcdPair[1]+TAB_SPLIT+PcdPair[0]+')'
NameGuid = self._Token
KeepReloc = None
if self._IsKeyword('RELOCS_STRIPPED') or self._IsKeyword('RELOCS_RETAINED'):
if self._FileCouldHaveRelocFlag(Type):
if self._Token == 'RELOCS_STRIPPED':
KeepReloc = False
else:
KeepReloc = True
else:
raise Warning("File type %s could not have reloc strip flag%d" % (Type, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
KeyStringList = []
if self._GetNextToken():
if TokenFindPattern.match(self._Token):
KeyStringList.append(self._Token)
if self._IsToken(TAB_COMMA_SPLIT):
while self._GetNextToken():
if not TokenFindPattern.match(self._Token):
raise Warning.Expected("KeyString \"Target_Tag_Arch\"", self.FileName, self.CurrentLineNumber)
KeyStringList.append(self._Token)
if not self._IsToken(TAB_COMMA_SPLIT):
break
else:
self._UndoToken()
Fixed = False
if self._IsKeyword("Fixed", True):
Fixed = True
CheckSum = False
if self._IsKeyword("CheckSum", True):
CheckSum = True
AlignValue = ""
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
# For FFS, "Auto" is the default alignment and is treated the same as ""
if not self._Token == "Auto":
AlignValue = self._Token
if self._IsToken("{"):
# Complex file rule expected
NewRule = RuleComplexFile()
NewRule.FvFileType = Type
NewRule.NameGuid = NameGuid
NewRule.Alignment = AlignValue
NewRule.CheckSum = CheckSum
NewRule.Fixed = Fixed
NewRule.KeyStringList = KeyStringList
if KeepReloc is not None:
NewRule.KeepReloc = KeepReloc
while True:
IsEncapsulate = self._GetRuleEncapsulationSection(NewRule)
IsLeaf = self._GetEfiSection(NewRule)
if not IsEncapsulate and not IsLeaf:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
return NewRule
else:
# Simple file rule expected
if not self._GetNextWord():
raise Warning.Expected("leaf section type", self.FileName, self.CurrentLineNumber)
SectionName = self._Token
if SectionName not in {
"COMPAT16", BINARY_FILE_TYPE_PE32,
BINARY_FILE_TYPE_PIC, BINARY_FILE_TYPE_TE, "FV_IMAGE",
"RAW",BINARY_FILE_TYPE_DXE_DEPEX, BINARY_FILE_TYPE_UI,
BINARY_FILE_TYPE_PEI_DEPEX, "VERSION", "SUBTYPE_GUID",
BINARY_FILE_TYPE_SMM_DEPEX}:
raise Warning("Unknown leaf section name '%s'" % SectionName, self.FileName, self.CurrentLineNumber)
if self._IsKeyword("Fixed", True):
Fixed = True
if self._IsKeyword("CheckSum", True):
CheckSum = True
SectAlignment = ""
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
if self._Token == 'Auto' and (not SectionName == BINARY_FILE_TYPE_PE32) and (not SectionName == BINARY_FILE_TYPE_TE):
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
SectAlignment = self._Token
Ext = None
if self._IsToken(TAB_VALUE_SPLIT):
Ext = self._GetFileExtension()
elif not self._GetNextToken():
raise Warning.Expected("File name", self.FileName, self.CurrentLineNumber)
NewRule = RuleSimpleFile()
NewRule.SectionType = SectionName
NewRule.FvFileType = Type
NewRule.NameGuid = NameGuid
NewRule.Alignment = AlignValue
NewRule.SectAlignment = SectAlignment
NewRule.CheckSum = CheckSum
NewRule.Fixed = Fixed
NewRule.KeyStringList = KeyStringList
if KeepReloc is not None:
NewRule.KeepReloc = KeepReloc
NewRule.FileExtension = Ext
NewRule.FileName = self._Token
return NewRule
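## Illustrative only (hypothetical rule bodies): a '{' after the FILE
# attributes selects the complex form above, otherwise the simple form:
#
#   FILE DRIVER = $(NAMED_GUID) {            # RuleComplexFile
#     PE32 PE32 $(INF_OUTPUT)/$(MODULE_NAME).efi
#   }
#
#   FILE DRIVER = $(NAMED_GUID) PE32 |.efi   # RuleSimpleFile (leaf type + ext)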
## _GetEfiSection() method
#
# Get section list for Rule
#
# @param self The object pointer
# @param Obj for whom section is got
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def _GetEfiSection(self, Obj):
OldPos = self.GetFileBufferPos()
EfiSectionObj = EfiSection()
if not self._GetNextWord():
CurrentLine = self._CurrentLine()[self.CurrentOffsetWithinLine:].split()[0].strip()
if self._Token == '{' and Obj.FvFileType == "RAW" and TAB_SPLIT in CurrentLine:
if self._IsToken(TAB_VALUE_SPLIT):
EfiSectionObj.FileExtension = self._GetFileExtension()
elif self._GetNextToken():
EfiSectionObj.FileName = self._Token
EfiSectionObj.SectionType = BINARY_FILE_TYPE_RAW
Obj.SectionList.append(EfiSectionObj)
return True
else:
return False
SectionName = self._Token
if SectionName not in {
"COMPAT16", BINARY_FILE_TYPE_PE32,
BINARY_FILE_TYPE_PIC, BINARY_FILE_TYPE_TE, "FV_IMAGE",
"RAW",BINARY_FILE_TYPE_DXE_DEPEX, BINARY_FILE_TYPE_UI,
BINARY_FILE_TYPE_PEI_DEPEX, "VERSION", "SUBTYPE_GUID",
BINARY_FILE_TYPE_SMM_DEPEX, BINARY_FILE_TYPE_GUID}:
self._UndoToken()
return False
if SectionName == "FV_IMAGE":
FvImageSectionObj = FvImageSection()
if self._IsKeyword("FV_IMAGE"):
pass
if self._IsToken("{"):
FvObj = FV()
self._GetDefineStatements(FvObj)
self._GetBlockStatement(FvObj)
self._GetSetStatements(FvObj)
self._GetFvAlignment(FvObj)
self._GetFvAttributes(FvObj)
# an FV may declare up to two APRIORI sections (PEI and DXE), so parse twice
self._GetAprioriSection(FvObj)
self._GetAprioriSection(FvObj)
while True:
IsInf = self._GetInfStatement(FvObj)
IsFile = self._GetFileStatement(FvObj)
if not IsInf and not IsFile:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
FvImageSectionObj.Fv = FvObj
FvImageSectionObj.FvName = None
else:
if not self._IsKeyword(BINARY_FILE_TYPE_FV):
raise Warning.Expected("'FV'", self.FileName, self.CurrentLineNumber)
FvImageSectionObj.FvFileType = self._Token
if self._GetAlignment():
if self._Token not in ALIGNMENT_NOAUTO:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
FvImageSectionObj.Alignment = self._Token
if self._IsToken(TAB_VALUE_SPLIT):
FvImageSectionObj.FvFileExtension = self._GetFileExtension()
elif self._GetNextToken():
if self._Token not in {
T_CHAR_BRACE_R, "COMPAT16", BINARY_FILE_TYPE_PE32,
BINARY_FILE_TYPE_PIC, BINARY_FILE_TYPE_TE,
"FV_IMAGE", "RAW", BINARY_FILE_TYPE_DXE_DEPEX,
BINARY_FILE_TYPE_UI, "VERSION",
BINARY_FILE_TYPE_PEI_DEPEX, BINARY_FILE_TYPE_GUID,
BINARY_FILE_TYPE_SMM_DEPEX}:
FvImageSectionObj.FvFileName = self._Token
else:
self._UndoToken()
else:
raise Warning.Expected("FV file name", self.FileName, self.CurrentLineNumber)
Obj.SectionList.append(FvImageSectionObj)
return True
EfiSectionObj.SectionType = SectionName
if not self._GetNextToken():
raise Warning.Expected("file type", self.FileName, self.CurrentLineNumber)
if self._Token == "STRING":
if not self._RuleSectionCouldHaveString(EfiSectionObj.SectionType):
raise Warning("%s section could NOT have string data%d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Quoted String", self.FileName, self.CurrentLineNumber)
if self._GetStringData():
EfiSectionObj.StringData = self._Token
if self._IsKeyword("BUILD_NUM"):
if not self._RuleSectionCouldHaveBuildNum(EfiSectionObj.SectionType):
raise Warning("%s section could NOT have BUILD_NUM%d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Build number", self.FileName, self.CurrentLineNumber)
EfiSectionObj.BuildNum = self._Token
else:
EfiSectionObj.FileType = self._Token
self._CheckRuleSectionFileType(EfiSectionObj.SectionType, EfiSectionObj.FileType)
if self._IsKeyword("Optional"):
if not self._RuleSectionCouldBeOptional(EfiSectionObj.SectionType):
raise Warning("%s section could NOT be optional%d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
EfiSectionObj.Optional = True
if self._IsKeyword("BUILD_NUM"):
if not self._RuleSectionCouldHaveBuildNum(EfiSectionObj.SectionType):
raise Warning("%s section could NOT have BUILD_NUM%d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Build number", self.FileName, self.CurrentLineNumber)
EfiSectionObj.BuildNum = self._Token
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
if self._Token == 'Auto' and (not SectionName == BINARY_FILE_TYPE_PE32) and (not SectionName == BINARY_FILE_TYPE_TE):
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
EfiSectionObj.Alignment = self._Token
if self._IsKeyword('RELOCS_STRIPPED') or self._IsKeyword('RELOCS_RETAINED'):
if self._SectionCouldHaveRelocFlag(EfiSectionObj.SectionType):
if self._Token == 'RELOCS_STRIPPED':
EfiSectionObj.KeepReloc = False
else:
EfiSectionObj.KeepReloc = True
if Obj.KeepReloc is not None and Obj.KeepReloc != EfiSectionObj.KeepReloc:
raise Warning("Section type %s has reloc strip flag conflict with Rule" % EfiSectionObj.SectionType, self.FileName, self.CurrentLineNumber)
else:
raise Warning("Section type %s could not have reloc strip flag" % EfiSectionObj.SectionType, self.FileName, self.CurrentLineNumber)
if self._IsToken(TAB_VALUE_SPLIT):
EfiSectionObj.FileExtension = self._GetFileExtension()
elif self._GetNextToken():
if self._Token not in {
T_CHAR_BRACE_R, "COMPAT16", BINARY_FILE_TYPE_PE32,
BINARY_FILE_TYPE_PIC, BINARY_FILE_TYPE_TE,
"FV_IMAGE", "RAW", BINARY_FILE_TYPE_DXE_DEPEX,
BINARY_FILE_TYPE_UI, "VERSION",
BINARY_FILE_TYPE_PEI_DEPEX, BINARY_FILE_TYPE_GUID,
BINARY_FILE_TYPE_SMM_DEPEX}:
if self._Token.startswith('PCD'):
self._UndoToken()
self._GetNextWord()
if self._Token == 'PCD':
if not self._IsToken("("):
raise Warning.Expected("'('", self.FileName, self.CurrentLineNumber)
PcdPair = self._GetNextPcdSettings()
if not self._IsToken(")"):
raise Warning.Expected("')'", self.FileName, self.CurrentLineNumber)
self._Token = 'PCD('+PcdPair[1]+TAB_SPLIT+PcdPair[0]+')'
EfiSectionObj.FileName = self._Token
else:
self._UndoToken()
else:
raise Warning.Expected("section file name", self.FileName, self.CurrentLineNumber)
Obj.SectionList.append(EfiSectionObj)
return True
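## Illustrative only: typical leaf section lines accepted above inside a
# complex rule body (identifiers are hypothetical):
#   UI STRING="$(MODULE_NAME)" Optional
#   VERSION STRING="1.0" BUILD_NUM=$(BUILD_NUMBER)
#   PE32 PE32 Align=32 $(INF_OUTPUT)/$(MODULE_NAME).efi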
## _RuleSectionCouldBeOptional() method
#
# Get whether a section could be optional
#
# @param SectionType The section type to check
# @retval True section could be optional
# @retval False section never optional
#
@staticmethod
def _RuleSectionCouldBeOptional(SectionType):
if SectionType in {BINARY_FILE_TYPE_DXE_DEPEX, BINARY_FILE_TYPE_UI, "VERSION", BINARY_FILE_TYPE_PEI_DEPEX, "RAW", BINARY_FILE_TYPE_SMM_DEPEX}:
return True
else:
return False
## _RuleSectionCouldHaveBuildNum() method
#
# Get whether a section could have build number information
#
# @param SectionType The section type to check
# @retval True section could have build number information
# @retval False section never have build number information
#
@staticmethod
def _RuleSectionCouldHaveBuildNum(SectionType):
if SectionType == "VERSION":
return True
else:
return False
## _RuleSectionCouldHaveString() method
#
# Get whether a section could have string
#
# @param SectionType The section type to check
# @retval True section could have string
# @retval False section never have string
#
@staticmethod
def _RuleSectionCouldHaveString(SectionType):
if SectionType in {BINARY_FILE_TYPE_UI, "VERSION"}:
return True
else:
return False
## _CheckRuleSectionFileType() method
#
# Get whether a section matches a file type
#
# @param self The object pointer
# @param SectionType The section type to check
# @param FileType The file type to check
#
def _CheckRuleSectionFileType(self, SectionType, FileType):
WarningString = "Incorrect section file type '%s'"
if SectionType == "COMPAT16":
if FileType not in {"COMPAT16", "SEC_COMPAT16"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_PE32:
if FileType not in {BINARY_FILE_TYPE_PE32, "SEC_PE32"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_PIC:
if FileType not in {BINARY_FILE_TYPE_PIC, "SEC_PIC"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_TE:
if FileType not in {BINARY_FILE_TYPE_TE, "SEC_TE"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == "RAW":
if FileType not in {BINARY_FILE_TYPE_BIN, "SEC_BIN", "RAW", "ASL", "ACPI"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_DXE_DEPEX or SectionType == BINARY_FILE_TYPE_SMM_DEPEX:
if FileType not in {BINARY_FILE_TYPE_DXE_DEPEX, "SEC_DXE_DEPEX", BINARY_FILE_TYPE_SMM_DEPEX}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_UI:
if FileType not in {BINARY_FILE_TYPE_UI, "SEC_UI"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == "VERSION":
if FileType not in {"VERSION", "SEC_VERSION"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_PEI_DEPEX:
if FileType not in {BINARY_FILE_TYPE_PEI_DEPEX, "SEC_PEI_DEPEX"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_GUID:
if FileType not in {BINARY_FILE_TYPE_PE32, "SEC_GUID"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
## _GetRuleEncapsulationSection() method
#
# Get encapsulation section for Rule
#
# @param self The object pointer
# @param theRule for whom section is got
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def _GetRuleEncapsulationSection(self, theRule):
if self._IsKeyword("COMPRESS"):
Type = "PI_STD"
if self._IsKeyword("PI_STD") or self._IsKeyword("PI_NONE"):
Type = self._Token
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
CompressSectionObj = CompressSection()
CompressSectionObj.CompType = Type
# Recursive sections...
while True:
IsEncapsulate = self._GetRuleEncapsulationSection(CompressSectionObj)
IsLeaf = self._GetEfiSection(CompressSectionObj)
if not IsEncapsulate and not IsLeaf:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
theRule.SectionList.append(CompressSectionObj)
return True
elif self._IsKeyword("GUIDED"):
GuidValue = None
if self._GetNextGuid():
if self._Token in GlobalData.gGuidDict:
self._Token = GuidStructureStringToGuidString(GlobalData.gGuidDict[self._Token]).upper()
GuidValue = self._Token
if self._IsKeyword("$(NAMED_GUID)"):
GuidValue = self._Token
AttribDict = self._GetGuidAttrib()
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
GuidSectionObj = GuidSection()
GuidSectionObj.NameGuid = GuidValue
GuidSectionObj.SectionType = "GUIDED"
GuidSectionObj.ProcessRequired = AttribDict["PROCESSING_REQUIRED"]
GuidSectionObj.AuthStatusValid = AttribDict["AUTH_STATUS_VALID"]
GuidSectionObj.ExtraHeaderSize = AttribDict["EXTRA_HEADER_SIZE"]
# Efi sections...
while True:
IsEncapsulate = self._GetRuleEncapsulationSection(GuidSectionObj)
IsLeaf = self._GetEfiSection(GuidSectionObj)
if not IsEncapsulate and not IsLeaf:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
theRule.SectionList.append(GuidSectionObj)
return True
return False
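## Illustrative only: encapsulation sections may nest, e.g. a hypothetical
# LZMA-guided wrapper around a compressed PE32 leaf:
#   GUIDED EE4E5898-3914-4259-9D6E-DC7BD79403CF PROCESSING_REQUIRED = TRUE {
#     COMPRESS PI_STD {
#       PE32 PE32 $(INF_OUTPUT)/$(MODULE_NAME).efi
#     }
#   }
# Each nesting level re-enters this method through the while loops above.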
## _GetOptionRom() method
#
# Get OptionROM section contents and store its data into OptionROM list of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a OptionROM
# @retval False Not able to find a OptionROM
#
def _GetOptionRom(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[OPTIONROM."):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[OptionRom.", True):
raise Warning("Unknown Keyword '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
OptRomName = self._GetUiName()
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
OptRomObj = OPTIONROM(OptRomName)
self.Profile.OptRomDict[OptRomName] = OptRomObj
while True:
isInf = self._GetOptRomInfStatement(OptRomObj)
isFile = self._GetOptRomFileStatement(OptRomObj)
if not isInf and not isFile:
break
return True
## _GetOptRomInfStatement() method
#
# Get INF statements
#
# @param self The object pointer
# @param Obj for whom inf statement is got
# @retval True Successfully find inf statement
# @retval False Not able to find inf statement
#
def _GetOptRomInfStatement(self, Obj):
if not self._IsKeyword("INF"):
return False
ffsInf = OptRomInfStatement()
self._GetInfOptions(ffsInf)
if not self._GetNextToken():
raise Warning.Expected("INF file path", self.FileName, self.CurrentLineNumber)
ffsInf.InfFileName = self._Token
if ffsInf.InfFileName.replace(TAB_WORKSPACE, '').find('$') == -1:
#check for file path
ErrorCode, ErrorInfo = PathClass(NormPath(ffsInf.InfFileName), GenFdsGlobalVariable.WorkSpaceDir).Validate()
if ErrorCode != 0:
EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
NewFileName = ffsInf.InfFileName
if ffsInf.OverrideGuid:
NewFileName = ProcessDuplicatedInf(PathClass(ffsInf.InfFileName,GenFdsGlobalVariable.WorkSpaceDir), ffsInf.OverrideGuid, GenFdsGlobalVariable.WorkSpaceDir).Path
if not NewFileName in self.Profile.InfList:
self.Profile.InfList.append(NewFileName)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.InfFileLineList.append(FileLineTuple)
if ffsInf.UseArch:
if ffsInf.UseArch not in self.Profile.InfDict:
self.Profile.InfDict[ffsInf.UseArch] = [ffsInf.InfFileName]
else:
self.Profile.InfDict[ffsInf.UseArch].append(ffsInf.InfFileName)
else:
self.Profile.InfDict['ArchTBD'].append(ffsInf.InfFileName)
self._GetOptRomOverrides (ffsInf)
Obj.FfsList.append(ffsInf)
return True
## _GetOptRomOverrides() method
#
# Get overrides for OptROM INF & FILE
#
# @param self The object pointer
# @param Obj INF or FILE statement for which overrides are parsed
#
def _GetOptRomOverrides(self, Obj):
if self._IsToken('{'):
Overrides = OverrideAttribs()
while True:
if self._IsKeyword("PCI_VENDOR_ID"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex vendor id", self.FileName, self.CurrentLineNumber)
Overrides.PciVendorId = self._Token
continue
if self._IsKeyword("PCI_CLASS_CODE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex class code", self.FileName, self.CurrentLineNumber)
Overrides.PciClassCode = self._Token
continue
if self._IsKeyword("PCI_DEVICE_ID"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
# Get a list of PCI IDs
Overrides.PciDeviceId = ""
while (self._GetNextHexNumber()):
Overrides.PciDeviceId = "{} {}".format(Overrides.PciDeviceId, self._Token)
if not Overrides.PciDeviceId:
raise Warning.Expected("one or more Hex device ids", self.FileName, self.CurrentLineNumber)
continue
if self._IsKeyword("PCI_REVISION"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex revision", self.FileName, self.CurrentLineNumber)
Overrides.PciRevision = self._Token
continue
if self._IsKeyword("PCI_COMPRESS"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("TRUE/FALSE for compress", self.FileName, self.CurrentLineNumber)
Overrides.NeedCompress = self._Token.upper() == 'TRUE'
continue
if self._IsToken(T_CHAR_BRACE_R):
break
else:
EdkLogger.error("FdfParser", FORMAT_INVALID, File=self.FileName, Line=self.CurrentLineNumber)
Obj.OverrideAttribs = Overrides
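## Illustrative only: a hypothetical override block consumed by the loop
# above; every keyword is optional and order does not matter:
#   {
#     PCI_VENDOR_ID  = 0x8086
#     PCI_DEVICE_ID  = 0x1234 0x5678   # one or more hex ids
#     PCI_CLASS_CODE = 0x030000
#     PCI_REVISION   = 0x01
#     PCI_COMPRESS   = TRUE
#   }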
## _GetOptRomFileStatement() method
#
# Get FILE statements
#
# @param self The object pointer
# @param Obj for whom FILE statement is got
# @retval True Successfully find FILE statement
# @retval False Not able to find FILE statement
#
def _GetOptRomFileStatement(self, Obj):
if not self._IsKeyword("FILE"):
return False
FfsFileObj = OptRomFileStatement()
if not self._IsKeyword("EFI") and not self._IsKeyword(BINARY_FILE_TYPE_BIN):
raise Warning.Expected("Binary type (EFI/BIN)", self.FileName, self.CurrentLineNumber)
FfsFileObj.FileType = self._Token
if not self._GetNextToken():
raise Warning.Expected("File path", self.FileName, self.CurrentLineNumber)
FfsFileObj.FileName = self._Token
if FfsFileObj.FileName.replace(TAB_WORKSPACE, '').find('$') == -1:
#check for file path
ErrorCode, ErrorInfo = PathClass(NormPath(FfsFileObj.FileName), GenFdsGlobalVariable.WorkSpaceDir).Validate()
if ErrorCode != 0:
EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
if FfsFileObj.FileType == 'EFI':
self._GetOptRomOverrides(FfsFileObj)
Obj.FfsList.append(FfsFileObj)
return True
## _GetCapInFd() method
#
# Get Cap list contained in FD
#
# @param self The object pointer
# @param FdName FD name
# @retval CapList List of Capsule in FD
#
def _GetCapInFd (self, FdName):
CapList = []
if FdName.upper() in self.Profile.FdDict:
FdObj = self.Profile.FdDict[FdName.upper()]
for elementRegion in FdObj.RegionList:
if elementRegion.RegionType == 'CAPSULE':
for elementRegionData in elementRegion.RegionDataList:
# check for None before calling string methods
if elementRegionData is None or elementRegionData.endswith(".cap"):
continue
if elementRegionData.upper() not in CapList:
CapList.append(elementRegionData.upper())
return CapList
## _GetReferencedFdCapTuple() method
#
# Get FV and FD list referenced by a capsule image
#
# @param self The object pointer
# @param CapObj Capsule section to be searched
# @param RefFdList referenced FD by section
# @param RefFvList referenced FV by section
#
def _GetReferencedFdCapTuple(self, CapObj, RefFdList=None, RefFvList=None):
# avoid shared mutable default arguments
RefFdList = [] if RefFdList is None else RefFdList
RefFvList = [] if RefFvList is None else RefFvList
for CapsuleDataObj in CapObj.CapsuleDataList:
if hasattr(CapsuleDataObj, 'FvName') and CapsuleDataObj.FvName is not None and CapsuleDataObj.FvName.upper() not in RefFvList:
RefFvList.append (CapsuleDataObj.FvName.upper())
elif hasattr(CapsuleDataObj, 'FdName') and CapsuleDataObj.FdName is not None and CapsuleDataObj.FdName.upper() not in RefFdList:
RefFdList.append (CapsuleDataObj.FdName.upper())
elif CapsuleDataObj.Ffs is not None:
if isinstance(CapsuleDataObj.Ffs, FileStatement):
if CapsuleDataObj.Ffs.FvName is not None and CapsuleDataObj.Ffs.FvName.upper() not in RefFvList:
RefFvList.append(CapsuleDataObj.Ffs.FvName.upper())
elif CapsuleDataObj.Ffs.FdName is not None and CapsuleDataObj.Ffs.FdName.upper() not in RefFdList:
RefFdList.append(CapsuleDataObj.Ffs.FdName.upper())
else:
self._GetReferencedFdFvTupleFromSection(CapsuleDataObj.Ffs, RefFdList, RefFvList)
## _GetFvInFd() method
#
# Get FV list contained in FD
#
# @param self The object pointer
# @param FdName FD name
# @retval FvList list of FV in FD
#
def _GetFvInFd (self, FdName):
FvList = []
if FdName.upper() in self.Profile.FdDict:
FdObj = self.Profile.FdDict[FdName.upper()]
for elementRegion in FdObj.RegionList:
if elementRegion.RegionType == BINARY_FILE_TYPE_FV:
for elementRegionData in elementRegion.RegionDataList:
# check for None before calling string methods
if elementRegionData is None or elementRegionData.endswith(".fv"):
continue
if elementRegionData.upper() not in FvList:
FvList.append(elementRegionData.upper())
return FvList
## _GetReferencedFdFvTuple() method
#
# Get FD and FV list referenced by the FFS files of an FV
#
# @param self The object pointer
# @param FvObj FV object whose FFS files are searched
# @param RefFdList referenced FD by section
# @param RefFvList referenced FV by section
#
def _GetReferencedFdFvTuple(self, FvObj, RefFdList=None, RefFvList=None):
# avoid shared mutable default arguments
RefFdList = [] if RefFdList is None else RefFdList
RefFvList = [] if RefFvList is None else RefFvList
for FfsObj in FvObj.FfsList:
if isinstance(FfsObj, FileStatement):
if FfsObj.FvName is not None and FfsObj.FvName.upper() not in RefFvList:
RefFvList.append(FfsObj.FvName.upper())
elif FfsObj.FdName is not None and FfsObj.FdName.upper() not in RefFdList:
RefFdList.append(FfsObj.FdName.upper())
else:
self._GetReferencedFdFvTupleFromSection(FfsObj, RefFdList, RefFvList)
## _GetReferencedFdFvTupleFromSection() method
#
# Get FD and FV list referenced by a FFS section
#
# @param self The object pointer
# @param FfsFile contains sections to be searched
# @param FdList referenced FD by section
# @param FvList referenced FV by section
#
def _GetReferencedFdFvTupleFromSection(self, FfsFile, FdList=None, FvList=None):
# avoid shared mutable default arguments
FdList = [] if FdList is None else FdList
FvList = [] if FvList is None else FvList
SectionStack = list(FfsFile.SectionList)
while SectionStack != []:
SectionObj = SectionStack.pop()
if isinstance(SectionObj, FvImageSection):
if SectionObj.FvName is not None and SectionObj.FvName.upper() not in FvList:
FvList.append(SectionObj.FvName.upper())
if SectionObj.Fv is not None and SectionObj.Fv.UiFvName is not None and SectionObj.Fv.UiFvName.upper() not in FvList:
FvList.append(SectionObj.Fv.UiFvName.upper())
self._GetReferencedFdFvTuple(SectionObj.Fv, FdList, FvList)
if isinstance(SectionObj, CompressSection) or isinstance(SectionObj, GuidSection):
SectionStack.extend(SectionObj.SectionList)
## CycleReferenceCheck() method
#
# Check whether cycle reference exists in FDF
#
# @param self The object pointer
# @retval True cycle reference exists
# @retval False Not exists cycle reference
#
def CycleReferenceCheck(self):
#
# Check the cycle between FV and FD image
#
MaxLength = len (self.Profile.FvDict)
for FvName in self.Profile.FvDict:
LogStr = "\nCycle Reference Checking for FV: %s\n" % FvName
# set(FvName) would build a set of the name's characters; we need a set containing the name
RefFvStack = {FvName}
FdAnalyzedList = set()
Index = 0
while RefFvStack and Index < MaxLength:
Index = Index + 1
FvNameFromStack = RefFvStack.pop()
if FvNameFromStack.upper() in self.Profile.FvDict:
FvObj = self.Profile.FvDict[FvNameFromStack.upper()]
else:
continue
RefFdList = []
RefFvList = []
self._GetReferencedFdFvTuple(FvObj, RefFdList, RefFvList)
for RefFdName in RefFdList:
if RefFdName in FdAnalyzedList:
continue
LogStr += "FV %s contains FD %s\n" % (FvNameFromStack, RefFdName)
FvInFdList = self._GetFvInFd(RefFdName)
if FvInFdList != []:
for FvNameInFd in FvInFdList:
LogStr += "FD %s contains FV %s\n" % (RefFdName, FvNameInFd)
if FvNameInFd not in RefFvStack:
RefFvStack.add(FvNameInFd)
if FvName in RefFvStack or FvNameFromStack in RefFvStack:
EdkLogger.info(LogStr)
return True
FdAnalyzedList.add(RefFdName)
for RefFvName in RefFvList:
LogStr += "FV %s contains FV %s\n" % (FvNameFromStack, RefFvName)
if RefFvName not in RefFvStack:
RefFvStack.add(RefFvName)
if FvName in RefFvStack or FvNameFromStack in RefFvStack:
EdkLogger.info(LogStr)
return True
#
# Check the cycle between Capsule and FD image
#
MaxLength = len (self.Profile.CapsuleDict)
for CapName in self.Profile.CapsuleDict:
#
# Capsule image to be checked.
#
LogStr = "\n\n\nCycle Reference Checking for Capsule: %s\n" % CapName
RefCapStack = {CapName}
FdAnalyzedList = set()
FvAnalyzedList = set()
Index = 0
while RefCapStack and Index < MaxLength:
Index = Index + 1
CapNameFromStack = RefCapStack.pop()
if CapNameFromStack.upper() in self.Profile.CapsuleDict:
CapObj = self.Profile.CapsuleDict[CapNameFromStack.upper()]
else:
continue
RefFvList = []
RefFdList = []
self._GetReferencedFdCapTuple(CapObj, RefFdList, RefFvList)
FvListLength = 0
FdListLength = 0
while FvListLength < len (RefFvList) or FdListLength < len (RefFdList):
for RefFdName in RefFdList:
if RefFdName in FdAnalyzedList:
continue
LogStr += "Capsule %s contains FD %s\n" % (CapNameFromStack, RefFdName)
for CapNameInFd in self._GetCapInFd(RefFdName):
LogStr += "FD %s contains Capsule %s\n" % (RefFdName, CapNameInFd)
if CapNameInFd not in RefCapStack:
RefCapStack.add(CapNameInFd)
if CapName in RefCapStack or CapNameFromStack in RefCapStack:
EdkLogger.info(LogStr)
return True
for FvNameInFd in self._GetFvInFd(RefFdName):
LogStr += "FD %s contains FV %s\n" % (RefFdName, FvNameInFd)
if FvNameInFd not in RefFvList:
RefFvList.append(FvNameInFd)
FdAnalyzedList.add(RefFdName)
#
# the number of the parsed FV and FD image
#
FvListLength = len (RefFvList)
FdListLength = len (RefFdList)
for RefFvName in RefFvList:
if RefFvName in FvAnalyzedList:
continue
LogStr += "Capsule %s contains FV %s\n" % (CapNameFromStack, RefFvName)
if RefFvName.upper() in self.Profile.FvDict:
FvObj = self.Profile.FvDict[RefFvName.upper()]
else:
continue
self._GetReferencedFdFvTuple(FvObj, RefFdList, RefFvList)
FvAnalyzedList.add(RefFvName)
return False
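## Sketch of the check above on a hypothetical cycle: if FV "FVMAIN"
# contains FD "FD0" (via a FILE FD= statement) and FD0's region list in
# turn places FVMAIN, then FVMAIN re-enters RefFvStack, the accumulated
# LogStr chain is printed, and the method returns True. The
# Index < MaxLength bound keeps the walk finite for malformed profiles.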
def GetAllIncludedFile (self):
global AllIncludeFileList
return AllIncludeFileList
if __name__ == "__main__":
import sys
try:
test_file = sys.argv[1]
except IndexError:
print("Usage: %s filename" % sys.argv[0])
sys.exit(1)
parser = FdfParser(test_file)
try:
parser.ParseFile()
parser.CycleReferenceCheck()
except Warning as X:
print(str(X))
else:
print("Success!")
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/FdfParser.py
|
## @file
# Python 'GenFds' package initialization file.
#
# This file is required to make Python interpreter treat the directory
# as containing package.
#
# Copyright (c) 2007 - 2010, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/__init__.py
|
## @file
# Complex Rule object for generating FFS
#
# Copyright (c) 2007, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from . import Rule
from CommonDataClass.FdfClass import RuleComplexFileClassObject
## complex rule
#
#
class RuleComplexFile(RuleComplexFileClassObject) :
## The constructor
#
# @param self The object pointer
#
def __init__(self):
RuleComplexFileClassObject.__init__(self)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/RuleComplexFile.py
|
## @file
# process GUIDed section generation
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2018, Hewlett Packard Enterprise Development, L.P.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from . import Section
import subprocess
from .Ffs import SectionSuffix
import Common.LongFilePathOs as os
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from .GenFdsGlobalVariable import FindExtendTool
from CommonDataClass.FdfClass import GuidSectionClassObject
import sys
from Common import EdkLogger
from Common.BuildToolError import *
from .FvImageSection import FvImageSection
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.DataType import *
## generate GUIDed section
#
#
class GuidSection(GuidSectionClassObject) :
## The constructor
#
# @param self The object pointer
#
def __init__(self):
GuidSectionClassObject.__init__(self)
## GenSection() method
#
# Generate GUIDed section
#
# @param self The object pointer
# @param OutputPath Where to place output file
# @param ModuleName Which module this section belongs to
# @param SecNum Index of section
# @param KeyStringList Filter for inputs of section generation
# @param FfsInf FfsInfStatement object that contains this section data
# @param Dict dictionary contains macro and its value
# @retval tuple (Generated file name, section alignment)
#
def GenSection(self, OutputPath, ModuleName, SecNum, KeyStringList, FfsInf=None, Dict=None, IsMakefile=False):
#
# Generate all section
#
self.KeyStringList = KeyStringList
self.CurrentArchList = GenFdsGlobalVariable.ArchList
if FfsInf is not None:
self.Alignment = FfsInf.__ExtendMacro__(self.Alignment)
self.NameGuid = FfsInf.__ExtendMacro__(self.NameGuid)
self.SectionType = FfsInf.__ExtendMacro__(self.SectionType)
self.CurrentArchList = [FfsInf.CurrentArch]
SectFile = tuple()
SectAlign = []
Index = 0
MaxAlign = None
if Dict is None:
Dict = {}
if self.FvAddr != []:
FvAddrIsSet = True
else:
FvAddrIsSet = False
if self.ProcessRequired in ("TRUE", "1"):
if self.FvAddr != []:
# FvAddr is not used once the image has been processed
self.FvAddr = []
if self.FvParentAddr is not None:
# the parent FV address is likewise unused for processed images
self.FvParentAddr = None
for Sect in self.SectionList:
Index = Index + 1
SecIndex = '%s.%d' % (SecNum, Index)
# set base address for inside FvImage
if isinstance(Sect, FvImageSection):
if self.FvAddr != []:
Sect.FvAddr = self.FvAddr.pop(0)
self.IncludeFvSection = True
elif isinstance(Sect, GuidSection):
Sect.FvAddr = self.FvAddr
Sect.FvParentAddr = self.FvParentAddr
ReturnSectList, align = Sect.GenSection(OutputPath, ModuleName, SecIndex, KeyStringList, FfsInf, Dict, IsMakefile=IsMakefile)
if isinstance(Sect, GuidSection):
if Sect.IncludeFvSection:
self.IncludeFvSection = Sect.IncludeFvSection
if align is not None:
if MaxAlign is None:
MaxAlign = align
if GenFdsGlobalVariable.GetAlignment (align) > GenFdsGlobalVariable.GetAlignment (MaxAlign):
MaxAlign = align
if ReturnSectList != []:
if align is None:
align = "1"
for file in ReturnSectList:
SectFile += (file,)
SectAlign.append(align)
if MaxAlign is not None:
if self.Alignment is None:
self.Alignment = MaxAlign
else:
if GenFdsGlobalVariable.GetAlignment (MaxAlign) > GenFdsGlobalVariable.GetAlignment (self.Alignment):
self.Alignment = MaxAlign
OutputFile = OutputPath + \
os.sep + \
ModuleName + \
SUP_MODULE_SEC + \
SecNum + \
SectionSuffix['GUIDED']
OutputFile = os.path.normpath(OutputFile)
ExternalTool = None
ExternalOption = None
if self.NameGuid is not None:
ExternalTool, ExternalOption = FindExtendTool(self.KeyStringList, self.CurrentArchList, self.NameGuid)
#
# If no GUID is specified, generate a default CRC32 GUIDED section
#
if self.NameGuid is None :
GenFdsGlobalVariable.VerboseLogger("Use GenSection function Generate CRC32 Section")
GenFdsGlobalVariable.GenerateSection(OutputFile, SectFile, Section.Section.SectionType[self.SectionType], InputAlign=SectAlign, IsMakefile=IsMakefile)
OutputFileList = []
OutputFileList.append(OutputFile)
return OutputFileList, self.Alignment
#or GUID not in External Tool List
elif ExternalTool is None:
EdkLogger.error("GenFds", GENFDS_ERROR, "No tool found with GUID %s" % self.NameGuid)
else:
DummyFile = OutputFile + ".dummy"
#
# Call GenSection with DUMMY section type.
#
GenFdsGlobalVariable.GenerateSection(DummyFile, SectFile, InputAlign=SectAlign, IsMakefile=IsMakefile)
#
# Use external tool process the Output
#
TempFile = OutputPath + \
os.sep + \
ModuleName + \
SUP_MODULE_SEC + \
SecNum + \
'.tmp'
TempFile = os.path.normpath(TempFile)
#
# Remove temp file if its time stamp is older than dummy file
# Just in case the external tool fails at this time but succeeded before
# Error should be reported if the external tool does not generate a new output based on new input
#
if os.path.exists(TempFile) and os.path.exists(DummyFile) and os.path.getmtime(TempFile) < os.path.getmtime(DummyFile):
os.remove(TempFile)
FirstCall = False
CmdOption = '-e'
if ExternalOption is not None:
CmdOption = CmdOption + ' ' + ExternalOption
if not GenFdsGlobalVariable.EnableGenfdsMultiThread:
if self.ProcessRequired not in ("TRUE", "1") and self.IncludeFvSection and not FvAddrIsSet and self.FvParentAddr is not None:
#FirstCall is only set for the encapsulated flash FV image without process required attribute.
FirstCall = True
#
# Call external tool
#
ReturnValue = [1]
if FirstCall:
#first try to call the guided tool with -z option and CmdOption for the no process required guided tool.
GenFdsGlobalVariable.GuidTool(TempFile, [DummyFile], ExternalTool, '-z' + ' ' + CmdOption, ReturnValue)
#
# If the tool was not called, or the first call failed, ReturnValue[0] is nonzero;
# retry the guided tool with CmdOption only
#
if ReturnValue[0] != 0:
FirstCall = False
ReturnValue[0] = 0
GenFdsGlobalVariable.GuidTool(TempFile, [DummyFile], ExternalTool, CmdOption)
#
# Some external tools do not follow the convention of returning nonzero on failure,
# so the output file has to be checked explicitly
#
if not os.path.exists(TempFile) :
EdkLogger.error("GenFds", COMMAND_FAILURE, 'Fail to call %s, no output file was generated' % ExternalTool)
FileHandleIn = open(DummyFile, 'rb')
FileHandleIn.seek(0, 2)
InputFileSize = FileHandleIn.tell()
FileHandleOut = open(TempFile, 'rb')
FileHandleOut.seek(0, 2)
TempFileSize = FileHandleOut.tell()
Attribute = []
HeaderLength = None
if self.ExtraHeaderSize != -1:
HeaderLength = str(self.ExtraHeaderSize)
if self.ProcessRequired == "NONE" and HeaderLength is None:
if TempFileSize > InputFileSize:
FileHandleIn.seek(0)
BufferIn = FileHandleIn.read()
FileHandleOut.seek(0)
BufferOut = FileHandleOut.read()
if BufferIn == BufferOut[TempFileSize - InputFileSize:]:
HeaderLength = str(TempFileSize - InputFileSize)
# no header length could be inferred, so mark the guided section as process-required
if HeaderLength is None:
Attribute.append('PROCESSING_REQUIRED')
FileHandleIn.close()
FileHandleOut.close()
if FirstCall and 'PROCESSING_REQUIRED' in Attribute:
# The -z output of the first call is invalid for PROCESSING_REQUIRED data; rerun the guided tool with the real options.
GenFdsGlobalVariable.GuidTool(TempFile, [DummyFile], ExternalTool, CmdOption)
#
# Call Gensection Add Section Header
#
if self.ProcessRequired in ("TRUE", "1"):
if 'PROCESSING_REQUIRED' not in Attribute:
Attribute.append('PROCESSING_REQUIRED')
if self.AuthStatusValid in ("TRUE", "1"):
Attribute.append('AUTH_STATUS_VALID')
GenFdsGlobalVariable.GenerateSection(OutputFile, [TempFile], Section.Section.SectionType['GUIDED'],
Guid=self.NameGuid, GuidAttr=Attribute, GuidHdrLen=HeaderLength)
else:
# record the input file so GenSec can determine PROCESSING_REQUIRED
GenFdsGlobalVariable.GuidTool(TempFile, [DummyFile], ExternalTool, CmdOption, IsMakefile=IsMakefile)
Attribute = []
HeaderLength = None
if self.ExtraHeaderSize != -1:
HeaderLength = str(self.ExtraHeaderSize)
if self.AuthStatusValid in ("TRUE", "1"):
Attribute.append('AUTH_STATUS_VALID')
if self.ProcessRequired == "NONE" and HeaderLength is None:
GenFdsGlobalVariable.GenerateSection(OutputFile, [TempFile], Section.Section.SectionType['GUIDED'],
Guid=self.NameGuid, GuidAttr=Attribute,
GuidHdrLen=HeaderLength, DummyFile=DummyFile, IsMakefile=IsMakefile)
else:
if self.ProcessRequired in ("TRUE", "1"):
if 'PROCESSING_REQUIRED' not in Attribute:
Attribute.append('PROCESSING_REQUIRED')
GenFdsGlobalVariable.GenerateSection(OutputFile, [TempFile], Section.Section.SectionType['GUIDED'],
Guid=self.NameGuid, GuidAttr=Attribute,
GuidHdrLen=HeaderLength, IsMakefile=IsMakefile)
OutputFileList = []
OutputFileList.append(OutputFile)
if 'PROCESSING_REQUIRED' in Attribute:
# reset guided section alignment to none for the processed required guided data
self.Alignment = None
self.IncludeFvSection = False
self.ProcessRequired = "TRUE"
if IsMakefile and self.Alignment is not None and self.Alignment.strip() == '0':
self.Alignment = '1'
return OutputFileList, self.Alignment
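## Note on the flow above (summary, not normative): when NameGuid is unset
# the section falls back to a CRC32 GUIDED section; otherwise the inputs are
# first packed into a DUMMY section, run through the external guid tool, and
# the PROCESSING_REQUIRED attribute is inferred by checking whether the tool
# output merely prepends a header to the unmodified input.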
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/GuidSection.py
|
## @file
# process FFS generation from FILE statement
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from io import BytesIO
from struct import pack
from CommonDataClass.FdfClass import FileStatementClassObject
from Common import EdkLogger
from Common.BuildToolError import GENFDS_ERROR
from Common.Misc import GuidStructureByteArrayToGuidString, SaveFileOnChange
import Common.LongFilePathOs as os
from .GuidSection import GuidSection
from .FvImageSection import FvImageSection
from .Ffs import FdfFvFileTypeToFileType
from .GenFdsGlobalVariable import GenFdsGlobalVariable
import shutil
## generate FFS from FILE
#
#
class FileStatement (FileStatementClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
FileStatementClassObject.__init__(self)
self.CurrentLineNum = None
self.CurrentLineContent = None
self.FileName = None
self.InfFileName = None
self.SubAlignment = None
## GenFfs() method
#
# Generate FFS
#
# @param self The object pointer
# @param Dict dictionary contains macro and value pair
# @param FvChildAddr Array of the inside FvImage base address
# @param FvParentAddr Parent Fv base address
# @retval string Generated FFS file name
#
def GenFfs(self, Dict = None, FvChildAddr=None, FvParentAddr=None, IsMakefile=False, FvName=None):
# avoid a shared mutable default argument for FvChildAddr
FvChildAddr = [] if FvChildAddr is None else FvChildAddr
if self.NameGuid and self.NameGuid.startswith('PCD('):
PcdValue = GenFdsGlobalVariable.GetPcdValue(self.NameGuid)
if len(PcdValue) == 0:
EdkLogger.error("GenFds", GENFDS_ERROR, '%s NOT defined.' \
% (self.NameGuid))
if PcdValue.startswith('{'):
PcdValue = GuidStructureByteArrayToGuidString(PcdValue)
RegistryGuidStr = PcdValue
if len(RegistryGuidStr) == 0:
EdkLogger.error("GenFds", GENFDS_ERROR, 'GUID value for %s in wrong format.' \
% (self.NameGuid))
self.NameGuid = RegistryGuidStr
Str = self.NameGuid
if FvName:
Str += FvName
OutputDir = os.path.join(GenFdsGlobalVariable.FfsDir, Str)
if os.path.exists(OutputDir):
shutil.rmtree(OutputDir)
if not os.path.exists(OutputDir):
os.makedirs(OutputDir)
if Dict is None:
Dict = {}
Dict.update(self.DefineVarDict)
SectionAlignments = None
if self.FvName:
Buffer = BytesIO()
if self.FvName.upper() not in GenFdsGlobalVariable.FdfParser.Profile.FvDict:
EdkLogger.error("GenFds", GENFDS_ERROR, "FV (%s) is NOT described in FDF file!" % (self.FvName))
Fv = GenFdsGlobalVariable.FdfParser.Profile.FvDict.get(self.FvName.upper())
FileName = Fv.AddToBuffer(Buffer)
SectionFiles = [FileName]
elif self.FdName:
if self.FdName.upper() not in GenFdsGlobalVariable.FdfParser.Profile.FdDict:
EdkLogger.error("GenFds", GENFDS_ERROR, "FD (%s) is NOT described in FDF file!" % (self.FdName))
Fd = GenFdsGlobalVariable.FdfParser.Profile.FdDict.get(self.FdName.upper())
FileName = Fd.GenFd()
SectionFiles = [FileName]
elif self.FileName:
if hasattr(self, 'FvFileType') and self.FvFileType == 'RAW':
if isinstance(self.FileName, list) and isinstance(self.SubAlignment, list) and len(self.FileName) == len(self.SubAlignment):
FileContent = BytesIO()
MaxAlignIndex = 0
MaxAlignValue = 1
for Index, File in enumerate(self.FileName):
try:
# use a context manager so the handle is closed even on error
with open(File, 'rb') as f:
Content = f.read()
except IOError:
GenFdsGlobalVariable.ErrorLogger("Error opening RAW file %s." % (File))
AlignValue = 1
if self.SubAlignment[Index]:
AlignValue = GenFdsGlobalVariable.GetAlignment(self.SubAlignment[Index])
if AlignValue > MaxAlignValue:
MaxAlignIndex = Index
MaxAlignValue = AlignValue
FileContent.write(Content)
if len(FileContent.getvalue()) % AlignValue != 0:
Size = AlignValue - len(FileContent.getvalue()) % AlignValue
for i in range(0, Size):
FileContent.write(pack('B', 0xFF))
if FileContent.getvalue() != b'':
OutputRAWFile = os.path.join(GenFdsGlobalVariable.FfsDir, self.NameGuid, self.NameGuid + '.raw')
SaveFileOnChange(OutputRAWFile, FileContent.getvalue(), True)
self.FileName = OutputRAWFile
self.SubAlignment = self.SubAlignment[MaxAlignIndex]
if self.Alignment and self.SubAlignment:
if GenFdsGlobalVariable.GetAlignment (self.Alignment) < GenFdsGlobalVariable.GetAlignment (self.SubAlignment):
self.Alignment = self.SubAlignment
elif self.SubAlignment:
self.Alignment = self.SubAlignment
self.FileName = GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.FileName)
# Replace $(SPACE) with a real space character
self.FileName = self.FileName.replace('$(SPACE)', ' ')
SectionFiles = [GenFdsGlobalVariable.MacroExtend(self.FileName, Dict)]
else:
SectionFiles = []
Index = 0
SectionAlignments = []
for section in self.SectionList:
Index = Index + 1
SecIndex = '%d' %Index
# process the inside FvImage from FvSection or GuidSection
if FvChildAddr != []:
if isinstance(section, FvImageSection):
section.FvAddr = FvChildAddr.pop(0)
elif isinstance(section, GuidSection):
section.FvAddr = FvChildAddr
if FvParentAddr and isinstance(section, GuidSection):
section.FvParentAddr = FvParentAddr
if self.KeepReloc == False:
section.KeepReloc = False
sectList, align = section.GenSection(OutputDir, self.NameGuid, SecIndex, self.KeyStringList, None, Dict)
if sectList != []:
for sect in sectList:
SectionFiles.append(sect)
SectionAlignments.append(align)
#
# Prepare the parameter
#
FfsFileOutput = os.path.join(OutputDir, self.NameGuid + '.ffs')
GenFdsGlobalVariable.GenerateFfs(FfsFileOutput, SectionFiles,
FdfFvFileTypeToFileType.get(self.FvFileType),
self.NameGuid,
Fixed=self.Fixed,
CheckSum=self.CheckSum,
Align=self.Alignment,
SectionAlign=SectionAlignments
)
return FfsFileOutput
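## Illustrative only: self.FvName/FdName/FileName are filled in by the FDF
# parser from FILE statements; e.g. a hypothetical
#   FILE FV_IMAGE = <guid> FV = RecoveryFv
# takes the self.FvName branch above and serializes that FV's contents
# into the generated FFS file.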
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/FfsFileStatement.py
|
## @file
# process UI section generation
#
# Copyright (c) 2007 - 2017, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from . import Section
from .Ffs import SectionSuffix
import subprocess
import Common.LongFilePathOs as os
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from CommonDataClass.FdfClass import UiSectionClassObject
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.DataType import *
## generate UI section
#
#
class UiSection (UiSectionClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
UiSectionClassObject.__init__(self)
## GenSection() method
#
# Generate UI section
#
# @param self The object pointer
# @param OutputPath Where to place output file
# @param ModuleName Which module this section belongs to
# @param SecNum Index of section
# @param KeyStringList Filter for inputs of section generation
# @param FfsInf FfsInfStatement object that contains this section data
# @param Dict dictionary contains macro and its value
# @retval tuple (Generated file name, section alignment)
#
def GenSection(self, OutputPath, ModuleName, SecNum, KeyStringList, FfsInf=None, Dict=None, IsMakefile = False):
#
# Prepare the parameter of GenSection
#
if FfsInf is not None:
self.Alignment = FfsInf.__ExtendMacro__(self.Alignment)
self.StringData = FfsInf.__ExtendMacro__(self.StringData)
self.FileName = FfsInf.__ExtendMacro__(self.FileName)
OutputFile = os.path.join(OutputPath, ModuleName + SUP_MODULE_SEC + SecNum + SectionSuffix.get(BINARY_FILE_TYPE_UI))
if self.StringData is not None :
NameString = self.StringData
elif self.FileName is not None:
if Dict is None:
Dict = {}
FileNameStr = GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.FileName)
FileNameStr = GenFdsGlobalVariable.MacroExtend(FileNameStr, Dict)
FileObj = open(FileNameStr, 'r')
NameString = FileObj.read()
FileObj.close()
else:
NameString = ''
GenFdsGlobalVariable.GenerateSection(OutputFile, None, 'EFI_SECTION_USER_INTERFACE', Ui=NameString, IsMakefile=IsMakefile)
OutputFileList = []
OutputFileList.append(OutputFile)
return OutputFileList, self.Alignment
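## Illustrative only: a hypothetical rule line UI STRING="MyDriver" sets
# self.StringData, so NameString becomes "MyDriver" and GenSection emits an
# EFI_SECTION_USER_INTERFACE section containing that string.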
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/UiSection.py
|
## @file
# section base class
#
# Copyright (c) 2007-2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from CommonDataClass.FdfClass import SectionClassObject
from .GenFdsGlobalVariable import GenFdsGlobalVariable
import Common.LongFilePathOs as os, glob
from Common import EdkLogger
from Common.BuildToolError import *
from Common.DataType import *
## section base class
#
#
class Section (SectionClassObject):
SectionType = {
'RAW' : 'EFI_SECTION_RAW',
'FREEFORM' : 'EFI_SECTION_FREEFORM_SUBTYPE_GUID',
BINARY_FILE_TYPE_PE32 : 'EFI_SECTION_PE32',
BINARY_FILE_TYPE_PIC : 'EFI_SECTION_PIC',
BINARY_FILE_TYPE_TE : 'EFI_SECTION_TE',
'FV_IMAGE' : 'EFI_SECTION_FIRMWARE_VOLUME_IMAGE',
'COMPAT16' : 'EFI_SECTION_COMPATIBILITY16',
BINARY_FILE_TYPE_DXE_DEPEX : 'EFI_SECTION_DXE_DEPEX',
BINARY_FILE_TYPE_PEI_DEPEX : 'EFI_SECTION_PEI_DEPEX',
'GUIDED' : 'EFI_SECTION_GUID_DEFINED',
'COMPRESS' : 'EFI_SECTION_COMPRESSION',
BINARY_FILE_TYPE_UI : 'EFI_SECTION_USER_INTERFACE',
BINARY_FILE_TYPE_SMM_DEPEX : 'EFI_SECTION_SMM_DEPEX'
}
BinFileType = {
BINARY_FILE_TYPE_GUID : '.guid',
'ACPI' : '.acpi',
'ASL' : '.asl' ,
BINARY_FILE_TYPE_UEFI_APP : '.app',
BINARY_FILE_TYPE_LIB : '.lib',
BINARY_FILE_TYPE_PE32 : '.pe32',
BINARY_FILE_TYPE_PIC : '.pic',
BINARY_FILE_TYPE_PEI_DEPEX : '.depex',
'SEC_PEI_DEPEX' : '.depex',
BINARY_FILE_TYPE_TE : '.te',
BINARY_FILE_TYPE_UNI_VER : '.ver',
BINARY_FILE_TYPE_VER : '.ver',
BINARY_FILE_TYPE_UNI_UI : '.ui',
BINARY_FILE_TYPE_UI : '.ui',
BINARY_FILE_TYPE_BIN : '.bin',
'RAW' : '.raw',
'COMPAT16' : '.comp16',
BINARY_FILE_TYPE_FV : '.fv'
}
SectFileType = {
'SEC_GUID' : '.sec' ,
'SEC_PE32' : '.sec' ,
'SEC_PIC' : '.sec',
'SEC_TE' : '.sec',
'SEC_VER' : '.sec',
'SEC_UI' : '.sec',
'SEC_COMPAT16' : '.sec',
'SEC_BIN' : '.sec'
}
ToolGuid = {
'0xa31280ad-0x481e-0x41b6-0x95e8-0x127f-0x4c984779' : 'TianoCompress',
'0xee4e5898-0x3914-0x4259-0x9d6e-0xdc7b-0xd79403cf' : 'LzmaCompress'
}
## The constructor
#
# @param self The object pointer
#
def __init__(self):
SectionClassObject.__init__(self)
## GenSection() method
#
# virtual function
#
# @param self The object pointer
# @param OutputPath Where to place output file
# @param ModuleName Which module this section belongs to
# @param SecNum Index of section
# @param KeyStringList Filter for inputs of section generation
# @param FfsInf FfsInfStatement object that contains this section data
# @param Dict dictionary contains macro and its value
#
    def GenSection(self, OutputPath, ModuleName, SecNum, KeyStringList, FfsInf=None, Dict=None):
pass
## GetFileList() method
#
    #   Get the file list from the FfsInfStatement object that matches the
    #   given file type or file extension (static method, no object pointer)
    #
# @param FfsInf FfsInfStatement object that contains file list
# @param FileType File type to get
# @param FileExtension File extension to get
# @param Dict dictionary contains macro and its value
# @retval tuple (File list, boolean)
#
def GetFileList(FfsInf, FileType, FileExtension, Dict = None, IsMakefile=False, SectionType=None):
IsSect = FileType in Section.SectFileType
if FileExtension is not None:
Suffix = FileExtension
elif IsSect :
Suffix = Section.SectionType.get(FileType)
else:
Suffix = Section.BinFileType.get(FileType)
if FfsInf is None:
EdkLogger.error("GenFds", GENFDS_ERROR, 'Inf File does not exist!')
FileList = []
if FileType is not None:
for File in FfsInf.BinFileList:
if File.Arch == TAB_ARCH_COMMON or FfsInf.CurrentArch == File.Arch:
if File.Type == FileType or (int(FfsInf.PiSpecVersion, 16) >= 0x0001000A \
                                                 and FileType == BINARY_FILE_TYPE_DXE_DEPEX and File.Type == BINARY_FILE_TYPE_SMM_DEPEX) \
or (FileType == BINARY_FILE_TYPE_TE and File.Type == BINARY_FILE_TYPE_PE32):
if TAB_STAR in FfsInf.TargetOverrideList or File.Target == TAB_STAR or File.Target in FfsInf.TargetOverrideList or FfsInf.TargetOverrideList == []:
FileList.append(FfsInf.PatchEfiFile(File.Path, File.Type))
else:
GenFdsGlobalVariable.InfLogger ("\nBuild Target \'%s\' of File %s is not in the Scope of %s specified by INF %s in FDF" %(File.Target, File.File, FfsInf.TargetOverrideList, FfsInf.InfFileName))
else:
GenFdsGlobalVariable.VerboseLogger ("\nFile Type \'%s\' of File %s in %s is not same with file type \'%s\' from Rule in FDF" %(File.Type, File.File, FfsInf.InfFileName, FileType))
else:
GenFdsGlobalVariable.InfLogger ("\nCurrent ARCH \'%s\' of File %s is not in the Support Arch Scope of %s specified by INF %s in FDF" %(FfsInf.CurrentArch, File.File, File.Arch, FfsInf.InfFileName))
elif FileType is None and SectionType == BINARY_FILE_TYPE_RAW:
for File in FfsInf.BinFileList:
if File.Ext == Suffix:
FileList.append(File.Path)
if (not IsMakefile and Suffix is not None and os.path.exists(FfsInf.EfiOutputPath)) or (IsMakefile and Suffix is not None):
if not FileList:
SuffixMap = FfsInf.GetFinalTargetSuffixMap()
if Suffix in SuffixMap:
FileList.extend(SuffixMap[Suffix])
        # Sort the file list alphabetically for a given section type
if len (FileList) > 1:
FileList.sort()
return FileList, IsSect
GetFileList = staticmethod(GetFileList)
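    # Usage sketch (FfsInf is a hypothetical FfsInfStatement instance):
    #   FileList, IsSect = Section.GetFileList(FfsInf, BINARY_FILE_TYPE_PE32, None)
    #   # IsSect is False because BINARY_FILE_TYPE_PE32 maps through
    #   # Section.BinFileType ('.pe32'); a 'SEC_PE32' request would map through
    #   # SectFileType and yield IsSect == True.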
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/Section.py
|
## @file
# process data section generation
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from . import Section
from .GenFdsGlobalVariable import GenFdsGlobalVariable
import subprocess
from .Ffs import SectionSuffix
import Common.LongFilePathOs as os
from CommonDataClass.FdfClass import DataSectionClassObject
from Common.Misc import PeImageClass
from Common.LongFilePathSupport import CopyLongFilePath
from Common.DataType import *
## generate data section
#
#
class DataSection (DataSectionClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
DataSectionClassObject.__init__(self)
## GenSection() method
#
    #   Generate data section
#
# @param self The object pointer
# @param OutputPath Where to place output file
# @param ModuleName Which module this section belongs to
# @param SecNum Index of section
# @param KeyStringList Filter for inputs of section generation
# @param FfsInf FfsInfStatement object that contains this section data
# @param Dict dictionary contains macro and its value
# @retval tuple (Generated file name list, section alignment)
#
def GenSection(self, OutputPath, ModuleName, SecNum, keyStringList, FfsFile = None, Dict = None, IsMakefile = False):
#
# Prepare the parameter of GenSection
#
if Dict is None:
Dict = {}
if FfsFile is not None:
self.SectFileName = GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.SectFileName)
self.SectFileName = GenFdsGlobalVariable.MacroExtend(self.SectFileName, Dict, FfsFile.CurrentArch)
else:
self.SectFileName = GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.SectFileName)
self.SectFileName = GenFdsGlobalVariable.MacroExtend(self.SectFileName, Dict)
"""Check Section file exist or not !"""
if not os.path.exists(self.SectFileName):
self.SectFileName = os.path.join (GenFdsGlobalVariable.WorkSpaceDir,
self.SectFileName)
"""Copy Map file to Ffs output"""
Filename = GenFdsGlobalVariable.MacroExtend(self.SectFileName)
        if Filename.endswith('.efi'):
MapFile = Filename.replace('.efi', '.map')
CopyMapFile = os.path.join(OutputPath, ModuleName + '.map')
if IsMakefile:
if GenFdsGlobalVariable.CopyList == []:
GenFdsGlobalVariable.CopyList = [(MapFile, CopyMapFile)]
else:
GenFdsGlobalVariable.CopyList.append((MapFile, CopyMapFile))
else:
if os.path.exists(MapFile):
if not os.path.exists(CopyMapFile) or (os.path.getmtime(MapFile) > os.path.getmtime(CopyMapFile)):
CopyLongFilePath(MapFile, CopyMapFile)
#Get PE Section alignment when align is set to AUTO
if self.Alignment == 'Auto' and self.SecType in (BINARY_FILE_TYPE_TE, BINARY_FILE_TYPE_PE32):
self.Alignment = "0"
NoStrip = True
if self.SecType in (BINARY_FILE_TYPE_TE, BINARY_FILE_TYPE_PE32):
if self.KeepReloc is not None:
NoStrip = self.KeepReloc
if not NoStrip:
FileBeforeStrip = os.path.join(OutputPath, ModuleName + '.efi')
if not os.path.exists(FileBeforeStrip) or \
(os.path.getmtime(self.SectFileName) > os.path.getmtime(FileBeforeStrip)):
CopyLongFilePath(self.SectFileName, FileBeforeStrip)
StrippedFile = os.path.join(OutputPath, ModuleName + '.stripped')
GenFdsGlobalVariable.GenerateFirmwareImage(
StrippedFile,
[GenFdsGlobalVariable.MacroExtend(self.SectFileName, Dict)],
Strip=True,
IsMakefile = IsMakefile
)
self.SectFileName = StrippedFile
if self.SecType == BINARY_FILE_TYPE_TE:
TeFile = os.path.join( OutputPath, ModuleName + 'Te.raw')
GenFdsGlobalVariable.GenerateFirmwareImage(
TeFile,
[GenFdsGlobalVariable.MacroExtend(self.SectFileName, Dict)],
Type='te',
IsMakefile = IsMakefile
)
self.SectFileName = TeFile
OutputFile = os.path.join (OutputPath, ModuleName + SUP_MODULE_SEC + SecNum + SectionSuffix.get(self.SecType))
OutputFile = os.path.normpath(OutputFile)
GenFdsGlobalVariable.GenerateSection(OutputFile, [self.SectFileName], Section.Section.SectionType.get(self.SecType), IsMakefile = IsMakefile)
FileList = [OutputFile]
return FileList, self.Alignment
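# Flow sketch (intermediate names taken from the code above): for a TE-typed
# section with relocation stripping enabled, the artifacts produced in
# OutputPath are
#   <ModuleName>.stripped        relocation-stripped copy (GenFw Strip=True)
#   <ModuleName>Te.raw           TE-converted image (GenFw Type='te')
#   <ModuleName>SEC<SecNum>.te   final section emitted by GenSec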
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/DataSection.py
|
## @file
# Global variables for GenFds
#
# Copyright (c) 2007 - 2021, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import print_function
from __future__ import absolute_import
import Common.LongFilePathOs as os
import sys
from sys import stdout
from subprocess import PIPE,Popen
from struct import Struct
from array import array
from Common.BuildToolError import COMMAND_FAILURE,GENFDS_ERROR
from Common import EdkLogger
from Common.Misc import SaveFileOnChange
from Common.TargetTxtClassObject import TargetTxtDict
from Common.ToolDefClassObject import ToolDefDict,gDefaultToolsDefFile
from AutoGen.BuildEngine import ToolBuildRule
import Common.DataType as DataType
from Common.Misc import PathClass,CreateDirectory
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
import Common.GlobalData as GlobalData
from Common.BuildToolError import *
from AutoGen.AutoGen import CalculatePriorityValue
## Global variables
#
#
class GenFdsGlobalVariable:
FvDir = ''
OutputDirDict = {}
BinDir = ''
# will be FvDir + os.sep + 'Ffs'
FfsDir = ''
FdfParser = None
LibDir = ''
WorkSpace = None
WorkSpaceDir = ''
ConfDir = ''
OutputDirFromDscDict = {}
TargetName = ''
ToolChainTag = ''
RuleDict = {}
ArchList = None
ActivePlatform = None
FvAddressFileName = ''
VerboseMode = False
DebugLevel = -1
SharpCounter = 0
SharpNumberPerLine = 40
FdfFile = ''
FdfFileTimeStamp = 0
FixedLoadAddress = False
PlatformName = ''
BuildRuleFamily = DataType.TAB_COMPILER_MSFT
ToolChainFamily = DataType.TAB_COMPILER_MSFT
__BuildRuleDatabase = None
GuidToolDefinition = {}
FfsCmdDict = {}
SecCmdList = []
CopyList = []
ModuleFile = ''
EnableGenfdsMultiThread = True
#
# The list whose element are flags to indicate if large FFS or SECTION files exist in FV.
# At the beginning of each generation of FV, false flag is appended to the list,
# after the call to GenerateSection returns, check the size of the output file,
# if it is greater than 0xFFFFFF, the tail flag in list is set to true,
# and EFI_FIRMWARE_FILE_SYSTEM3_GUID is passed to C GenFv.
# At the end of generation of FV, pop the flag.
# List is used as a stack to handle nested FV generation.
#
LargeFileInFvFlags = []
EFI_FIRMWARE_FILE_SYSTEM3_GUID = '5473C07A-3DCB-4dca-BD6F-1E9689E7349A'
LARGE_FILE_SIZE = 0x1000000
SectionHeader = Struct("3B 1B")
# FvName, FdName, CapName in FDF, Image file name
ImageBinDict = {}
## LoadBuildRule
#
@staticmethod
def _LoadBuildRule():
if GenFdsGlobalVariable.__BuildRuleDatabase:
return GenFdsGlobalVariable.__BuildRuleDatabase
BuildRule = ToolBuildRule()
GenFdsGlobalVariable.__BuildRuleDatabase = BuildRule.ToolBuildRule
TargetObj = TargetTxtDict()
ToolDefinitionFile = TargetObj.Target.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TOOL_CHAIN_CONF]
if ToolDefinitionFile == '':
ToolDefinitionFile = os.path.join('Conf', gDefaultToolsDefFile)
if os.path.isfile(ToolDefinitionFile):
ToolDefObj = ToolDefDict((os.path.join(os.getenv("WORKSPACE"), "Conf")))
ToolDefinition = ToolDefObj.ToolDef.ToolsDefTxtDatabase
if DataType.TAB_TOD_DEFINES_BUILDRULEFAMILY in ToolDefinition \
and GenFdsGlobalVariable.ToolChainTag in ToolDefinition[DataType.TAB_TOD_DEFINES_BUILDRULEFAMILY] \
and ToolDefinition[DataType.TAB_TOD_DEFINES_BUILDRULEFAMILY][GenFdsGlobalVariable.ToolChainTag]:
GenFdsGlobalVariable.BuildRuleFamily = ToolDefinition[DataType.TAB_TOD_DEFINES_BUILDRULEFAMILY][GenFdsGlobalVariable.ToolChainTag]
if DataType.TAB_TOD_DEFINES_FAMILY in ToolDefinition \
and GenFdsGlobalVariable.ToolChainTag in ToolDefinition[DataType.TAB_TOD_DEFINES_FAMILY] \
and ToolDefinition[DataType.TAB_TOD_DEFINES_FAMILY][GenFdsGlobalVariable.ToolChainTag]:
GenFdsGlobalVariable.ToolChainFamily = ToolDefinition[DataType.TAB_TOD_DEFINES_FAMILY][GenFdsGlobalVariable.ToolChainTag]
return GenFdsGlobalVariable.__BuildRuleDatabase
## GetBuildRules
# @param Inf: object of InfBuildData
# @param Arch: current arch
#
@staticmethod
def GetBuildRules(Inf, Arch):
if not Arch:
Arch = DataType.TAB_COMMON
if not Arch in GenFdsGlobalVariable.OutputDirDict:
return {}
BuildRuleDatabase = GenFdsGlobalVariable._LoadBuildRule()
if not BuildRuleDatabase:
return {}
PathClassObj = PathClass(Inf.MetaFile.File,
GenFdsGlobalVariable.WorkSpaceDir)
BuildDir = os.path.join(
GenFdsGlobalVariable.OutputDirDict[Arch],
Arch,
PathClassObj.SubDir,
PathClassObj.BaseName
)
BinDir = os.path.join(GenFdsGlobalVariable.OutputDirDict[Arch], Arch)
Macro = {
"WORKSPACE":GenFdsGlobalVariable.WorkSpaceDir,
"MODULE_NAME":Inf.BaseName,
"MODULE_GUID":Inf.Guid,
"MODULE_VERSION":Inf.Version,
"MODULE_TYPE":Inf.ModuleType,
"MODULE_FILE":str(PathClassObj),
"MODULE_FILE_BASE_NAME":PathClassObj.BaseName,
"MODULE_RELATIVE_DIR":PathClassObj.SubDir,
"MODULE_DIR":PathClassObj.SubDir,
"BASE_NAME":Inf.BaseName,
"ARCH":Arch,
"TOOLCHAIN":GenFdsGlobalVariable.ToolChainTag,
"TOOLCHAIN_TAG":GenFdsGlobalVariable.ToolChainTag,
"TOOL_CHAIN_TAG":GenFdsGlobalVariable.ToolChainTag,
"TARGET":GenFdsGlobalVariable.TargetName,
"BUILD_DIR":GenFdsGlobalVariable.OutputDirDict[Arch],
"BIN_DIR":BinDir,
"LIB_DIR":BinDir,
"MODULE_BUILD_DIR":BuildDir,
"OUTPUT_DIR":os.path.join(BuildDir, "OUTPUT"),
"DEBUG_DIR":os.path.join(BuildDir, "DEBUG")
}
BuildRules = {}
for Type in BuildRuleDatabase.FileTypeList:
#first try getting build rule by BuildRuleFamily
RuleObject = BuildRuleDatabase[Type, Inf.BuildType, Arch, GenFdsGlobalVariable.BuildRuleFamily]
if not RuleObject:
# build type is always module type, but ...
if Inf.ModuleType != Inf.BuildType:
RuleObject = BuildRuleDatabase[Type, Inf.ModuleType, Arch, GenFdsGlobalVariable.BuildRuleFamily]
#second try getting build rule by ToolChainFamily
if not RuleObject:
RuleObject = BuildRuleDatabase[Type, Inf.BuildType, Arch, GenFdsGlobalVariable.ToolChainFamily]
if not RuleObject:
# build type is always module type, but ...
if Inf.ModuleType != Inf.BuildType:
RuleObject = BuildRuleDatabase[Type, Inf.ModuleType, Arch, GenFdsGlobalVariable.ToolChainFamily]
if not RuleObject:
continue
RuleObject = RuleObject.Instantiate(Macro)
BuildRules[Type] = RuleObject
for Ext in RuleObject.SourceFileExtList:
BuildRules[Ext] = RuleObject
return BuildRules
## GetModuleCodaTargetList
#
# @param Inf: object of InfBuildData
# @param Arch: current arch
#
@staticmethod
def GetModuleCodaTargetList(Inf, Arch):
BuildRules = GenFdsGlobalVariable.GetBuildRules(Inf, Arch)
if not BuildRules:
return []
TargetList = set()
FileList = []
if not Inf.IsBinaryModule:
for File in Inf.Sources:
if File.TagName in {"", DataType.TAB_STAR, GenFdsGlobalVariable.ToolChainTag} and \
File.ToolChainFamily in {"", DataType.TAB_STAR, GenFdsGlobalVariable.ToolChainFamily}:
FileList.append((File, DataType.TAB_UNKNOWN_FILE))
for File in Inf.Binaries:
if File.Target in {DataType.TAB_COMMON, DataType.TAB_STAR, GenFdsGlobalVariable.TargetName}:
FileList.append((File, File.Type))
for File, FileType in FileList:
LastTarget = None
RuleChain = []
SourceList = [File]
Index = 0
while Index < len(SourceList):
Source = SourceList[Index]
Index = Index + 1
if File.IsBinary and File == Source and Inf.Binaries and File in Inf.Binaries:
# Skip all files that are not binary libraries
if not Inf.LibraryClass:
continue
RuleObject = BuildRules[DataType.TAB_DEFAULT_BINARY_FILE]
elif FileType in BuildRules:
RuleObject = BuildRules[FileType]
elif Source.Ext in BuildRules:
RuleObject = BuildRules[Source.Ext]
else:
# stop at no more rules
if LastTarget:
TargetList.add(str(LastTarget))
break
FileType = RuleObject.SourceFileType
# stop at STATIC_LIBRARY for library
if Inf.LibraryClass and FileType == DataType.TAB_STATIC_LIBRARY:
if LastTarget:
TargetList.add(str(LastTarget))
break
Target = RuleObject.Apply(Source)
if not Target:
if LastTarget:
TargetList.add(str(LastTarget))
break
elif not Target.Outputs:
# Only do build for target with outputs
TargetList.add(str(Target))
# to avoid cyclic rule
if FileType in RuleChain:
break
RuleChain.append(FileType)
SourceList.extend(Target.Outputs)
LastTarget = Target
FileType = DataType.TAB_UNKNOWN_FILE
for Cmd in Target.Commands:
if "$(CP)" == Cmd.split()[0]:
CpTarget = Cmd.split()[2]
TargetList.add(CpTarget)
return list(TargetList)
## SetDir()
#
# @param OutputDir Output directory
# @param FdfParser FDF contents parser
# @param Workspace The directory of workspace
# @param ArchList The Arch list of platform
#
@staticmethod
def SetDir (OutputDir, FdfParser, WorkSpace, ArchList):
GenFdsGlobalVariable.VerboseLogger("GenFdsGlobalVariable.OutputDir:%s" % OutputDir)
GenFdsGlobalVariable.FdfParser = FdfParser
GenFdsGlobalVariable.WorkSpace = WorkSpace
GenFdsGlobalVariable.FvDir = os.path.join(GenFdsGlobalVariable.OutputDirDict[ArchList[0]], DataType.TAB_FV_DIRECTORY)
if not os.path.exists(GenFdsGlobalVariable.FvDir):
os.makedirs(GenFdsGlobalVariable.FvDir)
GenFdsGlobalVariable.FfsDir = os.path.join(GenFdsGlobalVariable.FvDir, 'Ffs')
if not os.path.exists(GenFdsGlobalVariable.FfsDir):
os.makedirs(GenFdsGlobalVariable.FfsDir)
#
# Create FV Address inf file
#
GenFdsGlobalVariable.FvAddressFileName = os.path.join(GenFdsGlobalVariable.FfsDir, 'FvAddress.inf')
FvAddressFile = open(GenFdsGlobalVariable.FvAddressFileName, 'w')
#
# Add [Options]
#
FvAddressFile.writelines("[options]" + DataType.TAB_LINE_BREAK)
BsAddress = '0'
for Arch in ArchList:
if GenFdsGlobalVariable.WorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, Arch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag].BsBaseAddress:
BsAddress = GenFdsGlobalVariable.WorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, Arch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag].BsBaseAddress
break
FvAddressFile.writelines("EFI_BOOT_DRIVER_BASE_ADDRESS = " + \
BsAddress + \
DataType.TAB_LINE_BREAK)
RtAddress = '0'
for Arch in reversed(ArchList):
temp = GenFdsGlobalVariable.WorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, Arch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag].RtBaseAddress
if temp:
RtAddress = temp
break
FvAddressFile.writelines("EFI_RUNTIME_DRIVER_BASE_ADDRESS = " + \
RtAddress + \
DataType.TAB_LINE_BREAK)
FvAddressFile.close()
@staticmethod
def SetEnv(FdfParser, WorkSpace, ArchList, GlobalData):
GenFdsGlobalVariable.ModuleFile = WorkSpace.ModuleFile
GenFdsGlobalVariable.FdfParser = FdfParser
GenFdsGlobalVariable.WorkSpace = WorkSpace.Db
GenFdsGlobalVariable.ArchList = ArchList
GenFdsGlobalVariable.ToolChainTag = GlobalData.gGlobalDefines["TOOL_CHAIN_TAG"]
GenFdsGlobalVariable.TargetName = GlobalData.gGlobalDefines["TARGET"]
GenFdsGlobalVariable.ActivePlatform = GlobalData.gActivePlatform
GenFdsGlobalVariable.ConfDir = GlobalData.gConfDirectory
GenFdsGlobalVariable.EnableGenfdsMultiThread = GlobalData.gEnableGenfdsMultiThread
for Arch in ArchList:
GenFdsGlobalVariable.OutputDirDict[Arch] = os.path.normpath(
os.path.join(GlobalData.gWorkspace,
WorkSpace.Db.BuildObject[GenFdsGlobalVariable.ActivePlatform, Arch, GlobalData.gGlobalDefines['TARGET'],
GlobalData.gGlobalDefines['TOOLCHAIN']].OutputDirectory,
GlobalData.gGlobalDefines['TARGET'] +'_' + GlobalData.gGlobalDefines['TOOLCHAIN']))
GenFdsGlobalVariable.OutputDirFromDscDict[Arch] = os.path.normpath(
WorkSpace.Db.BuildObject[GenFdsGlobalVariable.ActivePlatform, Arch,
GlobalData.gGlobalDefines['TARGET'], GlobalData.gGlobalDefines['TOOLCHAIN']].OutputDirectory)
GenFdsGlobalVariable.PlatformName = WorkSpace.Db.BuildObject[GenFdsGlobalVariable.ActivePlatform, Arch,
GlobalData.gGlobalDefines['TARGET'],
GlobalData.gGlobalDefines['TOOLCHAIN']].PlatformName
GenFdsGlobalVariable.FvDir = os.path.join(GenFdsGlobalVariable.OutputDirDict[ArchList[0]], DataType.TAB_FV_DIRECTORY)
if not os.path.exists(GenFdsGlobalVariable.FvDir):
os.makedirs(GenFdsGlobalVariable.FvDir)
GenFdsGlobalVariable.FfsDir = os.path.join(GenFdsGlobalVariable.FvDir, 'Ffs')
if not os.path.exists(GenFdsGlobalVariable.FfsDir):
os.makedirs(GenFdsGlobalVariable.FfsDir)
#
# Create FV Address inf file
#
GenFdsGlobalVariable.FvAddressFileName = os.path.join(GenFdsGlobalVariable.FfsDir, 'FvAddress.inf')
FvAddressFile = open(GenFdsGlobalVariable.FvAddressFileName, 'w')
#
# Add [Options]
#
FvAddressFile.writelines("[options]" + DataType.TAB_LINE_BREAK)
BsAddress = '0'
for Arch in ArchList:
BsAddress = GenFdsGlobalVariable.WorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, Arch,
GlobalData.gGlobalDefines['TARGET'],
GlobalData.gGlobalDefines["TOOL_CHAIN_TAG"]].BsBaseAddress
if BsAddress:
break
FvAddressFile.writelines("EFI_BOOT_DRIVER_BASE_ADDRESS = " + \
BsAddress + \
DataType.TAB_LINE_BREAK)
RtAddress = '0'
for Arch in reversed(ArchList):
temp = GenFdsGlobalVariable.WorkSpace.BuildObject[
GenFdsGlobalVariable.ActivePlatform, Arch, GlobalData.gGlobalDefines['TARGET'],
GlobalData.gGlobalDefines["TOOL_CHAIN_TAG"]].RtBaseAddress
if temp:
RtAddress = temp
break
FvAddressFile.writelines("EFI_RUNTIME_DRIVER_BASE_ADDRESS = " + \
RtAddress + \
DataType.TAB_LINE_BREAK)
FvAddressFile.close()
## ReplaceWorkspaceMacro()
#
# @param String String that may contain macro
#
@staticmethod
def ReplaceWorkspaceMacro(String):
String = mws.handleWsMacro(String)
Str = String.replace('$(WORKSPACE)', GenFdsGlobalVariable.WorkSpaceDir)
if os.path.exists(Str):
if not os.path.isabs(Str):
Str = os.path.abspath(Str)
else:
Str = mws.join(GenFdsGlobalVariable.WorkSpaceDir, String)
return os.path.normpath(Str)
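    # Usage sketch (assuming WorkSpaceDir == '/ws'):
    #   ReplaceWorkspaceMacro('$(WORKSPACE)/MdePkg/MdePkg.dec')
    #   # -> '/ws/MdePkg/MdePkg.dec' when that path exists; otherwise the
    #   # original string is re-joined against the workspace roots via mws.join()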
## Check if the input files are newer than output files
#
# @param Output Path of output file
# @param Input Path list of input files
#
# @retval True if Output doesn't exist, or any Input is newer
# @retval False if all Input is older than Output
#
@staticmethod
def NeedsUpdate(Output, Input):
if not os.path.exists(Output):
return True
# always update "Output" if no "Input" given
if not Input:
return True
# if fdf file is changed after the 'Output" is generated, update the 'Output'
OutputTime = os.path.getmtime(Output)
if GenFdsGlobalVariable.FdfFileTimeStamp > OutputTime:
return True
for F in Input:
# always update "Output" if any "Input" doesn't exist
if not os.path.exists(F):
return True
# always update "Output" if any "Input" is newer than "Output"
if os.path.getmtime(F) > OutputTime:
return True
return False
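    # Worked example of the rebuild decision above:
    #   NeedsUpdate('out.sec', ['in.efi']) is True when out.sec is missing,
    #   in.efi is missing or newer than out.sec, or the FDF file itself
    #   changed after out.sec was generated; only then is the tool re-run.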
@staticmethod
    def GenerateSection(Output, Input, Type=None, CompressionType=None, Guid=None,
                        GuidHdrLen=None, GuidAttr=None, Ui=None, Ver=None, InputAlign=None, BuildNumber=None, DummyFile=None, IsMakefile=False):
        # Use None defaults to avoid sharing mutable default lists across calls
        if GuidAttr is None:
            GuidAttr = []
        if InputAlign is None:
            InputAlign = []
        Cmd = ["GenSec"]
if Type:
Cmd += ("-s", Type)
if CompressionType:
Cmd += ("-c", CompressionType)
if Guid:
Cmd += ("-g", Guid)
if DummyFile:
Cmd += ("--dummy", DummyFile)
if GuidHdrLen:
Cmd += ("-l", GuidHdrLen)
#Add each guided attribute
for Attr in GuidAttr:
Cmd += ("-r", Attr)
        # Section alignment (--sectionalign) only applies to a dummy section without a section type
for SecAlign in InputAlign:
Cmd += ("--sectionalign", SecAlign)
CommandFile = Output + '.txt'
if Ui:
if IsMakefile:
if Ui == "$(MODULE_NAME)":
Cmd += ('-n', Ui)
else:
Cmd += ("-n", '"' + Ui + '"')
Cmd += ("-o", Output)
if ' '.join(Cmd).strip() not in GenFdsGlobalVariable.SecCmdList:
GenFdsGlobalVariable.SecCmdList.append(' '.join(Cmd).strip())
else:
SectionData = array('B', [0, 0, 0, 0])
                SectionData.frombytes(Ui.encode('utf-16-le'))
SectionData.append(0)
SectionData.append(0)
Len = len(SectionData)
GenFdsGlobalVariable.SectionHeader.pack_into(SectionData, 0, Len & 0xff, (Len >> 8) & 0xff, (Len >> 16) & 0xff, 0x15)
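                    # The four placeholder bytes now hold the common
                    # EFI_COMMON_SECTION_HEADER: a 24-bit little-endian section
                    # size followed by the type byte 0x15
                    # (EFI_SECTION_USER_INTERFACE); the payload is the UTF-16LE
                    # name string terminated by the two NUL bytes appended above.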
DirName = os.path.dirname(Output)
if not CreateDirectory(DirName):
EdkLogger.error(None, FILE_CREATE_FAILURE, "Could not create directory %s" % DirName)
else:
if DirName == '':
DirName = os.getcwd()
if not os.access(DirName, os.W_OK):
EdkLogger.error(None, PERMISSION_FAILURE, "Do not have write permission on directory %s" % DirName)
try:
with open(Output, "wb") as Fd:
SectionData.tofile(Fd)
Fd.flush()
except IOError as X:
EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData='IOError %s' % X)
elif Ver:
Cmd += ("-n", Ver)
if BuildNumber:
Cmd += ("-j", BuildNumber)
Cmd += ("-o", Output)
SaveFileOnChange(CommandFile, ' '.join(Cmd), False)
if IsMakefile:
if ' '.join(Cmd).strip() not in GenFdsGlobalVariable.SecCmdList:
GenFdsGlobalVariable.SecCmdList.append(' '.join(Cmd).strip())
else:
if not GenFdsGlobalVariable.NeedsUpdate(Output, list(Input) + [CommandFile]):
return
GenFdsGlobalVariable.CallExternalTool(Cmd, "Failed to generate section")
else:
Cmd += ("-o", Output)
Cmd += Input
SaveFileOnChange(CommandFile, ' '.join(Cmd), False)
if IsMakefile:
if sys.platform == "win32":
Cmd = ['if', 'exist', Input[0]] + Cmd
else:
Cmd = ['-test', '-e', Input[0], "&&"] + Cmd
if ' '.join(Cmd).strip() not in GenFdsGlobalVariable.SecCmdList:
GenFdsGlobalVariable.SecCmdList.append(' '.join(Cmd).strip())
elif GenFdsGlobalVariable.NeedsUpdate(Output, list(Input) + [CommandFile]):
GenFdsGlobalVariable.DebugLogger(EdkLogger.DEBUG_5, "%s needs update because of newer %s" % (Output, Input))
GenFdsGlobalVariable.CallExternalTool(Cmd, "Failed to generate section")
if (os.path.getsize(Output) >= GenFdsGlobalVariable.LARGE_FILE_SIZE and
GenFdsGlobalVariable.LargeFileInFvFlags):
GenFdsGlobalVariable.LargeFileInFvFlags[-1] = True
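    # Command sketch (arguments are hypothetical): a call such as
    #   GenerateSection('out.sec', ['in.pe32'], 'EFI_SECTION_PE32')
    # assembles "GenSec -s EFI_SECTION_PE32 -o out.sec in.pe32" and either
    # records it in SecCmdList (IsMakefile) or runs it when out.sec is stale.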
@staticmethod
def GetAlignment (AlignString):
if not AlignString:
return 0
if AlignString.endswith('K'):
return int (AlignString.rstrip('K')) * 1024
if AlignString.endswith('M'):
return int (AlignString.rstrip('M')) * 1024 * 1024
if AlignString.endswith('G'):
return int (AlignString.rstrip('G')) * 1024 * 1024 * 1024
return int (AlignString)
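    # Worked examples of the parsing above:
    #   GetAlignment('')    -> 0
    #   GetAlignment('8')   -> 8
    #   GetAlignment('4K')  -> 4096
    #   GetAlignment('16M') -> 16777216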
@staticmethod
def GenerateFfs(Output, Input, Type, Guid, Fixed=False, CheckSum=False, Align=None,
SectionAlign=None, MakefilePath=None):
Cmd = ["GenFfs", "-t", Type, "-g", Guid]
mFfsValidAlign = ["0", "8", "16", "128", "512", "1K", "4K", "32K", "64K", "128K", "256K", "512K", "1M", "2M", "4M", "8M", "16M"]
        if Fixed:
Cmd.append("-x")
if CheckSum:
Cmd.append("-s")
if Align:
if Align not in mFfsValidAlign:
Align = GenFdsGlobalVariable.GetAlignment (Align)
for index in range(0, len(mFfsValidAlign) - 1):
if ((Align > GenFdsGlobalVariable.GetAlignment(mFfsValidAlign[index])) and (Align <= GenFdsGlobalVariable.GetAlignment(mFfsValidAlign[index + 1]))):
break
Align = mFfsValidAlign[index + 1]
Cmd += ("-a", Align)
Cmd += ("-o", Output)
for I in range(0, len(Input)):
if MakefilePath:
Cmd += ("-oi", Input[I])
else:
Cmd += ("-i", Input[I])
if SectionAlign and SectionAlign[I]:
Cmd += ("-n", SectionAlign[I])
CommandFile = Output + '.txt'
SaveFileOnChange(CommandFile, ' '.join(Cmd), False)
GenFdsGlobalVariable.DebugLogger(EdkLogger.DEBUG_5, "%s needs update because of newer %s" % (Output, Input))
if MakefilePath:
if (tuple(Cmd), tuple(GenFdsGlobalVariable.SecCmdList), tuple(GenFdsGlobalVariable.CopyList)) not in GenFdsGlobalVariable.FfsCmdDict:
GenFdsGlobalVariable.FfsCmdDict[tuple(Cmd), tuple(GenFdsGlobalVariable.SecCmdList), tuple(GenFdsGlobalVariable.CopyList)] = MakefilePath
GenFdsGlobalVariable.SecCmdList = []
GenFdsGlobalVariable.CopyList = []
else:
if not GenFdsGlobalVariable.NeedsUpdate(Output, list(Input) + [CommandFile]):
return
GenFdsGlobalVariable.CallExternalTool(Cmd, "Failed to generate FFS")
@staticmethod
    def GenerateFirmwareVolume(Output, Input, BaseAddress=None, ForceRebase=None, Capsule=False, Dump=False,
                               AddressFile=None, MapFile=None, FfsList=None, FileSystemGuid=None):
        # Use a None default to avoid sharing a mutable default list across calls
        if FfsList is None:
            FfsList = []
        if not GenFdsGlobalVariable.NeedsUpdate(Output, Input + FfsList):
return
GenFdsGlobalVariable.DebugLogger(EdkLogger.DEBUG_5, "%s needs update because of newer %s" % (Output, Input))
Cmd = ["GenFv"]
if BaseAddress:
Cmd += ("-r", BaseAddress)
if ForceRebase == False:
Cmd += ("-F", "FALSE")
elif ForceRebase == True:
Cmd += ("-F", "TRUE")
if Capsule:
Cmd.append("-c")
if Dump:
Cmd.append("-p")
if AddressFile:
Cmd += ("-a", AddressFile)
if MapFile:
Cmd += ("-m", MapFile)
if FileSystemGuid:
Cmd += ("-g", FileSystemGuid)
Cmd += ("-o", Output)
for I in Input:
Cmd += ("-i", I)
GenFdsGlobalVariable.CallExternalTool(Cmd, "Failed to generate FV")
@staticmethod
def GenerateFirmwareImage(Output, Input, Type="efi", SubType=None, Zero=False,
Strip=False, Replace=False, TimeStamp=None, Join=False,
Align=None, Padding=None, Convert=False, IsMakefile=False):
if not GenFdsGlobalVariable.NeedsUpdate(Output, Input) and not IsMakefile:
return
GenFdsGlobalVariable.DebugLogger(EdkLogger.DEBUG_5, "%s needs update because of newer %s" % (Output, Input))
Cmd = ["GenFw"]
if Type.lower() == "te":
Cmd.append("-t")
if SubType:
Cmd += ("-e", SubType)
if TimeStamp:
Cmd += ("-s", TimeStamp)
if Align:
Cmd += ("-a", Align)
if Padding:
Cmd += ("-p", Padding)
if Zero:
Cmd.append("-z")
if Strip:
Cmd.append("-l")
if Replace:
Cmd.append("-r")
if Join:
Cmd.append("-j")
if Convert:
Cmd.append("-m")
Cmd += ("-o", Output)
Cmd += Input
if IsMakefile:
if " ".join(Cmd).strip() not in GenFdsGlobalVariable.SecCmdList:
GenFdsGlobalVariable.SecCmdList.append(" ".join(Cmd).strip())
else:
GenFdsGlobalVariable.CallExternalTool(Cmd, "Failed to generate firmware image")
@staticmethod
def GenerateOptionRom(Output, EfiInput, BinaryInput, Compress=False, ClassCode=None,
Revision=None, DeviceId=None, VendorId=None, IsMakefile=False):
InputList = []
Cmd = ["EfiRom"]
if EfiInput:
if Compress:
Cmd.append("-ec")
else:
Cmd.append("-e")
for EfiFile in EfiInput:
Cmd.append(EfiFile)
InputList.append (EfiFile)
if BinaryInput:
Cmd.append("-b")
for BinFile in BinaryInput:
Cmd.append(BinFile)
InputList.append (BinFile)
# Check List
if not GenFdsGlobalVariable.NeedsUpdate(Output, InputList) and not IsMakefile:
return
GenFdsGlobalVariable.DebugLogger(EdkLogger.DEBUG_5, "%s needs update because of newer %s" % (Output, InputList))
if ClassCode:
Cmd += ("-l", ClassCode)
if Revision:
Cmd += ("-r", Revision)
if DeviceId:
Cmd += ("-i", DeviceId)
if VendorId:
Cmd += ("-f", VendorId)
Cmd += ("-o", Output)
if IsMakefile:
if " ".join(Cmd).strip() not in GenFdsGlobalVariable.SecCmdList:
GenFdsGlobalVariable.SecCmdList.append(" ".join(Cmd).strip())
else:
GenFdsGlobalVariable.CallExternalTool(Cmd, "Failed to generate option rom")
@staticmethod
def GuidTool(Output, Input, ToolPath, Options='', returnValue=[], IsMakefile=False):
if not GenFdsGlobalVariable.NeedsUpdate(Output, Input) and not IsMakefile:
return
GenFdsGlobalVariable.DebugLogger(EdkLogger.DEBUG_5, "%s needs update because of newer %s" % (Output, Input))
Cmd = [ToolPath, ]
Cmd += Options.split(' ')
Cmd += ("-o", Output)
Cmd += Input
if IsMakefile:
if " ".join(Cmd).strip() not in GenFdsGlobalVariable.SecCmdList:
GenFdsGlobalVariable.SecCmdList.append(" ".join(Cmd).strip())
else:
GenFdsGlobalVariable.CallExternalTool(Cmd, "Failed to call " + ToolPath, returnValue)
@staticmethod
def CallExternalTool (cmd, errorMess, returnValue=[]):
        if not isinstance(cmd, (tuple, list)):
GenFdsGlobalVariable.ErrorLogger("ToolError! Invalid parameter type in call to CallExternalTool")
if GenFdsGlobalVariable.DebugLevel != -1:
cmd += ('--debug', str(GenFdsGlobalVariable.DebugLevel))
GenFdsGlobalVariable.InfLogger (cmd)
if GenFdsGlobalVariable.VerboseMode:
cmd += ('-v',)
GenFdsGlobalVariable.InfLogger (cmd)
else:
stdout.write ('#')
stdout.flush()
GenFdsGlobalVariable.SharpCounter = GenFdsGlobalVariable.SharpCounter + 1
if GenFdsGlobalVariable.SharpCounter % GenFdsGlobalVariable.SharpNumberPerLine == 0:
stdout.write('\n')
try:
PopenObject = Popen(' '.join(cmd), stdout=PIPE, stderr=PIPE, shell=True)
except Exception as X:
EdkLogger.error("GenFds", COMMAND_FAILURE, ExtraData="%s: %s" % (str(X), cmd[0]))
(out, error) = PopenObject.communicate()
while PopenObject.returncode is None:
PopenObject.wait()
if returnValue != [] and returnValue[0] != 0:
#get command return value
returnValue[0] = PopenObject.returncode
return
if PopenObject.returncode != 0 or GenFdsGlobalVariable.VerboseMode or GenFdsGlobalVariable.DebugLevel != -1:
GenFdsGlobalVariable.InfLogger ("Return Value = %d" % PopenObject.returncode)
GenFdsGlobalVariable.InfLogger(out.decode(encoding='utf-8', errors='ignore'))
GenFdsGlobalVariable.InfLogger(error.decode(encoding='utf-8', errors='ignore'))
if PopenObject.returncode != 0:
print("###", cmd)
EdkLogger.error("GenFds", COMMAND_FAILURE, errorMess)
@staticmethod
def VerboseLogger (msg):
EdkLogger.verbose(msg)
@staticmethod
def InfLogger (msg):
EdkLogger.info(msg)
@staticmethod
def ErrorLogger (msg, File=None, Line=None, ExtraData=None):
EdkLogger.error('GenFds', GENFDS_ERROR, msg, File, Line, ExtraData)
@staticmethod
def DebugLogger (Level, msg):
EdkLogger.debug(Level, msg)
## MacroExtend()
#
# @param Str String that may contain macro
# @param MacroDict Dictionary that contains macro value pair
#
@staticmethod
def MacroExtend (Str, MacroDict=None, Arch=DataType.TAB_COMMON):
if Str is None:
return None
Dict = {'$(WORKSPACE)': GenFdsGlobalVariable.WorkSpaceDir,
# '$(OUTPUT_DIRECTORY)': GenFdsGlobalVariable.OutputDirFromDsc,
'$(TARGET)': GenFdsGlobalVariable.TargetName,
'$(TOOL_CHAIN_TAG)': GenFdsGlobalVariable.ToolChainTag,
'$(SPACE)': ' '
}
if Arch != DataType.TAB_COMMON and Arch in GenFdsGlobalVariable.ArchList:
OutputDir = GenFdsGlobalVariable.OutputDirFromDscDict[Arch]
else:
OutputDir = GenFdsGlobalVariable.OutputDirFromDscDict[GenFdsGlobalVariable.ArchList[0]]
Dict['$(OUTPUT_DIRECTORY)'] = OutputDir
if MacroDict:
Dict.update(MacroDict)
for key in Dict:
if Str.find(key) >= 0:
Str = Str.replace (key, Dict[key])
if Str.find('$(ARCH)') >= 0:
if len(GenFdsGlobalVariable.ArchList) == 1:
Str = Str.replace('$(ARCH)', GenFdsGlobalVariable.ArchList[0])
else:
EdkLogger.error("GenFds", GENFDS_ERROR, "No way to determine $(ARCH) for %s" % Str)
return Str
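    # Usage sketch (assuming TargetName == 'DEBUG' and ArchList == ['X64']):
    #   MacroExtend('$(TARGET)/$(ARCH)/App.efi')  # -> 'DEBUG/X64/App.efi'
    # With several arches in ArchList an unresolved $(ARCH) raises an error,
    # since there is no unique substitution.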
## GetPcdValue()
#
# @param PcdPattern pattern that labels a PCD.
#
@staticmethod
def GetPcdValue (PcdPattern):
if PcdPattern is None:
return None
if PcdPattern.startswith('PCD('):
PcdPair = PcdPattern[4:].rstrip(')').strip().split('.')
else:
PcdPair = PcdPattern.strip().split('.')
TokenSpace = PcdPair[0]
TokenCName = PcdPair[1]
for Arch in GenFdsGlobalVariable.ArchList:
Platform = GenFdsGlobalVariable.WorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, Arch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
PcdDict = Platform.Pcds
for Key in PcdDict:
PcdObj = PcdDict[Key]
if (PcdObj.TokenCName == TokenCName) and (PcdObj.TokenSpaceGuidCName == TokenSpace):
if PcdObj.Type != DataType.TAB_PCDS_FIXED_AT_BUILD:
EdkLogger.error("GenFds", GENFDS_ERROR, "%s is not FixedAtBuild type." % PcdPattern)
if PcdObj.DatumType != DataType.TAB_VOID:
EdkLogger.error("GenFds", GENFDS_ERROR, "%s is not VOID* datum type." % PcdPattern)
return PcdObj.DefaultValue
for Package in GenFdsGlobalVariable.WorkSpace.GetPackageList(GenFdsGlobalVariable.ActivePlatform,
Arch,
GenFdsGlobalVariable.TargetName,
GenFdsGlobalVariable.ToolChainTag):
PcdDict = Package.Pcds
for Key in PcdDict:
PcdObj = PcdDict[Key]
if (PcdObj.TokenCName == TokenCName) and (PcdObj.TokenSpaceGuidCName == TokenSpace):
if PcdObj.Type != DataType.TAB_PCDS_FIXED_AT_BUILD:
EdkLogger.error("GenFds", GENFDS_ERROR, "%s is not FixedAtBuild type." % PcdPattern)
if PcdObj.DatumType != DataType.TAB_VOID:
EdkLogger.error("GenFds", GENFDS_ERROR, "%s is not VOID* datum type." % PcdPattern)
return PcdObj.DefaultValue
return ''
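    # Usage sketch (token space and PCD name are hypothetical); both forms
    # resolve the same FixedAtBuild VOID* PCD, searching the platform first
    # and then its packages:
    #   GetPcdValue('PCD(gExampleTokenSpaceGuid.PcdExampleString)')
    #   GetPcdValue('gExampleTokenSpaceGuid.PcdExampleString')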
## FindExtendTool()
#
# Find location of tools to process data
#
# @param KeyStringList Filter for inputs of section generation
# @param CurrentArchList Arch list
# @param NameGuid The Guid name
#
def FindExtendTool(KeyStringList, CurrentArchList, NameGuid):
if GenFdsGlobalVariable.GuidToolDefinition:
if NameGuid in GenFdsGlobalVariable.GuidToolDefinition:
return GenFdsGlobalVariable.GuidToolDefinition[NameGuid]
ToolDefObj = ToolDefDict((os.path.join(os.getenv("WORKSPACE"), "Conf")))
ToolDef = ToolDefObj.ToolDef
ToolDb = ToolDef.ToolsDefTxtDatabase
    # If the user did not specify a filter, deduce one from global data.
if KeyStringList is None or KeyStringList == []:
Target = GenFdsGlobalVariable.TargetName
ToolChain = GenFdsGlobalVariable.ToolChainTag
if ToolChain not in ToolDb['TOOL_CHAIN_TAG']:
EdkLogger.error("GenFds", GENFDS_ERROR, "Can not find external tool because tool tag %s is not defined in tools_def.txt!" % ToolChain)
KeyStringList = [Target + '_' + ToolChain + '_' + CurrentArchList[0]]
for Arch in CurrentArchList:
if Target + '_' + ToolChain + '_' + Arch not in KeyStringList:
KeyStringList.append(Target + '_' + ToolChain + '_' + Arch)
ToolPathTmp = None
ToolOption = None
for Arch in CurrentArchList:
MatchItem = None
MatchPathItem = None
MatchOptionsItem = None
for KeyString in KeyStringList:
KeyStringBuildTarget, KeyStringToolChain, KeyStringArch = KeyString.split('_')
if KeyStringArch != Arch:
continue
for Item in ToolDef.ToolsDefTxtDictionary:
if len(Item.split('_')) < 5:
continue
ItemTarget, ItemToolChain, ItemArch, ItemTool, ItemAttr = Item.split('_')
if ItemTarget == DataType.TAB_STAR:
ItemTarget = KeyStringBuildTarget
if ItemToolChain == DataType.TAB_STAR:
ItemToolChain = KeyStringToolChain
if ItemArch == DataType.TAB_STAR:
ItemArch = KeyStringArch
if ItemTarget != KeyStringBuildTarget:
continue
if ItemToolChain != KeyStringToolChain:
continue
if ItemArch != KeyStringArch:
continue
if ItemAttr != DataType.TAB_GUID:
# Not GUID attribute
continue
if ToolDef.ToolsDefTxtDictionary[Item].lower() != NameGuid.lower():
# No GUID value match
continue
if MatchItem:
if MatchItem.split('_')[3] == ItemTool:
# Tool name is the same
continue
if CalculatePriorityValue(MatchItem) > CalculatePriorityValue(Item):
# Current MatchItem is higher priority than new match item
continue
MatchItem = Item
if not MatchItem:
continue
ToolName = MatchItem.split('_')[3]
for Item in ToolDef.ToolsDefTxtDictionary:
if len(Item.split('_')) < 5:
continue
ItemTarget, ItemToolChain, ItemArch, ItemTool, ItemAttr = Item.split('_')
if ItemTarget == DataType.TAB_STAR:
ItemTarget = KeyStringBuildTarget
if ItemToolChain == DataType.TAB_STAR:
ItemToolChain = KeyStringToolChain
if ItemArch == DataType.TAB_STAR:
ItemArch = KeyStringArch
if ItemTarget != KeyStringBuildTarget:
continue
if ItemToolChain != KeyStringToolChain:
continue
if ItemArch != KeyStringArch:
continue
if ItemTool != ToolName:
continue
if ItemAttr == 'PATH':
if MatchPathItem:
if CalculatePriorityValue(MatchPathItem) <= CalculatePriorityValue(Item):
MatchPathItem = Item
else:
MatchPathItem = Item
if ItemAttr == 'FLAGS':
if MatchOptionsItem:
if CalculatePriorityValue(MatchOptionsItem) <= CalculatePriorityValue(Item):
MatchOptionsItem = Item
else:
MatchOptionsItem = Item
if MatchPathItem:
ToolPathTmp = ToolDef.ToolsDefTxtDictionary[MatchPathItem]
if MatchOptionsItem:
ToolOption = ToolDef.ToolsDefTxtDictionary[MatchOptionsItem]
for Arch in CurrentArchList:
MatchItem = None
MatchPathItem = None
MatchOptionsItem = None
for KeyString in KeyStringList:
KeyStringBuildTarget, KeyStringToolChain, KeyStringArch = KeyString.split('_')
if KeyStringArch != Arch:
continue
Platform = GenFdsGlobalVariable.WorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, Arch, KeyStringBuildTarget, KeyStringToolChain]
for Item in Platform.BuildOptions:
if len(Item[1].split('_')) < 5:
continue
ItemTarget, ItemToolChain, ItemArch, ItemTool, ItemAttr = Item[1].split('_')
if ItemTarget == DataType.TAB_STAR:
ItemTarget = KeyStringBuildTarget
if ItemToolChain == DataType.TAB_STAR:
ItemToolChain = KeyStringToolChain
if ItemArch == DataType.TAB_STAR:
ItemArch = KeyStringArch
if ItemTarget != KeyStringBuildTarget:
continue
if ItemToolChain != KeyStringToolChain:
continue
if ItemArch != KeyStringArch:
continue
if ItemAttr != DataType.TAB_GUID:
# Not GUID attribute match
continue
if Platform.BuildOptions[Item].lower() != NameGuid.lower():
# No GUID value match
continue
if MatchItem:
if MatchItem[1].split('_')[3] == ItemTool:
# Tool name is the same
continue
if CalculatePriorityValue(MatchItem[1]) > CalculatePriorityValue(Item[1]):
# Current MatchItem is higher priority than new match item
continue
MatchItem = Item
if not MatchItem:
continue
ToolName = MatchItem[1].split('_')[3]
for Item in Platform.BuildOptions:
if len(Item[1].split('_')) < 5:
continue
ItemTarget, ItemToolChain, ItemArch, ItemTool, ItemAttr = Item[1].split('_')
if ItemTarget == DataType.TAB_STAR:
ItemTarget = KeyStringBuildTarget
if ItemToolChain == DataType.TAB_STAR:
ItemToolChain = KeyStringToolChain
if ItemArch == DataType.TAB_STAR:
ItemArch = KeyStringArch
if ItemTarget != KeyStringBuildTarget:
continue
if ItemToolChain != KeyStringToolChain:
continue
if ItemArch != KeyStringArch:
continue
if ItemTool != ToolName:
continue
if ItemAttr == 'PATH':
if MatchPathItem:
if CalculatePriorityValue(MatchPathItem[1]) <= CalculatePriorityValue(Item[1]):
MatchPathItem = Item
else:
MatchPathItem = Item
if ItemAttr == 'FLAGS':
if MatchOptionsItem:
if CalculatePriorityValue(MatchOptionsItem[1]) <= CalculatePriorityValue(Item[1]):
MatchOptionsItem = Item
else:
MatchOptionsItem = Item
if MatchPathItem:
ToolPathTmp = Platform.BuildOptions[MatchPathItem]
if MatchOptionsItem:
ToolOption = Platform.BuildOptions[MatchOptionsItem]
GenFdsGlobalVariable.GuidToolDefinition[NameGuid] = (ToolPathTmp, ToolOption)
return ToolPathTmp, ToolOption
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/GenFdsGlobalVariable.py
|
## @file
# process FV generation
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
import Common.LongFilePathOs as os
import subprocess
from io import BytesIO
from struct import *
from . import FfsFileStatement
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from Common.Misc import SaveFileOnChange, PackGUID
from Common.LongFilePathSupport import CopyLongFilePath
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.DataType import *
FV_UI_EXT_ENTRY_GUID = 'A67DF1FA-8DE8-4E98-AF09-4BDF2EFFBC7C'
## generate FV
#
#
class FV (object):
## The constructor
#
# @param self The object pointer
#
def __init__(self, Name=None):
self.UiFvName = Name
self.CreateFileName = None
self.BlockSizeList = []
self.DefineVarDict = {}
self.SetVarDict = {}
self.FvAlignment = None
self.FvAttributeDict = {}
self.FvNameGuid = None
self.FvNameString = None
self.AprioriSectionList = []
self.FfsList = []
self.BsBaseAddress = None
self.RtBaseAddress = None
self.FvInfFile = None
self.FvAddressFile = None
self.BaseAddress = None
self.InfFileName = None
self.FvAddressFileName = None
self.CapsuleName = None
self.FvBaseAddress = None
self.FvForceRebase = None
self.FvRegionInFD = None
self.UsedSizeEnable = False
self.FvExtEntryTypeValue = []
self.FvExtEntryType = []
self.FvExtEntryData = []
## AddToBuffer()
#
# Generate Fv and add it to the Buffer
#
# @param self The object pointer
# @param Buffer The buffer generated FV data will be put
# @param BaseAddress base address of FV
# @param BlockSize block size of FV
# @param BlockNum How many blocks in FV
# @param ErasePolarity Flash erase polarity
# @param MacroDict macro value pair
# @retval string Generated FV file path
#
    def AddToBuffer (self, Buffer, BaseAddress=None, BlockSize=None, BlockNum=None, ErasePolarity='1', MacroDict=None, Flag=False):
if BaseAddress is None and self.UiFvName.upper() + 'fv' in GenFdsGlobalVariable.ImageBinDict:
return GenFdsGlobalVariable.ImageBinDict[self.UiFvName.upper() + 'fv']
if MacroDict is None:
MacroDict = {}
#
# Check whether FV in Capsule is in FD flash region.
        # If so, report an error: an FV inside a capsule image may not also reside in an FD flash region.
#
if self.CapsuleName is not None:
for FdObj in GenFdsGlobalVariable.FdfParser.Profile.FdDict.values():
for RegionObj in FdObj.RegionList:
if RegionObj.RegionType == BINARY_FILE_TYPE_FV:
for RegionData in RegionObj.RegionDataList:
if RegionData.endswith(".fv"):
continue
elif RegionData.upper() + 'fv' in GenFdsGlobalVariable.ImageBinDict:
continue
elif self.UiFvName.upper() == RegionData.upper():
GenFdsGlobalVariable.ErrorLogger("Capsule %s in FD region can't contain a FV %s in FD region." % (self.CapsuleName, self.UiFvName.upper()))
if not Flag:
GenFdsGlobalVariable.InfLogger( "\nGenerating %s FV" %self.UiFvName)
GenFdsGlobalVariable.LargeFileInFvFlags.append(False)
FFSGuid = None
if self.FvBaseAddress is not None:
BaseAddress = self.FvBaseAddress
if not Flag:
            self._InitializeInf(BaseAddress, BlockSize, BlockNum, ErasePolarity)
#
# First Process the Apriori section
#
MacroDict.update(self.DefineVarDict)
        GenFdsGlobalVariable.VerboseLogger('Generate the Apriori file first!')
FfsFileList = []
for AprSection in self.AprioriSectionList:
FileName = AprSection.GenFfs (self.UiFvName, MacroDict, IsMakefile=Flag)
FfsFileList.append(FileName)
# Add Apriori file name to Inf file
if not Flag:
self.FvInfFile.append("EFI_FILE_NAME = " + \
FileName + \
TAB_LINE_BREAK)
# Process Modules in FfsList
for FfsFile in self.FfsList:
if Flag:
if isinstance(FfsFile, FfsFileStatement.FileStatement):
continue
if GenFdsGlobalVariable.EnableGenfdsMultiThread and GenFdsGlobalVariable.ModuleFile and GenFdsGlobalVariable.ModuleFile.Path.find(os.path.normpath(FfsFile.InfFileName)) == -1:
continue
FileName = FfsFile.GenFfs(MacroDict, FvParentAddr=BaseAddress, IsMakefile=Flag, FvName=self.UiFvName)
FfsFileList.append(FileName)
if not Flag:
self.FvInfFile.append("EFI_FILE_NAME = " + \
FileName + \
TAB_LINE_BREAK)
if not Flag:
FvInfFile = ''.join(self.FvInfFile)
SaveFileOnChange(self.InfFileName, FvInfFile, False)
#
# Call GenFv tool
#
FvOutputFile = os.path.join(GenFdsGlobalVariable.FvDir, self.UiFvName)
FvOutputFile = FvOutputFile + '.Fv'
# BUGBUG: FvOutputFile could be specified from FDF file (FV section, CreateFile statement)
if self.CreateFileName is not None:
FvOutputFile = self.CreateFileName
if Flag:
GenFdsGlobalVariable.ImageBinDict[self.UiFvName.upper() + 'fv'] = FvOutputFile
return FvOutputFile
FvInfoFileName = os.path.join(GenFdsGlobalVariable.FfsDir, self.UiFvName + '.inf')
if not Flag:
CopyLongFilePath(GenFdsGlobalVariable.FvAddressFileName, FvInfoFileName)
OrigFvInfo = None
if os.path.exists (FvInfoFileName):
            with open(FvInfoFileName, 'r') as FvInfoObj:
                OrigFvInfo = FvInfoObj.read()
if GenFdsGlobalVariable.LargeFileInFvFlags[-1]:
FFSGuid = GenFdsGlobalVariable.EFI_FIRMWARE_FILE_SYSTEM3_GUID
GenFdsGlobalVariable.GenerateFirmwareVolume(
FvOutputFile,
[self.InfFileName],
AddressFile=FvInfoFileName,
FfsList=FfsFileList,
ForceRebase=self.FvForceRebase,
FileSystemGuid=FFSGuid
)
NewFvInfo = None
if os.path.exists (FvInfoFileName):
            with open(FvInfoFileName, 'r') as FvInfoObj:
                NewFvInfo = FvInfoObj.read()
if NewFvInfo is not None and NewFvInfo != OrigFvInfo:
FvChildAddr = []
AddFileObj = open(FvInfoFileName, 'r')
AddrStrings = AddFileObj.readlines()
AddrKeyFound = False
for AddrString in AddrStrings:
if AddrKeyFound:
#get base address for the inside FvImage
FvChildAddr.append (AddrString)
elif AddrString.find ("[FV_BASE_ADDRESS]") != -1:
AddrKeyFound = True
AddFileObj.close()
if FvChildAddr != []:
# Update Ffs again
for FfsFile in self.FfsList:
FileName = FfsFile.GenFfs(MacroDict, FvChildAddr, BaseAddress, IsMakefile=Flag, FvName=self.UiFvName)
if GenFdsGlobalVariable.LargeFileInFvFlags[-1]:
                    FFSGuid = GenFdsGlobalVariable.EFI_FIRMWARE_FILE_SYSTEM3_GUID
#Update GenFv again
GenFdsGlobalVariable.GenerateFirmwareVolume(
FvOutputFile,
[self.InfFileName],
AddressFile=FvInfoFileName,
FfsList=FfsFileList,
ForceRebase=self.FvForceRebase,
FileSystemGuid=FFSGuid
)
#
# Write the Fv contents to Buffer
#
if os.path.isfile(FvOutputFile) and os.path.getsize(FvOutputFile) >= 0x48:
FvFileObj = open(FvOutputFile, 'rb')
# PI FvHeader is 0x48 byte
FvHeaderBuffer = FvFileObj.read(0x48)
Signature = FvHeaderBuffer[0x28:0x32]
if Signature and Signature.startswith(b'_FVH'):
GenFdsGlobalVariable.VerboseLogger("\nGenerate %s FV Successfully" % self.UiFvName)
GenFdsGlobalVariable.SharpCounter = 0
FvFileObj.seek(0)
Buffer.write(FvFileObj.read())
# FV alignment position.
FvAlignmentValue = 1 << (ord(FvHeaderBuffer[0x2E:0x2F]) & 0x1F)
if FvAlignmentValue >= 0x400:
if FvAlignmentValue >= 0x100000:
if FvAlignmentValue >= 0x1000000:
#The max alignment supported by FFS is 16M.
self.FvAlignment = "16M"
else:
self.FvAlignment = str(FvAlignmentValue // 0x100000) + "M"
else:
self.FvAlignment = str(FvAlignmentValue // 0x400) + "K"
else:
# FvAlignmentValue is less than 1K
self.FvAlignment = str (FvAlignmentValue)
FvFileObj.close()
GenFdsGlobalVariable.ImageBinDict[self.UiFvName.upper() + 'fv'] = FvOutputFile
GenFdsGlobalVariable.LargeFileInFvFlags.pop()
else:
GenFdsGlobalVariable.ErrorLogger("Invalid FV file %s." % self.UiFvName)
else:
GenFdsGlobalVariable.ErrorLogger("Failed to generate %s FV file." %self.UiFvName)
return FvOutputFile
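    # Note on the header parsing above: byte 0x2E of the PI firmware volume
    # header is the byte of the FvAttributes word that carries the alignment
    # field, so 1 << (byte & 0x1F) recovers the FV alignment in bytes; it is
    # then rendered back into the K/M suffix form used by FDF, capped at 16M.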
## _GetBlockSize()
#
# Calculate FV's block size
# Inherit block size from FD if no block size specified in FV
#
def _GetBlockSize(self):
if self.BlockSizeList:
return True
for FdObj in GenFdsGlobalVariable.FdfParser.Profile.FdDict.values():
for RegionObj in FdObj.RegionList:
if RegionObj.RegionType != BINARY_FILE_TYPE_FV:
continue
for RegionData in RegionObj.RegionDataList:
#
# Found the FD and region that contain this FV
#
if self.UiFvName.upper() == RegionData.upper():
RegionObj.BlockInfoOfRegion(FdObj.BlockSizeList, self)
if self.BlockSizeList:
return True
return False
## _InitializeInf()
#
# Initialize the inf file to create FV
#
# @param self The object pointer
# @param BaseAddress base address of FV
# @param BlockSize block size of FV
# @param BlockNum How many blocks in FV
# @param ErasePolarity Flash erase polarity
#
    def _InitializeInf (self, BaseAddress=None, BlockSize=None, BlockNum=None, ErasePolarity='1'):
#
# Create FV inf file
#
self.InfFileName = os.path.join(GenFdsGlobalVariable.FvDir,
self.UiFvName + '.inf')
self.FvInfFile = []
#
# Add [Options]
#
self.FvInfFile.append("[options]" + TAB_LINE_BREAK)
if BaseAddress is not None:
self.FvInfFile.append("EFI_BASE_ADDRESS = " + \
BaseAddress + \
TAB_LINE_BREAK)
if BlockSize is not None:
self.FvInfFile.append("EFI_BLOCK_SIZE = " + \
'0x%X' %BlockSize + \
TAB_LINE_BREAK)
if BlockNum is not None:
self.FvInfFile.append("EFI_NUM_BLOCKS = " + \
' 0x%X' %BlockNum + \
TAB_LINE_BREAK)
else:
if self.BlockSizeList == []:
if not self._GetBlockSize():
                    # default the block size to 1 when none can be determined
self.FvInfFile.append("EFI_BLOCK_SIZE = 0x1" + TAB_LINE_BREAK)
for BlockSize in self.BlockSizeList:
if BlockSize[0] is not None:
self.FvInfFile.append("EFI_BLOCK_SIZE = " + \
'0x%X' %BlockSize[0] + \
TAB_LINE_BREAK)
if BlockSize[1] is not None:
self.FvInfFile.append("EFI_NUM_BLOCKS = " + \
' 0x%X' %BlockSize[1] + \
TAB_LINE_BREAK)
if self.BsBaseAddress is not None:
self.FvInfFile.append('EFI_BOOT_DRIVER_BASE_ADDRESS = ' + \
'0x%X' %self.BsBaseAddress)
if self.RtBaseAddress is not None:
self.FvInfFile.append('EFI_RUNTIME_DRIVER_BASE_ADDRESS = ' + \
'0x%X' %self.RtBaseAddress)
#
# Add attribute
#
self.FvInfFile.append("[attributes]" + TAB_LINE_BREAK)
self.FvInfFile.append("EFI_ERASE_POLARITY = " + \
' %s' %ErasePloarity + \
TAB_LINE_BREAK)
        if self.FvAttributeDict is not None:
for FvAttribute in self.FvAttributeDict.keys():
if FvAttribute == "FvUsedSizeEnable":
if self.FvAttributeDict[FvAttribute].upper() in ('TRUE', '1'):
self.UsedSizeEnable = True
continue
self.FvInfFile.append("EFI_" + \
FvAttribute + \
' = ' + \
self.FvAttributeDict[FvAttribute] + \
TAB_LINE_BREAK )
if self.FvAlignment is not None:
self.FvInfFile.append("EFI_FVB2_ALIGNMENT_" + \
self.FvAlignment.strip() + \
" = TRUE" + \
TAB_LINE_BREAK)
#
# Generate FV extension header file
#
if not self.FvNameGuid:
if len(self.FvExtEntryType) > 0 or self.UsedSizeEnable:
GenFdsGlobalVariable.ErrorLogger("FV Extension Header Entries declared for %s with no FvNameGuid declaration." % (self.UiFvName))
else:
TotalSize = 16 + 4
Buffer = bytearray()
if self.UsedSizeEnable:
TotalSize += (4 + 4)
## define EFI_FV_EXT_TYPE_USED_SIZE_TYPE 0x03
#typedef struct
# {
# EFI_FIRMWARE_VOLUME_EXT_ENTRY Hdr;
# UINT32 UsedSize;
# } EFI_FIRMWARE_VOLUME_EXT_ENTRY_USED_SIZE_TYPE;
Buffer += pack('HHL', 8, 3, 0)
if self.FvNameString == 'TRUE':
#
# Create EXT entry for FV UI name
# This GUID is used: A67DF1FA-8DE8-4E98-AF09-4BDF2EFFBC7C
#
FvUiLen = len(self.UiFvName)
TotalSize += (FvUiLen + 16 + 4)
                Guid = FV_UI_EXT_ENTRY_GUID.split('-')
#
# Layout:
# EFI_FIRMWARE_VOLUME_EXT_ENTRY: size 4
# GUID: size 16
# FV UI name
#
Buffer += (pack('HH', (FvUiLen + 16 + 4), 0x0002)
+ PackGUID(Guid)
+ self.UiFvName.encode('utf-8'))
for Index in range (0, len(self.FvExtEntryType)):
if self.FvExtEntryType[Index] == 'FILE':
# check if the path is absolute or relative
if os.path.isabs(self.FvExtEntryData[Index]):
FileFullPath = os.path.normpath(self.FvExtEntryData[Index])
else:
FileFullPath = os.path.normpath(os.path.join(GenFdsGlobalVariable.WorkSpaceDir, self.FvExtEntryData[Index]))
# check if the file path exists or not
if not os.path.isfile(FileFullPath):
GenFdsGlobalVariable.ErrorLogger("Error opening FV Extension Header Entry file %s." % (self.FvExtEntryData[Index]))
FvExtFile = open (FileFullPath, 'rb')
FvExtFile.seek(0, 2)
Size = FvExtFile.tell()
if Size >= 0x10000:
GenFdsGlobalVariable.ErrorLogger("The size of FV Extension Header Entry file %s exceeds 0x10000." % (self.FvExtEntryData[Index]))
TotalSize += (Size + 4)
FvExtFile.seek(0)
Buffer += pack('HH', (Size + 4), int(self.FvExtEntryTypeValue[Index], 16))
Buffer += FvExtFile.read()
FvExtFile.close()
if self.FvExtEntryType[Index] == 'DATA':
ByteList = self.FvExtEntryData[Index].split(',')
Size = len (ByteList)
if Size >= 0x10000:
GenFdsGlobalVariable.ErrorLogger("The size of FV Extension Header Entry data %s exceeds 0x10000." % (self.FvExtEntryData[Index]))
TotalSize += (Size + 4)
Buffer += pack('HH', (Size + 4), int(self.FvExtEntryTypeValue[Index], 16))
for Index1 in range (0, Size):
Buffer += pack('B', int(ByteList[Index1], 16))
Guid = self.FvNameGuid.split('-')
Buffer = PackGUID(Guid) + pack('=L', TotalSize) + Buffer
#
# Generate FV extension header file if the total size is not zero
#
if TotalSize > 0:
FvExtHeaderFileName = os.path.join(GenFdsGlobalVariable.FvDir, self.UiFvName + '.ext')
FvExtHeaderFile = BytesIO()
FvExtHeaderFile.write(Buffer)
Changed = SaveFileOnChange(FvExtHeaderFileName, FvExtHeaderFile.getvalue(), True)
FvExtHeaderFile.close()
if Changed:
if os.path.exists (self.InfFileName):
os.remove (self.InfFileName)
self.FvInfFile.append("EFI_FV_EXT_HEADER_FILE_NAME = " + \
FvExtHeaderFileName + \
TAB_LINE_BREAK)
#
# Add [Files]
#
self.FvInfFile.append("[files]" + TAB_LINE_BREAK)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/Fv.py
|
## @file
# process FFS generation from INF statement
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2014-2016 Hewlett-Packard Development Company, L.P.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from . import Rule
import Common.LongFilePathOs as os
from io import BytesIO
from struct import *
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from .Ffs import SectionSuffix,FdfFvFileTypeToFileType
import subprocess
import sys
from . import Section
from . import RuleSimpleFile
from . import RuleComplexFile
from CommonDataClass.FdfClass import FfsInfStatementClassObject
from Common.MultipleWorkspace import MultipleWorkspace as mws
from Common.DataType import SUP_MODULE_USER_DEFINED
from Common.DataType import SUP_MODULE_HOST_APPLICATION
from Common.StringUtils import *
from Common.Misc import PathClass
from Common.Misc import GuidStructureByteArrayToGuidString
from Common.Misc import ProcessDuplicatedInf
from Common.Misc import GetVariableOffset
from Common import EdkLogger
from Common.BuildToolError import *
from .GuidSection import GuidSection
from .FvImageSection import FvImageSection
from Common.Misc import PeImageClass
from AutoGen.GenDepex import DependencyExpression
from PatchPcdValue.PatchPcdValue import PatchBinaryFile
from Common.LongFilePathSupport import CopyLongFilePath
from Common.LongFilePathSupport import OpenLongFilePath as open
import Common.GlobalData as GlobalData
from .DepexSection import DepexSection
from Common.Misc import SaveFileOnChange
from Common.Expression import *
from Common.DataType import *
## generate FFS from INF
#
#
class FfsInfStatement(FfsInfStatementClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
FfsInfStatementClassObject.__init__(self)
self.TargetOverrideList = []
self.ShadowFromInfFile = None
self.KeepRelocFromRule = None
self.InDsc = True
self.OptRomDefs = {}
self.PiSpecVersion = '0x00000000'
self.InfModule = None
self.FinalTargetSuffixMap = {}
self.CurrentLineNum = None
self.CurrentLineContent = None
self.FileName = None
self.InfFileName = None
self.OverrideGuid = None
self.PatchedBinFile = ''
self.MacroDict = {}
self.Depex = False
## GetFinalTargetSuffixMap() method
#
# Get final build target list
def GetFinalTargetSuffixMap(self):
if not self.InfModule or not self.CurrentArch:
return []
if not self.FinalTargetSuffixMap:
FinalBuildTargetList = GenFdsGlobalVariable.GetModuleCodaTargetList(self.InfModule, self.CurrentArch)
for File in FinalBuildTargetList:
self.FinalTargetSuffixMap.setdefault(os.path.splitext(File)[1], []).append(File)
# Check if current INF module has DEPEX
if '.depex' not in self.FinalTargetSuffixMap and self.InfModule.ModuleType != SUP_MODULE_USER_DEFINED and self.InfModule.ModuleType != SUP_MODULE_HOST_APPLICATION \
and not self.InfModule.DxsFile and not self.InfModule.LibraryClass:
ModuleType = self.InfModule.ModuleType
PlatformDataBase = GenFdsGlobalVariable.WorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, self.CurrentArch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
if ModuleType != SUP_MODULE_USER_DEFINED and ModuleType != SUP_MODULE_HOST_APPLICATION:
for LibraryClass in PlatformDataBase.LibraryClasses.GetKeys():
if LibraryClass.startswith("NULL") and PlatformDataBase.LibraryClasses[LibraryClass, ModuleType]:
self.InfModule.LibraryClasses[LibraryClass] = PlatformDataBase.LibraryClasses[LibraryClass, ModuleType]
StrModule = str(self.InfModule)
PlatformModule = None
if StrModule in PlatformDataBase.Modules:
PlatformModule = PlatformDataBase.Modules[StrModule]
for LibraryClass in PlatformModule.LibraryClasses:
if LibraryClass.startswith("NULL"):
self.InfModule.LibraryClasses[LibraryClass] = PlatformModule.LibraryClasses[LibraryClass]
DependencyList = [self.InfModule]
LibraryInstance = {}
DepexList = []
while len(DependencyList) > 0:
Module = DependencyList.pop(0)
if not Module:
continue
for Dep in Module.Depex[self.CurrentArch, ModuleType]:
if DepexList != []:
DepexList.append('AND')
DepexList.append('(')
DepexList.extend(Dep)
                    if DepexList[-1] == 'END': # no need for an END at this point
DepexList.pop()
DepexList.append(')')
if 'BEFORE' in DepexList or 'AFTER' in DepexList:
break
for LibName in Module.LibraryClasses:
if LibName in LibraryInstance:
continue
if PlatformModule and LibName in PlatformModule.LibraryClasses:
LibraryPath = PlatformModule.LibraryClasses[LibName]
else:
LibraryPath = PlatformDataBase.LibraryClasses[LibName, ModuleType]
if not LibraryPath:
LibraryPath = Module.LibraryClasses[LibName]
if not LibraryPath:
continue
LibraryModule = GenFdsGlobalVariable.WorkSpace.BuildObject[LibraryPath, self.CurrentArch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
LibraryInstance[LibName] = LibraryModule
DependencyList.append(LibraryModule)
if DepexList:
Dpx = DependencyExpression(DepexList, ModuleType, True)
if len(Dpx.PostfixNotation) != 0:
# It means this module has DEPEX
self.FinalTargetSuffixMap['.depex'] = [os.path.join(self.EfiOutputPath, self.BaseName) + '.depex']
return self.FinalTargetSuffixMap
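    # Illustrative note (not executed; the paths are hypothetical): the returned map
    # groups final build targets by file extension, e.g.
    #   {'.efi': ['.../Foo/OUTPUT/Foo.efi'], '.depex': ['.../Foo/OUTPUT/Foo.depex']}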
## __InfParse() method
#
# Parse inf file to get module information
#
# @param self The object pointer
# @param Dict dictionary contains macro and value pair
#
def __InfParse__(self, Dict = None, IsGenFfs=False):
        GenFdsGlobalVariable.VerboseLogger("Begin parsing INF file: %s" % self.InfFileName)
self.InfFileName = self.InfFileName.replace('$(WORKSPACE)', '')
if len(self.InfFileName) > 1 and self.InfFileName[0] == '\\' and self.InfFileName[1] == '\\':
pass
elif self.InfFileName[0] == '\\' or self.InfFileName[0] == '/' :
self.InfFileName = self.InfFileName[1:]
if self.InfFileName.find('$') == -1:
InfPath = NormPath(self.InfFileName)
if not os.path.exists(InfPath):
InfPath = GenFdsGlobalVariable.ReplaceWorkspaceMacro(InfPath)
if not os.path.exists(InfPath):
                    EdkLogger.error("GenFds", GENFDS_ERROR, "Non-existent Module %s!" % (self.InfFileName))
self.CurrentArch = self.GetCurrentArch()
#
# Get the InfClass object
#
PathClassObj = PathClass(self.InfFileName, GenFdsGlobalVariable.WorkSpaceDir)
ErrorCode, ErrorInfo = PathClassObj.Validate(".inf")
if ErrorCode != 0:
EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
#
# Cache lower case version of INF path before processing FILE_GUID override
#
InfLowerPath = str(PathClassObj).lower()
if self.OverrideGuid:
PathClassObj = ProcessDuplicatedInf(PathClassObj, self.OverrideGuid, GenFdsGlobalVariable.WorkSpaceDir)
if self.CurrentArch is not None:
Inf = GenFdsGlobalVariable.WorkSpace.BuildObject[PathClassObj, self.CurrentArch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
#
# Set Ffs BaseName, ModuleGuid, ModuleType, Version, OutputPath
#
self.BaseName = Inf.BaseName
self.ModuleGuid = Inf.Guid
self.ModuleType = Inf.ModuleType
if Inf.Specification is not None and 'PI_SPECIFICATION_VERSION' in Inf.Specification:
self.PiSpecVersion = Inf.Specification['PI_SPECIFICATION_VERSION']
if Inf.AutoGenVersion < 0x00010005:
self.ModuleType = Inf.ComponentType
self.VersionString = Inf.Version
self.BinFileList = Inf.Binaries
self.SourceFileList = Inf.Sources
if self.KeepReloc is None and Inf.Shadow:
self.ShadowFromInfFile = Inf.Shadow
else:
Inf = GenFdsGlobalVariable.WorkSpace.BuildObject[PathClassObj, TAB_COMMON, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
self.BaseName = Inf.BaseName
self.ModuleGuid = Inf.Guid
self.ModuleType = Inf.ModuleType
if Inf.Specification is not None and 'PI_SPECIFICATION_VERSION' in Inf.Specification:
self.PiSpecVersion = Inf.Specification['PI_SPECIFICATION_VERSION']
self.VersionString = Inf.Version
self.BinFileList = Inf.Binaries
self.SourceFileList = Inf.Sources
if self.BinFileList == []:
EdkLogger.error("GenFds", GENFDS_ERROR,
"INF %s specified in FDF could not be found in build ARCH %s!" \
% (self.InfFileName, GenFdsGlobalVariable.ArchList))
if self.OverrideGuid:
self.ModuleGuid = self.OverrideGuid
if len(self.SourceFileList) != 0 and not self.InDsc:
            EdkLogger.warn("GenFds", GENFDS_ERROR, "Module %s NOT found in DSC file; is it really a binary module?" % (self.InfFileName))
if self.ModuleType == SUP_MODULE_SMM_CORE and int(self.PiSpecVersion, 16) < 0x0001000A:
EdkLogger.error("GenFds", FORMAT_NOT_SUPPORTED, "SMM_CORE module type can't be used in the module with PI_SPECIFICATION_VERSION less than 0x0001000A", File=self.InfFileName)
if self.ModuleType == SUP_MODULE_MM_CORE_STANDALONE and int(self.PiSpecVersion, 16) < 0x00010032:
EdkLogger.error("GenFds", FORMAT_NOT_SUPPORTED, "MM_CORE_STANDALONE module type can't be used in the module with PI_SPECIFICATION_VERSION less than 0x00010032", File=self.InfFileName)
if Inf._Defs is not None and len(Inf._Defs) > 0:
self.OptRomDefs.update(Inf._Defs)
self.PatchPcds = []
InfPcds = Inf.Pcds
Platform = GenFdsGlobalVariable.WorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, self.CurrentArch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
FdfPcdDict = GenFdsGlobalVariable.FdfParser.Profile.PcdDict
PlatformPcds = Platform.Pcds
        # Workaround: both the build and GenFds tools convert the workspace path to lower case,
        # but the INF file path in the FDF and DSC files may contain mixed-case characters.
        # Convert the path to lower case to check whether PCD values are overridden by the DSC.
DscModules = {}
for DscModule in Platform.Modules:
DscModules[str(DscModule).lower()] = Platform.Modules[DscModule]
for PcdKey in InfPcds:
Pcd = InfPcds[PcdKey]
if not hasattr(Pcd, 'Offset'):
continue
if Pcd.Type != TAB_PCDS_PATCHABLE_IN_MODULE:
continue
# Override Patchable PCD value by the value from DSC
PatchPcd = None
if InfLowerPath in DscModules and PcdKey in DscModules[InfLowerPath].Pcds:
PatchPcd = DscModules[InfLowerPath].Pcds[PcdKey]
elif PcdKey in Platform.Pcds:
PatchPcd = Platform.Pcds[PcdKey]
DscOverride = False
if PatchPcd and Pcd.Type == PatchPcd.Type:
DefaultValue = PatchPcd.DefaultValue
DscOverride = True
# Override Patchable PCD value by the value from FDF
FdfOverride = False
if PcdKey in FdfPcdDict:
DefaultValue = FdfPcdDict[PcdKey]
FdfOverride = True
# Override Patchable PCD value by the value from Build Option
BuildOptionOverride = False
if GlobalData.BuildOptionPcd:
for pcd in GlobalData.BuildOptionPcd:
if PcdKey == (pcd[1], pcd[0]):
if pcd[2]:
continue
DefaultValue = pcd[3]
BuildOptionOverride = True
break
if not DscOverride and not FdfOverride and not BuildOptionOverride:
continue
# Support Flexible PCD format
if DefaultValue:
try:
DefaultValue = ValueExpressionEx(DefaultValue, Pcd.DatumType, Platform._GuidDict)(True)
except BadExpression:
EdkLogger.error("GenFds", GENFDS_ERROR, 'PCD [%s.%s] Value "%s"' %(Pcd.TokenSpaceGuidCName, Pcd.TokenCName, DefaultValue), File=self.InfFileName)
if Pcd.InfDefaultValue:
try:
Pcd.InfDefaultValue = ValueExpressionEx(Pcd.InfDefaultValue, Pcd.DatumType, Platform._GuidDict)(True)
except BadExpression:
EdkLogger.error("GenFds", GENFDS_ERROR, 'PCD [%s.%s] Value "%s"' %(Pcd.TokenSpaceGuidCName, Pcd.TokenCName, Pcd.DefaultValue), File=self.InfFileName)
            # Check the value; if the values are equal, there is no need to patch
if Pcd.DatumType == TAB_VOID:
if Pcd.InfDefaultValue == DefaultValue or not DefaultValue:
continue
# Get the string size from FDF or DSC
if DefaultValue[0] == 'L':
                    # Remove the L and quote characters, but count a terminating '\0'
MaxDatumSize = str((len(DefaultValue) - 2) * 2)
elif DefaultValue[0] == '{':
MaxDatumSize = str(len(DefaultValue.split(',')))
else:
MaxDatumSize = str(len(DefaultValue) - 1)
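                # Size computation examples (illustrative, assuming typical FDF/DSC values):
                #   L"ab"        -> (5 - 2) * 2 = 6 bytes (two UCS-2 chars plus a wide '\0')
                #   {0x01, 0x02} -> two comma-separated bytes -> size "2"
                #   "abc"        -> 5 - 1 = 4 bytes (three chars plus a terminating '\0')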
if DscOverride:
Pcd.MaxDatumSize = PatchPcd.MaxDatumSize
                    # If the maximum size is not defined in the DSC, try to get the current size from the INF
if not Pcd.MaxDatumSize:
Pcd.MaxDatumSize = str(len(Pcd.InfDefaultValue.split(',')))
else:
Base1 = Base2 = 10
if Pcd.InfDefaultValue.upper().startswith('0X'):
Base1 = 16
if DefaultValue.upper().startswith('0X'):
Base2 = 16
try:
PcdValueInImg = int(Pcd.InfDefaultValue, Base1)
PcdValueInDscOrFdf = int(DefaultValue, Base2)
if PcdValueInImg == PcdValueInDscOrFdf:
continue
except:
continue
# Check the Pcd size and data type
if Pcd.DatumType == TAB_VOID:
if int(MaxDatumSize) > int(Pcd.MaxDatumSize):
EdkLogger.error("GenFds", GENFDS_ERROR, "The size of VOID* type PCD '%s.%s' exceeds its maximum size %d bytes." \
% (Pcd.TokenSpaceGuidCName, Pcd.TokenCName, int(MaxDatumSize) - int(Pcd.MaxDatumSize)))
else:
if PcdValueInDscOrFdf > MAX_VAL_TYPE[Pcd.DatumType] \
or PcdValueInImg > MAX_VAL_TYPE[Pcd.DatumType]:
EdkLogger.error("GenFds", GENFDS_ERROR, "The size of %s type PCD '%s.%s' doesn't match its data type." \
% (Pcd.DatumType, Pcd.TokenSpaceGuidCName, Pcd.TokenCName))
self.PatchPcds.append((Pcd, DefaultValue))
self.InfModule = Inf
self.PcdIsDriver = Inf.PcdIsDriver
self.IsBinaryModule = Inf.IsBinaryModule
if len(Inf.Depex.data) > 0 and len(Inf.DepexExpression.data) > 0:
self.Depex = True
GenFdsGlobalVariable.VerboseLogger("BaseName : %s" % self.BaseName)
GenFdsGlobalVariable.VerboseLogger("ModuleGuid : %s" % self.ModuleGuid)
GenFdsGlobalVariable.VerboseLogger("ModuleType : %s" % self.ModuleType)
GenFdsGlobalVariable.VerboseLogger("VersionString : %s" % self.VersionString)
        GenFdsGlobalVariable.VerboseLogger("InfFileName : %s" % self.InfFileName)
#
        # Set OutputPath = ${WorkSpace}\Build\Fv\Ffs\${ModuleGuid}${ModuleName}\
#
if IsGenFfs:
Rule = self.__GetRule__()
if GlobalData.gGuidPatternEnd.match(Rule.NameGuid):
self.ModuleGuid = Rule.NameGuid
self.OutputPath = os.path.join(GenFdsGlobalVariable.FfsDir, \
self.ModuleGuid + self.BaseName)
if not os.path.exists(self.OutputPath) :
os.makedirs(self.OutputPath)
self.EfiOutputPath, self.EfiDebugPath = self.__GetEFIOutPutPath__()
        GenFdsGlobalVariable.VerboseLogger("ModuleEFIPath: " + self.EfiOutputPath)
## PatchEfiFile
#
# Patch EFI file with patch PCD
#
# @param EfiFile: EFI file needs to be patched.
# @retval: Full path of patched EFI file: self.OutputPath + EfiFile base name
# If passed in file does not end with efi, return as is
#
def PatchEfiFile(self, EfiFile, FileType):
#
# If the module does not have any patches, then return path to input file
#
if not self.PatchPcds:
return EfiFile
#
# Only patch file if FileType is PE32 or ModuleType is USER_DEFINED
#
if FileType != BINARY_FILE_TYPE_PE32 and self.ModuleType != SUP_MODULE_USER_DEFINED and self.ModuleType != SUP_MODULE_HOST_APPLICATION:
return EfiFile
#
# Generate path to patched output file
#
Basename = os.path.basename(EfiFile)
Output = os.path.normpath (os.path.join(self.OutputPath, Basename))
#
# If this file has already been patched, then return the path to the patched file
#
if self.PatchedBinFile == Output:
return Output
#
# If a different file from the same module has already been patched, then generate an error
#
if self.PatchedBinFile:
EdkLogger.error("GenFds", GENFDS_ERROR,
'Only one binary file can be patched:\n'
' a binary file has been patched: %s\n'
' current file: %s' % (self.PatchedBinFile, EfiFile),
File=self.InfFileName)
#
# Copy unpatched file contents to output file location to perform patching
#
CopyLongFilePath(EfiFile, Output)
#
# Apply patches to patched output file
#
for Pcd, Value in self.PatchPcds:
RetVal, RetStr = PatchBinaryFile(Output, int(Pcd.Offset, 0), Pcd.DatumType, Value, Pcd.MaxDatumSize)
if RetVal:
EdkLogger.error("GenFds", GENFDS_ERROR, RetStr, File=self.InfFileName)
#
# Save the path of the patched output file
#
self.PatchedBinFile = Output
#
# Return path to patched output file
#
return Output
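    # Usage sketch (illustrative only; the file path and instance name are hypothetical):
    #   Patched = FfsStatement.PatchEfiFile('Build/.../Foo.efi', BINARY_FILE_TYPE_PE32)
    # returns the path of a copy under self.OutputPath with all patchable PCD
    # values applied, or the input path unchanged when no patch PCDs exist.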
## GenFfs() method
#
# Generate FFS
#
# @param self The object pointer
# @param Dict dictionary contains macro and value pair
# @param FvChildAddr Array of the inside FvImage base address
# @param FvParentAddr Parent Fv base address
# @retval string Generated FFS file name
#
def GenFfs(self, Dict = None, FvChildAddr = [], FvParentAddr=None, IsMakefile=False, FvName=None):
#
# Parse Inf file get Module related information
#
if Dict is None:
Dict = {}
self.__InfParse__(Dict, IsGenFfs=True)
Arch = self.GetCurrentArch()
        SrcFile = mws.join(GenFdsGlobalVariable.WorkSpaceDir, self.InfFileName)
DestFile = os.path.join( self.OutputPath, self.ModuleGuid + '.ffs')
SrcFileDir = "."
SrcPath = os.path.dirname(SrcFile)
SrcFileName = os.path.basename(SrcFile)
SrcFileBase, SrcFileExt = os.path.splitext(SrcFileName)
DestPath = os.path.dirname(DestFile)
DestFileName = os.path.basename(DestFile)
DestFileBase, DestFileExt = os.path.splitext(DestFileName)
self.MacroDict = {
# source file
"${src}" : SrcFile,
"${s_path}" : SrcPath,
"${s_dir}" : SrcFileDir,
"${s_name}" : SrcFileName,
"${s_base}" : SrcFileBase,
"${s_ext}" : SrcFileExt,
# destination file
"${dst}" : DestFile,
"${d_path}" : DestPath,
"${d_name}" : DestFileName,
"${d_base}" : DestFileBase,
"${d_ext}" : DestFileExt
}
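        # Expansion example (illustrative; names are hypothetical): for SrcFile
        # 'Pkg/Foo/Foo.inf' and DestFile '<FfsDir>/<GUID>.ffs', the dictionary
        # maps ${s_name} -> 'Foo.inf', ${s_base} -> 'Foo', ${s_ext} -> '.inf',
        # ${d_name} -> '<GUID>.ffs', and ${d_ext} -> '.ffs'.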
#
        # Allow a binary-type module to omit the override rule in the FDF file.
#
if len(self.BinFileList) > 0:
if self.Rule is None or self.Rule == "":
self.Rule = "BINARY"
if not IsMakefile and GenFdsGlobalVariable.EnableGenfdsMultiThread and self.Rule != 'BINARY':
IsMakefile = True
#
# Get the rule of how to generate Ffs file
#
Rule = self.__GetRule__()
        GenFdsGlobalVariable.VerboseLogger("Packing binaries from INF file: %s" % self.InfFileName)
#
# Convert Fv File Type for PI1.1 SMM driver.
#
if self.ModuleType == SUP_MODULE_DXE_SMM_DRIVER and int(self.PiSpecVersion, 16) >= 0x0001000A:
if Rule.FvFileType == 'DRIVER':
Rule.FvFileType = 'SMM'
#
# Framework SMM Driver has no SMM FV file type
#
if self.ModuleType == SUP_MODULE_DXE_SMM_DRIVER and int(self.PiSpecVersion, 16) < 0x0001000A:
if Rule.FvFileType == 'SMM' or Rule.FvFileType == SUP_MODULE_SMM_CORE:
EdkLogger.error("GenFds", FORMAT_NOT_SUPPORTED, "Framework SMM module doesn't support SMM or SMM_CORE FV file type", File=self.InfFileName)
#
# For the rule only has simpleFile
#
MakefilePath = None
if self.IsBinaryModule:
IsMakefile = False
if IsMakefile:
PathClassObj = PathClass(self.InfFileName, GenFdsGlobalVariable.WorkSpaceDir)
if self.OverrideGuid:
PathClassObj = ProcessDuplicatedInf(PathClassObj, self.OverrideGuid, GenFdsGlobalVariable.WorkSpaceDir)
MakefilePath = PathClassObj.Path, Arch
if isinstance (Rule, RuleSimpleFile.RuleSimpleFile):
SectionOutputList = self.__GenSimpleFileSection__(Rule, IsMakefile=IsMakefile)
FfsOutput = self.__GenSimpleFileFfs__(Rule, SectionOutputList, MakefilePath=MakefilePath)
return FfsOutput
#
# For Rule has ComplexFile
#
elif isinstance(Rule, RuleComplexFile.RuleComplexFile):
InputSectList, InputSectAlignments = self.__GenComplexFileSection__(Rule, FvChildAddr, FvParentAddr, IsMakefile=IsMakefile)
FfsOutput = self.__GenComplexFileFfs__(Rule, InputSectList, InputSectAlignments, MakefilePath=MakefilePath)
return FfsOutput
## __ExtendMacro__() method
#
# Replace macro with its value
#
# @param self The object pointer
# @param String The string to be replaced
# @retval string Macro replaced string
#
def __ExtendMacro__ (self, String):
MacroDict = {
'$(INF_OUTPUT)' : self.EfiOutputPath,
'$(MODULE_NAME)' : self.BaseName,
'$(BUILD_NUMBER)': self.BuildNum,
'$(INF_VERSION)' : self.VersionString,
'$(NAMED_GUID)' : self.ModuleGuid
}
String = GenFdsGlobalVariable.MacroExtend(String, MacroDict)
String = GenFdsGlobalVariable.MacroExtend(String, self.MacroDict)
return String
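    # Example (illustrative; the GUID and base name are assumed values): with
    # BaseName 'Foo' and ModuleGuid '11111111-2222-3333-4444-555555555555',
    # the rule string '$(NAMED_GUID)$(MODULE_NAME).efi' expands to
    # '11111111-2222-3333-4444-555555555555Foo.efi'.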
## __GetRule__() method
#
# Get correct rule for generating FFS for this INF
#
# @param self The object pointer
# @retval Rule Rule object
#
def __GetRule__ (self) :
CurrentArchList = []
if self.CurrentArch is None:
CurrentArchList = ['common']
else:
CurrentArchList.append(self.CurrentArch)
for CurrentArch in CurrentArchList:
RuleName = 'RULE' + \
'.' + \
CurrentArch.upper() + \
'.' + \
self.ModuleType.upper()
if self.Rule is not None:
RuleName = RuleName + \
'.' + \
self.Rule.upper()
Rule = GenFdsGlobalVariable.FdfParser.Profile.RuleDict.get(RuleName)
if Rule is not None:
                GenFdsGlobalVariable.VerboseLogger("Found rule: " + RuleName)
return Rule
RuleName = 'RULE' + \
'.' + \
TAB_COMMON + \
'.' + \
self.ModuleType.upper()
if self.Rule is not None:
RuleName = RuleName + \
'.' + \
self.Rule.upper()
GenFdsGlobalVariable.VerboseLogger ('Trying to apply common rule %s for INF %s' % (RuleName, self.InfFileName))
Rule = GenFdsGlobalVariable.FdfParser.Profile.RuleDict.get(RuleName)
if Rule is not None:
            GenFdsGlobalVariable.VerboseLogger("Found rule: " + RuleName)
return Rule
if Rule is None :
            EdkLogger.error("GenFds", GENFDS_ERROR, 'Cannot find common rule %s for INF %s' \
                            % (RuleName, self.InfFileName))
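    # Lookup order sketch (illustrative): for a DXE_DRIVER built for X64 with an
    # FDF override rule named 'MyRule', the keys tried are, in order,
    #   RULE.X64.DXE_DRIVER.MYRULE, then RULE.COMMON.DXE_DRIVER.MYRULE;
    # without an override rule the '.MYRULE' suffix is omitted from both keys.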
## __GetPlatformArchList__() method
#
# Get Arch list this INF built under
#
# @param self The object pointer
# @retval list Arch list
#
def __GetPlatformArchList__(self):
InfFileKey = os.path.normpath(mws.join(GenFdsGlobalVariable.WorkSpaceDir, self.InfFileName))
DscArchList = []
for Arch in GenFdsGlobalVariable.ArchList :
PlatformDataBase = GenFdsGlobalVariable.WorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, Arch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
if PlatformDataBase is not None:
if InfFileKey in PlatformDataBase.Modules:
DscArchList.append (Arch)
else:
#
                # BaseTools supports building the same module more than once; a module with
                # FILE_GUID overridden gets the file name FILE_GUIDmodule.inf, which is the
                # key used by PlatformDataBase.Modules, while self.MetaFile.Path is the real path
#
for key in PlatformDataBase.Modules:
if InfFileKey == str((PlatformDataBase.Modules[key]).MetaFile.Path):
DscArchList.append (Arch)
break
return DscArchList
## GetCurrentArch() method
#
# Get Arch list of the module from this INF is to be placed into flash
#
# @param self The object pointer
# @retval list Arch list
#
def GetCurrentArch(self) :
TargetArchList = GenFdsGlobalVariable.ArchList
PlatformArchList = self.__GetPlatformArchList__()
CurArchList = TargetArchList
if PlatformArchList != []:
CurArchList = list(set (TargetArchList) & set (PlatformArchList))
        GenFdsGlobalVariable.VerboseLogger("Valid target architecture(s): " + " ".join(CurArchList))
ArchList = []
if self.KeyStringList != []:
for Key in self.KeyStringList:
Key = GenFdsGlobalVariable.MacroExtend(Key)
Target, Tag, Arch = Key.split('_')
if Arch in CurArchList:
ArchList.append(Arch)
if Target not in self.TargetOverrideList:
self.TargetOverrideList.append(Target)
else:
ArchList = CurArchList
UseArchList = TargetArchList
if self.UseArch is not None:
UseArchList = []
UseArchList.append(self.UseArch)
ArchList = list(set (UseArchList) & set (ArchList))
self.InfFileName = NormPath(self.InfFileName)
if len(PlatformArchList) == 0:
self.InDsc = False
PathClassObj = PathClass(self.InfFileName, GenFdsGlobalVariable.WorkSpaceDir)
ErrorCode, ErrorInfo = PathClassObj.Validate(".inf")
if ErrorCode != 0:
EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
if len(ArchList) == 1:
Arch = ArchList[0]
return Arch
elif len(ArchList) > 1:
if len(PlatformArchList) == 0:
EdkLogger.error("GenFds", GENFDS_ERROR, "GenFds command line option has multiple ARCHs %s. Not able to determine which ARCH is valid for Module %s !" % (str(ArchList), self.InfFileName))
else:
EdkLogger.error("GenFds", GENFDS_ERROR, "Module built under multiple ARCHs %s. Not able to determine which output to put into flash for Module %s !" % (str(ArchList), self.InfFileName))
else:
EdkLogger.error("GenFds", GENFDS_ERROR, "Module %s appears under ARCH %s in platform %s, but current deduced ARCH is %s, so NO build output could be put into flash." \
% (self.InfFileName, str(PlatformArchList), GenFdsGlobalVariable.ActivePlatform, str(set (UseArchList) & set (TargetArchList))))
## __GetEFIOutPutPath__() method
#
# Get the output path for generated files
#
# @param self The object pointer
# @retval string Path that output files from this INF go to
#
def __GetEFIOutPutPath__(self):
Arch = ''
OutputPath = ''
DebugPath = ''
(ModulePath, FileName) = os.path.split(self.InfFileName)
Index = FileName.rfind('.')
FileName = FileName[0:Index]
if self.OverrideGuid:
FileName = self.OverrideGuid
Arch = "NoneArch"
if self.CurrentArch is not None:
Arch = self.CurrentArch
OutputPath = os.path.join(GenFdsGlobalVariable.OutputDirDict[Arch],
Arch,
ModulePath,
FileName,
'OUTPUT'
)
DebugPath = os.path.join(GenFdsGlobalVariable.OutputDirDict[Arch],
Arch,
ModulePath,
FileName,
'DEBUG'
)
OutputPath = os.path.abspath(OutputPath)
DebugPath = os.path.abspath(DebugPath)
return OutputPath, DebugPath
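    # Path shape (illustrative, assuming a hypothetical X64 GCC5 DEBUG build):
    #   OutputPath ~ Build/<Platform>/DEBUG_GCC5/X64/<ModulePath>/<ModuleName>/OUTPUT
    #   DebugPath  ~ Build/<Platform>/DEBUG_GCC5/X64/<ModulePath>/<ModuleName>/DEBUG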
## __GenSimpleFileSection__() method
#
# Generate section by specified file name or a list of files with file extension
#
# @param self The object pointer
# @param Rule The rule object used to generate section
# @retval string File name of the generated section file
#
def __GenSimpleFileSection__(self, Rule, IsMakefile = False):
#
# Prepare the parameter of GenSection
#
FileList = []
OutputFileList = []
GenSecInputFile = None
if Rule.FileName is not None:
GenSecInputFile = self.__ExtendMacro__(Rule.FileName)
if os.path.isabs(GenSecInputFile):
GenSecInputFile = os.path.normpath(GenSecInputFile)
else:
GenSecInputFile = os.path.normpath(os.path.join(self.EfiOutputPath, GenSecInputFile))
else:
FileList, IsSect = Section.Section.GetFileList(self, '', Rule.FileExtension)
Index = 1
SectionType = Rule.SectionType
#
# Convert Fv Section Type for PI1.1 SMM driver.
#
if self.ModuleType == SUP_MODULE_DXE_SMM_DRIVER and int(self.PiSpecVersion, 16) >= 0x0001000A:
if SectionType == BINARY_FILE_TYPE_DXE_DEPEX:
SectionType = BINARY_FILE_TYPE_SMM_DEPEX
#
# Framework SMM Driver has no SMM_DEPEX section type
#
if self.ModuleType == SUP_MODULE_DXE_SMM_DRIVER and int(self.PiSpecVersion, 16) < 0x0001000A:
if SectionType == BINARY_FILE_TYPE_SMM_DEPEX:
EdkLogger.error("GenFds", FORMAT_NOT_SUPPORTED, "Framework SMM module doesn't support SMM_DEPEX section type", File=self.InfFileName)
NoStrip = True
if self.ModuleType in (SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM):
if self.KeepReloc is not None:
NoStrip = self.KeepReloc
elif Rule.KeepReloc is not None:
NoStrip = Rule.KeepReloc
elif self.ShadowFromInfFile is not None:
NoStrip = self.ShadowFromInfFile
if FileList != [] :
for File in FileList:
SecNum = '%d' %Index
GenSecOutputFile= self.__ExtendMacro__(Rule.NameGuid) + \
SectionSuffix[SectionType] + SUP_MODULE_SEC + SecNum
Index = Index + 1
OutputFile = os.path.join(self.OutputPath, GenSecOutputFile)
                # No macro dictionary is available in this scope
                File = GenFdsGlobalVariable.MacroExtend(File, None, self.CurrentArch)
#Get PE Section alignment when align is set to AUTO
if self.Alignment == 'Auto' and (SectionType == BINARY_FILE_TYPE_PE32 or SectionType == BINARY_FILE_TYPE_TE):
ImageObj = PeImageClass (File)
if ImageObj.SectionAlignment < 0x400:
self.Alignment = str (ImageObj.SectionAlignment)
elif ImageObj.SectionAlignment < 0x100000:
self.Alignment = str (ImageObj.SectionAlignment // 0x400) + 'K'
else:
self.Alignment = str (ImageObj.SectionAlignment // 0x100000) + 'M'
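                # Alignment string examples (illustrative): a PE section alignment
                # of 0x20 yields '32', 0x1000 yields '4K', and 0x200000 yields '2M'.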
if not NoStrip:
                    FileBeforeStrip = os.path.join(self.OutputPath, self.BaseName + '.reloc')
if not os.path.exists(FileBeforeStrip) or \
(os.path.getmtime(File) > os.path.getmtime(FileBeforeStrip)):
CopyLongFilePath(File, FileBeforeStrip)
                    StrippedFile = os.path.join(self.OutputPath, self.BaseName + '.stripped')
GenFdsGlobalVariable.GenerateFirmwareImage(
StrippedFile,
[File],
Strip=True,
IsMakefile=IsMakefile
)
File = StrippedFile
if SectionType == BINARY_FILE_TYPE_TE:
TeFile = os.path.join( self.OutputPath, self.ModuleGuid + 'Te.raw')
GenFdsGlobalVariable.GenerateFirmwareImage(
TeFile,
[File],
Type='te',
IsMakefile=IsMakefile
)
File = TeFile
GenFdsGlobalVariable.GenerateSection(OutputFile, [File], Section.Section.SectionType[SectionType], IsMakefile=IsMakefile)
OutputFileList.append(OutputFile)
else:
SecNum = '%d' %Index
GenSecOutputFile= self.__ExtendMacro__(Rule.NameGuid) + \
SectionSuffix[SectionType] + SUP_MODULE_SEC + SecNum
OutputFile = os.path.join(self.OutputPath, GenSecOutputFile)
            # No macro dictionary is available in this scope
            GenSecInputFile = GenFdsGlobalVariable.MacroExtend(GenSecInputFile, None, self.CurrentArch)
#Get PE Section alignment when align is set to AUTO
if self.Alignment == 'Auto' and (SectionType == BINARY_FILE_TYPE_PE32 or SectionType == BINARY_FILE_TYPE_TE):
ImageObj = PeImageClass (GenSecInputFile)
if ImageObj.SectionAlignment < 0x400:
self.Alignment = str (ImageObj.SectionAlignment)
elif ImageObj.SectionAlignment < 0x100000:
self.Alignment = str (ImageObj.SectionAlignment // 0x400) + 'K'
else:
self.Alignment = str (ImageObj.SectionAlignment // 0x100000) + 'M'
if not NoStrip:
                FileBeforeStrip = os.path.join(self.OutputPath, self.BaseName + '.reloc')
if not os.path.exists(FileBeforeStrip) or \
(os.path.getmtime(GenSecInputFile) > os.path.getmtime(FileBeforeStrip)):
CopyLongFilePath(GenSecInputFile, FileBeforeStrip)
                StrippedFile = os.path.join(self.OutputPath, self.BaseName + '.stripped')
GenFdsGlobalVariable.GenerateFirmwareImage(
StrippedFile,
[GenSecInputFile],
Strip=True,
IsMakefile=IsMakefile
)
GenSecInputFile = StrippedFile
if SectionType == BINARY_FILE_TYPE_TE:
TeFile = os.path.join( self.OutputPath, self.ModuleGuid + 'Te.raw')
GenFdsGlobalVariable.GenerateFirmwareImage(
TeFile,
[GenSecInputFile],
Type='te',
IsMakefile=IsMakefile
)
GenSecInputFile = TeFile
GenFdsGlobalVariable.GenerateSection(OutputFile, [GenSecInputFile], Section.Section.SectionType[SectionType], IsMakefile=IsMakefile)
OutputFileList.append(OutputFile)
return OutputFileList
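    # Naming sketch (illustrative): with Rule.NameGuid expanding to '<GUID>' and a
    # PE32 section type, each generated section file is named
    # '<GUID>' + SectionSuffix[PE32] + 'SEC' + <index> under self.OutputPath.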
## __GenSimpleFileFfs__() method
#
# Generate FFS
#
# @param self The object pointer
# @param Rule The rule object used to generate section
# @param InputFileList The output file list from GenSection
# @retval string Generated FFS file name
#
def __GenSimpleFileFfs__(self, Rule, InputFileList, MakefilePath = None):
FfsOutput = self.OutputPath + \
os.sep + \
self.__ExtendMacro__(Rule.NameGuid) + \
'.ffs'
GenFdsGlobalVariable.VerboseLogger(self.__ExtendMacro__(Rule.NameGuid))
InputSection = []
SectionAlignments = []
for InputFile in InputFileList:
InputSection.append(InputFile)
SectionAlignments.append(Rule.SectAlignment)
if Rule.NameGuid is not None and Rule.NameGuid.startswith('PCD('):
PcdValue = GenFdsGlobalVariable.GetPcdValue(Rule.NameGuid)
if len(PcdValue) == 0:
EdkLogger.error("GenFds", GENFDS_ERROR, '%s NOT defined.' \
% (Rule.NameGuid))
if PcdValue.startswith('{'):
PcdValue = GuidStructureByteArrayToGuidString(PcdValue)
RegistryGuidStr = PcdValue
if len(RegistryGuidStr) == 0:
                EdkLogger.error("GenFds", GENFDS_ERROR, 'GUID value for %s is in the wrong format.' \
                                % (Rule.NameGuid))
self.ModuleGuid = RegistryGuidStr
GenFdsGlobalVariable.GenerateFfs(FfsOutput, InputSection,
FdfFvFileTypeToFileType[Rule.FvFileType],
self.ModuleGuid, Fixed=Rule.Fixed,
CheckSum=Rule.CheckSum, Align=Rule.Alignment,
SectionAlign=SectionAlignments,
MakefilePath=MakefilePath
)
return FfsOutput
## __GenComplexFileSection__() method
#
# Generate section by sections in Rule
#
# @param self The object pointer
# @param Rule The rule object used to generate section
# @param FvChildAddr Array of the inside FvImage base address
# @param FvParentAddr Parent Fv base address
# @retval string File name of the generated section file
#
def __GenComplexFileSection__(self, Rule, FvChildAddr, FvParentAddr, IsMakefile = False):
if self.ModuleType in (SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM, SUP_MODULE_MM_CORE_STANDALONE):
if Rule.KeepReloc is not None:
self.KeepRelocFromRule = Rule.KeepReloc
SectFiles = []
SectAlignments = []
Index = 1
HasGeneratedFlag = False
if self.PcdIsDriver == 'PEI_PCD_DRIVER':
if self.IsBinaryModule:
PcdExDbFileName = os.path.join(GenFdsGlobalVariable.FvDir, "PEIPcdDataBase.raw")
else:
PcdExDbFileName = os.path.join(self.EfiOutputPath, "PEIPcdDataBase.raw")
PcdExDbSecName = os.path.join(self.OutputPath, "PEIPcdDataBaseSec.raw")
GenFdsGlobalVariable.GenerateSection(PcdExDbSecName,
[PcdExDbFileName],
"EFI_SECTION_RAW",
IsMakefile = IsMakefile
)
SectFiles.append(PcdExDbSecName)
SectAlignments.append(None)
elif self.PcdIsDriver == 'DXE_PCD_DRIVER':
if self.IsBinaryModule:
PcdExDbFileName = os.path.join(GenFdsGlobalVariable.FvDir, "DXEPcdDataBase.raw")
else:
PcdExDbFileName = os.path.join(self.EfiOutputPath, "DXEPcdDataBase.raw")
PcdExDbSecName = os.path.join(self.OutputPath, "DXEPcdDataBaseSec.raw")
GenFdsGlobalVariable.GenerateSection(PcdExDbSecName,
[PcdExDbFileName],
"EFI_SECTION_RAW",
IsMakefile = IsMakefile
)
SectFiles.append(PcdExDbSecName)
SectAlignments.append(None)
for Sect in Rule.SectionList:
SecIndex = '%d' %Index
SectList = []
#
# Convert Fv Section Type for PI1.1 SMM driver.
#
if self.ModuleType == SUP_MODULE_DXE_SMM_DRIVER and int(self.PiSpecVersion, 16) >= 0x0001000A:
if Sect.SectionType == BINARY_FILE_TYPE_DXE_DEPEX:
Sect.SectionType = BINARY_FILE_TYPE_SMM_DEPEX
#
# Framework SMM Driver has no SMM_DEPEX section type
#
if self.ModuleType == SUP_MODULE_DXE_SMM_DRIVER and int(self.PiSpecVersion, 16) < 0x0001000A:
if Sect.SectionType == BINARY_FILE_TYPE_SMM_DEPEX:
EdkLogger.error("GenFds", FORMAT_NOT_SUPPORTED, "Framework SMM module doesn't support SMM_DEPEX section type", File=self.InfFileName)
#
# process the inside FvImage from FvSection or GuidSection
#
if FvChildAddr != []:
if isinstance(Sect, FvImageSection):
Sect.FvAddr = FvChildAddr.pop(0)
elif isinstance(Sect, GuidSection):
Sect.FvAddr = FvChildAddr
if FvParentAddr is not None and isinstance(Sect, GuidSection):
Sect.FvParentAddr = FvParentAddr
if Rule.KeyStringList != []:
SectList, Align = Sect.GenSection(self.OutputPath, self.ModuleGuid, SecIndex, Rule.KeyStringList, self, IsMakefile = IsMakefile)
else :
SectList, Align = Sect.GenSection(self.OutputPath, self.ModuleGuid, SecIndex, self.KeyStringList, self, IsMakefile = IsMakefile)
if not HasGeneratedFlag:
UniVfrOffsetFileSection = ""
ModuleFileName = mws.join(GenFdsGlobalVariable.WorkSpaceDir, self.InfFileName)
InfData = GenFdsGlobalVariable.WorkSpace.BuildObject[PathClass(ModuleFileName), self.CurrentArch]
#
            # Search the source list in InfData to check whether any .vfr files exist.
#
VfrUniBaseName = {}
VfrUniOffsetList = []
for SourceFile in InfData.Sources:
if SourceFile.Type.upper() == ".VFR" :
#
# search the .map file to find the offset of vfr binary in the PE32+/TE file.
#
VfrUniBaseName[SourceFile.BaseName] = (SourceFile.BaseName + "Bin")
if SourceFile.Type.upper() == ".UNI" :
#
# search the .map file to find the offset of Uni strings binary in the PE32+/TE file.
#
VfrUniBaseName["UniOffsetName"] = (self.BaseName + "Strings")
if len(VfrUniBaseName) > 0:
if IsMakefile:
if InfData.BuildType != 'UEFI_HII':
UniVfrOffsetFileName = os.path.join(self.OutputPath, self.BaseName + '.offset')
UniVfrOffsetFileSection = os.path.join(self.OutputPath, self.BaseName + 'Offset' + '.raw')
UniVfrOffsetFileNameList = []
UniVfrOffsetFileNameList.append(UniVfrOffsetFileName)
TrimCmd = "Trim --Vfr-Uni-Offset -o %s --ModuleName=%s --DebugDir=%s " % (UniVfrOffsetFileName, self.BaseName, self.EfiDebugPath)
GenFdsGlobalVariable.SecCmdList.append(TrimCmd)
GenFdsGlobalVariable.GenerateSection(UniVfrOffsetFileSection,
[UniVfrOffsetFileName],
"EFI_SECTION_RAW",
IsMakefile = True
)
else:
VfrUniOffsetList = self.__GetBuildOutputMapFileVfrUniInfo(VfrUniBaseName)
#
# Generate the Raw data of raw section
#
if VfrUniOffsetList:
UniVfrOffsetFileName = os.path.join(self.OutputPath, self.BaseName + '.offset')
UniVfrOffsetFileSection = os.path.join(self.OutputPath, self.BaseName + 'Offset' + '.raw')
FfsInfStatement.__GenUniVfrOffsetFile (VfrUniOffsetList, UniVfrOffsetFileName)
UniVfrOffsetFileNameList = []
UniVfrOffsetFileNameList.append(UniVfrOffsetFileName)
"""Call GenSection"""
GenFdsGlobalVariable.GenerateSection(UniVfrOffsetFileSection,
UniVfrOffsetFileNameList,
"EFI_SECTION_RAW"
)
#os.remove(UniVfrOffsetFileName)
if UniVfrOffsetFileSection:
SectList.append(UniVfrOffsetFileSection)
HasGeneratedFlag = True
for SecName in SectList :
SectFiles.append(SecName)
SectAlignments.append(Align)
Index = Index + 1
return SectFiles, SectAlignments
## __GenComplexFileFfs__() method
#
# Generate FFS
#
# @param self The object pointer
# @param Rule The rule object used to generate section
# @param InputFileList The output file list from GenSection
# @retval string Generated FFS file name
#
def __GenComplexFileFfs__(self, Rule, InputFile, Alignments, MakefilePath = None):
if Rule.NameGuid is not None and Rule.NameGuid.startswith('PCD('):
PcdValue = GenFdsGlobalVariable.GetPcdValue(Rule.NameGuid)
if len(PcdValue) == 0:
EdkLogger.error("GenFds", GENFDS_ERROR, '%s NOT defined.' \
% (Rule.NameGuid))
if PcdValue.startswith('{'):
PcdValue = GuidStructureByteArrayToGuidString(PcdValue)
RegistryGuidStr = PcdValue
if len(RegistryGuidStr) == 0:
                EdkLogger.error("GenFds", GENFDS_ERROR, 'GUID value for %s is in the wrong format.' \
                                % (Rule.NameGuid))
self.ModuleGuid = RegistryGuidStr
FfsOutput = os.path.join( self.OutputPath, self.ModuleGuid + '.ffs')
GenFdsGlobalVariable.GenerateFfs(FfsOutput, InputFile,
FdfFvFileTypeToFileType[Rule.FvFileType],
self.ModuleGuid, Fixed=Rule.Fixed,
CheckSum=Rule.CheckSum, Align=Rule.Alignment,
SectionAlign=Alignments,
MakefilePath=MakefilePath
)
return FfsOutput
    ## __GetBuildOutputMapFileVfrUniInfo() method
    #
    #   Find the offsets of the UNI/VFR objects in the EFI image file.
    #
    #   @param self            The object pointer
    #   @param VfrUniBaseName  A dictionary containing the UNI/VFR object names.
    #   @retval RetValue       A list containing the offsets of the UNI/VFR objects.
    #
def __GetBuildOutputMapFileVfrUniInfo(self, VfrUniBaseName):
MapFileName = os.path.join(self.EfiOutputPath, self.BaseName + ".map")
EfiFileName = os.path.join(self.EfiOutputPath, self.BaseName + ".efi")
return GetVariableOffset(MapFileName, EfiFileName, list(VfrUniBaseName.values()))
## __GenUniVfrOffsetFile() method
#
# Generate the offset file for the module which contain VFR or UNI file.
#
# @param VfrUniOffsetList A list contain the VFR/UNI offsets in the EFI image file.
# @param UniVfrOffsetFileName The output offset file name.
#
@staticmethod
def __GenUniVfrOffsetFile(VfrUniOffsetList, UniVfrOffsetFileName):
        # Use an instance of BytesIO to cache data
fStringIO = BytesIO()
for Item in VfrUniOffsetList:
if (Item[0].find("Strings") != -1):
#
# UNI offset in image.
# GUID + Offset
# { 0x8913c5e0, 0x33f6, 0x4d86, { 0x9b, 0xf1, 0x43, 0xef, 0x89, 0xfc, 0x6, 0x66 } }
#
UniGuid = b'\xe0\xc5\x13\x89\xf63\x86M\x9b\xf1C\xef\x89\xfc\x06f'
fStringIO.write(UniGuid)
UniValue = pack ('Q', int (Item[1], 16))
fStringIO.write (UniValue)
else:
#
# VFR binary offset in image.
# GUID + Offset
# { 0xd0bc7cb4, 0x6a47, 0x495f, { 0xaa, 0x11, 0x71, 0x7, 0x46, 0xda, 0x6, 0xa2 } };
#
VfrGuid = b'\xb4|\xbc\xd0Gj_I\xaa\x11q\x07F\xda\x06\xa2'
fStringIO.write(VfrGuid)
VfrValue = pack ('Q', int (Item[1], 16))
fStringIO.write (VfrValue)
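        # Record layout (illustrative): each entry is 24 bytes -- a 16-byte GUID in
        # its binary EFI_GUID form (first three fields little-endian, trailing eight
        # bytes verbatim) followed by a 64-bit image offset packed with pack('Q', ...)
        # in native byte order (little-endian on typical build hosts).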
#
# write data into file.
#
try :
SaveFileOnChange(UniVfrOffsetFileName, fStringIO.getvalue())
except:
            EdkLogger.error("GenFds", FILE_WRITE_FAILURE, "Write data to file %s failed, please check whether the file is locked or in use by another application." % UniVfrOffsetFileName, None)
fStringIO.close ()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/FfsInfStatement.py
|
## @file
# process APRIORI file data and generate PEI/DXE APRIORI file
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from struct import pack
import Common.LongFilePathOs as os
from io import BytesIO
from .FfsFileStatement import FileStatement
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from Common.StringUtils import NormPath
from Common.Misc import SaveFileOnChange, PathClass
from Common.EdkLogger import error as EdkLoggerError
from Common.BuildToolError import RESOURCE_NOT_AVAILABLE
from Common.DataType import TAB_COMMON
DXE_APRIORI_GUID = "FC510EE7-FFDC-11D4-BD41-0080C73C8881"
PEI_APRIORI_GUID = "1B45CC0A-156A-428A-AF62-49864DA0E6E6"
## process APRIORI file data and generate PEI/DXE APRIORI file
#
#
class AprioriSection (object):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
self.DefineVarDict = {}
self.FfsList = []
self.AprioriType = ""
## GenFfs() method
#
# Generate FFS for APRIORI file
#
# @param self The object pointer
# @param FvName for whom apriori file generated
# @param Dict dictionary contains macro and its value
# @retval string Generated file name
#
def GenFfs (self, FvName, Dict = None, IsMakefile = False):
if Dict is None:
Dict = {}
Buffer = BytesIO()
if self.AprioriType == "PEI":
AprioriFileGuid = PEI_APRIORI_GUID
else:
AprioriFileGuid = DXE_APRIORI_GUID
OutputAprFilePath = os.path.join (GenFdsGlobalVariable.WorkSpaceDir, \
GenFdsGlobalVariable.FfsDir,\
AprioriFileGuid + FvName)
if not os.path.exists(OutputAprFilePath):
os.makedirs(OutputAprFilePath)
OutputAprFileName = os.path.join( OutputAprFilePath, \
AprioriFileGuid + FvName + '.Apri' )
AprFfsFileName = os.path.join (OutputAprFilePath,\
AprioriFileGuid + FvName + '.Ffs')
Dict.update(self.DefineVarDict)
InfFileName = None
for FfsObj in self.FfsList:
Guid = ""
if isinstance(FfsObj, FileStatement):
Guid = FfsObj.NameGuid
else:
InfFileName = NormPath(FfsObj.InfFileName)
Arch = FfsObj.GetCurrentArch()
if Arch:
Dict['$(ARCH)'] = Arch
InfFileName = GenFdsGlobalVariable.MacroExtend(InfFileName, Dict, Arch)
if Arch:
Inf = GenFdsGlobalVariable.WorkSpace.BuildObject[PathClass(InfFileName, GenFdsGlobalVariable.WorkSpaceDir), Arch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
Guid = Inf.Guid
else:
Inf = GenFdsGlobalVariable.WorkSpace.BuildObject[PathClass(InfFileName, GenFdsGlobalVariable.WorkSpaceDir), TAB_COMMON, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
Guid = Inf.Guid
if not Inf.Module.Binaries:
EdkLoggerError("GenFds", RESOURCE_NOT_AVAILABLE,
"INF %s not found in build ARCH %s!" \
% (InfFileName, GenFdsGlobalVariable.ArchList))
GuidPart = Guid.split('-')
Buffer.write(pack('I', int(GuidPart[0], 16)))
Buffer.write(pack('H', int(GuidPart[1], 16)))
Buffer.write(pack('H', int(GuidPart[2], 16)))
for Num in range(2):
Char = GuidPart[3][Num*2:Num*2+2]
Buffer.write(pack('B', int(Char, 16)))
for Num in range(6):
Char = GuidPart[4][Num*2:Num*2+2]
Buffer.write(pack('B', int(Char, 16)))
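            # Serialization example (illustrative, assuming a little-endian host since
            # pack() uses native order here): the registry-format GUID
            # 'FC510EE7-FFDC-11D4-BD41-0080C73C8881' is emitted as the 16-byte binary
            # EFI_GUID: E7 0E 51 FC DC FF D4 11 BD 41 00 80 C7 3C 88 81.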
SaveFileOnChange(OutputAprFileName, Buffer.getvalue())
RawSectionFileName = os.path.join( OutputAprFilePath, \
AprioriFileGuid + FvName + '.raw' )
MakefilePath = None
if IsMakefile:
if not InfFileName:
return None
MakefilePath = InfFileName, Arch
GenFdsGlobalVariable.GenerateSection(RawSectionFileName, [OutputAprFileName], 'EFI_SECTION_RAW', IsMakefile=IsMakefile)
GenFdsGlobalVariable.GenerateFfs(AprFfsFileName, [RawSectionFileName],
'EFI_FV_FILETYPE_FREEFORM', AprioriFileGuid, MakefilePath=MakefilePath)
return AprFfsFileName
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/AprioriSection.py
|
## @file
# process OptionROM generation from INF statement
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from . import RuleSimpleFile
from . import RuleComplexFile
from . import Section
import Common.GlobalData as GlobalData
from Common.DataType import *
from Common.StringUtils import *
from .FfsInfStatement import FfsInfStatement
from .GenFdsGlobalVariable import GenFdsGlobalVariable
##
#
#
class OptRomInfStatement (FfsInfStatement):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
FfsInfStatement.__init__(self)
self.OverrideAttribs = None
## __GetOptRomParams() method
#
# Parse inf file to get option ROM related parameters
#
# @param self The object pointer
#
def __GetOptRomParams(self):
if self.OverrideAttribs is None:
self.OverrideAttribs = OverrideAttribs()
if self.OverrideAttribs.NeedCompress is None:
self.OverrideAttribs.NeedCompress = self.OptRomDefs.get ('PCI_COMPRESS')
if self.OverrideAttribs.NeedCompress is not None:
if self.OverrideAttribs.NeedCompress.upper() not in ('TRUE', 'FALSE'):
GenFdsGlobalVariable.ErrorLogger( "Expected TRUE/FALSE for PCI_COMPRESS: %s" %self.InfFileName)
self.OverrideAttribs.NeedCompress = \
self.OverrideAttribs.NeedCompress.upper() == 'TRUE'
if self.OverrideAttribs.PciVendorId is None:
self.OverrideAttribs.PciVendorId = self.OptRomDefs.get ('PCI_VENDOR_ID')
if self.OverrideAttribs.PciClassCode is None:
self.OverrideAttribs.PciClassCode = self.OptRomDefs.get ('PCI_CLASS_CODE')
if self.OverrideAttribs.PciDeviceId is None:
self.OverrideAttribs.PciDeviceId = self.OptRomDefs.get ('PCI_DEVICE_ID')
if self.OverrideAttribs.PciRevision is None:
self.OverrideAttribs.PciRevision = self.OptRomDefs.get ('PCI_REVISION')
# InfObj = GenFdsGlobalVariable.WorkSpace.BuildObject[self.PathClassObj, self.CurrentArch]
# RecordList = InfObj._RawData[MODEL_META_DATA_HEADER, InfObj._Arch, InfObj._Platform]
# for Record in RecordList:
# Record = ReplaceMacros(Record, GlobalData.gEdkGlobal, False)
# Name = Record[0]
## GenFfs() method
#
# Generate FFS
#
# @param self The object pointer
# @retval string Generated .efi file name
#
def GenFfs(self, IsMakefile=False):
#
# Parse Inf file get Module related information
#
self.__InfParse__()
self.__GetOptRomParams()
#
# Get the rule of how to generate Ffs file
#
Rule = self.__GetRule__()
        GenFdsGlobalVariable.VerboseLogger("Packing binaries from INF file: %s" % self.InfFileName)
#
# For the rule only has simpleFile
#
if isinstance (Rule, RuleSimpleFile.RuleSimpleFile) :
EfiOutputList = self.__GenSimpleFileSection__(Rule, IsMakefile=IsMakefile)
return EfiOutputList
#
# For Rule has ComplexFile
#
elif isinstance(Rule, RuleComplexFile.RuleComplexFile):
EfiOutputList = self.__GenComplexFileSection__(Rule, IsMakefile=IsMakefile)
return EfiOutputList
## __GenSimpleFileSection__() method
#
# Get .efi files according to simple rule.
#
# @param self The object pointer
# @param Rule The rule object used to generate section
# @retval string File name of the generated section file
#
def __GenSimpleFileSection__(self, Rule, IsMakefile = False):
#
# Prepare the parameter of GenSection
#
OutputFileList = []
if Rule.FileName is not None:
GenSecInputFile = self.__ExtendMacro__(Rule.FileName)
OutputFileList.append(GenSecInputFile)
else:
OutputFileList, IsSect = Section.Section.GetFileList(self, '', Rule.FileExtension)
return OutputFileList
## __GenComplexFileSection__() method
#
# Get .efi by sections in complex Rule
#
# @param self The object pointer
# @param Rule The rule object used to generate section
# @retval string File name of the generated section file
#
def __GenComplexFileSection__(self, Rule, IsMakefile=False):
OutputFileList = []
for Sect in Rule.SectionList:
if Sect.SectionType == BINARY_FILE_TYPE_PE32:
if Sect.FileName is not None:
GenSecInputFile = self.__ExtendMacro__(Sect.FileName)
OutputFileList.append(GenSecInputFile)
else:
FileList, IsSect = Section.Section.GetFileList(self, '', Sect.FileExtension)
OutputFileList.extend(FileList)
return OutputFileList
class OverrideAttribs:
## The constructor
#
# @param self The object pointer
#
def __init__(self):
self.PciVendorId = None
self.PciClassCode = None
self.PciDeviceId = None
self.PciRevision = None
self.NeedCompress = None
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/OptRomInfStatement.py
|
## @file
# Rule object for generating FFS
#
# Copyright (c) 2007, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from CommonDataClass.FdfClass import RuleClassObject
## Rule base class
#
#
class Rule(RuleClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
RuleClassObject.__init__(self)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/Rule.py
|
## @file
# Simple Rule object for generating FFS
#
# Copyright (c) 2007, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from . import Rule
from CommonDataClass.FdfClass import RuleSimpleFileClassObject
## simple rule
#
#
class RuleSimpleFile (RuleSimpleFileClassObject) :
## The constructor
#
# @param self The object pointer
#
def __init__(self):
RuleSimpleFileClassObject.__init__(self)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/RuleSimpleFile.py
|
## @file
# process rule section generation
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from struct import *
from . import Section
from .GenFdsGlobalVariable import GenFdsGlobalVariable
import subprocess
from .Ffs import SectionSuffix
import Common.LongFilePathOs as os
from CommonDataClass.FdfClass import EfiSectionClassObject
from Common import EdkLogger
from Common.BuildToolError import *
from Common.Misc import PeImageClass
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.LongFilePathSupport import CopyLongFilePath
from Common.DataType import *
## generate rule section
#
#
class EfiSection (EfiSectionClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
EfiSectionClassObject.__init__(self)
## GenSection() method
#
# Generate rule section
#
# @param self The object pointer
# @param OutputPath Where to place output file
# @param ModuleName Which module this section belongs to
# @param SecNum Index of section
# @param KeyStringList Filter for inputs of section generation
# @param FfsInf FfsInfStatement object that contains this section data
# @param Dict dictionary contains macro and its value
# @retval tuple (Generated file name list, section alignment)
#
def GenSection(self, OutputPath, ModuleName, SecNum, KeyStringList, FfsInf = None, Dict = None, IsMakefile = False) :
if self.FileName is not None and self.FileName.startswith('PCD('):
self.FileName = GenFdsGlobalVariable.GetPcdValue(self.FileName)
"""Prepare the parameter of GenSection"""
if FfsInf is not None :
InfFileName = FfsInf.InfFileName
SectionType = FfsInf.__ExtendMacro__(self.SectionType)
Filename = FfsInf.__ExtendMacro__(self.FileName)
BuildNum = FfsInf.__ExtendMacro__(self.BuildNum)
StringData = FfsInf.__ExtendMacro__(self.StringData)
ModuleNameStr = FfsInf.__ExtendMacro__('$(MODULE_NAME)')
NoStrip = True
if FfsInf.ModuleType in (SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM, SUP_MODULE_MM_CORE_STANDALONE) and SectionType in (BINARY_FILE_TYPE_TE, BINARY_FILE_TYPE_PE32):
if FfsInf.KeepReloc is not None:
NoStrip = FfsInf.KeepReloc
elif FfsInf.KeepRelocFromRule is not None:
NoStrip = FfsInf.KeepRelocFromRule
elif self.KeepReloc is not None:
NoStrip = self.KeepReloc
elif FfsInf.ShadowFromInfFile is not None:
NoStrip = FfsInf.ShadowFromInfFile
else:
            EdkLogger.error("GenFds", GENFDS_ERROR, "Rule applied with no FfsInf object for module %s!" % ModuleName)
"""If the file name was pointed out, add it in FileList"""
FileList = []
if Dict is None:
Dict = {}
if Filename is not None:
Filename = GenFdsGlobalVariable.MacroExtend(Filename, Dict)
# check if the path is absolute or relative
if os.path.isabs(Filename):
Filename = os.path.normpath(Filename)
else:
Filename = os.path.normpath(os.path.join(FfsInf.EfiOutputPath, Filename))
if not self.Optional:
FileList.append(Filename)
elif os.path.exists(Filename):
FileList.append(Filename)
elif IsMakefile:
SuffixMap = FfsInf.GetFinalTargetSuffixMap()
if '.depex' in SuffixMap:
FileList.append(Filename)
else:
FileList, IsSect = Section.Section.GetFileList(FfsInf, self.FileType, self.FileExtension, Dict, IsMakefile=IsMakefile, SectionType=SectionType)
if IsSect :
return FileList, self.Alignment
Index = 0
Align = self.Alignment
""" If Section type is 'VERSION'"""
OutputFileList = []
if SectionType == 'VERSION':
InfOverrideVerString = False
if FfsInf.Version is not None:
#StringData = FfsInf.Version
BuildNum = FfsInf.Version
InfOverrideVerString = True
if InfOverrideVerString:
#VerTuple = ('-n', '"' + StringData + '"')
if BuildNum is not None and BuildNum != '':
BuildNumTuple = ('-j', BuildNum)
else:
BuildNumTuple = tuple()
Num = SecNum
OutputFile = os.path.join( OutputPath, ModuleName + SUP_MODULE_SEC + str(Num) + SectionSuffix.get(SectionType))
GenFdsGlobalVariable.GenerateSection(OutputFile, [], 'EFI_SECTION_VERSION',
#Ui=StringData,
Ver=BuildNum,
IsMakefile=IsMakefile)
OutputFileList.append(OutputFile)
elif FileList != []:
for File in FileList:
Index = Index + 1
Num = '%s.%d' %(SecNum, Index)
OutputFile = os.path.join(OutputPath, ModuleName + SUP_MODULE_SEC + Num + SectionSuffix.get(SectionType))
f = open(File, 'r')
VerString = f.read()
f.close()
BuildNum = VerString
if BuildNum is not None and BuildNum != '':
BuildNumTuple = ('-j', BuildNum)
GenFdsGlobalVariable.GenerateSection(OutputFile, [], 'EFI_SECTION_VERSION',
#Ui=VerString,
Ver=BuildNum,
IsMakefile=IsMakefile)
OutputFileList.append(OutputFile)
else:
BuildNum = StringData
if BuildNum is not None and BuildNum != '':
BuildNumTuple = ('-j', BuildNum)
else:
BuildNumTuple = tuple()
BuildNumString = ' ' + ' '.join(BuildNumTuple)
#if VerString == '' and
if BuildNumString == '':
if self.Optional == True :
                    GenFdsGlobalVariable.VerboseLogger("Optional section doesn't exist!")
return [], None
else:
                    EdkLogger.error("GenFds", GENFDS_ERROR, "File: %s is missing the Version section value" % InfFileName)
Num = SecNum
OutputFile = os.path.join( OutputPath, ModuleName + SUP_MODULE_SEC + str(Num) + SectionSuffix.get(SectionType))
GenFdsGlobalVariable.GenerateSection(OutputFile, [], 'EFI_SECTION_VERSION',
#Ui=VerString,
Ver=BuildNum,
IsMakefile=IsMakefile)
OutputFileList.append(OutputFile)
#
# If Section Type is BINARY_FILE_TYPE_UI
#
elif SectionType == BINARY_FILE_TYPE_UI:
InfOverrideUiString = False
if FfsInf.Ui is not None:
StringData = FfsInf.Ui
InfOverrideUiString = True
if InfOverrideUiString:
Num = SecNum
if IsMakefile and StringData == ModuleNameStr:
StringData = "$(MODULE_NAME)"
OutputFile = os.path.join( OutputPath, ModuleName + SUP_MODULE_SEC + str(Num) + SectionSuffix.get(SectionType))
GenFdsGlobalVariable.GenerateSection(OutputFile, [], 'EFI_SECTION_USER_INTERFACE',
Ui=StringData, IsMakefile=IsMakefile)
OutputFileList.append(OutputFile)
elif FileList != []:
for File in FileList:
Index = Index + 1
Num = '%s.%d' %(SecNum, Index)
OutputFile = os.path.join(OutputPath, ModuleName + SUP_MODULE_SEC + Num + SectionSuffix.get(SectionType))
f = open(File, 'r')
UiString = f.read()
f.close()
if IsMakefile and UiString == ModuleNameStr:
UiString = "$(MODULE_NAME)"
GenFdsGlobalVariable.GenerateSection(OutputFile, [], 'EFI_SECTION_USER_INTERFACE',
Ui=UiString, IsMakefile=IsMakefile)
OutputFileList.append(OutputFile)
else:
if StringData is not None and len(StringData) > 0:
UiTuple = ('-n', '"' + StringData + '"')
else:
UiTuple = tuple()
if self.Optional == True :
                GenFdsGlobalVariable.VerboseLogger("Optional section doesn't exist!")
                return [], None
else:
                EdkLogger.error("GenFds", GENFDS_ERROR, "File: %s is missing the UI section value" % InfFileName)
Num = SecNum
if IsMakefile and StringData == ModuleNameStr:
StringData = "$(MODULE_NAME)"
OutputFile = os.path.join( OutputPath, ModuleName + SUP_MODULE_SEC + str(Num) + SectionSuffix.get(SectionType))
GenFdsGlobalVariable.GenerateSection(OutputFile, [], 'EFI_SECTION_USER_INTERFACE',
Ui=StringData, IsMakefile=IsMakefile)
OutputFileList.append(OutputFile)
#
# If Section Type is BINARY_FILE_TYPE_RAW
#
elif SectionType == BINARY_FILE_TYPE_RAW:
"""If File List is empty"""
if FileList == []:
if self.Optional == True:
                    GenFdsGlobalVariable.VerboseLogger("Optional section doesn't exist!")
return [], None
else:
EdkLogger.error("GenFds", GENFDS_ERROR, "Output file for %s section could not be found for %s" % (SectionType, InfFileName))
elif len(FileList) > 1:
EdkLogger.error("GenFds", GENFDS_ERROR,
"Files suffixed with %s are not allowed to have more than one file in %s[Binaries] section" % (
self.FileExtension, InfFileName))
else:
for File in FileList:
File = GenFdsGlobalVariable.MacroExtend(File, Dict)
OutputFileList.append(File)
else:
"""If File List is empty"""
if FileList == [] :
if self.Optional == True:
                    GenFdsGlobalVariable.VerboseLogger("Optional section doesn't exist!")
return [], None
else:
EdkLogger.error("GenFds", GENFDS_ERROR, "Output file for %s section could not be found for %s" % (SectionType, InfFileName))
else:
"""Convert the File to Section file one by one """
for File in FileList:
""" Copy Map file to FFS output path """
Index = Index + 1
Num = '%s.%d' %(SecNum, Index)
OutputFile = os.path.join( OutputPath, ModuleName + SUP_MODULE_SEC + Num + SectionSuffix.get(SectionType))
File = GenFdsGlobalVariable.MacroExtend(File, Dict)
#Get PE Section alignment when align is set to AUTO
if self.Alignment == 'Auto' and (SectionType == BINARY_FILE_TYPE_PE32 or SectionType == BINARY_FILE_TYPE_TE):
Align = "0"
if File[(len(File)-4):] == '.efi' and FfsInf.InfModule.BaseName == os.path.basename(File)[:-4]:
MapFile = File.replace('.efi', '.map')
CopyMapFile = os.path.join(OutputPath, ModuleName + '.map')
if IsMakefile:
if GenFdsGlobalVariable.CopyList == []:
GenFdsGlobalVariable.CopyList = [(MapFile, CopyMapFile)]
else:
GenFdsGlobalVariable.CopyList.append((MapFile, CopyMapFile))
else:
if os.path.exists(MapFile):
if not os.path.exists(CopyMapFile) or \
(os.path.getmtime(MapFile) > os.path.getmtime(CopyMapFile)):
CopyLongFilePath(MapFile, CopyMapFile)
if not NoStrip:
FileBeforeStrip = os.path.join(OutputPath, ModuleName + '.efi')
if IsMakefile:
if GenFdsGlobalVariable.CopyList == []:
GenFdsGlobalVariable.CopyList = [(File, FileBeforeStrip)]
else:
GenFdsGlobalVariable.CopyList.append((File, FileBeforeStrip))
else:
if not os.path.exists(FileBeforeStrip) or \
(os.path.getmtime(File) > os.path.getmtime(FileBeforeStrip)):
CopyLongFilePath(File, FileBeforeStrip)
StrippedFile = os.path.join(OutputPath, ModuleName + '.stripped')
GenFdsGlobalVariable.GenerateFirmwareImage(
StrippedFile,
[File],
Strip=True,
IsMakefile = IsMakefile
)
File = StrippedFile
"""For TE Section call GenFw to generate TE image"""
if SectionType == BINARY_FILE_TYPE_TE:
TeFile = os.path.join( OutputPath, ModuleName + 'Te.raw')
GenFdsGlobalVariable.GenerateFirmwareImage(
TeFile,
[File],
Type='te',
IsMakefile = IsMakefile
)
File = TeFile
"""Call GenSection"""
GenFdsGlobalVariable.GenerateSection(OutputFile,
[File],
Section.Section.SectionType.get (SectionType),
IsMakefile=IsMakefile
)
OutputFileList.append(OutputFile)
return OutputFileList, Align
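    # Output naming sketch (illustrative): for SecNum '1' and Index 2 the section
    # file is '<OutputPath>/<ModuleName>SEC1.2<suffix>', and the returned Align is
    # the rule alignment, or '0' when 'Auto' was requested for PE32/TE input.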
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/EfiSection.py
|
## @file
# process Version section generation
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from .Ffs import SectionSuffix
import Common.LongFilePathOs as os
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from CommonDataClass.FdfClass import VerSectionClassObject
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.DataType import SUP_MODULE_SEC
## generate version section
#
#
class VerSection (VerSectionClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
VerSectionClassObject.__init__(self)
## GenSection() method
#
# Generate version section
#
# @param self The object pointer
# @param OutputPath Where to place output file
# @param ModuleName Which module this section belongs to
# @param SecNum Index of section
# @param KeyStringList Filter for inputs of section generation
# @param FfsInf FfsInfStatement object that contains this section data
# @param Dict dictionary contains macro and its value
# @retval tuple (Generated file name, section alignment)
#
def GenSection(self, OutputPath, ModuleName, SecNum, KeyStringList, FfsInf=None, Dict=None, IsMakefile = False):
#
# Prepare the parameter of GenSection
#
if FfsInf:
self.Alignment = FfsInf.__ExtendMacro__(self.Alignment)
self.BuildNum = FfsInf.__ExtendMacro__(self.BuildNum)
self.StringData = FfsInf.__ExtendMacro__(self.StringData)
self.FileName = FfsInf.__ExtendMacro__(self.FileName)
OutputFile = os.path.join(OutputPath,
ModuleName + SUP_MODULE_SEC + SecNum + SectionSuffix.get('VERSION'))
OutputFile = os.path.normpath(OutputFile)
# Get String Data
StringData = ''
if self.StringData:
StringData = self.StringData
elif self.FileName:
if Dict is None:
Dict = {}
FileNameStr = GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.FileName)
FileNameStr = GenFdsGlobalVariable.MacroExtend(FileNameStr, Dict)
FileObj = open(FileNameStr, 'r')
StringData = FileObj.read()
StringData = '"' + StringData + '"'
FileObj.close()
GenFdsGlobalVariable.GenerateSection(OutputFile, [], 'EFI_SECTION_VERSION',
Ver=StringData, BuildNumber=self.BuildNum, IsMakefile=IsMakefile)
OutputFileList = []
OutputFileList.append(OutputFile)
return OutputFileList, self.Alignment
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/VerSection.py
|
## @file
# process OptionROM generation from FILE statement
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
import Common.LongFilePathOs as os
from .GenFdsGlobalVariable import GenFdsGlobalVariable
##
#
#
class OptRomFileStatement:
## The constructor
#
# @param self The object pointer
#
def __init__(self):
self.FileName = None
self.FileType = None
self.OverrideAttribs = None
## GenFfs() method
#
# Generate FFS
#
# @param self The object pointer
# @param Dict dictionary contains macro and value pair
# @retval string Generated FFS file name
#
def GenFfs(self, Dict = None, IsMakefile=False):
if Dict is None:
Dict = {}
if self.FileName is not None:
self.FileName = GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.FileName)
return self.FileName
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/OptRomFileStatement.py
|
## @file
# generate flash image
#
# Copyright (c) 2007 - 2019, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import print_function
from __future__ import absolute_import
from re import compile
from optparse import OptionParser
from sys import exit
from glob import glob
from struct import unpack
from linecache import getlines
from io import BytesIO
import Common.LongFilePathOs as os
from Common.TargetTxtClassObject import TargetTxtDict,gDefaultTargetTxtFile
from Common.DataType import *
import Common.GlobalData as GlobalData
from Common import EdkLogger
from Common.StringUtils import NormPath
from Common.Misc import DirCache, PathClass, GuidStructureStringToGuidString
from Common.Misc import SaveFileOnChange, ClearDuplicatedInf
from Common.BuildVersion import gBUILD_VERSION
from Common.MultipleWorkspace import MultipleWorkspace as mws
from Common.BuildToolError import FatalError, GENFDS_ERROR, CODE_ERROR, FORMAT_INVALID, RESOURCE_NOT_AVAILABLE, FILE_NOT_FOUND, OPTION_MISSING, FORMAT_NOT_SUPPORTED, OPTION_VALUE_INVALID, PARAMETER_INVALID
from Workspace.WorkspaceDatabase import WorkspaceDatabase
from .FdfParser import FdfParser, Warning
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from .FfsFileStatement import FileStatement
import Common.DataType as DataType
from struct import Struct
## Version and Copyright
versionNumber = "1.0" + ' ' + gBUILD_VERSION
__version__ = "%prog Version " + versionNumber
__copyright__ = "Copyright (c) 2007 - 2018, Intel Corporation All rights reserved."
## Tool entrance method
#
# This method mainly dispatches specific methods per the command line options.
# If no error is found, a zero value is returned so the caller of this tool
# can tell whether it executed successfully or not.
#
# @retval 0 Tool was successful
# @retval 1 Tool failed
#
def main():
global Options
Options = myOptionParser()
EdkLogger.Initialize()
return GenFdsApi(OptionsToCommandDict(Options))
def resetFdsGlobalVariable():
GenFdsGlobalVariable.FvDir = ''
GenFdsGlobalVariable.OutputDirDict = {}
GenFdsGlobalVariable.BinDir = ''
# will be FvDir + os.sep + 'Ffs'
GenFdsGlobalVariable.FfsDir = ''
GenFdsGlobalVariable.FdfParser = None
GenFdsGlobalVariable.LibDir = ''
GenFdsGlobalVariable.WorkSpace = None
GenFdsGlobalVariable.WorkSpaceDir = ''
GenFdsGlobalVariable.ConfDir = ''
GenFdsGlobalVariable.OutputDirFromDscDict = {}
GenFdsGlobalVariable.TargetName = ''
GenFdsGlobalVariable.ToolChainTag = ''
GenFdsGlobalVariable.RuleDict = {}
GenFdsGlobalVariable.ArchList = None
GenFdsGlobalVariable.ActivePlatform = None
GenFdsGlobalVariable.FvAddressFileName = ''
GenFdsGlobalVariable.VerboseMode = False
GenFdsGlobalVariable.DebugLevel = -1
GenFdsGlobalVariable.SharpCounter = 0
GenFdsGlobalVariable.SharpNumberPerLine = 40
GenFdsGlobalVariable.FdfFile = ''
GenFdsGlobalVariable.FdfFileTimeStamp = 0
GenFdsGlobalVariable.FixedLoadAddress = False
GenFdsGlobalVariable.PlatformName = ''
GenFdsGlobalVariable.BuildRuleFamily = DataType.TAB_COMPILER_MSFT
GenFdsGlobalVariable.ToolChainFamily = DataType.TAB_COMPILER_MSFT
GenFdsGlobalVariable.__BuildRuleDatabase = None
GenFdsGlobalVariable.GuidToolDefinition = {}
GenFdsGlobalVariable.FfsCmdDict = {}
GenFdsGlobalVariable.SecCmdList = []
GenFdsGlobalVariable.CopyList = []
GenFdsGlobalVariable.ModuleFile = ''
GenFdsGlobalVariable.EnableGenfdsMultiThread = True
GenFdsGlobalVariable.LargeFileInFvFlags = []
GenFdsGlobalVariable.EFI_FIRMWARE_FILE_SYSTEM3_GUID = '5473C07A-3DCB-4dca-BD6F-1E9689E7349A'
GenFdsGlobalVariable.LARGE_FILE_SIZE = 0x1000000
GenFdsGlobalVariable.SectionHeader = Struct("3B 1B")
# FvName, FdName, CapName in FDF, Image file name
GenFdsGlobalVariable.ImageBinDict = {}
def GenFdsApi(FdsCommandDict, WorkSpaceDataBase=None):
global Workspace
Workspace = ""
ArchList = None
ReturnCode = 0
resetFdsGlobalVariable()
try:
if FdsCommandDict.get("verbose"):
EdkLogger.SetLevel(EdkLogger.VERBOSE)
GenFdsGlobalVariable.VerboseMode = True
if FdsCommandDict.get("FixedAddress"):
GenFdsGlobalVariable.FixedLoadAddress = True
if FdsCommandDict.get("quiet"):
EdkLogger.SetLevel(EdkLogger.QUIET)
if FdsCommandDict.get("debug"):
EdkLogger.SetLevel(FdsCommandDict.get("debug") + 1)
GenFdsGlobalVariable.DebugLevel = FdsCommandDict.get("debug")
else:
EdkLogger.SetLevel(EdkLogger.INFO)
if not FdsCommandDict.get("Workspace",os.environ.get('WORKSPACE')):
EdkLogger.error("GenFds", OPTION_MISSING, "WORKSPACE not defined",
ExtraData="Please use '-w' switch to pass it or set the WORKSPACE environment variable.")
elif not os.path.exists(FdsCommandDict.get("Workspace",os.environ.get('WORKSPACE'))):
EdkLogger.error("GenFds", PARAMETER_INVALID, "WORKSPACE is invalid",
ExtraData="Please use '-w' switch to pass it or set the WORKSPACE environment variable.")
else:
Workspace = os.path.normcase(FdsCommandDict.get("Workspace",os.environ.get('WORKSPACE')))
GenFdsGlobalVariable.WorkSpaceDir = Workspace
if FdsCommandDict.get("debug"):
GenFdsGlobalVariable.VerboseLogger("Using Workspace:" + Workspace)
if FdsCommandDict.get("GenfdsMultiThread"):
GenFdsGlobalVariable.EnableGenfdsMultiThread = True
else:
GenFdsGlobalVariable.EnableGenfdsMultiThread = False
os.chdir(GenFdsGlobalVariable.WorkSpaceDir)
# set multiple workspace
PackagesPath = os.getenv("PACKAGES_PATH")
mws.setWs(GenFdsGlobalVariable.WorkSpaceDir, PackagesPath)
if FdsCommandDict.get("fdf_file"):
FdfFilename = FdsCommandDict.get("fdf_file")[0].Path
FdfFilename = GenFdsGlobalVariable.ReplaceWorkspaceMacro(FdfFilename)
if FdfFilename[0:2] == '..':
FdfFilename = os.path.abspath(FdfFilename)
if not os.path.isabs(FdfFilename):
FdfFilename = mws.join(GenFdsGlobalVariable.WorkSpaceDir, FdfFilename)
if not os.path.exists(FdfFilename):
EdkLogger.error("GenFds", FILE_NOT_FOUND, ExtraData=FdfFilename)
GenFdsGlobalVariable.FdfFile = FdfFilename
GenFdsGlobalVariable.FdfFileTimeStamp = os.path.getmtime(FdfFilename)
else:
EdkLogger.error("GenFds", OPTION_MISSING, "Missing FDF filename")
if FdsCommandDict.get("build_target"):
GenFdsGlobalVariable.TargetName = FdsCommandDict.get("build_target")
if FdsCommandDict.get("toolchain_tag"):
GenFdsGlobalVariable.ToolChainTag = FdsCommandDict.get("toolchain_tag")
if FdsCommandDict.get("active_platform"):
ActivePlatform = FdsCommandDict.get("active_platform")
ActivePlatform = GenFdsGlobalVariable.ReplaceWorkspaceMacro(ActivePlatform)
if ActivePlatform[0:2] == '..':
ActivePlatform = os.path.abspath(ActivePlatform)
if not os.path.isabs (ActivePlatform):
ActivePlatform = mws.join(GenFdsGlobalVariable.WorkSpaceDir, ActivePlatform)
if not os.path.exists(ActivePlatform):
EdkLogger.error("GenFds", FILE_NOT_FOUND, "ActivePlatform doesn't exist!")
else:
EdkLogger.error("GenFds", OPTION_MISSING, "Missing active platform")
GenFdsGlobalVariable.ActivePlatform = PathClass(NormPath(ActivePlatform))
if FdsCommandDict.get("conf_directory"):
# Get alternate Conf location, if it is absolute, then just use the absolute directory name
ConfDirectoryPath = os.path.normpath(FdsCommandDict.get("conf_directory"))
if ConfDirectoryPath.startswith('"'):
ConfDirectoryPath = ConfDirectoryPath[1:]
if ConfDirectoryPath.endswith('"'):
ConfDirectoryPath = ConfDirectoryPath[:-1]
if not os.path.isabs(ConfDirectoryPath):
# Since alternate directory name is not absolute, the alternate directory is located within the WORKSPACE
# This also handles someone specifying the Conf directory in the workspace. Using --conf=Conf
ConfDirectoryPath = os.path.join(GenFdsGlobalVariable.WorkSpaceDir, ConfDirectoryPath)
else:
if "CONF_PATH" in os.environ:
ConfDirectoryPath = os.path.normcase(os.environ["CONF_PATH"])
else:
# Get standard WORKSPACE/Conf, use the absolute path to the WORKSPACE/Conf
ConfDirectoryPath = mws.join(GenFdsGlobalVariable.WorkSpaceDir, 'Conf')
GenFdsGlobalVariable.ConfDir = ConfDirectoryPath
if not GlobalData.gConfDirectory:
GlobalData.gConfDirectory = GenFdsGlobalVariable.ConfDir
BuildConfigurationFile = os.path.normpath(os.path.join(ConfDirectoryPath, gDefaultTargetTxtFile))
if os.path.isfile(BuildConfigurationFile) == True:
# if no build target given in command line, get it from target.txt
TargetObj = TargetTxtDict()
TargetTxt = TargetObj.Target
if not GenFdsGlobalVariable.TargetName:
BuildTargetList = TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TARGET]
if len(BuildTargetList) != 1:
EdkLogger.error("GenFds", OPTION_VALUE_INVALID, ExtraData="Only allows one instance for Target.")
GenFdsGlobalVariable.TargetName = BuildTargetList[0]
# if no tool chain given in command line, get it from target.txt
if not GenFdsGlobalVariable.ToolChainTag:
ToolChainList = TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TOOL_CHAIN_TAG]
if ToolChainList is None or len(ToolChainList) == 0:
EdkLogger.error("GenFds", RESOURCE_NOT_AVAILABLE, ExtraData="No toolchain given. Don't know how to build.")
if len(ToolChainList) != 1:
EdkLogger.error("GenFds", OPTION_VALUE_INVALID, ExtraData="Only allows one instance for ToolChain.")
GenFdsGlobalVariable.ToolChainTag = ToolChainList[0]
else:
EdkLogger.error("GenFds", FILE_NOT_FOUND, ExtraData=BuildConfigurationFile)
#Set global flag for build mode
GlobalData.gIgnoreSource = FdsCommandDict.get("IgnoreSources")
if FdsCommandDict.get("macro"):
for Pair in FdsCommandDict.get("macro"):
if Pair.startswith('"'):
Pair = Pair[1:]
if Pair.endswith('"'):
Pair = Pair[:-1]
List = Pair.split('=')
if len(List) == 2:
if not List[1].strip():
EdkLogger.error("GenFds", OPTION_VALUE_INVALID, ExtraData="No Value given for Macro %s" %List[0])
if List[0].strip() in ["WORKSPACE", "TARGET", "TOOLCHAIN"]:
GlobalData.gGlobalDefines[List[0].strip()] = List[1].strip()
else:
GlobalData.gCommandLineDefines[List[0].strip()] = List[1].strip()
else:
GlobalData.gCommandLineDefines[List[0].strip()] = "TRUE"
os.environ["WORKSPACE"] = Workspace
# Use the -t and -b option as gGlobalDefines's TOOLCHAIN and TARGET if they are not defined
if "TARGET" not in GlobalData.gGlobalDefines:
GlobalData.gGlobalDefines["TARGET"] = GenFdsGlobalVariable.TargetName
if "TOOLCHAIN" not in GlobalData.gGlobalDefines:
GlobalData.gGlobalDefines["TOOLCHAIN"] = GenFdsGlobalVariable.ToolChainTag
if "TOOL_CHAIN_TAG" not in GlobalData.gGlobalDefines:
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = GenFdsGlobalVariable.ToolChainTag
"""call Workspace build create database"""
GlobalData.gDatabasePath = os.path.normpath(os.path.join(ConfDirectoryPath, GlobalData.gDatabasePath))
if WorkSpaceDataBase:
BuildWorkSpace = WorkSpaceDataBase
else:
BuildWorkSpace = WorkspaceDatabase()
#
# Get files real name in workspace dir
#
GlobalData.gAllFiles = DirCache(Workspace)
GlobalData.gWorkspace = Workspace
if FdsCommandDict.get("build_architecture_list"):
ArchList = FdsCommandDict.get("build_architecture_list").split(',')
else:
ArchList = BuildWorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, TAB_COMMON, FdsCommandDict.get("build_target"), FdsCommandDict.get("toolchain_tag")].SupArchList
TargetArchList = set(BuildWorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, TAB_COMMON, FdsCommandDict.get("build_target"), FdsCommandDict.get("toolchain_tag")].SupArchList) & set(ArchList)
if len(TargetArchList) == 0:
EdkLogger.error("GenFds", GENFDS_ERROR, "Target ARCH %s not in platform supported ARCH %s" % (str(ArchList), str(BuildWorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, TAB_COMMON].SupArchList)))
for Arch in ArchList:
GenFdsGlobalVariable.OutputDirFromDscDict[Arch] = NormPath(BuildWorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, Arch, FdsCommandDict.get("build_target"), FdsCommandDict.get("toolchain_tag")].OutputDirectory)
# assign platform name based on last entry in ArchList
GenFdsGlobalVariable.PlatformName = BuildWorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, ArchList[-1], FdsCommandDict.get("build_target"), FdsCommandDict.get("toolchain_tag")].PlatformName
if FdsCommandDict.get("platform_build_directory"):
OutputDirFromCommandLine = GenFdsGlobalVariable.ReplaceWorkspaceMacro(FdsCommandDict.get("platform_build_directory"))
if not os.path.isabs (OutputDirFromCommandLine):
OutputDirFromCommandLine = os.path.join(GenFdsGlobalVariable.WorkSpaceDir, OutputDirFromCommandLine)
for Arch in ArchList:
GenFdsGlobalVariable.OutputDirDict[Arch] = OutputDirFromCommandLine
else:
for Arch in ArchList:
GenFdsGlobalVariable.OutputDirDict[Arch] = os.path.join(GenFdsGlobalVariable.OutputDirFromDscDict[Arch], GenFdsGlobalVariable.TargetName + '_' + GenFdsGlobalVariable.ToolChainTag)
for Key in GenFdsGlobalVariable.OutputDirDict:
OutputDir = GenFdsGlobalVariable.OutputDirDict[Key]
if OutputDir[0:2] == '..':
OutputDir = os.path.abspath(OutputDir)
if OutputDir[1] != ':':
OutputDir = os.path.join (GenFdsGlobalVariable.WorkSpaceDir, OutputDir)
if not os.path.exists(OutputDir):
EdkLogger.error("GenFds", FILE_NOT_FOUND, ExtraData=OutputDir)
GenFdsGlobalVariable.OutputDirDict[Key] = OutputDir
""" Parse Fdf file, has to place after build Workspace as FDF may contain macros from DSC file """
if WorkSpaceDataBase:
FdfParserObj = GlobalData.gFdfParser
else:
FdfParserObj = FdfParser(FdfFilename)
FdfParserObj.ParseFile()
if FdfParserObj.CycleReferenceCheck():
EdkLogger.error("GenFds", FORMAT_NOT_SUPPORTED, "Cycle Reference Detected in FDF file")
if FdsCommandDict.get("fd"):
if FdsCommandDict.get("fd")[0].upper() in FdfParserObj.Profile.FdDict:
GenFds.OnlyGenerateThisFd = FdsCommandDict.get("fd")[0]
else:
EdkLogger.error("GenFds", OPTION_VALUE_INVALID,
"No such an FD in FDF file: %s" % FdsCommandDict.get("fd")[0])
if FdsCommandDict.get("fv"):
if FdsCommandDict.get("fv")[0].upper() in FdfParserObj.Profile.FvDict:
GenFds.OnlyGenerateThisFv = FdsCommandDict.get("fv")[0]
else:
EdkLogger.error("GenFds", OPTION_VALUE_INVALID,
"No such an FV in FDF file: %s" % FdsCommandDict.get("fv")[0])
if FdsCommandDict.get("cap"):
if FdsCommandDict.get("cap")[0].upper() in FdfParserObj.Profile.CapsuleDict:
GenFds.OnlyGenerateThisCap = FdsCommandDict.get("cap")[0]
else:
EdkLogger.error("GenFds", OPTION_VALUE_INVALID,
"No such a Capsule in FDF file: %s" % FdsCommandDict.get("cap")[0])
GenFdsGlobalVariable.WorkSpace = BuildWorkSpace
if ArchList:
GenFdsGlobalVariable.ArchList = ArchList
# Dsc Build Data will handle Pcd Settings from CommandLine.
"""Modify images from build output if the feature of loading driver at fixed address is on."""
if GenFdsGlobalVariable.FixedLoadAddress:
GenFds.PreprocessImage(BuildWorkSpace, GenFdsGlobalVariable.ActivePlatform)
# Record the FV Region info that may specific in the FD
if FdfParserObj.Profile.FvDict and FdfParserObj.Profile.FdDict:
for FvObj in FdfParserObj.Profile.FvDict.values():
for FdObj in FdfParserObj.Profile.FdDict.values():
for RegionObj in FdObj.RegionList:
if RegionObj.RegionType != BINARY_FILE_TYPE_FV:
continue
for RegionData in RegionObj.RegionDataList:
if FvObj.UiFvName.upper() == RegionData.upper():
if not FvObj.BaseAddress:
FvObj.BaseAddress = '0x%x' % (int(FdObj.BaseAddress, 0) + RegionObj.Offset)
if FvObj.FvRegionInFD:
if FvObj.FvRegionInFD != RegionObj.Size:
EdkLogger.error("GenFds", FORMAT_INVALID, "The FV %s's region is specified in multiple FD with different value." %FvObj.UiFvName)
else:
FvObj.FvRegionInFD = RegionObj.Size
RegionObj.BlockInfoOfRegion(FdObj.BlockSizeList, FvObj)
"""Call GenFds"""
GenFds.GenFd('', FdfParserObj, BuildWorkSpace, ArchList)
"""Generate GUID cross reference file"""
GenFds.GenerateGuidXRefFile(BuildWorkSpace, ArchList, FdfParserObj)
"""Display FV space info."""
GenFds.DisplayFvSpaceInfo(FdfParserObj)
except Warning as X:
EdkLogger.error(X.ToolName, FORMAT_INVALID, File=X.FileName, Line=X.LineNumber, ExtraData=X.Message, RaiseError=False)
ReturnCode = FORMAT_INVALID
except FatalError as X:
if FdsCommandDict.get("debug") is not None:
import traceback
EdkLogger.quiet(traceback.format_exc())
ReturnCode = X.args[0]
except:
import traceback
EdkLogger.error(
"\nPython",
CODE_ERROR,
"Tools code failure",
ExtraData="Please send email to %s for help, attaching following call stack trace!\n" % MSG_EDKII_MAIL_ADDR,
RaiseError=False
)
EdkLogger.quiet(traceback.format_exc())
ReturnCode = CODE_ERROR
finally:
ClearDuplicatedInf()
return ReturnCode
def OptionsToCommandDict(Options):
FdsCommandDict = {}
FdsCommandDict["verbose"] = Options.verbose
FdsCommandDict["FixedAddress"] = Options.FixedAddress
FdsCommandDict["quiet"] = Options.quiet
FdsCommandDict["debug"] = Options.debug
FdsCommandDict["Workspace"] = Options.Workspace
FdsCommandDict["GenfdsMultiThread"] = not Options.NoGenfdsMultiThread
FdsCommandDict["fdf_file"] = [PathClass(Options.filename)] if Options.filename else []
FdsCommandDict["build_target"] = Options.BuildTarget
FdsCommandDict["toolchain_tag"] = Options.ToolChain
FdsCommandDict["active_platform"] = Options.activePlatform
FdsCommandDict["OptionPcd"] = Options.OptionPcd
FdsCommandDict["conf_directory"] = Options.ConfDirectory
FdsCommandDict["IgnoreSources"] = Options.IgnoreSources
FdsCommandDict["macro"] = Options.Macros
FdsCommandDict["build_architecture_list"] = Options.archList
FdsCommandDict["platform_build_directory"] = Options.outputDir
FdsCommandDict["fd"] = [Options.uiFdName] if Options.uiFdName else []
FdsCommandDict["fv"] = [Options.uiFvName] if Options.uiFvName else []
FdsCommandDict["cap"] = [Options.uiCapName] if Options.uiCapName else []
return FdsCommandDict
gParamCheck = []
def SingleCheckCallback(option, opt_str, value, parser):
if option not in gParamCheck:
setattr(parser.values, option.dest, value)
gParamCheck.append(option)
else:
parser.error("Option %s only allows one instance in command line!" % option)
## Parse command line options
#
# Uses the standard Python optparse module to parse the command line options of this tool.
#
# @retval Opt An optparse.Values object containing the parsed options
#
def myOptionParser():
usage = "%prog [options] -f input_file -a arch_list -b build_target -p active_platform -t tool_chain_tag -D \"MacroName [= MacroValue]\""
Parser = OptionParser(usage=usage, description=__copyright__, version="%prog " + str(versionNumber))
Parser.add_option("-f", "--file", dest="filename", type="string", help="Name of FDF file to convert", action="callback", callback=SingleCheckCallback)
Parser.add_option("-a", "--arch", dest="archList", help="comma separated list containing one or more of: IA32, X64, IPF, ARM, AARCH64 or EBC which should be built, overrides target.txt?s TARGET_ARCH")
Parser.add_option("-q", "--quiet", action="store_true", type=None, help="Disable all messages except FATAL ERRORS.")
Parser.add_option("-v", "--verbose", action="store_true", type=None, help="Turn on verbose output with informational messages printed.")
Parser.add_option("-d", "--debug", action="store", type="int", help="Enable debug messages at specified level.")
Parser.add_option("-p", "--platform", type="string", dest="activePlatform", help="Set the ACTIVE_PLATFORM, overrides target.txt ACTIVE_PLATFORM setting.",
action="callback", callback=SingleCheckCallback)
Parser.add_option("-w", "--workspace", type="string", dest="Workspace", default=os.environ.get('WORKSPACE'), help="Set the WORKSPACE",
action="callback", callback=SingleCheckCallback)
Parser.add_option("-o", "--outputDir", type="string", dest="outputDir", help="Name of Build Output directory",
action="callback", callback=SingleCheckCallback)
Parser.add_option("-r", "--rom_image", dest="uiFdName", help="Build the image using the [FD] section named by FdUiName.")
Parser.add_option("-i", "--FvImage", dest="uiFvName", help="Build the FV image using the [FV] section named by UiFvName")
Parser.add_option("-C", "--CapsuleImage", dest="uiCapName", help="Build the Capsule image using the [Capsule] section named by UiCapName")
Parser.add_option("-b", "--buildtarget", type="string", dest="BuildTarget", help="Set the build TARGET, overrides target.txt TARGET setting.",
action="callback", callback=SingleCheckCallback)
Parser.add_option("-t", "--tagname", type="string", dest="ToolChain", help="Using the tools: TOOL_CHAIN_TAG name to build the platform.",
action="callback", callback=SingleCheckCallback)
Parser.add_option("-D", "--define", action="append", type="string", dest="Macros", help="Macro: \"Name [= Value]\".")
Parser.add_option("-s", "--specifyaddress", dest="FixedAddress", action="store_true", type=None, help="Specify driver load address.")
Parser.add_option("--conf", action="store", type="string", dest="ConfDirectory", help="Specify the customized Conf directory.")
Parser.add_option("--ignore-sources", action="store_true", dest="IgnoreSources", default=False, help="Focus to a binary build and ignore all source files")
Parser.add_option("--pcd", action="append", dest="OptionPcd", help="Set PCD value by command line. Format: \"PcdName=Value\" ")
Parser.add_option("--genfds-multi-thread", action="store_true", dest="GenfdsMultiThread", default=True, help="Enable GenFds multi thread to generate ffs file.")
Parser.add_option("--no-genfds-multi-thread", action="store_true", dest="NoGenfdsMultiThread", default=False, help="Disable GenFds multi thread to generate ffs file.")
Options, _ = Parser.parse_args()
return Options
## The class implementing the EDK2 flash image generation process
#
# This process includes:
# 1. Collect workspace information, including platform and module information
# 2. Call methods of Fd class to generate FD
# 3. Call methods of Fv class to generate FVs that do not belong to any FD
#
class GenFds(object):
FdfParser = None
OnlyGenerateThisFd = None
OnlyGenerateThisFv = None
OnlyGenerateThisCap = None
## GenFd()
#
# @param OutputDir Output directory
# @param FdfParserObject FDF contents parser
# @param Workspace The directory of workspace
# @param ArchList The Arch list of platform
#
@staticmethod
def GenFd (OutputDir, FdfParserObject, WorkSpace, ArchList):
GenFdsGlobalVariable.SetDir ('', FdfParserObject, WorkSpace, ArchList)
GenFdsGlobalVariable.VerboseLogger(" Generate all Fd images and their required FV and Capsule images!")
if GenFds.OnlyGenerateThisCap is not None and GenFds.OnlyGenerateThisCap.upper() in GenFdsGlobalVariable.FdfParser.Profile.CapsuleDict:
CapsuleObj = GenFdsGlobalVariable.FdfParser.Profile.CapsuleDict[GenFds.OnlyGenerateThisCap.upper()]
if CapsuleObj is not None:
CapsuleObj.GenCapsule()
return
if GenFds.OnlyGenerateThisFd is not None and GenFds.OnlyGenerateThisFd.upper() in GenFdsGlobalVariable.FdfParser.Profile.FdDict:
FdObj = GenFdsGlobalVariable.FdfParser.Profile.FdDict[GenFds.OnlyGenerateThisFd.upper()]
if FdObj is not None:
FdObj.GenFd()
return
elif GenFds.OnlyGenerateThisFd is None and GenFds.OnlyGenerateThisFv is None:
for FdObj in GenFdsGlobalVariable.FdfParser.Profile.FdDict.values():
FdObj.GenFd()
GenFdsGlobalVariable.VerboseLogger("\n Generate other FV images! ")
if GenFds.OnlyGenerateThisFv is not None and GenFds.OnlyGenerateThisFv.upper() in GenFdsGlobalVariable.FdfParser.Profile.FvDict:
FvObj = GenFdsGlobalVariable.FdfParser.Profile.FvDict[GenFds.OnlyGenerateThisFv.upper()]
if FvObj is not None:
Buffer = BytesIO()
FvObj.AddToBuffer(Buffer)
Buffer.close()
return
elif GenFds.OnlyGenerateThisFv is None:
for FvObj in GenFdsGlobalVariable.FdfParser.Profile.FvDict.values():
Buffer = BytesIO()
FvObj.AddToBuffer(Buffer)
Buffer.close()
if GenFds.OnlyGenerateThisFv is None and GenFds.OnlyGenerateThisFd is None and GenFds.OnlyGenerateThisCap is None:
if GenFdsGlobalVariable.FdfParser.Profile.CapsuleDict != {}:
GenFdsGlobalVariable.VerboseLogger("\n Generate other Capsule images!")
for CapsuleObj in GenFdsGlobalVariable.FdfParser.Profile.CapsuleDict.values():
CapsuleObj.GenCapsule()
if GenFdsGlobalVariable.FdfParser.Profile.OptRomDict != {}:
GenFdsGlobalVariable.VerboseLogger("\n Generate all Option ROM!")
for OptRomObj in GenFdsGlobalVariable.FdfParser.Profile.OptRomDict.values():
OptRomObj.AddToBuffer(None)
@staticmethod
def GenFfsMakefile(OutputDir, FdfParserObject, WorkSpace, ArchList, GlobalData):
GenFdsGlobalVariable.SetEnv(FdfParserObject, WorkSpace, ArchList, GlobalData)
for FdObj in GenFdsGlobalVariable.FdfParser.Profile.FdDict.values():
FdObj.GenFd(Flag=True)
for FvObj in GenFdsGlobalVariable.FdfParser.Profile.FvDict.values():
FvObj.AddToBuffer(Buffer=None, Flag=True)
if GenFdsGlobalVariable.FdfParser.Profile.OptRomDict != {}:
for OptRomObj in GenFdsGlobalVariable.FdfParser.Profile.OptRomDict.values():
OptRomObj.AddToBuffer(Buffer=None, Flag=True)
return GenFdsGlobalVariable.FfsCmdDict
## GetFvBlockSize()
#
# @param FvObj Whose block size to get
# @retval int Block size value
#
@staticmethod
def GetFvBlockSize(FvObj):
DefaultBlockSize = 0x1
FdObj = None
if GenFds.OnlyGenerateThisFd is not None and GenFds.OnlyGenerateThisFd.upper() in GenFdsGlobalVariable.FdfParser.Profile.FdDict:
FdObj = GenFdsGlobalVariable.FdfParser.Profile.FdDict[GenFds.OnlyGenerateThisFd.upper()]
if FdObj is None:
for ElementFd in GenFdsGlobalVariable.FdfParser.Profile.FdDict.values():
for ElementRegion in ElementFd.RegionList:
if ElementRegion.RegionType == BINARY_FILE_TYPE_FV:
for ElementRegionData in ElementRegion.RegionDataList:
if ElementRegionData is not None and ElementRegionData.upper() == FvObj.UiFvName:
if FvObj.BlockSizeList != []:
return FvObj.BlockSizeList[0][0]
else:
return ElementRegion.BlockSizeOfRegion(ElementFd.BlockSizeList)
if FvObj.BlockSizeList != []:
return FvObj.BlockSizeList[0][0]
return DefaultBlockSize
else:
for ElementRegion in FdObj.RegionList:
if ElementRegion.RegionType == BINARY_FILE_TYPE_FV:
for ElementRegionData in ElementRegion.RegionDataList:
if ElementRegionData is not None and ElementRegionData.upper() == FvObj.UiFvName:
if FvObj.BlockSizeList != []:
return FvObj.BlockSizeList[0][0]
else:
return ElementRegion.BlockSizeOfRegion(FdObj.BlockSizeList)
return DefaultBlockSize
## DisplayFvSpaceInfo()
#
# @param FdfParserObject FDF contents parser
# @retval None
#
@staticmethod
def DisplayFvSpaceInfo(FdfParserObject):
FvSpaceInfoList = []
MaxFvNameLength = 0
for FvName in FdfParserObject.Profile.FvDict:
if len(FvName) > MaxFvNameLength:
MaxFvNameLength = len(FvName)
FvSpaceInfoFileName = os.path.join(GenFdsGlobalVariable.FvDir, FvName.upper() + '.Fv.map')
if os.path.exists(FvSpaceInfoFileName):
FileLinesList = getlines(FvSpaceInfoFileName)
TotalFound = False
Total = ''
UsedFound = False
Used = ''
FreeFound = False
Free = ''
for Line in FileLinesList:
NameValue = Line.split('=')
if len(NameValue) == 2:
if NameValue[0].strip() == 'EFI_FV_TOTAL_SIZE':
TotalFound = True
Total = NameValue[1].strip()
if NameValue[0].strip() == 'EFI_FV_TAKEN_SIZE':
UsedFound = True
Used = NameValue[1].strip()
if NameValue[0].strip() == 'EFI_FV_SPACE_SIZE':
FreeFound = True
Free = NameValue[1].strip()
if TotalFound and UsedFound and FreeFound:
FvSpaceInfoList.append((FvName, Total, Used, Free))
GenFdsGlobalVariable.InfLogger('\nFV Space Information')
for FvSpaceInfo in FvSpaceInfoList:
Name = FvSpaceInfo[0]
TotalSizeValue = int(FvSpaceInfo[1], 0)
UsedSizeValue = int(FvSpaceInfo[2], 0)
FreeSizeValue = int(FvSpaceInfo[3], 0)
if UsedSizeValue == TotalSizeValue:
Percentage = '100'
else:
Percentage = '%.2f' % (UsedSizeValue * 100.0 / TotalSizeValue)
GenFdsGlobalVariable.InfLogger(Name + ' ' + '[' + Percentage + '%Full] '\
+ str(TotalSizeValue) + ' (' + hex(TotalSizeValue) + ')' + ' total, '\
+ str(UsedSizeValue) + ' (' + hex(UsedSizeValue) + ')' + ' used, '\
+ str(FreeSizeValue) + ' (' + hex(FreeSizeValue) + ')' + ' free')
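# Illustrative input (hypothetical values, not from the tool): each
# <FVNAME>.Fv.map file is expected to contain "name = value" lines such as
#
#   EFI_FV_TOTAL_SIZE = 0x280000
#   EFI_FV_TAKEN_SIZE = 0x1C0000
#   EFI_FV_SPACE_SIZE = 0xC0000
#
# which the loop above would report roughly as:
#   FVMAIN [70.00%Full] 2621440 (0x280000) total, 1835008 (0x1c0000) used, 786432 (0xc0000) free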
## PreprocessImage()
#
# @param BuildDb Database from build meta data files
# @param DscFile modules from dsc file will be preprocessed
# @retval None
#
@staticmethod
def PreprocessImage(BuildDb, DscFile):
PcdDict = BuildDb.BuildObject[DscFile, TAB_COMMON, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag].Pcds
PcdValue = ''
for Key in PcdDict:
PcdObj = PcdDict[Key]
if PcdObj.TokenCName == 'PcdBsBaseAddress':
PcdValue = PcdObj.DefaultValue
break
if PcdValue == '':
return
Int64PcdValue = int(PcdValue, 0)
if Int64PcdValue == 0 or Int64PcdValue < -1:
return
TopAddress = 0
if Int64PcdValue > 0:
TopAddress = Int64PcdValue
ModuleDict = BuildDb.BuildObject[DscFile, TAB_COMMON, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag].Modules
for Key in ModuleDict:
ModuleObj = BuildDb.BuildObject[Key, TAB_COMMON, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
print(ModuleObj.BaseName + ' ' + ModuleObj.ModuleType)
@staticmethod
def GenerateGuidXRefFile(BuildDb, ArchList, FdfParserObj):
GuidXRefFileName = os.path.join(GenFdsGlobalVariable.FvDir, "Guid.xref")
GuidXRefFile = []
PkgGuidDict = {}
GuidDict = {}
ModuleList = []
FileGuidList = []
VariableGuidSet = set()
for Arch in ArchList:
PlatformDataBase = BuildDb.BuildObject[GenFdsGlobalVariable.ActivePlatform, Arch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
PkgList = GenFdsGlobalVariable.WorkSpace.GetPackageList(GenFdsGlobalVariable.ActivePlatform, Arch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag)
for P in PkgList:
PkgGuidDict.update(P.Guids)
for Name, Guid in PlatformDataBase.Pcds:
Pcd = PlatformDataBase.Pcds[Name, Guid]
if Pcd.Type in [TAB_PCDS_DYNAMIC_HII, TAB_PCDS_DYNAMIC_EX_HII]:
for SkuId in Pcd.SkuInfoList:
Sku = Pcd.SkuInfoList[SkuId]
if Sku.VariableGuid in VariableGuidSet:continue
VariableGuidSet.add(Sku.VariableGuid)
if Sku.VariableGuid and Sku.VariableGuid in PkgGuidDict.keys():
GuidDict[Sku.VariableGuid] = PkgGuidDict[Sku.VariableGuid]
for ModuleFile in PlatformDataBase.Modules:
Module = BuildDb.BuildObject[ModuleFile, Arch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
if Module in ModuleList:
continue
else:
ModuleList.append(Module)
if GlobalData.gGuidPattern.match(ModuleFile.BaseName):
GuidXRefFile.append("%s %s\n" % (ModuleFile.BaseName, Module.BaseName))
else:
GuidXRefFile.append("%s %s\n" % (Module.Guid, Module.BaseName))
GuidDict.update(Module.Protocols)
GuidDict.update(Module.Guids)
GuidDict.update(Module.Ppis)
for FvName in FdfParserObj.Profile.FvDict:
for FfsObj in FdfParserObj.Profile.FvDict[FvName].FfsList:
if not isinstance(FfsObj, FileStatement):
InfPath = PathClass(NormPath(mws.join(GenFdsGlobalVariable.WorkSpaceDir, FfsObj.InfFileName)))
FdfModule = BuildDb.BuildObject[InfPath, Arch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
if FdfModule in ModuleList:
continue
else:
ModuleList.append(FdfModule)
GuidXRefFile.append("%s %s\n" % (FdfModule.Guid, FdfModule.BaseName))
GuidDict.update(FdfModule.Protocols)
GuidDict.update(FdfModule.Guids)
GuidDict.update(FdfModule.Ppis)
else:
FileStatementGuid = FfsObj.NameGuid
if FileStatementGuid in FileGuidList:
continue
else:
FileGuidList.append(FileStatementGuid)
Name = []
FfsPath = os.path.join(GenFdsGlobalVariable.FvDir, 'Ffs')
FfsPath = glob(os.path.join(FfsPath, FileStatementGuid) + TAB_STAR)
if not FfsPath:
continue
if not os.path.exists(FfsPath[0]):
continue
MatchDict = {}
# Match only the literal section-file suffixes; dots are escaped so '.' cannot match arbitrary characters.
ReFileEnds = compile(r'\S+(\.ui)$|\S+(fv\.sec\.txt)$|\S+(\.pe32\.txt)$|\S+(\.te\.txt)$|\S+(\.pic\.txt)$|\S+(\.raw\.txt)$|\S+(\.ffs\.txt)$')
FileList = os.listdir(FfsPath[0])
for File in FileList:
Match = ReFileEnds.search(File)
if Match:
for Index in range(1, 8):
if Match.group(Index) and Match.group(Index) in MatchDict:
MatchDict[Match.group(Index)].append(File)
elif Match.group(Index):
MatchDict[Match.group(Index)] = [File]
if not MatchDict:
continue
if '.ui' in MatchDict:
for File in MatchDict['.ui']:
with open(os.path.join(FfsPath[0], File), 'rb') as F:
F.read()
length = F.tell()
F.seek(4)
TmpStr = unpack('%dh' % ((length - 4) // 2), F.read())
Name = ''.join(chr(c) for c in TmpStr[:-1])
else:
FileList = []
if 'fv.sec.txt' in MatchDict:
FileList = MatchDict['fv.sec.txt']
elif '.pe32.txt' in MatchDict:
FileList = MatchDict['.pe32.txt']
elif '.te.txt' in MatchDict:
FileList = MatchDict['.te.txt']
elif '.pic.txt' in MatchDict:
FileList = MatchDict['.pic.txt']
elif '.raw.txt' in MatchDict:
FileList = MatchDict['.raw.txt']
elif '.ffs.txt' in MatchDict:
FileList = MatchDict['.ffs.txt']
else:
pass
for File in FileList:
with open(os.path.join(FfsPath[0], File), 'r') as F:
Name.append((F.read().split()[-1]))
if not Name:
continue
Name = ' '.join(Name) if isinstance(Name, list) else Name
GuidXRefFile.append("%s %s\n" %(FileStatementGuid, Name))
# Append GUIDs, Protocols, and PPIs to the Xref file
GuidXRefFile.append("\n")
for key, item in GuidDict.items():
GuidXRefFile.append("%s %s\n" % (GuidStructureStringToGuidString(item).upper(), key))
if GuidXRefFile:
GuidXRefFile = ''.join(GuidXRefFile)
SaveFileOnChange(GuidXRefFileName, GuidXRefFile, False)
GenFdsGlobalVariable.InfLogger("\nGUID cross reference file can be found at %s" % GuidXRefFileName)
elif os.path.exists(GuidXRefFileName):
os.remove(GuidXRefFileName)
if __name__ == '__main__':
r = main()
## 0-127 is a safe return range, and 1 is a standard default error
if r < 0 or r > 127:
r = 1
exit(r)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/GenFds.py
|
## @file
# process FD generation
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from . import Region
from . import Fv
import Common.LongFilePathOs as os
from io import BytesIO
import sys
from struct import *
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from CommonDataClass.FdfClass import FDClassObject
from Common import EdkLogger
from Common.BuildToolError import *
from Common.Misc import SaveFileOnChange
from Common.DataType import BINARY_FILE_TYPE_FV
## generate FD
#
#
class FD(FDClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
FDClassObject.__init__(self)
## GenFd() method
#
# Generate FD
#
# @retval string Generated FD file name
#
def GenFd (self, Flag = False):
if self.FdUiName.upper() + 'fd' in GenFdsGlobalVariable.ImageBinDict:
return GenFdsGlobalVariable.ImageBinDict[self.FdUiName.upper() + 'fd']
#
# Print Information
#
FdFileName = os.path.join(GenFdsGlobalVariable.FvDir, self.FdUiName + '.fd')
if not Flag:
GenFdsGlobalVariable.InfLogger("\nFd File Name:%s (%s)" %(self.FdUiName, FdFileName))
Offset = 0x00
for item in self.BlockSizeList:
Offset = Offset + item[0] * item[1]
if Offset != self.Size:
EdkLogger.error("GenFds", GENFDS_ERROR, 'FD %s Size not consistent with block array' % self.FdUiName)
GenFdsGlobalVariable.VerboseLogger('The following FVs will be added to the FD:')
for FvObj in GenFdsGlobalVariable.FdfParser.Profile.FvDict:
GenFdsGlobalVariable.VerboseLogger(FvObj)
HasCapsuleRegion = False
for RegionObj in self.RegionList:
if RegionObj.RegionType == 'CAPSULE':
HasCapsuleRegion = True
break
if HasCapsuleRegion:
TempFdBuffer = BytesIO()
PreviousRegionStart = -1
PreviousRegionSize = 1
for RegionObj in self.RegionList :
if RegionObj.RegionType == 'CAPSULE':
continue
if RegionObj.Offset + RegionObj.Size <= PreviousRegionStart:
pass
elif RegionObj.Offset <= PreviousRegionStart or (RegionObj.Offset >=PreviousRegionStart and RegionObj.Offset < PreviousRegionStart + PreviousRegionSize):
pass
elif RegionObj.Offset > PreviousRegionStart + PreviousRegionSize:
if not Flag:
GenFdsGlobalVariable.InfLogger('Padding region starting from offset 0x%X, with size 0x%X' %(PreviousRegionStart + PreviousRegionSize, RegionObj.Offset - (PreviousRegionStart + PreviousRegionSize)))
PadRegion = Region.Region()
PadRegion.Offset = PreviousRegionStart + PreviousRegionSize
PadRegion.Size = RegionObj.Offset - PadRegion.Offset
if not Flag:
PadRegion.AddToBuffer(TempFdBuffer, self.BaseAddress, self.BlockSizeList, self.ErasePolarity, GenFdsGlobalVariable.ImageBinDict, self.DefineVarDict)
PreviousRegionStart = RegionObj.Offset
PreviousRegionSize = RegionObj.Size
#
# Call each region's AddToBuffer function
#
if PreviousRegionSize > self.Size:
pass
GenFdsGlobalVariable.VerboseLogger('Call each region\'s AddToBuffer function')
RegionObj.AddToBuffer (TempFdBuffer, self.BaseAddress, self.BlockSizeList, self.ErasePolarity, GenFdsGlobalVariable.ImageBinDict, self.DefineVarDict)
FdBuffer = BytesIO()
PreviousRegionStart = -1
PreviousRegionSize = 1
for RegionObj in self.RegionList :
if RegionObj.Offset + RegionObj.Size <= PreviousRegionStart:
EdkLogger.error("GenFds", GENFDS_ERROR,
'Region offset 0x%X in wrong order with Region starting from 0x%X, size 0x%X\nRegions in FDF must have offsets that appear in ascending order.'\
% (RegionObj.Offset, PreviousRegionStart, PreviousRegionSize))
elif RegionObj.Offset <= PreviousRegionStart or (RegionObj.Offset >=PreviousRegionStart and RegionObj.Offset < PreviousRegionStart + PreviousRegionSize):
EdkLogger.error("GenFds", GENFDS_ERROR,
'Region offset 0x%X overlaps with Region starting from 0x%X, size 0x%X' \
% (RegionObj.Offset, PreviousRegionStart, PreviousRegionSize))
elif RegionObj.Offset > PreviousRegionStart + PreviousRegionSize:
if not Flag:
GenFdsGlobalVariable.InfLogger('Padding region starting from offset 0x%X, with size 0x%X' %(PreviousRegionStart + PreviousRegionSize, RegionObj.Offset - (PreviousRegionStart + PreviousRegionSize)))
PadRegion = Region.Region()
PadRegion.Offset = PreviousRegionStart + PreviousRegionSize
PadRegion.Size = RegionObj.Offset - PadRegion.Offset
if not Flag:
PadRegion.AddToBuffer(FdBuffer, self.BaseAddress, self.BlockSizeList, self.ErasePolarity, GenFdsGlobalVariable.ImageBinDict, self.DefineVarDict)
PreviousRegionStart = RegionObj.Offset
PreviousRegionSize = RegionObj.Size
#
# Verify current region fits within allocated FD section Size
#
if PreviousRegionStart + PreviousRegionSize > self.Size:
EdkLogger.error("GenFds", GENFDS_ERROR,
'FD %s size too small to fit region with offset 0x%X and size 0x%X'
% (self.FdUiName, PreviousRegionStart, PreviousRegionSize))
#
# Call each region's AddToBuffer function
#
GenFdsGlobalVariable.VerboseLogger('Call each region\'s AddToBuffer function')
RegionObj.AddToBuffer (FdBuffer, self.BaseAddress, self.BlockSizeList, self.ErasePolarity, GenFdsGlobalVariable.ImageBinDict, self.DefineVarDict, Flag=Flag)
#
# Write the buffer contents to Fd file
#
GenFdsGlobalVariable.VerboseLogger('Write the buffer contents to Fd file')
if not Flag:
SaveFileOnChange(FdFileName, FdBuffer.getvalue())
FdBuffer.close()
GenFdsGlobalVariable.ImageBinDict[self.FdUiName.upper() + 'fd'] = FdFileName
return FdFileName
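# Worked example (hypothetical FDF layout, not from the source): with two
# regions at offset 0x0 (size 0x1000) and offset 0x3000 (size 0x1000), the
# loop above inserts a pad region at offset 0x1000 of size 0x2000 -- filled
# per the erase polarity, typically 0xFF when ErasePolarity is 1 -- so the
# emitted FD image stays contiguous and byte-accurate up to self.Size.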
## generate flash map file
#
# @param self The object pointer
#
def GenFlashMap (self):
pass
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/Fd.py
|
## @file
# process Subtype GUIDed section generation
#
# Copyright (c) 2022, Konstantin Aladyshev <aladyshev22@gmail.com>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from . import Section
import subprocess
from .Ffs import SectionSuffix
import Common.LongFilePathOs as os
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from .GenFdsGlobalVariable import FindExtendTool
from CommonDataClass.FdfClass import SubTypeGuidSectionClassObject
import sys
from Common import EdkLogger
from Common.BuildToolError import *
from .FvImageSection import FvImageSection
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.DataType import *
## generate SubType GUIDed section
#
#
class SubTypeGuidSection(SubTypeGuidSectionClassObject) :
## The constructor
#
# @param self The object pointer
#
def __init__(self):
SubTypeGuidSectionClassObject.__init__(self)
## GenSection() method
#
# Generate GUIDed section
#
# @param self The object pointer
# @param OutputPath Where to place output file
# @param ModuleName Which module this section belongs to
# @param SecNum Index of section
# @param KeyStringList Filter for inputs of section generation
# @param FfsInf FfsInfStatement object that contains this section data
# @param Dict dictionary contains macro and its value
# @retval tuple (Generated file name, section alignment)
#
def GenSection(self, OutputPath, ModuleName, SecNum, KeyStringList, FfsInf=None, Dict=None, IsMakefile=False):
#
# Generate all sections
#
self.KeyStringList = KeyStringList
self.CurrentArchList = GenFdsGlobalVariable.ArchList
if FfsInf is not None:
self.Alignment = FfsInf.__ExtendMacro__(self.Alignment)
self.SubTypeGuid = FfsInf.__ExtendMacro__(self.SubTypeGuid)
self.SectionType = FfsInf.__ExtendMacro__(self.SectionType)
self.CurrentArchList = [FfsInf.CurrentArch]
if Dict is None:
Dict = {}
self.SectFileName = GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.SectFileName)
self.SectFileName = GenFdsGlobalVariable.MacroExtend(self.SectFileName, Dict)
OutputFile = os.path.join(OutputPath, ModuleName + SUP_MODULE_SEC + SecNum + SectionSuffix.get("SUBTYPE_GUID"))
GenFdsGlobalVariable.GenerateSection(OutputFile, [self.SectFileName], 'EFI_SECTION_FREEFORM_SUBTYPE_GUID', Guid=self.SubTypeGuid, IsMakefile=IsMakefile)
OutputFileList = []
OutputFileList.append(OutputFile)
return OutputFileList, self.Alignment
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/GenFds/SubTypeGuidSection.py
|
# @file
# Split a file into two pieces at the request offset.
#
# Copyright (c) 2021, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/Split/__init__.py
|
# @file
# Split a file into two pieces at the request offset.
#
# Copyright (c) 2021, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
import argparse
import os
import io
import shutil
import logging
import sys
import tempfile
parser = argparse.ArgumentParser(description='''
SplitFile creates two binary files, either in the current working directory or in the specified directory.
''')
parser.add_argument("-f", "--filename", dest="inputfile",
required=True, help="The input file to split tool.")
parser.add_argument("-s", "--split", dest="position",
required=True, help="The number of bytes in the first file. The valid format are HEX, Decimal and Decimal[KMG].")
parser.add_argument("-p", "--prefix", dest="output",
help="The output folder.")
parser.add_argument("-o", "--firstfile", help="The first file name")
parser.add_argument("-t", "--secondfile", help="The second file name")
parser.add_argument("--version", action="version", version='%(prog)s Version 2.0',
help="Print debug information.")
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", action="store_true",
help="Print debug information.")
group.add_argument("-q", "--quiet", action="store_true",
help="Disable all messages except fatal errors")
SizeDict = {
"K": 1024,
"M": 1024*1024,
"G": 1024*1024*1024
}
def GetPositionValue(position):
'''
Parse the string of the argument position and return a decimal number.
The valid position formats are
1. HEX
e.g. 0x1000 or 0X1000
2. Decimal
e.g. 100
3. Decimal[KMG]
e.g. 100K or 100M or 100G or 100k or 100m or 100g
'''
logger = logging.getLogger('Split')
PosVal = 0
header = position[:2].upper()
tailer = position[-1].upper()
try:
if tailer in SizeDict:
PosVal = int(position[:-1]) * SizeDict[tailer]
else:
if header == "0X":
PosVal = int(position, 16)
else:
PosVal = int(position)
except Exception as e:
logger.error(
"The parameter %s format is incorrect. The valid format is HEX, Decimal and Decimal[KMG]." % position)
raise(e)
return PosVal
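# Illustrative values (not part of the tool):
#   GetPositionValue("0x1000") -> 4096
#   GetPositionValue("100")    -> 100
#   GetPositionValue("100K")   -> 102400
#   GetPositionValue("2M")     -> 2097152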
def getFileSize(filename):
'''
Read the input file and return the file size.
'''
logger = logging.getLogger('Split')
length = 0
try:
with open(filename, "rb") as fin:
fin.seek(0, io.SEEK_END)
length = fin.tell()
except Exception as e:
logger.error("Access file failed: %s", filename)
raise(e)
return length
def getoutputfileabs(inputfile, prefix, outputfile,index):
inputfile = os.path.abspath(inputfile)
if outputfile is None:
if prefix is None:
outputfileabs = os.path.join(os.path.dirname(inputfile), "{}{}".format(os.path.basename(inputfile),index))
else:
if os.path.isabs(prefix):
outputfileabs = os.path.join(prefix, "{}{}".format(os.path.basename(inputfile),index))
else:
outputfileabs = os.path.join(os.getcwd(), prefix, "{}{}".format(os.path.basename(inputfile),index))
elif not os.path.isabs(outputfile):
if prefix is None:
outputfileabs = os.path.join(os.getcwd(), outputfile)
else:
if os.path.isabs(prefix):
outputfileabs = os.path.join(prefix, outputfile)
else:
outputfileabs = os.path.join(os.getcwd(), prefix, outputfile)
else:
outputfileabs = outputfile
return outputfileabs
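# Illustrative resolution (hypothetical paths): splitting /tmp/fw.bin with no
# --prefix and no explicit output names yields /tmp/fw.bin1 and /tmp/fw.bin2;
# with a relative --prefix of "out", the pieces land in <cwd>/out/fw.bin1 and
# <cwd>/out/fw.bin2.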
def splitFile(inputfile, position, outputdir=None, outputfile1=None, outputfile2=None):
'''
Split the inputfile into outputfile1 and outputfile2 from the position.
'''
logger = logging.getLogger('Split')
if not os.path.exists(inputfile):
logger.error("File Not Found: %s" % inputfile)
raise(Exception)
if outputfile1 and outputfile2 and outputfile1 == outputfile2:
logger.error(
"The firstfile and the secondfile can't be the same: %s" % outputfile1)
raise(Exception)
# Create dir for the output files
try:
outputfile1 = getoutputfileabs(inputfile, outputdir, outputfile1,1)
outputfolder = os.path.dirname(outputfile1)
if not os.path.exists(outputfolder):
os.makedirs(outputfolder)
outputfile2 = getoutputfileabs(inputfile, outputdir, outputfile2,2)
outputfolder = os.path.dirname(outputfile2)
if not os.path.exists(outputfolder):
os.makedirs(outputfolder)
except Exception as e:
logger.error("Can't make dir: %s" % outputfolder)
raise(e)
if position <= 0:
if outputfile2 != os.path.abspath(inputfile):
shutil.copyfile(os.path.abspath(inputfile), outputfile2)
with open(outputfile1, "wb") as fout:
fout.write(b'')
else:
inputfilesize = getFileSize(inputfile)
if position >= inputfilesize:
if outputfile1 != os.path.abspath(inputfile):
shutil.copyfile(os.path.abspath(inputfile), outputfile1)
with open(outputfile2, "wb") as fout:
fout.write(b'')
else:
try:
tempdir = tempfile.mkdtemp()
tempfile1 = os.path.join(tempdir, "file1.bin")
tempfile2 = os.path.join(tempdir, "file2.bin")
with open(inputfile, "rb") as fin:
content1 = fin.read(position)
with open(tempfile1, "wb") as fout1:
fout1.write(content1)
content2 = fin.read(inputfilesize - position)
with open(tempfile2, "wb") as fout2:
fout2.write(content2)
shutil.copyfile(tempfile1, outputfile1)
shutil.copyfile(tempfile2, outputfile2)
except Exception as e:
logger.error("Split file failed")
raise(e)
finally:
if os.path.exists(tempdir):
shutil.rmtree(tempdir)
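# Minimal usage sketch, assuming a readable input file named fw.bin:
#
#   splitFile("fw.bin", GetPositionValue("64K"))
#
# produces fw.bin1 (the first 65536 bytes) and fw.bin2 (the remainder) next
# to the input; a position of 0 yields an empty first file, and a position at
# or past the input size yields an empty second file.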
def main():
args = parser.parse_args()
status = 0
logger = logging.getLogger('Split')
if args.quiet:
logger.setLevel(logging.CRITICAL)
if args.verbose:
logger.setLevel(logging.DEBUG)
lh = logging.StreamHandler(sys.stdout)
lf = logging.Formatter("%(levelname)-8s: %(message)s")
lh.setFormatter(lf)
logger.addHandler(lh)
try:
position = GetPositionValue(args.position)
splitFile(args.inputfile, position, args.output,
args.firstfile, args.secondfile)
except Exception as e:
status = 1
return status
if __name__ == "__main__":
sys.exit(main())
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/Split/Split.py
|
## @file
#
# Convert an AML file to a .c file containing the AML bytecode stored in a
# C array.
# By default, "Tables\Dsdt.aml" will generate "Tables\Dsdt.c".
# "Tables\Dsdt.c" will contain a C array named "dsdt_aml_code" that contains
# the AML bytecode.
#
# Copyright (c) 2020, ARM Limited. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
import argparse
import Common.EdkLogger as EdkLogger
from Common.BuildToolError import *
import sys
import os
__description__ = """
Convert an AML file to a .c file containing the AML bytecode stored in a C
array. By default, Tables\Dsdt.aml will generate Tables\Dsdt.c.
Tables\Dsdt.c will contain a C array named "dsdt_aml_code" that contains
the AML bytecode.
"""
## Parse the command line arguments.
#
# @retval An argparse.Namespace instance, containing parsed values.
#
def ParseArgs():
# Initialize the parser.
Parser = argparse.ArgumentParser(description=__description__)
# Define the possible arguments.
Parser.add_argument(dest="InputFile",
help="Path to an input AML file to generate a .c file from.")
Parser.add_argument("-o", "--out-dir", dest="OutDir",
help="Output directory where the .c file will be generated. Default is the input file's directory.")
# Parse the input arguments.
Args = Parser.parse_args()
SplitInputName = ""
if not os.path.exists(Args.InputFile):
EdkLogger.error(__file__, FILE_OPEN_FAILURE,
ExtraData=Args.InputFile)
return None
else:
with open(Args.InputFile, "rb") as fIn:
Signature = str(fIn.read(4))
if ("DSDT" not in Signature) and ("SSDT" not in Signature):
EdkLogger.info("Invalid file type. File does not have a valid DSDT or SSDT signature: {}".format(Args.InputFile))
return None
# Get the basename of the input file.
SplitInputName = os.path.splitext(Args.InputFile)
BaseName = os.path.basename(SplitInputName[0])
# If no output directory is specified, output to the input directory.
if not Args.OutDir:
Args.OutputFile = os.path.join(os.path.dirname(Args.InputFile),
BaseName + ".c")
else:
if not os.path.exists(Args.OutDir):
os.mkdir(Args.OutDir)
Args.OutputFile = os.path.join(Args.OutDir, BaseName + ".c")
Args.BaseName = BaseName
return Args
## Convert an AML file to a .c file containing the AML bytecode stored
# in a C array.
#
# @param InputFile Path to the input AML file.
# @param OutputFile Path to the output .c file to generate.
# @param BaseName Base name of the input file.
# This is also the name of the generated .c file.
#
def AmlToC(InputFile, OutputFile, BaseName):
ArrayName = BaseName.lower() + "_aml_code"
FileHeader =\
"""
// This file has been generated from:
// -Python script: {}
// -Input AML file: {}
"""
with open(InputFile, "rb") as fIn, open(OutputFile, "w") as fOut:
# Write header.
fOut.write(FileHeader.format(os.path.abspath(__file__), os.path.abspath(InputFile)))
# Write the array and its content.
fOut.write("unsigned char {}[] = {{\n ".format(ArrayName))
cnt = 0
byte = fIn.read(1)
while len(byte) != 0:
fOut.write("0x{0:02X}, ".format(ord(byte)))
cnt += 1
if (cnt % 8) == 0:
fOut.write("\n ")
byte = fIn.read(1)
fOut.write("\n};\n")
## Main method
#
# This method:
# 1- Initialize an EdkLogger instance.
# 2- Parses the input arguments.
# 3- Converts an AML file to a .c file containing the AML bytecode stored
# in a C array.
#
# @retval 0 Success.
# @retval 1 Error.
#
def Main():
# Initialize an EdkLogger instance.
EdkLogger.Initialize()
try:
# Parse the input arguments.
CommandArguments = ParseArgs()
if not CommandArguments:
return 1
# Convert an AML file to a .c file containing the AML bytecode stored
# in a C array.
AmlToC(CommandArguments.InputFile, CommandArguments.OutputFile, CommandArguments.BaseName)
except Exception as e:
print(e)
return 1
return 0
if __name__ == '__main__':
r = Main()
# 0-127 is a safe return range, and 1 is a standard default error
if r < 0 or r > 127: r = 1
sys.exit(r)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/AmlToC/AmlToC.py
|
## @file
# This file is used to define the Firmware Storage Format.
#
# Copyright (c) 2021-, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/FirmwareStorageFormat/__init__.py
|
## @file
# This file is used to define the common C struct and functions.
#
# Copyright (c) 2021-, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
from ctypes import *
import uuid
# ZeroGuid = uuid.UUID('{00000000-0000-0000-0000-000000000000}')
# EFI_FIRMWARE_FILE_SYSTEM2_GUID = uuid.UUID('{8C8CE578-8A3D-4f1c-9935-896185C32DD3}')
# EFI_FIRMWARE_FILE_SYSTEM3_GUID = uuid.UUID('{5473C07A-3DCB-4dca-BD6F-1E9689E7349A}')
# EFI_FFS_VOLUME_TOP_FILE_GUID = uuid.UUID('{1BA0062E-C779-4582-8566-336AE8F78F09}')
EFI_FIRMWARE_FILE_SYSTEM2_GUID = uuid.UUID("8c8ce578-8a3d-4f1c-9935-896185c32dd3")
EFI_FIRMWARE_FILE_SYSTEM2_GUID_BYTE = b'x\xe5\x8c\x8c=\x8a\x1cO\x995\x89a\x85\xc3-\xd3'
# EFI_FIRMWARE_FILE_SYSTEM2_GUID_BYTE = EFI_FIRMWARE_FILE_SYSTEM2_GUID.bytes
EFI_FIRMWARE_FILE_SYSTEM3_GUID = uuid.UUID("5473C07A-3DCB-4dca-BD6F-1E9689E7349A")
# EFI_FIRMWARE_FILE_SYSTEM3_GUID_BYTE = b'x\xe5\x8c\x8c=\x8a\x1cO\x995\x89a\x85\xc3-\xd3'
EFI_FIRMWARE_FILE_SYSTEM3_GUID_BYTE = b'z\xc0sT\xcb=\xcaM\xbdo\x1e\x96\x89\xe74\x9a'
EFI_SYSTEM_NVDATA_FV_GUID = uuid.UUID("fff12b8d-7696-4c8b-a985-2747075b4f50")
EFI_SYSTEM_NVDATA_FV_GUID_BYTE = b"\x8d+\xf1\xff\x96v\x8bL\xa9\x85'G\x07[OP"
EFI_FFS_VOLUME_TOP_FILE_GUID = uuid.UUID("1ba0062e-c779-4582-8566-336ae8f78f09")
EFI_FFS_VOLUME_TOP_FILE_GUID_BYTE = b'.\x06\xa0\x1by\xc7\x82E\x85f3j\xe8\xf7\x8f\t'
ZEROVECTOR_BYTE = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
PADVECTOR = uuid.UUID("ffffffff-ffff-ffff-ffff-ffffffffffff")
FVH_SIGNATURE = b'_FVH'
#Alignment
SECTION_COMMON_ALIGNMENT = 4
FFS_COMMON_ALIGNMENT = 8
class GUID(Structure):
_pack_ = 1
_fields_ = [
('Guid1', c_uint32),
('Guid2', c_uint16),
('Guid3', c_uint16),
('Guid4', ARRAY(c_uint8, 8)),
]
def from_list(self, listformat: list) -> None:
self.Guid1 = listformat[0]
self.Guid2 = listformat[1]
self.Guid3 = listformat[2]
for i in range(8):
self.Guid4[i] = listformat[i+3]
def __cmp__(self, otherguid) -> bool:
if not isinstance(otherguid, GUID):
return 'Input is not a GUID instance!'
rt = False
if self.Guid1 == otherguid.Guid1 and self.Guid2 == otherguid.Guid2 and self.Guid3 == otherguid.Guid3:
rt = True
for i in range(8):
rt = rt & (self.Guid4[i] == otherguid.Guid4[i])
return rt
def ModifyGuidFormat(target_guid: str) -> GUID:
target_guid = target_guid.replace('-', '')
target_list = []
start = [0,8,12,16,18,20,22,24,26,28,30]
end = [8,12,16,18,20,22,24,26,28,30,32]
num = len(start)
for pos in range(num):
new_value = int(target_guid[start[pos]:end[pos]], 16)
target_list.append(new_value)
new_format = GUID()
new_format.from_list(target_list)
return new_format
# Get data from ctypes to bytes.
def struct2stream(s) -> bytes:
length = sizeof(s)
p = cast(pointer(s), POINTER(c_char * length))
return p.contents.raw
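# Round-trip sketch: ModifyGuidFormat parses a registry-format GUID string
# into the packed GUID struct, and struct2stream re-serializes it to the
# 16-byte little-endian wire form used in firmware volumes, e.g.
#
#   raw = struct2stream(ModifyGuidFormat("8c8ce578-8a3d-4f1c-9935-896185c32dd3"))
#   assert raw == EFI_FIRMWARE_FILE_SYSTEM2_GUID_BYTE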
def GetPadSize(Size: int, alignment: int) -> int:
if Size % alignment == 0:
return 0
Pad_Size = alignment - Size % alignment
return Pad_Size
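# e.g. GetPadSize(0x25, FFS_COMMON_ALIGNMENT) == 3, since the next 8-byte
# boundary after 0x25 is 0x28; sizes already on the boundary return 0.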
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/FirmwareStorageFormat/Common.py
|
## @file
# This file is used to define the FV Header C Struct.
#
# Copyright (c) 2021-, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
from struct import *
from ctypes import *
from FirmwareStorageFormat.Common import *
class EFI_FV_BLOCK_MAP_ENTRY(Structure):
_pack_ = 1
_fields_ = [
('NumBlocks', c_uint32),
('Length', c_uint32),
]
class EFI_FIRMWARE_VOLUME_HEADER(Structure):
_fields_ = [
('ZeroVector', ARRAY(c_uint8, 16)),
('FileSystemGuid', GUID),
('FvLength', c_uint64),
('Signature', c_uint32),
('Attributes', c_uint32),
('HeaderLength', c_uint16),
('Checksum', c_uint16),
('ExtHeaderOffset', c_uint16),
('Reserved', c_uint8),
('Revision', c_uint8),
('BlockMap', ARRAY(EFI_FV_BLOCK_MAP_ENTRY, 1)),
]
def Refine_FV_Header(nums):
class EFI_FIRMWARE_VOLUME_HEADER(Structure):
_fields_ = [
('ZeroVector', ARRAY(c_uint8, 16)),
('FileSystemGuid', GUID),
('FvLength', c_uint64),
('Signature', c_uint32),
('Attributes', c_uint32),
('HeaderLength', c_uint16),
('Checksum', c_uint16),
('ExtHeaderOffset', c_uint16),
('Reserved', c_uint8),
('Revision', c_uint8),
('BlockMap', ARRAY(EFI_FV_BLOCK_MAP_ENTRY, nums)),
]
return EFI_FIRMWARE_VOLUME_HEADER
class EFI_FIRMWARE_VOLUME_EXT_HEADER(Structure):
_fields_ = [
('FvName', GUID),
('ExtHeaderSize', c_uint32)
]
class EFI_FIRMWARE_VOLUME_EXT_ENTRY(Structure):
_fields_ = [
('ExtEntrySize', c_uint16),
('ExtEntryType', c_uint16)
]
class EFI_FIRMWARE_VOLUME_EXT_ENTRY_OEM_TYPE_0(Structure):
_fields_ = [
('Hdr', EFI_FIRMWARE_VOLUME_EXT_ENTRY),
('TypeMask', c_uint32)
]
class EFI_FIRMWARE_VOLUME_EXT_ENTRY_OEM_TYPE(Structure):
_fields_ = [
('Hdr', EFI_FIRMWARE_VOLUME_EXT_ENTRY),
('TypeMask', c_uint32),
('Types', ARRAY(GUID, 1))
]
def Refine_FV_EXT_ENTRY_OEM_TYPE_Header(nums: int) -> EFI_FIRMWARE_VOLUME_EXT_ENTRY_OEM_TYPE:
class EFI_FIRMWARE_VOLUME_EXT_ENTRY_OEM_TYPE(Structure):
_fields_ = [
('Hdr', EFI_FIRMWARE_VOLUME_EXT_ENTRY),
('TypeMask', c_uint32),
('Types', ARRAY(GUID, nums))
]
    return EFI_FIRMWARE_VOLUME_EXT_ENTRY_OEM_TYPE
class EFI_FIRMWARE_VOLUME_EXT_ENTRY_GUID_TYPE_0(Structure):
_fields_ = [
('Hdr', EFI_FIRMWARE_VOLUME_EXT_ENTRY),
('FormatType', GUID)
]
class EFI_FIRMWARE_VOLUME_EXT_ENTRY_GUID_TYPE(Structure):
_fields_ = [
('Hdr', EFI_FIRMWARE_VOLUME_EXT_ENTRY),
('FormatType', GUID),
('Data', ARRAY(c_uint8, 1))
]
def Refine_FV_EXT_ENTRY_GUID_TYPE_Header(nums: int) -> EFI_FIRMWARE_VOLUME_EXT_ENTRY_GUID_TYPE:
class EFI_FIRMWARE_VOLUME_EXT_ENTRY_GUID_TYPE(Structure):
_fields_ = [
('Hdr', EFI_FIRMWARE_VOLUME_EXT_ENTRY),
('FormatType', GUID),
('Data', ARRAY(c_uint8, nums))
]
    return EFI_FIRMWARE_VOLUME_EXT_ENTRY_GUID_TYPE
class EFI_FIRMWARE_VOLUME_EXT_ENTRY_USED_SIZE_TYPE(Structure):
_fields_ = [
('Hdr', EFI_FIRMWARE_VOLUME_EXT_ENTRY),
('UsedSize', c_uint32)
]
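# Sizing sketch (illustrative; not part of the original module): Refine_FV_Header
# rebuilds the volume header class so BlockMap holds a caller-chosen number of
# 8-byte entries, and Refine_FV_EXT_ENTRY_OEM_TYPE_Header returns a class whose
# Types array grows by one 16-byte GUID per requested entry.
if __name__ == '__main__':
    assert sizeof(Refine_FV_Header(2)) == \
        sizeof(EFI_FIRMWARE_VOLUME_HEADER) + sizeof(EFI_FV_BLOCK_MAP_ENTRY)
    assert sizeof(Refine_FV_EXT_ENTRY_OEM_TYPE_Header(2)) == \
        sizeof(EFI_FIRMWARE_VOLUME_EXT_ENTRY) + sizeof(c_uint32) + 2 * sizeof(GUID)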
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/FirmwareStorageFormat/FvHeader.py
|
## @file
# This file is used to define the Ffs Header C Struct.
#
# Copyright (c) 2021-, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
from struct import *
from ctypes import *
from FirmwareStorageFormat.Common import *
EFI_FFS_FILE_HEADER_LEN = 24
EFI_FFS_FILE_HEADER2_LEN = 32
class CHECK_SUM(Structure):
_pack_ = 1
_fields_ = [
('Header', c_uint8),
('File', c_uint8),
]
class EFI_FFS_INTEGRITY_CHECK(Union):
_pack_ = 1
_fields_ = [
('Checksum', CHECK_SUM),
('Checksum16', c_uint16),
]
class EFI_FFS_FILE_HEADER(Structure):
_pack_ = 1
_fields_ = [
('Name', GUID),
('IntegrityCheck', EFI_FFS_INTEGRITY_CHECK),
('Type', c_uint8),
('Attributes', c_uint8),
('Size', ARRAY(c_uint8, 3)),
('State', c_uint8),
]
@property
def FFS_FILE_SIZE(self) -> int:
return self.Size[0] | self.Size[1] << 8 | self.Size[2] << 16
@property
def HeaderLength(self) -> int:
return 24
class EFI_FFS_FILE_HEADER2(Structure):
_pack_ = 1
_fields_ = [
('Name', GUID),
('IntegrityCheck', EFI_FFS_INTEGRITY_CHECK),
('Type', c_uint8),
('Attributes', c_uint8),
('Size', ARRAY(c_uint8, 3)),
('State', c_uint8),
('ExtendedSize', c_uint64),
]
@property
def FFS_FILE_SIZE(self) -> int:
return self.ExtendedSize
@property
def HeaderLength(self) -> int:
return 32
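# Decoding sketch (illustrative; not part of the original module): the 24-bit
# Size field is stored little-endian across three bytes, and both header
# variants match the *_LEN constants defined above.
if __name__ == '__main__':
    FfsHeader = EFI_FFS_FILE_HEADER()
    FfsHeader.Size[0], FfsHeader.Size[1], FfsHeader.Size[2] = 0x45, 0x23, 0x01
    assert FfsHeader.FFS_FILE_SIZE == 0x012345
    assert sizeof(EFI_FFS_FILE_HEADER) == EFI_FFS_FILE_HEADER_LEN
    assert sizeof(EFI_FFS_FILE_HEADER2) == EFI_FFS_FILE_HEADER2_LEN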
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/FirmwareStorageFormat/FfsFileHeader.py
|
## @file
# This file is used to define the Section Header C Struct.
#
# Copyright (c) 2021-, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
from struct import *
from ctypes import *
from FirmwareStorageFormat.Common import *
EFI_COMMON_SECTION_HEADER_LEN = 4
EFI_COMMON_SECTION_HEADER2_LEN = 8
class EFI_COMMON_SECTION_HEADER(Structure):
_pack_ = 1
_fields_ = [
('Size', ARRAY(c_uint8, 3)),
('Type', c_uint8),
]
@property
def SECTION_SIZE(self) -> int:
return self.Size[0] | self.Size[1] << 8 | self.Size[2] << 16
def Common_Header_Size(self) -> int:
return 4
class EFI_COMMON_SECTION_HEADER2(Structure):
_pack_ = 1
_fields_ = [
('Size', ARRAY(c_uint8, 3)),
('Type', c_uint8),
('ExtendedSize', c_uint32),
]
@property
def SECTION_SIZE(self) -> int:
return self.ExtendedSize
def Common_Header_Size(self) -> int:
return 8
class EFI_COMPRESSION_SECTION(Structure):
_pack_ = 1
_fields_ = [
('UncompressedLength', c_uint32),
('CompressionType', c_uint8),
]
def ExtHeaderSize(self) -> int:
return 5
class EFI_FREEFORM_SUBTYPE_GUID_SECTION(Structure):
_pack_ = 1
_fields_ = [
('SubTypeGuid', GUID),
]
def ExtHeaderSize(self) -> int:
return 16
class EFI_GUID_DEFINED_SECTION(Structure):
_pack_ = 1
_fields_ = [
('SectionDefinitionGuid', GUID),
('DataOffset', c_uint16),
('Attributes', c_uint16),
]
def ExtHeaderSize(self) -> int:
return 20
def Get_USER_INTERFACE_Header(nums: int):
class EFI_SECTION_USER_INTERFACE(Structure):
_pack_ = 1
_fields_ = [
('FileNameString', ARRAY(c_uint16, nums)),
]
def ExtHeaderSize(self) -> int:
return 2 * nums
def GetUiString(self) -> str:
UiString = ''
for i in range(nums):
if self.FileNameString[i]:
UiString += chr(self.FileNameString[i])
return UiString
return EFI_SECTION_USER_INTERFACE
def Get_VERSION_Header(nums: int):
class EFI_SECTION_VERSION(Structure):
_pack_ = 1
_fields_ = [
('BuildNumber', c_uint16),
('VersionString', ARRAY(c_uint16, nums)),
]
def ExtHeaderSize(self) -> int:
return 2 * (nums+1)
def GetVersionString(self) -> str:
VersionString = ''
for i in range(nums):
if self.VersionString[i]:
VersionString += chr(self.VersionString[i])
return VersionString
return EFI_SECTION_VERSION
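# Usage sketch (illustrative; not part of the original module): a UI section
# stores its file name as UCS-2 code units, which GetUiString reassembles into
# a Python str; ExtHeaderSize is two bytes per code unit.
if __name__ == '__main__':
    UiSectionClass = Get_USER_INTERFACE_Header(3)
    UiSection = UiSectionClass()
    for Index, Char in enumerate('FVM'):
        UiSection.FileNameString[Index] = ord(Char)
    assert UiSection.GetUiString() == 'FVM'
    assert UiSection.ExtHeaderSize() == 6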
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/FirmwareStorageFormat/SectionHeader.py
|
## @file
# Install distribution package.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
MkPkg
'''
##
# Import Modules
#
from os import remove
from os import getcwd
from os import chdir
import os.path
from sys import stdin
from sys import platform
from traceback import format_exc
from platform import python_version
from hashlib import md5
from time import strftime
from time import localtime
from uuid import uuid4
from Logger import StringTable as ST
from Logger.ToolError import OPTION_UNKNOWN_ERROR
from Logger.ToolError import OPTION_VALUE_INVALID
from Logger.ToolError import ABORT_ERROR
from Logger.ToolError import UPT_REPKG_ERROR
from Logger.ToolError import CODE_ERROR
from Logger.ToolError import FatalError
from Logger.ToolError import FILE_NOT_FOUND
import Logger.Log as Logger
from Xml.XmlParser import DistributionPackageXml
from Xml.IniToXml import IniToXml
from Library import GlobalData
from Library.ParserValidate import IsValidPath
from Core.DistributionPackageClass import DistributionPackageClass
from Core.PackageFile import PackageFile
from Common.MultipleWorkspace import MultipleWorkspace as mws
## CheckForExistingDp
#
# Check whether a DP file with the same name already exists
# @param Path: The path to be checked
#
def CheckForExistingDp(Path):
if os.path.exists(Path):
Logger.Info(ST.MSG_DISTRIBUTION_PACKAGE_FILE_EXISTS % Path)
Input = stdin.readline()
Input = Input.replace('\r', '').replace('\n', '')
if Input.upper() != "Y":
Logger.Error("\nMkPkg", ABORT_ERROR, ST.ERR_USER_ABORT, RaiseError=True)
## Tool entrance method
#
# This method mainly dispatches specific methods per the command line options.
# If no error is found, it returns zero so the caller of this tool can tell
# whether it executed successfully.
#
#
def Main(Options = None):
if Options is None:
Logger.Error("\nMkPkg", OPTION_UNKNOWN_ERROR, ST.ERR_OPTION_NOT_FOUND)
try:
DataBase = GlobalData.gDB
ContentFileClosed = True
WorkspaceDir = GlobalData.gWORKSPACE
#
# Init PackFileToCreate
#
if not Options.PackFileToCreate:
Logger.Error("\nMkPkg", OPTION_UNKNOWN_ERROR, ST.ERR_OPTION_NOT_FOUND)
#
# Handle if the distribution package file already exists
#
CheckForExistingDp(Options.PackFileToCreate)
#
# Check package file existing and valid
#
CheckFileList('.DEC', Options.PackageFileList, ST.ERR_INVALID_PACKAGE_NAME, ST.ERR_INVALID_PACKAGE_PATH)
#
# Check module file existing and valid
#
CheckFileList('.INF', Options.ModuleFileList, ST.ERR_INVALID_MODULE_NAME, ST.ERR_INVALID_MODULE_PATH)
#
# Get list of files that installed with RePackage attribute available
#
RePkgDict = DataBase.GetRePkgDict()
ContentFile = PackageFile(GlobalData.gCONTENT_FILE, "w")
ContentFileClosed = False
#
# Add temp distribution header
#
if Options.PackageInformationDataFile:
XmlFile = IniToXml(Options.PackageInformationDataFile)
DistPkg = DistributionPackageXml().FromXml(XmlFile)
remove(XmlFile)
#
# add distribution level tool/misc files
            # before packing, the current dir should be the workspace dir;
            # otherwise the full path would be stored in the pack file
#
Cwd = getcwd()
chdir(WorkspaceDir)
ToolObject = DistPkg.Tools
MiscObject = DistPkg.MiscellaneousFiles
FileList = []
if ToolObject:
FileList += ToolObject.GetFileList()
if MiscObject:
FileList += MiscObject.GetFileList()
for FileObject in FileList:
#
# If you have unicode file names, please convert them to byte
# strings in your desired encoding before passing them to
# write().
#
FromFile = os.path.normpath(FileObject.GetURI()).encode('utf_8')
FileFullPath = mws.join(WorkspaceDir, FromFile)
if FileFullPath in RePkgDict:
(DpGuid, DpVersion, DpName, Repackage) = RePkgDict[FileFullPath]
if not Repackage:
Logger.Error("\nMkPkg",
UPT_REPKG_ERROR,
ST.ERR_UPT_REPKG_ERROR,
ExtraData=ST.MSG_REPKG_CONFLICT %\
(FileFullPath, DpGuid, DpVersion, DpName)
)
else:
DistPkg.Header.RePackage = True
ContentFile.PackFile(FromFile)
chdir(Cwd)
#
# Add init dp information
#
else:
DistPkg = DistributionPackageClass()
DistPkg.Header.Name = 'Distribution Package'
DistPkg.Header.Guid = str(uuid4())
DistPkg.Header.Version = '1.0'
DistPkg.GetDistributionPackage(WorkspaceDir, Options.PackageFileList, \
Options.ModuleFileList)
FileList, MetaDataFileList = DistPkg.GetDistributionFileList()
for File in FileList + MetaDataFileList:
FileFullPath = os.path.normpath(os.path.join(WorkspaceDir, File))
#
# check whether file was included in a distribution that can not
# be repackaged
#
if FileFullPath in RePkgDict:
(DpGuid, DpVersion, DpName, Repackage) = RePkgDict[FileFullPath]
if not Repackage:
Logger.Error("\nMkPkg",
UPT_REPKG_ERROR,
ST.ERR_UPT_REPKG_ERROR,
ExtraData = \
ST.MSG_REPKG_CONFLICT %(FileFullPath, DpName, \
DpGuid, DpVersion)
)
else:
DistPkg.Header.RePackage = True
Cwd = getcwd()
chdir(WorkspaceDir)
ContentFile.PackFiles(FileList)
chdir(Cwd)
Logger.Verbose(ST.MSG_COMPRESS_DISTRIBUTION_PKG)
ContentFile.Close()
ContentFileClosed = True
#
# Add Md5Signature
#
        with open(str(ContentFile), 'rb') as SignedContent:
            DistPkg.Header.Signature = md5(SignedContent.read()).hexdigest()
#
# Add current Date
#
DistPkg.Header.Date = str(strftime("%Y-%m-%dT%H:%M:%S", localtime()))
#
# Finish final dp file
#
DistPkgFile = PackageFile(Options.PackFileToCreate, "w")
DistPkgFile.PackFile(str(ContentFile))
DistPkgXml = DistributionPackageXml()
DistPkgFile.PackData(DistPkgXml.ToXml(DistPkg), GlobalData.gDESC_FILE)
DistPkgFile.Close()
Logger.Quiet(ST.MSG_FINISH)
ReturnCode = 0
except FatalError as XExcept:
ReturnCode = XExcept.args[0]
if Logger.GetLevel() <= Logger.DEBUG_9:
Logger.Quiet(ST.MSG_PYTHON_ON % \
(python_version(), platform) + format_exc())
except KeyboardInterrupt:
ReturnCode = ABORT_ERROR
if Logger.GetLevel() <= Logger.DEBUG_9:
Logger.Quiet(ST.MSG_PYTHON_ON % \
(python_version(), platform) + format_exc())
except OSError:
pass
except:
Logger.Error(
"\nMkPkg",
CODE_ERROR,
ST.ERR_UNKNOWN_FATAL_CREATING_ERR % \
Options.PackFileToCreate,
ExtraData=ST.MSG_SEARCH_FOR_HELP % ST.MSG_EDKII_MAIL_ADDR,
RaiseError=False
)
Logger.Quiet(ST.MSG_PYTHON_ON % \
(python_version(), platform) + format_exc())
ReturnCode = CODE_ERROR
finally:
if os.path.exists(GlobalData.gCONTENT_FILE):
if not ContentFileClosed:
ContentFile.Close()
os.remove(GlobalData.gCONTENT_FILE)
return ReturnCode
## CheckFileList
#
# @param QualifiedExt: QualifiedExt
# @param FileList: FileList
# @param ErrorStringExt: ErrorStringExt
# @param ErrorStringFullPath: ErrorStringFullPath
#
def CheckFileList(QualifiedExt, FileList, ErrorStringExt, ErrorStringFullPath):
if not FileList:
return
WorkspaceDir = GlobalData.gWORKSPACE
WorkspaceDir = os.path.normpath(WorkspaceDir)
for Item in FileList:
Ext = os.path.splitext(Item)[1]
if Ext.upper() != QualifiedExt.upper():
Logger.Error("\nMkPkg", OPTION_VALUE_INVALID, \
ErrorStringExt % Item)
Item = os.path.normpath(Item)
Path = mws.join(WorkspaceDir, Item)
if not os.path.exists(Path):
Logger.Error("\nMkPkg", FILE_NOT_FOUND, ST.ERR_NOT_FOUND % Item)
elif Item == Path:
Logger.Error("\nMkPkg", OPTION_VALUE_INVALID,
ErrorStringFullPath % Item)
elif not IsValidPath(Item, WorkspaceDir):
Logger.Error("\nMkPkg", OPTION_VALUE_INVALID, \
ErrorStringExt % Item)
if not os.path.split(Item)[0]:
Logger.Error("\nMkPkg", OPTION_VALUE_INVALID, \
ST.ERR_INVALID_METAFILE_PATH % Item)
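# Minimal sketch (illustrative; standalone, not part of the tool flow): the
# extension test CheckFileList applies is case-insensitive, so '.dec' and
# '.DEC' both qualify while other extensions are rejected. The sample paths
# are hypothetical.
if __name__ == '__main__':
    for Sample, Expected in (('Pkg/Pkg.dec', True),
                             ('Pkg/Pkg.DEC', True),
                             ('Mod/Mod.inf', False)):
        assert (os.path.splitext(Sample)[1].upper() == '.DEC') is Expected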
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/MkPkg.py
|
## @file
# Replace distribution package.
#
# Copyright (c) 2014 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
"""
Replace a distribution package
"""
##
# Import Modules
#
from shutil import rmtree
from traceback import format_exc
from platform import python_version
from sys import platform
from Logger import StringTable as ST
from Logger.ToolError import UNKNOWN_ERROR
from Logger.ToolError import FatalError
from Logger.ToolError import ABORT_ERROR
from Logger.ToolError import CODE_ERROR
from Logger.ToolError import UPT_ALREADY_INSTALLED_ERROR
import Logger.Log as Logger
from Core.DependencyRules import DependencyRules
from Library import GlobalData
from InstallPkg import UnZipDp
from InstallPkg import InstallDp
from RmPkg import GetInstalledDpInfo
from RmPkg import RemoveDist
## Tool entrance method
#
# This method mainly dispatches specific methods per the command line options.
# If no error is found, it returns zero so the caller of this tool can tell
# whether it executed successfully.
#
# @param Options: command Options
#
def Main(Options = None):
ContentZipFile, DistFile = None, None
try:
DataBase = GlobalData.gDB
WorkspaceDir = GlobalData.gWORKSPACE
Dep = DependencyRules(DataBase)
DistPkg, ContentZipFile, DpPkgFileName, DistFile = UnZipDp(WorkspaceDir, Options.PackFileToReplace)
StoredDistFile, OrigDpGuid, OrigDpVersion = GetInstalledDpInfo(Options.PackFileToBeReplaced, \
Dep, DataBase, WorkspaceDir)
#
# check dependency
#
CheckReplaceDpx(Dep, DistPkg, OrigDpGuid, OrigDpVersion)
#
# Remove the old distribution
#
RemoveDist(OrigDpGuid, OrigDpVersion, StoredDistFile, DataBase, WorkspaceDir, Options.Yes)
#
# Install the new distribution
#
InstallDp(DistPkg, DpPkgFileName, ContentZipFile, Options, Dep, WorkspaceDir, DataBase)
ReturnCode = 0
except FatalError as XExcept:
ReturnCode = XExcept.args[0]
if Logger.GetLevel() <= Logger.DEBUG_9:
Logger.Quiet(ST.MSG_PYTHON_ON % (python_version(),
platform) + format_exc())
except KeyboardInterrupt:
ReturnCode = ABORT_ERROR
if Logger.GetLevel() <= Logger.DEBUG_9:
Logger.Quiet(ST.MSG_PYTHON_ON % (python_version(),
platform) + format_exc())
except:
ReturnCode = CODE_ERROR
Logger.Error(
"\nReplacePkg",
CODE_ERROR,
ST.ERR_UNKNOWN_FATAL_REPLACE_ERR % (Options.PackFileToReplace, Options.PackFileToBeReplaced),
ExtraData=ST.MSG_SEARCH_FOR_HELP % ST.MSG_EDKII_MAIL_ADDR,
RaiseError=False
)
Logger.Quiet(ST.MSG_PYTHON_ON % (python_version(),
platform) + format_exc())
finally:
Logger.Quiet(ST.MSG_REMOVE_TEMP_FILE_STARTED)
if DistFile:
DistFile.Close()
if ContentZipFile:
ContentZipFile.Close()
for TempDir in GlobalData.gUNPACK_DIR:
rmtree(TempDir)
GlobalData.gUNPACK_DIR = []
Logger.Quiet(ST.MSG_REMOVE_TEMP_FILE_DONE)
if ReturnCode == 0:
Logger.Quiet(ST.MSG_FINISH)
return ReturnCode
def CheckReplaceDpx(Dep, DistPkg, OrigDpGuid, OrigDpVersion):
NewDpPkgList = []
for PkgInfo in DistPkg.PackageSurfaceArea:
Guid, Version = PkgInfo[0], PkgInfo[1]
NewDpPkgList.append((Guid, Version))
NewDpInfo = "%s %s" % (DistPkg.Header.GetGuid(), DistPkg.Header.GetVersion())
OrigDpInfo = "%s %s" % (OrigDpGuid, OrigDpVersion)
#
# check whether new distribution is already installed and not replacing itself
#
if (NewDpInfo != OrigDpInfo):
if Dep.CheckDpExists(DistPkg.Header.GetGuid(), DistPkg.Header.GetVersion()):
Logger.Error("\nReplacePkg", UPT_ALREADY_INSTALLED_ERROR,
ST.WRN_DIST_PKG_INSTALLED,
ExtraData=ST.MSG_REPLACE_ALREADY_INSTALLED_DP)
#
# check whether the original distribution could be replaced by new distribution
#
Logger.Verbose(ST.MSG_CHECK_DP_FOR_REPLACE%(NewDpInfo, OrigDpInfo))
DepInfoResult = Dep.CheckDpDepexForReplace(OrigDpGuid, OrigDpVersion, NewDpPkgList)
Replaceable = DepInfoResult[0]
if not Replaceable:
Logger.Error("\nReplacePkg", UNKNOWN_ERROR,
ST.ERR_PACKAGE_NOT_MATCH_DEPENDENCY)
#
# check whether new distribution could be installed by dependency rule
#
Logger.Verbose(ST.MSG_CHECK_DP_FOR_INSTALL%str(NewDpInfo))
if not Dep.ReplaceCheckNewDpDepex(DistPkg, OrigDpGuid, OrigDpVersion):
Logger.Error("\nReplacePkg", UNKNOWN_ERROR,
ST.ERR_PACKAGE_NOT_MATCH_DEPENDENCY,
ExtraData=DistPkg.Header.Name)
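# Identity sketch (illustrative; standalone): CheckReplaceDpx compares
# distributions by the combined "<guid> <version>" string, so a new version of
# the same GUID does not count as replacing itself and still goes through the
# already-installed check. The GUID below is hypothetical.
if __name__ == '__main__':
    SampleGuid = '11111111-2222-3333-4444-555555555555'
    assert "%s %s" % (SampleGuid, '1.1') != "%s %s" % (SampleGuid, '1.0')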
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/ReplacePkg.py
|
## @file
#
# This file is for build version number auto generation
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
Build version information
'''
gBUILD_VERSION = "Developer Build based on Revision: Unknown"
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/BuildVersion.py
|
## @file
#
# This file is the main entry for UPT
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
UPT
'''
## import modules
#
import locale
import sys
from imp import reload
encoding = locale.getdefaultlocale()[1]
if encoding and hasattr(sys, 'setdefaultencoding'):
    # Only meaningful on Python 2; sys.setdefaultencoding was removed in Python 3.
    reload(sys)
    sys.setdefaultencoding(encoding)
from Core import FileHook
import os.path
from sys import platform
import platform as pf
from optparse import OptionParser
from traceback import format_exc
from platform import python_version
from Logger import StringTable as ST
import Logger.Log as Logger
from Logger.StringTable import MSG_VERSION
from Logger.StringTable import MSG_DESCRIPTION
from Logger.StringTable import MSG_USAGE
from Logger.ToolError import FILE_NOT_FOUND
from Logger.ToolError import OPTION_MISSING
from Logger.ToolError import FILE_TYPE_MISMATCH
from Logger.ToolError import OPTION_CONFLICT
from Logger.ToolError import FatalError
from Logger.ToolError import UPT_ALREADY_INSTALLED_ERROR
from Common.MultipleWorkspace import MultipleWorkspace as mws
import MkPkg
import InstallPkg
import RmPkg
import InventoryWs
import ReplacePkg
import TestInstall
from Library.Misc import GetWorkspace
from Library import GlobalData
from Core.IpiDb import IpiDatabase
from BuildVersion import gBUILD_VERSION
## CheckConflictOption
#
# CheckConflictOption
#
def CheckConflictOption(Opt):
if (Opt.PackFileToCreate or Opt.PackFileToInstall or Opt.PackFileToRemove or Opt.PackFileToReplace) \
and Opt.InventoryWs:
Logger.Error("UPT", OPTION_CONFLICT, ExtraData=ST.ERR_L_OA_EXCLUSIVE)
elif Opt.PackFileToReplace and (Opt.PackFileToCreate or Opt.PackFileToInstall or Opt.PackFileToRemove):
Logger.Error("UPT", OPTION_CONFLICT, ExtraData=ST.ERR_U_ICR_EXCLUSIVE)
elif (Opt.PackFileToCreate and Opt.PackFileToInstall and Opt.PackFileToRemove):
Logger.Error("UPT", OPTION_CONFLICT, ExtraData=ST.ERR_REQUIRE_I_C_R_OPTION)
elif Opt.PackFileToCreate and Opt.PackFileToInstall:
Logger.Error("UPT", OPTION_CONFLICT, ExtraData=ST.ERR_I_C_EXCLUSIVE)
elif Opt.PackFileToInstall and Opt.PackFileToRemove:
Logger.Error("UPT", OPTION_CONFLICT, ExtraData=ST.ERR_I_R_EXCLUSIVE)
elif Opt.PackFileToCreate and Opt.PackFileToRemove:
Logger.Error("UPT", OPTION_CONFLICT, ExtraData=ST.ERR_C_R_EXCLUSIVE)
elif Opt.TestDistFiles and (Opt.PackFileToCreate or Opt.PackFileToInstall \
or Opt.PackFileToRemove or Opt.PackFileToReplace):
Logger.Error("UPT", OPTION_CONFLICT, ExtraData=ST.ERR_C_R_EXCLUSIVE)
if Opt.CustomPath and Opt.UseGuidedPkgPath:
Logger.Warn("UPT", ST.WARN_CUSTOMPATH_OVERRIDE_USEGUIDEDPATH)
Opt.UseGuidedPkgPath = False
## SetLogLevel
#
def SetLogLevel(Opt):
if Opt.opt_verbose:
Logger.SetLevel(Logger.VERBOSE)
elif Opt.opt_quiet:
Logger.SetLevel(Logger.QUIET + 1)
elif Opt.debug_level is not None:
if Opt.debug_level < 0 or Opt.debug_level > 9:
Logger.Warn("UPT", ST.ERR_DEBUG_LEVEL)
Logger.SetLevel(Logger.INFO)
else:
Logger.SetLevel(Opt.debug_level + 1)
elif Opt.opt_slient:
Logger.SetLevel(Logger.SILENT)
else:
Logger.SetLevel(Logger.INFO)
## Main
#
# Main
#
def Main():
Logger.Initialize()
Parser = OptionParser(version=(MSG_VERSION + ' Build ' + gBUILD_VERSION), description=MSG_DESCRIPTION,
prog="UPT.exe", usage=MSG_USAGE)
Parser.add_option("-d", "--debug", action="store", type="int", dest="debug_level", help=ST.HLP_PRINT_DEBUG_INFO)
Parser.add_option("-v", "--verbose", action="store_true", dest="opt_verbose",
help=ST.HLP_PRINT_INFORMATIONAL_STATEMENT)
Parser.add_option("-s", "--silent", action="store_true", dest="opt_slient", help=ST.HLP_RETURN_NO_DISPLAY)
Parser.add_option("-q", "--quiet", action="store_true", dest="opt_quiet", help=ST.HLP_RETURN_AND_DISPLAY)
Parser.add_option("-i", "--install", action="append", type="string", dest="Install_Distribution_Package_File",
help=ST.HLP_SPECIFY_PACKAGE_NAME_INSTALL)
Parser.add_option("-c", "--create", action="store", type="string", dest="Create_Distribution_Package_File",
help=ST.HLP_SPECIFY_PACKAGE_NAME_CREATE)
Parser.add_option("-r", "--remove", action="store", type="string", dest="Remove_Distribution_Package_File",
help=ST.HLP_SPECIFY_PACKAGE_NAME_REMOVE)
Parser.add_option("-t", "--template", action="store", type="string", dest="Package_Information_Data_File",
help=ST.HLP_SPECIFY_TEMPLATE_NAME_CREATE)
Parser.add_option("-p", "--dec-filename", action="append", type="string", dest="EDK2_DEC_Filename",
help=ST.HLP_SPECIFY_DEC_NAME_CREATE)
Parser.add_option("-m", "--inf-filename", action="append", type="string", dest="EDK2_INF_Filename",
help=ST.HLP_SPECIFY_INF_NAME_CREATE)
Parser.add_option("-l", "--list", action="store_true", dest="List_Dist_Installed",
help=ST.HLP_LIST_DIST_INSTALLED)
Parser.add_option("-f", "--force", action="store_true", dest="Yes", help=ST.HLP_DISABLE_PROMPT)
Parser.add_option("-n", "--custom-path", action="store_true", dest="CustomPath", help=ST.HLP_CUSTOM_PATH_PROMPT)
Parser.add_option("-x", "--free-lock", action="store_true", dest="SkipLock", help=ST.HLP_SKIP_LOCK_CHECK)
Parser.add_option("-u", "--replace", action="store", type="string", dest="Replace_Distribution_Package_File",
help=ST.HLP_SPECIFY_PACKAGE_NAME_REPLACE)
Parser.add_option("-o", "--original", action="store", type="string", dest="Original_Distribution_Package_File",
help=ST.HLP_SPECIFY_PACKAGE_NAME_TO_BE_REPLACED)
Parser.add_option("--use-guided-paths", action="store_true", dest="Use_Guided_Paths", help=ST.HLP_USE_GUIDED_PATHS)
Parser.add_option("-j", "--test-install", action="append", type="string",
dest="Test_Install_Distribution_Package_Files", help=ST.HLP_TEST_INSTALL)
Opt = Parser.parse_args()[0]
Var2Var = [
("PackageInformationDataFile", Opt.Package_Information_Data_File),
("PackFileToInstall", Opt.Install_Distribution_Package_File),
("PackFileToCreate", Opt.Create_Distribution_Package_File),
("PackFileToRemove", Opt.Remove_Distribution_Package_File),
("PackageFileList", Opt.EDK2_DEC_Filename),
("ModuleFileList", Opt.EDK2_INF_Filename),
("InventoryWs", Opt.List_Dist_Installed),
("PackFileToReplace", Opt.Replace_Distribution_Package_File),
("PackFileToBeReplaced", Opt.Original_Distribution_Package_File),
("UseGuidedPkgPath", Opt.Use_Guided_Paths),
("TestDistFiles", Opt.Test_Install_Distribution_Package_Files)
]
for Var in Var2Var:
setattr(Opt, Var[0], Var[1])
try:
GlobalData.gWORKSPACE, GlobalData.gPACKAGE_PATH = GetWorkspace()
except FatalError as XExcept:
if Logger.GetLevel() <= Logger.DEBUG_9:
Logger.Quiet(ST.MSG_PYTHON_ON % (python_version(), platform) + format_exc())
return XExcept.args[0]
# Support WORKSPACE is a long path
# Only works for windows system
if pf.system() == 'Windows':
Vol = 'B:'
for Index in range(90, 65, -1):
Vol = chr(Index) + ':'
if not os.path.isdir(Vol):
os.system('subst %s "%s"' % (Vol, GlobalData.gWORKSPACE))
break
GlobalData.gWORKSPACE = '%s\\' % Vol
WorkspaceDir = GlobalData.gWORKSPACE
SetLogLevel(Opt)
Mgr = FileHook.RecoverMgr(WorkspaceDir)
FileHook.SetRecoverMgr(Mgr)
GlobalData.gDB = IpiDatabase(os.path.normpath(os.path.join(WorkspaceDir, \
"Conf/DistributionPackageDatabase.db")), WorkspaceDir)
GlobalData.gDB.InitDatabase(Opt.SkipLock)
#
# Make sure the Db will get closed correctly
#
try:
ReturnCode = 0
CheckConflictOption(Opt)
RunModule = None
if Opt.PackFileToCreate:
if Opt.PackageInformationDataFile:
if not os.path.exists(Opt.PackageInformationDataFile):
if not os.path.exists(os.path.join(WorkspaceDir, Opt.PackageInformationDataFile)):
Logger.Error("\nUPT", FILE_NOT_FOUND, ST.ERR_NO_TEMPLATE_FILE % Opt.PackageInformationDataFile)
else:
Opt.PackageInformationDataFile = os.path.join(WorkspaceDir, Opt.PackageInformationDataFile)
else:
Logger.Error("UPT", OPTION_MISSING, ExtraData=ST.ERR_REQUIRE_T_OPTION)
if not Opt.PackFileToCreate.endswith('.dist'):
Logger.Error("CreatePkg", FILE_TYPE_MISMATCH, ExtraData=ST.ERR_DIST_EXT_ERROR % Opt.PackFileToCreate)
RunModule = MkPkg.Main
elif Opt.PackFileToInstall:
            AbsPath = []
            for Item in Opt.PackFileToInstall:
                if not Item.endswith('.dist'):
                    Logger.Error("InstallPkg", FILE_TYPE_MISMATCH, ExtraData=ST.ERR_DIST_EXT_ERROR % Item)
                DistPath = GetFullPathDist(Item, WorkspaceDir)
                if not DistPath:
                    # GetFullPathDist returns None when the file cannot be located
                    Logger.Error("InstallPkg", FILE_NOT_FOUND, ST.ERR_INSTALL_DIST_NOT_FOUND % Item)
                AbsPath.append(DistPath)
            Opt.PackFileToInstall = AbsPath
setattr(Opt, 'PackageFile', Opt.PackFileToInstall)
RunModule = InstallPkg.Main
elif Opt.PackFileToRemove:
if not Opt.PackFileToRemove.endswith('.dist'):
Logger.Error("RemovePkg", FILE_TYPE_MISMATCH, ExtraData=ST.ERR_DIST_EXT_ERROR % Opt.PackFileToRemove)
head, tail = os.path.split(Opt.PackFileToRemove)
if head or not tail:
Logger.Error("RemovePkg",
FILE_TYPE_MISMATCH,
ExtraData=ST.ERR_DIST_FILENAME_ONLY_FOR_REMOVE % Opt.PackFileToRemove)
setattr(Opt, 'DistributionFile', Opt.PackFileToRemove)
RunModule = RmPkg.Main
elif Opt.InventoryWs:
RunModule = InventoryWs.Main
elif Opt.PackFileToBeReplaced and not Opt.PackFileToReplace:
Logger.Error("ReplacePkg", OPTION_MISSING, ExtraData=ST.ERR_REQUIRE_U_OPTION)
elif Opt.PackFileToReplace:
if not Opt.PackFileToReplace.endswith('.dist'):
Logger.Error("ReplacePkg", FILE_TYPE_MISMATCH, ExtraData=ST.ERR_DIST_EXT_ERROR % Opt.PackFileToReplace)
if not Opt.PackFileToBeReplaced:
Logger.Error("ReplacePkg", OPTION_MISSING, ExtraData=ST.ERR_REQUIRE_O_OPTION)
if not Opt.PackFileToBeReplaced.endswith('.dist'):
Logger.Error("ReplacePkg",
FILE_TYPE_MISMATCH,
ExtraData=ST.ERR_DIST_EXT_ERROR % Opt.PackFileToBeReplaced)
head, tail = os.path.split(Opt.PackFileToBeReplaced)
if head or not tail:
Logger.Error("ReplacePkg",
FILE_TYPE_MISMATCH,
ExtraData=ST.ERR_DIST_FILENAME_ONLY_FOR_REPLACE_ORIG % Opt.PackFileToBeReplaced)
AbsPath = GetFullPathDist(Opt.PackFileToReplace, WorkspaceDir)
if not AbsPath:
Logger.Error("ReplacePkg", FILE_NOT_FOUND, ST.ERR_REPLACE_DIST_NOT_FOUND % Opt.PackFileToReplace)
Opt.PackFileToReplace = AbsPath
RunModule = ReplacePkg.Main
elif Opt.Test_Install_Distribution_Package_Files:
for Dist in Opt.Test_Install_Distribution_Package_Files:
if not Dist.endswith('.dist'):
Logger.Error("TestInstall", FILE_TYPE_MISMATCH, ExtraData=ST.ERR_DIST_EXT_ERROR % Dist)
setattr(Opt, 'DistFiles', Opt.Test_Install_Distribution_Package_Files)
RunModule = TestInstall.Main
else:
Parser.print_usage()
return OPTION_MISSING
ReturnCode = RunModule(Opt)
except FatalError as XExcept:
ReturnCode = XExcept.args[0]
if Logger.GetLevel() <= Logger.DEBUG_9:
Logger.Quiet(ST.MSG_PYTHON_ON % (python_version(), platform) + \
format_exc())
finally:
try:
if ReturnCode != 0 and ReturnCode != UPT_ALREADY_INSTALLED_ERROR:
Logger.Quiet(ST.MSG_RECOVER_START)
GlobalData.gDB.RollBack()
Mgr.rollback()
Logger.Quiet(ST.MSG_RECOVER_DONE)
else:
GlobalData.gDB.Commit()
Mgr.commit()
except Exception:
Logger.Quiet(ST.MSG_RECOVER_FAIL)
GlobalData.gDB.CloseDb()
if pf.system() == 'Windows':
os.system('subst %s /D' % GlobalData.gWORKSPACE.replace('\\', ''))
return ReturnCode
## GetFullPathDist
#
# This function checks for the existence of DistFile. If the path is not absolute, the current
# working directory is tried first, then $(WORKSPACE), and the resolved AbsPath is returned.
# If the file cannot be found, None is returned.
#
# @param DistFile: The distribution file in either relative path or absolute path
# @param WorkspaceDir: Workspace Directory
# @return AbsPath: The Absolute path of the distribution file if existed, None else
#
def GetFullPathDist(DistFile, WorkspaceDir):
if os.path.isabs(DistFile):
if not (os.path.exists(DistFile) and os.path.isfile(DistFile)):
return None
else:
return DistFile
else:
AbsPath = os.path.normpath(os.path.join(os.getcwd(), DistFile))
if not (os.path.exists(AbsPath) and os.path.isfile(AbsPath)):
AbsPath = os.path.normpath(os.path.join(WorkspaceDir, DistFile))
if not (os.path.exists(AbsPath) and os.path.isfile(AbsPath)):
return None
return AbsPath
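# Resolution sketch (illustrative): for a relative name such as 'Pkg.dist'
# (hypothetical), the lookup order is the current working directory first,
# then $(WORKSPACE); an absolute path is accepted only if it names an existing
# regular file, and None is returned when every candidate is missing.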
if __name__ == '__main__':
RETVAL = Main()
#
# 0-127 is a safe return range, and 1 is a standard default error
#
if RETVAL < 0 or RETVAL > 127:
RETVAL = 1
sys.exit(RETVAL)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/UPT.py
|
## @file
# Inventory workspace's distribution package information.
#
# Copyright (c) 2014 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
"""
Inventory workspace's distribution package information.
"""
##
# Import Modules
#
from sys import platform
from traceback import format_exc
from platform import python_version
from Logger import StringTable as ST
from Logger.ToolError import FatalError
from Logger.ToolError import ABORT_ERROR
from Logger.ToolError import CODE_ERROR
import Logger.Log as Logger
from Library import GlobalData
## InventoryDistInstalled
#
# This method retrieves the installed distribution information from the internal UPT database
#
# @param DataBase: the UPT database
#
def InventoryDistInstalled(DataBase):
DistInstalled = DataBase.InventoryDistInstalled()
#
# find the max length for each item
#
DpNameStr = "DpName"
DpGuidStr = "DpGuid"
DpVerStr = "DpVer"
DpOriginalNameStr = "DpOriginalName"
MaxGuidlen = len(DpGuidStr)
MaxVerlen = len(DpVerStr)
MaxDpAliasFileNameLen = len(DpNameStr)
MaxDpOrigFileNamelen = len(DpOriginalNameStr)
for (DpGuid, DpVersion, DpOriginalName, DpAliasFileName) in DistInstalled:
MaxGuidlen = max(MaxGuidlen, len(DpGuid))
MaxVerlen = max(MaxVerlen, len(DpVersion))
MaxDpAliasFileNameLen = max(MaxDpAliasFileNameLen, len(DpAliasFileName))
MaxDpOrigFileNamelen = max(MaxDpOrigFileNamelen, len(DpOriginalName))
OutMsgFmt = "%-*s\t%-*s\t%-*s\t%-s"
OutMsg = OutMsgFmt % (MaxDpAliasFileNameLen,
DpNameStr,
MaxGuidlen,
DpGuidStr,
MaxVerlen,
DpVerStr,
DpOriginalNameStr)
Logger.Info(OutMsg)
for (DpGuid, DpVersion, DpFileName, DpAliasFileName) in DistInstalled:
OutMsg = OutMsgFmt % (MaxDpAliasFileNameLen,
DpAliasFileName,
MaxGuidlen,
DpGuid,
MaxVerlen,
DpVersion,
DpFileName)
Logger.Info(OutMsg)
## Tool entrance method
#
# This method mainly dispatches specific methods per the command line options.
# If no error is found, it returns zero so the caller of this tool can tell
# whether it executed successfully.
#
# @param Options: command Options
#
def Main(Options = None):
if Options:
pass
try:
DataBase = GlobalData.gDB
InventoryDistInstalled(DataBase)
ReturnCode = 0
except FatalError as XExcept:
ReturnCode = XExcept.args[0]
if Logger.GetLevel() <= Logger.DEBUG_9:
Logger.Quiet(ST.MSG_PYTHON_ON % (python_version(), platform) + format_exc())
except KeyboardInterrupt:
ReturnCode = ABORT_ERROR
if Logger.GetLevel() <= Logger.DEBUG_9:
Logger.Quiet(ST.MSG_PYTHON_ON % (python_version(), platform) + format_exc())
except:
ReturnCode = CODE_ERROR
Logger.Error("\nInventoryWs",
CODE_ERROR,
ST.ERR_UNKNOWN_FATAL_INVENTORYWS_ERR,
ExtraData=ST.MSG_SEARCH_FOR_HELP % ST.MSG_EDKII_MAIL_ADDR,
RaiseError=False
)
Logger.Quiet(ST.MSG_PYTHON_ON % (python_version(),
platform) + format_exc())
if ReturnCode == 0:
Logger.Quiet(ST.MSG_FINISH)
return ReturnCode
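# Formatting sketch (illustrative; standalone): the '%-*s' conversions used in
# InventoryDistInstalled left-justify each column to the widest value seen,
# for example:
if __name__ == '__main__':
    assert "%-*s\t%-*s" % (8, 'DpName', 5, 'DpVer') == 'DpName  \tDpVer'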
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/InventoryWs.py
|
## @file
# Test Install distribution package
#
# Copyright (c) 2016 - 2017, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
"""
Test Install multiple distribution package
"""
##
# Import Modules
#
from Library import GlobalData
import Logger.Log as Logger
from Logger import StringTable as ST
import Logger.ToolError as TE
from Core.DependencyRules import DependencyRules
from InstallPkg import UnZipDp
import shutil
from traceback import format_exc
from platform import python_version
from sys import platform
## Tool entrance method
#
# This method mainly dispatches specific methods per the command line options.
# If no error is found, it returns zero so the caller of this tool can tell
# whether it executed successfully.
#
# @param Options: command Options
#
def Main(Options=None):
ContentZipFile, DistFile = None, None
ReturnCode = 0
try:
DataBase = GlobalData.gDB
WorkspaceDir = GlobalData.gWORKSPACE
if not Options.DistFiles:
Logger.Error("TestInstallPkg", TE.OPTION_MISSING, ExtraData=ST.ERR_SPECIFY_PACKAGE)
DistPkgList = []
for DistFile in Options.DistFiles:
DistPkg, ContentZipFile, __, DistFile = UnZipDp(WorkspaceDir, DistFile)
DistPkgList.append(DistPkg)
#
# check dependency
#
Dep = DependencyRules(DataBase)
Result = True
DpObj = None
try:
Result, DpObj = Dep.CheckTestInstallPdDepexSatisfied(DistPkgList)
except:
Result = False
if Result:
Logger.Quiet(ST.MSG_TEST_INSTALL_PASS)
else:
Logger.Quiet(ST.MSG_TEST_INSTALL_FAIL)
except TE.FatalError as XExcept:
ReturnCode = XExcept.args[0]
if Logger.GetLevel() <= Logger.DEBUG_9:
Logger.Quiet(ST.MSG_PYTHON_ON % (python_version(), platform) + format_exc())
    except Exception:
ReturnCode = TE.CODE_ERROR
Logger.Error(
"\nTestInstallPkg",
TE.CODE_ERROR,
ST.ERR_UNKNOWN_FATAL_INSTALL_ERR % Options.DistFiles,
ExtraData=ST.MSG_SEARCH_FOR_HELP % ST.MSG_EDKII_MAIL_ADDR,
RaiseError=False
)
Logger.Quiet(ST.MSG_PYTHON_ON % (python_version(), platform) + format_exc())
finally:
Logger.Quiet(ST.MSG_REMOVE_TEMP_FILE_STARTED)
if DistFile:
DistFile.Close()
if ContentZipFile:
ContentZipFile.Close()
for TempDir in GlobalData.gUNPACK_DIR:
shutil.rmtree(TempDir)
GlobalData.gUNPACK_DIR = []
Logger.Quiet(ST.MSG_REMOVE_TEMP_FILE_DONE)
if ReturnCode == 0:
Logger.Quiet(ST.MSG_FINISH)
return ReturnCode
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/TestInstall.py
|
## @file
# Install distribution package.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
RmPkg
'''
##
# Import Modules
#
import os.path
from stat import S_IWUSR
from traceback import format_exc
from platform import python_version
from hashlib import md5
from sys import stdin
from sys import platform
from Core.DependencyRules import DependencyRules
from Library import GlobalData
from Logger import StringTable as ST
import Logger.Log as Logger
from Logger.ToolError import OPTION_MISSING
from Logger.ToolError import UNKNOWN_ERROR
from Logger.ToolError import ABORT_ERROR
from Logger.ToolError import CODE_ERROR
from Logger.ToolError import FatalError
## CheckDpDepex
#
# Check if the Depex is satisfied
# @param Dep: Dep
# @param Guid: Guid of Dp
# @param Version: Version of Dp
# @param WorkspaceDir: Workspace Dir
#
def CheckDpDepex(Dep, Guid, Version, WorkspaceDir):
(Removable, DependModuleList) = Dep.CheckDpDepexForRemove(Guid, Version)
if not Removable:
Logger.Info(ST.MSG_CONFIRM_REMOVE)
Logger.Info(ST.MSG_USER_DELETE_OP)
Input = stdin.readline()
Input = Input.replace('\r', '').replace('\n', '')
if Input.upper() != 'Y':
Logger.Error("RmPkg", UNKNOWN_ERROR, ST.ERR_USER_INTERRUPT)
return 1
else:
#
# report list of modules that are not valid due to force
# remove,
# also generate a log file for reference
#
Logger.Info(ST.MSG_INVALID_MODULE_INTRODUCED)
LogFilePath = os.path.normpath(os.path.join(WorkspaceDir, GlobalData.gINVALID_MODULE_FILE))
Logger.Info(ST.MSG_CHECK_LOG_FILE % LogFilePath)
        LogFile = None
        try:
            LogFile = open(LogFilePath, 'w')
            try:
                for ModulePath in DependModuleList:
                    LogFile.write("%s\n" % ModulePath)
                    Logger.Info(ModulePath)
            except IOError:
                Logger.Warn("\nRmPkg", ST.ERR_FILE_WRITE_FAILURE,
                            File=LogFilePath)
        except IOError:
            Logger.Warn("\nRmPkg", ST.ERR_FILE_OPEN_FAILURE,
                        File=LogFilePath)
        finally:
            # Close only if the file was actually opened; a failed open would
            # otherwise leave LogFile unbound when this clause runs.
            if LogFile is not None:
                LogFile.close()
## Remove Path
#
# Removing a read-only file on Windows fails with an "Access is denied"
# error, so the mode is changed to writable before removal.
#
# @param Path: The Path to be removed
#
def RemovePath(Path):
Logger.Info(ST.MSG_REMOVE_FILE % Path)
if not os.access(Path, os.W_OK):
os.chmod(Path, S_IWUSR)
os.remove(Path)
try:
os.removedirs(os.path.split(Path)[0])
except OSError:
pass
## GetCurrentFileList
#
# @param DataBase: DataBase of UPT
# @param Guid: Guid of Dp
# @param Version: Version of Dp
# @param WorkspaceDir: Workspace Dir
#
def GetCurrentFileList(DataBase, Guid, Version, WorkspaceDir):
NewFileList = []
for Dir in DataBase.GetDpInstallDirList(Guid, Version):
RootDir = os.path.normpath(os.path.join(WorkspaceDir, Dir))
for Root, Dirs, Files in os.walk(RootDir):
Logger.Debug(0, Dirs)
for File in Files:
FilePath = os.path.join(Root, File)
if FilePath not in NewFileList:
NewFileList.append(FilePath)
return NewFileList
## Tool entrance method
#
# This method mainly dispatches specific methods per the command line options.
# If no error is found, it returns zero so the caller of this tool can tell
# whether it executed successfully.
#
# @param Options: command option
#
def Main(Options = None):
try:
DataBase = GlobalData.gDB
if not Options.DistributionFile:
Logger.Error("RmPkg",
OPTION_MISSING,
ExtraData=ST.ERR_SPECIFY_PACKAGE)
WorkspaceDir = GlobalData.gWORKSPACE
#
# Prepare check dependency
#
Dep = DependencyRules(DataBase)
#
# Get the Dp information
#
StoredDistFile, Guid, Version = GetInstalledDpInfo(Options.DistributionFile, Dep, DataBase, WorkspaceDir)
#
# Check Dp depex
#
CheckDpDepex(Dep, Guid, Version, WorkspaceDir)
#
# remove distribution
#
RemoveDist(Guid, Version, StoredDistFile, DataBase, WorkspaceDir, Options.Yes)
Logger.Quiet(ST.MSG_FINISH)
ReturnCode = 0
except FatalError as XExcept:
ReturnCode = XExcept.args[0]
if Logger.GetLevel() <= Logger.DEBUG_9:
Logger.Quiet(ST.MSG_PYTHON_ON % (python_version(), platform) + \
format_exc())
except KeyboardInterrupt:
ReturnCode = ABORT_ERROR
if Logger.GetLevel() <= Logger.DEBUG_9:
Logger.Quiet(ST.MSG_PYTHON_ON % (python_version(), platform) + \
format_exc())
except:
Logger.Error(
"\nRmPkg",
CODE_ERROR,
ST.ERR_UNKNOWN_FATAL_REMOVING_ERR,
ExtraData=ST.MSG_SEARCH_FOR_HELP % ST.MSG_EDKII_MAIL_ADDR,
RaiseError=False
)
Logger.Quiet(ST.MSG_PYTHON_ON % (python_version(), platform) + \
format_exc())
ReturnCode = CODE_ERROR
return ReturnCode
## GetInstalledDpInfo method
#
# Get the installed distribution information
#
# @param DistributionFile: the name of the distribution
# @param Dep: the instance of DependencyRules
# @param DataBase: the internal database
# @param WorkspaceDir: work space directory
# @retval StoredDistFile: the distribution file that backed up
# @retval Guid: the Guid of the distribution
# @retval Version: the Version of distribution
#
def GetInstalledDpInfo(DistributionFile, Dep, DataBase, WorkspaceDir):
(Guid, Version, NewDpFileName) = DataBase.GetDpByName(os.path.split(DistributionFile)[1])
if not Guid:
Logger.Error("RmPkg", UNKNOWN_ERROR, ST.ERR_PACKAGE_NOT_INSTALLED % DistributionFile)
#
# Check Dp existing
#
if not Dep.CheckDpExists(Guid, Version):
Logger.Error("RmPkg", UNKNOWN_ERROR, ST.ERR_DISTRIBUTION_NOT_INSTALLED)
#
    # Check whether the distribution file exists in /conf/upt; if it does
    # not, warn the user and continue.
#
StoredDistFile = os.path.normpath(os.path.join(WorkspaceDir, GlobalData.gUPT_DIR, NewDpFileName))
if not os.path.isfile(StoredDistFile):
Logger.Warn("RmPkg", ST.WRN_DIST_NOT_FOUND%StoredDistFile)
StoredDistFile = None
return StoredDistFile, Guid, Version
## RemoveDist method
#
# remove a distribution
#
# @param Guid: the Guid of the distribution
# @param Version: the Version of distribution
# @param StoredDistFile: the distribution file that backed up
# @param DataBase: the internal database
# @param WorkspaceDir: work space directory
# @param ForceRemove: whether user want to remove file even it is modified
#
def RemoveDist(Guid, Version, StoredDistFile, DataBase, WorkspaceDir, ForceRemove):
#
# Get Current File List
#
NewFileList = GetCurrentFileList(DataBase, Guid, Version, WorkspaceDir)
#
# Remove all files
#
MissingFileList = []
for (Path, Md5Sum) in DataBase.GetDpFileList(Guid, Version):
if os.path.isfile(Path):
if Path in NewFileList:
NewFileList.remove(Path)
if not ForceRemove:
#
# check whether modified by users
#
                with open(str(Path), 'rb') as InstalledFile:
                    Md5Signature = md5(InstalledFile.read())
if Md5Sum != Md5Signature.hexdigest():
Logger.Info(ST.MSG_CONFIRM_REMOVE2 % Path)
Input = stdin.readline()
Input = Input.replace('\r', '').replace('\n', '')
if Input.upper() != 'Y':
continue
RemovePath(Path)
else:
MissingFileList.append(Path)
for Path in NewFileList:
if os.path.isfile(Path):
if (not ForceRemove) and (not os.path.split(Path)[1].startswith('.')):
Logger.Info(ST.MSG_CONFIRM_REMOVE3 % Path)
Input = stdin.readline()
Input = Input.replace('\r', '').replace('\n', '')
if Input.upper() != 'Y':
continue
RemovePath(Path)
#
# Remove distribution files in /Conf/.upt
#
if StoredDistFile is not None:
os.remove(StoredDistFile)
#
# update database
#
Logger.Quiet(ST.MSG_UPDATE_PACKAGE_DATABASE)
DataBase.RemoveDpObj(Guid, Version)
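# Minimal sketch (illustrative; standalone): the user-modification check in
# RemoveDist reduces to comparing the digest recorded at install time against
# a fresh digest of the current file contents.
if __name__ == '__main__':
    StoredMd5 = md5(b'original contents').hexdigest()
    assert StoredMd5 == md5(b'original contents').hexdigest()
    assert StoredMd5 != md5(b'edited contents').hexdigest()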
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/RmPkg.py
|
## @file
# Install distribution package.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
"""
Install a distribution package
"""
##
# Import Modules
#
from Core.FileHook import __FileHookOpen__
import os.path
from os import chmod
from os import SEEK_SET
from os import SEEK_END
import stat
from hashlib import md5
import copy
from sys import stdin
from sys import platform
from shutil import rmtree
from shutil import copyfile
from traceback import format_exc
from platform import python_version
from Logger import StringTable as ST
from Logger.ToolError import UNKNOWN_ERROR
from Logger.ToolError import FILE_UNKNOWN_ERROR
from Logger.ToolError import OPTION_MISSING
from Logger.ToolError import UPT_ALREADY_INSTALLED_ERROR
from Logger.ToolError import FatalError
from Logger.ToolError import ABORT_ERROR
from Logger.ToolError import CODE_ERROR
from Logger.ToolError import FORMAT_INVALID
from Logger.ToolError import FILE_TYPE_MISMATCH
import Logger.Log as Logger
from Library.Misc import Sdict
from Library.Misc import ConvertPath
from Library.ParserValidate import IsValidInstallPath
from Xml.XmlParser import DistributionPackageXml
from GenMetaFile.GenDecFile import PackageToDec
from GenMetaFile.GenInfFile import ModuleToInf
from Core.PackageFile import PackageFile
from Core.PackageFile import FILE_NOT_FOUND
from Core.PackageFile import FILE_CHECKSUM_FAILURE
from Core.PackageFile import CreateDirectory
from Core.DependencyRules import DependencyRules
from Library import GlobalData
## InstallNewPackage
#
# @param WorkspaceDir: Workspace Directory
# @param Path: Package Path
# @param CustomPath: whether the path needs to be customized first
#
def InstallNewPackage(WorkspaceDir, Path, CustomPath = False):
if os.path.isabs(Path):
Logger.Info(ST.MSG_RELATIVE_PATH_ONLY%Path)
elif CustomPath:
Logger.Info(ST.MSG_NEW_PKG_PATH)
else:
Path = ConvertPath(Path)
Path = os.path.normpath(Path)
FullPath = os.path.normpath(os.path.join(WorkspaceDir, Path))
if os.path.exists(FullPath):
Logger.Info(ST.ERR_DIR_ALREADY_EXIST%FullPath)
else:
return Path
Input = stdin.readline()
Input = Input.replace('\r', '').replace('\n', '')
if Input == '':
Logger.Error("InstallPkg", UNKNOWN_ERROR, ST.ERR_USER_INTERRUPT)
Input = Input.replace('\r', '').replace('\n', '')
return InstallNewPackage(WorkspaceDir, Input, False)
## InstallNewModule
#
# @param WorkspaceDir: Workspace Directory
# @param Path: Standalone Module Path
# @param PathList: The already installed standalone module Path list
#
def InstallNewModule(WorkspaceDir, Path, PathList = None):
if PathList is None:
PathList = []
Path = ConvertPath(Path)
Path = os.path.normpath(Path)
FullPath = os.path.normpath(os.path.join(WorkspaceDir, Path))
if os.path.exists(FullPath) and FullPath not in PathList:
Logger.Info(ST.ERR_DIR_ALREADY_EXIST%Path)
elif Path == FullPath:
Logger.Info(ST.MSG_RELATIVE_PATH_ONLY%FullPath)
else:
return Path
Input = stdin.readline()
Input = Input.replace('\r', '').replace('\n', '')
if Input == '':
Logger.Error("InstallPkg", UNKNOWN_ERROR, ST.ERR_USER_INTERRUPT)
Input = Input.replace('\r', '').replace('\n', '')
return InstallNewModule(WorkspaceDir, Input, PathList)
## InstallNewFile
#
# @param WorkspaceDir: Workspace Directory
# @param File: File
#
def InstallNewFile(WorkspaceDir, File):
FullPath = os.path.normpath(os.path.join(WorkspaceDir, File))
if os.path.exists(FullPath):
Logger.Info(ST.ERR_FILE_ALREADY_EXIST %File)
Input = stdin.readline()
Input = Input.replace('\r', '').replace('\n', '')
if Input == '':
Logger.Error("InstallPkg", UNKNOWN_ERROR, ST.ERR_USER_INTERRUPT)
Input = Input.replace('\r', '').replace('\n', '')
return InstallNewFile(WorkspaceDir, Input)
else:
return File
## UnZipDp
#
# UnZipDp
#
def UnZipDp(WorkspaceDir, DpPkgFileName, Index=1):
ContentZipFile = None
Logger.Quiet(ST.MSG_UZIP_PARSE_XML)
DistFile = PackageFile(DpPkgFileName)
DpDescFileName, ContentFileName = GetDPFile(DistFile.GetZipFile())
TempDir = os.path.normpath(os.path.join(WorkspaceDir, "Conf/.tmp%s" % str(Index)))
GlobalData.gUNPACK_DIR.append(TempDir)
DistPkgFile = DistFile.UnpackFile(DpDescFileName, os.path.normpath(os.path.join(TempDir, DpDescFileName)))
if not DistPkgFile:
Logger.Error("InstallPkg", FILE_NOT_FOUND, ST.ERR_FILE_BROKEN %DpDescFileName)
#
# Generate distpkg
#
DistPkgObj = DistributionPackageXml()
DistPkg = DistPkgObj.FromXml(DistPkgFile)
if DistPkg.Header.RePackage == '':
DistPkg.Header.RePackage = False
if DistPkg.Header.ReadOnly == '':
DistPkg.Header.ReadOnly = False
#
# unzip contents.zip file
#
ContentFile = DistFile.UnpackFile(ContentFileName, os.path.normpath(os.path.join(TempDir, ContentFileName)))
if not ContentFile:
Logger.Error("InstallPkg", FILE_NOT_FOUND,
ST.ERR_FILE_BROKEN % ContentFileName)
#
# Get file size
#
FileSize = os.path.getsize(ContentFile)
if FileSize != 0:
ContentZipFile = PackageFile(ContentFile)
#
        # verify the MD5 signature when one is present
#
if DistPkg.Header.Signature != '':
Md5Signature = md5(__FileHookOpen__(ContentFile, 'rb').read())
if DistPkg.Header.Signature != Md5Signature.hexdigest():
ContentZipFile.Close()
Logger.Error("InstallPkg", FILE_CHECKSUM_FAILURE,
ExtraData=ContentFile)
return DistPkg, ContentZipFile, DpPkgFileName, DistFile
## GetPackageList
#
# GetPackageList
#
def GetPackageList(DistPkg, Dep, WorkspaceDir, Options, ContentZipFile, ModuleList, PackageList):
NewDict = Sdict()
for Guid, Version, Path in DistPkg.PackageSurfaceArea:
PackagePath = Path
Package = DistPkg.PackageSurfaceArea[Guid, Version, Path]
Logger.Info(ST.MSG_INSTALL_PACKAGE % Package.GetName())
# if Dep.CheckPackageExists(Guid, Version):
# Logger.Info(ST.WRN_PACKAGE_EXISTED %(Guid, Version))
if Options.UseGuidedPkgPath:
GuidedPkgPath = "%s_%s_%s" % (Package.GetName(), Guid, Version)
NewPackagePath = InstallNewPackage(WorkspaceDir, GuidedPkgPath, Options.CustomPath)
else:
NewPackagePath = InstallNewPackage(WorkspaceDir, PackagePath, Options.CustomPath)
InstallPackageContent(PackagePath, NewPackagePath, Package, ContentZipFile, Dep, WorkspaceDir, ModuleList,
DistPkg.Header.ReadOnly)
PackageList.append(Package)
NewDict[Guid, Version, Package.GetPackagePath()] = Package
#
    # Now generate the meta-data files. All DEC files are generated first:
    # DEC must be generated before INF, and INF only after all packages are
    # installed, since resolving a module's package dependency is otherwise
    # hard (the location of a newly installed package would not yet be known)
#
for Package in PackageList:
FilePath = PackageToDec(Package, DistPkg.Header)
Md5Signature = md5(__FileHookOpen__(str(FilePath), 'rb').read())
Md5Sum = Md5Signature.hexdigest()
if (FilePath, Md5Sum) not in Package.FileList:
Package.FileList.append((FilePath, Md5Sum))
return NewDict
## GetModuleList
#
# GetModuleList
#
def GetModuleList(DistPkg, Dep, WorkspaceDir, ContentZipFile, ModuleList):
#
    # ModulePathList keeps track of the standalone module paths that were
    # just installed. If a new module's path is already in that list (which
    # only happens when multiple INFs share one directory), the module is
    # installed there directly; otherwise a new directory is created for it.
#
ModulePathList = []
#
# Check module exist and install
#
Module = None
NewDict = Sdict()
for Guid, Version, Name, Path in DistPkg.ModuleSurfaceArea:
ModulePath = Path
Module = DistPkg.ModuleSurfaceArea[Guid, Version, Name, Path]
Logger.Info(ST.MSG_INSTALL_MODULE % Module.GetName())
if Dep.CheckModuleExists(Guid, Version, Name, Path):
Logger.Quiet(ST.WRN_MODULE_EXISTED %Path)
#
# here check for the multiple inf share the same module path cases:
# they should be installed into the same directory
#
ModuleFullPath = \
os.path.normpath(os.path.join(WorkspaceDir, ModulePath))
if ModuleFullPath not in ModulePathList:
NewModulePath = InstallNewModule(WorkspaceDir, ModulePath, ModulePathList)
NewModuleFullPath = os.path.normpath(os.path.join(WorkspaceDir, NewModulePath))
ModulePathList.append(NewModuleFullPath)
else:
NewModulePath = ModulePath
InstallModuleContent(ModulePath, NewModulePath, '', Module, ContentZipFile, WorkspaceDir, ModuleList, None,
DistPkg.Header.ReadOnly)
#
# Update module
#
Module.SetModulePath(Module.GetModulePath().replace(Path, NewModulePath, 1))
NewDict[Guid, Version, Name, Module.GetModulePath()] = Module
#
# generate all inf for modules
#
for (Module, Package) in ModuleList:
CheckCNameInModuleRedefined(Module, DistPkg)
FilePath = ModuleToInf(Module, Package, DistPkg.Header)
Md5Signature = md5(__FileHookOpen__(str(FilePath), 'rb').read())
Md5Sum = Md5Signature.hexdigest()
if Package:
if (FilePath, Md5Sum) not in Package.FileList:
Package.FileList.append((FilePath, Md5Sum))
else:
if (FilePath, Md5Sum) not in Module.FileList:
Module.FileList.append((FilePath, Md5Sum))
#
# append the module unicode files to Package FileList
#
for (FilePath, Md5Sum) in Module.FileList:
if str(FilePath).endswith('.uni') and Package and (FilePath, Md5Sum) not in Package.FileList:
Package.FileList.append((FilePath, Md5Sum))
return NewDict
##
# Get all protocol/ppi/guid CNames and PCD names from all dependent DEC files
#
def GetDepProtocolPpiGuidPcdNames(DePackageObjList):
#
# [[Dec1Protocol1, Dec1Protocol2...], [Dec2Protocols...],...]
#
DependentProtocolCNames = []
DependentPpiCNames = []
DependentGuidCNames = []
DependentPcdNames = []
for PackageObj in DePackageObjList:
#
# Get protocol CName list from all dependent DEC file
#
ProtocolCNames = []
for Protocol in PackageObj.GetProtocolList():
if Protocol.GetCName() not in ProtocolCNames:
ProtocolCNames.append(Protocol.GetCName())
DependentProtocolCNames.append(ProtocolCNames)
#
# Get Ppi CName list from all dependent DEC file
#
PpiCNames = []
for Ppi in PackageObj.GetPpiList():
if Ppi.GetCName() not in PpiCNames:
PpiCNames.append(Ppi.GetCName())
DependentPpiCNames.append(PpiCNames)
#
# Get Guid CName list from all dependent DEC file
#
GuidCNames = []
for Guid in PackageObj.GetGuidList():
if Guid.GetCName() not in GuidCNames:
GuidCNames.append(Guid.GetCName())
DependentGuidCNames.append(GuidCNames)
#
# Get PcdName list from all dependent DEC file
#
PcdNames = []
for Pcd in PackageObj.GetPcdList():
PcdName = '.'.join([Pcd.GetTokenSpaceGuidCName(), Pcd.GetCName()])
if PcdName not in PcdNames:
PcdNames.append(PcdName)
DependentPcdNames.append(PcdNames)
return DependentProtocolCNames, DependentPpiCNames, DependentGuidCNames, DependentPcdNames
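# Naming sketch (illustrative): a PCD is identified by joining its token-space
# GUID C name with its own C name, so hypothetical names
# 'gEfiMdePkgTokenSpaceGuid' and 'PcdDebugPropertyMask' combine to
# 'gEfiMdePkgTokenSpaceGuid.PcdDebugPropertyMask'.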
##
# Check if protocol CName is redefined
#
def CheckProtocolCNameRedefined(Module, DependentProtocolCNames):
for ProtocolInModule in Module.GetProtocolList():
IsCNameDefined = False
for PackageProtocolCNames in DependentProtocolCNames:
if ProtocolInModule.GetCName() in PackageProtocolCNames:
if IsCNameDefined:
Logger.Error("\nUPT", FORMAT_INVALID,
File = Module.GetFullPath(),
ExtraData = \
ST.ERR_INF_PARSER_ITEM_DUPLICATE_IN_DEC % ProtocolInModule.GetCName())
else:
IsCNameDefined = True
##
# Check if Ppi CName is redefined
#
def CheckPpiCNameRedefined(Module, DependentPpiCNames):
for PpiInModule in Module.GetPpiList():
IsCNameDefined = False
for PackagePpiCNames in DependentPpiCNames:
if PpiInModule.GetCName() in PackagePpiCNames:
if IsCNameDefined:
Logger.Error("\nUPT", FORMAT_INVALID,
File = Module.GetFullPath(),
ExtraData = ST.ERR_INF_PARSER_ITEM_DUPLICATE_IN_DEC % PpiInModule.GetCName())
else:
IsCNameDefined = True
##
# Check if Guid CName is redefined
#
def CheckGuidCNameRedefined(Module, DependentGuidCNames):
for GuidInModule in Module.GetGuidList():
IsCNameDefined = False
for PackageGuidCNames in DependentGuidCNames:
if GuidInModule.GetCName() in PackageGuidCNames:
if IsCNameDefined:
Logger.Error("\nUPT", FORMAT_INVALID,
File = Module.GetFullPath(),
ExtraData = \
ST.ERR_INF_PARSER_ITEM_DUPLICATE_IN_DEC % GuidInModule.GetCName())
else:
IsCNameDefined = True
##
# Check if PcdName is redefined
#
def CheckPcdNameRedefined(Module, DependentPcdNames):
PcdObjs = []
if not Module.GetBinaryFileList():
PcdObjs += Module.GetPcdList()
else:
Binary = Module.GetBinaryFileList()[0]
for AsBuild in Binary.GetAsBuiltList():
PcdObjs += AsBuild.GetPatchPcdList() + AsBuild.GetPcdExList()
for PcdObj in PcdObjs:
PcdName = '.'.join([PcdObj.GetTokenSpaceGuidCName(), PcdObj.GetCName()])
IsPcdNameDefined = False
for PcdNames in DependentPcdNames:
if PcdName in PcdNames:
if IsPcdNameDefined:
Logger.Error("\nUPT", FORMAT_INVALID,
File = Module.GetFullPath(),
ExtraData = ST.ERR_INF_PARSER_ITEM_DUPLICATE_IN_DEC % PcdName)
else:
IsPcdNameDefined = True
##
# Check if any Protocol/Ppi/Guid and Pcd name is redefined in its dependent DEC files
#
def CheckCNameInModuleRedefined(Module, DistPkg):
DePackageObjList = []
#
# Get all dependent package objects
#
for Obj in Module.GetPackageDependencyList():
Guid = Obj.GetGuid()
Version = Obj.GetVersion()
for Key in DistPkg.PackageSurfaceArea:
if Key[0] == Guid and Key[1] == Version:
if DistPkg.PackageSurfaceArea[Key] not in DePackageObjList:
DePackageObjList.append(DistPkg.PackageSurfaceArea[Key])
DependentProtocolCNames, DependentPpiCNames, DependentGuidCNames, DependentPcdNames = \
GetDepProtocolPpiGuidPcdNames(DePackageObjList)
    CheckProtocolCNameRedefined(Module, DependentProtocolCNames)
CheckPpiCNameRedefined(Module, DependentPpiCNames)
CheckGuidCNameRedefined(Module, DependentGuidCNames)
CheckPcdNameRedefined(Module, DependentPcdNames)
## GenToolMisc
#
# GenToolMisc
#
#
def GenToolMisc(DistPkg, WorkspaceDir, ContentZipFile):
ToolObject = DistPkg.Tools
MiscObject = DistPkg.MiscellaneousFiles
DistPkg.FileList = []
FileList = []
ToolFileNum = 0
FileNum = 0
RootDir = WorkspaceDir
#
    # FileList stores both tool files and misc files
# Misc file list must be appended to FileList *AFTER* Tools file list
#
if ToolObject:
FileList += ToolObject.GetFileList()
ToolFileNum = len(ToolObject.GetFileList())
if 'EDK_TOOLS_PATH' in os.environ:
RootDir = os.environ['EDK_TOOLS_PATH']
if MiscObject:
FileList += MiscObject.GetFileList()
for FileObject in FileList:
FileNum += 1
if FileNum > ToolFileNum:
#
# Misc files, root should be changed to WORKSPACE
#
RootDir = WorkspaceDir
File = ConvertPath(FileObject.GetURI())
ToFile = os.path.normpath(os.path.join(RootDir, File))
if os.path.exists(ToFile):
Logger.Info( ST.WRN_FILE_EXISTED % ToFile )
#
# ask for user input the new file name
#
Logger.Info( ST.MSG_NEW_FILE_NAME)
Input = stdin.readline()
Input = Input.replace('\r', '').replace('\n', '')
OrigPath = os.path.split(ToFile)[0]
ToFile = os.path.normpath(os.path.join(OrigPath, Input))
FromFile = os.path.join(FileObject.GetURI())
Md5Sum = InstallFile(ContentZipFile, FromFile, ToFile, DistPkg.Header.ReadOnly, FileObject.GetExecutable())
DistPkg.FileList.append((ToFile, Md5Sum))
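# Illustrative sketch (not part of the original tool, helper name is
# hypothetical) of the root-directory rule GenToolMisc applies: tools files
# install under EDK_TOOLS_PATH when that environment variable is set, while
# misc files always install under the workspace.
def _ExampleInstallRoot(IsToolFile, WorkspaceDir, Environ):
    if IsToolFile and 'EDK_TOOLS_PATH' in Environ:
        return Environ['EDK_TOOLS_PATH']
    return WorkspaceDir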
## Tool entrance method
#
# This method mainly dispatches specific methods per the command line options.
# If no error is found, a zero value is returned so the caller of this tool
# can know whether it executed successfully.
#
# @param Options: command Options
#
def Main(Options = None):
    # Initialize here so the finally clause below can iterate safely even if
    # an error is raised before any distribution is unpacked
    DistInfoList = []
    try:
DataBase = GlobalData.gDB
WorkspaceDir = GlobalData.gWORKSPACE
if not Options.PackageFile:
Logger.Error("InstallPkg", OPTION_MISSING, ExtraData=ST.ERR_SPECIFY_PACKAGE)
        # Get all Dist Info
        DistPkgList = []
        Index = 1
Index = 1
for ToBeInstalledDist in Options.PackageFile:
#
# unzip dist.pkg file
#
DistInfoList.append(UnZipDp(WorkspaceDir, ToBeInstalledDist, Index))
DistPkgList.append(DistInfoList[-1][0])
Index += 1
#
# Add dist
#
GlobalData.gTO_BE_INSTALLED_DIST_LIST.append(DistInfoList[-1][0])
# Check for dependency
Dep = DependencyRules(DataBase, DistPkgList)
for ToBeInstalledDist in DistInfoList:
CheckInstallDpx(Dep, ToBeInstalledDist[0], ToBeInstalledDist[2])
#
# Install distribution
#
InstallDp(ToBeInstalledDist[0], ToBeInstalledDist[2], ToBeInstalledDist[1],
Options, Dep, WorkspaceDir, DataBase)
ReturnCode = 0
except FatalError as XExcept:
ReturnCode = XExcept.args[0]
if Logger.GetLevel() <= Logger.DEBUG_9:
Logger.Quiet(ST.MSG_PYTHON_ON % (python_version(), platform) + format_exc())
except KeyboardInterrupt:
ReturnCode = ABORT_ERROR
if Logger.GetLevel() <= Logger.DEBUG_9:
Logger.Quiet(ST.MSG_PYTHON_ON % (python_version(), platform) + format_exc())
except:
ReturnCode = CODE_ERROR
Logger.Error(
"\nInstallPkg",
CODE_ERROR,
ST.ERR_UNKNOWN_FATAL_INSTALL_ERR % Options.PackageFile,
ExtraData=ST.MSG_SEARCH_FOR_HELP % ST.MSG_EDKII_MAIL_ADDR,
RaiseError=False
)
Logger.Quiet(ST.MSG_PYTHON_ON % (python_version(),
platform) + format_exc())
finally:
Logger.Quiet(ST.MSG_REMOVE_TEMP_FILE_STARTED)
for ToBeInstalledDist in DistInfoList:
if ToBeInstalledDist[3]:
ToBeInstalledDist[3].Close()
if ToBeInstalledDist[1]:
ToBeInstalledDist[1].Close()
for TempDir in GlobalData.gUNPACK_DIR:
rmtree(TempDir)
GlobalData.gUNPACK_DIR = []
Logger.Quiet(ST.MSG_REMOVE_TEMP_FILE_DONE)
if ReturnCode == 0:
Logger.Quiet(ST.MSG_FINISH)
return ReturnCode
## BackupDist method
#
# This method will back up the distribution file into $(WORKSPACE)/conf/upt and rename it
# if a distribution with the same file name already exists there.
#
# @param DpPkgFileName: The distribution path
# @param Guid: The distribution Guid
# @param Version: The distribution Version
# @param WorkspaceDir: The workspace directory
# @retval NewDpPkgFileName: The exact backup file name
#
def BackupDist(DpPkgFileName, Guid, Version, WorkspaceDir):
DistFileName = os.path.split(DpPkgFileName)[1]
DestDir = os.path.normpath(os.path.join(WorkspaceDir, GlobalData.gUPT_DIR))
CreateDirectory(DestDir)
DestFile = os.path.normpath(os.path.join(DestDir, DistFileName))
if os.path.exists(DestFile):
FileName, Ext = os.path.splitext(DistFileName)
NewFileName = FileName + '_' + Guid + '_' + Version + Ext
DestFile = os.path.normpath(os.path.join(DestDir, NewFileName))
if os.path.exists(DestFile):
            #
            # prompt the user to input a new file name
            #
Logger.Info( ST.MSG_NEW_FILE_NAME_FOR_DIST)
Input = stdin.readline()
Input = Input.replace('\r', '').replace('\n', '')
DestFile = os.path.normpath(os.path.join(DestDir, Input))
copyfile(DpPkgFileName, DestFile)
NewDpPkgFileName = DestFile[DestFile.find(DestDir) + len(DestDir) + 1:]
return NewDpPkgFileName
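# Illustrative sketch (not part of the original tool, helper name is
# hypothetical) of the backup naming scheme used by BackupDist: a second copy
# of 'MyDist.dist' gets the distribution GUID and version appended before the
# user is ever prompted for a name.
def _ExampleBackupName(DistFileName, Guid, Version):
    FileName, Ext = os.path.splitext(DistFileName)
    return FileName + '_' + Guid + '_' + Version + Ext
# e.g. _ExampleBackupName('MyDist.dist', '1E73767F-8F52-4603-AEB4-F29B510B6766', '1.0')
# -> 'MyDist_1E73767F-8F52-4603-AEB4-F29B510B6766_1.0.dist'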
## CheckInstallDpx method
#
# Check whether the distribution can be installed
#
# @param Dep: the DependencyRules instance used to check dependency
# @param DistPkg: the distribution object
# @param DistPkgFileName: the distribution file name
#
def CheckInstallDpx(Dep, DistPkg, DistPkgFileName):
#
# Check distribution package installed or not
#
if Dep.CheckDpExists(DistPkg.Header.GetGuid(),
DistPkg.Header.GetVersion()):
Logger.Error("InstallPkg",
UPT_ALREADY_INSTALLED_ERROR,
ST.WRN_DIST_PKG_INSTALLED % os.path.basename(DistPkgFileName))
#
# Check distribution dependency (all module dependency should be
# satisfied)
#
if not Dep.CheckInstallDpDepexSatisfied(DistPkg):
Logger.Error("InstallPkg", UNKNOWN_ERROR,
ST.ERR_PACKAGE_NOT_MATCH_DEPENDENCY,
ExtraData=DistPkg.Header.Name)
## InstallModuleContent method
#
# If this is a standalone module, then Package should be None and
# ModulePath should be ''
# @param FromPath: FromPath
# @param NewPath: NewPath
# @param ModulePath: ModulePath
# @param Module: Module
# @param ContentZipFile: ContentZipFile
# @param WorkspaceDir: WorkspaceDir
# @param ModuleList: ModuleList
# @param Package: Package
#
def InstallModuleContent(FromPath, NewPath, ModulePath, Module, ContentZipFile,
WorkspaceDir, ModuleList, Package = None, ReadOnly = False):
if NewPath.startswith("\\") or NewPath.startswith("/"):
NewPath = NewPath[1:]
if not IsValidInstallPath(NewPath):
Logger.Error("UPT", FORMAT_INVALID, ST.ERR_FILE_NAME_INVALIDE%NewPath)
NewModuleFullPath = os.path.normpath(os.path.join(WorkspaceDir, NewPath,
ConvertPath(ModulePath)))
Module.SetFullPath(os.path.normpath(os.path.join(NewModuleFullPath,
ConvertPath(Module.GetName()) + '.inf')))
Module.FileList = []
for MiscFile in Module.GetMiscFileList():
if not MiscFile:
continue
for Item in MiscFile.GetFileList():
File = Item.GetURI()
if File.startswith("\\") or File.startswith("/"):
File = File[1:]
if not IsValidInstallPath(File):
Logger.Error("UPT", FORMAT_INVALID, ST.ERR_FILE_NAME_INVALIDE%File)
FromFile = os.path.join(FromPath, ModulePath, File)
Executable = Item.GetExecutable()
ToFile = os.path.normpath(os.path.join(NewModuleFullPath, ConvertPath(File)))
Md5Sum = InstallFile(ContentZipFile, FromFile, ToFile, ReadOnly, Executable)
if Package and ((ToFile, Md5Sum) not in Package.FileList):
Package.FileList.append((ToFile, Md5Sum))
elif Package:
continue
elif (ToFile, Md5Sum) not in Module.FileList:
Module.FileList.append((ToFile, Md5Sum))
for Item in Module.GetSourceFileList():
File = Item.GetSourceFile()
if File.startswith("\\") or File.startswith("/"):
File = File[1:]
if not IsValidInstallPath(File):
Logger.Error("UPT", FORMAT_INVALID, ST.ERR_FILE_NAME_INVALIDE%File)
FromFile = os.path.join(FromPath, ModulePath, File)
ToFile = os.path.normpath(os.path.join(NewModuleFullPath, ConvertPath(File)))
Md5Sum = InstallFile(ContentZipFile, FromFile, ToFile, ReadOnly)
if Package and ((ToFile, Md5Sum) not in Package.FileList):
Package.FileList.append((ToFile, Md5Sum))
elif Package:
continue
elif (ToFile, Md5Sum) not in Module.FileList:
Module.FileList.append((ToFile, Md5Sum))
for Item in Module.GetBinaryFileList():
FileNameList = Item.GetFileNameList()
for FileName in FileNameList:
File = FileName.GetFilename()
if File.startswith("\\") or File.startswith("/"):
File = File[1:]
if not IsValidInstallPath(File):
Logger.Error("UPT", FORMAT_INVALID, ST.ERR_FILE_NAME_INVALIDE%File)
FromFile = os.path.join(FromPath, ModulePath, File)
ToFile = os.path.normpath(os.path.join(NewModuleFullPath, ConvertPath(File)))
Md5Sum = InstallFile(ContentZipFile, FromFile, ToFile, ReadOnly)
if Package and ((ToFile, Md5Sum) not in Package.FileList):
Package.FileList.append((ToFile, Md5Sum))
elif Package:
continue
elif (ToFile, Md5Sum) not in Module.FileList:
Module.FileList.append((ToFile, Md5Sum))
InstallModuleContentZipFile(ContentZipFile, FromPath, ModulePath, WorkspaceDir, NewPath, Module, Package, ReadOnly,
ModuleList)
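# Illustrative sketch (not part of the original tool, helper name is
# hypothetical) of the bookkeeping rule repeated above for misc, source and
# binary files: each installed (ToFile, Md5Sum) pair is recorded exactly once,
# on the package when installing within a package, otherwise on the module.
def _ExampleRecordInstalledFile(Entry, Module, Package=None):
    if Package:
        if Entry not in Package.FileList:
            Package.FileList.append(Entry)
    elif Entry not in Module.FileList:
        Module.FileList.append(Entry)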
## InstallModuleContentZipFile
#
# InstallModuleContentZipFile
#
def InstallModuleContentZipFile(ContentZipFile, FromPath, ModulePath, WorkspaceDir, NewPath, Module, Package, ReadOnly,
ModuleList):
    #
    # Extract other files that live under the current module path in the content zip file but are not listed in the description file
    #
if ContentZipFile:
for FileName in ContentZipFile.GetZipFile().namelist():
FileName = os.path.normpath(FileName)
CheckPath = os.path.normpath(os.path.join(FromPath, ModulePath))
if FileUnderPath(FileName, CheckPath):
if FileName.startswith("\\") or FileName.startswith("/"):
FileName = FileName[1:]
if not IsValidInstallPath(FileName):
Logger.Error("UPT", FORMAT_INVALID, ST.ERR_FILE_NAME_INVALIDE%FileName)
FromFile = FileName
ToFile = os.path.normpath(os.path.join(WorkspaceDir,
ConvertPath(FileName.replace(FromPath, NewPath, 1))))
CheckList = copy.copy(Module.FileList)
if Package:
CheckList += Package.FileList
for Item in CheckList:
if Item[0] == ToFile:
break
else:
Md5Sum = InstallFile(ContentZipFile, FromFile, ToFile, ReadOnly)
if Package and ((ToFile, Md5Sum) not in Package.FileList):
Package.FileList.append((ToFile, Md5Sum))
elif Package:
continue
elif (ToFile, Md5Sum) not in Module.FileList:
Module.FileList.append((ToFile, Md5Sum))
ModuleList.append((Module, Package))
## FileUnderPath
# Check whether FileName starts with the directory specified by CheckPath
#
# @param FileName: the FileName to be checked
# @param CheckPath: the path to be checked against
# @return: True or False
#
def FileUnderPath(FileName, CheckPath):
FileName = FileName.replace('\\', '/')
FileName = os.path.normpath(FileName)
CheckPath = CheckPath.replace('\\', '/')
CheckPath = os.path.normpath(CheckPath)
if FileName.startswith(CheckPath):
RemainingPath = os.path.normpath(FileName.replace(CheckPath, '', 1))
while RemainingPath.startswith('\\') or RemainingPath.startswith('/'):
RemainingPath = RemainingPath[1:]
if FileName == os.path.normpath(os.path.join(CheckPath, RemainingPath)):
return True
return False
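# Illustrative usage (not part of the original tool; assumes POSIX-style
# inputs and a hypothetical helper name): FileUnderPath unifies separators
# before the prefix test, and the join round-trip rejects mere string-prefix
# matches.
def _ExampleFileUnderPath():
    # Backslashes are unified, so this counts as under Pkg/Include
    assert FileUnderPath('Pkg\\Include/Foo.h', 'Pkg/Include')
    # Pkg/IncludeExtra only shares a string prefix with Pkg/Include
    assert not FileUnderPath('Pkg/IncludeExtra/Foo.h', 'Pkg/Include')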
## InstallFile
# Extract a file from the zip file, set its attributes, and return its Md5Sum
#
# @return: the hex Md5Sum digest of the installed file
#
def InstallFile(ContentZipFile, FromFile, ToFile, ReadOnly, Executable=False):
    # Only unpack when the destination file does not already exist
    if not os.path.exists(os.path.normpath(ToFile)):
        if not ContentZipFile or not ContentZipFile.UnpackFile(FromFile, ToFile):
Logger.Error("UPT", FILE_NOT_FOUND, ST.ERR_INSTALL_FILE_FROM_EMPTY_CONTENT % FromFile)
if ReadOnly:
if not Executable:
chmod(ToFile, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
else:
chmod(ToFile, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH)
elif Executable:
chmod(ToFile, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR | stat.S_IWGRP |
stat.S_IWOTH | stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH)
else:
chmod(ToFile, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)
Md5Signature = md5(__FileHookOpen__(str(ToFile), 'rb').read())
Md5Sum = Md5Signature.hexdigest()
return Md5Sum
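# Illustrative sketch (not part of the original tool, helper name is
# hypothetical) of the mode computation in InstallFile: read bits are always
# granted, write bits only when not ReadOnly, and execute bits only when
# Executable.
def _ExampleInstallMode(ReadOnly, Executable):
    Mode = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
    if not ReadOnly:
        Mode |= stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
    if Executable:
        Mode |= stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH
    return Mode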
## InstallPackageContent method
#
# @param FromPath: FromPath
# @param ToPath: ToPath
# @param Package: Package
# @param ContentZipFile: ContentZipFile
# @param Dep: Dep
# @param WorkspaceDir: WorkspaceDir
# @param ModuleList: ModuleList
#
def InstallPackageContent(FromPath, ToPath, Package, ContentZipFile, Dep,
WorkspaceDir, ModuleList, ReadOnly = False):
    if Dep:
        # Dep is accepted for interface compatibility; it is not used here
        pass
Package.FileList = []
if ToPath.startswith("\\") or ToPath.startswith("/"):
ToPath = ToPath[1:]
if not IsValidInstallPath(ToPath):
Logger.Error("UPT", FORMAT_INVALID, ST.ERR_FILE_NAME_INVALIDE%ToPath)
if FromPath.startswith("\\") or FromPath.startswith("/"):
FromPath = FromPath[1:]
if not IsValidInstallPath(FromPath):
Logger.Error("UPT", FORMAT_INVALID, ST.ERR_FILE_NAME_INVALIDE%FromPath)
PackageFullPath = os.path.normpath(os.path.join(WorkspaceDir, ToPath))
for MiscFile in Package.GetMiscFileList():
for Item in MiscFile.GetFileList():
FileName = Item.GetURI()
if FileName.startswith("\\") or FileName.startswith("/"):
FileName = FileName[1:]
if not IsValidInstallPath(FileName):
Logger.Error("UPT", FORMAT_INVALID, ST.ERR_FILE_NAME_INVALIDE%FileName)
FromFile = os.path.join(FromPath, FileName)
Executable = Item.GetExecutable()
ToFile = (os.path.join(PackageFullPath, ConvertPath(FileName)))
Md5Sum = InstallFile(ContentZipFile, FromFile, ToFile, ReadOnly, Executable)
if (ToFile, Md5Sum) not in Package.FileList:
Package.FileList.append((ToFile, Md5Sum))
PackageIncludeArchList = []
for Item in Package.GetPackageIncludeFileList():
FileName = Item.GetFilePath()
if FileName.startswith("\\") or FileName.startswith("/"):
FileName = FileName[1:]
if not IsValidInstallPath(FileName):
Logger.Error("UPT", FORMAT_INVALID, ST.ERR_FILE_NAME_INVALIDE%FileName)
FromFile = os.path.join(FromPath, FileName)
ToFile = os.path.normpath(os.path.join(PackageFullPath, ConvertPath(FileName)))
RetFile = ContentZipFile.UnpackFile(FromFile, ToFile)
if RetFile == '':
            #
            # a non-existent path in the zip file will return '', which means an include directory in our case;
            # save the information for later DEC creation and also create the directory
            #
PackageIncludeArchList.append([Item.GetFilePath(), Item.GetSupArchList()])
CreateDirectory(ToFile)
continue
if ReadOnly:
chmod(ToFile, stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH)
else:
chmod(ToFile, stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH|stat.S_IWUSR|stat.S_IWGRP|stat.S_IWOTH)
Md5Signature = md5(__FileHookOpen__(str(ToFile), 'rb').read())
Md5Sum = Md5Signature.hexdigest()
if (ToFile, Md5Sum) not in Package.FileList:
Package.FileList.append((ToFile, Md5Sum))
Package.SetIncludeArchList(PackageIncludeArchList)
for Item in Package.GetStandardIncludeFileList():
FileName = Item.GetFilePath()
if FileName.startswith("\\") or FileName.startswith("/"):
FileName = FileName[1:]
if not IsValidInstallPath(FileName):
Logger.Error("UPT", FORMAT_INVALID, ST.ERR_FILE_NAME_INVALIDE%FileName)
FromFile = os.path.join(FromPath, FileName)
ToFile = os.path.normpath(os.path.join(PackageFullPath, ConvertPath(FileName)))
Md5Sum = InstallFile(ContentZipFile, FromFile, ToFile, ReadOnly)
if (ToFile, Md5Sum) not in Package.FileList:
Package.FileList.append((ToFile, Md5Sum))
#
# Update package
#
Package.SetPackagePath(Package.GetPackagePath().replace(FromPath,
ToPath, 1))
Package.SetFullPath(os.path.normpath(os.path.join(PackageFullPath,
ConvertPath(Package.GetName()) + '.dec')))
#
# Install files in module
#
Module = None
ModuleDict = Package.GetModuleDict()
for ModuleGuid, ModuleVersion, ModuleName, ModulePath in ModuleDict:
Module = ModuleDict[ModuleGuid, ModuleVersion, ModuleName, ModulePath]
InstallModuleContent(FromPath, ToPath, ModulePath, Module,
ContentZipFile, WorkspaceDir, ModuleList, Package, ReadOnly)
## GetDPFile method
#
# @param ZipFile: a ZipFile object for the distribution
# @retval DescFile, ContentFile: the package description file and the content file found in the zip
#
def GetDPFile(ZipFile):
ContentFile = ''
DescFile = ''
for FileName in ZipFile.namelist():
if FileName.endswith('.content'):
if not ContentFile:
ContentFile = FileName
continue
elif FileName.endswith('.pkg'):
if not DescFile:
DescFile = FileName
continue
else:
continue
Logger.Error("PackagingTool", FILE_TYPE_MISMATCH,
ExtraData=ST.ERR_DIST_FILE_TOOMANY)
if not DescFile or not ContentFile:
Logger.Error("PackagingTool", FILE_UNKNOWN_ERROR,
ExtraData=ST.ERR_DIST_FILE_TOOFEW)
return DescFile, ContentFile
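# Illustrative sketch (not part of the original tool, helper name is
# hypothetical) of the classification rule in GetDPFile: a well-formed
# distribution zip contains exactly one '.pkg' description file and exactly
# one '.content' payload file.
def _ExampleClassifyDpNames(NameList):
    DescNames = [Name for Name in NameList if Name.endswith('.pkg')]
    ContentNames = [Name for Name in NameList if Name.endswith('.content')]
    return DescNames, ContentNames
# More than one name of either kind maps to ERR_DIST_FILE_TOOMANY; fewer than
# one of each maps to ERR_DIST_FILE_TOOFEW.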
## InstallDp method
#
# Install the distribution to current workspace
#
def InstallDp(DistPkg, DpPkgFileName, ContentZipFile, Options, Dep, WorkspaceDir, DataBase):
#
# PackageList, ModuleList record the information for the meta-data
# files that need to be generated later
#
PackageList = []
ModuleList = []
DistPkg.PackageSurfaceArea = GetPackageList(DistPkg, Dep, WorkspaceDir, Options,
ContentZipFile, ModuleList, PackageList)
DistPkg.ModuleSurfaceArea = GetModuleList(DistPkg, Dep, WorkspaceDir, ContentZipFile, ModuleList)
GenToolMisc(DistPkg, WorkspaceDir, ContentZipFile)
#
# copy "Distribution File" to directory $(WORKSPACE)/conf/upt
#
DistFileName = os.path.split(DpPkgFileName)[1]
NewDpPkgFileName = BackupDist(DpPkgFileName, DistPkg.Header.GetGuid(), DistPkg.Header.GetVersion(), WorkspaceDir)
#
# update database
#
Logger.Quiet(ST.MSG_UPDATE_PACKAGE_DATABASE)
DataBase.AddDPObject(DistPkg, NewDpPkgFileName, DistFileName,
DistPkg.Header.RePackage)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/InstallPkg.py
|
## @file
# This file contain unit test for DecParser
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
import os
import unittest
from Logger.Log import FatalError
from Parser.DecParser import \
Dec, \
_DecDefine, \
_DecLibraryclass, \
_DecPcd, \
_DecGuid, \
FileContent, \
_DecBase, \
CleanString
from Object.Parser.DecObject import _DecComments
#
# Test CleanString
#
class CleanStringTestCase(unittest.TestCase):
def testCleanString(self):
Line, Comment = CleanString('')
self.assertEqual(Line, '')
self.assertEqual(Comment, '')
Line, Comment = CleanString('line without comment')
self.assertEqual(Line, 'line without comment')
self.assertEqual(Comment, '')
Line, Comment = CleanString('# pure comment')
self.assertEqual(Line, '')
self.assertEqual(Comment, '# pure comment')
Line, Comment = CleanString('line # and comment')
self.assertEqual(Line, 'line')
self.assertEqual(Comment, '# and comment')
def testCleanStringCpp(self):
Line, Comment = CleanString('line // and comment', AllowCppStyleComment = True)
self.assertEqual(Line, 'line')
self.assertEqual(Comment, '# and comment')
#
# Test _DecBase._MacroParser function
#
class MacroParserTestCase(unittest.TestCase):
def setUp(self):
self.dec = _DecBase(FileContent('dummy', []))
def testCorrectMacro(self):
self.dec._MacroParser('DEFINE MACRO1 = test1')
self.failIf('MACRO1' not in self.dec._LocalMacro)
self.assertEqual(self.dec._LocalMacro['MACRO1'], 'test1')
def testErrorMacro1(self):
# Raise fatal error, macro name must be upper case letter
self.assertRaises(FatalError, self.dec._MacroParser, 'DEFINE not_upper_case = test2')
def testErrorMacro2(self):
# No macro name given
self.assertRaises(FatalError, self.dec._MacroParser, 'DEFINE ')
#
# Test _DecBase._TryBackSlash function
#
class TryBackSlashTestCase(unittest.TestCase):
def setUp(self):
Content = [
# Right case
'test no backslash',
'test with backslash \\',
'continue second line',
# Do not precede with whitespace
'\\',
# Empty line after backlash is not allowed
'line with backslash \\',
''
]
self.dec = _DecBase(FileContent('dummy', Content))
def testBackSlash(self):
#
# Right case, assert return values
#
ConcatLine, CommentList = self.dec._TryBackSlash(self.dec._RawData.GetNextLine(), [])
self.assertEqual(ConcatLine, 'test no backslash')
self.assertEqual(CommentList, [])
ConcatLine, CommentList = self.dec._TryBackSlash(self.dec._RawData.GetNextLine(), [])
self.assertEqual(CommentList, [])
self.assertEqual(ConcatLine, 'test with backslash continue second line')
#
# Error cases, assert raise exception
#
self.assertRaises(FatalError, self.dec._TryBackSlash, self.dec._RawData.GetNextLine(), [])
self.assertRaises(FatalError, self.dec._TryBackSlash, self.dec._RawData.GetNextLine(), [])
#
# Test _DecBase.Parse function
#
class DataItem(_DecComments):
def __init__(self):
_DecComments.__init__(self)
self.String = ''
class Data(_DecComments):
def __init__(self):
_DecComments.__init__(self)
# List of DataItem
self.ItemList = []
class TestInner(_DecBase):
def __init__(self, RawData):
_DecBase.__init__(self, RawData)
self.ItemObject = Data()
def _StopCurrentParsing(self, Line):
return Line == '[TOP]'
def _ParseItem(self):
Item = DataItem()
Item.String = self._RawData.CurrentLine
self.ItemObject.ItemList.append(Item)
return Item
def _TailCommentStrategy(self, Comment):
return Comment.find('@comment') != -1
class TestTop(_DecBase):
def __init__(self, RawData):
_DecBase.__init__(self, RawData)
# List of Data
self.ItemObject = []
# Top parser
def _StopCurrentParsing(self, Line):
return False
def _ParseItem(self):
TestParser = TestInner(self._RawData)
TestParser.Parse()
self.ItemObject.append(TestParser.ItemObject)
return TestParser.ItemObject
class ParseTestCase(unittest.TestCase):
def setUp(self):
pass
def testParse(self):
Content = \
'''# Top comment
[TOP]
# sub1 head comment
(test item has both head and tail comment) # sub1 tail comment
# sub2 head comment
(test item has head and special tail comment)
# @comment test TailCommentStrategy branch
(test item has no comment)
# test NextLine branch
[TOP]
sub-item
'''
dec = TestTop(FileContent('dummy', Content.splitlines()))
dec.Parse()
# Two sections
self.assertEqual(len(dec.ItemObject), 2)
data = dec.ItemObject[0]
self.assertEqual(data._HeadComment[0][0], '# Top comment')
self.assertEqual(data._HeadComment[0][1], 1)
# 3 subitems
self.assertEqual(len(data.ItemList), 3)
dataitem = data.ItemList[0]
self.assertEqual(dataitem.String, '(test item has both head and tail comment)')
# Comment content
self.assertEqual(dataitem._HeadComment[0][0], '# sub1 head comment')
self.assertEqual(dataitem._TailComment[0][0], '# sub1 tail comment')
# Comment line number
self.assertEqual(dataitem._HeadComment[0][1], 3)
self.assertEqual(dataitem._TailComment[0][1], 4)
dataitem = data.ItemList[1]
self.assertEqual(dataitem.String, '(test item has head and special tail comment)')
# Comment content
self.assertEqual(dataitem._HeadComment[0][0], '# sub2 head comment')
self.assertEqual(dataitem._TailComment[0][0], '# @comment test TailCommentStrategy branch')
# Comment line number
self.assertEqual(dataitem._HeadComment[0][1], 5)
self.assertEqual(dataitem._TailComment[0][1], 7)
dataitem = data.ItemList[2]
self.assertEqual(dataitem.String, '(test item has no comment)')
# Comment content
self.assertEqual(dataitem._HeadComment, [])
self.assertEqual(dataitem._TailComment, [])
data = dec.ItemObject[1]
self.assertEqual(data._HeadComment[0][0], '# test NextLine branch')
self.assertEqual(data._HeadComment[0][1], 11)
# 1 subitems
self.assertEqual(len(data.ItemList), 1)
dataitem = data.ItemList[0]
self.assertEqual(dataitem.String, 'sub-item')
self.assertEqual(dataitem._HeadComment, [])
self.assertEqual(dataitem._TailComment, [])
#
# Test _DecDefine._ParseItem
#
class DecDefineTestCase(unittest.TestCase):
def GetObj(self, Content):
Obj = _DecDefine(FileContent('dummy', Content.splitlines()))
Obj._RawData.CurrentLine = Obj._RawData.GetNextLine()
return Obj
def testDecDefine(self):
item = self.GetObj('PACKAGE_NAME = MdePkg')._ParseItem()
self.assertEqual(item.Key, 'PACKAGE_NAME')
self.assertEqual(item.Value, 'MdePkg')
def testDecDefine1(self):
obj = self.GetObj('PACKAGE_NAME')
self.assertRaises(FatalError, obj._ParseItem)
def testDecDefine2(self):
obj = self.GetObj('unknown_key = ')
self.assertRaises(FatalError, obj._ParseItem)
def testDecDefine3(self):
obj = self.GetObj('PACKAGE_NAME = ')
self.assertRaises(FatalError, obj._ParseItem)
#
# Test _DecLibraryclass._ParseItem
#
class DecLibraryTestCase(unittest.TestCase):
def GetObj(self, Content):
Obj = _DecLibraryclass(FileContent('dummy', Content.splitlines()))
Obj._RawData.CurrentLine = Obj._RawData.GetNextLine()
return Obj
def testNoInc(self):
obj = self.GetObj('UefiRuntimeLib')
self.assertRaises(FatalError, obj._ParseItem)
def testEmpty(self):
obj = self.GetObj(' | ')
self.assertRaises(FatalError, obj._ParseItem)
def testLibclassNaming(self):
obj = self.GetObj('lowercase_efiRuntimeLib|Include/Library/UefiRuntimeLib.h')
self.assertRaises(FatalError, obj._ParseItem)
def testLibclassExt(self):
obj = self.GetObj('RuntimeLib|Include/Library/UefiRuntimeLib.no_h')
self.assertRaises(FatalError, obj._ParseItem)
def testLibclassRelative(self):
obj = self.GetObj('RuntimeLib|Include/../UefiRuntimeLib.h')
self.assertRaises(FatalError, obj._ParseItem)
#
# Test _DecPcd._ParseItem
#
class DecPcdTestCase(unittest.TestCase):
def GetObj(self, Content):
Obj = _DecPcd(FileContent('dummy', Content.splitlines()))
Obj._RawData.CurrentLine = Obj._RawData.GetNextLine()
Obj._RawData.CurrentScope = [('PcdsFeatureFlag'.upper(), 'COMMON')]
return Obj
def testOK(self):
item = self.GetObj('gEfiMdePkgTokenSpaceGuid.PcdComponentNameDisable|FALSE|BOOLEAN|0x0000000d')._ParseItem()
self.assertEqual(item.TokenSpaceGuidCName, 'gEfiMdePkgTokenSpaceGuid')
self.assertEqual(item.TokenCName, 'PcdComponentNameDisable')
self.assertEqual(item.DefaultValue, 'FALSE')
self.assertEqual(item.DatumType, 'BOOLEAN')
self.assertEqual(item.TokenValue, '0x0000000d')
def testNoCvar(self):
obj = self.GetObj('123ai.PcdComponentNameDisable|FALSE|BOOLEAN|0x0000000d')
self.assertRaises(FatalError, obj._ParseItem)
def testSplit(self):
obj = self.GetObj('gEfiMdePkgTokenSpaceGuid.PcdComponentNameDisable FALSE|BOOLEAN|0x0000000d')
self.assertRaises(FatalError, obj._ParseItem)
obj = self.GetObj('gEfiMdePkgTokenSpaceGuid.PcdComponentNameDisable|FALSE|BOOLEAN|0x0000000d | abc')
self.assertRaises(FatalError, obj._ParseItem)
def testUnknownType(self):
obj = self.GetObj('gEfiMdePkgTokenSpaceGuid.PcdComponentNameDisable|FALSE|unknown|0x0000000d')
self.assertRaises(FatalError, obj._ParseItem)
def testVoid(self):
obj = self.GetObj('gEfiMdePkgTokenSpaceGuid.PcdComponentNameDisable|abc|VOID*|0x0000000d')
self.assertRaises(FatalError, obj._ParseItem)
def testUINT(self):
obj = self.GetObj('gEfiMdePkgTokenSpaceGuid.PcdComponentNameDisable|0xabc|UINT8|0x0000000d')
self.assertRaises(FatalError, obj._ParseItem)
#
# Test _DecInclude._ParseItem
#
class DecIncludeTestCase(unittest.TestCase):
#
# Test code to be added
#
pass
#
# Test _DecGuid._ParseItem
#
class DecGuidTestCase(unittest.TestCase):
def GetObj(self, Content):
Obj = _DecGuid(FileContent('dummy', Content.splitlines()))
Obj._RawData.CurrentLine = Obj._RawData.GetNextLine()
Obj._RawData.CurrentScope = [('guids'.upper(), 'COMMON')]
return Obj
def testCValue(self):
item = self.GetObj('gEfiIpSecProtocolGuid={ 0xdfb386f7, 0xe100, 0x43ad,'
' {0x9c, 0x9a, 0xed, 0x90, 0xd0, 0x8a, 0x5e, 0x12 }}')._ParseItem()
self.assertEqual(item.GuidCName, 'gEfiIpSecProtocolGuid')
self.assertEqual(item.GuidCValue, '{ 0xdfb386f7, 0xe100, 0x43ad, {0x9c, 0x9a, 0xed, 0x90, 0xd0, 0x8a, 0x5e, 0x12 }}')
def testGuidString(self):
item = self.GetObj('gEfiIpSecProtocolGuid=1E73767F-8F52-4603-AEB4-F29B510B6766')._ParseItem()
self.assertEqual(item.GuidCName, 'gEfiIpSecProtocolGuid')
self.assertEqual(item.GuidCValue, '1E73767F-8F52-4603-AEB4-F29B510B6766')
def testNoValue1(self):
obj = self.GetObj('gEfiIpSecProtocolGuid')
self.assertRaises(FatalError, obj._ParseItem)
def testNoValue2(self):
obj = self.GetObj('gEfiIpSecProtocolGuid=')
self.assertRaises(FatalError, obj._ParseItem)
def testNoName(self):
obj = self.GetObj('=')
self.assertRaises(FatalError, obj._ParseItem)
#
# Test Dec.__init__
#
class DecDecInitTestCase(unittest.TestCase):
def testNoDecFile(self):
self.assertRaises(FatalError, Dec, 'No_Such_File')
class TmpFile:
def __init__(self, File):
self.File = File
def Write(self, Content):
try:
FileObj = open(self.File, 'w')
FileObj.write(Content)
FileObj.close()
except:
pass
def Remove(self):
try:
os.remove(self.File)
except:
pass
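# Illustrative usage (not part of the original tests): TmpFile stages a
# throwaway file on disk for a test case and removes it again in tearDown,
# for example:
#   File = TmpFile('example.dec')
#   File.Write('[Defines]')
#   ... parse 'example.dec' ...
#   File.Remove()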
#
# Test Dec._UserExtentionSectionParser
#
class DecUESectionTestCase(unittest.TestCase):
def setUp(self):
self.File = TmpFile('test.dec')
self.File.Write(
'''[userextensions.intel."myid"]
[userextensions.intel."myid".IA32]
[userextensions.intel."myid".IA32,]
[userextensions.intel."myid]
'''
)
def tearDown(self):
self.File.Remove()
def testUserExtentionHeader(self):
dec = Dec('test.dec', False)
# OK: [userextensions.intel."myid"]
dec._RawData.CurrentLine = CleanString(dec._RawData.GetNextLine())[0]
dec._UserExtentionSectionParser()
self.assertEqual(len(dec._RawData.CurrentScope), 1)
self.assertEqual(dec._RawData.CurrentScope[0][0], 'userextensions'.upper())
self.assertEqual(dec._RawData.CurrentScope[0][1], 'intel')
self.assertEqual(dec._RawData.CurrentScope[0][2], '"myid"')
self.assertEqual(dec._RawData.CurrentScope[0][3], 'COMMON')
# OK: [userextensions.intel."myid".IA32]
dec._RawData.CurrentLine = CleanString(dec._RawData.GetNextLine())[0]
dec._UserExtentionSectionParser()
self.assertEqual(len(dec._RawData.CurrentScope), 1)
self.assertEqual(dec._RawData.CurrentScope[0][0], 'userextensions'.upper())
self.assertEqual(dec._RawData.CurrentScope[0][1], 'intel')
self.assertEqual(dec._RawData.CurrentScope[0][2], '"myid"')
self.assertEqual(dec._RawData.CurrentScope[0][3], 'IA32')
# Fail: [userextensions.intel."myid".IA32,]
dec._RawData.CurrentLine = CleanString(dec._RawData.GetNextLine())[0]
self.assertRaises(FatalError, dec._UserExtentionSectionParser)
# Fail: [userextensions.intel."myid]
dec._RawData.CurrentLine = CleanString(dec._RawData.GetNextLine())[0]
self.assertRaises(FatalError, dec._UserExtentionSectionParser)
#
# Test Dec._SectionHeaderParser
#
class DecSectionTestCase(unittest.TestCase):
def setUp(self):
self.File = TmpFile('test.dec')
self.File.Write(
'''[no section start or end
[,] # empty sub-section
[unknow_section_name]
[Includes.IA32.other] # no third one
[PcdsFeatureFlag, PcdsFixedAtBuild] # feature flag PCD must not be in the same section of other types of PCD
[Includes.IA32, Includes.IA32]
[Includes, Includes.IA32] # common cannot be with other arch
[Includes.IA32, PcdsFeatureFlag] # different section name
''' )
def tearDown(self):
self.File.Remove()
def testSectionHeader(self):
dec = Dec('test.dec', False)
# [no section start or end
dec._RawData.CurrentLine = CleanString(dec._RawData.GetNextLine())[0]
self.assertRaises(FatalError, dec._SectionHeaderParser)
#[,] # empty sub-section
dec._RawData.CurrentLine = CleanString(dec._RawData.GetNextLine())[0]
self.assertRaises(FatalError, dec._SectionHeaderParser)
# [unknow_section_name]
dec._RawData.CurrentLine = CleanString(dec._RawData.GetNextLine())[0]
self.assertRaises(FatalError, dec._SectionHeaderParser)
# [Includes.IA32.other] # no third one
dec._RawData.CurrentLine = CleanString(dec._RawData.GetNextLine())[0]
self.assertRaises(FatalError, dec._SectionHeaderParser)
# [PcdsFeatureFlag, PcdsFixedAtBuild]
dec._RawData.CurrentLine = CleanString(dec._RawData.GetNextLine())[0]
self.assertRaises(FatalError, dec._SectionHeaderParser)
# [Includes.IA32, Includes.IA32]
dec._RawData.CurrentLine = CleanString(dec._RawData.GetNextLine())[0]
dec._SectionHeaderParser()
self.assertEqual(len(dec._RawData.CurrentScope), 1)
self.assertEqual(dec._RawData.CurrentScope[0][0], 'Includes'.upper())
self.assertEqual(dec._RawData.CurrentScope[0][1], 'IA32')
# [Includes, Includes.IA32] # common cannot be with other arch
dec._RawData.CurrentLine = CleanString(dec._RawData.GetNextLine())[0]
self.assertRaises(FatalError, dec._SectionHeaderParser)
# [Includes.IA32, PcdsFeatureFlag] # different section name not allowed
dec._RawData.CurrentLine = CleanString(dec._RawData.GetNextLine())[0]
self.assertRaises(FatalError, dec._SectionHeaderParser)
#
# Test Dec._ParseDecComment
#
class DecDecCommentTestCase(unittest.TestCase):
def testDecHeadComment(self):
File = TmpFile('test.dec')
File.Write(
'''# abc
##''')
dec = Dec('test.dec', False)
dec.ParseDecComment()
self.assertEqual(len(dec._HeadComment), 2)
self.assertEqual(dec._HeadComment[0][0], '# abc')
self.assertEqual(dec._HeadComment[0][1], 1)
self.assertEqual(dec._HeadComment[1][0], '##')
self.assertEqual(dec._HeadComment[1][1], 2)
File.Remove()
def testNoDoubleComment(self):
File = TmpFile('test.dec')
File.Write(
'''# abc
#
[section_start]''')
dec = Dec('test.dec', False)
dec.ParseDecComment()
self.assertEqual(len(dec._HeadComment), 2)
self.assertEqual(dec._HeadComment[0][0], '# abc')
self.assertEqual(dec._HeadComment[0][1], 1)
self.assertEqual(dec._HeadComment[1][0], '#')
self.assertEqual(dec._HeadComment[1][1], 2)
File.Remove()
if __name__ == '__main__':
import Logger.Logger
Logger.Logger.Initialize()
unittest.main()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/UnitTest/DecParserUnitTest.py
|
## @file
# This file contain unit test for Test [Binary] section part of InfParser
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
from __future__ import print_function
import os
#import Object.Parser.InfObject as InfObject
from Object.Parser.InfCommonObject import CurrentLine
from Object.Parser.InfCommonObject import InfLineCommentObject
from Object.Parser.InfBinaryObject import InfBinariesObject
import Logger.Log as Logger
import Library.GlobalData as Global
##
# Test Common binary item
#
#-------------start of common binary item test input--------------------------#
#
# Only has 1 element, binary item Type
#
SectionStringsCommonItem1 = \
"""
GUID
"""
#
# Have 2 elements, binary item Type and FileName
#
SectionStringsCommonItem2 = \
"""
GUID | Test/Test.guid
"""
#
# Have 3 elements, Type | FileName | Target
#
SectionStringsCommonItem3 = \
"""
GUID | Test/Test.guid | DEBUG
"""
#
# Have 3 elements, Type | FileName | Target
# Target with MACRO defined in [Define] section
#
SectionStringsCommonItem4 = \
"""
GUID | Test/Test.guid | $(TARGET)
"""
#
# Have 3 elements, Type | FileName | Target
# FileName with MACRO defined in [Binary] section
#
SectionStringsCommonItem5 = \
"""
DEFINE BINARY_FILE_PATH = Test
GUID | $(BINARY_FILE_PATH)/Test.guid | $(TARGET)
"""
#
# Have 4 elements, Type | FileName | Target | Family
#
SectionStringsCommonItem6 = \
"""
GUID | Test/Test.guid | DEBUG | *
"""
#
# Have 4 elements, Type | FileName | Target | Family
#
SectionStringsCommonItem7 = \
"""
GUID | Test/Test.guid | DEBUG | MSFT
"""
#
# Have 5 elements, Type | FileName | Target | Family | TagName
#
SectionStringsCommonItem8 = \
"""
GUID | Test/Test.guid | DEBUG | MSFT | TEST
"""
#
# Have 6 elements, Type | FileName | Target | Family | TagName | FFE
#
SectionStringsCommonItem9 = \
"""
GUID | Test/Test.guid | DEBUG | MSFT | TEST | TRUE
"""
#
# Have 7 elements, Type | FileName | Target | Family | TagName | FFE | Overflow
# Test wrong format
#
SectionStringsCommonItem10 = \
"""
GUID | Test/Test.guid | DEBUG | MSFT | TEST | TRUE | OVERFLOW
"""
#-------------end of common binary item test input----------------------------#
#-------------start of VER type binary item test input------------------------#
#
# Has 1 element, error format
#
SectionStringsVerItem1 = \
"""
VER
"""
#
# Have 5 elements, error format (maximum number of elements is 4)
#
SectionStringsVerItem2 = \
"""
VER | Test/Test.ver | * | TRUE | OverFlow
"""
#
# Have 2 elements, Type | FileName
#
SectionStringsVerItem3 = \
"""
VER | Test/Test.ver
"""
#
# Have 3 elements, Type | FileName | Target
#
SectionStringsVerItem4 = \
"""
VER | Test/Test.ver | DEBUG
"""
#
# Have 4 elements, Type | FileName | Target | FeatureFlagExp
#
SectionStringsVerItem5 = \
"""
VER | Test/Test.ver | DEBUG | TRUE
"""
#
# Exist 2 VER items, both opened.
#
SectionStringsVerItem6 = \
"""
VER | Test/Test.ver | * | TRUE
VER | Test/Test2.ver | * | TRUE
"""
#
# Exist 2 VER items, only 1 opened.
#
SectionStringsVerItem7 = \
"""
VER | Test/Test.ver | * | TRUE
VER | Test/Test2.ver | * | FALSE
"""
#-------------end of VER type binary item test input--------------------------#
#-------------start of UI type binary item test input-------------------------#
#
# Test only one UI section can exist
#
SectionStringsUiItem1 = \
"""
UI | Test/Test.ui | * | TRUE
UI | Test/Test2.ui | * | TRUE
"""
SectionStringsUiItem2 = \
"""
UI | Test/Test.ui | * | TRUE
SEC_UI | Test/Test2.ui | * | TRUE
"""
SectionStringsUiItem3 = \
"""
UI | Test/Test.ui | * | TRUE
UI | Test/Test2.ui | * | FALSE
"""
#
# Has 1 element, error format
#
SectionStringsUiItem4 = \
"""
UI
"""
#
# Have 5 elements, error format (maximum number of elements is 4)
#
SectionStringsUiItem5 = \
"""
UI | Test/Test.ui | * | TRUE | OverFlow
"""
#
# Have 2 elements, Type | FileName
#
SectionStringsUiItem6 = \
"""
UI | Test/Test.ui
"""
#
# Have 3 elements, Type | FileName | Target
#
SectionStringsUiItem7 = \
"""
UI | Test/Test.ui | DEBUG
"""
#
# Have 4 elements, Type | FileName | Target | FeatureFlagExp
#
SectionStringsUiItem8 = \
"""
UI | Test/Test.ui | DEBUG | TRUE
"""
#---------------end of UI type binary item test input-------------------------#
gFileName = "BinarySectionTest.inf"
##
# Construct a SectionString list for section parser usage.
#
def StringToSectionString(String):
Lines = String.split('\n')
LineNo = 0
SectionString = []
for Line in Lines:
if Line.strip() == '':
continue
SectionString.append((Line, LineNo, ''))
LineNo = LineNo + 1
return SectionString
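# Illustrative check (not part of the original test, helper name is
# hypothetical): StringToSectionString skips blank lines and numbers the
# remaining lines from 0.
def _ExampleStringToSectionString():
    Result = StringToSectionString('\nGUID | Test/Test.guid\n')
    assert Result == [('GUID | Test/Test.guid', 0, '')]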
def PrepareTest(String):
SectionString = StringToSectionString(String)
ItemList = []
for Item in SectionString:
ValueList = Item[0].split('|')
for count in range(len(ValueList)):
ValueList[count] = ValueList[count].strip()
if len(ValueList) >= 2:
#
# Create a temp file for test.
#
FileName = os.path.normpath(os.path.realpath(ValueList[1].strip()))
try:
TempFile = open (FileName, "w")
TempFile.close()
except:
print("File Create Error")
        # Use distinct local names so that the imported CurrentLine and
        # InfLineCommentObject classes are not shadowed (and therefore remain
        # callable) on later loop iterations
        LineObj = CurrentLine()
        LineObj.SetFileName("Test")
        LineObj.SetLineString(Item[0])
        LineObj.SetLineNo(Item[1])
        LineComment = InfLineCommentObject()
        ItemList.append((ValueList, LineComment, LineObj))
return ItemList
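# PrepareTest creates an empty placeholder file for every FileName field so
# that SetBinary's file-existence checks can pass; the placeholders are
# created relative to the current directory and are not cleaned up afterwards.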
if __name__ == '__main__':
Logger.Initialize()
InfBinariesInstance = InfBinariesObject()
ArchList = ['COMMON']
Global.gINF_MODULE_DIR = os.getcwd()
AllPassedFlag = True
#
# For All Ui test
#
UiStringList = [
SectionStringsUiItem1,
SectionStringsUiItem2,
SectionStringsUiItem3,
SectionStringsUiItem4,
SectionStringsUiItem5,
SectionStringsUiItem6,
SectionStringsUiItem7,
SectionStringsUiItem8
]
for Item in UiStringList:
Ui = PrepareTest(Item)
if Item == SectionStringsUiItem4 or Item == SectionStringsUiItem5:
try:
InfBinariesInstance.SetBinary(Ui = Ui, ArchList = ArchList)
except Logger.FatalError:
pass
else:
try:
InfBinariesInstance.SetBinary(Ui = Ui, ArchList = ArchList)
except:
AllPassedFlag = False
#
# For All Ver Test
#
VerStringList = [
SectionStringsVerItem1,
SectionStringsVerItem2,
SectionStringsVerItem3,
SectionStringsVerItem4,
SectionStringsVerItem5,
SectionStringsVerItem6,
SectionStringsVerItem7
]
for Item in VerStringList:
Ver = PrepareTest(Item)
if Item == SectionStringsVerItem1 or \
Item == SectionStringsVerItem2:
try:
InfBinariesInstance.SetBinary(Ver = Ver, ArchList = ArchList)
except:
pass
else:
try:
InfBinariesInstance.SetBinary(Ver = Ver, ArchList = ArchList)
except:
AllPassedFlag = False
#
# For All Common Test
#
CommonStringList = [
SectionStringsCommonItem1,
SectionStringsCommonItem2,
SectionStringsCommonItem3,
SectionStringsCommonItem4,
SectionStringsCommonItem5,
SectionStringsCommonItem6,
SectionStringsCommonItem7,
SectionStringsCommonItem8,
SectionStringsCommonItem9,
SectionStringsCommonItem10
]
for Item in CommonStringList:
CommonBin = PrepareTest(Item)
if Item == SectionStringsCommonItem10 or \
Item == SectionStringsCommonItem1:
try:
InfBinariesInstance.SetBinary(CommonBinary = CommonBin, ArchList = ArchList)
except:
pass
else:
try:
                InfBinariesInstance.SetBinary(CommonBinary = CommonBin, ArchList = ArchList)
except:
print("Test Failed!")
AllPassedFlag = False
if AllPassedFlag :
print('All tests passed...')
else:
print('Some unit test failed!')
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/UnitTest/InfBinarySectionTest.py
|
## @file
# This file contain unit test for CommentParsing
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
import unittest
import Logger.Log as Logger
from Library.CommentParsing import ParseHeaderCommentSection, \
ParseGenericComment, \
ParseDecPcdGenericComment, \
ParseDecPcdTailComment
from Library.CommentParsing import _IsCopyrightLine
from Library.StringUtils import GetSplitValueList
from Library.DataType import TAB_SPACE_SPLIT
from Library.DataType import TAB_LANGUAGE_EN_US
#
# Test ParseHeaderCommentSection
#
class ParseHeaderCommentSectionTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
#
# Normal case1: have license/copyright/license above @file
#
def testNormalCase1(self):
TestCommentLines1 = \
'''# License1
# License2
#
## @file
# example abstract
#
# example description
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# License3
#'''
CommentList = GetSplitValueList(TestCommentLines1, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
Abstract, Description, Copyright, License = \
ParseHeaderCommentSection(TestCommentLinesList, "PhonyFile")
ExpectedAbstract = 'example abstract'
self.assertEqual(Abstract, ExpectedAbstract)
ExpectedDescription = 'example description'
self.assertEqual(Description, ExpectedDescription)
        ExpectedCopyright = \
            'Copyright (c) 2007 - 2018,'\
            ' Intel Corporation. All rights reserved.<BR>'
self.assertEqual(Copyright, ExpectedCopyright)
ExpectedLicense = 'License1\nLicense2\n\nLicense3'
self.assertEqual(License, ExpectedLicense)
#
# Normal case2: have license/copyright above @file, but no copyright after
#
def testNormalCase2(self):
TestCommentLines2 = \
''' # License1
# License2
#
## @file
# example abstract
#
# example description
#
#Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
##'''
CommentList = GetSplitValueList(TestCommentLines2, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
Abstract, Description, Copyright, License = \
ParseHeaderCommentSection(TestCommentLinesList, "PhonyFile")
ExpectedAbstract = 'example abstract'
self.assertEqual(Abstract, ExpectedAbstract)
ExpectedDescription = 'example description'
self.assertEqual(Description, ExpectedDescription)
ExpectedCopyright = \
'Copyright (c) 2007 - 2018, Intel Corporation.'\
' All rights reserved.<BR>'
self.assertEqual(Copyright, ExpectedCopyright)
ExpectedLicense = 'License1\nLicense2'
self.assertEqual(License, ExpectedLicense)
    #
    # Normal case3: have license/copyright/license above @file,
    # but no abstract/description
    #
def testNormalCase3(self):
TestCommentLines3 = \
''' # License1
# License2
#
## @file
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# License3 Line1
# License3 Line2
##'''
CommentList = GetSplitValueList(TestCommentLines3, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
Abstract, Description, Copyright, License = \
ParseHeaderCommentSection(TestCommentLinesList, "PhonyFile")
ExpectedAbstract = ''
self.assertEqual(Abstract, ExpectedAbstract)
ExpectedDescription = ''
self.assertEqual(Description, ExpectedDescription)
        ExpectedCopyright = \
            'Copyright (c) 2007 - 2018,'\
            ' Intel Corporation. All rights reserved.<BR>'
self.assertEqual(Copyright, ExpectedCopyright)
ExpectedLicense = \
'License1\n' \
'License2\n\n' \
'License3 Line1\n' \
'License3 Line2'
self.assertEqual(License, ExpectedLicense)
#
# Normal case4: format example in spec
#
def testNormalCase4(self):
TestCommentLines = \
'''
## @file
# Abstract
#
# Description
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# License
#
##'''
CommentList = GetSplitValueList(TestCommentLines, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
Abstract, Description, Copyright, License = \
ParseHeaderCommentSection(TestCommentLinesList, "PhonyFile")
ExpectedAbstract = 'Abstract'
self.assertEqual(Abstract, ExpectedAbstract)
ExpectedDescription = 'Description'
self.assertEqual(Description, ExpectedDescription)
ExpectedCopyright = \
'Copyright (c) 2007 - 2018, Intel Corporation.'\
' All rights reserved.<BR>'
self.assertEqual(Copyright, ExpectedCopyright)
ExpectedLicense = \
'License'
self.assertEqual(License, ExpectedLicense)
#
# Normal case5: other line between copyright
#
def testNormalCase5(self):
TestCommentLines = \
'''
## @file
# Abstract
#
# Description
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# other line
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# License
#
##'''
CommentList = GetSplitValueList(TestCommentLines, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
Abstract, Description, Copyright, License = \
ParseHeaderCommentSection(TestCommentLinesList, "PhonyFile")
ExpectedAbstract = 'Abstract'
self.assertEqual(Abstract, ExpectedAbstract)
ExpectedDescription = 'Description'
self.assertEqual(Description, ExpectedDescription)
ExpectedCopyright = \
'Copyright (c) 2007 - 2018, Intel Corporation.'\
' All rights reserved.<BR>\n'\
'Copyright (c) 2007 - 2018, Intel Corporation.'\
' All rights reserved.<BR>'
self.assertEqual(Copyright, ExpectedCopyright)
ExpectedLicense = \
'License'
self.assertEqual(License, ExpectedLicense)
#
# Normal case6: multiple lines of copyright
#
def testNormalCase6(self):
TestCommentLines = \
'''
## @file
# Abstract
#
# Description
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2007 - 2010, FOO1 Corporation. All rights reserved.<BR>
# Copyright (c) 2007 - 2010, FOO2 Corporation. All rights reserved.<BR>
#
# License
#
##'''
CommentList = GetSplitValueList(TestCommentLines, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
Abstract, Description, Copyright, License = \
ParseHeaderCommentSection(TestCommentLinesList, "PhonyFile")
ExpectedAbstract = 'Abstract'
self.assertEqual(Abstract, ExpectedAbstract)
ExpectedDescription = 'Description'
self.assertEqual(Description, ExpectedDescription)
ExpectedCopyright = \
'Copyright (c) 2007 - 2018, Intel Corporation.'\
' All rights reserved.<BR>\n'\
'Copyright (c) 2007 - 2010, FOO1 Corporation.'\
' All rights reserved.<BR>\n'\
'Copyright (c) 2007 - 2010, FOO2 Corporation.'\
' All rights reserved.<BR>'
self.assertEqual(Copyright, ExpectedCopyright)
ExpectedLicense = \
'License'
self.assertEqual(License, ExpectedLicense)
#
# Normal case7: Abstract not present
#
def testNormalCase7(self):
TestCommentLines = \
'''
## @file
#
# Description
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2007 - 2010, FOO1 Corporation. All rights reserved.<BR>
# Copyright (c) 2007 - 2010, FOO2 Corporation. All rights reserved.<BR>
#
# License
#
##'''
CommentList = GetSplitValueList(TestCommentLines, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
Abstract, Description, Copyright, License = \
ParseHeaderCommentSection(TestCommentLinesList, "PhonyFile")
ExpectedAbstract = ''
self.assertEqual(Abstract, ExpectedAbstract)
ExpectedDescription = 'Description'
self.assertEqual(Description, ExpectedDescription)
ExpectedCopyright = \
'Copyright (c) 2007 - 2018, Intel Corporation.'\
' All rights reserved.<BR>\n'\
'Copyright (c) 2007 - 2010, FOO1 Corporation.'\
' All rights reserved.<BR>\n'\
'Copyright (c) 2007 - 2010, FOO2 Corporation.'\
' All rights reserved.<BR>'
self.assertEqual(Copyright, ExpectedCopyright)
ExpectedLicense = \
'License'
self.assertEqual(License, ExpectedLicense)
#
# Normal case8: Description not present
#
def testNormalCase8(self):
TestCommentLines = \
'''
## @file
        # Abstract
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# License
#
##'''
CommentList = GetSplitValueList(TestCommentLines, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
Abstract, Description, Copyright, License = \
ParseHeaderCommentSection(TestCommentLinesList, "PhonyFile")
        ExpectedAbstract = 'Abstract'
self.assertEqual(Abstract, ExpectedAbstract)
ExpectedDescription = ''
self.assertEqual(Description, ExpectedDescription)
ExpectedCopyright = \
'Copyright (c) 2007 - 2018, Intel Corporation.'\
' All rights reserved.<BR>'
self.assertEqual(Copyright, ExpectedCopyright)
ExpectedLicense = \
'License'
self.assertEqual(License, ExpectedLicense)
#
# Error case1: No copyright found
#
def testErrorCase1(self):
TestCommentLines = \
'''
## @file
# Abstract
#
# Description
#
# License
#
##'''
CommentList = GetSplitValueList(TestCommentLines, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
self.assertRaises(Logger.FatalError,
ParseHeaderCommentSection,
TestCommentLinesList,
"PhonyFile")
#
# Error case2: non-empty non-comment lines passed in
#
def testErrorCase2(self):
TestCommentLines = \
'''
## @file
# Abstract
#
this is invalid line
# Description
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# License
#
##'''
CommentList = GetSplitValueList(TestCommentLines, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
self.assertRaises(Logger.FatalError,
ParseHeaderCommentSection,
TestCommentLinesList,
"PhonyFile")
#
# Test ParseGenericComment
#
class ParseGenericCommentTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
#
# Normal case1: one line of comment
#
def testNormalCase1(self):
TestCommentLines = \
'''# hello world'''
CommentList = GetSplitValueList(TestCommentLines, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
HelptxtObj = ParseGenericComment(TestCommentLinesList, 'testNormalCase1')
self.failIf(not HelptxtObj)
self.assertEqual(HelptxtObj.GetString(), 'hello world')
self.assertEqual(HelptxtObj.GetLang(), TAB_LANGUAGE_EN_US)
#
# Normal case2: multiple lines of comment
#
def testNormalCase2(self):
TestCommentLines = \
'''## hello world
# second line'''
CommentList = GetSplitValueList(TestCommentLines, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
HelptxtObj = ParseGenericComment(TestCommentLinesList, 'testNormalCase2')
self.failIf(not HelptxtObj)
self.assertEqual(HelptxtObj.GetString(),
'hello world\n' + 'second line')
self.assertEqual(HelptxtObj.GetLang(), TAB_LANGUAGE_EN_US)
#
# Normal case3: multiple lines of comment, non comment lines will be skipped
#
def testNormalCase3(self):
TestCommentLines = \
'''## hello world
This is not comment line'''
CommentList = GetSplitValueList(TestCommentLines, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
HelptxtObj = ParseGenericComment(TestCommentLinesList, 'testNormalCase3')
self.failIf(not HelptxtObj)
self.assertEqual(HelptxtObj.GetString(),
'hello world\n\n')
self.assertEqual(HelptxtObj.GetLang(), TAB_LANGUAGE_EN_US)
#
# Test ParseDecPcdGenericComment
#
class ParseDecPcdGenericCommentTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
#
# Normal case1: comments with no special comment
#
def testNormalCase1(self):
TestCommentLines = \
'''## hello world
# second line'''
CommentList = GetSplitValueList(TestCommentLines, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
(HelpTxt, PcdErr) = \
ParseDecPcdGenericComment(TestCommentLinesList, 'testNormalCase1')
self.failIf(not HelpTxt)
self.failIf(PcdErr)
self.assertEqual(HelpTxt,
'hello world\n' + 'second line')
#
# Normal case2: comments with valid list
#
def testNormalCase2(self):
TestCommentLines = \
'''## hello world
# second line
# @ValidList 1, 2, 3
# other line'''
CommentList = GetSplitValueList(TestCommentLines, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
(HelpTxt, PcdErr) = \
ParseDecPcdGenericComment(TestCommentLinesList, 'UnitTest')
self.failIf(not HelpTxt)
self.failIf(not PcdErr)
self.assertEqual(HelpTxt,
'hello world\n' + 'second line\n' + 'other line')
ExpectedList = GetSplitValueList('1 2 3', TAB_SPACE_SPLIT)
ActualList = [item for item in \
GetSplitValueList(PcdErr.GetValidValue(), TAB_SPACE_SPLIT) if item]
self.assertEqual(ExpectedList, ActualList)
self.failIf(PcdErr.GetExpression())
self.failIf(PcdErr.GetValidValueRange())
#
# Normal case3: comments with valid range
#
def testNormalCase3(self):
TestCommentLines = \
'''## hello world
# second line
# @ValidRange LT 1 AND GT 2
# other line'''
CommentList = GetSplitValueList(TestCommentLines, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
(HelpTxt, PcdErr) = \
ParseDecPcdGenericComment(TestCommentLinesList, 'UnitTest')
self.failIf(not HelpTxt)
self.failIf(not PcdErr)
self.assertEqual(HelpTxt,
'hello world\n' + 'second line\n' + 'other line')
self.assertEqual(PcdErr.GetValidValueRange().strip(), 'LT 1 AND GT 2')
self.failIf(PcdErr.GetExpression())
self.failIf(PcdErr.GetValidValue())
#
# Normal case4: comments with valid expression
#
def testNormalCase4(self):
TestCommentLines = \
'''## hello world
# second line
# @Expression LT 1 AND GT 2
# other line'''
CommentList = GetSplitValueList(TestCommentLines, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
(HelpTxt, PcdErr) = \
ParseDecPcdGenericComment(TestCommentLinesList, 'UnitTest')
self.failIf(not HelpTxt)
self.failIf(not PcdErr)
self.assertEqual(HelpTxt,
'hello world\n' + 'second line\n' + 'other line')
self.assertEqual(PcdErr.GetExpression().strip(), 'LT 1 AND GT 2')
self.failIf(PcdErr.GetValidValueRange())
self.failIf(PcdErr.GetValidValue())
#
# Normal case5: comments with valid expression and no generic comment
#
def testNormalCase5(self):
TestCommentLines = \
'''# @Expression LT 1 AND GT 2'''
CommentList = GetSplitValueList(TestCommentLines, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
(HelpTxt, PcdErr) = \
ParseDecPcdGenericComment(TestCommentLinesList, 'UnitTest')
self.failIf(HelpTxt)
self.failIf(not PcdErr)
self.assertEqual(PcdErr.GetExpression().strip(), 'LT 1 AND GT 2')
self.failIf(PcdErr.GetValidValueRange())
self.failIf(PcdErr.GetValidValue())
#
# Normal case6: comments with only generic help text
#
def testNormalCase6(self):
TestCommentLines = \
'''#'''
CommentList = GetSplitValueList(TestCommentLines, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
(HelpTxt, PcdErr) = \
ParseDecPcdGenericComment(TestCommentLinesList, 'UnitTest')
self.assertEqual(HelpTxt, '\n')
self.failIf(PcdErr)
#
    # Error case1: comments with both an expression and a valid list; the
    # later annotation is used and the former is ignored with a warning
#
def testErrorCase1(self):
TestCommentLines = \
'''## hello world
# second line
# @ValidList 1, 2, 3
# @Expression LT 1 AND GT 2
# other line'''
CommentList = GetSplitValueList(TestCommentLines, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
try:
ParseDecPcdGenericComment(TestCommentLinesList, 'UnitTest')
except Logger.FatalError:
pass
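# Summary of the PCD comment annotations exercised above (illustrative, not
# from the original tests); a PCD comment block carries at most one of them:
#   # @ValidList  1, 2, 3        -> retrieved via PcdErr.GetValidValue()
#   # @ValidRange LT 1 AND GT 2  -> retrieved via PcdErr.GetValidValueRange()
#   # @Expression LT 1 AND GT 2  -> retrieved via PcdErr.GetExpression()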
#
# Test ParseDecPcdTailComment
#
class ParseDecPcdTailCommentTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
#
# Normal case1: comments with no SupModeList
#
def testNormalCase1(self):
TestCommentLines = \
'''## #hello world'''
CommentList = GetSplitValueList(TestCommentLines, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
(SupModeList, HelpStr) = \
ParseDecPcdTailComment(TestCommentLinesList, 'UnitTest')
self.failIf(not HelpStr)
self.failIf(SupModeList)
self.assertEqual(HelpStr,
'hello world')
#
# Normal case2: comments with one SupMode
#
def testNormalCase2(self):
TestCommentLines = \
'''## BASE #hello world'''
CommentList = GetSplitValueList(TestCommentLines, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
(SupModeList, HelpStr) = \
ParseDecPcdTailComment(TestCommentLinesList, 'UnitTest')
self.failIf(not HelpStr)
self.failIf(not SupModeList)
self.assertEqual(HelpStr,
'hello world')
self.assertEqual(SupModeList,
['BASE'])
#
# Normal case3: comments with more than one SupMode
#
def testNormalCase3(self):
TestCommentLines = \
'''## BASE UEFI_APPLICATION #hello world'''
CommentList = GetSplitValueList(TestCommentLines, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
(SupModeList, HelpStr) = \
ParseDecPcdTailComment(TestCommentLinesList, 'UnitTest')
self.failIf(not HelpStr)
self.failIf(not SupModeList)
self.assertEqual(HelpStr,
'hello world')
self.assertEqual(SupModeList,
['BASE', 'UEFI_APPLICATION'])
#
# Normal case4: comments with more than one SupMode, no help text
#
def testNormalCase4(self):
TestCommentLines = \
'''## BASE UEFI_APPLICATION'''
CommentList = GetSplitValueList(TestCommentLines, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
(SupModeList, HelpStr) = \
ParseDecPcdTailComment(TestCommentLinesList, 'UnitTest')
self.failIf(HelpStr)
self.failIf(not SupModeList)
self.assertEqual(SupModeList,
['BASE', 'UEFI_APPLICATION'])
#
# Normal case5: general comments with no SupModeList, extracted from a real case
#
def testNormalCase5(self):
TestCommentLines = \
''' # 1 = 128MB, 2 = 256MB, 3 = MAX'''
CommentList = GetSplitValueList(TestCommentLines, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
(SupModeList, HelpStr) = \
ParseDecPcdTailComment(TestCommentLinesList, 'UnitTest')
self.failIf(not HelpStr)
self.assertEqual(HelpStr,
'1 = 128MB, 2 = 256MB, 3 = MAX')
self.failIf(SupModeList)
#
# Error case2: comments whose SupModeList contains both valid and invalid
# module types
#
def testErrorCase2(self):
TestCommentLines = \
'''## BASE INVALID_MODULE_TYPE #hello world'''
CommentList = GetSplitValueList(TestCommentLines, "\n")
LineNum = 0
TestCommentLinesList = []
for Comment in CommentList:
LineNum += 1
TestCommentLinesList.append((Comment, LineNum))
try:
ParseDecPcdTailComment(TestCommentLinesList, 'UnitTest')
except Logger.FatalError:
pass
#
# Test _IsCopyrightLine
#
class _IsCopyrightLineTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
#
# Normal case
#
def testCase1(self):
Line = 'this is a copyright ( line'
Result = _IsCopyrightLine(Line)
self.failIf(not Result)
#
# Normal case
#
def testCase2(self):
Line = 'this is a Copyright ( line'
Result = _IsCopyrightLine(Line)
self.failIf(not Result)
#
# Normal case
#
def testCase3(self):
Line = 'this is not aCopyright ( line'
Result = _IsCopyrightLine(Line)
self.failIf(Result)
#
# Normal case
#
def testCase4(self):
Line = 'this is Copyright( line'
Result = _IsCopyrightLine(Line)
self.failIf(not Result)
#
# Normal case
#
def testCase5(self):
Line = 'this is Copyright (line'
Result = _IsCopyrightLine(Line)
self.failIf(not Result)
#
# Normal case
#
def testCase6(self):
Line = 'this is not Copyright line'
Result = _IsCopyrightLine(Line)
self.failIf(Result)
#
# Normal case
#
def testCase7(self):
Line = 'Copyright (c) line'
Result = _IsCopyrightLine(Line)
self.failIf(not Result)
#
# Normal case
#
def testCase8(self):
Line = ' Copyright (c) line'
Result = _IsCopyrightLine(Line)
self.failIf(not Result)
#
# Normal case
#
def testCase9(self):
Line = 'not a Copyright '
Result = _IsCopyrightLine(Line)
self.failIf(Result)
if __name__ == '__main__':
Logger.Initialize()
unittest.main()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/UnitTest/CommentParsingUnitTest.py
|
## @file
# This file contain unit test for CommentParsing
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
import os
import unittest
import Logger.Log as Logger
from GenMetaFile.GenInfFile import GenGuidSections
from GenMetaFile.GenInfFile import GenProtocolPPiSections
from GenMetaFile.GenInfFile import GenPcdSections
from GenMetaFile.GenInfFile import GenSpecialSections
from Library.CommentGenerating import GenGenericCommentF
from Library.CommentGenerating import _GetHelpStr
from Object.POM.CommonObject import TextObject
from Object.POM.CommonObject import GuidObject
from Object.POM.CommonObject import ProtocolObject
from Object.POM.CommonObject import PpiObject
from Object.POM.CommonObject import PcdObject
from Object.POM.ModuleObject import HobObject
from Library.StringUtils import GetSplitValueList
from Library.DataType import TAB_SPACE_SPLIT
from Library.DataType import TAB_LANGUAGE_EN_US
from Library.DataType import TAB_LANGUAGE_ENG
from Library.DataType import ITEM_UNDEFINED
from Library.DataType import TAB_INF_FEATURE_PCD
from Library import GlobalData
from Library.Misc import CreateDirectory
#
# Test _GetHelpStr
#
class _GetHelpStrTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
#
# Normal case1: have one help text object with Lang = 'en-US'
#
def testNormalCase1(self):
HelpStr = 'Hello world'
HelpTextObj = TextObject()
HelpTextObj.SetLang(TAB_LANGUAGE_EN_US)
HelpTextObj.SetString(HelpStr)
HelpTextList = [HelpTextObj]
Result = _GetHelpStr(HelpTextList)
self.assertEqual(Result, HelpStr)
#
# Normal case2: have two help text objects with Lang = 'eng' and 'en-US'
#
def testNormalCase2(self):
HelpStr = 'Hello world'
HelpTextObj = TextObject()
HelpTextObj.SetLang(TAB_LANGUAGE_ENG)
HelpTextObj.SetString(HelpStr)
HelpTextList = [HelpTextObj]
ExpectedStr = 'Hello world1'
HelpTextObj = TextObject()
HelpTextObj.SetLang(TAB_LANGUAGE_EN_US)
HelpTextObj.SetString(ExpectedStr)
HelpTextList.append(HelpTextObj)
Result = _GetHelpStr(HelpTextList)
self.assertEqual(Result, ExpectedStr)
#
# Normal case3: have two help text objects with Lang = '' and 'eng'
#
def testNormalCase3(self):
HelpStr = 'Hello world'
HelpTextObj = TextObject()
HelpTextObj.SetLang('')
HelpTextObj.SetString(HelpStr)
HelpTextList = [HelpTextObj]
ExpectedStr = 'Hello world1'
HelpTextObj = TextObject()
HelpTextObj.SetLang(TAB_LANGUAGE_ENG)
HelpTextObj.SetString(ExpectedStr)
HelpTextList.append(HelpTextObj)
Result = _GetHelpStr(HelpTextList)
self.assertEqual(Result, ExpectedStr)
#
# Normal case4: have two help text objects with Lang = 'eng' and ''
#
def testNormalCase4(self):
ExpectedStr = 'Hello world1'
HelpTextObj = TextObject()
HelpTextObj.SetLang(TAB_LANGUAGE_ENG)
HelpTextObj.SetString(ExpectedStr)
HelpTextList = [HelpTextObj]
HelpStr = 'Hello world'
HelpTextObj = TextObject()
HelpTextObj.SetLang('')
HelpTextObj.SetString(HelpStr)
HelpTextList.append(HelpTextObj)
Result = _GetHelpStr(HelpTextList)
self.assertEqual(Result, ExpectedStr)
#
# Normal case5: have three help text objects with Lang = 'en-US', '', and ''
#
def testNormalCase5(self):
ExpectedStr = 'Hello world1'
HelpTextObj = TextObject()
HelpTextObj.SetLang(TAB_LANGUAGE_EN_US)
HelpTextObj.SetString(ExpectedStr)
HelpTextList = [HelpTextObj]
HelpStr = 'Hello unknown world'
HelpTextObj = TextObject()
HelpTextObj.SetLang('')
HelpTextObj.SetString(HelpStr)
HelpTextList.append(HelpTextObj)
HelpStr = 'Hello mysterious world'
HelpTextObj = TextObject()
HelpTextObj.SetLang('')
HelpTextObj.SetString(HelpStr)
HelpTextList.append(HelpTextObj)
Result = _GetHelpStr(HelpTextList)
self.assertEqual(Result, ExpectedStr)
HelpTextList.sort()
self.assertEqual(Result, ExpectedStr)
HelpTextList.sort(reverse=True)
self.assertEqual(Result, ExpectedStr)
#
# Test GenGuidSections
#
class GenGuidSectionsTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
#
# This is the API to generate a Guid object to help unit tests
#
def GuidFactory(self, CName, FFE, Usage, GuidType, VariableName, HelpStr):
Guid = GuidObject()
Guid.SetCName(CName)
Guid.SetFeatureFlag(FFE)
Guid.SetGuidTypeList([GuidType])
Guid.SetUsage(Usage)
Guid.SetVariableName(VariableName)
HelpTextObj = TextObject()
HelpTextObj.SetLang('')
HelpTextObj.SetString(HelpStr)
Guid.SetHelpTextList([HelpTextObj])
return Guid
#
# Normal case: have two GuidObjects
#
def testNormalCase1(self):
GuidList = []
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'PRODUCES'
GuidType = 'Event'
VariableName = ''
HelpStr = 'Usage comment line 1'
Guid1 = self.GuidFactory(CName, FFE, Usage, GuidType,
VariableName, HelpStr)
GuidList.append(Guid1)
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'CONSUMES'
GuidType = 'Variable'
VariableName = ''
HelpStr = 'Usage comment line 2'
Guid1 = self.GuidFactory(CName, FFE, Usage, GuidType,
VariableName, HelpStr)
GuidList.append(Guid1)
Result = GenGuidSections(GuidList)
Expected = '''[Guids]
## PRODUCES ## Event # Usage comment line 1
## CONSUMES ## Variable: # Usage comment line 2
Guid1|FFE1'''
self.assertEqual(Result.strip(), Expected)
#
# Normal case: have two GuidObjects
#
def testNormalCase2(self):
GuidList = []
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'PRODUCES'
GuidType = 'Event'
VariableName = ''
HelpStr = 'Usage comment line 1'
Guid1 = self.GuidFactory(CName, FFE, Usage, GuidType,
VariableName, HelpStr)
GuidList.append(Guid1)
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'UNDEFINED'
GuidType = 'UNDEFINED'
VariableName = ''
HelpStr = 'Generic comment line 1\n Generic comment line 2'
Guid1 = self.GuidFactory(CName, FFE, Usage, GuidType,
VariableName, HelpStr)
GuidList.append(Guid1)
Result = GenGuidSections(GuidList)
Expected = '''[Guids]
## PRODUCES ## Event # Usage comment line 1
# Generic comment line 1
# Generic comment line 2
Guid1|FFE1'''
self.assertEqual(Result.strip(), Expected)
#
# Normal case: have two GuidObjects; one help goes into the generic help,
# the other goes into the usage comment
#
def testNormalCase3(self):
GuidList = []
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'UNDEFINED'
GuidType = 'UNDEFINED'
VariableName = ''
HelpStr = 'Generic comment'
Guid1 = self.GuidFactory(CName, FFE, Usage, GuidType,
VariableName, HelpStr)
GuidList.append(Guid1)
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'PRODUCES'
GuidType = 'Event'
VariableName = ''
HelpStr = 'Usage comment line 1'
Guid1 = self.GuidFactory(CName, FFE, Usage, GuidType,
VariableName, HelpStr)
GuidList.append(Guid1)
Result = GenGuidSections(GuidList)
Expected = '''[Guids]
# Generic comment
## PRODUCES ## Event # Usage comment line 1
Guid1|FFE1'''
self.assertEqual(Result.strip(), Expected)
#
# Normal case: have one GuidObject with a multi-line generic comment
#
def testNormalCase5(self):
GuidList = []
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'UNDEFINED'
GuidType = 'UNDEFINED'
VariableName = ''
HelpStr = 'Generic comment line1 \n generic comment line 2'
Guid1 = self.GuidFactory(CName, FFE, Usage, GuidType,
VariableName, HelpStr)
GuidList.append(Guid1)
Result = GenGuidSections(GuidList)
Expected = '''[Guids]
# Generic comment line1
# generic comment line 2
Guid1|FFE1'''
self.assertEqual(Result.strip(), Expected)
#
# Normal case: have one GuidObject with a multi-line usage comment
#
def testNormalCase6(self):
GuidList = []
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'PRODUCES'
GuidType = 'Event'
VariableName = ''
HelpStr = 'Usage comment line 1\n Usage comment line 2'
Guid1 = self.GuidFactory(CName, FFE, Usage, GuidType,
VariableName, HelpStr)
GuidList.append(Guid1)
Result = GenGuidSections(GuidList)
Expected = '''[Guids]
Guid1|FFE1 ## PRODUCES ## Event # Usage comment line 1 Usage comment line 2
'''
self.assertEqual(Result.strip(), Expected.strip())
#
# Normal case: have one GuidObject with a one-line usage comment
#
def testNormalCase7(self):
GuidList = []
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'UNDEFINED'
GuidType = 'UNDEFINED'
VariableName = ''
HelpStr = 'Usage comment line 1'
Guid1 = self.GuidFactory(CName, FFE, Usage, GuidType,
VariableName, HelpStr)
GuidList.append(Guid1)
Result = GenGuidSections(GuidList)
Expected = '''[Guids]
Guid1|FFE1 # Usage comment line 1
'''
self.assertEqual(Result.strip(), Expected.strip())
#
# Normal case: have two GuidObjects
#
def testNormalCase8(self):
GuidList = []
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'PRODUCES'
GuidType = 'Event'
VariableName = ''
HelpStr = 'Usage comment line 1\n Usage comment line 2'
Guid1 = self.GuidFactory(CName, FFE, Usage, GuidType,
VariableName, HelpStr)
GuidList.append(Guid1)
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'PRODUCES'
GuidType = 'Event'
VariableName = ''
HelpStr = 'Usage comment line 3'
Guid1 = self.GuidFactory(CName, FFE, Usage, GuidType,
VariableName, HelpStr)
GuidList.append(Guid1)
Result = GenGuidSections(GuidList)
Expected = '''[Guids]
## PRODUCES ## Event # Usage comment line 1 Usage comment line 2
## PRODUCES ## Event # Usage comment line 3
Guid1|FFE1
'''
self.assertEqual(Result.strip(), Expected.strip())
#
# Normal case: have no GuidObject
#
def testNormalCase9(self):
GuidList = []
Result = GenGuidSections(GuidList)
Expected = ''
self.assertEqual(Result.strip(), Expected.strip())
#
# Normal case: have one GuidObject with no comment generated
#
def testNormalCase10(self):
GuidList = []
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'UNDEFINED'
GuidType = 'UNDEFINED'
VariableName = ''
HelpStr = ''
Guid1 = self.GuidFactory(CName, FFE, Usage, GuidType,
VariableName, HelpStr)
GuidList.append(Guid1)
Result = GenGuidSections(GuidList)
Expected = '''[Guids]
Guid1|FFE1
'''
self.assertEqual(Result.strip(), Expected.strip())
#
# Normal case: have three GuidObjects
#
def testNormalCase11(self):
GuidList = []
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'UNDEFINED'
GuidType = 'UNDEFINED'
VariableName = ''
HelpStr = 'general comment line 1'
Guid1 = self.GuidFactory(CName, FFE, Usage, GuidType,
VariableName, HelpStr)
GuidList.append(Guid1)
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'PRODUCES'
GuidType = 'Event'
VariableName = ''
HelpStr = 'Usage comment line 3'
Guid1 = self.GuidFactory(CName, FFE, Usage, GuidType,
VariableName, HelpStr)
GuidList.append(Guid1)
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'UNDEFINED'
GuidType = 'UNDEFINED'
VariableName = ''
HelpStr = 'general comment line 2'
Guid1 = self.GuidFactory(CName, FFE, Usage, GuidType,
VariableName, HelpStr)
GuidList.append(Guid1)
Result = GenGuidSections(GuidList)
Expected = '''[Guids]
# general comment line 1
## PRODUCES ## Event # Usage comment line 3
# general comment line 2
Guid1|FFE1
'''
self.assertEqual(Result.strip(), Expected.strip())
#
# Normal case: have three GuidObjects with Usage/Type and no help text
#
def testNormalCase12(self):
GuidList = []
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'PRODUCES'
GuidType = 'GUID'
VariableName = ''
HelpStr = ''
Guid1 = self.GuidFactory(CName, FFE, Usage, GuidType,
VariableName, HelpStr)
GuidList.append(Guid1)
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'PRODUCES'
GuidType = 'Event'
VariableName = ''
HelpStr = ''
Guid1 = self.GuidFactory(CName, FFE, Usage, GuidType,
VariableName, HelpStr)
GuidList.append(Guid1)
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'CONSUMES'
GuidType = 'Event'
VariableName = ''
HelpStr = ''
Guid1 = self.GuidFactory(CName, FFE, Usage, GuidType,
VariableName, HelpStr)
GuidList.append(Guid1)
Result = GenGuidSections(GuidList)
Expected = '''[Guids]
## PRODUCES ## GUID
## PRODUCES ## Event
## CONSUMES ## Event
Guid1|FFE1
'''
self.assertEqual(Result.strip(), Expected.strip())
#
# Test GenProtocolPPiSections
#
class GenProtocolPPiSectionsTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
#
# This is the API to generate a Protocol/Ppi object to help unit tests
#
def ObjectFactory(self, CName, FFE, Usage, Notify, HelpStr, IsProtocol):
if IsProtocol:
Object = ProtocolObject()
else:
Object = PpiObject()
Object.SetCName(CName)
Object.SetFeatureFlag(FFE)
Object.SetUsage(Usage)
Object.SetNotify(Notify)
HelpTextObj = TextObject()
HelpTextObj.SetLang('')
HelpTextObj.SetString(HelpStr)
Object.SetHelpTextList([HelpTextObj])
return Object
# Usage Notify Help INF Comment
#1 UNDEFINED true Present ## UNDEFINED ## NOTIFY # Help
#2 UNDEFINED true Not Present ## UNDEFINED ## NOTIFY
#3 UNDEFINED false Present ## UNDEFINED # Help
#4 UNDEFINED false Not Present ## UNDEFINED
#5 UNDEFINED Not Present Present # Help
#6 UNDEFINED Not Present Not Present <empty>
#7 Other true Present ## Other ## NOTIFY # Help
#8 Other true Not Present ## Other ## NOTIFY
#9 Other false Present ## Other # Help
#A Other false Not Present ## Other
#B Other Not Present Present ## Other # Help
#C Other Not Present Not Present ## Other
def testNormalCase1(self):
ObjectList = []
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'UNDEFINED'
Notify = True
HelpStr = 'Help'
IsProtocol = True
Object = self.ObjectFactory(CName, FFE, Usage, Notify,
HelpStr, IsProtocol)
ObjectList.append(Object)
Result = GenProtocolPPiSections(ObjectList, IsProtocol)
Expected = '''[Protocols]
Guid1|FFE1 ## UNDEFINED ## NOTIFY # Help'''
self.assertEqual(Result.strip(), Expected)
IsProtocol = False
ObjectList = []
Object = self.ObjectFactory(CName, FFE, Usage, Notify,
HelpStr, IsProtocol)
ObjectList.append(Object)
Result = GenProtocolPPiSections(ObjectList, IsProtocol)
Expected = '''[Ppis]
Guid1|FFE1 ## UNDEFINED ## NOTIFY # Help'''
self.assertEqual(Result.strip(), Expected)
def testNormalCase2(self):
ObjectList = []
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'UNDEFINED'
Notify = True
HelpStr = ''
IsProtocol = True
Object = self.ObjectFactory(CName, FFE, Usage, Notify,
HelpStr, IsProtocol)
ObjectList.append(Object)
Result = GenProtocolPPiSections(ObjectList, IsProtocol)
Expected = '''[Protocols]
Guid1|FFE1 ## UNDEFINED ## NOTIFY'''
self.assertEqual(Result.strip(), Expected)
def testNormalCase3(self):
ObjectList = []
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'UNDEFINED'
Notify = False
HelpStr = 'Help'
IsProtocol = True
Object = self.ObjectFactory(CName, FFE, Usage, Notify,
HelpStr, IsProtocol)
ObjectList.append(Object)
Result = GenProtocolPPiSections(ObjectList, IsProtocol)
Expected = '''[Protocols]
Guid1|FFE1 ## UNDEFINED # Help'''
self.assertEqual(Result.strip(), Expected)
def testNormalCase4(self):
ObjectList = []
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'UNDEFINED'
Notify = False
HelpStr = ''
IsProtocol = True
Object = self.ObjectFactory(CName, FFE, Usage, Notify,
HelpStr, IsProtocol)
ObjectList.append(Object)
Result = GenProtocolPPiSections(ObjectList, IsProtocol)
Expected = '''[Protocols]
Guid1|FFE1 ## UNDEFINED'''
self.assertEqual(Result.strip(), Expected)
def testNormalCase5(self):
ObjectList = []
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'UNDEFINED'
Notify = ''
HelpStr = 'Help'
IsProtocol = True
Object = self.ObjectFactory(CName, FFE, Usage, Notify,
HelpStr, IsProtocol)
ObjectList.append(Object)
Result = GenProtocolPPiSections(ObjectList, IsProtocol)
Expected = '''[Protocols]
Guid1|FFE1 # Help'''
self.assertEqual(Result.strip(), Expected)
def testNormalCase6(self):
ObjectList = []
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'UNDEFINED'
Notify = ''
HelpStr = ''
IsProtocol = True
Object = self.ObjectFactory(CName, FFE, Usage, Notify,
HelpStr, IsProtocol)
ObjectList.append(Object)
Result = GenProtocolPPiSections(ObjectList, IsProtocol)
Expected = '''[Protocols]
Guid1|FFE1'''
self.assertEqual(Result.strip(), Expected)
def testNormalCase7(self):
ObjectList = []
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'PRODUCES'
Notify = True
HelpStr = 'Help'
IsProtocol = True
Object = self.ObjectFactory(CName, FFE, Usage, Notify,
HelpStr, IsProtocol)
ObjectList.append(Object)
Result = GenProtocolPPiSections(ObjectList, IsProtocol)
Expected = '''[Protocols]
Guid1|FFE1 ## PRODUCES ## NOTIFY # Help'''
self.assertEqual(Result.strip(), Expected)
def testNormalCase8(self):
ObjectList = []
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'PRODUCES'
Notify = True
HelpStr = ''
IsProtocol = True
Object = self.ObjectFactory(CName, FFE, Usage, Notify,
HelpStr, IsProtocol)
ObjectList.append(Object)
Result = GenProtocolPPiSections(ObjectList, IsProtocol)
Expected = '''[Protocols]
Guid1|FFE1 ## PRODUCES ## NOTIFY'''
self.assertEqual(Result.strip(), Expected)
def testNormalCase9(self):
ObjectList = []
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'PRODUCES'
Notify = False
HelpStr = 'Help'
IsProtocol = True
Object = self.ObjectFactory(CName, FFE, Usage, Notify,
HelpStr, IsProtocol)
ObjectList.append(Object)
Result = GenProtocolPPiSections(ObjectList, IsProtocol)
Expected = '''[Protocols]
Guid1|FFE1 ## PRODUCES # Help'''
self.assertEqual(Result.strip(), Expected)
def testNormalCaseA(self):
ObjectList = []
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'PRODUCES'
Notify = False
HelpStr = ''
IsProtocol = True
Object = self.ObjectFactory(CName, FFE, Usage, Notify,
HelpStr, IsProtocol)
ObjectList.append(Object)
Result = GenProtocolPPiSections(ObjectList, IsProtocol)
Expected = '''[Protocols]
Guid1|FFE1 ## PRODUCES'''
self.assertEqual(Result.strip(), Expected)
def testNormalCaseB(self):
ObjectList = []
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'PRODUCES'
Notify = ''
HelpStr = 'Help'
IsProtocol = True
Object = self.ObjectFactory(CName, FFE, Usage, Notify,
HelpStr, IsProtocol)
ObjectList.append(Object)
Result = GenProtocolPPiSections(ObjectList, IsProtocol)
Expected = '''[Protocols]
Guid1|FFE1 ## PRODUCES # Help'''
self.assertEqual(Result.strip(), Expected)
def testNormalCaseC(self):
ObjectList = []
CName = 'Guid1'
FFE = 'FFE1'
Usage = 'PRODUCES'
Notify = ''
HelpStr = ''
IsProtocol = True
Object = self.ObjectFactory(CName, FFE, Usage, Notify,
HelpStr, IsProtocol)
ObjectList.append(Object)
Result = GenProtocolPPiSections(ObjectList, IsProtocol)
Expected = '''[Protocols]
Guid1|FFE1 ## PRODUCES'''
self.assertEqual(Result.strip(), Expected)
#
# Test GenPcdSections
#
class GenPcdSectionsTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
#
# This is the API to generate a Pcd object to help unit tests
#
def ObjectFactory(self, ItemType, TSCName, CName, DValue, FFE, Usage, Str):
Object = PcdObject()
HelpStr = Str
Object.SetItemType(ItemType)
Object.SetTokenSpaceGuidCName(TSCName)
Object.SetCName(CName)
Object.SetDefaultValue(DValue)
Object.SetFeatureFlag(FFE)
Object.SetValidUsage(Usage)
HelpTextObj = TextObject()
HelpTextObj.SetLang('')
HelpTextObj.SetString(HelpStr)
Object.SetHelpTextList([HelpTextObj])
return Object
# Usage Help INF Comment
#1 UNDEFINED Present # Help
#2 UNDEFINED Not Present <empty>
#3 Other Present ## Other # Help
#4 Other Not Present ## Other
def testNormalCase1(self):
ObjectList = []
ItemType = 'Pcd'
TSCName = 'TSCName'
CName = 'CName'
DValue = 'DValue'
FFE = 'FFE'
Usage = 'UNDEFINED'
Str = 'Help'
Object = self.ObjectFactory(ItemType, TSCName, CName, DValue, FFE,
Usage, Str)
ObjectList.append(Object)
Result = GenPcdSections(ObjectList)
Expected = \
'[Pcd]\n' + \
'TSCName.CName|DValue|FFE # Help'
self.assertEqual(Result.strip(), Expected)
def testNormalCase2(self):
ObjectList = []
ItemType = 'Pcd'
TSCName = 'TSCName'
CName = 'CName'
DValue = 'DValue'
FFE = 'FFE'
Usage = 'UNDEFINED'
Str = ''
Object = self.ObjectFactory(ItemType, TSCName, CName, DValue, FFE,
Usage, Str)
ObjectList.append(Object)
Result = GenPcdSections(ObjectList)
Expected = '[Pcd]\nTSCName.CName|DValue|FFE'
self.assertEqual(Result.strip(), Expected)
def testNormalCase3(self):
ObjectList = []
ItemType = 'Pcd'
TSCName = 'TSCName'
CName = 'CName'
DValue = 'DValue'
FFE = 'FFE'
Usage = 'CONSUMES'
Str = 'Help'
Object = self.ObjectFactory(ItemType, TSCName, CName, DValue, FFE,
Usage, Str)
ObjectList.append(Object)
Result = GenPcdSections(ObjectList)
Expected = '[Pcd]\nTSCName.CName|DValue|FFE ## CONSUMES # Help'
self.assertEqual(Result.strip(), Expected)
def testNormalCase4(self):
ObjectList = []
ItemType = 'Pcd'
TSCName = 'TSCName'
CName = 'CName'
DValue = 'DValue'
FFE = 'FFE'
Usage = 'CONSUMES'
Str = ''
Object = self.ObjectFactory(ItemType, TSCName, CName, DValue, FFE,
Usage, Str)
ObjectList.append(Object)
Result = GenPcdSections(ObjectList)
Expected = '[Pcd]\nTSCName.CName|DValue|FFE ## CONSUMES'
self.assertEqual(Result.strip(), Expected)
#
# multiple lines for normal usage
#
def testNormalCase5(self):
ObjectList = []
ItemType = 'Pcd'
TSCName = 'TSCName'
CName = 'CName'
DValue = 'DValue'
FFE = 'FFE'
Usage = 'CONSUMES'
Str = 'commment line 1\ncomment line 2'
Object = self.ObjectFactory(ItemType, TSCName, CName, DValue, FFE,
Usage, Str)
ObjectList.append(Object)
Result = GenPcdSections(ObjectList)
Expected = '''[Pcd]
TSCName.CName|DValue|FFE ## CONSUMES # commment line 1 comment line 2'''
self.assertEqual(Result.strip(), Expected)
#
# multiple lines for UNDEFINED usage
#
def testNormalCase6(self):
ObjectList = []
ItemType = 'Pcd'
TSCName = 'TSCName'
CName = 'CName'
DValue = 'DValue'
FFE = 'FFE'
Usage = 'UNDEFINED'
Str = 'commment line 1\ncomment line 2'
Object = self.ObjectFactory(ItemType, TSCName, CName, DValue, FFE,
Usage, Str)
ObjectList.append(Object)
Usage = 'UNDEFINED'
Str = 'commment line 3'
Object = self.ObjectFactory(ItemType, TSCName, CName, DValue, FFE,
Usage, Str)
ObjectList.append(Object)
Result = GenPcdSections(ObjectList)
Expected = '''[Pcd]
# commment line 1
# comment line 2
# commment line 3
TSCName.CName|DValue|FFE'''
self.assertEqual(Result.strip(), Expected)
#
# multiple lines for UNDEFINED and normal usage
#
def testNormalCase7(self):
ObjectList = []
ItemType = 'Pcd'
TSCName = 'TSCName'
CName = 'CName'
DValue = 'DValue'
FFE = 'FFE'
Usage = 'UNDEFINED'
Str = 'commment line 1\ncomment line 2'
Object = self.ObjectFactory(ItemType, TSCName, CName, DValue, FFE,
Usage, Str)
ObjectList.append(Object)
Usage = 'CONSUMES'
Str = 'Foo'
Object = self.ObjectFactory(ItemType, TSCName, CName, DValue, FFE,
Usage, Str)
ObjectList.append(Object)
Usage = 'UNDEFINED'
Str = 'commment line 3'
Object = self.ObjectFactory(ItemType, TSCName, CName, DValue, FFE,
Usage, Str)
ObjectList.append(Object)
Result = GenPcdSections(ObjectList)
Expected = '''[Pcd]
# commment line 1
# comment line 2
## CONSUMES # Foo
# commment line 3
TSCName.CName|DValue|FFE'''
self.assertEqual(Result.strip(), Expected)
# Usage Help INF Comment
# CONSUMES Present # Help (keep <EOL> and insert '#' at beginning of each new line)
# CONSUMES Not Present <empty>
#
# TAB_INF_FEATURE_PCD
#
def testNormalCase8(self):
ObjectList = []
ItemType = TAB_INF_FEATURE_PCD
TSCName = 'TSCName'
CName = 'CName'
DValue = 'DValue'
FFE = 'FFE'
Usage = 'CONSUMES'
Str = 'commment line 1\ncomment line 2'
Object = self.ObjectFactory(ItemType, TSCName, CName, DValue, FFE,
Usage, Str)
ObjectList.append(Object)
Result = GenPcdSections(ObjectList)
Expected = '''[FeaturePcd]
# commment line 1
# comment line 2
TSCName.CName|DValue|FFE'''
self.assertEqual(Result.strip(), Expected)
#
# TAB_INF_FEATURE_PCD
#
def testNormalCase9(self):
ObjectList = []
ItemType = TAB_INF_FEATURE_PCD
TSCName = 'TSCName'
CName = 'CName'
DValue = 'DValue'
FFE = 'FFE'
Usage = 'CONSUMES'
Str = ''
Object = self.ObjectFactory(ItemType, TSCName, CName, DValue, FFE,
Usage, Str)
ObjectList.append(Object)
Result = GenPcdSections(ObjectList)
Expected = '''[FeaturePcd]
TSCName.CName|DValue|FFE'''
self.assertEqual(Result.strip(), Expected)
#
# TAB_INF_FEATURE_PCD
#
def testNormalCase10(self):
ObjectList = []
ItemType = TAB_INF_FEATURE_PCD
TSCName = 'TSCName'
CName = 'CName'
DValue = 'DValue'
FFE = 'FFE'
Usage = 'PRODUCES'
Str = 'commment line 1\ncomment line 2'
Object = self.ObjectFactory(ItemType, TSCName, CName, DValue, FFE,
Usage, Str)
ObjectList.append(Object)
Result = GenPcdSections(ObjectList)
Expected = '''
[FeaturePcd]
# commment line 1
# comment line 2
TSCName.CName|DValue|FFE
'''
self.assertEqual(Result, Expected)
#
# Test GenSpecialSections of Hob
#
class GenHobSectionsTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
#
# This is the API to generate a Hob object to help unit tests
#
def ObjectFactory(self, SupArchList, Type, Usage, Str):
Object = HobObject()
HelpStr = Str
Object.SetHobType(Type)
Object.SetUsage(Usage)
Object.SetSupArchList(SupArchList)
HelpTextObj = TextObject()
HelpTextObj.SetLang('')
HelpTextObj.SetString(HelpStr)
Object.SetHelpTextList([HelpTextObj])
return Object
def testNormalCase1(self):
ObjectList = []
SupArchList = ['X64']
Type = 'Foo'
Usage = 'UNDEFINED'
Str = 'Help'
Object = self.ObjectFactory(SupArchList, Type, Usage, Str)
ObjectList.append(Object)
Result = GenSpecialSections(ObjectList, 'Hob')
Expected = '''# [Hob.X64]
# ##
# # Help
# #
# Foo ## UNDEFINED
#
#
'''
self.assertEqual(Result, Expected)
def testNormalCase2(self):
ObjectList = []
SupArchList = []
Type = 'Foo'
Usage = 'UNDEFINED'
Str = 'Help'
Object = self.ObjectFactory(SupArchList, Type, Usage, Str)
ObjectList.append(Object)
Result = GenSpecialSections(ObjectList, 'Hob')
Expected = '''# [Hob]
# ##
# # Help
# #
# Foo ## UNDEFINED
#
#
'''
self.assertEqual(Result, Expected)
def testNormalCase3(self):
ObjectList = []
SupArchList = ['X64']
Type = 'Foo'
Usage = 'UNDEFINED'
Str = '\nComment Line 1\n\n'
Object = self.ObjectFactory(SupArchList, Type, Usage, Str)
ObjectList.append(Object)
Result = GenSpecialSections(ObjectList, 'Hob')
Expected = '''# [Hob.X64]
# ##
# # Comment Line 1
# #
# Foo ## UNDEFINED
#
#
'''
self.assertEqual(Result, Expected)
def testNormalCase4(self):
ObjectList = []
SupArchList = ['X64']
Type = 'Foo'
Usage = 'UNDEFINED'
Str = '\nComment Line 1\n'
Object = self.ObjectFactory(SupArchList, Type, Usage, Str)
ObjectList.append(Object)
Result = GenSpecialSections(ObjectList, 'Hob')
Expected = '''# [Hob.X64]
# ##
# # Comment Line 1
# #
# Foo ## UNDEFINED
#
#
'''
self.assertEqual(Result, Expected)
def testNormalCase5(self):
ObjectList = []
SupArchList = ['X64']
Type = 'Foo'
Usage = 'UNDEFINED'
Str = 'Comment Line 1\n\n'
Object = self.ObjectFactory(SupArchList, Type, Usage, Str)
ObjectList.append(Object)
Result = GenSpecialSections(ObjectList, 'Hob')
Expected = '''# [Hob.X64]
# ##
# # Comment Line 1
# #
# Foo ## UNDEFINED
#
#
'''
self.assertEqual(Result, Expected)
def testNormalCase6(self):
ObjectList = []
SupArchList = ['X64']
Type = 'Foo'
Usage = 'UNDEFINED'
Str = ''
Object = self.ObjectFactory(SupArchList, Type, Usage, Str)
ObjectList.append(Object)
Result = GenSpecialSections(ObjectList, 'Hob')
Expected = '''# [Hob.X64]
# Foo ## UNDEFINED
#
#
'''
self.assertEqual(Result, Expected)
def testNormalCase7(self):
ObjectList = []
SupArchList = ['X64']
Type = 'Foo'
Usage = 'UNDEFINED'
Str = '\nNew Stack HoB'
Object = self.ObjectFactory(SupArchList, Type, Usage, Str)
ObjectList.append(Object)
Result = GenSpecialSections(ObjectList, 'Hob')
Expected = '''# [Hob.X64]
# ##
# # New Stack HoB
# #
# Foo ## UNDEFINED
#
#
'''
self.assertEqual(Result, Expected)
def testNormalCase8(self):
ObjectList = []
SupArchList = ['X64']
Type = 'Foo'
Usage = 'UNDEFINED'
Str = '\nNew Stack HoB\n\nTail Comment'
Object = self.ObjectFactory(SupArchList, Type, Usage, Str)
ObjectList.append(Object)
Result = GenSpecialSections(ObjectList, 'Hob')
Expected = '''# [Hob.X64]
# ##
# # New Stack HoB
# #
# # Tail Comment
# #
# Foo ## UNDEFINED
#
#
'''
self.assertEqual(Result, Expected)
def testNormalCase9(self):
ObjectList = []
SupArchList = ['X64']
Type = 'Foo'
Usage = 'UNDEFINED'
Str = '\n\n'
Object = self.ObjectFactory(SupArchList, Type, Usage, Str)
ObjectList.append(Object)
Result = GenSpecialSections(ObjectList, 'Hob')
Expected = '''# [Hob.X64]
# ##
# #
# #
# Foo ## UNDEFINED
#
#
'''
self.assertEqual(Result, Expected)
def testNormalCase10(self):
ObjectList = []
SupArchList = ['X64']
Type = 'Foo'
Usage = 'UNDEFINED'
Str = '\n'
Object = self.ObjectFactory(SupArchList, Type, Usage, Str)
ObjectList.append(Object)
Result = GenSpecialSections(ObjectList, 'Hob')
Expected = '''# [Hob.X64]
# ##
# #
# #
# Foo ## UNDEFINED
#
#
'''
self.assertEqual(Result, Expected)
def testNormalCase11(self):
ObjectList = []
SupArchList = ['X64']
Type = 'Foo'
Usage = 'UNDEFINED'
Str = '\n\n\n'
Object = self.ObjectFactory(SupArchList, Type, Usage, Str)
ObjectList.append(Object)
Result = GenSpecialSections(ObjectList, 'Hob')
Expected = '''# [Hob.X64]
# ##
# #
# #
# Foo ## UNDEFINED
#
#
'''
self.assertEqual(Result, Expected)
def testNormalCase12(self):
ObjectList = []
SupArchList = ['X64']
Type = 'Foo'
Usage = 'UNDEFINED'
Str = '\n\n\n\n'
Object = self.ObjectFactory(SupArchList, Type, Usage, Str)
ObjectList.append(Object)
Result = GenSpecialSections(ObjectList, 'Hob')
Expected = '''# [Hob.X64]
# ##
# #
# #
# #
# Foo ## UNDEFINED
#
#
'''
self.assertEqual(Result, Expected)
#
# Test GenGenericCommentF
#
class GenGenericCommentFTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testNormalCase1(self):
CommentLines = 'Comment Line 1'
Result = GenGenericCommentF(CommentLines)
Expected = '# Comment Line 1\n'
self.assertEqual(Result, Expected)
def testNormalCase2(self):
CommentLines = '\n'
Result = GenGenericCommentF(CommentLines)
Expected = '#\n'
self.assertEqual(Result, Expected)
def testNormalCase3(self):
CommentLines = '\n\n\n'
Result = GenGenericCommentF(CommentLines)
Expected = '#\n#\n#\n'
self.assertEqual(Result, Expected)
def testNormalCase4(self):
CommentLines = 'coment line 1\n'
Result = GenGenericCommentF(CommentLines)
Expected = '# coment line 1\n'
self.assertEqual(Result, Expected)
def testNormalCase5(self):
CommentLines = 'coment line 1\n coment line 2\n'
Result = GenGenericCommentF(CommentLines)
Expected = '# coment line 1\n# coment line 2\n'
self.assertEqual(Result, Expected)
if __name__ == '__main__':
Logger.Initialize()
unittest.main()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/UnitTest/CommentGeneratingUnitTest.py
|
## @file
# This file contain unit test for DecParser
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
from __future__ import print_function
import os
import unittest
from Parser.DecParserMisc import \
IsValidCArray, \
IsValidPcdDatum
from Parser.DecParser import Dec
from Library.ParserValidate import IsValidCFormatGuid
#
# Test tool functions
#
def TestToolFuncs():
assert IsValidCArray('{0x1, 0x23}')
# Empty after comma
assert not IsValidCArray('{0x1, 0x23, }')
# 0x2345 too long
assert not IsValidCArray('{0x1, 0x2345}')
# Must end with '}'
assert not IsValidCArray('{0x1, 0x23, ')
# Whitespace between numbers
assert not IsValidCArray('{0x1, 0x2 3, }')
assert IsValidPcdDatum('VOID*', '"test"')[0]
assert IsValidPcdDatum('VOID*', 'L"test"')[0]
assert IsValidPcdDatum('BOOLEAN', 'TRUE')[0]
assert IsValidPcdDatum('BOOLEAN', 'FALSE')[0]
assert IsValidPcdDatum('BOOLEAN', '0')[0]
assert IsValidPcdDatum('BOOLEAN', '1')[0]
assert IsValidPcdDatum('UINT8', '0xab')[0]
assert not IsValidPcdDatum('UNKNOWNTYPE', '0xabc')[0]
assert not IsValidPcdDatum('UINT8', 'not number')[0]
assert IsValidCFormatGuid('{ 0xfa0b1735 , 0x87a0, 0x4193, {0xb2, 0x66 , 0x53, 0x8c , 0x38, 0xaf, 0x48, 0xce }}')
assert not IsValidCFormatGuid('{ 0xfa0b1735 , 0x87a0, 0x4193, {0xb2, 0x66 , 0x53, 0x8c , 0x38, 0xaf, 0x48, 0xce }} 0xaa')
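#
# Note on the call shape above: IsValidPcdDatum appears to return a tuple whose
# first element is the validity flag (hence the [0] indexing), with diagnostic
# text alongside; this is an observation from these call sites, not a
# documented contract.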
def TestTemplate(TestString, TestFunc):
Path = os.path.join(os.getcwd(), 'test.dec')
Path = os.path.normpath(Path)
try:
f = open(Path, 'w')
# Write test string to file
f.write(TestString)
# Close file
f.close()
except:
print('Cannot create temporary file [%s]!' % Path)
exit(-1)
# Call test function to test
Ret = TestFunc(Path, TestString)
# Test done, remove temporary file
os.remove(Path)
return Ret
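# A minimal usage sketch of TestTemplate (illustrative only; the DEC snippet is
# hypothetical and TestOK is defined just below):
#
#   Parser = TestTemplate('[Defines]\n  DEC_SPECIFICATION = 0x00010005\n', TestOK)
#
# TestTemplate writes the string to a temporary 'test.dec', runs the supplied
# check function on it, removes the file, and returns the check's result.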
# To make the unit tests work correctly, IsRaiseError must be set to True.
# This function tests a DEC file with correct syntax.
# @retval: parser object
#
def TestOK(Path, TestString):
try:
Parser = Dec(Path)
except:
raise Exception('Bug!!! Correct syntax in DEC file, but exception raised!\n' + TestString)
return Parser
# This function tests a DEC file with wrong syntax.
# If the parser detects the wrong syntax, an exception is thrown, which is the expected result.
def TestError(Path, TestString):
try:
Dec(Path)
except:
# Raise error, get expected result
return True
raise Exception('Bug!!! Wrong syntax in DEC file, but passed by DEC parser!!\n' + TestString)
def TestDecDefine():
TestString = '''
[Defines]
DEC_SPECIFICATION = 0x00010005
PACKAGE_NAME = MdePkg
PACKAGE_GUID = 1E73767F-8F52-4603-AEB4-F29B510B6766
PACKAGE_VERSION = 1.02
'''
Parser = TestTemplate(TestString, TestOK)
DefObj = Parser.GetDefineSectionObject()
assert DefObj.GetPackageSpecification() == '0x00010005'
assert DefObj.GetPackageName() == 'MdePkg'
assert DefObj.GetPackageGuid() == '1E73767F-8F52-4603-AEB4-F29B510B6766'
assert DefObj.GetPackageVersion() == '1.02'
TestString = '''
[Defines]
UNKNOW_KEY = 0x00010005 # An unknown key
'''
assert TestTemplate(TestString, TestError)
TestString = '''
[Defines]
PACKAGE_GUID = F-8F52-4603-AEB4-F29B510B6766 # Error GUID
'''
assert TestTemplate(TestString, TestError)
def TestDecInclude():
TestString = '''
[Defines]
DEC_SPECIFICATION = 0x00010005
PACKAGE_NAME = MdePkg
PACKAGE_GUID = 1E73767F-8F52-4603-AEB4-F29B510B6766
PACKAGE_VERSION = 1.02
[ \\
Includes]
Include
[Includes.IA32]
Include/Ia32
'''
# Create directory in current directory
try:
os.makedirs('Include/Ia32')
except:
pass
Parser = TestTemplate(TestString, TestOK)
IncObj = Parser.GetIncludeSectionObject()
Items = IncObj.GetIncludes()
assert len(Items) == 1
assert Items[0].File == 'Include'
Items = IncObj.GetIncludes('IA32')
assert len(Items) == 1
# normpath is called in the DEC parser, so '/' is converted to '\\' (os.sep) on Windows
assert Items[0].File == 'Include\\Ia32'
TestString = '''
[Defines]
DEC_SPECIFICATION = 0x00010005
PACKAGE_NAME = MdePkg
PACKAGE_GUID = 1E73767F-8F52-4603-AEB4-F29B510B6766
PACKAGE_VERSION = 1.02
[Includes]
Include_not_exist # directory does not exist
'''
assert TestTemplate(TestString, TestError)
os.removedirs('Include/Ia32')
def TestDecGuidPpiProtocol():
TestString = '''
[Defines]
DEC_SPECIFICATION = 0x00010005
PACKAGE_NAME = MdePkg
PACKAGE_GUID = 1E73767F-8F52-4603-AEB4-F29B510B6766
PACKAGE_VERSION = 1.02
[Guids]
#
# GUID defined in UEFI2.1/UEFI2.0/EFI1.1
#
## Include/Guid/GlobalVariable.h
gEfiGlobalVariableGuid = { 0x8BE4DF61, 0x93CA, 0x11D2, { 0xAA, 0x0D, 0x00, 0xE0, 0x98, 0x03, 0x2B, 0x8C }}
[Protocols]
## Include/Protocol/Bds.h
gEfiBdsArchProtocolGuid = { 0x665E3FF6, 0x46CC, 0x11D4, { 0x9A, 0x38, 0x00, 0x90, 0x27, 0x3F, 0xC1, 0x4D }}
[Ppis]
## Include/Ppi/MasterBootMode.h
gEfiPeiMasterBootModePpiGuid = { 0x7408d748, 0xfc8c, 0x4ee6, {0x92, 0x88, 0xc4, 0xbe, 0xc0, 0x92, 0xa4, 0x10 } }
'''
Parser = TestTemplate(TestString, TestOK)
Obj = Parser.GetGuidSectionObject()
Items = Obj.GetGuids()
assert Obj.GetSectionName() == 'Guids'.upper()
assert len(Items) == 1
assert Items[0].GuidCName == 'gEfiGlobalVariableGuid'
assert Items[0].GuidCValue == '{ 0x8BE4DF61, 0x93CA, 0x11D2, { 0xAA, 0x0D, 0x00, 0xE0, 0x98, 0x03, 0x2B, 0x8C }}'
Obj = Parser.GetProtocolSectionObject()
Items = Obj.GetProtocols()
assert Obj.GetSectionName() == 'Protocols'.upper()
assert len(Items) == 1
assert Items[0].GuidCName == 'gEfiBdsArchProtocolGuid'
assert Items[0].GuidCValue == '{ 0x665E3FF6, 0x46CC, 0x11D4, { 0x9A, 0x38, 0x00, 0x90, 0x27, 0x3F, 0xC1, 0x4D }}'
Obj = Parser.GetPpiSectionObject()
Items = Obj.GetPpis()
assert Obj.GetSectionName() == 'Ppis'.upper()
assert len(Items) == 1
assert Items[0].GuidCName == 'gEfiPeiMasterBootModePpiGuid'
assert Items[0].GuidCValue == '{ 0x7408d748, 0xfc8c, 0x4ee6, {0x92, 0x88, 0xc4, 0xbe, 0xc0, 0x92, 0xa4, 0x10 } }'
def TestDecPcd():
TestString = '''
[Defines]
DEC_SPECIFICATION = 0x00010005
PACKAGE_NAME = MdePkg
PACKAGE_GUID = 1E73767F-8F52-4603-AEB4-F29B510B6766
PACKAGE_VERSION = 1.02
[PcdsFeatureFlag]
## If TRUE, the component name protocol will not be installed.
gEfiMdePkgTokenSpaceGuid.PcdComponentNameDisable|FALSE|BOOLEAN|0x0000000d
[PcdsFixedAtBuild]
## Indicates the maximum length of unicode string
gEfiMdePkgTokenSpaceGuid.PcdMaximumUnicodeStringLength|1000000|UINT32|0x00000001
[PcdsFixedAtBuild.IPF]
## The base address of IO port space for IA64 arch
gEfiMdePkgTokenSpaceGuid.PcdIoBlockBaseAddressForIpf|0x0ffffc000000|UINT64|0x0000000f
[PcdsFixedAtBuild,PcdsPatchableInModule]
## This flag is used to control the printout of DebugLib
gEfiMdePkgTokenSpaceGuid.PcdDebugPrintErrorLevel|0x80000000|UINT32|0x00000006
[PcdsFixedAtBuild,PcdsPatchableInModule,PcdsDynamic]
## This value is used to set the base address of pci express hierarchy
gEfiMdePkgTokenSpaceGuid.PcdPciExpressBaseAddress|0xE0000000|UINT64|0x0000000a
'''
Parser = TestTemplate(TestString, TestOK)
Obj = Parser.GetPcdSectionObject()
Items = Obj.GetPcds('PcdsFeatureFlag', 'COMMON')
assert len(Items) == 1
assert Items[0].TokenSpaceGuidCName == 'gEfiMdePkgTokenSpaceGuid'
assert Items[0].TokenCName == 'PcdComponentNameDisable'
assert Items[0].DefaultValue == 'FALSE'
assert Items[0].DatumType == 'BOOLEAN'
assert Items[0].TokenValue == '0x0000000d'
Items = Obj.GetPcdsByType('PcdsFixedAtBuild')
assert len(Items) == 4
assert len(Obj.GetPcdsByType('PcdsPatchableInModule')) == 2
def TestDecUserExtension():
TestString = '''
[Defines]
DEC_SPECIFICATION = 0x00010005
PACKAGE_NAME = MdePkg
PACKAGE_GUID = 1E73767F-8F52-4603-AEB4-F29B510B6766
PACKAGE_VERSION = 1.02
[UserExtensions.MyID."TestString".IA32]
Some Strings...
'''
Parser = TestTemplate(TestString, TestOK)
Obj = Parser.GetUserExtensionSectionObject()
Items = Obj.GetAllUserExtensions()
assert len(Items) == 1
assert Items[0].UserString == 'Some Strings...'
assert len(Items[0].ArchAndModuleType) == 1
assert ['MyID', '"TestString"', 'IA32'] in Items[0].ArchAndModuleType
if __name__ == '__main__':
import Logger.Logger
Logger.Logger.Initialize()
unittest.FunctionTestCase(TestToolFuncs).runTest()
unittest.FunctionTestCase(TestDecDefine).runTest()
unittest.FunctionTestCase(TestDecInclude).runTest()
unittest.FunctionTestCase(TestDecGuidPpiProtocol).runTest()
unittest.FunctionTestCase(TestDecPcd).runTest()
unittest.FunctionTestCase(TestDecUserExtension).runTest()
print('All tests passed...')
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/UnitTest/DecParserTest.py
|
## @file
# This file is for installed package information database operations
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
Dependency
'''
##
# Import Modules
#
from os.path import dirname
import os
import Logger.Log as Logger
from Logger import StringTable as ST
from Library.Parsing import GetWorkspacePackage
from Library.Parsing import GetWorkspaceModule
from Library.Parsing import GetPkgInfoFromDec
from Library.Misc import GetRelativePath
from Library import GlobalData
from Logger.ToolError import FatalError
from Logger.ToolError import EDK1_INF_ERROR
from Logger.ToolError import UNKNOWN_ERROR
(DEPEX_CHECK_SUCCESS, DEPEX_CHECK_MODULE_NOT_FOUND, \
DEPEX_CHECK_PACKAGE_NOT_FOUND, DEPEX_CHECK_DP_NOT_FOUND) = (0, 1, 2, 3)
## DependencyRules
#
# This class represents the dependency rule check mechanism
#
# @param object: Inherited from object class
#
class DependencyRules(object):
def __init__(self, Datab, ToBeInstalledPkgList=None):
self.IpiDb = Datab
self.WsPkgList = GetWorkspacePackage()
self.WsModuleList = GetWorkspaceModule()
self.PkgsToBeDepend = [(PkgInfo[1], PkgInfo[2]) for PkgInfo in self.WsPkgList]
# Add package info from the DIST to be installed.
self.PkgsToBeDepend.extend(self.GenToBeInstalledPkgList(ToBeInstalledPkgList))
def GenToBeInstalledPkgList(self, ToBeInstalledPkgList):
if not ToBeInstalledPkgList:
return []
RtnList = []
for Dist in ToBeInstalledPkgList:
for Package in Dist.PackageSurfaceArea:
RtnList.append((Package[0], Package[1]))
return RtnList
## Check whether a module exists by checking the Guid+Version+Name+Path combination
#
# @param Guid: Guid of a module
# @param Version: Version of a module
# @param Name: Name of a module
# @param Path: Path of a module
# @return: True if the module exists, else False
#
def CheckModuleExists(self, Guid, Version, Name, Path):
Logger.Verbose(ST.MSG_CHECK_MODULE_EXIST)
ModuleList = self.IpiDb.GetModInPackage(Guid, Version, Name, Path)
ModuleList.extend(self.IpiDb.GetStandaloneModule(Guid, Version, Name, Path))
Logger.Verbose(ST.MSG_CHECK_MODULE_EXIST_FINISH)
return len(ModuleList) > 0
## Check whether a module's depex is satisfied.
#
# @param ModuleObj: A module object
# @param DpObj: A distribution object
# @return: True if the module's depex is satisfied, False otherwise
#
def CheckModuleDepexSatisfied(self, ModuleObj, DpObj=None):
Logger.Verbose(ST.MSG_CHECK_MODULE_DEPEX_START)
Result = True
Dep = None
if ModuleObj.GetPackageDependencyList():
Dep = ModuleObj.GetPackageDependencyList()[0]
for Dep in ModuleObj.GetPackageDependencyList():
#
# first check whether the dependency is satisfied by the current workspace
#
Exist = self.CheckPackageExists(Dep.GetGuid(), Dep.GetVersion())
#
# then check whether it is satisfied by the current distribution
#
if not Exist:
if DpObj is None:
Result = False
break
for GuidVerPair in DpObj.PackageSurfaceArea.keys():
if Dep.GetGuid() == GuidVerPair[0]:
if Dep.GetVersion() is None or \
len(Dep.GetVersion()) == 0:
Result = True
break
if Dep.GetVersion() == GuidVerPair[1]:
Result = True
break
else:
Result = False
break
if not Result:
Logger.Error("CheckModuleDepex", UNKNOWN_ERROR, \
ST.ERR_DEPENDENCY_NOT_MATCH % (ModuleObj.GetName(), \
Dep.GetPackageFilePath(), \
Dep.GetGuid(), \
Dep.GetVersion()))
return Result
## Check whether a package exists in a package list specified by PkgsToBeDepend.
#
# @param Guid: Guid of a package
# @param Version: Version of a package
# @return: True if the package exists, False otherwise
#
def CheckPackageExists(self, Guid, Version):
Logger.Verbose(ST.MSG_CHECK_PACKAGE_START)
Found = False
for (PkgGuid, PkgVer) in self.PkgsToBeDepend:
if (PkgGuid == Guid):
#
# if the version is not empty and not equal, it is not a match
#
if Version and (PkgVer != Version):
Found = False
break
else:
Found = True
break
else:
Found = False
Logger.Verbose(ST.MSG_CHECK_PACKAGE_FINISH)
return Found
## Check whether a package's depex is satisfied.
#
# @param PkgObj: A package object
# @param DpObj: A distribution object
# @return: True if the package's depex is satisfied, False otherwise
#
def CheckPackageDepexSatisfied(self, PkgObj, DpObj=None):
ModuleDict = PkgObj.GetModuleDict()
for ModKey in ModuleDict.keys():
ModObj = ModuleDict[ModKey]
if not self.CheckModuleDepexSatisfied(ModObj, DpObj):
return False
return True
## Check whether a DP exists.
#
# @param Guid: Guid of a Distribution
# @param Version: Version of a Distribution
# @return: True if the distribution exists, False otherwise
def CheckDpExists(self, Guid, Version):
Logger.Verbose(ST.MSG_CHECK_DP_START)
DpList = self.IpiDb.GetDp(Guid, Version)
Found = len(DpList) > 0
Logger.Verbose(ST.MSG_CHECK_DP_FINISH)
return Found
## Check whether a DP's depex is satisfied by the current workspace for Install
#
# @param DpObj: A distribution object
# @return: True if the distribution's depex is satisfied, False otherwise
#
def CheckInstallDpDepexSatisfied(self, DpObj):
return self.CheckDpDepexSatisfied(DpObj)
## Check whether multiple DPs' depexes are satisfied by the current workspace for Install
#
# @param DpObjList: A distribution object list
# @return: True if the distributions' depexes are satisfied, False otherwise
#
def CheckTestInstallPdDepexSatisfied(self, DpObjList):
DpObj = None  # guard: an empty DpObjList would otherwise leave DpObj unbound below
for DpObj in DpObjList:
if self.CheckDpDepexSatisfied(DpObj):
for PkgKey in DpObj.PackageSurfaceArea.keys():
PkgObj = DpObj.PackageSurfaceArea[PkgKey]
self.PkgsToBeDepend.append((PkgObj.Guid, PkgObj.Version))
else:
return False, DpObj
return True, DpObj
## Check whether a DP's depex is satisfied by the current workspace
# (excluding the original distribution's packages to be replaced) for Replace
#
# @param DpObj: A distribution object
# @param OrigDpGuid: The original distribution's Guid
# @param OrigDpVersion: The original distribution's Version
#
def ReplaceCheckNewDpDepex(self, DpObj, OrigDpGuid, OrigDpVersion):
self.PkgsToBeDepend = [(PkgInfo[1], PkgInfo[2]) for PkgInfo in self.WsPkgList]
OrigDpPackageList = self.IpiDb.GetPackageListFromDp(OrigDpGuid, OrigDpVersion)
for OrigPkgInfo in OrigDpPackageList:
Guid, Version = OrigPkgInfo[0], OrigPkgInfo[1]
if (Guid, Version) in self.PkgsToBeDepend:
self.PkgsToBeDepend.remove((Guid, Version))
return self.CheckDpDepexSatisfied(DpObj)
## Check whether a DP's depex is satisfied by the current workspace.
#
# @param DpObj: A distribution object
#
def CheckDpDepexSatisfied(self, DpObj):
for PkgKey in DpObj.PackageSurfaceArea.keys():
PkgObj = DpObj.PackageSurfaceArea[PkgKey]
if not self.CheckPackageDepexSatisfied(PkgObj, DpObj):
return False
for ModKey in DpObj.ModuleSurfaceArea.keys():
ModObj = DpObj.ModuleSurfaceArea[ModKey]
if not self.CheckModuleDepexSatisfied(ModObj, DpObj):
return False
return True
## Check whether a DP can be removed from the current workspace.
#
# @param DpGuid: the distribution's guid
# @param DpVersion: the distribution's version
# @retval Removable: True if the distribution can be removed, False otherwise
# @retval DependModuleList: the list of modules that prevent the distribution from being removed
#
def CheckDpDepexForRemove(self, DpGuid, DpVersion):
Removable = True
DependModuleList = []
WsModuleList = self.WsModuleList
#
# remove modules that are included in the current DP
# (list of FilePath items)
DpModuleList = self.IpiDb.GetDpModuleList(DpGuid, DpVersion)
for Module in DpModuleList:
if Module in WsModuleList:
WsModuleList.remove(Module)
else:
Logger.Warn("UPT\n",
ST.ERR_MODULE_NOT_INSTALLED % Module)
#
# get packages in the current DP and find their install paths
# (list of (PkgGuid, PkgVersion, InstallPath) items)
DpPackageList = self.IpiDb.GetPackageListFromDp(DpGuid, DpVersion)
DpPackagePathList = []
WorkSP = GlobalData.gWORKSPACE
for (PkgName, PkgGuid, PkgVersion, DecFile) in self.WsPkgList:
if PkgName:
pass
DecPath = dirname(DecFile)
if DecPath.find(WorkSP) > -1:
InstallPath = GetRelativePath(DecPath, WorkSP)
DecFileRelaPath = GetRelativePath(DecFile, WorkSP)
else:
InstallPath = DecPath
DecFileRelaPath = DecFile
if (PkgGuid, PkgVersion, InstallPath) in DpPackageList:
DpPackagePathList.append(DecFileRelaPath)
DpPackageList.remove((PkgGuid, PkgVersion, InstallPath))
#
# the items left in DpPackageList are packages that were installed but can no longer be found
#
for (PkgGuid, PkgVersion, InstallPath) in DpPackageList:
Logger.Warn("UPT",
ST.WARN_INSTALLED_PACKAGE_NOT_FOUND%(PkgGuid, PkgVersion, InstallPath))
#
# check modules to see if they depend on a package of the current DP
#
for Module in WsModuleList:
if (not VerifyRemoveModuleDep(Module, DpPackagePathList)):
Removable = False
DependModuleList.append(Module)
return (Removable, DependModuleList)
## Check whether a DP can be replaced, from the current workspace, by a
# distribution containing NewDpPkgList.
#
# @param OrigDpGuid: the original DP's Guid
# @param OrigDpVersion: the original DP's version
# @param NewDpPkgList: a list of package information (Guid, Version) in the new DP
# @retval Replaceable: True if the distribution can be replaced, False otherwise
# @retval DependModuleList: the list of modules that prevent the distribution from being replaced
#
def CheckDpDepexForReplace(self, OrigDpGuid, OrigDpVersion, NewDpPkgList):
Replaceable = True
DependModuleList = []
WsModuleList = self.WsModuleList
#
# remove modules that are included in the current DP
# (list of FilePath items)
DpModuleList = self.IpiDb.GetDpModuleList(OrigDpGuid, OrigDpVersion)
for Module in DpModuleList:
if Module in WsModuleList:
WsModuleList.remove(Module)
else:
Logger.Warn("UPT\n",
ST.ERR_MODULE_NOT_INSTALLED % Module)
OtherPkgList = NewDpPkgList
#
# get packages in the current DP and find their install paths
# (list of (PkgGuid, PkgVersion, InstallPath) items)
DpPackageList = self.IpiDb.GetPackageListFromDp(OrigDpGuid, OrigDpVersion)
DpPackagePathList = []
WorkSP = GlobalData.gWORKSPACE
for (PkgName, PkgGuid, PkgVersion, DecFile) in self.WsPkgList:
if PkgName:
pass
DecPath = dirname(DecFile)
if DecPath.find(WorkSP) > -1:
InstallPath = GetRelativePath(DecPath, WorkSP)
DecFileRelaPath = GetRelativePath(DecFile, WorkSP)
else:
InstallPath = DecPath
DecFileRelaPath = DecFile
if (PkgGuid, PkgVersion, InstallPath) in DpPackageList:
DpPackagePathList.append(DecFileRelaPath)
DpPackageList.remove((PkgGuid, PkgVersion, InstallPath))
else:
OtherPkgList.append((PkgGuid, PkgVersion))
#
# the items left in DpPackageList are packages that were installed but can no longer be found
#
for (PkgGuid, PkgVersion, InstallPath) in DpPackageList:
Logger.Warn("UPT",
ST.WARN_INSTALLED_PACKAGE_NOT_FOUND%(PkgGuid, PkgVersion, InstallPath))
#
# check modules to see if they can be satisfied by packages not belonging to the removed DP
#
for Module in WsModuleList:
if (not VerifyReplaceModuleDep(Module, DpPackagePathList, OtherPkgList)):
Replaceable = False
DependModuleList.append(Module)
return (Replaceable, DependModuleList)
## Check whether the module depends on packages in DpPackagePathList; returns
# False if a dependency is found, True otherwise
#
# @param Path: a module path
# @param DpPackagePathList: a list of package paths
# @retval: False: the module depends on a package in DpPackagePathList
# True: the module doesn't depend on any package in DpPackagePathList
#
def VerifyRemoveModuleDep(Path, DpPackagePathList):
try:
for Item in GetPackagePath(Path):
if Item in DpPackagePathList:
DecPath = os.path.normpath(os.path.join(GlobalData.gWORKSPACE, Item))
Logger.Info(ST.MSG_MODULE_DEPEND_ON % (Path, DecPath))
return False
else:
return True
except FatalError as ErrCode:
if ErrCode.message == EDK1_INF_ERROR:
Logger.Warn("UPT",
ST.WRN_EDK1_INF_FOUND%Path)
return True
else:
return True
## GetPackagePath
#
# Get dependency package paths from an INF file path
#
def GetPackagePath(InfPath):
PackagePath = []
if os.path.exists(InfPath):
FindSection = False
for Line in open(InfPath).readlines():
Line = Line.strip()
if not Line:
continue
if Line.startswith('#'):
continue
if Line.startswith('[Packages') and Line.endswith(']'):
FindSection = True
continue
if Line.startswith('[') and Line.endswith(']') and FindSection:
break
if FindSection:
PackagePath.append(os.path.normpath(Line))
return PackagePath
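# Illustrative sketch of what GetPackagePath extracts (hypothetical INF
# content, not a real file): given a module file containing
#
#   [Packages]
#   MdePkg/MdePkg.dec
#   MdeModulePkg/MdeModulePkg.dec
#
# it would return the two normalized .dec paths; parsing starts after a
# '[Packages...]' header, skips blank and '#' lines, and stops at the next
# '[...]' section header.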
## Check whether the module depends on packages in DpPackagePathList that
# cannot be satisfied by OtherPkgList
#
# @param Path: a module path
# @param DpPackagePathList: a list of package paths
# @param OtherPkgList: a list of package information (Guid, Version)
# @retval: False: the module depends on a package in DpPackagePathList that cannot be satisfied by OtherPkgList
# True: either the module doesn't depend on DpPackagePathList, or the dependency can be satisfied by OtherPkgList
#
def VerifyReplaceModuleDep(Path, DpPackagePathList, OtherPkgList):
try:
for Item in GetPackagePath(Path):
if Item in DpPackagePathList:
DecPath = os.path.normpath(os.path.join(GlobalData.gWORKSPACE, Item))
Name, Guid, Version = GetPkgInfoFromDec(DecPath)
if (Guid, Version) not in OtherPkgList:
Logger.Info(ST.MSG_MODULE_DEPEND_ON % (Path, DecPath))
return False
else:
return True
except FatalError as ErrCode:
if ErrCode.message == EDK1_INF_ERROR:
Logger.Warn("UPT",
ST.WRN_EDK1_INF_FOUND%Path)
return True
else:
return True
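# A minimal usage sketch of this module (assumptions: 'IpiDb' stands for the
# installed-package information database object normally created by UPT, and
# the GUID/version arguments are placeholders):
#
#   Rules = DependencyRules(IpiDb)
#   if Rules.CheckPackageExists(PkgGuid, PkgVersion):
#       pass  # package available in the workspace or pending install
#   Removable, Blockers = Rules.CheckDpDepexForRemove(DpGuid, DpVersion)
#   # when Removable is False, Blockers lists the modules preventing removal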
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Core/DependencyRules.py
|
## @file
# This file hooks file and directory creation and removal
#
# Copyright (c) 2014 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
File hook
'''
import os
import stat
import time
import zipfile
from time import sleep
from Library import GlobalData
__built_in_remove__ = os.remove
__built_in_mkdir__ = os.mkdir
__built_in_rmdir__ = os.rmdir
__built_in_chmod__ = os.chmod
__built_in_open__ = open
_RMFILE = 0
_MKFILE = 1
_RMDIR = 2
_MKDIR = 3
_CHMOD = 4
gBACKUPFILE = 'file.backup'
gEXCEPTION_LIST = ['Conf'+os.sep+'DistributionPackageDatabase.db', '.tmp', gBACKUPFILE]
class _PathInfo:
def __init__(self, action, path, mode=-1):
self.action = action
self.path = path
self.mode = mode
class RecoverMgr:
def __init__(self, workspace):
self.rlist = []
self.zip = None
self.workspace = os.path.normpath(workspace)
self.backupfile = gBACKUPFILE
self.zipfile = os.path.join(self.workspace, gBACKUPFILE)
def _createzip(self):
if self.zip:
return
self.zip = zipfile.ZipFile(self.zipfile, 'w', zipfile.ZIP_DEFLATED)
def _save(self, tmp, path):
if not self._tryhook(path):
return
self.rlist.append(_PathInfo(tmp, path))
def bkrmfile(self, path):
arc = self._tryhook(path)
if arc and os.path.isfile(path):
self._createzip()
self.zip.write(path, arc.encode('utf_8'))
sta = os.stat(path)
oldmode = stat.S_IMODE(sta.st_mode)
self.rlist.append(_PathInfo(_CHMOD, path, oldmode))
self.rlist.append(_PathInfo(_RMFILE, path))
__built_in_remove__(path)
def bkmkfile(self, path, mode, bufsize):
if not os.path.exists(path):
self._save(_MKFILE, path)
return __built_in_open__(path, mode, bufsize)
def bkrmdir(self, path):
if os.path.exists(path):
sta = os.stat(path)
oldmode = stat.S_IMODE(sta.st_mode)
self.rlist.append(_PathInfo(_CHMOD, path, oldmode))
self._save(_RMDIR, path)
__built_in_rmdir__(path)
def bkmkdir(self, path, mode):
if not os.path.exists(path):
self._save(_MKDIR, path)
__built_in_mkdir__(path, mode)
def bkchmod(self, path, mode):
if self._tryhook(path) and os.path.exists(path):
sta = os.stat(path)
oldmode = stat.S_IMODE(sta.st_mode)
self.rlist.append(_PathInfo(_CHMOD, path, oldmode))
__built_in_chmod__(path, mode)
def rollback(self):
if self.zip:
self.zip.close()
self.zip = None
index = len(self.rlist) - 1
while index >= 0:
item = self.rlist[index]
exist = os.path.exists(item.path)
if item.action == _MKFILE and exist:
#if not os.access(item.path, os.W_OK):
# os.chmod(item.path, S_IWUSR)
__built_in_remove__(item.path)
elif item.action == _RMFILE and not exist:
if not self.zip:
self.zip = zipfile.ZipFile(self.zipfile, 'r', zipfile.ZIP_DEFLATED)
arcname = os.path.normpath(item.path)
arcname = arcname[len(self.workspace)+1:].encode('utf_8')
if os.sep != "/" and os.sep in arcname:
arcname = arcname.replace(os.sep, '/')
mtime = self.zip.getinfo(arcname).date_time
content = self.zip.read(arcname)
filep = __built_in_open__(item.path, "wb")
filep.write(content)
filep.close()
intime = time.mktime(mtime + (0, 0, 0))
os.utime(item.path, (intime, intime))
elif item.action == _MKDIR and exist:
while True:
try:
__built_in_rmdir__(item.path)
break
except IOError:
# Sleep a short time and try again
# The anti-virus software may delay the file removal in this directory
sleep(0.1)
elif item.action == _RMDIR and not exist:
__built_in_mkdir__(item.path)
elif item.action == _CHMOD and exist:
try:
__built_in_chmod__(item.path, item.mode)
except EnvironmentError:
pass
index -= 1
self.commit()
def commit(self):
if self.zip:
self.zip.close()
__built_in_remove__(self.zipfile)
# Check if path needs to be hooked
def _tryhook(self, path):
path = os.path.normpath(path)
works = self.workspace if str(self.workspace).endswith(os.sep) else (self.workspace + os.sep)
if not path.startswith(works):
return ''
for exceptdir in gEXCEPTION_LIST:
full = os.path.join(self.workspace, exceptdir)
if full == path or path.startswith(full + os.sep) or os.path.split(full)[0] == path:
return ''
return path[len(self.workspace)+1:]
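# Illustrative sketch (the paths are hypothetical, for a manager created with
# workspace '/ws'): _tryhook returns the workspace-relative archive name for
# files that should be recorded, and '' for everything else.
#     mgr._tryhook('/ws/Pkg/a.txt')    -> 'Pkg/a.txt'  (inside workspace, hooked)
#     mgr._tryhook('/tmp/b.txt')       -> ''           (outside workspace)
#     mgr._tryhook('/ws/file.backup')  -> ''           (on gEXCEPTION_LIST)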
def _hookrm(path):
if GlobalData.gRECOVERMGR:
GlobalData.gRECOVERMGR.bkrmfile(path)
else:
__built_in_remove__(path)
def _hookmkdir(path, mode=0o777):
if GlobalData.gRECOVERMGR:
GlobalData.gRECOVERMGR.bkmkdir(path, mode)
else:
__built_in_mkdir__(path, mode)
def _hookrmdir(path):
if GlobalData.gRECOVERMGR:
GlobalData.gRECOVERMGR.bkrmdir(path)
else:
__built_in_rmdir__(path)
def _hookmkfile(path, mode='r', bufsize=-1):
if GlobalData.gRECOVERMGR:
return GlobalData.gRECOVERMGR.bkmkfile(path, mode, bufsize)
return __built_in_open__(path, mode, bufsize)
def _hookchmod(path, mode):
if GlobalData.gRECOVERMGR:
GlobalData.gRECOVERMGR.bkchmod(path, mode)
else:
__built_in_chmod__(path, mode)
def SetRecoverMgr(mgr):
GlobalData.gRECOVERMGR = mgr
os.remove = _hookrm
os.mkdir = _hookmkdir
os.rmdir = _hookrmdir
os.chmod = _hookchmod
__FileHookOpen__ = _hookmkfile
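# Illustrative usage sketch; the helper name _demo_recover and the
# 'NewDir'/'new.txt' paths are hypothetical. After SetRecoverMgr the patched
# os functions record each change so rollback() can undo them in reverse.
def _demo_recover(workspace):
    mgr = RecoverMgr(workspace)
    SetRecoverMgr(mgr)
    try:
        os.mkdir(os.path.join(workspace, 'NewDir'))    # recorded as _MKDIR
        fobj = __FileHookOpen__(os.path.join(workspace, 'NewDir', 'new.txt'), 'w')
        fobj.write('payload')                          # creation recorded as _MKFILE
        fobj.close()
        mgr.commit()                                   # keep changes, remove backup zip if any
    except EnvironmentError:
        mgr.rollback()                                 # undo every recorded operation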
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Core/FileHook.py
|
## @file
#
# PackageFile class represents the zip file of a distribution package.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
PackageFile
'''
##
# Import Modules
#
import os.path
import zipfile
import tempfile
import platform
from Logger.ToolError import FILE_OPEN_FAILURE
from Logger.ToolError import FILE_CHECKSUM_FAILURE
from Logger.ToolError import FILE_NOT_FOUND
from Logger.ToolError import FILE_DECOMPRESS_FAILURE
from Logger.ToolError import FILE_UNKNOWN_ERROR
from Logger.ToolError import FILE_WRITE_FAILURE
from Logger.ToolError import FILE_COMPRESS_FAILURE
import Logger.Log as Logger
from Logger import StringTable as ST
from Library.Misc import CreateDirectory
from Library.Misc import RemoveDirectory
from Core.FileHook import __FileHookOpen__
from Common.MultipleWorkspace import MultipleWorkspace as mws
class PackageFile:
def __init__(self, FileName, Mode="r"):
self._FileName = FileName
if Mode not in ["r", "w", "a"]:
Mode = "r"
try:
self._ZipFile = zipfile.ZipFile(FileName, Mode, \
zipfile.ZIP_DEFLATED)
self._Files = {}
for Filename in self._ZipFile.namelist():
self._Files[os.path.normpath(Filename)] = Filename
except BaseException as Xstr:
Logger.Error("PackagingTool", FILE_OPEN_FAILURE,
ExtraData="%s (%s)" % (FileName, str(Xstr)))
BadFile = self._ZipFile.testzip()
if BadFile is not None:
Logger.Error("PackagingTool", FILE_CHECKSUM_FAILURE,
ExtraData="[%s] in %s" % (BadFile, FileName))
def GetZipFile(self):
return self._ZipFile
## Get file name
#
def __str__(self):
return self._FileName
    ## Extract all files
    #
    # @param ToDest: the destination directory
#
def Unpack(self, ToDest):
for FileN in self._ZipFile.namelist():
ToFile = os.path.normpath(os.path.join(ToDest, FileN))
Msg = "%s -> %s" % (FileN, ToFile)
Logger.Info(Msg)
self.Extract(FileN, ToFile)
## Extract the file
#
# @param File: the extracted file
# @param ToFile: the destination file
#
def UnpackFile(self, File, ToFile):
File = File.replace('\\', '/')
if File in self._ZipFile.namelist():
Msg = "%s -> %s" % (File, ToFile)
Logger.Info(Msg)
self.Extract(File, ToFile)
return ToFile
return ''
## Extract the file
#
# @param Which: the source path
# @param ToDest: the destination path
#
def Extract(self, Which, ToDest):
Which = os.path.normpath(Which)
if Which not in self._Files:
Logger.Error("PackagingTool", FILE_NOT_FOUND,
ExtraData="[%s] in %s" % (Which, self._FileName))
try:
FileContent = self._ZipFile.read(self._Files[Which])
except BaseException as Xstr:
Logger.Error("PackagingTool", FILE_DECOMPRESS_FAILURE,
ExtraData="[%s] in %s (%s)" % (Which, \
self._FileName, \
str(Xstr)))
try:
CreateDirectory(os.path.dirname(ToDest))
if os.path.exists(ToDest) and not os.access(ToDest, os.W_OK):
Logger.Warn("PackagingTool", \
ST.WRN_FILE_NOT_OVERWRITTEN % ToDest)
return
else:
ToFile = __FileHookOpen__(ToDest, 'wb')
except BaseException as Xstr:
Logger.Error("PackagingTool", FILE_OPEN_FAILURE,
ExtraData="%s (%s)" % (ToDest, str(Xstr)))
try:
ToFile.write(FileContent)
ToFile.close()
except BaseException as Xstr:
Logger.Error("PackagingTool", FILE_WRITE_FAILURE,
ExtraData="%s (%s)" % (ToDest, str(Xstr)))
## Remove the file
#
# @param Files: the removed files
#
def Remove(self, Files):
TmpDir = os.path.join(tempfile.gettempdir(), ".packaging")
if os.path.exists(TmpDir):
RemoveDirectory(TmpDir, True)
os.mkdir(TmpDir)
self.Unpack(TmpDir)
for SinF in Files:
SinF = os.path.normpath(SinF)
if SinF not in self._Files:
Logger.Error("PackagingTool", FILE_NOT_FOUND,
ExtraData="%s is not in %s!" % \
(SinF, self._FileName))
self._Files.pop(SinF)
self._ZipFile.close()
self._ZipFile = zipfile.ZipFile(self._FileName, "w", \
zipfile.ZIP_DEFLATED)
Cwd = os.getcwd()
os.chdir(TmpDir)
self.PackFiles(self._Files)
os.chdir(Cwd)
RemoveDirectory(TmpDir, True)
    ## Pack the files under the Top directory; the directory shown in the zipfile starts from BaseDir.
# BaseDir should be the parent directory of the Top directory, for example,
# Pack(Workspace\Dir1, Workspace) will pack files under Dir1, and the path in the zipfile will
# start from Workspace
#
# @param Top: the top directory
# @param BaseDir: the base directory
#
def Pack(self, Top, BaseDir):
if not os.path.isdir(Top):
Logger.Error("PackagingTool", FILE_UNKNOWN_ERROR, \
"%s is not a directory!" %Top)
FilesToPack = []
Cwd = os.getcwd()
os.chdir(BaseDir)
        RelaDir = Top[Top.upper().find(BaseDir.upper()) + len(BaseDir) + 1:]
for Root, Dirs, Files in os.walk(RelaDir):
if 'CVS' in Dirs:
Dirs.remove('CVS')
if '.svn' in Dirs:
Dirs.remove('.svn')
for Dir in Dirs:
if Dir.startswith('.'):
Dirs.remove(Dir)
for File1 in Files:
if File1.startswith('.'):
continue
ExtName = os.path.splitext(File1)[1]
#
# skip '.dec', '.inf', '.dsc', '.fdf' files
#
if ExtName.lower() in ['.dec', '.inf', '.dsc', '.fdf']:
continue
FilesToPack.append(os.path.join(Root, File1))
self.PackFiles(FilesToPack)
os.chdir(Cwd)
## Pack the file
#
# @param Files: the files to pack
#
def PackFiles(self, Files):
for File in Files:
Cwd = os.getcwd()
os.chdir(mws.getWs(mws.WORKSPACE, File))
self.PackFile(File)
os.chdir(Cwd)
## Pack the file
#
# @param File: the files to pack
# @param ArcName: the Arc Name
#
def PackFile(self, File, ArcName=None):
try:
#
# avoid packing same file multiple times
#
if platform.system() != 'Windows':
File = File.replace('\\', '/')
ZipedFilesNameList = self._ZipFile.namelist()
for ZipedFile in ZipedFilesNameList:
if File == os.path.normpath(ZipedFile):
return
Logger.Info("packing ..." + File)
self._ZipFile.write(File, ArcName)
except BaseException as Xstr:
Logger.Error("PackagingTool", FILE_COMPRESS_FAILURE,
ExtraData="%s (%s)" % (File, str(Xstr)))
## Write data to the packed file
#
# @param Data: data to write
# @param ArcName: the Arc Name
#
def PackData(self, Data, ArcName):
try:
if os.path.splitext(ArcName)[1].lower() == '.pkg':
Data = Data.encode('utf_8')
self._ZipFile.writestr(ArcName, Data)
except BaseException as Xstr:
Logger.Error("PackagingTool", FILE_COMPRESS_FAILURE,
ExtraData="%s (%s)" % (ArcName, str(Xstr)))
## Close file
#
#
def Close(self):
self._ZipFile.close()
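# Illustrative usage sketch; all file and directory names are hypothetical.
# Unpack extracts every member of a distribution zip, while Pack archives a
# directory tree with member paths made relative to BaseDir.
def _demo_packagefile(DistFile, DestDir, NewDist, Top, BaseDir):
    ReadPkg = PackageFile(DistFile, "r")
    ReadPkg.Unpack(DestDir)         # extract all members under DestDir
    ReadPkg.Close()
    WritePkg = PackageFile(NewDist, "w")
    WritePkg.Pack(Top, BaseDir)     # archive paths start below BaseDir
    WritePkg.Close()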
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Core/PackageFile.py
|
## @file
# Python 'Library' package initialization file.
#
# This file is required to make Python interpreter treat the directory
# as containing package.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
Core init file
'''
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Core/__init__.py
|
## @file
# This file is used to define a class object to describe a distribution package
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
'''
DistributionPackageClass
'''
##
# Import Modules
#
import os.path
from Library.Misc import Sdict
from Library.Misc import GetNonMetaDataFiles
from PomAdapter.InfPomAlignment import InfPomAlignment
from PomAdapter.DecPomAlignment import DecPomAlignment
import Logger.Log as Logger
from Logger import StringTable as ST
from Logger.ToolError import OPTION_VALUE_INVALID
from Logger.ToolError import FatalError
from Logger.ToolError import EDK1_INF_ERROR
from Object.POM.CommonObject import IdentificationObject
from Object.POM.CommonObject import CommonHeaderObject
from Object.POM.CommonObject import MiscFileObject
from Common.MultipleWorkspace import MultipleWorkspace as mws
## DistributionPackageHeaderClass
#
# @param IdentificationObject: Identification Object
# @param CommonHeaderObject: Common Header Object
#
class DistributionPackageHeaderObject(IdentificationObject, \
CommonHeaderObject):
def __init__(self):
IdentificationObject.__init__(self)
CommonHeaderObject.__init__(self)
self.ReadOnly = ''
self.RePackage = ''
self.Vendor = ''
self.Date = ''
self.Signature = 'Md5Sum'
self.XmlSpecification = ''
def GetReadOnly(self):
return self.ReadOnly
def SetReadOnly(self, ReadOnly):
self.ReadOnly = ReadOnly
def GetRePackage(self):
return self.RePackage
def SetRePackage(self, RePackage):
self.RePackage = RePackage
def GetVendor(self):
return self.Vendor
def SetDate(self, Date):
self.Date = Date
def GetDate(self):
return self.Date
def SetSignature(self, Signature):
self.Signature = Signature
def GetSignature(self):
return self.Signature
def SetXmlSpecification(self, XmlSpecification):
self.XmlSpecification = XmlSpecification
def GetXmlSpecification(self):
return self.XmlSpecification
## DistributionPackageClass
#
# @param object: DistributionPackageClass
#
class DistributionPackageClass(object):
def __init__(self):
self.Header = DistributionPackageHeaderObject()
#
# {(Guid, Version, Path) : PackageObj}
#
self.PackageSurfaceArea = Sdict()
#
# {(Guid, Version, Name, Path) : ModuleObj}
#
self.ModuleSurfaceArea = Sdict()
self.Tools = MiscFileObject()
self.MiscellaneousFiles = MiscFileObject()
self.UserExtensions = []
self.FileList = []
## Get all included packages and modules for a distribution package
#
# @param WorkspaceDir: WorkspaceDir
# @param PackageList: A list of all packages
# @param ModuleList: A list of all modules
#
def GetDistributionPackage(self, WorkspaceDir, PackageList, ModuleList):
# Backup WorkspaceDir
Root = WorkspaceDir
#
# Get Packages
#
if PackageList:
for PackageFile in PackageList:
PackageFileFullPath = mws.join(Root, PackageFile)
WorkspaceDir = mws.getWs(Root, PackageFile)
DecObj = DecPomAlignment(PackageFileFullPath, WorkspaceDir, CheckMulDec=True)
PackageObj = DecObj
#
                # Parse INF files one by one
#
ModuleInfFileList = PackageObj.GetModuleFileList()
for File in ModuleInfFileList:
WsRelPath = os.path.join(PackageObj.GetPackagePath(), File)
WsRelPath = os.path.normpath(WsRelPath)
if ModuleList and WsRelPath in ModuleList:
Logger.Error("UPT",
OPTION_VALUE_INVALID,
ST.ERR_NOT_STANDALONE_MODULE_ERROR%\
(WsRelPath, PackageFile))
Filename = os.path.normpath\
(os.path.join(PackageObj.GetRelaPath(), File))
os.path.splitext(Filename)
#
# Call INF parser to generate Inf Object.
# Actually, this call is not directly call, but wrapped by
# Inf class in InfPomAlignment.
#
try:
ModuleObj = InfPomAlignment(Filename, WorkspaceDir, PackageObj.GetPackagePath())
#
# Add module to package
#
ModuleDict = PackageObj.GetModuleDict()
ModuleDict[(ModuleObj.GetGuid(), \
ModuleObj.GetVersion(), \
ModuleObj.GetName(), \
ModuleObj.GetCombinePath())] = ModuleObj
PackageObj.SetModuleDict(ModuleDict)
except FatalError as ErrCode:
if ErrCode.message == EDK1_INF_ERROR:
Logger.Warn("UPT",
ST.WRN_EDK1_INF_FOUND%Filename)
else:
raise
self.PackageSurfaceArea\
[(PackageObj.GetGuid(), PackageObj.GetVersion(), \
PackageObj.GetCombinePath())] = PackageObj
#
# Get Modules
#
if ModuleList:
for ModuleFile in ModuleList:
ModuleFileFullPath = mws.join(Root, ModuleFile)
WorkspaceDir = mws.getWs(Root, ModuleFile)
try:
ModuleObj = InfPomAlignment(ModuleFileFullPath, WorkspaceDir)
ModuleKey = (ModuleObj.GetGuid(),
ModuleObj.GetVersion(),
ModuleObj.GetName(),
ModuleObj.GetCombinePath())
self.ModuleSurfaceArea[ModuleKey] = ModuleObj
except FatalError as ErrCode:
if ErrCode.message == EDK1_INF_ERROR:
Logger.Error("UPT",
EDK1_INF_ERROR,
ST.WRN_EDK1_INF_FOUND%ModuleFileFullPath,
ExtraData=ST.ERR_NOT_SUPPORTED_SA_MODULE)
else:
raise
# Recover WorkspaceDir
WorkspaceDir = Root
## Get all files included for a distribution package, except tool/misc of
# distribution level
#
# @retval DistFileList A list of filepath for NonMetaDataFile, relative to workspace
# @retval MetaDataFileList A list of filepath for MetaDataFile, relative to workspace
#
def GetDistributionFileList(self):
MetaDataFileList = []
SkipModulesUniList = []
for Guid, Version, Path in self.PackageSurfaceArea:
Package = self.PackageSurfaceArea[Guid, Version, Path]
PackagePath = Package.GetPackagePath()
FullPath = Package.GetFullPath()
MetaDataFileList.append(Path)
IncludePathList = Package.GetIncludePathList()
for IncludePath in IncludePathList:
SearchPath = os.path.normpath(os.path.join(os.path.dirname(FullPath), IncludePath))
AddPath = os.path.normpath(os.path.join(PackagePath, IncludePath))
self.FileList += GetNonMetaDataFiles(SearchPath, ['CVS', '.svn'], False, AddPath)
#
# Add the miscellaneous files on DEC file
#
for MiscFileObj in Package.GetMiscFileList():
for FileObj in MiscFileObj.GetFileList():
MiscFileFullPath = os.path.normpath(os.path.join(PackagePath, FileObj.GetURI()))
if MiscFileFullPath not in self.FileList:
self.FileList.append(MiscFileFullPath)
Module = None
ModuleDict = Package.GetModuleDict()
for Guid, Version, Name, Path in ModuleDict:
Module = ModuleDict[Guid, Version, Name, Path]
ModulePath = Module.GetModulePath()
FullPath = Module.GetFullPath()
PkgRelPath = os.path.normpath(os.path.join(PackagePath, ModulePath))
MetaDataFileList.append(Path)
SkipList = ['CVS', '.svn']
NonMetaDataFileList = []
if Module.UniFileClassObject:
for UniFile in Module.UniFileClassObject.IncFileList:
OriPath = os.path.normpath(os.path.dirname(FullPath))
UniFilePath = os.path.normpath(os.path.join(PkgRelPath, UniFile.Path[len(OriPath) + 1:]))
if UniFilePath not in SkipModulesUniList:
SkipModulesUniList.append(UniFilePath)
for IncludeFile in Module.UniFileClassObject.IncludePathList:
if IncludeFile not in SkipModulesUniList:
SkipModulesUniList.append(IncludeFile)
NonMetaDataFileList = GetNonMetaDataFiles(os.path.dirname(FullPath), SkipList, False, PkgRelPath)
for NonMetaDataFile in NonMetaDataFileList:
if NonMetaDataFile not in self.FileList:
self.FileList.append(NonMetaDataFile)
for Guid, Version, Name, Path in self.ModuleSurfaceArea:
Module = self.ModuleSurfaceArea[Guid, Version, Name, Path]
ModulePath = Module.GetModulePath()
FullPath = Module.GetFullPath()
MetaDataFileList.append(Path)
SkipList = ['CVS', '.svn']
NonMetaDataFileList = []
if Module.UniFileClassObject:
for UniFile in Module.UniFileClassObject.IncFileList:
OriPath = os.path.normpath(os.path.dirname(FullPath))
UniFilePath = os.path.normpath(os.path.join(ModulePath, UniFile.Path[len(OriPath) + 1:]))
if UniFilePath not in SkipModulesUniList:
SkipModulesUniList.append(UniFilePath)
NonMetaDataFileList = GetNonMetaDataFiles(os.path.dirname(FullPath), SkipList, False, ModulePath)
for NonMetaDataFile in NonMetaDataFileList:
if NonMetaDataFile not in self.FileList:
self.FileList.append(NonMetaDataFile)
for SkipModuleUni in SkipModulesUniList:
if SkipModuleUni in self.FileList:
self.FileList.remove(SkipModuleUni)
return self.FileList, MetaDataFileList
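# Illustrative usage sketch; WorkspaceDir and the DEC/INF path lists are
# hypothetical, workspace-relative inputs. It builds the package and module
# surface areas, then collects the distribution's file lists.
def _demo_distribution(WorkspaceDir, PackageList, ModuleList):
    DistPkg = DistributionPackageClass()
    DistPkg.GetDistributionPackage(WorkspaceDir, PackageList, ModuleList)
    NonMetaDataFileList, MetaDataFileList = DistPkg.GetDistributionFileList()
    return NonMetaDataFileList, MetaDataFileList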
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Core/DistributionPackageClass.py
|
## @file
# This file is for installed package information database operations
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
IpiDb
'''
##
# Import Modules
#
import sqlite3
import os.path
import time
import Logger.Log as Logger
from Logger import StringTable as ST
from Logger.ToolError import UPT_ALREADY_RUNNING_ERROR
from Logger.ToolError import UPT_DB_UPDATE_ERROR
import platform as pf
## IpiDb
#
# This class represents the installed package information database
# Add/Remove/Get installed distribution package information here.
#
#
# @param object: Inherited from object class
# @param DbPath: A string for the path of the database
#
#
class IpiDatabase(object):
def __init__(self, DbPath, Workspace):
Dir = os.path.dirname(DbPath)
if not os.path.isdir(Dir):
os.mkdir(Dir)
self.Conn = sqlite3.connect(u''.join(DbPath), isolation_level='DEFERRED')
self.Conn.execute("PRAGMA page_size=4096")
self.Conn.execute("PRAGMA synchronous=OFF")
self.Cur = self.Conn.cursor()
self.DpTable = 'DpInfo'
self.PkgTable = 'PkgInfo'
self.ModInPkgTable = 'ModInPkgInfo'
self.StandaloneModTable = 'StandaloneModInfo'
self.ModDepexTable = 'ModDepexInfo'
self.DpFileListTable = 'DpFileListInfo'
self.DummyTable = 'Dummy'
self.Workspace = os.path.normpath(Workspace)
## Initialize build database
#
#
def InitDatabase(self, SkipLock = False):
Logger.Verbose(ST.MSG_INIT_IPI_START)
if not SkipLock:
try:
#
# Create a dummy table, if already existed,
# then UPT is already running
#
SqlCommand = """
create table %s (
Dummy TEXT NOT NULL,
PRIMARY KEY (Dummy)
)""" % self.DummyTable
self.Cur.execute(SqlCommand)
self.Conn.commit()
except sqlite3.OperationalError:
Logger.Error("UPT",
UPT_ALREADY_RUNNING_ERROR,
ST.ERR_UPT_ALREADY_RUNNING_ERROR
)
#
# Create new table
#
SqlCommand = """
create table IF NOT EXISTS %s (
DpGuid TEXT NOT NULL,DpVersion TEXT NOT NULL,
InstallTime REAL NOT NULL,
NewPkgFileName TEXT NOT NULL,
PkgFileName TEXT NOT NULL,
RePackage TEXT NOT NULL,
PRIMARY KEY (DpGuid, DpVersion)
)""" % self.DpTable
self.Cur.execute(SqlCommand)
SqlCommand = """
create table IF NOT EXISTS %s (
FilePath TEXT NOT NULL,
DpGuid TEXT,
DpVersion TEXT,
Md5Sum TEXT,
PRIMARY KEY (FilePath)
)""" % self.DpFileListTable
self.Cur.execute(SqlCommand)
SqlCommand = """
create table IF NOT EXISTS %s (
PackageGuid TEXT NOT NULL,
PackageVersion TEXT NOT NULL,
InstallTime REAL NOT NULL,
DpGuid TEXT,
DpVersion TEXT,
InstallPath TEXT NOT NULL,
PRIMARY KEY (PackageGuid, PackageVersion, InstallPath)
)""" % self.PkgTable
self.Cur.execute(SqlCommand)
SqlCommand = """
create table IF NOT EXISTS %s (
ModuleGuid TEXT NOT NULL,
ModuleVersion TEXT NOT NULL,
ModuleName TEXT NOT NULL,
InstallTime REAL NOT NULL,
PackageGuid TEXT,
PackageVersion TEXT,
InstallPath TEXT NOT NULL,
PRIMARY KEY (ModuleGuid, ModuleVersion, ModuleName, InstallPath)
)""" % self.ModInPkgTable
self.Cur.execute(SqlCommand)
SqlCommand = """
create table IF NOT EXISTS %s (
ModuleGuid TEXT NOT NULL,
ModuleVersion TEXT NOT NULL,
ModuleName TEXT NOT NULL,
InstallTime REAL NOT NULL,
DpGuid TEXT,
DpVersion TEXT,
InstallPath TEXT NOT NULL,
PRIMARY KEY (ModuleGuid, ModuleVersion, ModuleName, InstallPath)
)""" % self.StandaloneModTable
self.Cur.execute(SqlCommand)
SqlCommand = """
create table IF NOT EXISTS %s (
ModuleGuid TEXT NOT NULL,
ModuleVersion TEXT NOT NULL,
ModuleName TEXT NOT NULL,
InstallPath TEXT NOT NULL,
DepexGuid TEXT,
DepexVersion TEXT
)""" % self.ModDepexTable
self.Cur.execute(SqlCommand)
self.Conn.commit()
Logger.Verbose(ST.MSG_INIT_IPI_FINISH)
def RollBack(self):
self.Conn.rollback()
def Commit(self):
self.Conn.commit()
## Add a distribution install information from DpObj
#
# @param DpObj:
# @param NewDpPkgFileName: New DpPkg File Name
# @param DpPkgFileName: DpPkg File Name
# @param RePackage: A RePackage
#
def AddDPObject(self, DpObj, NewDpPkgFileName, DpPkgFileName, RePackage):
try:
for PkgKey in DpObj.PackageSurfaceArea.keys():
PkgGuid = PkgKey[0]
PkgVersion = PkgKey[1]
PkgInstallPath = PkgKey[2]
self._AddPackage(PkgGuid, PkgVersion, DpObj.Header.GetGuid(), \
DpObj.Header.GetVersion(), PkgInstallPath)
PkgObj = DpObj.PackageSurfaceArea[PkgKey]
for ModKey in PkgObj.GetModuleDict().keys():
ModGuid = ModKey[0]
ModVersion = ModKey[1]
ModName = ModKey[2]
ModInstallPath = ModKey[3]
ModInstallPath = \
os.path.normpath(os.path.join(PkgInstallPath, ModInstallPath))
self._AddModuleInPackage(ModGuid, ModVersion, ModName, PkgGuid, \
PkgVersion, ModInstallPath)
ModObj = PkgObj.GetModuleDict()[ModKey]
for Dep in ModObj.GetPackageDependencyList():
DepexGuid = Dep.GetGuid()
DepexVersion = Dep.GetVersion()
self._AddModuleDepex(ModGuid, ModVersion, ModName, ModInstallPath, \
DepexGuid, DepexVersion)
for (FilePath, Md5Sum) in PkgObj.FileList:
self._AddDpFilePathList(DpObj.Header.GetGuid(), \
DpObj.Header.GetVersion(), FilePath, \
Md5Sum)
for ModKey in DpObj.ModuleSurfaceArea.keys():
ModGuid = ModKey[0]
ModVersion = ModKey[1]
ModName = ModKey[2]
ModInstallPath = ModKey[3]
self._AddStandaloneModule(ModGuid, ModVersion, ModName, \
DpObj.Header.GetGuid(), \
DpObj.Header.GetVersion(), \
ModInstallPath)
ModObj = DpObj.ModuleSurfaceArea[ModKey]
for Dep in ModObj.GetPackageDependencyList():
DepexGuid = Dep.GetGuid()
DepexVersion = Dep.GetVersion()
self._AddModuleDepex(ModGuid, ModVersion, ModName, ModInstallPath, \
DepexGuid, DepexVersion)
for (Path, Md5Sum) in ModObj.FileList:
self._AddDpFilePathList(DpObj.Header.GetGuid(), \
DpObj.Header.GetVersion(), \
Path, Md5Sum)
#
# add tool/misc files
#
for (Path, Md5Sum) in DpObj.FileList:
self._AddDpFilePathList(DpObj.Header.GetGuid(), \
DpObj.Header.GetVersion(), Path, Md5Sum)
self._AddDp(DpObj.Header.GetGuid(), DpObj.Header.GetVersion(), \
NewDpPkgFileName, DpPkgFileName, RePackage)
except sqlite3.IntegrityError as DetailMsg:
Logger.Error("UPT",
UPT_DB_UPDATE_ERROR,
ST.ERR_UPT_DB_UPDATE_ERROR,
ExtraData = DetailMsg
)
## Add a distribution install information
#
# @param Guid Guid of the distribution package
# @param Version Version of the distribution package
# @param NewDpFileName the saved filename of distribution package file
# @param DistributionFileName the filename of distribution package file
#
def _AddDp(self, Guid, Version, NewDpFileName, DistributionFileName, \
RePackage):
if Version is None or len(Version.strip()) == 0:
Version = 'N/A'
#
# Add newly installed DP information to DB.
#
if NewDpFileName is None or len(NewDpFileName.strip()) == 0:
PkgFileName = 'N/A'
else:
PkgFileName = NewDpFileName
CurrentTime = time.time()
SqlCommand = \
"""insert into %s values('%s', '%s', %s, '%s', '%s', '%s')""" % \
(self.DpTable, Guid, Version, CurrentTime, PkgFileName, \
DistributionFileName, str(RePackage).upper())
self.Cur.execute(SqlCommand)
## Add a file list from DP
#
# @param DpGuid: A DpGuid
# @param DpVersion: A DpVersion
# @param Path: A Path
    # @param Md5Sum: A Md5Sum
#
def _AddDpFilePathList(self, DpGuid, DpVersion, Path, Md5Sum):
Path = os.path.normpath(Path)
if pf.system() == 'Windows':
if Path.startswith(self.Workspace):
Path = Path[len(self.Workspace):]
else:
if Path.startswith(self.Workspace + os.sep):
Path = Path[len(self.Workspace)+1:]
SqlCommand = """insert into %s values('%s', '%s', '%s', '%s')""" % \
(self.DpFileListTable, Path, DpGuid, DpVersion, Md5Sum)
self.Cur.execute(SqlCommand)
## Add a package install information
#
# @param Guid: A package guid
# @param Version: A package version
# @param DpGuid: A DpGuid
# @param DpVersion: A DpVersion
# @param Path: A Path
#
def _AddPackage(self, Guid, Version, DpGuid=None, DpVersion=None, Path=''):
if Version is None or len(Version.strip()) == 0:
Version = 'N/A'
if DpGuid is None or len(DpGuid.strip()) == 0:
DpGuid = 'N/A'
if DpVersion is None or len(DpVersion.strip()) == 0:
DpVersion = 'N/A'
#
# Add newly installed package information to DB.
#
CurrentTime = time.time()
SqlCommand = \
"""insert into %s values('%s', '%s', %s, '%s', '%s', '%s')""" % \
(self.PkgTable, Guid, Version, CurrentTime, DpGuid, DpVersion, Path)
self.Cur.execute(SqlCommand)
## Add a module that from a package install information
#
# @param Guid: Module Guid
# @param Version: Module version
# @param Name: Module Name
# @param PkgGuid: Package Guid
# @param PkgVersion: Package version
# @param Path: Package relative path that module installs
#
def _AddModuleInPackage(self, Guid, Version, Name, PkgGuid=None, \
PkgVersion=None, Path=''):
if Version is None or len(Version.strip()) == 0:
Version = 'N/A'
if PkgGuid is None or len(PkgGuid.strip()) == 0:
PkgGuid = 'N/A'
if PkgVersion is None or len(PkgVersion.strip()) == 0:
PkgVersion = 'N/A'
if os.name == 'posix':
Path = Path.replace('\\', os.sep)
else:
Path = Path.replace('/', os.sep)
#
# Add module from package information to DB.
#
CurrentTime = time.time()
SqlCommand = \
"""insert into %s values('%s', '%s', '%s', %s, '%s', '%s', '%s')""" % \
(self.ModInPkgTable, Guid, Version, Name, CurrentTime, PkgGuid, PkgVersion, \
Path)
self.Cur.execute(SqlCommand)
## Add a module that is standalone install information
#
# @param Guid: a module Guid
# @param Version: a module Version
# @param Name: a module name
# @param DpGuid: a DpGuid
# @param DpVersion: a DpVersion
# @param Path: path
#
def _AddStandaloneModule(self, Guid, Version, Name, DpGuid=None, \
DpVersion=None, Path=''):
if Version is None or len(Version.strip()) == 0:
Version = 'N/A'
if DpGuid is None or len(DpGuid.strip()) == 0:
DpGuid = 'N/A'
if DpVersion is None or len(DpVersion.strip()) == 0:
DpVersion = 'N/A'
#
# Add module standalone information to DB.
#
CurrentTime = time.time()
SqlCommand = \
"""insert into %s values('%s', '%s', '%s', %s, '%s', '%s', '%s')""" % \
(self.StandaloneModTable, Guid, Version, Name, CurrentTime, DpGuid, \
DpVersion, Path)
self.Cur.execute(SqlCommand)
## Add a module depex
#
# @param Guid: a module Guid
# @param Version: a module Version
# @param Name: a module name
# @param DepexGuid: a module DepexGuid
# @param DepexVersion: a module DepexVersion
#
def _AddModuleDepex(self, Guid, Version, Name, Path, DepexGuid=None, \
DepexVersion=None):
if DepexGuid is None or len(DepexGuid.strip()) == 0:
DepexGuid = 'N/A'
if DepexVersion is None or len(DepexVersion.strip()) == 0:
DepexVersion = 'N/A'
if os.name == 'posix':
Path = Path.replace('\\', os.sep)
else:
Path = Path.replace('/', os.sep)
#
# Add module depex information to DB.
#
SqlCommand = """insert into %s values('%s', '%s', '%s', '%s', '%s', '%s')"""\
% (self.ModDepexTable, Guid, Version, Name, Path, DepexGuid, DepexVersion)
self.Cur.execute(SqlCommand)
## Remove a distribution install information, if no version specified,
# remove all DPs with this Guid.
#
    # @param DpGuid: guid of the distribution package
    # @param DpVersion: version of the distribution package
#
def RemoveDpObj(self, DpGuid, DpVersion):
PkgList = self.GetPackageListFromDp(DpGuid, DpVersion)
#
# delete from ModDepex the standalone module's dependency
#
SqlCommand = \
"""delete from ModDepexInfo where ModDepexInfo.ModuleGuid in
(select ModuleGuid from StandaloneModInfo as B where B.DpGuid = '%s'
and B.DpVersion = '%s')
and ModDepexInfo.ModuleVersion in
(select ModuleVersion from StandaloneModInfo as B
where B.DpGuid = '%s' and B.DpVersion = '%s')
and ModDepexInfo.ModuleName in
(select ModuleName from StandaloneModInfo as B
where B.DpGuid = '%s' and B.DpVersion = '%s')
and ModDepexInfo.InstallPath in
(select InstallPath from StandaloneModInfo as B
where B.DpGuid = '%s' and B.DpVersion = '%s') """ % \
(DpGuid, DpVersion, DpGuid, DpVersion, DpGuid, DpVersion, DpGuid, DpVersion)
self.Cur.execute(SqlCommand)
#
# delete from ModDepex the from pkg module's dependency
#
for Pkg in PkgList:
SqlCommand = \
"""delete from ModDepexInfo where ModDepexInfo.ModuleGuid in
(select ModuleGuid from ModInPkgInfo
where ModInPkgInfo.PackageGuid ='%s' and
ModInPkgInfo.PackageVersion = '%s')
and ModDepexInfo.ModuleVersion in
(select ModuleVersion from ModInPkgInfo
where ModInPkgInfo.PackageGuid ='%s' and
ModInPkgInfo.PackageVersion = '%s')
and ModDepexInfo.ModuleName in
(select ModuleName from ModInPkgInfo
where ModInPkgInfo.PackageGuid ='%s' and
ModInPkgInfo.PackageVersion = '%s')
and ModDepexInfo.InstallPath in
(select InstallPath from ModInPkgInfo where
ModInPkgInfo.PackageGuid ='%s'
and ModInPkgInfo.PackageVersion = '%s')""" \
% (Pkg[0], Pkg[1], Pkg[0], Pkg[1], Pkg[0], Pkg[1], Pkg[0], Pkg[1])
self.Cur.execute(SqlCommand)
#
# delete the standalone module
#
SqlCommand = \
"""delete from %s where DpGuid ='%s' and DpVersion = '%s'""" % \
(self.StandaloneModTable, DpGuid, DpVersion)
self.Cur.execute(SqlCommand)
#
# delete the from pkg module
#
for Pkg in PkgList:
SqlCommand = \
"""delete from %s where %s.PackageGuid ='%s'
and %s.PackageVersion = '%s'""" % \
(self.ModInPkgTable, self.ModInPkgTable, Pkg[0], \
self.ModInPkgTable, Pkg[1])
self.Cur.execute(SqlCommand)
#
# delete packages
#
SqlCommand = \
"""delete from %s where DpGuid ='%s' and DpVersion = '%s'""" % \
(self.PkgTable, DpGuid, DpVersion)
self.Cur.execute(SqlCommand)
#
# delete file list from DP
#
SqlCommand = \
"""delete from %s where DpGuid ='%s' and DpVersion = '%s'""" % \
(self.DpFileListTable, DpGuid, DpVersion)
self.Cur.execute(SqlCommand)
#
# delete DP
#
SqlCommand = \
"""delete from %s where DpGuid ='%s' and DpVersion = '%s'""" % \
(self.DpTable, DpGuid, DpVersion)
self.Cur.execute(SqlCommand)
#self.Conn.commit()
## Get a list of distribution install information.
#
# @param Guid: distribution package guid
# @param Version: distribution package version
#
def GetDp(self, Guid, Version):
if Version is None or len(Version.strip()) == 0:
Version = 'N/A'
Logger.Verbose(ST.MSG_GET_DP_INSTALL_LIST)
(DpGuid, DpVersion) = (Guid, Version)
SqlCommand = """select * from %s where DpGuid ='%s'""" % \
(self.DpTable, DpGuid)
self.Cur.execute(SqlCommand)
else:
Logger.Verbose(ST.MSG_GET_DP_INSTALL_INFO_START)
(DpGuid, DpVersion) = (Guid, Version)
SqlCommand = \
"""select * from %s where DpGuid ='%s' and DpVersion = '%s'""" % \
(self.DpTable, DpGuid, DpVersion)
self.Cur.execute(SqlCommand)
DpList = []
for DpInfo in self.Cur:
DpGuid = DpInfo[0]
DpVersion = DpInfo[1]
InstallTime = DpInfo[2]
PkgFileName = DpInfo[3]
DpList.append((DpGuid, DpVersion, InstallTime, PkgFileName))
Logger.Verbose(ST.MSG_GET_DP_INSTALL_INFO_FINISH)
return DpList
## Get a list of distribution install dirs
#
# @param Guid: distribution package guid
# @param Version: distribution package version
#
def GetDpInstallDirList(self, Guid, Version):
SqlCommand = """select InstallPath from PkgInfo where DpGuid = '%s' and DpVersion = '%s'""" % (Guid, Version)
self.Cur.execute(SqlCommand)
DirList = []
for Result in self.Cur:
if Result[0] not in DirList:
DirList.append(Result[0])
SqlCommand = """select InstallPath from StandaloneModInfo where DpGuid = '%s' and DpVersion = '%s'""" % \
(Guid, Version)
self.Cur.execute(SqlCommand)
for Result in self.Cur:
if Result[0] not in DirList:
DirList.append(Result[0])
return DirList
## Get a list of distribution install file path information.
#
# @param Guid: distribution package guid
# @param Version: distribution package version
#
def GetDpFileList(self, Guid, Version):
(DpGuid, DpVersion) = (Guid, Version)
SqlCommand = \
"""select * from %s where DpGuid ='%s' and DpVersion = '%s'""" % \
(self.DpFileListTable, DpGuid, DpVersion)
self.Cur.execute(SqlCommand)
PathList = []
for Result in self.Cur:
Path = Result[0]
Md5Sum = Result[3]
PathList.append((os.path.join(self.Workspace, Path), Md5Sum))
return PathList
## Get files' repackage attribute if present that are installed into current workspace
#
# @retval FileDict: a Dict of file, key is file path, value is (DpGuid, DpVersion, NewDpFileName, RePackage)
#
def GetRePkgDict(self):
SqlCommand = """select * from %s """ % (self.DpTable)
self.Cur.execute(SqlCommand)
DpInfoList = []
for Result in self.Cur:
DpInfoList.append(Result)
FileDict = {}
for Result in DpInfoList:
DpGuid = Result[0]
DpVersion = Result[1]
NewDpFileName = Result[3]
RePackage = Result[5]
if RePackage == 'TRUE':
RePackage = True
else:
RePackage = False
for FileInfo in self.GetDpFileList(DpGuid, DpVersion):
PathInfo = FileInfo[0]
FileDict[PathInfo] = DpGuid, DpVersion, NewDpFileName, RePackage
return FileDict
## Get (Guid, Version) from distribution file name information.
#
# @param DistributionFile: Distribution File
#
def GetDpByName(self, DistributionFile):
SqlCommand = """select * from %s where NewPkgFileName = '%s'""" % \
(self.DpTable, DistributionFile)
self.Cur.execute(SqlCommand)
for Result in self.Cur:
DpGuid = Result[0]
DpVersion = Result[1]
NewDpFileName = Result[3]
return (DpGuid, DpVersion, NewDpFileName)
else:
return (None, None, None)
## Get a list of package information.
#
# @param Guid: package guid
# @param Version: package version
#
def GetPackage(self, Guid, Version, DpGuid='', DpVersion=''):
if DpVersion == '' or DpGuid == '':
(PackageGuid, PackageVersion) = (Guid, Version)
SqlCommand = """select * from %s where PackageGuid ='%s'
and PackageVersion = '%s'""" % (self.PkgTable, PackageGuid, \
PackageVersion)
self.Cur.execute(SqlCommand)
elif Version is None or len(Version.strip()) == 0:
SqlCommand = """select * from %s where PackageGuid ='%s'""" % \
(self.PkgTable, Guid)
self.Cur.execute(SqlCommand)
else:
(PackageGuid, PackageVersion) = (Guid, Version)
SqlCommand = """select * from %s where PackageGuid ='%s' and
PackageVersion = '%s'
and DpGuid = '%s' and DpVersion = '%s'""" % \
(self.PkgTable, PackageGuid, PackageVersion, \
DpGuid, DpVersion)
self.Cur.execute(SqlCommand)
PkgList = []
for PkgInfo in self.Cur:
PkgGuid = PkgInfo[0]
PkgVersion = PkgInfo[1]
InstallTime = PkgInfo[2]
InstallPath = PkgInfo[5]
PkgList.append((PkgGuid, PkgVersion, InstallTime, DpGuid, \
DpVersion, InstallPath))
return PkgList
## Get a list of module in package information.
#
# @param Guid: A module guid
# @param Version: A module version
#
def GetModInPackage(self, Guid, Version, Name, Path, PkgGuid='', PkgVersion=''):
(ModuleGuid, ModuleVersion, ModuleName, InstallPath) = (Guid, Version, Name, Path)
if PkgVersion == '' or PkgGuid == '':
SqlCommand = """select * from %s where ModuleGuid ='%s' and
ModuleVersion = '%s' and InstallPath = '%s'
and ModuleName = '%s'""" % (self.ModInPkgTable, ModuleGuid, \
ModuleVersion, InstallPath, ModuleName)
self.Cur.execute(SqlCommand)
else:
SqlCommand = """select * from %s where ModuleGuid ='%s' and
ModuleVersion = '%s' and InstallPath = '%s'
and ModuleName = '%s' and PackageGuid ='%s'
and PackageVersion = '%s'
""" % (self.ModInPkgTable, ModuleGuid, \
ModuleVersion, InstallPath, ModuleName, PkgGuid, PkgVersion)
self.Cur.execute(SqlCommand)
ModList = []
for ModInfo in self.Cur:
ModGuid = ModInfo[0]
ModVersion = ModInfo[1]
InstallTime = ModInfo[2]
InstallPath = ModInfo[5]
ModList.append((ModGuid, ModVersion, InstallTime, PkgGuid, \
PkgVersion, InstallPath))
return ModList
## Get a list of module standalone.
#
# @param Guid: A module guid
# @param Version: A module version
#
def GetStandaloneModule(self, Guid, Version, Name, Path, DpGuid='', DpVersion=''):
(ModuleGuid, ModuleVersion, ModuleName, InstallPath) = (Guid, Version, Name, Path)
if DpGuid == '':
SqlCommand = """select * from %s where ModuleGuid ='%s' and
ModuleVersion = '%s' and InstallPath = '%s'
and ModuleName = '%s'""" % (self.StandaloneModTable, ModuleGuid, \
ModuleVersion, InstallPath, ModuleName)
self.Cur.execute(SqlCommand)
else:
SqlCommand = """select * from %s where ModuleGuid ='%s' and
ModuleVersion = '%s' and InstallPath = '%s' and ModuleName = '%s' and DpGuid ='%s' and DpVersion = '%s'
""" % (self.StandaloneModTable, ModuleGuid, \
                ModuleVersion, InstallPath, ModuleName, DpGuid, DpVersion)
self.Cur.execute(SqlCommand)
ModList = []
for ModInfo in self.Cur:
ModGuid = ModInfo[0]
ModVersion = ModInfo[1]
InstallTime = ModInfo[2]
InstallPath = ModInfo[5]
ModList.append((ModGuid, ModVersion, InstallTime, DpGuid, \
DpVersion, InstallPath))
return ModList
## Get a list of module information that comes from DP.
#
# @param DpGuid: A Distribution Guid
# @param DpVersion: A Distribution version
#
def GetSModInsPathListFromDp(self, DpGuid, DpVersion):
PathList = []
SqlCommand = """select InstallPath from %s where DpGuid ='%s'
and DpVersion = '%s'
""" % (self.StandaloneModTable, DpGuid, DpVersion)
self.Cur.execute(SqlCommand)
for Result in self.Cur:
InstallPath = Result[0]
PathList.append(InstallPath)
return PathList
## Get a list of package information.
#
# @param DpGuid: A Distribution Guid
# @param DpVersion: A Distribution version
#
def GetPackageListFromDp(self, DpGuid, DpVersion):
SqlCommand = """select * from %s where DpGuid ='%s' and
DpVersion = '%s' """ % (self.PkgTable, DpGuid, DpVersion)
self.Cur.execute(SqlCommand)
PkgList = []
for PkgInfo in self.Cur:
PkgGuid = PkgInfo[0]
PkgVersion = PkgInfo[1]
InstallPath = PkgInfo[5]
PkgList.append((PkgGuid, PkgVersion, InstallPath))
return PkgList
## Get a list of modules that depends on package information from a DP.
#
# @param DpGuid: A Distribution Guid
# @param DpVersion: A Distribution version
#
def GetDpDependentModuleList(self, DpGuid, DpVersion):
ModList = []
PkgList = self.GetPackageListFromDp(DpGuid, DpVersion)
        if len(PkgList) == 0:
            return ModList
for Pkg in PkgList:
#
# get all in-package modules that depends on current
# Pkg (Guid match, Version match or NA) but not belong to
# current Pkg
#
SqlCommand = """select t1.ModuleGuid, t1.ModuleVersion,
t1.InstallPath from %s as t1, %s as t2 where
t1.ModuleGuid = t2.ModuleGuid and
t1.ModuleVersion = t2.ModuleVersion and t2.DepexGuid ='%s'
and (t2.DepexVersion = '%s' or t2.DepexVersion = 'N/A') and
t1.PackageGuid != '%s' and t1.PackageVersion != '%s'
""" % (self.ModInPkgTable, \
self.ModDepexTable, Pkg[0], Pkg[1], Pkg[0], \
Pkg[1])
self.Cur.execute(SqlCommand)
for ModInfo in self.Cur:
ModGuid = ModInfo[0]
ModVersion = ModInfo[1]
InstallPath = ModInfo[2]
ModList.append((ModGuid, ModVersion, InstallPath))
#
# get all modules from standalone modules that depends on current
#Pkg (Guid match, Version match or NA) but not in current dp
#
SqlCommand = \
"""select t1.ModuleGuid, t1.ModuleVersion, t1.InstallPath
from %s as t1, %s as t2 where t1.ModuleGuid = t2.ModuleGuid and
t1.ModuleVersion = t2.ModuleVersion and t2.DepexGuid ='%s'
and (t2.DepexVersion = '%s' or t2.DepexVersion = 'N/A') and
t1.DpGuid != '%s' and t1.DpVersion != '%s'
""" % \
(self.StandaloneModTable, self.ModDepexTable, Pkg[0], \
Pkg[1], DpGuid, DpVersion)
self.Cur.execute(SqlCommand)
for ModInfo in self.Cur:
ModGuid = ModInfo[0]
ModVersion = ModInfo[1]
InstallPath = ModInfo[2]
ModList.append((ModGuid, ModVersion, InstallPath))
return ModList
## Get Dp's list of modules.
#
# @param DpGuid: A Distribution Guid
# @param DpVersion: A Distribution version
#
def GetDpModuleList(self, DpGuid, DpVersion):
ModList = []
#
# get Dp module list from the DpFileList table
#
SqlCommand = """select FilePath
from %s
where DpGuid = '%s' and DpVersion = '%s' and
FilePath like '%%.inf'
""" % (self.DpFileListTable, DpGuid, DpVersion)
self.Cur.execute(SqlCommand)
for ModuleInfo in self.Cur:
FilePath = ModuleInfo[0]
ModList.append(os.path.join(self.Workspace, FilePath))
return ModList
## Get a module depex
#
    # @param Guid: A module Guid
    # @param Version: A module version
    # @param Path: A module install path
#
def GetModuleDepex(self, Guid, Version, Path):
#
        # Get module depex information from the DB.
#
SqlCommand = """select * from %s where ModuleGuid ='%s' and
ModuleVersion = '%s' and InstallPath ='%s'
""" % (self.ModDepexTable, Guid, Version, Path)
self.Cur.execute(SqlCommand)
DepexList = []
for DepInfo in self.Cur:
DepexGuid = DepInfo[3]
DepexVersion = DepInfo[4]
DepexList.append((DepexGuid, DepexVersion))
return DepexList
## Inventory the distribution installed to current workspace
#
# Inventory the distribution installed to current workspace
#
def InventoryDistInstalled(self):
SqlCommand = """select * from %s """ % (self.DpTable)
self.Cur.execute(SqlCommand)
DpInfoList = []
for Result in self.Cur:
DpGuid = Result[0]
DpVersion = Result[1]
DpAliasName = Result[3]
DpFileName = Result[4]
DpInfoList.append((DpGuid, DpVersion, DpFileName, DpAliasName))
return DpInfoList
## Close entire database
#
# Close the connection and cursor
#
def CloseDb(self):
#
# drop the dummy table
#
SqlCommand = """
drop table IF EXISTS %s
""" % self.DummyTable
self.Cur.execute(SqlCommand)
self.Conn.commit()
self.Cur.close()
self.Conn.close()
## Convert To Sql String
#
# 1. Replace "'" with "''" in each item of StringList
#
# @param StringList: A list for strings to be converted
#
def __ConvertToSqlString(self, StringList):
if self.DpTable:
pass
return list(map(lambda s: s.replace("'", "''"), StringList))
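# Illustrative usage sketch; DbPath and Workspace are hypothetical values.
# InitDatabase also serves as a single-instance lock via the dummy table,
# and CloseDb drops that table before closing the connection.
def _demo_ipidb(DbPath, Workspace):
    Db = IpiDatabase(DbPath, Workspace)
    Db.InitDatabase()
    Installed = Db.InventoryDistInstalled()   # [(DpGuid, DpVersion, DpFileName, DpAliasName)]
    Db.CloseDb()
    return Installed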
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Core/IpiDb.py
|
## @file
# This file is used to define strings used in the UPT tool
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
"""
This file contains user visible strings in a format that can be used for
localization
"""
import gettext
#
# string table starts here...
#
## strings are classified into the following types
# MSG_...: it is a message string
# ERR_...: it is a error string
# WRN_...: it is a warning string
# HLP_...: it is a help string
#
_ = gettext.gettext
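# Illustrative note (the message chosen below is arbitrary): every string is
# wrapped in _() so gettext can extract it for translation, and callers fill
# the % placeholders at the point of use, for example:
#     Logger.Info(MSG_INSTALL_PACKAGE % PackageName)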
MSG_USAGE_STRING = _("\n"
"UEFI Packaging Tool (UEFIPT)\n"
"%prog [options]"
)
##
# Version and Copyright
#
MSG_VERSION_NUMBER = _("1.1")
MSG_VERSION = _("UEFI Packaging Tool (UEFIPT) - Revision " + \
MSG_VERSION_NUMBER)
MSG_COPYRIGHT = _("Copyright (c) 2011 - 2018 Intel Corporation All Rights Reserved.")
MSG_VERSION_COPYRIGHT = _("\n %s\n %s" % (MSG_VERSION, MSG_COPYRIGHT))
MSG_USAGE = _("%s [options]\n%s" % ("UPT", MSG_VERSION_COPYRIGHT))
MSG_DESCRIPTION = _("The UEFIPT is used to create, " + \
"install or remove a UEFI Distribution Package. " + \
"If WORKSPACE environment variable is present, " + \
"then UPT will install packages to the location specified by WORKSPACE, " + \
"otherwise UPT will install packages to the current directory. " + \
"Option -n will override this default installation location")
#
# INF Parser related strings.
#
ERR_INF_PARSER_HEADER_FILE = _(
"The Header comment section should start with an @file at the top.")
ERR_INF_PARSER_HEADER_MISSGING = _(
"The Header comment is missing. It must be corrected before continuing.")
ERR_INF_PARSER_UNKNOWN_SECTION = _("An unknown section was found. "
"It must be corrected before continuing. ")
ERR_INF_PARSER_NO_SECTION_ERROR = _("No section was found. "
"A section must be included before continuing.")
ERR_INF_PARSER_BUILD_OPTION_FORMAT_INVALID = \
_("Build Option format incorrect.")
ERR_INF_PARSER_BINARY_ITEM_FORMAT_INVALID = _(
"The format of binary %s item is incorrect. "
"It should contain at least %d elements.")
ERR_INF_PARSER_BINARY_ITEM_FORMAT_INVALID_MAX = _(
"The format of binary %s item is invalid, "
"it should contain not more than %d elements.")
ERR_INF_PARSER_BINARY_ITEM_INVALID_FILETYPE = _(
"The Binary FileType is incorrect. It should in %s")
ERR_INF_PARSER_BINARY_ITEM_FILE_NOT_EXIST = _(
"The Binary File: %s not exist.")
ERR_INF_PARSER_BINARY_ITEM_FILENAME_NOT_EXIST = _(
"The Binary File Name item not exist")
ERR_INF_PARSER_BINARY_VER_TYPE = _(
"Only this type is allowed: \"%s\".")
ERR_INF_PARSER_MULTI_DEFINE_SECTION = \
_("Multiple define sections found. "
"It must be corrected before continuing.")
ERR_INF_PARSER_DEFINE_ITEM_MORE_THAN_ONE_FOUND = \
_("More than 1 %s is defined in DEFINES section. "
"It must be corrected before continuing.")
ERR_INF_PARSER_DEFINE_NAME_INVALID = \
_("Incorrect name format for : %s")
ERR_INF_PARSER_DEFINE_GUID_INVALID = \
_("The format of this GUID is incorrect: %s")
ERR_INF_PARSER_DEFINE_MODULETYPE_INVALID = _("Incorrect MODULE_TYPE: %s")
ERR_INF_PARSER_DEFINE_FROMAT_INVALID = _("Incorrect format: %s")
ERR_INF_PARSER_FILE_NOT_EXIST = _("This file does not exist: %s")
ERR_INF_PARSER_FILE_NOT_EXIST_OR_NAME_INVALID = \
_("The file does not exist or not in sub-directories "
"or has an incorrect file name of the directory containing the INF or DEC file: %s. "
"It must be corrected before continuing")
ERR_INF_PARSER_DEFINE_SHADOW_INVALID = \
_("The SHADOW keyword is only valid for"
" SEC, PEI_CORE and PEIM module types.")
ERR_INF_PARSER_DEFINE_SECTION_HEADER_INVALID = \
_("The format of the section header is incorrect")
ERR_INF_PARSER_DEPEX_SECTION_INVALID = \
_("A module can't have a Depex section when its module type is %s")
ERR_INF_PARSER_DEPEX_SECTION_INVALID_FOR_BASE_LIBRARY_CLASS = \
_("A base type library class can't have a Depex section with module type not defined.")
ERR_INF_PARSER_DEPEX_SECTION_INVALID_FOR_LIBRARY_CLASS = \
_("A library class can't have a Depex section when its supported module type list is not defined.")
ERR_INF_PARSER_DEPEX_SECTION_INVALID_FOR_DRIVER = \
_("A driver can't have a Depex section when its module type is UEFI_DRIVER.")
ERR_INF_PARSER_DEPEX_SECTION_NOT_DETERMINED = \
_("Cannot determine the module's Depex type. The Depex's module types are conflict")
ERR_INF_PARSER_DEFINE_SECTION_MUST_ITEM_NOT_EXIST = _(
"No %s found in INF file, please check it.")
ERR_INF_PARSER_DEPEX_SECTION_MODULE_TYPE_ERROR = \
_("The module type of [Depex] section is invalid, not support type of %s")
ERR_INF_PARSER_DEPEX_SECTION_CONTENT_MISSING = \
_("Missing content in: %s")
ERR_INF_PARSER_DEPEX_SECTION_CONTENT_ERROR = \
_("The [Depex] section contains invalid content: %s")
ERR_INF_PARSER_DEPEX_SECTION_SEC_TYPE_ERROR = \
_("The format is incorrect. The section type keyword of the content in the"
" [Depex] section is only for 'PEI_DEPEX', 'DXE_DEPEX', 'SMM_DEPEX', "
"it does not support type: %s")
ERR_INF_PARSER_UE_SECTION_USER_ID_ERROR = \
_("This format is incorrect. "
"The UserID: %s in [UserExtension] section is incorrect.")
ERR_INF_PARSER_UE_SECTION_ID_STRING_ERROR = \
_("This format is incorrect. "
"IdString: %s in [UserExtension] section is incorrect.")
ERR_INF_PARSER_LIBRARY_SECTION_CONTENT_ERROR = \
_("The format is incorrect. "
"You can only have a Library name and a Feature flag in one line.")
ERR_INF_PARSER_LIBRARY_SECTION_LIBNAME_MISSING = \
_("Format invalid. Please specify a library name.")
ERR_INF_PARSER_SOURCES_SECTION_CONTENT_ERROR = \
_("The format is incorrect. It should be formatted as follows: "
"FileName, Family | TagName | ToolCode | FeatureFlagExpr.")
ERR_INF_PARSER_PCD_SECTION_TYPE_ERROR = \
_("The PCD section type is incorrect. The value should be this list: %s")
ERR_INF_PARSER_PCD_SECTION_CONTENT_ERROR = \
_("PcdName format invalid."
"Should like following: PcdName | Value | FeatureFlag.")
ERR_INF_PARSER_PCD_NAME_FORMAT_ERROR = \
_("Format invalid."
"Should like following: <TokenSpaceGuidCName>.<PcdCName> ")
ERR_INF_PARSER_GUID_PPI_PROTOCOL_SECTION_CONTENT_ERROR = \
_("The format is incorrect. "
"It should be formatted as follows: CName | FeatureFlag.")
ERR_INF_PARSER_PACKAGE_SECTION_CONTENT_ERROR = \
_("The format is incorrect. "
"It should be formatted as follows: <TokenSpaceGuidCName>.<PcdCName>")
ERR_INF_PARSER_PCD_TAIL_COMMENTS_INVALID = \
_("The format is incorrect. "
"Multiple usage descriptions must be described on subsequent lines.")
ERR_INF_PARSER_MODULE_SECTION_TYPE_ERROR = \
_("This section format is incorrect: %s.")
ERR_INF_PARSER_SECTION_NAME_DUPLICATE = \
_("This section has multiple section names, "
"only one section name is permitted.")
ERR_INF_PARSER_SECTION_ARCH_CONFLICT = \
_("The 'common' ARCH must not be used with the specified ARCHs.")
ERR_INF_PARSER_SOURCE_SECTION_TAGNAME_INVALID = \
_("This TagName is incorrect: %s. "
"It must be corrected before continuing.")
ERR_INF_PARSER_TAGNAME_NOT_PERMITTED = \
_("TagName is not permitted: %s. "
"It must be corrected before continuing.")
ERR_INF_PARSER_TOOLCODE_NOT_PERMITTED = \
_("ToolCode is not permitted: %s. "
"It must be corrected before continuing.")
ERR_INF_PARSER_SOURCE_SECTION_FAMILY_INVALID = \
_("This family is incorrect: %s. "
"It must be corrected before continuing. ")
ERR_INF_PARSER_SOURCE_SECTION_SECTIONNAME_INVALID = \
_("This SectionName is incorrect: %s. "
"It must be corrected before continuing.")
ERR_INF_PARSER_PCD_CVAR_GUID = \
_("TokenSpaceGuidCName must be valid C variable format.")
ERR_INF_PARSER_PCD_CVAR_PCDCNAME = \
_("PcdCName must be valid C variable format.")
ERR_INF_PARSER_PCD_VALUE_INVALID = \
_("The PCD value is incorrect. It must be corrected before continuing.")
ERR_INF_PARSER_FEATURE_FLAG_EXP_SYNTAX_INVLID = \
_("Incorrect feature flag expression: %s")
ERR_INF_PARSER_FEATURE_FLAG_EXP_MISSING = \
_("The feature flag expression is missing. Please specify a feature flag.")
ERR_INF_PARSER_INVALID_CNAME = \
_("Incorrect CName: %s. You must specify a valid C variable name.")
ERR_INF_PARSER_CNAME_MISSING = \
_("Missing CName. Specify a valid C variable name.")
ERR_INF_PARSER_DEFINE_SECTION_KEYWORD_INVALID = \
_("The Define section contains an invalid keyword: \"%s\"."
"It must be corrected before continuing.")
ERR_INF_PARSER_FILE_MISS_DEFINE = \
_("The following file listed in the module "
"directory is not listed in the INF: %s")
ERR_INF_PARSER_VERSION_NUMBER_DEPRICATED = \
_("VERSION_NUMBER deprecated. "
"The INF file %s should be modified to use the VERSION_STRING instead.")
ERR_INF_PARSER_VER_EXIST_BOTH_NUM_STR = \
_("The INF file %s defines both VERSION_NUMBER and VERSION_STRING, "
"using VERSION_STRING")
ERR_INF_PARSER_NOT_SUPPORT_EDKI_INF = _("EDKI INF is not supported")
ERR_INF_PARSER_EDKI_COMMENT_IN_EDKII = _("The EDKI style comment is not supported in EDKII modules")
ERR_INF_PARSER_FEATUREPCD_USAGE_INVALID = _("The usage for FeaturePcd can only"
" be type of \"CONSUMES\".")
ERR_INF_PARSER_DEFINE_ITEM_NO_NAME = _("No name specified")
ERR_INF_PARSER_DEFINE_ITEM_NO_VALUE = _("No value specified")
ERR_INF_PARSER_MODULETYPE_INVALID = _("Drivers and applications are not allowed to have a MODULE_TYPE of \"BASE\". "
"Only libraries are permitted to a have a MODULE_TYPE of \"BASE\".")
ERR_INF_GET_PKG_DEPENDENCY_FAIL = _("Failed to get PackageDependencies information from file %s")
ERR_INF_NO_PKG_DEPENDENCY_INFO = _("There are no packages defined that use the AsBuilt PCD information.")
#
# Item duplicate
#
ERR_INF_PARSER_ITEM_DUPLICATE_IN_DEC = \
_('"%s" is redefined in its dependent DEC files')
ERR_INF_PARSER_ITEM_DUPLICATE = _("%s define duplicated! "
"It must be corrected before continuing.")
ERR_INF_PARSER_ITEM_DUPLICATE_COMMON = _("%s define duplicated! An item listed "
    "in an architectural section must not be listed in the common architectural "
    "section. It must be corrected before continuing.")
ERR_INF_PARSER_UE_SECTION_DUPLICATE_ERROR = \
_("%s define duplicated! Each UserExtensions section header must have a "
"unique set of UserId, IdString and Arch values. "
"It must be corrected before continuing.")
ERR_INF_PARSER_DEFINE_LIB_NAME_INVALID = \
_("The name 'NULL' for LibraryClass is a reserved word."
"Please don't use it.")
ERR_GLOBAL_MARCO_INVALID = \
_("Using global MACRO in INF/DEC is not permitted: %s . "
"It must be corrected before continuing.")
ERR_MARCO_DEFINITION_MISS_ERROR = \
_("MACRO expand incorrectly, can not find the MACRO definition. "
"It must be corrected before continuing.")
#
# AsBuilt related
#
ERR_LIB_CONTATIN_ASBUILD_AND_COMMON = _("A binary INF file should not contain both AsBuilt LIB_INSTANCES information "
"and a common library entry.")
ERR_LIB_INSTANCE_MISS_GUID = _("Could not get FILE_GUID definition from instance INF file.")
ERR_BO_CONTATIN_ASBUILD_AND_COMMON = _("A binary INF file should contain either AsBuilt information "
"or a common build option entry, not both.")
ERR_ASBUILD_PCD_SECTION_TYPE = _("The AsBuilt INF file contains a PCD section type that is not permitted: %s.")
ERR_ASBUILD_PATCHPCD_FORMAT_INVALID = _("The AsBuilt PatchPcd entry must contain 3 elements: PcdName|Value|Offset")
ERR_ASBUILD_PCDEX_FORMAT_INVALID = _("The AsBuilt PcdEx entry must contain one element: PcdName")
ERR_ASBUILD_PCD_VALUE_INVALID = \
_("The AsBuilt PCD value %s is incorrect or not align with its datum type %s. "
"It must be corrected before continuing.")
ERR_ASBUILD_PCD_TOKENSPACE_GUID_VALUE_MISS = _("Package file value could not be retrieved for %s.")
ERR_ASBUILD_PCD_DECLARITION_MISS = _("PCD Declaration in DEC files could not be found for: %s.")
ERR_ASBUILD_PCD_OFFSET_FORMAT_INVALID = _("PCD offset format invalid, a decimal number in (0-4294967295) or "
    "a hex number of UINT32 is allowed: %s.")
#
# XML parser related strings
#
ERR_XML_PARSER_REQUIRED_ITEM_MISSING = \
_("The XML section/attribute '%s' is required under %s, it can't be missing or empty")
ERR_XML_INVALID_VARIABLENAME = \
_("The VariableName of the GUID in the XML tree does not conform to the packaging specification. "
"Only a Hex Byte Array of UCS-2 format or L\"string\" is allowed): %s %s %s")
ERR_XML_INVALID_LIB_SUPMODLIST = _("The LIBRARY_CLASS entry %s must have the list appended using the format as: \n"
"BASE SEC PEI_CORE PEIM DXE_CORE DXE_DRIVER SMM_CORE DXE_SMM_DRIVER DXE_RUNTIME_DRIVER "
"DXE_SAL_DRIVER UEFI_DRIVER UEFI_APPLICATION USER_DEFINED\n Current is %s.")
ERR_XML_INVALID_EXTERN_SUPARCHLIST = \
_("There is a mismatch of SupArchList %s between the EntryPoint, UnloadImage, Constructor, "
"and Destructor elements in the ModuleSurfaceArea.ModuleProperties: SupArchList: %s. ")
ERR_XML_INVALID_EXTERN_SUPMODLIST = _("The SupModList attribute of the CONSTRUCTOR or DESTRUCTOR element: %s does not "
"match the Supported Module Types listed after LIBRARY_CLASS = <Keyword> | %s")
ERR_XML_INVALID_EXTERN_SUPMODLIST_NOT_LIB = _("The module is not a library module. "
"The MODULE_TYPE : %s listed in the ModuleSurfaceArea.Header "
"must match the SupModList attribute %s")
ERR_XML_INVALID_BINARY_FILE_TYPE = _("Invalid binary file type %s.")
#
# Verbosity related strings.
#
MSG_DISTRIBUTION_PACKAGE_FILE_EXISTS = _(
"The distribution package file %s already exists.\nPress Y to override it."
" To exit the application, press any other key.")
MSG_CHECK_MODULE_EXIST = _(
"\nChecking to see if module exists in workspace started ...")
MSG_CHECK_MODULE_EXIST_FINISH = \
_("Checking to see if module exists in workspace ... Done.")
MSG_CHECK_MODULE_DEPEX_START = _(
"\nChecking to see if module depex met by workspace started ...")
MSG_CHECK_MODULE_DEPEX_FINISH = _(
"Checking to see if module depex met by workspace ... Done.")
MSG_CHECK_PACKAGE_START = _(
"\nChecking to see if package exists in workspace started ...")
MSG_CHECK_PACKAGE_FINISH = _(
"Checking to see if package exists in workspace ... Done.")
MSG_CHECK_DP_START = \
_("\nChecking to see if DP exists in workspace ... Done.")
MSG_CHECK_DP_FINISH = _("Check DP exists in workspace ... Done.")
MSG_MODULE_DEPEND_ON = _("Module %s depends on Package %s")
MSG_INIT_IPI_START = _("\nInitialize IPI database started ...")
MSG_INIT_IPI_FINISH = _("Initialize IPI database ... Done.")
MSG_GET_DP_INSTALL_LIST = _(
"\nGetting list of DP install information started ...")
MSG_GET_DP_INSTALL_INFO_START = _(
"\nGetting list of DP install information started ...")
MSG_GET_DP_INSTALL_INFO_FINISH = _("Getting DP install information ... Done.")
MSG_UZIP_PARSE_XML = _(
"Unzipping and parsing distribution package XML file ... ")
MSG_INSTALL_PACKAGE = _("Installing package ... %s")
MSG_INSTALL_MODULE = _("Installing module ... %s")
MSG_NEW_FILE_NAME_FOR_DIST = _(
"Provide new filename for distribution file to be saved:\n")
MSG_UPDATE_PACKAGE_DATABASE = _("Update Distribution Package Database ...")
MSG_PYTHON_ON = _("(Python %s on %s) ")
MSG_EDKII_MAIL_ADDR = 'devel@edk2.groups.io'
MSG_SEARCH_FOR_HELP = _(
"\n(Please send email to %s for\n"
" help, attach the following call stack trace.)\n")
MSG_REMOVE_TEMP_FILE_STARTED = _("Removing temp files started ... ")
MSG_REMOVE_TEMP_FILE_DONE = _("Removing temp files ... Done.")
MSG_FINISH = _("Successfully Done.")
MSG_COMPRESS_DISTRIBUTION_PKG = _("Compressing Distribution Package File ...")
MSG_CONFIRM_REMOVE = _(
"Some packages or modules depend on this distribution package.\n"
"Do you really want to remove it?")
MSG_CONFIRM_REMOVE2 = _(
"This file has been modified: %s. Do you want to remove it?"
"Press Y to remove or other key to keep it")
MSG_CONFIRM_REMOVE3 = _(
"This is a newly created file: %s. Are you sure you want to remove it? "
"Press Y to remove or any other key to keep it")
MSG_USER_DELETE_OP = _(
"Press Y to delete all files or press any other key to quit:")
MSG_REMOVE_FILE = _("Removing file: %s ...")
MSG_INITIALIZE_ECC_STARTED = _("\nInitialize ECC database started ...")
MSG_INITIALIZE_ECC_DONE = _("Initialize ECC database ... Done.")
MSG_DEFINE_STATEMENT_FOUND = _("DEFINE statement '%s' found in section %s")
MSG_PARSING = _("Parsing %s ...")
MSG_REPKG_CONFLICT = \
_("Repackaging is not allowed on this file: %s. "
"It was installed from distribution %s(Guid %s Version %s).")
MSG_INVALID_MODULE_INTRODUCED = _("Some modules are not valid after removal.")
MSG_CHECK_LOG_FILE = _("Please check log file %s for full list")
MSG_NEW_FILE_NAME = _(
"Provide new filename:\n")
MSG_RELATIVE_PATH_ONLY = _("Please specify a relative path, full path is not allowed: %s")
MSG_NEW_PKG_PATH = _(
"Select package location. To quit with no input, press [Enter].")
MSG_CHECK_DP_FOR_REPLACE = _("Verifying the dependency rule for replacement of distributions:\n %s replaces %s")
MSG_CHECK_DP_FOR_INSTALL = _("Verifying the dependency rule for installation of distribution:\n %s")
MSG_REPLACE_ALREADY_INSTALLED_DP = _("Distribution with the same GUID/Version is already installed, "
"replace would result in two instances, which is not allowed")
MSG_RECOVER_START = _('An error was detected, recovery started ...')
MSG_RECOVER_DONE = _('Recovery completed.')
MSG_RECOVER_FAIL = _('Recovery failed.')
#
# Error related strings.
#
ERR_DEPENDENCY_NOT_MATCH = _(
"Module %s's dependency on package %s (GUID %s Version %s) "
"cannot be satisfied")
ERR_MODULE_NOT_INSTALLED = _(
"This module is not installed in the workspace: %s\n")
ERR_DIR_ALREADY_EXIST = _(
"This directory already exists: %s.\n"
"Select another location. Press [Enter] with no input to quit:")
ERR_USER_INTERRUPT = _("The user has paused the application")
ERR_DIST_FILE_TOOMANY = _(
"Only one .content and one .pkg file in ZIP file are allowed.")
ERR_DIST_FILE_TOOFEW = _(
"Must have one .content and one .pkg file in the ZIP file.")
ERR_FILE_ALREADY_EXIST = _(
"This file already exists: %s.\n"
"Select another path to continue. To quit with no input press [Enter]:")
ERR_SPECIFY_PACKAGE = _(
"One distribution package must be specified")
ERR_FILE_BROKEN = _(
"This file is invalid in the distribution package: %s")
ERR_PACKAGE_NOT_MATCH_DEPENDENCY = _(
"This distribution package does not meet the dependency requirements")
ERR_UNKNOWN_FATAL_INSTALL_ERR = \
_("Unknown unrecoverable error when installing: %s")
ERR_UNKNOWN_FATAL_REPLACE_ERR = \
_("Unknown unrecoverable error during replacement of distributions: %s replaces %s")
ERR_OPTION_NOT_FOUND = _("Options not found")
ERR_INVALID_PACKAGE_NAME = _("Incorrect package name: %s. ")
ERR_INVALID_PACKAGE_PATH = \
_("Incorrect package path: %s. The path must be a relative path.")
ERR_NOT_FOUND = _("This was not found: %s")
ERR_INVALID_MODULE_NAME = _("This is not a valid module name: %s")
ERR_INVALID_METAFILE_PATH = _('This file must be in sub-directory of WORKSPACE: %s.')
ERR_INVALID_MODULE_PATH = \
_("Incorrect module path: %s. The path must be a relative path.")
ERR_UNKNOWN_FATAL_CREATING_ERR = _("Unknown error when creating: %s")
ERR_PACKAGE_NOT_INSTALLED = _(
"This distribution package not installed: %s")
ERR_DISTRIBUTION_NOT_INSTALLED = _(
"The distribution package is not installed.")
ERR_UNKNOWN_FATAL_REMOVING_ERR = _("Unknown error when removing package")
ERR_UNKNOWN_FATAL_INVENTORYWS_ERR = _("Unknown error when inventorying WORKSPACE")
ERR_NOT_CONFIGURE_WORKSPACE_ENV = _(
"The WORKSPACE environment variable must be configured.")
ERR_NO_TEMPLATE_FILE = _("This package information data file is not found: %s")
ERR_DEBUG_LEVEL = _(
"Not supported debug level. Use default level instead.")
ERR_REQUIRE_T_OPTION = _(
"Option -t is required during distribution creation.")
ERR_REQUIRE_O_OPTION = _(
"Option -o is required during distribution replacement.")
ERR_REQUIRE_U_OPTION = _(
"Option -u is required during distribution replacement.")
ERR_REQUIRE_I_C_R_OPTION = _(
"Options -i, -c and -r are mutually exclusive.")
ERR_I_C_EXCLUSIVE = \
_("Option -c and -i are mutually exclusive.")
ERR_I_R_EXCLUSIVE = \
_("Option -i and -r are mutually exclusive.")
ERR_C_R_EXCLUSIVE = \
_("Option -c and -r are mutually exclusive.")
ERR_U_ICR_EXCLUSIVE = \
_("Option -u and -c/-i/-r are mutually exclusive.")
ERR_L_OA_EXCLUSIVE = \
_("Option -l and -c/-i/-r/-u are mutually exclusive.")
ERR_FAILED_LOAD = _("Failed to load %s\n\t%s")
ERR_PLACEHOLDER_DIFFERENT_REPEAT = _(
    "${%s} has a different repeat count from the others.")
ERR_KEY_NOTALLOWED = _("This keyword is not allowed: %s")
ERR_NOT_FOUND_ENVIRONMENT = _("Environment variable not found")
ERR_WORKSPACE_NOTEXIST = _("WORKSPACE doesn't exist")
ERR_SPACE_NOTALLOWED = _(
"Whitespace characters are not allowed in the WORKSPACE path. ")
ERR_MACRONAME_NOGIVEN = _("No MACRO name given")
ERR_MACROVALUE_NOGIVEN = _("No MACRO value given")
ERR_MACRONAME_INVALID = _("Incorrect MACRO name: %s")
ERR_MACROVALUE_INVALID = _("Incorrect MACRO value: %s")
ERR_NAME_ONLY_DEFINE = _(
"This variable can only be defined via environment variable: %s")
ERR_EDK_GLOBAL_SAMENAME = _(
"EDK_GLOBAL defined a macro with the same name as one defined by 'DEFINE'")
ERR_SECTIONNAME_INVALID = _(
    "An incorrect section name was found: %s. The correct format is '%s'.")
ERR_CHECKFILE_NOTFOUND = _(
"Can't find file '%s' defined in section '%s'")
ERR_INVALID_NOTFOUND = _(
"Incorrect statement '%s' was found in section '%s'")
ERR_TEMPLATE_NOTFOUND = _("This package information data file is not found: %s")
ERR_SECTION_NAME_INVALID = _('Incorrect section name: %s')
ERR_SECTION_REDEFINE = _(
"This section already defined: %s.")
ERR_SECTION_NAME_NONE = \
_('The section needs to be specified first.')
ERR_KEYWORD_INVALID = _('Invalid keyword: %s')
ERR_VALUE_INVALID = _("Invalid \"%s\" value in section [%s].")
ERR_FILELIST_LOCATION = _(
'The directory "%s" must contain this file: "%s".')
ERR_KEYWORD_REDEFINE = _(
"Keyword in this section can only be used once: %s.")
ERR_FILELIST_EXIST = _(
'This file does not exist: %s.')
ERR_COPYRIGHT_CONTENT = _(
"The copyright content must contain the word \"Copyright\" (case insensitive).")
ERR_WRONG_FILELIST_FORMAT = \
    _('File list format is incorrect. '
      'The correct format is: filename|key=value[|key=value]')
ERR_FILELIST_ATTR = _(
"The value of attribute \"%s\" includes illegal character.")
ERR_UNKNOWN_FILELIST_ATTR = _(
'Unknown attribute name: %s.')
ERR_EMPTY_VALUE = _("Empty value is not allowed")
ERR_KEYWORD_MANDATORY = _('This keyword is mandatory: %s')
ERR_BOOLEAN_VALUE = _(
'Value of key [%s] must be true or false, current: [%s]')
ERR_GUID_VALUE = _(
'GUID must have the format of 8-4-4-4-12 with HEX value. '
'Current value: [%s]')
ERR_VERSION_VALUE = _(
'The value of key [%s] must be a decimal number. Found: [%s]')
ERR_VERSION_XMLSPEC = _(
'XmlSpecification value must be 1.1, current: %s.')
ERR_INVALID_GUID = _("Incorrect GUID value string: %s")
ERR_FILE_NOT_FOUND = \
_("File or directory not found in workspace")
ERR_FILE_OPEN_FAILURE = _("Could not open file")
ERR_FILE_WRITE_FAILURE = _("Could not write file.")
ERR_FILE_PARSE_FAILURE = _("Could not parse file")
ERR_FILE_READ_FAILURE = _("Could not read file")
ERR_FILE_CREATE_FAILURE = _("Could not create file")
ERR_FILE_CHECKSUM_FAILURE = _("Checksum of file is incorrect")
ERR_FILE_COMPRESS_FAILURE = _("File compression did not correctly")
ERR_FILE_DECOMPRESS_FAILURE = \
_("File decompression did not complete correctly")
ERR_FILE_MOVE_FAILURE = _("Move file did not complete successfully")
ERR_FILE_DELETE_FAILURE = _("File could not be deleted")
ERR_FILE_COPY_FAILURE = _("File did not copy correctly")
ERR_FILE_POSITIONING_FAILURE = _("Could not find file seek position")
ERR_FILE_TYPE_MISMATCH = _("Incorrect file type")
ERR_FILE_CASE_MISMATCH = _("File name case mismatch")
ERR_FILE_DUPLICATED = _("Duplicate file found")
ERR_FILE_UNKNOWN_ERROR = _("Unknown error encountered on file")
ERR_FILE_NAME_INVALIDE = _("This file name is invalid, it must not be an absolute path or "
"contain a period \".\" or \"..\": %s.")
ERR_OPTION_UNKNOWN = _("Unknown option")
ERR_OPTION_MISSING = _("Missing option")
ERR_OPTION_CONFLICT = _("Options conflict")
ERR_OPTION_VALUE_INVALID = _("Invalid option value")
ERR_OPTION_DEPRECATED = _("Deprecated option")
ERR_OPTION_NOT_SUPPORTED = _("Unsupported option")
ERR_OPTION_UNKNOWN_ERROR = _("Unknown error when processing options")
ERR_PARAMETER_INVALID = _("Invalid parameter")
ERR_PARAMETER_MISSING = _("Missing parameter")
ERR_PARAMETER_UNKNOWN_ERROR = _("Unknown error in parameters")
ERR_FORMAT_INVALID = _("Invalid syntax/format")
ERR_FORMAT_NOT_SUPPORTED = _("Syntax/format not supported")
ERR_FORMAT_UNKNOWN = _("Unknown format")
ERR_FORMAT_UNKNOWN_ERROR = _("Unknown error in syntax/format ")
ERR_RESOURCE_NOT_AVAILABLE = _("Not available")
ERR_RESOURCE_ALLOCATE_FAILURE = _("A resource allocation has failed")
ERR_RESOURCE_FULL = _("Full")
ERR_RESOURCE_OVERFLOW = _("Overflow")
ERR_RESOURCE_UNDERRUN = _("Underrun")
ERR_RESOURCE_UNKNOWN_ERROR = _("Unknown error")
ERR_ATTRIBUTE_NOT_AVAILABLE = _("Not available")
ERR_ATTRIBUTE_RETRIEVE_FAILURE = _("Unable to retrieve")
ERR_ATTRIBUTE_SET_FAILURE = _("Unable to set")
ERR_ATTRIBUTE_UPDATE_FAILURE = _("Unable to update")
ERR_ATTRIBUTE_ACCESS_DENIED = _("Access denied")
ERR_ATTRIBUTE_UNKNOWN_ERROR = _("Unknown error when accessing")
ERR_COMMAND_FAILURE = _("Unable to execute command")
ERR_IO_NOT_READY = _("Not ready")
ERR_IO_BUSY = _("Busy")
ERR_IO_TIMEOUT = _("Timeout")
ERR_IO_UNKNOWN_ERROR = _("Unknown error in IO operation")
ERR_UNKNOWN_ERROR = _("Unknown error")
ERR_UPT_ALREADY_INSTALLED_ERROR = _("Already installed")
ERR_UPT_ENVIRON_MISSING_ERROR = _("Environ missing")
ERR_UPT_REPKG_ERROR = _("File not allowed for RePackage")
ERR_UPT_DB_UPDATE_ERROR = _("Update database did not complete successfully")
ERR_UPT_INI_PARSE_ERROR = _("INI file parse error")
ERR_COPYRIGHT_MISSING = \
_("Header comment section must have copyright information")
ERR_LICENSE_MISSING = \
_("Header comment section must have license information")
ERR_INVALID_BINARYHEADER_FORMAT = \
_("Binary Header comment section must have abstract,description,copyright,license information")
ERR_MULTIPLE_BINARYHEADER_EXIST = \
    _("The INF file supports at most one BinaryHeader in the file header section.")
ERR_INVALID_COMMENT_FORMAT = _("Comment must start with #")
ERR_USER_ABORT = _("User has stopped the application")
ERR_DIST_EXT_ERROR = \
_("Distribution file extension should be '.dist'. Current given: '%s'.")
ERR_DIST_FILENAME_ONLY_FOR_REMOVE = \
_("Only distribution filename without path allowed during remove. Current given: '%s'.")
ERR_NOT_STANDALONE_MODULE_ERROR = \
_("Module %s is not a standalone module (found in Package %s)")
ERR_UPT_ALREADY_RUNNING_ERROR = \
_("UPT is already running, only one instance is allowed")
ERR_MUL_DEC_ERROR = _("Multiple DEC files found within one package directory tree %s: %s, %s")
ERR_INSTALL_FILE_FROM_EMPTY_CONTENT = _("Error file to be installed is not found in content file: %s")
ERR_INSTALL_FILE_DEC_FILE_ERROR = _("Could not obtain the TokenSpaceGuidCName and the PcdCName from the DEC files "
"that the package depends on for this pcd entry: TokenValue: %s Token: %s")
ERR_NOT_SUPPORTED_SA_MODULE = _("Stand-alone module distribution does not allow EDK 1 INF")
ERR_INSTALL_DIST_NOT_FOUND = \
_("Distribution file to be installed is not found in current working directory or workspace: %s")
ERR_REPLACE_DIST_NOT_FOUND = \
_("Distribution file for replace function was not found in the current working directory or workspace: %s")
ERR_DIST_FILENAME_ONLY_FOR_REPLACE_ORIG = \
_("Only a distribution file name without a path is allowed for "
"the distribution to be replaced during replace. Current given: '%s'.")
ERR_UNIPARSE_DBLQUOTE_UNMATCHED = \
    _("Only a Language entry can contain a pair of matched quotes in one line")
ERR_UNIPARSE_NO_SECTION_EXIST = _("No PackageDef or ModuleDef section exists in the UNI file.")
ERR_UNIPARSE_STRNAME_FORMAT_ERROR = _("The String Token Name %s must start with \"STR_\"")
ERR_UNIPARSE_SEP_LANGENTRY_LINE = _("Each <LangEntry> should be in a separate line :%s.")
ERR_UNIPARSE_MULTI_ENTRY_EXIST = \
    _("Duplicate entries %s exist in the UNI file; each kind of entry may appear only once.")
ERR_UNIPARSE_ENTRY_ORDER_WRONG = \
_("The string entry order in UNI file should be <AbstractStrings>, <DescriptionStrings>, \
<BinaryAbstractStrings>, <BinaryDescriptionStrings>.")
ERR_UNIPARSE_STRTOKEN_FORMAT_ERROR = _("The String Token Type %s must be one of the '_PROMPT', '_HELP' and '_ERR_'.")
ERR_UNIPARSE_LINEFEED_UNDER_EXIST = _("Line feed should not exist under this line: %s.")
ERR_UNIPARSE_LINEFEED_UP_EXIST = _("Line feed should not exist up this line: %s.")
ERR_UNI_MISS_STRING_ENTRY = _("String entry missed in this Entry, %s.")
ERR_UNI_MISS_LANGENTRY = _("Language entry missed in this Entry, %s.")
ERR_BINARY_HEADER_ORDER = _("Binary header must follow the file header.")
ERR_NO_SOURCE_HEADER = _("File header statement \"## @file\" must exist at the first place.")
ERR_UNI_FILE_SUFFIX_WRONG = _("The UNI file must have an extension of '.uni', '.UNI' or '.Uni'")
ERR_UNI_FILE_NAME_INVALID = _("The use of '..', '../' and './' in the UNI file is prohibited.")
ERR_UNI_SUBGUID_VALUE_DEFINE_DEC_NOT_FOUND = _("There are no DEC file to define the GUID value for \
this GUID CName: '%s'.")
#
# Expression error message
#
ERR_EXPR_RIGHT_PAREN = \
_('Missing ")" in expression "%s".')
ERR_EXPR_FACTOR = \
_('"%s" is expected to be HEX, integer, macro, quoted string or PcdName in '
'expression "%s".')
ERR_EXPR_STRING_ITEM = \
_('"%s" is expected to be HEX, integer, macro, quoted string or PcdName in '
'expression [%s].')
ERR_EXPR_EQUALITY = \
_('"%s" is expected to be ==, EQ, != or NE in expression "%s".')
ERR_EXPR_BOOLEAN = \
_('The string "%s" in expression "%s" can not be recognized as a part of the logical expression.')
ERR_EXPR_EMPTY = _('Boolean value cannot be empty.')
ERR_EXPRESS_EMPTY = _('Expression can not be empty.')
ERR_EXPR_LOGICAL = \
_('The following is not a valid logical expression: "%s".')
ERR_EXPR_OR = _('The expression: "%s" must be encapsulated in open "(" and close ")" '
'parenthesis when using | or ||.')
ERR_EXPR_RANGE = \
_('The following is not a valid range expression: "%s".')
ERR_EXPR_RANGE_FACTOR = \
_('"%s" is expected to be HEX, integer in valid range expression "%s".')
ERR_EXPR_RANGE_DOUBLE_PAREN_NESTED = \
    _('Nested double parentheses are not allowed in a valid range expression: "%s".')
ERR_EXPR_RANGE_EMPTY = _('Valid range can not be empty.')
ERR_EXPR_LIST_EMPTY = _('Valid list can not be empty.')
ERR_PAREN_NOT_USED = _('Parentheses must be used on both sides of "OR", "AND" in a valid range: %s.')
ERR_EXPR_LIST = \
_('The following is not a valid list expression: "%s".')
#
# DEC parser error message
#
ERR_DECPARSE_STATEMENT_EMPTY = \
_('Must have at least one statement in section %s.')
ERR_DECPARSE_DEFINE_DEFINED = \
_('%s already defined in define section.')
ERR_DECPARSE_DEFINE_SECNAME = \
    _('No arch or other qualifiers may follow a define section name.')
ERR_DECPARSE_DEFINE_MULTISEC = \
_('The DEC file does not allow multiple define sections.')
ERR_DECPARSE_DEFINE_REQUIRED = \
_("Field [%s] is required in define section.")
ERR_DECPARSE_DEFINE_FORMAT = \
_("Wrong define section format, must be KEY = Value.")
ERR_DECPARSE_DEFINE_UNKNOWKEY = \
_("Unknown key [%s] in define section.")
ERR_DECPARSE_DEFINE_SPEC = \
_("Specification value must be HEX numbers or decimal numbers.")
ERR_DECPARSE_DEFINE_PKGNAME = \
_("Package name must be AlphaNumeric characters.")
ERR_DECPARSE_DEFINE_PKGGUID = \
_("GUID format error, must be HEX value with form 8-4-4-4-12.")
ERR_DECPARSE_DEFINE_PKGVERSION = \
    _("Version number must be a decimal number.")
ERR_DECPARSE_DEFINE_PKGVUNI = \
_("UNI file name format error or file does not exist.")
ERR_DECPARSE_INCLUDE = \
_("Incorrect path: [%s].")
ERR_DECPARSE_LIBCLASS_SPLIT = \
_("Library class format error, must be Libraryclass|Headerpath.")
ERR_DECPARSE_LIBCLASS_EMPTY = \
_("Class name or file name must not be empty.")
ERR_DECPARSE_LIBCLASS_LIB = \
_("Class name format error, must start with upper case letter followed with "
"zero or more alphanumeric characters.")
ERR_DECPARSE_LIBCLASS_PATH_EXT = _("File name must be end with .h.")
ERR_DECPARSE_LIBCLASS_PATH_DOT = _("Path must not include '..'.")
ERR_DECPARSE_LIBCLASS_PATH_EXIST = _("File name [%s] does not exist.")
ERR_DECPARSE_PCD_CVAR_GUID = \
_("TokenSpaceGuidCName must be valid C variable format.")
ERR_DECPARSE_PCD_SPLIT = \
_("Incorrect PcdName. The format must be TokenSpaceGuidCName.PcdCName"
"|PcdData|PcdType|Token.")
ERR_DECPARSE_PCD_NAME = \
_("Incorrect PCD name. The correct format must be "
"<TokenSpaceGuidCName>.<PcdCName>.")
ERR_DECPARSE_PCD_CVAR_PCDCNAME = \
_("PcdCName must be valid C variable format.")
ERR_DECPARSE_PCD_TYPE = \
_('Incorrect PCD data type. A PCD data type must be one of '
'"UINT8", "UINT16", "UINT32", "UINT64", "VOID*", "BOOLEAN".')
ERR_DECPARSE_PCD_VOID = \
    _("Incorrect value [%s] of type [%s]. Value must be printable and in the "
      "form of {...} for array, or \"...\" for string, or L\"...\" "
      "for unicode string.")
ERR_DECPARSE_PCD_VALUE_EMPTY = \
_("Pcd value can not be empty.")
ERR_DECPARSE_PCD_BOOL = \
_("Invalid value [%s] of type [%s]; must be expression, TRUE, FALSE, 0 or 1.")
ERR_DECPARSE_PCD_INT = _("Incorrect value [%s] of type [%s]."\
" Value must be a hexadecimal, decimal or octal in C language format.")
ERR_DECPARSE_PCD_INT_NEGTIVE = _("Incorrect value [%s] of type [%s];"
" must not be signed number.")
ERR_DECPARSE_PCD_INT_EXCEED = _("Incorrect value [%s] of type [%s]; "
"the number is too long for this type.")
ERR_DECPARSE_PCD_FEATUREFLAG = \
    _("PcdFeatureFlag only allows the BOOLEAN type.")
ERR_DECPARSE_PCD_TOKEN = \
_("An incorrect PCD token found: [%s]. "
"It must start with 0x followed by 1 - 8 hexadecimal. ")
ERR_DECPARSE_PCD_TOKEN_INT = _("Incorrect token number [%s]. "
"This token number exceeds the maximal value of unsigned 32.")
ERR_DECPARSE_PCD_TOKEN_UNIQUE = _("Token number must be unique to the token space: %s.")
ERR_DECPARSE_CGUID = \
_("No GUID name or value specified, must be <CName> = <GuidValueInCFormat>.")
ERR_DECPARSE_CGUID_NAME = \
_("No GUID name specified, must be <CName> = <GuidValueInCFormat>.")
ERR_DECPARSE_CGUID_GUID = \
_("No GUID value specified, must be <CName> = <GuidValueInCFormat>.")
ERR_DECPARSE_CGUID_GUIDFORMAT = \
_("Incorrect GUID value format, must be <GuidValueInCFormat:"
"{8,4,4,{2,2,2,2,2,2,2,2}}>.")
ERR_DECPARSE_CGUID_NOT_FOUND = _("Unable to find the GUID value of this GUID CName : '%s'.")
ERR_DECPARSE_FILEOPEN = _("Unable to open: [%s].")
ERR_DECPARSE_SECTION_EMPTY = _("Empty sections are not allowed.")
ERR_DECPARSE_SECTION_UE = _("Incorrect UserExtensions format. "
    "Must be UserExtensions.UserId.IdString[.Arch]+.")
ERR_DECPARSE_SECTION_UE_USERID = _("Invalid UserId, must be underscore "
    "or alphanumeric characters.")
ERR_DECPARSE_SECTION_UE_IDSTRING = \
_("Incorrect IdString, must be \" ... \".")
ERR_DECPARSE_ARCH = \
_("Unknown arch, must be 'common' or start with upper case letter followed by"
" zero or more upper case letters and numbers.")
ERR_DECPARSE_SECTION_COMMA = _("Section cannot end with comma.")
ERR_DECPARSE_SECTION_COMMON = \
_("'COMMON' must not be used with specific ARCHs in the same section.")
ERR_DECPARSE_SECTION_IDENTIFY = \
_("Section header must start with and end with brackets[].")
ERR_DECPARSE_SECTION_SUBEMPTY = \
_("Missing a sub-section name in section: [%s]. "
"All sub-sections need to have names. ")
ERR_DECPARSE_SECTION_SUBTOOMANY = _("Too many DOT splits in [%s].")
ERR_DECPARSE_SECTION_UNKNOW = _("Section name [%s] unknown.")
ERR_DECPARSE_SECTION_FEATUREFLAG = \
_("[%s] must not be in the same section as other types of PCD.")
ERR_DECPARSE_MACRO_PAIR = _("No macro name/value given.")
ERR_DECPARSE_MACRO_NAME = _("No macro name given.")
ERR_DECPARSE_MACRO_NAME_UPPER = \
_("Macro name must start with upper case letter followed "
"by zero or more upper case letters or numbers. Current macro name is: [%s].")
ERR_DECPARSE_SECTION_NAME = \
_('Cannot mix different section names %s.')
ERR_DECPARSE_BACKSLASH = \
_('Backslash must be the last character on a line and '
'preceded by a space character.')
ERR_DECPARSE_BACKSLASH_EMPTY = \
    _('An empty line is not allowed after a line that ends with a backslash.')
ERR_DECPARSE_REDEFINE = _(
"\"%s\" already defined in line %d.")
ERR_DECPARSE_MACRO_RESOLVE = _("Macro %s in %s cannot be resolved.")
ERR_DECPARSE_UE_DUPLICATE = \
_("Duplicated UserExtensions header found.")
ERR_DECPARSE_PCDERRORMSG_MISS_VALUE_SPLIT = \
_("Missing '|' between Pcd's error code and Pcd's error message.")
ERR_DECPARSE_PCD_MISS_ERRORMSG = \
_("Missing Pcd's error message.")
ERR_DECPARSE_PCD_UNMATCHED_ERRORCODE = \
    _("There is no error message matched with this PCD error code: %s in either the DEC or UNI file.")
ERR_DECPARSE_PCD_NODEFINED = _("The PCD : %s used in the Expression is undefined.")
#
# Used to print the current line content which cause error raise.
# Be attached to the end of every error message above.
#
ERR_DECPARSE_LINE = _(" Parsing line: \"%s\".")
#
# Warning related strings.
#
WRN_PACKAGE_EXISTED = _(
"A package with this GUID and Version already exists: "
"GUID %s, Version %s.")
WRN_MODULE_EXISTED = _("This module already exists: %s")
WRN_FILE_EXISTED = _("This file already exists: %s")
WRN_FILE_NOT_OVERWRITTEN = \
    _("This file already exists and cannot be overwritten: %s")
WRN_DIST_PKG_INSTALLED = _("This distribution package %s has previously been installed.")
WRN_DIST_NOT_FOUND = _(
"Distribution is not found at location %s")
WRN_MULTI_PCD_RANGES = _(
    "A PCD can only have one of the @ValidRange, @ValidList, and @Expression comments")
WRN_MULTI_PCD_VALIDVALUE = _(
    "A PCD can only have one @ValidList comment")
WRN_MULTI_PCD_PROMPT = _(
    "A PCD can only have one @Prompt comment")
WRN_MISSING_USAGE = _("Missing usage")
WRN_INVALID_GUID_TYPE = _("This is and incorrect Guid type: %s")
WRN_MISSING_GUID_TYPE = _("Missing Guid Type")
WRN_INVALID_USAGE = _("This is an incorrect Usage: %s")
WRN_INF_PARSER_MODULE_INVALID_HOB_TYPE = \
_("This is an incorrect HOB type: %s")
WRN_INF_PARSER_MODULE_INVALID_EVENT_TYPE = \
_("This is an incorrect EVENT type: %s")
WRN_INF_PARSER_MODULE_INVALID_BOOTMODE_TYPE = \
_("This is an incorrect BOOTMODE type: %s")
WRN_INVALID_MODULE_TYPE = \
_("This is an incorrect Module type: %s")
WRN_MODULE_PARSE_FAILED = \
_("Parsing of this module did not complete correctly: %s.")
WRN_EDK1_INF_FOUND = \
_("EDK 1 module file found: %s")
WRN_INVALID_COPYRIGHT = \
    _("Copyright information is not correct")
WARN_SPECIAL_SECTION_LOCATION_WRONG = _("Warning. A special section should be "
"at the end of a file or at the end of a section.")
WARN_INSTALLED_PACKAGE_NOT_FOUND = \
_("File not found. The DEC file for a package cannot be found in GUID/Version/Install path: %s %s %s")
WARN_CUSTOMPATH_OVERRIDE_USEGUIDEDPATH = \
_("option selection of --custom-path will override the option --use-guided-paths")
#
# Help related strings.
#
HLP_PRINT_DEBUG_INFO = _(
"Print DEBUG statements, where DEBUG_LEVEL is 0-9")
HLP_PRINT_INFORMATIONAL_STATEMENT = _("Print informational statements")
HLP_RETURN_NO_DISPLAY = _(
"Returns only the exit code, informational and error messages are"
" not displayed")
HLP_RETURN_AND_DISPLAY = _(
"Returns the exit code and displays error messages only")
HLP_SPECIFY_PACKAGE_NAME_INSTALL = _(
"Specify the UEFI Distribution Package filename to install")
HLP_SPECIFY_PACKAGE_NAME_CREATE = _(
"Specify the UEFI Distribution Package filename to create")
HLP_SPECIFY_PACKAGE_NAME_REMOVE = _(
"Specify the UEFI Distribution Package filename to remove")
HLP_SPECIFY_TEMPLATE_NAME_CREATE = _(
"Specify Package Information Data filename to create package")
HLP_SPECIFY_DEC_NAME_CREATE = _(
"Specify dec file names to create package")
HLP_SPECIFY_INF_NAME_CREATE = _(
"Specify inf file names to create package")
HLP_LIST_DIST_INSTALLED = _(
"List the UEFI Distribution Packages that have been installed")
HLP_NO_SUPPORT_GUI = _(
"Starting the tool in graphical mode is not supported in this version")
HLP_DISABLE_PROMPT = _(
"Disable user prompts for removing modified files. Valid only when -r is present")
HLP_CUSTOM_PATH_PROMPT = _(
"Enable user prompting for alternate installation directories")
HLP_SKIP_LOCK_CHECK = _(
"Skip the check for multiple instances")
HLP_SPECIFY_PACKAGE_NAME_REPLACE = _(
"Specify the UEFI Distribution Package file name to replace the existing file name")
HLP_SPECIFY_PACKAGE_NAME_TO_BE_REPLACED = _(
"Specify the UEFI Distribution Package file name to be replaced")
HLP_USE_GUIDED_PATHS = _(
"Install packages to the following directory path by default: <PackageName>_<PACKAGE_GUID>_<PACKAGE_VERSION>")
HLP_TEST_INSTALL = _(
"Specify the UEFI Distribution Package filenames to install")
MSG_TEST_INSTALL_PASS = _("All distribution package files satisfy the dependency check.")
MSG_TEST_INSTALL_FAIL = _("Not all distribution package files satisfy the dependency check.")
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Logger/StringTable.py
|
## @file
# This file implements the log mechanism for Python tools.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
Logger
'''
## Import modules
from sys import argv
from sys import stdout
from sys import stderr
import os.path
from os import remove
from logging import getLogger
from logging import Formatter
from logging import StreamHandler
from logging import FileHandler
from traceback import extract_stack
from Logger.ToolError import FatalError
from Logger.ToolError import WARNING_AS_ERROR
from Logger.ToolError import gERROR_MESSAGE
from Logger.ToolError import UNKNOWN_ERROR
from Library import GlobalData
#
# Log level constants
#
DEBUG_0 = 1
DEBUG_1 = 2
DEBUG_2 = 3
DEBUG_3 = 4
DEBUG_4 = 5
DEBUG_5 = 6
DEBUG_6 = 7
DEBUG_7 = 8
DEBUG_8 = 9
DEBUG_9 = 10
VERBOSE = 15
INFO = 20
WARN = 30
QUIET = 40
QUIET_1 = 41
ERROR = 50
SILENT = 60
IS_RAISE_ERROR = True
SUPRESS_ERROR = False
#
# Tool name
#
_TOOL_NAME = os.path.basename(argv[0])
#
# For validation purpose
#
_LOG_LEVELS = [DEBUG_0, DEBUG_1, DEBUG_2, DEBUG_3, DEBUG_4, DEBUG_5, DEBUG_6, \
DEBUG_7, DEBUG_8, DEBUG_9, VERBOSE, WARN, INFO, ERROR, QUIET, \
QUIET_1, SILENT]
#
# For DEBUG level (All DEBUG_0~9 are applicable)
#
_DEBUG_LOGGER = getLogger("tool_debug")
_DEBUG_FORMATTER = Formatter("[%(asctime)s.%(msecs)d]: %(message)s", \
datefmt="%H:%M:%S")
#
# For VERBOSE, INFO, WARN level
#
_INFO_LOGGER = getLogger("tool_info")
_INFO_FORMATTER = Formatter("%(message)s")
#
# For ERROR level
#
_ERROR_LOGGER = getLogger("tool_error")
_ERROR_FORMATTER = Formatter("%(message)s")
#
# String templates for ERROR/WARN/DEBUG log message
#
_ERROR_MESSAGE_TEMPLATE = \
('\n\n%(tool)s...\n%(file)s(%(line)s): error %(errorcode)04X: %(msg)s\n\t%(extra)s')
__ERROR_MESSAGE_TEMPLATE_WITHOUT_FILE = \
'\n\n%(tool)s...\n : error %(errorcode)04X: %(msg)s\n\t%(extra)s'
_WARNING_MESSAGE_TEMPLATE = '%(tool)s...\n%(file)s(%(line)s): warning: %(msg)s'
_WARNING_MESSAGE_TEMPLATE_WITHOUT_FILE = '%(tool)s: : warning: %(msg)s'
_DEBUG_MESSAGE_TEMPLATE = '%(file)s(%(line)s): debug: \n %(msg)s'
#
# Log INFO message
#
#Info = _INFO_LOGGER.info
def Info(msg, *args, **kwargs):
_INFO_LOGGER.info(msg, *args, **kwargs)
#
# Log information which should always be put out
#
def Quiet(msg, *args, **kwargs):
_ERROR_LOGGER.error(msg, *args, **kwargs)
## Log debug message
#
# @param Level DEBUG level (DEBUG0~9)
# @param Message Debug information
# @param ExtraData More information associated with "Message"
#
def Debug(Level, Message, ExtraData=None):
if _DEBUG_LOGGER.level > Level:
return
if Level > DEBUG_9:
return
#
# Find out the caller method information
#
CallerStack = extract_stack()[-2]
TemplateDict = {
"file" : CallerStack[0],
"line" : CallerStack[1],
"msg" : Message,
}
if ExtraData is not None:
LogText = _DEBUG_MESSAGE_TEMPLATE % TemplateDict + "\n %s" % ExtraData
else:
LogText = _DEBUG_MESSAGE_TEMPLATE % TemplateDict
_DEBUG_LOGGER.log(Level, LogText)
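#
# Usage sketch (illustrative, not part of the original module): a message is
# emitted only when the debug logger's level is at or below the given DEBUG
# level, e.g. after SetLevel(DEBUG_2):
#   Debug(DEBUG_2, "Parsing section", ExtraData="[Defines]")
#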
## Log verbose message
#
# @param Message Verbose information
#
def Verbose(Message):
return _INFO_LOGGER.log(VERBOSE, Message)
## Log warning message
#
# Warning messages indicate something that might be wrong but won't fail the tool.
#
# @param ToolName The name of the tool. If not given, the name of caller
# method will be used.
# @param Message Warning information
# @param File The name of file which caused the warning.
# @param Line The line number in the "File" which caused the warning.
# @param ExtraData More information associated with "Message"
#
def Warn(ToolName, Message, File=None, Line=None, ExtraData=None):
if _INFO_LOGGER.level > WARN:
return
#
# if no tool name given, use caller's source file name as tool name
#
if ToolName is None or ToolName == "":
ToolName = os.path.basename(extract_stack()[-2][0])
if Line is None:
Line = "..."
else:
Line = "%d" % Line
TemplateDict = {
"tool" : ToolName,
"file" : File,
"line" : Line,
"msg" : Message,
}
if File is not None:
LogText = _WARNING_MESSAGE_TEMPLATE % TemplateDict
else:
LogText = _WARNING_MESSAGE_TEMPLATE_WITHOUT_FILE % TemplateDict
if ExtraData is not None:
LogText += "\n %s" % ExtraData
_INFO_LOGGER.log(WARN, LogText)
#
# Raise an exception if indicated
#
    if GlobalData.gWARNING_AS_ERROR:
raise FatalError(WARNING_AS_ERROR)
## Log ERROR message
#
# Once an error message is logged, the tool's execution will be broken by
# raising an exception. If you don't want to break the execution, you
# can pass "RaiseError" with a "False" value.
#
# @param ToolName The name of the tool. If not given, the name of caller
# method will be used.
# @param ErrorCode The error code
# @param Message The error information
# @param File The name of file which caused the error.
# @param Line The line number in the "File" which caused the error.
# @param ExtraData More information associated with "Message"
# @param RaiseError Raise an exception to break the tool's execution if
# it's True. This is the default behavior.
#
def Error(ToolName, ErrorCode, Message=None, File=None, Line=None, \
ExtraData=None, RaiseError=IS_RAISE_ERROR):
if ToolName:
pass
if Line is None:
Line = "..."
else:
Line = "%d" % Line
if Message is None:
if ErrorCode in gERROR_MESSAGE:
Message = gERROR_MESSAGE[ErrorCode]
else:
Message = gERROR_MESSAGE[UNKNOWN_ERROR]
if ExtraData is None:
ExtraData = ""
TemplateDict = {
"tool" : _TOOL_NAME,
"file" : File,
"line" : Line,
"errorcode" : ErrorCode,
"msg" : Message,
"extra" : ExtraData
}
if File is not None:
LogText = _ERROR_MESSAGE_TEMPLATE % TemplateDict
else:
LogText = __ERROR_MESSAGE_TEMPLATE_WITHOUT_FILE % TemplateDict
if not SUPRESS_ERROR:
_ERROR_LOGGER.log(ERROR, LogText)
if RaiseError:
raise FatalError(ErrorCode)
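#
# Usage sketch (illustrative; the file name is hypothetical). FatalError
# propagates unless RaiseError=False is passed:
#   Error("UPT", UNKNOWN_ERROR, Message="Something failed",
#         File="Pkg.dec", Line=10, RaiseError=False)
#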
## Initialize log system
#
def Initialize():
#
# Since we use different format to log different levels of message into
# different place (stdout or stderr), we have to use different "Logger"
# objects to do this.
#
# For DEBUG level (All DEBUG_0~9 are applicable)
_DEBUG_LOGGER.setLevel(INFO)
_DebugChannel = StreamHandler(stdout)
_DebugChannel.setFormatter(_DEBUG_FORMATTER)
_DEBUG_LOGGER.addHandler(_DebugChannel)
#
# For VERBOSE, INFO, WARN level
#
_INFO_LOGGER.setLevel(INFO)
_InfoChannel = StreamHandler(stdout)
_InfoChannel.setFormatter(_INFO_FORMATTER)
_INFO_LOGGER.addHandler(_InfoChannel)
#
# For ERROR level
#
_ERROR_LOGGER.setLevel(INFO)
_ErrorCh = StreamHandler(stderr)
_ErrorCh.setFormatter(_ERROR_FORMATTER)
_ERROR_LOGGER.addHandler(_ErrorCh)
## Set log level
#
# @param Level One of log level in _LogLevel
#
def SetLevel(Level):
if Level not in _LOG_LEVELS:
Info("Not supported log level (%d). Use default level instead." % \
Level)
Level = INFO
_DEBUG_LOGGER.setLevel(Level)
_INFO_LOGGER.setLevel(Level)
_ERROR_LOGGER.setLevel(Level)
## Get current log level
#
def GetLevel():
return _INFO_LOGGER.getEffectiveLevel()
## Treat warnings as errors
#
def SetWarningAsError():
GlobalData.gWARNING_AS_ERROR = True
## Specify a file to store the log message as well as put on console
#
# @param LogFile The file path used to store the log message
#
def SetLogFile(LogFile):
if os.path.exists(LogFile):
remove(LogFile)
_Ch = FileHandler(LogFile)
_Ch.setFormatter(_DEBUG_FORMATTER)
_DEBUG_LOGGER.addHandler(_Ch)
_Ch = FileHandler(LogFile)
_Ch.setFormatter(_INFO_FORMATTER)
_INFO_LOGGER.addHandler(_Ch)
_Ch = FileHandler(LogFile)
_Ch.setFormatter(_ERROR_FORMATTER)
_ERROR_LOGGER.addHandler(_Ch)
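#
# Typical initialization sketch for a calling tool (illustrative only; the
# log file name is hypothetical):
#   Initialize()
#   SetLevel(VERBOSE)
#   SetLogFile("upt.log")
#   Info("Tool started")
#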
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Logger/Log.py
|
## @file
# Python 'Logger' package initialization file.
#
# This file is required to make Python interpreter treat the directory
# as containing package.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
Logger
'''
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Logger/__init__.py
|
## @file
# Standardized Error Handling infrastructures.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
ToolError
'''
import Logger.StringTable as ST
FILE_OPEN_FAILURE = 1
FILE_WRITE_FAILURE = 2
FILE_PARSE_FAILURE = 3
FILE_READ_FAILURE = 4
FILE_CREATE_FAILURE = 5
FILE_CHECKSUM_FAILURE = 6
FILE_COMPRESS_FAILURE = 7
FILE_DECOMPRESS_FAILURE = 8
FILE_MOVE_FAILURE = 9
FILE_DELETE_FAILURE = 10
FILE_COPY_FAILURE = 11
FILE_POSITIONING_FAILURE = 12
FILE_ALREADY_EXIST = 13
FILE_NOT_FOUND = 14
FILE_TYPE_MISMATCH = 15
FILE_CASE_MISMATCH = 16
FILE_DUPLICATED = 17
FILE_UNKNOWN_ERROR = 0x0FFF
OPTION_UNKNOWN = 0x1000
OPTION_MISSING = 0x1001
OPTION_CONFLICT = 0x1002
OPTION_VALUE_INVALID = 0x1003
OPTION_DEPRECATED = 0x1004
OPTION_NOT_SUPPORTED = 0x1005
OPTION_UNKNOWN_ERROR = 0x1FFF
PARAMETER_INVALID = 0x2000
PARAMETER_MISSING = 0x2001
PARAMETER_UNKNOWN_ERROR = 0x2FFF
FORMAT_INVALID = 0x3000
FORMAT_NOT_SUPPORTED = 0x3001
FORMAT_UNKNOWN = 0x3002
FORMAT_UNKNOWN_ERROR = 0x3FFF
RESOURCE_NOT_AVAILABLE = 0x4000
RESOURCE_ALLOCATE_FAILURE = 0x4001
RESOURCE_FULL = 0x4002
RESOURCE_OVERFLOW = 0x4003
RESOURCE_UNDERRUN = 0x4004
RESOURCE_UNKNOWN_ERROR = 0x4FFF
ATTRIBUTE_NOT_AVAILABLE = 0x5000
ATTRIBUTE_GET_FAILURE = 0x5001
ATTRIBUTE_SET_FAILURE = 0x5002
ATTRIBUTE_UPDATE_FAILURE = 0x5003
ATTRIBUTE_ACCESS_DENIED = 0x5004
ATTRIBUTE_UNKNOWN_ERROR = 0x5FFF
ATTRIBUTE_RETRIEVE_FAILURE = 0x5F00
IO_NOT_READY = 0x6000
IO_BUSY = 0x6001
IO_TIMEOUT = 0x6002
IO_UNKNOWN_ERROR = 0x6FFF
COMMAND_FAILURE = 0x7000
CODE_ERROR = 0xC0DE
AUTOGEN_ERROR = 0xF000
PARSER_ERROR = 0xF001
BUILD_ERROR = 0xF002
GENFDS_ERROR = 0xF003
ECC_ERROR = 0xF004
EOT_ERROR = 0xF005
DDC_ERROR = 0xF009
WARNING_AS_ERROR = 0xF006
MIGRATION_ERROR = 0xF010
EDK1_INF_ERROR = 0xF011
ABORT_ERROR = 0xFFFE
UNKNOWN_ERROR = 0xFFFF
UPT_ALREADY_INSTALLED_ERROR = 0xD000
UPT_ENVIRON_MISSING_ERROR = 0xD001
UPT_REPKG_ERROR = 0xD002
UPT_ALREADY_RUNNING_ERROR = 0xD003
UPT_MUL_DEC_ERROR = 0xD004
UPT_DB_UPDATE_ERROR = 0xD005
UPT_INI_PARSE_ERROR = 0xE000
## Error message of each error code
#
gERROR_MESSAGE = {
FILE_NOT_FOUND : ST.ERR_FILE_NOT_FOUND,
FILE_OPEN_FAILURE : ST.ERR_FILE_OPEN_FAILURE,
FILE_WRITE_FAILURE : ST.ERR_FILE_WRITE_FAILURE,
FILE_PARSE_FAILURE : ST.ERR_FILE_PARSE_FAILURE,
FILE_READ_FAILURE : ST.ERR_FILE_READ_FAILURE,
FILE_CREATE_FAILURE : ST.ERR_FILE_CREATE_FAILURE,
FILE_CHECKSUM_FAILURE : ST.ERR_FILE_CHECKSUM_FAILURE,
FILE_COMPRESS_FAILURE : ST.ERR_FILE_COMPRESS_FAILURE,
FILE_DECOMPRESS_FAILURE : ST.ERR_FILE_DECOMPRESS_FAILURE,
FILE_MOVE_FAILURE : ST.ERR_FILE_MOVE_FAILURE,
FILE_DELETE_FAILURE : ST.ERR_FILE_DELETE_FAILURE,
FILE_COPY_FAILURE : ST.ERR_FILE_COPY_FAILURE,
FILE_POSITIONING_FAILURE: ST.ERR_FILE_POSITIONING_FAILURE,
FILE_ALREADY_EXIST : ST.ERR_FILE_ALREADY_EXIST,
FILE_TYPE_MISMATCH : ST.ERR_FILE_TYPE_MISMATCH ,
FILE_CASE_MISMATCH : ST.ERR_FILE_CASE_MISMATCH,
FILE_DUPLICATED : ST.ERR_FILE_DUPLICATED,
FILE_UNKNOWN_ERROR : ST.ERR_FILE_UNKNOWN_ERROR,
OPTION_UNKNOWN : ST.ERR_OPTION_UNKNOWN,
OPTION_MISSING : ST.ERR_OPTION_MISSING,
OPTION_CONFLICT : ST.ERR_OPTION_CONFLICT,
OPTION_VALUE_INVALID : ST.ERR_OPTION_VALUE_INVALID,
OPTION_DEPRECATED : ST.ERR_OPTION_DEPRECATED,
OPTION_NOT_SUPPORTED : ST.ERR_OPTION_NOT_SUPPORTED,
OPTION_UNKNOWN_ERROR : ST.ERR_OPTION_UNKNOWN_ERROR,
PARAMETER_INVALID : ST.ERR_PARAMETER_INVALID,
PARAMETER_MISSING : ST.ERR_PARAMETER_MISSING,
PARAMETER_UNKNOWN_ERROR : ST.ERR_PARAMETER_UNKNOWN_ERROR,
FORMAT_INVALID : ST.ERR_FORMAT_INVALID,
FORMAT_NOT_SUPPORTED : ST.ERR_FORMAT_NOT_SUPPORTED,
FORMAT_UNKNOWN : ST.ERR_FORMAT_UNKNOWN,
FORMAT_UNKNOWN_ERROR : ST.ERR_FORMAT_UNKNOWN_ERROR,
RESOURCE_NOT_AVAILABLE : ST.ERR_RESOURCE_NOT_AVAILABLE,
RESOURCE_ALLOCATE_FAILURE : ST.ERR_RESOURCE_ALLOCATE_FAILURE,
RESOURCE_FULL : ST.ERR_RESOURCE_FULL,
RESOURCE_OVERFLOW : ST.ERR_RESOURCE_OVERFLOW,
RESOURCE_UNDERRUN : ST.ERR_RESOURCE_UNDERRUN,
RESOURCE_UNKNOWN_ERROR : ST.ERR_RESOURCE_UNKNOWN_ERROR,
ATTRIBUTE_NOT_AVAILABLE : ST.ERR_ATTRIBUTE_NOT_AVAILABLE,
ATTRIBUTE_RETRIEVE_FAILURE : ST.ERR_ATTRIBUTE_RETRIEVE_FAILURE,
ATTRIBUTE_SET_FAILURE : ST.ERR_ATTRIBUTE_SET_FAILURE,
ATTRIBUTE_UPDATE_FAILURE: ST.ERR_ATTRIBUTE_UPDATE_FAILURE,
ATTRIBUTE_ACCESS_DENIED : ST.ERR_ATTRIBUTE_ACCESS_DENIED,
ATTRIBUTE_UNKNOWN_ERROR : ST.ERR_ATTRIBUTE_UNKNOWN_ERROR,
COMMAND_FAILURE : ST.ERR_COMMAND_FAILURE,
IO_NOT_READY : ST.ERR_IO_NOT_READY,
IO_BUSY : ST.ERR_IO_BUSY,
IO_TIMEOUT : ST.ERR_IO_TIMEOUT,
IO_UNKNOWN_ERROR : ST.ERR_IO_UNKNOWN_ERROR,
UNKNOWN_ERROR : ST.ERR_UNKNOWN_ERROR,
UPT_ALREADY_INSTALLED_ERROR : ST.ERR_UPT_ALREADY_INSTALLED_ERROR,
UPT_ENVIRON_MISSING_ERROR : ST.ERR_UPT_ENVIRON_MISSING_ERROR,
UPT_REPKG_ERROR : ST.ERR_UPT_REPKG_ERROR,
UPT_ALREADY_RUNNING_ERROR : ST.ERR_UPT_ALREADY_RUNNING_ERROR,
UPT_MUL_DEC_ERROR : ST.ERR_MUL_DEC_ERROR,
UPT_INI_PARSE_ERROR : ST.ERR_UPT_INI_PARSE_ERROR,
}
## Exception indicating a fatal error
#
class FatalError(Exception):
pass
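## Usage sketch (illustrative, mirroring the common UPT calling pattern;
# the error code travels in args[0]):
#   try:
#       raise FatalError(FILE_NOT_FOUND)
#   except FatalError as XExcept:
#       ReturnCode = XExcept.args[0]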
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Logger/ToolError.py
|
## @file
# Common routines used by all tools
#
# Copyright (c) 2011 - 2019, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
Misc
'''
##
# Import Modules
#
import os.path
from os import access
from os import F_OK
from os import makedirs
from os import getcwd
from os import chdir
from os import listdir
from os import remove
from os import rmdir
from os import linesep
from os import walk
from os import environ
import re
from collections import OrderedDict as Sdict
import Logger.Log as Logger
from Logger import StringTable as ST
from Logger import ToolError
from Library import GlobalData
from Library.DataType import SUP_MODULE_LIST
from Library.DataType import END_OF_LINE
from Library.DataType import TAB_SPLIT
from Library.DataType import TAB_LANGUAGE_EN_US
from Library.DataType import TAB_LANGUAGE_EN
from Library.DataType import TAB_LANGUAGE_EN_X
from Library.DataType import TAB_UNI_FILE_SUFFIXS
from Library.StringUtils import GetSplitValueList
from Library.ParserValidate import IsValidHexVersion
from Library.ParserValidate import IsValidPath
from Object.POM.CommonObject import TextObject
from Core.FileHook import __FileHookOpen__
from Common.MultipleWorkspace import MultipleWorkspace as mws
## Convert GUID string in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx style to C
# structure style
#
# @param Guid: The GUID string
#
def GuidStringToGuidStructureString(Guid):
GuidList = Guid.split('-')
Result = '{'
for Index in range(0, 3, 1):
Result = Result + '0x' + GuidList[Index] + ', '
Result = Result + '{0x' + GuidList[3][0:2] + ', 0x' + GuidList[3][2:4]
for Index in range(0, 12, 2):
Result = Result + ', 0x' + GuidList[4][Index:Index + 2]
Result += '}}'
return Result
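#
# Example (illustrative):
#   GuidStringToGuidStructureString('12345678-1234-1234-1234-123456789abc')
#   returns '{0x12345678, 0x1234, 0x1234, {0x12, 0x34, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc}}'
#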
## Check whether GUID string is of format xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue: The GUID value
#
def CheckGuidRegFormat(GuidValue):
## Regular expression used to find out register format of GUID
#
    RegFormatGuidPattern = re.compile(r"^\s*([0-9a-fA-F]){8}-"
                                      r"([0-9a-fA-F]){4}-"
                                      r"([0-9a-fA-F]){4}-"
                                      r"([0-9a-fA-F]){4}-"
                                      r"([0-9a-fA-F]){12}\s*$")
if RegFormatGuidPattern.match(GuidValue):
return True
else:
return False
## Convert GUID string in C structure style to
# xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue: The GUID value in C structure format
#
def GuidStructureStringToGuidString(GuidValue):
GuidValueString = GuidValue.lower().replace("{", "").replace("}", "").\
replace(" ", "").replace(";", "")
GuidValueList = GuidValueString.split(",")
if len(GuidValueList) != 11:
return ''
try:
return "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
int(GuidValueList[0], 16),
int(GuidValueList[1], 16),
int(GuidValueList[2], 16),
int(GuidValueList[3], 16),
int(GuidValueList[4], 16),
int(GuidValueList[5], 16),
int(GuidValueList[6], 16),
int(GuidValueList[7], 16),
int(GuidValueList[8], 16),
int(GuidValueList[9], 16),
int(GuidValueList[10], 16)
)
except BaseException:
return ''
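#
# Example: the inverse of the conversion above (illustrative):
#   GuidStructureStringToGuidString(
#       '{0x12345678, 0x1234, 0x1234, {0x12, 0x34, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc}}')
#   returns '12345678-1234-1234-1234-123456789abc'
#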
## Create directories
#
# @param Directory: The directory name
#
def CreateDirectory(Directory):
if Directory is None or Directory.strip() == "":
return True
try:
if not access(Directory, F_OK):
makedirs(Directory)
except BaseException:
return False
return True
## Remove directories, including files and sub-directories in it
#
# @param Directory: The directory name
#
def RemoveDirectory(Directory, Recursively=False):
if Directory is None or Directory.strip() == "" or not \
os.path.exists(Directory):
return
if Recursively:
CurrentDirectory = getcwd()
chdir(Directory)
for File in listdir("."):
if os.path.isdir(File):
RemoveDirectory(File, Recursively)
else:
remove(File)
chdir(CurrentDirectory)
rmdir(Directory)
## Store content in file
#
# This method is used to save file only when its content is changed. This is
# quite useful for "make" system to decide what will be re-built and what
# won't.
#
# @param File: The path of file
# @param Content: The new content of the file
# @param IsBinaryFile: The flag indicating if the file is binary file
# or not
#
def SaveFileOnChange(File, Content, IsBinaryFile=True):
if os.path.exists(File):
if IsBinaryFile:
try:
if Content == __FileHookOpen__(File, "rb").read():
return False
except BaseException:
Logger.Error(None, ToolError.FILE_OPEN_FAILURE, ExtraData=File)
else:
try:
if Content == __FileHookOpen__(File, "r").read():
return False
except BaseException:
Logger.Error(None, ToolError.FILE_OPEN_FAILURE, ExtraData=File)
CreateDirectory(os.path.dirname(File))
if IsBinaryFile:
try:
FileFd = __FileHookOpen__(File, "wb")
FileFd.write(Content)
FileFd.close()
except BaseException:
Logger.Error(None, ToolError.FILE_CREATE_FAILURE, ExtraData=File)
else:
try:
FileFd = __FileHookOpen__(File, "w")
FileFd.write(Content)
FileFd.close()
except BaseException:
Logger.Error(None, ToolError.FILE_CREATE_FAILURE, ExtraData=File)
return True
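#
# Usage sketch (illustrative; the path and content are hypothetical). The
# function returns True only when the content changed and was rewritten:
#   if SaveFileOnChange('Report.txt', ReportText, IsBinaryFile=False):
#       Logger.Info("Report updated")
#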
## Get all files of a directory
#
# @param Root: Root dir
# @param SkipList : The files need be skipped
#
def GetFiles(Root, SkipList=None, FullPath=True):
OriPath = os.path.normpath(Root)
FileList = []
for Root, Dirs, Files in walk(Root):
if SkipList:
for Item in SkipList:
if Item in Dirs:
Dirs.remove(Item)
if Item in Files:
Files.remove(Item)
for Dir in Dirs:
if Dir.startswith('.'):
Dirs.remove(Dir)
for File in Files:
if File.startswith('.'):
continue
File = os.path.normpath(os.path.join(Root, File))
if not FullPath:
File = File[len(OriPath) + 1:]
FileList.append(File)
return FileList
## Get all non-metadata files of a directory
#
# @param Root: Root Dir
# @param SkipList : List of path need be skipped
# @param FullPath: True if the returned file should be full path
# @param PrefixPath: the path that need to be added to the files found
# @return: the list of files found
#
def GetNonMetaDataFiles(Root, SkipList, FullPath, PrefixPath):
FileList = GetFiles(Root, SkipList, FullPath)
NewFileList = []
for File in FileList:
ExtName = os.path.splitext(File)[1]
#
# skip '.dec', '.inf', '.dsc', '.fdf' files
#
if ExtName.lower() not in ['.dec', '.inf', '.dsc', '.fdf']:
NewFileList.append(os.path.normpath(os.path.join(PrefixPath, File)))
return NewFileList
## Check if given file exists or not
#
# @param File: File name or path to be checked
# @param Ext: The expected file extension, or None to skip the extension check
#
def ValidFile(File, Ext=None):
File = File.replace('\\', '/')
if Ext is not None:
FileExt = os.path.splitext(File)[1]
if FileExt.lower() != Ext.lower():
return False
if not os.path.exists(File):
return False
return True
## RealPath
#
# @param File: File name or path to be checked
# @param Dir: The directory the file is relative to
# @param OverrideDir: The override directory
#
def RealPath(File, Dir='', OverrideDir=''):
NewFile = os.path.normpath(os.path.join(Dir, File))
NewFile = GlobalData.gALL_FILES[NewFile]
if not NewFile and OverrideDir:
NewFile = os.path.normpath(os.path.join(OverrideDir, File))
NewFile = GlobalData.gALL_FILES[NewFile]
return NewFile
## RealPath2
#
# @param File: File name or path to be checked
# @param Dir: The directory the file is relative to
# @param OverrideDir: The override directory
#
def RealPath2(File, Dir='', OverrideDir=''):
if OverrideDir:
NewFile = GlobalData.gALL_FILES[os.path.normpath(os.path.join\
(OverrideDir, File))]
if NewFile:
if OverrideDir[-1] == os.path.sep:
return NewFile[len(OverrideDir):], NewFile[0:len(OverrideDir)]
else:
return NewFile[len(OverrideDir) + 1:], \
NewFile[0:len(OverrideDir)]
NewFile = GlobalData.gALL_FILES[os.path.normpath(os.path.join(Dir, File))]
if NewFile:
if Dir:
if Dir[-1] == os.path.sep:
return NewFile[len(Dir):], NewFile[0:len(Dir)]
else:
return NewFile[len(Dir) + 1:], NewFile[0:len(Dir)]
else:
return NewFile, ''
return None, None
## CommonPath
#
# @param PathList: PathList
#
def CommonPath(PathList):
Path1 = min(PathList).split(os.path.sep)
Path2 = max(PathList).split(os.path.sep)
for Index in range(min(len(Path1), len(Path2))):
if Path1[Index] != Path2[Index]:
return os.path.sep.join(Path1[:Index])
return os.path.sep.join(Path1)
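#
# Example (illustrative, POSIX-style separators):
#   CommonPath(['/ws/PkgA/x.c', '/ws/PkgB/y.c']) returns '/ws'
#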
## PathClass
#
class PathClass(object):
def __init__(self, File='', Root='', AlterRoot='', Type='', IsBinary=False,
Arch='COMMON', ToolChainFamily='', Target='', TagName='', \
ToolCode=''):
self.Arch = Arch
self.File = str(File)
if os.path.isabs(self.File):
self.Root = ''
self.AlterRoot = ''
else:
self.Root = str(Root)
self.AlterRoot = str(AlterRoot)
#
# Remove any '.' and '..' in path
#
if self.Root:
self.Path = os.path.normpath(os.path.join(self.Root, self.File))
self.Root = os.path.normpath(CommonPath([self.Root, self.Path]))
#
# eliminate the side-effect of 'C:'
#
if self.Root[-1] == ':':
self.Root += os.path.sep
#
# file path should not start with path separator
#
if self.Root[-1] == os.path.sep:
self.File = self.Path[len(self.Root):]
else:
self.File = self.Path[len(self.Root) + 1:]
else:
self.Path = os.path.normpath(self.File)
self.SubDir, self.Name = os.path.split(self.File)
self.BaseName, self.Ext = os.path.splitext(self.Name)
if self.Root:
if self.SubDir:
self.Dir = os.path.join(self.Root, self.SubDir)
else:
self.Dir = self.Root
else:
self.Dir = self.SubDir
if IsBinary:
self.Type = Type
else:
self.Type = self.Ext.lower()
self.IsBinary = IsBinary
self.Target = Target
self.TagName = TagName
self.ToolCode = ToolCode
self.ToolChainFamily = ToolChainFamily
self._Key = None
## Convert the object of this class to a string
#
# Convert member Path of the class to a string
#
def __str__(self):
return self.Path
## Override __eq__ function
#
# Check whether PathClass are the same
#
def __eq__(self, Other):
if isinstance(Other, type(self)):
return self.Path == Other.Path
else:
return self.Path == str(Other)
## Override __hash__ function
#
# Use Path as key in hash table
#
def __hash__(self):
return hash(self.Path)
## _GetFileKey
#
def _GetFileKey(self):
if self._Key is None:
self._Key = self.Path.upper()
return self._Key
## Validate
#
def Validate(self, Type='', CaseSensitive=True):
if GlobalData.gCASE_INSENSITIVE:
CaseSensitive = False
if Type and Type.lower() != self.Type:
return ToolError.FILE_TYPE_MISMATCH, '%s (expect %s but got %s)' % \
(self.File, Type, self.Type)
RealFile, RealRoot = RealPath2(self.File, self.Root, self.AlterRoot)
if not RealRoot and not RealFile:
RealFile = self.File
if self.AlterRoot:
RealFile = os.path.join(self.AlterRoot, self.File)
elif self.Root:
RealFile = os.path.join(self.Root, self.File)
return ToolError.FILE_NOT_FOUND, os.path.join(self.AlterRoot, RealFile)
ErrorCode = 0
ErrorInfo = ''
if RealRoot != self.Root or RealFile != self.File:
if CaseSensitive and (RealFile != self.File or \
(RealRoot != self.Root and RealRoot != \
self.AlterRoot)):
ErrorCode = ToolError.FILE_CASE_MISMATCH
ErrorInfo = self.File + '\n\t' + RealFile + \
" [in file system]"
self.SubDir, self.Name = os.path.split(RealFile)
self.BaseName, self.Ext = os.path.splitext(self.Name)
if self.SubDir:
self.Dir = os.path.join(RealRoot, self.SubDir)
else:
self.Dir = RealRoot
self.File = RealFile
self.Root = RealRoot
self.Path = os.path.join(RealRoot, RealFile)
return ErrorCode, ErrorInfo
Key = property(_GetFileKey)
## Get current workspace
#
# get WORKSPACE from environment variable if present; if not, use the current working directory as WORKSPACE
#
def GetWorkspace():
#
# check WORKSPACE
#
if "WORKSPACE" in environ:
WorkspaceDir = os.path.normpath(environ["WORKSPACE"])
if not os.path.exists(WorkspaceDir):
Logger.Error("UPT",
ToolError.UPT_ENVIRON_MISSING_ERROR,
ST.ERR_WORKSPACE_NOTEXIST,
ExtraData="%s" % WorkspaceDir)
else:
WorkspaceDir = os.getcwd()
if WorkspaceDir[-1] == ':':
WorkspaceDir += os.sep
PackagesPath = os.environ.get("PACKAGES_PATH")
mws.setWs(WorkspaceDir, PackagesPath)
return WorkspaceDir, mws.PACKAGES_PATH
## Get relative path
#
# use full path and workspace to get relative path
# the purpose of this function is mainly to resolve the root path issue (like c: or c:\)
#
# @param Fullpath: a string of fullpath
# @param Workspace: a string of workspace
#
def GetRelativePath(Fullpath, Workspace):
RelativePath = ''
if Workspace.endswith(os.sep):
RelativePath = Fullpath[Fullpath.upper().find(Workspace.upper())+len(Workspace):]
else:
RelativePath = Fullpath[Fullpath.upper().find(Workspace.upper())+len(Workspace)+1:]
return RelativePath
## Check whether all module types are in list
#
# check whether all module types (SUP_MODULE_LIST) are in list
#
# @param ModuleList: a list of ModuleType
#
def IsAllModuleList(ModuleList):
NewModuleList = [Module.upper() for Module in ModuleList]
for Module in SUP_MODULE_LIST:
if Module not in NewModuleList:
return False
else:
return True
## Dictionary that uses comment (GenericComment, TailComment) as value;
# if a new comment whose key is already in the dict is inserted, then the
# comments will be merged.
# Key is (Statement, SupArch); when a TailComment is added, it will be indented
# according to Statement
#
class MergeCommentDict(dict):
## []= operator
#
def __setitem__(self, Key, CommentVal):
GenericComment, TailComment = CommentVal
if Key in self:
OrigVal1, OrigVal2 = dict.__getitem__(self, Key)
Statement = Key[0]
dict.__setitem__(self, Key, (OrigVal1 + GenericComment, OrigVal2 \
+ len(Statement) * ' ' + TailComment))
else:
dict.__setitem__(self, Key, (GenericComment, TailComment))
## =[] operator
#
def __getitem__(self, Key):
return dict.__getitem__(self, Key)
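#
# Usage sketch (illustrative values): assigning the same (Statement, SupArch)
# key twice merges the comments instead of overwriting them:
#   CommentDict = MergeCommentDict()
#   CommentDict[('gToken', 'COMMON')] = ('# generic1\n', ' # tail1')
#   CommentDict[('gToken', 'COMMON')] = ('# generic2\n', ' # tail2')
#   # the stored value is now both generic comments concatenated, plus the
#   # second tail comment appended after len('gToken') spaces of indent
#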
## GenDummyHelpTextObj
#
# @retval HelpTxt: Generated dummy help text object
#
def GenDummyHelpTextObj():
HelpTxt = TextObject()
HelpTxt.SetLang(TAB_LANGUAGE_EN_US)
HelpTxt.SetString(' ')
return HelpTxt
## ConvertVersionToDecimal, the minor version should be within 0 - 99
# <HexVersion> ::= "0x" <Major> <Minor>
# <Major> ::= (a-fA-F0-9){4}
# <Minor> ::= (a-fA-F0-9){4}
# <DecVersion> ::= (0-65535) ["." (0-99)]
#
# @param StringIn: The string contains version defined in INF file.
# It can be Decimal or Hex
#
def ConvertVersionToDecimal(StringIn):
if IsValidHexVersion(StringIn):
Value = int(StringIn, 16)
Major = Value >> 16
Minor = Value & 0xFFFF
MinorStr = str(Minor)
if len(MinorStr) == 1:
MinorStr = '0' + MinorStr
return str(Major) + '.' + MinorStr
else:
if StringIn.find(TAB_SPLIT) != -1:
return StringIn
elif StringIn:
return StringIn + '.0'
else:
#
# when StringIn is '', return it directly
#
return StringIn
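#
# Examples (illustrative; assumes IsValidHexVersion accepts "0x" followed by
# eight hexadecimal digits):
#   ConvertVersionToDecimal('0x00010005') returns '1.05'
#   ConvertVersionToDecimal('1.6') returns '1.6'
#   ConvertVersionToDecimal('2') returns '2.0'
#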
## GetHelpStringByRemoveHashKey
#
# Remove hash key at the header of string and return the remain.
#
# @param String: The string need to be processed.
#
def GetHelpStringByRemoveHashKey(String):
ReturnString = ''
PattenRemoveHashKey = re.compile(r"^[#+\s]+", re.DOTALL)
String = String.strip()
if String == '':
return String
LineList = GetSplitValueList(String, END_OF_LINE)
for Line in LineList:
ValueList = PattenRemoveHashKey.split(Line)
if len(ValueList) == 1:
ReturnString += ValueList[0] + END_OF_LINE
else:
ReturnString += ValueList[1] + END_OF_LINE
if ReturnString.endswith('\n') and not ReturnString.endswith('\n\n') and ReturnString != '\n':
ReturnString = ReturnString[:-1]
return ReturnString
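#
# Example (illustrative; assumes END_OF_LINE is '\n'):
#   GetHelpStringByRemoveHashKey('# First line\n#  Second line')
#   returns 'First line\nSecond line'
#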
## ConvPathFromAbsToRel
#
# Get relative file path from absolute path.
#
# @param Path: The string contain file absolute path.
# @param Root: The string contain the parent path of Path in.
#
#
def ConvPathFromAbsToRel(Path, Root):
Path = os.path.normpath(Path)
Root = os.path.normpath(Root)
FullPath = os.path.normpath(os.path.join(Root, Path))
#
# If Path is absolute path.
# It should be in Root.
#
if os.path.isabs(Path):
return FullPath[FullPath.find(Root) + len(Root) + 1:]
else:
return Path
## ConvertPath
#
# Convert special characters to '_', '\' to '/'
# return converted path: Test!1.inf -> Test_1.inf
#
# @param Path: Path to be converted
#
def ConvertPath(Path):
RetPath = ''
for Char in Path.strip():
if Char.isalnum() or Char in '.-_/':
RetPath = RetPath + Char
elif Char == '\\':
RetPath = RetPath + '/'
else:
RetPath = RetPath + '_'
return RetPath
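#
# Examples (illustrative):
#   ConvertPath('Test!1.inf') returns 'Test_1.inf'
#   ConvertPath(r'Dir\Sub File.c') returns 'Dir/Sub_File.c'
#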
## ConvertSpec
#
# during install, convert the Spec string extracted from the UPD into an INF allowable definition;
# the difference is that a period is allowed in the former (not as the first letter) but not in the latter.
# return converted Spec string
#
# @param SpecStr: SpecStr to be converted
#
def ConvertSpec(SpecStr):
RetStr = ''
for Char in SpecStr:
if Char.isalnum() or Char == '_':
RetStr = RetStr + Char
else:
RetStr = RetStr + '_'
return RetStr
## IsEqualList
#
# Judge whether two lists are identical (contain the same items).
# The rule is: elements in List A are in List B and elements in List B are in List A.
#
# @param ListA, ListB Lists need to be judged.
#
# @return True ListA and ListB are identical
# @return False ListA and ListB are different from each other
#
def IsEqualList(ListA, ListB):
if ListA == ListB:
return True
for ItemA in ListA:
if not ItemA in ListB:
return False
for ItemB in ListB:
if not ItemB in ListA:
return False
return True
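#
# Example (illustrative): order and duplicates are ignored, so these count
# as identical:
#   IsEqualList(['A', 'B'], ['B', 'A', 'A']) returns True
#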
## ConvertArchList
#
# Convert item in ArchList if the start character is lower case.
# In UDP spec, Arch is only allowed as: [A-Z]([a-zA-Z0-9])*
#
# @param ArchList The ArchList need to be converted.
#
# @return NewList The ArchList been converted.
#
def ConvertArchList(ArchList):
NewArchList = []
if not ArchList:
return NewArchList
if isinstance(ArchList, list):
for Arch in ArchList:
Arch = Arch.upper()
NewArchList.append(Arch)
elif isinstance(ArchList, str):
ArchList = ArchList.upper()
NewArchList.append(ArchList)
return NewArchList
## ProcessLineExtender
#
# Process the LineExtender of Line in LineList.
# If one line ends with a line extender, then it will be combined with the next line.
#
# @param LineList The LineList need to be processed.
#
# @return NewList The LineList that has been processed.
#
def ProcessLineExtender(LineList):
NewList = []
Count = 0
while Count < len(LineList):
if LineList[Count].strip().endswith("\\") and Count + 1 < len(LineList):
NewList.append(LineList[Count].strip()[:-2] + LineList[Count + 1])
Count = Count + 1
else:
NewList.append(LineList[Count])
Count = Count + 1
return NewList
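#
# Example (illustrative): a line ending in space + backslash is joined with
# the next line, dropping the two-character extender:
#   ProcessLineExtender(['ValueA \\', 'ValueB']) returns ['ValueAValueB']
#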
## ProcessEdkComment
#
# Process EDK style comment in LineList: c style /* */ comment or cpp style // comment
#
#
# @param LineList The LineList need to be processed.
#
# @return LineList The LineList that has been processed.
# @return FirstPos Where an EDK comment is first found, -1 if not found
#
def ProcessEdkComment(LineList):
FindEdkBlockComment = False
Count = 0
StartPos = -1
EndPos = -1
FirstPos = -1
while(Count < len(LineList)):
Line = LineList[Count].strip()
if Line.startswith("/*"):
#
# handling c style comment
#
StartPos = Count
while Count < len(LineList):
Line = LineList[Count].strip()
if Line.endswith("*/"):
if (Count == StartPos) and Line.strip() == '/*/':
Count = Count + 1
continue
EndPos = Count
FindEdkBlockComment = True
break
Count = Count + 1
if FindEdkBlockComment:
if FirstPos == -1:
FirstPos = StartPos
for Index in range(StartPos, EndPos+1):
LineList[Index] = ''
FindEdkBlockComment = False
elif Line.find("//") != -1 and not Line.startswith("#"):
#
# handling cpp style comment
#
LineList[Count] = Line.replace("//", '#')
if FirstPos == -1:
FirstPos = Count
Count = Count + 1
return LineList, FirstPos
## GetLibInstanceInfo
#
# Get the information from Library Instance INF file.
#
# @param String: A string starting with # and followed by an INF file path
# @param WorkSpace. The WorkSpace directory used to combined with INF file path.
#
# @return GUID, Version
def GetLibInstanceInfo(String, WorkSpace, LineNo):
FileGuidString = ""
VerString = ""
OriginalString = String
String = String.strip()
if not String:
return None, None
#
# Remove "#" characters at the beginning
#
String = GetHelpStringByRemoveHashKey(String)
String = String.strip()
#
# Validate that the file exists.
#
FullFileName = os.path.normpath(os.path.realpath(os.path.join(WorkSpace, String)))
if not (ValidFile(FullFileName)):
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_FILELIST_EXIST % (String),
File=GlobalData.gINF_MODULE_NAME,
Line=LineNo,
ExtraData=OriginalString)
IsValidFileFlag = False
#
# Validate that the file exists and has a valid format.
#
if IsValidPath(String, WorkSpace):
IsValidFileFlag = True
else:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_FILE_NOT_EXIST_OR_NAME_INVALID % (String),
File=GlobalData.gINF_MODULE_NAME,
Line=LineNo,
ExtraData=OriginalString)
return False
if IsValidFileFlag:
FileLinesList = []
try:
FInputfile = open(FullFileName, "r")
try:
FileLinesList = FInputfile.readlines()
except BaseException:
Logger.Error("InfParser",
ToolError.FILE_READ_FAILURE,
ST.ERR_FILE_OPEN_FAILURE,
File=FullFileName)
finally:
FInputfile.close()
except BaseException:
Logger.Error("InfParser",
ToolError.FILE_READ_FAILURE,
ST.ERR_FILE_OPEN_FAILURE,
File=FullFileName)
ReFileGuidPattern = re.compile(r"^\s*FILE_GUID\s*=.*$")
ReVerStringPattern = re.compile(r"^\s*VERSION_STRING\s*=.*$")
FileLinesList = ProcessLineExtender(FileLinesList)
for Line in FileLinesList:
if ReFileGuidPattern.match(Line):
FileGuidString = Line
if ReVerStringPattern.match(Line):
VerString = Line
if FileGuidString:
FileGuidString = GetSplitValueList(FileGuidString, '=', 1)[1]
if VerString:
VerString = GetSplitValueList(VerString, '=', 1)[1]
return FileGuidString, VerString
## GetLocalValue
#
# Pick the preferred local value for an INF or DEC file from a list of
# (Language, Value) pairs. Precedence: the value whose language code is
# "en-x-tianocore", then "en-US", then "en", then any code starting with "en",
# and finally an entry with an empty language code.
# If multiple entries share the same language code, the last entry is used
# (unless UseFirstValue is set).
#
# @param ValueList A list need to be processed.
# @param UseFirstValue: True to use the first value, False to use the last value
#
# @return LocalValue
def GetLocalValue(ValueList, UseFirstValue=False):
Value1 = ''
Value2 = ''
Value3 = ''
Value4 = ''
Value5 = ''
for (Key, Value) in ValueList:
if Key == TAB_LANGUAGE_EN_X:
if UseFirstValue:
if not Value1:
Value1 = Value
else:
Value1 = Value
if Key == TAB_LANGUAGE_EN_US:
if UseFirstValue:
if not Value2:
Value2 = Value
else:
Value2 = Value
if Key == TAB_LANGUAGE_EN:
if UseFirstValue:
if not Value3:
Value3 = Value
else:
Value3 = Value
if Key.startswith(TAB_LANGUAGE_EN):
if UseFirstValue:
if not Value4:
Value4 = Value
else:
Value4 = Value
if Key == '':
if UseFirstValue:
if not Value5:
Value5 = Value
else:
Value5 = Value
if Value1:
return Value1
if Value2:
return Value2
if Value3:
return Value3
if Value4:
return Value4
if Value5:
return Value5
return ''
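## _DemoGetLocalValue
#
# Editor's illustrative sketch, not part of the original module: assumes the
# usual constant values 'en-x-tianocore', 'en-US' and 'en' for the
# TAB_LANGUAGE_* names used above; the sample pairs are hypothetical.
#
def _DemoGetLocalValue():
    Pairs = [('en', 'Value en'), ('en-US', 'Value en-US'), ('en-x-tianocore', 'Value tianocore')]
    assert GetLocalValue(Pairs) == 'Value tianocore'
    assert GetLocalValue(Pairs[:2]) == 'Value en-US'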
## GetCharIndexOutStr
#
# Get comment character index outside a string
#
# @param CommentCharacter: Comment char, used to ignore comment content
# @param Line: The string to be checked
#
# @retval Index
#
def GetCharIndexOutStr(CommentCharacter, Line):
#
# remove whitespace
#
Line = Line.strip()
#
# Check whether comment character is in a string
#
InString = False
for Index in range(0, len(Line)):
if Line[Index] == '"':
InString = not InString
elif Line[Index] == CommentCharacter and InString :
pass
elif Line[Index] == CommentCharacter and (Index +1) < len(Line) and Line[Index+1] == CommentCharacter \
and not InString :
return Index
return -1
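## _DemoGetCharIndexOutStr
#
# Editor's illustrative sketch, not part of the original module: the doubled
# comment character inside the quoted string is ignored; only the trailing
# one outside the quotes is reported.
#
def _DemoGetCharIndexOutStr():
    Line = 'Path = "a//b" // comment'
    assert GetCharIndexOutStr(u'/', Line) == Line.rfind('//')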
## ValidateUNIFilePath
#
# Check the UNI file path
#
# @param Path: The UNI file path
#
def ValidateUNIFilePath(Path):
Suffix = Path[Path.rfind(TAB_SPLIT):]
#
# Check if the suffix is one of the '.uni', '.UNI', '.Uni'
#
if Suffix not in TAB_UNI_FILE_SUFFIXS:
Logger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
Message=ST.ERR_UNI_FILE_SUFFIX_WRONG,
ExtraData=Path)
#
# Check if '..' is in the file name (without suffix)
#
if (TAB_SPLIT + TAB_SPLIT) in Path:
Logger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
Message=ST.ERR_UNI_FILE_NAME_INVALID,
ExtraData=Path)
#
# Check if the file name is valid according to the DEC and INF specification
#
Pattern = r'[a-zA-Z0-9_][a-zA-Z0-9_\-\.]*'
FileName = Path.replace(Suffix, '')
InvalidCh = re.sub(Pattern, '', FileName)
if InvalidCh:
Logger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
Message=ST.ERR_INF_PARSER_FILE_NOT_EXIST_OR_NAME_INVALID,
ExtraData=Path)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Library/Misc.py
|
## @file
# Collect all defined strings in multiple uni files.
#
# Copyright (c) 2014 - 2019, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
"""
Collect all defined strings in multiple uni files
"""
from __future__ import print_function
##
# Import Modules
#
import os, codecs, re
import shlex
from Logger import ToolError
from Logger import Log as EdkLogger
from Logger import StringTable as ST
from Library.StringUtils import GetLineNo
from Library.Misc import PathClass
from Library.Misc import GetCharIndexOutStr
from Library import DataType as DT
from Library.ParserValidate import CheckUTF16FileHeader
##
# Static definitions
#
UNICODE_WIDE_CHAR = u'\\wide'
UNICODE_NARROW_CHAR = u'\\narrow'
UNICODE_NON_BREAKING_CHAR = u'\\nbr'
UNICODE_UNICODE_CR = '\r'
UNICODE_UNICODE_LF = '\n'
NARROW_CHAR = u'\uFFF0'
WIDE_CHAR = u'\uFFF1'
NON_BREAKING_CHAR = u'\uFFF2'
CR = u'\u000D'
LF = u'\u000A'
NULL = u'\u0000'
TAB = u'\t'
BACK_SPLASH = u'\\'
gLANG_CONV_TABLE = {'eng':'en', 'fra':'fr', \
'aar':'aa', 'abk':'ab', 'ave':'ae', 'afr':'af', 'aka':'ak', 'amh':'am', \
'arg':'an', 'ara':'ar', 'asm':'as', 'ava':'av', 'aym':'ay', 'aze':'az', \
'bak':'ba', 'bel':'be', 'bul':'bg', 'bih':'bh', 'bis':'bi', 'bam':'bm', \
'ben':'bn', 'bod':'bo', 'bre':'br', 'bos':'bs', 'cat':'ca', 'che':'ce', \
'cha':'ch', 'cos':'co', 'cre':'cr', 'ces':'cs', 'chu':'cu', 'chv':'cv', \
'cym':'cy', 'dan':'da', 'deu':'de', 'div':'dv', 'dzo':'dz', 'ewe':'ee', \
'ell':'el', 'epo':'eo', 'spa':'es', 'est':'et', 'eus':'eu', 'fas':'fa', \
'ful':'ff', 'fin':'fi', 'fij':'fj', 'fao':'fo', 'fry':'fy', 'gle':'ga', \
'gla':'gd', 'glg':'gl', 'grn':'gn', 'guj':'gu', 'glv':'gv', 'hau':'ha', \
'heb':'he', 'hin':'hi', 'hmo':'ho', 'hrv':'hr', 'hat':'ht', 'hun':'hu', \
'hye':'hy', 'her':'hz', 'ina':'ia', 'ind':'id', 'ile':'ie', 'ibo':'ig', \
'iii':'ii', 'ipk':'ik', 'ido':'io', 'isl':'is', 'ita':'it', 'iku':'iu', \
'jpn':'ja', 'jav':'jv', 'kat':'ka', 'kon':'kg', 'kik':'ki', 'kua':'kj', \
'kaz':'kk', 'kal':'kl', 'khm':'km', 'kan':'kn', 'kor':'ko', 'kau':'kr', \
'kas':'ks', 'kur':'ku', 'kom':'kv', 'cor':'kw', 'kir':'ky', 'lat':'la', \
'ltz':'lb', 'lug':'lg', 'lim':'li', 'lin':'ln', 'lao':'lo', 'lit':'lt', \
'lub':'lu', 'lav':'lv', 'mlg':'mg', 'mah':'mh', 'mri':'mi', 'mkd':'mk', \
'mal':'ml', 'mon':'mn', 'mar':'mr', 'msa':'ms', 'mlt':'mt', 'mya':'my', \
'nau':'na', 'nob':'nb', 'nde':'nd', 'nep':'ne', 'ndo':'ng', 'nld':'nl', \
'nno':'nn', 'nor':'no', 'nbl':'nr', 'nav':'nv', 'nya':'ny', 'oci':'oc', \
'oji':'oj', 'orm':'om', 'ori':'or', 'oss':'os', 'pan':'pa', 'pli':'pi', \
'pol':'pl', 'pus':'ps', 'por':'pt', 'que':'qu', 'roh':'rm', 'run':'rn', \
'ron':'ro', 'rus':'ru', 'kin':'rw', 'san':'sa', 'srd':'sc', 'snd':'sd', \
'sme':'se', 'sag':'sg', 'sin':'si', 'slk':'sk', 'slv':'sl', 'smo':'sm', \
'sna':'sn', 'som':'so', 'sqi':'sq', 'srp':'sr', 'ssw':'ss', 'sot':'st', \
'sun':'su', 'swe':'sv', 'swa':'sw', 'tam':'ta', 'tel':'te', 'tgk':'tg', \
'tha':'th', 'tir':'ti', 'tuk':'tk', 'tgl':'tl', 'tsn':'tn', 'ton':'to', \
'tur':'tr', 'tso':'ts', 'tat':'tt', 'twi':'tw', 'tah':'ty', 'uig':'ug', \
'ukr':'uk', 'urd':'ur', 'uzb':'uz', 'ven':'ve', 'vie':'vi', 'vol':'vo', \
'wln':'wa', 'wol':'wo', 'xho':'xh', 'yid':'yi', 'yor':'yo', 'zha':'za', \
'zho':'zh', 'zul':'zu'}
## Convert a python unicode string to a normal string
#
# Convert a python unicode string to a normal string
# UniToStr(u'I am a string') is 'I am a string'
#
# @param Uni: The python unicode string
#
# @retval: The formatted normal string
#
def UniToStr(Uni):
# repr() of a Python 3 str is wrapped in plain quotes (no u'' prefix as in
# Python 2), so strip one character from each end
return repr(Uni)[1:-1]
## Convert a unicode string to a Hex list
#
# Convert a unicode string to a Hex list
# UniToHexList('ABC') is ['0x41', '0x00', '0x42', '0x00', '0x43', '0x00']
#
# @param Uni: The python unicode string
#
# @retval List: The formatted hex list
#
def UniToHexList(Uni):
List = []
for Item in Uni:
Temp = '%04X' % ord(Item)
List.append('0x' + Temp[2:4])
List.append('0x' + Temp[0:2])
return List
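## _DemoUniToHexList
#
# Editor's illustrative sketch, not part of the original module: each
# character is emitted as two hex bytes, low byte first (little-endian
# UCS-2).
#
def _DemoUniToHexList():
    assert UniToHexList(u'A') == ['0x41', '0x00']
    assert UniToHexList(u'AB') == ['0x41', '0x00', '0x42', '0x00']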
## Convert special unicode characters
#
# Convert special characters to (c), (r) and (tm).
#
# @param Uni: The python unicode string
#
# @retval NewUni: The converted unicode string
#
def ConvertSpecialUnicodes(Uni):
NewUni = Uni.replace(u'\u00A9', '(c)')
NewUni = NewUni.replace(u'\u00AE', '(r)')
NewUni = NewUni.replace(u'\u2122', '(tm)')
return NewUni
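## _DemoConvertSpecialUnicodes
#
# Editor's illustrative sketch, not part of the original module: the
# copyright, registered and trademark signs are replaced by their ASCII
# spellings (hypothetical sample string).
#
def _DemoConvertSpecialUnicodes():
    assert ConvertSpecialUnicodes(u'Copyright \u00A9 2023') == 'Copyright (c) 2023'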
## GetLanguageCode1766
#
# Check the language code read from .UNI file and convert RFC 4646 codes to RFC 1766 codes
# RFC 1766 language codes supported in compatibility mode
# RFC 4646 language codes supported in native mode
#
# @param LangName: Language codes read from .UNI file
#
# @retval LangName: Valid language code in RFC 1766 format or None
#
def GetLanguageCode1766(LangName, File=None):
# Note: conversion is short-circuited; LangName is returned unchanged and the
# RFC 1766 validation logic below is unreachable.
return LangName
length = len(LangName)
if length == 2:
if LangName.isalpha():
for Key in gLANG_CONV_TABLE.keys():
if gLANG_CONV_TABLE.get(Key) == LangName.lower():
return Key
elif length == 3:
if LangName.isalpha() and gLANG_CONV_TABLE.get(LangName.lower()):
return LangName
else:
EdkLogger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
"Invalid RFC 1766 language code : %s" % LangName,
File)
elif length == 5:
if LangName[0:2].isalpha() and LangName[2] == '-':
for Key in gLANG_CONV_TABLE.keys():
if gLANG_CONV_TABLE.get(Key) == LangName[0:2].lower():
return Key
elif length >= 6:
if LangName[0:2].isalpha() and LangName[2] == '-':
for Key in gLANG_CONV_TABLE.keys():
if gLANG_CONV_TABLE.get(Key) == LangName[0:2].lower():
return Key
if LangName[0:3].isalpha() and gLANG_CONV_TABLE.get(LangName.lower()) is None and LangName[3] == '-':
for Key in gLANG_CONV_TABLE.keys():
if Key == LangName[0:3].lower():
return Key
EdkLogger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
"Invalid RFC 4646 language code : %s" % LangName,
File)
## GetLanguageCode
#
# Check the language code read from .UNI file and convert RFC 1766 codes to RFC 4646 codes if appropriate
# RFC 1766 language codes supported in compatibility mode
# RFC 4646 language codes supported in native mode
#
# @param LangName: Language codes read from .UNI file
#
# @retval LangName: Valid language code in RFC 4646 format or None
#
def GetLanguageCode(LangName, IsCompatibleMode, File):
length = len(LangName)
if IsCompatibleMode:
if length == 3 and LangName.isalpha():
TempLangName = gLANG_CONV_TABLE.get(LangName.lower())
if TempLangName is not None:
return TempLangName
return LangName
else:
EdkLogger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
"Invalid RFC 1766 language code : %s" % LangName,
File)
if (LangName[0] == 'X' or LangName[0] == 'x') and LangName[1] == '-':
return LangName
if length == 2:
if LangName.isalpha():
return LangName
elif length == 3:
if LangName.isalpha() and gLANG_CONV_TABLE.get(LangName.lower()) is None:
return LangName
elif length == 5:
if LangName[0:2].isalpha() and LangName[2] == '-':
return LangName
elif length >= 6:
if LangName[0:2].isalpha() and LangName[2] == '-':
return LangName
if LangName[0:3].isalpha() and gLANG_CONV_TABLE.get(LangName.lower()) is None and LangName[3] == '-':
return LangName
EdkLogger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
"Invalid RFC 4646 language code : %s" % LangName,
File)
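## _DemoGetLanguageCode
#
# Editor's illustrative sketch, not part of the original module: in
# compatibility mode the ISO 639-2 code 'eng' maps to RFC 4646 'en'; in
# native mode a well-formed RFC 4646 code passes through unchanged.
#
def _DemoGetLanguageCode():
    assert GetLanguageCode('eng', True, None) == 'en'
    assert GetLanguageCode('en-US', False, None) == 'en-US'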
## FormatUniEntry
#
# Formatted the entry in Uni file.
#
# @param StrTokenName StrTokenName.
# @param TokenValueList A list need to be processed.
# @param ContainerFile ContainerFile.
#
# @return formatted entry
def FormatUniEntry(StrTokenName, TokenValueList, ContainerFile):
SubContent = ''
PreFormatLength = 40
if len(StrTokenName) > PreFormatLength:
PreFormatLength = len(StrTokenName) + 1
for (Lang, Value) in TokenValueList:
if not Value or Lang == DT.TAB_LANGUAGE_EN_X:
continue
if Lang == '':
Lang = DT.TAB_LANGUAGE_EN_US
if Lang == 'eng':
Lang = DT.TAB_LANGUAGE_EN_US
elif len(Lang.split('-')[0]) == 3:
Lang = GetLanguageCode(Lang.split('-')[0], True, ContainerFile)
else:
Lang = GetLanguageCode(Lang, False, ContainerFile)
ValueList = Value.split('\n')
SubValueContent = ''
for SubValue in ValueList:
if SubValue.strip():
SubValueContent += \
' ' * (PreFormatLength + len('#language en-US ')) + '\"%s\\n\"' % SubValue.strip() + '\r\n'
SubValueContent = SubValueContent[(PreFormatLength + len('#language en-US ')):SubValueContent.rfind('\\n')] \
+ '\"' + '\r\n'
SubContent += ' '*PreFormatLength + '#language %-5s ' % Lang + SubValueContent
if SubContent:
SubContent = StrTokenName + ' '*(PreFormatLength - len(StrTokenName)) + SubContent[PreFormatLength:]
return SubContent
## StringDefClassObject
#
# A structure for language definition
#
class StringDefClassObject(object):
def __init__(self, Name = None, Value = None, Referenced = False, Token = None, UseOtherLangDef = ''):
self.StringName = ''
self.StringNameByteList = []
self.StringValue = ''
self.StringValueByteList = ''
self.Token = 0
self.Referenced = Referenced
self.UseOtherLangDef = UseOtherLangDef
self.Length = 0
if Name is not None:
self.StringName = Name
self.StringNameByteList = UniToHexList(Name)
if Value is not None:
self.StringValue = Value
self.StringValueByteList = UniToHexList(self.StringValue)
self.Length = len(self.StringValueByteList)
if Token is not None:
self.Token = Token
def __str__(self):
return repr(self.StringName) + ' ' + \
repr(self.Token) + ' ' + \
repr(self.Referenced) + ' ' + \
repr(self.StringValue) + ' ' + \
repr(self.UseOtherLangDef)
def UpdateValue(self, Value = None):
if Value is not None:
if self.StringValue:
self.StringValue = self.StringValue + '\r\n' + Value
else:
self.StringValue = Value
self.StringValueByteList = UniToHexList(self.StringValue)
self.Length = len(self.StringValueByteList)
## UniFileClassObject
#
# A structure for .uni file definition
#
class UniFileClassObject(object):
def __init__(self, FileList = None, IsCompatibleMode = False, IncludePathList = None):
self.FileList = FileList
self.File = None
self.IncFileList = FileList
self.UniFileHeader = ''
self.Token = 2
self.LanguageDef = [] #[ [u'LanguageIdentifier', u'PrintableName'], ... ]
self.OrderedStringList = {} #{ u'LanguageIdentifier' : [StringDefClassObject] }
self.OrderedStringDict = {} #{ u'LanguageIdentifier' : {StringName:(IndexInList)} }
self.OrderedStringListByToken = {} #{ u'LanguageIdentifier' : {Token: StringDefClassObject} }
self.IsCompatibleMode = IsCompatibleMode
if not IncludePathList:
self.IncludePathList = []
else:
self.IncludePathList = IncludePathList
if len(self.FileList) > 0:
self.LoadUniFiles(FileList)
#
# Get Language definition
#
def GetLangDef(self, File, Line):
Lang = shlex.split(Line.split(u"//")[0])
if len(Lang) != 3:
try:
    try:
        FileIn = codecs.open(File.Path, mode='rb', encoding='utf_8').readlines()
    except UnicodeError:
        # Fall back from UTF-8 to the UTF-16 variants
        try:
            FileIn = codecs.open(File.Path, mode='rb', encoding='utf_16').readlines()
        except UnicodeError:
            FileIn = codecs.open(File.Path, mode='rb', encoding='utf_16_le').readlines()
except BaseException as Xstr:
    EdkLogger.Error("Unicode File Parser",
                    ToolError.FILE_OPEN_FAILURE,
                    "File read failure: %s" % str(Xstr),
                    ExtraData=File)
LineNo = GetLineNo(FileIn, Line, False)
EdkLogger.Error("Unicode File Parser",
ToolError.PARSER_ERROR,
"Wrong language definition",
ExtraData="""%s\n\t*Correct format is like '#langdef en-US "English"'""" % Line,
File = File, Line = LineNo)
else:
LangName = GetLanguageCode(Lang[1], self.IsCompatibleMode, self.File)
LangPrintName = Lang[2]
IsLangInDef = False
for Item in self.LanguageDef:
if Item[0] == LangName:
IsLangInDef = True
break
if not IsLangInDef:
self.LanguageDef.append([LangName, LangPrintName])
#
# Add language string
#
self.AddStringToList(u'$LANGUAGE_NAME', LangName, LangName, 0, True, Index=0)
self.AddStringToList(u'$PRINTABLE_LANGUAGE_NAME', LangName, LangPrintName, 1, True, Index=1)
if not IsLangInDef:
#
# The found STRING tokens will be added into new language string list
# so that the unique STRING identifier is reserved for all languages in the package list.
#
FirstLangName = self.LanguageDef[0][0]
if LangName != FirstLangName:
for Index in range (2, len (self.OrderedStringList[FirstLangName])):
Item = self.OrderedStringList[FirstLangName][Index]
if Item.UseOtherLangDef != '':
OtherLang = Item.UseOtherLangDef
else:
OtherLang = FirstLangName
self.OrderedStringList[LangName].append (StringDefClassObject(Item.StringName,
'',
Item.Referenced,
Item.Token,
OtherLang))
self.OrderedStringDict[LangName][Item.StringName] = len(self.OrderedStringList[LangName]) - 1
return True
#
# Get String name and value
#
def GetStringObject(self, Item):
Language = ''
Value = ''
Name = Item.split()[1]
# Check that the string name contains only upper-case characters
if Name != '':
MatchString = re.match('[A-Z0-9_]+', Name, re.UNICODE)
if MatchString is None or MatchString.end(0) != len(Name):
EdkLogger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
'The string token name %s in UNI file %s must be upper case character.' %(Name, self.File))
LanguageList = Item.split(u'#language ')
for IndexI in range(len(LanguageList)):
if IndexI == 0:
continue
else:
Language = LanguageList[IndexI].split()[0]
#.replace(u'\r\n', u'')
Value = \
LanguageList[IndexI][LanguageList[IndexI].find(u'\"') + len(u'\"') : LanguageList[IndexI].rfind(u'\"')]
Language = GetLanguageCode(Language, self.IsCompatibleMode, self.File)
self.AddStringToList(Name, Language, Value)
#
# Get include file list and load them
#
def GetIncludeFile(self, Item, Dir = None):
if Dir:
pass
FileName = Item[Item.find(u'!include ') + len(u'!include ') :Item.find(u' ', len(u'!include '))][1:-1]
self.LoadUniFile(FileName)
#
# Pre-process before parse .uni file
#
def PreProcess(self, File, IsIncludeFile=False):
if not os.path.exists(File.Path) or not os.path.isfile(File.Path):
EdkLogger.Error("Unicode File Parser",
ToolError.FILE_NOT_FOUND,
ExtraData=File.Path)
#
# Check file header of the Uni file
#
# if not CheckUTF16FileHeader(File.Path):
# EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
# ExtraData='The file %s is either invalid UTF-16LE or it is missing the BOM.' % File.Path)
try:
    try:
        FileIn = codecs.open(File.Path, mode='rb', encoding='utf_8').readlines()
    except UnicodeError:
        # Fall back from UTF-8 to the UTF-16 variants
        try:
            FileIn = codecs.open(File.Path, mode='rb', encoding='utf_16').readlines()
        except UnicodeError:
            FileIn = codecs.open(File.Path, mode='rb', encoding='utf_16_le').readlines()
except BaseException:
    EdkLogger.Error("Unicode File Parser", ToolError.FILE_OPEN_FAILURE, ExtraData=File.Path)
#
# get the file header
#
Lines = []
HeaderStart = False
HeaderEnd = False
if not self.UniFileHeader:
FirstGenHeader = True
else:
FirstGenHeader = False
for Line in FileIn:
Line = Line.strip()
if Line == u'':
continue
if Line.startswith(DT.TAB_COMMENT_EDK1_SPLIT) and (Line.find(DT.TAB_HEADER_COMMENT) > -1) \
and not HeaderEnd and not HeaderStart:
HeaderStart = True
if not Line.startswith(DT.TAB_COMMENT_EDK1_SPLIT) and HeaderStart and not HeaderEnd:
HeaderEnd = True
if Line.startswith(DT.TAB_COMMENT_EDK1_SPLIT) and HeaderStart and not HeaderEnd and FirstGenHeader:
self.UniFileHeader += Line + '\r\n'
continue
#
# Use unique identifier
#
FindFlag = -1
LineCount = 0
MultiLineFeedExits = False
#
# 0: initial value
# 1: a single String entry exists
# 2: a line feed exists under some single String entry
#
StringEntryExistsFlag = 0
for Line in FileIn:
    # Re-read the current line by index; entries of FileIn may be rewritten
    # or shifted in place further below
    Line = FileIn[LineCount]
LineCount += 1
Line = Line.strip()
#
# Ignore comment line and empty line
#
if Line == u'' or Line.startswith(u'//'):
#
# Change the single line String entry flag status
#
if StringEntryExistsFlag == 1:
StringEntryExistsFlag = 2
#
# If the '#string' line and the '#language' line are not in the same line,
# there should be only one line feed character between them
#
if MultiLineFeedExits:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
continue
MultiLineFeedExits = False
#
# Process comment embedded in string define lines
#
FindFlag = Line.find(u'//')
if FindFlag != -1 and Line.find(u'//') < Line.find(u'"'):
Line = Line.replace(Line[FindFlag:], u' ')
if FileIn[LineCount].strip().startswith('#language'):
Line = Line + FileIn[LineCount]
FileIn[LineCount-1] = Line
FileIn[LineCount] = '\r\n'
LineCount -= 1
for Index in range (LineCount + 1, len (FileIn) - 1):
if (Index == len(FileIn) -1):
FileIn[Index] = '\r\n'
else:
FileIn[Index] = FileIn[Index + 1]
continue
CommIndex = GetCharIndexOutStr(u'/', Line)
if CommIndex > -1:
if (len(Line) - 1) > CommIndex:
if Line[CommIndex+1] == u'/':
Line = Line[:CommIndex].strip()
else:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
else:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
Line = Line.replace(UNICODE_WIDE_CHAR, WIDE_CHAR)
Line = Line.replace(UNICODE_NARROW_CHAR, NARROW_CHAR)
Line = Line.replace(UNICODE_NON_BREAKING_CHAR, NON_BREAKING_CHAR)
Line = Line.replace(u'\\\\', u'\u0006')
Line = Line.replace(u'\\r\\n', CR + LF)
Line = Line.replace(u'\\n', CR + LF)
Line = Line.replace(u'\\r', CR)
Line = Line.replace(u'\\t', u'\t')
Line = Line.replace(u'''\"''', u'''"''')
Line = Line.replace(u'\t', u' ')
Line = Line.replace(u'\u0006', u'\\')
#
# Check if single line has correct '"'
#
if Line.startswith(u'#string') and Line.find(u'#language') > -1 and Line.find('"') > Line.find(u'#language'):
if not Line.endswith('"'):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
ExtraData='''The line %s misses '"' at the end of it in file %s'''
% (LineCount, File.Path))
#
# No line feed is allowed between the Name entry and the Language entry
#
if Line.startswith(u'#string') and Line.find(u'#language') == -1:
MultiLineFeedExits = True
if Line.startswith(u'#string') and Line.find(u'#language') > 0 and Line.find(u'"') < 0:
MultiLineFeedExits = True
#
# No line feed is allowed between the Language entry and the String entry
#
if Line.startswith(u'#language') and len(Line.split()) == 2:
MultiLineFeedExits = True
#
# Check the situation that there only has one '"' for the language entry
#
if Line.startswith(u'#string') and Line.find(u'#language') > 0 and Line.count(u'"') == 1:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
ExtraData='''The line %s misses '"' at the end of it in file %s'''
% (LineCount, File.Path))
#
# Check the situation that there has more than 2 '"' for the language entry
#
if Line.startswith(u'#string') and Line.find(u'#language') > 0 and Line.replace(u'\\"', '').count(u'"') > 2:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
ExtraData='''The line %s has more than 2 '"' for language entry in file %s'''
% (LineCount, File.Path))
#
# No line feed is allowed between two String entries
#
if Line.startswith(u'"'):
if StringEntryExistsFlag == 2:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
Message=ST.ERR_UNIPARSE_LINEFEED_UP_EXIST % Line, ExtraData=File.Path)
StringEntryExistsFlag = 1
if not Line.endswith('"'):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
ExtraData='''The line %s misses '"' at the end of it in file %s'''
% (LineCount, File.Path))
#
# Check the situation that there has more than 2 '"' for the language entry
#
if Line.strip() and Line.replace(u'\\"', '').count(u'"') > 2:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
ExtraData='''The line %s has more than 2 '"' for language entry in file %s'''
% (LineCount, File.Path))
elif Line.startswith(u'#language'):
if StringEntryExistsFlag == 2:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
Message=ST.ERR_UNI_MISS_STRING_ENTRY % Line, ExtraData=File.Path)
StringEntryExistsFlag = 0
else:
StringEntryExistsFlag = 0
Lines.append(Line)
#
# Convert string def format as below
#
# #string MY_STRING_1
# #language eng
# "My first English string line 1"
# "My first English string line 2"
# #string MY_STRING_1
# #language spa
# "Mi segunda secuencia 1"
# "Mi segunda secuencia 2"
#
if not IsIncludeFile and not Lines:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_NO_SECTION_EXIST, \
ExtraData=File.Path)
NewLines = []
StrName = u''
ExistStrNameList = []
for Line in Lines:
if StrName and not StrName.split()[1].startswith(DT.TAB_STR_TOKENCNAME + DT.TAB_UNDERLINE_SPLIT):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_STRNAME_FORMAT_ERROR % StrName.split()[1], \
ExtraData=File.Path)
if StrName and len(StrName.split()[1].split(DT.TAB_UNDERLINE_SPLIT)) == 4:
StringTokenList = StrName.split()[1].split(DT.TAB_UNDERLINE_SPLIT)
if (StringTokenList[3].upper() in [DT.TAB_STR_TOKENPROMPT, DT.TAB_STR_TOKENHELP] and \
StringTokenList[3] not in [DT.TAB_STR_TOKENPROMPT, DT.TAB_STR_TOKENHELP]) or \
(StringTokenList[2].upper() == DT.TAB_STR_TOKENERR and StringTokenList[2] != DT.TAB_STR_TOKENERR):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_STRTOKEN_FORMAT_ERROR % StrName.split()[1], \
ExtraData=File.Path)
if Line.count(u'#language') > 1:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_SEP_LANGENTRY_LINE % Line, \
ExtraData=File.Path)
if Line.startswith(u'//'):
continue
elif Line.startswith(u'#langdef'):
if len(Line.split()) == 2:
NewLines.append(Line)
continue
elif len(Line.split()) > 2 and Line.find(u'"') > 0:
NewLines.append(Line[:Line.find(u'"')].strip())
NewLines.append(Line[Line.find(u'"'):])
else:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
elif Line.startswith(u'#string'):
if len(Line.split()) == 2:
StrName = Line
if StrName:
if StrName.split()[1] not in ExistStrNameList:
ExistStrNameList.append(StrName.split()[1].strip())
elif StrName.split()[1] in [DT.TAB_INF_ABSTRACT, DT.TAB_INF_DESCRIPTION, \
DT.TAB_INF_BINARY_ABSTRACT, DT.TAB_INF_BINARY_DESCRIPTION, \
DT.TAB_DEC_PACKAGE_ABSTRACT, DT.TAB_DEC_PACKAGE_DESCRIPTION, \
DT.TAB_DEC_BINARY_ABSTRACT, DT.TAB_DEC_BINARY_DESCRIPTION]:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_MULTI_ENTRY_EXIST % StrName.split()[1], \
ExtraData=File.Path)
continue
elif len(Line.split()) == 4 and Line.find(u'#language') > 0:
if Line[Line.find(u'#language')-1] != ' ' or \
Line[Line.find(u'#language')+len(u'#language')] != u' ':
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
if Line.find(u'"') > 0:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
StrName = Line.split()[0] + u' ' + Line.split()[1]
if StrName:
if StrName.split()[1] not in ExistStrNameList:
ExistStrNameList.append(StrName.split()[1].strip())
elif StrName.split()[1] in [DT.TAB_INF_ABSTRACT, DT.TAB_INF_DESCRIPTION, \
DT.TAB_INF_BINARY_ABSTRACT, DT.TAB_INF_BINARY_DESCRIPTION, \
DT.TAB_DEC_PACKAGE_ABSTRACT, DT.TAB_DEC_PACKAGE_DESCRIPTION, \
DT.TAB_DEC_BINARY_ABSTRACT, DT.TAB_DEC_BINARY_DESCRIPTION]:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_MULTI_ENTRY_EXIST % StrName.split()[1], \
ExtraData=File.Path)
if IsIncludeFile:
if StrName not in NewLines:
NewLines.append((Line[:Line.find(u'#language')]).strip())
else:
NewLines.append((Line[:Line.find(u'#language')]).strip())
NewLines.append((Line[Line.find(u'#language'):]).strip())
elif len(Line.split()) > 4 and Line.find(u'#language') > 0 and Line.find(u'"') > 0:
if Line[Line.find(u'#language')-1] != u' ' or \
Line[Line.find(u'#language')+len(u'#language')] != u' ':
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
if Line[Line.find(u'"')-1] != u' ':
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
StrName = Line.split()[0] + u' ' + Line.split()[1]
if StrName:
if StrName.split()[1] not in ExistStrNameList:
ExistStrNameList.append(StrName.split()[1].strip())
elif StrName.split()[1] in [DT.TAB_INF_ABSTRACT, DT.TAB_INF_DESCRIPTION, \
DT.TAB_INF_BINARY_ABSTRACT, DT.TAB_INF_BINARY_DESCRIPTION, \
DT.TAB_DEC_PACKAGE_ABSTRACT, DT.TAB_DEC_PACKAGE_DESCRIPTION, \
DT.TAB_DEC_BINARY_ABSTRACT, DT.TAB_DEC_BINARY_DESCRIPTION]:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_MULTI_ENTRY_EXIST % StrName.split()[1], \
ExtraData=File.Path)
if IsIncludeFile:
if StrName not in NewLines:
NewLines.append((Line[:Line.find(u'#language')]).strip())
else:
NewLines.append((Line[:Line.find(u'#language')]).strip())
NewLines.append((Line[Line.find(u'#language'):Line.find(u'"')]).strip())
NewLines.append((Line[Line.find(u'"'):]).strip())
else:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
elif Line.startswith(u'#language'):
if len(Line.split()) == 2:
if IsIncludeFile:
if StrName not in NewLines:
NewLines.append(StrName)
else:
NewLines.append(StrName)
NewLines.append(Line)
elif len(Line.split()) > 2 and Line.find(u'"') > 0:
if IsIncludeFile:
if StrName not in NewLines:
NewLines.append(StrName)
else:
NewLines.append(StrName)
NewLines.append((Line[:Line.find(u'"')]).strip())
NewLines.append((Line[Line.find(u'"'):]).strip())
else:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
elif Line.startswith(u'"'):
if u'#string' in Line or u'#language' in Line:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
NewLines.append(Line)
else:
print(Line)
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
if StrName and not StrName.split()[1].startswith(u'STR_'):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_STRNAME_FORMAT_ERROR % StrName.split()[1], \
ExtraData=File.Path)
if StrName and not NewLines:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNI_MISS_LANGENTRY % StrName, \
ExtraData=File.Path)
#
# Check Abstract, Description, BinaryAbstract and BinaryDescription order,
# should be Abstract, Description, BinaryAbstract, BinaryDescription
AbstractPosition = -1
DescriptionPosition = -1
BinaryAbstractPosition = -1
BinaryDescriptionPosition = -1
for StrName in ExistStrNameList:
if DT.TAB_HEADER_ABSTRACT.upper() in StrName:
if 'BINARY' in StrName:
BinaryAbstractPosition = ExistStrNameList.index(StrName)
else:
AbstractPosition = ExistStrNameList.index(StrName)
if DT.TAB_HEADER_DESCRIPTION.upper() in StrName:
if 'BINARY' in StrName:
BinaryDescriptionPosition = ExistStrNameList.index(StrName)
else:
DescriptionPosition = ExistStrNameList.index(StrName)
OrderList = sorted([AbstractPosition, DescriptionPosition])
BinaryOrderList = sorted([BinaryAbstractPosition, BinaryDescriptionPosition])
Min = OrderList[0]
Max = OrderList[1]
BinaryMin = BinaryOrderList[0]
BinaryMax = BinaryOrderList[1]
if BinaryDescriptionPosition > -1:
if not(BinaryDescriptionPosition == BinaryMax and BinaryAbstractPosition == BinaryMin and \
BinaryMax > Max):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_ENTRY_ORDER_WRONG, \
ExtraData=File.Path)
elif BinaryAbstractPosition > -1:
if not(BinaryAbstractPosition > Max):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_ENTRY_ORDER_WRONG, \
ExtraData=File.Path)
if DescriptionPosition > -1:
if not(DescriptionPosition == Max and AbstractPosition == Min and \
DescriptionPosition > AbstractPosition):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_ENTRY_ORDER_WRONG, \
ExtraData=File.Path)
if not self.UniFileHeader:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
Message = ST.ERR_NO_SOURCE_HEADER,
ExtraData=File.Path)
return NewLines
#
# Load a .uni file
#
def LoadUniFile(self, File = None):
if File is None:
EdkLogger.Error("Unicode File Parser",
ToolError.PARSER_ERROR,
Message='No unicode file is given',
ExtraData='')  # File is None here, so File.Path would raise
self.File = File
#
# Process special char in file
#
Lines = self.PreProcess(File)
#
# Get Unicode Information
#
for IndexI in range(len(Lines)):
Line = Lines[IndexI]
if (IndexI + 1) < len(Lines):
SecondLine = Lines[IndexI + 1]
if (IndexI + 2) < len(Lines):
ThirdLine = Lines[IndexI + 2]
#
# Get Language def information
#
if Line.find(u'#langdef ') >= 0:
self.GetLangDef(File, Line + u' ' + SecondLine)
continue
Name = ''
Language = ''
Value = ''
CombineToken = False
#
# Get string def information format as below
#
# #string MY_STRING_1
# #language eng
# "My first English string line 1"
# "My first English string line 2"
# #string MY_STRING_1
# #language spa
# "Mi segunda secuencia 1"
# "Mi segunda secuencia 2"
#
if Line.find(u'#string ') >= 0 and Line.find(u'#language ') < 0 and \
SecondLine.find(u'#string ') < 0 and SecondLine.find(u'#language ') >= 0 and \
ThirdLine.find(u'#string ') < 0 and ThirdLine.find(u'#language ') < 0:
if Line.find('"') > 0 or SecondLine.find('"') > 0:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
Message=ST.ERR_UNIPARSE_DBLQUOTE_UNMATCHED,
ExtraData=File.Path)
Name = Line[Line.find(u'#string ') + len(u'#string ') : ].strip(' ')
Language = SecondLine[SecondLine.find(u'#language ') + len(u'#language ') : ].strip(' ')
for IndexJ in range(IndexI + 2, len(Lines)):
if Lines[IndexJ].find(u'#string ') < 0 and Lines[IndexJ].find(u'#language ') < 0 and \
Lines[IndexJ].strip().startswith(u'"') and Lines[IndexJ].strip().endswith(u'"'):
if Lines[IndexJ][-2] == ' ':
CombineToken = True
if CombineToken:
if Lines[IndexJ].strip()[1:-1].strip():
Value = Value + Lines[IndexJ].strip()[1:-1].rstrip() + ' '
else:
Value = Value + Lines[IndexJ].strip()[1:-1]
CombineToken = False
else:
Value = Value + Lines[IndexJ].strip()[1:-1] + '\r\n'
else:
IndexI = IndexJ
break
if Value.endswith('\r\n'):
Value = Value[: Value.rfind('\r\n')]
Language = GetLanguageCode(Language, self.IsCompatibleMode, self.File)
self.AddStringToList(Name, Language, Value)
continue
#
# Load multiple .uni files
#
def LoadUniFiles(self, FileList):
if len(FileList) > 0:
for File in FileList:
FilePath = File.Path.strip()
if FilePath.endswith('.uni') or FilePath.endswith('.UNI') or FilePath.endswith('.Uni'):
self.LoadUniFile(File)
#
# Add a string to list
#
def AddStringToList(self, Name, Language, Value, Token = 0, Referenced = False, UseOtherLangDef = '', Index = -1):
for LangNameItem in self.LanguageDef:
if Language == LangNameItem[0]:
break
if Language not in self.OrderedStringList:
self.OrderedStringList[Language] = []
self.OrderedStringDict[Language] = {}
IsAdded = True
if Name in self.OrderedStringDict[Language]:
IsAdded = False
if Value is not None:
ItemIndexInList = self.OrderedStringDict[Language][Name]
Item = self.OrderedStringList[Language][ItemIndexInList]
Item.UpdateValue(Value)
Item.UseOtherLangDef = ''
if IsAdded:
Token = len(self.OrderedStringList[Language])
if Index == -1:
self.OrderedStringList[Language].append(StringDefClassObject(Name,
Value,
Referenced,
Token,
UseOtherLangDef))
self.OrderedStringDict[Language][Name] = Token
for LangName in self.LanguageDef:
#
# New STRING token will be added into all language string lists.
# so that the unique STRING identifier is reserved for all languages in the package list.
#
if LangName[0] != Language:
if UseOtherLangDef != '':
OtherLangDef = UseOtherLangDef
else:
OtherLangDef = Language
self.OrderedStringList[LangName[0]].append(StringDefClassObject(Name,
'',
Referenced,
Token,
OtherLangDef))
self.OrderedStringDict[LangName[0]][Name] = len(self.OrderedStringList[LangName[0]]) - 1
else:
self.OrderedStringList[Language].insert(Index, StringDefClassObject(Name,
Value,
Referenced,
Token,
UseOtherLangDef))
self.OrderedStringDict[Language][Name] = Index
#
# Set the string as referenced
#
def SetStringReferenced(self, Name):
#
# String tokens are added in the same order to all language string lists,
# so only the status of the string token in the first language string list
# needs to be updated.
#
Lang = self.LanguageDef[0][0]
if Name in self.OrderedStringDict[Lang]:
ItemIndexInList = self.OrderedStringDict[Lang][Name]
Item = self.OrderedStringList[Lang][ItemIndexInList]
Item.Referenced = True
#
# Search the string in language definition by Name
#
def FindStringValue(self, Name, Lang):
if Name in self.OrderedStringDict[Lang]:
ItemIndexInList = self.OrderedStringDict[Lang][Name]
return self.OrderedStringList[Lang][ItemIndexInList]
return None
#
# Search the string in language definition by Token
#
def FindByToken(self, Token, Lang):
for Item in self.OrderedStringList[Lang]:
if Item.Token == Token:
return Item
return None
#
# Re-order strings and re-generate tokens
#
def ReToken(self):
if len(self.LanguageDef) == 0:
return None
#
# Re-token all language strings according to the status of the string tokens in the first language string list.
#
FirstLangName = self.LanguageDef[0][0]
# Convert the OrderedStringList to OrderedStringListByToken in order to facilitate future searches by token
for LangNameItem in self.LanguageDef:
self.OrderedStringListByToken[LangNameItem[0]] = {}
#
# Use small token values for all referenced string tokens.
#
RefToken = 0
for Index in range (0, len (self.OrderedStringList[FirstLangName])):
FirstLangItem = self.OrderedStringList[FirstLangName][Index]
if FirstLangItem.Referenced == True:
for LangNameItem in self.LanguageDef:
LangName = LangNameItem[0]
OtherLangItem = self.OrderedStringList[LangName][Index]
OtherLangItem.Referenced = True
OtherLangItem.Token = RefToken
self.OrderedStringListByToken[LangName][OtherLangItem.Token] = OtherLangItem
RefToken = RefToken + 1
#
# Use large token values for all unreferenced string tokens.
#
UnRefToken = 0
for Index in range (0, len (self.OrderedStringList[FirstLangName])):
FirstLangItem = self.OrderedStringList[FirstLangName][Index]
if FirstLangItem.Referenced == False:
for LangNameItem in self.LanguageDef:
LangName = LangNameItem[0]
OtherLangItem = self.OrderedStringList[LangName][Index]
OtherLangItem.Token = RefToken + UnRefToken
self.OrderedStringListByToken[LangName][OtherLangItem.Token] = OtherLangItem
UnRefToken = UnRefToken + 1
#
# Show the instance itself
#
def ShowMe(self):
print(self.LanguageDef)
#print self.OrderedStringList
for Item in self.OrderedStringList:
print(Item)
for Member in self.OrderedStringList[Item]:
print(str(Member))
#
# Read content from '!include' UNI file
#
def ReadIncludeUNIfile(self, FilaPath):
if self.File:
pass
if not os.path.exists(FilaPath) or not os.path.isfile(FilaPath):
EdkLogger.Error("Unicode File Parser",
ToolError.FILE_NOT_FOUND,
ExtraData=FilaPath)
try:
    try:
        FileIn = codecs.open(FilaPath, mode='rb', encoding='utf_8').readlines()
    except UnicodeError:
        # Fall back from UTF-8 to the UTF-16 variants
        try:
            FileIn = codecs.open(FilaPath, mode='rb', encoding='utf_16').readlines()
        except UnicodeError:
            FileIn = codecs.open(FilaPath, mode='rb', encoding='utf_16_le').readlines()
except BaseException:
    EdkLogger.Error("Unicode File Parser", ToolError.FILE_OPEN_FAILURE, ExtraData=FilaPath)
return FileIn
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Library/UniClassObject.py
|
## @file
# This file is used to define common static strings and global data used by UPT
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
'''
GlobalData
'''
#
# The workspace directory
#
gWORKSPACE = '.'
gPACKAGE_PATH = None
#
# INF module directory
#
gINF_MODULE_DIR = "."
gINF_MODULE_NAME = ''
#
# the directory that holds upt related files
#
gUPT_DIR = r"Conf/upt/"
#
# Log file for invalid meta-data files during force removing
#
gINVALID_MODULE_FILE = gUPT_DIR + r"Invalid_Modules.log"
#
# File name for content zip file in the distribution
#
gCONTENT_FILE = "dist.content"
#
# File name for XML file in the distribution
#
gDESC_FILE = 'dist.pkg'
#
# Case Insensitive flag
#
gCASE_INSENSITIVE = ''
#
# All Files dictionary
#
gALL_FILES = {}
#
# Database instance
#
gDB = None
#
# list of files that are found at module level but not in INF files;
# items are (File, ModulePath), all these should be relative to $(WORKSPACE)
#
gMISS_FILE_IN_MODLIST = []
#
# Global Current Line
#
gINF_CURRENT_LINE = None
#
# Global pkg list
#
gWSPKG_LIST = []
#
# Flag used to take WARN as ERROR.
# By default, only ERROR message will break the tools execution.
#
gWARNING_AS_ERROR = False
#
# Used to specify the temp directory to hold the unpacked distribution files
#
gUNPACK_DIR = []
#
# Flag used to mark whether the INF file is Binary INF or not.
#
gIS_BINARY_INF = False
#
# Used by FileHook module.
#
gRECOVERMGR = None
#
# Used by PCD parser
#
gPackageDict = {}
#
# Used by Library instance parser
# {FilePath: FileObj}
#
gLIBINSTANCEDICT = {}
#
# Store the list of DIST
#
gTO_BE_INSTALLED_DIST_LIST = []
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Library/GlobalData.py
|
## @file
# This file is used to define common parsing related functions used in parsing
# INF/DEC/DSC process
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
Parsing
'''
from __future__ import absolute_import
##
# Import Modules
#
import os.path
import re
from Library.StringUtils import RaiseParserError
from Library.StringUtils import GetSplitValueList
from Library.StringUtils import CheckFileType
from Library.StringUtils import CheckFileExist
from Library.StringUtils import CleanString
from Library.StringUtils import NormPath
from Logger.ToolError import FILE_NOT_FOUND
from Logger.ToolError import FatalError
from Logger.ToolError import FORMAT_INVALID
from Library import DataType
from Library.Misc import GuidStructureStringToGuidString
from Library.Misc import CheckGuidRegFormat
from Logger import StringTable as ST
import Logger.Log as Logger
from Parser.DecParser import Dec
from . import GlobalData
gPKG_INFO_DICT = {}
## GetBuildOption
#
# Parse a string with format "[<Family>:]<ToolFlag>=Flag"
# Return (Family, ToolChain, Flag)
#
# @param String: String with BuildOption statement
# @param File: The file which defines build option, used in error report
#
def GetBuildOption(String, File, LineNo= -1):
(Family, ToolChain, Flag) = ('', '', '')
if String.find(DataType.TAB_EQUAL_SPLIT) < 0:
RaiseParserError(String, 'BuildOptions', File, \
'[<Family>:]<ToolFlag>=Flag', LineNo)
else:
List = GetSplitValueList(String, DataType.TAB_EQUAL_SPLIT, MaxSplit=1)
if List[0].find(':') > -1:
Family = List[0][ : List[0].find(':')].strip()
ToolChain = List[0][List[0].find(':') + 1 : ].strip()
else:
ToolChain = List[0].strip()
Flag = List[1].strip()
return (Family, ToolChain, Flag)
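## _DemoGetBuildOption
#
# Editor's illustrative sketch, not part of the original module: the flag
# string is hypothetical, and the File argument is only consulted on error.
#
def _DemoGetBuildOption():
    assert GetBuildOption('GCC:*_*_*_CC_FLAGS = -Wall', None) == \
        ('GCC', '*_*_*_CC_FLAGS', '-Wall')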
## Get Library Class
#
# Get Library of Dsc as <LibraryClassKeyWord>|<LibraryInstance>
#
# @param Item: String as <LibraryClassKeyWord>|<LibraryInstance>
# @param ContainerFile: The file which describes the library class, used for
# error report
#
def GetLibraryClass(Item, ContainerFile, WorkspaceDir, LineNo= -1):
List = GetSplitValueList(Item[0])
SupMod = DataType.SUP_MODULE_LIST_STRING
if len(List) != 2:
RaiseParserError(Item[0], 'LibraryClasses', ContainerFile, \
'<LibraryClassKeyWord>|<LibraryInstance>')
else:
CheckFileType(List[1], '.Inf', ContainerFile, \
'library class instance', Item[0], LineNo)
CheckFileExist(WorkspaceDir, List[1], ContainerFile, \
'LibraryClasses', Item[0], LineNo)
if Item[1] != '':
SupMod = Item[1]
return (List[0], List[1], SupMod)
## Get Library Class of Inf
#
# Get Library of Dsc as <LibraryClassKeyWord>[|<LibraryInstance>]
# [|<TokenSpaceGuidCName>.<PcdCName>]
#
# @param Item: String as <LibraryClassKeyWord>|<LibraryInstance>
# @param ContainerFile: The file which describes the library class, used for
# error report
#
def GetLibraryClassOfInf(Item, ContainerFile, WorkspaceDir, LineNo= -1):
ItemList = GetSplitValueList((Item[0] + DataType.TAB_VALUE_SPLIT * 2))
SupMod = DataType.SUP_MODULE_LIST_STRING
if len(ItemList) > 5:
RaiseParserError\
(Item[0], 'LibraryClasses', ContainerFile, \
'<LibraryClassKeyWord>[|<LibraryInstance>]\
[|<TokenSpaceGuidCName>.<PcdCName>]')
else:
CheckFileType(ItemList[1], '.Inf', ContainerFile, 'LibraryClasses', \
Item[0], LineNo)
CheckFileExist(WorkspaceDir, ItemList[1], ContainerFile, \
'LibraryClasses', Item[0], LineNo)
if ItemList[2] != '':
CheckPcdTokenInfo(ItemList[2], 'LibraryClasses', \
ContainerFile, LineNo)
if Item[1] != '':
SupMod = Item[1]
return (ItemList[0], ItemList[1], ItemList[2], SupMod)
## CheckPcdTokenInfo
#
# Check if PcdTokenInfo is following <TokenSpaceGuidCName>.<PcdCName>
#
# @param TokenInfoString: String to be checked
# @param Section: Used for error report
# @param File: Used for error report
#
def CheckPcdTokenInfo(TokenInfoString, Section, File, LineNo= -1):
Format = '<TokenSpaceGuidCName>.<PcdCName>'
if TokenInfoString != '' and TokenInfoString is not None:
TokenInfoList = GetSplitValueList(TokenInfoString, DataType.TAB_SPLIT)
if len(TokenInfoList) == 2:
return True
RaiseParserError(TokenInfoString, Section, File, Format, LineNo)
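## _DemoCheckPcdTokenInfo
#
# Editor's illustrative sketch, not part of the original module: a well
# formed <TokenSpaceGuidCName>.<PcdCName> pair passes (the token names and
# container file are hypothetical); a malformed pair would raise through
# RaiseParserError instead of returning.
#
def _DemoCheckPcdTokenInfo():
    assert CheckPcdTokenInfo('gTokenSpaceGuid.PcdSample', 'Pcds', 'Sample.dsc')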
## Get Pcd
#
# Get Pcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|<Value>
# [|<Type>|<MaximumDatumSize>]
#
# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>|
# <Value>[|<Type>|<MaximumDatumSize>]
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
def GetPcd(Item, Type, ContainerFile, LineNo= -1):
TokenGuid, TokenName, Value, MaximumDatumSize, Token = '', '', '', '', ''
List = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT * 2)
if len(List) < 4 or len(List) > 6:
RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
'<PcdTokenSpaceGuidCName>.<TokenCName>|<Value>\
[|<Type>|<MaximumDatumSize>]', LineNo)
else:
Value = List[1]
MaximumDatumSize = List[2]
Token = List[3]
if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo):
(TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT)
return (TokenName, TokenGuid, Value, MaximumDatumSize, Token, Type)
## Get FeatureFlagPcd
#
# Get FeatureFlagPcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|TRUE/FALSE
#
# @param Item: String as <PcdTokenSpaceGuidCName>
# .<TokenCName>|TRUE/FALSE
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
def GetFeatureFlagPcd(Item, Type, ContainerFile, LineNo= -1):
TokenGuid, TokenName, Value = '', '', ''
List = GetSplitValueList(Item)
if len(List) != 2:
RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
'<PcdTokenSpaceGuidCName>.<TokenCName>|TRUE/FALSE', \
LineNo)
else:
Value = List[1]
if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo):
(TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT)
return (TokenName, TokenGuid, Value, Type)
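## _DemoGetFeatureFlagPcd
#
# Editor's illustrative sketch, not part of the original module: shows the
# tuple layout returned for a hypothetical feature flag PCD entry.
#
def _DemoGetFeatureFlagPcd():
    assert GetFeatureFlagPcd('gTokenSpaceGuid.PcdFlag|TRUE', 'FeatureFlag', 'Sample.dsc') == \
        ('PcdFlag', 'gTokenSpaceGuid', 'TRUE', 'FeatureFlag')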
## Get DynamicDefaultPcd
#
# Get DynamicDefaultPcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>
# |<Value>[|<DatumTyp>[|<MaxDatumSize>]]
#
# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>|<Value>
# [|<DatumTyp>[|<MaxDatumSize>]]
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
def GetDynamicDefaultPcd(Item, Type, ContainerFile, LineNo= -1):
TokenGuid, TokenName, Value, DatumTyp, MaxDatumSize = '', '', '', '', ''
List = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT * 2)
if len(List) < 4 or len(List) > 8:
RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
'<PcdTokenSpaceGuidCName>.<TokenCName>|<Value>\
[|<DatumTyp>[|<MaxDatumSize>]]', LineNo)
else:
Value = List[1]
DatumTyp = List[2]
MaxDatumSize = List[3]
if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo):
(TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT)
return (TokenName, TokenGuid, Value, DatumTyp, MaxDatumSize, Type)
## Get DynamicHiiPcd
#
# Get DynamicHiiPcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|<String>|
# <VariableGuidCName>|<VariableOffset>[|<DefaultValue>[|<MaximumDatumSize>]]
#
# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>|<String>|
# <VariableGuidCName>|<VariableOffset>[|<DefaultValue>[|<MaximumDatumSize>]]
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
def GetDynamicHiiPcd(Item, Type, ContainerFile, LineNo= -1):
TokenGuid, TokenName, List1, List2, List3, List4, List5 = \
'', '', '', '', '', '', ''
List = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT * 2)
if len(List) < 6 or len(List) > 8:
RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
'<PcdTokenSpaceGuidCName>.<TokenCName>|<String>|\
<VariableGuidCName>|<VariableOffset>[|<DefaultValue>\
[|<MaximumDatumSize>]]', LineNo)
else:
List1, List2, List3, List4, List5 = \
List[1], List[2], List[3], List[4], List[5]
if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo):
(TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT)
return (TokenName, TokenGuid, List1, List2, List3, List4, List5, Type)
## Get DynamicVpdPcd
#
# Get DynamicVpdPcd of Dsc as <PcdTokenSpaceGuidCName>.<TokenCName>|
# <VpdOffset>[|<MaximumDatumSize>]
#
# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>|
# <VpdOffset>[|<MaximumDatumSize>]
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
def GetDynamicVpdPcd(Item, Type, ContainerFile, LineNo= -1):
TokenGuid, TokenName, List1, List2 = '', '', '', ''
List = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT)
if len(List) < 3 or len(List) > 4:
RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
'<PcdTokenSpaceGuidCName>.<TokenCName>|<VpdOffset>\
[|<MaximumDatumSize>]', LineNo)
else:
List1, List2 = List[1], List[2]
if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo):
(TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT)
return (TokenName, TokenGuid, List1, List2, Type)
## GetComponent
#
# Parse block of the components defined in dsc file
# Set KeyValues as [ ['component name', [lib1, lib2, lib3],
# [bo1, bo2, bo3], [pcd1, pcd2, pcd3]], ...]
#
# @param Lines: The content to be parsed
# @param KeyValues: To store data after parsing
#
def GetComponent(Lines, KeyValues):
(FindBlock, FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
FindPcdsPatchableInModule, FindPcdsFixedAtBuild, FindPcdsDynamic, \
FindPcdsDynamicEx) = (False, False, False, False, False, False, False, \
False)
ListItem = None
LibraryClassItem = []
BuildOption = []
Pcd = []
for Line in Lines:
Line = Line[0]
#
# Ignore !include statement
#
if Line.upper().find(DataType.TAB_INCLUDE.upper() + ' ') > -1 or \
Line.upper().find(DataType.TAB_DEFINE + ' ') > -1:
continue
if FindBlock == False:
ListItem = Line
#
# find '{' at line tail
#
if Line.endswith('{'):
FindBlock = True
ListItem = CleanString(Line.rsplit('{', 1)[0], \
DataType.TAB_COMMENT_SPLIT)
#
# Parse a block content
#
if FindBlock:
if Line.find('<LibraryClasses>') != -1:
(FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
FindPcdsDynamic, FindPcdsDynamicEx) = \
(True, False, False, False, False, False, False)
continue
if Line.find('<BuildOptions>') != -1:
(FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
FindPcdsDynamic, FindPcdsDynamicEx) = \
(False, True, False, False, False, False, False)
continue
if Line.find('<PcdsFeatureFlag>') != -1:
(FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
FindPcdsDynamic, FindPcdsDynamicEx) = \
(False, False, True, False, False, False, False)
continue
if Line.find('<PcdsPatchableInModule>') != -1:
(FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
FindPcdsDynamic, FindPcdsDynamicEx) = \
(False, False, False, True, False, False, False)
continue
if Line.find('<PcdsFixedAtBuild>') != -1:
(FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
FindPcdsDynamic, FindPcdsDynamicEx) = \
(False, False, False, False, True, False, False)
continue
if Line.find('<PcdsDynamic>') != -1:
(FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
FindPcdsDynamic, FindPcdsDynamicEx) = \
(False, False, False, False, False, True, False)
continue
if Line.find('<PcdsDynamicEx>') != -1:
(FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
FindPcdsDynamic, FindPcdsDynamicEx) = \
(False, False, False, False, False, False, True)
continue
if Line.endswith('}'):
#
# find '}' at line tail
#
KeyValues.append([ListItem, LibraryClassItem, \
BuildOption, Pcd])
(FindBlock, FindLibraryClass, FindBuildOption, \
FindPcdsFeatureFlag, FindPcdsPatchableInModule, \
FindPcdsFixedAtBuild, FindPcdsDynamic, FindPcdsDynamicEx) = \
(False, False, False, False, False, False, False, False)
LibraryClassItem, BuildOption, Pcd = [], [], []
continue
if FindBlock:
if FindLibraryClass:
LibraryClassItem.append(Line)
elif FindBuildOption:
BuildOption.append(Line)
elif FindPcdsFeatureFlag:
Pcd.append((DataType.TAB_PCDS_FEATURE_FLAG_NULL, Line))
elif FindPcdsPatchableInModule:
Pcd.append((DataType.TAB_PCDS_PATCHABLE_IN_MODULE_NULL, Line))
elif FindPcdsFixedAtBuild:
Pcd.append((DataType.TAB_PCDS_FIXED_AT_BUILD_NULL, Line))
elif FindPcdsDynamic:
Pcd.append((DataType.TAB_PCDS_DYNAMIC_DEFAULT_NULL, Line))
elif FindPcdsDynamicEx:
Pcd.append((DataType.TAB_PCDS_DYNAMIC_EX_DEFAULT_NULL, Line))
else:
KeyValues.append([ListItem, [], [], []])
return True
## GetExec
#
# Parse a string with format "InfFilename [EXEC = ExecFilename]"
# Return (InfFilename, ExecFilename)
#
# @param String: String with EXEC statement
#
def GetExec(String):
InfFilename = ''
ExecFilename = ''
if String.find('EXEC') > -1:
InfFilename = String[ : String.find('EXEC')].strip()
ExecFilename = String[String.find('EXEC') + len('EXEC') : ].strip()
else:
InfFilename = String.strip()
return (InfFilename, ExecFilename)
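## _DemoGetExec
#
# Editor's illustrative sketch, not part of the original module: the string
# is split at the EXEC keyword; note that a '=' after EXEC is kept in the
# returned ExecFilename. The sample file names are hypothetical.
#
def _DemoGetExec():
    assert GetExec('Sample.inf EXEC = run.exe') == ('Sample.inf', '= run.exe')
    assert GetExec('Sample.inf') == ('Sample.inf', '')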
## GetComponents
#
# Parse block of the components defined in dsc file
# Set KeyValues as [ ['component name', [lib1, lib2, lib3], [bo1, bo2, bo3],
# [pcd1, pcd2, pcd3]], ...]
#
# @param Lines: The content to be parsed
# @param Key: Reserved
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
# @retval True Get component successfully
#
def GetComponents(Lines, KeyValues, CommentCharacter):
if Lines.find(DataType.TAB_SECTION_END) > -1:
Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
(FindBlock, FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
FindPcdsPatchableInModule, FindPcdsFixedAtBuild, FindPcdsDynamic, \
FindPcdsDynamicEx) = \
(False, False, False, False, False, False, False, False)
ListItem = None
LibraryClassItem = []
BuildOption = []
Pcd = []
LineList = Lines.split('\n')
for Line in LineList:
Line = CleanString(Line, CommentCharacter)
if Line is None or Line == '':
continue
if FindBlock == False:
ListItem = Line
#
# find '{' at line tail
#
if Line.endswith('{'):
FindBlock = True
ListItem = CleanString(Line.rsplit('{', 1)[0], CommentCharacter)
#
# Parse a block content
#
if FindBlock:
if Line.find('<LibraryClasses>') != -1:
(FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
FindPcdsDynamic, FindPcdsDynamicEx) = \
(True, False, False, False, False, False, False)
continue
if Line.find('<BuildOptions>') != -1:
(FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
FindPcdsDynamic, FindPcdsDynamicEx) = \
(False, True, False, False, False, False, False)
continue
if Line.find('<PcdsFeatureFlag>') != -1:
(FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
FindPcdsDynamic, FindPcdsDynamicEx) = \
(False, False, True, False, False, False, False)
continue
if Line.find('<PcdsPatchableInModule>') != -1:
(FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
FindPcdsDynamic, FindPcdsDynamicEx) = \
(False, False, False, True, False, False, False)
continue
if Line.find('<PcdsFixedAtBuild>') != -1:
(FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
FindPcdsDynamic, FindPcdsDynamicEx) = \
(False, False, False, False, True, False, False)
continue
if Line.find('<PcdsDynamic>') != -1:
(FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
FindPcdsDynamic, FindPcdsDynamicEx) = \
(False, False, False, False, False, True, False)
continue
if Line.find('<PcdsDynamicEx>') != -1:
(FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
FindPcdsDynamic, FindPcdsDynamicEx) = \
(False, False, False, False, False, False, True)
continue
if Line.endswith('}'):
#
# find '}' at line tail
#
KeyValues.append([ListItem, LibraryClassItem, BuildOption, \
Pcd])
(FindBlock, FindLibraryClass, FindBuildOption, \
FindPcdsFeatureFlag, FindPcdsPatchableInModule, \
FindPcdsFixedAtBuild, FindPcdsDynamic, FindPcdsDynamicEx) = \
(False, False, False, False, False, False, False, False)
LibraryClassItem, BuildOption, Pcd = [], [], []
continue
if FindBlock:
if FindLibraryClass:
LibraryClassItem.append(Line)
elif FindBuildOption:
BuildOption.append(Line)
elif FindPcdsFeatureFlag:
Pcd.append((DataType.TAB_PCDS_FEATURE_FLAG, Line))
elif FindPcdsPatchableInModule:
Pcd.append((DataType.TAB_PCDS_PATCHABLE_IN_MODULE, Line))
elif FindPcdsFixedAtBuild:
Pcd.append((DataType.TAB_PCDS_FIXED_AT_BUILD, Line))
elif FindPcdsDynamic:
Pcd.append((DataType.TAB_PCDS_DYNAMIC, Line))
elif FindPcdsDynamicEx:
Pcd.append((DataType.TAB_PCDS_DYNAMIC_EX, Line))
else:
KeyValues.append([ListItem, [], [], []])
return True
## Get Source
#
# Get Source of Inf as <Filename>[|<Family>[|<TagName>[|<ToolCode>
# [|<PcdFeatureFlag>]]]]
#
# @param Item: String as <Filename>[|<Family>[|<TagName>[|<ToolCode>
# [|<PcdFeatureFlag>]]]]
# @param ContainerFile: The file which describes the library class, used
# for error report
#
def GetSource(Item, ContainerFile, FileRelativePath, LineNo= -1):
ItemNew = Item + DataType.TAB_VALUE_SPLIT * 4
List = GetSplitValueList(ItemNew)
if len(List) < 5 or len(List) > 9:
RaiseParserError(Item, 'Sources', ContainerFile, \
'<Filename>[|<Family>[|<TagName>[|<ToolCode>\
[|<PcdFeatureFlag>]]]]', LineNo)
List[0] = NormPath(List[0])
CheckFileExist(FileRelativePath, List[0], ContainerFile, 'Sources', \
Item, LineNo)
if List[4] != '':
CheckPcdTokenInfo(List[4], 'Sources', ContainerFile, LineNo)
return (List[0], List[1], List[2], List[3], List[4])
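#
# A usage sketch, assuming DataType.TAB_VALUE_SPLIT is '|' and that ./Foo.c
# exists (file and family names are hypothetical):
#
#   GetSource('Foo.c|MSFT', 'Module.inf', '.')
#   -> ('Foo.c', 'MSFT', '', '', '')
#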
## Get Binary
#
# Get Binary of Inf as <Filename>[|<Family>[|<TagName>[|<ToolCode>
# [|<PcdFeatureFlag>]]]]
#
# @param Item: String as <Filename>[|<Family>[|<TagName>
# [|<ToolCode>[|<PcdFeatureFlag>]]]]
# @param ContainerFile: The file which describes the library class,
# used for error report
#
def GetBinary(Item, ContainerFile, LineNo= -1):
ItemNew = Item + DataType.TAB_VALUE_SPLIT
List = GetSplitValueList(ItemNew)
if len(List) < 3 or len(List) > 5:
RaiseParserError(Item, 'Binaries', ContainerFile, \
"<FileType>|<Filename>[|<Target>\
[|<TokenSpaceGuidCName>.<PcdCName>]]", LineNo)
if len(List) >= 4:
if List[3] != '':
CheckPcdTokenInfo(List[3], 'Binaries', ContainerFile, LineNo)
return (List[0], List[1], List[2], List[3])
elif len(List) == 3:
return (List[0], List[1], List[2], '')
## Get Guids/Protocols/Ppis
#
# Get Guids/Protocols/Ppis of Inf as <GuidCName>[|<PcdFeatureFlag>]
#
# @param Item: String as <GuidCName>[|<PcdFeatureFlag>]
#
def GetGuidsProtocolsPpisOfInf(Item):
ItemNew = Item + DataType.TAB_VALUE_SPLIT
List = GetSplitValueList(ItemNew)
return (List[0], List[1])
## Get Guids/Protocols/Ppis
#
# Get Guids/Protocols/Ppis of Dec as <GuidCName>=<GuidValue>
#
# @param Item: String as <GuidCName>=<GuidValue>
# @param Type: Type of parsing string
# @param ContainerFile: The file which describes the library class,
# used for error report
#
def GetGuidsProtocolsPpisOfDec(Item, Type, ContainerFile, LineNo= -1):
List = GetSplitValueList(Item, DataType.TAB_EQUAL_SPLIT)
if len(List) != 2:
RaiseParserError(Item, Type, ContainerFile, '<CName>=<GuidValue>', \
LineNo)
#
#convert C-Format Guid to Register Format
#
if List[1][0] == '{' and List[1][-1] == '}':
RegisterFormatGuid = GuidStructureStringToGuidString(List[1])
if RegisterFormatGuid == '':
RaiseParserError(Item, Type, ContainerFile, \
'CFormat or RegisterFormat', LineNo)
else:
if CheckGuidRegFormat(List[1]):
RegisterFormatGuid = List[1]
else:
RaiseParserError(Item, Type, ContainerFile, \
'CFormat or RegisterFormat', LineNo)
return (List[0], RegisterFormatGuid)
## GetPackage
#
# Get Package of Inf as <PackagePath>[|<PcdFeatureFlag>]
#
# @param Item: String as <PackagePath>[|<PcdFeatureFlag>]
# @param ContainerFile: The file which describes the package, used for
#                       error report
# @param FileRelativePath: Base path used to check that the package file
#                          exists
#
def GetPackage(Item, ContainerFile, FileRelativePath, LineNo= -1):
ItemNew = Item + DataType.TAB_VALUE_SPLIT
List = GetSplitValueList(ItemNew)
CheckFileType(List[0], '.Dec', ContainerFile, 'package', List[0], LineNo)
CheckFileExist(FileRelativePath, List[0], ContainerFile, 'Packages', \
List[0], LineNo)
if List[1] != '':
CheckPcdTokenInfo(List[1], 'Packages', ContainerFile, LineNo)
return (List[0], List[1])
## Get Pcd Values of Inf
#
# Get Pcd of Inf as <TokenSpaceGuidCName>.<PcdCName>[|<Value>]
#
# @param Item: The string describes pcd
# @param Type: The type of Pcd
# @param File: The file which describes the pcd, used for error report
#
def GetPcdOfInf(Item, Type, File, LineNo):
Format = '<TokenSpaceGuidCName>.<PcdCName>[|<Value>]'
TokenGuid, TokenName, Value, InfType = '', '', '', ''
if Type == DataType.TAB_PCDS_FIXED_AT_BUILD:
InfType = DataType.TAB_INF_FIXED_PCD
elif Type == DataType.TAB_PCDS_PATCHABLE_IN_MODULE:
InfType = DataType.TAB_INF_PATCH_PCD
elif Type == DataType.TAB_PCDS_FEATURE_FLAG:
InfType = DataType.TAB_INF_FEATURE_PCD
elif Type == DataType.TAB_PCDS_DYNAMIC_EX:
InfType = DataType.TAB_INF_PCD_EX
elif Type == DataType.TAB_PCDS_DYNAMIC:
InfType = DataType.TAB_INF_PCD
List = GetSplitValueList(Item, DataType.TAB_VALUE_SPLIT, 1)
TokenInfo = GetSplitValueList(List[0], DataType.TAB_SPLIT)
if len(TokenInfo) != 2:
RaiseParserError(Item, InfType, File, Format, LineNo)
else:
TokenGuid = TokenInfo[0]
TokenName = TokenInfo[1]
if len(List) > 1:
Value = List[1]
else:
Value = None
return (TokenGuid, TokenName, Value, InfType)
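#
# A usage sketch (PCD names are hypothetical):
#
#   GetPcdOfInf('gTokenSpaceGuid.PcdFoo|0x1',
#               DataType.TAB_PCDS_FIXED_AT_BUILD, 'Module.inf', 1)
#   -> ('gTokenSpaceGuid', 'PcdFoo', '0x1', DataType.TAB_INF_FIXED_PCD)
#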
## Get Pcd Values of Dec
#
# Get Pcd of Dec as <TokenSpcCName>.<TokenCName>|<Value>|<DatumType>|<Token>
# @param Item: Pcd item
# @param Type: Pcd type
# @param File: Dec file
# @param LineNo: Line number
#
def GetPcdOfDec(Item, Type, File, LineNo= -1):
Format = '<TokenSpaceGuidCName>.<PcdCName>|<Value>|<DatumType>|<Token>'
TokenGuid, TokenName, Value, DatumType, Token = '', '', '', '', ''
List = GetSplitValueList(Item)
if len(List) != 4:
RaiseParserError(Item, 'Pcds' + Type, File, Format, LineNo)
else:
Value = List[1]
DatumType = List[2]
Token = List[3]
TokenInfo = GetSplitValueList(List[0], DataType.TAB_SPLIT)
if len(TokenInfo) != 2:
RaiseParserError(Item, 'Pcds' + Type, File, Format, LineNo)
else:
TokenGuid = TokenInfo[0]
TokenName = TokenInfo[1]
return (TokenGuid, TokenName, Value, DatumType, Token, Type)
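#
# A usage sketch (PCD names and values are hypothetical):
#
#   GetPcdOfDec('gTokenSpaceGuid.PcdFoo|0x1|UINT32|0x00000001',
#               'FixedAtBuild', 'Package.dec')
#   -> ('gTokenSpaceGuid', 'PcdFoo', '0x1', 'UINT32', '0x00000001',
#       'FixedAtBuild')
#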
## Parse DEFINE statement
#
# Get DEFINE macros
#
# @param LineValue: A DEFINE line value
# @param StartLine: A DEFINE start line
# @param Table: A table
# @param FileID: File ID
# @param Filename: File name
# @param SectionName: DEFINE section name
# @param SectionModel: DEFINE section model
# @param Arch: DEFINE arch
#
def ParseDefine(LineValue, StartLine, Table, FileID, SectionName, \
SectionModel, Arch):
Logger.Debug(Logger.DEBUG_2, ST.MSG_DEFINE_STATEMENT_FOUND % (LineValue, \
SectionName))
Define = \
GetSplitValueList(CleanString\
(LineValue[LineValue.upper().\
find(DataType.TAB_DEFINE.upper() + ' ') + \
len(DataType.TAB_DEFINE + ' ') : ]), \
DataType.TAB_EQUAL_SPLIT, 1)
Table.Insert(DataType.MODEL_META_DATA_DEFINE, Define[0], Define[1], '', \
'', '', Arch, SectionModel, FileID, StartLine, -1, \
StartLine, -1, 0)
## InsertSectionItems
#
# Insert item data of a section to a dict
#
# @param Model: A model
# @param CurrentSection: Current section
# @param SectionItemList: Section item list
# @param ArchList: Arch list
# @param ThirdList: Third list
# @param RecordSet: Record set
#
def InsertSectionItems(Model, SectionItemList, ArchList, \
ThirdList, RecordSet):
#
# Insert each item data of a section
#
for Index in range(0, len(ArchList)):
Arch = ArchList[Index]
Third = ThirdList[Index]
if Arch == '':
Arch = DataType.TAB_ARCH_COMMON
Records = RecordSet[Model]
for SectionItem in SectionItemList:
LineValue, StartLine, Comment = SectionItem[0], \
SectionItem[1], SectionItem[2]
Logger.Debug(4, ST.MSG_PARSING % LineValue)
#
            # Skip DEFINE statements here; they are handled separately
#
if LineValue.upper().find(DataType.TAB_DEFINE.upper() + ' ') > -1:
continue
#
            # Finally, record the item of this section
#
IdNum = -1
Records.append([LineValue, Arch, StartLine, IdNum, Third, Comment])
if RecordSet != {}:
RecordSet[Model] = Records
## GenMetaDatSectionItem
#
# @param Key: A key
# @param Value: A value
# @param List: A dict mapping each key to its list of values
#
def GenMetaDatSectionItem(Key, Value, List):
if Key not in List:
List[Key] = [Value]
else:
List[Key].append(Value)
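#
# A minimal sketch (key and values are hypothetical):
#
#   >>> SectionDict = {}
#   >>> GenMetaDatSectionItem('Sources', 'Foo.c', SectionDict)
#   >>> GenMetaDatSectionItem('Sources', 'Bar.c', SectionDict)
#   >>> SectionDict
#   {'Sources': ['Foo.c', 'Bar.c']}
#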
## GetPkgInfoFromDec
#
# get package name, guid, version info from dec files
#
# @param Path: File path
#
def GetPkgInfoFromDec(Path):
PkgName = None
PkgGuid = None
PkgVersion = None
Path = Path.replace('\\', '/')
if not os.path.exists(Path):
Logger.Error("\nUPT", FILE_NOT_FOUND, File=Path)
if Path in gPKG_INFO_DICT:
return gPKG_INFO_DICT[Path]
try:
DecParser = None
if Path not in GlobalData.gPackageDict:
DecParser = Dec(Path)
GlobalData.gPackageDict[Path] = DecParser
else:
DecParser = GlobalData.gPackageDict[Path]
PkgName = DecParser.GetPackageName()
PkgGuid = DecParser.GetPackageGuid()
PkgVersion = DecParser.GetPackageVersion()
gPKG_INFO_DICT[Path] = (PkgName, PkgGuid, PkgVersion)
return PkgName, PkgGuid, PkgVersion
except FatalError:
return None, None, None
## GetWorkspacePackage
#
# Get a list of workspace package information.
#
def GetWorkspacePackage():
DecFileList = []
WorkspaceDir = GlobalData.gWORKSPACE
PackageDir = GlobalData.gPACKAGE_PATH
for PkgRoot in [WorkspaceDir] + PackageDir:
for Root, Dirs, Files in os.walk(PkgRoot):
if 'CVS' in Dirs:
Dirs.remove('CVS')
if '.svn' in Dirs:
Dirs.remove('.svn')
            #
            # prune hidden directories in place so os.walk will skip them
            #
            Dirs[:] = [Dir for Dir in Dirs if not Dir.startswith('.')]
for FileSp in Files:
if FileSp.startswith('.'):
continue
Ext = os.path.splitext(FileSp)[1]
if Ext.lower() in ['.dec']:
DecFileList.append\
(os.path.normpath(os.path.join(Root, FileSp)))
#
    # extract package name, guid, and version info from the DEC file list
#
PkgList = []
for DecFile in DecFileList:
(PkgName, PkgGuid, PkgVersion) = GetPkgInfoFromDec(DecFile)
if PkgName and PkgGuid and PkgVersion:
PkgList.append((PkgName, PkgGuid, PkgVersion, DecFile))
return PkgList
## GetWorkspaceModule
#
# Get a list of workspace modules.
#
def GetWorkspaceModule():
InfFileList = []
WorkspaceDir = GlobalData.gWORKSPACE
for Root, Dirs, Files in os.walk(WorkspaceDir):
if 'CVS' in Dirs:
Dirs.remove('CVS')
if '.svn' in Dirs:
Dirs.remove('.svn')
if 'Build' in Dirs:
Dirs.remove('Build')
        #
        # prune hidden directories in place so os.walk will skip them
        #
        Dirs[:] = [Dir for Dir in Dirs if not Dir.startswith('.')]
for FileSp in Files:
if FileSp.startswith('.'):
continue
Ext = os.path.splitext(FileSp)[1]
if Ext.lower() in ['.inf']:
InfFileList.append\
(os.path.normpath(os.path.join(Root, FileSp)))
return InfFileList
## MacroParser used to parse macro definition
#
# @param Line: The content contain linestring and line number
# @param FileName: The meta-file file name
# @param SectionType: Section for the Line belong to
# @param FileLocalMacros: A list contain Macro defined in [Defines] section.
#
def MacroParser(Line, FileName, SectionType, FileLocalMacros):
MacroDefPattern = re.compile("^(DEFINE)[ \t]+")
LineContent = Line[0]
LineNo = Line[1]
Match = MacroDefPattern.match(LineContent)
if not Match:
#
        # Not a 'DEFINE' statement, nothing to parse here
#
return None, None
TokenList = GetSplitValueList(LineContent[Match.end(1):], \
DataType.TAB_EQUAL_SPLIT, 1)
#
# Syntax check
#
if not TokenList[0]:
Logger.Error('Parser', FORMAT_INVALID, ST.ERR_MACRONAME_NOGIVEN,
ExtraData=LineContent, File=FileName, Line=LineNo)
if len(TokenList) < 2:
Logger.Error('Parser', FORMAT_INVALID, ST.ERR_MACROVALUE_NOGIVEN,
ExtraData=LineContent, File=FileName, Line=LineNo)
Name, Value = TokenList
#
# DEFINE defined macros
#
if SectionType == DataType.MODEL_META_DATA_HEADER:
FileLocalMacros[Name] = Value
ReIsValidMacroName = re.compile(r"^[A-Z][A-Z0-9_]*$", re.DOTALL)
if ReIsValidMacroName.match(Name) is None:
Logger.Error('Parser',
FORMAT_INVALID,
ST.ERR_MACRONAME_INVALID % (Name),
ExtraData=LineContent,
File=FileName,
Line=LineNo)
# Validate MACRO Value
#
# <MacroDefinition> ::= [<Comments>]{0,}
# "DEFINE" <MACRO> "=" [{<PATH>} {<VALUE>}] <EOL>
# <Value> ::= {<NumVal>} {<Boolean>} {<AsciiString>} {<GUID>}
# {<CString>} {<UnicodeString>} {<CArray>}
#
# The definition of <NumVal>, <PATH>, <Boolean>, <GUID>, <CString>,
# <UnicodeString>, <CArray> are subset of <AsciiString>.
#
ReIsValidMacroValue = re.compile(r"^[\x20-\x7e]*$", re.DOTALL)
if ReIsValidMacroValue.match(Value) is None:
Logger.Error('Parser',
FORMAT_INVALID,
ST.ERR_MACROVALUE_INVALID % (Value),
ExtraData=LineContent,
File=FileName,
Line=LineNo)
return Name, Value
## GenSection
#
# generate section contents
#
# @param SectionName: indicate the name of the section, details refer to
# INF, DEC specs
# @param SectionDict: section statement dict, key is SectionAttrs(arch,
# moduletype or platform may exist as needed) list
# separated by space,
# value is statement
#
def GenSection(SectionName, SectionDict, SplitArch=True, NeedBlankLine=False):
Content = ''
for SectionAttrs in SectionDict:
StatementList = SectionDict[SectionAttrs]
if SectionAttrs and SectionName != 'Defines' and SectionAttrs.strip().upper() != DataType.TAB_ARCH_COMMON:
if SplitArch:
ArchList = GetSplitValueList(SectionAttrs, DataType.TAB_SPACE_SPLIT)
else:
if SectionName != 'UserExtensions':
ArchList = GetSplitValueList(SectionAttrs, DataType.TAB_COMMENT_SPLIT)
else:
ArchList = [SectionAttrs]
for Index in range(0, len(ArchList)):
ArchList[Index] = ConvertArchForInstall(ArchList[Index])
Section = '[' + SectionName + '.' + (', ' + SectionName + '.').join(ArchList) + ']'
else:
Section = '[' + SectionName + ']'
Content += '\n' + Section + '\n'
if StatementList is not None:
for Statement in StatementList:
LineList = Statement.split('\n')
NewStatement = ""
for Line in LineList:
# ignore blank comment
if not Line.replace("#", '').strip() and SectionName not in ('Defines', 'Hob', 'Event', 'BootMode'):
continue
                    # add two spaces before each line, except comments in the Defines section
if Line.strip().startswith('#') and SectionName == 'Defines':
NewStatement += "%s\n" % Line
continue
NewStatement += " %s\n" % Line
if NeedBlankLine:
Content += NewStatement + '\n'
else:
Content += NewStatement
if NeedBlankLine:
Content = Content[:-1]
if not Content.replace('\\n', '').strip():
return ''
return Content
## ConvertArchForInstall
# If Arch is one of "IA32", "X64", "IPF" or "EBC" (case-insensitive), it must
# be upper case; "common" must be lower case. Anything else preserves its case.
#
# @param Arch: the arch string to be converted; it should be stripped before
#              being passed in
# @return: the converted arch string
#
def ConvertArchForInstall(Arch):
if Arch.upper() in [DataType.TAB_ARCH_IA32, DataType.TAB_ARCH_X64,
DataType.TAB_ARCH_IPF, DataType.TAB_ARCH_EBC]:
Arch = Arch.upper()
elif Arch.upper() == DataType.TAB_ARCH_COMMON:
Arch = Arch.lower()
return Arch
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Library/Parsing.py
|
## @file
# This file is used to define common string related functions used in parsing
# process
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
StringUtils
'''
##
# Import Modules
#
import re
import os.path
import Logger.Log as Logger
import Library.DataType as DataType
from Logger.ToolError import FORMAT_INVALID
from Logger.ToolError import PARSER_ERROR
from Logger import StringTable as ST
#
# Regular expression for matching macro used in DSC/DEC/INF file inclusion
#
gMACRO_PATTERN = re.compile(r"\$\(([_A-Z][_A-Z0-9]*)\)", re.UNICODE)
## GetSplitValueList
#
# Get a value list from a string with multiple values split with SplitTag
# The default SplitTag is DataType.TAB_VALUE_SPLIT
# 'AAA|BBB|CCC' -> ['AAA', 'BBB', 'CCC']
#
# @param String: The input string to be split
# @param SplitTag: The split key, default is DataType.TAB_VALUE_SPLIT
# @param MaxSplit: The max number of split values, default is -1
#
#
def GetSplitValueList(String, SplitTag=DataType.TAB_VALUE_SPLIT, MaxSplit= -1):
return list(map(lambda l: l.strip(), String.split(SplitTag, MaxSplit)))
## MergeArches
#
# Find a key's all arches in dict, add the new arch to the list
# If not exist any arch, set the arch directly
#
# @param Dict: The input value for Dict
# @param Key: The input value for Key
# @param Arch: The Arch to be added or merged
#
def MergeArches(Dict, Key, Arch):
if Key in Dict.keys():
Dict[Key].append(Arch)
else:
Dict[Key] = Arch.split()
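#
# A minimal sketch of the merge behavior (key and arches are hypothetical):
#
#   >>> Dict = {}
#   >>> MergeArches(Dict, 'PcdFoo', 'IA32')
#   >>> MergeArches(Dict, 'PcdFoo', 'X64')
#   >>> Dict
#   {'PcdFoo': ['IA32', 'X64']}
#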
## GenDefines
#
# Parse a string with format "DEFINE <VarName> = <PATH>"
# Generate a map Defines[VarName] = PATH
# Return 0 on success, -1 on invalid format, 1 if not a DEFINE statement
#
# @param String: String with DEFINE statement
# @param Arch: Supported Arch
# @param Defines: Map to receive the parsed (VarName, Arch) -> PATH entry
#
def GenDefines(String, Arch, Defines):
if String.find(DataType.TAB_DEFINE + ' ') > -1:
List = String.replace(DataType.TAB_DEFINE + ' ', '').\
split(DataType.TAB_EQUAL_SPLIT)
if len(List) == 2:
Defines[(CleanString(List[0]), Arch)] = CleanString(List[1])
return 0
else:
return -1
return 1
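#
# A usage sketch, assuming DataType.TAB_DEFINE is 'DEFINE' and
# DataType.TAB_EQUAL_SPLIT is '=' (macro name and path are hypothetical):
#
#   >>> Defines = {}
#   >>> GenDefines('DEFINE MY_PATH = Some/Dir', 'IA32', Defines)
#   0
#   >>> Defines
#   {('MY_PATH', 'IA32'): 'Some/Dir'}
#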
## GetLibraryClassesWithModuleType
#
# Get Library Class definition when no module type defined
#
# @param Lines: The content to be parsed
# @param Key: Reserved
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
def GetLibraryClassesWithModuleType(Lines, Key, KeyValues, CommentCharacter):
NewKey = SplitModuleType(Key)
Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
LineList = Lines.splitlines()
for Line in LineList:
Line = CleanString(Line, CommentCharacter)
if Line != '' and Line[0] != CommentCharacter:
KeyValues.append([CleanString(Line, CommentCharacter), NewKey[1]])
return True
## GetDynamics
#
# Get Dynamic Pcds
#
# @param Lines: The content to be parsed
# @param Key: Reserved
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
def GetDynamics(Lines, Key, KeyValues, CommentCharacter):
#
# Get SkuId Name List
#
SkuIdNameList = SplitModuleType(Key)
Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
LineList = Lines.splitlines()
for Line in LineList:
Line = CleanString(Line, CommentCharacter)
if Line != '' and Line[0] != CommentCharacter:
KeyValues.append([CleanString(Line, CommentCharacter), SkuIdNameList[1]])
return True
## SplitModuleType
#
# Split ModuleType out of section define to get key
# [LibraryClass.Arch.ModuleType|ModuleType|ModuleType] -> [
# 'LibraryClass.Arch', ['ModuleType', 'ModuleType', 'ModuleType'] ]
#
# @param Key: String to be parsed
#
def SplitModuleType(Key):
KeyList = Key.split(DataType.TAB_SPLIT)
#
# Fill in for arch
#
KeyList.append('')
#
# Fill in for moduletype
#
KeyList.append('')
ReturnValue = []
KeyValue = KeyList[0]
if KeyList[1] != '':
KeyValue = KeyValue + DataType.TAB_SPLIT + KeyList[1]
ReturnValue.append(KeyValue)
ReturnValue.append(GetSplitValueList(KeyList[2]))
return ReturnValue
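#
# A minimal sketch, assuming DataType.TAB_SPLIT is '.' and
# DataType.TAB_VALUE_SPLIT is '|':
#
#   SplitModuleType('LibraryClasses.IA32.PEIM|DXE_DRIVER')
#   -> ['LibraryClasses.IA32', ['PEIM', 'DXE_DRIVER']]
#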
## Replace macro in string
#
# This method replace macros used in given string. The macros are given in a
# dictionary.
#
# @param String String to be processed
# @param MacroDefinitions The macro definitions in the form of dictionary
# @param SelfReplacement: To decide whether to replace an undefined macro with ''
# @param Line: The content contain line string and line number
# @param FileName: The meta-file file name
#
def ReplaceMacro(String, MacroDefinitions=None, SelfReplacement=False, Line=None, FileName=None, Flag=False):
LastString = String
if MacroDefinitions is None:
MacroDefinitions = {}
while MacroDefinitions:
QuotedStringList = []
HaveQuotedMacroFlag = False
if not Flag:
MacroUsed = gMACRO_PATTERN.findall(String)
else:
ReQuotedString = re.compile('\"')
QuotedStringList = ReQuotedString.split(String)
if len(QuotedStringList) >= 3:
HaveQuotedMacroFlag = True
Count = 0
MacroString = ""
for QuotedStringItem in QuotedStringList:
Count += 1
if Count % 2 != 0:
MacroString += QuotedStringItem
if Count == len(QuotedStringList) and Count % 2 == 0:
MacroString += QuotedStringItem
MacroUsed = gMACRO_PATTERN.findall(MacroString)
#
# no macro found in String, stop replacing
#
if len(MacroUsed) == 0:
break
for Macro in MacroUsed:
if Macro not in MacroDefinitions:
if SelfReplacement:
String = String.replace("$(%s)" % Macro, '')
Logger.Debug(5, "Delete undefined MACROs in file %s line %d: %s!" % (FileName, Line[1], Line[0]))
continue
if not HaveQuotedMacroFlag:
String = String.replace("$(%s)" % Macro, MacroDefinitions[Macro])
else:
Count = 0
for QuotedStringItem in QuotedStringList:
Count += 1
if Count % 2 != 0:
QuotedStringList[Count - 1] = QuotedStringList[Count - 1].replace("$(%s)" % Macro,
MacroDefinitions[Macro])
elif Count == len(QuotedStringList) and Count % 2 == 0:
QuotedStringList[Count - 1] = QuotedStringList[Count - 1].replace("$(%s)" % Macro,
MacroDefinitions[Macro])
RetString = ''
if HaveQuotedMacroFlag:
Count = 0
for QuotedStringItem in QuotedStringList:
Count += 1
if Count != len(QuotedStringList):
RetString += QuotedStringList[Count - 1] + "\""
else:
RetString += QuotedStringList[Count - 1]
String = RetString
#
# in case there's macro not defined
#
if String == LastString:
break
LastString = String
return String
## NormPath
#
# Create a normal path
# And replace DEFINE in the path
#
# @param Path: The input value for Path to be converted
# @param Defines: A set for DEFINE statement
#
def NormPath(Path, Defines=None):
IsRelativePath = False
if Defines is None:
Defines = {}
if Path:
if Path[0] == '.':
IsRelativePath = True
#
# Replace with Define
#
if Defines:
Path = ReplaceMacro(Path, Defines)
#
# To local path format
#
Path = os.path.normpath(Path)
if IsRelativePath and Path[0] != '.':
Path = os.path.join('.', Path)
return Path
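#
# A usage sketch (POSIX-style separators; os.path.normpath results are
# platform dependent, and the macro name is hypothetical):
#
#   NormPath('./Include/../Foo')                      -> './Foo'
#   NormPath('$(MY_DIR)/Foo', {'MY_DIR': 'Some/Dir'}) -> 'Some/Dir/Foo'
#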
## CleanString
#
# Remove comments in a string
# Remove spaces
#
# @param Line: The string to be cleaned
# @param CommentCharacter: Comment char, used to ignore comment content,
# default is DataType.TAB_COMMENT_SPLIT
#
def CleanString(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False):
#
# remove whitespace
#
Line = Line.strip()
#
# Replace EDK1's comment character
#
if AllowCppStyleComment:
Line = Line.replace(DataType.TAB_COMMENT_EDK1_SPLIT, CommentCharacter)
#
    # remove comments, but keep comment characters that appear inside quoted strings
#
InString = False
for Index in range(0, len(Line)):
if Line[Index] == '"':
InString = not InString
elif Line[Index] == CommentCharacter and not InString:
Line = Line[0: Index]
break
#
# remove whitespace again
#
Line = Line.strip()
return Line
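#
# A minimal sketch, assuming the default comment character '#':
#
#   CleanString('  Value # trailing comment')  -> 'Value'
#   CleanString('"a#b" # comment')             -> '"a#b"'
#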
## CleanString2
#
# Split comments in a string
# Remove spaces
#
# @param Line: The string to be cleaned
# @param CommentCharacter: Comment char, used to ignore comment content,
# default is DataType.TAB_COMMENT_SPLIT
#
def CleanString2(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False):
#
# remove whitespace
#
Line = Line.strip()
#
# Replace EDK1's comment character
#
if AllowCppStyleComment:
Line = Line.replace(DataType.TAB_COMMENT_EDK1_SPLIT, CommentCharacter)
#
# separate comments and statements
#
LineParts = Line.split(CommentCharacter, 1)
#
# remove whitespace again
#
Line = LineParts[0].strip()
if len(LineParts) > 1:
Comment = LineParts[1].strip()
#
# Remove prefixed and trailing comment characters
#
Start = 0
End = len(Comment)
while Start < End and Comment.startswith(CommentCharacter, Start, End):
Start += 1
while End >= 0 and Comment.endswith(CommentCharacter, Start, End):
End -= 1
Comment = Comment[Start:End]
Comment = Comment.strip()
else:
Comment = ''
return Line, Comment
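#
# A minimal sketch, assuming the default comment character '#':
#
#   CleanString2('Statement ## tail comment')  -> ('Statement', 'tail comment')
#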
## GetMultipleValuesOfKeyFromLines
#
# Parse multiple strings to clean comment and spaces
# The result is saved to KeyValues
#
# @param Lines: The content to be parsed
# @param Key: Reserved
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
def GetMultipleValuesOfKeyFromLines(Lines, Key, KeyValues, CommentCharacter):
if Key:
pass
if KeyValues:
pass
Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
LineList = Lines.split('\n')
for Line in LineList:
Line = CleanString(Line, CommentCharacter)
if Line != '' and Line[0] != CommentCharacter:
KeyValues += [Line]
return True
## GetDefineValue
#
# Parse a DEFINE statement to get defined value
# DEFINE Key Value
#
# @param String: The content to be parsed
# @param Key: The key of DEFINE statement
# @param CommentCharacter: Comment char, used to ignore comment content
#
def GetDefineValue(String, Key, CommentCharacter):
if CommentCharacter:
pass
String = CleanString(String)
return String[String.find(Key + ' ') + len(Key + ' ') : ]
## GetSingleValueOfKeyFromLines
#
# Parse multiple strings as below to get value of each definition line
# Key1 = Value1
# Key2 = Value2
# The result is saved to Dictionary
#
# @param Lines: The content to be parsed
# @param Dictionary: To store data after parsing
# @param CommentCharacter: Comment char, be used to ignore comment content
# @param KeySplitCharacter: Key split char, between key name and key value.
# Key1 = Value1, '=' is the key split char
# @param ValueSplitFlag: Value split flag, be used to decide if has
# multiple values
# @param ValueSplitCharacter: Value split char, be used to split multiple
# values. Key1 = Value1|Value2, '|' is the value
# split char
#
def GetSingleValueOfKeyFromLines(Lines, Dictionary, CommentCharacter, KeySplitCharacter, \
ValueSplitFlag, ValueSplitCharacter):
Lines = Lines.split('\n')
Keys = []
Value = ''
DefineValues = ['']
SpecValues = ['']
for Line in Lines:
#
# Handle DEFINE and SPEC
#
if Line.find(DataType.TAB_INF_DEFINES_DEFINE + ' ') > -1:
if '' in DefineValues:
DefineValues.remove('')
DefineValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_DEFINE, CommentCharacter))
continue
if Line.find(DataType.TAB_INF_DEFINES_SPEC + ' ') > -1:
if '' in SpecValues:
SpecValues.remove('')
SpecValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_SPEC, CommentCharacter))
continue
#
# Handle Others
#
LineList = Line.split(KeySplitCharacter, 1)
if len(LineList) >= 2:
Key = LineList[0].split()
if len(Key) == 1 and Key[0][0] != CommentCharacter:
#
# Remove comments and white spaces
#
LineList[1] = CleanString(LineList[1], CommentCharacter)
if ValueSplitFlag:
Value = list(map(lambda x: x.strip(), LineList[1].split(ValueSplitCharacter)))
else:
Value = CleanString(LineList[1], CommentCharacter).splitlines()
if Key[0] in Dictionary:
if Key[0] not in Keys:
Dictionary[Key[0]] = Value
Keys.append(Key[0])
else:
Dictionary[Key[0]].extend(Value)
else:
Dictionary[DataType.TAB_INF_DEFINES_MACRO][Key[0]] = Value[0]
if DefineValues == []:
DefineValues = ['']
if SpecValues == []:
SpecValues = ['']
Dictionary[DataType.TAB_INF_DEFINES_DEFINE] = DefineValues
Dictionary[DataType.TAB_INF_DEFINES_SPEC] = SpecValues
return True
## PreCheck
#
# Do pre-check for a file before it is parsed
# Check $()
# Check []
#
# @param FileName: Used for error report
# @param FileContent: File content to be parsed
# @param SupSectionTag: Used for error report
#
def PreCheck(FileName, FileContent, SupSectionTag):
if SupSectionTag:
pass
LineNo = 0
IsFailed = False
NewFileContent = ''
for Line in FileContent.splitlines():
LineNo = LineNo + 1
#
# Clean current line
#
Line = CleanString(Line)
#
# Remove commented line
#
        if Line.find(DataType.TAB_COMMENT_SPLIT) == 0:
Line = ''
#
# Check $()
#
if Line.find('$') > -1:
if Line.find('$(') < 0 or Line.find(')') < 0:
Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=Logger.IS_RAISE_ERROR)
#
# Check []
#
if Line.find('[') > -1 or Line.find(']') > -1:
#
# Only get one '[' or one ']'
#
if not (Line.find('[') > -1 and Line.find(']') > -1):
Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=Logger.IS_RAISE_ERROR)
#
# Regenerate FileContent
#
NewFileContent = NewFileContent + Line + '\r\n'
if IsFailed:
Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=Logger.IS_RAISE_ERROR)
return NewFileContent
## CheckFileType
#
# Check if the Filename has the extension ExtName
# Return True if it does
# Raise an error message if it does not
#
# @param CheckFilename: Name of the file to be checked
# @param ExtName: Ext name of the file to be checked
# @param ContainerFilename: The container file which describes the file to be
# checked, used for error report
# @param SectionName: Used for error report
# @param Line: The line in container file which defines the file
# to be checked
#
def CheckFileType(CheckFilename, ExtName, ContainerFilename, SectionName, Line, LineNo= -1):
if CheckFilename != '' and CheckFilename is not None:
(Root, Ext) = os.path.splitext(CheckFilename)
if Ext.upper() != ExtName.upper() and Root:
ContainerFile = open(ContainerFilename, 'r').read()
if LineNo == -1:
LineNo = GetLineNo(ContainerFile, Line)
ErrorMsg = ST.ERR_SECTIONNAME_INVALID % (SectionName, CheckFilename, ExtName)
Logger.Error("Parser", PARSER_ERROR, ErrorMsg, Line=LineNo, \
File=ContainerFilename, RaiseError=Logger.IS_RAISE_ERROR)
return True
## CheckFileExist
#
# Check if the file exists
# Return the full path if it exists
# Raise an error message if it does not exist
#
# @param CheckFilename: Name of the file to be checked
# @param WorkspaceDir: Current workspace dir
# @param ContainerFilename: The container file which describes the file to
# be checked, used for error report
# @param SectionName: Used for error report
# @param Line: The line in container file which defines the
# file to be checked
#
def CheckFileExist(WorkspaceDir, CheckFilename, ContainerFilename, SectionName, Line, LineNo= -1):
CheckFile = ''
if CheckFilename != '' and CheckFilename is not None:
CheckFile = WorkspaceFile(WorkspaceDir, CheckFilename)
if not os.path.isfile(CheckFile):
ContainerFile = open(ContainerFilename, 'r').read()
if LineNo == -1:
LineNo = GetLineNo(ContainerFile, Line)
ErrorMsg = ST.ERR_CHECKFILE_NOTFOUND % (CheckFile, SectionName)
Logger.Error("Parser", PARSER_ERROR, ErrorMsg,
File=ContainerFilename, Line=LineNo, RaiseError=Logger.IS_RAISE_ERROR)
return CheckFile
## GetLineNo
#
# Find the index of a line in a file
#
# @param FileContent: Search scope
# @param Line: Search key
#
def GetLineNo(FileContent, Line, IsIgnoreComment=True):
LineList = FileContent.splitlines()
for Index in range(len(LineList)):
if LineList[Index].find(Line) > -1:
#
# Ignore statement in comment
#
if IsIgnoreComment:
if LineList[Index].strip()[0] == DataType.TAB_COMMENT_SPLIT:
continue
return Index + 1
return -1
## RaiseParserError
#
# Raise a parser error
#
# @param Line: String which has error
# @param Section: Used for error report
# @param File: File which has the string
# @param Format: Correct format
#
def RaiseParserError(Line, Section, File, Format='', LineNo= -1):
if LineNo == -1:
LineNo = GetLineNo(open(os.path.normpath(File), 'r').read(), Line)
ErrorMsg = ST.ERR_INVALID_NOTFOUND % (Line, Section)
if Format != '':
Format = "Correct format is " + Format
Logger.Error("Parser", PARSER_ERROR, ErrorMsg, File=File, Line=LineNo, \
ExtraData=Format, RaiseError=Logger.IS_RAISE_ERROR)
## WorkspaceFile
#
# Return a full path with workspace dir
#
# @param WorkspaceDir: Workspace dir
# @param Filename: Relative file name
#
def WorkspaceFile(WorkspaceDir, Filename):
return os.path.join(NormPath(WorkspaceDir), NormPath(Filename))
## Split string
#
# Remove '"' which startswith and endswith string
#
# @param String: The string need to be split
#
def SplitString(String):
if String.startswith('\"'):
String = String[1:]
if String.endswith('\"'):
String = String[:-1]
return String
## Convert To Sql String
#
# Replace "'" with "''" in each item of StringList
#
# @param StringList: A list for strings to be converted
#
def ConvertToSqlString(StringList):
return list(map(lambda s: s.replace("'", "''"), StringList))
## Convert To Sql String
#
# Replace "'" with "''" in the String
#
# @param String: A String to be converted
#
def ConvertToSqlString2(String):
return String.replace("'", "''")
## GetStringOfList
#
# Get String of a List
#
# @param List: string list
# @param Split: split character
#
def GetStringOfList(List, Split=' '):
if not isinstance(List, type([])):
return List
Str = ''
for Item in List:
Str = Str + Item + Split
return Str.strip()
## Get HelpTextList
#
# Get HelpTextList from HelpTextClassList
#
# @param HelpTextClassList: Help Text Class List
#
def GetHelpTextList(HelpTextClassList):
List = []
if HelpTextClassList:
for HelpText in HelpTextClassList:
if HelpText.String.endswith('\n'):
HelpText.String = HelpText.String[0: len(HelpText.String) - len('\n')]
List.extend(HelpText.String.split('\n'))
return List
## Get String Array Length
#
# Get String Array Length
#
# @param String: the source string
#
def StringArrayLength(String):
if String.startswith('L"'):
return (len(String) - 3 + 1) * 2
elif String.startswith('"'):
return (len(String) - 2 + 1)
else:
return len(String.split()) + 1
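#
# A minimal sketch: the returned length includes the terminating NUL, and
# L"..." strings count two bytes per character:
#
#   StringArrayLength('L"AB"')  -> 6
#   StringArrayLength('"AB"')   -> 3
#   StringArrayLength('1 2 3')  -> 4
#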
## RemoveDupOption
#
# Remove duplicated options (e.g. repeated include paths) from an option string
#
# @param OptionString: the option string
# @param Which: the option prefix to deduplicate, e.g. "/I"
# @param Against: an optional list of values that already count as seen
#
def RemoveDupOption(OptionString, Which="/I", Against=None):
OptionList = OptionString.split()
ValueList = []
if Against:
ValueList += Against
for Index in range(len(OptionList)):
Opt = OptionList[Index]
if not Opt.startswith(Which):
continue
if len(Opt) > len(Which):
Val = Opt[len(Which):]
else:
Val = ""
if Val in ValueList:
OptionList[Index] = ""
else:
ValueList.append(Val)
return " ".join(OptionList)
## Check if the string is a hex or decimal digit string
#
# Return True if the string is a non-empty run of decimal digits, or a valid
# hex literal (starting with 0x, followed by hex digit letters); False
# otherwise.
#
# @param Str: input string
#
def IsHexDigit(Str):
try:
int(Str, 10)
return True
except ValueError:
if len(Str) > 2 and Str.upper().startswith('0X'):
try:
int(Str, 16)
return True
except ValueError:
return False
return False
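#
# A minimal sketch:
#
#   IsHexDigit('123')   -> True
#   IsHexDigit('0x1F')  -> True
#   IsHexDigit('1F')    -> False
#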
## Check if the string is a hex or decimal digit string within UINT32 range
#
# Return True if the string is a non-empty run of decimal digits, or a valid
# hex literal (starting with 0x, followed by hex digit letters), and its
# integer value fits in a UINT32; False otherwise.
#
# @param Str: input string
#
def IsHexDigitUINT32(Str):
try:
Value = int(Str, 10)
if (Value <= 0xFFFFFFFF) and (Value >= 0):
return True
except ValueError:
if len(Str) > 2 and Str.upper().startswith('0X'):
try:
Value = int(Str, 16)
if (Value <= 0xFFFFFFFF) and (Value >= 0):
return True
except ValueError:
return False
return False
## ConvertSpecialChar
#
# The ASCII text files of type INF, DEC, INI are edited by developers,
# and may contain characters that cannot be directly translated to strings that
# are conformant with the UDP XML Schema. Any characters in this category
# (0x00-0x08, TAB [0x09], 0x0B, 0x0C, 0x0E-0x1F, 0x7F-0xFF)
# must be converted to a space character[0x20] as part of the parsing process.
#
def ConvertSpecialChar(Lines):
    RetLines = []
    #
    # compile the pattern once instead of once per line
    #
    ReMatchSpecialChar = re.compile(r"[\x00-\x08]|\x09|\x0b|\x0c|[\x0e-\x1f]|[\x7f-\xff]")
    for line in Lines:
        RetLines.append(ReMatchSpecialChar.sub(' ', line))
    return RetLines
## __GetTokenList
#
# Assume Str is a valid feature flag expression.
# Return a list of tokens: alphanumeric tokens and operator/other tokens
# Whitespace is not stripped
#
def __GetTokenList(Str):
InQuote = False
Token = ''
TokenOP = ''
PreChar = ''
List = []
for Char in Str:
if InQuote:
Token += Char
if Char == '"' and PreChar != '\\':
InQuote = not InQuote
List.append(Token)
Token = ''
continue
if Char == '"':
if Token and Token != 'L':
List.append(Token)
Token = ''
if TokenOP:
List.append(TokenOP)
TokenOP = ''
InQuote = not InQuote
Token += Char
continue
if not (Char.isalnum() or Char in '_'):
TokenOP += Char
if Token:
List.append(Token)
Token = ''
else:
Token += Char
if TokenOP:
List.append(TokenOP)
TokenOP = ''
if PreChar == '\\' and Char == '\\':
PreChar = ''
else:
PreChar = Char
if Token:
List.append(Token)
if TokenOP:
List.append(TokenOP)
return List
## ConvertNEToNOTEQ
#
# Convert NE operator to NOT EQ
# For example: 1 NE 2 -> 1 NOT EQ 2
#
# @param Expr: Feature flag expression to be converted
#
def ConvertNEToNOTEQ(Expr):
List = __GetTokenList(Expr)
for Index in range(len(List)):
if List[Index] == 'NE':
List[Index] = 'NOT EQ'
return ''.join(List)
## ConvertNOTEQToNE
#
# Convert NOT EQ operator to NE
# For example: 1 NOT EQ 2 -> 1 NE 2
#
# @param Expr: Feature flag expression to be converted
#
def ConvertNOTEQToNE(Expr):
List = __GetTokenList(Expr)
HasNOT = False
RetList = []
for Token in List:
if HasNOT and Token == 'EQ':
# At least, 'NOT' is in the list
while not RetList[-1].strip():
RetList.pop()
RetList[-1] = 'NE'
HasNOT = False
continue
if Token == 'NOT':
HasNOT = True
elif Token.strip():
HasNOT = False
RetList.append(Token)
return ''.join(RetList)
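#
# A minimal sketch of the two conversions:
#
#   ConvertNEToNOTEQ('1 NE 2')      -> '1 NOT EQ 2'
#   ConvertNOTEQToNE('1 NOT EQ 2')  -> '1 NE 2'
#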
## SplitPcdEntry
#
# Split a PCD entry string into Token.CName, PCD value and FFE.
# NOTE: PCD Value and FFE can contain "|" in its expression. And in INF specification, have below rule.
# When using the characters "|" or "||" in an expression, the expression must be encapsulated in
# open "(" and close ")" parenthesis.
#
# @param String    A PCD entry string to be split.
#
# @return List [PcdTokenCName, Value, FFE]
#
def SplitPcdEntry(String):
if not String:
return ['', '', ''], False
PcdTokenCName = ''
PcdValue = ''
PcdFeatureFlagExp = ''
ValueList = GetSplitValueList(String, "|", 1)
#
# Only contain TokenCName
#
if len(ValueList) == 1:
return [ValueList[0]], True
NewValueList = []
if len(ValueList) == 2:
PcdTokenCName = ValueList[0]
InQuote = False
InParenthesis = False
StrItem = ''
for StrCh in ValueList[1]:
if StrCh == '"':
InQuote = not InQuote
elif StrCh == '(' or StrCh == ')':
InParenthesis = not InParenthesis
if StrCh == '|':
if not InQuote or not InParenthesis:
NewValueList.append(StrItem.strip())
StrItem = ' '
continue
StrItem += StrCh
NewValueList.append(StrItem.strip())
if len(NewValueList) == 1:
PcdValue = NewValueList[0]
return [PcdTokenCName, PcdValue], True
elif len(NewValueList) == 2:
PcdValue = NewValueList[0]
PcdFeatureFlagExp = NewValueList[1]
return [PcdTokenCName, PcdValue, PcdFeatureFlagExp], True
else:
return ['', '', ''], False
return ['', '', ''], False
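#
# A usage sketch (PCD names and values are hypothetical):
#
#   SplitPcdEntry('Guid.PcdFoo')           -> (['Guid.PcdFoo'], True)
#   SplitPcdEntry('Guid.PcdFoo|0x1')       -> (['Guid.PcdFoo', '0x1'], True)
#   SplitPcdEntry('Guid.PcdFoo|0x1|TRUE')  -> (['Guid.PcdFoo', '0x1', 'TRUE'], True)
#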
## Check whether two arches match
#
# @param Arch1
# @param Arch2
#
def IsMatchArch(Arch1, Arch2):
if 'COMMON' in Arch1 or 'COMMON' in Arch2:
return True
try:
if isinstance(Arch1, list) and isinstance(Arch2, list):
for Item1 in Arch1:
for Item2 in Arch2:
if Item1 == Item2:
return True
elif isinstance(Arch1, list):
return Arch2 in Arch1
elif isinstance(Arch2, list):
return Arch1 in Arch2
else:
if Arch1 == Arch2:
return True
except:
return False
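#
# A minimal sketch:
#
#   IsMatchArch(['IA32', 'X64'], ['X64'])  -> True
#   IsMatchArch('COMMON', 'IA32')          -> True
#   IsMatchArch('IA32', 'X64')             -> None (falsy)
#
# Note that non-matching inputs fall through without an explicit return,
# so the caller sees None rather than False.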
## GetUniFileName
#
# Search all files under FilePath to find FileName with the largest numeric
# suffix; return a file name using that index + 1 under FilePath
#
def GetUniFileName(FilePath, FileName):
Files = []
try:
Files = os.listdir(FilePath)
except:
pass
LargestIndex = -1
IndexNotFound = True
for File in Files:
if File.upper().startswith(FileName.upper()) and File.upper().endswith('.UNI'):
Index = File.upper().replace(FileName.upper(), '').replace('.UNI', '')
if Index:
try:
Index = int(Index)
except Exception:
Index = -1
else:
IndexNotFound = False
Index = 0
if Index > LargestIndex:
LargestIndex = Index + 1
if LargestIndex > -1 and not IndexNotFound:
return os.path.normpath(os.path.join(FilePath, FileName + str(LargestIndex) + '.uni'))
else:
return os.path.normpath(os.path.join(FilePath, FileName + '.uni'))
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Library/StringUtils.py
|
## @file
# Python 'Library' package initialization file.
#
# This file is required to make Python interpreter treat the directory
# as containing package.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
Library
'''
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Library/__init__.py
|
## @file
# This file is used to check PCD logical expression
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
'''
ExpressionValidate
'''
from __future__ import print_function
##
# Import Modules
#
import re
from Logger import StringTable as ST
## IsValidBareCString
#
# Check if String is comprised of whitespace(0x20), !(0x21), 0x23 - 0x7E
# or '\n', '\t', '\f', '\r', '\b', '\0', '\\'
#
# @param String: string to be checked
#
def IsValidBareCString(String):
EscapeList = ['n', 't', 'f', 'r', 'b', '0', '\\', '"']
PreChar = ''
LastChar = ''
for Char in String:
LastChar = Char
if PreChar == '\\':
if Char not in EscapeList:
return False
if Char == '\\':
PreChar = ''
continue
else:
IntChar = ord(Char)
if IntChar != 0x20 and IntChar != 0x09 and IntChar != 0x21 \
and (IntChar < 0x23 or IntChar > 0x7e):
return False
PreChar = Char
    # A trailing '\' that does not complete an escape sequence is invalid
if LastChar == '\\' and PreChar == LastChar:
return False
return True
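#
# A minimal sketch (the doubled backslashes are Python escaping; each input
# contains a single '\'):
#
#   IsValidBareCString('hello\\n')  -> True
#   IsValidBareCString('hello\\q')  -> False
#   IsValidBareCString('hello\\')   -> False
#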
def _ValidateToken(Token):
Token = Token.strip()
Index = Token.find("\"")
if Index != -1:
return IsValidBareCString(Token[Index+1:-1])
return True
## _ExprError
#
# @param Exception: Exception
#
class _ExprError(Exception):
def __init__(self, Error = ''):
Exception.__init__(self)
self.Error = Error
## _ExprBase
#
class _ExprBase:
HEX_PATTERN = '[\t\s]*0[xX][a-fA-F0-9]+'
INT_PATTERN = '[\t\s]*[0-9]+'
MACRO_PATTERN = '[\t\s]*\$\(([A-Z][_A-Z0-9]*)\)'
PCD_PATTERN = \
'[\t\s]*[_a-zA-Z][a-zA-Z0-9_]*[\t\s]*\.[\t\s]*[_a-zA-Z][a-zA-Z0-9_]*'
QUOTED_PATTERN = '[\t\s]*L?"[^"]*"'
BOOL_PATTERN = '[\t\s]*(true|True|TRUE|false|False|FALSE)'
def __init__(self, Token):
self.Token = Token
self.Index = 0
self.Len = len(Token)
## SkipWhitespace
#
def SkipWhitespace(self):
for Char in self.Token[self.Index:]:
if Char not in ' \t':
break
self.Index += 1
## IsCurrentOp
#
# @param OpList: option list
#
def IsCurrentOp(self, OpList):
self.SkipWhitespace()
LetterOp = ["EQ", "NE", "GE", "LE", "GT", "LT", "NOT", "and", "AND",
"or", "OR", "XOR"]
OpMap = {
'|' : '|',
'&' : '&',
'!' : '=',
'>' : '=',
'<' : '='
}
for Operator in OpList:
if not self.Token[self.Index:].startswith(Operator):
continue
self.Index += len(Operator)
Char = self.Token[self.Index : self.Index + 1]
if (Operator in LetterOp and (Char == '_' or Char.isalnum())) \
or (Operator in OpMap and OpMap[Operator] == Char):
self.Index -= len(Operator)
break
return True
return False
## _LogicalExpressionParser
#
# @param _ExprBase: _ExprBase object
#
class _LogicalExpressionParser(_ExprBase):
#
# STRINGITEM can only be logical field according to spec
#
STRINGITEM = -1
#
# Evaluate to True or False
#
LOGICAL = 0
REALLOGICAL = 2
#
# Just arithmetic expression
#
ARITH = 1
def __init__(self, Token):
_ExprBase.__init__(self, Token)
self.Parens = 0
def _CheckToken(self, MatchList):
for Match in MatchList:
if Match and Match.start() == 0:
if not _ValidateToken(
self.Token[self.Index:self.Index+Match.end()]
):
return False
self.Index += Match.end()
if self.Token[self.Index - 1] == '"':
return True
if self.Token[self.Index:self.Index+1] == '_' or \
self.Token[self.Index:self.Index+1].isalnum():
self.Index -= Match.end()
return False
Token = self.Token[self.Index - Match.end():self.Index]
if Token.strip() in ["EQ", "NE", "GE", "LE", "GT", "LT",
"NOT", "and", "AND", "or", "OR", "XOR"]:
self.Index -= Match.end()
return False
return True
return False
def IsAtomicNumVal(self):
#
# Hex number
#
Match1 = re.compile(self.HEX_PATTERN).match(self.Token[self.Index:])
#
# Number
#
Match2 = re.compile(self.INT_PATTERN).match(self.Token[self.Index:])
#
# Macro
#
Match3 = re.compile(self.MACRO_PATTERN).match(self.Token[self.Index:])
#
# PcdName
#
Match4 = re.compile(self.PCD_PATTERN).match(self.Token[self.Index:])
return self._CheckToken([Match1, Match2, Match3, Match4])
def IsAtomicItem(self):
#
# Macro
#
Match1 = re.compile(self.MACRO_PATTERN).match(self.Token[self.Index:])
#
# PcdName
#
Match2 = re.compile(self.PCD_PATTERN).match(self.Token[self.Index:])
#
# Quoted string
#
Match3 = re.compile(self.QUOTED_PATTERN).\
match(self.Token[self.Index:].replace('\\\\', '//').\
replace('\\\"', '\\\''))
return self._CheckToken([Match1, Match2, Match3])
## A || B
#
def LogicalExpression(self):
Ret = self.SpecNot()
while self.IsCurrentOp(['||', 'OR', 'or', '&&', 'AND', 'and', 'XOR', 'xor', '^']):
if self.Token[self.Index-1] == '|' and self.Parens <= 0:
raise _ExprError(ST.ERR_EXPR_OR % self.Token)
if Ret not in [self.ARITH, self.LOGICAL, self.REALLOGICAL, self.STRINGITEM]:
raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
Ret = self.SpecNot()
if Ret not in [self.ARITH, self.LOGICAL, self.REALLOGICAL, self.STRINGITEM]:
raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
Ret = self.REALLOGICAL
return Ret
def SpecNot(self):
if self.IsCurrentOp(["NOT", "!", "not"]):
return self.SpecNot()
return self.Rel()
## A < B, A > B, A <= B, A >= B
#
def Rel(self):
Ret = self.Expr()
if self.IsCurrentOp(["<=", ">=", ">", "<", "GT", "LT", "GE", "LE",
"==", "EQ", "!=", "NE"]):
if Ret == self.STRINGITEM:
raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
Ret = self.Expr()
if Ret == self.REALLOGICAL:
raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
Ret = self.REALLOGICAL
return Ret
## A + B, A - B
#
def Expr(self):
Ret = self.Factor()
while self.IsCurrentOp(["+", "-", "&", "|", "^", "XOR", "xor"]):
if self.Token[self.Index-1] == '|' and self.Parens <= 0:
raise _ExprError(ST.ERR_EXPR_OR)
if Ret == self.STRINGITEM or Ret == self.REALLOGICAL:
raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
Ret = self.Factor()
if Ret == self.STRINGITEM or Ret == self.REALLOGICAL:
raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
Ret = self.ARITH
return Ret
## Factor
#
def Factor(self):
if self.IsCurrentOp(["("]):
self.Parens += 1
Ret = self.LogicalExpression()
if not self.IsCurrentOp([")"]):
raise _ExprError(ST.ERR_EXPR_RIGHT_PAREN % \
(self.Token, self.Token[self.Index:]))
self.Parens -= 1
return Ret
if self.IsAtomicItem():
if self.Token[self.Index - 1] == '"':
return self.STRINGITEM
return self.LOGICAL
elif self.IsAtomicNumVal():
return self.ARITH
else:
raise _ExprError(ST.ERR_EXPR_FACTOR % \
(self.Token[self.Index:], self.Token))
## IsValidLogicalExpression
#
def IsValidLogicalExpression(self):
if self.Len == 0:
return False, ST.ERR_EXPRESS_EMPTY
try:
if self.LogicalExpression() not in [self.ARITH, self.LOGICAL, self.REALLOGICAL, self.STRINGITEM]:
return False, ST.ERR_EXPR_LOGICAL % self.Token
except _ExprError as XExcept:
return False, XExcept.Error
self.SkipWhitespace()
if self.Index != self.Len:
return False, (ST.ERR_EXPR_BOOLEAN % \
(self.Token[self.Index:], self.Token))
return True, ''
## _ValidRangeExpressionParser
#
class _ValidRangeExpressionParser(_ExprBase):
INT_RANGE_PATTERN = '[\t\s]*[0-9]+[\t\s]*-[\t\s]*[0-9]+'
HEX_RANGE_PATTERN = \
'[\t\s]*0[xX][a-fA-F0-9]+[\t\s]*-[\t\s]*0[xX][a-fA-F0-9]+'
def __init__(self, Token):
_ExprBase.__init__(self, Token)
self.Parens = 0
self.HEX = 1
self.INT = 2
self.IsParenHappen = False
self.IsLogicalOpHappen = False
## IsValidRangeExpression
#
def IsValidRangeExpression(self):
if self.Len == 0:
return False, ST.ERR_EXPR_RANGE_EMPTY
try:
if self.RangeExpression() not in [self.HEX, self.INT]:
return False, ST.ERR_EXPR_RANGE % self.Token
except _ExprError as XExcept:
return False, XExcept.Error
self.SkipWhitespace()
if self.Index != self.Len:
return False, (ST.ERR_EXPR_RANGE % self.Token)
return True, ''
## RangeExpression
#
def RangeExpression(self):
Ret = self.Unary()
while self.IsCurrentOp(['OR', 'AND', 'and', 'or']):
self.IsLogicalOpHappen = True
if not self.IsParenHappen:
raise _ExprError(ST.ERR_PAREN_NOT_USED % self.Token)
self.IsParenHappen = False
Ret = self.Unary()
if self.IsCurrentOp(['XOR']):
Ret = self.Unary()
return Ret
## Unary
#
def Unary(self):
if self.IsCurrentOp(["NOT"]):
return self.Unary()
return self.ValidRange()
## ValidRange
#
def ValidRange(self):
Ret = -1
if self.IsCurrentOp(["("]):
self.IsLogicalOpHappen = False
self.IsParenHappen = True
self.Parens += 1
if self.Parens > 1:
raise _ExprError(ST.ERR_EXPR_RANGE_DOUBLE_PAREN_NESTED % self.Token)
Ret = self.RangeExpression()
if not self.IsCurrentOp([")"]):
raise _ExprError(ST.ERR_EXPR_RIGHT_PAREN % self.Token)
self.Parens -= 1
return Ret
if self.IsLogicalOpHappen:
raise _ExprError(ST.ERR_PAREN_NOT_USED % self.Token)
if self.IsCurrentOp(["LT", "GT", "LE", "GE", "EQ", "XOR"]):
IntMatch = \
re.compile(self.INT_PATTERN).match(self.Token[self.Index:])
HexMatch = \
re.compile(self.HEX_PATTERN).match(self.Token[self.Index:])
if HexMatch and HexMatch.start() == 0:
self.Index += HexMatch.end()
Ret = self.HEX
elif IntMatch and IntMatch.start() == 0:
self.Index += IntMatch.end()
Ret = self.INT
else:
raise _ExprError(ST.ERR_EXPR_RANGE_FACTOR % (self.Token[self.Index:], self.Token))
else:
IntRangeMatch = re.compile(
self.INT_RANGE_PATTERN).match(self.Token[self.Index:]
)
HexRangeMatch = re.compile(
self.HEX_RANGE_PATTERN).match(self.Token[self.Index:]
)
if HexRangeMatch and HexRangeMatch.start() == 0:
self.Index += HexRangeMatch.end()
Ret = self.HEX
elif IntRangeMatch and IntRangeMatch.start() == 0:
self.Index += IntRangeMatch.end()
Ret = self.INT
else:
raise _ExprError(ST.ERR_EXPR_RANGE % self.Token)
return Ret
## _ValidListExpressionParser
#
class _ValidListExpressionParser(_ExprBase):
VALID_LIST_PATTERN = '(0[xX][0-9a-fA-F]+|[0-9]+)([\t\s]*,[\t\s]*(0[xX][0-9a-fA-F]+|[0-9]+))*'
def __init__(self, Token):
_ExprBase.__init__(self, Token)
self.NUM = 1
def IsValidListExpression(self):
if self.Len == 0:
return False, ST.ERR_EXPR_LIST_EMPTY
try:
if self.ListExpression() not in [self.NUM]:
return False, ST.ERR_EXPR_LIST % self.Token
except _ExprError as XExcept:
return False, XExcept.Error
self.SkipWhitespace()
if self.Index != self.Len:
return False, (ST.ERR_EXPR_LIST % self.Token)
return True, ''
def ListExpression(self):
Ret = -1
self.SkipWhitespace()
ListMatch = re.compile(self.VALID_LIST_PATTERN).match(self.Token[self.Index:])
if ListMatch and ListMatch.start() == 0:
self.Index += ListMatch.end()
Ret = self.NUM
else:
raise _ExprError(ST.ERR_EXPR_LIST % self.Token)
return Ret
## _StringTestParser
#
class _StringTestParser(_ExprBase):
def __init__(self, Token):
_ExprBase.__init__(self, Token)
## IsValidStringTest
#
def IsValidStringTest(self):
if self.Len == 0:
return False, ST.ERR_EXPR_EMPTY
try:
self.StringTest()
except _ExprError as XExcept:
return False, XExcept.Error
return True, ''
## StringItem
#
def StringItem(self):
Match1 = re.compile(self.QUOTED_PATTERN)\
.match(self.Token[self.Index:].replace('\\\\', '//')\
.replace('\\\"', '\\\''))
Match2 = re.compile(self.MACRO_PATTERN).match(self.Token[self.Index:])
Match3 = re.compile(self.PCD_PATTERN).match(self.Token[self.Index:])
MatchList = [Match1, Match2, Match3]
for Match in MatchList:
if Match and Match.start() == 0:
if not _ValidateToken(
self.Token[self.Index:self.Index+Match.end()]
):
raise _ExprError(ST.ERR_EXPR_STRING_ITEM % \
(self.Token, self.Token[self.Index:]))
self.Index += Match.end()
Token = self.Token[self.Index - Match.end():self.Index]
if Token.strip() in ["EQ", "NE"]:
raise _ExprError(ST.ERR_EXPR_STRING_ITEM % \
(self.Token, self.Token[self.Index:]))
return
else:
raise _ExprError(ST.ERR_EXPR_STRING_ITEM % \
(self.Token, self.Token[self.Index:]))
## StringTest
#
def StringTest(self):
self.StringItem()
if not self.IsCurrentOp(["==", "EQ", "!=", "NE"]):
raise _ExprError(ST.ERR_EXPR_EQUALITY % \
(self.Token[self.Index:], self.Token))
self.StringItem()
if self.Index != self.Len:
raise _ExprError(ST.ERR_EXPR_BOOLEAN % \
(self.Token[self.Index:], self.Token))
##
# Check syntax of string test
#
# @param Token: string test token
#
def IsValidStringTest(Token, Flag=False):
#
    # Do not do the check right now; keep the implementation for future enhancement.
#
if not Flag:
return True, ""
return _StringTestParser(Token).IsValidStringTest()
##
# Check syntax of logical expression
#
# @param Token: expression token
#
def IsValidLogicalExpr(Token, Flag=False):
#
    # Do not do the check right now; keep the implementation for future enhancement.
#
if not Flag:
return True, ""
return _LogicalExpressionParser(Token).IsValidLogicalExpression()
##
# Check syntax of range expression
#
# @param Token: range expression token
#
def IsValidRangeExpr(Token):
return _ValidRangeExpressionParser(Token).IsValidRangeExpression()
##
# Check syntax of value list expression token
#
# @param Token: value list expression token
#
def IsValidListExpr(Token):
return _ValidListExpressionParser(Token).IsValidListExpression()
##
# Check whether the feature flag expression is valid or not
#
# @param Token: feature flag expression
#
def IsValidFeatureFlagExp(Token, Flag=False):
#
    # Do not do the check right now; keep the implementation for future enhancement.
#
if not Flag:
return True, "", Token
else:
if Token in ['TRUE', 'FALSE', 'true', 'false', 'True', 'False',
'0x1', '0x01', '0x0', '0x00']:
return True, ""
Valid, Cause = IsValidStringTest(Token, Flag)
if not Valid:
Valid, Cause = IsValidLogicalExpr(Token, Flag)
if not Valid:
return False, Cause
return True, ""
if __name__ == '__main__':
# print IsValidRangeExpr('LT 9')
print(_LogicalExpressionParser('gCrownBayTokenSpaceGuid.PcdPciDevice1BridgeAddressLE0').IsValidLogicalExpression())
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Library/ExpressionValidate.py
|
## @file
# This file is used to define comment generating interface
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
CommentGenerating
'''
##
# Import Modules
#
from Library.StringUtils import GetSplitValueList
from Library.DataType import TAB_SPACE_SPLIT
from Library.DataType import TAB_INF_GUIDTYPE_VAR
from Library.DataType import USAGE_ITEM_NOTIFY
from Library.DataType import ITEM_UNDEFINED
from Library.DataType import TAB_HEADER_COMMENT
from Library.DataType import TAB_BINARY_HEADER_COMMENT
from Library.DataType import TAB_COMMENT_SPLIT
from Library.DataType import TAB_SPECIAL_COMMENT
from Library.DataType import END_OF_LINE
from Library.DataType import TAB_COMMENT_EDK1_SPLIT
from Library.DataType import TAB_COMMENT_EDK1_START
from Library.DataType import TAB_COMMENT_EDK1_END
from Library.DataType import TAB_STAR
from Library.DataType import TAB_PCD_PROMPT
from Library.UniClassObject import ConvertSpecialUnicodes
from Library.Misc import GetLocalValue
## GenTailCommentLines
#
# @param TailCommentLines: the tail comment lines that need to be generated
# @param LeadingSpaceNum: the number of leading space needed for non-first
# line tail comment
#
def GenTailCommentLines (TailCommentLines, LeadingSpaceNum = 0):
TailCommentLines = TailCommentLines.rstrip(END_OF_LINE)
CommentStr = TAB_SPACE_SPLIT*2 + TAB_SPECIAL_COMMENT + TAB_SPACE_SPLIT + \
(END_OF_LINE + LeadingSpaceNum * TAB_SPACE_SPLIT + TAB_SPACE_SPLIT*2 + TAB_SPECIAL_COMMENT + \
TAB_SPACE_SPLIT).join(GetSplitValueList(TailCommentLines, END_OF_LINE))
return CommentStr
## GenGenericComment
#
# @param CommentLines: Generic comment Text, maybe Multiple Lines
#
def GenGenericComment (CommentLines):
if not CommentLines:
return ''
CommentLines = CommentLines.rstrip(END_OF_LINE)
CommentStr = TAB_SPECIAL_COMMENT + TAB_SPACE_SPLIT + (END_OF_LINE + TAB_COMMENT_SPLIT + TAB_SPACE_SPLIT).join\
(GetSplitValueList(CommentLines, END_OF_LINE)) + END_OF_LINE
return CommentStr
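#
# A minimal sketch, assuming TAB_SPECIAL_COMMENT is '##', TAB_COMMENT_SPLIT
# is '#' and END_OF_LINE is '\n':
#
#   GenGenericComment('line one\nline two')
#   -> '## line one\n# line two\n'
#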
## GenGenericCommentF
#
# Similar to GenGenericComment, but removes one trailing <EOL> from the comment,
# and for a line containing only <EOL>, generates '#\n' instead of '# \n'
#
# @param CommentLines: Generic comment Text, maybe Multiple Lines
# @return CommentStr: Generated comment line
#
def GenGenericCommentF (CommentLines, NumOfPound=1, IsPrompt=False, IsInfLibraryClass=False):
if not CommentLines:
return ''
#
    # if the comment ends with '\n', remove it to prevent an extra line from
    # being generated later on
#
if CommentLines.endswith(END_OF_LINE):
CommentLines = CommentLines[:-1]
CommentStr = ''
if IsPrompt:
CommentStr += TAB_COMMENT_SPLIT * NumOfPound + TAB_SPACE_SPLIT + TAB_PCD_PROMPT + TAB_SPACE_SPLIT + \
CommentLines.replace(END_OF_LINE, '') + END_OF_LINE
else:
CommentLineList = GetSplitValueList(CommentLines, END_OF_LINE)
FindLibraryClass = False
for Line in CommentLineList:
            # If this comment is for @libraryclass and it has multiple lines,
            # make sure subsequent lines align to the first line after @libraryclass, as below
#
# ## @libraryclass XYZ FIRST_LINE
# ## ABC SECOND_LINE
#
if IsInfLibraryClass and Line.find(u'@libraryclass ') > -1:
FindLibraryClass = True
if Line == '':
CommentStr += TAB_COMMENT_SPLIT * NumOfPound + END_OF_LINE
else:
if FindLibraryClass and Line.find(u'@libraryclass ') > -1:
CommentStr += TAB_COMMENT_SPLIT * NumOfPound + TAB_SPACE_SPLIT + Line + END_OF_LINE
elif FindLibraryClass:
CommentStr += TAB_COMMENT_SPLIT * NumOfPound + TAB_SPACE_SPLIT * 16 + Line + END_OF_LINE
else:
CommentStr += TAB_COMMENT_SPLIT * NumOfPound + TAB_SPACE_SPLIT + Line + END_OF_LINE
return CommentStr
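#
# For illustration (assuming TAB_COMMENT_SPLIT == '#' and TAB_PCD_PROMPT == '@Prompt'):
#
#   GenGenericCommentF('Line1\n\nLine2\n')           returns '# Line1\n#\n# Line2\n'
#   GenGenericCommentF('Prompt text', IsPrompt=True) returns '# @Prompt Prompt text\n'
#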
## GenHeaderCommentSection
#
# Generate Header comment sections
#
# @param Abstract One line of abstract
# @param Description multiple lines of Description
# @param Copyright possible multiple copyright lines
# @param License possible multiple license lines
#
def GenHeaderCommentSection(Abstract, Description, Copyright, License, IsBinaryHeader=False, \
CommChar=TAB_COMMENT_SPLIT):
Content = ''
#
# Convert special character to (c), (r) and (tm).
#
Abstract = ConvertSpecialUnicodes(Abstract)
Description = ConvertSpecialUnicodes(Description)
if IsBinaryHeader:
Content += CommChar * 2 + TAB_SPACE_SPLIT + TAB_BINARY_HEADER_COMMENT + '\r\n'
elif CommChar == TAB_COMMENT_EDK1_SPLIT:
Content += CommChar + TAB_SPACE_SPLIT + TAB_COMMENT_EDK1_START + TAB_STAR + TAB_SPACE_SPLIT +\
TAB_HEADER_COMMENT + '\r\n'
else:
Content += CommChar * 2 + TAB_SPACE_SPLIT + TAB_HEADER_COMMENT + '\r\n'
if Abstract:
Abstract = Abstract.rstrip('\r\n')
Content += CommChar + TAB_SPACE_SPLIT + ('\r\n' + CommChar + TAB_SPACE_SPLIT).join(GetSplitValueList\
(Abstract, '\n'))
Content += '\r\n' + CommChar + '\r\n'
else:
Content += CommChar + '\r\n'
if Description:
Description = Description.rstrip('\r\n')
Content += CommChar + TAB_SPACE_SPLIT + ('\r\n' + CommChar + TAB_SPACE_SPLIT).join(GetSplitValueList\
(Description, '\n'))
Content += '\r\n' + CommChar + '\r\n'
#
# There is no '#\n' line to separate multiple copyright lines in code base
#
if Copyright:
Copyright = Copyright.rstrip('\r\n')
Content += CommChar + TAB_SPACE_SPLIT + ('\r\n' + CommChar + TAB_SPACE_SPLIT).join\
(GetSplitValueList(Copyright, '\n'))
Content += '\r\n' + CommChar + '\r\n'
if License:
License = License.rstrip('\r\n')
Content += CommChar + TAB_SPACE_SPLIT + ('\r\n' + CommChar + TAB_SPACE_SPLIT).join(GetSplitValueList\
(License, '\n'))
Content += '\r\n' + CommChar + '\r\n'
if CommChar == TAB_COMMENT_EDK1_SPLIT:
Content += CommChar + TAB_SPACE_SPLIT + TAB_STAR + TAB_COMMENT_EDK1_END + '\r\n'
else:
Content += CommChar * 2 + '\r\n'
return Content
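#
# For illustration, GenHeaderCommentSection('Abstract', '', 'Copyright (c) X', 'License')
# with the default CommChar produces the following '\r\n'-terminated lines:
#
#   ## @file
#   # Abstract
#   #
#   # Copyright (c) X
#   #
#   # License
#   #
#   ##
#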
## GenInfPcdTailComment
# Generate Pcd tail comment for Inf; this is a one-line comment
#
# @param Usage: Usage type
# @param TailCommentText: Comment text for tail comment
#
def GenInfPcdTailComment (Usage, TailCommentText):
if (Usage == ITEM_UNDEFINED) and (not TailCommentText):
return ''
CommentLine = TAB_SPACE_SPLIT.join([Usage, TailCommentText])
return GenTailCommentLines(CommentLine)
## GenInfProtocolPPITailComment
# Generate Protocol/PPI tail comment for Inf
#
# @param Usage: Usage type
# @param TailCommentText: Comment text for tail comment
#
def GenInfProtocolPPITailComment (Usage, Notify, TailCommentText):
if (not Notify) and (Usage == ITEM_UNDEFINED) and (not TailCommentText):
return ''
if Notify:
CommentLine = USAGE_ITEM_NOTIFY + " ## "
else:
CommentLine = ''
CommentLine += TAB_SPACE_SPLIT.join([Usage, TailCommentText])
return GenTailCommentLines(CommentLine)
## GenInfGuidTailComment
# Generate Guid tail comment for Inf
#
# @param Usage: Usage type
# @param TailCommentText: Comment text for tail comment
#
def GenInfGuidTailComment (Usage, GuidTypeList, VariableName, TailCommentText):
GuidType = GuidTypeList[0]
if (Usage == ITEM_UNDEFINED) and (GuidType == ITEM_UNDEFINED) and \
(not TailCommentText):
return ''
FirstLine = Usage + " ## " + GuidType
if GuidType == TAB_INF_GUIDTYPE_VAR:
FirstLine += ":" + VariableName
CommentLine = TAB_SPACE_SPLIT.join([FirstLine, TailCommentText])
return GenTailCommentLines(CommentLine)
## GenDecGuidTailComment
#
# @param SupModuleList: Supported module type list
#
def GenDecTailComment (SupModuleList):
CommentLine = TAB_SPACE_SPLIT.join(SupModuleList)
return GenTailCommentLines(CommentLine)
## _GetHelpStr
# Get HelpString from a list of HelpTextObjects; the priority refers to the
# related HLD
#
# @param HelpTextObjList: List of HelpTextObject
#
# @return HelpStr: the help text string found, '' means no help text found
#
def _GetHelpStr(HelpTextObjList):
ValueList = []
for HelpObj in HelpTextObjList:
ValueList.append((HelpObj.GetLang(), HelpObj.GetString()))
return GetLocalValue(ValueList, True)
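#
# Minimal self-check sketch, mirroring the __main__ hook used in
# ExpressionValidate.py; it assumes the UPT directory is on sys.path so the
# Library imports above resolve.
#
if __name__ == '__main__':
    print(GenGenericComment('Line1\nLine2'))
    print(GenGenericCommentF('Line1\n\nLine2\n'))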
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Library/CommentGenerating.py
|
## @file
# This file is used to define class for data type structure
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
# Portions Copyright (c) 2022, Loongson Technology Corporation Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
'''
DataType
'''
##
# Module List Items
#
MODULE_LIST = ["BASE",
"SEC",
"PEI_CORE",
"PEIM",
"DXE_CORE",
"DXE_DRIVER",
"SMM_CORE",
"DXE_RUNTIME_DRIVER",
"DXE_SAL_DRIVER",
"DXE_SMM_DRIVER",
"UEFI_DRIVER",
"UEFI_APPLICATION",
"USER_DEFINED"]
VALID_DEPEX_MODULE_TYPE_LIST = ["PEIM",
"DXE_DRIVER",
"DXE_SMM_DRIVER",
"DXE_RUNTIME_DRIVER",
"DXE_SAL_DRIVER",
"UEFI_DRIVER",
]
##
# Usage List Items
#
USAGE_LIST = ["CONSUMES",
"SOMETIMES_CONSUMES",
"PRODUCES",
"SOMETIMES_PRODUCES"]
TAB_LANGUAGE_EN_US = 'en-US'
TAB_LANGUAGE_ENG = 'eng'
TAB_LANGUAGE_EN = 'en'
TAB_LANGUAGE_EN_X = 'en-x-tianocore'
USAGE_ITEM_PRODUCES = 'PRODUCES'
USAGE_ITEM_SOMETIMES_PRODUCES = 'SOMETIMES_PRODUCES'
USAGE_ITEM_CONSUMES = 'CONSUMES'
USAGE_ITEM_SOMETIMES_CONSUMES = 'SOMETIMES_CONSUMES'
USAGE_ITEM_TO_START = 'TO_START'
USAGE_ITEM_BY_START = 'BY_START'
USAGE_ITEM_NOTIFY = 'NOTIFY'
USAGE_ITEM_UNDEFINED = 'UNDEFINED'
USAGE_CONSUMES_LIST = [USAGE_ITEM_CONSUMES,
'CONSUMED',
'ALWAYS_CONSUMED',
'ALWAYS_CONSUMES'
]
USAGE_PRODUCES_LIST = [USAGE_ITEM_PRODUCES,
'PRODUCED',
'ALWAYS_PRODUCED',
'ALWAYS_PRODUCES'
]
USAGE_SOMETIMES_PRODUCES_LIST = [USAGE_ITEM_SOMETIMES_PRODUCES,
'SOMETIMES_PRODUCED'
]
USAGE_SOMETIMES_CONSUMES_LIST = [USAGE_ITEM_SOMETIMES_CONSUMES,
'SOMETIMES_CONSUMED'
]
ITEM_UNDEFINED = 'UNDEFINED'
TAB_PCD_VALIDRANGE = '@ValidRange'
TAB_PCD_VALIDLIST = '@ValidList'
TAB_PCD_EXPRESSION = '@Expression'
TAB_PCD_PROMPT = '@Prompt'
TAB_STR_TOKENCNAME = 'STR'
TAB_STR_TOKENPROMPT = 'PROMPT'
TAB_STR_TOKENHELP = 'HELP'
TAB_STR_TOKENERR = 'ERR'
#
# Dictionary of usage tokens and their synonyms
#
ALL_USAGE_TOKENS = {
"PRODUCES" : "PRODUCES",
"PRODUCED" : "PRODUCES",
"ALWAYS_PRODUCES" : "PRODUCES",
"ALWAYS_PRODUCED" : "PRODUCES",
"SOMETIMES_PRODUCES" : "SOMETIMES_PRODUCES",
"SOMETIMES_PRODUCED" : "SOMETIMES_PRODUCES",
"CONSUMES" : "CONSUMES",
"CONSUMED" : "CONSUMES",
"ALWAYS_CONSUMES" : "CONSUMES",
"ALWAYS_CONSUMED" : "CONSUMES",
"SOMETIMES_CONSUMES" : "SOMETIMES_CONSUMES",
"SOMETIMES_CONSUMED" : "SOMETIMES_CONSUMES",
"SOMETIME_CONSUMES" : "SOMETIMES_CONSUMES",
"UNDEFINED" : "UNDEFINED"
}
PROTOCOL_USAGE_TOKENS = {
"TO_START" : "TO_START",
"BY_START" : "BY_START"
}
PROTOCOL_USAGE_TOKENS.update (ALL_USAGE_TOKENS)
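#
# For illustration, these tables normalize synonyms to a canonical token:
#
#   ALL_USAGE_TOKENS['ALWAYS_CONSUMED'] == 'CONSUMES'
#   PROTOCOL_USAGE_TOKENS['TO_START']   == 'TO_START'
#   PROTOCOL_USAGE_TOKENS['PRODUCED']   == 'PRODUCES'  (inherited via update)
#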
#
# Dictionary of GUID type tokens
#
GUID_TYPE_TOKENS = {
"Event" : "Event",
"File" : "File",
"FV" : "FV",
"GUID" : "GUID",
"Guid" : "GUID",
"HII" : "HII",
"HOB" : "HOB",
"Hob" : "HOB",
"Hob:" : "HOB",
"SystemTable" : "SystemTable",
"TokenSpaceGuid" : "TokenSpaceGuid",
"UNDEFINED" : "UNDEFINED"
}
#
# Dictionary of Protocol Notify tokens and their synonyms
#
PROTOCOL_NOTIFY_TOKENS = {
"NOTIFY" : "NOTIFY",
"PROTOCOL_NOTIFY" : "NOTIFY",
"UNDEFINED" : "UNDEFINED"
}
#
# Dictionary of PPI Notify tokens and their synonyms
#
PPI_NOTIFY_TOKENS = {
"NOTIFY" : "NOTIFY",
"PPI_NOTIFY" : "NOTIFY",
"UNDEFINED" : "UNDEFINED"
}
EVENT_TOKENS = {
"EVENT_TYPE_PERIODIC_TIMER" : "EVENT_TYPE_PERIODIC_TIMER",
"EVENT_TYPE_RELATIVE_TIMER" : "EVENT_TYPE_RELATIVE_TIMER",
"UNDEFINED" : "UNDEFINED"
}
BOOTMODE_TOKENS = {
"FULL" : "FULL",
"MINIMAL" : "MINIMAL",
"NO_CHANGE" : "NO_CHANGE",
"DIAGNOSTICS" : "DIAGNOSTICS",
"DEFAULT" : "DEFAULT",
"S2_RESUME" : "S2_RESUME",
"S3_RESUME" : "S3_RESUME",
"S4_RESUME" : "S4_RESUME",
"S5_RESUME" : "S5_RESUME",
"FLASH_UPDATE" : "FLASH_UPDATE",
"RECOVERY_FULL" : "RECOVERY_FULL",
"RECOVERY_MINIMAL" : "RECOVERY_MINIMAL",
"RECOVERY_NO_CHANGE" : "RECOVERY_NO_CHANGE",
"RECOVERY_DIAGNOSTICS" : "RECOVERY_DIAGNOSTICS",
"RECOVERY_DEFAULT" : "RECOVERY_DEFAULT",
"RECOVERY_S2_RESUME" : "RECOVERY_S2_RESUME",
"RECOVERY_S3_RESUME" : "RECOVERY_S3_RESUME",
"RECOVERY_S4_RESUME" : "RECOVERY_S4_RESUME",
"RECOVERY_S5_RESUME" : "RECOVERY_S5_RESUME",
"RECOVERY_FLASH_UPDATE" : "RECOVERY_FLASH_UPDATE",
"UNDEFINED" : "UNDEFINED"
}
HOB_TOKENS = {
"PHIT" : "PHIT",
"MEMORY_ALLOCATION" : "MEMORY_ALLOCATION",
"LOAD_PEIM" : "LOAD_PEIM",
"RESOURCE_DESCRIPTOR" : "RESOURCE_DESCRIPTOR",
"FIRMWARE_VOLUME" : "FIRMWARE_VOLUME",
"UNDEFINED" : "UNDEFINED"
}
##
# Usage List Items for Protocol
#
PROTOCOL_USAGE_LIST = USAGE_LIST + ["TO_START", "BY_START"]
##
# End of Line
# Use this instead of os.linesep, which has a bug.
#
END_OF_LINE = '\n'
##
# Arch List Items
#
ARCH_LIST = ["IA32",
"X64",
"IPF",
"EBC",
"COMMON"]
##
# PCD driver type list items
#
PCD_DRIVER_TYPE_LIST = ["PEI_PCD_DRIVER", "DXE_PCD_DRIVER"]
##
# Boot Mode List Items
#
BOOT_MODE_LIST = ["FULL",
"MINIMAL",
"NO_CHANGE",
"DIAGNOSTICS",
"DEFAULT",
"S2_RESUME",
"S3_RESUME",
"S4_RESUME",
"S5_RESUME",
"FLASH_UPDATE",
"RECOVERY_FULL",
"RECOVERY_MINIMAL",
"RECOVERY_NO_CHANGE",
"RECOVERY_DIAGNOSTICS",
"RECOVERY_DEFAULT",
"RECOVERY_S2_RESUME",
"RECOVERY_S3_RESUME",
"RECOVERY_S4_RESUME",
"RECOVERY_S5_RESUME",
"RECOVERY_FLASH_UPDATE"]
##
# Event Type List Items
#
EVENT_TYPE_LIST = ["EVENT_TYPE_PERIODIC_TIMER",
"EVENT_TYPE_RELATIVE_TIMER"]
##
# Hob Type List Items
#
HOB_TYPE_LIST = ["PHIT",
"MEMORY_ALLOCATION",
"RESOURCE_DESCRIPTOR",
"FIRMWARE_VOLUME",
"LOAD_PEIM"]
##
# GUID_TYPE_LIST
#
GUID_TYPE_LIST = ["Event", "File", "FV", "GUID", "HII", "HOB",
"SystemTable", "TokenSpaceGuid", "Variable"]
##
# PCD Usage Type List of Package
#
PCD_USAGE_TYPE_LIST_OF_PACKAGE = ["FeatureFlag", "PatchableInModule",
"FixedAtBuild", "Dynamic", "DynamicEx"]
##
# PCD Usage Type List of Module
#
PCD_USAGE_TYPE_LIST_OF_MODULE = ["FEATUREPCD", "PATCHPCD", "FIXEDPCD", "PCD", "PCDEX"]
##
# PCD Usage Type List of UPT
#
PCD_USAGE_TYPE_LIST_OF_UPT = PCD_USAGE_TYPE_LIST_OF_MODULE
##
# Binary File Type List
#
BINARY_FILE_TYPE_LIST = ["PE32", "PIC", "TE", "DXE_DEPEX", "VER", "UI", "COMPAT16", "FV", "BIN", "RAW",
"ACPI", "ASL",
"PEI_DEPEX",
"SMM_DEPEX",
"SUBTYPE_GUID",
"DISPOSABLE"
]
BINARY_FILE_TYPE_LIST_IN_UDP = \
["GUID", "FREEFORM",
"UEFI_IMAGE", "PE32", "PIC",
"PEI_DEPEX",
"DXE_DEPEX",
"SMM_DEPEX",
"FV", "TE",
"BIN", "VER", "UI"
]
SUBTYPE_GUID_BINARY_FILE_TYPE = "FREEFORM"
##
# Possible values for COMPONENT_TYPE, and their descriptions, are listed in
# the table "Component (module) Types." For each component, the BASE_NAME
# and COMPONENT_TYPE are required. The COMPONENT_TYPE definition is case
# sensitive.
#
COMPONENT_TYPE_LIST = [
"APPLICATION",
"ACPITABLE",
"APRIORI",
"BINARY",
"BS_DRIVER",
"CONFIG",
"FILE",
"FVIMAGEFILE",
"LIBRARY",
"LOGO",
"LEGACY16",
"MICROCODE",
"PE32_PEIM",
"PEI_CORE",
"RAWFILE",
"RT_DRIVER",
"SAL_RT_DRIVER",
"SECURITY_CORE",
"COMBINED_PEIM_DRIVER",
"PIC_PEIM",
"RELOCATABLE_PEIM"
]
##
# Common Definitions
#
TAB_SPLIT = '.'
TAB_COMMENT_EDK1_START = '/*'
TAB_COMMENT_EDK1_END = '*/'
TAB_COMMENT_EDK1_SPLIT = '//'
TAB_COMMENT_SPLIT = '#'
TAB_EQUAL_SPLIT = '='
TAB_DEQUAL_SPLIT = '=='
TAB_VALUE_SPLIT = '|'
TAB_COMMA_SPLIT = ','
TAB_HORIZON_LINE_SPLIT = '-'
TAB_SPACE_SPLIT = ' '
TAB_UNDERLINE_SPLIT = '_'
TAB_SEMI_COLON_SPLIT = ';'
TAB_COLON_SPLIT = ':'
TAB_SECTION_START = '['
TAB_SECTION_END = ']'
TAB_OPTION_START = '<'
TAB_OPTION_END = '>'
TAB_SLASH = '\\'
TAB_BACK_SLASH = '/'
TAB_SPECIAL_COMMENT = '##'
TAB_HEADER_COMMENT = '@file'
TAB_BINARY_HEADER_COMMENT = '@BinaryHeader'
TAB_STAR = '*'
TAB_ENCODING_UTF16LE = 'utf_16_le'
TAB_CAPHEX_START = '0X'
TAB_HEX_START = '0x'
TAB_PCD_ERROR = 'Error'
TAB_PCD_ERROR_SECTION_COMMENT = 'Error message section'
TAB_UNI_FILE_SUFFIXS = ['.uni', '.UNI', '.Uni']
TAB_EDK_SOURCE = '$(EDK_SOURCE)'
TAB_EFI_SOURCE = '$(EFI_SOURCE)'
TAB_WORKSPACE = '$(WORKSPACE)'
TAB_ARCH_NULL = ''
TAB_ARCH_COMMON = 'COMMON'
TAB_ARCH_IA32 = 'IA32'
TAB_ARCH_X64 = 'X64'
TAB_ARCH_IPF = 'IPF'
TAB_ARCH_ARM = 'ARM'
TAB_ARCH_LOONGARCH64 = 'LOONGARCH64'
TAB_ARCH_EBC = 'EBC'
ARCH_LIST = \
[TAB_ARCH_IA32, TAB_ARCH_X64, TAB_ARCH_IPF, TAB_ARCH_ARM, TAB_ARCH_LOONGARCH64, TAB_ARCH_EBC]
SUP_MODULE_BASE = 'BASE'
SUP_MODULE_SEC = 'SEC'
SUP_MODULE_PEI_CORE = 'PEI_CORE'
SUP_MODULE_PEIM = 'PEIM'
SUP_MODULE_DXE_CORE = 'DXE_CORE'
SUP_MODULE_DXE_DRIVER = 'DXE_DRIVER'
SUP_MODULE_DXE_RUNTIME_DRIVER = 'DXE_RUNTIME_DRIVER'
SUP_MODULE_DXE_SAL_DRIVER = 'DXE_SAL_DRIVER'
SUP_MODULE_DXE_SMM_DRIVER = 'DXE_SMM_DRIVER'
SUP_MODULE_UEFI_DRIVER = 'UEFI_DRIVER'
SUP_MODULE_UEFI_APPLICATION = 'UEFI_APPLICATION'
SUP_MODULE_USER_DEFINED = 'USER_DEFINED'
SUP_MODULE_SMM_CORE = 'SMM_CORE'
SUP_MODULE_LIST = \
[SUP_MODULE_BASE, SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM, \
SUP_MODULE_DXE_CORE, SUP_MODULE_DXE_DRIVER, \
SUP_MODULE_DXE_RUNTIME_DRIVER, SUP_MODULE_DXE_SAL_DRIVER, \
SUP_MODULE_DXE_SMM_DRIVER, SUP_MODULE_UEFI_DRIVER, \
SUP_MODULE_UEFI_APPLICATION, SUP_MODULE_USER_DEFINED, \
SUP_MODULE_SMM_CORE]
SUP_MODULE_LIST_STRING = TAB_VALUE_SPLIT.join(l for l in SUP_MODULE_LIST)
EDK_COMPONENT_TYPE_LIBRARY = 'LIBRARY'
EDK_COMPONENT_TYPE_SECUARITY_CORE = 'SECUARITY_CORE'
EDK_COMPONENT_TYPE_PEI_CORE = 'PEI_CORE'
EDK_COMPONENT_TYPE_COMBINED_PEIM_DRIVER = 'COMBINED_PEIM_DRIVER'
EDK_COMPONENT_TYPE_PIC_PEIM = 'PIC_PEIM'
EDK_COMPONENT_TYPE_RELOCATABLE_PEIM = 'RELOCATABLE_PEIM'
EDK_COMPONENT_TYPE_BS_DRIVER = 'BS_DRIVER'
EDK_COMPONENT_TYPE_RT_DRIVER = 'RT_DRIVER'
EDK_COMPONENT_TYPE_SAL_RT_DRIVER = 'SAL_RT_DRIVER'
EDK_COMPONENT_TYPE_APPLICATION = 'APPLICATION'
EDK_NAME = 'EDK'
EDKII_NAME = 'EDKII'
BINARY_FILE_TYPE_FW = 'FW'
BINARY_FILE_TYPE_GUID = 'GUID'
BINARY_FILE_TYPE_PREEFORM = 'PREEFORM'
BINARY_FILE_TYPE_UEFI_APP = 'UEFI_APP'
BINARY_FILE_TYPE_UNI_UI = 'UNI_UI'
BINARY_FILE_TYPE_SEC_UI = 'SEC_UI'
BINARY_FILE_TYPE_UNI_VER = 'UNI_VER'
BINARY_FILE_TYPE_SEC_VER = 'SEC_VER'
BINARY_FILE_TYPE_LIB = 'LIB'
BINARY_FILE_TYPE_PE32 = 'PE32'
BINARY_FILE_TYPE_PIC = 'PIC'
BINARY_FILE_TYPE_PEI_DEPEX = 'PEI_DEPEX'
BINARY_FILE_TYPE_DXE_DEPEX = 'DXE_DEPEX'
BINARY_FILE_TYPE_SMM_DEPEX = 'SMM_DEPEX'
BINARY_FILE_TYPE_TE = 'TE'
BINARY_FILE_TYPE_VER = 'VER'
BINARY_FILE_TYPE_UI = 'UI'
BINARY_FILE_TYPE_BIN = 'BIN'
BINARY_FILE_TYPE_FV = 'FV'
BINARY_FILE_TYPE_UI_LIST = [BINARY_FILE_TYPE_UNI_UI,
BINARY_FILE_TYPE_SEC_UI,
BINARY_FILE_TYPE_UI
]
BINARY_FILE_TYPE_VER_LIST = [BINARY_FILE_TYPE_UNI_VER,
BINARY_FILE_TYPE_SEC_VER,
BINARY_FILE_TYPE_VER
]
DEPEX_SECTION_LIST = ['<PEI_DEPEX>',
'<DXE_DEPEX>',
'<SMM_DEPEX>'
]
PLATFORM_COMPONENT_TYPE_LIBRARY = 'LIBRARY'
PLATFORM_COMPONENT_TYPE_LIBRARY_CLASS = 'LIBRARY_CLASS'
PLATFORM_COMPONENT_TYPE_MODULE = 'MODULE'
TAB_LIBRARIES = 'Libraries'
TAB_SOURCE = 'Source'
TAB_SOURCES = 'Sources'
TAB_SOURCES_COMMON = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_COMMON
TAB_SOURCES_IA32 = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_IA32
TAB_SOURCES_X64 = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_X64
TAB_SOURCES_IPF = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_IPF
TAB_SOURCES_ARM = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_ARM
TAB_SOURCES_LOONGARCH64 = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_LOONGARCH64
TAB_SOURCES_EBC = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_EBC
TAB_BINARIES = 'Binaries'
TAB_BINARIES_COMMON = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_COMMON
TAB_BINARIES_IA32 = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_IA32
TAB_BINARIES_X64 = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_X64
TAB_BINARIES_IPF = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_IPF
TAB_BINARIES_ARM = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_ARM
TAB_BINARIES_LOONGARCH64 = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_LOONGARCH64
TAB_BINARIES_EBC = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_EBC
TAB_INCLUDES = 'Includes'
TAB_INCLUDES_COMMON = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_COMMON
TAB_INCLUDES_IA32 = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_IA32
TAB_INCLUDES_X64 = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_X64
TAB_INCLUDES_IPF = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_IPF
TAB_INCLUDES_ARM = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_ARM
TAB_INCLUDES_LOONGARCH64 = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_LOONGARCH64
TAB_INCLUDES_EBC = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_EBC
TAB_GUIDS = 'Guids'
TAB_GUIDS_COMMON = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_COMMON
TAB_GUIDS_IA32 = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_IA32
TAB_GUIDS_X64 = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_X64
TAB_GUIDS_IPF = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_IPF
TAB_GUIDS_ARM = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_ARM
TAB_GUIDS_LOONGARCH64 = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_LOONGARCH64
TAB_GUIDS_EBC = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_EBC
TAB_PROTOCOLS = 'Protocols'
TAB_PROTOCOLS_COMMON = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_COMMON
TAB_PROTOCOLS_IA32 = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_IA32
TAB_PROTOCOLS_X64 = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_X64
TAB_PROTOCOLS_IPF = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_IPF
TAB_PROTOCOLS_ARM = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_ARM
TAB_PROTOCOLS_LOONGARCH64 = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_LOONGARCH64
TAB_PROTOCOLS_EBC = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_EBC
TAB_PPIS = 'Ppis'
TAB_PPIS_COMMON = TAB_PPIS + TAB_SPLIT + TAB_ARCH_COMMON
TAB_PPIS_IA32 = TAB_PPIS + TAB_SPLIT + TAB_ARCH_IA32
TAB_PPIS_X64 = TAB_PPIS + TAB_SPLIT + TAB_ARCH_X64
TAB_PPIS_IPF = TAB_PPIS + TAB_SPLIT + TAB_ARCH_IPF
TAB_PPIS_ARM = TAB_PPIS + TAB_SPLIT + TAB_ARCH_ARM
TAB_PPIS_LOONGARCH64 = TAB_PPIS + TAB_SPLIT + TAB_ARCH_LOONGARCH64
TAB_PPIS_EBC = TAB_PPIS + TAB_SPLIT + TAB_ARCH_EBC
TAB_LIBRARY_CLASSES = 'LibraryClasses'
TAB_LIBRARY_CLASSES_COMMON = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_COMMON
TAB_LIBRARY_CLASSES_IA32 = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_IA32
TAB_LIBRARY_CLASSES_X64 = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_X64
TAB_LIBRARY_CLASSES_IPF = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_IPF
TAB_LIBRARY_CLASSES_ARM = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_ARM
TAB_LIBRARY_CLASSES_LOONGARCH64 = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_LOONGARCH64
TAB_LIBRARY_CLASSES_EBC = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_EBC
TAB_PACKAGES = 'Packages'
TAB_PACKAGES_COMMON = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_COMMON
TAB_PACKAGES_IA32 = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_IA32
TAB_PACKAGES_X64 = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_X64
TAB_PACKAGES_IPF = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_IPF
TAB_PACKAGES_ARM = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_ARM
TAB_PACKAGES_LOONGARCH64 = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_LOONGARCH64
TAB_PACKAGES_EBC = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_EBC
TAB_PCDS = 'Pcds'
TAB_PCDS_FIXED_AT_BUILD = 'FixedAtBuild'
TAB_PCDS_PATCHABLE_IN_MODULE = 'PatchableInModule'
TAB_PCDS_FEATURE_FLAG = 'FeatureFlag'
TAB_PCDS_DYNAMIC_EX = 'DynamicEx'
TAB_PCDS_DYNAMIC_EX_DEFAULT = 'DynamicExDefault'
TAB_PCDS_DYNAMIC_EX_VPD = 'DynamicExVpd'
TAB_PCDS_DYNAMIC_EX_HII = 'DynamicExHii'
TAB_PCDS_DYNAMIC = 'Dynamic'
TAB_PCDS_DYNAMIC_DEFAULT = 'DynamicDefault'
TAB_PCDS_DYNAMIC_VPD = 'DynamicVpd'
TAB_PCDS_DYNAMIC_HII = 'DynamicHii'
TAB_PTR_TYPE_PCD = 'VOID*'
PCD_DYNAMIC_TYPE_LIST = [TAB_PCDS_DYNAMIC, TAB_PCDS_DYNAMIC_DEFAULT, \
TAB_PCDS_DYNAMIC_VPD, TAB_PCDS_DYNAMIC_HII]
PCD_DYNAMIC_EX_TYPE_LIST = [TAB_PCDS_DYNAMIC_EX, TAB_PCDS_DYNAMIC_EX_DEFAULT, \
TAB_PCDS_DYNAMIC_EX_VPD, TAB_PCDS_DYNAMIC_EX_HII]
## Dynamic-ex PCD types
#
gDYNAMIC_EX_PCD = [TAB_PCDS_DYNAMIC_EX, TAB_PCDS_DYNAMIC_EX_DEFAULT, \
TAB_PCDS_DYNAMIC_EX_VPD, TAB_PCDS_DYNAMIC_EX_HII]
TAB_PCDS_FIXED_AT_BUILD_NULL = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD
TAB_PCDS_FIXED_AT_BUILD_COMMON = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + \
TAB_SPLIT + TAB_ARCH_COMMON
TAB_PCDS_FIXED_AT_BUILD_IA32 = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + \
TAB_SPLIT + TAB_ARCH_IA32
TAB_PCDS_FIXED_AT_BUILD_X64 = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + \
TAB_SPLIT + TAB_ARCH_X64
TAB_PCDS_FIXED_AT_BUILD_IPF = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + \
TAB_SPLIT + TAB_ARCH_IPF
TAB_PCDS_FIXED_AT_BUILD_ARM = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + \
TAB_SPLIT + TAB_ARCH_ARM
TAB_PCDS_FIXED_AT_BUILD_LOONGARCH64 = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + \
TAB_SPLIT + TAB_ARCH_LOONGARCH64
TAB_PCDS_FIXED_AT_BUILD_EBC = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + \
TAB_SPLIT + TAB_ARCH_EBC
TAB_PCDS_PATCHABLE_IN_MODULE_NULL = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE
TAB_PCDS_PATCHABLE_IN_MODULE_COMMON = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE \
+ TAB_SPLIT + TAB_ARCH_COMMON
TAB_PCDS_PATCHABLE_IN_MODULE_IA32 = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + \
TAB_SPLIT + TAB_ARCH_IA32
TAB_PCDS_PATCHABLE_IN_MODULE_X64 = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + \
TAB_SPLIT + TAB_ARCH_X64
TAB_PCDS_PATCHABLE_IN_MODULE_IPF = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + \
TAB_SPLIT + TAB_ARCH_IPF
TAB_PCDS_PATCHABLE_IN_MODULE_ARM = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + \
TAB_SPLIT + TAB_ARCH_ARM
TAB_PCDS_PATCHABLE_IN_MODULE_LOONGARCH64 = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + \
TAB_SPLIT + TAB_ARCH_LOONGARCH64
TAB_PCDS_PATCHABLE_IN_MODULE_EBC = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + \
TAB_SPLIT + TAB_ARCH_EBC
TAB_PCDS_FEATURE_FLAG_NULL = TAB_PCDS + TAB_PCDS_FEATURE_FLAG
TAB_PCDS_FEATURE_FLAG_COMMON = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT \
+ TAB_ARCH_COMMON
TAB_PCDS_FEATURE_FLAG_IA32 = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + \
TAB_ARCH_IA32
TAB_PCDS_FEATURE_FLAG_X64 = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + \
TAB_ARCH_X64
TAB_PCDS_FEATURE_FLAG_IPF = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + \
TAB_ARCH_IPF
TAB_PCDS_FEATURE_FLAG_ARM = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + \
TAB_ARCH_ARM
TAB_PCDS_FEATURE_FLAG_LOONGARCH64 = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + \
TAB_ARCH_LOONGARCH64
TAB_PCDS_FEATURE_FLAG_EBC = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + \
TAB_ARCH_EBC
TAB_PCDS_DYNAMIC_EX_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_EX
TAB_PCDS_DYNAMIC_EX_DEFAULT_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_EX_DEFAULT
TAB_PCDS_DYNAMIC_EX_HII_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_EX_HII
TAB_PCDS_DYNAMIC_EX_VPD_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_EX_VPD
TAB_PCDS_DYNAMIC_EX_COMMON = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + \
TAB_ARCH_COMMON
TAB_PCDS_DYNAMIC_EX_IA32 = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + \
TAB_ARCH_IA32
TAB_PCDS_DYNAMIC_EX_X64 = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + \
TAB_ARCH_X64
TAB_PCDS_DYNAMIC_EX_IPF = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + \
TAB_ARCH_IPF
TAB_PCDS_DYNAMIC_EX_ARM = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + \
TAB_ARCH_ARM
TAB_PCDS_DYNAMIC_EX_LOONGARCH64 = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + \
TAB_ARCH_LOONGARCH64
TAB_PCDS_DYNAMIC_EX_EBC = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + \
TAB_ARCH_EBC
TAB_PCDS_DYNAMIC_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC
TAB_PCDS_DYNAMIC_DEFAULT_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_DEFAULT
TAB_PCDS_DYNAMIC_HII_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_HII
TAB_PCDS_DYNAMIC_VPD_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_VPD
TAB_PCDS_DYNAMIC_COMMON = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + \
TAB_ARCH_COMMON
TAB_PCDS_DYNAMIC_IA32 = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_IA32
TAB_PCDS_DYNAMIC_X64 = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_X64
TAB_PCDS_DYNAMIC_IPF = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_IPF
TAB_PCDS_DYNAMIC_ARM = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_ARM
TAB_PCDS_DYNAMIC_LOONGARCH64 = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_LOONGARCH64
TAB_PCDS_DYNAMIC_EBC = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_EBC
TAB_PCD_DYNAMIC_TYPE_LIST = [TAB_PCDS_DYNAMIC_DEFAULT_NULL, \
TAB_PCDS_DYNAMIC_VPD_NULL, \
TAB_PCDS_DYNAMIC_HII_NULL]
TAB_PCD_DYNAMIC_EX_TYPE_LIST = [TAB_PCDS_DYNAMIC_EX_DEFAULT_NULL, \
TAB_PCDS_DYNAMIC_EX_VPD_NULL, \
TAB_PCDS_DYNAMIC_EX_HII_NULL]
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE = \
'PcdLoadFixAddressPeiCodePageNumber'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE_DATA_TYPE = 'UINT32'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE = \
'PcdLoadFixAddressBootTimeCodePageNumber'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE_DATA_TYPE = 'UINT32'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE = \
'PcdLoadFixAddressRuntimeCodePageNumber'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE_DATA_TYPE = 'UINT32'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE = \
'PcdLoadFixAddressSmmCodePageNumber'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE_DATA_TYPE = 'UINT32'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_LIST = \
[TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE, \
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE, \
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE, \
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE]
PCD_SECTION_LIST = [TAB_PCDS_FIXED_AT_BUILD_NULL.upper(), \
TAB_PCDS_PATCHABLE_IN_MODULE_NULL.upper(), \
TAB_PCDS_FEATURE_FLAG_NULL.upper(), \
TAB_PCDS_DYNAMIC_EX_NULL.upper(), \
TAB_PCDS_DYNAMIC_NULL.upper()]
INF_PCD_SECTION_LIST = ["FixedPcd".upper(), "FeaturePcd".upper(), \
"PatchPcd".upper(), "Pcd".upper(), "PcdEx".upper()]
TAB_DEPEX = 'Depex'
TAB_DEPEX_COMMON = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_COMMON
TAB_DEPEX_IA32 = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_IA32
TAB_DEPEX_X64 = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_X64
TAB_DEPEX_IPF = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_IPF
TAB_DEPEX_ARM = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_ARM
TAB_DEPEX_LOONGARCH64 = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_LOONGARCH64
TAB_DEPEX_EBC = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_EBC
TAB_SKUIDS = 'SkuIds'
TAB_LIBRARIES = 'Libraries'
TAB_LIBRARIES_COMMON = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_COMMON
TAB_LIBRARIES_IA32 = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_IA32
TAB_LIBRARIES_X64 = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_X64
TAB_LIBRARIES_IPF = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_IPF
TAB_LIBRARIES_ARM = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_ARM
TAB_LIBRARIES_LOONGARCH64 = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_LOONGARCH64
TAB_LIBRARIES_EBC = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_EBC
TAB_COMPONENTS = 'Components'
TAB_COMPONENTS_COMMON = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_COMMON
TAB_COMPONENTS_IA32 = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_IA32
TAB_COMPONENTS_X64 = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_X64
TAB_COMPONENTS_IPF = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_IPF
TAB_COMPONENTS_ARM = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_ARM
TAB_COMPONENTS_LOONGARCH64 = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_LOONGARCH64
TAB_COMPONENTS_EBC = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_EBC
TAB_BUILD_OPTIONS = 'BuildOptions'
TAB_DEFINE = 'DEFINE'
TAB_NMAKE = 'Nmake'
TAB_USER_EXTENSIONS = 'UserExtensions'
TAB_INCLUDE = '!include'
TAB_PRIVATE = 'Private'
TAB_INTEL = 'Intel'
#
# Common Define
#
TAB_COMMON_DEFINES = 'Defines'
#
# Inf Definitions
#
TAB_INF_DEFINES = TAB_COMMON_DEFINES
TAB_INF_DEFINES_INF_VERSION = 'INF_VERSION'
TAB_INF_DEFINES_BASE_NAME = 'BASE_NAME'
TAB_INF_DEFINES_FILE_GUID = 'FILE_GUID'
TAB_INF_DEFINES_MODULE_TYPE = 'MODULE_TYPE'
TAB_INF_DEFINES_EFI_SPECIFICATION_VERSION = 'EFI_SPECIFICATION_VERSION'
TAB_INF_DEFINES_UEFI_SPECIFICATION_VERSION = 'UEFI_SPECIFICATION_VERSION'
TAB_INF_DEFINES_PI_SPECIFICATION_VERSION = 'PI_SPECIFICATION_VERSION'
TAB_INF_DEFINES_EDK_RELEASE_VERSION = 'EDK_RELEASE_VERSION'
TAB_INF_DEFINES_MODULE_UNI_FILE = 'MODULE_UNI_FILE'
TAB_INF_DEFINES_BINARY_MODULE = 'BINARY_MODULE'
TAB_INF_DEFINES_LIBRARY_CLASS = 'LIBRARY_CLASS'
TAB_INF_DEFINES_COMPONENT_TYPE = 'COMPONENT_TYPE'
TAB_INF_DEFINES_MAKEFILE_NAME = 'MAKEFILE_NAME'
TAB_INF_DEFINES_BUILD_NUMBER = 'BUILD_NUMBER'
TAB_INF_DEFINES_BUILD_TYPE = 'BUILD_TYPE'
TAB_INF_DEFINES_FFS_EXT = 'FFS_EXT'
TAB_INF_DEFINES_FV_EXT = 'FV_EXT'
TAB_INF_DEFINES_SOURCE_FV = 'SOURCE_FV'
TAB_INF_DEFINES_PACKAGE = 'PACKAGE'
TAB_INF_DEFINES_VERSION_NUMBER = 'VERSION_NUMBER'
TAB_INF_DEFINES_VERSION = 'VERSION'
TAB_INF_DEFINES_VERSION_STRING = 'VERSION_STRING'
TAB_INF_DEFINES_PCD_IS_DRIVER = 'PCD_IS_DRIVER'
TAB_INF_DEFINES_TIANO_EDK1_FLASHMAP_H = 'TIANO_EDK1_FLASHMAP_H'
TAB_INF_DEFINES_ENTRY_POINT = 'ENTRY_POINT'
TAB_INF_DEFINES_UNLOAD_IMAGE = 'UNLOAD_IMAGE'
TAB_INF_DEFINES_CONSTRUCTOR = 'CONSTRUCTOR'
TAB_INF_DEFINES_DESTRUCTOR = 'DESTRUCTOR'
TAB_INF_DEFINES_PCI_VENDOR_ID = 'PCI_VENDOR_ID'
TAB_INF_DEFINES_PCI_DEVICE_ID = 'PCI_DEVICE_ID'
TAB_INF_DEFINES_PCI_CLASS_CODE = 'PCI_CLASS_CODE'
TAB_INF_DEFINES_PCI_REVISION = 'PCI_REVISION'
TAB_INF_DEFINES_PCI_COMPRESS = 'PCI_COMPRESS'
TAB_INF_DEFINES_DEFINE = 'DEFINE'
TAB_INF_DEFINES_SPEC = 'SPEC'
TAB_INF_DEFINES_UEFI_HII_RESOURCE_SECTION = 'UEFI_HII_RESOURCE_SECTION'
TAB_INF_DEFINES_CUSTOM_MAKEFILE = 'CUSTOM_MAKEFILE'
TAB_INF_DEFINES_MACRO = '__MACROS__'
TAB_INF_DEFINES_SHADOW = 'SHADOW'
TAB_INF_DEFINES_DPX_SOURCE = 'DPX_SOURCE'
TAB_INF_FIXED_PCD = 'FixedPcd'
TAB_INF_FEATURE_PCD = 'FeaturePcd'
TAB_INF_PATCH_PCD = 'PatchPcd'
TAB_INF_PCD = 'Pcd'
TAB_INF_PCD_EX = 'PcdEx'
TAB_INF_GUIDTYPE_VAR = 'Variable'
TAB_INF_ABSTRACT = 'STR_MODULE_ABSTRACT'
TAB_INF_DESCRIPTION = 'STR_MODULE_DESCRIPTION'
TAB_INF_LICENSE = 'STR_MODULE_LICENSE'
TAB_INF_BINARY_ABSTRACT = 'STR_MODULE_BINARY_ABSTRACT'
TAB_INF_BINARY_DESCRIPTION = 'STR_MODULE_BINARY_DESCRIPTION'
TAB_INF_BINARY_LICENSE = 'STR_MODULE_BINARY_LICENSE'
#
# Dec Definitions
#
TAB_DEC_DEFINES = TAB_COMMON_DEFINES
TAB_DEC_DEFINES_DEC_SPECIFICATION = 'DEC_SPECIFICATION'
TAB_DEC_DEFINES_PACKAGE_NAME = 'PACKAGE_NAME'
TAB_DEC_DEFINES_PACKAGE_GUID = 'PACKAGE_GUID'
TAB_DEC_DEFINES_PACKAGE_VERSION = 'PACKAGE_VERSION'
TAB_DEC_DEFINES_PKG_UNI_FILE = 'PACKAGE_UNI_FILE'
TAB_DEC_PACKAGE_ABSTRACT = 'STR_PACKAGE_ABSTRACT'
TAB_DEC_PACKAGE_DESCRIPTION = 'STR_PACKAGE_DESCRIPTION'
TAB_DEC_PACKAGE_LICENSE = 'STR_PACKAGE_LICENSE'
TAB_DEC_BINARY_ABSTRACT = 'STR_PACKAGE_BINARY_ABSTRACT'
TAB_DEC_BINARY_DESCRIPTION = 'STR_PACKAGE_BINARY_DESCRIPTION'
TAB_DEC_BINARY_LICENSE = 'STR_PACKAGE_ASBUILT_LICENSE'
#
# Dsc Definitions
#
TAB_DSC_DEFINES = TAB_COMMON_DEFINES
TAB_DSC_DEFINES_PLATFORM_NAME = 'PLATFORM_NAME'
TAB_DSC_DEFINES_PLATFORM_GUID = 'PLATFORM_GUID'
TAB_DSC_DEFINES_PLATFORM_VERSION = 'PLATFORM_VERSION'
TAB_DSC_DEFINES_DSC_SPECIFICATION = 'DSC_SPECIFICATION'
TAB_DSC_DEFINES_OUTPUT_DIRECTORY = 'OUTPUT_DIRECTORY'
TAB_DSC_DEFINES_SUPPORTED_ARCHITECTURES = 'SUPPORTED_ARCHITECTURES'
TAB_DSC_DEFINES_BUILD_TARGETS = 'BUILD_TARGETS'
TAB_DSC_DEFINES_SKUID_IDENTIFIER = 'SKUID_IDENTIFIER'
TAB_DSC_DEFINES_FLASH_DEFINITION = 'FLASH_DEFINITION'
TAB_DSC_DEFINES_BUILD_NUMBER = 'BUILD_NUMBER'
TAB_DSC_DEFINES_MAKEFILE_NAME = 'MAKEFILE_NAME'
TAB_DSC_DEFINES_BS_BASE_ADDRESS = 'BsBaseAddress'
TAB_DSC_DEFINES_RT_BASE_ADDRESS = 'RtBaseAddress'
TAB_DSC_DEFINES_DEFINE = 'DEFINE'
TAB_FIX_LOAD_TOP_MEMORY_ADDRESS = 'FIX_LOAD_TOP_MEMORY_ADDRESS'
#
# TargetTxt Definitions
#
TAB_TAT_DEFINES_ACTIVE_PLATFORM = 'ACTIVE_PLATFORM'
TAB_TAT_DEFINES_ACTIVE_MODULE = 'ACTIVE_MODULE'
TAB_TAT_DEFINES_TOOL_CHAIN_CONF = 'TOOL_CHAIN_CONF'
TAB_TAT_DEFINES_MULTIPLE_THREAD = 'MULTIPLE_THREAD'
TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER = 'MAX_CONCURRENT_THREAD_NUMBER'
TAB_TAT_DEFINES_TARGET = 'TARGET'
TAB_TAT_DEFINES_TOOL_CHAIN_TAG = 'TOOL_CHAIN_TAG'
TAB_TAT_DEFINES_TARGET_ARCH = 'TARGET_ARCH'
TAB_TAT_DEFINES_BUILD_RULE_CONF = "BUILD_RULE_CONF"
#
# ToolDef Definitions
#
TAB_TOD_DEFINES_TARGET = 'TARGET'
TAB_TOD_DEFINES_TOOL_CHAIN_TAG = 'TOOL_CHAIN_TAG'
TAB_TOD_DEFINES_TARGET_ARCH = 'TARGET_ARCH'
TAB_TOD_DEFINES_COMMAND_TYPE = 'COMMAND_TYPE'
TAB_TOD_DEFINES_FAMILY = 'FAMILY'
TAB_TOD_DEFINES_BUILDRULEFAMILY = 'BUILDRULEFAMILY'
#
# Conditional Statements
#
TAB_IF = '!if'
TAB_END_IF = '!endif'
TAB_ELSE_IF = '!elseif'
TAB_ELSE = '!else'
TAB_IF_DEF = '!ifdef'
TAB_IF_N_DEF = '!ifndef'
TAB_IF_EXIST = '!if exist'
#
# Unknown section
#
TAB_UNKNOWN = 'UNKNOWN'
#
# Header section (virtual section for abstract, description, copyright,
# license)
#
TAB_HEADER = 'Header'
TAB_HEADER_ABSTRACT = 'Abstract'
TAB_HEADER_DESCRIPTION = 'Description'
TAB_HEADER_COPYRIGHT = 'Copyright'
TAB_HEADER_LICENSE = 'License'
TAB_BINARY_HEADER_IDENTIFIER = 'BinaryHeader'
TAB_BINARY_HEADER_USERID = 'TianoCore'
#
# Build database path
#
DATABASE_PATH = ":memory:"
#
# used by ECC
#
MODIFIER_LIST = ['IN', 'OUT', 'OPTIONAL', 'UNALIGNED', 'EFI_RUNTIMESERVICE', \
'EFI_BOOTSERVICE', 'EFIAPI']
#
# Dependency Expression
#
DEPEX_SUPPORTED_OPCODE = ["BEFORE", "AFTER", "PUSH", "AND", "OR", "NOT", \
"END", "SOR", "TRUE", "FALSE", '(', ')']
TAB_STATIC_LIBRARY = "STATIC-LIBRARY-FILE"
TAB_DYNAMIC_LIBRARY = "DYNAMIC-LIBRARY-FILE"
TAB_FRAMEWORK_IMAGE = "EFI-IMAGE-FILE"
TAB_C_CODE_FILE = "C-CODE-FILE"
TAB_C_HEADER_FILE = "C-HEADER-FILE"
TAB_UNICODE_FILE = "UNICODE-TEXT-FILE"
TAB_DEPENDENCY_EXPRESSION_FILE = "DEPENDENCY-EXPRESSION-FILE"
TAB_UNKNOWN_FILE = "UNKNOWN-TYPE-FILE"
TAB_DEFAULT_BINARY_FILE = "_BINARY_FILE_"
#
# used to indicate the state of processing header comment section of dec,
# inf files
#
HEADER_COMMENT_NOT_STARTED = -1
HEADER_COMMENT_STARTED = 0
HEADER_COMMENT_FILE = 1
HEADER_COMMENT_ABSTRACT = 2
HEADER_COMMENT_DESCRIPTION = 3
HEADER_COMMENT_COPYRIGHT = 4
HEADER_COMMENT_LICENSE = 5
HEADER_COMMENT_END = 6
#
# Static values for data models
#
MODEL_UNKNOWN = 0
MODEL_FILE_C = 1001
MODEL_FILE_H = 1002
MODEL_FILE_ASM = 1003
MODEL_FILE_INF = 1011
MODEL_FILE_DEC = 1012
MODEL_FILE_DSC = 1013
MODEL_FILE_FDF = 1014
MODEL_FILE_INC = 1015
MODEL_FILE_CIF = 1016
MODEL_IDENTIFIER_FILE_HEADER = 2001
MODEL_IDENTIFIER_FUNCTION_HEADER = 2002
MODEL_IDENTIFIER_COMMENT = 2003
MODEL_IDENTIFIER_PARAMETER = 2004
MODEL_IDENTIFIER_STRUCTURE = 2005
MODEL_IDENTIFIER_VARIABLE = 2006
MODEL_IDENTIFIER_INCLUDE = 2007
MODEL_IDENTIFIER_PREDICATE_EXPRESSION = 2008
MODEL_IDENTIFIER_ENUMERATE = 2009
MODEL_IDENTIFIER_PCD = 2010
MODEL_IDENTIFIER_UNION = 2011
MODEL_IDENTIFIER_MACRO_IFDEF = 2012
MODEL_IDENTIFIER_MACRO_IFNDEF = 2013
MODEL_IDENTIFIER_MACRO_DEFINE = 2014
MODEL_IDENTIFIER_MACRO_ENDIF = 2015
MODEL_IDENTIFIER_MACRO_PROGMA = 2016
MODEL_IDENTIFIER_FUNCTION_CALLING = 2018
MODEL_IDENTIFIER_TYPEDEF = 2017
MODEL_IDENTIFIER_FUNCTION_DECLARATION = 2019
MODEL_IDENTIFIER_ASSIGNMENT_EXPRESSION = 2020
MODEL_EFI_PROTOCOL = 3001
MODEL_EFI_PPI = 3002
MODEL_EFI_GUID = 3003
MODEL_EFI_LIBRARY_CLASS = 3004
MODEL_EFI_LIBRARY_INSTANCE = 3005
MODEL_EFI_PCD = 3006
MODEL_EFI_SOURCE_FILE = 3007
MODEL_EFI_BINARY_FILE = 3008
MODEL_EFI_SKU_ID = 3009
MODEL_EFI_INCLUDE = 3010
MODEL_EFI_DEPEX = 3011
MODEL_PCD = 4000
MODEL_PCD_FIXED_AT_BUILD = 4001
MODEL_PCD_PATCHABLE_IN_MODULE = 4002
MODEL_PCD_FEATURE_FLAG = 4003
MODEL_PCD_DYNAMIC_EX = 4004
MODEL_PCD_DYNAMIC_EX_DEFAULT = 4005
MODEL_PCD_DYNAMIC_EX_VPD = 4006
MODEL_PCD_DYNAMIC_EX_HII = 4007
MODEL_PCD_DYNAMIC = 4008
MODEL_PCD_DYNAMIC_DEFAULT = 4009
MODEL_PCD_DYNAMIC_VPD = 4010
MODEL_PCD_DYNAMIC_HII = 4011
MODEL_META_DATA_FILE_HEADER = 5000
MODEL_META_DATA_HEADER = 5001
MODEL_META_DATA_INCLUDE = 5002
MODEL_META_DATA_DEFINE = 5003
MODEL_META_DATA_CONDITIONAL_STATEMENT_IF = 5004
MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSE = 5005
MODEL_META_DATA_CONDITIONAL_STATEMENT_IFDEF = 5006
MODEL_META_DATA_CONDITIONAL_STATEMENT_IFNDEF = 5007
MODEL_META_DATA_BUILD_OPTION = 5008
MODEL_META_DATA_COMPONENT = 5009
MODEL_META_DATA_USER_EXTENSION = 5010
MODEL_META_DATA_PACKAGE = 5011
MODEL_META_DATA_NMAKE = 5012
MODEL_META_DATA_CONDITIONAL_STATEMENT_ELSEIF = 5013
MODEL_META_DATA_CONDITIONAL_STATEMENT_ENDIF = 5014
TOOL_FAMILY_LIST = ["MSFT",
"INTEL",
"GCC",
]
TYPE_HOB_SECTION = 'HOB'
TYPE_EVENT_SECTION = 'EVENT'
TYPE_BOOTMODE_SECTION = 'BOOTMODE'
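#
# Maximum PCD error code: 0xFFFFFFFF, the UINT32 maximum
#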
PCD_ERR_CODE_MAX_SIZE = 4294967295
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Library/DataType.py
|
## @file
# This file is used to define comment parsing interface
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
CommentParsing
'''
##
# Import Modules
#
import re
from Library.StringUtils import GetSplitValueList
from Library.StringUtils import CleanString2
from Library.DataType import HEADER_COMMENT_NOT_STARTED
from Library.DataType import TAB_COMMENT_SPLIT
from Library.DataType import HEADER_COMMENT_LICENSE
from Library.DataType import HEADER_COMMENT_ABSTRACT
from Library.DataType import HEADER_COMMENT_COPYRIGHT
from Library.DataType import HEADER_COMMENT_DESCRIPTION
from Library.DataType import TAB_SPACE_SPLIT
from Library.DataType import TAB_COMMA_SPLIT
from Library.DataType import SUP_MODULE_LIST
from Library.DataType import TAB_VALUE_SPLIT
from Library.DataType import TAB_PCD_VALIDRANGE
from Library.DataType import TAB_PCD_VALIDLIST
from Library.DataType import TAB_PCD_EXPRESSION
from Library.DataType import TAB_PCD_PROMPT
from Library.DataType import TAB_CAPHEX_START
from Library.DataType import TAB_HEX_START
from Library.DataType import PCD_ERR_CODE_MAX_SIZE
from Library.ExpressionValidate import IsValidRangeExpr
from Library.ExpressionValidate import IsValidListExpr
from Library.ExpressionValidate import IsValidLogicalExpr
from Object.POM.CommonObject import TextObject
from Object.POM.CommonObject import PcdErrorObject
import Logger.Log as Logger
from Logger.ToolError import FORMAT_INVALID
from Logger.ToolError import FORMAT_NOT_SUPPORTED
from Logger import StringTable as ST
## ParseHeaderCommentSection
#
# Parse Header comment section lines; extract Abstract, Description,
# Copyright, and License lines
#
# @param CommentList: List of (Comment, LineNumber)
# @param FileName: FileName of the comment
#
def ParseHeaderCommentSection(CommentList, FileName = None, IsBinaryHeader = False):
Abstract = ''
Description = ''
Copyright = ''
License = ''
EndOfLine = "\n"
if IsBinaryHeader:
STR_HEADER_COMMENT_START = "@BinaryHeader"
else:
STR_HEADER_COMMENT_START = "@file"
HeaderCommentStage = HEADER_COMMENT_NOT_STARTED
#
# first find the last copyright line
#
Last = 0
for Index in range(len(CommentList)-1, 0, -1):
Line = CommentList[Index][0]
if _IsCopyrightLine(Line):
Last = Index
break
for Item in CommentList:
Line = Item[0]
LineNo = Item[1]
if not Line.startswith(TAB_COMMENT_SPLIT) and Line:
Logger.Error("\nUPT", FORMAT_INVALID, ST.ERR_INVALID_COMMENT_FORMAT, FileName, Item[1])
Comment = CleanString2(Line)[1]
Comment = Comment.strip()
#
        # if there are blank lines within the License or Description, keep them, as they
        # indicate separate blocks; a blank line in the position where the Abstract should
        # be is also kept, as it indicates that there is no abstract
#
if not Comment and HeaderCommentStage not in [HEADER_COMMENT_LICENSE, \
HEADER_COMMENT_DESCRIPTION, HEADER_COMMENT_ABSTRACT]:
continue
if HeaderCommentStage == HEADER_COMMENT_NOT_STARTED:
if Comment.startswith(STR_HEADER_COMMENT_START):
HeaderCommentStage = HEADER_COMMENT_ABSTRACT
else:
License += Comment + EndOfLine
else:
if HeaderCommentStage == HEADER_COMMENT_ABSTRACT:
#
# in case there is no abstract and description
#
if not Comment:
HeaderCommentStage = HEADER_COMMENT_DESCRIPTION
elif _IsCopyrightLine(Comment):
Result, ErrMsg = _ValidateCopyright(Comment)
ValidateCopyright(Result, ST.WRN_INVALID_COPYRIGHT, FileName, LineNo, ErrMsg)
Copyright += Comment + EndOfLine
HeaderCommentStage = HEADER_COMMENT_COPYRIGHT
else:
Abstract += Comment + EndOfLine
HeaderCommentStage = HEADER_COMMENT_DESCRIPTION
elif HeaderCommentStage == HEADER_COMMENT_DESCRIPTION:
#
# in case there is no description
#
if _IsCopyrightLine(Comment):
Result, ErrMsg = _ValidateCopyright(Comment)
ValidateCopyright(Result, ST.WRN_INVALID_COPYRIGHT, FileName, LineNo, ErrMsg)
Copyright += Comment + EndOfLine
HeaderCommentStage = HEADER_COMMENT_COPYRIGHT
else:
Description += Comment + EndOfLine
elif HeaderCommentStage == HEADER_COMMENT_COPYRIGHT:
if _IsCopyrightLine(Comment):
Result, ErrMsg = _ValidateCopyright(Comment)
ValidateCopyright(Result, ST.WRN_INVALID_COPYRIGHT, FileName, LineNo, ErrMsg)
Copyright += Comment + EndOfLine
else:
#
                    # Contents after the copyright lines are license text; non-copyright
                    # lines in between copyright lines are discarded
#
if LineNo > Last:
if License:
License += EndOfLine
License += Comment + EndOfLine
HeaderCommentStage = HEADER_COMMENT_LICENSE
else:
if not Comment and not License:
continue
License += Comment + EndOfLine
return Abstract.strip(), Description.strip(), Copyright.strip(), License.strip()
## _IsCopyrightLine
# Check whether the current line is a copyright line; the criterion is a
# case-insensitive keyword "Copyright" followed by zero or more white space
# characters and a "(" character
#
# @param LineContent:  the line to be checked
# @return: True if the current line is a copyright line, False otherwise
#
def _IsCopyrightLine (LineContent):
LineContent = LineContent.upper()
Result = False
ReIsCopyrightRe = re.compile(r"""(^|\s)COPYRIGHT *\(""", re.DOTALL)
if ReIsCopyrightRe.search(LineContent):
Result = True
return Result
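#
# For illustration:
#
#   _IsCopyrightLine('Copyright (c) 2018, Intel Corporation.')       returns True
#   _IsCopyrightLine('Portions copyright (c) 2022, Loongson.')       returns True
#   _IsCopyrightLine('SPDX-License-Identifier: BSD-2-Clause-Patent') returns False
#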
## ParseGenericComment
#
# @param GenericComment: Generic comment list, element of
# (CommentLine, LineNum)
# @param ContainerFile: Input value for filename of Dec file
#
def ParseGenericComment (GenericComment, ContainerFile=None, SkipTag=None):
if ContainerFile:
pass
HelpTxt = None
HelpStr = ''
for Item in GenericComment:
CommentLine = Item[0]
Comment = CleanString2(CommentLine)[1]
if SkipTag is not None and Comment.startswith(SkipTag):
Comment = Comment.replace(SkipTag, '', 1)
HelpStr += Comment + '\n'
if HelpStr:
HelpTxt = TextObject()
if HelpStr.endswith('\n') and not HelpStr.endswith('\n\n') and HelpStr != '\n':
HelpStr = HelpStr[:-1]
HelpTxt.SetString(HelpStr)
return HelpTxt
## ParsePcdErrorCode
#
# @param Value: original ErrorCode value
# @param ContainerFile: Input value for filename of Dec file
# @param LineNum: Line Num
#
def ParsePcdErrorCode (Value = None, ContainerFile = None, LineNum = None):
try:
if Value.strip().startswith((TAB_HEX_START, TAB_CAPHEX_START)):
Base = 16
else:
Base = 10
ErrorCode = int(Value, Base)
if ErrorCode > PCD_ERR_CODE_MAX_SIZE or ErrorCode < 0:
Logger.Error('Parser',
                         FORMAT_NOT_SUPPORTED,
                         "The format %s of ErrorCode is not valid, should be UINT32 type or long type" % Value,
File = ContainerFile,
Line = LineNum)
ErrorCode = '0x%x' % ErrorCode
return ErrorCode
except ValueError as XStr:
if XStr:
pass
Logger.Error('Parser',
                     FORMAT_NOT_SUPPORTED,
                     "The format %s of ErrorCode is not valid, should be UINT32 type or long type" % Value,
File = ContainerFile,
Line = LineNum)
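#
# For illustration (hex input is echoed back; decimal input is normalized to hex):
#
#   ParsePcdErrorCode('0x1') returns '0x1'
#   ParsePcdErrorCode('35')  returns '0x23'
#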
## ParseDecPcdGenericComment
#
# @param GenericComment: Generic comment list, element of (CommentLine,
# LineNum)
# @param ContainerFile: Input value for filename of Dec file
#
def ParseDecPcdGenericComment (GenericComment, ContainerFile, TokenSpaceGuidCName, CName, MacroReplaceDict):
HelpStr = ''
PromptStr = ''
PcdErr = None
PcdErrList = []
ValidValueNum = 0
ValidRangeNum = 0
ExpressionNum = 0
for (CommentLine, LineNum) in GenericComment:
Comment = CleanString2(CommentLine)[1]
#
# To replace Macro
#
        MACRO_PATTERN = r'[\t\s]*\$\([A-Z][_A-Z0-9]*\)'
MatchedStrs = re.findall(MACRO_PATTERN, Comment)
for MatchedStr in MatchedStrs:
if MatchedStr:
Macro = MatchedStr.strip().lstrip('$(').rstrip(')').strip()
if Macro in MacroReplaceDict:
Comment = Comment.replace(MatchedStr, MacroReplaceDict[Macro])
if Comment.startswith(TAB_PCD_VALIDRANGE):
if ValidValueNum > 0 or ExpressionNum > 0:
Logger.Error('Parser',
FORMAT_NOT_SUPPORTED,
ST.WRN_MULTI_PCD_RANGES,
File = ContainerFile,
Line = LineNum)
else:
PcdErr = PcdErrorObject()
PcdErr.SetTokenSpaceGuidCName(TokenSpaceGuidCName)
PcdErr.SetCName(CName)
PcdErr.SetFileLine(Comment)
PcdErr.SetLineNum(LineNum)
ValidRangeNum += 1
ValidRange = Comment.replace(TAB_PCD_VALIDRANGE, "", 1).strip()
Valid, Cause = _CheckRangeExpression(ValidRange)
if Valid:
ValueList = ValidRange.split(TAB_VALUE_SPLIT)
if len(ValueList) > 1:
PcdErr.SetValidValueRange((TAB_VALUE_SPLIT.join(ValueList[1:])).strip())
PcdErr.SetErrorNumber(ParsePcdErrorCode(ValueList[0], ContainerFile, LineNum))
else:
PcdErr.SetValidValueRange(ValidRange)
PcdErrList.append(PcdErr)
else:
Logger.Error("Parser",
FORMAT_NOT_SUPPORTED,
Cause,
ContainerFile,
LineNum)
elif Comment.startswith(TAB_PCD_VALIDLIST):
if ValidRangeNum > 0 or ExpressionNum > 0:
Logger.Error('Parser',
FORMAT_NOT_SUPPORTED,
ST.WRN_MULTI_PCD_RANGES,
File = ContainerFile,
Line = LineNum)
elif ValidValueNum > 0:
Logger.Error('Parser',
FORMAT_NOT_SUPPORTED,
ST.WRN_MULTI_PCD_VALIDVALUE,
File = ContainerFile,
Line = LineNum)
else:
PcdErr = PcdErrorObject()
PcdErr.SetTokenSpaceGuidCName(TokenSpaceGuidCName)
PcdErr.SetCName(CName)
PcdErr.SetFileLine(Comment)
PcdErr.SetLineNum(LineNum)
ValidValueNum += 1
ValidValueExpr = Comment.replace(TAB_PCD_VALIDLIST, "", 1).strip()
Valid, Cause = _CheckListExpression(ValidValueExpr)
if Valid:
ValidValue = Comment.replace(TAB_PCD_VALIDLIST, "", 1).replace(TAB_COMMA_SPLIT, TAB_SPACE_SPLIT)
ValueList = ValidValue.split(TAB_VALUE_SPLIT)
if len(ValueList) > 1:
PcdErr.SetValidValue((TAB_VALUE_SPLIT.join(ValueList[1:])).strip())
PcdErr.SetErrorNumber(ParsePcdErrorCode(ValueList[0], ContainerFile, LineNum))
else:
PcdErr.SetValidValue(ValidValue)
PcdErrList.append(PcdErr)
else:
Logger.Error("Parser",
FORMAT_NOT_SUPPORTED,
Cause,
ContainerFile,
LineNum)
elif Comment.startswith(TAB_PCD_EXPRESSION):
if ValidRangeNum > 0 or ValidValueNum > 0:
Logger.Error('Parser',
FORMAT_NOT_SUPPORTED,
ST.WRN_MULTI_PCD_RANGES,
File = ContainerFile,
Line = LineNum)
else:
PcdErr = PcdErrorObject()
PcdErr.SetTokenSpaceGuidCName(TokenSpaceGuidCName)
PcdErr.SetCName(CName)
PcdErr.SetFileLine(Comment)
PcdErr.SetLineNum(LineNum)
ExpressionNum += 1
Expression = Comment.replace(TAB_PCD_EXPRESSION, "", 1).strip()
Valid, Cause = _CheckExpression(Expression)
if Valid:
ValueList = Expression.split(TAB_VALUE_SPLIT)
if len(ValueList) > 1:
PcdErr.SetExpression((TAB_VALUE_SPLIT.join(ValueList[1:])).strip())
PcdErr.SetErrorNumber(ParsePcdErrorCode(ValueList[0], ContainerFile, LineNum))
else:
PcdErr.SetExpression(Expression)
PcdErrList.append(PcdErr)
else:
Logger.Error("Parser",
FORMAT_NOT_SUPPORTED,
Cause,
ContainerFile,
LineNum)
elif Comment.startswith(TAB_PCD_PROMPT):
if PromptStr:
Logger.Error('Parser',
FORMAT_NOT_SUPPORTED,
ST.WRN_MULTI_PCD_PROMPT,
File = ContainerFile,
Line = LineNum)
PromptStr = Comment.replace(TAB_PCD_PROMPT, "", 1).strip()
else:
if Comment:
HelpStr += Comment + '\n'
#
# remove the last EOL if the comment is of format 'FOO\n'
#
if HelpStr.endswith('\n'):
if HelpStr != '\n' and not HelpStr.endswith('\n\n'):
HelpStr = HelpStr[:-1]
return HelpStr, PcdErrList, PromptStr
## ParseDecPcdTailComment
#
# @param TailCommentList: Tail comment list of Pcd, item of format (Comment, LineNum)
# @param ContainerFile: Input value for filename of Dec file
# @retVal SupModuleList: The supported module type list detected
# @retVal HelpStr: The generic help text string detected
#
def ParseDecPcdTailComment (TailCommentList, ContainerFile):
assert(len(TailCommentList) == 1)
TailComment = TailCommentList[0][0]
LineNum = TailCommentList[0][1]
Comment = TailComment.lstrip(" #")
ReFindFirstWordRe = re.compile(r"""^([^ #]*)""", re.DOTALL)
#
# get first word and compare with SUP_MODULE_LIST
#
MatchObject = ReFindFirstWordRe.match(Comment)
if not (MatchObject and MatchObject.group(1) in SUP_MODULE_LIST):
return None, Comment
#
# parse the line; it must have a supported module type specified
#
if Comment.find(TAB_COMMENT_SPLIT) == -1:
Comment += TAB_COMMENT_SPLIT
SupMode, HelpStr = GetSplitValueList(Comment, TAB_COMMENT_SPLIT, 1)
SupModuleList = []
for Mod in GetSplitValueList(SupMode, TAB_SPACE_SPLIT):
if not Mod:
continue
elif Mod not in SUP_MODULE_LIST:
Logger.Error("UPT",
FORMAT_INVALID,
ST.WRN_INVALID_MODULE_TYPE%Mod,
ContainerFile,
LineNum)
else:
SupModuleList.append(Mod)
return SupModuleList, HelpStr
## _CheckListExpression
#
# @param Expression: Pcd value list expression
#
def _CheckListExpression(Expression):
ListExpr = ''
if TAB_VALUE_SPLIT in Expression:
ListExpr = Expression[Expression.find(TAB_VALUE_SPLIT)+1:]
else:
ListExpr = Expression
return IsValidListExpr(ListExpr)
## _CheckExpression
#
# @param Expression: Pcd value expression
#
def _CheckExpression(Expression):
Expr = ''
if TAB_VALUE_SPLIT in Expression:
Expr = Expression[Expression.find(TAB_VALUE_SPLIT)+1:]
else:
Expr = Expression
return IsValidLogicalExpr(Expr, True)
## _CheckRangeExpression
#
# @param Expression: Pcd range expression
#
def _CheckRangeExpression(Expression):
RangeExpr = ''
if TAB_VALUE_SPLIT in Expression:
RangeExpr = Expression[Expression.find(TAB_VALUE_SPLIT)+1:]
else:
RangeExpr = Expression
return IsValidRangeExpr(RangeExpr)
## ValidateCopyright
#
# Log a warning if the copyright line failed validation
#
def ValidateCopyright(Result, ErrType, FileName, LineNo, ErrMsg):
if not Result:
Logger.Warn("\nUPT", ErrType, FileName, LineNo, ErrMsg)
## _ValidateCopyright
#
# @param Line: Line that contains copyright information, # stripped
#
# @retval Result: True if the line conforms to the Spec format, False otherwise
# @retval ErrMsg: the detailed error description
#
def _ValidateCopyright(Line):
if Line:
pass
Result = True
ErrMsg = ''
return Result, ErrMsg
def GenerateTokenList (Comment):
#
# Tokenize Comment using '#' and ' ' as token separators
#
ReplacedComment = None
while Comment != ReplacedComment:
ReplacedComment = Comment
Comment = Comment.replace('##', '#').replace(' ', ' ').replace(' ', '#').strip('# ')
return Comment.split('#')
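#
# For illustration, runs of '#' and ' ' collapse into single token separators:
#
#   GenerateTokenList('## CONSUMES  ## Help text') returns ['CONSUMES', 'Help', 'text']
#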
#
# Comment - Comment to parse
# TypeTokens - A dictionary of type token synonyms
# RemoveTokens - A list of tokens to remove from help text
# ParseVariable - True for parsing [Guids]. Otherwise False
#
def ParseComment (Comment, UsageTokens, TypeTokens, RemoveTokens, ParseVariable):
#
# Initialize return values
#
Usage = None
Type = None
String = None
Comment = Comment[0]
NumTokens = 2
if ParseVariable:
#
# Remove white space around first instance of ':' from Comment if 'Variable'
# is in front of ':' and Variable is the 1st or 2nd token in Comment.
#
List = Comment.split(':', 1)
if len(List) > 1:
SubList = GenerateTokenList (List[0].strip())
if len(SubList) in [1, 2] and SubList[-1] == 'Variable':
if List[1].strip().find('L"') == 0:
Comment = List[0].strip() + ':' + List[1].strip()
#
        # Remove the first instance of L"<VariableName>" from Comment and put it
        # into String if and only if L"<VariableName>" is the 1st or 2nd token, or
        # the token immediately following 'Variable:'.
#
End = -1
Start = Comment.find('Variable:L"')
if Start >= 0:
String = Comment[Start + 9:]
End = String[2:].find('"')
else:
Start = Comment.find('L"')
if Start >= 0:
String = Comment[Start:]
End = String[2:].find('"')
if End >= 0:
SubList = GenerateTokenList (Comment[:Start])
if len(SubList) < 2:
Comment = Comment[:Start] + String[End + 3:]
String = String[:End + 3]
Type = 'Variable'
NumTokens = 1
#
# Initialize HelpText to Comment.
    # Content will be removed from HelpText as matching tokens are found
#
HelpText = Comment
#
# Tokenize Comment using '#' and ' ' as token separators
#
List = GenerateTokenList (Comment)
#
# Search first two tokens for Usage and Type and remove any matching tokens
# from HelpText
#
for Token in List[0:NumTokens]:
if Usage is None and Token in UsageTokens:
Usage = UsageTokens[Token]
HelpText = HelpText.replace(Token, '')
if Usage is not None or not ParseVariable:
for Token in List[0:NumTokens]:
if Type is None and Token in TypeTokens:
Type = TypeTokens[Token]
HelpText = HelpText.replace(Token, '')
if Usage is not None:
for Token in List[0:NumTokens]:
if Token in RemoveTokens:
HelpText = HelpText.replace(Token, '')
#
    # If no Usage token is present, set Usage to UNDEFINED
#
if Usage is None:
Usage = 'UNDEFINED'
#
    # If no Type token is present, set Type to UNDEFINED
#
if Type is None:
Type = 'UNDEFINED'
#
    # If Type is not 'Variable', set String to None
#
if Type != 'Variable':
String = None
#
# Strip ' ' and '#' from the beginning of HelpText
# If HelpText is an empty string after all parsing is
# complete then set HelpText to None
#
HelpText = HelpText.lstrip('# ')
if HelpText == '':
HelpText = None
#
# Return parsing results
#
return Usage, Type, String, HelpText
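#
# Minimal self-check sketch (assumes the UPT directory is on sys.path so the
# imports above resolve); ParseComment expects a (CommentLine, LineNum) tuple
# plus usage/type token tables such as those defined in Library.DataType.
#
if __name__ == '__main__':
    DemoUsageTokens = {'CONSUMES': 'CONSUMES', 'PRODUCES': 'PRODUCES'}
    DemoTypeTokens = {'Event': 'Event'}
    # Expected result: ('CONSUMES', 'Event', None, 'Help text')
    print(ParseComment(('## CONSUMES ## Event Help text', 1),
                       DemoUsageTokens, DemoTypeTokens, [], False))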
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Library/CommentParsing.py
|
## @file ParserValidate.py
# Functions for parser validation
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
ParserValidate
'''
import os.path
import re
import platform
from Library.DataType import MODULE_LIST
from Library.DataType import COMPONENT_TYPE_LIST
from Library.DataType import PCD_USAGE_TYPE_LIST_OF_MODULE
from Library.DataType import TAB_SPACE_SPLIT
from Library.StringUtils import GetSplitValueList
from Library.ExpressionValidate import IsValidBareCString
from Library.ExpressionValidate import IsValidFeatureFlagExp
from Common.MultipleWorkspace import MultipleWorkspace as mws
## __HexDigit() method
#
# Check whether the input char is a hex digit
#
# @param TempChar: The char to test
#
def __HexDigit(TempChar):
if (TempChar >= 'a' and TempChar <= 'f') or \
(TempChar >= 'A' and TempChar <= 'F') \
or (TempChar >= '0' and TempChar <= '9'):
return True
else:
return False
## IsValidHex() method
#
# Check whether the input string is a valid hex number.
#
# @param HexStr: The string to test
#
def IsValidHex(HexStr):
if not HexStr.upper().startswith("0X"):
return False
CharList = [c for c in HexStr[2:] if not __HexDigit(c)]
if len(CharList) == 0:
return True
else:
return False
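#
# For illustration:
#
#   IsValidHex('0x1F') returns True
#   IsValidHex('0X2a') returns True
#   IsValidHex('1F')   returns False    (must start with 0x/0X)
#   IsValidHex('0xG1') returns False
#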
## Judge the input string is valid bool type or not.
#
# <TRUE> ::= {"TRUE"} {"true"} {"True"} {"0x1"} {"0x01"}
# <FALSE> ::= {"FALSE"} {"false"} {"False"} {"0x0"} {"0x00"}
# <BoolType> ::= {<TRUE>} {<FALSE>}
#
# @param BoolString: A string containing the value to be judged.
#
def IsValidBoolType(BoolString):
#
# Valid True
#
if BoolString == 'TRUE' or \
BoolString == 'True' or \
BoolString == 'true' or \
BoolString == '0x1' or \
BoolString == '0x01':
return True
#
# Valid False
#
elif BoolString == 'FALSE' or \
BoolString == 'False' or \
BoolString == 'false' or \
BoolString == '0x0' or \
BoolString == '0x00':
return True
#
# Invalid bool type
#
else:
return False
## Is Valid Module Type List or not
#
# @param ModuleTypeList: A list of ModuleType strings to be judged.
#
def IsValidInfMoudleTypeList(ModuleTypeList):
    for ModuleType in ModuleTypeList:
        if not IsValidInfMoudleType(ModuleType):
            return False
    return True
## Is Valid Module Type or not
#
# @param ModuleType: A string containing the ModuleType to be judged.
#
def IsValidInfMoudleType(ModuleType):
if ModuleType in MODULE_LIST:
return True
else:
return False
## Is Valid Component Type or not
#
# @param ComponentType: A string containing the ComponentType to be judged.
#
def IsValidInfComponentType(ComponentType):
if ComponentType.upper() in COMPONENT_TYPE_LIST:
return True
else:
return False
## Is valid Tool Family or not
#
# @param ToolFamily: A string containing the Tool Family to be judged.
# Family := [A-Z]([a-zA-Z0-9])*
#
def IsValidToolFamily(ToolFamily):
ReIsValidFamily = re.compile(r"^[A-Z]+[A-Za-z0-9]{0,}$", re.DOTALL)
if ReIsValidFamily.match(ToolFamily) is None:
return False
return True
## Is valid Tool TagName or not
#
# The TagName sample is MYTOOLS and VS2005.
#
# @param TagName: A string containing the Tool TagName to be judged.
#
def IsValidToolTagName(TagName):
if TagName.strip() == '':
return True
if TagName.strip() == '*':
return True
if not IsValidWord(TagName):
return False
return True
## Is valid arch or not
#
# @param Arch  The arch string to be validated
# <OA> ::= (a-zA-Z)(A-Za-z0-9){0,}
# <arch> ::= {"IA32"} {"X64"} {"IPF"} {"EBC"} {<OA>}
# {"common"}
# @param Arch: Input arch
#
def IsValidArch(Arch):
if Arch == 'common':
return True
ReIsValidArch = re.compile(r"^[a-zA-Z]+[a-zA-Z0-9]{0,}$", re.DOTALL)
if ReIsValidArch.match(Arch) is None:
return False
return True
## Is valid family or not
#
# <Family> ::= {"MSFT"} {"GCC"} {"INTEL"} {<Usr>} {"*"}
# <Usr> ::= [A-Z][A-Za-z0-9]{0,}
#
# @param Family: The family string to be validated
#
def IsValidFamily(Family):
Family = Family.strip()
if Family == '*':
return True
if Family == '':
return True
ReIsValidFamily = re.compile(r"^[A-Z]+[A-Za-z0-9]{0,}$", re.DOTALL)
if ReIsValidFamily.match(Family) is None:
return False
return True
## Is valid build option name or not
#
# @param BuildOptionName: The BuildOptionName string to be validated
#
def IsValidBuildOptionName(BuildOptionName):
if not BuildOptionName:
return False
ToolOptionList = GetSplitValueList(BuildOptionName, '_', 4)
if len(ToolOptionList) != 5:
return False
    ReIsValidBuildOption1 = re.compile(r"^\s*(\*|[A-Z][a-zA-Z0-9]*)$")
    ReIsValidBuildOption2 = re.compile(r"^\s*(\*|[a-zA-Z][a-zA-Z0-9]*)$")
if ReIsValidBuildOption1.match(ToolOptionList[0]) is None:
return False
if ReIsValidBuildOption1.match(ToolOptionList[1]) is None:
return False
if ReIsValidBuildOption2.match(ToolOptionList[2]) is None:
return False
if ToolOptionList[3] == "*" and ToolOptionList[4] not in ['FAMILY', 'DLL', 'DPATH']:
return False
return True
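#
# Illustrative usage of IsValidBuildOptionName (example added for clarity,
# not part of the original tool). A build option name consists of five
# underscore-separated fields, TARGET_TAGNAME_ARCH_TOOLCODE_ATTRIBUTE,
# where each of the first three fields may be "*":
#
#   IsValidBuildOptionName('DEBUG_MYTOOLS_IA32_CC_FLAGS')  # -> True
#   IsValidBuildOptionName('*_*_*_CC_FLAGS')               # -> True
#   IsValidBuildOptionName('CC_FLAGS')                     # -> False (only two fields)
#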
## IsValidToken
#
# Check whether the pattern string matches the whole token
#
# @param ReString: Regular expression pattern string
# @param Token: Token to be matched
#
def IsValidToken(ReString, Token):
Match = re.compile(ReString).match(Token)
return Match and Match.start() == 0 and Match.end() == len(Token)
## IsValidPath
#
# Check whether the path exists and is a valid absolute or relative path
#
# @param Path: Absolute path or relative path to be checked
# @param Root: Root path
#
def IsValidPath(Path, Root):
Path = Path.strip()
OrigPath = Path.replace('\\', '/')
Path = os.path.normpath(Path).replace('\\', '/')
Root = os.path.normpath(Root).replace('\\', '/')
FullPath = mws.join(Root, Path)
if not os.path.exists(FullPath):
return False
#
# If Path is absolute path.
# It should be in Root.
#
if os.path.isabs(Path):
if not Path.startswith(Root):
return False
return True
#
# Check illegal character
#
for Rel in ['/', './', '../']:
if OrigPath.startswith(Rel):
return False
for Rel in ['//', '/./', '/../']:
if Rel in OrigPath:
return False
for Rel in ['/.', '/..', '/']:
if OrigPath.endswith(Rel):
return False
Path = Path.rstrip('/')
#
# Check relative path
#
for Word in Path.split('/'):
if not IsValidWord(Word):
return False
return True
## IsValidInstallPath
#
# Check whether an install path is valid or not.
#
# Absolute paths, paths starting with '.', and paths containing '..' are
# invalid.
#
# @param Path: path to be checked
#
def IsValidInstallPath(Path):
if platform.platform().find("Windows") >= 0:
if os.path.isabs(Path):
return False
else:
if Path[1:2] == ':':
return False
if os.path.isabs(Path):
return False
if Path.startswith('.'):
return False
if Path.find('..') != -1:
return False
return True
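#
# Illustrative usage of IsValidInstallPath (example added for clarity, not
# part of the original tool):
#
#   IsValidInstallPath('Include/Guid')    # -> True
#   IsValidInstallPath('./Include')       # -> False (starts with '.')
#   IsValidInstallPath('Include/../Foo')  # -> False (contains '..')
#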
## IsValidCFormatGuid
#
# Check whether the GUID has the C format of {8,4,4,{2,2,2,2,2,2,2,2}}
#
# @param Guid: Guid to be checked
#
def IsValidCFormatGuid(Guid):
#
# Valid: { 0xf0b11735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38,
# 0xaf, 0x48, 0xce }}
# Invalid: { 0xf0b11735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38,
# 0xaf, 0x48, 0xce }} 0x123
# Invalid: { 0xf0b1 1735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38,
# 0xaf, 0x48, 0xce }}
#
List = ['{', 10, ',', 6, ',', 6, ',{', 4, ',', 4, ',', 4,
',', 4, ',', 4, ',', 4, ',', 4, ',', 4, '}}']
Index = 0
Value = ''
SepValue = ''
for Char in Guid:
if Char not in '{},\t ':
Value += Char
continue
if Value:
try:
#
# Index may out of bound
#
if not SepValue or SepValue != List[Index]:
return False
Index += 1
SepValue = ''
if not Value.startswith('0x') and not Value.startswith('0X'):
return False
#
# Index may out of bound
#
if not isinstance(List[Index], type(1)) or \
len(Value) > List[Index] or len(Value) < 3:
return False
#
# Check if string can be converted to integer
# Throw exception if not
#
int(Value, 16)
except BaseException:
#
# Exception caught means invalid format
#
return False
Value = ''
Index += 1
if Char in '{},':
SepValue += Char
return SepValue == '}}' and Value == ''
## IsValidPcdType
#
# Check whether the PCD type is valid
#
# @param PcdTypeString: The PcdType string to be checked.
#
def IsValidPcdType(PcdTypeString):
if PcdTypeString.upper() in PCD_USAGE_TYPE_LIST_OF_MODULE:
return True
else:
return False
## IsValidWord
#
# Check whether the word is valid.
# <Word> ::= (a-zA-Z0-9_)(a-zA-Z0-9_-){0,}
# Alphanumeric characters with optional dash "-" and/or underscore "_"
# characters. No whitespace characters are permitted.
#
# @param Word: The word string to be checked.
#
def IsValidWord(Word):
if not Word:
return False
#
# The first char should be alpha, _ or Digit.
#
if not Word[0].isalnum() and \
not Word[0] == '_' and \
not Word[0].isdigit():
return False
LastChar = ''
for Char in Word[1:]:
if (not Char.isalpha()) and \
(not Char.isdigit()) and \
Char != '-' and \
Char != '_' and \
Char != '.':
return False
if Char == '.' and LastChar == '.':
return False
LastChar = Char
return True
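#
# Illustrative usage of IsValidWord (example added for clarity, not part
# of the original tool):
#
#   IsValidWord('Pcd_Token-Name')  # -> True
#   IsValidWord('two words')       # -> False (whitespace not permitted)
#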
## IsValidSimpleWord
#
# Check whether the SimpleWord is valid.
# <SimpleWord> ::= (a-zA-Z0-9)(a-zA-Z0-9_-){0,}
# A word that cannot contain a period character.
#
# @param Word: The word string to be checked.
#
def IsValidSimpleWord(Word):
ReIsValidSimpleWord = \
re.compile(r"^[0-9A-Za-z][0-9A-Za-z\-_]*$", re.DOTALL)
Word = Word.strip()
if not Word:
return False
if not ReIsValidSimpleWord.match(Word):
return False
return True
## IsValidDecVersion
#
# Check whether the decimal version is valid.
# <DecVersion> ::= (0-9){1,} ["." (0-9){1,}]
#
# @param Word: The version string to be checked.
#
def IsValidDecVersion(Word):
if Word.find('.') > -1:
ReIsValidDecVersion = re.compile(r"[0-9]+\.?[0-9]+$")
else:
ReIsValidDecVersion = re.compile(r"[0-9]+$")
if ReIsValidDecVersion.match(Word) is None:
return False
return True
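#
# Illustrative usage of IsValidDecVersion (example added for clarity, not
# part of the original tool):
#
#   IsValidDecVersion('10')     # -> True
#   IsValidDecVersion('1.10')   # -> True
#   IsValidDecVersion('1.')     # -> False (no digits after the period)
#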
## IsValidHexVersion
#
# Check whether the hex version is valid.
# <HexVersion> ::= "0x" <Major> <Minor>
# <Major> ::= <HexDigit>{4}
# <Minor> ::= <HexDigit>{4}
#
# @param Word: The version string to be checked.
#
def IsValidHexVersion(Word):
ReIsValidHexVersion = re.compile(r"[0][xX][0-9A-Fa-f]{8}$", re.DOTALL)
if ReIsValidHexVersion.match(Word) is None:
return False
return True
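#
# Illustrative usage of IsValidHexVersion (example added for clarity, not
# part of the original tool):
#
#   IsValidHexVersion('0x00010005')  # -> True (Major 0001, Minor 0005)
#   IsValidHexVersion('0x15')        # -> False (exactly 8 hex digits required)
#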
## IsValidBuildNumber
#
# Check whether the BUILD_NUMBER is valid.
# ["BUILD_NUMBER" "=" <Integer>{1,4} <EOL>]
#
# @param Word: The BUILD_NUMBER string to be checked.
#
def IsValidBuildNumber(Word):
    ReIsValidBuildNumber = re.compile(r"[0-9]{1,4}$", re.DOTALL)
    if ReIsValidBuildNumber.match(Word) is None:
        return False
    return True
## IsValidDepex
#
# Check whether the Depex is valid.
#
# @param Word: The Depex string to be checked.
#
def IsValidDepex(Word):
Index = Word.upper().find("PUSH")
if Index > -1:
return IsValidCFormatGuid(Word[Index+4:].strip())
ReIsValidCName = re.compile(r"^[A-Za-z_][0-9A-Za-z_\s\.]*$", re.DOTALL)
if ReIsValidCName.match(Word) is None:
return False
return True
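#
# Illustrative usage of IsValidDepex (example added for clarity, not part
# of the original tool):
#
#   IsValidDepex('gEfiHiiStringProtocolGuid AND gEfiHiiDatabaseProtocolGuid')  # -> True
#   IsValidDepex('1 + 2')  # -> False (must start with a letter or underscore)
#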
## IsValidNormalizedString
#
# Check
# <NormalizedString> ::= <DblQuote> [{<Word>} {<Space>}]{1,} <DblQuote>
# <Space> ::= 0x20
#
# @param String: string to be checked
#
def IsValidNormalizedString(String):
if String == '':
return True
for Char in String:
if Char == '\t':
return False
StringList = GetSplitValueList(String, TAB_SPACE_SPLIT)
for Item in StringList:
if not Item:
continue
if not IsValidWord(Item):
return False
return True
## IsValidIdString
#
# Check whether the IdString is valid.
#
# @param String: The IdString to be checked.
#
def IsValidIdString(String):
if IsValidSimpleWord(String.strip()):
return True
if String.strip().startswith('"') and \
String.strip().endswith('"'):
String = String[1:-1]
if String.strip() == "":
return True
if IsValidNormalizedString(String):
return True
return False
## IsValidVersionString
#
# Check whether the VersionString is valid.
# <AsciiString> ::= [ [<WhiteSpace>]{0,} [<AsciiChars>]{0,} ] {0,}
# <WhiteSpace> ::= {<Tab>} {<Space>}
# <Tab> ::= 0x09
# <Space> ::= 0x20
# <AsciiChars> ::= (0x21 - 0x7E)
#
# @param VersionString: The VersionString to be checked.
#
def IsValidVersionString(VersionString):
    VersionString = VersionString.strip()
    for Char in VersionString:
        #
        # Compare the character's code point against the printable ASCII
        # range (0x21 - 0x7E).
        #
        if not (0x21 <= ord(Char) <= 0x7E):
            return False
    return True
## IsValidPcdValue
#
# Check whether the PcdValue is valid.
#
# @param PcdValue: The PcdValue string to be checked.
#
def IsValidPcdValue(PcdValue):
for Char in PcdValue:
if Char == '\n' or Char == '\t' or Char == '\f':
return False
#
# <Boolean>
#
if IsValidFeatureFlagExp(PcdValue, True)[0]:
return True
#
# <Number> ::= {<Integer>} {<HexNumber>}
# <Integer> ::= {(0-9)} {(1-9)(0-9){1,}}
# <HexNumber> ::= "0x" <HexDigit>{1,}
# <HexDigit> ::= (a-fA-F0-9)
#
if IsValidHex(PcdValue):
return True
ReIsValidIntegerSingle = re.compile(r"^\s*[0-9]\s*$", re.DOTALL)
if ReIsValidIntegerSingle.match(PcdValue) is not None:
return True
ReIsValidIntegerMulti = re.compile(r"^\s*[1-9][0-9]+\s*$", re.DOTALL)
if ReIsValidIntegerMulti.match(PcdValue) is not None:
return True
#
# <StringVal> ::= {<StringType>} {<Array>} {"$(" <MACRO> ")"}
# <StringType> ::= {<UnicodeString>} {<CString>}
#
ReIsValidStringType = re.compile(r"^\s*[\"L].*[\"]\s*$")
if ReIsValidStringType.match(PcdValue):
IsTrue = False
if PcdValue.strip().startswith('L\"'):
StringValue = PcdValue.strip().lstrip('L\"').rstrip('\"')
if IsValidBareCString(StringValue):
IsTrue = True
elif PcdValue.strip().startswith('\"'):
StringValue = PcdValue.strip().lstrip('\"').rstrip('\"')
if IsValidBareCString(StringValue):
IsTrue = True
if IsTrue:
return IsTrue
#
# <Array> ::= {<CArray>} {<NList>} {<CFormatGUID>}
# <CArray> ::= "{" [<NList>] <CArray>{0,} "}"
# <NList> ::= <HexByte> ["," <HexByte>]{0,}
# <HexDigit> ::= (a-fA-F0-9)
# <HexByte> ::= "0x" <HexDigit>{1,2}
#
if IsValidCFormatGuid(PcdValue):
return True
ReIsValidByteHex = re.compile(r"^\s*0x[0-9a-fA-F]{1,2}\s*$", re.DOTALL)
    if PcdValue.strip().startswith('{') and PcdValue.strip().endswith('}'):
StringValue = PcdValue.strip().lstrip('{').rstrip('}')
ValueList = StringValue.split(',')
AllValidFlag = True
for ValueItem in ValueList:
if not ReIsValidByteHex.match(ValueItem.strip()):
AllValidFlag = False
if AllValidFlag:
return True
#
# NList
#
AllValidFlag = True
ValueList = PcdValue.split(',')
for ValueItem in ValueList:
if not ReIsValidByteHex.match(ValueItem.strip()):
AllValidFlag = False
if AllValidFlag:
return True
return False
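#
# Illustrative usage of IsValidPcdValue (example added for clarity, not
# part of the original tool; the results assume the helper validators
# used above, such as IsValidHex and IsValidBareCString, behave as
# documented):
#
#   IsValidPcdValue('0x000F')        # -> True (hex number)
#   IsValidPcdValue('"a string"')    # -> True (C string)
#   IsValidPcdValue('{0x01, 0x02}')  # -> True (byte array)
#   IsValidPcdValue('with\ttab')     # -> False (tab not permitted)
#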
## IsValidCVariableName
#
# Check whether the CName is a valid C variable name.
#
# @param CName: The C variable name to be checked.
#
def IsValidCVariableName(CName):
ReIsValidCName = re.compile(r"^[A-Za-z_][0-9A-Za-z_]*$", re.DOTALL)
if ReIsValidCName.match(CName) is None:
return False
return True
## IsValidIdentifier
#
# <Identifier> ::= <NonDigit> <Chars>{0,}
# <Chars> ::= (a-zA-Z0-9_)
# <NonDigit> ::= (a-zA-Z_)
#
# @param Ident: identifier to be checked
#
def IsValidIdentifier(Ident):
ReIdent = re.compile(r"^[A-Za-z_][0-9A-Za-z_]*$", re.DOTALL)
if ReIdent.match(Ident) is None:
return False
return True
## IsValidDecVersionVal
#
# {(0-9){1,} "." (0-99)}
#
# @param Ver: version to be checked
#
def IsValidDecVersionVal(Ver):
ReVersion = re.compile(r"[0-9]+(\.[0-9]{1,2})$")
if ReVersion.match(Ver) is None:
return False
return True
## IsValidLibName
#
# (A-Z)(a-zA-Z0-9){0,} and could not be "NULL"
#
def IsValidLibName(LibName):
if LibName == 'NULL':
return False
ReLibName = re.compile("^[A-Z]+[a-zA-Z0-9]*$")
if not ReLibName.match(LibName):
return False
return True
## IsValidUserId
#
# <UserId> ::= (a-zA-Z)(a-zA-Z0-9_.){0,}
# Words that contain the period "." must be encapsulated in double
# quotation marks.
#
# @param UserId: The UserId string to be checked.
#
def IsValidUserId(UserId):
UserId = UserId.strip()
Quoted = False
if UserId.startswith('"') and UserId.endswith('"'):
Quoted = True
UserId = UserId[1:-1]
if not UserId or not UserId[0].isalpha():
return False
for Char in UserId[1:]:
if not Char.isalnum() and not Char in '_.':
return False
if Char == '.' and not Quoted:
return False
return True
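#
# Illustrative usage of IsValidUserId (example added for clarity, not part
# of the original tool):
#
#   IsValidUserId('TianoCore')      # -> True
#   IsValidUserId('"Edk2.Intel"')   # -> True (quoted, so '.' is allowed)
#   IsValidUserId('Edk2.Intel')     # -> False (unquoted '.')
#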
## CheckUTF16FileHeader
#
# Check whether a UTF16-LE file has a BOM header
#
# @param File: The file to be checked
#
def CheckUTF16FileHeader(File):
    with open(File, 'rb') as FileIn:
        Header = FileIn.read(2)
    if Header != b'\xff\xfe':
        return False
    return True
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Library/ParserValidate.py
|
## @file
# This is an XML API that uses a syntax similar to XPath, but it is written in
# standard python so that no extra python packages are required to use it.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
XmlRoutines
'''
##
# Import Modules
#
import xml.dom.minidom
import re
import codecs
from Logger.ToolError import PARSER_ERROR
import Logger.Log as Logger
## Create an XML element
#
# @param Name           Tag name of the new element
# @param String         Text content of the new element, if any
# @param NodeList       Child nodes, or [Key, Value] pairs to be created as
#                       text sub-elements
# @param AttributeList  List of (Key, Value) attribute pairs
#
def CreateXmlElement(Name, String, NodeList, AttributeList):
Doc = xml.dom.minidom.Document()
Element = Doc.createElement(Name)
if String != '' and String is not None:
Element.appendChild(Doc.createTextNode(String))
for Item in NodeList:
if isinstance(Item, type([])):
Key = Item[0]
Value = Item[1]
if Key != '' and Key is not None and Value != '' and Value is not None:
Node = Doc.createElement(Key)
Node.appendChild(Doc.createTextNode(Value))
Element.appendChild(Node)
else:
Element.appendChild(Item)
for Item in AttributeList:
Key = Item[0]
Value = Item[1]
if Key != '' and Key is not None and Value != '' and Value is not None:
Element.setAttribute(Key, Value)
return Element
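#
# Illustrative usage of CreateXmlElement (example added for clarity, not
# part of the original tool; the tag and attribute names are hypothetical):
#
#   Element = CreateXmlElement('ModuleSurfaceArea', '',
#                              [['ModuleName', 'MyModule']],
#                              [('Version', '1.0')])
#   # Element.toxml() ->
#   # '<ModuleSurfaceArea Version="1.0"><ModuleName>MyModule</ModuleName></ModuleSurfaceArea>'
#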
## Get a list of XML nodes using XPath style syntax.
#
# Return a list of XML DOM nodes from the root Dom specified by XPath String.
# If the input Dom or String is not valid, then an empty list is returned.
#
# @param Dom The root XML DOM node.
# @param String An XPath style path.
#
def XmlList(Dom, String):
if String is None or String == "" or Dom is None or Dom == "":
return []
if Dom.nodeType == Dom.DOCUMENT_NODE:
Dom = Dom.documentElement
if String[0] == "/":
String = String[1:]
TagList = String.split('/')
Nodes = [Dom]
Index = 0
End = len(TagList) - 1
while Index <= End:
ChildNodes = []
for Node in Nodes:
if Node.nodeType == Node.ELEMENT_NODE and Node.tagName == \
TagList[Index]:
if Index < End:
ChildNodes.extend(Node.childNodes)
else:
ChildNodes.append(Node)
Nodes = ChildNodes
ChildNodes = []
Index += 1
return Nodes
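#
# Illustrative usage of XmlList (example added for clarity, not part of
# the original tool):
#
#   Dom = xml.dom.minidom.parseString(
#       '<Root><Item>A</Item><Item>B</Item></Root>')
#   Nodes = XmlList(Dom, '/Root/Item')   # -> the two <Item> element nodes
#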
## Get a single XML node using XPath style syntax.
#
# Return a single XML DOM node from the root Dom specified by XPath String.
# If the input Dom or String is not valid, then None is returned.
#
# @param Dom The root XML DOM node.
# @param String An XPath style path.
#
def XmlNode(Dom, String):
if String is None or String == "" or Dom is None or Dom == "":
return None
if Dom.nodeType == Dom.DOCUMENT_NODE:
Dom = Dom.documentElement
if String[0] == "/":
String = String[1:]
TagList = String.split('/')
Index = 0
End = len(TagList) - 1
ChildNodes = [Dom]
while Index <= End:
for Node in ChildNodes:
if Node.nodeType == Node.ELEMENT_NODE and \
Node.tagName == TagList[Index]:
if Index < End:
ChildNodes = Node.childNodes
else:
return Node
break
Index += 1
return None
## Get a single XML element using XPath style syntax.
#
# Return a single XML element from the root Dom specified by XPath String.
# If the input Dom or String is not valid, then an empty string is returned.
#
# @param Dom The root XML DOM object.
# @param String An XPath style path.
#
def XmlElement(Dom, String):
try:
return XmlNode(Dom, String).firstChild.data.strip()
except BaseException:
return ""
## Get a single XML element using XPath style syntax.
#
# Similar to XmlElement, but does not strip all the leading and trailing
# spaces and newlines; it only removes the newlines and spaces introduced
# by toprettyxml()
#
# @param Dom The root XML DOM object.
# @param String An XPath style path.
#
def XmlElement2(Dom, String):
try:
HelpStr = XmlNode(Dom, String).firstChild.data
gRemovePrettyRe = re.compile(r"""(?:(\n *) )(.*)\1""", re.DOTALL)
HelpStr = re.sub(gRemovePrettyRe, r"\2", HelpStr)
return HelpStr
except BaseException:
return ""
## Get a single XML element of the current node.
#
# Return a single XML element specified by the current root Dom.
# If the input Dom is not valid, then an empty string is returned.
#
# @param Dom The root XML DOM object.
#
def XmlElementData(Dom):
try:
return Dom.firstChild.data.strip()
except BaseException:
return ""
## Get a list of XML elements using XPath style syntax.
#
# Return a list of XML elements from the root Dom specified by XPath String.
# If the input Dom or String is not valid, then an empty list is returned.
#
# @param Dom The root XML DOM object.
# @param String An XPath style path.
#
def XmlElementList(Dom, String):
return list(map(XmlElementData, XmlList(Dom, String)))
## Get the XML attribute of the current node.
#
# Return a single XML attribute named Attribute from the current root Dom.
# If the input Dom or Attribute is not valid, then an empty string is returned.
#
# @param Dom The root XML DOM object.
# @param Attribute The name of Attribute.
#
def XmlAttribute(Dom, Attribute):
try:
return Dom.getAttribute(Attribute)
except BaseException:
return ''
## Get the XML node name of the current node.
#
# Return a single XML node name from the current root Dom.
# If the input Dom is not valid, then an empty string is returned.
#
# @param Dom The root XML DOM object.
#
def XmlNodeName(Dom):
try:
return Dom.nodeName.strip()
except BaseException:
return ''
## Parse an XML file.
#
# Parse the input XML file named FileName and return the XML DOM it
# represents. If the input File is not a valid XML file, a parser error
# is reported.
#
# @param FileName The XML file name.
#
def XmlParseFile(FileName):
    XmlFile = None
    try:
        XmlFile = codecs.open(FileName, 'rb')
        Dom = xml.dom.minidom.parse(XmlFile)
        XmlFile.close()
        return Dom
    except BaseException as XExcept:
        if XmlFile is not None:
            XmlFile.close()
        Logger.Error('\nUPT', PARSER_ERROR, XExcept, File=FileName, RaiseError=True)
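#
# Illustrative usage (example added for clarity, not part of the original
# tool; the file name and XPath below are hypothetical):
#
#   Dom = XmlParseFile('DistributionPackage.xml')
#   Name = XmlElement(Dom, '/DistributionPackage/DistributionHeader/Name')
#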
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Library/Xml/XmlRoutines.py
|
## @file
# Python 'Library' package initialization file.
#
# This file is required to make Python interpreter treat the directory
# as containing package.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
Xml
'''
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Library/Xml/__init__.py
|
## @file
# This file contains the parser for [Pcds] sections in INF files
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
InfPcdSectionParser
'''
##
# Import Modules
#
import Logger.Log as Logger
from Logger import StringTable as ST
from Logger.ToolError import FORMAT_INVALID
from Parser.InfParserMisc import InfExpandMacro
from Library import DataType as DT
from Library.Parsing import MacroParser
from Library.Misc import GetSplitValueList
from Library import GlobalData
from Library.StringUtils import SplitPcdEntry
from Parser.InfParserMisc import InfParserSectionRoot
class InfPcdSectionParser(InfParserSectionRoot):
    ## Section PCD related parser
    #
    # All 5 of the PCD list types below use this function:
    # 'FixedPcd', 'FeaturePcd', 'PatchPcd', 'Pcd', 'PcdEx'
    #
    # This is an INF-independent parser; the validation here only covers
    # the INF spec scope and will not cross into DEC/DSC to check PCD
    # values.
    #
def InfPcdParser(self, SectionString, InfSectionObject, FileName):
KeysList = []
PcdList = []
CommentsList = []
ValueList = []
#
# Current section archs
#
LineIndex = -1
for Item in self.LastSectionHeaderContent:
if (Item[0], Item[1], Item[3]) not in KeysList:
KeysList.append((Item[0], Item[1], Item[3]))
LineIndex = Item[3]
if (Item[0].upper() == DT.TAB_INF_FIXED_PCD.upper() or \
Item[0].upper() == DT.TAB_INF_FEATURE_PCD.upper() or \
Item[0].upper() == DT.TAB_INF_PCD.upper()) and GlobalData.gIS_BINARY_INF:
Logger.Error('InfParser', FORMAT_INVALID, ST.ERR_ASBUILD_PCD_SECTION_TYPE%("\"" + Item[0] + "\""),
File=FileName, Line=LineIndex)
#
# For Common INF file
#
if not GlobalData.gIS_BINARY_INF:
#
# Macro defined in this section
#
SectionMacros = {}
for Line in SectionString:
PcdLineContent = Line[0]
PcdLineNo = Line[1]
if PcdLineContent.strip() == '':
CommentsList = []
continue
if PcdLineContent.strip().startswith(DT.TAB_COMMENT_SPLIT):
CommentsList.append(Line)
continue
else:
#
# Encounter a PCD entry
#
if PcdLineContent.find(DT.TAB_COMMENT_SPLIT) > -1:
CommentsList.append((
PcdLineContent[PcdLineContent.find(DT.TAB_COMMENT_SPLIT):],
PcdLineNo))
PcdLineContent = PcdLineContent[:PcdLineContent.find(DT.TAB_COMMENT_SPLIT)]
if PcdLineContent != '':
#
# Find Macro
#
Name, Value = MacroParser((PcdLineContent, PcdLineNo),
FileName,
DT.MODEL_EFI_PCD,
self.FileLocalMacros)
if Name is not None:
SectionMacros[Name] = Value
ValueList = []
CommentsList = []
continue
PcdEntryReturn = SplitPcdEntry(PcdLineContent)
if not PcdEntryReturn[1]:
TokenList = ['']
else:
TokenList = PcdEntryReturn[0]
ValueList[0:len(TokenList)] = TokenList
#
# Replace with Local section Macro and [Defines] section Macro.
#
ValueList = [InfExpandMacro(Value, (FileName, PcdLineContent, PcdLineNo),
self.FileLocalMacros, SectionMacros, True)
for Value in ValueList]
if len(ValueList) >= 1:
PcdList.append((ValueList, CommentsList, (PcdLineContent, PcdLineNo, FileName)))
ValueList = []
CommentsList = []
continue
#
# For Binary INF file
#
else:
for Line in SectionString:
LineContent = Line[0].strip()
LineNo = Line[1]
if LineContent == '':
CommentsList = []
continue
if LineContent.startswith(DT.TAB_COMMENT_SPLIT):
CommentsList.append(LineContent)
continue
#
# Have comments at tail.
#
CommentIndex = LineContent.find(DT.TAB_COMMENT_SPLIT)
if CommentIndex > -1:
CommentsList.append(LineContent[CommentIndex+1:])
LineContent = LineContent[:CommentIndex]
TokenList = GetSplitValueList(LineContent, DT.TAB_VALUE_SPLIT)
#
# PatchablePcd
# TokenSpace.CName | Value | Offset
#
if KeysList[0][0].upper() == DT.TAB_INF_PATCH_PCD.upper():
if len(TokenList) != 3:
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_ASBUILD_PATCHPCD_FORMAT_INVALID,
File=FileName,
Line=LineNo,
ExtraData=LineContent)
#
elif KeysList[0][0].upper() == DT.TAB_INF_PCD_EX.upper():
if len(TokenList) != 1:
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_ASBUILD_PCDEX_FORMAT_INVALID,
File=FileName,
Line=LineNo,
ExtraData=LineContent)
ValueList[0:len(TokenList)] = TokenList
if len(ValueList) >= 1:
PcdList.append((ValueList, CommentsList, (LineContent, LineNo, FileName)))
ValueList = []
CommentsList = []
continue
if not InfSectionObject.SetPcds(PcdList, KeysList = KeysList,
PackageInfo = self.InfPackageSection.GetPackages()):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_MODULE_SECTION_TYPE_ERROR%("[PCD]"),
File=FileName,
Line=LineIndex)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Parser/InfPcdSectionParser.py
|
## @file
# This file contains the parser for [Depex] sections in INF files
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
InfDepexSectionParser
'''
##
# Import Modules
#
import re
import Logger.Log as Logger
from Logger import StringTable as ST
from Logger.ToolError import FORMAT_INVALID
from Parser.InfParserMisc import InfExpandMacro
from Library import DataType as DT
from Library.Misc import GetSplitValueList
from Parser.InfParserMisc import InfParserSectionRoot
class InfDepexSectionParser(InfParserSectionRoot):
    ## InfDepexParser
    #
    # For now, this only separates the Depex string from the comments.
    # There are two types of section header:
    # 1. [Depex.Arch.ModuleType, ...]
    # 2. [Depex.Arch|FFE, ...]
    #
def InfDepexParser(self, SectionString, InfSectionObject, FileName):
DepexContent = []
DepexComment = []
ValueList = []
#
# Parse section content
#
for Line in SectionString:
LineContent = Line[0]
LineNo = Line[1]
#
# Found comment
#
if LineContent.strip().startswith(DT.TAB_COMMENT_SPLIT):
DepexComment.append((LineContent, LineNo))
continue
#
# Replace with [Defines] section Macro
#
LineContent = InfExpandMacro(LineContent,
(FileName, LineContent, Line[1]),
self.FileLocalMacros,
None, True)
CommentCount = LineContent.find(DT.TAB_COMMENT_SPLIT)
if CommentCount > -1:
DepexComment.append((LineContent[CommentCount:], LineNo))
LineContent = LineContent[:CommentCount-1]
CommentCount = -1
DepexContent.append((LineContent, LineNo))
TokenList = GetSplitValueList(LineContent, DT.TAB_COMMENT_SPLIT)
ValueList[0:len(TokenList)] = TokenList
#
# Current section archs
#
KeyList = []
LastItem = ''
for Item in self.LastSectionHeaderContent:
LastItem = Item
if (Item[1], Item[2], Item[3]) not in KeyList:
KeyList.append((Item[1], Item[2], Item[3]))
NewCommentList = []
FormatCommentLn = -1
ReFormatComment = re.compile(r"""#(?:\s*)\[(.*?)\](?:.*)""", re.DOTALL)
for CommentItem in DepexComment:
CommentContent = CommentItem[0]
if ReFormatComment.match(CommentContent) is not None:
FormatCommentLn = CommentItem[1] + 1
continue
if CommentItem[1] != FormatCommentLn:
NewCommentList.append(CommentContent)
else:
FormatCommentLn = CommentItem[1] + 1
if not InfSectionObject.SetDepex(DepexContent, KeyList = KeyList, CommentList = NewCommentList):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_MODULE_SECTION_TYPE_ERROR%("[Depex]"),
File=FileName,
Line=LastItem[3])
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Parser/InfDepexSectionParser.py
|
## @file
# This file contains the parser for INF files
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
InfParser
'''
##
# Import Modules
#
import re
import os
from copy import deepcopy
from Library.StringUtils import GetSplitValueList
from Library.StringUtils import ConvertSpecialChar
from Library.Misc import ProcessLineExtender
from Library.Misc import ProcessEdkComment
from Library.Parsing import NormPath
from Library.ParserValidate import IsValidInfMoudleTypeList
from Library.ParserValidate import IsValidArch
from Library import DataType as DT
from Library import GlobalData
import Logger.Log as Logger
from Logger import StringTable as ST
from Logger.ToolError import FORMAT_INVALID
from Logger.ToolError import FILE_READ_FAILURE
from Logger.ToolError import PARSER_ERROR
from Object.Parser.InfCommonObject import InfSectionCommonDef
from Parser.InfSectionParser import InfSectionParser
from Parser.InfParserMisc import gINF_SECTION_DEF
from Parser.InfParserMisc import IsBinaryInf
## OpenInfFile
#
# Open an INF file and return its lines
#
def OpenInfFile(Filename):
FileLinesList = []
try:
FInputfile = open(Filename, "r")
try:
FileLinesList = FInputfile.readlines()
except BaseException:
Logger.Error("InfParser",
FILE_READ_FAILURE,
ST.ERR_FILE_OPEN_FAILURE,
File=Filename)
finally:
FInputfile.close()
except BaseException:
Logger.Error("InfParser",
FILE_READ_FAILURE,
ST.ERR_FILE_OPEN_FAILURE,
File=Filename)
return FileLinesList
## InfParser
#
# This class defined the structure used in InfParser object
#
# @param InfObject: Inherited from InfSectionParser class
# @param Filename: Input value for Filename of INF file, default is
# None
# @param WorkspaceDir: Input value for current workspace directory,
# default is None
#
class InfParser(InfSectionParser):
def __init__(self, Filename = None, WorkspaceDir = None):
#
# Call parent class construct function
#
InfSectionParser.__init__()
self.WorkspaceDir = WorkspaceDir
self.SupArchList = DT.ARCH_LIST
self.EventList = []
self.HobList = []
self.BootModeList = []
#
# Load Inf file if filename is not None
#
if Filename is not None:
self.ParseInfFile(Filename)
## Parse INF file
#
# Parse the file if it exists
#
# @param Filename: Input value for filename of INF file
#
def ParseInfFile(self, Filename):
Filename = NormPath(Filename)
(Path, Name) = os.path.split(Filename)
self.FullPath = Filename
self.RelaPath = Path
self.FileName = Name
GlobalData.gINF_MODULE_DIR = Path
GlobalData.gINF_MODULE_NAME = self.FullPath
GlobalData.gIS_BINARY_INF = False
#
# Initialize common data
#
LineNo = 0
CurrentSection = DT.MODEL_UNKNOWN
SectionLines = []
#
# Flags
#
HeaderCommentStart = False
HeaderCommentEnd = False
HeaderStarLineNo = -1
BinaryHeaderCommentStart = False
BinaryHeaderCommentEnd = False
BinaryHeaderStarLineNo = -1
        #
        # When a section ends, parse the whole section's contents.
        #
NewSectionStartFlag = False
FirstSectionStartFlag = False
#
# Parse file content
#
CommentBlock = []
#
# Variables for Event/Hob/BootMode
#
self.EventList = []
self.HobList = []
self.BootModeList = []
SectionType = ''
FileLinesList = OpenInfFile (Filename)
        #
        # One INF file can only have one [Defines] section.
        #
DefineSectionParsedFlag = False
#
# Convert special characters in lines to space character.
#
FileLinesList = ConvertSpecialChar(FileLinesList)
#
# Process Line Extender
#
FileLinesList = ProcessLineExtender(FileLinesList)
#
# Process EdkI INF style comment if found
#
OrigLines = [Line for Line in FileLinesList]
FileLinesList, EdkCommentStartPos = ProcessEdkComment(FileLinesList)
#
# Judge whether the INF file is Binary INF or not
#
if IsBinaryInf(FileLinesList):
GlobalData.gIS_BINARY_INF = True
InfSectionCommonDefObj = None
for Line in FileLinesList:
LineNo = LineNo + 1
Line = Line.strip()
if (LineNo < len(FileLinesList) - 1):
NextLine = FileLinesList[LineNo].strip()
#
# blank line
#
            if not Line and LineNo == len(FileLinesList):
                LastSectionFalg = True
#
# check whether file header comment section started
#
if Line.startswith(DT.TAB_SPECIAL_COMMENT) and \
(Line.find(DT.TAB_HEADER_COMMENT) > -1) and \
not HeaderCommentStart and not HeaderCommentEnd:
CurrentSection = DT.MODEL_META_DATA_FILE_HEADER
#
# Append the first line to section lines.
#
HeaderStarLineNo = LineNo
SectionLines.append((Line, LineNo))
HeaderCommentStart = True
continue
#
# Collect Header content.
#
if (Line.startswith(DT.TAB_COMMENT_SPLIT) and CurrentSection == DT.MODEL_META_DATA_FILE_HEADER) and\
HeaderCommentStart and not Line.startswith(DT.TAB_SPECIAL_COMMENT) and not\
HeaderCommentEnd and NextLine != '':
SectionLines.append((Line, LineNo))
continue
#
# Header content end
#
if (Line.startswith(DT.TAB_SPECIAL_COMMENT) or not Line.strip().startswith("#")) and HeaderCommentStart \
and not HeaderCommentEnd:
HeaderCommentEnd = True
BinaryHeaderCommentStart = False
BinaryHeaderCommentEnd = False
HeaderCommentStart = False
if Line.find(DT.TAB_BINARY_HEADER_COMMENT) > -1:
self.InfHeaderParser(SectionLines, self.InfHeader, self.FileName)
SectionLines = []
else:
SectionLines.append((Line, LineNo))
#
# Call Header comment parser.
#
self.InfHeaderParser(SectionLines, self.InfHeader, self.FileName)
SectionLines = []
continue
#
# check whether binary header comment section started
#
if Line.startswith(DT.TAB_SPECIAL_COMMENT) and \
(Line.find(DT.TAB_BINARY_HEADER_COMMENT) > -1) and \
not BinaryHeaderCommentStart:
SectionLines = []
CurrentSection = DT.MODEL_META_DATA_FILE_HEADER
#
# Append the first line to section lines.
#
BinaryHeaderStarLineNo = LineNo
SectionLines.append((Line, LineNo))
BinaryHeaderCommentStart = True
HeaderCommentEnd = True
continue
            #
            # check whether more than one binary header exists
            #
if Line.startswith(DT.TAB_SPECIAL_COMMENT) and BinaryHeaderCommentStart and \
not BinaryHeaderCommentEnd and (Line.find(DT.TAB_BINARY_HEADER_COMMENT) > -1):
Logger.Error('Parser',
FORMAT_INVALID,
ST.ERR_MULTIPLE_BINARYHEADER_EXIST,
File=Filename)
#
# Collect Binary Header content.
#
if (Line.startswith(DT.TAB_COMMENT_SPLIT) and CurrentSection == DT.MODEL_META_DATA_FILE_HEADER) and\
BinaryHeaderCommentStart and not Line.startswith(DT.TAB_SPECIAL_COMMENT) and not\
BinaryHeaderCommentEnd and NextLine != '':
SectionLines.append((Line, LineNo))
continue
#
# Binary Header content end
#
if (Line.startswith(DT.TAB_SPECIAL_COMMENT) or not Line.strip().startswith(DT.TAB_COMMENT_SPLIT)) and \
BinaryHeaderCommentStart and not BinaryHeaderCommentEnd:
SectionLines.append((Line, LineNo))
BinaryHeaderCommentStart = False
#
# Call Binary Header comment parser.
#
self.InfHeaderParser(SectionLines, self.InfBinaryHeader, self.FileName, True)
SectionLines = []
BinaryHeaderCommentEnd = True
continue
#
# Find a new section tab
# Or at the last line of INF file,
# need to process the last section.
#
LastSectionFalg = False
if LineNo == len(FileLinesList):
LastSectionFalg = True
if Line.startswith(DT.TAB_COMMENT_SPLIT) and not Line.startswith(DT.TAB_SPECIAL_COMMENT):
SectionLines.append((Line, LineNo))
if not LastSectionFalg:
continue
#
# Encountered a section. start with '[' and end with ']'
#
if (Line.startswith(DT.TAB_SECTION_START) and \
Line.find(DT.TAB_SECTION_END) > -1) or LastSectionFalg:
HeaderCommentEnd = True
BinaryHeaderCommentEnd = True
if not LastSectionFalg:
#
# check to prevent '#' inside section header
#
HeaderContent = Line[1:Line.find(DT.TAB_SECTION_END)]
if HeaderContent.find(DT.TAB_COMMENT_SPLIT) != -1:
Logger.Error("InfParser",
FORMAT_INVALID,
ST.ERR_INF_PARSER_DEFINE_SECTION_HEADER_INVALID,
File=self.FullPath,
Line=LineNo,
ExtraData=Line)
                    #
                    # Keep the previous section header content for section
                    # parser usage.
                    #
self.LastSectionHeaderContent = deepcopy(self.SectionHeaderContent)
#
# TailComments in section define.
#
TailComments = ''
CommentIndex = Line.find(DT.TAB_COMMENT_SPLIT)
if CommentIndex > -1:
TailComments = Line[CommentIndex:]
Line = Line[:CommentIndex]
InfSectionCommonDefObj = InfSectionCommonDef()
if TailComments != '':
InfSectionCommonDefObj.SetTailComments(TailComments)
if CommentBlock != '':
InfSectionCommonDefObj.SetHeaderComments(CommentBlock)
CommentBlock = []
                    #
                    # Call the section parser before the section header parser
                    # to avoid encountering EDK I INF files
                    #
if CurrentSection == DT.MODEL_META_DATA_DEFINE:
DefineSectionParsedFlag = self._CallSectionParsers(CurrentSection,
DefineSectionParsedFlag, SectionLines,
InfSectionCommonDefObj, LineNo)
#
# Compare the new section name with current
#
self.SectionHeaderParser(Line, self.FileName, LineNo)
self._CheckSectionHeaders(Line, LineNo)
SectionType = _ConvertSecNameToType(self.SectionHeaderContent[0][0])
if not FirstSectionStartFlag:
CurrentSection = SectionType
FirstSectionStartFlag = True
else:
NewSectionStartFlag = True
else:
SectionLines.append((Line, LineNo))
continue
if LastSectionFalg:
SectionLines, CurrentSection = self._ProcessLastSection(SectionLines, Line, LineNo, CurrentSection)
#
# End of section content collect.
# Parser the section content collected previously.
#
if NewSectionStartFlag or LastSectionFalg:
if CurrentSection != DT.MODEL_META_DATA_DEFINE or \
(LastSectionFalg and CurrentSection == DT.MODEL_META_DATA_DEFINE):
DefineSectionParsedFlag = self._CallSectionParsers(CurrentSection,
DefineSectionParsedFlag, SectionLines,
InfSectionCommonDefObj, LineNo)
CurrentSection = SectionType
#
# Clear section lines
#
SectionLines = []
if HeaderStarLineNo == -1:
Logger.Error("InfParser",
FORMAT_INVALID,
ST.ERR_NO_SOURCE_HEADER,
File=self.FullPath)
if BinaryHeaderStarLineNo > -1 and HeaderStarLineNo > -1 and HeaderStarLineNo > BinaryHeaderStarLineNo:
Logger.Error("InfParser",
FORMAT_INVALID,
ST.ERR_BINARY_HEADER_ORDER,
File=self.FullPath)
#
# EDKII INF should not have EDKI style comment
#
if EdkCommentStartPos != -1:
Logger.Error("InfParser",
FORMAT_INVALID,
ST.ERR_INF_PARSER_EDKI_COMMENT_IN_EDKII,
File=self.FullPath,
Line=EdkCommentStartPos + 1,
ExtraData=OrigLines[EdkCommentStartPos])
#
# extract [Event] [Hob] [BootMode] sections
#
self._ExtractEventHobBootMod(FileLinesList)
## _CheckSectionHeaders
#
#
def _CheckSectionHeaders(self, Line, LineNo):
if len(self.SectionHeaderContent) == 0:
Logger.Error("InfParser",
FORMAT_INVALID,
ST.ERR_INF_PARSER_DEFINE_SECTION_HEADER_INVALID,
File=self.FullPath,
Line=LineNo, ExtraData=Line)
else:
for SectionItem in self.SectionHeaderContent:
ArchList = []
                #
                # This does not cover the Depex/UserExtension section header
                # check.
                #
if SectionItem[0].strip().upper() == DT.TAB_INF_FIXED_PCD.upper() or \
SectionItem[0].strip().upper() == DT.TAB_INF_PATCH_PCD.upper() or \
SectionItem[0].strip().upper() == DT.TAB_INF_PCD_EX.upper() or \
SectionItem[0].strip().upper() == DT.TAB_INF_PCD.upper() or \
SectionItem[0].strip().upper() == DT.TAB_INF_FEATURE_PCD.upper():
ArchList = GetSplitValueList(SectionItem[1].strip(), ' ')
else:
ArchList = [SectionItem[1].strip()]
for Arch in ArchList:
if (not IsValidArch(Arch)) and \
(SectionItem[0].strip().upper() != DT.TAB_DEPEX.upper()) and \
(SectionItem[0].strip().upper() != DT.TAB_USER_EXTENSIONS.upper()) and \
(SectionItem[0].strip().upper() != DT.TAB_COMMON_DEFINES.upper()):
Logger.Error("InfParser",
FORMAT_INVALID,
ST.ERR_INF_PARSER_DEFINE_FROMAT_INVALID%(SectionItem[1]),
File=self.FullPath,
Line=LineNo, ExtraData=Line)
#
# Check if the ModuleType is valid
#
ChkModSectionList = ['LIBRARYCLASSES']
if (self.SectionHeaderContent[0][0].upper() in ChkModSectionList):
if SectionItem[2].strip().upper():
MoudleTypeList = GetSplitValueList(
SectionItem[2].strip().upper())
if (not IsValidInfMoudleTypeList(MoudleTypeList)):
Logger.Error("InfParser",
FORMAT_INVALID,
ST.ERR_INF_PARSER_DEFINE_FROMAT_INVALID%(SectionItem[2]),
File=self.FullPath, Line=LineNo,
ExtraData=Line)
## _CallSectionParsers
#
#
def _CallSectionParsers(self, CurrentSection, DefineSectionParsedFlag,
SectionLines, InfSectionCommonDefObj, LineNo):
if CurrentSection == DT.MODEL_META_DATA_DEFINE:
if not DefineSectionParsedFlag:
self.InfDefineParser(SectionLines,
self.InfDefSection,
self.FullPath,
InfSectionCommonDefObj)
DefineSectionParsedFlag = True
else:
Logger.Error("Parser",
PARSER_ERROR,
ST.ERR_INF_PARSER_MULTI_DEFINE_SECTION,
File=self.FullPath,
RaiseError = Logger.IS_RAISE_ERROR)
elif CurrentSection == DT.MODEL_META_DATA_BUILD_OPTION:
self.InfBuildOptionParser(SectionLines,
self.InfBuildOptionSection,
self.FullPath)
elif CurrentSection == DT.MODEL_EFI_LIBRARY_CLASS:
self.InfLibraryParser(SectionLines,
self.InfLibraryClassSection,
self.FullPath)
elif CurrentSection == DT.MODEL_META_DATA_PACKAGE:
self.InfPackageParser(SectionLines,
self.InfPackageSection,
self.FullPath)
#
# [Pcd] Sections, put it together
#
elif CurrentSection == DT.MODEL_PCD_FIXED_AT_BUILD or \
CurrentSection == DT.MODEL_PCD_PATCHABLE_IN_MODULE or \
CurrentSection == DT.MODEL_PCD_FEATURE_FLAG or \
CurrentSection == DT.MODEL_PCD_DYNAMIC_EX or \
CurrentSection == DT.MODEL_PCD_DYNAMIC:
self.InfPcdParser(SectionLines,
self.InfPcdSection,
self.FullPath)
elif CurrentSection == DT.MODEL_EFI_SOURCE_FILE:
self.InfSourceParser(SectionLines,
self.InfSourcesSection,
self.FullPath)
elif CurrentSection == DT.MODEL_META_DATA_USER_EXTENSION:
self.InfUserExtensionParser(SectionLines,
self.InfUserExtensionSection,
self.FullPath)
elif CurrentSection == DT.MODEL_EFI_PROTOCOL:
self.InfProtocolParser(SectionLines,
self.InfProtocolSection,
self.FullPath)
elif CurrentSection == DT.MODEL_EFI_PPI:
self.InfPpiParser(SectionLines,
self.InfPpiSection,
self.FullPath)
elif CurrentSection == DT.MODEL_EFI_GUID:
self.InfGuidParser(SectionLines,
self.InfGuidSection,
self.FullPath)
elif CurrentSection == DT.MODEL_EFI_DEPEX:
self.InfDepexParser(SectionLines,
self.InfDepexSection,
self.FullPath)
elif CurrentSection == DT.MODEL_EFI_BINARY_FILE:
self.InfBinaryParser(SectionLines,
self.InfBinariesSection,
self.FullPath)
#
# Unknown section type found, raise error.
#
else:
if len(self.SectionHeaderContent) >= 1:
Logger.Error("Parser",
PARSER_ERROR,
ST.ERR_INF_PARSER_UNKNOWN_SECTION,
File=self.FullPath, Line=LineNo,
RaiseError = Logger.IS_RAISE_ERROR)
else:
Logger.Error("Parser",
PARSER_ERROR,
ST.ERR_INF_PARSER_NO_SECTION_ERROR,
File=self.FullPath, Line=LineNo,
RaiseError = Logger.IS_RAISE_ERROR)
return DefineSectionParsedFlag
def _ExtractEventHobBootMod(self, FileLinesList):
SpecialSectionStart = False
CheckLocation = False
GFindSpecialCommentRe = \
re.compile(r"""#(?:\s*)\[(.*?)\](?:.*)""", re.DOTALL)
GFindNewSectionRe2 = \
re.compile(r"""#?(\s*)\[(.*?)\](.*)""", re.DOTALL)
LineNum = 0
Element = []
for Line in FileLinesList:
Line = Line.strip()
LineNum += 1
MatchObject = GFindSpecialCommentRe.search(Line)
if MatchObject:
SpecialSectionStart = True
Element = []
if MatchObject.group(1).upper().startswith("EVENT"):
List = self.EventList
elif MatchObject.group(1).upper().startswith("HOB"):
List = self.HobList
elif MatchObject.group(1).upper().startswith("BOOTMODE"):
List = self.BootModeList
else:
SpecialSectionStart = False
CheckLocation = False
if SpecialSectionStart:
Element.append([Line, LineNum])
List.append(Element)
else:
#
# if currently in special section, try to detect end of current section
#
MatchObject = GFindNewSectionRe2.search(Line)
if SpecialSectionStart:
if MatchObject:
SpecialSectionStart = False
CheckLocation = False
Element = []
elif not Line:
SpecialSectionStart = False
CheckLocation = True
Element = []
else:
if not Line.startswith(DT.TAB_COMMENT_SPLIT):
Logger.Warn("Parser",
ST.WARN_SPECIAL_SECTION_LOCATION_WRONG,
File=self.FullPath, Line=LineNum)
SpecialSectionStart = False
CheckLocation = False
Element = []
else:
Element.append([Line, LineNum])
else:
if CheckLocation:
if MatchObject:
CheckLocation = False
elif Line:
Logger.Warn("Parser",
ST.WARN_SPECIAL_SECTION_LOCATION_WRONG,
File=self.FullPath, Line=LineNum)
CheckLocation = False
if len(self.BootModeList) >= 1:
self.InfSpecialCommentParser(self.BootModeList,
self.InfSpecialCommentSection,
self.FileName,
DT.TYPE_BOOTMODE_SECTION)
if len(self.EventList) >= 1:
self.InfSpecialCommentParser(self.EventList,
self.InfSpecialCommentSection,
self.FileName,
DT.TYPE_EVENT_SECTION)
if len(self.HobList) >= 1:
self.InfSpecialCommentParser(self.HobList,
self.InfSpecialCommentSection,
self.FileName,
DT.TYPE_HOB_SECTION)
## _ProcessLastSection
#
#
def _ProcessLastSection(self, SectionLines, Line, LineNo, CurrentSection):
        #
        # If the last line is a section header, it will be discarded.
        #
if not (Line.startswith(DT.TAB_SECTION_START) and Line.find(DT.TAB_SECTION_END) > -1):
SectionLines.append((Line, LineNo))
if len(self.SectionHeaderContent) >= 1:
TemSectionName = self.SectionHeaderContent[0][0].upper()
if TemSectionName.upper() not in gINF_SECTION_DEF.keys():
Logger.Error("InfParser",
FORMAT_INVALID,
ST.ERR_INF_PARSER_UNKNOWN_SECTION,
File=self.FullPath,
Line=LineNo,
ExtraData=Line,
RaiseError = Logger.IS_RAISE_ERROR
)
else:
CurrentSection = gINF_SECTION_DEF[TemSectionName]
self.LastSectionHeaderContent = self.SectionHeaderContent
return SectionLines, CurrentSection
## _ConvertSecNameToType
#
# Convert a section name into its section model type
#
def _ConvertSecNameToType(SectionName):
SectionType = ''
if SectionName.upper() not in gINF_SECTION_DEF.keys():
SectionType = DT.MODEL_UNKNOWN
else:
SectionType = gINF_SECTION_DEF[SectionName.upper()]
return SectionType
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Parser/InfParser.py
|
## @file
# This file contains the parser for [BuildOptions] sections in INF files
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
InfBuildOptionSectionParser
'''
##
# Import Modules
#
from Library import DataType as DT
from Library import GlobalData
import Logger.Log as Logger
from Logger import StringTable as ST
from Logger.ToolError import FORMAT_INVALID
from Parser.InfParserMisc import InfExpandMacro
from Library.Misc import GetSplitValueList
from Parser.InfParserMisc import IsAsBuildOptionInfo
from Library.Misc import GetHelpStringByRemoveHashKey
from Library.ParserValidate import IsValidFamily
from Library.ParserValidate import IsValidBuildOptionName
from Parser.InfParserMisc import InfParserSectionRoot
class InfBuildOptionSectionParser(InfParserSectionRoot):
## InfBuildOptionParser
#
#
def InfBuildOptionParser(self, SectionString, InfSectionObject, FileName):
BuildOptionList = []
SectionContent = ''
if not GlobalData.gIS_BINARY_INF:
ValueList = []
LineNo = 0
for Line in SectionString:
LineContent = Line[0]
LineNo = Line[1]
TailComments = ''
ReplaceFlag = False
if LineContent.strip() == '':
SectionContent += LineContent + DT.END_OF_LINE
continue
#
# Found Comment
#
if LineContent.strip().startswith(DT.TAB_COMMENT_SPLIT):
SectionContent += LineContent + DT.END_OF_LINE
continue
#
# Find Tail comment.
#
if LineContent.find(DT.TAB_COMMENT_SPLIT) > -1:
TailComments = LineContent[LineContent.find(DT.TAB_COMMENT_SPLIT):]
LineContent = LineContent[:LineContent.find(DT.TAB_COMMENT_SPLIT)]
TokenList = GetSplitValueList(LineContent, DT.TAB_DEQUAL_SPLIT, 1)
if len(TokenList) == 2:
#
# "Replace" type build option
#
TokenList.append('True')
ReplaceFlag = True
else:
TokenList = GetSplitValueList(LineContent, DT.TAB_EQUAL_SPLIT, 1)
#
# "Append" type build option
#
if len(TokenList) == 2:
TokenList.append('False')
else:
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_BUILD_OPTION_FORMAT_INVALID,
ExtraData=LineContent,
File=FileName,
Line=LineNo)
ValueList[0:len(TokenList)] = TokenList
#
# Replace with [Defines] section Macro
#
ValueList[0] = InfExpandMacro(ValueList[0], (FileName, LineContent, LineNo),
self.FileLocalMacros, None)
ValueList[1] = InfExpandMacro(ValueList[1], (FileName, LineContent, LineNo),
self.FileLocalMacros, None, True)
EqualString = ''
if not ReplaceFlag:
EqualString = ' = '
else:
EqualString = ' == '
SectionContent += ValueList[0] + EqualString + ValueList[1] + TailComments + DT.END_OF_LINE
Family = GetSplitValueList(ValueList[0], DT.TAB_COLON_SPLIT, 1)
if len(Family) == 2:
if not IsValidFamily(Family[0]):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_BUILD_OPTION_FORMAT_INVALID,
ExtraData=LineContent,
File=FileName,
Line=LineNo)
if not IsValidBuildOptionName(Family[1]):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_BUILD_OPTION_FORMAT_INVALID,
ExtraData=LineContent,
File=FileName,
Line=LineNo)
if len(Family) == 1:
if not IsValidBuildOptionName(Family[0]):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_BUILD_OPTION_FORMAT_INVALID,
ExtraData=LineContent,
File=FileName,
Line=LineNo)
BuildOptionList.append(ValueList)
ValueList = []
continue
else:
BuildOptionList = InfAsBuiltBuildOptionParser(SectionString, FileName)
#
# Current section archs
#
ArchList = []
LastItem = ''
for Item in self.LastSectionHeaderContent:
LastItem = Item
            if Item[1] != '' and Item[1] not in ArchList:
ArchList.append(Item[1])
InfSectionObject.SetSupArchList(Item[1])
InfSectionObject.SetAllContent(SectionContent)
if not InfSectionObject.SetBuildOptions(BuildOptionList, ArchList, SectionContent):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_MODULE_SECTION_TYPE_ERROR%("[BuilOptions]"),
File=FileName,
Line=LastItem[3])
## InfAsBuiltBuildOptionParser
#
# Parse the build options from an as-built (binary) INF file
#
def InfAsBuiltBuildOptionParser(SectionString, FileName):
BuildOptionList = []
#
# AsBuild Binary INF file.
#
AsBuildOptionFlag = False
BuildOptionItem = []
Count = 0
for Line in SectionString:
Count += 1
LineContent = Line[0]
LineNo = Line[1]
#
# The last line
#
if len(SectionString) == Count:
if LineContent.strip().startswith("##") and AsBuildOptionFlag:
BuildOptionList.append(BuildOptionItem)
BuildOptionList.append([GetHelpStringByRemoveHashKey(LineContent)])
elif LineContent.strip().startswith("#") and AsBuildOptionFlag:
BuildOptionInfo = GetHelpStringByRemoveHashKey(LineContent)
BuildOptionItem.append(BuildOptionInfo)
BuildOptionList.append(BuildOptionItem)
else:
if len(BuildOptionItem) > 0:
BuildOptionList.append(BuildOptionItem)
break
if LineContent.strip() == '':
AsBuildOptionFlag = False
continue
if LineContent.strip().startswith("##") and AsBuildOptionFlag:
if len(BuildOptionItem) > 0:
BuildOptionList.append(BuildOptionItem)
BuildOptionItem = []
if not LineContent.strip().startswith("#"):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_BO_CONTATIN_ASBUILD_AND_COMMON,
File=FileName,
Line=LineNo,
ExtraData=LineContent)
if IsAsBuildOptionInfo(LineContent):
AsBuildOptionFlag = True
continue
if AsBuildOptionFlag:
BuildOptionInfo = GetHelpStringByRemoveHashKey(LineContent)
BuildOptionItem.append(BuildOptionInfo)
return BuildOptionList
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Parser/InfBuildOptionSectionParser.py
|
## @file
# This file contains the parser for [Binaries] sections in INF files
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
InfBinarySectionParser
'''
##
# Import Modules
#
import Logger.Log as Logger
from Logger import StringTable as ST
from Logger.ToolError import FORMAT_INVALID
from Parser.InfParserMisc import InfExpandMacro
from Library import DataType as DT
from Library.Parsing import MacroParser
from Library.Misc import GetSplitValueList
from Object.Parser.InfCommonObject import InfLineCommentObject
from Object.Parser.InfCommonObject import CurrentLine
from Parser.InfParserMisc import InfParserSectionRoot
class InfBinarySectionParser(InfParserSectionRoot):
## InfBinaryParser
#
#
def InfBinaryParser(self, SectionString, InfSectionObject, FileName):
#
# Macro defined in this section
#
SectionMacros = {}
ValueList = []
#
# For UI (UI, SEC_UI, UNI_UI) binaries
# One and only one UI section can be included
#
UiBinaryList = []
#
        #
        # For Version (VER, SEC_VER, UNI_VER) binaries.
        # One and only one VER section can be included.
        #
#
# For other common type binaries
#
ComBinaryList = []
StillCommentFalg = False
HeaderComments = []
LineComment = None
AllSectionContent = ''
#
# Parse section content
#
for Line in SectionString:
BinLineContent = Line[0]
BinLineNo = Line[1]
if BinLineContent.strip() == '':
continue
CurrentLineObj = CurrentLine()
CurrentLineObj.FileName = FileName
CurrentLineObj.LineString = BinLineContent
CurrentLineObj.LineNo = BinLineNo
#
# Found Header Comments
#
if BinLineContent.strip().startswith(DT.TAB_COMMENT_SPLIT):
                #
                # The previous line was a comment, and this line continues it.
                #
if StillCommentFalg:
HeaderComments.append(Line)
AllSectionContent += BinLineContent + DT.END_OF_LINE
continue
#
# First time encounter comment
#
else:
#
# Clear original data
#
HeaderComments = []
HeaderComments.append(Line)
AllSectionContent += BinLineContent + DT.END_OF_LINE
StillCommentFalg = True
continue
else:
StillCommentFalg = False
if len(HeaderComments) >= 1:
LineComment = InfLineCommentObject()
LineCommentContent = ''
for Item in HeaderComments:
LineCommentContent += Item[0] + DT.END_OF_LINE
LineComment.SetHeaderComments(LineCommentContent)
#
# Find Tail comment.
#
if BinLineContent.find(DT.TAB_COMMENT_SPLIT) > -1:
TailComments = BinLineContent[BinLineContent.find(DT.TAB_COMMENT_SPLIT):]
BinLineContent = BinLineContent[:BinLineContent.find(DT.TAB_COMMENT_SPLIT)]
if LineComment is None:
LineComment = InfLineCommentObject()
LineComment.SetTailComments(TailComments)
#
# Find Macro
#
MacroDef = MacroParser((BinLineContent, BinLineNo),
FileName,
DT.MODEL_EFI_BINARY_FILE,
self.FileLocalMacros)
if MacroDef[0] is not None:
SectionMacros[MacroDef[0]] = MacroDef[1]
LineComment = None
HeaderComments = []
continue
#
# Replace with Local section Macro and [Defines] section Macro.
#
LineContent = InfExpandMacro(BinLineContent,
(FileName, BinLineContent, BinLineNo),
self.FileLocalMacros,
SectionMacros, True)
AllSectionContent += LineContent + DT.END_OF_LINE
TokenList = GetSplitValueList(LineContent, DT.TAB_VALUE_SPLIT, 1)
ValueList[0:len(TokenList)] = TokenList
#
# Should equal to UI/SEC_UI/UNI_UI
#
ValueList[0] = ValueList[0].strip()
if ValueList[0] == DT.BINARY_FILE_TYPE_UNI_UI or \
ValueList[0] == DT.BINARY_FILE_TYPE_SEC_UI or \
ValueList[0] == DT.BINARY_FILE_TYPE_UI:
if len(ValueList) == 2:
TokenList = GetSplitValueList(ValueList[1],
DT.TAB_VALUE_SPLIT,
2)
NewValueList = []
NewValueList.append(ValueList[0])
for Item in TokenList:
NewValueList.append(Item)
UiBinaryList.append((NewValueList,
LineComment,
CurrentLineObj))
#
# Should equal to VER/SEC_VER/UNI_VER
#
elif ValueList[0] == DT.BINARY_FILE_TYPE_UNI_VER or \
ValueList[0] == DT.BINARY_FILE_TYPE_SEC_VER or \
ValueList[0] == DT.BINARY_FILE_TYPE_VER:
if len(ValueList) == 2:
TokenList = GetSplitValueList(ValueList[1],
DT.TAB_VALUE_SPLIT,
2)
NewValueList = []
NewValueList.append(ValueList[0])
for Item in TokenList:
NewValueList.append(Item)
VerBinaryList.append((NewValueList,
LineComment,
CurrentLineObj))
else:
if len(ValueList) == 2:
if ValueList[0].strip() == 'SUBTYPE_GUID':
TokenList = GetSplitValueList(ValueList[1],
DT.TAB_VALUE_SPLIT,
5)
else:
TokenList = GetSplitValueList(ValueList[1],
DT.TAB_VALUE_SPLIT,
4)
NewValueList = []
NewValueList.append(ValueList[0])
for Item in TokenList:
NewValueList.append(Item)
ComBinaryList.append((NewValueList,
LineComment,
CurrentLineObj))
elif len(ValueList) == 1:
NewValueList = []
NewValueList.append(ValueList[0])
ComBinaryList.append((NewValueList,
LineComment,
CurrentLineObj))
ValueList = []
LineComment = None
TailComments = ''
HeaderComments = []
continue
#
# Current section archs
#
ArchList = []
for Item in self.LastSectionHeaderContent:
if Item[1] not in ArchList:
ArchList.append(Item[1])
InfSectionObject.SetSupArchList(Item[1])
InfSectionObject.SetAllContent(AllSectionContent)
if not InfSectionObject.SetBinary(UiBinaryList,
VerBinaryList,
ComBinaryList,
ArchList):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_MODULE_SECTION_TYPE_ERROR%("[Binaries]"),
File=FileName,
Line=Item[3])
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Parser/InfBinarySectionParser.py
|
## @file
# This file contains the parser for [Sources] sections in INF files
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
InfSourceSectionParser
'''
##
# Import Modules
#
import Logger.Log as Logger
from Logger import StringTable as ST
from Logger.ToolError import FORMAT_INVALID
from Parser.InfParserMisc import InfExpandMacro
from Library import DataType as DT
from Library.Parsing import MacroParser
from Library.Misc import GetSplitValueList
from Object.Parser.InfCommonObject import InfLineCommentObject
from Parser.InfParserMisc import InfParserSectionRoot
class InfSourceSectionParser(InfParserSectionRoot):
## InfSourceParser
#
#
def InfSourceParser(self, SectionString, InfSectionObject, FileName):
SectionMacros = {}
ValueList = []
SourceList = []
StillCommentFalg = False
HeaderComments = []
LineComment = None
SectionContent = ''
for Line in SectionString:
SrcLineContent = Line[0]
SrcLineNo = Line[1]
if SrcLineContent.strip() == '':
continue
#
# Found Header Comments
#
if SrcLineContent.strip().startswith(DT.TAB_COMMENT_SPLIT):
                #
                # The previous line was a comment, and this line continues it.
                #
if StillCommentFalg:
HeaderComments.append(Line)
SectionContent += SrcLineContent + DT.END_OF_LINE
continue
#
# First time encounter comment
#
else:
#
# Clear original data
#
HeaderComments = []
HeaderComments.append(Line)
StillCommentFalg = True
SectionContent += SrcLineContent + DT.END_OF_LINE
continue
else:
StillCommentFalg = False
if len(HeaderComments) >= 1:
LineComment = InfLineCommentObject()
LineCommentContent = ''
for Item in HeaderComments:
LineCommentContent += Item[0] + DT.END_OF_LINE
LineComment.SetHeaderComments(LineCommentContent)
#
# Find Tail comment.
#
if SrcLineContent.find(DT.TAB_COMMENT_SPLIT) > -1:
TailComments = SrcLineContent[SrcLineContent.find(DT.TAB_COMMENT_SPLIT):]
SrcLineContent = SrcLineContent[:SrcLineContent.find(DT.TAB_COMMENT_SPLIT)]
if LineComment is None:
LineComment = InfLineCommentObject()
LineComment.SetTailComments(TailComments)
#
# Find Macro
#
Name, Value = MacroParser((SrcLineContent, SrcLineNo),
FileName,
DT.MODEL_EFI_SOURCE_FILE,
self.FileLocalMacros)
if Name is not None:
SectionMacros[Name] = Value
LineComment = None
HeaderComments = []
continue
#
# Replace with Local section Macro and [Defines] section Macro.
#
SrcLineContent = InfExpandMacro(SrcLineContent,
(FileName, SrcLineContent, SrcLineNo),
self.FileLocalMacros,
SectionMacros)
TokenList = GetSplitValueList(SrcLineContent, DT.TAB_VALUE_SPLIT, 4)
ValueList[0:len(TokenList)] = TokenList
#
# Store section content string after MACRO replaced.
#
SectionContent += SrcLineContent + DT.END_OF_LINE
SourceList.append((ValueList, LineComment,
(SrcLineContent, SrcLineNo, FileName)))
ValueList = []
LineComment = None
TailComments = ''
HeaderComments = []
continue
#
# Current section archs
#
ArchList = []
for Item in self.LastSectionHeaderContent:
if Item[1] not in ArchList:
ArchList.append(Item[1])
InfSectionObject.SetSupArchList(Item[1])
InfSectionObject.SetAllContent(SectionContent)
if not InfSectionObject.SetSources(SourceList, Arch = ArchList):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_MODULE_SECTION_TYPE_ERROR % ("[Sources]"),
File=FileName,
Line=Item[3])
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Parser/InfSourceSectionParser.py
|
## @file
# This file contains the parser for [Guids], [Ppis], [Protocols] sections in INF files
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
InfGuidPpiProtocolSectionParser
'''
##
# Import Modules
#
import Logger.Log as Logger
from Logger import StringTable as ST
from Logger.ToolError import FORMAT_INVALID
from Parser.InfParserMisc import InfExpandMacro
from Library import DataType as DT
from Library import GlobalData
from Library.Parsing import MacroParser
from Library.Misc import GetSplitValueList
from Library.ParserValidate import IsValidIdString
from Library.ParserValidate import IsValidUserId
from Library.ParserValidate import IsValidArch
from Parser.InfParserMisc import InfParserSectionRoot
class InfGuidPpiProtocolSectionParser(InfParserSectionRoot):
## InfGuidParser
#
#
def InfGuidParser(self, SectionString, InfSectionObject, FileName):
#
# Macro defined in this section
#
SectionMacros = {}
ValueList = []
GuidList = []
CommentsList = []
CurrentLineVar = None
#
# Parse section content
#
for Line in SectionString:
LineContent = Line[0]
LineNo = Line[1]
if LineContent.strip() == '':
CommentsList = []
continue
if LineContent.strip().startswith(DT.TAB_COMMENT_SPLIT):
CommentsList.append(Line)
continue
else:
#
# Encounter a GUID entry
#
if LineContent.find(DT.TAB_COMMENT_SPLIT) > -1:
CommentsList.append((
LineContent[LineContent.find(DT.TAB_COMMENT_SPLIT):],
LineNo))
LineContent = \
LineContent[:LineContent.find(DT.TAB_COMMENT_SPLIT)]
if LineContent != '':
#
# Find Macro
#
Name, Value = MacroParser((LineContent, LineNo),
FileName,
DT.MODEL_EFI_GUID,
self.FileLocalMacros)
if Name is not None:
SectionMacros[Name] = Value
CommentsList = []
ValueList = []
continue
TokenList = GetSplitValueList(LineContent, DT.TAB_VALUE_SPLIT, 1)
ValueList[0:len(TokenList)] = TokenList
#
# Replace with Local section Macro and [Defines] section Macro.
#
ValueList = [InfExpandMacro(Value, (FileName, LineContent, LineNo),
self.FileLocalMacros, SectionMacros, True)
for Value in ValueList]
CurrentLineVar = (LineContent, LineNo, FileName)
if len(ValueList) >= 1:
GuidList.append((ValueList, CommentsList, CurrentLineVar))
CommentsList = []
ValueList = []
continue
#
# Current section archs
#
ArchList = []
LineIndex = -1
for Item in self.LastSectionHeaderContent:
LineIndex = Item[3]
if Item[1] not in ArchList:
ArchList.append(Item[1])
if not InfSectionObject.SetGuid(GuidList, Arch=ArchList):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_MODULE_SECTION_TYPE_ERROR % ("[Guid]"),
File=FileName,
Line=LineIndex)
## InfPpiParser
#
#
def InfPpiParser(self, SectionString, InfSectionObject, FileName):
#
# Macro defined in this section
#
SectionMacros = {}
ValueList = []
PpiList = []
CommentsList = []
CurrentLineVar = None
#
# Parse section content
#
for Line in SectionString:
LineContent = Line[0]
LineNo = Line[1]
if LineContent.strip() == '':
CommentsList = []
continue
if LineContent.strip().startswith(DT.TAB_COMMENT_SPLIT):
CommentsList.append(Line)
continue
else:
#
# Encounter a PPI entry
#
if LineContent.find(DT.TAB_COMMENT_SPLIT) > -1:
CommentsList.append((
LineContent[LineContent.find(DT.TAB_COMMENT_SPLIT):],
LineNo))
LineContent = \
LineContent[:LineContent.find(DT.TAB_COMMENT_SPLIT)]
if LineContent != '':
#
# Find Macro
#
Name, Value = MacroParser((LineContent, LineNo),
FileName,
DT.MODEL_EFI_PPI,
self.FileLocalMacros)
if Name is not None:
SectionMacros[Name] = Value
ValueList = []
CommentsList = []
continue
TokenList = GetSplitValueList(LineContent, DT.TAB_VALUE_SPLIT, 1)
ValueList[0:len(TokenList)] = TokenList
#
# Replace with Local section Macro and [Defines] section Macro.
#
ValueList = [InfExpandMacro(Value, (FileName, LineContent, LineNo), self.FileLocalMacros, SectionMacros)
for Value in ValueList]
CurrentLineVar = (LineContent, LineNo, FileName)
if len(ValueList) >= 1:
PpiList.append((ValueList, CommentsList, CurrentLineVar))
ValueList = []
CommentsList = []
continue
#
# Current section archs
#
ArchList = []
LineIndex = -1
for Item in self.LastSectionHeaderContent:
LineIndex = Item[3]
if Item[1] not in ArchList:
ArchList.append(Item[1])
if not InfSectionObject.SetPpi(PpiList, Arch=ArchList):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_MODULE_SECTION_TYPE_ERROR % ("[Ppis]"),
File=FileName,
Line=LineIndex)
## InfUserExtensionParser
#
#
def InfUserExtensionParser(self, SectionString, InfSectionObject, FileName):
UserExtensionContent = ''
#
# Parse section content
#
for Line in SectionString:
LineContent = Line[0]
# The following check is commented out so that a user extension section with no statements (just the section header in []) is supported
# if LineContent.strip() == '':
# continue
UserExtensionContent += LineContent + DT.END_OF_LINE
continue
#
# Current section UserId, IdString
#
IdContentList = []
LastItem = ''
SectionLineNo = None
for Item in self.LastSectionHeaderContent:
UserId = Item[1]
IdString = Item[2]
Arch = Item[3]
SectionLineNo = Item[4]
if not IsValidArch(Arch):
Logger.Error(
'InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_DEFINE_FROMAT_INVALID % (Arch),
File=GlobalData.gINF_MODULE_NAME,
Line=SectionLineNo,
ExtraData=None)
if (UserId, IdString, Arch) not in IdContentList:
#
# To check the UserId and IdString valid or not.
#
if not IsValidUserId(UserId):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_UE_SECTION_USER_ID_ERROR % (Item[1]),
File=GlobalData.gINF_MODULE_NAME,
Line=SectionLineNo,
ExtraData=None)
if not IsValidIdString(IdString):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_UE_SECTION_ID_STRING_ERROR % (IdString),
File=GlobalData.gINF_MODULE_NAME, Line=SectionLineNo,
ExtraData=None)
IdContentList.append((UserId, IdString, Arch))
else:
#
# Each UserExtensions section header must have a unique set
# of UserId, IdString and Arch values.
# This means that the same UserId can be used in more than one
# section header, provided the IdString or Arch values are
# different. The same IdString values can be used in more than
# one section header if the UserId or Arch values are
# different. The same UserId and the same IdString can be used
# in a section header if the Arch values are different in each
# of the section headers.
#
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_UE_SECTION_DUPLICATE_ERROR % (
IdString),
File=GlobalData.gINF_MODULE_NAME,
Line=SectionLineNo,
ExtraData=None)
LastItem = Item
if not InfSectionObject.SetUserExtension(UserExtensionContent,
IdContent=IdContentList,
LineNo=SectionLineNo):
Logger.Error\
('InfParser', FORMAT_INVALID, \
ST.ERR_INF_PARSER_MODULE_SECTION_TYPE_ERROR % ("[UserExtension]"), \
File=FileName, Line=LastItem[4])
def InfProtocolParser(self, SectionString, InfSectionObject, FileName):
#
# Macro defined in this section
#
SectionMacros = {}
ValueList = []
ProtocolList = []
CommentsList = []
CurrentLineVar = None
#
# Parse section content
#
for Line in SectionString:
LineContent = Line[0]
LineNo = Line[1]
if LineContent.strip() == '':
CommentsList = []
continue
if LineContent.strip().startswith(DT.TAB_COMMENT_SPLIT):
CommentsList.append(Line)
continue
else:
#
# Encounter a Protocol entry
#
if LineContent.find(DT.TAB_COMMENT_SPLIT) > -1:
CommentsList.append((
LineContent[LineContent.find(DT.TAB_COMMENT_SPLIT):],
LineNo))
LineContent = \
LineContent[:LineContent.find(DT.TAB_COMMENT_SPLIT)]
if LineContent != '':
#
# Find Macro
#
Name, Value = MacroParser((LineContent, LineNo),
FileName,
DT.MODEL_EFI_PROTOCOL,
self.FileLocalMacros)
if Name is not None:
SectionMacros[Name] = Value
ValueList = []
CommentsList = []
continue
TokenList = GetSplitValueList(LineContent, DT.TAB_VALUE_SPLIT, 1)
ValueList[0:len(TokenList)] = TokenList
#
# Replace with Local section Macro and [Defines] section Macro.
#
ValueList = [InfExpandMacro(Value, (FileName, LineContent, LineNo), self.FileLocalMacros, SectionMacros)
for Value in ValueList]
CurrentLineVar = (LineContent, LineNo, FileName)
if len(ValueList) >= 1:
ProtocolList.append((ValueList, CommentsList, CurrentLineVar))
ValueList = []
CommentsList = []
continue
#
# Current section archs
#
ArchList = []
LineIndex = -1
for Item in self.LastSectionHeaderContent:
LineIndex = Item[3]
if Item[1] not in ArchList:
ArchList.append(Item[1])
if not InfSectionObject.SetProtocol(ProtocolList, Arch=ArchList):
Logger.Error\
('InfParser', FORMAT_INVALID, \
ST.ERR_INF_PARSER_MODULE_SECTION_TYPE_ERROR % ("[Protocol]"), \
File=FileName, Line=LineIndex)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Parser/InfGuidPpiProtocolSectionParser.py
|
## @file
# Python 'Parser' package initialization file.
#
# This file is required to make the Python interpreter treat the directory
# as a package.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
Parser
'''
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Parser/__init__.py
|
## @file
# This file is used to provide methods for processing AsBuilt INF files. It will be consumed by InfParser
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
'''
InfAsBuiltProcess
'''
## Import modules
#
import os
import re
from Library import GlobalData
import Logger.Log as Logger
from Logger import StringTable as ST
from Logger import ToolError
from Library.StringUtils import GetSplitValueList
from Library.Misc import GetHelpStringByRemoveHashKey
from Library.Misc import ValidFile
from Library.Misc import ProcessLineExtender
from Library.ParserValidate import IsValidPath
from Library.Parsing import MacroParser
from Parser.InfParserMisc import InfExpandMacro
from Library import DataType as DT
## GetLibInstanceInfo
#
# Get the information from Library Instance INF file.
#
# @param String A string starting with # and followed by an INF file path
# @param WorkSpace The workspace directory used to combine with the INF file path.
#
# @return GUID, Version
def GetLibInstanceInfo(String, WorkSpace, LineNo, CurrentInfFileName):
FileGuidString = ""
VerString = ""
OriginalString = String
String = String.strip()
if not String:
return None, None
#
# Remove "#" characters at the beginning
#
String = GetHelpStringByRemoveHashKey(String)
String = String.strip()
#
# To deal with library instance specified by GUID and version
#
RegFormatGuidPattern = re.compile("\s*([0-9a-fA-F]){8}-"
"([0-9a-fA-F]){4}-"
"([0-9a-fA-F]){4}-"
"([0-9a-fA-F]){4}-"
"([0-9a-fA-F]){12}\s*")
VersionPattern = re.compile('[\t\s]*\d+(\.\d+)?[\t\s]*')
GuidMatchedObj = RegFormatGuidPattern.search(String)
if String.upper().startswith('GUID') and GuidMatchedObj and 'Version' in String:
VersionStr = String[String.upper().find('VERSION') + 8:]
VersionMatchedObj = VersionPattern.search(VersionStr)
if VersionMatchedObj:
Guid = GuidMatchedObj.group().strip()
Version = VersionMatchedObj.group().strip()
return Guid, Version
#
# To deal with library instance specified by file name
#
FileLinesList = GetFileLineContent(String, WorkSpace, LineNo, OriginalString)
ReFindFileGuidPattern = re.compile("^\s*FILE_GUID\s*=.*$")
ReFindVerStringPattern = re.compile("^\s*VERSION_STRING\s*=.*$")
for Line in FileLinesList:
if ReFindFileGuidPattern.match(Line):
FileGuidString = Line
if ReFindVerStringPattern.match(Line):
VerString = Line
if FileGuidString:
FileGuidString = GetSplitValueList(FileGuidString, '=', 1)[1]
if VerString:
VerString = GetSplitValueList(VerString, '=', 1)[1]
return FileGuidString, VerString
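#
# Illustrative sketch (hypothetical inputs, not from the original tool): the two
# supported input forms.
#
# GetLibInstanceInfo('## GUID = 1E73767F-8F52-4603-AEB4-F29B510B6766 Version = 1.0',
#                    WorkSpace, LineNo, 'Current.inf')
#   -> ('1E73767F-8F52-4603-AEB4-F29B510B6766', '1.0')
#
# GetLibInstanceInfo('# MdePkg/Library/BaseLib/BaseLib.inf',
#                    WorkSpace, LineNo, 'Current.inf')
#   -> the FILE_GUID / VERSION_STRING values read from that INF under WorkSpace
#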
## GetPackageListInfo
#
# Get the package information from INF file.
#
# @param FileNameString A string starting with # and followed by an INF file path
# @param WorkSpace The workspace directory used to combine with the INF file path.
#
# @return List of package paths found in the [Packages] section
def GetPackageListInfo(FileNameString, WorkSpace, LineNo):
PackageInfoList = []
DefineSectionMacros = {}
PackageSectionMacros = {}
FileLinesList = GetFileLineContent(FileNameString, WorkSpace, LineNo, '')
RePackageHeader = re.compile('^\s*\[Packages.*\].*$')
ReDefineHeader = re.compile('^\s*\[Defines].*$')
PackageHederFlag = False
DefineHeaderFlag = False
LineNo = -1
for Line in FileLinesList:
LineNo += 1
Line = Line.strip()
if Line.startswith('['):
PackageHederFlag = False
DefineHeaderFlag = False
if Line.startswith("#"):
continue
if not Line:
continue
#
# Found [Packages] section
#
if RePackageHeader.match(Line):
PackageHederFlag = True
continue
#
# Found [Define] section
#
if ReDefineHeader.match(Line):
DefineHeaderFlag = True
continue
if DefineHeaderFlag:
#
# Find Macro
#
Name, Value = MacroParser((Line, LineNo),
FileNameString,
DT.MODEL_META_DATA_HEADER,
DefineSectionMacros)
if Name is not None:
DefineSectionMacros[Name] = Value
continue
if PackageHederFlag:
#
# Find Macro
#
Name, Value = MacroParser((Line, LineNo),
FileNameString,
DT.MODEL_META_DATA_PACKAGE,
DefineSectionMacros)
if Name is not None:
PackageSectionMacros[Name] = Value
continue
#
# Replace with Local section Macro and [Defines] section Macro.
#
Line = InfExpandMacro(Line, (FileNameString, Line, LineNo), DefineSectionMacros, PackageSectionMacros, True)
Line = GetSplitValueList(Line, "#", 1)[0]
Line = GetSplitValueList(Line, "|", 1)[0]
PackageInfoList.append(Line)
return PackageInfoList
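#
# Illustrative sketch (hypothetical INF content): for a file containing
#
#   [Defines]
#     DEFINE PKG_DIR = MdePkg
#   [Packages]
#     $(PKG_DIR)/MdePkg.dec  # some comment
#
# GetPackageListInfo(FileNameString, WorkSpace, -1) would return
# ['MdePkg/MdePkg.dec'] after macro expansion and stripping of the '#'
# comment and any '|' suffix.
#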
def GetFileLineContent(FileName, WorkSpace, LineNo, OriginalString):
if not LineNo:
LineNo = -1
#
# Validate file name exist.
#
FullFileName = os.path.normpath(os.path.realpath(os.path.join(WorkSpace, FileName)))
if not (ValidFile(FullFileName)):
return []
#
# Validate file exist/format.
#
if not IsValidPath(FileName, WorkSpace):
return []
FileLinesList = []
try:
FullFileName = FullFileName.replace('\\', '/')
Inputfile = open(FullFileName, "r")
try:
FileLinesList = Inputfile.readlines()
except BaseException:
Logger.Error("InfParser", ToolError.FILE_READ_FAILURE, ST.ERR_FILE_OPEN_FAILURE, File=FullFileName)
finally:
Inputfile.close()
except BaseException:
Logger.Error("InfParser",
ToolError.FILE_READ_FAILURE,
ST.ERR_FILE_OPEN_FAILURE,
File=FullFileName)
FileLinesList = ProcessLineExtender(FileLinesList)
return FileLinesList
##
# Get all INF files from current workspace
#
#
def GetInfsFromWorkSpace(WorkSpace):
InfFiles = []
for top, dirs, files in os.walk(WorkSpace):
dirs = dirs # just for pylint
for File in files:
if File.upper().endswith(".INF"):
InfFiles.append(os.path.join(top, File))
return InfFiles
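#
# Example (illustrative, hypothetical path): walk a workspace and collect
# every *.inf file.
#
# for InfPath in GetInfsFromWorkSpace('/path/to/workspace'):
#     print(InfPath)
#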
##
# Get GUID and version from library instance file
#
#
def GetGuidVerFormLibInstance(Guid, Version, WorkSpace, CurrentInfFileName):
for InfFile in GetInfsFromWorkSpace(WorkSpace):
try:
if InfFile.strip().upper() == CurrentInfFileName.strip().upper():
continue
InfFile = InfFile.replace('\\', '/')
if InfFile not in GlobalData.gLIBINSTANCEDICT:
InfFileObj = open(InfFile, "r")
GlobalData.gLIBINSTANCEDICT[InfFile] = InfFileObj
else:
InfFileObj = GlobalData.gLIBINSTANCEDICT[InfFile]
except BaseException:
Logger.Error("InfParser",
ToolError.FILE_READ_FAILURE,
ST.ERR_FILE_OPEN_FAILURE,
File=InfFile)
try:
FileLinesList = InfFileObj.readlines()
FileLinesList = ProcessLineExtender(FileLinesList)
ReFindFileGuidPattern = re.compile("^\s*FILE_GUID\s*=.*$")
ReFindVerStringPattern = re.compile("^\s*VERSION_STRING\s*=.*$")
for Line in FileLinesList:
if ReFindFileGuidPattern.match(Line):
FileGuidString = Line
if ReFindVerStringPattern.match(Line):
VerString = Line
if FileGuidString:
FileGuidString = GetSplitValueList(FileGuidString, '=', 1)[1]
if VerString:
VerString = GetSplitValueList(VerString, '=', 1)[1]
if FileGuidString.strip().upper() == Guid.upper() and \
VerString.strip().upper() == Version.upper():
return Guid, Version
except BaseException:
Logger.Error("InfParser", ToolError.FILE_READ_FAILURE, ST.ERR_FILE_OPEN_FAILURE, File=InfFile)
finally:
InfFileObj.close()
return '', ''
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Parser/InfAsBuiltProcess.py
|
## @file
# This file contains miscellaneous functions for the INF parser
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
InfParserMisc
'''
##
# Import Modules
#
import re
from Library import DataType as DT
from Library.StringUtils import gMACRO_PATTERN
from Library.StringUtils import ReplaceMacro
from Object.Parser.InfMisc import ErrorInInf
from Logger.StringTable import ERR_MARCO_DEFINITION_MISS_ERROR
#
# Global variable
#
#
# Sections can exist in INF file
#
gINF_SECTION_DEF = {
DT.TAB_UNKNOWN.upper() : DT.MODEL_UNKNOWN,
DT.TAB_HEADER.upper() : DT.MODEL_META_DATA_FILE_HEADER,
DT.TAB_INF_DEFINES.upper() : DT.MODEL_META_DATA_DEFINE,
DT.TAB_BUILD_OPTIONS.upper() : DT.MODEL_META_DATA_BUILD_OPTION,
DT.TAB_LIBRARY_CLASSES.upper() : DT.MODEL_EFI_LIBRARY_CLASS,
DT.TAB_PACKAGES.upper() : DT.MODEL_META_DATA_PACKAGE,
DT.TAB_INF_FIXED_PCD.upper() : DT.MODEL_PCD_FIXED_AT_BUILD,
DT.TAB_INF_PATCH_PCD.upper() : DT.MODEL_PCD_PATCHABLE_IN_MODULE,
DT.TAB_INF_FEATURE_PCD.upper() : DT.MODEL_PCD_FEATURE_FLAG,
DT.TAB_INF_PCD_EX.upper() : DT.MODEL_PCD_DYNAMIC_EX,
DT.TAB_INF_PCD.upper() : DT.MODEL_PCD_DYNAMIC,
DT.TAB_SOURCES.upper() : DT.MODEL_EFI_SOURCE_FILE,
DT.TAB_GUIDS.upper() : DT.MODEL_EFI_GUID,
DT.TAB_PROTOCOLS.upper() : DT.MODEL_EFI_PROTOCOL,
DT.TAB_PPIS.upper() : DT.MODEL_EFI_PPI,
DT.TAB_DEPEX.upper() : DT.MODEL_EFI_DEPEX,
DT.TAB_BINARIES.upper() : DT.MODEL_EFI_BINARY_FILE,
DT.TAB_USER_EXTENSIONS.upper() : DT.MODEL_META_DATA_USER_EXTENSION
#
# EDK1 section
# TAB_NMAKE.upper() : MODEL_META_DATA_NMAKE
#
}
## InfExpandMacro
#
# Expand MACRO definitions with MACROs defined in the [Defines] section and the
# specific section. MACROs defined in the specific section have higher priority
# and are expanded first.
#
# @param LineInfo Contains FileName, LineContent, LineNo
# @param GlobalMacros MACROs defined in the INF [Defines] section
# @param SectionMacros MACROs defined in the specific INF section
# @param Flag If set to True, macros inside quoted strings are skipped
#
def InfExpandMacro(Content, LineInfo, GlobalMacros=None, SectionMacros=None, Flag=False):
if GlobalMacros is None:
GlobalMacros = {}
if SectionMacros is None:
SectionMacros = {}
FileName = LineInfo[0]
LineContent = LineInfo[1]
LineNo = LineInfo[2]
# Don't expand macros in comments
if LineContent.strip().startswith("#"):
return Content
NewLineInfo = (FileName, LineNo, LineContent)
#
# First, replace MACROs with values defined in the specific section
#
Content = ReplaceMacro (Content,
SectionMacros,
False,
(LineContent, LineNo),
FileName,
Flag)
#
# Then replace MACROs with values defined in the [Defines] section
#
Content = ReplaceMacro (Content,
GlobalMacros,
False,
(LineContent, LineNo),
FileName,
Flag)
MacroUsed = gMACRO_PATTERN.findall(Content)
#
# no macro found in String, stop replacing
#
if len(MacroUsed) == 0:
return Content
else:
for Macro in MacroUsed:
gQuotedMacro = re.compile(".*\".*\$\(%s\).*\".*"%(Macro))
if not gQuotedMacro.match(Content):
#
# There are still MACROs that cannot be expanded.
#
ErrorInInf (ERR_MARCO_DEFINITION_MISS_ERROR,
LineInfo=NewLineInfo)
return Content
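#
# Illustrative sketch (hypothetical macro values): with
# GlobalMacros = {'ARCH': 'X64'} and SectionMacros = {'SRC': 'Foo.c'},
#
# InfExpandMacro('$(SRC) | $(ARCH)',
#                ('Module.inf', '$(SRC) | $(ARCH)', 5),
#                GlobalMacros, SectionMacros)
#   -> 'Foo.c | X64'  (section macros are expanded before [Defines] macros)
#
# A macro that remains unresolved outside a quoted string triggers
# ErrorInInf with ERR_MARCO_DEFINITION_MISS_ERROR.
#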
## IsBinaryInf
#
# Judge whether the INF file is Binary INF or Common INF
#
# @param FileLineList A list contain all INF file content.
#
def IsBinaryInf(FileLineList):
if not FileLineList:
return False
ReIsSourcesSection = re.compile("^\s*\[Sources.*\]\s.*$", re.IGNORECASE)
ReIsBinarySection = re.compile("^\s*\[Binaries.*\]\s.*$", re.IGNORECASE)
BinarySectionFoundFlag = False
for Line in FileLineList:
if ReIsSourcesSection.match(Line):
return False
if ReIsBinarySection.match(Line):
BinarySectionFoundFlag = True
if BinarySectionFoundFlag:
return True
return False
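#
# Illustrative examples (hypothetical file content; note the regexes require
# whitespace after the closing bracket):
#
# IsBinaryInf(['[Binaries.X64] ', ' PE32|Foo.efi']) -> True
# IsBinaryInf(['[Sources] ', ' Foo.c'])             -> False
#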
## IsLibInstanceInfo
#
# Judge whether the string contains the ## @LIB_INSTANCES marker.
#
# @param String
#
# @return Flag
#
def IsLibInstanceInfo(String):
ReIsLibInstance = re.compile("^\s*##\s*@LIB_INSTANCES\s*$")
if ReIsLibInstance.match(String):
return True
else:
return False
## IsAsBuildOptionInfo
#
# Judge whether the string contains the ## @AsBuilt marker.
#
# @param String
#
# @return Flag
#
def IsAsBuildOptionInfo(String):
ReIsAsBuildInstance = re.compile("^\s*##\s*@AsBuilt\s*$")
if ReIsAsBuildInstance.match(String):
return True
else:
return False
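#
# Illustrative examples:
#
# IsLibInstanceInfo('## @LIB_INSTANCES') -> True
# IsAsBuildOptionInfo('## @AsBuilt')     -> True
# IsAsBuildOptionInfo('## @ASBUILD')     -> False  (the pattern is case sensitive)
#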
class InfParserSectionRoot(object):
def __init__(self):
#
# Macros defined in [Define] section are file scope global
#
self.FileLocalMacros = {}
#
# Current Section Header content.
#
self.SectionHeaderContent = []
#
# Last time Section Header content.
#
self.LastSectionHeaderContent = []
self.FullPath = ''
self.InfDefSection = None
self.InfBuildOptionSection = None
self.InfLibraryClassSection = None
self.InfPackageSection = None
self.InfPcdSection = None
self.InfSourcesSection = None
self.InfUserExtensionSection = None
self.InfProtocolSection = None
self.InfPpiSection = None
self.InfGuidSection = None
self.InfDepexSection = None
self.InfPeiDepexSection = None
self.InfDxeDepexSection = None
self.InfSmmDepexSection = None
self.InfBinariesSection = None
self.InfHeader = None
self.InfSpecialCommentSection = None
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Parser/InfParserMisc.py
|
## @file
# This file defines helper classes and functions for the DEC parser
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
'''
DecParserMisc
'''
## Import modules
#
import os
import Logger.Log as Logger
from Logger.ToolError import FILE_PARSE_FAILURE
from Logger import StringTable as ST
from Library.DataType import TAB_COMMENT_SPLIT
from Library.DataType import TAB_COMMENT_EDK1_SPLIT
from Library.ExpressionValidate import IsValidBareCString
from Library.ParserValidate import IsValidCFormatGuid
from Library.ExpressionValidate import IsValidFeatureFlagExp
from Library.ExpressionValidate import IsValidLogicalExpr
from Library.ExpressionValidate import IsValidStringTest
from Library.Misc import CheckGuidRegFormat
TOOL_NAME = 'DecParser'
VERSION_PATTERN = '[0-9]+(\.[0-9]+)?'
CVAR_PATTERN = '[_a-zA-Z][a-zA-Z0-9_]*'
PCD_TOKEN_PATTERN = '(0[xX]0*[a-fA-F0-9]{1,8})|([0-9]+)'
MACRO_PATTERN = '[A-Z][_A-Z0-9]*'
## FileContent
# Class to hold DEC file information
#
class FileContent:
def __init__(self, Filename, FileContent2):
self.Filename = Filename
self.PackagePath, self.PackageFile = os.path.split(Filename)
self.LineIndex = 0
self.CurrentLine = ''
self.NextLine = ''
self.HeadComment = []
self.TailComment = []
self.CurrentScope = None
self.Content = FileContent2
self.Macros = {}
self.FileLines = len(FileContent2)
def GetNextLine(self):
if self.LineIndex >= self.FileLines:
return ''
Line = self.Content[self.LineIndex]
self.LineIndex += 1
return Line
def UndoNextLine(self):
if self.LineIndex > 0:
self.LineIndex -= 1
def ResetNext(self):
self.HeadComment = []
self.TailComment = []
self.NextLine = ''
def SetNext(self, Line, HeadComment, TailComment):
self.NextLine = Line
self.HeadComment = HeadComment
self.TailComment = TailComment
def IsEndOfFile(self):
return self.LineIndex >= self.FileLines
## StripRoot
#
# Strip root path
#
# @param Root: Root must be absolute path
# @param Path: Path to be stripped
#
def StripRoot(Root, Path):
OrigPath = Path
Root = os.path.normpath(Root)
Path = os.path.normpath(Path)
if not os.path.isabs(Root):
return OrigPath
if Path.startswith(Root):
Path = Path[len(Root):]
if Path and Path[0] == os.sep:
Path = Path[1:]
return Path
return OrigPath
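#
# Illustrative examples (hypothetical POSIX paths):
#
# StripRoot('/ws', '/ws/MdePkg/MdePkg.dec') -> 'MdePkg/MdePkg.dec'
# StripRoot('relative/root', '/ws/x.dec')   -> '/ws/x.dec'  (root not absolute)
#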
## CleanString
#
# Split a line into its statement and comment parts
# and remove surrounding whitespace
#
# @param Line: The string to be cleaned
# @param CommentCharacter: Comment char, used to ignore comment content,
# default is DataType.TAB_COMMENT_SPLIT
#
def CleanString(Line, CommentCharacter=TAB_COMMENT_SPLIT, \
AllowCppStyleComment=False):
#
# remove whitespace
#
Line = Line.strip()
#
# Replace EDK1's comment character
#
if AllowCppStyleComment:
Line = Line.replace(TAB_COMMENT_EDK1_SPLIT, CommentCharacter)
#
# separate comments and statements
#
Comment = ''
InQuote = False
for Index in range(0, len(Line)):
if Line[Index] == '"':
InQuote = not InQuote
continue
if Line[Index] == CommentCharacter and not InQuote:
Comment = Line[Index:].strip()
Line = Line[0:Index].strip()
break
return Line, Comment
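#
# Illustrative examples:
#
# CleanString('  gFooGuid = {0x1}  # tail comment ')
#   -> ('gFooGuid = {0x1}', '# tail comment')
# CleanString('Value = "a # not a comment"')
#   -> ('Value = "a # not a comment"', '')
#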
## IsValidNumValUint8
#
# Check if Token is NumValUint8: <NumValUint8> ::= {<ShortNum>} {<UINT8>} {<Expression>}
#
# @param Token: Token to be checked
#
def IsValidNumValUint8(Token):
Valid = True
Cause = ""
TokenValue = None
Token = Token.strip()
if Token.lower().startswith('0x'):
Base = 16
else:
Base = 10
try:
TokenValue = int(Token, Base)
except BaseException:
Valid, Cause = IsValidLogicalExpr(Token, True)
if Cause:
pass
if not Valid:
return False
if TokenValue and (TokenValue < 0 or TokenValue > 0xFF):
return False
else:
return True
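#
# Illustrative examples:
#
# IsValidNumValUint8('0x1F') -> True
# IsValidNumValUint8('255')  -> True
# IsValidNumValUint8('256')  -> False  (exceeds the UINT8 range)
#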
## IsValidNList
#
# Check if Value has the format of <NumValUint8> ["," <NumValUint8>]{0,}
# <NumValUint8> ::= {<ShortNum>} {<UINT8>} {<Expression>}
#
# @param Value: Value to be checked
#
def IsValidNList(Value):
Par = ParserHelper(Value)
if Par.End():
return False
while not Par.End():
Token = Par.GetToken(',')
if not IsValidNumValUint8(Token):
return False
if Par.Expect(','):
if Par.End():
return False
continue
else:
break
return Par.End()
## IsValidCArray
#
# check Array is valid
#
# @param Array: The input Array
#
def IsValidCArray(Array):
Par = ParserHelper(Array)
if not Par.Expect('{'):
return False
if Par.End():
return False
while not Par.End():
Token = Par.GetToken(',}')
#
# ShortNum, UINT8, Expression
#
if not IsValidNumValUint8(Token):
return False
if Par.Expect(','):
if Par.End():
return False
continue
elif Par.Expect('}'):
#
# End of C array
#
break
else:
return False
return Par.End()
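#
# Illustrative examples:
#
# IsValidCArray('{0x01, 0x0A, 0xFF}') -> True
# IsValidCArray('{0x01, 0x100}')      -> False  (0x100 exceeds UINT8)
#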
## IsValidPcdDatum
#
# check PcdDatum is valid
#
# @param Type: The pcd Type
# @param Value: The pcd Value
#
def IsValidPcdDatum(Type, Value):
if not Value:
return False, ST.ERR_DECPARSE_PCD_VALUE_EMPTY
Valid = True
Cause = ""
if Type not in ["UINT8", "UINT16", "UINT32", "UINT64", "VOID*", "BOOLEAN"]:
return False, ST.ERR_DECPARSE_PCD_TYPE
if Type == "VOID*":
if not ((Value.startswith('L"') or Value.startswith('"') and \
Value.endswith('"'))
or (IsValidCArray(Value)) or (IsValidCFormatGuid(Value)) \
or (IsValidNList(Value)) or (CheckGuidRegFormat(Value))
):
return False, ST.ERR_DECPARSE_PCD_VOID % (Value, Type)
RealString = Value[Value.find('"') + 1 :-1]
if RealString:
if not IsValidBareCString(RealString):
return False, ST.ERR_DECPARSE_PCD_VOID % (Value, Type)
elif Type == 'BOOLEAN':
if Value in ['TRUE', 'FALSE', 'true', 'false', 'True', 'False',
'0x1', '0x01', '1', '0x0', '0x00', '0']:
return True, ""
Valid, Cause = IsValidStringTest(Value, True)
if not Valid:
Valid, Cause = IsValidFeatureFlagExp(Value, True)
if not Valid:
return False, Cause
else:
if Value and (Value[0] == '-' or Value[0] == '+'):
return False, ST.ERR_DECPARSE_PCD_INT_NEGTIVE % (Value, Type)
try:
StrVal = Value
if Value and not Value.startswith('0x') \
and not Value.startswith('0X'):
Value = Value.lstrip('0')
if not Value:
return True, ""
Value = int(Value, 0)
MAX_VAL_TYPE = {"BOOLEAN": 0x01, 'UINT8': 0xFF, 'UINT16': 0xFFFF, 'UINT32': 0xFFFFFFFF,
'UINT64': 0xFFFFFFFFFFFFFFFF}
if Value > MAX_VAL_TYPE[Type]:
return False, ST.ERR_DECPARSE_PCD_INT_EXCEED % (StrVal, Type)
except BaseException:
Valid, Cause = IsValidLogicalExpr(Value, True)
if not Valid:
return False, Cause
return True, ""
## ParserHelper
#
class ParserHelper:
def __init__(self, String, File=''):
self._String = String
self._StrLen = len(String)
self._Index = 0
self._File = File
## End
#
# End
#
def End(self):
self.__SkipWhitespace()
return self._Index >= self._StrLen
## __SkipWhitespace
#
# Skip whitespace
#
def __SkipWhitespace(self):
for Char in self._String[self._Index:]:
if Char not in ' \t':
break
self._Index += 1
## Expect
#
# Expect a char in the string
#
# @param ExpectChar: char expected at the current index of the string
#
def Expect(self, ExpectChar):
self.__SkipWhitespace()
for Char in self._String[self._Index:]:
if Char != ExpectChar:
return False
else:
self._Index += 1
return True
#
# Index out of bounds of String
#
return False
## GetToken
#
# Get a token until a char in StopChar is encountered; leading whitespace is consumed
#
# @param StopChar: get the token until a char in StopChar is encountered
# @param SkipPair: can only be ' or "; StopChar occurrences inside a SkipPair quote are skipped
#
def GetToken(self, StopChar='.,|\t ', SkipPair='"'):
self.__SkipWhitespace()
PreIndex = self._Index
InQuote = False
LastChar = ''
for Char in self._String[self._Index:]:
if Char == SkipPair and LastChar != '\\':
InQuote = not InQuote
if Char in StopChar and not InQuote:
break
self._Index += 1
if Char == '\\' and LastChar == '\\':
LastChar = ''
else:
LastChar = Char
return self._String[PreIndex:self._Index]
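#
# Illustrative examples (leading whitespace consumed, quoted pairs skipped):
#
# ParserHelper('A.B|C').GetToken()      -> 'A'      (default stops include '.')
# ParserHelper('"a|b"|c').GetToken('|') -> '"a|b"'  ('|' inside quotes is kept)
#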
## AssertChar
#
# Assert that the char at the current index of the string is AssertChar;
# otherwise report an error message
#
# @param AssertChar: AssertChar
# @param ErrorString: ErrorString
# @param ErrorLineNum: ErrorLineNum
#
def AssertChar(self, AssertChar, ErrorString, ErrorLineNum):
if not self.Expect(AssertChar):
Logger.Error(TOOL_NAME, FILE_PARSE_FAILURE, File=self._File,
Line=ErrorLineNum, ExtraData=ErrorString)
## AssertEnd
#
# @param ErrorString: ErrorString
# @param ErrorLineNum: ErrorLineNum
#
def AssertEnd(self, ErrorString, ErrorLineNum):
self.__SkipWhitespace()
if self._Index != self._StrLen:
Logger.Error(TOOL_NAME, FILE_PARSE_FAILURE, File=self._File,
Line=ErrorLineNum, ExtraData=ErrorString)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Parser/DecParserMisc.py
|
## @file
# This file contains the parser for the [Libraries] sections in an INF file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
InfLibrarySectionParser
'''
##
# Import Modules
#
import Logger.Log as Logger
from Logger import StringTable as ST
from Logger.ToolError import FORMAT_INVALID
from Parser.InfParserMisc import InfExpandMacro
from Library import DataType as DT
from Library.Parsing import MacroParser
from Library.Misc import GetSplitValueList
from Object.Parser.InfCommonObject import InfLineCommentObject
from Library import GlobalData
from Parser.InfParserMisc import IsLibInstanceInfo
from Parser.InfAsBuiltProcess import GetLibInstanceInfo
from Parser.InfParserMisc import InfParserSectionRoot
class InfLibrarySectionParser(InfParserSectionRoot):
## InfLibraryParser
#
#
def InfLibraryParser(self, SectionString, InfSectionObject, FileName):
#
# For Common INF file
#
if not GlobalData.gIS_BINARY_INF:
#
# Macro defined in this section
#
SectionMacros = {}
ValueList = []
LibraryList = []
LibStillCommentFalg = False
LibHeaderComments = []
LibLineComment = None
#
# Parse section content
#
for Line in SectionString:
LibLineContent = Line[0]
LibLineNo = Line[1]
if LibLineContent.strip() == '':
continue
#
# Found Header Comments
#
if LibLineContent.strip().startswith(DT.TAB_COMMENT_SPLIT):
#
# The previous line is a comment and this line continues the comment block.
#
if LibStillCommentFalg:
LibHeaderComments.append(Line)
continue
#
# First time encountering a comment
#
else:
#
# Clear original data
#
LibHeaderComments = []
LibHeaderComments.append(Line)
LibStillCommentFalg = True
continue
else:
LibStillCommentFalg = False
if len(LibHeaderComments) >= 1:
LibLineComment = InfLineCommentObject()
LineCommentContent = ''
for Item in LibHeaderComments:
LineCommentContent += Item[0] + DT.END_OF_LINE
LibLineComment.SetHeaderComments(LineCommentContent)
#
# Find Tail comment.
#
if LibLineContent.find(DT.TAB_COMMENT_SPLIT) > -1:
LibTailComments = LibLineContent[LibLineContent.find(DT.TAB_COMMENT_SPLIT):]
LibLineContent = LibLineContent[:LibLineContent.find(DT.TAB_COMMENT_SPLIT)]
if LibLineComment is None:
LibLineComment = InfLineCommentObject()
LibLineComment.SetTailComments(LibTailComments)
#
# Find Macro
#
Name, Value = MacroParser((LibLineContent, LibLineNo),
FileName,
DT.MODEL_EFI_LIBRARY_CLASS,
self.FileLocalMacros)
if Name is not None:
SectionMacros[Name] = Value
LibLineComment = None
LibHeaderComments = []
continue
TokenList = GetSplitValueList(LibLineContent, DT.TAB_VALUE_SPLIT, 1)
ValueList[0:len(TokenList)] = TokenList
#
# Replace with Local section Macro and [Defines] section Macro.
#
ValueList = [InfExpandMacro(Value, (FileName, LibLineContent, LibLineNo),
self.FileLocalMacros, SectionMacros, True)
for Value in ValueList]
LibraryList.append((ValueList, LibLineComment,
(LibLineContent, LibLineNo, FileName)))
ValueList = []
LibLineComment = None
LibTailComments = ''
LibHeaderComments = []
continue
#
# Current section archs
#
KeyList = []
for Item in self.LastSectionHeaderContent:
if (Item[1], Item[2]) not in KeyList:
KeyList.append((Item[1], Item[2]))
if not InfSectionObject.SetLibraryClasses(LibraryList, KeyList=KeyList):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_MODULE_SECTION_TYPE_ERROR % ("[Library]"),
File=FileName,
Line=Item[3])
#
# For Binary INF
#
else:
self.InfAsBuiltLibraryParser(SectionString, InfSectionObject, FileName)
def InfAsBuiltLibraryParser(self, SectionString, InfSectionObject, FileName):
LibraryList = []
LibInsFlag = False
for Line in SectionString:
LineContent = Line[0]
LineNo = Line[1]
if LineContent.strip() == '':
LibInsFlag = False
continue
if not LineContent.strip().startswith("#"):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_LIB_CONTATIN_ASBUILD_AND_COMMON,
File=FileName,
Line=LineNo,
ExtraData=LineContent)
if IsLibInstanceInfo(LineContent):
LibInsFlag = True
continue
if LibInsFlag:
LibGuid, LibVer = GetLibInstanceInfo(LineContent, GlobalData.gWORKSPACE, LineNo, FileName)
#
# If the VERSION_STRING is missing from the INF file, the tool should default to "0".
#
if LibVer == '':
LibVer = '0'
if LibGuid != '':
if (LibGuid, LibVer) not in LibraryList:
LibraryList.append((LibGuid, LibVer))
#
# Current section archs
#
KeyList = []
Item = ['', '', '']
for Item in self.LastSectionHeaderContent:
if (Item[1], Item[2]) not in KeyList:
KeyList.append((Item[1], Item[2]))
if not InfSectionObject.SetLibraryClasses(LibraryList, KeyList=KeyList):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_MODULE_SECTION_TYPE_ERROR % ("[Library]"),
File=FileName,
Line=Item[3])
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Parser/InfLibrarySectionParser.py
|
## @file
# This file is used to parse DEC files. It will be consumed by DecParser
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
'''
DecParser
'''
## Import modules
#
import Logger.Log as Logger
from Logger.ToolError import FILE_PARSE_FAILURE
from Logger.ToolError import FILE_OPEN_FAILURE
from Logger import StringTable as ST
from Logger.ToolError import FORMAT_INVALID
import Library.DataType as DT
from Library.ParserValidate import IsValidToken
from Library.ParserValidate import IsValidPath
from Library.ParserValidate import IsValidCFormatGuid
from Library.ParserValidate import IsValidIdString
from Library.ParserValidate import IsValidUserId
from Library.ParserValidate import IsValidArch
from Library.ParserValidate import IsValidWord
from Library.ParserValidate import IsValidDecVersionVal
from Parser.DecParserMisc import TOOL_NAME
from Parser.DecParserMisc import CleanString
from Parser.DecParserMisc import IsValidPcdDatum
from Parser.DecParserMisc import ParserHelper
from Parser.DecParserMisc import StripRoot
from Parser.DecParserMisc import VERSION_PATTERN
from Parser.DecParserMisc import CVAR_PATTERN
from Parser.DecParserMisc import PCD_TOKEN_PATTERN
from Parser.DecParserMisc import MACRO_PATTERN
from Parser.DecParserMisc import FileContent
from Object.Parser.DecObject import _DecComments
from Object.Parser.DecObject import DecDefineObject
from Object.Parser.DecObject import DecDefineItemObject
from Object.Parser.DecObject import DecIncludeObject
from Object.Parser.DecObject import DecIncludeItemObject
from Object.Parser.DecObject import DecLibraryclassObject
from Object.Parser.DecObject import DecLibraryclassItemObject
from Object.Parser.DecObject import DecGuidObject
from Object.Parser.DecObject import DecPpiObject
from Object.Parser.DecObject import DecProtocolObject
from Object.Parser.DecObject import DecGuidItemObject
from Object.Parser.DecObject import DecUserExtensionObject
from Object.Parser.DecObject import DecUserExtensionItemObject
from Object.Parser.DecObject import DecPcdObject
from Object.Parser.DecObject import DecPcdItemObject
from Library.Misc import GuidStructureStringToGuidString
from Library.Misc import CheckGuidRegFormat
from Library.StringUtils import ReplaceMacro
from Library.StringUtils import GetSplitValueList
from Library.StringUtils import gMACRO_PATTERN
from Library.StringUtils import ConvertSpecialChar
from Library.CommentParsing import ParsePcdErrorCode
##
# _DecBase class for parsing
#
class _DecBase:
def __init__(self, RawData):
self._RawData = RawData
self._ItemDict = {}
self._LocalMacro = {}
#
# Data parsed by 'self' are saved to this object
#
self.ItemObject = None
def GetDataObject(self):
return self.ItemObject
def GetLocalMacro(self):
return self._LocalMacro
## BlockStart
#
# Called if a new section starts
#
def BlockStart(self):
self._LocalMacro = {}
## _CheckReDefine
#
# @param Key: key to be checked for multiple definitions
# @param Scope: Format: [[SectionName, Arch], ...].
# If Scope is None, the current scope is used
#
def _CheckReDefine(self, Key, Scope = None):
if not Scope:
Scope = self._RawData.CurrentScope
return
SecArch = []
#
# Copy scope to SecArch, avoid Scope be changed outside
#
SecArch[0:1] = Scope[:]
if Key not in self._ItemDict:
self._ItemDict[Key] = [[SecArch, self._RawData.LineIndex]]
return
for Value in self._ItemDict[Key]:
for SubValue in Scope:
#
# If current is common section
#
if SubValue[-1] == 'COMMON':
for Other in Value[0]:
# Key in common cannot be redefined in other arches
# [:-1] means stripping arch info
if Other[:-1] == SubValue[:-1]:
self._LoggerError(ST.ERR_DECPARSE_REDEFINE % (Key, Value[1]))
return
continue
CommonScope = []
CommonScope[0:1] = SubValue
CommonScope[-1] = 'COMMON'
#
# Cannot be redefined if this key already defined in COMMON Or defined in same arch
#
if SubValue in Value[0] or CommonScope in Value[0]:
self._LoggerError(ST.ERR_DECPARSE_REDEFINE % (Key, Value[1]))
return
self._ItemDict[Key].append([SecArch, self._RawData.LineIndex])
## CheckRequiredFields
# Some sections need to check whether required fields exist, the define section for example.
# Derived classes can re-implement this; the top parser calls it after all parsing is done
#
def CheckRequiredFields(self):
if self._RawData:
pass
return True
## IsItemRequired
# In DEC spec, sections must have at least one statement except user
# extension.
# For example: "[guids" [<attribs>] "]" <EOL> <statements>+
# sub class can override this method to indicate if statement is a must.
#
def _IsStatementRequired(self):
if self._RawData:
pass
return False
def _LoggerError(self, ErrorString):
Logger.Error(TOOL_NAME, FILE_PARSE_FAILURE, File=self._RawData.Filename,
Line = self._RawData.LineIndex,
ExtraData=ErrorString + ST.ERR_DECPARSE_LINE % self._RawData.CurrentLine)
def _ReplaceMacro(self, String):
if gMACRO_PATTERN.findall(String):
String = ReplaceMacro(String, self._LocalMacro, False,
FileName = self._RawData.Filename,
Line = ['', self._RawData.LineIndex])
String = ReplaceMacro(String, self._RawData.Macros, False,
FileName = self._RawData.Filename,
Line = ['', self._RawData.LineIndex])
MacroUsed = gMACRO_PATTERN.findall(String)
if MacroUsed:
Logger.Error(TOOL_NAME, FILE_PARSE_FAILURE,
File=self._RawData.Filename,
Line = self._RawData.LineIndex,
ExtraData = ST.ERR_DECPARSE_MACRO_RESOLVE % (str(MacroUsed), String))
return String
def _MacroParser(self, String):
TokenList = GetSplitValueList(String, ' ', 1)
if len(TokenList) < 2 or TokenList[1] == '':
self._LoggerError(ST.ERR_DECPARSE_MACRO_PAIR)
TokenList = GetSplitValueList(TokenList[1], DT.TAB_EQUAL_SPLIT, 1)
if TokenList[0] == '':
self._LoggerError(ST.ERR_DECPARSE_MACRO_NAME)
elif not IsValidToken(MACRO_PATTERN, TokenList[0]):
self._LoggerError(ST.ERR_DECPARSE_MACRO_NAME_UPPER % TokenList[0])
if len(TokenList) == 1:
self._LocalMacro[TokenList[0]] = ''
else:
self._LocalMacro[TokenList[0]] = self._ReplaceMacro(TokenList[1])
## _ParseItem
#
# Parse specified item, this function must be derived by subclass
#
def _ParseItem(self):
if self._RawData:
pass
#
# Should never be called
#
return None
## _TailCommentStrategy
#
# This function can be overridden to parse tail comments;
# by default it does not consume any lines
#
# @param Comment: Comment of current line
#
def _TailCommentStrategy(self, Comment):
if Comment:
pass
if self._RawData:
pass
return False
## _StopCurrentParsing
#
# Called in Parse to decide whether the current parsing should stop when a
# certain keyword is encountered
# The default stops at section start and end
#
# @param Line: Current line
#
def _StopCurrentParsing(self, Line):
if self._RawData:
pass
return Line[0] == DT.TAB_SECTION_START and Line[-1] == DT.TAB_SECTION_END
## _TryBackSlash
#
# Split comment and DEC content, concatenating lines when a line ends with '\'
#
# @param ProcessedLine: the processed line
# @param ProcessedComments: the processed comment list
#
def _TryBackSlash(self, ProcessedLine, ProcessedComments):
CatLine = ''
Comment = ''
Line = ProcessedLine
CommentList = ProcessedComments
while not self._RawData.IsEndOfFile():
if Line == '':
self._LoggerError(ST.ERR_DECPARSE_BACKSLASH_EMPTY)
break
if Comment:
CommentList.append((Comment, self._RawData.LineIndex))
if Line[-1] != DT.TAB_SLASH:
CatLine += Line
break
elif len(Line) < 2 or Line[-2] != ' ':
self._LoggerError(ST.ERR_DECPARSE_BACKSLASH)
else:
CatLine += Line[:-1]
Line, Comment = CleanString(self._RawData.GetNextLine())
#
# Reach end of content
#
if self._RawData.IsEndOfFile():
if not CatLine:
if ProcessedLine[-1] == DT.TAB_SLASH:
self._LoggerError(ST.ERR_DECPARSE_BACKSLASH_EMPTY)
CatLine = ProcessedLine
else:
if not Line or Line[-1] == DT.TAB_SLASH:
self._LoggerError(ST.ERR_DECPARSE_BACKSLASH_EMPTY)
CatLine += Line
#
# All MACRO values defined by the DEFINE statements in any section
# (except [Userextensions] sections for Intel) of the INF or DEC file
# must be expanded before processing of the file.
#
__IsReplaceMacro = True
Header = self._RawData.CurrentScope[0] if self._RawData.CurrentScope else None
if Header and len(Header) > 2:
if Header[0].upper() == 'USEREXTENSIONS' and not (Header[1] == 'TianoCore' and Header[2] == '"ExtraFiles"'):
__IsReplaceMacro = False
if __IsReplaceMacro:
self._RawData.CurrentLine = self._ReplaceMacro(CatLine)
else:
self._RawData.CurrentLine = CatLine
return CatLine, CommentList
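#
# Illustrative sketch (hypothetical DEC content): the two physical lines
#
#   gTokenSpaceGuid.PcdFoo|"abc"| \
#   VOID*|0x00000001
#
# are concatenated into one logical line before macro expansion, because the
# first line ends with ' \' (a space is required before the backslash).
#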
## Parse
# This is a template method that calls other member functions, which may be
# overridden by subclasses. It is responsible for reading the file line by
# line and calling other member functions to parse it. This function should
# not be re-implemented by subclasses.
#
def Parse(self):
HeadComments = []
TailComments = []
#======================================================================
# CurComments may point to HeadComments or TailComments
#======================================================================
CurComments = HeadComments
CurObj = None
ItemNum = 0
FromBuf = False
#======================================================================
# Used to report error information if empty section found
#======================================================================
Index = self._RawData.LineIndex
LineStr = self._RawData.CurrentLine
while not self._RawData.IsEndOfFile() or self._RawData.NextLine:
if self._RawData.NextLine:
#==============================================================
# Have processed line in buffer
#==============================================================
Line = self._RawData.NextLine
HeadComments.extend(self._RawData.HeadComment)
TailComments.extend(self._RawData.TailComment)
self._RawData.ResetNext()
Comment = ''
FromBuf = True
else:
#==============================================================
# No line in buffer, read next line
#==============================================================
Line, Comment = CleanString(self._RawData.GetNextLine())
FromBuf = False
if Line:
if not FromBuf and CurObj and TailComments:
#==========================================================
# Set tail comments to previous statement if not empty.
#==========================================================
CurObj.SetTailComment(CurObj.GetTailComment()+TailComments)
if not FromBuf:
del TailComments[:]
CurComments = TailComments
Comments = []
if Comment:
Comments = [(Comment, self._RawData.LineIndex)]
#==============================================================
# Try if last char of line has backslash
#==============================================================
Line, Comments = self._TryBackSlash(Line, Comments)
CurComments.extend(Comments)
#==============================================================
# Macro found
#==============================================================
if Line.startswith('DEFINE '):
self._MacroParser(Line)
del HeadComments[:]
del TailComments[:]
CurComments = HeadComments
continue
if self._StopCurrentParsing(Line):
#==========================================================
# This line does not belong to this parse,
# Save it, can be used by next parse
#==========================================================
self._RawData.SetNext(Line, HeadComments, TailComments)
break
Obj = self._ParseItem()
ItemNum += 1
if Obj:
Obj.SetHeadComment(Obj.GetHeadComment()+HeadComments)
Obj.SetTailComment(Obj.GetTailComment()+TailComments)
del HeadComments[:]
del TailComments[:]
CurObj = Obj
else:
CurObj = None
else:
if id(CurComments) == id(TailComments):
#==========================================================
# Check if this comment belongs to tail comment
#==========================================================
if not self._TailCommentStrategy(Comment):
CurComments = HeadComments
if Comment:
CurComments.append(((Comment, self._RawData.LineIndex)))
else:
del CurComments[:]
if self._IsStatementRequired() and ItemNum == 0:
Logger.Error(
TOOL_NAME, FILE_PARSE_FAILURE,
File=self._RawData.Filename,
Line=Index,
ExtraData=ST.ERR_DECPARSE_STATEMENT_EMPTY % LineStr
)
## _DecDefine
# Parse define section
#
class _DecDefine(_DecBase):
def __init__(self, RawData):
_DecBase.__init__(self, RawData)
self.ItemObject = DecDefineObject(RawData.Filename)
self._LocalMacro = self._RawData.Macros
self._DefSecNum = 0
#
# Each field has a function to validate
#
self.DefineValidation = {
DT.TAB_DEC_DEFINES_DEC_SPECIFICATION : self._SetDecSpecification,
DT.TAB_DEC_DEFINES_PACKAGE_NAME : self._SetPackageName,
DT.TAB_DEC_DEFINES_PACKAGE_GUID : self._SetPackageGuid,
DT.TAB_DEC_DEFINES_PACKAGE_VERSION : self._SetPackageVersion,
DT.TAB_DEC_DEFINES_PKG_UNI_FILE : self._SetPackageUni,
}
def BlockStart(self):
self._DefSecNum += 1
if self._DefSecNum > 1:
self._LoggerError(ST.ERR_DECPARSE_DEFINE_MULTISEC)
## CheckRequiredFields
#
# Check required fields: DEC_SPECIFICATION, PACKAGE_NAME
# PACKAGE_GUID, PACKAGE_VERSION
#
def CheckRequiredFields(self):
Ret = False
if self.ItemObject.GetPackageSpecification() == '':
Logger.Error(TOOL_NAME, FILE_PARSE_FAILURE, File=self._RawData.Filename,
ExtraData=ST.ERR_DECPARSE_DEFINE_REQUIRED % DT.TAB_DEC_DEFINES_DEC_SPECIFICATION)
elif self.ItemObject.GetPackageName() == '':
Logger.Error(TOOL_NAME, FILE_PARSE_FAILURE, File=self._RawData.Filename,
ExtraData=ST.ERR_DECPARSE_DEFINE_REQUIRED % DT.TAB_DEC_DEFINES_PACKAGE_NAME)
elif self.ItemObject.GetPackageGuid() == '':
Logger.Error(TOOL_NAME, FILE_PARSE_FAILURE, File=self._RawData.Filename,
ExtraData=ST.ERR_DECPARSE_DEFINE_REQUIRED % DT.TAB_DEC_DEFINES_PACKAGE_GUID)
elif self.ItemObject.GetPackageVersion() == '':
Logger.Error(TOOL_NAME, FILE_PARSE_FAILURE, File=self._RawData.Filename,
ExtraData=ST.ERR_DECPARSE_DEFINE_REQUIRED % DT.TAB_DEC_DEFINES_PACKAGE_VERSION)
else:
Ret = True
return Ret
def _ParseItem(self):
Line = self._RawData.CurrentLine
TokenList = GetSplitValueList(Line, DT.TAB_EQUAL_SPLIT, 1)
if TokenList[0] == DT.TAB_DEC_DEFINES_PKG_UNI_FILE:
self.DefineValidation[TokenList[0]](TokenList[1])
elif len(TokenList) < 2:
self._LoggerError(ST.ERR_DECPARSE_DEFINE_FORMAT)
elif TokenList[0] not in self.DefineValidation:
self._LoggerError(ST.ERR_DECPARSE_DEFINE_UNKNOWKEY % TokenList[0])
else:
self.DefineValidation[TokenList[0]](TokenList[1])
DefineItem = DecDefineItemObject()
DefineItem.Key = TokenList[0]
DefineItem.Value = TokenList[1]
self.ItemObject.AddItem(DefineItem, self._RawData.CurrentScope)
return DefineItem
def _SetDecSpecification(self, Token):
if self.ItemObject.GetPackageSpecification():
self._LoggerError(ST.ERR_DECPARSE_DEFINE_DEFINED % DT.TAB_DEC_DEFINES_DEC_SPECIFICATION)
if not IsValidToken('0[xX][0-9a-fA-F]{8}', Token):
if not IsValidDecVersionVal(Token):
self._LoggerError(ST.ERR_DECPARSE_DEFINE_SPEC)
self.ItemObject.SetPackageSpecification(Token)
def _SetPackageName(self, Token):
if self.ItemObject.GetPackageName():
self._LoggerError(ST.ERR_DECPARSE_DEFINE_DEFINED % DT.TAB_DEC_DEFINES_PACKAGE_NAME)
if not IsValidWord(Token):
self._LoggerError(ST.ERR_DECPARSE_DEFINE_PKGNAME)
self.ItemObject.SetPackageName(Token)
def _SetPackageGuid(self, Token):
if self.ItemObject.GetPackageGuid():
self._LoggerError(ST.ERR_DECPARSE_DEFINE_DEFINED % DT.TAB_DEC_DEFINES_PACKAGE_GUID)
if not CheckGuidRegFormat(Token):
self._LoggerError(ST.ERR_DECPARSE_DEFINE_PKGGUID)
self.ItemObject.SetPackageGuid(Token)
def _SetPackageVersion(self, Token):
if self.ItemObject.GetPackageVersion():
self._LoggerError(ST.ERR_DECPARSE_DEFINE_DEFINED % DT.TAB_DEC_DEFINES_PACKAGE_VERSION)
if not IsValidToken(VERSION_PATTERN, Token):
self._LoggerError(ST.ERR_DECPARSE_DEFINE_PKGVERSION)
else:
if not DT.TAB_SPLIT in Token:
Token = Token + '.0'
self.ItemObject.SetPackageVersion(Token)
def _SetPackageUni(self, Token):
if self.ItemObject.GetPackageUniFile():
self._LoggerError(ST.ERR_DECPARSE_DEFINE_DEFINED % DT.TAB_DEC_DEFINES_PKG_UNI_FILE)
self.ItemObject.SetPackageUniFile(Token)
## _DecInclude
#
# Parse include section
#
class _DecInclude(_DecBase):
def __init__(self, RawData):
_DecBase.__init__(self, RawData)
self.ItemObject = DecIncludeObject(RawData.Filename)
def _ParseItem(self):
Line = self._RawData.CurrentLine
if not IsValidPath(Line, self._RawData.PackagePath):
self._LoggerError(ST.ERR_DECPARSE_INCLUDE % Line)
Item = DecIncludeItemObject(StripRoot(self._RawData.PackagePath, Line), self._RawData.PackagePath)
self.ItemObject.AddItem(Item, self._RawData.CurrentScope)
return Item
## _DecLibraryclass
#
# Parse library class section
#
class _DecLibraryclass(_DecBase):
def __init__(self, RawData):
_DecBase.__init__(self, RawData)
self.ItemObject = DecLibraryclassObject(RawData.Filename)
def _ParseItem(self):
Line = self._RawData.CurrentLine
TokenList = GetSplitValueList(Line, DT.TAB_VALUE_SPLIT)
if len(TokenList) != 2:
self._LoggerError(ST.ERR_DECPARSE_LIBCLASS_SPLIT)
if TokenList[0] == '' or TokenList[1] == '':
self._LoggerError(ST.ERR_DECPARSE_LIBCLASS_EMPTY)
if not IsValidToken('[A-Z][0-9A-Za-z]*', TokenList[0]):
self._LoggerError(ST.ERR_DECPARSE_LIBCLASS_LIB)
self._CheckReDefine(TokenList[0])
Value = TokenList[1]
#
# Must end with .h
#
if not Value.endswith('.h'):
self._LoggerError(ST.ERR_DECPARSE_LIBCLASS_PATH_EXT)
#
# Path must be existed
#
if not IsValidPath(Value, self._RawData.PackagePath):
self._LoggerError(ST.ERR_DECPARSE_INCLUDE % Value)
Item = DecLibraryclassItemObject(TokenList[0], StripRoot(self._RawData.PackagePath, Value),
self._RawData.PackagePath)
self.ItemObject.AddItem(Item, self._RawData.CurrentScope)
return Item
## _DecPcd
#
# Parse PCD section
#
class _DecPcd(_DecBase):
def __init__(self, RawData):
_DecBase.__init__(self, RawData)
self.ItemObject = DecPcdObject(RawData.Filename)
#
# Used to check duplicate token
# Key is token space and token number (integer), value is C name
#
self.TokenMap = {}
def _ParseItem(self):
Line = self._RawData.CurrentLine
TokenList = Line.split(DT.TAB_VALUE_SPLIT)
if len(TokenList) < 4:
self._LoggerError(ST.ERR_DECPARSE_PCD_SPLIT)
#
# Token space guid C name
#
PcdName = GetSplitValueList(TokenList[0], DT.TAB_SPLIT)
if len(PcdName) != 2 or PcdName[0] == '' or PcdName[1] == '':
self._LoggerError(ST.ERR_DECPARSE_PCD_NAME)
Guid = PcdName[0]
if not IsValidToken(CVAR_PATTERN, Guid):
self._LoggerError(ST.ERR_DECPARSE_PCD_CVAR_GUID)
#
# PCD C name
#
CName = PcdName[1]
if not IsValidToken(CVAR_PATTERN, CName):
self._LoggerError(ST.ERR_DECPARSE_PCD_CVAR_PCDCNAME)
self._CheckReDefine(Guid + DT.TAB_SPLIT + CName)
#
# Default value, may be C array, string or number
#
Data = DT.TAB_VALUE_SPLIT.join(TokenList[1:-2]).strip()
#
# PCD data type
#
DataType = TokenList[-2].strip()
Valid, Cause = IsValidPcdDatum(DataType, Data)
if not Valid:
self._LoggerError(Cause)
PcdType = self._RawData.CurrentScope[0][0]
if PcdType == DT.TAB_PCDS_FEATURE_FLAG_NULL.upper() and DataType != 'BOOLEAN':
self._LoggerError(ST.ERR_DECPARSE_PCD_FEATUREFLAG)
#
# Token value is the last element in list.
#
Token = TokenList[-1].strip()
if not IsValidToken(PCD_TOKEN_PATTERN, Token):
self._LoggerError(ST.ERR_DECPARSE_PCD_TOKEN % Token)
elif not Token.startswith('0x') and not Token.startswith('0X'):
if int(Token) > 4294967295:
self._LoggerError(ST.ERR_DECPARSE_PCD_TOKEN_INT % Token)
Token = '0x%x' % int(Token)
IntToken = int(Token, 0)
if (Guid, IntToken) in self.TokenMap:
if self.TokenMap[Guid, IntToken] != CName:
self._LoggerError(ST.ERR_DECPARSE_PCD_TOKEN_UNIQUE%(Token))
else:
self.TokenMap[Guid, IntToken] = CName
Item = DecPcdItemObject(Guid, CName, Data, DataType, Token)
self.ItemObject.AddItem(Item, self._RawData.CurrentScope)
return Item
## _DecGuid
#
# Parse GUID, PPI, Protocol section
#
class _DecGuid(_DecBase):
def __init__(self, RawData):
_DecBase.__init__(self, RawData)
self.GuidObj = DecGuidObject(RawData.Filename)
self.PpiObj = DecPpiObject(RawData.Filename)
self.ProtocolObj = DecProtocolObject(RawData.Filename)
self.ObjectDict = \
{
DT.TAB_GUIDS.upper() : self.GuidObj,
DT.TAB_PPIS.upper() : self.PpiObj,
DT.TAB_PROTOCOLS.upper() : self.ProtocolObj
}
def GetDataObject(self):
if self._RawData.CurrentScope:
return self.ObjectDict[self._RawData.CurrentScope[0][0]]
return None
def GetGuidObject(self):
return self.GuidObj
def GetPpiObject(self):
return self.PpiObj
def GetProtocolObject(self):
return self.ProtocolObj
def _ParseItem(self):
Line = self._RawData.CurrentLine
TokenList = GetSplitValueList(Line, DT.TAB_EQUAL_SPLIT, 1)
if len(TokenList) < 2:
self._LoggerError(ST.ERR_DECPARSE_CGUID)
if TokenList[0] == '':
self._LoggerError(ST.ERR_DECPARSE_CGUID_NAME)
if TokenList[1] == '':
self._LoggerError(ST.ERR_DECPARSE_CGUID_GUID)
if not IsValidToken(CVAR_PATTERN, TokenList[0]):
self._LoggerError(ST.ERR_DECPARSE_PCD_CVAR_GUID)
self._CheckReDefine(TokenList[0])
if TokenList[1][0] != '{':
if not CheckGuidRegFormat(TokenList[1]):
self._LoggerError(ST.ERR_DECPARSE_DEFINE_PKGGUID)
GuidString = TokenList[1]
else:
#
# Convert C format GUID to GUID string and Simple error check
#
GuidString = GuidStructureStringToGuidString(TokenList[1])
if TokenList[1][0] != '{' or TokenList[1][-1] != '}' or GuidString == '':
self._LoggerError(ST.ERR_DECPARSE_CGUID_GUIDFORMAT)
#
# Check C format GUID
#
if not IsValidCFormatGuid(TokenList[1]):
self._LoggerError(ST.ERR_DECPARSE_CGUID_GUIDFORMAT)
Item = DecGuidItemObject(TokenList[0], TokenList[1], GuidString)
ItemObject = self.ObjectDict[self._RawData.CurrentScope[0][0]]
ItemObject.AddItem(Item, self._RawData.CurrentScope)
return Item
## _DecUserExtension
#
# Parse user extension section
#
class _DecUserExtension(_DecBase):
def __init__(self, RawData):
_DecBase.__init__(self, RawData)
self.ItemObject = DecUserExtensionObject(RawData.Filename)
self._Headers = []
self._CurItems = []
def BlockStart(self):
self._CurItems = []
for Header in self._RawData.CurrentScope:
if Header in self._Headers:
self._LoggerError(ST.ERR_DECPARSE_UE_DUPLICATE)
else:
self._Headers.append(Header)
for Item in self._CurItems:
if Item.UserId == Header[1] and Item.IdString == Header[2]:
Item.ArchAndModuleType.append(Header[3])
break
else:
Item = DecUserExtensionItemObject()
Item.UserId = Header[1]
Item.IdString = Header[2]
Item.ArchAndModuleType.append(Header[3])
self._CurItems.append(Item)
self.ItemObject.AddItem(Item, None)
self._LocalMacro = {}
def _ParseItem(self):
Line = self._RawData.CurrentLine
Item = None
for Item in self._CurItems:
if Item.UserString:
Item.UserString = '\n'.join([Item.UserString, Line])
else:
Item.UserString = Line
return Item
## Dec
#
# Top dec parser
#
class Dec(_DecBase, _DecComments):
def __init__(self, DecFile, Parse = True):
try:
Content = ConvertSpecialChar(open(DecFile, 'r').readlines())
except BaseException:
Logger.Error(TOOL_NAME, FILE_OPEN_FAILURE, File=DecFile,
ExtraData=ST.ERR_DECPARSE_FILEOPEN % DecFile)
#
# Pre-parser for Private section
#
self._Private = ''
__IsFoundPrivate = False
NewContent = []
for Line in Content:
Line = Line.strip()
if Line.startswith(DT.TAB_SECTION_START) and Line.endswith(DT.TAB_PRIVATE + DT.TAB_SECTION_END):
__IsFoundPrivate = True
if Line.startswith(DT.TAB_SECTION_START) and Line.endswith(DT.TAB_SECTION_END)\
and not Line.endswith(DT.TAB_PRIVATE + DT.TAB_SECTION_END):
__IsFoundPrivate = False
if __IsFoundPrivate:
self._Private += Line + '\r'
if not __IsFoundPrivate:
NewContent.append(Line + '\r')
RawData = FileContent(DecFile, NewContent)
_DecComments.__init__(self)
_DecBase.__init__(self, RawData)
self.BinaryHeadComment = []
self.PcdErrorCommentDict = {}
self._Define = _DecDefine(RawData)
self._Include = _DecInclude(RawData)
self._Guid = _DecGuid(RawData)
self._LibClass = _DecLibraryclass(RawData)
self._Pcd = _DecPcd(RawData)
self._UserEx = _DecUserExtension(RawData)
#
# DEC file supported data types (one type per section)
#
self._SectionParser = {
DT.TAB_DEC_DEFINES.upper() : self._Define,
DT.TAB_INCLUDES.upper() : self._Include,
DT.TAB_LIBRARY_CLASSES.upper() : self._LibClass,
DT.TAB_GUIDS.upper() : self._Guid,
DT.TAB_PPIS.upper() : self._Guid,
DT.TAB_PROTOCOLS.upper() : self._Guid,
DT.TAB_PCDS_FIXED_AT_BUILD_NULL.upper() : self._Pcd,
DT.TAB_PCDS_PATCHABLE_IN_MODULE_NULL.upper() : self._Pcd,
DT.TAB_PCDS_FEATURE_FLAG_NULL.upper() : self._Pcd,
DT.TAB_PCDS_DYNAMIC_NULL.upper() : self._Pcd,
DT.TAB_PCDS_DYNAMIC_EX_NULL.upper() : self._Pcd,
DT.TAB_USER_EXTENSIONS.upper() : self._UserEx
}
if Parse:
self.ParseDecComment()
self.Parse()
#
# Parsing done, check required fields
#
self.CheckRequiredFields()
def CheckRequiredFields(self):
for SectionParser in self._SectionParser.values():
if not SectionParser.CheckRequiredFields():
return False
return True
##
# Parse DEC file
#
def ParseDecComment(self):
IsFileHeader = False
IsBinaryHeader = False
FileHeaderLineIndex = -1
BinaryHeaderLineIndex = -1
TokenSpaceGuidCName = ''
#
# Parse PCD error comment section
#
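        # An illustrative example of the comment form recognized below
        # (the GUID name is hypothetical):
        #   # [PcdError.gEfiMdePkgTokenSpaceGuid]
        #   #   0x80000001 | Incorrect value
        #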
while not self._RawData.IsEndOfFile():
self._RawData.CurrentLine = self._RawData.GetNextLine()
if self._RawData.CurrentLine.startswith(DT.TAB_COMMENT_SPLIT) and \
DT.TAB_SECTION_START in self._RawData.CurrentLine and \
DT.TAB_SECTION_END in self._RawData.CurrentLine:
self._RawData.CurrentLine = self._RawData.CurrentLine.replace(DT.TAB_COMMENT_SPLIT, '').strip()
if self._RawData.CurrentLine[0] == DT.TAB_SECTION_START and \
self._RawData.CurrentLine[-1] == DT.TAB_SECTION_END:
RawSection = self._RawData.CurrentLine[1:-1].strip()
if RawSection.upper().startswith(DT.TAB_PCD_ERROR.upper()+'.'):
TokenSpaceGuidCName = RawSection.split(DT.TAB_PCD_ERROR+'.')[1].strip()
continue
if TokenSpaceGuidCName and self._RawData.CurrentLine.startswith(DT.TAB_COMMENT_SPLIT):
self._RawData.CurrentLine = self._RawData.CurrentLine.replace(DT.TAB_COMMENT_SPLIT, '').strip()
if self._RawData.CurrentLine != '':
if DT.TAB_VALUE_SPLIT not in self._RawData.CurrentLine:
self._LoggerError(ST.ERR_DECPARSE_PCDERRORMSG_MISS_VALUE_SPLIT)
PcdErrorNumber, PcdErrorMsg = GetSplitValueList(self._RawData.CurrentLine, DT.TAB_VALUE_SPLIT, 1)
PcdErrorNumber = ParsePcdErrorCode(PcdErrorNumber, self._RawData.Filename, self._RawData.LineIndex)
if not PcdErrorMsg.strip():
self._LoggerError(ST.ERR_DECPARSE_PCD_MISS_ERRORMSG)
self.PcdErrorCommentDict[(TokenSpaceGuidCName, PcdErrorNumber)] = PcdErrorMsg.strip()
else:
TokenSpaceGuidCName = ''
self._RawData.LineIndex = 0
self._RawData.CurrentLine = ''
self._RawData.NextLine = ''
while not self._RawData.IsEndOfFile():
Line, Comment = CleanString(self._RawData.GetNextLine())
#
# Header must be pure comment
#
if Line != '':
self._RawData.UndoNextLine()
break
if Comment and Comment.startswith(DT.TAB_SPECIAL_COMMENT) and Comment.find(DT.TAB_HEADER_COMMENT) > 0 \
and not Comment[2:Comment.find(DT.TAB_HEADER_COMMENT)].strip():
IsFileHeader = True
IsBinaryHeader = False
FileHeaderLineIndex = self._RawData.LineIndex
#
# Get license information before '@file'
#
if not IsFileHeader and not IsBinaryHeader and Comment and Comment.startswith(DT.TAB_COMMENT_SPLIT) and \
DT.TAB_BINARY_HEADER_COMMENT not in Comment:
self._HeadComment.append((Comment, self._RawData.LineIndex))
if Comment and IsFileHeader and \
not(Comment.startswith(DT.TAB_SPECIAL_COMMENT) \
and Comment.find(DT.TAB_BINARY_HEADER_COMMENT) > 0):
self._HeadComment.append((Comment, self._RawData.LineIndex))
#
# Double '#' indicates end of header comments
#
if (not Comment or Comment == DT.TAB_SPECIAL_COMMENT) and IsFileHeader:
IsFileHeader = False
continue
if Comment and Comment.startswith(DT.TAB_SPECIAL_COMMENT) \
and Comment.find(DT.TAB_BINARY_HEADER_COMMENT) > 0:
IsBinaryHeader = True
IsFileHeader = False
BinaryHeaderLineIndex = self._RawData.LineIndex
if Comment and IsBinaryHeader:
self.BinaryHeadComment.append((Comment, self._RawData.LineIndex))
#
# Double '#' indicates end of header comments
#
if (not Comment or Comment == DT.TAB_SPECIAL_COMMENT) and IsBinaryHeader:
IsBinaryHeader = False
break
if FileHeaderLineIndex > -1 and not IsFileHeader and not IsBinaryHeader:
break
if FileHeaderLineIndex > BinaryHeaderLineIndex and FileHeaderLineIndex > -1 and BinaryHeaderLineIndex > -1:
self._LoggerError(ST.ERR_BINARY_HEADER_ORDER)
if FileHeaderLineIndex == -1:
# self._LoggerError(ST.ERR_NO_SOURCE_HEADER)
Logger.Error(TOOL_NAME, FORMAT_INVALID,
ST.ERR_NO_SOURCE_HEADER,
File=self._RawData.Filename)
return
def _StopCurrentParsing(self, Line):
return False
def _ParseItem(self):
self._SectionHeaderParser()
if len(self._RawData.CurrentScope) == 0:
self._LoggerError(ST.ERR_DECPARSE_SECTION_EMPTY)
SectionObj = self._SectionParser[self._RawData.CurrentScope[0][0]]
SectionObj.BlockStart()
SectionObj.Parse()
return SectionObj.GetDataObject()
def _UserExtentionSectionParser(self):
self._RawData.CurrentScope = []
ArchList = set()
Section = self._RawData.CurrentLine[1:-1]
Par = ParserHelper(Section, self._RawData.Filename)
while not Par.End():
#
            # User extension
#
Token = Par.GetToken()
if Token.upper() != DT.TAB_USER_EXTENSIONS.upper():
self._LoggerError(ST.ERR_DECPARSE_SECTION_UE)
UserExtension = Token.upper()
Par.AssertChar(DT.TAB_SPLIT, ST.ERR_DECPARSE_SECTION_UE, self._RawData.LineIndex)
#
# UserID
#
Token = Par.GetToken()
if not IsValidUserId(Token):
self._LoggerError(ST.ERR_DECPARSE_SECTION_UE_USERID)
UserId = Token
Par.AssertChar(DT.TAB_SPLIT, ST.ERR_DECPARSE_SECTION_UE, self._RawData.LineIndex)
#
# IdString
#
Token = Par.GetToken()
if not IsValidIdString(Token):
self._LoggerError(ST.ERR_DECPARSE_SECTION_UE_IDSTRING)
IdString = Token
Arch = 'COMMON'
if Par.Expect(DT.TAB_SPLIT):
Token = Par.GetToken()
Arch = Token.upper()
if not IsValidArch(Arch):
self._LoggerError(ST.ERR_DECPARSE_ARCH)
ArchList.add(Arch)
if [UserExtension, UserId, IdString, Arch] not in \
self._RawData.CurrentScope:
self._RawData.CurrentScope.append(
[UserExtension, UserId, IdString, Arch]
)
if not Par.Expect(DT.TAB_COMMA_SPLIT):
break
elif Par.End():
self._LoggerError(ST.ERR_DECPARSE_SECTION_COMMA)
Par.AssertEnd(ST.ERR_DECPARSE_SECTION_UE, self._RawData.LineIndex)
if 'COMMON' in ArchList and len(ArchList) > 1:
self._LoggerError(ST.ERR_DECPARSE_SECTION_COMMON)
## Section header parser
#
# The section header is always in following format:
#
# [section_name.arch<.platform|module_type>]
#
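    # Illustrative example: '[Guids.IA32, Guids.X64]' produces the scope
    # [['GUIDS', 'IA32'], ['GUIDS', 'X64']], while '[Guids, Guids.IA32]' is
    # rejected because COMMON must not be mixed with specific ARCHs.
    #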
def _SectionHeaderParser(self):
if self._RawData.CurrentLine[0] != DT.TAB_SECTION_START or self._RawData.CurrentLine[-1] != DT.TAB_SECTION_END:
self._LoggerError(ST.ERR_DECPARSE_SECTION_IDENTIFY)
RawSection = self._RawData.CurrentLine[1:-1].strip().upper()
#
        # Check the defines section, which may occur only once and
        # must not be followed by an arch
#
if RawSection.startswith(DT.TAB_DEC_DEFINES.upper()):
if RawSection != DT.TAB_DEC_DEFINES.upper():
self._LoggerError(ST.ERR_DECPARSE_DEFINE_SECNAME)
#
# Check user extension section
#
if RawSection.startswith(DT.TAB_USER_EXTENSIONS.upper()):
return self._UserExtentionSectionParser()
self._RawData.CurrentScope = []
SectionNames = []
ArchList = set()
for Item in GetSplitValueList(RawSection, DT.TAB_COMMA_SPLIT):
if Item == '':
self._LoggerError(ST.ERR_DECPARSE_SECTION_SUBEMPTY % self._RawData.CurrentLine)
ItemList = GetSplitValueList(Item, DT.TAB_SPLIT)
#
# different types of PCD are permissible in one section
#
SectionName = ItemList[0]
if SectionName not in self._SectionParser:
self._LoggerError(ST.ERR_DECPARSE_SECTION_UNKNOW % SectionName)
if SectionName not in SectionNames:
SectionNames.append(SectionName)
#
            # In the DEC specification, all section headers have at most two parts:
# SectionName.Arch except UserExtension
#
if len(ItemList) > 2:
self._LoggerError(ST.ERR_DECPARSE_SECTION_SUBTOOMANY % Item)
if DT.TAB_PCDS_FEATURE_FLAG_NULL.upper() in SectionNames and len(SectionNames) > 1:
self._LoggerError(ST.ERR_DECPARSE_SECTION_FEATUREFLAG % DT.TAB_PCDS_FEATURE_FLAG_NULL)
#
# S1 is always Arch
#
if len(ItemList) > 1:
Str1 = ItemList[1]
if not IsValidArch(Str1):
self._LoggerError(ST.ERR_DECPARSE_ARCH)
else:
Str1 = 'COMMON'
ArchList.add(Str1)
if [SectionName, Str1] not in self._RawData.CurrentScope:
self._RawData.CurrentScope.append([SectionName, Str1])
#
# 'COMMON' must not be used with specific ARCHs at the same section
#
if 'COMMON' in ArchList and len(ArchList) > 1:
self._LoggerError(ST.ERR_DECPARSE_SECTION_COMMON)
if len(SectionNames) == 0:
self._LoggerError(ST.ERR_DECPARSE_SECTION_SUBEMPTY % self._RawData.CurrentLine)
if len(SectionNames) != 1:
for Sec in SectionNames:
if not Sec.startswith(DT.TAB_PCDS.upper()):
self._LoggerError(ST.ERR_DECPARSE_SECTION_NAME % str(SectionNames))
def GetDefineSectionMacro(self):
return self._Define.GetLocalMacro()
def GetDefineSectionObject(self):
return self._Define.GetDataObject()
def GetIncludeSectionObject(self):
return self._Include.GetDataObject()
def GetGuidSectionObject(self):
return self._Guid.GetGuidObject()
def GetProtocolSectionObject(self):
return self._Guid.GetProtocolObject()
def GetPpiSectionObject(self):
return self._Guid.GetPpiObject()
def GetLibraryClassSectionObject(self):
return self._LibClass.GetDataObject()
def GetPcdSectionObject(self):
return self._Pcd.GetDataObject()
def GetUserExtensionSectionObject(self):
return self._UserEx.GetDataObject()
def GetPackageSpecification(self):
return self._Define.GetDataObject().GetPackageSpecification()
def GetPackageName(self):
return self._Define.GetDataObject().GetPackageName()
def GetPackageGuid(self):
return self._Define.GetDataObject().GetPackageGuid()
def GetPackageVersion(self):
return self._Define.GetDataObject().GetPackageVersion()
def GetPackageUniFile(self):
return self._Define.GetDataObject().GetPackageUniFile()
def GetPrivateSections(self):
return self._Private
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Parser/DecParser.py
|
## @file
# This file contained the parser for sections in INF file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
InfSectionParser
'''
##
# Import Modules
#
from copy import deepcopy
import re
from Library.StringUtils import GetSplitValueList
from Library.CommentParsing import ParseHeaderCommentSection
from Library.CommentParsing import ParseComment
from Library import DataType as DT
import Logger.Log as Logger
from Logger import StringTable as ST
from Logger.ToolError import FORMAT_INVALID
from Object.Parser.InfDefineObject import InfDefObject
from Object.Parser.InfBuildOptionObject import InfBuildOptionsObject
from Object.Parser.InfLibraryClassesObject import InfLibraryClassObject
from Object.Parser.InfPackagesObject import InfPackageObject
from Object.Parser.InfPcdObject import InfPcdObject
from Object.Parser.InfSoucesObject import InfSourcesObject
from Object.Parser.InfUserExtensionObject import InfUserExtensionObject
from Object.Parser.InfProtocolObject import InfProtocolObject
from Object.Parser.InfPpiObject import InfPpiObject
from Object.Parser.InfGuidObject import InfGuidObject
from Object.Parser.InfDepexObject import InfDepexObject
from Object.Parser.InfBinaryObject import InfBinariesObject
from Object.Parser.InfHeaderObject import InfHeaderObject
from Object.Parser.InfMisc import InfSpecialCommentObject
from Object.Parser.InfMisc import InfHobObject
from Object.Parser.InfMisc import InfBootModeObject
from Object.Parser.InfMisc import InfEventObject
from Parser.InfParserMisc import gINF_SECTION_DEF
from Parser.InfDefineSectionParser import InfDefinSectionParser
from Parser.InfBuildOptionSectionParser import InfBuildOptionSectionParser
from Parser.InfSourceSectionParser import InfSourceSectionParser
from Parser.InfLibrarySectionParser import InfLibrarySectionParser
from Parser.InfPackageSectionParser import InfPackageSectionParser
from Parser.InfGuidPpiProtocolSectionParser import InfGuidPpiProtocolSectionParser
from Parser.InfBinarySectionParser import InfBinarySectionParser
from Parser.InfPcdSectionParser import InfPcdSectionParser
from Parser.InfDepexSectionParser import InfDepexSectionParser
## GetSpecialStr2
#
# GetSpecialStr2
#
def GetSpecialStr2(ItemList, FileName, LineNo, SectionString):
Str2 = ''
#
# S2 may be Platform or ModuleType
#
if len(ItemList) == 3:
#
        # Only the [LibraryClasses], [Depex] and [UserExtensions]
        # section headers may have more than 2 items; others should
        # report an error.
#
if not (ItemList[0].upper() == DT.TAB_LIBRARY_CLASSES.upper() or \
ItemList[0].upper() == DT.TAB_DEPEX.upper() or \
ItemList[0].upper() == DT.TAB_USER_EXTENSIONS.upper()):
if ItemList[2] != '':
Logger.Error('Parser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_SOURCE_SECTION_SECTIONNAME_INVALID % (SectionString),
File=FileName,
Line=LineNo,
ExtraData=SectionString)
Str2 = ItemList[2]
elif len(ItemList) == 4:
#
        # Only a [UserExtensions] section header may have 4 items;
        # others should report an error.
#
if not ItemList[0].upper() == DT.TAB_USER_EXTENSIONS.upper() or ItemList[0].upper() == DT.TAB_DEPEX.upper():
if ItemList[3] != '':
Logger.Error('Parser', FORMAT_INVALID, ST.ERR_INF_PARSER_SOURCE_SECTION_SECTIONNAME_INVALID \
% (SectionString), File=FileName, Line=LineNo, ExtraData=SectionString)
if not ItemList[0].upper() == DT.TAB_USER_EXTENSIONS.upper():
Str2 = ItemList[2] + ' | ' + ItemList[3]
else:
Str2 = ItemList[2]
elif len(ItemList) > 4:
Logger.Error('Parser', FORMAT_INVALID, ST.ERR_INF_PARSER_SOURCE_SECTION_SECTIONNAME_INVALID \
% (SectionString), File=FileName, Line=LineNo, ExtraData=SectionString)
return Str2
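#
# Illustrative example (not part of the original source): for the header
# '[LibraryClasses.IA32.PEIM]' ItemList is ['LibraryClasses', 'IA32', 'PEIM']
# and GetSpecialStr2 returns 'PEIM'; a non-empty third field on a header such
# as '[Sources.IA32.Foo]' is reported as FORMAT_INVALID.
#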
## ProcessUseExtHeader
#
#
def ProcessUseExtHeader(ItemList):
NewItemList = []
AppendContent = ''
CompleteFlag = False
for Item in ItemList:
if Item.startswith('\"') and not Item.endswith('\"'):
AppendContent = Item
CompleteFlag = True
elif Item.endswith('\"') and not Item.startswith('\"'):
#
            # A userId or IdString must not end with '"' unless it also starts with '"'.
#
if not CompleteFlag:
return False, []
AppendContent = AppendContent + "." + Item
NewItemList.append(AppendContent)
CompleteFlag = False
AppendContent = ''
elif Item.endswith('\"') and Item.startswith('\"'):
#
            # Common item, no need to combine the information
#
NewItemList.append(Item)
else:
if not CompleteFlag:
NewItemList.append(Item)
else:
AppendContent = AppendContent + "." + Item
if len(NewItemList) > 4:
return False, []
return True, NewItemList
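#
# Illustrative example (hypothetical values): splitting
# '[UserExtensions.TianoCore."My.Id".IA32]' on '.' yields
# ['UserExtensions', 'TianoCore', '"My', 'Id"', 'IA32']; ProcessUseExtHeader
# rejoins the quoted pieces and returns
# (True, ['UserExtensions', 'TianoCore', '"My.Id"', 'IA32']).
#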
## GetArch
#
# GetArch
#
def GetArch(ItemList, ArchList, FileName, LineNo, SectionString):
#
# S1 is always Arch
#
if len(ItemList) > 1:
Arch = ItemList[1]
else:
Arch = 'COMMON'
ArchList.add(Arch)
#
# 'COMMON' must not be used with specific ARCHs at the same section
#
if 'COMMON' in ArchList and len(ArchList) > 1:
Logger.Error('Parser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_SECTION_ARCH_CONFLICT,
File=FileName,
Line=LineNo,
ExtraData=SectionString)
return Arch, ArchList
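#
# Illustrative example: for '[Guids.IA32, Guids.X64]' each call adds one arch,
# leaving ArchList == {'IA32', 'X64'}; '[Guids, Guids.IA32]' puts both 'COMMON'
# and 'IA32' in the set and triggers ERR_INF_PARSER_SECTION_ARCH_CONFLICT.
#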
## InfSectionParser
#
# Inherit from object
#
class InfSectionParser(InfDefinSectionParser,
InfBuildOptionSectionParser,
InfSourceSectionParser,
InfLibrarySectionParser,
InfPackageSectionParser,
InfGuidPpiProtocolSectionParser,
InfBinarySectionParser,
InfPcdSectionParser,
InfDepexSectionParser):
#
# Parser objects used to implement singleton
#
MetaFiles = {}
## Factory method
#
# One file, one parser object. This factory method makes sure that there's
# only one object constructed for one meta file.
#
# @param Class class object of real AutoGen class
# (InfParser, DecParser or DscParser)
# @param FilePath The path of meta file
#
def __new__(cls, FilePath, *args, **kwargs):
if args:
pass
if kwargs:
pass
if FilePath in cls.MetaFiles:
return cls.MetaFiles[FilePath]
else:
ParserObject = super(InfSectionParser, cls).__new__(cls)
cls.MetaFiles[FilePath] = ParserObject
return ParserObject
def __init__(self):
InfDefinSectionParser.__init__(self)
InfBuildOptionSectionParser.__init__(self)
InfSourceSectionParser.__init__(self)
InfLibrarySectionParser.__init__(self)
InfPackageSectionParser.__init__(self)
InfGuidPpiProtocolSectionParser.__init__(self)
InfBinarySectionParser.__init__(self)
InfPcdSectionParser.__init__(self)
InfDepexSectionParser.__init__(self)
#
        # Initialize all objects that an INF file will generate.
#
self.InfDefSection = InfDefObject()
self.InfBuildOptionSection = InfBuildOptionsObject()
self.InfLibraryClassSection = InfLibraryClassObject()
self.InfPackageSection = InfPackageObject()
self.InfPcdSection = InfPcdObject(list(self.MetaFiles.keys())[0])
self.InfSourcesSection = InfSourcesObject()
self.InfUserExtensionSection = InfUserExtensionObject()
self.InfProtocolSection = InfProtocolObject()
self.InfPpiSection = InfPpiObject()
self.InfGuidSection = InfGuidObject()
self.InfDepexSection = InfDepexObject()
self.InfPeiDepexSection = InfDepexObject()
self.InfDxeDepexSection = InfDepexObject()
self.InfSmmDepexSection = InfDepexObject()
self.InfBinariesSection = InfBinariesObject()
self.InfHeader = InfHeaderObject()
self.InfBinaryHeader = InfHeaderObject()
self.InfSpecialCommentSection = InfSpecialCommentObject()
#
        # A list to store define section content.
#
self._PcdNameList = []
self._SectionName = ''
self._SectionType = 0
self.RelaPath = ''
self.FileName = ''
#
# File Header content parser
#
def InfHeaderParser(self, Content, InfHeaderObject2, FileName, IsBinaryHeader = False):
if IsBinaryHeader:
(Abstract, Description, Copyright, License) = ParseHeaderCommentSection(Content, FileName, True)
if not Abstract or not Description or not Copyright or not License:
Logger.Error('Parser',
FORMAT_INVALID,
ST.ERR_INVALID_BINARYHEADER_FORMAT,
File=FileName)
else:
(Abstract, Description, Copyright, License) = ParseHeaderCommentSection(Content, FileName)
#
        # The file name is not processed now; it is kept for later use.
#
if self.FileName:
pass
#
# Insert Abstract, Description, CopyRight, License into header object
#
InfHeaderObject2.SetAbstract(Abstract)
InfHeaderObject2.SetDescription(Description)
InfHeaderObject2.SetCopyright(Copyright)
InfHeaderObject2.SetLicense(License)
## Section header parser
#
# The section header is always in following format:
#
# [section_name.arch<.platform|module_type>]
#
# @param String A string contained the content need to be parsed.
#
def SectionHeaderParser(self, SectionString, FileName, LineNo):
_Scope = []
_SectionName = ''
ArchList = set()
_ValueList = []
_PcdNameList = [DT.TAB_INF_FIXED_PCD.upper(),
DT.TAB_INF_FEATURE_PCD.upper(),
DT.TAB_INF_PATCH_PCD.upper(),
DT.TAB_INF_PCD.upper(),
DT.TAB_INF_PCD_EX.upper()
]
SectionString = SectionString.strip()
for Item in GetSplitValueList(SectionString[1:-1], DT.TAB_COMMA_SPLIT):
if Item == '':
Logger.Error('Parser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_MODULE_SECTION_TYPE_ERROR % (""),
File=FileName,
Line=LineNo,
ExtraData=SectionString)
ItemList = GetSplitValueList(Item, DT.TAB_SPLIT)
#
            # Different section names must not be mixed in one section header,
            # but different PCD type sections may be mixed together
#
if _SectionName.upper() not in _PcdNameList:
if _SectionName != '' and _SectionName.upper() != ItemList[0].upper():
Logger.Error('Parser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_SECTION_NAME_DUPLICATE,
File=FileName,
Line=LineNo,
ExtraData=SectionString)
elif _PcdNameList[1] in [_SectionName.upper(), ItemList[0].upper()] and \
(_SectionName.upper()!= ItemList[0].upper()):
Logger.Error('Parser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_MODULE_SECTION_TYPE_ERROR % (""),
File=FileName,
Line=LineNo,
ExtraData=SectionString)
_SectionName = ItemList[0]
if _SectionName.upper() in gINF_SECTION_DEF:
self._SectionType = gINF_SECTION_DEF[_SectionName.upper()]
else:
self._SectionType = DT.MODEL_UNKNOWN
Logger.Error("Parser",
FORMAT_INVALID,
ST.ERR_INF_PARSER_UNKNOWN_SECTION,
File=FileName,
Line=LineNo,
ExtraData=SectionString)
#
# Get Arch
#
Str1, ArchList = GetArch(ItemList, ArchList, FileName, LineNo, SectionString)
#
# For [Defines] section, do special check.
#
if ItemList[0].upper() == DT.TAB_COMMON_DEFINES.upper():
if len(ItemList) != 1:
Logger.Error('Parser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_DEFINE_FROMAT_INVALID % (SectionString),
File=FileName, Line=LineNo, ExtraData=SectionString)
#
# For [UserExtension] section, do special check.
#
if ItemList[0].upper() == DT.TAB_USER_EXTENSIONS.upper():
RetValue = ProcessUseExtHeader(ItemList)
if not RetValue[0]:
Logger.Error('Parser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_DEFINE_FROMAT_INVALID % (SectionString),
File=FileName, Line=LineNo, ExtraData=SectionString)
else:
ItemList = RetValue[1]
if len(ItemList) == 3:
ItemList.append('COMMON')
Str1 = ItemList[1]
#
# For Library classes, need to check module type.
#
if ItemList[0].upper() == DT.TAB_LIBRARY_CLASSES.upper() and len(ItemList) == 3:
if ItemList[2] != '':
ModuleTypeList = GetSplitValueList(ItemList[2], DT.TAB_VALUE_SPLIT)
for Item in ModuleTypeList:
if Item.strip() not in DT.MODULE_LIST:
Logger.Error('Parser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_DEFINE_MODULETYPE_INVALID % (Item),
File=FileName,
Line=LineNo,
ExtraData=SectionString)
#
# GetSpecialStr2
#
Str2 = GetSpecialStr2(ItemList, FileName, LineNo, SectionString)
_Scope.append([Str1, Str2])
_NewValueList = []
_AppendFlag = True
if _SectionName.upper() in _PcdNameList:
for ValueItem in _ValueList:
if _SectionName.upper() == ValueItem[0].upper() and Str1.upper() not in ValueItem[1].split():
ValueItem[1] = ValueItem[1] + " " + Str1
_AppendFlag = False
elif _SectionName.upper() == ValueItem[0].upper() and Str1.upper() in ValueItem[1].split():
_AppendFlag = False
_NewValueList.append(ValueItem)
_ValueList = _NewValueList
if _AppendFlag:
if not ItemList[0].upper() == DT.TAB_USER_EXTENSIONS.upper():
_ValueList.append([_SectionName, Str1, Str2, LineNo])
else:
if len(ItemList) == 4:
_ValueList.append([_SectionName, Str1, Str2, ItemList[3], LineNo])
self.SectionHeaderContent = deepcopy(_ValueList)
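    #
    # Illustrative example: '[Sources.IA32, Sources.X64]' leaves
    # SectionHeaderContent as [['Sources', 'IA32', '', LineNo],
    # ['Sources', 'X64', '', LineNo]].
    #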
## GenSpecialSectionList
#
    # @param SpecialSectionList: a list of lists, where each item has the
    #                            format (Comment, LineNum)
# @param ContainerFile: Input value for filename of Inf file
#
def InfSpecialCommentParser (self, SpecialSectionList, InfSectionObject, ContainerFile, SectionType):
ReFindSpecialCommentRe = re.compile(r"""#(?:\s*)\[(.*?)\](?:.*)""", re.DOTALL)
ReFindHobArchRe = re.compile(r"""[Hh][Oo][Bb]\.([^,]*)""", re.DOTALL)
if self.FileName:
pass
SpecialObjectList = []
ArchList = []
if SectionType == DT.TYPE_EVENT_SECTION:
TokenDict = DT.EVENT_TOKENS
elif SectionType == DT.TYPE_HOB_SECTION:
TokenDict = DT.HOB_TOKENS
else:
TokenDict = DT.BOOTMODE_TOKENS
for List in SpecialSectionList:
#
            # HOB sections have an Arch attribute and need to be handled specially here
#
if SectionType == DT.TYPE_HOB_SECTION:
MatchObject = ReFindSpecialCommentRe.search(List[0][0])
HobSectionStr = MatchObject.group(1)
ArchList = []
for Match in ReFindHobArchRe.finditer(HobSectionStr):
Arch = Match.groups(1)[0].upper()
ArchList.append(Arch)
CommentSoFar = ''
for Index in range(1, len(List)):
Result = ParseComment(List[Index], DT.ALL_USAGE_TOKENS, TokenDict, [], False)
Usage = Result[0]
Type = Result[1]
HelpText = Result[3]
if Usage == DT.ITEM_UNDEFINED and Type == DT.ITEM_UNDEFINED:
if HelpText is None:
HelpText = ''
if not HelpText.endswith('\n'):
HelpText += '\n'
CommentSoFar += HelpText
else:
if HelpText:
CommentSoFar += HelpText
if SectionType == DT.TYPE_EVENT_SECTION:
SpecialObject = InfEventObject()
SpecialObject.SetEventType(Type)
SpecialObject.SetUsage(Usage)
SpecialObject.SetHelpString(CommentSoFar)
elif SectionType == DT.TYPE_HOB_SECTION:
SpecialObject = InfHobObject()
SpecialObject.SetHobType(Type)
SpecialObject.SetUsage(Usage)
SpecialObject.SetHelpString(CommentSoFar)
if len(ArchList) >= 1:
SpecialObject.SetSupArchList(ArchList)
else:
SpecialObject = InfBootModeObject()
SpecialObject.SetSupportedBootModes(Type)
SpecialObject.SetUsage(Usage)
SpecialObject.SetHelpString(CommentSoFar)
SpecialObjectList.append(SpecialObject)
CommentSoFar = ''
if not InfSectionObject.SetSpecialComments(SpecialObjectList,
SectionType):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_MODULE_SECTION_TYPE_ERROR % (SectionType),
ContainerFile
)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Parser/InfSectionParser.py
|
## @file
# This file contained the parser for [Packages] sections in INF file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
InfPackageSectionParser
'''
##
# Import Modules
#
import Logger.Log as Logger
from Logger import StringTable as ST
from Logger.ToolError import FORMAT_INVALID
from Parser.InfParserMisc import InfExpandMacro
from Library import DataType as DT
from Library.Parsing import MacroParser
from Library.Misc import GetSplitValueList
from Object.Parser.InfCommonObject import InfLineCommentObject
from Parser.InfParserMisc import InfParserSectionRoot
class InfPackageSectionParser(InfParserSectionRoot):
## InfPackageParser
#
#
def InfPackageParser(self, SectionString, InfSectionObject, FileName):
#
# Macro defined in this section
#
SectionMacros = {}
ValueList = []
PackageList = []
StillCommentFalg = False
HeaderComments = []
LineComment = None
#
# Parse section content
#
for Line in SectionString:
PkgLineContent = Line[0]
PkgLineNo = Line[1]
if PkgLineContent.strip() == '':
continue
#
# Find Header Comments
#
if PkgLineContent.strip().startswith(DT.TAB_COMMENT_SPLIT):
#
                # The previous line was a comment and this line continues it.
#
if StillCommentFalg:
HeaderComments.append(Line)
continue
#
# First time encounter comment
#
else:
#
# Clear original data
#
HeaderComments = []
HeaderComments.append(Line)
StillCommentFalg = True
continue
else:
StillCommentFalg = False
if len(HeaderComments) >= 1:
LineComment = InfLineCommentObject()
LineCommentContent = ''
for Item in HeaderComments:
LineCommentContent += Item[0] + DT.END_OF_LINE
LineComment.SetHeaderComments(LineCommentContent)
#
# Find Tail comment.
#
if PkgLineContent.find(DT.TAB_COMMENT_SPLIT) > -1:
TailComments = PkgLineContent[PkgLineContent.find(DT.TAB_COMMENT_SPLIT):]
PkgLineContent = PkgLineContent[:PkgLineContent.find(DT.TAB_COMMENT_SPLIT)]
if LineComment is None:
LineComment = InfLineCommentObject()
LineComment.SetTailComments(TailComments)
#
# Find Macro
#
Name, Value = MacroParser((PkgLineContent, PkgLineNo),
FileName,
DT.MODEL_META_DATA_PACKAGE,
self.FileLocalMacros)
if Name is not None:
SectionMacros[Name] = Value
LineComment = None
HeaderComments = []
continue
TokenList = GetSplitValueList(PkgLineContent, DT.TAB_VALUE_SPLIT, 1)
ValueList[0:len(TokenList)] = TokenList
#
# Replace with Local section Macro and [Defines] section Macro.
#
ValueList = [InfExpandMacro(Value, (FileName, PkgLineContent, PkgLineNo),
self.FileLocalMacros, SectionMacros, True)
for Value in ValueList]
PackageList.append((ValueList, LineComment,
(PkgLineContent, PkgLineNo, FileName)))
ValueList = []
LineComment = None
TailComments = ''
HeaderComments = []
continue
#
# Current section archs
#
ArchList = []
for Item in self.LastSectionHeaderContent:
if Item[1] not in ArchList:
ArchList.append(Item[1])
if not InfSectionObject.SetPackages(PackageList, Arch = ArchList):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_MODULE_SECTION_TYPE_ERROR\
%("[Packages]"),
File=FileName,
Line=Item[3])
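    #
    # Illustrative example (hypothetical line): a [Packages] entry such as
    #   MdePkg/MdePkg.dec  # tail comment
    # is appended to PackageList as (['MdePkg/MdePkg.dec'], LineComment,
    # (line content, line number, file name)).
    #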
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Parser/InfPackageSectionParser.py
|
## @file
# This file contained the parser for define sections in INF file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
InfDefineSectionParser
'''
##
# Import Modules
#
import re
from Library import DataType as DT
from Library import GlobalData
from Library.Parsing import MacroParser
from Library.Misc import GetSplitValueList
from Library.ParserValidate import IsValidArch
from Object.Parser.InfCommonObject import InfLineCommentObject
from Object.Parser.InfDefineObject import InfDefMember
from Parser.InfParserMisc import InfExpandMacro
from Object.Parser.InfMisc import ErrorInInf
from Logger import StringTable as ST
from Parser.InfParserMisc import InfParserSectionRoot
## GetValidateArchList
#
#
def GetValidateArchList(LineContent):
TempArch = ''
ArchList = []
ValidateAcrhPatten = re.compile(r"^\s*#\s*VALID_ARCHITECTURES\s*=\s*.*$", re.DOTALL)
if ValidateAcrhPatten.match(LineContent):
TempArch = GetSplitValueList(LineContent, DT.TAB_EQUAL_SPLIT, 1)[1]
TempArch = GetSplitValueList(TempArch, '(', 1)[0]
        ArchList = re.split(r'\s+', TempArch)
NewArchList = []
for Arch in ArchList:
if IsValidArch(Arch):
NewArchList.append(Arch)
ArchList = NewArchList
return ArchList
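#
# Illustrative example: the line '# VALID_ARCHITECTURES = IA32 X64' yields
# ['IA32', 'X64']; any text after '(' is discarded and names failing
# IsValidArch are filtered out.
#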
class InfDefinSectionParser(InfParserSectionRoot):
def InfDefineParser(self, SectionString, InfSectionObject, FileName, SectionComment):
if SectionComment:
pass
#
# Parser Defines section content and fill self._ContentList dict.
#
StillCommentFalg = False
HeaderComments = []
SectionContent = ''
ArchList = []
_ContentList = []
_ValueList = []
#
        # Add WORKSPACE to the global macro dict.
#
self.FileLocalMacros['WORKSPACE'] = GlobalData.gWORKSPACE
for Line in SectionString:
LineContent = Line[0]
LineNo = Line[1]
TailComments = ''
LineComment = None
LineInfo = ['', -1, '']
LineInfo[0] = FileName
LineInfo[1] = LineNo
LineInfo[2] = LineContent
if LineContent.strip() == '':
continue
#
            # The first VALID_ARCHITECTURES line encountered is taken as the supported arch list.
#
if not ArchList:
ArchList = GetValidateArchList(LineContent)
#
# Parser Comment
#
if LineContent.strip().startswith(DT.TAB_COMMENT_SPLIT):
#
                # The previous line was a comment and this line continues it.
#
if StillCommentFalg:
HeaderComments.append(Line)
SectionContent += LineContent + DT.END_OF_LINE
continue
#
# First time encounter comment
#
else:
#
# Clear original data
#
HeaderComments = []
HeaderComments.append(Line)
StillCommentFalg = True
SectionContent += LineContent + DT.END_OF_LINE
continue
else:
StillCommentFalg = False
if len(HeaderComments) >= 1:
LineComment = InfLineCommentObject()
LineCommentContent = ''
for Item in HeaderComments:
LineCommentContent += Item[0] + DT.END_OF_LINE
LineComment.SetHeaderComments(LineCommentContent)
#
# Find Tail comment.
#
if LineContent.find(DT.TAB_COMMENT_SPLIT) > -1:
TailComments = LineContent[LineContent.find(DT.TAB_COMMENT_SPLIT):]
LineContent = LineContent[:LineContent.find(DT.TAB_COMMENT_SPLIT)]
if LineComment is None:
LineComment = InfLineCommentObject()
LineComment.SetTailComments(TailComments)
#
# Find Macro
#
Name, Value = MacroParser((LineContent, LineNo),
FileName,
DT.MODEL_META_DATA_HEADER,
self.FileLocalMacros)
if Name is not None:
self.FileLocalMacros[Name] = Value
continue
#
# Replace with [Defines] section Macro
#
LineContent = InfExpandMacro(LineContent,
(FileName, LineContent, LineNo),
self.FileLocalMacros,
None, True)
SectionContent += LineContent + DT.END_OF_LINE
TokenList = GetSplitValueList(LineContent, DT.TAB_EQUAL_SPLIT, 1)
if len(TokenList) < 2:
ErrorInInf(ST.ERR_INF_PARSER_DEFINE_ITEM_NO_VALUE,
LineInfo=LineInfo)
_ValueList[0:len(TokenList)] = TokenList
if not _ValueList[0]:
ErrorInInf(ST.ERR_INF_PARSER_DEFINE_ITEM_NO_NAME,
LineInfo=LineInfo)
if not _ValueList[1]:
ErrorInInf(ST.ERR_INF_PARSER_DEFINE_ITEM_NO_VALUE,
LineInfo=LineInfo)
Name, Value = _ValueList[0], _ValueList[1]
InfDefMemberObj = InfDefMember(Name, Value)
if (LineComment is not None):
InfDefMemberObj.Comments.SetHeaderComments(LineComment.GetHeaderComments())
InfDefMemberObj.Comments.SetTailComments(LineComment.GetTailComments())
InfDefMemberObj.CurrentLine.SetFileName(self.FullPath)
InfDefMemberObj.CurrentLine.SetLineString(LineContent)
InfDefMemberObj.CurrentLine.SetLineNo(LineNo)
_ContentList.append(InfDefMemberObj)
HeaderComments = []
TailComments = ''
#
# Current Define section archs
#
if not ArchList:
ArchList = ['COMMON']
InfSectionObject.SetAllContent(SectionContent)
InfSectionObject.SetDefines(_ContentList, Arch=ArchList)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Parser/InfDefineSectionParser.py
|
## @file
# This file is used to parse a xml file of .PKG file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
XmlParserMisc
'''
from Object.POM.CommonObject import TextObject
from Logger.StringTable import ERR_XML_PARSER_REQUIRED_ITEM_MISSING
from Logger.ToolError import PARSER_ERROR
import Logger.Log as Logger
## ConvertVariableName()
# Convert VariableName to an L"string";
# input in UCS-2 hex-array format or C-style L"string" can be converted successfully,
# others cannot.
#
# @param VariableName: string need to be converted
# @retval: the converted L-quoted string on success, otherwise None
#
def ConvertVariableName(VariableName):
VariableName = VariableName.strip()
#
# check for L quoted string
#
if VariableName.startswith('L"') and VariableName.endswith('"'):
return VariableName
#
    # check for a hex array; it should be little-endian with an even number of hex bytes
#
ValueList = VariableName.split(' ')
if len(ValueList)%2 == 1:
return None
TransferedStr = ''
Index = 0
while Index < len(ValueList):
FirstByte = int(ValueList[Index], 16)
SecondByte = int(ValueList[Index + 1], 16)
if SecondByte != 0:
return None
if FirstByte not in range(0x20, 0x7F):
return None
TransferedStr += ('%c')%FirstByte
Index = Index + 2
return 'L"' + TransferedStr + '"'
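#
# Illustrative example: ConvertVariableName('L"Setup"') returns the input
# unchanged, ConvertVariableName('41 00 42 00') decodes the UCS-2 LE hex
# array to 'L"AB"', and an odd byte count or a non-printable character
# yields None.
#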
## IsRequiredItemListNull
#
# Check if a required XML section item/attribute is NULL
#
# @param ItemDict: The dict of items to be checked
# @param XmlTreeLevel: The error message tree level
#
def IsRequiredItemListNull(ItemDict, XmlTreeLevel):
for Key in ItemDict:
if not ItemDict[Key]:
Msg = "->".join(Node for Node in XmlTreeLevel)
ErrorMsg = ERR_XML_PARSER_REQUIRED_ITEM_MISSING % (Key, Msg)
Logger.Error('\nUPT', PARSER_ERROR, ErrorMsg, RaiseError=True)
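#
# Illustrative example (hypothetical keys):
# IsRequiredItemListNull({'Name': ''}, ['DistributionPackage',
# 'PackageSurfaceArea']) reports that 'Name' is missing under
# 'DistributionPackage->PackageSurfaceArea' and raises PARSER_ERROR.
#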
## Get help text
#
# @param HelpText
#
def GetHelpTextList(HelpText):
HelpTextList = []
for HelT in HelpText:
HelpTextObj = TextObject()
HelpTextObj.SetLang(HelT.Lang)
HelpTextObj.SetString(HelT.HelpText)
HelpTextList.append(HelpTextObj)
return HelpTextList
## Get Prompt text
#
# @param Prompt
#
def GetPromptList(Prompt):
PromptList = []
for SubPrompt in Prompt:
PromptObj = TextObject()
PromptObj.SetLang(SubPrompt.Lang)
PromptObj.SetString(SubPrompt.Prompt)
PromptList.append(PromptObj)
return PromptList
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/Python/UPT/Xml/XmlParserMisc.py
|