# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""This module is the entry to run spark processing script.

This module contains code related to Spark Processors, which are used
for Processing jobs. These jobs let customers perform data pre-processing,
post-processing, feature engineering, data validation, and model evaluation
on SageMaker using Spark and PySpark.
"""
from __future__ import absolute_import

import json
import logging
import os.path
import shutil
import subprocess
import tempfile
import time
import urllib.request
from enum import Enum
from io import BytesIO
from urllib.parse import urlparse

from typing import Union, List, Dict, Optional

from sagemaker import image_uris
from sagemaker.local.image import _ecr_login_if_needed, _pull_image
from sagemaker.processing import ProcessingInput, ProcessingOutput, ScriptProcessor
from sagemaker.s3 import S3Uploader
from sagemaker.session import Session
from sagemaker.network import NetworkConfig
from sagemaker.spark import defaults

from sagemaker.workflow import is_pipeline_variable
from sagemaker.workflow.entities import PipelineVariable
from sagemaker.workflow.functions import Join

logger = logging.getLogger(__name__)


class _SparkProcessorBase(ScriptProcessor):
    """Handles Amazon SageMaker processing tasks for jobs using Spark.

    Base class for PySparkProcessor and SparkJarProcessor.
    """

    _default_command = "smspark-submit"
    _conf_container_base_path = "/opt/ml/processing/input/"
    _conf_container_input_name = "conf"
    _conf_file_name = "configuration.json"
    _valid_configuration_keys = ["Classification", "Properties", "Configurations"]
    _valid_configuration_classifications = [
        "core-site",
        "hadoop-env",
        "hadoop-log4j",
        "hive-env",
        "hive-log4j",
        "hive-exec-log4j",
        "hive-site",
        "spark-defaults",
        "spark-env",
        "spark-log4j",
        "spark-hive-site",
        "spark-metrics",
        "yarn-env",
        "yarn-site",
        "export",
    ]

    _submit_jars_input_channel_name = "jars"
    _submit_files_input_channel_name = "files"
    _submit_py_files_input_channel_name = "py-files"
    _submit_deps_error_message = (
        "Please specify a list of one or more S3 URIs, "
        "local file paths, and/or local directory paths"
    )

    # history server vars
    _history_server_port = "15050"
    _history_server_url_suffix = f"/proxy/{_history_server_port}"
    _spark_event_log_default_local_path = "/opt/ml/processing/spark-events/"

    def __init__(
        self,
        role,
        instance_type,
        instance_count,
        framework_version=None,
        py_version=None,
        container_version=None,
        image_uri=None,
        volume_size_in_gb=30,
        volume_kms_key=None,
        output_kms_key=None,
        max_runtime_in_seconds=None,
        base_job_name=None,
        sagemaker_session=None,
        env=None,
        tags=None,
        network_config=None,
    ):
        """Initialize a ``_SparkProcessorBase`` instance.

        The _SparkProcessorBase handles Amazon SageMaker processing tasks for
        jobs using SageMaker Spark.

        Args:
            framework_version (str): The version of SageMaker PySpark.
            py_version (str): The version of python.
            container_version (str): The version of spark container.
            role (str): An AWS IAM role name or ARN. The Amazon SageMaker training jobs
                and APIs that create Amazon SageMaker endpoints use this role
                to access training data and model artifacts. After the endpoint
                is created, the inference code might use the IAM role, if it
                needs to access an AWS resource.
            instance_type (str): Type of EC2 instance to use for
                processing, for example, 'ml.c4.xlarge'.
            instance_count (int): The number of instances to run
                the Processing job with. Defaults to 1.
            volume_size_in_gb (int): Size in GB of the EBS volume to
                use for storing data during processing (default: 30).
            volume_kms_key (str): A KMS key for the processing
                volume.
            output_kms_key (str): The KMS key id for all ProcessingOutputs.
            max_runtime_in_seconds (int): Timeout in seconds.
                After this amount of time Amazon SageMaker terminates the job
                regardless of its current status.
            base_job_name (str): Prefix for processing name. If not specified,
                the processor generates a default job name, based on the
                training image name and current timestamp.
            sagemaker_session (sagemaker.session.Session): Session object which
                manages interactions with Amazon
                SageMaker APIs and any other AWS services needed. If not specified,
                the processor creates one using the default AWS configuration chain.
            env (dict): Environment variables to be passed to the processing job.
            tags ([dict]): List of tags to be passed to the processing job.
            network_config (sagemaker.network.NetworkConfig): A NetworkConfig
                object that configures network isolation, encryption of
                inter-container traffic, security group IDs, and subnets.
        """
        self.history_server = None
        self._spark_event_logs_s3_uri = None

        session = sagemaker_session or Session()
        region = session.boto_region_name

        self.image_uri = self._retrieve_image_uri(
            image_uri, framework_version, py_version, container_version, region, instance_type
        )

        env = env or {}
        command = [_SparkProcessorBase._default_command]

        super(_SparkProcessorBase, self).__init__(
            role=role,
            image_uri=self.image_uri,
            instance_count=instance_count,
            instance_type=instance_type,
            command=command,
            volume_size_in_gb=volume_size_in_gb,
            volume_kms_key=volume_kms_key,
            output_kms_key=output_kms_key,
            max_runtime_in_seconds=max_runtime_in_seconds,
            base_job_name=base_job_name,
            sagemaker_session=session,
            env=env,
            tags=tags,
            network_config=network_config,
        )

    def get_run_args(
        self,
        code,
        inputs=None,
        outputs=None,
        arguments=None,
    ):
        """Returns a RunArgs object.

        For processors (:class:`~sagemaker.spark.processing.PySparkProcessor`,
            :class:`~sagemaker.spark.processing.SparkJarProcessor`) that have special
            run() arguments, this object contains the normalized arguments for passing to
            :class:`~sagemaker.workflow.steps.ProcessingStep`.

        Args:
            code (str): This can be an S3 URI or a local path to a file with the framework
                script to run.
            inputs (list[:class:`~sagemaker.processing.ProcessingInput`]): Input files for
                the processing job. These must be provided as
                :class:`~sagemaker.processing.ProcessingInput` objects (default: None).
            outputs (list[:class:`~sagemaker.processing.ProcessingOutput`]): Outputs for
                the processing job. These can be specified as either path strings or
                :class:`~sagemaker.processing.ProcessingOutput` objects (default: None).
            arguments (list[str]): A list of string arguments to be passed to a
                processing job (default: None).
        """
        return super().get_run_args(
            code=code,
            inputs=inputs,
            outputs=outputs,
            arguments=arguments,
        )

    def run(
        self,
        submit_app,
        inputs=None,
        outputs=None,
        arguments=None,
        wait=True,
        logs=True,
        job_name=None,
        experiment_config=None,
        kms_key=None,
    ):
        """Runs a processing job.

        Args:
            submit_app (str): .py or .jar file to submit to Spark as the primary application
            inputs (list[:class:`~sagemaker.processing.ProcessingInput`]): Input files for
                the processing job. These must be provided as
                :class:`~sagemaker.processing.ProcessingInput` objects (default: None).
            outputs (list[:class:`~sagemaker.processing.ProcessingOutput`]): Outputs for
                the processing job. These can be specified as either path strings or
                :class:`~sagemaker.processing.ProcessingOutput` objects (default: None).
            arguments (list[str]): A list of string arguments to be passed to a
                processing job (default: None).
            wait (bool): Whether the call should wait until the job completes (default: True).
            logs (bool): Whether to show the logs produced by the job.
                Only meaningful when wait is True (default: True).
            job_name (str): Processing job name. If not specified, the processor generates
                a default job name, based on the base job name and current timestamp.
            experiment_config (dict[str, str]): Experiment management configuration.
                Optionally, the dict can contain three keys:
                'ExperimentName', 'TrialName', and 'TrialComponentDisplayName'.
                The behavior of setting these keys is as follows:
                * If `ExperimentName` is supplied but `TrialName` is not, a Trial will be
                automatically created and the job's Trial Component associated with the Trial.
                * If `TrialName` is supplied and the Trial already exists, the job's Trial
                Component will be associated with the Trial.
                * If neither `ExperimentName` nor `TrialName` is supplied, the Trial Component
                will remain unassociated.
                * `TrialComponentDisplayName` is used for display in Studio.
            kms_key (str): The ARN of the KMS key that is used to encrypt the
                user code file (default: None).
        """
        self._current_job_name = self._generate_current_job_name(job_name=job_name)

        if is_pipeline_variable(submit_app):
            raise ValueError(
                "submit_app argument has to be a valid S3 URI or local file path "
                + "rather than a pipeline variable"
            )

        return super().run(
            submit_app,
            inputs,
            outputs,
            arguments,
            wait,
            logs,
            job_name,
            experiment_config,
            kms_key,
        )

    def _extend_processing_args(self, inputs, outputs, **kwargs):
        """Extends processing job args such as inputs."""

        if kwargs.get("spark_event_logs_s3_uri"):
            spark_event_logs_s3_uri = kwargs.get("spark_event_logs_s3_uri")
            self._validate_s3_uri(spark_event_logs_s3_uri)

            self._spark_event_logs_s3_uri = spark_event_logs_s3_uri
            self.command.extend(
                [
                    "--local-spark-event-logs-dir",
                    _SparkProcessorBase._spark_event_log_default_local_path,
                ]
            )

            output = ProcessingOutput(
                source=_SparkProcessorBase._spark_event_log_default_local_path,
                destination=spark_event_logs_s3_uri,
                s3_upload_mode="Continuous",
            )

            outputs = outputs or []
            outputs.append(output)

        if kwargs.get("configuration"):
            configuration = kwargs.get("configuration")
            self._validate_configuration(configuration)
            inputs = inputs or []
            inputs.append(self._stage_configuration(configuration))

        return inputs, outputs

    def start_history_server(self, spark_event_logs_s3_uri=None):
        """Starts a Spark history server.

        Args:
            spark_event_logs_s3_uri (str): S3 URI where Spark events are stored.
        """
        if _ecr_login_if_needed(self.sagemaker_session.boto_session, self.image_uri):
            logger.info("Pulling spark history server image...")
            _pull_image(self.image_uri)
        history_server_env_variables = self._prepare_history_server_env_variables(
            spark_event_logs_s3_uri
        )
        self.history_server = _HistoryServer(
            history_server_env_variables, self.image_uri, self._get_network_config()
        )
        self.history_server.run()
        self._check_history_server()
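
    # A minimal usage sketch for the history server (hedged example: the role ARN, bucket,
    # and framework version below are placeholders, not values defined in this module):
    #
    #   spark_processor = PySparkProcessor(
    #       role="arn:aws:iam::111122223333:role/ExampleRole",  # hypothetical role
    #       instance_type="ml.c5.xlarge",
    #       instance_count=1,
    #       framework_version="3.1",  # placeholder version
    #   )
    #   spark_processor.start_history_server(
    #       spark_event_logs_s3_uri="s3://example-bucket/spark-events"  # hypothetical URI
    #   )
    #   # ... browse the Spark UI, then shut the container down:
    #   spark_processor.terminate_history_server()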

    def terminate_history_server(self):
        """Terminates the Spark history server."""
        if self.history_server:
            logger.info("History server is running, terminating history server")
            self.history_server.down()
            self.history_server = None

    def _retrieve_image_uri(
        self, image_uri, framework_version, py_version, container_version, region, instance_type
    ):
        """Builds an image URI."""
        if not image_uri:
            if (py_version is None) != (container_version is None):
                raise ValueError(
                    "Both or neither of py_version and container_version should be set"
                )

            if container_version:
                container_version = f"v{container_version}"

            return image_uris.retrieve(
                defaults.SPARK_NAME,
                region,
                version=framework_version,
                instance_type=instance_type,
                py_version=py_version,
                container_version=container_version,
            )

        return image_uri

    def _validate_configuration(self, configuration):
        """Validates the user-provided Hadoop/Spark/Hive configuration.

        This ensures that the list or dictionary the user provides will serialize to
        JSON matching the schema of EMR's application configuration:

        https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-configure-apps.html
        """
        emr_configure_apps_url = (
            "https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-configure-apps.html"
        )
        if isinstance(configuration, dict):
            keys = configuration.keys()
            if "Classification" not in keys or "Properties" not in keys:
                raise ValueError(
                    f"Missing one or more required keys in configuration dictionary "
                    f"{configuration} Please see {emr_configure_apps_url} for more information"
                )

            for key in keys:
                if key not in self._valid_configuration_keys:
                    raise ValueError(
                        f"Invalid key: {key}. Must be one of {self._valid_configuration_keys}. "
                        f"Please see {emr_configure_apps_url} for more information."
                    )
                if key == "Classification":
                    if configuration[key] not in self._valid_configuration_classifications:
                        raise ValueError(
                            f"Invalid classification: {key}. Must be one of "
                            f"{self._valid_configuration_classifications}"
                        )

        if isinstance(configuration, list):
            for item in configuration:
                self._validate_configuration(item)
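
    # For reference, a configuration that passes the validation above could look like this
    # hedged sketch (classification names come from _valid_configuration_classifications;
    # the property values are illustrative only):
    #
    #   configuration = [
    #       {
    #           "Classification": "spark-defaults",
    #           "Properties": {"spark.executor.memory": "2g", "spark.executor.cores": "2"},
    #       },
    #       {
    #           "Classification": "yarn-site",
    #           "Properties": {"yarn.nodemanager.vmem-check-enabled": "false"},
    #       },
    #   ]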

    def _stage_configuration(self, configuration):
        """Serializes and uploads the user-provided EMR application configuration to S3.

        This method prepares an input channel.

        Args:
            configuration (Dict): the configuration dict for the EMR application configuration.
        """

        serialized_configuration = BytesIO(json.dumps(configuration).encode("utf-8"))
        s3_uri = (
            f"s3://{self.sagemaker_session.default_bucket()}/{self._current_job_name}/"
            f"input/{self._conf_container_input_name}/{self._conf_file_name}"
        )

        S3Uploader.upload_string_as_file_body(
            body=serialized_configuration,
            desired_s3_uri=s3_uri,
            sagemaker_session=self.sagemaker_session,
        )

        conf_input = ProcessingInput(
            source=s3_uri,
            destination=f"{self._conf_container_base_path}{self._conf_container_input_name}",
            input_name=_SparkProcessorBase._conf_container_input_name,
        )
        return conf_input

    def _stage_submit_deps(self, submit_deps, input_channel_name):
        """Prepares a list of paths to jars, py-files, or files dependencies.

        This prepared list of paths is provided as `spark-submit` options.
        The submit_deps list may include a combination of S3 URIs and local paths.
        Any S3 URIs are appended to the `spark-submit` option value without modification.
        Any local file paths are copied to a temp directory, uploaded to a default S3 URI,
        and included as a ProcessingInput channel to provide as local files to the SageMaker
        Spark container.

        :param submit_deps (list[str]): List of one or more dependency paths to include.
        :param input_channel_name (str): The `spark-submit` option name associated with
                    the input channel.
        :return (Optional[ProcessingInput], str): Tuple of (left) optional ProcessingInput
                    for the input channel, and (right) comma-delimited value for
                    `spark-submit` option.
        """
        if not submit_deps:
            raise ValueError(
                f"submit_deps value may not be empty. {self._submit_deps_error_message}"
            )
        if not input_channel_name:
            raise ValueError("input_channel_name value may not be empty.")

        input_channel_s3_uri = (
            f"s3://{self.sagemaker_session.default_bucket()}"
            f"/{self._current_job_name}/input/{input_channel_name}"
        )

        use_input_channel = False
        spark_opt_s3_uris = []
        spark_opt_s3_uris_has_pipeline_var = False

        with tempfile.TemporaryDirectory() as tmpdir:
            for dep_path in submit_deps:
                if is_pipeline_variable(dep_path):
                    spark_opt_s3_uris.append(dep_path)
                    spark_opt_s3_uris_has_pipeline_var = True
                    continue
                dep_url = urlparse(dep_path)
                # S3 URIs are included as-is in the spark-submit argument
                if dep_url.scheme in ["s3", "s3a"]:
                    spark_opt_s3_uris.append(dep_path)
                # Local files are copied to temp directory to be uploaded to S3
                elif not dep_url.scheme or dep_url.scheme == "file":
                    if not os.path.isfile(dep_path):
                        raise ValueError(
                            f"submit_deps path {dep_path} is not a valid local file. "
                            f"{self._submit_deps_error_message}"
                        )
                    logger.info(
                        "Copying dependency from local path %s to tmpdir %s", dep_path, tmpdir
                    )
                    shutil.copy(dep_path, tmpdir)
                else:
                    raise ValueError(
                        f"submit_deps path {dep_path} references unsupported filesystem "
                        f"scheme: {dep_url.scheme} {self._submit_deps_error_message}"
                    )

            # If any local files were found and copied, upload the temp directory to S3
            if os.listdir(tmpdir):
                logger.info(
                    "Uploading dependencies from tmpdir %s to S3 %s", tmpdir, input_channel_s3_uri
                )
                S3Uploader.upload(
                    local_path=tmpdir,
                    desired_s3_uri=input_channel_s3_uri,
                    sagemaker_session=self.sagemaker_session,
                )
                use_input_channel = True

        # If any local files were uploaded, construct a ProcessingInput to provide
        # them to the Spark container and form the spark-submit option from a
        # combination of S3 URIs and container's local input path
        if use_input_channel:
            input_channel = ProcessingInput(
                source=input_channel_s3_uri,
                destination=f"{self._conf_container_base_path}{input_channel_name}",
                input_name=input_channel_name,
            )
            spark_opt = (
                Join(on=",", values=spark_opt_s3_uris + [input_channel.destination])
                if spark_opt_s3_uris_has_pipeline_var
                else ",".join(spark_opt_s3_uris + [input_channel.destination])
            )
        # If no local files were uploaded, form the spark-submit option from a list of S3 URIs
        else:
            input_channel = None
            spark_opt = (
                Join(on=",", values=spark_opt_s3_uris)
                if spark_opt_s3_uris_has_pipeline_var
                else ",".join(spark_opt_s3_uris)
            )

        return input_channel, spark_opt
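
    # To illustrate _stage_submit_deps, a hedged sketch (bucket and file names are placeholders):
    # given submit_deps = ["s3://example-bucket/deps/lib-a.jar", "./lib-b.jar"] and
    # input_channel_name = "jars", the local jar is uploaded under the job's "jars" channel
    # and the method returns roughly:
    #
    #   input_channel = ProcessingInput(
    #       source="s3://<default-bucket>/<job-name>/input/jars",
    #       destination="/opt/ml/processing/input/jars",
    #       input_name="jars",
    #   )
    #   spark_opt = "s3://example-bucket/deps/lib-a.jar,/opt/ml/processing/input/jars"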

    def _get_network_config(self):
        """Runs container with different network config based on different env."""
        if self._is_notebook_instance():
            return "--network host"

        return f"-p 80:80 -p {self._history_server_port}:{self._history_server_port}"

    def _prepare_history_server_env_variables(self, spark_event_logs_s3_uri):
        """Gets all parameters required to run history server."""
        # prepare env variables
        history_server_env_variables = {}

        if spark_event_logs_s3_uri:
            history_server_env_variables[
                _HistoryServer.arg_event_logs_s3_uri
            ] = spark_event_logs_s3_uri
        # this variable may have been set previously by the run() method
        elif self._spark_event_logs_s3_uri is not None:
            history_server_env_variables[
                _HistoryServer.arg_event_logs_s3_uri
            ] = self._spark_event_logs_s3_uri
        else:
            raise ValueError(
                "SPARK_EVENT_LOGS_S3_URI not present. You can specify spark_event_logs_s3_uri "
                "either in run() or start_history_server()"
            )

        history_server_env_variables.update(self._config_aws_credentials())
        region = self.sagemaker_session.boto_region_name
        history_server_env_variables["AWS_REGION"] = region

        if self._is_notebook_instance():
            history_server_env_variables[
                _HistoryServer.arg_remote_domain_name
            ] = self._get_notebook_instance_domain()

        return history_server_env_variables

    def _is_notebook_instance(self):
        """Determine whether it is a notebook instance."""
        return os.path.isfile("/opt/ml/metadata/resource-metadata.json")

    def _get_notebook_instance_domain(self):
        """Get the instance's domain."""
        region = self.sagemaker_session.boto_region_name
        with open("/opt/ml/metadata/resource-metadata.json") as file:
            data = json.load(file)
            notebook_name = data["ResourceName"]

        return f"https://{notebook_name}.notebook.{region}.sagemaker.aws"

    def _check_history_server(self, ping_timeout=40):
        """Print message indicating the status of history server.

        Pings port 15050 to check whether the history server is up.
        Times out after `ping_timeout`.

        Args:
            ping_timeout (int): Timeout in seconds (defaults to 40).
        """
        # ping port 15050 to check history server is up
        timeout = time.time() + ping_timeout

        while True:
            if self._is_history_server_started():
                if self._is_notebook_instance():
                    logger.info(
                        "History server is up on %s%s",
                        self._get_notebook_instance_domain(),
                        self._history_server_url_suffix,
                    )
                else:
                    logger.info(
                        "History server is up on http://0.0.0.0%s", self._history_server_url_suffix
                    )
                break
            if time.time() > timeout:
                logger.error(
                    "History server failed to start. Please run 'docker logs history_server' "
                    "to see logs"
                )
                break

            time.sleep(1)

    def _is_history_server_started(self):
        """Check if history server is started."""
        try:
            response = urllib.request.urlopen(f"http://localhost:{self._history_server_port}")
            return response.status == 200
        except Exception:  # pylint: disable=W0703
            return False

    # TODO (guoqioa@): method only checks urlparse scheme, need to perform deep s3 validation
    def _validate_s3_uri(self, spark_output_s3_path):
        """Validate whether the URI uses an S3 scheme.

        In the future, this validation will perform deeper S3 validation.

        Args:
            spark_output_s3_path (str): The URI of the Spark output S3 Path.
        """
        if is_pipeline_variable(spark_output_s3_path):
            return

        if urlparse(spark_output_s3_path).scheme != "s3":
            raise ValueError(
                f"Invalid s3 path: {spark_output_s3_path}. Please enter something like "
                "s3://bucket-name/folder-name"
            )

    def _config_aws_credentials(self):
        """Configure AWS credentials."""
        try:
            creds = self.sagemaker_session.boto_session.get_credentials()
            access_key = creds.access_key
            secret_key = creds.secret_key
            token = creds.token

            return {
                "AWS_ACCESS_KEY_ID": str(access_key),
                "AWS_SECRET_ACCESS_KEY": str(secret_key),
                "AWS_SESSION_TOKEN": str(token),
            }
        except Exception as e:  # pylint: disable=W0703
            logger.info("Could not get AWS credentials: %s", e)
            return {}

    def _handle_script_dependencies(self, inputs, submit_files, file_type):
        """Handle script dependencies

        The method extends inputs and command based on input files and file_type
        """

        if not submit_files:
            return inputs

        input_channel_name_dict = {
            FileType.PYTHON: self._submit_py_files_input_channel_name,
            FileType.JAR: self._submit_jars_input_channel_name,
            FileType.FILE: self._submit_files_input_channel_name,
        }

        files_input, files_opt = self._stage_submit_deps(
            submit_files, input_channel_name_dict[file_type]
        )

        inputs = inputs or []

        if files_input:
            inputs.append(files_input)

        if files_opt:
            self.command.extend([f"--{input_channel_name_dict[file_type]}", files_opt])

        return inputs
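
    # As a hedged illustration of _handle_script_dependencies: for
    # submit_files=["s3://example-bucket/deps/lookup.csv"] and file_type=FileType.FILE, the
    # dependency is pure S3, so no ProcessingInput is staged and the command simply gains
    # ["--files", "s3://example-bucket/deps/lookup.csv"]. Local paths would additionally add
    # a "files" ProcessingInput channel, as described in _stage_submit_deps above.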


class PySparkProcessor(_SparkProcessorBase):
    """Handles Amazon SageMaker processing tasks for jobs using PySpark."""

    def __init__(
        self,
        role: str,
        instance_type: Union[str, PipelineVariable],
        instance_count: Union[int, PipelineVariable],
        framework_version: Optional[str] = None,
        py_version: Optional[str] = None,
        container_version: Optional[str] = None,
        image_uri: Optional[Union[str, PipelineVariable]] = None,
        volume_size_in_gb: Union[int, PipelineVariable] = 30,
        volume_kms_key: Optional[Union[str, PipelineVariable]] = None,
        output_kms_key: Optional[Union[str, PipelineVariable]] = None,
        max_runtime_in_seconds: Optional[Union[int, PipelineVariable]] = None,
        base_job_name: Optional[str] = None,
        sagemaker_session: Optional[Session] = None,
        env: Optional[Dict[str, Union[str, PipelineVariable]]] = None,
        tags: Optional[List[Dict[str, Union[str, PipelineVariable]]]] = None,
        network_config: Optional[NetworkConfig] = None,
    ):
        """Initialize an ``PySparkProcessor`` instance.

        The PySparkProcessor handles Amazon SageMaker processing tasks for jobs
        using SageMaker PySpark.

        Args:
            framework_version (str): The version of SageMaker PySpark.
            py_version (str): The version of python.
            container_version (str): The version of spark container.
            role (str): An AWS IAM role name or ARN. The Amazon SageMaker training jobs
                and APIs that create Amazon SageMaker endpoints use this role
                to access training data and model artifacts. After the endpoint
                is created, the inference code might use the IAM role, if it
                needs to access an AWS resource.
            instance_type (str or PipelineVariable): Type of EC2 instance to use for
                processing, for example, 'ml.c4.xlarge'.
            instance_count (int or PipelineVariable): The number of instances to run
                the Processing job with. Defaults to 1.
            volume_size_in_gb (int or PipelineVariable): Size in GB of the EBS volume to
                use for storing data during processing (default: 30).
            volume_kms_key (str or PipelineVariable): A KMS key for the processing
                volume.
            output_kms_key (str or PipelineVariable): The KMS key id for all ProcessingOutputs.
            max_runtime_in_seconds (int or PipelineVariable): Timeout in seconds.
                After this amount of time Amazon SageMaker terminates the job
                regardless of its current status.
            base_job_name (str): Prefix for processing name. If not specified,
                the processor generates a default job name, based on the
                training image name and current timestamp.
            sagemaker_session (sagemaker.session.Session): Session object which
                manages interactions with Amazon SageMaker APIs and any other
                AWS services needed. If not specified, the processor creates one
                using the default AWS configuration chain.
            env (dict[str, str] or dict[str, PipelineVariable]): Environment variables to
                be passed to the processing job.
            tags (list[dict[str, str]] or list[dict[str, PipelineVariable]]): List of tags to
                be passed to the processing job.
            network_config (sagemaker.network.NetworkConfig): A NetworkConfig
                object that configures network isolation, encryption of
                inter-container traffic, security group IDs, and subnets.
        """

        super(PySparkProcessor, self).__init__(
            role=role,
            instance_count=instance_count,
            instance_type=instance_type,
            framework_version=framework_version,
            py_version=py_version,
            container_version=container_version,
            image_uri=image_uri,
            volume_size_in_gb=volume_size_in_gb,
            volume_kms_key=volume_kms_key,
            output_kms_key=output_kms_key,
            max_runtime_in_seconds=max_runtime_in_seconds,
            base_job_name=base_job_name,
            sagemaker_session=sagemaker_session,
            env=env,
            tags=tags,
            network_config=network_config,
        )

    def get_run_args(
        self,
        submit_app,
        submit_py_files=None,
        submit_jars=None,
        submit_files=None,
        inputs=None,
        outputs=None,
        arguments=None,
        job_name=None,
        configuration=None,
        spark_event_logs_s3_uri=None,
    ):
        """Returns a RunArgs object.

        This object contains the normalized inputs, outputs and arguments
        needed when using a ``PySparkProcessor`` in a
        :class:`~sagemaker.workflow.steps.ProcessingStep`.

        Args:
            submit_app (str): Path (local or S3) to Python file to submit to Spark
                as the primary application. This is translated to the `code`
                property on the returned `RunArgs` object.
            submit_py_files (list[str]): List of paths (local or S3) to provide for
                `spark-submit --py-files` option
            submit_jars (list[str]): List of paths (local or S3) to provide for
                `spark-submit --jars` option
            submit_files (list[str]): List of paths (local or S3) to provide for
                `spark-submit --files` option
            inputs (list[:class:`~sagemaker.processing.ProcessingInput`]): Input files for
                the processing job. These must be provided as
                :class:`~sagemaker.processing.ProcessingInput` objects (default: None).
            outputs (list[:class:`~sagemaker.processing.ProcessingOutput`]): Outputs for
                the processing job. These can be specified as either path strings or
                :class:`~sagemaker.processing.ProcessingOutput` objects (default: None).
            arguments (list[str]): A list of string arguments to be passed to a
                processing job (default: None).
            job_name (str): Processing job name. If not specified, the processor generates
                a default job name, based on the base job name and current timestamp.
            configuration (list[dict] or dict): Configuration for Hadoop, Spark, or Hive.
                List or dictionary of EMR-style classifications.
                https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-configure-apps.html
            spark_event_logs_s3_uri (str): S3 path where spark application events will
                be published to.
        """
        self._current_job_name = self._generate_current_job_name(job_name=job_name)

        if not submit_app:
            raise ValueError("submit_app is required")

        extended_inputs, extended_outputs = self._extend_processing_args(
            inputs=inputs,
            outputs=outputs,
            submit_py_files=submit_py_files,
            submit_jars=submit_jars,
            submit_files=submit_files,
            configuration=configuration,
            spark_event_logs_s3_uri=spark_event_logs_s3_uri,
        )

        return super().get_run_args(
            code=submit_app,
            inputs=extended_inputs,
            outputs=extended_outputs,
            arguments=arguments,
        )

    def run(
        self,
        submit_app: str,
        submit_py_files: Optional[List[Union[str, PipelineVariable]]] = None,
        submit_jars: Optional[List[Union[str, PipelineVariable]]] = None,
        submit_files: Optional[List[Union[str, PipelineVariable]]] = None,
        inputs: Optional[List[ProcessingInput]] = None,
        outputs: Optional[List[ProcessingOutput]] = None,
        arguments: Optional[List[Union[str, PipelineVariable]]] = None,
        wait: bool = True,
        logs: bool = True,
        job_name: Optional[str] = None,
        experiment_config: Optional[Dict[str, str]] = None,
        configuration: Optional[Union[List[Dict], Dict]] = None,
        spark_event_logs_s3_uri: Optional[Union[str, PipelineVariable]] = None,
        kms_key: Optional[str] = None,
    ):
        """Runs a processing job.

        Args:
            submit_app (str): Path (local or S3) to Python file to submit to Spark
                as the primary application
            submit_py_files (list[str] or list[PipelineVariable]): List of paths (local or S3)
                to provide for `spark-submit --py-files` option
            submit_jars (list[str] or list[PipelineVariable]): List of paths (local or S3)
                to provide for `spark-submit --jars` option
            submit_files (list[str] or list[PipelineVariable]): List of paths (local or S3)
                to provide for `spark-submit --files` option
            inputs (list[:class:`~sagemaker.processing.ProcessingInput`]): Input files for
                the processing job. These must be provided as
                :class:`~sagemaker.processing.ProcessingInput` objects (default: None).
            outputs (list[:class:`~sagemaker.processing.ProcessingOutput`]): Outputs for
                the processing job. These can be specified as either path strings or
                :class:`~sagemaker.processing.ProcessingOutput` objects (default: None).
            arguments (list[str] or list[PipelineVariable]): A list of string arguments to
                be passed to a processing job (default: None).
            wait (bool): Whether the call should wait until the job completes (default: True).
            logs (bool): Whether to show the logs produced by the job.
                Only meaningful when wait is True (default: True).
            job_name (str): Processing job name. If not specified, the processor generates
                a default job name, based on the base job name and current timestamp.
            experiment_config (dict[str, str]): Experiment management configuration.
                Optionally, the dict can contain three keys:
                'ExperimentName', 'TrialName', and 'TrialComponentDisplayName'.
                The behavior of setting these keys is as follows:
                * If `ExperimentName` is supplied but `TrialName` is not, a Trial will be
                automatically created and the job's Trial Component associated with the Trial.
                * If `TrialName` is supplied and the Trial already exists, the job's Trial
                Component will be associated with the Trial.
                * If neither `ExperimentName` nor `TrialName` is supplied, the Trial Component
                will remain unassociated.
                * `TrialComponentDisplayName` is used for display in Studio.
            configuration (list[dict] or dict): Configuration for Hadoop, Spark, or Hive.
                List or dictionary of EMR-style classifications.
                https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-configure-apps.html
            spark_event_logs_s3_uri (str or PipelineVariable): S3 path where spark application
                events will be published to.
            kms_key (str): The ARN of the KMS key that is used to encrypt the
                user code file (default: None).
        """
        self._current_job_name = self._generate_current_job_name(job_name=job_name)

        if not submit_app:
            raise ValueError("submit_app is required")

        extended_inputs, extended_outputs = self._extend_processing_args(
            inputs=inputs,
            outputs=outputs,
            submit_py_files=submit_py_files,
            submit_jars=submit_jars,
            submit_files=submit_files,
            configuration=configuration,
            spark_event_logs_s3_uri=spark_event_logs_s3_uri,
        )

        return super().run(
            submit_app=submit_app,
            inputs=extended_inputs,
            outputs=extended_outputs,
            arguments=arguments,
            wait=wait,
            logs=logs,
            job_name=self._current_job_name,
            experiment_config=experiment_config,
            kms_key=kms_key,
        )
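
    # A minimal end-to-end sketch for PySparkProcessor (hedged example: the role ARN, script
    # paths, S3 URIs, and framework version below are placeholders):
    #
    #   spark_processor = PySparkProcessor(
    #       base_job_name="sm-spark",
    #       framework_version="3.1",
    #       role="arn:aws:iam::111122223333:role/ExampleRole",
    #       instance_type="ml.c5.xlarge",
    #       instance_count=2,
    #   )
    #   spark_processor.run(
    #       submit_app="./code/preprocess.py",
    #       submit_py_files=["./code/helpers.py"],
    #       arguments=["--input", "s3://example-bucket/raw", "--output", "s3://example-bucket/out"],
    #       spark_event_logs_s3_uri="s3://example-bucket/spark-events",
    #       configuration={
    #           "Classification": "spark-defaults",
    #           "Properties": {"spark.executor.memory": "2g"},
    #       },
    #   )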

    def _extend_processing_args(self, inputs, outputs, **kwargs):
        """Extends inputs and outputs.

        Args:
            inputs: Processing inputs.
            outputs: Processing outputs.
            kwargs: Additional keyword arguments passed to `super()`.
        """
        self.command = [_SparkProcessorBase._default_command]
        extended_inputs = self._handle_script_dependencies(
            inputs, kwargs.get("submit_py_files"), FileType.PYTHON
        )
        extended_inputs = self._handle_script_dependencies(
            extended_inputs, kwargs.get("submit_jars"), FileType.JAR
        )
        extended_inputs = self._handle_script_dependencies(
            extended_inputs, kwargs.get("submit_files"), FileType.FILE
        )

        return super()._extend_processing_args(extended_inputs, outputs, **kwargs)


class SparkJarProcessor(_SparkProcessorBase):
    """Handles Amazon SageMaker processing tasks for jobs using Spark with Java or Scala Jars."""

    def __init__(
        self,
        role: str,
        instance_type: Union[str, PipelineVariable],
        instance_count: Union[int, PipelineVariable],
        framework_version: Optional[str] = None,
        py_version: Optional[str] = None,
        container_version: Optional[str] = None,
        image_uri: Optional[Union[str, PipelineVariable]] = None,
        volume_size_in_gb: Union[int, PipelineVariable] = 30,
        volume_kms_key: Optional[Union[str, PipelineVariable]] = None,
        output_kms_key: Optional[Union[str, PipelineVariable]] = None,
        max_runtime_in_seconds: Optional[Union[int, PipelineVariable]] = None,
        base_job_name: Optional[str] = None,
        sagemaker_session: Optional[Session] = None,
        env: Optional[Dict[str, Union[str, PipelineVariable]]] = None,
        tags: Optional[List[Dict[str, Union[str, PipelineVariable]]]] = None,
        network_config: Optional[NetworkConfig] = None,
    ):
        """Initialize a ``SparkJarProcessor`` instance.

        The SparkJarProcessor handles Amazon SageMaker processing tasks for jobs
        using SageMaker Spark.

        Args:
            framework_version (str): The version of SageMaker Spark.
            py_version (str): The version of python.
            container_version (str): The version of spark container.
            role (str): An AWS IAM role name or ARN. The Amazon SageMaker training jobs
                and APIs that create Amazon SageMaker endpoints use this role
                to access training data and model artifacts. After the endpoint
                is created, the inference code might use the IAM role, if it
                needs to access an AWS resource.
            instance_type (str or PipelineVariable): Type of EC2 instance to use for
                processing, for example, 'ml.c4.xlarge'.
            instance_count (int or PipelineVariable): The number of instances to run
                the Processing job with. Defaults to 1.
            volume_size_in_gb (int or PipelineVariable): Size in GB of the EBS volume to
                use for storing data during processing (default: 30).
            volume_kms_key (str or PipelineVariable): A KMS key for the processing
                volume.
            output_kms_key (str or PipelineVariable): The KMS key id for all ProcessingOutputs.
            max_runtime_in_seconds (int or PipelineVariable): Timeout in seconds.
                After this amount of time Amazon SageMaker terminates the job
                regardless of its current status.
            base_job_name (str): Prefix for processing name. If not specified,
                the processor generates a default job name, based on the
                training image name and current timestamp.
            sagemaker_session (sagemaker.session.Session): Session object which
                manages interactions with Amazon SageMaker APIs and any other
                AWS services needed. If not specified, the processor creates one
                using the default AWS configuration chain.
            env (dict[str, str] or dict[str, PipelineVariable]): Environment variables to
                be passed to the processing job.
            tags (list[dict[str, str]] or list[dict[str, PipelineVariable]]): List of tags to
                be passed to the processing job.
            network_config (sagemaker.network.NetworkConfig): A NetworkConfig
                object that configures network isolation, encryption of
                inter-container traffic, security group IDs, and subnets.
        """

        super(SparkJarProcessor, self).__init__(
            role=role,
            instance_count=instance_count,
            instance_type=instance_type,
            framework_version=framework_version,
            py_version=py_version,
            container_version=container_version,
            image_uri=image_uri,
            volume_size_in_gb=volume_size_in_gb,
            volume_kms_key=volume_kms_key,
            output_kms_key=output_kms_key,
            max_runtime_in_seconds=max_runtime_in_seconds,
            base_job_name=base_job_name,
            sagemaker_session=sagemaker_session,
            env=env,
            tags=tags,
            network_config=network_config,
        )

    def get_run_args(
        self,
        submit_app,
        submit_class=None,
        submit_jars=None,
        submit_files=None,
        inputs=None,
        outputs=None,
        arguments=None,
        job_name=None,
        configuration=None,
        spark_event_logs_s3_uri=None,
    ):
        """Returns a RunArgs object.

        This object contains the normalized inputs, outputs and arguments
        needed when using a ``SparkJarProcessor`` in a
        :class:`~sagemaker.workflow.steps.ProcessingStep`.

        Args:
            submit_app (str): Path (local or S3) to Jar file to submit to Spark
                as the primary application. This is translated to the `code`
                property on the returned `RunArgs` object
            submit_class (str): Java class reference to submit to Spark as the primary
                application
            submit_jars (list[str]): List of paths (local or S3) to provide for
                `spark-submit --jars` option
            submit_files (list[str]): List of paths (local or S3) to provide for
                `spark-submit --files` option
            inputs (list[:class:`~sagemaker.processing.ProcessingInput`]): Input files for
                the processing job. These must be provided as
                :class:`~sagemaker.processing.ProcessingInput` objects (default: None).
            outputs (list[:class:`~sagemaker.processing.ProcessingOutput`]): Outputs for
                the processing job. These can be specified as either path strings or
                :class:`~sagemaker.processing.ProcessingOutput` objects (default: None).
            arguments (list[str]): A list of string arguments to be passed to a
                processing job (default: None).
            job_name (str): Processing job name. If not specified, the processor generates
                a default job name, based on the base job name and current timestamp.
            configuration (list[dict] or dict): Configuration for Hadoop, Spark, or Hive.
                List or dictionary of EMR-style classifications.
                https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-configure-apps.html
            spark_event_logs_s3_uri (str): S3 path where spark application events will
                be published to.
        """
        self._current_job_name = self._generate_current_job_name(job_name=job_name)

        if not submit_app:
            raise ValueError("submit_app is required")

        extended_inputs, extended_outputs = self._extend_processing_args(
            inputs=inputs,
            outputs=outputs,
            submit_class=submit_class,
            submit_jars=submit_jars,
            submit_files=submit_files,
            configuration=configuration,
            spark_event_logs_s3_uri=spark_event_logs_s3_uri,
        )

        return super().get_run_args(
            code=submit_app,
            inputs=extended_inputs,
            outputs=extended_outputs,
            arguments=arguments,
        )

    def run(
        self,
        submit_app: str,
        submit_class: Union[str, PipelineVariable],
        submit_jars: Optional[List[Union[str, PipelineVariable]]] = None,
        submit_files: Optional[List[Union[str, PipelineVariable]]] = None,
        inputs: Optional[List[ProcessingInput]] = None,
        outputs: Optional[List[ProcessingOutput]] = None,
        arguments: Optional[List[Union[str, PipelineVariable]]] = None,
        wait: bool = True,
        logs: bool = True,
        job_name: Optional[str] = None,
        experiment_config: Optional[Dict[str, str]] = None,
        configuration: Optional[Union[List[Dict], Dict]] = None,
        spark_event_logs_s3_uri: Optional[Union[str, PipelineVariable]] = None,
        kms_key: Optional[str] = None,
    ):
        """Runs a processing job.

        Args:
            submit_app (str): Path (local or S3) to Jar file to submit to Spark as
                the primary application
            submit_class (str or PipelineVariable): Java class reference to submit to Spark
                as the primary application
            submit_jars (list[str] or list[PipelineVariable]): List of paths (local or S3)
                to provide for `spark-submit --jars` option
            submit_files (list[str] or list[PipelineVariable]): List of paths (local or S3)
                to provide for `spark-submit --files` option
            inputs (list[:class:`~sagemaker.processing.ProcessingInput`]): Input files for
                the processing job. These must be provided as
                :class:`~sagemaker.processing.ProcessingInput` objects (default: None).
            outputs (list[:class:`~sagemaker.processing.ProcessingOutput`]): Outputs for
                the processing job. These can be specified as either path strings or
                :class:`~sagemaker.processing.ProcessingOutput` objects (default: None).
            arguments (list[str] or list[PipelineVariable]): A list of string arguments to
                be passed to a processing job (default: None).
            wait (bool): Whether the call should wait until the job completes (default: True).
            logs (bool): Whether to show the logs produced by the job.
                Only meaningful when wait is True (default: True).
            job_name (str): Processing job name. If not specified, the processor generates
                a default job name, based on the base job name and current timestamp.
            experiment_config (dict[str, str]): Experiment management configuration.
                Optionally, the dict can contain three keys:
                'ExperimentName', 'TrialName', and 'TrialComponentDisplayName'.
                The behavior of setting these keys is as follows:
                * If `ExperimentName` is supplied but `TrialName` is not, a Trial will be
                automatically created and the job's Trial Component associated with the Trial.
                * If `TrialName` is supplied and the Trial already exists, the job's Trial
                Component will be associated with the Trial.
                * If neither `ExperimentName` nor `TrialName` is supplied, the Trial Component
                will remain unassociated.
                * `TrialComponentDisplayName` is used for display in Studio.
            configuration (list[dict] or dict): Configuration for Hadoop, Spark, or Hive.
                List or dictionary of EMR-style classifications.
                https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-configure-apps.html
            spark_event_logs_s3_uri (str or PipelineVariable): S3 path where spark application
                events will be published to.
            kms_key (str): The ARN of the KMS key that is used to encrypt the
                user code file (default: None).
        """
        self._current_job_name = self._generate_current_job_name(job_name=job_name)

        if not submit_app:
            raise ValueError("submit_app is required")

        extended_inputs, extended_outputs = self._extend_processing_args(
            inputs=inputs,
            outputs=outputs,
            submit_class=submit_class,
            submit_jars=submit_jars,
            submit_files=submit_files,
            configuration=configuration,
            spark_event_logs_s3_uri=spark_event_logs_s3_uri,
        )

        return super().run(
            submit_app=submit_app,
            inputs=extended_inputs,
            outputs=extended_outputs,
            arguments=arguments,
            wait=wait,
            logs=logs,
            job_name=self._current_job_name,
            experiment_config=experiment_config,
            kms_key=kms_key,
        )
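
    # A minimal usage sketch for SparkJarProcessor (hedged example: the role ARN, jar path,
    # class name, and framework version below are placeholders):
    #
    #   jar_processor = SparkJarProcessor(
    #       base_job_name="sm-spark-java",
    #       framework_version="3.1",
    #       role="arn:aws:iam::111122223333:role/ExampleRole",
    #       instance_type="ml.c5.xlarge",
    #       instance_count=2,
    #   )
    #   jar_processor.run(
    #       submit_app="./target/spark-app.jar",
    #       submit_class="com.example.SparkApp",
    #       arguments=["--input", "s3://example-bucket/raw"],
    #   )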

    def _extend_processing_args(self, inputs, outputs, **kwargs):
        """Extends processing job args such as inputs, outputs, and the Spark command."""
        self.command = [_SparkProcessorBase._default_command]
        if kwargs.get("submit_class"):
            self.command.extend(["--class", kwargs.get("submit_class")])
        else:
            raise ValueError("submit_class is required")

        extended_inputs = self._handle_script_dependencies(
            inputs, kwargs.get("submit_jars"), FileType.JAR
        )
        extended_inputs = self._handle_script_dependencies(
            extended_inputs, kwargs.get("submit_files"), FileType.FILE
        )

        return super()._extend_processing_args(extended_inputs, outputs, **kwargs)


class _HistoryServer:
    """History server class that is responsible for starting history server."""

    _container_name = "history_server"
    _entry_point = "smspark-history-server"
    arg_event_logs_s3_uri = "event_logs_s3_uri"
    arg_remote_domain_name = "remote_domain_name"

    _history_server_args_format_map = {
        arg_event_logs_s3_uri: "--event-logs-s3-uri {} ",
        arg_remote_domain_name: "--remote-domain-name {} ",
    }

    def __init__(self, cli_args, image_uri, network_config):
        """Initialize a ``_HistoryServer`` with CLI args, an image URI, and a network config."""
        self.cli_args = cli_args
        self.image_uri = image_uri
        self.network_config = network_config
        self.run_history_server_command = self._get_run_history_server_cmd()

    def run(self):
        """Runs the history server."""
        self.down()
        logger.info("Starting history server...")
        subprocess.Popen(
            self.run_history_server_command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )

    def down(self):
        """Stops and removes the container."""
        subprocess.call(["docker", "stop", self._container_name])
        subprocess.call(["docker", "rm", self._container_name])
        logger.info("History server terminated")

    # This method belongs to _HistoryServer because _container_name (the app name) belongs
    # to _HistoryServer. In the future, dynamically creating a new app name and finding an
    # available port should also be handled by _HistoryServer rather than by PySparkProcessor.
    def _get_run_history_server_cmd(self):
        """Gets the history server command."""
        env_options = ""
        ser_cli_args = ""
        for key, value in self.cli_args.items():
            if key in self._history_server_args_format_map:
                ser_cli_args += self._history_server_args_format_map[key].format(value)
            else:
                env_options += f"--env {key}={value} "

        cmd = (
            f"docker run {env_options.strip()} --name {self._container_name} "
            f"{self.network_config} --entrypoint {self._entry_point} {self.image_uri} "
            f"{ser_cli_args.strip()}"
        )

        return cmd
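
    # For a rough idea of the command assembled above, a hedged sketch (image URI, region, and
    # credentials are placeholders; the real string is built from self.cli_args at runtime):
    #
    #   docker run --env AWS_REGION=us-west-2 --name history_server \
    #       -p 80:80 -p 15050:15050 --entrypoint smspark-history-server <image-uri> \
    #       --event-logs-s3-uri s3://example-bucket/spark-events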


class FileType(Enum):
    """Enum of file type"""

    JAR = 1
    PYTHON = 2
    FILE = 3