diff --git a/adfaprocess/Dockerfile b/adfaprocess/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..22882ce14c2fa0fac1d42efe321370951ce61b1b
--- /dev/null
+++ b/adfaprocess/Dockerfile
@@ -0,0 +1,33 @@
+# Use the official slim Python base image
+FROM python:3.9-slim
+
+# Set the working directory
+WORKDIR /opt/ml/code
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    build-essential \
+    gcc \
+    curl \
+    libffi-dev \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install the Rust toolchain (needed to build the tokenizers package from source)
+RUN curl https://sh.rustup.rs -sSf | sh -s -- -y
+ENV PATH="/root/.cargo/bin:${PATH}"
+
+# Upgrade pip and packaging tools
+RUN pip install --upgrade pip setuptools wheel
+
+# Install pinned Python packages (via the Tsinghua PyPI mirror)
+RUN pip install torch==2.5.1 -i https://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip install pandas numpy==1.22.3 rdt joblib tqdm scikit-learn==1.1.2 -i https://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip install transformers==4.19.2 imbalanced-learn sagemaker-training matplotlib -i https://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip install flask -i https://pypi.tuna.tsinghua.edu.cn/simple
+
+# Copy the dataprocess directory into the container
+COPY dataprocess /opt/ml/code/
+
+# Default command: run the inference server (predict.py)
+ENTRYPOINT ["python", "/opt/ml/code/predict.py"]
+#ENTRYPOINT ["python","/opt/ml/code/evalute.py"]
+#ENTRYPOINT ["python","/opt/ml/code/predict.py"]
diff --git a/adfaprocess/dataprocess/best_mlp_model.joblib b/adfaprocess/dataprocess/best_mlp_model.joblib
new file mode 100644
index 0000000000000000000000000000000000000000..74679308b1b6911d158401838494033c76190d95
Binary files /dev/null and b/adfaprocess/dataprocess/best_mlp_model.joblib differ
diff --git a/adfaprocess/dataprocess/count_vectorizer.joblib b/adfaprocess/dataprocess/count_vectorizer.joblib
new file mode 100644
index 0000000000000000000000000000000000000000..4c328b076cc2b434db90704ded7638ab514f6d1b
Binary files /dev/null and b/adfaprocess/dataprocess/count_vectorizer.joblib differ
diff --git a/adfaprocess/dataprocess/dataprocess.py b/adfaprocess/dataprocess/dataprocess.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f4359853513ca9aaa1abedf639b90ab3e35c129
--- /dev/null
+++ b/adfaprocess/dataprocess/dataprocess.py
@@ -0,0 +1,213 @@
+import json
+import os
+import glob
+from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
+from sklearn.model_selection import train_test_split, GridSearchCV
+from sklearn.neural_network import MLPClassifier
+from sklearn.metrics import classification_report, accuracy_score
+from scipy.sparse import hstack
+import joblib  # for saving the model and vectorizers
+
+# Dataset directory paths
+ATTACK_DIR = r"E:\ADFL\ADFA-LD\ADFA-LD\Attack_Data_Master"
+TRAINING_DIR = r"E:\ADFL\ADFA-LD\ADFA-LD\Training_Data_Master"
+
+# Load trace files
+def load_sequences_from_dir(base_dir):
+    """
+    Load every file in the dataset directory and parse each trace into a list of integers.
+    :param base_dir: dataset directory path
+    :return: list of system-call sequences, one integer list per file
+    """
+    sequences = []
+    for file_path in glob.glob(os.path.join(base_dir, "*")):
+        if os.path.isfile(file_path):
+            with open(file_path, "r") as file:
+                sequence = file.read().strip()  # read the whole file
+                if sequence:  # skip empty files
+                    # parse the space-separated system-call string into integers
+                    int_sequence = list(map(int, sequence.split()))
+                    sequences.append(int_sequence)
+    return sequences
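+# For example, a trace file containing "6 221 221 66" is parsed to [6, 221, 221, 66].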
+
+# Load the normal (training) traces and parse them into integer lists
+normal_sequences = load_sequences_from_dir(TRAINING_DIR)
+
+if not normal_sequences:
+    raise ValueError("Training_Data_Master 文件夹中没有找到任何序列数据!")
+
+print(f"Number of normal sequences: {len(normal_sequences)}")
+
+# Convert sequences to strings for feature extraction
+def sequence_to_string(sequences):
+    """
+    Convert integer-list sequences into whitespace-joined strings for
+    TfidfVectorizer and CountVectorizer.
+    :param sequences: list of sequences, each an integer list
+    :return: list of sequence strings
+    """
+    return [" ".join(map(str, seq)) for seq in sequences]
+
+# Convert the normal sequences to strings
+normal_sequences_strings = sequence_to_string(normal_sequences)
+
+# Storage for attack sequences and their labels
+attack_sequences = []
+attack_labels = []
+
+# Mapping between attack categories and numeric labels
+category_to_label = {}
+current_label = 0
+label_to_category = {}
+# Iterate over all attack categories
+for attack_category in os.listdir(ATTACK_DIR):
+    attack_dir = os.path.join(ATTACK_DIR, attack_category)
+
+    if os.path.isdir(attack_dir):  # only process subdirectories
+        # Take the prefix before the first underscore (e.g. 'Web_Shell_10' -> 'Web'),
+        # merging all variants of one attack type into a single category
+        merged_category = attack_category.split("_")[0]
+
+        # Assign a fresh label the first time a merged category appears
+        if merged_category not in category_to_label:
+            category_to_label[merged_category] = current_label
+            label_to_category[current_label] = merged_category
+            current_label += 1
+
+        # Label for the current merged category
+        category_label = category_to_label[merged_category]
+        print(f"\nProcessing attack category: {attack_category} as merged category: {merged_category} with label {category_label}")
+
+        # Load this category's attack traces as integer lists
+        category_attack_sequences = load_sequences_from_dir(attack_dir)
+
+        if not category_attack_sequences:
+            print(f"Warning: No sequences found for category {attack_category}, skipping.")
+            continue
+
+        print(f"Number of sequences in {attack_category}: {len(category_attack_sequences)}")
+
+        # Accumulate the attack sequences and labels (one label per sequence)
+        attack_sequences.extend(category_attack_sequences)
+        attack_labels.extend([category_label] * len(category_attack_sequences))
+
+# Normal sequences get label -1 to distinguish them from the attack labels
+normal_labels = [-1] * len(normal_sequences)
+
+# Concatenate normal and attack sequences
+all_sequences = normal_sequences + attack_sequences
+all_labels = normal_labels + attack_labels
+
+print(f"Total sequences: {len(all_sequences)}, Total labels: {len(all_labels)}")
+print(f"Category to Label Mapping: {category_to_label}")
+
+
+# Extract N-gram features with CountVectorizer
+def build_count_vectorizer(sequences, ngram_range=(1, 3), max_features=5000):
+    """
+    Build a CountVectorizer feature matrix.
+    :param sequences: list of system-call sequences, each an integer list
+    :param ngram_range: N-gram range, e.g. (1, 3) for uni-, bi- and tri-grams
+    :param max_features: maximum vocabulary size
+    :return: count feature matrix and the fitted vectorizer
+    """
+    # Convert the integer lists to strings
+    sequences_as_strings = sequence_to_string(sequences)
+    vectorizer = CountVectorizer(
+        analyzer="word",
+        token_pattern=r"\d+",
+        ngram_range=ngram_range,
+        max_features=max_features
+    )
+    count_matrix = vectorizer.fit_transform(sequences_as_strings)
+    return count_matrix, vectorizer
+
+# Extract N-gram features with TF-IDF
+def build_tfidf_vectorizer(sequences, ngram_range=(1, 3), max_features=5000):
+    """
+    Build a TF-IDF feature matrix.
+    :param sequences: list of system-call sequences, each an integer list
+    :param ngram_range: N-gram range, e.g. (1, 3) for uni-, bi- and tri-grams
+    :param max_features: maximum vocabulary size
+    :return: TF-IDF feature matrix and the fitted vectorizer
+    """
+    # Convert the integer lists to strings
+    sequences_as_strings = sequence_to_string(sequences)
+    vectorizer = TfidfVectorizer(
+        analyzer="word",
+        token_pattern=r"\d+",
+        ngram_range=ngram_range,
+        max_features=max_features
+    )
+    tfidf_matrix = vectorizer.fit_transform(sequences_as_strings)
+    return tfidf_matrix, vectorizer
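+
+# For illustration, with token_pattern r"\d+" and ngram_range=(1, 3) the string
+# "6 221 66" contributes the unigrams ('6', '221', '66'), the bigrams
+# ('6 221', '221 66') and the trigram ('6 221 66') to the vocabulary.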
+
+# Build the Count and TF-IDF feature matrices
+print("\nBuilding Count and TF-IDF feature matrices...")
+count_matrix, count_vectorizer = build_count_vectorizer(all_sequences)
+tfidf_matrix, tfidf_vectorizer = build_tfidf_vectorizer(all_sequences)
+
+# Stack the two matrices horizontally
+combined_matrix = hstack([count_matrix, tfidf_matrix])
+print(f"Combined matrix shape: {combined_matrix.shape}")
+
+# Persist the CountVectorizer and TfidfVectorizer locally
+joblib.dump(count_vectorizer, "count_vectorizer.joblib")
+joblib.dump(tfidf_vectorizer, "tfidf_vectorizer.joblib")
+print("CountVectorizer and TfidfVectorizer saved to local files.")
+
+# Split into training and test sets
+X_train, X_test, y_train, y_test = train_test_split(combined_matrix, all_labels, test_size=0.2, random_state=42)
+print(f"Training set size: {X_train.shape}, Testing set size: {X_test.shape}")
+
+# Tune the MLPClassifier with a grid search
+print("\nOptimizing MLPClassifier with GridSearchCV...")
+
+# Parameter grid
+param_grid = {
+    'hidden_layer_sizes': [(100,), (100, 50), (150, 100, 50)],  # hidden-layer layouts
+    'activation': ['relu', 'tanh'],                             # activation functions
+    'solver': ['adam', 'sgd'],                                  # optimizers
+    'learning_rate_init': [0.001, 0.01],                        # initial learning rates
+    'max_iter': [200, 300]                                      # iteration caps
+}
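+# Grid size: 3 layer layouts x 2 activations x 2 solvers x 2 learning rates
+# x 2 max_iter values = 48 candidates, i.e. 144 model fits with 3-fold CV.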
+
+# Set up GridSearchCV
+grid_search = GridSearchCV(
+    estimator=MLPClassifier(random_state=42),
+    param_grid=param_grid,
+    scoring='accuracy',
+    cv=3,       # 3-fold cross-validation
+    verbose=2,
+    n_jobs=-1   # use all available cores
+)
+label_to_category_path = "label_to_category.json"
+with open(label_to_category_path, "w") as json_file:
+    json.dump(label_to_category, json_file, indent=4)
+
+print(f"Label-to-Category mapping saved to {label_to_category_path}")
+# 执行网格搜索
+grid_search.fit(X_train, y_train)
+print("GridSearchCV completed!")
+
+# Best parameters and estimator
+best_params = grid_search.best_params_
+best_model = grid_search.best_estimator_
+print(f"Best Parameters: {best_params}")
+
+# Evaluate the best model on the test set
+print("\nEvaluating best model on test set...")
+y_pred = best_model.predict(X_test)
+
+# Classification report
+print("\nClassification Report:")
+print(classification_report(y_test, y_pred))
+
+# Accuracy
+accuracy = accuracy_score(y_test, y_pred)
+print(f"Accuracy: {accuracy:.4f}")
+
+# Persist the model locally
+model_path = "best_mlp_model.joblib"
+joblib.dump(best_model, model_path)
+print(f"Model saved to {model_path}")
diff --git a/adfaprocess/dataprocess/demotest.py b/adfaprocess/dataprocess/demotest.py
new file mode 100644
index 0000000000000000000000000000000000000000..34e423766d1f990151b41d06a5bdfad1b89b9775
--- /dev/null
+++ b/adfaprocess/dataprocess/demotest.py
@@ -0,0 +1,65 @@
+import os
+import glob
+import joblib
+from scipy.sparse import hstack
+
+# Test data directory
+TEST_DATA_DIR = r"E:\ADFL\ADFA-LD\ADFA-LD\Validation_Data_Master"
+
+# Load the fitted CountVectorizer and TfidfVectorizer
+count_vectorizer = joblib.load("count_vectorizer.joblib")
+tfidf_vectorizer = joblib.load("tfidf_vectorizer.joblib")
+print("CountVectorizer and TfidfVectorizer loaded.")
+
+# Load the best model
+best_model = joblib.load("best_mlp_model.joblib")
+print("Best MLP model loaded.")
+
+# Load the test traces
+def load_test_sequences_from_dir(base_dir):
+    """
+    Load every file in the test data directory and parse each trace into a list of integers.
+    :param base_dir: test data directory path
+    :return: list of system-call sequences, one integer list per file
+    """
+    sequences = []
+    for file_path in glob.glob(os.path.join(base_dir, "*")):
+        if os.path.isfile(file_path):
+            with open(file_path, "r") as file:
+                sequence = file.read().strip()  # read the whole file
+                if sequence:  # skip empty files
+                    # parse the space-separated system-call string into integers
+                    int_sequence = list(map(int, sequence.split()))
+                    sequences.append(int_sequence)
+    return sequences
+
+# Load the test data
+test_sequences = load_test_sequences_from_dir(TEST_DATA_DIR)
+
+if not test_sequences:
+    raise ValueError("Test_Data_Master 文件夹中没有找到任何序列数据!")
+
+print(f"Number of test sequences: {len(test_sequences)}")
+
+# Convert the test data to strings for feature extraction
+test_sequences_strings = [" ".join(map(str, seq)) for seq in test_sequences]
+
+# Extract features with the loaded CountVectorizer and TfidfVectorizer
+print("\nTransforming test sequences into feature matrices...")
+test_count_matrix = count_vectorizer.transform(test_sequences_strings)
+test_tfidf_matrix = tfidf_vectorizer.transform(test_sequences_strings)
+
+# Stack the Count and TF-IDF feature matrices
+test_combined_matrix = hstack([test_count_matrix, test_tfidf_matrix])
+print(f"Test combined matrix shape: {test_combined_matrix.shape}")
+
+# Predict with the loaded best model
+print("\nPredicting test data using the best model...")
+test_predictions = best_model.predict(test_combined_matrix)
+
+# Print the predictions
+print("\nTest Predictions:")
+for i, prediction in enumerate(test_predictions):
+    print(f"Sequence {i+1}: Predicted Label = {prediction}")
diff --git a/adfaprocess/dataprocess/label_to_category.json b/adfaprocess/dataprocess/label_to_category.json
new file mode 100644
index 0000000000000000000000000000000000000000..b3f4afe6ac7e6ad81542a6f8142ded142a485d07
--- /dev/null
+++ b/adfaprocess/dataprocess/label_to_category.json
@@ -0,0 +1,7 @@
+{
+    "0": "Adduser",
+    "1": "Hydra",
+    "2": "Java",
+    "3": "Meterpreter",
+    "4": "Web"
+}
\ No newline at end of file
diff --git a/adfaprocess/dataprocess/predict.py b/adfaprocess/dataprocess/predict.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6b072be0e96987bd4dc2947bc8a85f208083139
--- /dev/null
+++ b/adfaprocess/dataprocess/predict.py
@@ -0,0 +1,137 @@
+import os
+import joblib
+import json
+from flask import Flask, request, jsonify
+from scipy.sparse import hstack
+
+# Flask application
+app = Flask(__name__)
+
+# Wrapper that loads the model and vectorizers
+class ADFAEndpointModel:
+    def __init__(self):
+        self.count_vectorizer = None
+        self.tfidf_vectorizer = None
+        self.model = None
+
+    def load_model(self, model_dir):
+        """
+        Load the vectorizers and the MLP model from model_dir.
+        """
+        print("Loading CountVectorizer and TfidfVectorizer...")
+        self.count_vectorizer = joblib.load(os.path.join(model_dir, "count_vectorizer.joblib"))
+        self.tfidf_vectorizer = joblib.load(os.path.join(model_dir, "tfidf_vectorizer.joblib"))
+        print("CountVectorizer and TfidfVectorizer loaded.")
+
+        print("Loading MLP model...")
+        self.model = joblib.load(os.path.join(model_dir, "best_mlp_model.joblib"))
+        print("Best MLP model loaded.")
+
+    def predict(self, sequences):
+        """
+        Generate predictions.
+        :param sequences: input system-call sequences (list of integer lists)
+        :return: list of predicted labels
+        """
+        # Convert the input sequences to strings for vectorization
+        sequences_strings = [" ".join(map(str, seq)) for seq in sequences]
+
+        # Build the feature matrices
+        print("\nTransforming sequences into feature matrices...")
+        count_matrix = self.count_vectorizer.transform(sequences_strings)
+        tfidf_matrix = self.tfidf_vectorizer.transform(sequences_strings)
+        combined_matrix = hstack([count_matrix, tfidf_matrix])
+        print(f"Feature matrix shape: {combined_matrix.shape}")
+
+        # Predict
+        print("Predicting with the model...")
+        predictions = self.model.predict(combined_matrix)
+        return predictions.tolist()
+
+
+# Instantiate the model handler
+model_handler = ADFAEndpointModel()
+
+
+@app.route("/ping", methods=["GET"])
+def ping():
+    """
+    Health-check endpoint.
+    """
+    status = 200 if model_handler.model else 500
+    return jsonify({"status": "ok" if status == 200 else "error"}), status
+
+
+@app.route("/invocations", methods=["POST"])
+def invocations():
+    """
+    Inference endpoint.
+    """
+    try:
+        # Parse the request payload
+        input_data = input_fn(request.data, request.content_type)
+        # Generate predictions
+        predictions = predict_fn(input_data, model_handler)
+        # Format the response
+        return output_fn(predictions, "application/json")
+    except Exception as e:
+        print(f"Error during prediction: {e}")
+        return jsonify({"error": str(e)}), 500
+
+
+def input_fn(request_body, request_content_type):
+    """
+    Parse the input data.
+    :param request_body: HTTP request body
+    :param request_content_type: HTTP request Content-Type
+    :return: parsed input data (list of integer lists)
+    """
+    if request_content_type == "application/json":
+        input_data = json.loads(request_body)
+        if isinstance(input_data, dict) and "data" in input_data:
+            return input_data["data"]  # 返回输入数据
+        elif isinstance(input_data, list):
+            return input_data
+        else:
+            raise ValueError("Invalid input format. Must be a JSON with 'data' key or a list of sequences.")
+    else:
+        raise ValueError(f"Unsupported content type: {request_content_type}")
+
+
+def predict_fn(input_data, model):
+    """
+    Generate predictions with the model.
+    :param input_data: parsed input data
+    :param model: the model handler
+    :return: predictions
+    """
+    return model.predict(input_data)
+
+
+def output_fn(prediction, content_type):
+    """
+    Format the output.
+    :param prediction: predictions
+    :param content_type: response content type
+    :return: serialized predictions
+    """
+    if content_type == "application/json":
+        return json.dumps({"predictions": prediction})
+    else:
+        raise ValueError(f"Unsupported content type: {content_type}")
+
+
+if __name__ == "__main__":
+    # Model directory (on SageMaker this would be /opt/ml/model; here the
+    # artifacts sit in the image's WORKDIR, so an empty path resolves there)
+    model_dir = ""
+
+    # Load the model and vectorizers
+    print("Starting model loading...")
+    model_handler.load_model(model_dir)
+
+    # Start the Flask server
+    app.run(host="0.0.0.0", port=8080)
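+
+    # Example local smoke test (assumed host/port, matching app.run above):
+    #   curl http://localhost:8080/ping
+    #   curl -X POST -H "Content-Type: application/json" \
+    #        -d '{"data": [[6, 221, 66]]}' http://localhost:8080/invocations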
diff --git a/adfaprocess/dataprocess/syscall_word2vec.model b/adfaprocess/dataprocess/syscall_word2vec.model
new file mode 100644
index 0000000000000000000000000000000000000000..a8a79f0fb287fa4823c550ac55ee801c6902ef01
Binary files /dev/null and b/adfaprocess/dataprocess/syscall_word2vec.model differ
diff --git a/adfaprocess/dataprocess/tfidf_vectorizer.joblib b/adfaprocess/dataprocess/tfidf_vectorizer.joblib
new file mode 100644
index 0000000000000000000000000000000000000000..43230b83a5fa7c0ca09b1b2243bf2467ef40480d
Binary files /dev/null and b/adfaprocess/dataprocess/tfidf_vectorizer.joblib differ
diff --git a/adfaprocess/model/predicttest.py b/adfaprocess/model/predicttest.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e9cab54c1830ead07076e94d00aecb5a92b8267
--- /dev/null
+++ b/adfaprocess/model/predicttest.py
@@ -0,0 +1,75 @@
+import boto3
+import json
+
+# SageMaker endpoint name
+ENDPOINT_NAME = "adfa-predict-endpoint"
+
+# SageMaker Runtime client. Prefer the default AWS credential chain; only fill
+# in explicit keys if you must, and never commit real credentials.
+sagemaker_client = boto3.client(
+    "sagemaker-runtime",
+    region_name="ap-south-1",
+    aws_access_key_id="YOUR_ACCESS_KEY_ID",          # replace with your Access Key ID
+    aws_secret_access_key="YOUR_SECRET_ACCESS_KEY"   # replace with your Secret Access Key
+)
+
+# Test data (two sample system-call sequences)
+input_data = [
+    [
+        6, 221, 221, 66, 6, 6, 6, 5, 41, 41, 60, 97, 12, 174, 174, 174, 174, 174, 174, 174,
+        174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174,
+        174, 174, 174, 174, 174, 174, 174, 175, 11, 45, 33, 192, 33, 5, 197, 192, 6, 33, 5,
+        3, 197, 192, 192, 6, 33, 5, 3, 197, 192, 192, 192, 6, 192, 192, 243, 125, 125, 125,
+        125, 91, 45, 45, 66, 20, 33, 5, 221, 140, 27, 174, 27, 221, 3, 221, 27, 174, 27,
+        174, 27, 221, 3, 221, 27, 174, 27, 174, 27, 221, 3, 221, 27, 174, 13, 33, 5, 140,
+        140, 63, 6, 27, 174, 27, 221, 3, 221, 27, 174, 27, 174, 27, 221, 140, 4, 221, 27,
+        174, 6, 33, 5, 27, 174, 27, 221, 140, 4, 221, 27, 174, 6, 6, 6, 12, 195, 6, 5, 41,
+        41, 54, 212, 15, 54, 54, 54, 54, 54, 221, 221, 221, 221, 175, 174, 175, 162, 54,
+        122, 4, 5, 54, 54, 54, 197, 192, 3, 197, 54, 192, 4, 4, 3, 54, 54, 54, 6, 91, 122,
+        4, 4
+    ],
+    [
+        3, 146, 146, 142, 146, 265, 142, 142, 146, 142, 3, 3, 3, 104, 3, 142, 265, 265, 142, 142,
+        265, 175, 175, 142, 142, 175, 142, 146, 142, 142, 142, 265, 265, 142, 142, 146, 142, 119,
+        146, 3, 265, 142, 175, 119, 265, 142, 142, 265, 3, 175, 142, 142, 3, 142, 265, 146, 265,
+        119, 175, 142, 265, 265, 146, 142, 265, 142, 146, 119, 265, 104, 146, 3, 142, 3, 142, 146,
+        142, 3, 3, 265, 142, 265, 142, 175, 142, 142, 142, 119, 175, 142, 142, 3, 3, 119, 3, 142,
+        142, 265, 142, 142, 142, 146, 142, 142, 146, 142, 265, 146, 265, 265, 146, 175, 142, 142,
+        265, 142, 265, 142, 146, 104, 142, 146, 3, 142, 142, 146, 142, 265, 146, 146, 146, 146,
+        265, 104, 142, 146, 146, 146, 146, 175, 175, 3, 142, 265, 146, 3, 175, 104, 142, 3, 3, 3,
+        142, 3, 3, 146, 142, 3, 3, 175, 104, 175, 142, 104, 3, 142, 3, 142, 119, 265, 265, 142,
+        142, 104, 142, 104, 3, 142, 146, 146, 142, 142, 142, 146, 146, 142, 146, 142, 265, 104,
+        3, 104, 3, 3, 3, 265, 265, 265, 142, 3, 3, 265, 265, 265, 3, 3, 142, 104, 104, 265, 265,
+        146, 265, 3, 265, 104, 3, 104, 265, 265, 142, 3, 142, 142, 3, 142, 175, 265, 142, 265,
+        265, 142, 265, 3, 3, 3, 175, 91, 265, 265, 3, 142, 142, 3, 3, 3, 142, 3, 142, 3, 3, 265,
+        265, 3, 265, 265, 265, 104, 3, 3, 142, 3, 104, 3, 142, 265, 265, 142, 3, 3, 146, 142,
+        142, 119, 142, 3, 104, 265, 142, 146, 146, 142, 142, 119, 142, 265, 265, 265, 265, 119,
+        142, 175, 119, 265, 119, 142, 119, 3, 142, 142, 142, 146, 265, 175, 119, 265, 142, 146,
+        142, 142, 146, 104, 142, 175, 175, 142, 265, 3, 142, 175, 3, 146, 192, 175, 3, 175, 175,
+        146, 265, 104, 265, 265, 146, 142, 3, 3, 3, 3, 175, 175, 175, 175, 175, 175, 175, 175,
+        175, 175, 175, 175, 265, 104, 265, 146, 3, 104, 142, 175, 142, 175
+    ]
+]
+
+
+# Wrap the test data in the expected JSON structure
+payload = {
+    "data": input_data
+}
+
+# Serialize the payload to a JSON string
+payload_json = json.dumps(payload)
+
+# Invoke the SageMaker endpoint
+response = sagemaker_client.invoke_endpoint(
+    EndpointName=ENDPOINT_NAME,
+    ContentType="application/json",
+    Body=payload_json
+)
+
+# Parse the response
+response_body = response["Body"].read().decode("utf-8")
+response_json = json.loads(response_body)
+
+# Print the predictions
+print("Predictions:", response_json["predictions"])