diff --git a/.github/workflows/dlc.json b/.github/workflows/dlc.json
index 8c6e04fe74c..e01ae258c8f 100644
--- a/.github/workflows/dlc.json
+++ b/.github/workflows/dlc.json
@@ -1,10 +1,13 @@
{
- "ignorePatterns": [
- {
- "pattern": "^http://localhost"
+ "ignorePatterns": [
+ {
+ "pattern": "^http://localhost"
+ },
+ {
+ "pattern": "^http://127.0.0.1"
},
{
- "pattern": "^http://127.0.0.1"
+ "pattern": "^(https?://)?([a-zA-Z0-9-]+\\.)*bilibili\\.com"
}
],
"timeout": "10s",
diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml
index f99f8c30e68..e5af9803c39 100644
--- a/.github/workflows/integration-test.yml
+++ b/.github/workflows/integration-test.yml
@@ -152,6 +152,8 @@ jobs:
sleep 20
done
+ bash ./linkis-dist/helm/scripts/prepare-for-spark.sh
+
#show linkis pod logs
#POD_NAME=`kubectl get pods -n linkis -l app.kubernetes.io/instance=linkis-demo-cg-linkismanager -o jsonpath='{.items[0].metadata.name}'`
#kubectl logs -n linkis ${POD_NAME} -f --tail=10000
@@ -170,16 +172,15 @@ jobs:
# Execute test by linkis-cli
POD_NAME=`kubectl get pods -n linkis -l app.kubernetes.io/instance=linkis-demo-mg-gateway -o jsonpath='{.items[0].metadata.name}'`
- kubectl exec -it -n linkis ${POD_NAME} -- bash -c " \
+ kubectl exec -n linkis ${POD_NAME} -- bash -c " \
sh /opt/linkis/bin/linkis-cli -engineType shell-1 -codeType shell -code \"pwd\" ";
- kubectl exec -it -n linkis ${POD_NAME} -- bash -c " \
+ kubectl exec -n linkis ${POD_NAME} -- bash -c " \
sh /opt/linkis/bin/linkis-cli -engineType python-python2 -codeType python -code 'print(\"hello\")' "
- #todo
- #kubectl exec -it -n linkis ${POD_NAME} -- bash -c " \
- #sh /opt/linkis/bin/linkis-cli -engineType hive-3.1.3 -codeType hql -code 'show databases' "
+ kubectl exec -n linkis ${POD_NAME} -- bash -c " \
+ sh /opt/linkis/bin/linkis-cli -engineType hive-3.1.3 -codeType hql -code 'show databases' "
- #kubectl exec -it -n linkis ${POD_NAME} -- bash -c " \
- #sh /opt/linkis/bin/linkis-cli -engineType spark-3.2.1 -codeType sql -code 'show databases' "
+ kubectl exec -n linkis ${POD_NAME} -- bash -c " \
+ sh /opt/linkis/bin/linkis-cli -engineType spark-3.2.1 -codeType sql -code 'show databases' "
shell: bash
diff --git a/README.md b/README.md
index c3b154b6856..e378297288e 100644
--- a/README.md
+++ b/README.md
@@ -3,7 +3,7 @@
- Linkis builds a computation middleware layer to facilitate connection,
+ Linkis builds a computation middleware layer to facilitate connection,
governance and orchestration between the upper applications and the underlying data engines.
@@ -14,41 +14,40 @@
-
-
-
-
-
-
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
@@ -89,21 +88,21 @@ Apache Linkis | DeepWiki : https://deepwiki.com/apache/linkis
# Engine Type
-| **Engine name** | **Support underlying component version (default dependency version)** | **Linkis Version Requirements** | **Included in Release Package By Default** | **Description** |
-|:---- |:---- |:---- |:---- |:---- |
-|Spark|Apache >= 2.0.0, CDH >= 5.4.0, (default Apache Spark 3.2.1)|\>=1.0.3|Yes|Spark EngineConn, supports SQL , Scala, Pyspark and R code|
-|Hive|Apache >= 1.0.0, CDH >= 5.4.0, (default Apache Hive 3.1.3)|\>=1.0.3|Yes |Hive EngineConn, supports HiveQL code|
-|Python|Python >= 2.6, (default Python2*)|\>=1.0.3|Yes |Python EngineConn, supports python code|
-|Shell|Bash >= 2.0|\>=1.0.3|Yes|Shell EngineConn, supports Bash shell code|
-|JDBC|MySQL >= 5.0, Hive >=1.2.1, (default Hive-jdbc 2.3.4)|\>=1.0.3|No |JDBC EngineConn, already supports ClickHouse, DB2, DM, Greenplum, kingbase, MySQL, Oracle, PostgreSQL and SQLServer, can be extended quickly Support other DB, such as SQLite|
-|Flink |Flink >= 1.12.2, (default Apache Flink 1.12.2)|\>=1.0.2|No |Flink EngineConn, supports FlinkSQL code, also supports starting a new Yarn in the form of Flink Jar Application|
-|Pipeline|-|\>=1.0.2|No|Pipeline EngineConn, supports file import and export|
-|openLooKeng|openLooKeng >= 1.5.0, (default openLookEng 1.5.0)|\>=1.1.1|No|openLooKeng EngineConn, supports querying data virtualization engine with Sql openLooKeng|
-|Sqoop| Sqoop >= 1.4.6, (default Apache Sqoop 1.4.6)|\>=1.1.2|No|Sqoop EngineConn, support data migration tool Sqoop engine|
-|Presto|Presto >= 0.180|\>=1.2.0|No|Presto EngineConn, supports Presto SQL code|
-|ElasticSearch|ElasticSearch >=6.0|\>=1.2.0|No|ElasticSearch EngineConn, supports SQL and DSL code|
-|Trino | Trino >=371 | >=1.3.1 | No | Trino EngineConn, supports Trino SQL code |
-|Seatunnel | Seatunnel >=2.1.2 | >=1.3.1 | No | Seatunnel EngineConn, supportt Seatunnel SQL code |
+| **Engine name** | **Support underlying component version (default dependency version)** | **Linkis Version Requirements** | **Included in Release Package By Default** | **Description** |
+| :-------------- | :------------------------------------------------------------------------ | :------------------------------ | :----------------------------------------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Spark           | Apache >= 2.0.0, CDH >= 5.4.0, (default Apache Spark 3.2.1)              | \>=1.0.3                        | Yes                                        | Spark EngineConn, supports SQL, Scala, Pyspark and R code |
+| Hive | Apache >= 1.0.0, CDH >= 5.4.0, (default Apache Hive 3.1.3) | \>=1.0.3 | Yes | Hive EngineConn, supports HiveQL code |
+| Python | Python >= 2.6, (default Python2*) | \>=1.0.3 | Yes | Python EngineConn, supports python code |
+| Shell | Bash >= 2.0 | \>=1.0.3 | Yes | Shell EngineConn, supports Bash shell code |
+| JDBC            | MySQL >= 5.0, Hive >=1.2.1, (default Hive-jdbc 2.3.4)                    | \>=1.0.3                        | No                                         | JDBC EngineConn, already supports ClickHouse, DB2, DM, Greenplum, kingbase, MySQL, Oracle, PostgreSQL and SQLServer, and can be quickly extended to support other databases such as SQLite |
+| Flink           | Flink >= 1.12.2, (default Apache Flink 1.12.2)                            | \>=1.0.2                        | No                                         | Flink EngineConn, supports FlinkSQL code, and also supports launching a new Yarn application in the form of a Flink jar |
+| Pipeline | - | \>=1.0.2 | No | Pipeline EngineConn, supports file import and export |
+| openLooKeng     | openLooKeng >= 1.5.0, (default openLookEng 1.5.0)                         | \>=1.1.1                        | No                                         | openLooKeng EngineConn, supports querying the openLooKeng data virtualization engine with SQL |
+| Sqoop           | Sqoop >= 1.4.6, (default Apache Sqoop 1.4.6)                              | \>=1.1.2                        | No                                         | Sqoop EngineConn, supports the Sqoop data migration tool |
+| Presto | Presto >= 0.180 | \>=1.2.0 | No | Presto EngineConn, supports Presto SQL code |
+| ElasticSearch | ElasticSearch >=6.0 | \>=1.2.0 | No | ElasticSearch EngineConn, supports SQL and DSL code |
+| Trino | Trino >=371 | >=1.3.1 | No | Trino EngineConn, supports Trino SQL code |
+| Seatunnel       | Seatunnel >=2.1.2                                                         | >=1.3.1                         | No                                         | Seatunnel EngineConn, supports Seatunnel SQL code |
# Download
@@ -152,13 +151,13 @@ npm run build
```
### Bundled with MySQL JDBC Driver
-Due to the MySQL licensing restrictions, the MySQL Java Database Connectivity (JDBC) driver is not bundled with the
+Due to the MySQL licensing restrictions, the MySQL Java Database Connectivity (JDBC) driver is not bundled with the
official released linkis image by default. However, at current stage, linkis still relies on this library to work properly.
-To solve this problem, we provide a script which can help to creating a custom image with mysql jdbc from the official
+To solve this problem, we provide a script that helps you create a custom image with the MySQL JDBC driver from the official
linkis image by yourself, the image created by this tool will be tagged as `linkis:with-jdbc` by default.
```shell
-$> LINKIS_IMAGE=linkis:1.3.1
+$> LINKIS_IMAGE=linkis:1.3.1
$> ./linkis-dist/docker/scripts/make-linkis-image-with-mysql-jdbc.sh
```
@@ -167,7 +166,7 @@ Please refer to [Quick Deployment](https://linkis.apache.org/docs/latest/deploym
# Examples and Guidance
- [User Manual](https://linkis.apache.org/docs/latest/user-guide/how-to-use)
-- [Engine Usage Documents](https://linkis.apache.org/docs/latest/engine-usage/overview)
+- [Engine Usage Documents](https://linkis.apache.org/docs/latest/engine-usage/overview)
- [API Documents](https://linkis.apache.org/docs/latest/api/overview)
# Documentation & Vedio
@@ -186,13 +185,13 @@ Below is the Linkis architecture diagram. You can find more detailed architectur
# Contributing
-Contributions are always welcomed, we need more contributors to build Linkis together. either code, or doc, or other supports that could help the community.
+Contributions are always welcome. We need more contributors to build Linkis together, whether code, documentation, or other support that helps the community.
For code and documentation contributions, please follow the [contribution guide](https://linkis.apache.org/community/how-to-contribute).
# Contact Us
-- Any questions or suggestions please kindly submit an [issue](https://github.com/apache/linkis/issues).
+- For any questions or suggestions, please submit an [issue](https://github.com/apache/linkis/issues).
- By mail [dev@linkis.apache.org](mailto:dev@linkis.apache.org)
- You can scan the QR code below to join our WeChat group to get more immediate response
@@ -200,5 +199,5 @@ For code and documentation contributions, please follow the [contribution guide]
# Who is Using Linkis
-We opened an issue [[Who is Using Linkis]](https://github.com/apache/linkis/issues/23) for users to feedback and record who is using Linkis.
+We opened an issue [[Who is Using Linkis]](https://github.com/apache/linkis/issues/23) for users to provide feedback and record who is using Linkis.
Since the first release of Linkis in 2019, it has accumulated more than **700** trial companies and **1000+** sandbox trial users, which involving diverse industries, from finance, banking, tele-communication, to manufactory, internet companies and so on.
diff --git a/README_CN.md b/README_CN.md
index e42e5f62c09..545e53a050e 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -13,41 +13,40 @@
-
-
-
-
-
-
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
@@ -57,9 +56,9 @@
# 介绍
Linkis 在上层应用程序和底层引擎之间构建了一层计算中间件。通过使用 Linkis 提供的 REST/WebSocket/JDBC 等标准接口,
-上层应用可以方便地连接访问 MySQL/Spark/Hive/Presto/Flink 等底层引擎,同时实现变量、脚本、函数和资源文件等用户资源的跨上层应用互通。
+上层应用可以方便地连接访问 MySQL/Spark/Hive/Presto/Flink 等底层引擎,同时实现变量、脚本、函数和资源文件等用户资源的跨上层应用互通。
作为计算中间件,Linkis 提供了强大的连通、复用、编排、扩展和治理管控能力。通过计算中间件将应用层和引擎层解耦,简化了复杂的网络调用关系,
-降低了整体复杂度,同时节约了整体开发和维护成本。
+降低了整体复杂度,同时节约了整体开发和维护成本。
Linkis 自 2019 年开源发布以来,已累计积累了 700 多家试验企业和 1000+沙盒试验用户,涉及金融、电信、制造、互联网等多个行业。
许多公司已经将 Linkis 作为大数据平台底层计算存储引擎的统一入口,和计算请求/任务的治理管控利器。
@@ -82,21 +81,21 @@ Apache Linkis | DeepWiki : https://deepwiki.com/apache/linkis
# 引擎类型
-| **引擎名** | **支持底层组件版本 (默认依赖版本)** | **Linkis 1.X 版本要求** | **是否默认包含在发布包中** | **说明** |
-|:---- |:---- |:---- |:---- |:---- |
-|Spark|Apache >= 2.0.0, CDH >= 5.4.0, (默认 Apache Spark 3.2.1)|\>=1.0.3|是|Spark EngineConn, 支持 SQL, Scala, Pyspark 和 R 代码|
-|Hive|Apache >= 1.0.0, CDH >= 5.4.0, (默认 Apache Hive 3.1.3)|\>=1.0.3|是|Hive EngineConn, 支持 HiveQL 代码|
-|Python|Python >= 2.6, (默认 Python2*)|\>=1.0.3|是|Python EngineConn, 支持 python 代码|
-|Shell|Bash >= 2.0|\>=1.0.3|是|Shell EngineConn, 支持 Bash shell 代码|
-|JDBC|MySQL >= 5.0, Hive >=1.2.1, (默认 Hive-jdbc 2.3.4)|\>=1.0.3|否|JDBC EngineConn, 已支持ClickHouse, DB2, DM, Greenplum, kingbase, MySQL, Oracle, PostgreSQL 和 SQLServer,可快速扩展支持其他数据库组件, 如SQLite|
-|Flink |Flink >= 1.12.2, (默认 Apache Flink 1.12.2)|\>=1.0.3|否|Flink EngineConn, 支持 FlinkSQL 代码,也支持以 Flink Jar 形式启动一个新的 Yarn 应用程序|
-|Pipeline|-|\>=1.0.3|否|Pipeline EngineConn, 支持文件的导入和导出|
-|openLooKeng|openLooKeng >= 1.5.0, (默认 openLookEng 1.5.0)|\>=1.1.1|否|openLooKeng EngineConn, 支持用 Sql 查询数据虚拟化引擎 openLooKeng|
-|Sqoop| Sqoop >= 1.4.6, (默认 Apache Sqoop 1.4.6)|\>=1.1.2|否|Sqoop EngineConn, 支持 数据迁移工具 Sqoop 引擎|
-|Presto|Presto >= 0.180, (默认 Presto 0.234)|\>=1.2.0|否|Presto EngineConn, 支持 Presto SQL 代码|
-|ElasticSearch|ElasticSearch >=6.0, ((默认 ElasticSearch 7.6.2)|\>=1.2.0|否|ElasticSearch EngineConn, 支持 SQL 和 DSL 代码|
-|Trino | Trino >=371 | >=1.3.1 | 否 | Trino EngineConn, 支持Trino SQL 代码 |
-|Seatunnel | Seatunnel >=2.1.2 | >=1.3.1 | 否 | Seatunnel EngineConn, 支持Seatunnel SQL 代码 |
+| **引擎名** | **支持底层组件版本 (默认依赖版本)** | **Linkis 1.X 版本要求** | **是否默认包含在发布包中** | **说明** |
+| :------------ | :------------------------------------------------------------------- | :---------------------- | :------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------- |
+| Spark | Apache >= 2.0.0, CDH >= 5.4.0, (默认 Apache Spark 3.2.1) | \>=1.0.3 | 是 | Spark EngineConn, 支持 SQL, Scala, Pyspark 和 R 代码 |
+| Hive | Apache >= 1.0.0, CDH >= 5.4.0, (默认 Apache Hive 3.1.3) | \>=1.0.3 | 是 | Hive EngineConn, 支持 HiveQL 代码 |
+| Python | Python >= 2.6, (默认 Python2*) | \>=1.0.3 | 是 | Python EngineConn, 支持 python 代码 |
+| Shell | Bash >= 2.0 | \>=1.0.3 | 是 | Shell EngineConn, 支持 Bash shell 代码 |
+| JDBC | MySQL >= 5.0, Hive >=1.2.1, (默认 Hive-jdbc 2.3.4) | \>=1.0.3 | 否 | JDBC EngineConn, 已支持ClickHouse, DB2, DM, Greenplum, kingbase, MySQL, Oracle, PostgreSQL 和 SQLServer,可快速扩展支持其他数据库组件, 如SQLite |
+| Flink | Flink >= 1.12.2, (默认 Apache Flink 1.12.2) | \>=1.0.3 | 否 | Flink EngineConn, 支持 FlinkSQL 代码,也支持以 Flink Jar 形式启动一个新的 Yarn 应用程序 |
+| Pipeline | - | \>=1.0.3 | 否 | Pipeline EngineConn, 支持文件的导入和导出 |
+| openLooKeng | openLooKeng >= 1.5.0, (默认 openLookEng 1.5.0) | \>=1.1.1 | 否 | openLooKeng EngineConn, 支持用 Sql 查询数据虚拟化引擎 openLooKeng |
+| Sqoop | Sqoop >= 1.4.6, (默认 Apache Sqoop 1.4.6) | \>=1.1.2 | 否 | Sqoop EngineConn, 支持 数据迁移工具 Sqoop 引擎 |
+| Presto | Presto >= 0.180, (默认 Presto 0.234) | \>=1.2.0 | 否 | Presto EngineConn, 支持 Presto SQL 代码 |
+| ElasticSearch | ElasticSearch >=6.0, (默认 ElasticSearch 7.6.2)                                  | \>=1.2.0                | 否                         | ElasticSearch EngineConn, 支持 SQL 和 DSL 代码                                                                                                  |
+| Trino | Trino >=371 | >=1.3.1 | 否 | Trino EngineConn, 支持Trino SQL 代码 |
+| Seatunnel | Seatunnel >=2.1.2 | >=1.3.1 | 否 | Seatunnel EngineConn, 支持Seatunnel SQL 代码 |
# 下载
@@ -147,7 +146,7 @@ npm run build
我们提供了一个脚本,它可以帮助你快速的基于官方的 Linkis 镜像创建一个集成了MySQL JDBC 的自定义镜像。 这个工具创建的镜像默认的名称是 `linkis:with-jdbc`。
```shell
-$> LINKIS_IMAGE=linkis:1.3.1
+$> LINKIS_IMAGE=linkis:1.3.1
$> ./linkis-dist/docker/scripts/make-linkis-image-with-mysql-jdbc.sh
```
@@ -156,17 +155,17 @@ $> ./linkis-dist/docker/scripts/make-linkis-image-with-mysql-jdbc.sh
# 示例和使用指引
- [用户手册 ](https://linkis.apache.org/zh-CN/docs/latest/user-guide/how-to-use),
-- [各引擎使用指引 ](https://linkis.apache.org/zh-CN/docs/latest/engine-usage/overview)
+- [各引擎使用指引 ](https://linkis.apache.org/zh-CN/docs/latest/engine-usage/overview)
- [API 文档 ](https://linkis.apache.org/zh-CN/docs/latest/api/overview)
# 文档&视频
-- 完整的 Linkis 文档代码存放在[linkis-website 仓库中 ](https://github.com/apache/linkis-website)
+- 完整的 Linkis 文档代码存放在[linkis-website 仓库中 ](https://github.com/apache/linkis-website)
- Meetup 视频 [Bilibili](https://space.bilibili.com/598542776?from=search&seid=14344213924133040656)
# 架构概要
-Linkis 基于微服务架构开发,其服务可以分为 3 类:计算治理服务、公共增强服务和微服务治理服务。
+Linkis 基于微服务架构开发,其服务可以分为 3 类:计算治理服务、公共增强服务和微服务治理服务。
- 计算治理服务,支持计算任务/请求处理流程的 3 个主要阶段:提交-> 准备-> 执行
- 公共增强服务,包括上下文服务、物料管理服务及数据源服务等
@@ -178,7 +177,7 @@ Linkis 基于微服务架构开发,其服务可以分为 3 类:计算治理服
# 贡献
-我们非常欢迎和期待更多的贡献者参与共建 Linkis, 不论是代码、文档,或是其他能够帮助到社区的贡献形式。
+我们非常欢迎和期待更多的贡献者参与共建 Linkis, 不论是代码、文档,或是其他能够帮助到社区的贡献形式。
代码和文档相关的贡献请参照[贡献指引](https://linkis.apache.org/zh-CN/community/how-to-contribute).
# 联系我们
@@ -192,5 +191,5 @@ Linkis 基于微服务架构开发,其服务可以分为 3 类:计算治理服
# 谁在使用 Linkis
-我们创建了一个 issue [[Who is Using Linkis]](https://github.com/apache/linkis/issues/23) 以便用户反馈和记录谁在使用 Linkis.
+我们创建了一个 issue [[Who is Using Linkis]](https://github.com/apache/linkis/issues/23) 以便用户反馈和记录谁在使用 Linkis.
Linkis 自 2019 年开源发布以来,累计已有 700 多家试验企业和 1000+沙盒试验用户,涉及金融、电信、制造、互联网等多个行业。
diff --git a/codecheck.ignore b/codecheck.ignore
new file mode 100644
index 00000000000..cc679f1529c
--- /dev/null
+++ b/codecheck.ignore
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+linkis-commons/linkis-common/src/main/java/org/apache/linkis/common/utils/DESUtil.java
+linkis-public-enhancements/linkis-pes-common/src/main/java/org/apache/linkis/cs/common/serialize/helper/ContextSerializationHelper.java
+linkis-public-enhancements/linkis-pes-common/src/main/java/org/apache/linkis/cs/listener/callback/imp/DefaultContextIDCallbackEngine.java
+linkis-public-enhancements/linkis-pes-common/src/main/java/org/apache/linkis/cs/listener/callback/imp/DefaultContextKeyCallbackEngine.java
+linkis-computation-governance/linkis-manager/linkis-label-common/src/main/java/org/apache/linkis/manager/label/utils/EngineTypeLabelCreator.java
+linkis-public-enhancements/linkis-pes-common/src/main/java/org/apache/linkis/cs/listener/ListenerBus/ContextAsyncListenerBus.java
+linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/resources/mapper/common/LabelManagerMapper.xml
+linkis-engineconn-plugins/hbase/hbase-core/src/main/java/org/apache/linkis/manager/engineplugin/hbase/HBaseConnectionManager.java
+linkis-public-enhancements/linkis-instance-label-server/src/main/resources/mapper/common/InsLabelRelationMapper.xml
+linkis-commons/linkis-common/src/main/java/org/apache/linkis/common/utils/AESUtils.java
\ No newline at end of file
diff --git a/linkis-commons/linkis-common/pom.xml b/linkis-commons/linkis-common/pom.xml
index 425c0673d2c..6fe5c47f53c 100644
--- a/linkis-commons/linkis-common/pom.xml
+++ b/linkis-commons/linkis-common/pom.xml
@@ -150,7 +150,7 @@
<groupId>com.github.oshi</groupId>
<artifactId>oshi-core</artifactId>
- <version>6.2.1</version>
+ <version>6.4.0</version>
@@ -163,5 +163,17 @@
-
+
+
+ spark-3
+
+ 3.7.0-M11
+ 1.11.0-wds-spark3
+ 3.4.4
+ 2.12.17
+ 2.12
+ 2.14.2
+
+
+
diff --git a/linkis-commons/linkis-common/src/main/java/org/apache/linkis/common/utils/AESUtils.java b/linkis-commons/linkis-common/src/main/java/org/apache/linkis/common/utils/AESUtils.java
new file mode 100644
index 00000000000..969cb8718eb
--- /dev/null
+++ b/linkis-commons/linkis-common/src/main/java/org/apache/linkis/common/utils/AESUtils.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.linkis.common.utils;
+
+import org.apache.linkis.common.conf.CommonVars;
+import org.apache.linkis.common.exception.ErrorException;
+
+import org.apache.commons.net.util.Base64;
+
+import javax.crypto.Cipher;
+import javax.crypto.KeyGenerator;
+import javax.crypto.SecretKey;
+import javax.crypto.spec.SecretKeySpec;
+
+import java.security.NoSuchAlgorithmException;
+import java.security.SecureRandom;
+
+/**
+ * @author cr949
+ * @description String encryption utility that produces an AES-encrypted, Base64-encoded string
+ */
+public class AESUtils {
+
+ /** Key algorithm */
+ private static final String KEY_ALGORITHM = "AES";
+
+ /** Secure random algorithm used for key derivation */
+ private static final String SECRET_RANDOM = "SHA1PRNG";
+
+ /** Character encoding */
+ public static final String ENCODING_TYPE = "UTF-8";
+
+ /** Default cipher transformation */
+ private static final String DEFAULT_CIPHER_ALGORITHM = "AES/ECB/PKCS5Padding";
+
+ public static final String PASSWORD = "password";
+
+ public static final String IS_ENCRYPT = "isEncrypt";
+
+ public static final String DECRYPT = "0";
+
+ public static final String ENCRYPT = "1";
+
+ public static final CommonVars LINKIS_DATASOURCE_AES_KEY =
+ CommonVars.apply("linkis.datasource.aes.secretkey", "");
+
+ public static final CommonVars LINKIS_DATASOURCE_AES_SWITCH =
+ CommonVars.apply("linkis.datasource.aes.switch", false);
+
+ /**
+ * Encrypt the given content with the given password.
+ *
+ * @param content plain text to encrypt
+ * @param password key material used to derive the AES key
+ * @return Base64-encoded cipher text
+ */
+ public static String encrypt(String content, String password) {
+ try {
+ // Create the cipher
+ Cipher cipher = Cipher.getInstance(DEFAULT_CIPHER_ALGORITHM);
+
+ byte[] byteContent = content.getBytes(ENCODING_TYPE);
+ // Initialize the cipher in encrypt mode
+ cipher.init(Cipher.ENCRYPT_MODE, getSecretKey(password));
+ // Encrypt
+ byte[] result = cipher.doFinal(byteContent);
+ // Return the Base64-encoded result
+ return Base64.encodeBase64String(result).trim();
+ } catch (Exception e) {
+ throw new ErrorException(21304, "AES encryption failed");
+ }
+ }
+
+ public static String encrypt(byte[] content, String password) {
+ try {
+ // Create the cipher
+ Cipher cipher = Cipher.getInstance(DEFAULT_CIPHER_ALGORITHM);
+ // Initialize the cipher in encrypt mode
+ cipher.init(Cipher.ENCRYPT_MODE, getSecretKey(password));
+ // Encrypt
+ byte[] result = cipher.doFinal(content);
+ // Return the Base64-encoded result
+ return Base64.encodeBase64String(result).trim();
+ } catch (Exception e) {
+ throw new ErrorException(21304, "AES encryption failed");
+ }
+ }
+
+ /**
+ * AES decryption.
+ *
+ * @param content Base64-encoded cipher text
+ * @param password key material used to derive the AES key
+ * @return decrypted plain text
+ */
+ public static String decrypt(String content, String password) {
+ try {
+ // Instantiate the cipher
+ Cipher cipher = Cipher.getInstance(DEFAULT_CIPHER_ALGORITHM);
+ // Initialize with the key in decrypt mode
+ cipher.init(Cipher.DECRYPT_MODE, getSecretKey(password));
+ // Perform the decryption
+ byte[] result = cipher.doFinal(Base64.decodeBase64(content));
+ return new String(result, ENCODING_TYPE);
+ } catch (Exception e) {
+ throw new ErrorException(21304, "AES decryption failed");
+ }
+ }
+
+ /**
+ * AES decryption.
+ *
+ * @param content Base64-encoded cipher bytes
+ * @param password key material used to derive the AES key
+ * @return decrypted bytes
+ */
+ public static byte[] decrypt(byte[] content, String password) {
+ try {
+ // Instantiate the cipher
+ Cipher cipher = Cipher.getInstance(DEFAULT_CIPHER_ALGORITHM);
+ // Initialize with the key in decrypt mode
+ cipher.init(Cipher.DECRYPT_MODE, getSecretKey(password));
+ // Perform the decryption
+ return cipher.doFinal(Base64.decodeBase64(content));
+ } catch (Exception e) {
+ throw new ErrorException(21304, "AES decryption failed");
+ }
+ }
+
+ /**
+ * Derive the AES secret key from the password. The password seeds a SHA1PRNG SecureRandom, so
+ * on typical JDKs the same password always yields the same 128-bit key.
+ *
+ * @return the derived key spec
+ */
+ private static SecretKeySpec getSecretKey(String password) {
+ // Obtain a KeyGenerator for the configured key algorithm
+ KeyGenerator kg;
+ try {
+ kg = KeyGenerator.getInstance(KEY_ALGORITHM);
+ SecureRandom secureRandom = SecureRandom.getInstance(SECRET_RANDOM);
+ secureRandom.setSeed(password.getBytes());
+ // AES requires a 128-bit key length here
+ kg.init(128, secureRandom);
+ // Generate the key
+ SecretKey secretKey = kg.generateKey();
+ // Convert it into an AES key spec
+ return new SecretKeySpec(secretKey.getEncoded(), KEY_ALGORITHM);
+ } catch (NoSuchAlgorithmException e) {
+ throw new ErrorException(21304, "Failed to generate AES secret key");
+ }
+ }
+
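+ // Decrypts the given password with the configured linkis.datasource.aes.secretkey only when
+ // linkis.datasource.aes.switch is enabled; otherwise the value is returned unchanged.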
+ public static String isDecryptByConf(String password) {
+ if (AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue()) {
+ // decrypt
+ password = AESUtils.decrypt(password, AESUtils.LINKIS_DATASOURCE_AES_KEY.getValue());
+ }
+ return password;
+ }
+}
diff --git a/linkis-commons/linkis-common/src/main/java/org/apache/linkis/common/utils/SHAUtils.java b/linkis-commons/linkis-common/src/main/java/org/apache/linkis/common/utils/SHAUtils.java
new file mode 100644
index 00000000000..fab5441ea43
--- /dev/null
+++ b/linkis-commons/linkis-common/src/main/java/org/apache/linkis/common/utils/SHAUtils.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.linkis.common.utils;
+
+import org.apache.linkis.common.conf.CommonVars;
+
+import org.apache.commons.lang3.StringUtils;
+
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.HashMap;
+import java.util.Map;
+
+public class SHAUtils {
+
+ public static final String DOCTOR_NONCE =
+ CommonVars.apply("linkis.doctor.signature.nonce", "").getValue();
+ public static final CommonVars DOCTOR_TOKEN =
+ CommonVars.apply("linkis.doctor.signature.token", "");
+
+ /**
+ * Hash a string, using SHA-256 by default.
+ *
+ * @param strSrc the string to hash
+ * @param encName the digest algorithm name; defaults to SHA-256 when null or empty
+ * @return the hex-encoded digest, or null if the algorithm is unknown
+ * @throws UnsupportedEncodingException
+ */
+ public static String Encrypt(String strSrc, String encName) throws UnsupportedEncodingException {
+ MessageDigest md = null;
+ String strDes = null;
+ byte[] bt = strSrc.getBytes("utf-8");
+ try {
+ if (encName == null || encName.equals("")) {
+ encName = "SHA-256";
+ }
+ md = MessageDigest.getInstance(encName);
+ md.update(bt);
+ strDes = bytes2Hex(md.digest()); // to HexString
+ } catch (NoSuchAlgorithmException e) {
+ return null;
+ }
+ return strDes;
+ }
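+
+ // Illustrative example: Encrypt("hello", null) falls back to SHA-256 and returns the
+ // lower-case hex digest of the UTF-8 bytes of "hello".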
+
+ public static String bytes2Hex(byte[] bts) {
+ String des = "";
+ String tmp = null;
+ for (int i = 0; i < bts.length; i++) {
+ tmp = (Integer.toHexString(bts[i] & 0xFF));
+ if (tmp.length() == 1) {
+ des += "0";
+ }
+ des += tmp;
+ }
+ return des;
+ }
+
+ public static void main(String[] args) throws IOException {
+ if (StringUtils.isBlank(args[0])) {
+ throw new IllegalArgumentException("Invalid applicationId: it cannot be empty");
+ }
+ Map<String, String> parms = new HashMap<>();
+ String timestampStr = String.valueOf(System.currentTimeMillis());
+ parms.put("applicationId", args[0]);
+ parms.put("app_id", args[1]);
+ parms.put("timestamp", timestampStr);
+ parms.put("nonce", DOCTOR_NONCE);
+ String token = args[2];
+ if (StringUtils.isNotBlank(token)) {
+ String signature =
+ Encrypt(
+ Encrypt(parms.get("app_id") + DOCTOR_NONCE + timestampStr, null)
+ + token,
+ null);
+ parms.put("signature", signature);
+ }
+ System.out.println(parms);
+ }
+}
diff --git a/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/conf/Configuration.scala b/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/conf/Configuration.scala
index 16cac1d2047..a2917041e86 100644
--- a/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/conf/Configuration.scala
+++ b/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/conf/Configuration.scala
@@ -17,7 +17,7 @@
package org.apache.linkis.common.conf
-import org.apache.linkis.common.utils.Logging
+import org.apache.linkis.common.utils.{Logging, RSAUtils}
import org.apache.commons.lang3.StringUtils
@@ -31,6 +31,8 @@ object Configuration extends Logging {
val IS_TEST_MODE = CommonVars("wds.linkis.test.mode", false)
+ val LINKIS_SYS_NAME = CommonVars("linkis.system.name", "")
+
val IS_PROMETHEUS_ENABLE = CommonVars("wds.linkis.prometheus.enable", false)
val IS_MULTIPLE_YARN_CLUSTER = CommonVars("linkis.multiple.yarn.cluster", false).getValue
@@ -55,6 +57,9 @@ object Configuration extends Logging {
val CLOUD_CONSOLE_VARIABLE_SPRING_APPLICATION_NAME =
CommonVars("wds.linkis.console.variable.application.name", "linkis-ps-publicservice")
+ val JOBHISTORY_SPRING_APPLICATION_NAME =
+ CommonVars("wds.linkis.jobhistory.application.name", "linkis-ps-jobhistory")
+
// read from env
val PREFER_IP_ADDRESS: Boolean = CommonVars(
"linkis.discovery.prefer-ip-address",
@@ -67,6 +72,9 @@ object Configuration extends Logging {
val JOB_HISTORY_DEPARTMENT_ADMIN = CommonVars("wds.linkis.jobhistory.department.admin", "hadoop")
+ val JOB_RESULT_DEPARTMENT_LIMIT =
+ CommonVars("linkis.jobhistory.result.limit.department", "")
+
// Only the specified token has permission to call some api
val GOVERNANCE_STATION_ADMIN_TOKEN_STARTWITH = "ADMIN-"
@@ -75,13 +83,26 @@ object Configuration extends Logging {
val IS_VIEW_FS_ENV = CommonVars("wds.linkis.env.is.viewfs", true)
+ val LINKIS_RSA_TOKEN_SWITCH = CommonVars("linkis.rsa.token.switch", false).getValue
+
+ val LINKIS_RSA_PUBLIC_KEY = CommonVars("linkis.rsa.public.key", "")
+
+ val LINKIS_RSA_PRIVATE_KEY = CommonVars("linkis.rsa.private.key", "")
+
val ERROR_MSG_TIP =
CommonVars(
"linkis.jobhistory.error.msg.tip",
"The request interface %s is abnormal. You can try to troubleshoot common problems in the knowledge base document"
)
- val LINKIS_TOKEN = CommonVars("wds.linkis.token", "")
+ val LINKIS_TOKEN = CommonVars("wds.linkis.token", "LINKIS-UNAVAILABLE-TOKEN")
+
+ val HDFS_HOUR_DIR_SWITCH = CommonVars("linkis.hdfs.hour.dir.switch", false).getValue
+
+ val LINKIS_KEYTAB_SWITCH: Boolean = CommonVars("linkis.keytab.switch", false).getValue
+
+ val METRICS_INCREMENTAL_UPDATE_ENABLE =
+ CommonVars[Boolean]("linkis.jobhistory.metrics.incremental.update.enable", false)
val GLOBAL_CONF_CHN_NAME = "全局设置"
@@ -97,7 +118,14 @@ object Configuration extends Logging {
if (StringUtils.isBlank(token)) {
false
} else {
- token.toUpperCase().startsWith(GOVERNANCE_STATION_ADMIN_TOKEN_STARTWITH)
+ if (Configuration.LINKIS_RSA_TOKEN_SWITCH && token.startsWith(RSAUtils.PREFIX)) {
+ RSAUtils
+ .dncryptWithLinkisPublicKey(token)
+ .toUpperCase()
+ .contains(GOVERNANCE_STATION_ADMIN_TOKEN_STARTWITH)
+ } else {
+ token.toUpperCase().contains(GOVERNANCE_STATION_ADMIN_TOKEN_STARTWITH)
+ }
}
}
@@ -159,4 +187,9 @@ object Configuration extends Logging {
case _ => creator
}
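+ // Returns false when the department id appears in the comma-separated
+ // linkis.jobhistory.result.limit.department list (hot value), i.e. result sets are restricted for it.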
+ def canResultSetByDepartment(departmentId: String): Boolean = {
+ val jobResultLimit = JOB_RESULT_DEPARTMENT_LIMIT.getHotValue.split(",")
+ !jobResultLimit.exists(departmentId.equalsIgnoreCase)
+ }
+
}
diff --git a/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/utils/CodeAndRunTypeUtils.scala b/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/utils/CodeAndRunTypeUtils.scala
index e6e63a97798..9bbd3201186 100644
--- a/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/utils/CodeAndRunTypeUtils.scala
+++ b/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/utils/CodeAndRunTypeUtils.scala
@@ -33,7 +33,7 @@ object CodeAndRunTypeUtils {
*/
val CODE_TYPE_AND_RUN_TYPE_RELATION = CommonVars(
"linkis.codeType.language.relation",
- "sql=>sql|hql|jdbc|hive|psql|fql|tsql|nebula|ngql,python=>python|py|pyspark,java=>java,scala=>scala,shell=>sh|shell,json=>json|data_calc"
+ "sql=>sql|hql|jdbc|hive|psql|fql|tsql|nebula|ngql|aisql|starrocks,python=>python|py|pyspark|py3,java=>java,scala=>scala,shell=>sh|shell,json=>json|data_calc"
)
val LANGUAGE_TYPE_SQL = "sql"
@@ -48,6 +48,8 @@ object CodeAndRunTypeUtils {
val LANGUAGE_TYPE_JSON = "json"
+ val LANGUAGE_TYPE_AI_SQL = "aisql"
+
private var codeTypeAndLanguageTypeRelationMap: Map[String, List[String]] = null
/**
diff --git a/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/utils/LDAPUtils.scala b/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/utils/LDAPUtils.scala
index e021b9a482e..f298d5af5bc 100644
--- a/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/utils/LDAPUtils.scala
+++ b/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/utils/LDAPUtils.scala
@@ -46,7 +46,7 @@ object LDAPUtils extends Logging {
private val storeUser: Cache[String, String] = CacheBuilder
.newBuilder()
.maximumSize(1000)
- .expireAfterWrite(60, TimeUnit.MINUTES)
+ .expireAfterWrite(20, TimeUnit.MINUTES)
.removalListener(new RemovalListener[String, String] {
override def onRemoval(removalNotification: RemovalNotification[String, String]): Unit = {
diff --git a/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/utils/RSAUtils.scala b/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/utils/RSAUtils.scala
index bded200e244..4a34db89765 100644
--- a/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/utils/RSAUtils.scala
+++ b/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/utils/RSAUtils.scala
@@ -17,17 +17,23 @@
package org.apache.linkis.common.utils
+import org.apache.linkis.common.conf.Configuration
+
import org.apache.commons.codec.binary.Hex
import org.apache.commons.net.util.Base64
import javax.crypto.Cipher
+import java.net.URLDecoder
import java.nio.charset.StandardCharsets
-import java.security.{KeyPair, KeyPairGenerator, PrivateKey, PublicKey}
+import java.security.{KeyFactory, KeyPair, KeyPairGenerator, PrivateKey, PublicKey}
+import java.security.spec.{PKCS8EncodedKeySpec, X509EncodedKeySpec}
-object RSAUtils {
+object RSAUtils extends Logging {
private implicit val keyPair = genKeyPair(2048)
+ implicit val PREFIX = "{RSA}"
+
def genKeyPair(keyLength: Int): KeyPair = {
val keyPair = KeyPairGenerator.getInstance("RSA")
keyPair.initialize(keyLength)
@@ -64,4 +70,112 @@ object RSAUtils {
}
def decrypt(data: Array[Byte]): Array[Byte] = decrypt(data, keyPair.getPrivate)
+
+ /**
+ * Convert a public key string into a PublicKey object.
+ *
+ * @param publicKeyStr
+ * Base64-encoded public key string
+ * @return
+ * the resulting PublicKey object
+ */
+ def stringToPublicKey(publicKeyStr: String): PublicKey = {
+ val keyBytes = Base64.decodeBase64(publicKeyStr)
+ val keySpec = new X509EncodedKeySpec(keyBytes)
+ val keyFactory = KeyFactory.getInstance("RSA")
+ keyFactory.generatePublic(keySpec)
+ }
+
+ /**
+ * Convert a private key string into a PrivateKey object.
+ *
+ * @param privateKeyStr
+ * Base64-encoded private key string
+ * @return
+ * the resulting PrivateKey object
+ */
+ def stringToPrivateKey(privateKeyStr: String): PrivateKey = {
+ val keyBytes = Base64.decodeBase64(privateKeyStr)
+ val keySpec = new PKCS8EncodedKeySpec(keyBytes)
+ val keyFactory = KeyFactory.getInstance("RSA")
+ keyFactory.generatePrivate(keySpec)
+ }
+
+ /**
+ * Encrypt data with the public key from the Linkis configuration.
+ *
+ * @param data
+ * raw data string to encrypt
+ * @return
+ * encrypted data string, carrying the prefix
+ */
+ def encryptWithLinkisPublicKey(data: String): String = {
+ // Read the public and private key strings from the configuration
+ val publicKey = Configuration.LINKIS_RSA_PUBLIC_KEY.getValue
+ val privateKey = Configuration.LINKIS_RSA_PRIVATE_KEY.getValue
+ // Convert the key strings into a KeyPair
+ val keyPair =
+ new KeyPair(RSAUtils.stringToPublicKey(publicKey), RSAUtils.stringToPrivateKey(privateKey))
+ // Encrypt the data with the public key
+ val encryptedData = RSAUtils.encrypt(data.getBytes, keyPair.getPublic)
+ // Base64-encode the encrypted data and prepend the prefix
+ val encodedEncryptedData =
+ PREFIX + new String(Base64.encodeBase64URLSafe(encryptedData))
+ encodedEncryptedData
+ }
+
+ /**
+ * Decrypt data with the private key from the Linkis configuration.
+ *
+ * @param data
+ * encrypted data string to decrypt, carrying the prefix
+ * @return
+ * the decrypted raw data string
+ */
+ def dncryptWithLinkisPublicKey(data: String): String = {
+ // Read the public and private key strings from the configuration
+ val publicKey = Configuration.LINKIS_RSA_PUBLIC_KEY.getValue
+ val privateKey = Configuration.LINKIS_RSA_PRIVATE_KEY.getValue
+ val decodedData = URLDecoder.decode(data, "UTF-8")
+ // Convert the key strings into a KeyPair
+ val keyPair =
+ new KeyPair(RSAUtils.stringToPublicKey(publicKey), RSAUtils.stringToPrivateKey(privateKey))
+ // Check whether the data starts with the expected prefix
+ if (decodedData.startsWith(PREFIX)) {
+ // Strip the prefix to get the encrypted payload
+ val dataSub = decodedData.substring(5)
+ // Base64-decode the encrypted payload
+ val decodedEncryptedData = Base64.decodeBase64(dataSub)
+ // Decrypt the payload with the private key
+ val decryptedData = RSAUtils.decrypt(decodedEncryptedData, keyPair.getPrivate)
+ // Convert the decrypted bytes back into a string
+ val decryptedString = new String(decryptedData)
+ decryptedString
+ } else {
+ logger.warn(s"Token does not start with $PREFIX, skipping decryption")
+ data
+ }
+ }
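+
+ // Illustrative round trip (assuming linkis.rsa.public.key and linkis.rsa.private.key are set):
+ // dncryptWithLinkisPublicKey(encryptWithLinkisPublicKey("data")) returns "data".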
+
+ /**
+ * Extract the trailing half of the given token; short tokens are returned unchanged.
+ *
+ * @param token
+ * the full input token string.
+ * @return
+ * the extracted trailing part of the token.
+ */
+ def tokenSubRule(token: String): String = {
+ val lowerToken = token.toLowerCase()
+ // Conditions for returning the token unchanged:
+ // 1. it ends with "-auth" (case-insensitive) and is shorter than 12 characters
+ // 2. or it is shorter than 10 characters
+ if ((lowerToken.endsWith("-auth") && lowerToken.length < 12) || lowerToken.length < 10) {
+ token // return as-is, no truncation
+ } else {
+ // otherwise take the trailing half (original logic)
+ token.substring(token.length / 2, token.length)
+ }
+ }
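+
+ // Illustrative examples of the rules above: a short token such as "abc-auth" is returned
+ // unchanged, while a 20-character token yields only its last 10 characters.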
+
}
diff --git a/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/utils/Utils.scala b/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/utils/Utils.scala
index deac2f24645..67dfc0971a6 100644
--- a/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/utils/Utils.scala
+++ b/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/utils/Utils.scala
@@ -183,6 +183,15 @@ object Utils extends Logging {
): ExecutionContextExecutorService =
ExecutionContext.fromExecutorService(newCachedThreadPool(threadNum, threadName, isDaemon))
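+
+ /** Variant of newCachedExecutionContext that also returns the backing ThreadPoolExecutor. */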
+ def newCachedExecutionContextWithExecutor(
+ threadNum: Int,
+ threadName: String,
+ isDaemon: Boolean = true
+ ): (ExecutionContextExecutorService, ThreadPoolExecutor) = {
+ val threadPool: ThreadPoolExecutor = newCachedThreadPool(threadNum, threadName, isDaemon)
+ (ExecutionContext.fromExecutorService(threadPool), threadPool)
+ }
+
def newFixedThreadPool(
threadNum: Int,
threadName: String,
diff --git a/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/variable/DateTypeUtils.scala b/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/variable/DateTypeUtils.scala
index df6dff865d7..ed97be83daf 100644
--- a/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/variable/DateTypeUtils.scala
+++ b/linkis-commons/linkis-common/src/main/scala/org/apache/linkis/common/variable/DateTypeUtils.scala
@@ -46,6 +46,10 @@ object DateTypeUtils {
override protected def initialValue = new SimpleDateFormat("yyyy-MM-dd HH")
}
+ val dateFormatSecondLocal = new ThreadLocal[SimpleDateFormat]() {
+ override protected def initialValue = new SimpleDateFormat("yyyyMMddHHmmss")
+ }
+
/**
* Get Today"s date
*
diff --git a/linkis-commons/linkis-common/src/test/java/org/apache/linkis/common/utils/SecurityUtilsTest.java b/linkis-commons/linkis-common/src/test/java/org/apache/linkis/common/utils/SecurityUtilsTest.java
index d1f2d5d123c..95b2c3f0e21 100644
--- a/linkis-commons/linkis-common/src/test/java/org/apache/linkis/common/utils/SecurityUtilsTest.java
+++ b/linkis-commons/linkis-common/src/test/java/org/apache/linkis/common/utils/SecurityUtilsTest.java
@@ -18,8 +18,11 @@
package org.apache.linkis.common.utils;
import org.apache.linkis.common.conf.BDPConfiguration;
+import org.apache.linkis.common.conf.Configuration;
import org.apache.linkis.common.exception.LinkisSecurityException;
+import org.apache.commons.lang3.StringUtils;
+
import java.util.HashMap;
import java.util.Map;
@@ -111,6 +114,18 @@ public void testGetUrl() {
Assertions.assertEquals(baseUrl + "?k1=v1&" + securityStr, SecurityUtils.getJdbcUrl(url3));
}
+ @Test
+ public void testRSA() {
+ String originalData = "rsa-test-str";
+ String pubKey = Configuration.LINKIS_RSA_PUBLIC_KEY().getValue();
+ String privKey = Configuration.LINKIS_RSA_PRIVATE_KEY().getValue();
+ if (StringUtils.isNotEmpty(pubKey) && StringUtils.isNotEmpty(privKey)) {
+ String encryptData = RSAUtils.encryptWithLinkisPublicKey(originalData);
+ String dncryptData = RSAUtils.dncryptWithLinkisPublicKey(encryptData);
+ Assertions.assertEquals(dncryptData, originalData);
+ }
+ }
+
@Test
public void testCheckJdbcConnParams() {
String host = "127.0.0.1";
diff --git a/linkis-commons/linkis-common/src/test/java/org/apache/linkis/common/variable/VariableOperationTest.java b/linkis-commons/linkis-common/src/test/java/org/apache/linkis/common/variable/VariableOperationTest.java
index 5d77cb323bc..51978feb05f 100644
--- a/linkis-commons/linkis-common/src/test/java/org/apache/linkis/common/variable/VariableOperationTest.java
+++ b/linkis-commons/linkis-common/src/test/java/org/apache/linkis/common/variable/VariableOperationTest.java
@@ -38,10 +38,10 @@ public class VariableOperationTest {
@Test
public void testSqlFormat() throws VariableOperationFailedException {
- String jsonOld = "select \n" + "\"&{yyyy-MM}\"";
+ String jsonOld = "select &{yyyy-MM}";
String jsonNew = VariableOperationUtils.replaces(zonedDateTime, jsonOld);
System.out.println(jsonNew);
- assertEquals(jsonNew, "select \n" + "\"2022-04\"");
+ assertEquals(jsonNew, "select 2022-04");
}
@Test
diff --git a/linkis-commons/linkis-common/src/test/scala/org/apache/linkis/common/utils/CodeAndRunTypeUtilsTest.scala b/linkis-commons/linkis-common/src/test/scala/org/apache/linkis/common/utils/CodeAndRunTypeUtilsTest.scala
index 32ce3b37bd1..8e466d814a0 100644
--- a/linkis-commons/linkis-common/src/test/scala/org/apache/linkis/common/utils/CodeAndRunTypeUtilsTest.scala
+++ b/linkis-commons/linkis-common/src/test/scala/org/apache/linkis/common/utils/CodeAndRunTypeUtilsTest.scala
@@ -28,7 +28,7 @@ class CodeAndRunTypeUtilsTest {
val codeTypeAndRunTypeRelationMap = CodeAndRunTypeUtils.getCodeTypeAndLanguageTypeRelationMap
assertTrue(codeTypeAndRunTypeRelationMap.nonEmpty)
assertTrue(codeTypeAndRunTypeRelationMap.keySet.contains("sql"))
- assertEquals(3, codeTypeAndRunTypeRelationMap("python").size)
+ assertEquals(4, codeTypeAndRunTypeRelationMap("python").size)
}
@Test
diff --git a/linkis-commons/linkis-hadoop-common/src/main/scala/org/apache/linkis/hadoop/common/conf/HadoopConf.scala b/linkis-commons/linkis-hadoop-common/src/main/scala/org/apache/linkis/hadoop/common/conf/HadoopConf.scala
index 02e1762e2e8..1a75418dfc3 100644
--- a/linkis-commons/linkis-hadoop-common/src/main/scala/org/apache/linkis/hadoop/common/conf/HadoopConf.scala
+++ b/linkis-commons/linkis-hadoop-common/src/main/scala/org/apache/linkis/hadoop/common/conf/HadoopConf.scala
@@ -30,6 +30,8 @@ object HadoopConf {
val KEYTAB_FILE = CommonVars("wds.linkis.keytab.file", "/appcom/keytab/")
+ val LINKIS_KEYTAB_FILE = CommonVars("linkis.copy.keytab.file", "/mnt/bdap/keytab/")
+
val EXTERNAL_KEYTAB_FILE_PREFIX =
CommonVars("linkis.external.keytab.file.prefix", "/appcom/config/external-conf/keytab")
@@ -65,7 +67,7 @@ object HadoopConf {
CommonVars("linkis.hadoop.hdfs.cache.close.enable", true).getValue
val HDFS_ENABLE_NOT_CLOSE_USERS =
- CommonVars("linkis.hadoop.hdfs.cache.not.close.users", "").getValue
+ CommonVars("linkis.hadoop.hdfs.cache.not.close.users", "hadoop").getValue
val HDFS_ENABLE_CACHE_IDLE_TIME =
CommonVars("wds.linkis.hadoop.hdfs.cache.idle.time", 3 * 60 * 1000).getValue
diff --git a/linkis-commons/linkis-hadoop-common/src/main/scala/org/apache/linkis/hadoop/common/utils/HDFSUtils.scala b/linkis-commons/linkis-hadoop-common/src/main/scala/org/apache/linkis/hadoop/common/utils/HDFSUtils.scala
index d4b6af555ab..f6d91edbad2 100644
--- a/linkis-commons/linkis-hadoop-common/src/main/scala/org/apache/linkis/hadoop/common/utils/HDFSUtils.scala
+++ b/linkis-commons/linkis-hadoop-common/src/main/scala/org/apache/linkis/hadoop/common/utils/HDFSUtils.scala
@@ -17,7 +17,8 @@
package org.apache.linkis.hadoop.common.utils
-import org.apache.linkis.common.utils.{Logging, Utils}
+import org.apache.linkis.common.conf.Configuration.LINKIS_KEYTAB_SWITCH
+import org.apache.linkis.common.utils.{AESUtils, Logging, Utils}
import org.apache.linkis.hadoop.common.conf.HadoopConf
import org.apache.linkis.hadoop.common.conf.HadoopConf._
import org.apache.linkis.hadoop.common.entity.HDFSFileSystemContainer
@@ -29,8 +30,10 @@ import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.security.UserGroupInformation
import java.io.File
-import java.nio.file.Paths
+import java.nio.file.{Files, Paths}
+import java.nio.file.attribute.PosixFilePermissions
import java.security.PrivilegedExceptionAction
+import java.util.Base64
import java.util.concurrent.{ConcurrentHashMap, TimeUnit}
import java.util.concurrent.atomic.AtomicLong
@@ -44,6 +47,7 @@ object HDFSUtils extends Logging {
private val LOCKER_SUFFIX = "_HDFS"
private val DEFAULT_CACHE_LABEL = "default"
private val JOINT = "_"
+ val KEYTAB_SUFFIX = ".keytab"
private val count = new AtomicLong
@@ -269,13 +273,13 @@ object HDFSUtils extends Logging {
def getUserGroupInformation(userName: String, label: String): UserGroupInformation = {
if (isKerberosEnabled(label)) {
if (!isKeytabProxyUserEnabled(label)) {
- val path = new File(getKeytabPath(label), userName + ".keytab").getPath
+ val path = getLinkisUserKeytabFile(userName, label)
val user = getKerberosUser(userName, label)
UserGroupInformation.setConfiguration(getConfigurationByLabel(userName, label))
UserGroupInformation.loginUserFromKeytabAndReturnUGI(user, path)
} else {
val superUser = getKeytabSuperUser(label)
- val path = new File(getKeytabPath(label), superUser + ".keytab").getPath
+ val path = getLinkisUserKeytabFile(superUser, label)
val user = getKerberosUser(superUser, label)
UserGroupInformation.setConfiguration(getConfigurationByLabel(superUser, label))
UserGroupInformation.createProxyUser(
@@ -340,6 +344,19 @@ object HDFSUtils extends Logging {
}
}
+ def getLinkisKeytabPath(label: String): String = {
+ if (label == null) {
+ LINKIS_KEYTAB_FILE.getValue
+ } else {
+ val prefix = if (EXTERNAL_KEYTAB_FILE_PREFIX.getValue.endsWith("/")) {
+ EXTERNAL_KEYTAB_FILE_PREFIX.getValue
+ } else {
+ EXTERNAL_KEYTAB_FILE_PREFIX.getValue + "/"
+ }
+ prefix + label
+ }
+ }
+
private def kerberosValueMapParser(configV: String): Map[String, String] = {
val confDelimiter = ","
if (configV == null || "".equals(configV)) {
@@ -363,4 +380,20 @@ object HDFSUtils extends Logging {
}
}
+ private def getLinkisUserKeytabFile(userName: String, label: String): String = {
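+ // When linkis.keytab.switch is enabled, the on-disk keytab is assumed to be AES-encrypted:
+ // it is decrypted into a temporary, owner-only readable file and that path is returned.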
+ val path = if (LINKIS_KEYTAB_SWITCH) {
+ // Read the encrypted keytab file
+ val keytabBytes = Files.readAllBytes(Paths.get(getLinkisKeytabPath(label), userName + KEYTAB_SUFFIX))
+ // Decrypt the keytab content
+ val decryptedContent = AESUtils.decrypt(keytabBytes, AESUtils.PASSWORD)
+ val tempFile = Files.createTempFile(userName, KEYTAB_SUFFIX)
+ Files.setPosixFilePermissions(tempFile, PosixFilePermissions.fromString("rw-------"))
+ Files.write(tempFile, decryptedContent)
+ tempFile.toString
+ } else {
+ new File(getKeytabPath(label), userName + KEYTAB_SUFFIX).getPath
+ }
+ path
+ }
+
}
diff --git a/linkis-commons/linkis-httpclient/src/main/scala/org/apache/linkis/httpclient/AbstractHttpClient.scala b/linkis-commons/linkis-httpclient/src/main/scala/org/apache/linkis/httpclient/AbstractHttpClient.scala
index 5e425401020..84fb6320c5a 100644
--- a/linkis-commons/linkis-httpclient/src/main/scala/org/apache/linkis/httpclient/AbstractHttpClient.scala
+++ b/linkis-commons/linkis-httpclient/src/main/scala/org/apache/linkis/httpclient/AbstractHttpClient.scala
@@ -68,6 +68,7 @@ import org.apache.http.impl.client.{
HttpClientBuilder,
HttpClients
}
+import org.apache.http.impl.conn.PoolingHttpClientConnectionManager
import org.apache.http.message.BasicNameValuePair
import org.apache.http.ssl.SSLContextBuilder
import org.apache.http.util.EntityUtils
@@ -90,11 +91,14 @@ abstract class AbstractHttpClient(clientConfig: ClientConfig, clientName: String
protected val cookieStore = new BasicCookieStore
+ protected val connectionManager = new PoolingHttpClientConnectionManager
+
private val httpClientBuilder: HttpClientBuilder = HttpClients
.custom()
.setDefaultCookieStore(cookieStore)
.setMaxConnTotal(clientConfig.getMaxConnection)
.setMaxConnPerRoute(clientConfig.getMaxConnection / 2)
+ .setConnectionManager(connectionManager)
protected val httpClient: CloseableHttpClient = if (clientConfig.isSSL) {
val sslContext: SSLContext =
@@ -161,7 +165,6 @@ abstract class AbstractHttpClient(clientConfig: ClientConfig, clientName: String
val prepareReqTime = System.currentTimeMillis - startTime
prepareCookie(action)
val attempts = new util.ArrayList[Long]()
-
def addAttempt(): CloseableHttpResponse = {
val req = prepareReq(action)
val startTime = System.currentTimeMillis
@@ -602,4 +605,16 @@ abstract class AbstractHttpClient(clientConfig: ClientConfig, clientName: String
httpClient.close()
}
+ def getHttpConnectionStats: util.HashMap[String, Int] = {
+ val totalStats = connectionManager.getTotalStats
+ val clientConnectInfo = new util.HashMap[String, Int]()
+ clientConnectInfo.put("leased", totalStats.getLeased)
+ clientConnectInfo.put("avaiLabel", totalStats.getAvailable)
+ clientConnectInfo.put("maxTotal", connectionManager.getMaxTotal)
+ logger.info(s"BMLClient:总最大连接数:${connectionManager.getMaxTotal}")
+ logger.info(s"BMLClient:空闲连接数:${totalStats.getAvailable}")
+ logger.info(s"BMLClient:活跃连接数:${totalStats.getLeased}")
+ clientConnectInfo
+ }
+
}
diff --git a/linkis-commons/linkis-mybatis/src/main/java/org/apache/linkis/mybatis/conf/MybatisConfiguration.java b/linkis-commons/linkis-mybatis/src/main/java/org/apache/linkis/mybatis/conf/MybatisConfiguration.java
index d200ab2e095..2356e35b36f 100644
--- a/linkis-commons/linkis-mybatis/src/main/java/org/apache/linkis/mybatis/conf/MybatisConfiguration.java
+++ b/linkis-commons/linkis-mybatis/src/main/java/org/apache/linkis/mybatis/conf/MybatisConfiguration.java
@@ -45,7 +45,7 @@ public class MybatisConfiguration {
public static final CommonVars BDP_SERVER_MYBATIS_DATASOURCE_MAXACTIVE =
CommonVars.apply("wds.linkis.server.mybatis.datasource.maxActive", 20);
public static final CommonVars BDP_SERVER_MYBATIS_DATASOURCE_MAXWAIT =
- CommonVars.apply("wds.linkis.server.mybatis.datasource.maxWait", 6000);
+ CommonVars.apply("wds.linkis.server.mybatis.datasource.maxWait", 60000);
public static final CommonVars BDP_SERVER_MYBATIS_DATASOURCE_TBERM =
CommonVars.apply("wds.linkis.server.mybatis.datasource.timeBetweenEvictionRunsMillis", 60000);
public static final CommonVars BDP_SERVER_MYBATIS_DATASOURCE_MEITM =
diff --git a/linkis-commons/linkis-mybatis/src/test/java/org/apache/linkis/mybatis/conf/MybatisConfigurationTest.java b/linkis-commons/linkis-mybatis/src/test/java/org/apache/linkis/mybatis/conf/MybatisConfigurationTest.java
index 4aeddcdcc84..de7be27180d 100644
--- a/linkis-commons/linkis-mybatis/src/test/java/org/apache/linkis/mybatis/conf/MybatisConfigurationTest.java
+++ b/linkis-commons/linkis-mybatis/src/test/java/org/apache/linkis/mybatis/conf/MybatisConfigurationTest.java
@@ -66,7 +66,7 @@ public void constTest() {
Assertions.assertTrue(1 == bdpServerMybatisDatasourceInitialsize.intValue());
Assertions.assertTrue(1 == bdpServerMybatisDatasourceMinidle.intValue());
Assertions.assertTrue(20 == bdpServerMybatisDatasourceMaxactive.intValue());
- Assertions.assertTrue(6000 == bdpServerMybatisDatasourceMaxwait.intValue());
+ Assertions.assertTrue(60000 == bdpServerMybatisDatasourceMaxwait.intValue());
Assertions.assertTrue(60000 == bdpServerMybatisDatasourceTberm.intValue());
Assertions.assertTrue(300000 == bdpServerMybatisDatasourceMeitm.intValue());
Assertions.assertEquals("SELECT 1", bdpServerMybatisDatasourceValidationquery);
diff --git a/linkis-commons/linkis-protocol/src/main/java/org/apache/linkis/protocol/constants/TaskConstant.java b/linkis-commons/linkis-protocol/src/main/java/org/apache/linkis/protocol/constants/TaskConstant.java
index 48d9bb48463..4fe4ba7b57f 100644
--- a/linkis-commons/linkis-protocol/src/main/java/org/apache/linkis/protocol/constants/TaskConstant.java
+++ b/linkis-commons/linkis-protocol/src/main/java/org/apache/linkis/protocol/constants/TaskConstant.java
@@ -85,4 +85,6 @@ public interface TaskConstant {
String RECEIVER = "receiver";
String SUB_SYSTEM_ID = "subSystemId";
String EXTRA = "extra";
+ String ECM_INSTANCE = "ecmInstance";
+ String ENGINE_LOG_PATH = "engineLogPath";
}
diff --git a/linkis-commons/linkis-scheduler/src/main/java/org/apache/linkis/scheduler/util/SchedulerUtils.java b/linkis-commons/linkis-scheduler/src/main/java/org/apache/linkis/scheduler/util/SchedulerUtils.java
new file mode 100644
index 00000000000..62191aa20e4
--- /dev/null
+++ b/linkis-commons/linkis-scheduler/src/main/java/org/apache/linkis/scheduler/util/SchedulerUtils.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.linkis.scheduler.util;
+
+import org.apache.linkis.scheduler.conf.SchedulerConfiguration;
+
+import org.apache.commons.lang3.StringUtils;
+
+public class SchedulerUtils {
+ private static final String EVENT_ID_SPLIT = "_";
+ private static final String ALL_CREATORS = "ALL_CREATORS";
+ private static final String SPACIAL_USER_SPLIT = "_v_";
+
+ /**
+ * Whether the group supports the priority queue, based on the configured user and creator lists.
+ *
+ * @param groupName consumer group name
+ * @return true if priority scheduling applies to this group
+ */
+ public static boolean isSupportPriority(String groupName) {
+ String users = SchedulerConfiguration.SUPPORT_PRIORITY_TASK_USERS();
+ if (StringUtils.isEmpty(users)) {
+ return false;
+ }
+ String userName = getUserFromGroupName(groupName);
+ if (StringUtils.isEmpty(userName)) {
+ return false;
+ }
+ String creators = SchedulerConfiguration.SUPPORT_PRIORITY_TASK_CREATORS();
+ creators = creators.toLowerCase();
+ users = users.toLowerCase();
+ if (ALL_CREATORS.equalsIgnoreCase(creators)) {
+ return users.contains(userName.toLowerCase());
+ } else {
+ String creatorName = getCreatorFromGroupName(groupName);
+ return users.contains(userName.toLowerCase()) && creators.contains(creatorName.toLowerCase());
+ }
+ }
+
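+ // Group names are assumed to follow "<creator>_<user>_<engineType>"; e.g. a hypothetical
+ // "IDE_hadoop_spark" yields creator "IDE", user "hadoop" and engine type "spark". The "_v_"
+ // marker keeps special user names of the form "v_xxx" intact.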
+ public static String getUserFromGroupName(String groupName) {
+ if (groupName.contains(SPACIAL_USER_SPLIT)) {
+ int vIndex = groupName.lastIndexOf(SPACIAL_USER_SPLIT);
+ int lastIndex = groupName.lastIndexOf(EVENT_ID_SPLIT);
+ String user = groupName.substring(vIndex + 1, lastIndex);
+ return user;
+ }
+ String[] groupNames = groupName.split(EVENT_ID_SPLIT);
+ String user = groupNames[groupNames.length - 2];
+ return user;
+ }
+
+ public static String getEngineTypeFromGroupName(String groupName) {
+ String[] groupNames = groupName.split(EVENT_ID_SPLIT);
+ String ecType = groupNames[groupNames.length - 1];
+ return ecType;
+ }
+
+ public static String getCreatorFromGroupName(String groupName) {
+ if (groupName.contains(SPACIAL_USER_SPLIT)) {
+ int vIndex = groupName.lastIndexOf(SPACIAL_USER_SPLIT);
+ String creatorName = groupName.substring(0, vIndex);
+ return creatorName;
+ }
+ int lastIndex = groupName.lastIndexOf(EVENT_ID_SPLIT);
+ int secondLastIndex = groupName.lastIndexOf(EVENT_ID_SPLIT, lastIndex - 1);
+ String creatorName = groupName.substring(0, secondLastIndex);
+ return creatorName;
+ }
+}
diff --git a/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/conf/SchedulerConfiguration.scala b/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/conf/SchedulerConfiguration.scala
index e3b76ac4e76..7af66d2e0c3 100644
--- a/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/conf/SchedulerConfiguration.scala
+++ b/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/conf/SchedulerConfiguration.scala
@@ -21,6 +21,9 @@ import org.apache.linkis.common.conf.{CommonVars, TimeType}
object SchedulerConfiguration {
+ val PFIFO_SCHEDULER_STRATEGY = "pfifo"
+ val FIFO_SCHEDULER_STRATEGY = "fifo"
+
val FIFO_CONSUMER_AUTO_CLEAR_ENABLED =
CommonVars("wds.linkis.fifo.consumer.auto.clear.enabled", true)
@@ -36,4 +39,19 @@ object SchedulerConfiguration {
val MAX_GROUP_ALTER_WAITING_SIZE =
CommonVars("linkis.fifo.consumer.group.max.alter.waiting.size", 1000).getValue
+ // Supported queue strategies: fifo (default) and pfifo (priority fifo)
+ val FIFO_QUEUE_STRATEGY =
+ CommonVars("linkis.fifo.queue.strategy", FIFO_SCHEDULER_STRATEGY).getValue
+
+ val SUPPORT_PRIORITY_TASK_USERS =
+ CommonVars("linkis.fifo.queue.support.priority.users", "").getValue
+
+ val SUPPORT_PRIORITY_TASK_CREATORS =
+ CommonVars("linkis.fifo.queue.support.priority.creators", "ALL_CREATORS").getValue
+
+ val MAX_PRIORITY_QUEUE_CACHE_SIZE =
+ CommonVars("linkis.fifo.priority.queue.max.cache.size", 1000).getValue
+
+ val ENGINE_PRIORITY_RUNTIME_KEY = "wds.linkis.engine.runtime.priority"
+
}
diff --git a/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/executer/ExecuteResponse.scala b/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/executer/ExecuteResponse.scala
index bcadf99d2c1..0f0e5c481d9 100644
--- a/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/executer/ExecuteResponse.scala
+++ b/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/executer/ExecuteResponse.scala
@@ -31,6 +31,9 @@ case class AliasOutputExecuteResponse(alias: String, output: String) extends Out
case class ErrorExecuteResponse(message: String, t: Throwable) extends CompletedExecuteResponse
+case class ErrorRetryExecuteResponse(message: String, index: Int, t: Throwable)
+ extends ExecuteResponse
+
case class IncompleteExecuteResponse(message: String) extends ExecuteResponse
case class SubmitResponse(taskId: String) extends ExecuteResponse
diff --git a/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/listener/JobRetryListener.scala b/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/listener/JobRetryListener.scala
new file mode 100644
index 00000000000..4e95a4604b5
--- /dev/null
+++ b/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/listener/JobRetryListener.scala
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.linkis.scheduler.listener
+
+import org.apache.linkis.scheduler.queue.Job
+
+import java.util
+
+trait JobRetryListener extends SchedulerListener {
+
+ def onJobFailed(
+ job: Job,
+ code: String,
+ props: util.Map[String, AnyRef],
+ errorCode: Int,
+ errorDesc: String
+ ): Boolean
+
+}
diff --git a/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/queue/Job.scala b/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/queue/Job.scala
index d513ecc050f..8034841b4ce 100644
--- a/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/queue/Job.scala
+++ b/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/queue/Job.scala
@@ -22,8 +22,9 @@ import org.apache.linkis.common.listener.ListenerEventBus
import org.apache.linkis.common.log.LogUtils
import org.apache.linkis.common.utils.{Logging, Utils}
import org.apache.linkis.protocol.engine.JobProgressInfo
+import org.apache.linkis.scheduler.errorcode.LinkisSchedulerErrorCodeSummary.TASK_STATUS_FLIP_ERROR
import org.apache.linkis.scheduler.event._
-import org.apache.linkis.scheduler.exception.LinkisJobRetryException
+import org.apache.linkis.scheduler.exception.{LinkisJobRetryException, SchedulerErrorException}
import org.apache.linkis.scheduler.executer._
import org.apache.linkis.scheduler.future.BDPFuture
import org.apache.linkis.scheduler.listener._
@@ -33,6 +34,7 @@ import org.apache.commons.lang3.StringUtils
import org.apache.commons.lang3.exception.ExceptionUtils
import java.io.Closeable
+import java.text.MessageFormat
import java.util.concurrent.Future
abstract class Job extends Runnable with SchedulerEvent with Closeable with Logging {
@@ -50,6 +52,7 @@ abstract class Job extends Runnable with SchedulerEvent with Closeable with Logg
private var executor: Executor = _
private var jobListener: Option[JobListener] = None
private var logListener: Option[LogListener] = None
+ private var jobRetryListener: Option[JobRetryListener] = None
private var progressListener: Option[ProgressListener] = None
private[linkis] var interrupt = false
private var progress: Float = 0f
@@ -152,6 +155,12 @@ abstract class Job extends Runnable with SchedulerEvent with Closeable with Logg
def getLogListener: Option[LogListener] = logListener
+ def setJobRetryListener(jobRetryListener: JobRetryListener): Unit = this.jobRetryListener = Some(
+ jobRetryListener
+ )
+
+ def getJobRetryListener: Option[JobRetryListener] = jobRetryListener
+
def setProgressListener(progressListener: ProgressListener): Unit = this.progressListener = Some(
progressListener
)
@@ -207,6 +216,18 @@ abstract class Job extends Runnable with SchedulerEvent with Closeable with Logg
jobListener.foreach(_.onJobCompleted(this))
}
+ protected def transitionWaitForRetry(): Unit = {
+ val state: SchedulerEventState = getState
+ if (state != Failed && state != Running) {
+ throw new SchedulerErrorException(
+ TASK_STATUS_FLIP_ERROR.getErrorCode,
+ MessageFormat.format(TASK_STATUS_FLIP_ERROR.getErrorDesc, state, WaitForRetry)
+ )
+ }
+ logger.info(s"$toString change status ${state} => ${WaitForRetry}.")
+ transition(WaitForRetry)
+ }
+
protected def transitionCompleted(executeCompleted: CompletedExecuteResponse): Unit = {
val state = getState
executeCompleted match {
diff --git a/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/queue/PriorityLoopArrayQueue.scala b/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/queue/PriorityLoopArrayQueue.scala
new file mode 100644
index 00000000000..fd3fecc71b5
--- /dev/null
+++ b/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/queue/PriorityLoopArrayQueue.scala
@@ -0,0 +1,270 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.linkis.scheduler.queue
+
+import org.apache.linkis.common.utils.{Logging, Utils}
+import org.apache.linkis.scheduler.conf.SchedulerConfiguration
+
+import java.util
+import java.util.Comparator
+import java.util.concurrent.PriorityBlockingQueue
+import java.util.concurrent.atomic.AtomicInteger
+import java.util.concurrent.locks.ReentrantReadWriteLock
+
+/**
+ * Element wrapper for the priority queue
+ * @param element
+ *   the actual queued element
+ * @param priority
+ *   priority of the element
+ * @param index
+ *   unique index of the element
+ */
+case class PriorityQueueElement(element: Any, priority: Int, index: Int)
+
+/**
+ * Fixed-size collection: once full, the earliest inserted element is evicted
+ * @param maxSize
+ *   maximum size of the collection
+ * @tparam K
+ * @tparam V
+ */
+class FixedSizeCollection[K, V](val maxSize: Int) extends util.LinkedHashMap[K, V] {
+ // Return true when the size exceeds the maximum, so the eldest entry is removed automatically
+ protected override def removeEldestEntry(eldest: util.Map.Entry[K, V]): Boolean = size > maxSize
+}
+
+/**
+ * Priority queue; elements with the same priority are consumed in FIFO order
+ * @param group
+ */
+class PriorityLoopArrayQueue(var group: Group) extends ConsumeQueue with Logging {
+
+ private val maxCapacity = group.getMaximumCapacity
+
+ /** The underlying priority queue */
+ private val priorityEventQueue = new PriorityBlockingQueue[PriorityQueueElement](
+ group.getMaximumCapacity,
+ new Comparator[PriorityQueueElement] {
+
+ override def compare(o1: PriorityQueueElement, o2: PriorityQueueElement): Int =
+ if (o1.priority != o2.priority) o2.priority - o1.priority
+ else o1.index - o2.index
+
+ }
+ )
+
+ /** Counter: 1. earlier elements get smaller values, enforcing FIFO among equal priorities 2. serves as a unique, non-repeating index for queue elements */
+ private val index = new AtomicInteger
+
+ /** Indexes of all elements currently in the queue; added when an element enters the priority queue and removed when it is taken out */
+ private val indexMap = new util.HashMap[Int, SchedulerEvent]()
+
+ /** Cache of already consumed elements with a fixed size (default 1000); entries are added when elements are removed from the priority queue */
+ private val fixedSizeCollection =
+ new FixedSizeCollection[Integer, SchedulerEvent](
+ SchedulerConfiguration.MAX_PRIORITY_QUEUE_CACHE_SIZE
+ )
+
+ private val rwLock = new ReentrantReadWriteLock
+
+ protected[this] var realSize = size
+ override def isEmpty: Boolean = size <= 0
+ override def isFull: Boolean = size >= maxCapacity
+ def size: Int = priorityEventQueue.size
+
+ /**
+ * Add an element to the priority queue
+ * @param element
+ * @return
+ */
+ private def addToPriorityQueue(element: PriorityQueueElement): Boolean = {
+ priorityEventQueue.offer(element)
+ rwLock.writeLock.lock
+ Utils.tryFinally(indexMap.put(element.index, element.element.asInstanceOf[SchedulerEvent]))(
+ rwLock.writeLock.unlock()
+ )
+ true
+ }
+
+ /**
+ * Retrieve and remove the head element of the queue
+ * @return
+ */
+ private def getAndRemoveTop: SchedulerEvent = {
+ val top: PriorityQueueElement = priorityEventQueue.take()
+ rwLock.writeLock.lock
+ Utils.tryFinally {
+ indexMap.remove(top.index)
+ fixedSizeCollection.put(top.index, top.element.asInstanceOf[SchedulerEvent])
+ }(rwLock.writeLock.unlock())
+ top.element.asInstanceOf[SchedulerEvent]
+ }
+
+ override def remove(event: SchedulerEvent): Unit = {
+ get(event).foreach(x => x.cancel())
+ }
+
+ override def getWaitingEvents: Array[SchedulerEvent] = {
+ toIndexedSeq
+ .filter(x =>
+ x.getState.equals(SchedulerEventState.Inited) || x.getState
+ .equals(SchedulerEventState.Scheduled)
+ )
+ .toArray
+ }
+
+ override def clearAll(): Unit = priorityEventQueue synchronized {
+ realSize = 0
+ index.set(0)
+ priorityEventQueue.clear()
+ fixedSizeCollection.clear()
+ indexMap.clear()
+ }
+
+ override def get(event: SchedulerEvent): Option[SchedulerEvent] = {
+ val eventSeq = toIndexedSeq.filter(x => x.getId.equals(event.getId)).seq
+ if (eventSeq.size > 0) Some(eventSeq(0)) else None
+ }
+
+ /**
+ * Get a queue element by its index
+ * @param index
+ * @return
+ */
+ override def get(index: Int): Option[SchedulerEvent] = {
+ if (!indexMap.containsKey(index) && !fixedSizeCollection.containsKey(index)) {
+ throw new IllegalArgumentException(
+ "The index " + index + " has already been deleted, the requested index must be greater than it"
+ )
+ }
+ rwLock.readLock().lock()
+ Utils.tryFinally {
+ if (fixedSizeCollection.get(index) != null) Option(fixedSizeCollection.get(index))
+ else Option(indexMap.get(index))
+ }(rwLock.readLock().unlock())
+ }
+
+ override def getGroup: Group = group
+
+ override def setGroup(group: Group): Unit = {
+ this.group = group
+ }
+
+ def toIndexedSeq: IndexedSeq[SchedulerEvent] = if (size == 0) {
+ IndexedSeq.empty[SchedulerEvent]
+ } else {
+ priorityEventQueue
+ .toArray()
+ .map(_.asInstanceOf[PriorityQueueElement].element.asInstanceOf[SchedulerEvent])
+ .toIndexedSeq
+ }
+
+ def add(event: SchedulerEvent): Int = {
+ // Increment the counter on every add; it keeps FIFO order among elements with the same priority
+ event.setIndex(index.addAndGet(1))
+ addToPriorityQueue(PriorityQueueElement(event, event.getPriority, event.getIndex))
+ event.getIndex
+ }
+
+ override def waitingSize: Int = size
+
+ /**
+ * Add one, if the queue is full, it will block until the queue is
+ * available(添加一个,如果队列满了,将会一直阻塞,直到队列可用)
+ *
+ * @return
+ * Return index subscript(返回index下标)
+ */
+ override def put(event: SchedulerEvent): Int = {
+ add(event)
+ }
+
+ /**
+ * Add one, return None if the queue is full(添加一个,如果队列满了,返回None)
+ *
+ * @return
+ */
+ override def offer(event: SchedulerEvent): Option[Int] = {
+ if (isFull) None else Some(add(event))
+ }
+
+ /**
+ * Get the latest SchedulerEvent of a group, if it does not exist, it will block
+ * (获取某个group最新的SchedulerEvent,如果不存在,就一直阻塞) This method will move the pointer(该方法会移动指针)
+ *
+ * @return
+ */
+ override def take(): SchedulerEvent = {
+ getAndRemoveTop
+ }
+
+ /**
+ * Get the latest SchedulerEvent of a group, if it does not exist, block the maximum waiting
+ * time (获取某个group最新的SchedulerEvent,如果不存在,就阻塞到最大等待时间 ) This method will move the
+ * pointer(该方法会移动指针)
+ * @param mills
+ * Maximum waiting time(最大等待时间)
+ * @return
+ */
+ override def take(mills: Long): Option[SchedulerEvent] = {
+ if (waitingSize == 0) {
+ Thread.sleep(mills)
+ }
+ if (waitingSize == 0) None else Option(getAndRemoveTop)
+ }
+
+ /**
+ * Get the latest SchedulerEvent of a group and move the pointer to the next one. If there is
+ * none, return None directly (获取某个group最新的SchedulerEvent,并移动指针到下一个。如果没有,直接返回None)
+ *
+ * @return
+ */
+ override def poll(): Option[SchedulerEvent] = {
+ if (waitingSize == 0) None
+ else Option(getAndRemoveTop)
+ }
+
+ /**
+ * Only get the latest SchedulerEvent of a group, without moving the pointer. If there is none,
+ * return None directly (只获取某个group最新的SchedulerEvent,并不移动指针。如果没有,直接返回None)
+ *
+ * @return
+ */
+ override def peek(): Option[SchedulerEvent] = {
+ val ele: PriorityQueueElement = priorityEventQueue.peek()
+ if (ele == null) None else Option(ele.element.asInstanceOf[SchedulerEvent])
+ }
+
+ /**
+ * Get the latest SchedulerEvent that satisfies the condition, without moving the pointer. If
+ * there is none, return None directly (获取某个group满足条件的最新的SchedulerEvent,并不移动指针。如果没有,直接返回None)
+ * @param op
+ *   the predicate the event must satisfy
+ * @return
+ */
+ override def peek(op: (SchedulerEvent) => Boolean): Option[SchedulerEvent] = {
+ val ele: PriorityQueueElement = priorityEventQueue.peek()
+ if (ele == null) return None
+ val event: Option[SchedulerEvent] = Option(
+ priorityEventQueue.peek().element.asInstanceOf[SchedulerEvent]
+ )
+ if (op(event.get)) event else None
+ }
+
+}
diff --git a/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/queue/SchedulerEvent.scala b/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/queue/SchedulerEvent.scala
index 4f384d23840..3e87a069305 100644
--- a/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/queue/SchedulerEvent.scala
+++ b/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/queue/SchedulerEvent.scala
@@ -32,9 +32,13 @@ trait SchedulerEvent extends Logging {
protected var scheduledTime: Long = 0L
protected var startTime: Long = 0L
protected var endTime: Long = 0L
+ protected var priority: Int = 100
+ protected var index: Int = 0
def getEndTime: Long = endTime
def getStartTime: Long = startTime
+ def getPriority: Int = priority
+ def getIndex: Int = index
/*
* To be compatible with old versions.
@@ -50,6 +54,14 @@ trait SchedulerEvent extends Logging {
this synchronized notify()
}
+ def setPriority(priority: Int): Unit = {
+ this.priority = priority
+ }
+
+ def setIndex(index: Int): Unit = {
+ this.index = index
+ }
+
def turnToScheduled(): Boolean = if (!isWaiting) {
false
} else {
diff --git a/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/queue/fifoqueue/FIFOConsumerManager.scala b/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/queue/fifoqueue/FIFOConsumerManager.scala
index e95e172e066..02091e4f79e 100644
--- a/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/queue/fifoqueue/FIFOConsumerManager.scala
+++ b/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/queue/fifoqueue/FIFOConsumerManager.scala
@@ -19,10 +19,11 @@ package org.apache.linkis.scheduler.queue.fifoqueue
import org.apache.linkis.common.utils.Utils
import org.apache.linkis.scheduler.SchedulerContext
+import org.apache.linkis.scheduler.conf.SchedulerConfiguration.FIFO_QUEUE_STRATEGY
import org.apache.linkis.scheduler.errorcode.LinkisSchedulerErrorCodeSummary._
import org.apache.linkis.scheduler.exception.SchedulerErrorException
import org.apache.linkis.scheduler.listener.ConsumerListener
-import org.apache.linkis.scheduler.queue.{Consumer, ConsumerManager, Group, LoopArrayQueue}
+import org.apache.linkis.scheduler.queue._
import java.text.MessageFormat
import java.util.concurrent.{ExecutorService, ThreadPoolExecutor}
@@ -34,7 +35,7 @@ class FIFOConsumerManager(groupName: String) extends ConsumerManager {
private var group: Group = _
private var executorService: ThreadPoolExecutor = _
private var consumerListener: ConsumerListener = _
- private var consumerQueue: LoopArrayQueue = _
+ private var consumerQueue: ConsumeQueue = _
private var consumer: Consumer = _
override def setSchedulerContext(schedulerContext: SchedulerContext): Unit = {
diff --git a/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/queue/fifoqueue/FIFOUserConsumer.scala b/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/queue/fifoqueue/FIFOUserConsumer.scala
index b4ffbfa4e3e..56be297c6b6 100644
--- a/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/queue/fifoqueue/FIFOUserConsumer.scala
+++ b/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/queue/fifoqueue/FIFOUserConsumer.scala
@@ -113,6 +113,7 @@ class FIFOUserConsumer(
val waitForRetryJobs = runningJobs.filter(job => job != null && job.isJobCanRetry)
waitForRetryJobs.find { job =>
isRetryJob = Utils.tryCatch(job.turnToRetry()) { t =>
+ logger.info("Job state flipped to Scheduled failed in Retry(Retry时,job状态翻转为Scheduled失败)!")
job.onFailure(
"Job state flipped to Scheduled failed in Retry(Retry时,job状态翻转为Scheduled失败)!",
t
@@ -151,6 +152,7 @@ class FIFOUserConsumer(
}
}
event.foreach { case job: Job =>
+ logger.info(s"Taken event is not empty, state: ${job.getState}, id: ${job.getId()}")
Utils.tryCatch {
val (totalDuration, askDuration) =
(fifoGroup.getMaxAskExecutorDuration, fifoGroup.getAskExecutorInterval)
diff --git a/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/queue/parallelqueue/ParallelConsumerManager.scala b/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/queue/parallelqueue/ParallelConsumerManager.scala
index c64158e6e8d..b079f120069 100644
--- a/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/queue/parallelqueue/ParallelConsumerManager.scala
+++ b/linkis-commons/linkis-scheduler/src/main/scala/org/apache/linkis/scheduler/queue/parallelqueue/ParallelConsumerManager.scala
@@ -19,9 +19,14 @@ package org.apache.linkis.scheduler.queue.parallelqueue
import org.apache.linkis.common.utils.{ByteTimeUtils, Logging, Utils}
import org.apache.linkis.scheduler.conf.SchedulerConfiguration
+import org.apache.linkis.scheduler.conf.SchedulerConfiguration.{
+ FIFO_QUEUE_STRATEGY,
+ PFIFO_SCHEDULER_STRATEGY
+}
import org.apache.linkis.scheduler.listener.ConsumerListener
import org.apache.linkis.scheduler.queue._
import org.apache.linkis.scheduler.queue.fifoqueue.FIFOUserConsumer
+import org.apache.linkis.scheduler.util.SchedulerUtils.isSupportPriority
import java.util.concurrent.{ExecutorService, TimeUnit}
@@ -111,7 +116,16 @@ class ParallelConsumerManager(maxParallelismUsers: Int, schedulerName: String)
val newConsumer = createConsumer(groupName)
val group = getSchedulerContext.getOrCreateGroupFactory.getGroup(groupName)
newConsumer.setGroup(group)
- newConsumer.setConsumeQueue(new LoopArrayQueue(group))
+ val fifoQueueStrategy: String = FIFO_QUEUE_STRATEGY.toLowerCase()
+ // Check whether the user/creator of this group is configured to use the priority queue
+ val consumerQueue: ConsumeQueue =
+ if (
+ PFIFO_SCHEDULER_STRATEGY
+ .equals(fifoQueueStrategy) && isSupportPriority(groupName)
+ ) {
+ new PriorityLoopArrayQueue(group)
+ } else new LoopArrayQueue(group)
+ newConsumer.setConsumeQueue(consumerQueue)
consumerListener.foreach(_.onConsumerCreated(newConsumer))
newConsumer.start()
newConsumer
diff --git a/linkis-commons/linkis-scheduler/src/test/java/org/apache/linkis/scheduler/queue/PriorityLoopArrayQueueTest.java b/linkis-commons/linkis-scheduler/src/test/java/org/apache/linkis/scheduler/queue/PriorityLoopArrayQueueTest.java
new file mode 100644
index 00000000000..cd45a991f1a
--- /dev/null
+++ b/linkis-commons/linkis-scheduler/src/test/java/org/apache/linkis/scheduler/queue/PriorityLoopArrayQueueTest.java
@@ -0,0 +1,234 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.linkis.scheduler.queue;
+
+import org.apache.linkis.scheduler.queue.fifoqueue.FIFOGroup;
+
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import scala.Option;
+
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+class PriorityLoopArrayQueueTest {
+ AtomicInteger productCounter = new AtomicInteger();
+ AtomicInteger consumerCounter = new AtomicInteger();
+
+ @Test
+ public void testConcurrentPutAndTake() throws Exception {
+ AtomicInteger counter = new AtomicInteger();
+ FIFOGroup group = new FIFOGroup("test", 5000, 5000);
+ PriorityLoopArrayQueue queue = new PriorityLoopArrayQueue(group);
+ boolean testFlag = false;
+
+ if (testFlag) {
+ // start time in milliseconds
+ long startTime = System.currentTimeMillis();
+ // test duration: 10 seconds in milliseconds
+ long threeMinutesInMillis = 10 * 1000;
+ int genLen = 50;
+ int getLen = 70;
+ final CountDownLatch latch = new CountDownLatch(genLen + getLen + 1);
+ // producer threads
+ for (int i = 0; i < genLen; i++) {
+ final int id = i;
+ new Thread(
+ () -> {
+ try {
+ Thread.sleep(100 * id);
+ latch.countDown();
+ latch.await();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ System.out.println(Thread.currentThread().getName() + " started producing");
+ while ((System.currentTimeMillis() - startTime) < threeMinutesInMillis) {
+ // produce
+ try {
+ Thread.sleep(getRandom(200));
+ product(counter, queue);
+ product(counter, queue);
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ // consume
+ // consume(queue);
+ }
+ System.out.println(Thread.currentThread().getName() + " finished producing");
+ },
+ "producer-t-" + i)
+ .start();
+ }
+ // consumer threads
+ for (int i = 0; i < getLen; i++) {
+ final int id = i;
+ new Thread(
+ () -> {
+ try {
+ Thread.sleep(getRandom(200));
+ latch.countDown();
+ latch.await();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ System.out.println(Thread.currentThread().getName() + " started consuming");
+ while (true) {
+ try {
+ Thread.sleep(getRandom(200));
+ // consume
+ consume(queue);
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ },
+ "consumer-t-" + i)
+ .start();
+ }
+ new Thread(
+ () -> {
+ try {
+ Thread.sleep(100);
+ latch.countDown();
+ latch.await();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ System.out.println(Thread.currentThread().getName() + " started reporting queue status");
+ while ((System.currentTimeMillis() - startTime) < threeMinutesInMillis * 2) {
+ try {
+ Thread.sleep(5000);
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ System.out.println("produced: " + productCounter.get());
+ System.out.println("consumed: " + consumerCounter.get());
+ System.out.println("current queue size: " + queue.size());
+ // requires making the private fields accessible to test
+ // System.out.println("index size: " + queue.indexMap().size());
+ // System.out.println("cache size: " + queue.fixedSizeCollection().size());
+ }
+ })
+ .start();
+ Thread.sleep(threeMinutesInMillis * 2);
+ System.out.println(
+ "product:" + productCounter.get() + ", consumer: " + consumerCounter.get());
+ // requires making the private fields accessible to test
+ // Assertions.assertEquals(1000, queue.fixedSizeCollection().size());
+ Assertions.assertEquals(productCounter.get(), consumerCounter.get());
+ }
+ }
+
+ // consume
+ private void consume(PriorityLoopArrayQueue queue) {
+ SchedulerEvent take = null;
+ try {
+ take = queue.take();
+ consumerCounter.addAndGet(1);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ printEvent("consume", take);
+ }
+
+ // produce
+ private void product(AtomicInteger counter, PriorityLoopArrayQueue queue) {
+ int i1 = counter.addAndGet(1);
+ // 1000 - high, 100 - normal, 10 - low priority
+ int[] proArr = {1000, 100, 10};
+ int priority = getRandom(3);
+ String name = "item-" + i1 + "-" + priority;
+ System.out.println("produce: " + name);
+ Option offer = queue.offer(getJob(name, proArr[priority]));
+ if (offer.nonEmpty()) {
+ productCounter.addAndGet(1);
+ Option<SchedulerEvent> schedulerEventOption = queue.get((int) offer.get());
+ printEvent("get:", schedulerEventOption.get());
+ } else {
+ System.out.println("queue is full, size: " + queue.size());
+ }
+ }
+
+ @Test
+ void testFinally() {}
+
+ @Test
+ void enqueue() {
+ // stress test for offer/take/get
+ FIFOGroup group = new FIFOGroup("test", 100, 100);
+ PriorityLoopArrayQueue queue = new PriorityLoopArrayQueue(group);
+ Option idx = queue.offer(getJob("job1-1", 1));
+ // insert test
+ Assertions.assertEquals(1, (int) idx.get());
+ queue.offer(getJob("job2", 2));
+ queue.offer(getJob("job3", 3));
+ queue.offer(getJob("job1-2", 1));
+ queue.offer(getJob("job5", 5));
+ queue.offer(getJob("item1-3", 1));
+ queue.offer(getJob("item6-1", 6));
+ queue.offer(getJob("item4", 4));
+ queue.offer(getJob("item6-2", 6));
+ // peek test
+ Option<SchedulerEvent> peek = queue.peek();
+ Assertions.assertEquals("item6-1", peek.get().getId());
+ while (queue.size() > 1) {
+ queue.take();
+ }
+ SchedulerEvent event = queue.take();
+ // priority and FIFO ordering test
+ Assertions.assertEquals("item1-3", event.getId());
+ Assertions.assertEquals(1, event.priority());
+ Assertions.assertEquals(6, event.getIndex());
+ // cache test, requires setting linkis.fifo.priority.queue.max.cache.size to 5
+ // Assertions.assertThrows(
+ // IllegalArgumentException.class,
+ // () -> {
+ // queue.get(7);
+ // });
+ }
+
+ private void printEvent(String opt, SchedulerEvent event) {
+ System.out.println(
+ "【"
+ + Thread.currentThread().getName()
+ + "】"
+ + opt
+ + ":"
+ + event.getId()
+ + ", priority: "
+ + event.getPriority()
+ + ", index: "
+ + event.getIndex());
+ }
+
+ private int getRandom(int bound) {
+ Random rand = new Random();
+ int res = rand.nextInt(bound);
+ return res;
+ }
+
+ private UserJob getJob(String name, int priority) {
+ UserJob job = new UserJob();
+ job.setId(name);
+ job.setPriority(priority);
+ return job;
+ }
+}
diff --git a/linkis-commons/linkis-scheduler/src/test/scala/org/apache/linkis/scheduler/queue/Test.scala b/linkis-commons/linkis-scheduler/src/test/scala/org/apache/linkis/scheduler/queue/Test.scala
new file mode 100644
index 00000000000..e8340994007
--- /dev/null
+++ b/linkis-commons/linkis-scheduler/src/test/scala/org/apache/linkis/scheduler/queue/Test.scala
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.linkis.scheduler.queue
+
+import java.util
+import java.util.{PriorityQueue, Queue}
+
+case class PriorityFIFOQueue() {
+ case class QueueItem(item: Queue[String], priority: Int)
+
+ import java.util.Comparator
+
+ val cNode: Comparator[QueueItem] = new Comparator[QueueItem]() {
+ override def compare(o1: QueueItem, o2: QueueItem): Int = o2.priority - o1.priority
+ }
+
+ private val queue = new PriorityQueue[QueueItem](cNode)
+ private var _size = 0
+ private var _count: Long = 0L
+
+ def size: Int = _size
+
+ def isEmpty: Boolean = _size == 0
+
+ def enqueue(item: String, priority: Int): Unit = {
+ val deque = new util.ArrayDeque[String]()
+ deque.add(item)
+ queue.add(QueueItem(deque, priority))
+ }
+
+}
diff --git a/linkis-commons/linkis-scheduler/src/test/scala/org/apache/linkis/scheduler/util/TestSchedulerUtils.scala b/linkis-commons/linkis-scheduler/src/test/scala/org/apache/linkis/scheduler/util/TestSchedulerUtils.scala
new file mode 100644
index 00000000000..48d7a689221
--- /dev/null
+++ b/linkis-commons/linkis-scheduler/src/test/scala/org/apache/linkis/scheduler/util/TestSchedulerUtils.scala
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.linkis.scheduler.util
+
+import org.apache.linkis.scheduler.util.SchedulerUtils.{
+ getCreatorFromGroupName,
+ getEngineTypeFromGroupName,
+ getUserFromGroupName,
+ isSupportPriority
+}
+
+import org.junit.jupiter.api.{Assertions, Test}
+
+class TestSchedulerUtils {
+
+ @Test
+ def testIsSupportPriority: Unit = {
+ // set linkis.fifo.queue.support.priority.users=hadoop
+ // set linkis.fifo.queue.support.priority.creators=IDE or ALL_CREATORS
+ val bool: Boolean = isSupportPriority("IdE_haDoop_hive")
+ Assertions.assertEquals(false, bool)
+ }
+
+ @Test
+ def testGroupNameParsing: Unit = {
+ var groupName = "IDE_hadoop_hive"
+ var username: String = getUserFromGroupName(groupName)
+ var engineType: String = getEngineTypeFromGroupName(groupName)
+ var creator: String = getCreatorFromGroupName(groupName)
+ Assertions.assertEquals("hadoop", username)
+ Assertions.assertEquals("hive", engineType)
+ Assertions.assertEquals("IDE", creator)
+ groupName = "APP_TEST_v_hadoop_hive"
+ username = getUserFromGroupName(groupName)
+ engineType = getEngineTypeFromGroupName(groupName)
+ creator = getCreatorFromGroupName(groupName)
+ Assertions.assertEquals("v_hadoop", username)
+ Assertions.assertEquals("hive", engineType)
+ Assertions.assertEquals("APP_TEST", creator)
+
+ groupName = "TEST_v_hadoop_hive"
+ username = getUserFromGroupName(groupName)
+ engineType = getEngineTypeFromGroupName(groupName)
+ creator = getCreatorFromGroupName(groupName)
+ Assertions.assertEquals("v_hadoop", username)
+ Assertions.assertEquals("hive", engineType)
+ Assertions.assertEquals("TEST", creator)
+
+ groupName = "APP_TEST_hadoop_hive"
+ username = getUserFromGroupName(groupName)
+ engineType = getEngineTypeFromGroupName(groupName)
+ creator = getCreatorFromGroupName(groupName)
+ Assertions.assertEquals("hadoop", username)
+ Assertions.assertEquals("hive", engineType)
+ Assertions.assertEquals("APP_TEST", creator)
+ }
+
+}
diff --git a/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/FileSystem.java b/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/FileSystem.java
index ac828267bff..8a266fadf84 100644
--- a/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/FileSystem.java
+++ b/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/FileSystem.java
@@ -23,6 +23,8 @@
import java.io.File;
import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -53,7 +55,21 @@ public String getDefaultFolderPerm() {
public abstract long getLength(FsPath dest) throws IOException;
- public abstract String checkSum(FsPath dest) throws IOException;
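+ // Optional capabilities: subclasses override only the operations they support; the defaults
+ // below are no-op implementations (null / 0 / empty list).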
+ public String getChecksumWithMD5(FsPath dest) throws IOException {
+ return null;
+ }
+
+ public String getChecksum(FsPath dest) throws IOException {
+ return null;
+ }
+
+ public long getBlockSize(FsPath dest) throws IOException {
+ return 0L;
+ }
+
+ public List<FsPath> getAllFilePaths(FsPath dest) throws IOException {
+ return Collections.emptyList();
+ }
public abstract boolean canExecute(FsPath dest) throws IOException;
diff --git a/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/impl/AzureBlobFileSystem.java b/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/impl/AzureBlobFileSystem.java
index 35473a535fa..f9cb73d93d1 100644
--- a/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/impl/AzureBlobFileSystem.java
+++ b/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/impl/AzureBlobFileSystem.java
@@ -393,7 +393,7 @@ public long getLength(FsPath dest) throws IOException {
}
@Override
- public String checkSum(FsPath dest) throws IOException {
+ public String getChecksum(FsPath dest) throws IOException {
return null;
}
diff --git a/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/impl/HDFSFileSystem.java b/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/impl/HDFSFileSystem.java
index 698cc7b12a7..2c095d102b6 100644
--- a/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/impl/HDFSFileSystem.java
+++ b/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/impl/HDFSFileSystem.java
@@ -42,6 +42,7 @@
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -57,6 +58,13 @@ public class HDFSFileSystem extends FileSystem {
private static final Logger logger = LoggerFactory.getLogger(HDFSFileSystem.class);
+ private static final String LOCKER_SUFFIX = "refresh";
+
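+ // Minimum interval between HDFS FileSystem refreshes per user, in milliseconds
+ // (wds.linkis.hdfs.rest.interval is configured in minutes).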
+ private static final int REFRESH_INTERVAL =
+ LinkisStorageConf.HDFS_FILE_SYSTEM_REFRESHE_INTERVAL() * 1000 * 60;
+
+ private static final ConcurrentHashMap<String, Long> lastCallTimes = new ConcurrentHashMap<>();
+
/** File System abstract method start */
@Override
public String listRoot() throws IOException {
@@ -328,9 +336,21 @@ public boolean exists(FsPath dest) throws IOException {
private void resetRootHdfs() {
if (fs != null) {
- synchronized (this) {
+ String locker = user + LOCKER_SUFFIX;
+ synchronized (locker.intern()) {
if (fs != null) {
if (HadoopConf.HDFS_ENABLE_CACHE()) {
+ long currentTime = System.currentTimeMillis();
+ Long lastCallTime = lastCallTimes.get(user);
+
+ if (lastCallTime != null && (currentTime - lastCallTime) < REFRESH_INTERVAL) {
+ logger.warn(
+ "HDFS FileSystem refresh denied for user: {}. Please wait for {} minutes.",
+ user,
+ REFRESH_INTERVAL / 60000);
+ return;
+ }
+ lastCallTimes.put(user, currentTime);
HDFSUtils.closeHDFSFIleSystem(fs, user, label, true);
} else {
HDFSUtils.closeHDFSFIleSystem(fs, user, label);
@@ -484,7 +504,7 @@ public long getLength(FsPath dest) throws IOException {
}
@Override
- public String checkSum(FsPath dest) throws IOException {
+ public String getChecksumWithMD5(FsPath dest) throws IOException {
String path = checkHDFSPath(dest.getPath());
if (!exists(dest)) {
throw new IOException("directory or file not exists: " + path);
@@ -493,4 +513,38 @@ public String checkSum(FsPath dest) throws IOException {
(MD5MD5CRC32FileChecksum) fs.getFileChecksum(new Path(path));
return fileChecksum.toString().split(":")[1];
}
+
+ @Override
+ public String getChecksum(FsPath dest) throws IOException {
+ String path = checkHDFSPath(dest.getPath());
+ if (!exists(dest)) {
+ throw new IOException("directory or file not exists: " + path);
+ }
+ FileChecksum fileChecksum = fs.getFileChecksum(new Path(path));
+ return fileChecksum.toString();
+ }
+
+ @Override
+ public long getBlockSize(FsPath dest) throws IOException {
+ String path = checkHDFSPath(dest.getPath());
+ if (!exists(dest)) {
+ throw new IOException("directory or file not exists: " + path);
+ }
+ return fs.getBlockSize(new Path(path));
+ }
+
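+ /** Recursively collect all file (non-directory) paths under the given path. */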
+ @Override
+ public List<FsPath> getAllFilePaths(FsPath path) throws IOException {
+ FileStatus[] stat = fs.listStatus(new Path(checkHDFSPath(path.getPath())));
+ List<FsPath> fsPaths = new ArrayList<>();
+ for (FileStatus f : stat) {
+ FsPath fsPath = fillStorageFile(new FsPath(f.getPath().toUri().getPath()), f);
+ if (fs.isDirectory(f.getPath())) {
+ fsPaths.addAll(getAllFilePaths(fsPath));
+ } else {
+ fsPaths.add(fsPath);
+ }
+ }
+ return fsPaths;
+ }
}
diff --git a/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/impl/LocalFileSystem.java b/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/impl/LocalFileSystem.java
index a03a25950e8..1098a84fc40 100644
--- a/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/impl/LocalFileSystem.java
+++ b/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/impl/LocalFileSystem.java
@@ -506,7 +506,22 @@ public long getLength(FsPath dest) throws IOException {
}
@Override
- public String checkSum(FsPath dest) {
+ public String getChecksum(FsPath dest) {
return null;
}
+
+ @Override
+ public String getChecksumWithMD5(FsPath dest) {
+ return null;
+ }
+
+ @Override
+ public long getBlockSize(FsPath dest) {
+ return 0L;
+ }
+
+ @Override
+ public List<FsPath> getAllFilePaths(FsPath dest) {
+ return new ArrayList<>();
+ }
}
diff --git a/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/impl/OSSFileSystem.java b/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/impl/OSSFileSystem.java
index e2a2c81aea1..da036f64e91 100644
--- a/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/impl/OSSFileSystem.java
+++ b/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/impl/OSSFileSystem.java
@@ -85,11 +85,6 @@ public long getLength(FsPath dest) throws IOException {
return 0;
}
- @Override
- public String checkSum(FsPath dest) throws IOException {
- return null;
- }
-
@Override
public boolean canExecute(FsPath dest) throws IOException {
return true;
diff --git a/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/impl/S3FileSystem.java b/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/impl/S3FileSystem.java
index a9f00b60d55..e95dd36388f 100644
--- a/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/impl/S3FileSystem.java
+++ b/linkis-commons/linkis-storage/src/main/java/org/apache/linkis/storage/fs/impl/S3FileSystem.java
@@ -441,11 +441,6 @@ public long getLength(FsPath dest) throws IOException {
.getContentLength();
}
- @Override
- public String checkSum(FsPath dest) throws IOException {
- return null;
- }
-
@Override
public boolean canExecute(FsPath dest) {
return true;
diff --git a/linkis-commons/linkis-storage/src/main/scala/org/apache/linkis/storage/conf/LinkisStorageConf.scala b/linkis-commons/linkis-storage/src/main/scala/org/apache/linkis/storage/conf/LinkisStorageConf.scala
index ace8509d4ad..50c60fecd2e 100644
--- a/linkis-commons/linkis-storage/src/main/scala/org/apache/linkis/storage/conf/LinkisStorageConf.scala
+++ b/linkis-commons/linkis-storage/src/main/scala/org/apache/linkis/storage/conf/LinkisStorageConf.scala
@@ -37,13 +37,18 @@ object LinkisStorageConf {
)
.getValue
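+ // Minimum interval, in minutes, between HDFS FileSystem refresh operations for the same user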
+ val HDFS_FILE_SYSTEM_REFRESHE_INTERVAL: Int =
+ CommonVars
+ .apply("wds.linkis.hdfs.rest.interval", 10)
+ .getValue
+
val ROW_BYTE_MAX_LEN_STR = CommonVars("wds.linkis.resultset.row.max.str", "2m").getValue
val ROW_BYTE_MAX_LEN = ByteTimeUtils.byteStringAsBytes(ROW_BYTE_MAX_LEN_STR)
val FILE_TYPE = CommonVars(
"wds.linkis.storage.file.type",
- "dolphin,sql,scala,py,hql,python,out,log,text,sh,jdbc,ngql,psql,fql,tsql,txt"
+ "dolphin,sql,scala,py,py3,hql,python,out,log,text,sh,jdbc,ngql,psql,fql,tsql,txt,aisql"
).getValue
private var fileTypeArr: Array[String] = null
diff --git a/linkis-commons/linkis-storage/src/main/scala/org/apache/linkis/storage/domain/MethodEntity.scala b/linkis-commons/linkis-storage/src/main/scala/org/apache/linkis/storage/domain/MethodEntity.scala
index fac0a2d01b2..6da18d43fc6 100644
--- a/linkis-commons/linkis-storage/src/main/scala/org/apache/linkis/storage/domain/MethodEntity.scala
+++ b/linkis-commons/linkis-storage/src/main/scala/org/apache/linkis/storage/domain/MethodEntity.scala
@@ -19,7 +19,7 @@ package org.apache.linkis.storage.domain
import java.lang.reflect.Type
-import com.google.gson.GsonBuilder
+import com.google.gson.{GsonBuilder, ToNumberPolicy}
/**
* @param id
@@ -56,7 +56,10 @@ case class MethodEntity(
object MethodEntitySerializer {
- val gson = new GsonBuilder().setDateFormat("yyyy-MM-dd'T'HH:mm:ssZ").create
+ val gson = new GsonBuilder()
+ .setDateFormat("yyyy-MM-dd'T'HH:mm:ssZ")
+ .setObjectToNumberStrategy(ToNumberPolicy.LAZILY_PARSED_NUMBER)
+ .create
/**
* Serialized to code as a MethodEntity object 序列化为code为MethodEntity对象
diff --git a/linkis-computation-governance/linkis-client/linkis-computation-client/src/main/scala/org/apache/linkis/ujes/client/UJESClient.scala b/linkis-computation-governance/linkis-client/linkis-computation-client/src/main/scala/org/apache/linkis/ujes/client/UJESClient.scala
index 19ac7343d88..c72a74e2e67 100644
--- a/linkis-computation-governance/linkis-client/linkis-computation-client/src/main/scala/org/apache/linkis/ujes/client/UJESClient.scala
+++ b/linkis-computation-governance/linkis-client/linkis-computation-client/src/main/scala/org/apache/linkis/ujes/client/UJESClient.scala
@@ -24,7 +24,7 @@ import org.apache.linkis.httpclient.dws.config.{DWSClientConfig, DWSClientConfig
import org.apache.linkis.httpclient.response.Result
import org.apache.linkis.ujes.client.request._
import org.apache.linkis.ujes.client.request.JobExecIdAction.JobServiceType
-import org.apache.linkis.ujes.client.response._
+import org.apache.linkis.ujes.client.response.{EncryptTokenResult, _}
import org.apache.linkis.ujes.client.utils.UJESClientUtils
import java.io.Closeable
@@ -183,6 +183,13 @@ abstract class UJESClient extends Closeable with Logging {
executeUJESJob(jobDeleteObserveAction).asInstanceOf[JobDeleteObserveResult]
}
+ def getEncryptToken(token: String, user: String): String = {
+ val encryptTokenResult = executeUJESJob(
+ EncryptTokenAction.newBuilder().setUser(user).setToken(token).build()
+ ).asInstanceOf[EncryptTokenResult]
+ encryptTokenResult.encryptToken
+ }
+
}
object UJESClient {
diff --git a/linkis-computation-governance/linkis-client/linkis-computation-client/src/main/scala/org/apache/linkis/ujes/client/request/EncryptTokenAction.scala b/linkis-computation-governance/linkis-client/linkis-computation-client/src/main/scala/org/apache/linkis/ujes/client/request/EncryptTokenAction.scala
new file mode 100644
index 00000000000..71b9db3b57b
--- /dev/null
+++ b/linkis-computation-governance/linkis-client/linkis-computation-client/src/main/scala/org/apache/linkis/ujes/client/request/EncryptTokenAction.scala
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.linkis.ujes.client.request
+
+import org.apache.linkis.httpclient.request.GetAction
+import org.apache.linkis.ujes.client.exception.UJESClientBuilderException
+
+import org.apache.commons.lang3.StringUtils
+
+class EncryptTokenAction extends GetAction with UJESJobAction {
+
+ override def suffixURLs: Array[String] =
+ Array("basedata-manager", "gateway-auth-token", "encrypt-token")
+
+}
+
+object EncryptTokenAction {
+ def newBuilder(): Builder = new Builder
+
+ class Builder private[EncryptTokenAction] () {
+ private var user: String = _
+ private var token: String = _
+
+ def setToken(token: String): Builder = {
+ this.token = token
+ this
+ }
+
+ def setUser(user: String): Builder = {
+ this.user = user
+ this
+ }
+
+ def build(): EncryptTokenAction = {
+ val encryptTokenAction = new EncryptTokenAction
+ if (token == null) throw new UJESClientBuilderException("token is needed!")
+ if (StringUtils.isNotBlank(token)) encryptTokenAction.setParameter("token", token)
+ if (StringUtils.isNotBlank(user)) encryptTokenAction.setUser(user)
+ encryptTokenAction
+ }
+
+ }
+
+}
diff --git a/linkis-computation-governance/linkis-client/linkis-computation-client/src/main/scala/org/apache/linkis/ujes/client/response/EncryptTokenResult.scala b/linkis-computation-governance/linkis-client/linkis-computation-client/src/main/scala/org/apache/linkis/ujes/client/response/EncryptTokenResult.scala
new file mode 100644
index 00000000000..98e49247d31
--- /dev/null
+++ b/linkis-computation-governance/linkis-client/linkis-computation-client/src/main/scala/org/apache/linkis/ujes/client/response/EncryptTokenResult.scala
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.linkis.ujes.client.response
+
+import org.apache.linkis.httpclient.dws.annotation.DWSHttpMessageResult
+import org.apache.linkis.httpclient.dws.response.DWSResult
+
+import java.util
+
+import scala.beans.BeanProperty
+
+@DWSHttpMessageResult("/api/rest_j/v\\d+/basedata-manager/gateway-auth-token/encrypt-token")
+class EncryptTokenResult extends DWSResult {
+
+ @BeanProperty
+ var encryptToken: String = _
+
+}
diff --git a/linkis-computation-governance/linkis-client/linkis-computation-client/src/main/scala/org/apache/linkis/ujes/client/response/JobInfoResult.scala b/linkis-computation-governance/linkis-client/linkis-computation-client/src/main/scala/org/apache/linkis/ujes/client/response/JobInfoResult.scala
index 6cb5ce08b2b..d16db5f65cb 100644
--- a/linkis-computation-governance/linkis-client/linkis-computation-client/src/main/scala/org/apache/linkis/ujes/client/response/JobInfoResult.scala
+++ b/linkis-computation-governance/linkis-client/linkis-computation-client/src/main/scala/org/apache/linkis/ujes/client/response/JobInfoResult.scala
@@ -83,6 +83,8 @@ class JobInfoResult extends DWSResult with UserAction with Status {
case resultSetList: ResultSetListResult => resultSetList.getResultSetList
}
val numberRegex: Regex = """(\d+)""".r
+ // On Windows the file separator is a backslash, which breaks the regex split below; this can
+ // be resolved with: fileName.split(java.util.regex.Pattern.quote(File.separator)).last
return resultSetList.sortBy { fileName =>
numberRegex.findFirstIn(fileName.split(File.separator).last).getOrElse("0").toInt
}
diff --git a/linkis-computation-governance/linkis-client/linkis-computation-client/src/test/java/org/apache/linkis/ujes/client/response/JobInfoResultTest.java b/linkis-computation-governance/linkis-client/linkis-computation-client/src/test/java/org/apache/linkis/ujes/client/response/JobInfoResultTest.java
new file mode 100644
index 00000000000..b9e662a33ec
--- /dev/null
+++ b/linkis-computation-governance/linkis-client/linkis-computation-client/src/test/java/org/apache/linkis/ujes/client/response/JobInfoResultTest.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.linkis.ujes.client.response;
+
+import org.apache.linkis.governance.common.entity.task.RequestPersistTask;
+import org.apache.linkis.ujes.client.UJESClient;
+
+import org.assertj.core.util.Lists;
+import org.junit.jupiter.api.Test;
+import org.mockito.Mockito;
+
+import static org.junit.jupiter.api.Assertions.*;
+import static org.mockito.ArgumentMatchers.any;
+
+class JobInfoResultTest {
+
+ /** verify single path returns check point 1: return one path */
+ @Test
+ void shouldReturnResultSetWithOrder() {
+ String[] toBeReturned = new String[] {"hdfs://hdfs/path/test/mockFile_1.dolphi"};
+ String[] setList = getResultSetList(toBeReturned);
+ assertEquals(1, setList.length);
+ assertEquals("hdfs://hdfs/path/test/mockFile_1.dolphi", setList[0]);
+ }
+
+ /** verify empty path set check point 1: return empty path */
+ @Test
+ void shouldReturnEmptyResultSet() {
+ String[] toBeReturned = new String[] {};
+ String[] setList = getResultSetList(toBeReturned);
+ assertEquals(0, setList.length);
+ }
+
+ /**
+ * verify multiple result set, sorted by file name with numbers check point 1: sort asc check
+ * point 2: sort by number, not ascii
+ */
+ @Test
+ void shouldReturnMultiResultSetWithOrder() {
+ boolean isWindows = System.getProperty("os.name").toLowerCase().startsWith("windows");
+ if (!isWindows) {
+ String[] toBeReturned =
+ new String[] {
+ "/path/to/xxxx_1.txt",
+ "/some/path/xxxx_10.txt",
+ "/another/path/xxxx_0.txt",
+ "/another/path/xxxx_2.txt",
+ "/yet/another/path/xxxx_3.txt",
+ };
+ String[] setList = getResultSetList(toBeReturned);
+ assertIterableEquals(
+ Lists.newArrayList(
+ "/another/path/xxxx_0.txt",
+ "/path/to/xxxx_1.txt",
+ "/another/path/xxxx_2.txt",
+ "/yet/another/path/xxxx_3.txt",
+ "/some/path/xxxx_10.txt"),
+ Lists.newArrayList(setList));
+ }
+ }
+
+ private static String[] getResultSetList(String[] toBeReturned) {
+ JobInfoResult jobInfoResult = Mockito.spy(new JobInfoResult());
+
+ UJESClient ujesClient = Mockito.mock(UJESClient.class);
+ Mockito.doReturn("Succeed").when(jobInfoResult).getJobStatus();
+ RequestPersistTask persistTask = new RequestPersistTask();
+ persistTask.setUmUser("test");
+ persistTask.setResultLocation("mockPath");
+ Mockito.doReturn(persistTask).when(jobInfoResult).getRequestPersistTask();
+
+ ResultSetListResult t = Mockito.spy(new ResultSetListResult());
+ Mockito.when(ujesClient.executeUJESJob(any())).thenReturn(t);
+ Mockito.doReturn(toBeReturned).when(t).getResultSetList();
+
+ return jobInfoResult.getResultSetList(ujesClient);
+ }
+}
diff --git a/linkis-computation-governance/linkis-computation-governance-common/src/main/java/org/apache/linkis/governance/common/entity/job/JobAiRequest.java b/linkis-computation-governance/linkis-computation-governance-common/src/main/java/org/apache/linkis/governance/common/entity/job/JobAiRequest.java
new file mode 100644
index 00000000000..9cec73ea7b6
--- /dev/null
+++ b/linkis-computation-governance/linkis-computation-governance-common/src/main/java/org/apache/linkis/governance/common/entity/job/JobAiRequest.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.linkis.governance.common.entity.job;
+
+import org.apache.linkis.manager.label.entity.Label;
+
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/** linkis_ps_ai_job_history */
+public class JobAiRequest {
+
+ private Long id;
+ private String jobHistoryId;
+ private String submitUser;
+ private String executeUser;
+ private String submitCode;
+ private String executionCode;
+ private Map<String, Object> metrics = new HashMap<>();
+ private Map<String, Object> params;
+ private List<Label<?>> labels;
+ private Integer errorCode;
+ private String errorDesc;
+ private String engineInstances;
+ private String engineType;
+ private Date changeTime;
+ private Date createdTime;
+ private Date updatedTime;
+
+ public Long getId() {
+ return id;
+ }
+
+ public void setId(Long id) {
+ this.id = id;
+ }
+
+ public String getJobHistoryId() {
+ return jobHistoryId;
+ }
+
+ public void setJobHistoryId(String jobHistoryId) {
+ this.jobHistoryId = jobHistoryId;
+ }
+
+ public String getSubmitUser() {
+ return submitUser;
+ }
+
+ public void setSubmitUser(String submitUser) {
+ this.submitUser = submitUser;
+ }
+
+ public String getExecuteUser() {
+ return executeUser;
+ }
+
+ public void setExecuteUser(String executeUser) {
+ this.executeUser = executeUser;
+ }
+
+ public String getSubmitCode() {
+ return submitCode;
+ }
+
+ public void setSubmitCode(String submitCode) {
+ this.submitCode = submitCode;
+ }
+
+ public String getExecutionCode() {
+ return executionCode;
+ }
+
+ public void setExecutionCode(String executionCode) {
+ this.executionCode = executionCode;
+ }
+
+ public Map<String, Object> getMetrics() {
+ return metrics;
+ }
+
+ public void setMetrics(Map<String, Object> metrics) {
+ this.metrics = metrics;
+ }
+
+ public Map<String, Object> getParams() {
+ return params;
+ }
+
+ public void setParams(Map<String, Object> params) {
+ this.params = params;
+ }
+
+ public List<Label<?>> getLabels() {
+ return labels;
+ }
+
+ public void setLabels(List<Label<?>> labels) {
+ this.labels = labels;
+ }
+
+ public Integer getErrorCode() {
+ return errorCode;
+ }
+
+ public void setErrorCode(Integer errorCode) {
+ this.errorCode = errorCode;
+ }
+
+ public String getErrorDesc() {
+ return errorDesc;
+ }
+
+ public void setErrorDesc(String errorDesc) {
+ this.errorDesc = errorDesc;
+ }
+
+ public String getEngineInstances() {
+ return engineInstances;
+ }
+
+ public void setEngineInstances(String engineInstances) {
+ this.engineInstances = engineInstances;
+ }
+
+ public String getEngineType() {
+ return engineType;
+ }
+
+ public void setEngineType(String engineType) {
+ this.engineType = engineType;
+ }
+
+ public Date getChangeTime() {
+ return changeTime;
+ }
+
+ public void setChangeTime(Date changeTime) {
+ this.changeTime = changeTime;
+ }
+
+ public Date getCreatedTime() {
+ return createdTime;
+ }
+
+ public void setCreatedTime(Date createdTime) {
+ this.createdTime = createdTime;
+ }
+
+ public Date getUpdatedTime() {
+ return updatedTime;
+ }
+
+ public void setUpdatedTime(Date updatedTime) {
+ this.updatedTime = updatedTime;
+ }
+
+ @Override
+ public String toString() {
+ return "JobAiRequest{"
+ + "id="
+ + id
+ + ", jobHistoryId='"
+ + jobHistoryId
+ + '\''
+ + ", submitUser='"
+ + submitUser
+ + '\''
+ + ", executeUser='"
+ + executeUser
+ + '\''
+ + ", labels="
+ + labels
+ + ", params="
+ + params
+ + '}';
+ }
+}
diff --git a/linkis-computation-governance/linkis-computation-governance-common/src/main/scala/org/apache/linkis/governance/common/protocol/conf/TenantConf.scala b/linkis-computation-governance/linkis-computation-governance-common/src/main/scala/org/apache/linkis/governance/common/protocol/conf/TenantConf.scala
index 948501e7720..17393bd04c0 100644
--- a/linkis-computation-governance/linkis-computation-governance-common/src/main/scala/org/apache/linkis/governance/common/protocol/conf/TenantConf.scala
+++ b/linkis-computation-governance/linkis-computation-governance-common/src/main/scala/org/apache/linkis/governance/common/protocol/conf/TenantConf.scala
@@ -25,11 +25,13 @@ case class TenantRequest(user: String, creator: String) extends TenantConf
case class TenantResponse(user: String, creator: String, isValid: String, tenant: String)
-case class DepartTenantRequest(creator: String, departmentId: String) extends TenantConf
+case class DepartTenantRequest(creator: String, departmentId: String, departmentName: String)
+ extends TenantConf
case class DepartTenantResponse(
creator: String,
departmentId: String,
+ departmentName: String,
isValid: String,
tenant: String
)
diff --git a/linkis-computation-governance/linkis-computation-governance-common/src/main/scala/org/apache/linkis/governance/common/protocol/job/JobReqProcotol.scala b/linkis-computation-governance/linkis-computation-governance-common/src/main/scala/org/apache/linkis/governance/common/protocol/job/JobReqProcotol.scala
index 829a967aab5..9635aaf0c2c 100644
--- a/linkis-computation-governance/linkis-computation-governance-common/src/main/scala/org/apache/linkis/governance/common/protocol/job/JobReqProcotol.scala
+++ b/linkis-computation-governance/linkis-computation-governance-common/src/main/scala/org/apache/linkis/governance/common/protocol/job/JobReqProcotol.scala
@@ -17,7 +17,7 @@
package org.apache.linkis.governance.common.protocol.job
-import org.apache.linkis.governance.common.entity.job.JobRequest
+import org.apache.linkis.governance.common.entity.job.{JobAiRequest, JobRequest}
import org.apache.linkis.protocol.RetryableProtocol
import org.apache.linkis.protocol.message.RequestProtocol
@@ -38,6 +38,8 @@ case class JobReqQuery(jobReq: JobRequest) extends JobReq
case class JobReqReadAll(jobReq: JobRequest) extends JobReq
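+/** Insert request carrying an AI job history record (linkis_ps_ai_job_history). */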
+case class JobAiReqInsert(jobReq: JobAiRequest) extends JobReq
+
class RequestOneJob extends JobReq {
@BeanProperty
diff --git a/linkis-computation-governance/linkis-computation-governance-common/src/main/scala/org/apache/linkis/governance/common/protocol/task/ResponseTaskExecute.scala b/linkis-computation-governance/linkis-computation-governance-common/src/main/scala/org/apache/linkis/governance/common/protocol/task/ResponseTaskExecute.scala
index f59761dc436..95f1a542ac6 100644
--- a/linkis-computation-governance/linkis-computation-governance-common/src/main/scala/org/apache/linkis/governance/common/protocol/task/ResponseTaskExecute.scala
+++ b/linkis-computation-governance/linkis-computation-governance-common/src/main/scala/org/apache/linkis/governance/common/protocol/task/ResponseTaskExecute.scala
@@ -49,6 +49,15 @@ case class ResponseTaskStatus(execId: String, status: ExecutionNodeStatus)
extends RetryableProtocol
with RequestProtocol
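+/**
+ * Task status response that additionally carries the index of the code statement that failed
+ * (-1 when not set), used for partial retry of failed multi-statement tasks.
+ */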
+class ResponseTaskStatusWithExecuteCodeIndex(
+ execId: String,
+ status: ExecutionNodeStatus,
+ private var _errorIndex: Int = -1
+) extends ResponseTaskStatus(execId, status) {
+ def errorIndex: Int = _errorIndex
+ def errorIndex_=(value: Int): Unit = _errorIndex = value
+}
+
case class ResponseTaskResultSet(execId: String, output: String, alias: String)
extends RetryableProtocol
with RequestProtocol {
diff --git a/linkis-computation-governance/linkis-computation-governance-common/src/main/scala/org/apache/linkis/governance/common/utils/GovernanceUtils.scala b/linkis-computation-governance/linkis-computation-governance-common/src/main/scala/org/apache/linkis/governance/common/utils/GovernanceUtils.scala
index 63c963d2d3d..3da093558fd 100644
--- a/linkis-computation-governance/linkis-computation-governance-common/src/main/scala/org/apache/linkis/governance/common/utils/GovernanceUtils.scala
+++ b/linkis-computation-governance/linkis-computation-governance-common/src/main/scala/org/apache/linkis/governance/common/utils/GovernanceUtils.scala
@@ -26,7 +26,7 @@ import org.apache.commons.lang3.StringUtils
import java.io.File
import java.text.SimpleDateFormat
import java.util
-import java.util.{ArrayList, Date, List}
+import java.util.Date
object GovernanceUtils extends Logging {
@@ -132,7 +132,7 @@ object GovernanceUtils extends Logging {
* @return
*/
def getResultParentPath(creator: String): String = {
- val resPrefix = GovernanceCommonConf.RESULT_SET_STORE_PATH.getValue
+ val resPrefix = GovernanceCommonConf.DEFAULT_LOGPATH_PREFIX
val resStb = new StringBuilder()
if (resStb.endsWith("/")) {
resStb.append(resPrefix)
@@ -140,10 +140,29 @@ object GovernanceUtils extends Logging {
resStb.append(resPrefix).append("/")
}
val dateFormat = new SimpleDateFormat("yyyy-MM-dd")
+    val hourFormat = new SimpleDateFormat("HH") // added: 24-hour clock
val date = new Date(System.currentTimeMillis)
val dateString = dateFormat.format(date)
- resStb.append("result").append("/").append(dateString).append("/").append(creator)
- resStb.toString()
+    val hourString = hourFormat.format(date) // added: current hour, e.g. "08" or "14"
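+    // With the hour-directory switch on, results go to .../result/<date>/<hour>/<creator>;
+    // otherwise the original .../result/<date>/<creator> layout is kept.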
+ if (Configuration.HDFS_HOUR_DIR_SWITCH) {
+ resStb
+ .append("result")
+ .append("/")
+ .append(dateString)
+ .append("/")
+ .append(hourString)
+ .append("/") // 新增:小时层级
+ .append(creator)
+ .toString()
+ } else {
+ resStb
+ .append("result")
+ .append("/")
+ .append(dateString)
+ .append("/")
+ .append(creator)
+ .toString()
+ }
}
}
diff --git a/linkis-computation-governance/linkis-engineconn-manager/linkis-engineconn-manager-core/src/main/scala/org/apache/linkis/ecm/core/launch/ProcessEngineConnLaunch.scala b/linkis-computation-governance/linkis-engineconn-manager/linkis-engineconn-manager-core/src/main/scala/org/apache/linkis/ecm/core/launch/ProcessEngineConnLaunch.scala
index ba914dbbbaa..78891f62fd4 100644
--- a/linkis-computation-governance/linkis-engineconn-manager/linkis-engineconn-manager-core/src/main/scala/org/apache/linkis/ecm/core/launch/ProcessEngineConnLaunch.scala
+++ b/linkis-computation-governance/linkis-engineconn-manager/linkis-engineconn-manager-core/src/main/scala/org/apache/linkis/ecm/core/launch/ProcessEngineConnLaunch.scala
@@ -44,6 +44,8 @@ import org.apache.linkis.manager.engineplugin.common.launch.process.{
}
import org.apache.linkis.manager.engineplugin.common.launch.process.Environment._
import org.apache.linkis.manager.engineplugin.common.launch.process.LaunchConstants._
+import org.apache.linkis.manager.label.conf.LabelCommonConfig
+import org.apache.linkis.manager.label.entity.engine.EngineType
import org.apache.linkis.manager.label.utils.LabelUtil
import org.apache.commons.io.FileUtils
@@ -270,7 +272,31 @@ trait ProcessEngineConnLaunch extends EngineConnLaunch with Logging {
)
)
- val engineConnEnvKeys = request.environment.remove(ENGINECONN_ENVKEYS.toString)
+ var engineConnEnvKeys = request.environment.remove(ENGINECONN_ENVKEYS.toString)
+    // Adjust the Spark environment so that Spark 2 keeps working after switching to spark-cmd
+ val engineTypeLabel = LabelUtil.getEngineTypeLabel(request.labels)
+ if (engineTypeLabel.getEngineType.equals(EngineType.SPARK.toString)) {
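+      // Choose Spark 3 or the default Spark home/cmd/path/conf based on the engine version label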
+ val (sparkHome, sparkCmd, sparkEnginePath, sparkConfig) =
+ if (engineTypeLabel.getVersion.contains(LabelCommonConfig.SPARK3_ENGINE_VERSION.getValue)) {
+ (
+ LabelCommonConfig.SPARK3_ENGINE_HOME.getValue,
+ LabelCommonConfig.SPARK3_ENGINE_CMD.getValue,
+ LabelCommonConfig.SPARK3_ENGINE_PATH.getValue,
+ LabelCommonConfig.SPARK3_ENGINE_CONFIG.getValue
+ )
+ } else {
+ (
+ LabelCommonConfig.SPARK_ENGINE_HOME.getValue,
+ LabelCommonConfig.SPARK_ENGINE_CMD.getValue,
+ LabelCommonConfig.SPARK_ENGINE_PATH.getValue,
+ LabelCommonConfig.SPARK_ENGINE_CONFIG.getValue
+ )
+ }
+ processBuilder.setEnv(LabelCommonConfig.SPARK_ENGINE_HOME_CONF, sparkHome)
+ processBuilder.setEnv(LabelCommonConfig.SPARK_ENGINE_CMD_CONF, sparkCmd)
+ processBuilder.setEnv(LabelCommonConfig.SPARK_ENGINE_PATH_CONF, sparkEnginePath)
+ processBuilder.setEnv(LabelCommonConfig.SPARK_ENGINE_CONF_DIR, sparkConfig)
+ }
logger.debug(s"ENGINECONN_ENVKEYS: " + engineConnEnvKeys)
// set other env
val engineConnEnvKeyArray = engineConnEnvKeys.split(",")
diff --git a/linkis-computation-governance/linkis-engineconn/linkis-computation-engineconn/src/main/scala/org/apache/linkis/engineconn/computation/executor/conf/ComputationExecutorConf.scala b/linkis-computation-governance/linkis-engineconn/linkis-computation-engineconn/src/main/scala/org/apache/linkis/engineconn/computation/executor/conf/ComputationExecutorConf.scala
index a3f7bb49e32..a6c055d4ecb 100644
--- a/linkis-computation-governance/linkis-engineconn/linkis-computation-engineconn/src/main/scala/org/apache/linkis/engineconn/computation/executor/conf/ComputationExecutorConf.scala
+++ b/linkis-computation-governance/linkis-engineconn/linkis-computation-engineconn/src/main/scala/org/apache/linkis/engineconn/computation/executor/conf/ComputationExecutorConf.scala
@@ -35,7 +35,7 @@ object ComputationExecutorConf {
val PRINT_TASK_PARAMS_SKIP_KEYS = CommonVars(
"linkis.engineconn.print.task.params.skip.keys",
- "jobId",
+ "jobId,wds.linkis.rm.yarnqueue",
"skip to print params key at job logs"
)
@@ -136,4 +136,23 @@ object ComputationExecutorConf {
val CLOSE_RS_OUTPUT_WHEN_RESET_BY_DEFAULT_ENABLED =
CommonVars("linkis.ec.rs.close.when.reset.enabled", true).getValue
+ val SPECIAL_UDF_CHECK_ENABLED =
+ CommonVars("linkis.ec.spacial.udf.check.enabled", false)
+
+ val SPECIAL_UDF_CHECK_BY_REGEX_ENABLED =
+ CommonVars("linkis.ec.spacial.udf.check.by.regex.enabled", false)
+
+ val SPECIAL_UDF_NAMES =
+ CommonVars("linkis.ec.spacial.udf.check.names", "")
+
+ val SUPPORT_SPECIAL_UDF_LANGUAGES =
+ CommonVars("linkis.ec.support.spacial.udf.languages", "sql,python")
+
+ val ONLY_SQL_USE_UDF_KEY = "load.only.sql.use.udf"
+
+ val CODE_TYPE = "codeType"
+
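+  // Whether a failed multi-statement task may skip already-executed statements and retry from the failing one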
+ val SUPPORT_PARTIAL_RETRY_FOR_FAILED_TASKS_ENABLED: Boolean =
+ CommonVars[Boolean]("linkis.partial.retry.for.failed.task.enabled", false).getValue
+
}
diff --git a/linkis-computation-governance/linkis-engineconn/linkis-computation-engineconn/src/main/scala/org/apache/linkis/engineconn/computation/executor/execute/ComputationExecutor.scala b/linkis-computation-governance/linkis-engineconn/linkis-computation-engineconn/src/main/scala/org/apache/linkis/engineconn/computation/executor/execute/ComputationExecutor.scala
index 592d225a344..ee48b133096 100644
--- a/linkis-computation-governance/linkis-engineconn/linkis-computation-engineconn/src/main/scala/org/apache/linkis/engineconn/computation/executor/execute/ComputationExecutor.scala
+++ b/linkis-computation-governance/linkis-engineconn/linkis-computation-engineconn/src/main/scala/org/apache/linkis/engineconn/computation/executor/execute/ComputationExecutor.scala
@@ -21,7 +21,6 @@ import org.apache.linkis.DataWorkCloudApplication
import org.apache.linkis.common.log.LogUtils
import org.apache.linkis.common.utils.{Logging, Utils}
import org.apache.linkis.engineconn.acessible.executor.entity.AccessibleExecutor
-import org.apache.linkis.engineconn.acessible.executor.info.DefaultNodeHealthyInfoManager
import org.apache.linkis.engineconn.acessible.executor.listener.event.{
TaskLogUpdateEvent,
TaskResponseErrorEvent,
@@ -30,6 +29,7 @@ import org.apache.linkis.engineconn.acessible.executor.listener.event.{
import org.apache.linkis.engineconn.acessible.executor.utils.AccessibleExecutorUtils.currentEngineIsUnHealthy
import org.apache.linkis.engineconn.common.conf.{EngineConnConf, EngineConnConstant}
import org.apache.linkis.engineconn.computation.executor.conf.ComputationExecutorConf
+import org.apache.linkis.engineconn.computation.executor.conf.ComputationExecutorConf.SUPPORT_PARTIAL_RETRY_FOR_FAILED_TASKS_ENABLED
import org.apache.linkis.engineconn.computation.executor.entity.EngineConnTask
import org.apache.linkis.engineconn.computation.executor.exception.HookExecuteException
import org.apache.linkis.engineconn.computation.executor.hook.ComputationExecutorHook
@@ -39,19 +39,12 @@ import org.apache.linkis.engineconn.core.EngineConnObject
import org.apache.linkis.engineconn.core.executor.ExecutorManager
import org.apache.linkis.engineconn.executor.entity.{LabelExecutor, ResourceExecutor}
import org.apache.linkis.engineconn.executor.listener.ExecutorListenerBusContext
-import org.apache.linkis.governance.common.constant.job.JobRequestConstants
import org.apache.linkis.governance.common.entity.ExecutionNodeStatus
import org.apache.linkis.governance.common.paser.CodeParser
import org.apache.linkis.governance.common.protocol.task.{EngineConcurrentInfo, RequestTask}
import org.apache.linkis.governance.common.utils.{JobUtils, LoggerUtils}
-import org.apache.linkis.manager.common.entity.enumeration.{NodeHealthy, NodeStatus}
-import org.apache.linkis.manager.label.entity.engine.{
- CodeLanguageLabel,
- EngineType,
- EngineTypeLabel,
- RunType,
- UserCreatorLabel
-}
+import org.apache.linkis.manager.common.entity.enumeration.NodeStatus
+import org.apache.linkis.manager.label.entity.engine.{EngineType, UserCreatorLabel}
import org.apache.linkis.manager.label.utils.LabelUtil
import org.apache.linkis.protocol.engine.JobProgressInfo
import org.apache.linkis.scheduler.executer._
@@ -59,12 +52,12 @@ import org.apache.linkis.scheduler.executer._
import org.apache.commons.lang3.StringUtils
import org.apache.commons.lang3.exception.ExceptionUtils
+import java.util
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.JavaConverters._
-import DataWorkCloudApplication.getApplicationContext
import com.google.common.cache.{Cache, CacheBuilder}
abstract class ComputationExecutor(val outputPrintLimit: Int = 1000)
@@ -262,38 +255,75 @@ abstract class ComputationExecutor(val outputPrintLimit: Int = 1000)
Array(hookedCode)
}
engineExecutionContext.setTotalParagraph(codes.length)
+
+ val retryEnable: Boolean = SUPPORT_PARTIAL_RETRY_FOR_FAILED_TASKS_ENABLED
+
codes.indices.foreach({ index =>
if (ExecutionNodeStatus.Cancelled == engineConnTask.getStatus) {
return ErrorExecuteResponse("Job is killed by user!", null)
}
- val code = codes(index)
- engineExecutionContext.setCurrentParagraph(index + 1)
-
- response = Utils.tryCatch(if (incomplete.nonEmpty) {
- executeCompletely(engineExecutionContext, code, incomplete.toString())
- } else executeLine(engineExecutionContext, code)) { t =>
- ErrorExecuteResponse(ExceptionUtils.getRootCauseMessage(t), t)
+ var executeFlag = true
+ val errorIndex: Int = Integer.valueOf(
+ engineConnTask.getProperties.getOrDefault("execute.error.code.index", "-1").toString
+ )
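+      // errorIndex ("execute.error.code.index") is the statement index that failed in a previous attempt; -1 means first attempt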
+ engineExecutionContext.getProperties.put("execute.error.code.index", errorIndex.toString)
+      // On retry, skip statements that were already executed in a previous attempt
+ if (retryEnable && errorIndex > 0 && index < errorIndex) {
+ engineExecutionContext.appendStdout(
+ LogUtils.generateInfo(
+ s"aisql retry with errorIndex: ${errorIndex}, current sql index: ${index} will skip."
+ )
+ )
+ executeFlag = false
}
-
- incomplete ++= code
- response match {
- case e: ErrorExecuteResponse =>
- failedTasks.increase()
- logger.error("execute code failed!", e.t)
- return response
- case SuccessExecuteResponse() =>
- engineExecutionContext.appendStdout("\n")
- incomplete.setLength(0)
- case e: OutputExecuteResponse =>
- incomplete.setLength(0)
- val output =
- if (StringUtils.isNotEmpty(e.getOutput) && e.getOutput.length > outputPrintLimit) {
- e.getOutput.substring(0, outputPrintLimit)
- } else e.getOutput
- engineExecutionContext.appendStdout(output)
- if (StringUtils.isNotBlank(e.getOutput)) engineExecutionContext.sendResultSet(e)
- case _: IncompleteExecuteResponse =>
- incomplete ++= incompleteSplitter
+ if (executeFlag) {
+ val code = codes(index)
+ engineExecutionContext.setCurrentParagraph(index + 1)
+ response = Utils.tryCatch(if (incomplete.nonEmpty) {
+ executeCompletely(engineExecutionContext, code, incomplete.toString())
+ } else executeLine(engineExecutionContext, code)) { t =>
+ ErrorExecuteResponse(ExceptionUtils.getRootCauseMessage(t), t)
+ }
+ // info(s"Finished to execute task ${engineConnTask.getTaskId}")
+ incomplete ++= code
+ response match {
+ case e: ErrorExecuteResponse =>
+ val props: util.Map[String, String] = engineCreationContext.getOptions
+ val aiSqlEnable: String = props.getOrDefault("linkis.ai.sql.enable", "false").toString
+ val retryNum: Int =
+ Integer.valueOf(props.getOrDefault("linkis.ai.retry.num", "0").toString)
+
+ if (retryEnable && !props.isEmpty && "true".equals(aiSqlEnable) && retryNum > 0) {
+ logger.info(
+ s"aisql execute failed, with index: ${index} retryNum: ${retryNum}, and will retry",
+ e.t
+ )
+ engineExecutionContext.appendStdout(
+ LogUtils.generateInfo(
+ s"aisql execute failed, with index: ${index} retryNum: ${retryNum}, and will retry"
+ )
+ )
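+              // Record the failing statement index so the retried attempt can skip the statements before it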
+ engineConnTask.getProperties.put("execute.error.code.index", index.toString)
+ return ErrorRetryExecuteResponse(e.message, index, e.t)
+ } else {
+ failedTasks.increase()
+ logger.error("execute code failed!", e.t)
+ return response
+ }
+ case SuccessExecuteResponse() =>
+ engineExecutionContext.appendStdout("\n")
+ incomplete.setLength(0)
+ case e: OutputExecuteResponse =>
+ incomplete.setLength(0)
+ val output =
+ if (StringUtils.isNotEmpty(e.getOutput) && e.getOutput.length > outputPrintLimit) {
+ e.getOutput.substring(0, outputPrintLimit)
+ } else e.getOutput
+ engineExecutionContext.appendStdout(output)
+ if (StringUtils.isNotBlank(e.getOutput)) engineExecutionContext.sendResultSet(e)
+ case _: IncompleteExecuteResponse =>
+ incomplete ++= incompleteSplitter
+ }
}
})
Utils.tryCatch(engineExecutionContext.close()) { t =>
@@ -346,10 +376,9 @@ abstract class ComputationExecutor(val outputPrintLimit: Int = 1000)
transformTaskStatus(engineConnTask, ExecutionNodeStatus.Failed)
case _ => logger.warn(s"task get response is $executeResponse")
}
+ Utils.tryAndWarn(afterExecute(engineConnTask, executeResponse))
executeResponse
}
-
- Utils.tryAndWarn(afterExecute(engineConnTask, response))
logger.info(s"Finished to execute task ${engineConnTask.getTaskId}")
// lastTask = null
response
@@ -394,12 +423,6 @@ abstract class ComputationExecutor(val outputPrintLimit: Int = 1000)
engineConnTask.getProperties.get(RequestTask.RESULT_SET_STORE_PATH).toString
)
}
- if (engineConnTask.getProperties.containsKey(JobRequestConstants.ENABLE_DIRECT_PUSH)) {
- engineExecutionContext.setEnableDirectPush(
- engineConnTask.getProperties.get(JobRequestConstants.ENABLE_DIRECT_PUSH).toString.toBoolean
- )
- logger.info(s"Enable direct push in engineTask ${engineConnTask.getTaskId}.")
- }
logger.info(s"StorePath : ${engineExecutionContext.getStorePath.orNull}.")
engineExecutionContext.setJobId(engineConnTask.getTaskId)
engineExecutionContext.getProperties.putAll(engineConnTask.getProperties)
@@ -426,10 +449,13 @@ abstract class ComputationExecutor(val outputPrintLimit: Int = 1000)
def printTaskParamsLog(engineExecutorContext: EngineExecutionContext): Unit = {
val sb = new StringBuilder
-
EngineConnObject.getEngineCreationContext.getOptions.asScala.foreach({ case (key, value) =>
// skip log jobId because it corresponding jobid when the ec created
- if (!ComputationExecutorConf.PRINT_TASK_PARAMS_SKIP_KEYS.getValue.contains(key)) {
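+      // Match skip keys exactly against the comma-separated list instead of substring matching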
+ if (
+ !ComputationExecutorConf.PRINT_TASK_PARAMS_SKIP_KEYS.getValue
+ .split(",")
+ .exists(_.equals(key))
+ ) {
sb.append(s"${key}=${value}\n")
}
})
diff --git a/linkis-computation-governance/linkis-engineconn/linkis-computation-engineconn/src/main/scala/org/apache/linkis/engineconn/computation/executor/execute/EngineExecutionContext.scala b/linkis-computation-governance/linkis-engineconn/linkis-computation-engineconn/src/main/scala/org/apache/linkis/engineconn/computation/executor/execute/EngineExecutionContext.scala
index 55e2b1248ba..292a92af5c3 100644
--- a/linkis-computation-governance/linkis-engineconn/linkis-computation-engineconn/src/main/scala/org/apache/linkis/engineconn/computation/executor/execute/EngineExecutionContext.scala
+++ b/linkis-computation-governance/linkis-engineconn/linkis-computation-engineconn/src/main/scala/org/apache/linkis/engineconn/computation/executor/execute/EngineExecutionContext.scala
@@ -31,6 +31,7 @@ import org.apache.linkis.engineconn.acessible.executor.listener.event.{
import org.apache.linkis.engineconn.acessible.executor.log.LogHelper
import org.apache.linkis.engineconn.computation.executor.conf.ComputationExecutorConf
import org.apache.linkis.engineconn.computation.executor.cs.CSTableResultSetWriter
+import org.apache.linkis.engineconn.core.EngineConnObject
import org.apache.linkis.engineconn.executor.ExecutorExecutionContext
import org.apache.linkis.engineconn.executor.entity.Executor
import org.apache.linkis.engineconn.executor.listener.{
@@ -197,12 +198,22 @@ class EngineExecutionContext(executor: ComputationExecutor, executorUser: String
} else {
var taskLog = log
val limitLength = ComputationExecutorConf.ENGINE_SEND_LOG_TO_ENTRANCE_LIMIT_LENGTH.getValue
- if (
- ComputationExecutorConf.ENGINE_SEND_LOG_TO_ENTRANCE_LIMIT_ENABLED.getValue &&
- log.length > limitLength
- ) {
- taskLog = s"${log.substring(0, limitLength)}..."
- logger.info("The log is too long and will be intercepted,log limit length : {}", limitLength)
+ val limitEnableObj =
+ properties.get(ComputationExecutorConf.ENGINE_SEND_LOG_TO_ENTRANCE_LIMIT_ENABLED.key)
+ val limitEnable =
+ if (limitEnableObj == null) {
+ ComputationExecutorConf.ENGINE_SEND_LOG_TO_ENTRANCE_LIMIT_ENABLED.getValue
+ } else {
+ limitEnableObj.toString.toBoolean
+ }
+ if (limitEnable) {
+ if (log.length > limitLength) {
+ taskLog = s"${log.substring(0, limitLength)}..."
+ logger.info(
+ "The log is too long and will be intercepted,log limit length : {}",
+ limitLength
+ )
+ }
}
if (!AccessibleExecutorConfiguration.ENGINECONN_SUPPORT_PARALLELISM.getValue) {
LogHelper.cacheLog(taskLog)
diff --git a/linkis-computation-governance/linkis-engineconn/linkis-computation-engineconn/src/main/scala/org/apache/linkis/engineconn/computation/executor/service/TaskExecutionServiceImpl.scala b/linkis-computation-governance/linkis-engineconn/linkis-computation-engineconn/src/main/scala/org/apache/linkis/engineconn/computation/executor/service/TaskExecutionServiceImpl.scala
index 28df29db31b..3739f47b541 100644
--- a/linkis-computation-governance/linkis-engineconn/linkis-computation-engineconn/src/main/scala/org/apache/linkis/engineconn/computation/executor/service/TaskExecutionServiceImpl.scala
+++ b/linkis-computation-governance/linkis-engineconn/linkis-computation-engineconn/src/main/scala/org/apache/linkis/engineconn/computation/executor/service/TaskExecutionServiceImpl.scala
@@ -18,7 +18,7 @@
package org.apache.linkis.engineconn.computation.executor.service
import org.apache.linkis.common.listener.Event
-import org.apache.linkis.common.utils.{Logging, Utils}
+import org.apache.linkis.common.utils.{CodeAndRunTypeUtils, Logging, Utils}
import org.apache.linkis.engineconn.acessible.executor.listener.LogListener
import org.apache.linkis.engineconn.acessible.executor.listener.event._
import org.apache.linkis.engineconn.acessible.executor.log.LogHelper
@@ -34,7 +34,6 @@ import org.apache.linkis.engineconn.computation.executor.execute.{
ComputationExecutor,
ConcurrentComputationExecutor
}
-import org.apache.linkis.engineconn.computation.executor.hook.ExecutorLabelsRestHook
import org.apache.linkis.engineconn.computation.executor.listener.{
ResultSetListener,
TaskProgressListener,
@@ -50,7 +49,6 @@ import org.apache.linkis.engineconn.core.executor.ExecutorManager
import org.apache.linkis.engineconn.executor.entity.ResourceFetchExecutor
import org.apache.linkis.engineconn.executor.listener.ExecutorListenerBusContext
import org.apache.linkis.engineconn.executor.listener.event.EngineConnSyncEvent
-import org.apache.linkis.engineconn.launch.EngineConnServer
import org.apache.linkis.governance.common.constant.ec.ECConstants
import org.apache.linkis.governance.common.entity.ExecutionNodeStatus
import org.apache.linkis.governance.common.exception.engineconn.{
@@ -60,13 +58,12 @@ import org.apache.linkis.governance.common.exception.engineconn.{
import org.apache.linkis.governance.common.protocol.task._
import org.apache.linkis.governance.common.utils.{JobUtils, LoggerUtils}
import org.apache.linkis.hadoop.common.utils.KerberosUtils
-import org.apache.linkis.manager.common.entity.enumeration.NodeStatus
import org.apache.linkis.manager.common.protocol.resource.{
ResponseTaskRunningInfo,
ResponseTaskYarnResource
}
-import org.apache.linkis.manager.engineplugin.common.launch.process.LaunchConstants
import org.apache.linkis.manager.label.entity.Label
+import org.apache.linkis.manager.label.utils.LabelUtil
import org.apache.linkis.protocol.constants.TaskConstant
import org.apache.linkis.protocol.message.RequestProtocol
import org.apache.linkis.rpc.Sender
@@ -74,6 +71,7 @@ import org.apache.linkis.rpc.message.annotation.Receiver
import org.apache.linkis.rpc.utils.RPCUtils
import org.apache.linkis.scheduler.executer.{
ErrorExecuteResponse,
+ ErrorRetryExecuteResponse,
ExecuteResponse,
IncompleteExecuteResponse,
SubmitResponse
@@ -89,6 +87,7 @@ import org.springframework.stereotype.Component
import javax.annotation.PostConstruct
import java.util
+import java.util.Map
import java.util.concurrent._
import java.util.concurrent.atomic.AtomicInteger
@@ -223,6 +222,13 @@ class TaskExecutionServiceImpl
System.getProperties.put(ComputationExecutorConf.JOB_ID_TO_ENV_KEY, jobId)
logger.info(s"Received job with id ${jobId}.")
}
+
+ // only sql can use udf check, udfName set in UDFLoad
+ val codeType: String = LabelUtil.getCodeType(requestTask.getLabels)
+ val languageType: String = CodeAndRunTypeUtils.getLanguageTypeByCodeType(codeType)
+ System.getProperties.put(ComputationExecutorConf.CODE_TYPE, languageType)
+ logger.info(s"add spacial udf check for job ${jobId} with codeType: {}", languageType)
+
val task = new CommonEngineConnTask(taskId, retryAble)
task.setCode(requestTask.getCode)
task.setProperties(requestTask.getProperties)
@@ -243,6 +249,17 @@ class TaskExecutionServiceImpl
sendToEntrance(task, ResponseTaskError(task.getTaskId, message))
logger.error(message, throwable)
sendToEntrance(task, ResponseTaskStatus(task.getTaskId, ExecutionNodeStatus.Failed))
+ case ErrorRetryExecuteResponse(message, index, throwable) =>
+ sendToEntrance(task, ResponseTaskError(task.getTaskId, message))
+ logger.error(message, throwable)
+ sendToEntrance(
+ task,
+ new ResponseTaskStatusWithExecuteCodeIndex(
+ task.getTaskId,
+ ExecutionNodeStatus.Failed,
+ index
+ )
+ )
case _ =>
}
LoggerUtils.removeJobIdMDC()
@@ -371,7 +388,9 @@ class TaskExecutionServiceImpl
val sleepInterval = ComputationExecutorConf.ENGINE_PROGRESS_FETCH_INTERVAL.getValue
scheduler.submit(new Runnable {
override def run(): Unit = {
- logger.info(s"start daemon thread ${task.getTaskId}, ${task.getStatus}")
+ logger.info(
+ s"start progress daemon thread for task ${task.getTaskId}, status ${task.getStatus}"
+ )
Utils.tryQuietly(Thread.sleep(TimeUnit.MILLISECONDS.convert(1, TimeUnit.SECONDS)))
while (!ExecutionNodeStatus.isCompleted(task.getStatus)) {
Utils.tryAndWarn {
@@ -414,7 +433,9 @@ class TaskExecutionServiceImpl
Thread.sleep(TimeUnit.MILLISECONDS.convert(sleepInterval, TimeUnit.SECONDS))
)
}
- logger.info(s"daemon thread exit ${task.getTaskId}, ${task.getStatus}")
+ logger.info(
+ s"End progress daemon thread exit task ${task.getTaskId}, status ${task.getStatus}"
+ )
}
})
}
@@ -535,7 +556,7 @@ class TaskExecutionServiceImpl
} else {
val msg =
"Task null! requestTaskStatus: " + ComputationEngineUtils.GSON.toJson(requestTaskStatus)
- logger.error(msg)
+ logger.info(msg)
ResponseTaskStatus(requestTaskStatus.execId, ExecutionNodeStatus.Cancelled)
}
}
@@ -582,7 +603,7 @@ class TaskExecutionServiceImpl
if (null != task) {
sendToEntrance(task, ResponseTaskLog(logUpdateEvent.taskId, logUpdateEvent.log))
} else {
- logger.error("Task cannot null! logupdateEvent: " + logUpdateEvent.taskId)
+ logger.info("Task cannot null! logupdateEvent: " + logUpdateEvent.taskId)
}
} else if (null != lastTask) {
val executor = executorManager.getReportExecutor
@@ -624,7 +645,7 @@ class TaskExecutionServiceImpl
logger.info(s"task ${task.getTaskId} status $toStatus will not be send to entrance")
}
} else {
- logger.error(
+ logger.info(
"Task cannot null! taskStatusChangedEvent: " + ComputationEngineUtils.GSON
.toJson(taskStatusChangedEvent)
)
@@ -651,7 +672,7 @@ class TaskExecutionServiceImpl
sendToEntrance(task, respRunningInfo)
} else {
- logger.error(
+ logger.info(
"Task cannot null! taskProgressUpdateEvent : " + ComputationEngineUtils.GSON
.toJson(taskProgressUpdateEvent)
)
@@ -672,7 +693,7 @@ class TaskExecutionServiceImpl
)
)
} else {
- logger.error(s"Task cannot null! taskResultCreateEvent: ${taskResultCreateEvent.taskId}")
+ logger.info(s"Task cannot null! taskResultCreateEvent: ${taskResultCreateEvent.taskId}")
}
logger.info(s"Finished to deal result event ${taskResultCreateEvent.taskId}")
}
@@ -683,7 +704,7 @@ class TaskExecutionServiceImpl
if (null != executor) {
executor.getTaskById(taskId)
} else {
- logger.error(s"Executor of taskId : $taskId is not cached.")
+ logger.info(s"Executor of taskId : $taskId is not cached.")
null
}
}
@@ -699,7 +720,7 @@ class TaskExecutionServiceImpl
)
)
} else {
- logger.error(
+ logger.info(
"Task cannot null! taskResultSizeCreatedEvent: " + ComputationEngineUtils.GSON
.toJson(taskResultSizeCreatedEvent)
)
diff --git a/linkis-computation-governance/linkis-engineconn/linkis-engineconn-common/src/main/scala/org/apache/linkis/engineconn/common/conf/EngineConnConf.scala b/linkis-computation-governance/linkis-engineconn/linkis-engineconn-common/src/main/scala/org/apache/linkis/engineconn/common/conf/EngineConnConf.scala
index a20358b57ce..896b1bc7dae 100644
--- a/linkis-computation-governance/linkis-engineconn/linkis-engineconn-common/src/main/scala/org/apache/linkis/engineconn/common/conf/EngineConnConf.scala
+++ b/linkis-computation-governance/linkis-engineconn/linkis-engineconn-common/src/main/scala/org/apache/linkis/engineconn/common/conf/EngineConnConf.scala
@@ -84,6 +84,8 @@ object EngineConnConf {
val JOB_YARN_TASK_URL = CommonVars("linkis.job.task.yarn.url", "");
val JOB_YARN_CLUSTER_TASK_URL = CommonVars("linkis.job.task.yarn.cluster.url", "");
+
+ val ENGINE_CONF_REVENT_SWITCH = CommonVars("linkis.engine.conf.revent.switch", true)
def getWorkHome: String = System.getenv(ENGINE_CONN_LOCAL_PATH_PWD_KEY.getValue)
def getEngineTmpDir: String = System.getenv(ENGINE_CONN_LOCAL_TMP_DIR.getValue)
diff --git a/linkis-computation-governance/linkis-engineconn/linkis-engineconn-plugin-core/src/main/scala/org/apache/linkis/manager/engineplugin/common/conf/EnvConfiguration.scala b/linkis-computation-governance/linkis-engineconn/linkis-engineconn-plugin-core/src/main/scala/org/apache/linkis/manager/engineplugin/common/conf/EnvConfiguration.scala
index 5a85334f19a..e9871bb31c6 100644
--- a/linkis-computation-governance/linkis-engineconn/linkis-engineconn-plugin-core/src/main/scala/org/apache/linkis/manager/engineplugin/common/conf/EnvConfiguration.scala
+++ b/linkis-computation-governance/linkis-engineconn/linkis-engineconn-plugin-core/src/main/scala/org/apache/linkis/manager/engineplugin/common/conf/EnvConfiguration.scala
@@ -37,7 +37,7 @@ object EnvConfiguration {
val ENGINE_CONN_CLASSPATH_FILES =
CommonVars("linkis.engineConn.classpath.files", "", "engineConn额外的配置文件")
- val MAX_METASPACE_SIZE = CommonVars("linkis.engineconn.metaspace.size.max", "256m")
+ val MAX_METASPACE_SIZE = CommonVars("linkis.engineconn.metaspace.size.max", "512m")
lazy val metaspaceSize =
s"-XX:MaxMetaspaceSize=${MAX_METASPACE_SIZE.getValue} -XX:MetaspaceSize=128m"
diff --git a/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/conf/EntranceSpringConfiguration.java b/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/conf/EntranceSpringConfiguration.java
index 1cf9a6b4b1b..afd300a3fe3 100644
--- a/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/conf/EntranceSpringConfiguration.java
+++ b/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/conf/EntranceSpringConfiguration.java
@@ -142,6 +142,8 @@ public EntranceInterceptor[] entranceInterceptors() {
new LabelCheckInterceptor(),
new ParserVarLabelInterceptor(),
new VarSubstitutionInterceptor(),
+ new AISQLTransformInterceptor(),
+ new SensitiveCheckInterceptor(),
new LogPathCreateInterceptor(),
new StorePathEntranceInterceptor(),
new ScalaCodeInterceptor(),
diff --git a/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/errorcode/EntranceErrorCodeSummary.java b/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/errorcode/EntranceErrorCodeSummary.java
index 51a522d3d25..147beda8a85 100644
--- a/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/errorcode/EntranceErrorCodeSummary.java
+++ b/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/errorcode/EntranceErrorCodeSummary.java
@@ -49,7 +49,10 @@ public enum EntranceErrorCodeSummary implements LinkisErrorCode {
JOBRESP_PROTOCOL_NULL(
20011,
"Request jobHistory failed, because:jobRespProtocol is null (请求jobHistory失败,因为jobRespProtocol为null)"),
- READ_TASKS_FAILED(20011, "Read all tasks failed, because:{0}(获取所有任务失败)"),
+  READ_TASKS_FAILED(20011, "Read all tasks failed, because:{0}(获取所有任务失败)"),
+ EXEC_FAILED_TO_RETRY(
+ 20503,
+ "Resources are tight, and the task is executing intelligent retry (资源紧张,当前任务正在进行智能重试)"),
SENDER_RPC_FAILED(20020, "Sender rpc failed"),
@@ -78,6 +81,8 @@ public enum EntranceErrorCodeSummary implements LinkisErrorCode {
LOGPATH_NOT_NULL(20301, "The logPath cannot be empty(日志路径不能为空)"),
+ DOCTORIS_ERROR(20302, "Doctoris data retrieval is abnormal"),
+
FAILOVER_RUNNING_TO_CANCELLED(
30001,
"Job {0} failover, status changed from Running to Cancelled (任务故障转移,状态从Running变更为Cancelled)");
diff --git a/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/job/EntranceExecutionJob.java b/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/job/EntranceExecutionJob.java
index ca19f4d7306..1eb911ecec3 100644
--- a/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/job/EntranceExecutionJob.java
+++ b/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/job/EntranceExecutionJob.java
@@ -158,7 +158,7 @@ public ExecuteRequest jobToExecuteRequest() throws EntranceErrorException {
if (!runtimeMapTmp.containsKey(GovernanceCommonConf.RESULT_SET_STORE_PATH().key())) {
String resultParentPath = CommonLogPathUtils.getResultParentPath(jobRequest);
- CommonLogPathUtils.buildCommonPath(resultParentPath);
+ CommonLogPathUtils.buildCommonPath(resultParentPath, true);
resultSetPathRoot = CommonLogPathUtils.getResultPath(jobRequest);
}
diff --git a/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/parser/AbstractEntranceParser.java b/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/parser/AbstractEntranceParser.java
index 605d736b9f5..8aabf45a201 100644
--- a/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/parser/AbstractEntranceParser.java
+++ b/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/parser/AbstractEntranceParser.java
@@ -19,6 +19,7 @@
import org.apache.linkis.entrance.EntranceContext;
import org.apache.linkis.entrance.EntranceParser;
+import org.apache.linkis.entrance.conf.EntranceConfiguration;
import org.apache.linkis.entrance.exception.EntranceErrorCode;
import org.apache.linkis.entrance.exception.EntranceIllegalParamException;
import org.apache.linkis.entrance.execute.EntranceJob;
@@ -90,6 +91,16 @@ public JobRequest parseToJobRequest(Job job) throws EntranceIllegalParamExceptio
jobRequest.setProgress("" + job.getProgress());
jobRequest.setStatus(job.getState().toString());
jobRequest.setUpdatedTime(new Date());
+
+ if (job.isCompleted()
+ && !job.isSucceed()
+ && EntranceConfiguration.TASK_RETRY_ENABLED()
+ && Integer.valueOf(20503).equals(jobRequest.getErrorCode())
+ && job.getErrorResponse() != null
+ && StringUtils.isNotEmpty(job.getErrorResponse().message())) {
+ jobRequest.setErrorDesc(job.getErrorResponse().message());
+ }
+
if (job.isCompleted()
&& !job.isSucceed()
&& job.getErrorResponse() != null
diff --git a/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/parser/ParserUtils.java b/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/parser/ParserUtils.java
index 86af74d5c88..d5aaa1a71d5 100644
--- a/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/parser/ParserUtils.java
+++ b/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/parser/ParserUtils.java
@@ -17,9 +17,10 @@
package org.apache.linkis.entrance.parser;
+import org.apache.linkis.common.conf.Configuration;
import org.apache.linkis.common.io.FsPath;
-import org.apache.linkis.entrance.conf.EntranceConfiguration$;
import org.apache.linkis.entrance.utils.CommonLogPathUtils;
+import org.apache.linkis.governance.common.conf.GovernanceCommonConf;
import org.apache.linkis.governance.common.entity.job.JobRequest;
import org.apache.linkis.manager.label.utils.LabelUtil;
import org.apache.linkis.storage.utils.StorageUtils;
@@ -28,67 +29,47 @@
import java.text.SimpleDateFormat;
import java.util.Date;
-import java.util.HashMap;
import java.util.Map;
public final class ParserUtils {
-  private static final Map<String, String> types = new HashMap<>();
-
- static {
- types.put("py", "python");
- types.put("python", "python");
- types.put("sql", "sql");
- types.put("pyspark", "python");
- types.put("scala", "scala");
- types.put("rspark", "r");
- types.put("r", "r");
- types.put("java", "java");
- types.put("hql", "hql");
- types.put("sparksql", "sql");
- }
-
public static void generateLogPath(JobRequest jobRequest, Map params) {
String logPath = null;
String logPathPrefix = null;
String logMid = "log";
if (StringUtils.isEmpty(logPathPrefix)) {
- logPathPrefix = EntranceConfiguration$.MODULE$.DEFAULT_LOGPATH_PREFIX().getValue();
+ logPathPrefix = GovernanceCommonConf.DEFAULT_LOGPATH_PREFIX();
}
/*Determine whether logPathPrefix is terminated with /, if it is, delete */
/*判断是否logPathPrefix是否是以 / 结尾, 如果是,就删除*/
- if (logPathPrefix.endsWith("/")) {
+ if (logPathPrefix.endsWith("/")) { // NOSONAR
logPathPrefix = logPathPrefix.substring(0, logPathPrefix.length() - 1);
}
Date date = new Date(System.currentTimeMillis());
SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
+    SimpleDateFormat hourFormat = new SimpleDateFormat("HH"); // added: hour format (24-hour clock)
String dateString = dateFormat.format(date);
+    String hourString = hourFormat.format(date); // added: current hour, e.g. "08" or "14"
String creator = LabelUtil.getUserCreator(jobRequest.getLabels())._2;
String umUser = jobRequest.getExecuteUser();
FsPath lopPrefixPath = new FsPath(logPathPrefix);
if (StorageUtils.HDFS().equals(lopPrefixPath.getFsType())) {
- String commonLogPath = logPathPrefix + "/" + "log" + "/" + dateString + "/" + creator;
+ String commonLogPath = logPathPrefix + "/" + "log" + "/" + dateString + "/";
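+      // With the hour-directory switch on, the common log path becomes .../log/<date>/<hour>/<creator>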
+ if (Configuration.HDFS_HOUR_DIR_SWITCH()) {
+ commonLogPath = commonLogPath + hourString + "/" + creator;
+ } else {
+ commonLogPath = commonLogPath + creator;
+ }
logPath = commonLogPath + "/" + umUser + "/" + jobRequest.getId() + ".log";
- CommonLogPathUtils.buildCommonPath(commonLogPath);
+ CommonLogPathUtils.buildCommonPath(commonLogPath, false);
} else {
- logPath =
- logPathPrefix
- + "/"
- + umUser
- + "/"
- + "log"
- + "/"
- + creator
- + "/"
- + dateString
- + "/"
- + jobRequest.getId()
- + ".log";
+ logPath = logPathPrefix + "/" + umUser + "/log/" + creator + "/";
+ if (Configuration.HDFS_HOUR_DIR_SWITCH()) {
+ logPath = logPath + dateString + "/" + hourString + "/" + jobRequest.getId() + ".log";
+ } else {
+        logPath = logPath + dateString + "/" + jobRequest.getId() + ".log";
+ }
}
jobRequest.setLogPath(logPath);
}
-
- public static String getCorrespondingType(String runType) {
- return types.get(runType.toLowerCase());
- }
}
diff --git a/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/persistence/QueryPersistenceEngine.java b/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/persistence/QueryPersistenceEngine.java
index 2fdf4b9b4a5..7fdaf06eb27 100644
--- a/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/persistence/QueryPersistenceEngine.java
+++ b/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/persistence/QueryPersistenceEngine.java
@@ -17,10 +17,10 @@
package org.apache.linkis.entrance.persistence;
+import org.apache.linkis.common.conf.Configuration$;
import org.apache.linkis.common.exception.ErrorException;
import org.apache.linkis.common.utils.JsonUtils;
import org.apache.linkis.entrance.conf.EntranceConfiguration;
-import org.apache.linkis.entrance.conf.EntranceConfiguration$;
import org.apache.linkis.entrance.exception.EntranceIllegalParamException;
import org.apache.linkis.entrance.exception.EntranceRPCException;
import org.apache.linkis.entrance.exception.QueryFailedException;
@@ -65,8 +65,7 @@ public QueryPersistenceEngine() {
Get the corresponding sender through datawork-linkis-publicservice(通过datawork-linkis-publicservice 拿到对应的sender)
*/
sender =
- Sender.getSender(
- EntranceConfiguration$.MODULE$.JOBHISTORY_SPRING_APPLICATION_NAME().getValue());
+ Sender.getSender(Configuration$.MODULE$.JOBHISTORY_SPRING_APPLICATION_NAME().getValue());
}
private JobRespProtocol sendToJobHistoryAndRetry(RequestProtocol jobReq, String msg)
diff --git a/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/persistence/QueryPersistenceManager.java b/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/persistence/QueryPersistenceManager.java
index b912b58ebbb..f256e112cde 100644
--- a/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/persistence/QueryPersistenceManager.java
+++ b/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/persistence/QueryPersistenceManager.java
@@ -18,23 +18,39 @@
package org.apache.linkis.entrance.persistence;
import org.apache.linkis.common.exception.ErrorException;
+import org.apache.linkis.common.utils.LinkisUtils;
import org.apache.linkis.entrance.EntranceContext;
import org.apache.linkis.entrance.cli.heartbeat.CliHeartbeatMonitor;
+import org.apache.linkis.entrance.conf.EntranceConfiguration;
import org.apache.linkis.entrance.cs.CSEntranceHelper;
import org.apache.linkis.entrance.execute.EntranceJob;
import org.apache.linkis.entrance.log.FlexibleErrorCodeManager;
import org.apache.linkis.governance.common.conf.GovernanceCommonConf;
import org.apache.linkis.governance.common.entity.job.JobRequest;
+import org.apache.linkis.manager.label.builder.factory.LabelBuilderFactoryContext;
+import org.apache.linkis.manager.label.entity.Label;
+import org.apache.linkis.manager.label.entity.entrance.ExecuteOnceLabel;
import org.apache.linkis.protocol.engine.JobProgressInfo;
+import org.apache.linkis.protocol.utils.TaskUtils;
import org.apache.linkis.scheduler.executer.OutputExecuteResponse;
import org.apache.linkis.scheduler.queue.Job;
+import org.apache.commons.lang3.StringUtils;
+
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
import scala.Option;
import scala.Tuple2;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import static org.apache.linkis.entrance.errorcode.EntranceErrorCodeSummary.EXEC_FAILED_TO_RETRY;
+
public class QueryPersistenceManager extends PersistenceManager {
private static final Logger logger = LoggerFactory.getLogger(QueryPersistenceManager.class);
@@ -104,9 +120,16 @@ public void onProgressUpdate(Job job, float progress, JobProgressInfo[] progress
} catch (Exception e) {
logger.warn("Invalid progress : " + entranceJob.getJobRequest().getProgress(), e);
}
+ boolean notUpdate = false;
if (job.getProgress() >= 0
&& persistedProgress >= updatedProgress
&& entranceJob.getUpdateMetrisFlag()) {
+ notUpdate = true;
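+      // still persist a progress reset to 0 when task retry is enabled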
+ if (EntranceConfiguration.TASK_RETRY_ENABLED() && updatedProgress == 0) { // NOSONAR
+ notUpdate = false;
+ }
+ }
+ if (notUpdate) {
return;
}
if (updatedProgress > 1) {
@@ -119,6 +142,119 @@ public void onProgressUpdate(Job job, float progress, JobProgressInfo[] progress
updateJobStatus(job);
}
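+  /**
+   * Decides whether a failed job should be retried: only aiSql jobs whose error matches the
+   * configured retryable codes/descriptions, with retries remaining and no non-retryable
+   * statements in the code. When retry is possible the job is transitioned to WaitForRetry
+   * and true is returned.
+   */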
+ @Override
+ public boolean onJobFailed(
+      Job job, String code, Map<String, Object> props, int errorCode, String errorDesc) {
+ if (!EntranceConfiguration.TASK_RETRY_ENABLED()) {
+ return false;
+ }
+
+ if (!(job instanceof EntranceJob)) {
+ return false;
+ }
+
+ boolean containsAny = false;
+ String errorDescArray = EntranceConfiguration.SUPPORTED_RETRY_ERROR_DESC();
+ String errorCodeArray = EntranceConfiguration.SUPPORTED_RETRY_ERROR_CODES();
+ for (String keyword : errorDescArray.split(",")) {
+ if (errorDesc.contains(keyword.trim()) || errorCodeArray.contains(errorCode + "")) {
+ containsAny = true;
+ break;
+ }
+ }
+
+ if (!containsAny) {
+ return false;
+ }
+
+ AtomicBoolean canRetry = new AtomicBoolean(false);
+ String aiSqlKey = EntranceConfiguration.AI_SQL_KEY().key();
+ String retryNumKey = EntranceConfiguration.RETRY_NUM_KEY().key();
+
+ final EntranceJob entranceJob = (EntranceJob) job;
+
+    // Handle broadcast-table failures by disabling the auto broadcast join
+ String dataFrameKey = EntranceConfiguration.SUPPORT_ADD_RETRY_CODE_KEYS();
+ if (containsAny(errorDesc, dataFrameKey)) {
+ entranceJob
+ .getJobRequest()
+ .setExecutionCode("set spark.sql.autoBroadcastJoinThreshold=-1; " + code);
+ }
+
+    Map<String, Object> startupMap = TaskUtils.getStartupMap(props);
+    // Only retry aiSql jobs
+ if ("true".equals(startupMap.get(aiSqlKey))) {
+ LinkisUtils.tryAndWarn(
+ () -> {
+ int retryNum = (int) startupMap.getOrDefault(retryNumKey, 1);
+ boolean canRetryCode = canRetryCode(code);
+ if (retryNum > 0 && canRetryCode) {
+ logger.info(
+ "mark task: {} status to WaitForRetry, current retryNum: {}, for errorCode: {}, errorDesc: {}",
+ entranceJob.getJobInfo().getId(),
+ retryNum,
+ errorCode,
+ errorDesc);
+                // mark the job for retry
+ job.transitionWaitForRetry();
+
+                // update the error code and description
+ entranceJob.getJobRequest().setErrorCode(EXEC_FAILED_TO_RETRY.getErrorCode());
+ entranceJob.getJobRequest().setErrorDesc(EXEC_FAILED_TO_RETRY.getErrorDesc());
+ canRetry.set(true);
+ startupMap.put(retryNumKey, retryNum - 1);
+                // optionally retry with an execute-once engine
+                if ((boolean) EntranceConfiguration.AI_SQL_RETRY_ONCE().getValue()) {
+ ExecuteOnceLabel onceLabel =
+ LabelBuilderFactoryContext.getLabelBuilderFactory()
+ .createLabel(ExecuteOnceLabel.class);
+                  List<Label<?>> labels = entranceJob.getJobRequest().getLabels();
+ labels.add(onceLabel);
+ logger.info("aisql retry add once label for task id:{}", job.getJobInfo().getId());
+ startupMap.put("executeOnce", true);
+ }
+ TaskUtils.addStartupMap(props, startupMap);
+ logger.info("task {} set retry status success.", entranceJob.getJobInfo().getId());
+ } else {
+ logger.info("task {} not support retry.", entranceJob.getJobInfo().getId());
+ }
+ },
+ logger);
+ }
+ return canRetry.get();
+ }
+
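+  /**
+   * Returns false when the code matches any configured non-retryable statement pattern;
+   * CREATE/DROP remain retryable when followed by IF [NOT] EXISTS.
+   */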
+ private boolean canRetryCode(String code) {
+ String exceptCode = EntranceConfiguration.UNSUPPORTED_RETRY_CODES();
+ String[] keywords = exceptCode.split(",");
+ for (String keyword : keywords) {
+      // Split the keyword on whitespace and drop empty parts
+ String[] parts = keyword.trim().split("\\s+");
+ StringBuilder regexBuilder = new StringBuilder("\\s*");
+ for (String part : parts) {
+ regexBuilder.append(part);
+ regexBuilder.append("\\s*");
+ }
+ if (keyword.startsWith("CREATE")) {
+ regexBuilder.delete(regexBuilder.length() - 3, regexBuilder.length());
+ regexBuilder.append("\\b(?!\\s+IF\\s+NOT\\s+EXISTS)");
+ }
+ if (keyword.startsWith("DROP")) {
+ regexBuilder.delete(regexBuilder.length() - 3, regexBuilder.length());
+ regexBuilder.append("\\b(?!\\s+IF\\s+EXISTS)");
+ }
+
+ String regex = regexBuilder.toString();
+ Pattern pattern = Pattern.compile(regex, Pattern.CASE_INSENSITIVE);
+ Matcher matcher = pattern.matcher(code);
+ if (matcher.find()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
@Override
public void onJobScheduled(Job job) {
updateJobStatus(job);
@@ -197,4 +333,23 @@ private void updateJobStatus(Job job) {
@Override
public void onResultSizeCreated(Job job, int resultSize) {}
+
+ private static boolean containsAny(String src, String target) {
+ if (StringUtils.isBlank(target)) {
+ return false;
+ }
+ return containsAny(src, target.split(","));
+ }
+
+ private static boolean containsAny(String src, String[] target) {
+ if (target == null || StringUtils.isBlank(src)) {
+ return false;
+ }
+ for (String item : target) {
+ if (src.toLowerCase().contains(item.toLowerCase())) {
+ return true;
+ }
+ }
+ return false;
+ }
}
diff --git a/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/server/DefaultEntranceServer.java b/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/server/DefaultEntranceServer.java
index 7c38d27947b..2ad7a1a3afa 100644
--- a/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/server/DefaultEntranceServer.java
+++ b/linkis-computation-governance/linkis-entrance/src/main/java/org/apache/linkis/entrance/server/DefaultEntranceServer.java
@@ -18,6 +18,7 @@
package org.apache.linkis.entrance.server;
import org.apache.linkis.common.ServiceInstance;
+import org.apache.linkis.common.conf.Configuration$;
import org.apache.linkis.entrance.EntranceContext;
import org.apache.linkis.entrance.EntranceServer;
import org.apache.linkis.entrance.conf.EntranceConfiguration;
@@ -69,8 +70,7 @@ private void cleanUpEntranceDirtyData() {
if ((Boolean) EntranceConfiguration$.MODULE$.ENABLE_ENTRANCE_DIRTY_DATA_CLEAR().getValue()) {
logger.info("start to clean up entrance dirty data.");
Sender sender =
- Sender.getSender(
- EntranceConfiguration$.MODULE$.JOBHISTORY_SPRING_APPLICATION_NAME().getValue());
+ Sender.getSender(Configuration$.MODULE$.JOBHISTORY_SPRING_APPLICATION_NAME().getValue());
ServiceInstance thisServiceInstance = Sender.getThisServiceInstance();
sender.ask(new EntranceInstanceConfRequest(thisServiceInstance.getInstance()));
}
@@ -100,7 +100,7 @@ private void shutdownEntrance(ContextClosedEvent event) {
} else {
if (EntranceConfiguration.ENTRANCE_SHUTDOWN_FAILOVER_CONSUME_QUEUE_ENABLED()) {
logger.warn("Entrance exit to update and clean all ConsumeQueue task instances");
- updateAllNotExecutionTaskInstances(false);
+ // updateAllNotExecutionTaskInstances(false);
}
logger.warn("Entrance exit to stop all job");
diff --git a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/EntranceServer.scala b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/EntranceServer.scala
index a610d524b22..68735d23ae7 100644
--- a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/EntranceServer.scala
+++ b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/EntranceServer.scala
@@ -39,16 +39,22 @@ import org.apache.linkis.governance.common.utils.LoggerUtils
import org.apache.linkis.manager.common.protocol.engine.EngineStopRequest
import org.apache.linkis.manager.label.entity.entrance.ExecuteOnceLabel
import org.apache.linkis.protocol.constants.TaskConstant
+import org.apache.linkis.protocol.utils.TaskUtils
import org.apache.linkis.rpc.Sender
import org.apache.linkis.rpc.conf.RPCConfiguration
+import org.apache.linkis.scheduler.conf.SchedulerConfiguration.{
+ ENGINE_PRIORITY_RUNTIME_KEY,
+ FIFO_QUEUE_STRATEGY,
+ PFIFO_SCHEDULER_STRATEGY
+}
import org.apache.linkis.scheduler.queue.{Job, SchedulerEventState}
import org.apache.linkis.server.conf.ServerConfiguration
import org.apache.commons.lang3.StringUtils
import org.apache.commons.lang3.exception.ExceptionUtils
-import java.{lang, util}
import java.text.{MessageFormat, SimpleDateFormat}
+import java.util
import java.util.Date
import java.util.concurrent.TimeUnit
@@ -98,7 +104,52 @@ abstract class EntranceServer extends Logging {
LoggerUtils.setJobIdMDC(jobRequest.getId.toString)
val logAppender = new java.lang.StringBuilder()
- jobRequest = dealInitedJobRequest(jobRequest, logAppender)
+ Utils.tryThrow(
+ getEntranceContext
+ .getOrCreateEntranceInterceptors()
+ .foreach(int => jobRequest = int.apply(jobRequest, logAppender))
+ ) { t =>
+ LoggerUtils.removeJobIdMDC()
+ val error = t match {
+ case error: ErrorException => error
+ case t1: Throwable =>
+ val exception = new EntranceErrorException(
+ FAILED_ANALYSIS_TASK.getErrorCode,
+ MessageFormat.format(
+ FAILED_ANALYSIS_TASK.getErrorDesc,
+ ExceptionUtils.getRootCauseMessage(t)
+ )
+ )
+ exception.initCause(t1)
+ exception
+ case _ =>
+ new EntranceErrorException(
+ FAILED_ANALYSIS_TASK.getErrorCode,
+ MessageFormat.format(
+ FAILED_ANALYSIS_TASK.getErrorDesc,
+ ExceptionUtils.getRootCauseMessage(t)
+ )
+ )
+ }
+ jobRequest match {
+ case t: JobRequest =>
+ t.setErrorCode(error.getErrCode)
+ t.setErrorDesc(error.getDesc)
+ t.setStatus(SchedulerEventState.Failed.toString)
+ t.setProgress(EntranceJob.JOB_COMPLETED_PROGRESS.toString)
+ val infoMap = new util.HashMap[String, AnyRef]
+ infoMap.put(TaskConstant.ENGINE_INSTANCE, "NULL")
+ infoMap.put(TaskConstant.TICKET_ID, "")
+ infoMap.put("message", "Task interception failed and cannot be retried")
+ JobHistoryHelper.updateJobRequestMetrics(jobRequest, null, infoMap)
+ case _ =>
+ }
+ getEntranceContext
+ .getOrCreatePersistenceManager()
+ .createPersistenceEngine()
+ .updateIfNeeded(jobRequest)
+ error
+ }
val job = getEntranceContext.getOrCreateEntranceParser().parseToJob(jobRequest)
Utils.tryThrow {
@@ -106,6 +157,7 @@ abstract class EntranceServer extends Logging {
job.setLogListener(getEntranceContext.getOrCreateLogManager())
job.setProgressListener(getEntranceContext.getOrCreatePersistenceManager())
job.setJobListener(getEntranceContext.getOrCreatePersistenceManager())
+ job.setJobRetryListener(getEntranceContext.getOrCreatePersistenceManager())
job match {
case entranceJob: EntranceJob =>
entranceJob.setEntranceListenerBus(getEntranceContext.getOrCreateEventListenerBus)
@@ -136,6 +188,36 @@ abstract class EntranceServer extends Logging {
SUBMIT_CODE_ISEMPTY.getErrorDesc
)
}
+
+ Utils.tryAndWarn {
+      // If the PFIFO priority-queue strategy is enabled, set the job priority
+ val configMap = params
+ .getOrDefault(TaskConstant.PARAMS, new util.HashMap[String, AnyRef]())
+ .asInstanceOf[util.Map[String, AnyRef]]
+ val properties: util.Map[String, AnyRef] = TaskUtils.getRuntimeMap(configMap)
+ val fifoStrategy: String = FIFO_QUEUE_STRATEGY
+ if (
+ PFIFO_SCHEDULER_STRATEGY.equalsIgnoreCase(
+ fifoStrategy
+ ) && properties != null && !properties.isEmpty
+ ) {
+ val priorityValue: AnyRef = properties.get(ENGINE_PRIORITY_RUNTIME_KEY)
+ if (priorityValue != null) {
+ val value: Int = getPriority(priorityValue.toString)
+ logAppender.append(LogUtils.generateInfo(s"The task set priority is ${value} \n"))
+ job.setPriority(value)
+ }
+ }
+ }
+
+ Utils.tryCatch {
+ if (logAppender.length() > 0) {
+ job.getLogListener.foreach(_.onLogUpdate(job, logAppender.toString.trim))
+ }
+ } { t =>
+ logger.error("Failed to write init log, reason: ", t)
+ }
+
getEntranceContext.getOrCreateScheduler().submit(job)
val msg = LogUtils.generateInfo(
s"Job with jobId : ${jobRequest.getId} and execID : ${job.getId()} submitted "
@@ -175,44 +257,30 @@ abstract class EntranceServer extends Logging {
}
}
- def logReader(execId: String): LogReader
+ def updateAllNotExecutionTaskInstances(retryWhenUpdateFail: Boolean): Unit = {
+ val consumeQueueTasks = getAllConsumeQueueTask()
- def getJob(execId: String): Option[Job] =
- getEntranceContext.getOrCreateScheduler().get(execId).map(_.asInstanceOf[Job])
+ clearAllConsumeQueue()
+ logger.info("Finished to clean all ConsumeQueue")
- private[entrance] def getEntranceWebSocketService: Option[EntranceWebSocketService] =
- if (ServerConfiguration.BDP_SERVER_SOCKET_MODE.getValue) {
- if (entranceWebSocketService.isEmpty) synchronized {
- if (entranceWebSocketService.isEmpty) {
- entranceWebSocketService = Some(new EntranceWebSocketService)
- entranceWebSocketService.foreach(_.setEntranceServer(this))
- entranceWebSocketService.foreach(
- getEntranceContext.getOrCreateEventListenerBus.addListener
- )
+ if (consumeQueueTasks != null && consumeQueueTasks.length > 0) {
+ val taskIds = new util.ArrayList[Long]()
+ consumeQueueTasks.foreach(job => {
+ taskIds.add(job.getJobRequest.getId.asInstanceOf[Long])
+ job match {
+ case entranceExecutionJob: EntranceExecutionJob =>
+ val msg = LogUtils.generateWarn(
+ s"job ${job.getJobRequest.getId} clean from ConsumeQueue, wait for failover"
+ )
+ entranceExecutionJob.getLogListener.foreach(_.onLogUpdate(entranceExecutionJob, msg))
+ entranceExecutionJob.getLogWriter.foreach(_.close())
+ case _ =>
}
- }
- entranceWebSocketService
- } else None
+ })
- def getAllUndoneTask(filterWords: String): Array[EntranceJob] = {
- val consumers = getEntranceContext
- .getOrCreateScheduler()
- .getSchedulerContext
- .getOrCreateConsumerManager
- .listConsumers()
- .toSet
- val filterConsumer = if (StringUtils.isNotBlank(filterWords)) {
- consumers.filter(_.getGroup.getGroupName.contains(filterWords))
- } else {
- consumers
+ JobHistoryHelper.updateAllConsumeQueueTask(taskIds, retryWhenUpdateFail)
+ logger.info("Finished to update all not execution task instances")
}
- filterConsumer
- .flatMap { consumer =>
- consumer.getRunningEvents ++ consumer.getConsumeQueue.getWaitingEvents
- }
- .filter(job => job != null && job.isInstanceOf[EntranceJob])
- .map(_.asInstanceOf[EntranceJob])
- .toArray
}
def getAllConsumeQueueTask(): Array[EntranceJob] = {
@@ -241,32 +309,6 @@ abstract class EntranceServer extends Logging {
.foreach(_.getConsumeQueue.clearAll())
}
- def updateAllNotExecutionTaskInstances(retryWhenUpdateFail: Boolean): Unit = {
- val consumeQueueTasks = getAllConsumeQueueTask()
-
- clearAllConsumeQueue()
- logger.info("Finished to clean all ConsumeQueue")
-
- if (consumeQueueTasks != null && consumeQueueTasks.length > 0) {
- val taskIds = new util.ArrayList[Long]()
- consumeQueueTasks.foreach(job => {
- taskIds.add(job.getJobRequest.getId.asInstanceOf[Long])
- job match {
- case entranceExecutionJob: EntranceExecutionJob =>
- val msg = LogUtils.generateWarn(
- s"job ${job.getJobRequest.getId} clean from ConsumeQueue, wait for failover"
- )
- entranceExecutionJob.getLogListener.foreach(_.onLogUpdate(entranceExecutionJob, msg))
- entranceExecutionJob.getLogWriter.foreach(_.close())
- case _ =>
- }
- })
-
- JobHistoryHelper.updateAllConsumeQueueTask(taskIds, retryWhenUpdateFail)
- logger.info("Finished to update all not execution task instances")
- }
- }
-
/**
* execute failover job (提交故障转移任务,返回新的execId)
*
@@ -306,7 +348,7 @@ abstract class EntranceServer extends Logging {
}
}
- def killOldEC(jobRequest: JobRequest, logAppender: lang.StringBuilder): Unit = {
+ def killOldEC(jobRequest: JobRequest, logAppender: java.lang.StringBuilder): Unit = {
Utils.tryCatch {
logAppender.append(
LogUtils
@@ -384,7 +426,7 @@ abstract class EntranceServer extends Logging {
}
}
- def dealInitedJobRequest(jobReq: JobRequest, logAppender: lang.StringBuilder): JobRequest = {
+ def dealInitedJobRequest(jobReq: JobRequest, logAppender: java.lang.StringBuilder): JobRequest = {
var jobRequest = jobReq
Utils.tryThrow(
getEntranceContext
@@ -435,7 +477,7 @@ abstract class EntranceServer extends Logging {
jobRequest
}
- def dealRunningJobRequest(jobRequest: JobRequest, logAppender: lang.StringBuilder): Unit = {
+ def dealRunningJobRequest(jobRequest: JobRequest, logAppender: java.lang.StringBuilder): Unit = {
Utils.tryCatch {
// error_msg
val msg =
@@ -480,7 +522,10 @@ abstract class EntranceServer extends Logging {
}
}
- def initAndSubmitJobRequest(jobRequest: JobRequest, logAppender: lang.StringBuilder): Unit = {
+ def initAndSubmitJobRequest(
+ jobRequest: JobRequest,
+ logAppender: java.lang.StringBuilder
+ ): Unit = {
// init properties
initJobRequestProperties(jobRequest, logAppender)
@@ -569,7 +614,7 @@ abstract class EntranceServer extends Logging {
private def initJobRequestProperties(
jobRequest: JobRequest,
- logAppender: lang.StringBuilder
+ logAppender: java.lang.StringBuilder
): Unit = {
logger.info(s"job ${jobRequest.getId} start to initialize the properties")
val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
@@ -643,6 +688,25 @@ abstract class EntranceServer extends Logging {
logger.info(s"job ${jobRequest.getId} success to initialize the properties")
}
+ def logReader(execId: String): LogReader
+
+ def getJob(execId: String): Option[Job] =
+ getEntranceContext.getOrCreateScheduler().get(execId).map(_.asInstanceOf[Job])
+
+ private[entrance] def getEntranceWebSocketService: Option[EntranceWebSocketService] =
+ if (ServerConfiguration.BDP_SERVER_SOCKET_MODE.getValue) {
+ if (entranceWebSocketService.isEmpty) synchronized {
+ if (entranceWebSocketService.isEmpty) {
+ entranceWebSocketService = Some(new EntranceWebSocketService)
+ entranceWebSocketService.foreach(_.setEntranceServer(this))
+ entranceWebSocketService.foreach(
+ getEntranceContext.getOrCreateEventListenerBus.addListener
+ )
+ }
+ }
+ entranceWebSocketService
+ } else None
+
def getAllUndoneTask(filterWords: String, ecType: String = null): Array[EntranceJob] = {
val consumers = getEntranceContext
.getOrCreateScheduler()
@@ -705,6 +769,23 @@ abstract class EntranceServer extends Logging {
startTimeOutCheck()
}
+ val DOT = "."
+ val DEFAULT_PRIORITY = 100
+
+ private def getPriority(value: String): Int = {
+ var priority: Int = -1
+ Utils.tryAndWarn({
+ priority =
+ if (value.contains(DOT)) value.substring(0, value.indexOf(DOT)).toInt else value.toInt
+ })
+ if (priority < 0 || priority > Integer.MAX_VALUE - 1) {
+ logger.warn(s"illegal queue priority: ${value}")
+ DEFAULT_PRIORITY
+ } else {
+ priority
+ }
+ }
+
}
object EntranceServer {
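Illustrative sketch (not part of the patch): the PFIFO branch above reads ENGINE_PRIORITY_RUNTIME_KEY from the runtime params and normalizes it with getPriority, which truncates a fractional value and falls back to a default of 100 when the input is negative or unparsable. The standalone object below only mirrors that normalization; the object name and main method are illustrative.

    object PrioritySketch {
      private val DOT = "."
      private val DEFAULT_PRIORITY = 100

      // Truncate "10.5" to 10; fall back to the default on negative or non-numeric input
      def getPriority(value: String): Int = {
        val parsed =
          try {
            if (value.contains(DOT)) value.substring(0, value.indexOf(DOT)).toInt else value.toInt
          } catch { case _: NumberFormatException => -1 }
        if (parsed < 0) DEFAULT_PRIORITY else parsed
      }

      def main(args: Array[String]): Unit = {
        println(getPriority("10.5")) // 10
        println(getPriority("-3"))   // 100
        println(getPriority("abc"))  // 100
      }
    }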
diff --git a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/conf/EntranceConfiguration.scala b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/conf/EntranceConfiguration.scala
index 9e09374e4d0..c129c37a8df 100644
--- a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/conf/EntranceConfiguration.scala
+++ b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/conf/EntranceConfiguration.scala
@@ -30,20 +30,6 @@ object EntranceConfiguration {
val JOB_MAX_PERSIST_WAIT_TIME =
CommonVars("wds.linkis.entrance.job.persist.wait.max", new TimeType("5m"))
- val MULTI_ENTRANCE_CONDITION = CommonVars("wds.linkis.entrance.multi.entrance.flag", true)
-
- val JOBHISTORY_SPRING_APPLICATION_NAME =
- CommonVars("wds.linkis.jobhistory.application.name", "linkis-ps-jobhistory")
-
- /**
- * DEFAULT_LOGPATH_PREFIX is the prefix that represents the default log storage path
- * DEFAULT_LOGPATH_PREFIX 是表示默认的日志存储路径的前缀
- */
- val DEFAULT_LOGPATH_PREFIX = CommonVars[String](
- "wds.linkis.entrance.config.log.path",
- CommonVars[String]("wds.linkis.filesystem.hdfs.root.path").getValue
- )
-
/**
* Default_Cache_Max is used to specify the size of the LoopArray of the CacheLogWriter
* Default_Cache_Max 是用来指定CacheLogWriter的LoopArray的大小
@@ -96,6 +82,9 @@ object EntranceConfiguration {
*/
val WDS_LINKIS_INSTANCE = CommonVars("wds.linkis.rm.instance", 10)
+ val WDS_LINKIS_ENTRANCE_RUNNING_JOB =
+ CommonVars("wds.linkis.engine.running.job.max", WDS_LINKIS_INSTANCE.getValue)
+
val ENTRANCE_INSTANCE_MIN = CommonVars("wds.linkis.entrance.runningjob.min", 5)
val LOG_EXCLUDE_ALL = CommonVars("wds.linkis.log.exclude.all", "com.netflix")
@@ -221,11 +210,14 @@ object EntranceConfiguration {
CommonVars("wds.linkis.entrance.user.creator.ip.interceptor.switch", false)
val TEMPLATE_CONF_SWITCH =
- CommonVars("wds.linkis.entrance.template.conf.interceptor.switch", false)
+ CommonVars("wds.linkis.entrance.template.conf.interceptor.switch", true)
val TEMPLATE_CONF_ADD_ONCE_LABEL_ENABLE =
CommonVars("wds.linkis.entrance.template.add.once.label.enable", false)
+ val SUPPORT_TEMPLATE_CONF_RETRY_ENABLE =
+ CommonVars("linkis.entrance.template.retry.enable", false)
+
val ENABLE_ENTRANCE_DIRTY_DATA_CLEAR: CommonVars[Boolean] =
CommonVars[Boolean]("linkis.entrance.auto.clean.dirty.data.enable", true)
@@ -270,7 +262,7 @@ object EntranceConfiguration {
// if true, the job in ConsumeQueue can be failover
val ENTRANCE_SHUTDOWN_FAILOVER_CONSUME_QUEUE_ENABLED =
- CommonVars("linkis.entrance.shutdown.failover.consume.queue.enable", true).getValue
+ CommonVars("linkis.entrance.shutdown.failover.consume.queue.enable", false).getValue
val ENTRANCE_GROUP_SCAN_ENABLED = CommonVars("linkis.entrance.group.scan.enable", false)
@@ -294,4 +286,124 @@ object EntranceConfiguration {
val ENABLE_HDFS_RES_DIR_PRIVATE =
CommonVars[Boolean]("linkis.entrance.enable.hdfs.res.dir.private", false).getValue
+ val UNSUPPORTED_RETRY_CODES =
+ CommonVars("linkis.entrance.unsupported.retry.codes", "NOCODE").getValue
+
+ val SUPPORTED_RETRY_ERROR_CODES =
+ CommonVars(
+ "linkis.entrance.supported.retry.error.codes",
+ "01002,01003,13005,13006,13012"
+ ).getValue
+
+ val SUPPORTED_RETRY_ERROR_DESC =
+ CommonVars(
+ "linkis.entrance.supported.retry.error.desc",
+ "Spark application has already stopped,Spark application sc has already stopped,Failed to allocate a page,dataFrame to local exception,org.apache.spark.sql.catalyst.expressions.codegen.CodeGenerator"
+ ).getValue
+
+ val SUPPORT_ADD_RETRY_CODE_KEYS =
+ CommonVars(
+ "linkis.entrance.supported.add.retry.code.keys",
+ "dataFrame to local exception,org.apache.spark.sql.catalyst.expressions.codegen.CodeGenerator"
+ ).getValue
+
+ val TASK_RETRY_ENABLED: Boolean =
+ CommonVars[Boolean]("linkis.task.retry.enabled", true).getValue
+
+ val AI_SQL_DEFAULT_SPARK_ENGINE_TYPE: String =
+ CommonVars[String]("linkis.ai.sql.default.spark.engine.type", "spark-3.4.4").getValue
+
+ val AI_SQL_DEFAULT_HIVE_ENGINE_TYPE: String =
+ CommonVars[String]("linkis.ai.sql.default.hive.engine.type", "hive-2.3.3").getValue
+
+ val AI_SQL_HIVE_TEMPLATE_KEYS: String =
+ CommonVars[String]("linkis.ai.sql.hive.template.keys", "hive,mapreduce").getValue
+
+ val AI_SQL_CREATORS: String =
+ CommonVars[String]("linkis.ai.sql.support.creators", "IDE,MCP").getValue
+
+ val AI_SQL_KEY: CommonVars[String] =
+ CommonVars[String]("linkis.ai.sql.enable", "true")
+
+ val RETRY_NUM_KEY: CommonVars[Int] =
+ CommonVars[Int]("linkis.ai.retry.num", 1)
+
+ val AI_SQL_RETRY_ONCE: CommonVars[Boolean] =
+ CommonVars[Boolean]("linkis.ai.sql.once.enable", true)
+
+ val SPARK3_VERSION_COERCION_USERS: String =
+ CommonVars[String]("spark.version.coercion.users", "").getHotValue()
+
+ val SPARK3_VERSION_COERCION_DEPARTMENT: String =
+ CommonVars[String]("spark.version.coercion.department.id", "").getHotValue()
+
+ val SPARK3_VERSION_COERCION_SWITCH: Boolean =
+ CommonVars[Boolean]("spark.version.coercion.switch", false).getValue
+
+ val PYTHON_SAFE_CHECK_SWITCH = CommonVars("linkis.python.safe.check.switch", false).getValue
+
+ val DOCTOR_URL = CommonVars("linkis.doctor.url", "").getValue
+
+ val DOCTOR_DYNAMIC_ENGINE_URL = CommonVars(
+ "linkis.aisql.doctor.api",
+ "/api/v1/external/engine/diagnose?app_id=$app_id×tamp=$timestamp&nonce=$nonce&signature=$signature"
+ ).getValue
+
+ val DOCTOR_ENCRYPT_SQL_URL = CommonVars(
+ "linkis.encrypt.doctor.api",
+ "/api/v1/external/plaintext/diagnose?app_id=$app_id×tamp=$timestamp&nonce=$nonce&signature=$signature"
+ ).getValue
+
+ val DOCTOR_SIGNATURE_TOKEN = CommonVars("linkis.doctor.signature.token", "").getValue
+
+ val DOCTOR_NONCE = CommonVars.apply("linkis.doctor.signature.nonce", "").getValue
+
+ val LINKIS_SYSTEM_NAME = CommonVars("linkis.system.name", "").getValue
+
+ val DOCTOR_CLUSTER = CommonVars("linkis.aisql.doctor.cluster", "").getValue
+
+ val AI_SQL_DYNAMIC_ENGINE_SWITCH =
+ CommonVars("linkis.aisql.dynamic.engine.type.switch", false).getValue
+
+ val DOCTOR_REQUEST_TIMEOUT = CommonVars("linkis.aisql.doctor.http.timeout", 30000).getValue
+
+ val DOCTOR_HTTP_MAX_CONNECT = CommonVars("linkis.aisql.doctor.http.max.connect", 20).getValue
+
+ val SPARK_EXECUTOR_CORES = CommonVars.apply("spark.executor.cores", "2");
+
+ var SPARK_EXECUTOR_MEMORY = CommonVars.apply("spark.executor.memory", "6G");
+
+ var SPARK_DYNAMIC_ALLOCATION_MAX_EXECUTORS =
+ CommonVars.apply("spark.dynamicAllocation.maxExecutors", "50");
+
+ var SPARK_EXECUTOR_INSTANCES = CommonVars.apply("spark.executor.instances", "1");
+
+ var SPARK_EXECUTOR_MEMORY_OVERHEAD = CommonVars.apply("spark.executor.memoryOverhead", "2G");
+
+ var SPARK3_PYTHON_VERSION = CommonVars.apply("spark.python.version", "python3");
+
+ var SPARK_DYNAMIC_ALLOCATION_ENABLED =
+ CommonVars.apply("spark.dynamic.allocation.enabled", false).getValue
+
+ var SPARK_DYNAMIC_ALLOCATION_ADDITIONAL_CONFS =
+ CommonVars.apply("spark.dynamic.allocation.additional.confs", "").getValue
+
+ var DOCTOR_SENSITIVE_SQL_CHECK_SWITCH =
+ CommonVars[Boolean]("linkis.doctor.sensitive.sql.check.switch", false).getValue
+
+ var DOCTOR_SENSITIVE_SQL_CHECK_RUNTYPE =
+ CommonVars[String]("linkis.doctor.sensitive.sql.check.run.Type", "sql,python").getValue
+
+ var DOCTOR_SENSITIVE_SQL_CHECK_CREATOR =
+ CommonVars[String]("linkis.doctor.sensitive.sql.check.creator", "").getValue
+
+ var DOCTOR_SENSITIVE_SQL_CHECK_DEPARTMENT =
+ CommonVars[String]("linkis.doctor.sensitive.sql.check.department", "").getValue
+
+ var DOCTOR_SENSITIVE_SQL_CHECK_WHITELIST =
+ CommonVars[String]("linkis.doctor.sensitive.sql.check.whitelist", "").getValue
+
+ var DOCTOR_SENSITIVE_SQL_CHECK_ENGINETYPE =
+ CommonVars[String]("linkis.doctor.sensitive.sql.check.engine.type", "hive,spark").getValue
+
}
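Illustrative sketch (not part of the patch): the new retry-related keys above are plain comma-separated CommonVars; a consumer is expected to split them and match a failed task's error code or message against the lists. The isRetryable helper below is hypothetical, and the desc default is shortened here; the actual retry decision is made by the JobRetryListener implementation, which this diff does not show.

    import org.apache.linkis.common.conf.CommonVars

    object RetryRuleSketch {
      // Same keys as the new EntranceConfiguration entries above (desc default shortened here)
      private val supportedCodes: Set[String] =
        CommonVars("linkis.entrance.supported.retry.error.codes", "01002,01003,13005,13006,13012").getValue
          .split(",").map(_.trim).toSet

      private val supportedDescs: Array[String] =
        CommonVars("linkis.entrance.supported.retry.error.desc", "Spark application has already stopped").getValue
          .split(",").map(_.trim)

      // Hypothetical helper: retry when the error code or message matches the configured lists
      def isRetryable(errorCode: String, errorMsg: String): Boolean =
        supportedCodes.contains(errorCode) || supportedDescs.exists(errorMsg.contains)
    }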
diff --git a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/execute/DefaultEntranceExecutor.scala b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/execute/DefaultEntranceExecutor.scala
index 0638ef59d2f..b63734279c1 100644
--- a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/execute/DefaultEntranceExecutor.scala
+++ b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/execute/DefaultEntranceExecutor.scala
@@ -20,12 +20,13 @@ package org.apache.linkis.entrance.execute
import org.apache.linkis.common.log.LogUtils
import org.apache.linkis.common.utils.{Logging, Utils}
import org.apache.linkis.entrance.exception.{EntranceErrorCode, EntranceErrorException}
-import org.apache.linkis.entrance.job.EntranceExecuteRequest
+import org.apache.linkis.entrance.job.{EntranceExecuteRequest, EntranceExecutionJob}
import org.apache.linkis.entrance.orchestrator.EntranceOrchestrationFactory
import org.apache.linkis.entrance.utils.JobHistoryHelper
import org.apache.linkis.governance.common.entity.ExecutionNodeStatus
import org.apache.linkis.governance.common.protocol.task.ResponseTaskStatus
import org.apache.linkis.governance.common.utils.LoggerUtils
+import org.apache.linkis.manager.label.constant.LabelKeyConstant
import org.apache.linkis.manager.label.entity.Label
import org.apache.linkis.manager.label.entity.engine.CodeLanguageLabel
import org.apache.linkis.manager.label.utils.LabelUtil
@@ -47,6 +48,7 @@ import org.apache.linkis.orchestrator.execution.{
import org.apache.linkis.orchestrator.execution.impl.DefaultFailedTaskResponse
import org.apache.linkis.orchestrator.plans.unit.CodeLogicalUnit
import org.apache.linkis.protocol.constants.TaskConstant
+import org.apache.linkis.protocol.utils.TaskUtils
import org.apache.linkis.scheduler.executer._
import org.apache.linkis.server.BDPJettyServerHelper
@@ -55,6 +57,8 @@ import org.apache.commons.lang3.exception.ExceptionUtils
import java.util
import java.util.Date
+import scala.collection.JavaConverters.mapAsScalaMapConverter
+
class DefaultEntranceExecutor(id: Long)
extends EntranceExecutor(id)
with SingleTaskOperateSupport
@@ -107,7 +111,6 @@ class DefaultEntranceExecutor(id: Long)
entranceExecuteRequest: EntranceExecuteRequest,
orchestration: Orchestration
): Unit = {
- LoggerUtils.setJobIdMDC(getId.toString)
orchestrationResponse match {
case succeedResponse: SucceedTaskResponse =>
succeedResponse match {
@@ -130,35 +133,10 @@ class DefaultEntranceExecutor(id: Long)
null != arrayResultSetPathResp.getResultSets && arrayResultSetPathResp.getResultSets.length > 0
) {
val resultsetSize = arrayResultSetPathResp.getResultSets.length
- entranceExecuteRequest.getJob.setResultSize(resultsetSize)
entranceExecuteRequest.getJob
.asInstanceOf[EntranceJob]
.addAndGetResultSize(resultsetSize)
}
- val firstResultSet = arrayResultSetPathResp.getResultSets.headOption.orNull
- if (null != firstResultSet) {
- // assert that all result set files have same parent path, so we get the first
- Utils.tryCatch {
- entranceExecuteRequest.getJob
- .asInstanceOf[EntranceJob]
- .getEntranceContext
- .getOrCreatePersistenceManager()
- .onResultSetCreated(
- entranceExecuteRequest.getJob,
- AliasOutputExecuteResponse(firstResultSet.alias, firstResultSet.result)
- )
- } { case e: Exception =>
- val msg = s"Persist resultSet error. ${e.getMessage}"
- logger.error(msg)
- val errorExecuteResponse = new DefaultFailedTaskResponse(
- msg,
- EntranceErrorCode.RESULT_NOT_PERSISTED_ERROR.getErrCode,
- e
- )
- dealResponse(errorExecuteResponse, entranceExecuteRequest, orchestration)
- return
- }
- }
case _ =>
logger.info(
s"JobRequest : ${entranceExecuteRequest.jobId()} succeed to execute task,no result."
@@ -186,7 +164,6 @@ class DefaultEntranceExecutor(id: Long)
_.onLogUpdate(entranceExecuteRequest.getJob, LogUtils.generateERROR(msg))
)
}
- LoggerUtils.removeJobIdMDC()
}
def requestToComputationJobReq(entranceExecuteRequest: EntranceExecuteRequest): JobReq = {
@@ -231,12 +208,66 @@ class DefaultEntranceExecutor(id: Long)
orchestration: Orchestration,
failedResponse: FailedTaskResponse
) = {
- val msg = failedResponse.getErrorCode + ", " + failedResponse.getErrorMsg
- getEngineExecuteAsyncReturn.foreach { jobReturn =>
- jobReturn.notifyError(msg, failedResponse.getCause)
- jobReturn.notifyStatus(
- ResponseTaskStatus(entranceExecuteRequest.getJob.getId, ExecutionNodeStatus.Failed)
+ val msg: String = failedResponse.getErrorCode + ", " + failedResponse.getErrorMsg
+ var canRetry = false
+ val props: util.Map[String, AnyRef] = entranceExecuteRequest.properties()
+ val job: EntranceExecutionJob = entranceExecuteRequest.getJob
+ job.getJobRetryListener.foreach(listener => {
+ canRetry = listener.onJobFailed(
+ entranceExecuteRequest.getJob,
+ entranceExecuteRequest.code(),
+ props,
+ failedResponse.getErrorCode,
+ failedResponse.getErrorMsg
)
+ })
+ // If the job can be retried, reset it and rerun it; otherwise update it to the failed state
+ if (canRetry) {
+ // Retry is possible: reset the task progress to 0
+ logger.info(s"task: ${job.getId} reset progress from ${job.getProgress} to 0.0")
+ job.getProgressListener.foreach(_.onProgressUpdate(job, 0.0f, null))
+
+ // If template parameters exist, the engine needs to be restarted with those template parameters
+ val params: util.Map[String, AnyRef] = entranceExecuteRequest.getJob.getJobRequest.getParams
+ val runtimeMap: util.Map[String, AnyRef] = TaskUtils.getRuntimeMap(params)
+ val startMap: util.Map[String, AnyRef] = TaskUtils.getStartupMap(params)
+ if (runtimeMap.containsKey(LabelKeyConstant.TEMPLATE_CONF_NAME_KEY)) {
+ val tempConf: AnyRef = runtimeMap
+ .getOrDefault(LabelKeyConstant.TEMPLATE_CONF_NAME_KEY, new util.HashMap[String, AnyRef]())
+ tempConf match {
+ case map: util.HashMap[String, AnyRef] =>
+ map.asScala.foreach { case (key, value) =>
+ // Keep the spark3-related parameters that were already set
+ if (!startMap.containsKey(key)) {
+ startMap.put(key, value)
+ }
+ }
+ case _ =>
+ }
+ }
+
+ // Handle the failed task response
+ failedResponse match {
+ case rte: DefaultFailedTaskResponse =>
+ if (rte.errorIndex >= 0) {
+ logger.info(s"tasks execute error with error index: ${rte.errorIndex}")
+ val newParams: util.Map[String, AnyRef] = new util.HashMap[String, AnyRef]()
+ newParams.put("execute.error.code.index", rte.errorIndex.toString)
+ LogUtils.generateInfo(
+ s"tasks execute error with error index: ${rte.errorIndex} and will retry."
+ )
+ TaskUtils.addRuntimeMap(props, newParams)
+ }
+ case _ =>
+ }
+ } else {
+ logger.debug(s"task execution failed with: ${msg}")
+ getEngineExecuteAsyncReturn.foreach { jobReturn =>
+ jobReturn.notifyError(msg, failedResponse.getCause)
+ jobReturn.notifyStatus(
+ ResponseTaskStatus(entranceExecuteRequest.getJob.getId, ExecutionNodeStatus.Failed)
+ )
+ }
}
}
@@ -261,15 +292,6 @@ class DefaultEntranceExecutor(id: Long)
true
}
- def getRunningOrchestrationFuture: Option[OrchestrationFuture] = {
- val asyncReturn = getEngineExecuteAsyncReturn
- if (asyncReturn.isDefined) {
- asyncReturn.get.getOrchestrationFuture()
- } else {
- None
- }
- }
-
override protected def callExecute(request: ExecuteRequest): ExecuteResponse = {
val entranceExecuteRequest: EntranceExecuteRequest = request match {
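Illustrative sketch (not part of the patch): in the retry branch above, template conf values cached under LabelKeyConstant.TEMPLATE_CONF_NAME_KEY in the runtime map are merged into the startup map, with keys that are already present (for example user-set spark parameters) left untouched. The standalone merge below reproduces just that precedence; names are illustrative.

    import java.util
    import scala.collection.JavaConverters._

    object TemplateRetryMergeSketch {

      // Copy template conf into the startup map, never overwriting an existing startup key
      def mergeTemplateConf(
          startMap: util.Map[String, AnyRef],
          templateConf: util.Map[String, AnyRef]
      ): Unit =
        templateConf.asScala.foreach { case (key, value) =>
          if (!startMap.containsKey(key)) startMap.put(key, value)
        }

      def main(args: Array[String]): Unit = {
        val start = new util.HashMap[String, AnyRef]()
        start.put("spark.executor.memory", "8G")
        val template = new util.HashMap[String, AnyRef]()
        template.put("spark.executor.memory", "6G")
        template.put("spark.executor.cores", "2")
        mergeTemplateConf(start, template)
        println(start) // keeps spark.executor.memory=8G, adds spark.executor.cores=2
      }
    }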
diff --git a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/execute/EntranceExecutor.scala b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/execute/EntranceExecutor.scala
index d20b5ea8fbb..be7fb13871f 100644
--- a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/execute/EntranceExecutor.scala
+++ b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/execute/EntranceExecutor.scala
@@ -118,6 +118,14 @@ abstract class EntranceExecutor(val id: Long) extends Executor with Logging {
super.hashCode()
}
+ def getRunningOrchestrationFuture: Option[OrchestrationFuture] = {
+ if (null != engineReturn) {
+ engineReturn.getOrchestrationFuture()
+ } else {
+ None
+ }
+ }
+
}
class EngineExecuteAsyncReturn(
diff --git a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/execute/EntranceJob.scala b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/execute/EntranceJob.scala
index 50efcafc855..7dda86e5a26 100644
--- a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/execute/EntranceJob.scala
+++ b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/execute/EntranceJob.scala
@@ -265,6 +265,11 @@ abstract class EntranceJob extends Job {
transitionCompleted(executeCompleted)
}
+ def transitionWaitForRetry(reason: String): Unit = {
+ logger.debug("Job failed with reason: " + reason)
+ super.transitionWaitForRetry()
+ }
+
override protected def isJobShouldRetry(errorExecuteResponse: ErrorExecuteResponse): Boolean =
isJobSupportRetry && errorExecuteResponse != null &&
(if (RPCUtils.isReceiverNotExists(errorExecuteResponse.t)) {
diff --git a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/interceptor/impl/AISQLTransformInterceptor.scala b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/interceptor/impl/AISQLTransformInterceptor.scala
new file mode 100644
index 00000000000..b457287e3fd
--- /dev/null
+++ b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/interceptor/impl/AISQLTransformInterceptor.scala
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.linkis.entrance.interceptor.impl
+
+import org.apache.linkis.common.conf.Configuration
+import org.apache.linkis.common.log.LogUtils
+import org.apache.linkis.common.utils.{Logging, Utils}
+import org.apache.linkis.common.utils.CodeAndRunTypeUtils.LANGUAGE_TYPE_AI_SQL
+import org.apache.linkis.entrance.conf.EntranceConfiguration
+import org.apache.linkis.entrance.conf.EntranceConfiguration._
+import org.apache.linkis.entrance.interceptor.EntranceInterceptor
+import org.apache.linkis.entrance.utils.EntranceUtils
+import org.apache.linkis.governance.common.entity.job.{JobAiRequest, JobRequest}
+import org.apache.linkis.governance.common.protocol.job.JobAiReqInsert
+import org.apache.linkis.manager.label.builder.factory.LabelBuilderFactoryContext
+import org.apache.linkis.manager.label.conf.LabelCommonConfig
+import org.apache.linkis.manager.label.entity.Label
+import org.apache.linkis.manager.label.entity.engine.{EngineType, EngineTypeLabel, UserCreatorLabel}
+import org.apache.linkis.manager.label.utils.LabelUtil
+import org.apache.linkis.protocol.utils.TaskUtils
+import org.apache.linkis.rpc.Sender
+
+import org.apache.commons.lang3.StringUtils
+
+import org.springframework.beans.BeanUtils
+
+import java.{lang, util}
+import java.util.Date
+
+import scala.collection.JavaConverters._
+
+class AISQLTransformInterceptor extends EntranceInterceptor with Logging {
+
+ override def apply(jobRequest: JobRequest, logAppender: lang.StringBuilder): JobRequest = {
+ val aiSqlEnable: Boolean = "true".equals(AI_SQL_KEY.getValue)
+ val supportAISQLCreator: String = AI_SQL_CREATORS.toLowerCase()
+ val sqlLanguage: String = LANGUAGE_TYPE_AI_SQL
+ val sparkEngineType: String = AI_SQL_DEFAULT_SPARK_ENGINE_TYPE
+ val hiveEngineType: String = AI_SQL_DEFAULT_HIVE_ENGINE_TYPE
+ val labels: util.List[Label[_]] = jobRequest.getLabels
+ val codeType: String = LabelUtil.getCodeType(labels)
+ // engineType and creator have been verified in LabelCheckInterceptor.
+ val userCreatorOpt: Option[Label[_]] = labels.asScala.find(_.isInstanceOf[UserCreatorLabel])
+ val creator: String = userCreatorOpt.get.asInstanceOf[UserCreatorLabel].getCreator
+ val engineTypeLabelOpt: Option[Label[_]] = labels.asScala.find(_.isInstanceOf[EngineTypeLabel])
+
+ val startMap: util.Map[String, AnyRef] = TaskUtils.getStartupMap(jobRequest.getParams)
+
+ val engineTypeLabel: EngineTypeLabel = engineTypeLabelOpt.get.asInstanceOf[EngineTypeLabel]
+
+ /**
+ * aiSql is routed to spark or hive:
+ * 1. use the spark engine when a spark parameter template is configured
+ * 2. use the hive engine when a hive parameter template is configured
+ * 3. otherwise request doctor to decide the engine type
+ * 4. fall back to spark by default or on exception
+ */
+ var currentEngineType: String = engineTypeLabel.getStringValue
+ if (
+ aiSqlEnable && sqlLanguage
+ .equals(codeType) && supportAISQLCreator.contains(creator.toLowerCase())
+ ) {
+
+ logger.info(s"aisql enable for ${jobRequest.getId}")
+ startMap.put(AI_SQL_KEY.key, AI_SQL_KEY.getValue.asInstanceOf[AnyRef])
+ startMap.put(RETRY_NUM_KEY.key, RETRY_NUM_KEY.getValue.asInstanceOf[AnyRef])
+ logAppender.append(LogUtils.generateInfo(s"current code is aiSql task.\n"))
+
+ // The user has configured a parameter template
+ if (startMap.containsKey("ec.resource.name")) {
+ val hiveParamKeys: String = AI_SQL_HIVE_TEMPLATE_KEYS
+ if (containsKeySubstring(startMap, hiveParamKeys)) {
+ changeEngineLabel(hiveEngineType, labels)
+ logAppender.append(
+ LogUtils.generateInfo(
+ s"use $hiveEngineType by set ${startMap.get("ec.resource.name")} template.\n"
+ )
+ )
+ currentEngineType = hiveEngineType
+ } else {
+ changeEngineLabel(sparkEngineType, labels)
+ logAppender.append(
+ LogUtils.generateInfo(
+ s"use $sparkEngineType by set ${startMap.get("ec.resource.name")} template.\n"
+ )
+ )
+ currentEngineType = sparkEngineType
+ }
+ logger.info(
+ s"use ${startMap.get("ec.resource.name")} conf, use $currentEngineType execute task."
+ )
+ } else {
+ logger.info(s"start intelligent selection execution engine for ${jobRequest.getId}")
+ val engineType: String =
+ EntranceUtils.getDynamicEngineType(jobRequest.getExecutionCode, logAppender)
+ if ("hive".equals(engineType)) {
+ changeEngineLabel(hiveEngineType, labels)
+ logAppender.append(
+ LogUtils.generateInfo(s"use $hiveEngineType by intelligent selection.\n")
+ )
+ currentEngineType = hiveEngineType
+ } else {
+ changeEngineLabel(sparkEngineType, labels)
+ logAppender.append(
+ LogUtils.generateInfo(s"use $sparkEngineType by intelligent selection.\n")
+ )
+ currentEngineType = sparkEngineType
+ }
+ logger.info(
+ s"end intelligent selection execution engine, and engineType is ${currentEngineType} for ${jobRequest.getId}."
+ )
+ EntranceUtils.dealsparkDynamicConf(jobRequest, logAppender, jobRequest.getParams)
+ }
+
+ persist(jobRequest);
+ }
+
+ TaskUtils.addStartupMap(jobRequest.getParams, startMap)
+ jobRequest
+ }
+
+ private def persist(jobRequest: JobRequest) = {
+ val sender: Sender =
+ Sender.getSender(Configuration.JOBHISTORY_SPRING_APPLICATION_NAME.getValue)
+ val jobAiRequest: JobAiRequest = new JobAiRequest
+ BeanUtils.copyProperties(jobRequest, jobAiRequest)
+ jobAiRequest.setId(null)
+ jobAiRequest.setJobHistoryId(jobRequest.getId + "")
+ jobAiRequest.setChangeTime(new Date())
+ jobAiRequest.setEngineType(LabelUtil.getEngineType(jobRequest.getLabels))
+ jobAiRequest.setSubmitCode(jobRequest.getExecutionCode)
+ val jobAiReqInsert: JobAiReqInsert = JobAiReqInsert(jobAiRequest)
+ logger.info(s"${jobRequest.getId} insert into ai_history: ${jobAiRequest}")
+ sender.ask(jobAiReqInsert)
+ logger.info(s"${jobRequest.getId} insert into ai_history end.")
+ }
+
+ private def containsKeySubstring(map: util.Map[String, AnyRef], keywords: String): Boolean = {
+ if (StringUtils.isBlank(keywords) || map == null || map.isEmpty) {
+ false
+ } else {
+ // Split the keyword string into an array by comma
+ val keywordArray: Array[String] = keywords.split(",").map(_.trim)
+
+ // Iterate over the map keys and check whether any key contains one of the keywords
+ map.keySet().asScala.exists { key =>
+ keywordArray.exists(key.contains)
+ }
+ }
+ }
+
+ private def changeEngineLabel(sparkEngineType: String, labels: util.List[Label[_]]): Unit = {
+ val it: util.Iterator[Label[_]] = labels.iterator()
+ // Remove the existing engine type label
+ while (it.hasNext) {
+ if (it.next().isInstanceOf[EngineTypeLabel]) {
+ it.remove()
+ }
+ }
+ // Add the correct engine type label
+ val newEngineTypeLabel: EngineTypeLabel =
+ LabelBuilderFactoryContext.getLabelBuilderFactory.createLabel(classOf[EngineTypeLabel])
+ newEngineTypeLabel.setEngineType(sparkEngineType.split("-")(0))
+ newEngineTypeLabel.setVersion(sparkEngineType.split("-")(1))
+ labels.add(newEngineTypeLabel)
+ }
+
+}
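Illustrative sketch (not part of the patch): when a template is configured, the interceptor above routes the aiSql task to hive if any startup key contains one of the keywords in linkis.ai.sql.hive.template.keys (default "hive,mapreduce"), and to spark otherwise, mirroring containsKeySubstring. The routesToHive name below is illustrative.

    import java.util
    import scala.collection.JavaConverters._

    object AiSqlTemplateRouteSketch {

      // True when any startup key contains one of the comma-separated keywords
      def routesToHive(startMap: util.Map[String, AnyRef], keywords: String): Boolean = {
        val keywordArray = keywords.split(",").map(_.trim)
        startMap.keySet().asScala.exists(key => keywordArray.exists(key.contains))
      }

      def main(args: Array[String]): Unit = {
        val startMap = new util.HashMap[String, AnyRef]()
        startMap.put("ec.resource.name", "my-template")
        startMap.put("mapreduce.job.queuename", "default")
        println(routesToHive(startMap, "hive,mapreduce")) // true -> the hive engine label is applied
      }
    }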
diff --git a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/interceptor/impl/SensitiveCheckInterceptor.scala b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/interceptor/impl/SensitiveCheckInterceptor.scala
new file mode 100644
index 00000000000..313b0073ce9
--- /dev/null
+++ b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/interceptor/impl/SensitiveCheckInterceptor.scala
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.linkis.entrance.interceptor.impl
+
+import org.apache.linkis.common.log.LogUtils
+import org.apache.linkis.common.utils.CodeAndRunTypeUtils
+import org.apache.linkis.entrance.conf.EntranceConfiguration
+import org.apache.linkis.entrance.interceptor.EntranceInterceptor
+import org.apache.linkis.entrance.interceptor.exception.CodeCheckException
+import org.apache.linkis.entrance.utils.EntranceUtils
+import org.apache.linkis.entrance.utils.EntranceUtils.logInfo
+import org.apache.linkis.governance.common.entity.job.JobRequest
+import org.apache.linkis.manager.label.utils.LabelUtil
+
+import org.apache.commons.lang3.StringUtils
+
+import java.lang
+
+class SensitiveCheckInterceptor extends EntranceInterceptor {
+
+ override def apply(jobRequest: JobRequest, logAppender: lang.StringBuilder): JobRequest = {
+ if (!EntranceConfiguration.DOCTOR_SENSITIVE_SQL_CHECK_SWITCH) {
+ return jobRequest
+ }
+
+ val isWhiteList = EntranceConfiguration.DOCTOR_SENSITIVE_SQL_CHECK_WHITELIST.contains(
+ jobRequest.getExecuteUser
+ ) ||
+ EntranceConfiguration.DOCTOR_SENSITIVE_SQL_CHECK_WHITELIST.contains(jobRequest.getSubmitUser)
+ if (isWhiteList) {
+ logAppender.append(
+ LogUtils
+ .generateInfo(s"Sensitive SQL Check: whiteList contains user ! Skip Check\n")
+ )
+ return jobRequest
+ }
+ val labellist = jobRequest.getLabels
+
+ val engineType = LabelUtil.getEngineTypeLabel(labellist).getEngineType
+ if (!EntranceConfiguration.DOCTOR_SENSITIVE_SQL_CHECK_ENGINETYPE.contains(engineType)) {
+ return jobRequest
+ }
+
+ val codeType = Option(LabelUtil.getCodeType(labellist))
+ .map(_.toLowerCase())
+ .getOrElse("")
+
+ val languageType = CodeAndRunTypeUtils.getLanguageTypeByCodeType(codeType)
+ if (!EntranceConfiguration.DOCTOR_SENSITIVE_SQL_CHECK_RUNTYPE.contains(languageType)) {
+ return jobRequest
+ }
+
+ val creator = LabelUtil.getUserCreatorLabel(labellist).getCreator
+ if (
+ StringUtils.isNotBlank(
+ EntranceConfiguration.DOCTOR_SENSITIVE_SQL_CHECK_CREATOR
+ ) && (!EntranceConfiguration.DOCTOR_SENSITIVE_SQL_CHECK_CREATOR.contains(creator))
+ ) {
+ return jobRequest
+ }
+
+ val executeUserDepartmentId = EntranceUtils.getUserDepartmentId(jobRequest.getExecuteUser)
+ val submitUserDepartmentId = EntranceUtils.getUserDepartmentId(jobRequest.getSubmitUser)
+ if (
+ (StringUtils.isNotBlank(
+ executeUserDepartmentId
+ ) && EntranceConfiguration.DOCTOR_SENSITIVE_SQL_CHECK_DEPARTMENT.contains(
+ executeUserDepartmentId
+ )) || (
+ StringUtils.isNotBlank(
+ submitUserDepartmentId
+ ) && EntranceConfiguration.DOCTOR_SENSITIVE_SQL_CHECK_DEPARTMENT.contains(
+ submitUserDepartmentId
+ )
+ )
+ ) {
+ val (result, reason) =
+ EntranceUtils.sensitiveSqlCheck(
+ jobRequest.getExecutionCode,
+ languageType,
+ engineType,
+ jobRequest.getExecuteUser,
+ logAppender
+ )
+ if (result) {
+ throw CodeCheckException(20054, "The current operation involves reading plaintext sensitive data and is not allowed. Reason: " + reason)
+ }
+ }
+ jobRequest
+ }
+
+}
diff --git a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/interceptor/impl/TemplateConfUtils.scala b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/interceptor/impl/TemplateConfUtils.scala
index 99ae8b07dfa..d7c95c964a3 100644
--- a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/interceptor/impl/TemplateConfUtils.scala
+++ b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/interceptor/impl/TemplateConfUtils.scala
@@ -21,16 +21,25 @@ import org.apache.linkis.common.conf.Configuration
import org.apache.linkis.common.exception.LinkisCommonErrorException
import org.apache.linkis.common.log.LogUtils
import org.apache.linkis.common.utils.{CodeAndRunTypeUtils, Logging, Utils}
+import org.apache.linkis.common.utils.CodeAndRunTypeUtils.LANGUAGE_TYPE_AI_SQL
import org.apache.linkis.entrance.conf.EntranceConfiguration
+import org.apache.linkis.entrance.utils.EntranceUtils
import org.apache.linkis.governance.common.entity.TemplateConfKey
import org.apache.linkis.governance.common.entity.job.JobRequest
import org.apache.linkis.governance.common.protocol.conf.{TemplateConfRequest, TemplateConfResponse}
import org.apache.linkis.manager.label.builder.factory.LabelBuilderFactoryContext
+import org.apache.linkis.manager.label.conf.LabelCommonConfig
import org.apache.linkis.manager.label.constant.LabelKeyConstant
+import org.apache.linkis.manager.label.entity.engine.{
+ EngineType,
+ EngineTypeLabel,
+ FixedEngineConnLabel
+}
import org.apache.linkis.manager.label.entity.entrance.ExecuteOnceLabel
import org.apache.linkis.manager.label.utils.LabelUtil
import org.apache.linkis.protocol.utils.TaskUtils
import org.apache.linkis.rpc.Sender
+import org.apache.linkis.server.BDPJettyServerHelper
import org.apache.commons.lang3.StringUtils
@@ -38,13 +47,18 @@ import java.{lang, util}
import java.util.concurrent.TimeUnit
import scala.collection.JavaConverters._
+import scala.util.matching.{Regex, UnanchoredRegex}
import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
object TemplateConfUtils extends Logging {
val confTemplateNameKey = "ec.resource.name"
+ val confFixedEngineConnLabelKey = "ec.fixed.sessionId"
+ /**
+ * Cache template configurations by template uuid
+ */
private val templateCache: LoadingCache[String, util.List[TemplateConfKey]] = CacheBuilder
.newBuilder()
.maximumSize(1000)
@@ -77,6 +91,9 @@ object TemplateConfUtils extends Logging {
})
+ /**
+ * Cache template configurations by template name
+ */
private val templateCacheName: LoadingCache[String, util.List[TemplateConfKey]] = CacheBuilder
.newBuilder()
.maximumSize(1000)
@@ -120,62 +137,98 @@ object TemplateConfUtils extends Logging {
* @return
* String the last one of template conf name
*/
- def getCustomTemplateConfName(code: String, codeType: String): String = {
+ def getCustomTemplateConfName(
+ jobRequest: JobRequest,
+ codeType: String,
+ logAppender: lang.StringBuilder
+ ): String = {
+ var code = jobRequest.getExecutionCode
var templateConfName = "";
var varString: String = null
var errString: String = null
- var rightVarString: String = null
+ var fixECString: String = null
val languageType = CodeAndRunTypeUtils.getLanguageTypeByCodeType(codeType)
languageType match {
case CodeAndRunTypeUtils.LANGUAGE_TYPE_SQL =>
varString = s"""\\s*---@set ${confTemplateNameKey}=\\s*.+\\s*"""
+ fixECString = s"""\\s*---@set\\s+${confFixedEngineConnLabelKey}\\s*=\\s*([^;]+)(?:\\s*;)?"""
errString = """\s*---@.*"""
case CodeAndRunTypeUtils.LANGUAGE_TYPE_PYTHON | CodeAndRunTypeUtils.LANGUAGE_TYPE_SHELL =>
varString = s"""\\s*##@set ${confTemplateNameKey}=\\s*.+\\s*"""
+ fixECString = s"""\\s*##@set\\s+${confFixedEngineConnLabelKey}\\s*=\\s*([^;]+)(?:\\s*;)?"""
errString = """\s*##@"""
case CodeAndRunTypeUtils.LANGUAGE_TYPE_SCALA =>
varString = s"""\\s*///@set ${confTemplateNameKey}=\\s*.+\\s*"""
+ fixECString = s"""\\s*///@set\\s+${confFixedEngineConnLabelKey}\\s*=\\s*([^;]+)(?:\\s*;)?"""
errString = """\s*///@.+"""
case _ =>
return templateConfName
}
val customRegex = varString.r.unanchored
+ val fixECRegex: UnanchoredRegex = fixECString.r.unanchored
val errRegex = errString.r.unanchored
var codeRes = code.replaceAll("\r\n", "\n")
- // only allow set at fisrt line
- val res = codeRes.split("\n")
- if (res.size > 0) {
- val str = res(0)
- str match {
- case customRegex() =>
- val clearStr = if (str.endsWith(";")) str.substring(0, str.length - 1) else str
- val res: Array[String] = clearStr.split("=")
- if (res != null && res.length == 2) {
- templateConfName = res(1).trim
- logger.info(s"get template conf name $templateConfName")
- } else {
- if (res.length > 2) {
- throw new LinkisCommonErrorException(
- 20044,
- s"$str template conf name var defined uncorrectly"
- )
+
+ // Match any line; the directive must be on a line of its own
+ if (codeRes.contains(confTemplateNameKey) || codeRes.contains(confFixedEngineConnLabelKey)) {
+ val res = codeRes.split("\n")
+ // Flag used to stop scanning once a directive has matched
+ var matchFlag = false
+ res.foreach(str => {
+ if (matchFlag) {
+ return templateConfName
+ }
+ str match {
+ case customRegex() =>
+ val clearStr = if (str.endsWith(";")) str.substring(0, str.length - 1) else str
+ val res: Array[String] = clearStr.split("=")
+ if (res != null && res.length == 2) {
+ templateConfName = res(1).trim
+ logger.info(s"get template conf name $templateConfName")
} else {
- throw new LinkisCommonErrorException(
- 20045,
- s"template conf name var was defined uncorrectly:$str"
+ if (res.length > 2) {
+ throw new LinkisCommonErrorException(
+ 20044,
+ s"$str template conf name var is defined incorrectly"
+ )
+ } else {
+ throw new LinkisCommonErrorException(
+ 20045,
+ s"template conf name var was defined incorrectly:$str"
+ )
+ }
+ }
+ matchFlag = true
+ case fixECRegex(sessionId) =>
+ // Deal with the fixedEngineConn configuration: add a FixedEngineConnLabel when @set ec.fixed.sessionId=xxx is present
+ if (StringUtils.isNotBlank(sessionId)) {
+ val fixedEngineConnLabel =
+ LabelBuilderFactoryContext.getLabelBuilderFactory.createLabel(
+ classOf[FixedEngineConnLabel]
+ )
+ fixedEngineConnLabel.setSessionId(sessionId)
+ jobRequest.getLabels.add(fixedEngineConnLabel)
+ logger.info(
+ s"The task ${jobRequest.getId} is set to fixed engine conn, labelValue: ${sessionId}"
)
+ logAppender.append(
+ s"The task ${jobRequest.getId} is set to fixed engine conn, labelValue: ${sessionId}"
+ )
+ } else {
+ logger.info(s"The task ${jobRequest.getId} does not set a fixed engine conn")
}
- }
- case errRegex() =>
- logger.warn(
- s"The template conf name var definition is incorrect:$str,if it is not used, it will not run the error, but it is recommended to use the correct specification to define"
- )
- case _ =>
- }
+ matchFlag = true
+ case errRegex() =>
+ logger.warn(
+ s"The template conf name var definition is incorrect: $str. If it is not used it will not cause an error, but it is recommended to define it using the correct syntax"
+ )
+ case _ =>
+ }
+ })
}
templateConfName
}
@@ -185,15 +238,39 @@ object TemplateConfUtils extends Logging {
case requestPersistTask: JobRequest =>
val params = requestPersistTask.getParams
val startMap = TaskUtils.getStartupMap(params)
+ val runtimeMap: util.Map[String, AnyRef] = TaskUtils.getRuntimeMap(params)
var templateConflist: util.List[TemplateConfKey] = new util.ArrayList[TemplateConfKey]()
var templateName: String = ""
// only for Creator:IDE, try to get template conf name from code string. eg:---@set ec.resource.name=xxxx
+ val codeType = LabelUtil.getCodeType(jobRequest.getLabels)
val (user, creator) = LabelUtil.getUserCreator(jobRequest.getLabels)
if (EntranceConfiguration.DEFAULT_REQUEST_APPLICATION_NAME.getValue.equals(creator)) {
- val codeType = LabelUtil.getCodeType(jobRequest.getLabels)
+ templateName = getCustomTemplateConfName(jobRequest, codeType, logAppender)
+ if (StringUtils.isNotBlank(templateName)) {
+ logAppender.append(
+ LogUtils
+ .generateInfo(s"Try to execute task with template: $templateName in script.\n")
+ )
+ }
+ }
+
+ // Handle the template name carried in the runtime params, so that a failed task can be retried with the template parameters
+ var runtimeTemplateFlag = false
+ if (
+ EntranceConfiguration.SUPPORT_TEMPLATE_CONF_RETRY_ENABLE.getValue && StringUtils
+ .isBlank(templateName)
+ ) {
templateName =
- TemplateConfUtils.getCustomTemplateConfName(jobRequest.getExecutionCode, codeType)
+ runtimeMap.getOrDefault(LabelKeyConstant.TEMPLATE_CONF_NAME_KEY, "").toString
+ if (StringUtils.isNotBlank(templateName)) {
+ runtimeTemplateFlag = true
+ logAppender.append(
+ LogUtils.generateInfo(
+ s"Try to execute task with template: $templateName in runtime params.\n"
+ )
+ )
+ }
}
// code template name > start params template uuid
@@ -209,15 +286,17 @@ object TemplateConfUtils extends Logging {
logger.info("try to get template conf list with template uid:{} ", templateUuid)
logAppender.append(
LogUtils
- .generateInfo(s"Try to get template conf data with template uid:$templateUuid\nn")
+ .generateInfo(s"Try to get template conf data with template uid:$templateUuid\n")
)
templateConflist = templateCache.get(templateUuid)
if (templateConflist == null || templateConflist.size() == 0) {
logAppender.append(
- LogUtils.generateWarn(
+ LogUtils.generateInfo(
s"Can not get any template conf data with template uid:$templateUuid\n"
)
)
+ } else {
+ templateName = templateConflist.get(0).getTemplateName
}
}
} else {
@@ -226,10 +305,11 @@ object TemplateConfUtils extends Logging {
LogUtils
.generateInfo(s"Try to get template conf data with template name:[$templateName]\n")
)
- templateConflist = templateCacheName.get(templateName)
+ val cacheList: util.List[TemplateConfKey] = templateCacheName.get(templateName)
+ templateConflist.addAll(cacheList)
if (templateConflist == null || templateConflist.size() == 0) {
logAppender.append(
- LogUtils.generateWarn(
+ LogUtils.generateInfo(
s"Can not get any template conf data with template name:$templateName\n"
)
)
@@ -248,6 +328,30 @@ object TemplateConfUtils extends Logging {
}
}
+ // Handle template parameters specially for aisql
+ val isAisql = LANGUAGE_TYPE_AI_SQL.equals(codeType)
+ if (
+ isAisql && runtimeTemplateFlag && templateConflist != null && templateConflist
+ .size() > 0
+ ) {
+ logger.info("aisql deal with template in runtime params.")
+ logAppender.append(
+ LogUtils.generateInfo(
+ s"If task execution fails, the template $templateName configuration parameters will be used to rerun the task\n"
+ )
+ )
+ val keyList = new util.HashMap[String, AnyRef]()
+ templateConflist.asScala.foreach(ele => {
+ keyList.put(ele.getKey, ele.getConfigValue)
+ })
+ val confRuntimeMap = new util.HashMap[String, AnyRef]()
+ confRuntimeMap.put(LabelKeyConstant.TEMPLATE_CONF_NAME_KEY, keyList)
+ // Cache the template configuration into the runtime params
+ TaskUtils.addRuntimeMap(params, confRuntimeMap)
+ // For aisql the template parameters do not need to be applied manually here
+ templateConflist.clear()
+ }
+
if (templateConflist != null && templateConflist.size() > 0) {
val keyList = new util.HashMap[String, AnyRef]()
templateConflist.asScala.foreach(ele => {
@@ -263,13 +367,19 @@ object TemplateConfUtils extends Logging {
}
keyList.put(key, newValue)
}
-
})
if (keyList.size() > 0) {
+ logger.info(s"use template conf for templateName: ${templateName}")
+ keyList.put(confTemplateNameKey, templateName)
+ logAppender.append(
+ LogUtils
+ .generateInfo(s"use template conf with templateName: ${templateName} \n")
+ )
TaskUtils.addStartupMap(params, keyList)
}
+ } else if (!isAisql) {
+ EntranceUtils.dealsparkDynamicConf(jobRequest, logAppender, jobRequest.getParams)
}
-
case _ =>
}
jobRequest
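Illustrative sketch (not part of the patch): for SQL scripts the directives above are plain ---@set lines; the snippet below builds the same unanchored patterns getCustomTemplateConfName uses for ec.resource.name and ec.fixed.sessionId and shows what they match. The input strings are made up.

    object TemplateDirectiveSketch {
      // Same unanchored patterns the SQL branch above builds for ---@set directives
      private val templateName = """\s*---@set ec.resource.name=\s*.+\s*""".r.unanchored
      private val fixedSession = """\s*---@set\s+ec.fixed.sessionId\s*=\s*([^;]+)(?:\s*;)?""".r.unanchored

      def main(args: Array[String]): Unit = {
        "---@set ec.resource.name=etl_template;" match {
          // the real code then splits on '=' and trims the right-hand side into the template name
          case templateName() => println("template name directive matched")
          case _ => println("no match")
        }
        "---@set ec.fixed.sessionId=session-01" match {
          case fixedSession(sessionId) => println(s"fixed engine conn session: $sessionId")
          case _ => println("no match")
        }
      }
    }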
diff --git a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/orchestrator/plugin/EntranceUserParallelOrchestratorPlugin.scala b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/orchestrator/plugin/EntranceUserParallelOrchestratorPlugin.scala
index e5c657023ef..4a143a0160f 100644
--- a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/orchestrator/plugin/EntranceUserParallelOrchestratorPlugin.scala
+++ b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/orchestrator/plugin/EntranceUserParallelOrchestratorPlugin.scala
@@ -20,6 +20,7 @@ package org.apache.linkis.entrance.orchestrator.plugin
import org.apache.linkis.common.conf.Configuration
import org.apache.linkis.common.utils.{Logging, Utils}
import org.apache.linkis.entrance.conf.EntranceConfiguration
+import org.apache.linkis.entrance.scheduler.EntranceGroupFactory
import org.apache.linkis.entrance.utils.EntranceUtils
import org.apache.linkis.governance.common.protocol.conf.{
RequestQueryEngineConfigWithGlobalConfig,
@@ -68,10 +69,11 @@ class EntranceUserParallelOrchestratorPlugin extends UserParallelOrchestratorPlu
.containsKey(EntranceConfiguration.WDS_LINKIS_INSTANCE.key)
) {
logger.warn(
- s"cannot found user configuration key:${EntranceConfiguration.WDS_LINKIS_INSTANCE.key}," + s"will use default value "
+ s"cannot find user configuration key:${EntranceConfiguration.WDS_LINKIS_INSTANCE.key}, will use the default value"
)
}
- val maxRunningJobs = EntranceConfiguration.WDS_LINKIS_INSTANCE.getValue(keyAndValue, true)
+ val maxRunningJobs = EntranceGroupFactory.getUserMaxRunningJobs(keyAndValue)
+ logger.info(s"$key load orchestrator user maxRunningJobs=$maxRunningJobs")
maxRunningJobs
}
diff --git a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/parser/CommonEntranceParser.scala b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/parser/CommonEntranceParser.scala
index 58fc1f45c3d..80059906afb 100644
--- a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/parser/CommonEntranceParser.scala
+++ b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/parser/CommonEntranceParser.scala
@@ -18,14 +18,20 @@
package org.apache.linkis.entrance.parser
import org.apache.linkis.common.conf.Configuration
-import org.apache.linkis.common.utils.Logging
+import org.apache.linkis.common.utils.{Logging, Utils}
import org.apache.linkis.entrance.conf.EntranceConfiguration
+import org.apache.linkis.entrance.conf.EntranceConfiguration.{
+ SPARK3_VERSION_COERCION_DEPARTMENT,
+ SPARK3_VERSION_COERCION_SWITCH,
+ SPARK3_VERSION_COERCION_USERS
+}
import org.apache.linkis.entrance.errorcode.EntranceErrorCodeSummary._
import org.apache.linkis.entrance.exception.{EntranceErrorCode, EntranceIllegalParamException}
import org.apache.linkis.entrance.persistence.PersistenceManager
import org.apache.linkis.entrance.timeout.JobTimeoutManager
-import org.apache.linkis.governance.common.conf.GovernanceCommonConf
+import org.apache.linkis.entrance.utils.EntranceUtils
import org.apache.linkis.governance.common.entity.job.JobRequest
+import org.apache.linkis.governance.common.protocol.conf.{DepartmentRequest, DepartmentResponse}
import org.apache.linkis.manager.common.conf.RMConfiguration
import org.apache.linkis.manager.label.builder.factory.{
LabelBuilderFactory,
@@ -35,15 +41,23 @@ import org.apache.linkis.manager.label.conf.LabelCommonConfig
import org.apache.linkis.manager.label.constant.LabelKeyConstant
import org.apache.linkis.manager.label.entity.Label
import org.apache.linkis.manager.label.entity.cluster.ClusterLabel
-import org.apache.linkis.manager.label.entity.engine.{CodeLanguageLabel, UserCreatorLabel}
-import org.apache.linkis.manager.label.utils.EngineTypeLabelCreator
+import org.apache.linkis.manager.label.entity.engine.{
+ CodeLanguageLabel,
+ EngineType,
+ UserCreatorLabel
+}
+import org.apache.linkis.manager.label.utils.{EngineTypeLabelCreator, LabelUtil}
import org.apache.linkis.protocol.constants.TaskConstant
+import org.apache.linkis.protocol.utils.TaskUtils
+import org.apache.linkis.rpc.Sender
import org.apache.linkis.scheduler.queue.SchedulerEventState
+import org.apache.linkis.storage.script.VariableParser
import org.apache.commons.lang3.StringUtils
import java.util
import java.util.Date
+import java.util.regex.Pattern
import scala.collection.JavaConverters._
@@ -94,6 +108,7 @@ class CommonEntranceParser(val persistenceManager: PersistenceManager)
s"${EntranceErrorCode.PARAM_CANNOT_EMPTY.getDesc}, labels is null"
)
}
+ addUserToRuntime(submitUser, executeUser, configMap)
// 3. set Code
var code: String = null
var runType: String = null
@@ -117,13 +132,14 @@ class CommonEntranceParser(val persistenceManager: PersistenceManager)
if (formatCode) code = format(code)
jobRequest.setExecutionCode(code)
// 4. parse label
- val labels: util.Map[String, Label[_]] = buildLabel(labelMap)
+ var labels: util.HashMap[String, Label[_]] = buildLabel(labelMap)
JobTimeoutManager.checkTimeoutLabel(labels)
checkEngineTypeLabel(labels)
generateAndVerifyCodeLanguageLabel(runType, labels)
generateAndVerifyUserCreatorLabel(executeUser, labels)
generateAndVerifyClusterLabel(labels)
-
+ // Spark version coercion, only applied to spark tasks
+ labels = sparkVersionCoercion(labels, executeUser, submitUser)
jobRequest.setLabels(new util.ArrayList[Label[_]](labels.values()))
jobRequest.setSource(source)
jobRequest.setStatus(SchedulerEventState.Inited.toString)
@@ -131,6 +147,8 @@ class CommonEntranceParser(val persistenceManager: PersistenceManager)
jobRequest.setMetrics(new util.HashMap[String, AnyRef]())
jobRequest.getMetrics.put(TaskConstant.JOB_SUBMIT_TIME, new Date(System.currentTimeMillis))
jobRequest.setParams(configMap)
+ // Set Progress
+ jobRequest.setProgress("0.0")
jobRequest
}
@@ -229,8 +247,10 @@ class CommonEntranceParser(val persistenceManager: PersistenceManager)
jobReq.setExecuteUser(umUser)
var executionCode = params.get(TaskConstant.EXECUTIONCODE).asInstanceOf[String]
val _params = params.get(TaskConstant.PARAMS)
+
+ addUserToRuntime(submitUser, umUser, _params)
_params match {
- case mapParams: java.util.Map[String, AnyRef] => jobReq.setParams(mapParams)
+ case mapParams: util.Map[String, AnyRef] => jobReq.setParams(mapParams)
case _ =>
}
val formatCode = params.get(TaskConstant.FORMATCODE).asInstanceOf[Boolean]
@@ -265,38 +285,62 @@ class CommonEntranceParser(val persistenceManager: PersistenceManager)
if (formatCode) executionCode = format(executionCode)
jobReq.setExecutionCode(executionCode)
}
- val engineTypeLabel = EngineTypeLabelCreator.createEngineTypeLabel(executeApplicationName)
+ var engineTypeLabel = EngineTypeLabelCreator.createEngineTypeLabel(executeApplicationName)
val runTypeLabel =
labelBuilderFactory.createLabel[Label[_]](LabelKeyConstant.CODE_TYPE_KEY, runType)
+ val variableMap =
+ jobReq.getParams.get(VariableParser.VARIABLE).asInstanceOf[util.Map[String, String]]
+ if (
+ null != variableMap && variableMap.containsKey(LabelCommonConfig.SPARK3_ENGINE_VERSION_CONF)
+ ) {
+ var version = variableMap.get(LabelCommonConfig.SPARK3_ENGINE_VERSION_CONF)
+ val pattern = Pattern.compile(EntranceUtils.sparkVersionRegex).matcher(version)
+ if (pattern.matches()) {
+ version = LabelCommonConfig.SPARK3_ENGINE_VERSION.getValue
+ } else {
+ version = LabelCommonConfig.SPARK_ENGINE_VERSION.getValue
+ }
+ engineTypeLabel =
+ EngineTypeLabelCreator.createEngineTypeLabel(EngineType.SPARK.toString, version)
+ }
val userCreatorLabel = labelBuilderFactory
.createLabel[Label[_]](LabelKeyConstant.USER_CREATOR_TYPE_KEY, umUser + "-" + creator)
- val labelList = new util.ArrayList[Label[_]](3)
- labelList.add(engineTypeLabel)
- labelList.add(runTypeLabel)
- labelList.add(userCreatorLabel)
+ var labels = new util.HashMap[String, Label[_]]()
+ labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, engineTypeLabel)
+ labels.put(LabelKeyConstant.CODE_TYPE_KEY, runTypeLabel)
+ labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, userCreatorLabel)
if (jobReq.getParams != null) {
val labelMap = params
.getOrDefault(TaskConstant.LABELS, new util.HashMap[String, AnyRef]())
.asInstanceOf[util.Map[String, AnyRef]]
- if (null != labelMap && !labelMap.isEmpty) {
- val list: util.List[Label[_]] =
- labelBuilderFactory.getLabels(labelMap)
- labelList.addAll(list)
- }
+ labels.putAll(buildLabel(labelMap))
}
jobReq.setProgress("0.0")
jobReq.setSource(source)
// In order to be compatible with the code, let enginetype and runtype have the same attribute
jobReq.setStatus(SchedulerEventState.Inited.toString)
// Package labels
- jobReq.setLabels(labelList)
+ // Spark version coercion, only applied to spark tasks
+ labels = sparkVersionCoercion(labels, umUser, submitUser)
+ jobReq.setLabels(new util.ArrayList[Label[_]](labels.values()))
jobReq.setMetrics(new util.HashMap[String, AnyRef]())
jobReq.getMetrics.put(TaskConstant.JOB_SUBMIT_TIME, new Date(System.currentTimeMillis))
jobReq
}
- private def buildLabel(labelMap: util.Map[String, AnyRef]): util.Map[String, Label[_]] = {
+ private def addUserToRuntime(submitUser: String, umUser: String, _params: AnyRef): Unit = {
+ val runtimeMap: util.Map[String, AnyRef] = new util.HashMap[String, AnyRef]()
+ runtimeMap.put(TaskConstant.SUBMIT_USER, submitUser)
+ runtimeMap.put(TaskConstant.EXECUTE_USER, umUser)
+ _params match {
+ case map: util.Map[String, AnyRef] =>
+ TaskUtils.addRuntimeMap(map, runtimeMap)
+ case _ =>
+ }
+ }
+
+ private def buildLabel(labelMap: util.Map[String, AnyRef]): util.HashMap[String, Label[_]] = {
val labelKeyValueMap = new util.HashMap[String, Label[_]]()
if (null != labelMap && !labelMap.isEmpty) {
val list: util.List[Label[_]] =
@@ -310,6 +354,64 @@ class CommonEntranceParser(val persistenceManager: PersistenceManager)
labelKeyValueMap
}
+ private def sparkVersionCoercion(
+ labels: util.HashMap[String, Label[_]],
+ executeUser: String,
+ submitUser: String
+ ): util.HashMap[String, Label[_]] = {
+ // Per-user configuration takes precedence over department configuration
+ // Check whether forced version coercion is enabled
+ if (SPARK3_VERSION_COERCION_SWITCH && (null != labels && !labels.isEmpty)) {
+ val engineTypeLabel = labels.get(LabelKeyConstant.ENGINE_TYPE_KEY)
+ val engineType = LabelUtil.getFromLabelStr(engineTypeLabel.getStringValue, "engine")
+ val version = LabelUtil.getFromLabelStr(engineTypeLabel.getStringValue, "version")
+ if (
+ engineType.equals(EngineType.SPARK.toString) && (!version.equals(
+ LabelCommonConfig.SPARK3_ENGINE_VERSION.getValue
+ ))
+ ) {
+ Utils.tryAndWarnMsg {
+ // Check whether the user is listed in the per-user coercion configuration
+ if (
+ SPARK3_VERSION_COERCION_USERS.contains(executeUser) || SPARK3_VERSION_COERCION_USERS
+ .contains(submitUser)
+ ) {
+ logger.info(
+ s"Spark version will be change 3.4.4,submitUser:${submitUser},executeUser:${executeUser} "
+ )
+ labels.replace(
+ LabelKeyConstant.ENGINE_TYPE_KEY,
+ EngineTypeLabelCreator.createEngineTypeLabel(
+ EngineType.SPARK.toString,
+ LabelCommonConfig.SPARK3_ENGINE_VERSION.getValue
+ )
+ )
+ return labels
+ }
+ val executeUserDepartmentId = EntranceUtils.getUserDepartmentId(executeUser)
+ val submitUserDepartmentId = EntranceUtils.getUserDepartmentId(submitUser)
+ if (
+ (StringUtils.isNotBlank(executeUserDepartmentId) && SPARK3_VERSION_COERCION_DEPARTMENT
+ .contains(executeUserDepartmentId)) ||
+ (StringUtils.isNotBlank(submitUserDepartmentId) && SPARK3_VERSION_COERCION_DEPARTMENT
+ .contains(submitUserDepartmentId))
+ ) {
+ logger.info(s"Spark version will be change 3.4.4 by department:${executeUser} ")
+ labels.replace(
+ LabelKeyConstant.ENGINE_TYPE_KEY,
+ EngineTypeLabelCreator.createEngineTypeLabel(
+ EngineType.SPARK.toString,
+ LabelCommonConfig.SPARK3_ENGINE_VERSION.getValue
+ )
+ )
+ return labels
+ }
+ }(s"error to Spark 3 version coercion: ${executeUser}")
+ }
+ }
+ labels
+ }
+
// todo to format code using proper way
private def format(code: String): String = code
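For reference, a minimal standalone sketch of how the variable-driven Spark version override above interprets the user-supplied value. It reuses the `^3(\.\d+)*$` pattern from EntranceUtils; the two version strings below are illustrative stand-ins for LabelCommonConfig.SPARK3_ENGINE_VERSION and SPARK_ENGINE_VERSION, not the configured defaults.

import java.util.regex.Pattern

object SparkVersionOverrideSketch extends App {
  val sparkVersionRegex = "^3(\\.\\d+)*$" // same pattern as EntranceUtils.sparkVersionRegex

  // Assumed stand-ins for the configured label versions
  val spark3Version = "3.4.4"
  val defaultSparkVersion = "2.4.3"

  def resolve(userValue: String): String =
    if (Pattern.compile(sparkVersionRegex).matcher(userValue).matches()) spark3Version
    else defaultSparkVersion

  println(resolve("3"))     // 3.4.4 -> any 3.x value selects the Spark 3 label version
  println(resolve("3.2.1")) // 3.4.4
  println(resolve("2.4.3")) // 2.4.3 -> anything else falls back to the default Spark version
}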
diff --git a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/persistence/PersistenceManager.scala b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/persistence/PersistenceManager.scala
index b60fa2d795b..b0cacb806b8 100644
--- a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/persistence/PersistenceManager.scala
+++ b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/persistence/PersistenceManager.scala
@@ -18,9 +18,13 @@
package org.apache.linkis.entrance.persistence
import org.apache.linkis.entrance.EntranceContext
-import org.apache.linkis.scheduler.listener.{JobListener, ProgressListener}
+import org.apache.linkis.scheduler.listener.{JobListener, JobRetryListener, ProgressListener}
-abstract class PersistenceManager extends JobListener with ResultSetListener with ProgressListener {
+abstract class PersistenceManager
+ extends JobListener
+ with ResultSetListener
+ with ProgressListener
+ with JobRetryListener {
def getEntranceContext: EntranceContext
diff --git a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/scheduler/EntranceGroupFactory.scala b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/scheduler/EntranceGroupFactory.scala
index de4c025e300..cb1b610e2b5 100644
--- a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/scheduler/EntranceGroupFactory.scala
+++ b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/scheduler/EntranceGroupFactory.scala
@@ -35,6 +35,7 @@ import org.apache.linkis.rpc.Sender
import org.apache.linkis.scheduler.queue.{Group, GroupFactory, SchedulerEvent}
import org.apache.linkis.scheduler.queue.parallelqueue.ParallelGroup
+import org.apache.commons.collections.MapUtils
import org.apache.commons.lang3.StringUtils
import java.util
@@ -78,6 +79,9 @@ class EntranceGroupFactory extends GroupFactory with Logging {
val groupName = EntranceGroupFactory.getGroupNameByLabels(labels)
val cacheGroup = groupNameToGroups.getIfPresent(groupName)
if (null == cacheGroup) synchronized {
+ if (groupNameToGroups.getIfPresent(groupName) != null) {
+ return groupNameToGroups.getIfPresent(groupName)
+ }
val maxAskExecutorTimes = EntranceConfiguration.MAX_ASK_EXECUTOR_TIME.getValue.toLong
val sender: Sender =
Sender.getSender(Configuration.CLOUD_CONSOLE_CONFIGURATION_SPRING_APPLICATION_NAME.getValue)
@@ -94,7 +98,7 @@ class EntranceGroupFactory extends GroupFactory with Logging {
}(
"Get user configurations from configuration server failed! Next use the default value to continue."
)
- val maxRunningJobs = getUserMaxRunningJobs(keyAndValue)
+ val maxRunningJobs = EntranceGroupFactory.getUserMaxRunningJobs(keyAndValue)
val initCapacity = GROUP_INIT_CAPACITY.getValue(keyAndValue)
val maxCapacity = if (null != specifiedUsernameRegexPattern) {
if (specifiedUsernameRegexPattern.matcher(userCreatorLabel.getUser).find()) {
@@ -134,21 +138,6 @@ class EntranceGroupFactory extends GroupFactory with Logging {
group
}
- /**
- * User task concurrency control is controlled for multiple Entrances, which will be evenly
- * distributed based on the number of existing Entrances
- * @param keyAndValue
- * @return
- */
- private def getUserMaxRunningJobs(keyAndValue: util.Map[String, String]): Int = {
- val userDefinedRunningJobs = EntranceConfiguration.WDS_LINKIS_INSTANCE.getValue(keyAndValue)
- val entranceNum = EntranceUtils.getRunningEntranceNumber()
- Math.max(
- EntranceConfiguration.ENTRANCE_INSTANCE_MIN.getValue,
- userDefinedRunningJobs / entranceNum
- )
- }
-
}
object EntranceGroupFactory {
@@ -170,4 +159,28 @@ object EntranceGroupFactory {
groupName
}
+ /**
+ * User task concurrency control is controlled for multiple Entrances, which will be evenly
+ * distributed based on the number of existing Entrances
+ * @param keyAndValue
+ * @return
+ */
+ def getUserMaxRunningJobs(keyAndValue: util.Map[String, String]): Int = {
+ val userDefinedRunningJobs =
+ if (
+ MapUtils.isNotEmpty(keyAndValue) && keyAndValue.containsKey(
+ EntranceConfiguration.WDS_LINKIS_ENTRANCE_RUNNING_JOB.key
+ )
+ ) {
+ EntranceConfiguration.WDS_LINKIS_ENTRANCE_RUNNING_JOB.getValue(keyAndValue)
+ } else {
+ EntranceConfiguration.WDS_LINKIS_INSTANCE.getValue(keyAndValue)
+ }
+ val entranceNum = EntranceUtils.getRunningEntranceNumber()
+ Math.max(
+ EntranceConfiguration.ENTRANCE_INSTANCE_MIN.getValue,
+ userDefinedRunningJobs / entranceNum
+ )
+ }
+
}
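A worked example of the per-entrance concurrency split performed by getUserMaxRunningJobs above. The numbers are hypothetical; in the real code the running-job limit and ENTRANCE_INSTANCE_MIN come from configuration, and the entrance count from EntranceUtils.getRunningEntranceNumber().

object RunningJobsSplitSketch extends App {
  val userDefinedRunningJobs = 30 // assumed tenant-level running-job limit
  val runningEntranceNumber = 3   // assumed number of live Entrance instances
  val entranceInstanceMin = 1     // assumed ENTRANCE_INSTANCE_MIN

  // Each Entrance admits an even share of the limit, floored at the configured minimum
  val maxRunningJobs = math.max(entranceInstanceMin, userDefinedRunningJobs / runningEntranceNumber)
  println(maxRunningJobs) // 10
}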
diff --git a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/utils/CommonLogPathUtils.scala b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/utils/CommonLogPathUtils.scala
index 3430c1809b5..e8ba06ef094 100644
--- a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/utils/CommonLogPathUtils.scala
+++ b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/utils/CommonLogPathUtils.scala
@@ -20,19 +20,16 @@ package org.apache.linkis.entrance.utils
import org.apache.linkis.common.io.FsPath
import org.apache.linkis.common.utils.Utils
import org.apache.linkis.entrance.conf.EntranceConfiguration
-import org.apache.linkis.governance.common.conf.GovernanceCommonConf
import org.apache.linkis.governance.common.entity.job.JobRequest
+import org.apache.linkis.governance.common.utils.GovernanceUtils
import org.apache.linkis.manager.label.utils.LabelUtil
import org.apache.linkis.storage.FSFactory
import org.apache.linkis.storage.fs.FileSystem
import org.apache.linkis.storage.utils.{FileSystemUtils, StorageConfiguration, StorageUtils}
-import java.text.SimpleDateFormat
-import java.util.Date
-
object CommonLogPathUtils {
- def buildCommonPath(commonPath: String): Unit = {
+ def buildCommonPath(commonPath: String, isResPath: Boolean): Unit = {
val fileSystem = getRootFs(commonPath)
fileSystem.init(null)
val realPath: String = if (commonPath.endsWith("/")) {
@@ -45,6 +42,16 @@ object CommonLogPathUtils {
FileSystemUtils.mkdirs(fileSystem, fsPath, StorageUtils.getJvmUser)
fileSystem.setPermission(fsPath, "770")
}
+ // create default creator path
+ if (isResPath) {
+ val defaultPath =
+ GovernanceUtils.getResultParentPath(GovernanceUtils.LINKIS_DEFAULT_RES_CREATOR)
+ val resPath = new FsPath(defaultPath)
+ if (!fileSystem.exists(resPath)) {
+ FileSystemUtils.mkdirs(fileSystem, resPath, StorageUtils.getJvmUser)
+ fileSystem.setPermission(resPath, "770")
+ }
+ }
Utils.tryQuietly(fileSystem.close())
}
@@ -52,8 +59,6 @@ object CommonLogPathUtils {
val fsPath = new FsPath(commonPath)
if (StorageUtils.HDFS.equals(fsPath.getFsType)) {
FSFactory.getFs(StorageUtils.HDFS).asInstanceOf[FileSystem]
- } else if (StorageUtils.S3.equals(fsPath.getFsType)) {
- FSFactory.getFs(StorageUtils.S3).asInstanceOf[FileSystem]
} else {
FSFactory
.getFs(StorageUtils.FILE, StorageConfiguration.LOCAL_ROOT_USER.getValue)
@@ -61,34 +66,20 @@ object CommonLogPathUtils {
}
}
- private val resPrefix = GovernanceCommonConf.RESULT_SET_STORE_PATH.getValue
-
- /**
- * get result path parentPath: resPrefix + dateStr + result + creator subPath: parentPath +
- * executeUser + taskid + filename
- * @param jobRequest
- * @return
- */
def getResultParentPath(jobRequest: JobRequest): String = {
- val resStb = new StringBuilder()
- if (resStb.endsWith("/")) {
- resStb.append(resPrefix)
- } else {
- resStb.append(resPrefix).append("/")
- }
- val dateFormat = new SimpleDateFormat("yyyy-MM-dd")
- val date = new Date(System.currentTimeMillis)
- val dateString = dateFormat.format(date)
val userCreator = LabelUtil.getUserCreatorLabel(jobRequest.getLabels)
val creator =
- if (null == userCreator) EntranceConfiguration.DEFAULT_CREATE_SERVICE
+ if (null == userCreator) EntranceConfiguration.DEFAULT_CREATE_SERVICE.getValue
else userCreator.getCreator
- resStb.append("result").append("/").append(dateString).append("/").append(creator)
- resStb.toString()
+ GovernanceUtils.getResultParentPath(creator)
}
def getResultPath(jobRequest: JobRequest): String = {
- val parentPath = getResultParentPath(jobRequest)
+ val userCreator = LabelUtil.getUserCreatorLabel(jobRequest.getLabels)
+ val creator =
+ if (null == userCreator) EntranceConfiguration.DEFAULT_CREATE_SERVICE.getValue
+ else userCreator.getCreator
+ val parentPath = GovernanceUtils.getResultParentPath(creator)
parentPath + "/" + jobRequest.getExecuteUser + "/" + jobRequest.getId
}
diff --git a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/utils/EntranceUtils.scala b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/utils/EntranceUtils.scala
index 13dcefa9f92..f397aeb2b21 100644
--- a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/utils/EntranceUtils.scala
+++ b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/utils/EntranceUtils.scala
@@ -18,19 +18,38 @@
package org.apache.linkis.entrance.utils
import org.apache.linkis.common.ServiceInstance
-import org.apache.linkis.common.utils.{Logging, Utils}
+import org.apache.linkis.common.conf.Configuration
+import org.apache.linkis.common.log.LogUtils
+import org.apache.linkis.common.utils.{Logging, SHAUtils, Utils}
+import org.apache.linkis.entrance.conf.EntranceConfiguration
+import org.apache.linkis.entrance.errorcode.EntranceErrorCodeSummary
+import org.apache.linkis.entrance.exception.EntranceRPCException
+import org.apache.linkis.governance.common.entity.job.JobRequest
+import org.apache.linkis.governance.common.protocol.conf.{DepartmentRequest, DepartmentResponse}
import org.apache.linkis.instance.label.client.InstanceLabelClient
import org.apache.linkis.manager.label.builder.factory.LabelBuilderFactoryContext
+import org.apache.linkis.manager.label.conf.LabelCommonConfig
import org.apache.linkis.manager.label.constant.{LabelKeyConstant, LabelValueConstant}
import org.apache.linkis.manager.label.entity.Label
-import org.apache.linkis.manager.label.entity.engine.{EngineTypeLabel, UserCreatorLabel}
+import org.apache.linkis.manager.label.entity.engine.{EngineType, EngineTypeLabel, UserCreatorLabel}
import org.apache.linkis.manager.label.entity.route.RouteLabel
-import org.apache.linkis.manager.label.utils.EngineTypeLabelCreator
+import org.apache.linkis.manager.label.utils.{EngineTypeLabelCreator, LabelUtil}
+import org.apache.linkis.protocol.utils.TaskUtils
import org.apache.linkis.rpc.Sender
+import org.apache.linkis.server.BDPJettyServerHelper
+import org.apache.commons.collections.MapUtils
import org.apache.commons.lang3.StringUtils
+import org.apache.http.client.config.RequestConfig
+import org.apache.http.client.methods.{CloseableHttpResponse, HttpPost}
+import org.apache.http.entity.{ContentType, StringEntity}
+import org.apache.http.impl.client.{BasicCookieStore, CloseableHttpClient, HttpClients}
+import org.apache.http.impl.conn.PoolingHttpClientConnectionManager
+import org.apache.http.util.EntityUtils
-import java.util
+import java.{lang, util}
+import java.nio.charset.StandardCharsets
+import java.util.{HashMap, Map}
import scala.collection.JavaConverters.asScalaBufferConverter
@@ -40,6 +59,19 @@ object EntranceUtils extends Logging {
private val labelFactory = LabelBuilderFactoryContext.getLabelBuilderFactory
+ val sparkVersionRegex = "^3(\\.\\d+)*$"
+
+ protected val connectionManager = new PoolingHttpClientConnectionManager
+ protected val cookieStore = new BasicCookieStore
+
+ private val httpClient: CloseableHttpClient = HttpClients
+ .custom()
+ .setDefaultCookieStore(cookieStore)
+ .setMaxConnTotal(EntranceConfiguration.DOCTOR_HTTP_MAX_CONNECT)
+ .setMaxConnPerRoute(EntranceConfiguration.DOCTOR_HTTP_MAX_CONNECT / 2)
+ .setConnectionManager(connectionManager)
+ .build()
+
def getUserCreatorEcTypeKey(
userCreatorLabel: UserCreatorLabel,
engineTypeLabel: EngineTypeLabel
@@ -105,4 +137,310 @@ object EntranceUtils extends Logging {
}
}
+ /**
+ * Get the department ID of the given user
+ */
+ def getUserDepartmentId(username: String): String = {
+ var departmentId = ""
+ val sender: Sender =
+ Sender.getSender(Configuration.CLOUD_CONSOLE_CONFIGURATION_SPRING_APPLICATION_NAME.getValue)
+ val responseSubmitUser = sender.ask(new DepartmentRequest(username))
+ responseSubmitUser match {
+ case departmentSubmitUser: DepartmentResponse =>
+ if (StringUtils.isNotBlank(departmentSubmitUser.departmentId)) {
+ departmentId = departmentSubmitUser.departmentId
+ }
+ case _ =>
+ }
+ departmentId
+ }
+
+ /**
+ * Dynamic engine type selection
+ */
+ def getDynamicEngineType(sql: String, logAppender: java.lang.StringBuilder): String = {
+ val defaultEngineType = "spark"
+
+ if (!EntranceConfiguration.AI_SQL_DYNAMIC_ENGINE_SWITCH) {
+ return defaultEngineType
+ }
+
+ logger.info(s"AISQL automatically switches engines and begins to call Doctoris")
+
+ val params = new util.HashMap[String, AnyRef]()
+ params.put("sql", sql)
+ params.put("highStability", "")
+ params.put("queueResourceUsage", "")
+
+ val request = DoctorRequest(
+ apiUrl = EntranceConfiguration.DOCTOR_DYNAMIC_ENGINE_URL,
+ params = params,
+ defaultValue = defaultEngineType,
+ successMessage = "Aisql intelligent selection engines, Suggest",
+ exceptionMessage = "Aisql intelligent selection component exception"
+ )
+
+ val response = callDoctorService(request, logAppender)
+ response.result
+ }
+
+ def dealsparkDynamicConf(
+ jobRequest: JobRequest,
+ logAppender: lang.StringBuilder,
+ params: util.Map[String, AnyRef]
+ ): Unit = {
+ // deal with spark3 dynamic allocation conf
+ // 1. Only Spark 3 needs the dynamic allocation parameters handled. 2. If the user has not specified a template name, set defaults consistent with the underlying Spark configuration; otherwise use the parameters specified in the user's template.
+ val properties = new util.HashMap[String, AnyRef]()
+ val label: EngineTypeLabel = LabelUtil.getEngineTypeLabel(jobRequest.getLabels)
+ val sparkDynamicAllocationEnabled: Boolean =
+ EntranceConfiguration.SPARK_DYNAMIC_ALLOCATION_ENABLED
+ if (
+ sparkDynamicAllocationEnabled && label.getEngineType.equals(
+ EngineType.SPARK.toString
+ ) && label.getVersion.contains(LabelCommonConfig.SPARK3_ENGINE_VERSION.getValue)
+ ) {
+ properties.put(
+ EntranceConfiguration.SPARK_EXECUTOR_CORES.key,
+ EntranceConfiguration.SPARK_EXECUTOR_CORES.getValue
+ )
+ properties.put(
+ EntranceConfiguration.SPARK_EXECUTOR_MEMORY.key,
+ EntranceConfiguration.SPARK_EXECUTOR_MEMORY.getValue
+ )
+ properties.put(
+ EntranceConfiguration.SPARK_DYNAMIC_ALLOCATION_MAX_EXECUTORS.key,
+ EntranceConfiguration.SPARK_DYNAMIC_ALLOCATION_MAX_EXECUTORS.getValue
+ )
+ properties.put(
+ EntranceConfiguration.SPARK_EXECUTOR_INSTANCES.key,
+ EntranceConfiguration.SPARK_EXECUTOR_INSTANCES.getValue
+ )
+ properties.put(
+ EntranceConfiguration.SPARK_EXECUTOR_MEMORY_OVERHEAD.key,
+ EntranceConfiguration.SPARK_EXECUTOR_MEMORY_OVERHEAD.getValue
+ )
+ properties.put(
+ EntranceConfiguration.SPARK3_PYTHON_VERSION.key,
+ EntranceConfiguration.SPARK3_PYTHON_VERSION.getValue
+ )
+ Utils.tryAndWarn {
+ val extraConfs: String =
+ EntranceConfiguration.SPARK_DYNAMIC_ALLOCATION_ADDITIONAL_CONFS
+ if (StringUtils.isNotBlank(extraConfs)) {
+ val confs: Array[String] = extraConfs.split(",")
+ for (conf <- confs) {
+ val confKey: String = conf.split("=")(0)
+ val confValue: String = conf.split("=")(1)
+ properties.put(confKey, confValue)
+ }
+ }
+ }
+ logAppender.append(
+ LogUtils
+ .generateInfo(s"use spark3 default conf. \n")
+ )
+ TaskUtils.addStartupMap(params, properties)
+ }
+ }
+
+ /**
+ * Sensitive SQL check
+ */
+ def sensitiveSqlCheck(
+ code: String,
+ codeType: String,
+ engine: String,
+ user: String,
+ logAppender: java.lang.StringBuilder
+ ): (Boolean, String) = {
+ val params = new util.HashMap[String, AnyRef]()
+ params.put("code", code)
+ params.put("user", user)
+ params.put("engine", engine)
+ params.put("codeType", codeType)
+
+ val request = DoctorRequest(
+ apiUrl = EntranceConfiguration.DOCTOR_ENCRYPT_SQL_URL,
+ params = params,
+ defaultValue = "false",
+ successMessage = "Sensitive SQL Check result",
+ exceptionMessage = "Sensitive SQL Check exception"
+ )
+
+ val response = callDoctorService(request, logAppender)
+ (response.result.toBoolean, response.reason)
+ }
+
+ /**
+ * Generic request/response model for Doctor service calls
+ */
+ case class DoctorRequest(
+ apiUrl: String,
+ params: util.Map[String, AnyRef],
+ defaultValue: String,
+ successMessage: String,
+ exceptionMessage: String
+ )
+
+ case class DoctorResponse(
+ success: Boolean,
+ result: String,
+ reason: String = "",
+ duration: Double = 0.0
+ )
+
+ /**
+ * Generic method for calling the Doctor service
+ */
+ private def callDoctorService(
+ request: DoctorRequest,
+ logAppender: java.lang.StringBuilder
+ ): DoctorResponse = {
+ // Check the required configuration parameters
+ if (!isValidDoctorConfiguration()) {
+ logInfo(s"${request.exceptionMessage}, using default: ${request.defaultValue}", logAppender)
+ return DoctorResponse(success = false, result = request.defaultValue)
+ }
+
+ try {
+ val startTime = System.currentTimeMillis()
+ val url = buildDoctorRequestUrl(request.apiUrl)
+ val response = executeDoctorHttpRequest(url, request.params)
+
+ if (StringUtils.isBlank(response)) {
+ return DoctorResponse(success = false, result = request.defaultValue)
+ }
+
+ parseDoctorResponse(response, startTime, request, logAppender)
+ } catch {
+ case e: Exception =>
+ logger.warn(s"${request.exceptionMessage}: params: ${request.params}", e)
+ logInfo(s"${request.exceptionMessage}, using default: ${request.defaultValue}", logAppender)
+ DoctorResponse(success = false, result = request.defaultValue)
+ }
+ }
+
+ /**
+ * Check whether the Doctor configuration parameters are valid
+ */
+ private def isValidDoctorConfiguration(): Boolean = {
+ StringUtils.isNotBlank(EntranceConfiguration.LINKIS_SYSTEM_NAME) &&
+ StringUtils.isNotBlank(EntranceConfiguration.DOCTOR_SIGNATURE_TOKEN) &&
+ StringUtils.isNotBlank(EntranceConfiguration.DOCTOR_CLUSTER) &&
+ StringUtils.isNotBlank(EntranceConfiguration.DOCTOR_URL)
+ }
+
+ /**
+ * Build the Doctor request URL
+ */
+ private def buildDoctorRequestUrl(apiUrl: String): String = {
+ val timestampStr = String.valueOf(System.currentTimeMillis)
+ val signature = SHAUtils.Encrypt(
+ SHAUtils.Encrypt(
+ EntranceConfiguration.LINKIS_SYSTEM_NAME + EntranceConfiguration.DOCTOR_NONCE + timestampStr,
+ null
+ ) + EntranceConfiguration.DOCTOR_SIGNATURE_TOKEN,
+ null
+ )
+
+ (EntranceConfiguration.DOCTOR_URL + apiUrl)
+ .replace("$app_id", EntranceConfiguration.LINKIS_SYSTEM_NAME)
+ .replace("$timestamp", timestampStr)
+ .replace("$nonce", EntranceConfiguration.DOCTOR_NONCE)
+ .replace("$signature", signature)
+ }
+
+ /**
+ * Execute the Doctor HTTP request
+ */
+ private def executeDoctorHttpRequest(url: String, params: util.Map[String, AnyRef]): String = {
+ val httpPost = new HttpPost(url)
+ // Add common request parameters
+ params.put("cluster", EntranceConfiguration.DOCTOR_CLUSTER)
+
+ val json = BDPJettyServerHelper.gson.toJson(params)
+ val requestConfig = RequestConfig
+ .custom()
+ .setConnectTimeout(EntranceConfiguration.DOCTOR_REQUEST_TIMEOUT)
+ .setConnectionRequestTimeout(EntranceConfiguration.DOCTOR_REQUEST_TIMEOUT)
+ .setSocketTimeout(EntranceConfiguration.DOCTOR_REQUEST_TIMEOUT)
+ .build()
+
+ val entity = new StringEntity(
+ json,
+ ContentType.create(ContentType.APPLICATION_JSON.getMimeType, StandardCharsets.UTF_8.toString)
+ )
+ entity.setContentEncoding(StandardCharsets.UTF_8.toString)
+ httpPost.setConfig(requestConfig)
+ httpPost.setEntity(entity)
+
+ val execute = httpClient.execute(httpPost)
+ EntityUtils.toString(execute.getEntity, StandardCharsets.UTF_8.toString)
+ }
+
+ /**
+ * Parse the Doctor response
+ */
+ private def parseDoctorResponse(
+ responseStr: String,
+ startTime: Long,
+ request: DoctorRequest,
+ logAppender: java.lang.StringBuilder
+ ): DoctorResponse = {
+ try {
+ val endTime = System.currentTimeMillis()
+ val responseMapJson: Map[String, Object] =
+ BDPJettyServerHelper.gson.fromJson(responseStr, classOf[Map[_, _]])
+
+ if (MapUtils.isNotEmpty(responseMapJson) && responseMapJson.containsKey("data")) {
+ val dataMap = MapUtils.getMap(responseMapJson, "data")
+ val duration = (endTime - startTime) / 1000.0
+
+ // Return different results depending on which API was called
+ if (request.apiUrl.contains("plaintext")) {
+ // Sensitive information check API
+ val sensitive = dataMap.get("sensitive").toString.toBoolean
+ val reason = dataMap.get("reason").toString
+ logInfo(
+ s"${request.successMessage}: $sensitive, This decision took $duration seconds",
+ logAppender
+ )
+ DoctorResponse(
+ success = true,
+ result = sensitive.toString,
+ reason = reason,
+ duration = duration
+ )
+ } else {
+ // Dynamic engine selection API
+ val engineType = dataMap.get("engine").toString
+ val reason = dataMap.get("reason").toString
+ logInfo(
+ s"${request.successMessage}: $engineType, Hit rules: $reason, This decision took $duration seconds",
+ logAppender
+ )
+ DoctorResponse(success = true, result = engineType, reason = reason, duration = duration)
+ }
+ } else {
+ throw new EntranceRPCException(
+ EntranceErrorCodeSummary.DOCTORIS_ERROR.getErrorCode,
+ EntranceErrorCodeSummary.DOCTORIS_ERROR.getErrorDesc
+ )
+ }
+ } catch {
+ case e: Exception =>
+ logger.warn(s"Doctoris返回数据解析失败:json: $responseStr", e)
+ logInfo(s"${request.exceptionMessage}, using default: ${request.defaultValue}", logAppender)
+ DoctorResponse(success = false, result = request.defaultValue)
+ }
+ }
+
+ /**
+ * Append an info message to the log appender
+ */
+ private def logInfo(message: String, logAppender: java.lang.StringBuilder): Unit = {
+ logAppender.append(LogUtils.generateInfo(s"$message\n"))
+ }
+
}
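A hypothetical, self-contained sketch of the Doctor signature scheme used by buildDoctorRequestUrl above. SHAUtils.Encrypt is assumed here to behave like a hex-encoded SHA digest (the concrete algorithm is an assumption), and the URL template and configuration values are illustrative stand-ins.

import java.security.MessageDigest

object DoctorSignatureSketch extends App {
  private def sha(s: String): String =
    MessageDigest
      .getInstance("SHA-256") // assumed digest; the real SHAUtils.Encrypt may differ
      .digest(s.getBytes("UTF-8"))
      .map("%02x".format(_))
      .mkString

  val appId = "linkis"       // stand-in for LINKIS_SYSTEM_NAME
  val nonce = "12345"        // stand-in for DOCTOR_NONCE
  val token = "secret-token" // stand-in for DOCTOR_SIGNATURE_TOKEN
  val timestamp = System.currentTimeMillis.toString

  // signature = SHA(SHA(appId + nonce + timestamp) + token), substituted into the URL template
  val signature = sha(sha(appId + nonce + timestamp) + token)

  val urlTemplate =
    "http://doctor.example.com/api/v1/check?app_id=$app_id&timestamp=$timestamp&nonce=$nonce&signature=$signature"
  val url = urlTemplate
    .replace("$app_id", appId)
    .replace("$timestamp", timestamp)
    .replace("$nonce", nonce)
    .replace("$signature", signature)
  println(url)
}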
diff --git a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/utils/JobHistoryHelper.scala b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/utils/JobHistoryHelper.scala
index 44e2357b341..600960d8809 100644
--- a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/utils/JobHistoryHelper.scala
+++ b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/utils/JobHistoryHelper.scala
@@ -17,6 +17,7 @@
package org.apache.linkis.entrance.utils
+import org.apache.linkis.common.conf.Configuration
import org.apache.linkis.common.exception.ErrorException
import org.apache.linkis.common.utils.{Logging, Utils}
import org.apache.linkis.entrance.conf.EntranceConfiguration
@@ -45,7 +46,7 @@ import com.google.common.net.InetAddresses
object JobHistoryHelper extends Logging {
private val sender =
- Sender.getSender(EntranceConfiguration.JOBHISTORY_SPRING_APPLICATION_NAME.getValue)
+ Sender.getSender(Configuration.JOBHISTORY_SPRING_APPLICATION_NAME.getValue)
private val SUCCESS_FLAG = 0
@@ -191,12 +192,12 @@ object JobHistoryHelper extends Logging {
response match {
case resp: util.List[JobRespProtocol] =>
// todo filter success data, rpc have bug
-// resp.asScala
-// .filter(r =>
-// r.getStatus == SUCCESS_FLAG && r.getData.containsKey(JobRequestConstants.JOB_ID)
-// )
-// .map(_.getData.get(JobRequestConstants.JOB_ID).asInstanceOf[java.lang.Long])
-// .toList
+ // resp.asScala
+ // .filter(r =>
+ // r.getStatus == SUCCESS_FLAG && r.getData.containsKey(JobRequestConstants.JOB_ID)
+ // )
+ // .map(_.getData.get(JobRequestConstants.JOB_ID).asInstanceOf[java.lang.Long])
+ // .toList
taskIdList
case _ =>
@@ -359,6 +360,9 @@ object JobHistoryHelper extends Logging {
if (null != infoMap && infoMap.containsKey(TaskConstant.ENGINE_INSTANCE)) {
metricsMap.put(TaskConstant.ENGINE_INSTANCE, infoMap.get(TaskConstant.ENGINE_INSTANCE))
}
+ if (null != infoMap && infoMap.containsKey(TaskConstant.JOB_IS_REUSE)) {
+ metricsMap.put(TaskConstant.JOB_IS_REUSE, infoMap.get(TaskConstant.JOB_IS_REUSE))
+ }
}
}
diff --git a/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/utils/SafeUtils.scala b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/utils/SafeUtils.scala
new file mode 100644
index 00000000000..eda8b13fc43
--- /dev/null
+++ b/linkis-computation-governance/linkis-entrance/src/main/scala/org/apache/linkis/entrance/utils/SafeUtils.scala
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.linkis.entrance.utils
+
+import org.apache.linkis.common.utils.Logging
+
+import java.util.regex.Pattern
+
+object SafeUtils extends Logging {
+
+ private val DANGEROUS_CODE_PATTERN = "/etc/passwd|" +
+ "/etc/shadow|" +
+ "/etc/group|" +
+ "open\\(\\s*['\"]/etc/[^'\"]+['\"]\\s*,|" +
+ "subprocess|" +
+ "os\\.system|" +
+ "os\\.popen|" +
+ "shutil\\.execute|" +
+ "eval|`.*?`|" +
+ "import\\s+os\\.env|" +
+ "import\\s+os\\.getlogin|" +
+ "import\\s+os\\.getpid|" +
+ "import\\s+os\\.getppid|" +
+ "import\\s+os\\.getcwd|" +
+ "import\\s+os\\.getexecname|" +
+ "import\\s+os\\.startfile|" +
+ "import\\s+os\\.fork|" +
+ "import\\s+os\\.kill|" +
+ "import\\s+os\\.wait|" +
+ "import\\s+os\\.waitpid|" +
+ "import\\s+os\\.wait3|" +
+ "import\\s+os\\.wait4|" +
+ "import\\s+os\\.confstr|" +
+ "import\\s+os\\.sysconf|" +
+ "import\\s+os\\.uname|" +
+ "import\\s+os\\.urandom|" +
+ "import\\s+os\\.chroot|" +
+ "import\\s+os\\.setuid|" +
+ "import\\s+os\\.setgid|" +
+ "import\\s+os\\.setgroups|" +
+ "import\\s+os\\.initgroups|" +
+ "import\\s+os\\.getgrouplist|" +
+ "import\\s+os\\.getlogin|" +
+ "import\\s+os\\.getpgid|" +
+ "import\\s+os\\.getpgrp|" +
+ "import\\s+os\\.getsid|" +
+ "import\\s+os\\.setpgid|" +
+ "import\\s+os\\.setpgrp|" +
+ "import\\s+os\\.setsid|" +
+ "import\\s+os\\.forkpty|" +
+ "import\\s+os\\.setreuid|" +
+ "import\\s+os\\.setregid|" +
+ "import\\s+os\\.getresuid|" +
+ "import\\s+os\\.getresgid"
+
+ private val ANNOTATION_PATTERN = "\\s*#.*$"
+
+ private val SAFETY_PASS = "SAFETY_PASS"
+
+ def isCodeSafe(code: String): Boolean = {
+ var isSafe = true
+ // Remove comments before matching against the dangerous-code pattern
+ val commentPattern = Pattern.compile(ANNOTATION_PATTERN, Pattern.MULTILINE)
+ val cleanCode = commentPattern.matcher(code).replaceAll("")
+ val code_pattern =
+ Pattern.compile(DANGEROUS_CODE_PATTERN, Pattern.DOTALL | Pattern.CASE_INSENSITIVE)
+ val code_matcher = code_pattern.matcher(cleanCode)
+ while (code_matcher.find) {
+ isSafe = false
+ val commentMatcher = commentPattern.matcher(code)
+ while (commentMatcher.find)
+ if (commentMatcher.group.toUpperCase().contains(SAFETY_PASS)) isSafe = true
+ }
+ isSafe
+ }
+
+}
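A usage sketch for SafeUtils.isCodeSafe above (assumes the object is on the classpath; the Python snippets are illustrative). A hit on the dangerous-code pattern marks the code unsafe, but any comment containing SAFETY_PASS whitelists the whole snippet.

import org.apache.linkis.entrance.utils.SafeUtils

object SafeUtilsUsageSketch extends App {
  // Hits the os.system pattern and carries no SAFETY_PASS comment -> unsafe
  println(SafeUtils.isCodeSafe("import os\nos.system('ls /tmp')")) // false

  // Same dangerous call, but a reviewer left a SAFETY_PASS comment -> treated as safe
  println(SafeUtils.isCodeSafe("# SAFETY_PASS reviewed\nimport os\nos.system('ls /tmp')")) // true
}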
diff --git a/linkis-computation-governance/linkis-entrance/src/test/java/org/apache/linkis/entrance/interceptor/impl/TemplateConfUtilsTest.java b/linkis-computation-governance/linkis-entrance/src/test/java/org/apache/linkis/entrance/interceptor/impl/TemplateConfUtilsTest.java
index c965529b579..7a6846df577 100644
--- a/linkis-computation-governance/linkis-entrance/src/test/java/org/apache/linkis/entrance/interceptor/impl/TemplateConfUtilsTest.java
+++ b/linkis-computation-governance/linkis-entrance/src/test/java/org/apache/linkis/entrance/interceptor/impl/TemplateConfUtilsTest.java
@@ -17,13 +17,18 @@
package org.apache.linkis.entrance.interceptor.impl;
-import org.junit.jupiter.api.Assertions;
+import org.apache.linkis.governance.common.entity.job.JobRequest;
+
import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.*;
+
class TemplateConfUtilsTest {
@Test
void getCustomTemplateConfName() {
+ JobRequest js = new JobRequest();
+ StringBuilder logBuilder = new StringBuilder();
String sqlCode =
""
+ "--注解\n"
@@ -34,8 +39,9 @@ void getCustomTemplateConfName() {
+ " --@set yy=123\n"
+ " --注解";
- String res = TemplateConfUtils.getCustomTemplateConfName(sqlCode, "sql");
- Assertions.assertEquals(res, "");
+ js.setExecutionCode(sqlCode);
+ String res = TemplateConfUtils.getCustomTemplateConfName(js, "sql", logBuilder);
+ assertEquals(res, "");
String sqlCode2 =
""
@@ -46,9 +52,9 @@ void getCustomTemplateConfName() {
+ " select \"--注解\" as test\n"
+ " --@set yy=123\n"
+ " --注解";
-
- res = TemplateConfUtils.getCustomTemplateConfName(sqlCode2, "sql");
- Assertions.assertEquals(res, "");
+ js.setExecutionCode(sqlCode2);
+ res = TemplateConfUtils.getCustomTemplateConfName(js, "sql", logBuilder);
+ assertEquals(res, "");
String sqlCode3 =
""
@@ -60,9 +66,9 @@ void getCustomTemplateConfName() {
+ " select \"--注解\" as test\n"
+ " --@set yy=123\n"
+ " --注解";
-
- res = TemplateConfUtils.getCustomTemplateConfName(sqlCode3, "sql");
- Assertions.assertEquals(res, "345");
+ js.setExecutionCode(sqlCode3);
+ res = TemplateConfUtils.getCustomTemplateConfName(js, "sql", logBuilder);
+ assertEquals(res, "345");
String sqlCode4 =
""
@@ -70,9 +76,9 @@ void getCustomTemplateConfName() {
+ " select \"--注解\" as test\n"
+ " --@set yy=123\n"
+ " --注解";
-
- res = TemplateConfUtils.getCustomTemplateConfName(sqlCode4, "sql");
- Assertions.assertEquals(res, "name1");
+ js.setExecutionCode(sqlCode4);
+ res = TemplateConfUtils.getCustomTemplateConfName(js, "sql", logBuilder);
+ assertEquals(res, "name1");
String sqlCode5 =
""
@@ -84,9 +90,9 @@ void getCustomTemplateConfName() {
+ " select \"--注解\" as test\n"
+ "#@set yy=123\n"
+ " #注解";
-
- res = TemplateConfUtils.getCustomTemplateConfName(sqlCode5, "python");
- Assertions.assertEquals(res, "pyname1");
+ js.setExecutionCode(sqlCode5);
+ res = TemplateConfUtils.getCustomTemplateConfName(js, "python", logBuilder);
+ assertEquals(res, "pyname1");
String sqlCode6 =
""
@@ -94,9 +100,9 @@ void getCustomTemplateConfName() {
+ " select \"//注解\" as test\n"
+ "//@set yy=123\n"
+ " #注解";
-
- res = TemplateConfUtils.getCustomTemplateConfName(sqlCode6, "scala");
- Assertions.assertEquals(res, "scalaname1");
+ js.setExecutionCode(sqlCode6);
+ res = TemplateConfUtils.getCustomTemplateConfName(js, "scala", logBuilder);
+ assertEquals(res, "scalaname1");
String sqlCode7 =
""
@@ -104,25 +110,27 @@ void getCustomTemplateConfName() {
+ " select \"--注解\" as test\n"
+ " --@set yy=123\n"
+ " --注解";
-
- res = TemplateConfUtils.getCustomTemplateConfName(sqlCode7, "hql");
- Assertions.assertEquals(res, "hqlname1");
+ js.setExecutionCode(sqlCode7);
+ res = TemplateConfUtils.getCustomTemplateConfName(js, "hql", logBuilder);
+ assertEquals(res, "hqlname1");
String sqlCode8 =
"---@set ec.resource.name=linkis_test2;\n"
+ " ---@set ec.resource.name=scriptis_test hive;\n"
+ " select * from dss autotest.demo data limit 100;";
- res = TemplateConfUtils.getCustomTemplateConfName(sqlCode8, "hql");
- Assertions.assertEquals(res, "linkis_test2");
+ js.setExecutionCode(sqlCode8);
+ res = TemplateConfUtils.getCustomTemplateConfName(js, "hql", logBuilder);
+ assertEquals(res, "linkis_test2");
}
@Test
void getCustomTemplateConfName2() {
-
+ JobRequest js = new JobRequest();
+ StringBuilder logBuilder = new StringBuilder();
String sqlCode9 =
"---@set ec.resource.name=linkis_test2;\r\n---@set ec.resource.name=scriptis_test_hive;\r\n--@set limitn=100\r\nselect * from dss_autotest.demo_data limit ${limitn};\r\n";
-
- String res = TemplateConfUtils.getCustomTemplateConfName(sqlCode9, "hql");
- Assertions.assertEquals(res, "linkis_test2");
+ js.setExecutionCode(sqlCode9);
+ String res = TemplateConfUtils.getCustomTemplateConfName(js, "hql", logBuilder);
+ assertEquals(res, "linkis_test2");
}
}
diff --git a/linkis-computation-governance/linkis-jdbc-driver/src/main/scala/org/apache/linkis/ujes/jdbc/UJESSQLDriverMain.scala b/linkis-computation-governance/linkis-jdbc-driver/src/main/scala/org/apache/linkis/ujes/jdbc/UJESSQLDriverMain.scala
index 44686981e80..3bcaaadbc1c 100644
--- a/linkis-computation-governance/linkis-jdbc-driver/src/main/scala/org/apache/linkis/ujes/jdbc/UJESSQLDriverMain.scala
+++ b/linkis-computation-governance/linkis-jdbc-driver/src/main/scala/org/apache/linkis/ujes/jdbc/UJESSQLDriverMain.scala
@@ -38,7 +38,12 @@ class UJESSQLDriverMain extends Driver with Logging {
override def connect(url: String, properties: Properties): Connection = if (acceptsURL(url)) {
val props = if (properties != null) properties else new Properties
- props.putAll(parseURL(url))
+ // The putting is performed iteratively in order to avoid this error (Java > 8):
+ // [error] both method putAll in class Properties of type (x$1: java.util.Map[_, _])Unit
+ // [error] and method putAll in class Hashtable of type (x$1: java.util.Map[_ <: Object, _ <: Object])Unit
+ parseURL(url).asScala.foreach { case (key, value) =>
+ props.put(key, value)
+ }
logger.info(s"input url:$url, properties:$properties")
val ujesClient = UJESClientFactory.getUJESClient(props)
new LinkisSQLConnection(ujesClient, props)
@@ -100,7 +105,12 @@ class UJESSQLDriverMain extends Driver with Logging {
override def getPropertyInfo(url: String, info: Properties): Array[DriverPropertyInfo] = {
val props = if (info != null) info else new Properties
- props.putAll(parseURL(url))
+ // The putting is performed iteratively in order to avoid this error (Java > 8):
+ // [error] both method putAll in class Properties of type (x$1: java.util.Map[_, _])Unit
+ // [error] and method putAll in class Hashtable of type (x$1: java.util.Map[_ <: Object, _ <: Object])Unit
+ parseURL(url).asScala.foreach { case (key, value) =>
+ props.put(key, value)
+ }
val hostProp = new DriverPropertyInfo(HOST, props.getProperty(HOST))
hostProp.required = true
val portProp = new DriverPropertyInfo(PORT, props.getProperty(PORT))
diff --git a/linkis-computation-governance/linkis-jdbc-driver/src/main/scala/org/apache/linkis/ujes/jdbc/UJESSQLResultSet.scala b/linkis-computation-governance/linkis-jdbc-driver/src/main/scala/org/apache/linkis/ujes/jdbc/UJESSQLResultSet.scala
index 37c379a7ca5..f506bde5e6b 100644
--- a/linkis-computation-governance/linkis-jdbc-driver/src/main/scala/org/apache/linkis/ujes/jdbc/UJESSQLResultSet.scala
+++ b/linkis-computation-governance/linkis-jdbc-driver/src/main/scala/org/apache/linkis/ujes/jdbc/UJESSQLResultSet.scala
@@ -175,13 +175,24 @@ class UJESSQLResultSet(
if (null == resultSetResult) {
return
}
- metaData = resultSetResult.getMetadata.asInstanceOf[util.List[util.Map[String, String]]]
- if (null != metaData) {
- for (cursor <- 1 to metaData.size()) {
- val col = metaData.get(cursor - 1)
- resultSetMetaData.setColumnNameProperties(cursor, col.get("columnName"))
- resultSetMetaData.setDataTypeProperties(cursor, col.get("dataType"))
- resultSetMetaData.setCommentPropreties(cursor, col.get("comment"))
+ val metaTmp = resultSetResult.getMetadata
+ if (NULL_VALUE.equals(String.valueOf(metaTmp))) {
+ val fileContentList =
+ resultSetResult.getFileContent.asInstanceOf[util.List[util.List[String]]]
+ if (null != fileContentList) {
+ resultSetMetaData.setColumnNameProperties(1, "linkis_string")
+ resultSetMetaData.setDataTypeProperties(1, "String")
+ resultSetMetaData.setCommentPropreties(1, NULL_VALUE)
+ }
+ } else {
+ metaData = metaTmp.asInstanceOf[util.List[util.Map[String, String]]]
+ if (null != metaData) {
+ for (cursor <- 1 to metaData.size()) {
+ val col = metaData.get(cursor - 1)
+ resultSetMetaData.setColumnNameProperties(cursor, col.get("columnName"))
+ resultSetMetaData.setDataTypeProperties(cursor, col.get("dataType"))
+ resultSetMetaData.setCommentPropreties(cursor, col.get("comment"))
+ }
}
}
}
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/conf/AMConfiguration.java b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/conf/AMConfiguration.java
index 72b72d8ebf0..e558191efd8 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/conf/AMConfiguration.java
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/conf/AMConfiguration.java
@@ -75,7 +75,7 @@ public class AMConfiguration {
CommonVars.apply("wds.linkis.governance.admin.operations", "");
public static final CommonVars ENGINE_START_MAX_TIME =
- CommonVars.apply("wds.linkis.manager.am.engine.start.max.time", new TimeType("11m"));
+ CommonVars.apply("wds.linkis.manager.am.engine.start.max.time", new TimeType("8m"));
public static final CommonVars ENGINE_CONN_START_REST_MAX_WAIT_TIME =
CommonVars.apply("wds.linkis.manager.am.engine.rest.start.max.time", new TimeType("40s"));
@@ -95,15 +95,18 @@ public class AMConfiguration {
public static final CommonVars MULTI_USER_ENGINE_TYPES =
CommonVars.apply(
"wds.linkis.multi.user.engine.types",
- "jdbc,es,presto,io_file,appconn,openlookeng,trino,jobserver,nebula,hbase,doris");
+ "es,presto,io_file,appconn,openlookeng,trino,jobserver,nebula,hbase,doris");
public static final CommonVars ALLOW_BATCH_KILL_ENGINE_TYPES =
CommonVars.apply("wds.linkis.allow.batch.kill.engine.types", "spark,hive,python");
public static final CommonVars UNALLOW_BATCH_KILL_ENGINE_TYPES =
- CommonVars.apply("wds.linkis.allow.batch.kill.engine.types", "trino,appconn,io_file,nebula");
+ CommonVars.apply(
+ "wds.linkis.unallow.batch.kill.engine.types", "trino,appconn,io_file,nebula,jdbc");
public static final CommonVars MULTI_USER_ENGINE_USER =
CommonVars.apply("wds.linkis.multi.user.engine.user", getDefaultMultiEngineUser());
+ public static final String UDF_KILL_ENGINE_TYPE =
+ CommonVars.apply("linkis.udf.kill.engine.type", "spark,hive").getValue();
public static final CommonVars ENGINE_LOCKER_MAX_TIME =
CommonVars.apply("wds.linkis.manager.am.engine.locker.max.time", 1000 * 60 * 5);
@@ -113,6 +116,15 @@ public class AMConfiguration {
"wds.linkis.manager.am.can.retry.logs", "already in use;Cannot allocate memory")
.getValue();
+ public static final int REUSE_ENGINE_ASYNC_MAX_THREAD_SIZE =
+ CommonVars.apply("wds.linkis.manager.reuse.max.thread.size", 200).getValue();
+
+ public static final int CREATE_ENGINE_ASYNC_MAX_THREAD_SIZE =
+ CommonVars.apply("wds.linkis.manager.create.max.thread.size", 200).getValue();
+
+ public static final int ASK_ENGINE_ERROR_ASYNC_MAX_THREAD_SIZE =
+ CommonVars.apply("wds.linkis.manager.ask.error.max.thread.size", 100).getValue();
+
public static final int ASK_ENGINE_ASYNC_MAX_THREAD_SIZE =
CommonVars.apply("wds.linkis.ecm.launch.max.thread.size", 200).getValue();
@@ -131,9 +143,15 @@ public class AMConfiguration {
public static final Boolean NODE_SELECT_HOTSPOT_EXCLUSION_RULE =
CommonVars.apply("linkis.node.select.hotspot.exclusion.rule.enable", true).getValue();
+ public static final CommonVars NODE_SELECT_HOTSPOT_EXCLUSION_SHUFFLE_RULER =
+ CommonVars.apply("linkis.node.select.hotspot.exclusion.shuffle.ruler", "size-limit");
+
public static final boolean EC_REUSE_WITH_RESOURCE_RULE_ENABLE =
CommonVars.apply("linkis.ec.reuse.with.resource.rule.enable", false).getValue();
+ public static final boolean EC_REUSE_WITH_TEMPLATE_RULE_ENABLE =
+ CommonVars.apply("linkis.ec.reuse.with.template.rule.enable", false).getValue();
+
public static final String EC_REUSE_WITH_RESOURCE_WITH_ECS =
CommonVars.apply("linkis.ec.reuse.with.resource.with.ecs", "spark,hive,shell,python")
.getValue();
@@ -176,6 +194,20 @@ public class AMConfiguration {
public static final boolean AM_USER_RESET_RESOURCE =
CommonVars.apply("linkis.am.user.reset.resource.enable", true).getValue();
+ public static final CommonVars ENGINE_REUSE_ENABLE_CACHE =
+ CommonVars.apply("wds.linkis.manager.am.engine.reuse.enable.cache", false);
+
+ public static final CommonVars ENGINE_REUSE_CACHE_EXPIRE_TIME =
+ CommonVars.apply("wds.linkis.manager.am.engine.reuse.cache.expire.time", new TimeType("5s"));
+
+ public static final CommonVars ENGINE_REUSE_CACHE_MAX_SIZE =
+ CommonVars.apply("wds.linkis.manager.am.engine.reuse.cache.max.size", 1000L);
+
+ public static final CommonVars ENGINE_REUSE_CACHE_SUPPORT_ENGINES =
+ CommonVars.apply("wds.linkis.manager.am.engine.reuse.cache.support.engines", "shell");
+ public static final CommonVars ENGINE_REUSE_SHUFF_SUPPORT_ENGINES =
+ CommonVars.apply("wds.linkis.manager.am.engine.reuse.shuff.support.engines", "shell");
+
public static String getDefaultMultiEngineUser() {
String jvmUser = Utils.getJvmUser();
return String.format(
@@ -203,7 +235,7 @@ public static boolean isUnAllowKilledEngineType(String engineType) {
AMConfiguration.UNALLOW_BATCH_KILL_ENGINE_TYPES.getValue().split(",");
Optional findResult =
Arrays.stream(unAllowBatchKillEngine)
- .filter(e -> e.equalsIgnoreCase(engineType))
+ .filter(e -> engineType.toLowerCase().contains(e)) // substring match so versioned types like "trino-371" are caught
.findFirst();
return findResult.isPresent();
}
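An illustrative Scala check mirroring the relaxed un-killable match above (the Java code now uses contains instead of equalsIgnoreCase), showing that a versioned engine type such as "trino-371" is caught by the "trino" entry.

object UnAllowKillMatchSketch extends App {
  val unAllowBatchKillEngines = Seq("trino", "appconn", "io_file", "nebula", "jdbc")

  def isUnAllowKilled(engineType: String): Boolean =
    unAllowBatchKillEngines.exists(e => engineType.toLowerCase.contains(e))

  println(isUnAllowKilled("trino-371"))   // true: matched by substring, not exact equality
  println(isUnAllowKilled("spark-3.2.1")) // false
}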
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/converter/DefaultMetricsConverter.java b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/converter/DefaultMetricsConverter.java
index c7620157a1b..132409c1545 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/converter/DefaultMetricsConverter.java
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/converter/DefaultMetricsConverter.java
@@ -136,6 +136,7 @@ public AMNode fillMetricsToNode(AMNode amNode, NodeMetrics metrics) {
amNode.setNodeHealthyInfo(parseHealthyInfo(metrics));
amNode.setNodeOverLoadInfo(parseOverLoadInfo(metrics));
amNode.setUpdateTime(metrics.getUpdateTime());
+ amNode.setNodeDescription(metrics.getDescription());
return amNode;
}
}
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/manager/DefaultEngineNodeManager.java b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/manager/DefaultEngineNodeManager.java
index 02b143d5cd6..e3e9689b4c3 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/manager/DefaultEngineNodeManager.java
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/manager/DefaultEngineNodeManager.java
@@ -31,9 +31,11 @@
import org.apache.linkis.manager.common.entity.metrics.NodeMetrics;
import org.apache.linkis.manager.common.entity.node.*;
import org.apache.linkis.manager.common.entity.persistence.PersistenceLabel;
+import org.apache.linkis.manager.common.entity.persistence.PersistenceNode;
import org.apache.linkis.manager.common.protocol.engine.EngineOperateRequest;
import org.apache.linkis.manager.common.protocol.engine.EngineOperateResponse;
import org.apache.linkis.manager.common.protocol.node.NodeHeartbeatMsg;
+import org.apache.linkis.manager.dao.NodeManagerMapper;
import org.apache.linkis.manager.label.builder.factory.LabelBuilderFactory;
import org.apache.linkis.manager.label.builder.factory.LabelBuilderFactoryContext;
import org.apache.linkis.manager.label.entity.engine.EngineInstanceLabel;
@@ -73,6 +75,8 @@ public class DefaultEngineNodeManager implements EngineNodeManager {
@Autowired private ResourceManager resourceManager;
+ @Autowired private NodeManagerMapper nodeManagerMapper;
+
@Autowired private LabelManagerPersistence labelManagerPersistence;
private final LabelBuilderFactory labelBuilderFactory =
@@ -223,6 +227,7 @@ public EngineNode[] getEngineNodes(ScoreServiceInstance[] scoreServiceInstances)
if (scoreServiceInstances == null || scoreServiceInstances.length == 0) {
return null;
}
+ List<String> instances = new ArrayList<>();
List scoreServiceInstancesList = Arrays.asList(scoreServiceInstances);
EngineNode[] engineNodes =
scoreServiceInstancesList.stream()
@@ -231,6 +236,7 @@ public EngineNode[] getEngineNodes(ScoreServiceInstance[] scoreServiceInstances)
AMEngineNode engineNode = new AMEngineNode();
engineNode.setScore(scoreServiceInstance.getScore());
engineNode.setServiceInstance(scoreServiceInstance.getServiceInstance());
+ instances.add(scoreServiceInstance.getServiceInstance().getInstance());
return engineNode;
})
.toArray(EngineNode[]::new);
@@ -241,9 +247,10 @@ public EngineNode[] getEngineNodes(ScoreServiceInstance[] scoreServiceInstances)
.collect(Collectors.toList());
try {
+ logger.info("start getEngineNodes.");
ResourceInfo resourceInfo =
resourceManager.getResourceInfo(serviceInstancesList.toArray(new ServiceInstance[0]));
-
+ logger.info("end resourceInfo {}", resourceInfo);
if (serviceInstancesList.isEmpty()) {
throw new LinkisRetryException(
AMConstant.ENGINE_ERROR_CODE, "Service instances cannot be empty.");
@@ -251,6 +258,15 @@ public EngineNode[] getEngineNodes(ScoreServiceInstance[] scoreServiceInstances)
List nodeMetrics =
nodeMetricManagerPersistence.getNodeMetrics(Arrays.asList(engineNodes));
+ logger.info(
+ "get nodeMetrics, with engineNode size: {}, res size: {}",
+ engineNodes.length,
+ nodeMetrics.size());
+ List<PersistenceNode> persistenceNodes = nodeManagerMapper.getNodesByInstances(instances);
+ logger.info(
+ "get persistenceNodes, with instance size: {}, res size: {}",
+ instances.size(),
+ persistenceNodes.size());
for (EngineNode engineNode : engineNodes) {
Optional optionMetrics =
@@ -269,6 +285,12 @@ public EngineNode[] getEngineNodes(ScoreServiceInstance[] scoreServiceInstances)
optionMetrics.ifPresent(metrics -> metricsConverter.fillMetricsToNode(engineNode, metrics));
optionRMNode.ifPresent(rmNode -> engineNode.setNodeResource(rmNode.getNodeResource()));
+
+ persistenceNodes.stream()
+ .filter(
+ node -> node.getInstance().equals(engineNode.getServiceInstance().getInstance()))
+ .findFirst()
+ .ifPresent(persistenceNode -> engineNode.setParams(persistenceNode.getParams()));
}
} catch (Exception e) {
LinkisRetryException linkisRetryException =
@@ -276,6 +298,7 @@ public EngineNode[] getEngineNodes(ScoreServiceInstance[] scoreServiceInstances)
linkisRetryException.initCause(e);
throw linkisRetryException;
}
+ logger.info("end getEngineNodes");
return engineNodes;
}
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/pointer/AbstractNodePointer.java b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/pointer/AbstractNodePointer.java
index fc0a1fca244..1c6f8100339 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/pointer/AbstractNodePointer.java
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/pointer/AbstractNodePointer.java
@@ -85,4 +85,9 @@ public boolean equals(Object obj) {
}
return false;
}
+
+ @Override
+ public int hashCode() {
+ return getNode().getServiceInstance().hashCode();
+ }
}
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/restful/EMRestfulApi.java b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/restful/EMRestfulApi.java
index 414a11bf13f..c9fe9ff0ca2 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/restful/EMRestfulApi.java
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/restful/EMRestfulApi.java
@@ -438,6 +438,7 @@ public Message openEngineLog(HttpServletRequest req, @RequestBody JsonNode jsonN
if (!logType.equals("stdout")
&& !logType.equals("stderr")
&& !logType.equals("gc")
+ && !logType.equals("udfLog")
&& !logType.equals("yarnApp")) {
throw new AMErrorException(
AMErrorCode.PARAM_ERROR.getErrorCode(), AMErrorCode.PARAM_ERROR.getErrorDesc());
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/restful/EngineRestfulApi.java b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/restful/EngineRestfulApi.java
index 4f68937e63c..1e118e0ee7c 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/restful/EngineRestfulApi.java
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/restful/EngineRestfulApi.java
@@ -737,7 +737,7 @@ public Message executeEngineConnOperation(HttpServletRequest req, @RequestBody J
@ApiOperation(
value = "kill egineconns of a ecm",
- notes = "Kill engine after updating configuration",
+ notes = "Kill engine by cteator or engineType",
response = Message.class)
@ApiImplicitParams({
@ApiImplicitParam(name = "creator", dataType = "String", required = true, example = "IDE"),
@@ -748,7 +748,7 @@ public Message executeEngineConnOperation(HttpServletRequest req, @RequestBody J
example = "hive-2.3.3"),
})
@ApiOperationSupport(ignoreParameters = {"param"})
- @RequestMapping(path = "/rm/killEngineByUpdateConfig", method = RequestMethod.POST)
+ @RequestMapping(path = "/rm/killEngineByCreatorEngineType", method = RequestMethod.POST)
public Message killEngineByUpdateConfig(HttpServletRequest req, @RequestBody JsonNode jsonNode)
throws AMErrorException {
String userName = ModuleUserUtils.getOperationUser(req);
@@ -770,7 +770,15 @@ public Message killEngineByUpdateConfig(HttpServletRequest req, @RequestBody Jso
&& AMConfiguration.isUnAllowKilledEngineType(engineType)) {
return Message.error("multi user engine does not support this feature(多用户引擎不支持此功能)");
}
- engineStopService.stopUnlockECByUserCreatorAndECType(userName, creatorStr, engineType);
+ if (Configuration.GLOBAL_CONF_SYMBOL().equals(engineType)) {
+ Arrays.stream(AMConfiguration.UDF_KILL_ENGINE_TYPE.split(","))
+ .forEach(
+ engine ->
+ engineStopService.stopUnlockECByUserCreatorAndECType(
+ userName, creatorStr, engine));
+ } else {
+ engineStopService.stopUnlockECByUserCreatorAndECType(userName, creatorStr, engineType);
+ }
return Message.ok("Kill engineConn succeed");
}
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/service/impl/ECResourceInfoServiceImpl.java b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/service/impl/ECResourceInfoServiceImpl.java
index 12d66163b16..02fd4476fc3 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/service/impl/ECResourceInfoServiceImpl.java
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/am/service/impl/ECResourceInfoServiceImpl.java
@@ -31,6 +31,7 @@
import org.apache.linkis.manager.dao.NodeManagerMapper;
import org.apache.linkis.manager.label.service.NodeLabelService;
import org.apache.linkis.manager.persistence.LabelManagerPersistence;
+import org.apache.linkis.protocol.constants.TaskConstant;
import org.apache.linkis.server.BDPJettyServerHelper;
import org.apache.commons.collections.MapUtils;
@@ -187,7 +188,7 @@ public List> getECResourceInfoList(
}
item.put("lastUnlockTimestamp", lastUnlockTimestamp);
item.put("useResource", ECResourceInfoUtils.getStringToMap(usedResourceStr));
- item.put("ecmInstance", latestRecord.getEcmInstance());
+ item.put(TaskConstant.ECM_INSTANCE, latestRecord.getEcmInstance());
String engineType = latestRecord.getEngineType();
item.put("engineType", engineType);
if (StringUtils.isNotBlank(queueName)) {
@@ -216,10 +217,10 @@ public List> getECResourceInfoList(
.collect((Collectors.toMap(EMNodeVo::getInstance, item -> item)));
for (Map stringObjectMap : resultList) {
if (isCrossCluster
- && clusterMap.containsKey(stringObjectMap.get("ecmInstance").toString())) {
+ && clusterMap.containsKey(stringObjectMap.get(TaskConstant.ECM_INSTANCE).toString())) {
resultListByCluster.add(stringObjectMap);
} else if (!isCrossCluster
- && !clusterMap.containsKey(stringObjectMap.get("ecmInstance").toString())) {
+ && !clusterMap.containsKey(stringObjectMap.get(TaskConstant.ECM_INSTANCE).toString())) {
resultListByCluster.add(stringObjectMap);
}
}
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/rm/external/yarn/YarnResourceRequester.java b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/rm/external/yarn/YarnResourceRequester.java
index 006b58157f3..4f6a158f7c8 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/rm/external/yarn/YarnResourceRequester.java
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/java/org/apache/linkis/manager/rm/external/yarn/YarnResourceRequester.java
@@ -17,6 +17,7 @@
package org.apache.linkis.manager.rm.external.yarn;
+import org.apache.linkis.engineplugin.server.conf.EngineConnPluginConfiguration;
import org.apache.linkis.manager.common.entity.resource.CommonNodeResource;
import org.apache.linkis.manager.common.entity.resource.NodeResource;
import org.apache.linkis.manager.common.entity.resource.ResourceType;
@@ -42,6 +43,9 @@
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.JsonToken;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
@@ -56,7 +60,9 @@ public class YarnResourceRequester implements ExternalResourceRequester {
private final String HASTATE_ACTIVE = "active";
private static final ObjectMapper objectMapper = new ObjectMapper();
+ private static final JsonFactory factory = new JsonFactory();
private final Map rmAddressMap = new ConcurrentHashMap<>();
+ private final String queuePrefix = EngineConnPluginConfiguration.QUEUE_PREFIX().getValue();
private static final HttpClient httpClient = HttpClients.createDefault();
@@ -74,7 +80,12 @@ public NodeResource requestResourceInfo(
logger.info("rmWebAddress: " + rmWebAddress);
String queueName = ((YarnResourceIdentifier) identifier).getQueueName();
- String realQueueName = "root." + queueName;
+ if (queueName.startsWith(queuePrefix)) {
+ logger.info(
+ "Queue name {} starts with '{}', remove '{}'", queueName, queuePrefix, queuePrefix);
+ queueName = queueName.substring(queuePrefix.length());
+ }
+ String realQueueName = queuePrefix + queueName;
try {
YarnQueueInfo resources = getResources(rmWebAddress, realQueueName, queueName, provider);
@@ -212,7 +223,7 @@ public YarnQueueInfo getResources(
realQueueName = queueName;
JsonNode childQueues = getChildQueuesOfCapacity(schedulerInfo);
Optional<JsonNode> queue = getQueueOfCapacity(childQueues, realQueueName);
- if (queue == null || !queue.isPresent()) {
+ if (!queue.isPresent()) {
logger.debug(
"cannot find any information about queue " + queueName + ", response: " + resp);
throw new RMWarnException(
@@ -234,7 +245,7 @@ public YarnQueueInfo getResources(
JsonNode childQueues = getChildQueues(schedulerInfo.path("rootQueue"));
queue = getQueue(childQueues, realQueueName);
}
- if (queue == null || !queue.isPresent()) {
+ if (!queue.isPresent()) {
logger.debug(
"cannot find any information about queue " + queueName + ", response: " + resp);
throw new RMWarnException(
@@ -301,7 +312,7 @@ public List requestAppInfo(
String rmWebAddress = getAndUpdateActiveRmWebAddress(provider);
String queueName = ((YarnResourceIdentifier) identifier).getQueueName();
- String realQueueName = "root." + queueName;
+ String realQueueName = queuePrefix + queueName;
JsonNode resp = getResponseByUrl("apps", rmWebAddress, provider).path("apps").path("app");
if (resp.isMissingNode()) {
@@ -337,6 +348,108 @@ public ResourceType getResourceType() {
return ResourceType.Yarn;
}
+ public JsonNode parseJsonWithDuplicatesToJsonNode(String json) throws IOException {
+ try (JsonParser parser = factory.createParser(json)) {
+ if (parser.nextToken() != JsonToken.START_OBJECT) {
+ throw new IllegalStateException("Expected content to be an object");
+ }
+ return parseObject(parser);
+ }
+ }
+
+ private JsonNode parseObject(JsonParser parser) throws IOException {
+ ObjectNode rootNode = objectMapper.createObjectNode();
+ while (parser.nextToken() != JsonToken.END_OBJECT) {
+ if (parser.currentToken() == JsonToken.FIELD_NAME) {
+ String fieldName = parser.getCurrentName();
+ parser.nextToken(); // advance to the value
+ if (parser.currentToken() == JsonToken.START_ARRAY) {
+ // handle array values
+ ArrayNode arrayNode = objectMapper.createArrayNode();
+ while (parser.nextToken() != JsonToken.END_ARRAY) {
+ if (parser.currentToken() == JsonToken.START_OBJECT) {
+ arrayNode.add(parseNestedObject(parser));
+ } else {
+ arrayNode.add(parser.getText());
+ }
+ }
+ rootNode.set(fieldName, arrayNode);
+ } else {
+ // handle a plain (non-array) field
+ rootNode.set(fieldName, objectMapper.valueToTree(parseValue(parser)));
+ }
+ }
+ }
+ return rootNode;
+ }
+
+ private JsonNode parseNestedObject(JsonParser parser) throws IOException {
+ ObjectNode nestedNode = objectMapper.createObjectNode();
+ Map<String, List<Object>> fieldMap = new LinkedHashMap<>();
+ while (parser.nextToken() != JsonToken.END_OBJECT) {
+ if (parser.currentToken() == JsonToken.FIELD_NAME) {
+ String fieldName = parser.getCurrentName();
+ parser.nextToken(); // advance to the value
+ Object value = parseValue(parser);
+ // append the value to the list kept for this field name
+ fieldMap.computeIfAbsent(fieldName, k -> new ArrayList<>()).add(value);
+ }
+ }
+ // convert the collected field map into a JsonNode
+ for (Map.Entry<String, List<Object>> entry : fieldMap.entrySet()) {
+ String key = entry.getKey();
+ List<Object> values = entry.getValue();
+ if (values.size() == 1) {
+ // a single value is set on the node directly
+ nestedNode.set(key, objectMapper.valueToTree(values.get(0)));
+ } else {
+ // multiple values for the same field name become an array node
+ ArrayNode arrayNode = objectMapper.createArrayNode();
+ for (Object value : values) {
+ arrayNode.add(objectMapper.valueToTree(value));
+ }
+ nestedNode.set(key, arrayNode);
+ }
+ }
+ return nestedNode;
+ }
+
+ private Object parseValue(JsonParser parser) throws IOException {
+ JsonToken token = parser.currentToken();
+ switch (token) {
+ case START_OBJECT:
+ return parseNestedObject(parser);
+ case START_ARRAY:
+ return parseArray(parser);
+ case VALUE_STRING:
+ return parser.getText();
+ case VALUE_NUMBER_INT:
+ return parser.getLongValue();
+ case VALUE_NUMBER_FLOAT:
+ return parser.getDoubleValue();
+ case VALUE_TRUE:
+ return true;
+ case VALUE_FALSE:
+ return false;
+ case VALUE_NULL:
+ return null;
+ default:
+ throw new IllegalStateException("Unexpected token: " + token);
+ }
+ }
+
+ private ArrayNode parseArray(JsonParser parser) throws IOException {
+ ArrayNode arrayNode = objectMapper.createArrayNode();
+ while (parser.nextToken() != JsonToken.END_ARRAY) {
+ if (parser.currentToken() == JsonToken.START_OBJECT) {
+ arrayNode.add(parseNestedObject(parser));
+ } else {
+ arrayNode.add(parser.getText());
+ }
+ }
+ return arrayNode;
+ }
+
private JsonNode getResponseByUrl(
String url, String rmWebAddress, ExternalResourceProvider provider) {
@@ -394,11 +507,16 @@ private JsonNode getResponseByUrl(
}
JsonNode jsonNode = null;
try {
- jsonNode = objectMapper.readTree(entityString);
+ jsonNode = parseJsonWithDuplicatesToJsonNode(entityString);
} catch (Exception e) {
- logger.warn("getResponseByUrl failed", e);
- throw new RMErrorException(
- YARN_QUEUE_EXCEPTION.getErrorCode(), YARN_QUEUE_EXCEPTION.getErrorDesc(), e);
+ logger.warn("parse json with duplicates failed.", e);
+ try {
+ jsonNode = objectMapper.readTree(entityString);
+ } catch (Exception ie) {
+ logger.warn("origin parse json failed", ie);
+ throw new RMErrorException(
+ YARN_QUEUE_EXCEPTION.getErrorCode(), YARN_QUEUE_EXCEPTION.getErrorDesc(), ie);
+ }
}
return jsonNode;
}
@@ -407,7 +525,7 @@ public String getAndUpdateActiveRmWebAddress(ExternalResourceProvider provider)
String haAddress = (String) provider.getConfigMap().get("rmWebAddress");
String activeAddress = rmAddressMap.get(haAddress);
if (StringUtils.isBlank(activeAddress)) {
- synchronized (haAddress.intern()) {
+ synchronized (haAddress.intern()) { // NOSONAR
if (StringUtils.isBlank(activeAddress)) {
if (logger.isDebugEnabled()) {
logger.debug(
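
The streaming parseJsonWithDuplicatesToJsonNode path above exists because Jackson's default tree model keeps only the last occurrence of a duplicated field name, which can drop entries from YARN scheduler responses that repeat a key. The standalone Java sketch below (illustrative only, not part of this patch; class name and sample JSON are made up) contrasts the two behaviours:

import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class DuplicateKeyDemo {
  public static void main(String[] args) throws IOException {
    ObjectMapper mapper = new ObjectMapper();
    String json = "{\"queue\":{\"name\":\"a\"},\"queue\":{\"name\":\"b\"}}";

    // default tree model: the first "queue" object is silently overwritten
    System.out.println(mapper.readTree(json)); // {"queue":{"name":"b"}}

    // streaming parser: collect every occurrence of each top-level field
    Map<String, List<String>> fields = new LinkedHashMap<>();
    try (JsonParser p = mapper.getFactory().createParser(json)) {
      p.nextToken(); // START_OBJECT
      while (p.nextToken() == JsonToken.FIELD_NAME) {
        String name = p.getCurrentName();
        p.nextToken(); // advance to the value
        fields.computeIfAbsent(name, k -> new ArrayList<>())
            .add(mapper.readTree(p).toString());
      }
    }
    System.out.println(fields); // {queue=[{"name":"a"}, {"name":"b"}]}
  }
}

When the streaming pass fails, the change above still falls back to objectMapper.readTree, so well-formed responses without duplicate keys keep their original handling.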
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/engineplugin/server/conf/EngineConnPluginConfiguration.scala b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/engineplugin/server/conf/EngineConnPluginConfiguration.scala
index 9a16d9b9e6a..0968b050c21 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/engineplugin/server/conf/EngineConnPluginConfiguration.scala
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/engineplugin/server/conf/EngineConnPluginConfiguration.scala
@@ -39,4 +39,7 @@ object EngineConnPluginConfiguration {
val EC_BML_VERSION_MAY_WITH_PREFIX_V: CommonVars[Boolean] =
CommonVars("linkis.engineconn.bml.version.may.with.prefix", true)
+ val QUEUE_PREFIX: CommonVars[String] =
+ CommonVars("wds.linkis.queue.prefix", "root.")
+
}
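
With wds.linkis.queue.prefix (default "root.") now configurable, the requester strips an already-present prefix before prepending it, so callers can pass either "ide" or "root.ide" and end up with the same real queue name. A minimal, hypothetical helper showing the same normalization (names are illustrative, not Linkis APIs):

public final class QueueNames {
  private QueueNames() {}

  // strip the prefix if the caller already supplied it, then prepend it exactly once
  public static String realQueueName(String queueName, String queuePrefix) {
    if (queueName.startsWith(queuePrefix)) {
      queueName = queueName.substring(queuePrefix.length());
    }
    return queuePrefix + queueName;
  }

  public static void main(String[] args) {
    System.out.println(realQueueName("ide", "root."));      // root.ide
    System.out.println(realQueueName("root.ide", "root.")); // root.ide
  }
}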
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/selector/rule/HotspotExclusionRule.scala b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/selector/rule/HotspotExclusionRule.scala
index 74d9383163d..8dd9d903f39 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/selector/rule/HotspotExclusionRule.scala
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/selector/rule/HotspotExclusionRule.scala
@@ -35,7 +35,13 @@ class HotspotExclusionRule extends NodeSelectRule with Logging {
override def ruleFiltering(nodes: Array[Node]): Array[Node] = {
if (AMConfiguration.NODE_SELECT_HOTSPOT_EXCLUSION_RULE) {
- randomShuffle(nodes)
+ if (
+ AMConfiguration.NODE_SELECT_HOTSPOT_EXCLUSION_SHUFFLE_RULER.defaultValue == AMConfiguration.NODE_SELECT_HOTSPOT_EXCLUSION_SHUFFLE_RULER.getValue
+ ) {
+ randomShuffle(nodes)
+ } else {
+ Random.shuffle(nodes.toSeq).toArray
+ }
} else {
nodes
}
@@ -51,8 +57,12 @@ class HotspotExclusionRule extends NodeSelectRule with Logging {
if (null == nodes || nodes.length < 3) {
return nodes
}
- val shuffleSize = if (nodes.length <= 10) {
- nodes.length / 2
+ val shuffleSize: Int = if (nodes.length == 2) {
+ 2
+ } else if (nodes.length <= 6) {
+ 3
+ } else if (nodes.length <= 10) {
+ 4
} else {
5
}
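
The hunk above only shows how shuffleSize is chosen; assuming randomShuffle applies that size to the head of the candidate list (one plausible reading of the hotspot-exclusion rule, not confirmed by this hunk), the effect can be sketched in plain Java as follows (class and method names are made up):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class HeadShuffleDemo {
  // shuffle only the leading window of the candidate list, leave the tail untouched
  static <T> List<T> headShuffle(List<T> nodes) {
    if (nodes == null || nodes.size() < 3) {
      return nodes;
    }
    int shuffleSize = nodes.size() <= 6 ? 3 : (nodes.size() <= 10 ? 4 : 5);
    List<T> copy = new ArrayList<>(nodes);
    Collections.shuffle(copy.subList(0, shuffleSize));
    return copy;
  }

  public static void main(String[] args) {
    System.out.println(headShuffle(Arrays.asList("n1", "n2", "n3", "n4", "n5", "n6", "n7")));
  }
}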
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/em/DefaultEMInfoService.scala b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/em/DefaultEMInfoService.scala
index b82e32cd18d..126d413727c 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/em/DefaultEMInfoService.scala
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/em/DefaultEMInfoService.scala
@@ -292,18 +292,32 @@ class DefaultEMInfoService extends EMInfoService with Logging {
resourceArray: Array[NodeResource],
resourceType: ResourceType
): (Resource, Resource, Resource) = {
- resourceArray.foldLeft(
- (
+ if (resourceArray.isEmpty) {
+ return (
Resource.initResource(resourceType),
Resource.initResource(resourceType),
Resource.initResource(resourceType)
)
- ) { case ((accSum, accUed, accLock), nodeResource) =>
- (
- accSum.add(nodeResource.getUsedResource.add(nodeResource.getLockedResource)),
- accUed.add(nodeResource.getUsedResource),
- accLock.add(nodeResource.getLockedResource)
- )
+ } else {
+ resourceArray.foldLeft(
+ (
+ Resource.initResource(resourceType),
+ Resource.initResource(resourceType),
+ Resource.initResource(resourceType)
+ )
+ ) { case ((accSum, accUed, accLock), nodeResource) =>
+ if (null == nodeResource.getUsedResource) {
+ nodeResource.setUsedResource(Resource.initResource(resourceType))
+ }
+ if (null == nodeResource.getLockedResource) {
+ nodeResource.setLockedResource(Resource.initResource(resourceType))
+ }
+ (
+ accSum.add(nodeResource.getUsedResource.add(nodeResource.getLockedResource)),
+ accUed.add(nodeResource.getUsedResource),
+ accLock.add(nodeResource.getLockedResource)
+ )
+ }
}
}
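
The reworked aggregation returns three zero-valued resources for an empty array and substitutes an initial resource for any null used/locked value before folding, instead of failing on the first null. A simplified, hypothetical sketch of that idea with plain numbers (not the Linkis Resource types):

import java.util.Arrays;
import java.util.List;

public class ResourceSumDemo {
  // used.get(i) / locked.get(i) are memory figures of node i; null means "not reported yet"
  static long[] aggregate(List<Long> used, List<Long> locked) {
    long sumTotal = 0, sumUsed = 0, sumLocked = 0;
    for (int i = 0; i < used.size(); i++) {
      long u = used.get(i) == null ? 0L : used.get(i);
      long l = locked.get(i) == null ? 0L : locked.get(i);
      sumTotal += u + l;
      sumUsed += u;
      sumLocked += l;
    }
    return new long[] {sumTotal, sumUsed, sumLocked};
  }

  public static void main(String[] args) {
    System.out.println(Arrays.toString(
        aggregate(Arrays.asList(1024L, 2048L), Arrays.asList(null, 512L))));
    // [3584, 3072, 512]
  }
}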
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/engine/DefaultEngineAskEngineService.scala b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/engine/DefaultEngineAskEngineService.scala
index 9240ec0dd45..06bffcc8cb6 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/engine/DefaultEngineAskEngineService.scala
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/engine/DefaultEngineAskEngineService.scala
@@ -44,7 +44,7 @@ import org.springframework.stereotype.Service
import java.net.SocketTimeoutException
import java.util
import java.util.Locale
-import java.util.concurrent.{ConcurrentHashMap, Semaphore}
+import java.util.concurrent.{ConcurrentHashMap, Semaphore, ThreadPoolExecutor}
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.JavaConverters.asScalaBufferConverter
@@ -78,10 +78,25 @@ class DefaultEngineAskEngineService
private val labelBuilderFactory = LabelBuilderFactoryContext.getLabelBuilderFactory
- private implicit val executor: ExecutionContextExecutorService =
- Utils.newCachedExecutionContext(
- AMConfiguration.ASK_ENGINE_ASYNC_MAX_THREAD_SIZE,
- "AskEngineService-Thread-"
+ private val (reuseExecutor, reuseThreadPool)
+ : (ExecutionContextExecutorService, ThreadPoolExecutor) =
+ Utils.newCachedExecutionContextWithExecutor(
+ AMConfiguration.REUSE_ENGINE_ASYNC_MAX_THREAD_SIZE,
+ "ReuseEngineService-Thread-"
+ )
+
+ private val (createExecutor, createThreadPool)
+ : (ExecutionContextExecutorService, ThreadPoolExecutor) =
+ Utils.newCachedExecutionContextWithExecutor(
+ AMConfiguration.CREATE_ENGINE_ASYNC_MAX_THREAD_SIZE,
+ "CreateEngineService-Thread-"
+ )
+
+ private val (errorSendExecutor, errorSendThreadPool)
+ : (ExecutionContextExecutorService, ThreadPoolExecutor) =
+ Utils.newCachedExecutionContextWithExecutor(
+ AMConfiguration.ASK_ENGINE_ERROR_ASYNC_MAX_THREAD_SIZE,
+ "AskEngineErrorService-Thread-"
)
private val engineCreateSemaphoreMap: java.util.Map[String, Semaphore] =
@@ -122,11 +137,74 @@ class DefaultEngineAskEngineService
}
val engineAskAsyncId = getAsyncId
+ if (!engineAskRequest.getLabels.containsKey(LabelKeyConstant.EXECUTE_ONCE_KEY)) {
+ val reuseNodeThread = Future {
+ LoggerUtils.setJobIdMDC(taskId)
+ val engineReuseRequest = new EngineReuseRequest()
+ engineReuseRequest.setLabels(engineAskRequest.getLabels)
+ engineReuseRequest.setTimeOut(engineAskRequest.getTimeOut)
+ engineReuseRequest.setUser(engineAskRequest.getUser)
+ engineReuseRequest.setProperties(engineAskRequest.getProperties)
+ val reuseNode = Utils.tryCatch(engineReuseService.reuseEngine(engineReuseRequest, sender)) {
+ t: Throwable =>
+ t match {
+ case retryException: LinkisRetryException =>
+ logger.info(
+ s"Task: $taskId user ${engineAskRequest.getUser} reuse engine failed ${t.getMessage}"
+ )
+ case _ =>
+ logger.info(
+ s"Task: $taskId user ${engineAskRequest.getUser} reuse engine failed",
+ t
+ )
+ }
+ null
+ }
+ if (reuseNode != null) {
+ logger.info(
+ s"Task: $taskId finished to ask engine for user ${engineAskRequest.getUser} by reuse node $reuseNode"
+ )
+ if (null != sender) {
+ sender.send(EngineCreateSuccess(engineAskAsyncId, reuseNode, true))
+ logger.info(
+ s"Task: $taskId has sent EngineCreateSuccess($engineAskAsyncId, reuse=true) to Entrance."
+ )
+ } else {
+ logger.warn(f"Task: $taskId will not send async using null sender.")
+ }
+ } else {
+ logger.info(
+ s"Task: $taskId reuse engine failed, will try to start a thread to async($engineAskAsyncId) createEngine, ${engineAskRequest.getCreateService}"
+ )
+ createEngine(engineAskRequest, taskId, engineAskAsyncId, sender)
+ }
+ LoggerUtils.removeJobIdMDC()
+ }(reuseExecutor)
+ logger.info(
+ s"reuseExecutor: poolSize: ${reuseThreadPool.getPoolSize}, activeCount: ${reuseThreadPool.getActiveCount}, queueSize: ${reuseThreadPool.getQueue.size()}"
+ )
+ futureDeal(reuseNodeThread, taskId, engineAskAsyncId, sender, "reuse")
+ } else {
+ createEngine(engineAskRequest, taskId, engineAskAsyncId, sender)
+ }
+ LoggerUtils.removeJobIdMDC()
+ EngineAskAsyncResponse(engineAskAsyncId, Sender.getThisServiceInstance)
+ }
+
+ private def createEngine(
+ engineAskRequest: EngineAskRequest,
+ taskId: String,
+ engineAskAsyncId: String,
+ sender: Sender
+ ): Unit = {
val createNodeThread = Future {
LoggerUtils.setJobIdMDC(taskId)
val (engineCreateKey, semaphore) =
Utils.tryAndWarn(getKeyAndSemaphore(engineAskRequest.getLabels))
Utils.tryFinally {
+ logger.info(
+ s"Task: $taskId start to async($engineAskAsyncId) createEngine, ${engineAskRequest.getCreateService}"
+ )
if (null != semaphore) {
try {
semaphore.acquire()
@@ -139,6 +217,13 @@ class DefaultEngineAskEngineService
)
}
}
+ // If the original labels contain engineInstance, remove it first (如果原来的labels含engineInstance ,先去掉)
+ engineAskRequest.getLabels.remove(LabelKeyConstant.ENGINE_INSTANCE_KEY)
+ // add the driver task id label for engine startup
+ val labels: util.Map[String, AnyRef] = engineAskRequest.getLabels
+ labels.put(LabelKeyConstant.DRIVER_TASK_KEY, taskId)
+
+ logger.info(s"Task: ${taskId} start to reuse engine.")
var reuseNode: EngineNode = null
if (!engineAskRequest.getLabels.containsKey(LabelKeyConstant.EXECUTE_ONCE_KEY)) {
val engineReuseRequest = new EngineReuseRequest()
@@ -163,45 +248,36 @@ class DefaultEngineAskEngineService
}
}
- if (null != reuseNode) {
- logger.info(
- s"Task: $taskId finished to ask engine for user ${engineAskRequest.getUser} by reuse node $reuseNode"
- )
- (reuseNode, true)
- } else {
- logger.info(
- s"Task: $taskId start to async($engineAskAsyncId) createEngine, ${engineAskRequest.getCreateService}"
- )
- // If the original labels contain engineInstance, remove it first (如果原来的labels含engineInstance ,先去掉)
- engineAskRequest.getLabels.remove(LabelKeyConstant.ENGINE_INSTANCE_KEY)
- // 添加引擎启动驱动任务id标签
- val labels: util.Map[String, AnyRef] = engineAskRequest.getLabels
- labels.put(LabelKeyConstant.DRIVER_TASK_KEY, taskId)
-
- val engineCreateRequest = new EngineCreateRequest
- engineCreateRequest.setLabels(engineAskRequest.getLabels)
- engineCreateRequest.setTimeout(engineAskRequest.getTimeOut)
- engineCreateRequest.setUser(engineAskRequest.getUser)
- engineCreateRequest.setProperties(engineAskRequest.getProperties)
- engineCreateRequest.setCreateService(engineAskRequest.getCreateService)
+ val engineCreateRequest = new EngineCreateRequest
+ engineCreateRequest.setLabels(engineAskRequest.getLabels)
+ engineCreateRequest.setTimeout(engineAskRequest.getTimeOut)
+ engineCreateRequest.setUser(engineAskRequest.getUser)
+ engineCreateRequest.setProperties(engineAskRequest.getProperties)
+ engineCreateRequest.setCreateService(engineAskRequest.getCreateService)
- val createNode = engineCreateService.createEngine(engineCreateRequest, sender)
- val timeout =
- if (engineCreateRequest.getTimeout <= 0) {
- AMConfiguration.ENGINE_START_MAX_TIME.getValue.toLong
- } else engineCreateRequest.getTimeout
- // UseEngine requires a timeout (useEngine 需要加上超时)
- val createEngineNode = getEngineNodeManager.useEngine(createNode, timeout)
- if (null == createEngineNode) {
- throw new LinkisRetryException(
- AMConstant.EM_ERROR_CODE,
- s"create engine${createNode.getServiceInstance} success, but to use engine failed"
- )
- }
+ val createNode = engineCreateService.createEngine(engineCreateRequest, sender)
+ val timeout =
+ if (engineCreateRequest.getTimeout <= 0) {
+ AMConfiguration.ENGINE_START_MAX_TIME.getValue.toLong
+ } else engineCreateRequest.getTimeout
+ // UseEngine requires a timeout (useEngine 需要加上超时)
+ val createEngineNode = getEngineNodeManager.useEngine(createNode, timeout)
+ if (null == createEngineNode) {
+ throw new LinkisRetryException(
+ AMConstant.EM_ERROR_CODE,
+ s"create engine${createNode.getServiceInstance} success, but to use engine failed"
+ )
+ }
+ logger.info(
+ s"Task: $taskId finished to ask engine for user ${engineAskRequest.getUser} by create node $createEngineNode"
+ )
+ if (null != sender) {
+ sender.send(EngineCreateSuccess(engineAskAsyncId, createEngineNode))
logger.info(
- s"Task: $taskId finished to ask engine for user ${engineAskRequest.getUser} by create node $createEngineNode"
+ s"Task: $taskId has sent EngineCreateSuccess($engineAskAsyncId, reuse=false) to Entrance."
)
- (createEngineNode, false)
+ } else {
+ logger.warn(s"Task: $taskId will not send async using null sender.")
}
} {
Utils.tryAndWarn {
@@ -213,30 +289,23 @@ class DefaultEngineAskEngineService
LoggerUtils.removeJobIdMDC()
}
- }
+ }(createExecutor)
- createNodeThread.onComplete {
- case Success((engineNode, isReuse)) =>
- LoggerUtils.setJobIdMDC(taskId)
- Utils.tryFinally {
- if (isReuse) {
- logger.info(
- s"Task: $taskId Success to async($engineAskAsyncId) reuseEngine $engineNode"
- )
- } else {
- logger.info(
- s"Task: $taskId Success to async($engineAskAsyncId) createEngine $engineNode"
- )
- }
- if (null != sender) {
- sender.send(EngineCreateSuccess(engineAskAsyncId, engineNode, isReuse))
+ logger.info(
+ s"createExecutor: poolSize: ${createThreadPool.getPoolSize}, activeCount: ${createThreadPool.getActiveCount}, queueSize: ${createThreadPool.getQueue.size()}"
+ )
+ futureDeal(createNodeThread, taskId, engineAskAsyncId, sender, "create")
+ }
- } else {
- logger.info("Will not send async useing null sender.")
- }
- } {
- LoggerUtils.removeJobIdMDC()
- }
+ private def futureDeal(
+ future: Future[_],
+ taskId: String,
+ engineAskAsyncId: String,
+ sender: Sender,
+ functionStr: String
+ ): Unit = {
+ future.onComplete {
+ case Success(_) => ()
case Failure(exception) =>
LoggerUtils.setJobIdMDC(taskId)
val retryFlag = exception match {
@@ -251,7 +320,7 @@ class DefaultEngineAskEngineService
}
}
val msg =
- s"Task: $taskId Failed to async($engineAskAsyncId) create/reuse Engine, can Retry $retryFlag";
+ s"Task: $taskId Failed to async($engineAskAsyncId) $functionStr EngineConn, can Retry $retryFlag";
if (!retryFlag) {
logger.info(msg, exception)
} else {
@@ -269,9 +338,10 @@ class DefaultEngineAskEngineService
} {
LoggerUtils.removeJobIdMDC()
}
- }
- LoggerUtils.removeJobIdMDC()
- EngineAskAsyncResponse(engineAskAsyncId, Sender.getThisServiceInstance)
+ }(errorSendExecutor)
+ logger.info(
+ s"errorSendExecutor: poolSize: ${errorSendThreadPool.getPoolSize}, activeCount: ${errorSendThreadPool.getActiveCount}, queueSize: ${errorSendThreadPool.getQueue.size()}"
+ )
}
/**
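
The single AskEngineService pool is split above into three separately named and sized pools (reuse, create, error notification), so a burst of slow createEngine calls cannot starve reuse attempts or failure callbacks; the real sizes come from AMConfiguration. A rough sketch of the same idea with plain java.util.concurrent (sizes and names below are placeholders, not the actual configuration values):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

public class AskEnginePools {
  // build a fixed-size pool whose threads carry a recognizable name prefix
  static ExecutorService namedPool(String prefix, int maxThreads) {
    AtomicInteger seq = new AtomicInteger();
    ThreadFactory factory = r -> new Thread(r, prefix + seq.incrementAndGet());
    return Executors.newFixedThreadPool(maxThreads, factory);
  }

  public static void main(String[] args) {
    // sizes are placeholders, not the AMConfiguration defaults
    ExecutorService reusePool = namedPool("ReuseEngineService-Thread-", 20);
    ExecutorService createPool = namedPool("CreateEngineService-Thread-", 50);
    ExecutorService errorPool = namedPool("AskEngineErrorService-Thread-", 10);

    reusePool.submit(() -> System.out.println(Thread.currentThread().getName() + " try reuse"));
    createPool.submit(() -> System.out.println(Thread.currentThread().getName() + " create engine"));
    errorPool.submit(() -> System.out.println(Thread.currentThread().getName() + " send failure response"));

    reusePool.shutdown();
    createPool.shutdown();
    errorPool.shutdown();
  }
}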
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/engine/DefaultEngineCreateService.scala b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/engine/DefaultEngineCreateService.scala
index 8ac1e53a96a..27a961b852a 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/engine/DefaultEngineCreateService.scala
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/engine/DefaultEngineCreateService.scala
@@ -18,16 +18,20 @@
package org.apache.linkis.manager.am.service.engine
import org.apache.linkis.common.ServiceInstance
+import org.apache.linkis.common.conf.Configuration
import org.apache.linkis.common.exception.LinkisRetryException
import org.apache.linkis.common.utils.{ByteTimeUtils, Logging, Utils}
import org.apache.linkis.engineplugin.server.service.EngineConnResourceFactoryService
import org.apache.linkis.governance.common.conf.GovernanceCommonConf
import org.apache.linkis.governance.common.conf.GovernanceCommonConf.ENGINE_CONN_MANAGER_SPRING_NAME
+import org.apache.linkis.governance.common.entity.job.JobRequest
+import org.apache.linkis.governance.common.protocol.job.{JobReqQuery, JobReqUpdate}
import org.apache.linkis.governance.common.utils.JobUtils
import org.apache.linkis.manager.am.conf.{AMConfiguration, EngineConnConfigurationService}
import org.apache.linkis.manager.am.exception.AMErrorException
import org.apache.linkis.manager.am.label.{EngineReuseLabelChooser, LabelChecker}
import org.apache.linkis.manager.am.selector.{ECAvailableRule, NodeSelector}
+import org.apache.linkis.manager.am.utils.AMUtils
import org.apache.linkis.manager.am.vo.CanCreateECRes
import org.apache.linkis.manager.common.constant.AMConstant
import org.apache.linkis.manager.common.entity.enumeration.NodeStatus
@@ -41,15 +45,17 @@ import org.apache.linkis.manager.engineplugin.common.launch.entity.{
}
import org.apache.linkis.manager.engineplugin.common.resource.TimeoutEngineResourceRequest
import org.apache.linkis.manager.label.builder.factory.LabelBuilderFactoryContext
+import org.apache.linkis.manager.label.conf.LabelCommonConfig
import org.apache.linkis.manager.label.entity.{EngineNodeLabel, Label}
-import org.apache.linkis.manager.label.entity.engine.EngineTypeLabel
+import org.apache.linkis.manager.label.entity.engine.{EngineType, EngineTypeLabel}
import org.apache.linkis.manager.label.entity.node.AliasServiceInstanceLabel
import org.apache.linkis.manager.label.service.{NodeLabelService, UserLabelService}
-import org.apache.linkis.manager.label.utils.LabelUtils
+import org.apache.linkis.manager.label.utils.{LabelUtil, LabelUtils}
import org.apache.linkis.manager.persistence.NodeMetricManagerPersistence
import org.apache.linkis.manager.rm.{AvailableResource, NotEnoughResource}
import org.apache.linkis.manager.rm.service.ResourceManager
import org.apache.linkis.manager.service.common.label.LabelFilter
+import org.apache.linkis.protocol.constants.TaskConstant
import org.apache.linkis.rpc.Sender
import org.apache.linkis.rpc.message.annotation.Receiver
import org.apache.linkis.server.BDPJettyServerHelper
@@ -146,7 +152,7 @@ class DefaultEngineCreateService
if (null == emScoreNodeList || emScoreNodeList.isEmpty) {
throw new LinkisRetryException(
AMConstant.EM_ERROR_CODE,
- s" The em of labels ${engineCreateRequest.getLabels} not found"
+ s" The ecm of labels ${engineCreateRequest.getLabels} not found"
)
}
@@ -186,6 +192,7 @@ class DefaultEngineCreateService
if (engineCreateRequest.getProperties == null) {
engineCreateRequest.setProperties(new util.HashMap[String, String]())
}
+
val resource =
generateResource(
engineCreateRequest.getProperties,
@@ -249,6 +256,10 @@ class DefaultEngineCreateService
)
engineNode.setTicketId(resourceTicketId)
+ val params: String = BDPJettyServerHelper.gson.toJson(engineCreateRequest.getProperties)
+ logger.info(s"Task: $taskId finished to create engineConn with params: $params")
+ engineNode.setParams(params)
+
// 7.Update persistent information: including inserting engine/metrics
Utils.tryCatch(getEngineNodeManager.updateEngineNode(oldServiceInstance, engineNode)) { t =>
logger.warn(s"Failed to update engineNode $engineNode", t)
@@ -278,8 +289,20 @@ class DefaultEngineCreateService
s"Failed to update engineNode: ${t.getMessage}"
)
}
-
- // 8. Add the Label of EngineConn, and add the Alias of engineConn
+ if (Configuration.METRICS_INCREMENTAL_UPDATE_ENABLE.getValue) {
+ val emInstance = engineNode.getServiceInstance.getInstance
+ val ecmInstance = engineNode.getEMNode.getServiceInstance.getInstance
+ // 8. Update job history metrics after successful engine creation (executed asynchronously)
+ AMUtils.updateMetricsAsync(
+ taskId,
+ resourceTicketId,
+ emInstance,
+ ecmInstance,
+ null,
+ isReuse = false
+ )
+ }
+ // 9. Add the Label of EngineConn, and add the Alias of engineConn
val engineConnAliasLabel = labelBuilderFactory.createLabel(classOf[AliasServiceInstanceLabel])
engineConnAliasLabel.setAlias(GovernanceCommonConf.ENGINE_CONN_SPRING_NAME.getValue)
labelList.add(engineConnAliasLabel)
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/engine/DefaultEngineInfoService.scala b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/engine/DefaultEngineInfoService.scala
index cd40e8ebd2e..b5e0e055da4 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/engine/DefaultEngineInfoService.scala
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/engine/DefaultEngineInfoService.scala
@@ -93,18 +93,24 @@ class DefaultEngineInfoService extends AbstractEngineService with EngineInfoServ
*/
override def listEMEngines(em: EMNode): java.util.List[EngineNode] = {
val nodes = emNodeManager.listEngines(em)
- val resourceInfo =
- resourceManager.getResourceInfo(nodes.asScala.map(_.getServiceInstance).toArray).resourceInfo
- val resourceInfoMap = resourceInfo.asScala.map(r => (r.getServiceInstance.toString, r)).toMap
- nodes.asScala.map { node =>
- resourceInfoMap
- .get(node.getServiceInstance.toString)
- .map(_.getNodeResource)
- .foreach(node.setNodeResource)
- node.setLabels(labelService.getNodeLabels(node.getServiceInstance))
- node
+ if (nodes.isEmpty) {
+ nodes
+ } else {
+ val resourceInfo =
+ resourceManager
+ .getResourceInfo(nodes.asScala.map(_.getServiceInstance).toArray)
+ .resourceInfo
+ val resourceInfoMap = resourceInfo.asScala.map(r => (r.getServiceInstance.toString, r)).toMap
+ nodes.asScala.map { node =>
+ resourceInfoMap
+ .get(node.getServiceInstance.toString)
+ .map(_.getNodeResource)
+ .foreach(node.setNodeResource)
+ node.setLabels(labelService.getNodeLabels(node.getServiceInstance))
+ node
+ }
+ nodes
}
- nodes
}
@Receiver
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/engine/DefaultEngineReuseService.scala b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/engine/DefaultEngineReuseService.scala
index cbc7c06eb76..fb79c9e062f 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/engine/DefaultEngineReuseService.scala
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/engine/DefaultEngineReuseService.scala
@@ -17,25 +17,37 @@
package org.apache.linkis.manager.am.service.engine
+import org.apache.linkis.common.conf.Configuration
import org.apache.linkis.common.exception.LinkisRetryException
-import org.apache.linkis.common.utils.{Logging, Utils}
+import org.apache.linkis.common.utils.{CodeAndRunTypeUtils, Logging, Utils}
import org.apache.linkis.governance.common.conf.GovernanceCommonConf
import org.apache.linkis.governance.common.utils.JobUtils
import org.apache.linkis.manager.am.conf.AMConfiguration
import org.apache.linkis.manager.am.label.EngineReuseLabelChooser
import org.apache.linkis.manager.am.selector.NodeSelector
+import org.apache.linkis.manager.am.service.ECResourceInfoService
import org.apache.linkis.manager.am.utils.AMUtils
+import org.apache.linkis.manager.common.conf.RMConfiguration
import org.apache.linkis.manager.common.constant.AMConstant
import org.apache.linkis.manager.common.entity.enumeration.NodeStatus
-import org.apache.linkis.manager.common.entity.node.EngineNode
+import org.apache.linkis.manager.common.entity.node.{EngineNode, ScoreServiceInstance}
import org.apache.linkis.manager.common.protocol.engine.{EngineReuseRequest, EngineStopRequest}
import org.apache.linkis.manager.common.utils.ManagerUtils
+import org.apache.linkis.manager.engineplugin.common.conf.EngineConnPluginConf.{
+ PYTHON_VERSION_KEY,
+ SPARK_PYTHON_VERSION_KEY
+}
import org.apache.linkis.manager.label.builder.factory.LabelBuilderFactoryContext
import org.apache.linkis.manager.label.entity.{EngineNodeLabel, Label}
-import org.apache.linkis.manager.label.entity.engine.ReuseExclusionLabel
+import org.apache.linkis.manager.label.entity.engine.{
+ EngineTypeLabel,
+ ReuseExclusionLabel,
+ UserCreatorLabel
+}
import org.apache.linkis.manager.label.entity.node.AliasServiceInstanceLabel
import org.apache.linkis.manager.label.service.{NodeLabelService, UserLabelService}
import org.apache.linkis.manager.label.utils.{LabelUtil, LabelUtils}
+import org.apache.linkis.manager.persistence.NodeManagerPersistence
import org.apache.linkis.manager.service.common.label.LabelFilter
import org.apache.linkis.rpc.Sender
import org.apache.linkis.rpc.message.annotation.Receiver
@@ -52,6 +64,8 @@ import java.util.concurrent.{TimeoutException, TimeUnit}
import scala.collection.JavaConverters._
import scala.concurrent.duration.Duration
+import com.google.common.cache.{Cache, CacheBuilder}
+
@Service
class DefaultEngineReuseService extends AbstractEngineService with EngineReuseService with Logging {
@@ -76,6 +90,32 @@ class DefaultEngineReuseService extends AbstractEngineService with EngineReuseSe
@Autowired
private var labelFilter: LabelFilter = _
+ @Autowired
+ private var nodeManagerPersistence: NodeManagerPersistence = _
+
+ @Autowired
+ private var ecResourceInfoService: ECResourceInfoService = _
+
+ private val instanceCache: Cache[String, util.Map[ScoreServiceInstance, util.List[Label[_]]]] =
+ CacheBuilder
+ .newBuilder()
+ .maximumSize(AMConfiguration.ENGINE_REUSE_CACHE_MAX_SIZE.getValue)
+ .expireAfterWrite(
+ AMConfiguration.ENGINE_REUSE_CACHE_EXPIRE_TIME.getValue.toLong,
+ TimeUnit.MILLISECONDS
+ )
+ .build()
+
+ private val engineNodesCache: Cache[String, Array[EngineNode]] =
+ CacheBuilder
+ .newBuilder()
+ .maximumSize(AMConfiguration.ENGINE_REUSE_CACHE_MAX_SIZE.getValue)
+ .expireAfterWrite(
+ AMConfiguration.ENGINE_REUSE_CACHE_EXPIRE_TIME.getValue.toLong,
+ TimeUnit.MILLISECONDS
+ )
+ .build()
+
/**
* 1. Obtain the EC corresponding to all labels 2. Judging reuse exclusion tags and fixed engine
* labels 3. Select the EC with the lowest load available 4. Lock the corresponding EC
@@ -90,13 +130,11 @@ class DefaultEngineReuseService extends AbstractEngineService with EngineReuseSe
val taskId = JobUtils.getJobIdFromStringMap(engineReuseRequest.getProperties)
logger.info(s"Task $taskId Start to reuse Engine for request: $engineReuseRequest")
val labelBuilderFactory = LabelBuilderFactoryContext.getLabelBuilderFactory
+ val labels: util.List[Label[_]] = labelBuilderFactory.getLabels(engineReuseRequest.getLabels)
val labelList = LabelUtils
- .distinctLabel(
- labelBuilderFactory.getLabels(engineReuseRequest.getLabels),
- userLabelService.getUserLabels(engineReuseRequest.getUser)
- )
+ .distinctLabel(labels, userLabelService.getUserLabels(engineReuseRequest.getUser))
.asScala
-
+ logger.info(s"Task ${taskId} labelList size: ${labelList.size}");
val exclusionInstances: Array[String] =
labelList.find(_.isInstanceOf[ReuseExclusionLabel]) match {
case Some(l) =>
@@ -117,6 +155,7 @@ class DefaultEngineReuseService extends AbstractEngineService with EngineReuseSe
}
var filterLabelList = labelList.filter(_.isInstanceOf[EngineNodeLabel]).asJava
+ logger.info(s"Task ${taskId} filterLabelList size: ${filterLabelList.size}");
val engineConnAliasLabel = labelBuilderFactory.createLabel(classOf[AliasServiceInstanceLabel])
engineConnAliasLabel.setAlias(GovernanceCommonConf.ENGINE_CONN_SPRING_NAME.getValue)
@@ -129,7 +168,31 @@ class DefaultEngineReuseService extends AbstractEngineService with EngineReuseSe
}
}
- val instances = nodeLabelService.getScoredNodeMapsByLabels(filterLabelList)
+ val userCreatorLabel: UserCreatorLabel = LabelUtil.getUserCreatorLabel(labels)
+ val engineTypeLabel: EngineTypeLabel = LabelUtil.getEngineTypeLabel(labels)
+ val cacheKey: String = userCreatorLabel.getStringValue + "_" + engineTypeLabel.getEngineType
+
+ val cacheEnable: Boolean = AMConfiguration.ENGINE_REUSE_CACHE_SUPPORT_ENGINES.getValue.contains(
+ engineTypeLabel.getEngineType
+ ) && AMConfiguration.ENGINE_REUSE_ENABLE_CACHE.getValue
+
+ val shuffEnable: Boolean = AMConfiguration.ENGINE_REUSE_SHUFF_SUPPORT_ENGINES.getValue.contains(
+ engineTypeLabel.getEngineType
+ ) && RMConfiguration.LABEL_SERVICE_INSTANCE_SHUFF_SWITCH.getValue
+
+ val instances = if (cacheEnable) {
+ var localInstances: util.Map[ScoreServiceInstance, util.List[Label[_]]] =
+ instanceCache.getIfPresent(cacheKey)
+ if (localInstances == null) this synchronized {
+ localInstances = instanceCache.getIfPresent(cacheKey)
+ if (localInstances == null) {
+ localInstances =
+ nodeLabelService.getScoredNodeMapsByLabelsReuse(filterLabelList, shuffEnable)
+ instanceCache.put(cacheKey, localInstances)
+ }
+ }
+ localInstances
+ } else nodeLabelService.getScoredNodeMapsByLabelsReuse(filterLabelList, shuffEnable)
if (null != instances && null != exclusionInstances && exclusionInstances.nonEmpty) {
val instancesKeys = instances.asScala.keys.toArray
@@ -150,11 +213,43 @@ class DefaultEngineReuseService extends AbstractEngineService with EngineReuseSe
s"No engine can be reused, cause from db is null"
)
}
- var engineScoreList =
- getEngineNodeManager.getEngineNodes(instances.asScala.keys.toSeq.toArray)
+
+ var engineScoreList = if (cacheEnable) {
+ var localEngineList: Array[EngineNode] = engineNodesCache.getIfPresent(cacheKey)
+ if (localEngineList == null) this synchronized {
+ localEngineList = engineNodesCache.getIfPresent(cacheKey)
+ if (localEngineList == null) {
+ localEngineList =
+ getEngineNodeManager.getEngineNodes(instances.asScala.keys.toSeq.toArray)
+ engineNodesCache.put(cacheKey, localEngineList)
+ }
+ }
+ localEngineList
+ } else getEngineNodeManager.getEngineNodes(instances.asScala.keys.toSeq.toArray)
+ logger.info(s"Task ${taskId} engineScoreList size: ${engineScoreList.length}")
+
+ // reuse EC according to template name
+ val confTemplateNameKey = "ec.resource.name"
+ val templateName: String =
+ getValueByKeyFromProps(confTemplateNameKey, engineReuseRequest.getProperties)
+ if (
+ StringUtils.isNotBlank(templateName) && AMConfiguration.EC_REUSE_WITH_TEMPLATE_RULE_ENABLE
+ ) {
+ engineScoreList = engineScoreList
+ .filter(engine => engine.getNodeStatus == NodeStatus.Unlock)
+ .filter(engine => {
+ val oldTemplateName: String =
+ getValueByKeyFromProps(confTemplateNameKey, parseParamsToMap(engine.getParams))
+ templateName.equalsIgnoreCase(oldTemplateName)
+ })
+ logger.info(s"${engineScoreList.length} engines can be reused after templateName filtering.")
+ }
// get the required resource
if (AMConfiguration.EC_REUSE_WITH_RESOURCE_RULE_ENABLE) {
+ logger.info(
+ s"Task ${taskId} start to filter resources, the engine size: ${engineScoreList.length}"
+ );
val labels: util.List[Label[_]] =
engineCreateService.buildLabel(engineReuseRequest.getLabels, engineReuseRequest.getUser)
if (engineReuseRequest.getProperties == null) {
@@ -172,21 +267,45 @@ class DefaultEngineReuseService extends AbstractEngineService with EngineReuseSe
labelFilter.choseEngineLabel(labels),
AMConfiguration.ENGINE_START_MAX_TIME.getValue.toLong
)
+ val pythonVersion: String = getPythonVersion(engineReuseRequest.getProperties)
+
+ // only check python version compatibility for python-related engines
+ val codeType = LabelUtil.getCodeType(labels)
+ val languageType = CodeAndRunTypeUtils.getLanguageTypeByCodeType(codeType)
+ val pythonFlag: Boolean = languageType == CodeAndRunTypeUtils.LANGUAGE_TYPE_PYTHON
// filter out engines whose resources do not meet the request
engineScoreList = engineScoreList
.filter(engine => engine.getNodeStatus == NodeStatus.Unlock)
.filter(engine => {
+ val enginePythonVersion: String = getPythonVersion(parseParamsToMap(engine.getParams))
+ var pythonVersionMatch: Boolean = true
+ if (
+ StringUtils.isNotBlank(pythonVersion) && StringUtils
+ .isNotBlank(enginePythonVersion) && pythonFlag
+ ) {
+ pythonVersionMatch = pythonVersion.equalsIgnoreCase(enginePythonVersion)
+ }
+ if (!pythonVersionMatch) {
+ logger.info(
+ s"will not reuse ${engine.getServiceInstance}, because engine python version $enginePythonVersion does not match requested python version $pythonVersion"
+ )
+ }
if (engine.getNodeResource.getUsedResource != null) {
// reuse the engine only if its used resource satisfies the requested resource
- engine.getNodeResource.getUsedResource.notLess(resource.getMaxResource)
+ pythonVersionMatch && engine.getNodeResource.getUsedResource
+ .notLess(resource.getMaxResource)
} else {
// the engine is still starting, so compare its locked resource; the final reuse decision follows the existing reuse logic
- engine.getNodeResource.getLockedResource.notLess(resource.getMaxResource)
+ pythonVersionMatch && engine.getNodeResource.getLockedResource
+ .notLess(resource.getMaxResource)
}
})
}
+ logger.info(
+ s"Task ${taskId} end filter resources, the engine size: ${engineScoreList.length}"
+ );
if (engineScoreList.isEmpty) {
throw new LinkisRetryException(
AMConstant.ENGINE_ERROR_CODE,
@@ -264,7 +383,46 @@ class DefaultEngineReuseService extends AbstractEngineService with EngineReuseSe
.toJson(engine) + " from engineLabelMap : " + AMUtils.GSON.toJson(instances)
)
}
+ if (Configuration.METRICS_INCREMENTAL_UPDATE_ENABLE.getValue) {
+ val engineNode =
+ ecResourceInfoService.getECResourceInfoRecordByInstance(
+ engine.getServiceInstance.getInstance
+ )
+ // update metrics asynchronously
+ AMUtils.updateMetricsAsync(
+ taskId,
+ engineNode.getTicketId,
+ engineNode.getServiceInstance,
+ engineNode.getEcmInstance,
+ engineNode.getLogDirSuffix,
+ isReuse = true
+ )
+ }
engine
}
+ private def parseParamsToMap(params: String) = {
+ if (StringUtils.isNotBlank(params)) {
+ AMUtils.GSON.fromJson(params, classOf[util.Map[String, String]])
+ } else {
+ null
+ }
+ }
+
+ private def getValueByKeyFromProps(key: String, paramsMap: util.Map[String, String]) = {
+ if (paramsMap != null) {
+ paramsMap.getOrDefault(key, "")
+ } else {
+ ""
+ }
+ }
+
+ private def getPythonVersion(prop: util.Map[String, String]): String = {
+ var pythonVersion: String = getValueByKeyFromProps(PYTHON_VERSION_KEY, prop)
+ if (StringUtils.isBlank(pythonVersion)) {
+ pythonVersion = getValueByKeyFromProps(SPARK_PYTHON_VERSION_KEY, prop)
+ }
+ pythonVersion
+ }
+
}
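
The reuse path above caches the scored-instance map and the engine node list per "<userCreator>_<engineType>" key in size- and time-bounded Guava caches, with a double-checked lookup so concurrent cache misses compute the expensive lookup only once. A compact, hypothetical sketch of that pattern (the limits are placeholders for the AMConfiguration.ENGINE_REUSE_CACHE_* values):

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

public class ReuseInstanceCache {
  // limits below stand in for ENGINE_REUSE_CACHE_MAX_SIZE / ENGINE_REUSE_CACHE_EXPIRE_TIME
  private final Cache<String, List<String>> instanceCache =
      CacheBuilder.newBuilder()
          .maximumSize(1000)
          .expireAfterWrite(5000, TimeUnit.MILLISECONDS)
          .build();

  List<String> getInstances(String cacheKey, Supplier<List<String>> expensiveLookup) {
    List<String> local = instanceCache.getIfPresent(cacheKey);
    if (local == null) {
      synchronized (this) {
        local = instanceCache.getIfPresent(cacheKey); // re-check under the lock
        if (local == null) {
          local = expensiveLookup.get(); // e.g. the scored-node lookup
          instanceCache.put(cacheKey, local);
        }
      }
    }
    return local;
  }
}

Guava's Cache.get(key, Callable) would give the same single-computation guarantee without an explicit lock; the explicit double-check simply mirrors the structure used in the patch.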
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/engine/DefaultEngineStopService.scala b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/engine/DefaultEngineStopService.scala
index 3c6d7eaffd6..7dab84f332c 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/engine/DefaultEngineStopService.scala
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/service/engine/DefaultEngineStopService.scala
@@ -290,24 +290,28 @@ class DefaultEngineStopService extends AbstractEngineService with EngineStopServ
dealEngineByEngineNode(engineNodes.toList, userName)
}
// kill EMnode by user creator
- if (StringUtils.isNotBlank(engineType) && !creator.equals(Configuration.GLOBAL_CONF_SYMBOL)) {
- val filterEngineNode = engineNodes
- .filter(_.getOwner.equals(userName))
- .filter(node => {
- var filterResult = false
- if (!node.getLabels.isEmpty) {
- val userCreator = LabelUtil.getUserCreatorLabel(node.getLabels)
- val engineTypeLabel = LabelUtil.getEngineTypeLabel(node.getLabels).getStringValue
- if (
- userCreator.getUser.equals(userName) && userCreator.getCreator
- .equals(creator) && engineTypeLabel.equals(engineType)
- ) {
- filterResult = true
- }
- }
- filterResult
- })
- .toList
+ if (StringUtils.isNotBlank(engineType)) {
+ val filterEngineNode = creator match {
+ case Configuration.GLOBAL_CONF_SYMBOL =>
+ engineNodes
+ .filter(_.getOwner.equals(userName))
+ .filter(!_.getLabels.isEmpty)
+ .filter(node =>
+ LabelUtil.getUserCreatorLabel(node.getLabels).getUser.equals(userName)
+ && LabelUtil.getEngineTypeLabel(node.getLabels).getEngineType.equals(engineType)
+ )
+ .toList
+ case _ =>
+ engineNodes
+ .filter(_.getOwner.equals(userName))
+ .filter(!_.getLabels.isEmpty)
+ .filter(node =>
+ LabelUtil.getUserCreatorLabel(node.getLabels).getUser.equals(userName)
+ && LabelUtil.getUserCreatorLabel(node.getLabels).getCreator.equals(creator)
+ && LabelUtil.getEngineTypeLabel(node.getLabels).getStringValue.equals(engineType)
+ )
+ .toList
+ }
dealEngineByEngineNode(filterEngineNode, userName)
}
}
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/utils/AMUtils.scala b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/utils/AMUtils.scala
index a2f4ad97ae4..89084ebe9a3 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/utils/AMUtils.scala
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/am/utils/AMUtils.scala
@@ -17,6 +17,13 @@
package org.apache.linkis.manager.am.utils
+import org.apache.linkis.common.conf.Configuration
+import org.apache.linkis.common.exception.ErrorException
+import org.apache.linkis.common.utils.{Logging, Utils}
+import org.apache.linkis.governance.common.constant.job.JobRequestConstants
+import org.apache.linkis.governance.common.entity.job.JobRequest
+import org.apache.linkis.governance.common.protocol.job.{JobReqQuery, JobReqUpdate, JobRespProtocol}
+import org.apache.linkis.governance.common.utils.ECPathUtils
import org.apache.linkis.manager.am.vo.{AMEngineNodeVo, EMNodeVo}
import org.apache.linkis.manager.common.entity.enumeration.NodeStatus
import org.apache.linkis.manager.common.entity.node.{EMNode, EngineNode}
@@ -26,20 +33,26 @@ import org.apache.linkis.manager.common.entity.resource.{
ResourceType
}
import org.apache.linkis.manager.label.entity.engine.EngineTypeLabel
+import org.apache.linkis.manager.label.utils.{LabelUtil, LabelUtils}
+import org.apache.linkis.protocol.constants.TaskConstant
+import org.apache.linkis.rpc.Sender
import org.apache.linkis.server.BDPJettyServerHelper
import org.apache.commons.lang3.StringUtils
+import java.io.File
import java.util
import scala.collection.JavaConverters._
import com.google.gson.JsonObject
-object AMUtils {
+object AMUtils extends Logging {
lazy val GSON = BDPJettyServerHelper.gson
+ private val SUCCESS_FLAG = 0
+
val mapper = BDPJettyServerHelper.jacksonJson
def copyToEMVo(EMNodes: Array[EMNode]): util.ArrayList[EMNodeVo] = {
@@ -167,8 +180,6 @@ object AMUtils {
AMEngineNodeVo.setLabels(node.getLabels)
AMEngineNodeVo.setApplicationName(node.getServiceInstance.getApplicationName)
AMEngineNodeVo.setInstance(node.getServiceInstance.getInstance)
- AMEngineNodeVo.setMappingHost(node.getServiceInstance.getMappingHost)
- AMEngineNodeVo.setMappingPorts(node.getServiceInstance.getMappingPorts)
if (null != node.getEMNode) {
AMEngineNodeVo.setEmInstance(node.getEMNode.getServiceInstance.getInstance)
}
@@ -309,4 +320,103 @@ object AMUtils {
}
}
+ def getTaskByTaskID(taskID: Long): JobRequest = Utils.tryCatch {
+ val jobRequest = new JobRequest()
+ jobRequest.setId(taskID)
+ jobRequest.setSource(null)
+ val jobReqQuery = JobReqQuery(jobRequest)
+ Sender
+ .getSender(Configuration.JOBHISTORY_SPRING_APPLICATION_NAME.getValue)
+ .ask(jobReqQuery) match {
+ case response: JobRespProtocol if response.getStatus == SUCCESS_FLAG =>
+ response.getData.get(JobRequestConstants.JOB_HISTORY_LIST) match {
+ case tasks: util.List[JobRequest] if !tasks.isEmpty => tasks.get(0)
+ case _ => null
+ }
+ case _ => null
+ }
+ } { case e: Exception =>
+ throw new RuntimeException(s"Failed to get task by ID: $taskID", e)
+ }
+
+ def updateMetrics(
+ taskId: String,
+ resourceTicketId: String,
+ emInstance: String,
+ ecmInstance: String,
+ engineLogPath: String,
+ isReuse: Boolean
+ ): Unit =
+ Utils.tryCatch {
+ if (taskId != null) {
+ val job = getTaskByTaskID(taskId.toLong)
+ val engineMetrics = job.getMetrics
+ val engineconnMap = new util.HashMap[String, Object]
+ val ticketIdMap = new util.HashMap[String, Object]
+ ticketIdMap.put(TaskConstant.ENGINE_INSTANCE, emInstance)
+ ticketIdMap.put(TaskConstant.TICKET_ID, resourceTicketId)
+ engineconnMap.put(resourceTicketId, ticketIdMap)
+ engineMetrics.put(TaskConstant.JOB_ENGINECONN_MAP, engineconnMap)
+ engineMetrics.put(TaskConstant.ECM_INSTANCE, ecmInstance: String)
+ engineMetrics.put(TaskConstant.ENGINE_INSTANCE, emInstance)
+ val pathSuffix = if (isReuse && StringUtils.isNotBlank(engineLogPath)) {
+ engineLogPath
+ } else {
+ ECPathUtils.getECWOrkDirPathSuffix(
+ job.getExecuteUser,
+ resourceTicketId,
+ LabelUtil.getEngineType(job.getLabels)
+ ) + File.separator + "logs"
+ }
+ engineMetrics.put(TaskConstant.ENGINE_LOG_PATH, pathSuffix)
+ // update metrics in JobHistory via RPC
+ job.setMetrics(engineMetrics)
+ val jobReqUpdate = JobReqUpdate(job)
+ // send the RPC request to the JobHistory service
+ val sender: Sender =
+ Sender.getSender(Configuration.JOBHISTORY_SPRING_APPLICATION_NAME.getValue)
+ sender.ask(jobReqUpdate)
+ } else {
+ logger.debug("No taskId found in properties, skip updating job history metrics")
+ }
+ } { t =>
+ logger.warn(s"Failed to update job history metrics for engine ${emInstance}", t)
+ }
+
+ /**
+ * Asynchronously update job history metrics
+ * @param taskId
+ * task id
+ * @param resourceTicketId
+ * resource ticket id
+ * @param emInstance
+ * engine instance
+ * @param ecmInstance
+ * ECM instance
+ * @param engineLogPath
+ * engine log path
+ * @param isReuse
+ * whether the engine is reused
+ */
+ def updateMetricsAsync(
+ taskId: String,
+ resourceTicketId: String,
+ emInstance: String,
+ ecmInstance: String,
+ engineLogPath: String,
+ isReuse: Boolean
+ ): Unit = {
+ import scala.concurrent.Future
+ import scala.util.{Failure, Success}
+
+ Future {
+ updateMetrics(taskId, resourceTicketId, emInstance, ecmInstance, engineLogPath, isReuse)
+ }(Utils.newCachedExecutionContext(1, "UpdateMetrics-Thread-")).onComplete {
+ case Success(_) =>
+ logger.debug(s"Task: $taskId metrics update completed successfully for engine: $emInstance")
+ case Failure(t) =>
+ logger.warn(s"Task: $taskId metrics update failed for engine: $emInstance", t)
+ }(Utils.newCachedExecutionContext(1, "UpdateMetrics-Thread-"))
+ }
+
}
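
updateMetricsAsync above fires the JobHistory RPC off the calling thread and only logs the outcome, so engine creation and reuse are never blocked by a slow history service. A hypothetical sketch of the same fire-and-forget shape, expressed with one shared single-thread executor (names and the 200 ms sleep are demo-only):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class MetricsUpdater {
  // one shared, named, single-thread executor for all metrics updates
  private static final ExecutorService POOL =
      Executors.newSingleThreadExecutor(r -> {
        Thread t = new Thread(r, "UpdateMetrics-Thread-1");
        t.setDaemon(true);
        return t;
      });

  static void updateMetricsAsync(String taskId, Runnable rpcUpdate) {
    CompletableFuture.runAsync(rpcUpdate, POOL).whenComplete((ok, err) -> {
      if (err != null) {
        System.err.println("Task: " + taskId + " metrics update failed: " + err);
      } else {
        System.out.println("Task: " + taskId + " metrics update completed");
      }
    });
  }

  public static void main(String[] args) throws InterruptedException {
    updateMetricsAsync("12345", () -> System.out.println("calling JobHistory over RPC ..."));
    Thread.sleep(200); // demo only: give the daemon thread time to finish
    POOL.shutdown();
  }
}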
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/label/service/NodeLabelService.scala b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/label/service/NodeLabelService.scala
index a4893a38bf7..267b4226af9 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/label/service/NodeLabelService.scala
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/label/service/NodeLabelService.scala
@@ -91,6 +91,11 @@ trait NodeLabelService {
labels: util.List[Label[_]]
): util.Map[ScoreServiceInstance, util.List[Label[_]]]
+ def getScoredNodeMapsByLabelsReuse(
+ labels: util.List[Label[_]],
+ shuffEnable: Boolean
+ ): util.Map[ScoreServiceInstance, util.List[Label[_]]]
+
def getNodeLabelsByInstanceList(
instanceList: util.List[ServiceInstance]
): util.HashMap[String, util.List[Label[_]]]
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/label/service/impl/DefaultNodeLabelService.scala b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/label/service/impl/DefaultNodeLabelService.scala
index b9be35f7ca3..bb688e8cd6a 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/label/service/impl/DefaultNodeLabelService.scala
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/label/service/impl/DefaultNodeLabelService.scala
@@ -21,6 +21,7 @@ import org.apache.linkis.common.ServiceInstance
import org.apache.linkis.common.utils.{Logging, Utils}
import org.apache.linkis.manager.am.conf.AMConfiguration
import org.apache.linkis.manager.am.converter.MetricsConverter
+import org.apache.linkis.manager.common.conf.RMConfiguration
import org.apache.linkis.manager.common.entity.node.{EngineNode, ScoreServiceInstance}
import org.apache.linkis.manager.common.entity.persistence.PersistenceLabel
import org.apache.linkis.manager.common.entity.resource.Resource
@@ -51,7 +52,7 @@ import org.springframework.transaction.annotation.Transactional
import org.springframework.util.CollectionUtils
import java.util
-import java.util.{ArrayList, List}
+import java.util.{ArrayList, Collections, List}
import java.util.stream.Collectors
import scala.collection.JavaConverters._
@@ -344,6 +345,22 @@ class DefaultNodeLabelService extends NodeLabelService with Logging {
new util.HashMap[ScoreServiceInstance, util.List[Label[_]]]()
}
+ override def getScoredNodeMapsByLabelsReuse(
+ labels: util.List[Label[_]],
+ shuffEnable: Boolean
+ ): util.Map[ScoreServiceInstance, util.List[Label[_]]] = {
+ // Try to convert the label list to key value list
+ if (null != labels && labels.asScala.nonEmpty) {
+ // Get the persistence labels by kvList
+ val requireLabels = labels.asScala.filter(_.getFeature == Feature.CORE)
+ // Extra the necessary labels whose feature equals Feature.CORE or Feature.SUITABLE
+ val necessaryLabels = requireLabels.map(LabelManagerUtils.convertPersistenceLabel)
+ val inputLabels = labels.asScala.map(LabelManagerUtils.convertPersistenceLabel)
+ return getScoredNodeMapsByLabels(inputLabels.asJava, necessaryLabels.asJava, shuffEnable)
+ }
+ new util.HashMap[ScoreServiceInstance, util.List[Label[_]]]()
+ }
+
/**
* 1. Get the relationship between the incoming label and node 2. get all instances by input
* labels 3. get instance all labels 4. Judge labels
@@ -353,9 +370,11 @@ class DefaultNodeLabelService extends NodeLabelService with Logging {
*/
private def getScoredNodeMapsByLabels(
labels: util.List[PersistenceLabel],
- necessaryLabels: util.List[PersistenceLabel]
+ necessaryLabels: util.List[PersistenceLabel],
+ shuffEnable: Boolean = false
): util.Map[ScoreServiceInstance, util.List[Label[_]]] = {
// Get the in-degree relations ( Label -> Nodes )
+ logger.info(s"get node relations by labels size: ${labels.size()}")
val inNodeDegree = labelManagerPersistence.getNodeRelationsByLabels(
if (necessaryLabels.asScala.nonEmpty) necessaryLabels else labels
)
@@ -384,8 +403,14 @@ class DefaultNodeLabelService extends NodeLabelService with Logging {
val matchInstanceAndLabels = new util.HashMap[ScoreServiceInstance, util.List[Label[_]]]()
// Get the out-degree relations ( Node -> Label )
- val outNodeDegree =
+ val instancesList = instances.toList.asJava
+ val outNodeDegree = if (shuffEnable) {
+ labelManagerPersistence.getLabelRelationsByServiceInstance(
+ serviceInstanceShuff(instancesList)
+ )
+ } else {
labelManagerPersistence.getLabelRelationsByServiceInstance(instances.toList.asJava)
+ }
// outNodeDegree cannot be empty
if (outNodeDegree.asScala.nonEmpty) {
val necessaryLabelKeys =
@@ -453,18 +478,22 @@ class DefaultNodeLabelService extends NodeLabelService with Logging {
val resultMap = new util.HashMap[String, util.List[Label[_]]]()
val map = labelManagerPersistence.getLabelRelationsByServiceInstance(serviceInstanceList)
serviceInstanceList.asScala.foreach(serviceInstance => {
- val LabelList = map
- .get(serviceInstance)
- .asScala
- .map { label =>
- val realyLabel: Label[_] = labelFactory.createLabel(
- label.getLabelKey,
- if (!CollectionUtils.isEmpty(label.getValue)) label.getValue else label.getStringValue
- )
- realyLabel
- }
- .toList
- .asJava
+ val LabelList = Option(map.get(serviceInstance))
+ .map(
+ _.asScala
+ .filter(_ != null)
+ .map { label =>
+ val realyLabel: Label[_] = labelFactory.createLabel(
+ label.getLabelKey,
+ if (!CollectionUtils.isEmpty(label.getValue)) label.getValue
+ else label.getStringValue
+ )
+ realyLabel
+ }
+ .toList
+ .asJava
+ )
+ .getOrElse(new util.ArrayList[Label[_]]())
resultMap.put(serviceInstance.toString, LabelList)
})
resultMap
@@ -546,4 +575,17 @@ class DefaultNodeLabelService extends NodeLabelService with Logging {
.toArray
}
+ private def serviceInstanceShuff(
+ serviceInstances: java.util.List[ServiceInstance]
+ ): util.List[ServiceInstance] = {
+ var shuffledInstances = new util.ArrayList[ServiceInstance](serviceInstances)
+ if (shuffledInstances.size > RMConfiguration.LABEL_SERVICE_INSTANCE_SHUFF_NUM.getValue) {
+ Collections.shuffle(shuffledInstances)
+ // keep only the first LABEL_SERVICE_INSTANCE_SHUFF_NUM elements
+ shuffledInstances.subList(0, RMConfiguration.LABEL_SERVICE_INSTANCE_SHUFF_NUM.getValue)
+ } else {
+ serviceInstances
+ }
+ }
+
}
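
serviceInstanceShuff bounds the out-degree label query: when more instances match the labels than LABEL_SERVICE_INSTANCE_SHUFF_NUM allows, the list is shuffled and only the first N are queried. A small, hypothetical Java equivalent (the limit of 100 is a stand-in for the configured value):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class InstanceShuffleDemo {
  static <T> List<T> shuffleAndLimit(List<T> instances, int limit) {
    if (instances.size() <= limit) {
      return instances;
    }
    List<T> copy = new ArrayList<>(instances);
    Collections.shuffle(copy);
    return copy.subList(0, limit); // keep only the first `limit` elements
  }

  public static void main(String[] args) {
    List<Integer> ids = new ArrayList<>();
    for (int i = 0; i < 250; i++) {
      ids.add(i);
    }
    System.out.println(shuffleAndLimit(ids, 100).size()); // 100
  }
}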
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/rm/entity/LabelResourceMap.scala b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/rm/entity/LabelResourceMap.scala
index 30c49a2b1ab..50c5c12f8ed 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/rm/entity/LabelResourceMap.scala
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/rm/entity/LabelResourceMap.scala
@@ -24,7 +24,8 @@ import org.apache.linkis.manager.rm.entity.ResourceOperationType.ResourceOperati
class LabelResourceMapping(
label: Label[_],
resource: Resource,
- resourceOperationType: ResourceOperationType
+ resourceOperationType: ResourceOperationType,
+ user: String
) {
override def equals(obj: Any): Boolean = {
@@ -45,6 +46,8 @@ class LabelResourceMapping(
def getResourceOperationType: ResourceOperationType = resourceOperationType
+ def getUser: String = user
+
override def toString: String = {
s"Label ${label.getStringValue} mapping resource ${resource}"
}
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/rm/entity/ResourceOperationType.scala b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/rm/entity/ResourceOperationType.scala
index 699816ca353..81446bdd664 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/rm/entity/ResourceOperationType.scala
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/rm/entity/ResourceOperationType.scala
@@ -21,6 +21,6 @@ object ResourceOperationType extends Enumeration {
type ResourceOperationType = Value
- val LOCK, USED, USED_RELEASE, LOCKER_RELEASE = Value
+ val LOCK, USED, RELEASE = Value
}
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/rm/restful/RMMonitorRest.scala b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/rm/restful/RMMonitorRest.scala
index b39eb91ac9d..41512c31cc6 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/rm/restful/RMMonitorRest.scala
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/rm/restful/RMMonitorRest.scala
@@ -396,7 +396,7 @@ class RMMonitorRest extends Logging {
queueInfo.put("queuename", maxResource)
queueInfo.put(
"maxResources",
- Map("memory" -> maxResource.getQueueName, "cores" -> maxResource.getQueueCores)
+ Map("memory" -> maxResource.getQueueMemory, "cores" -> maxResource.getQueueCores)
)
queueInfo.put(
"usedResources",
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/rm/service/ResourceLockService.scala b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/rm/service/ResourceLockService.scala
index c90ced3abf7..dc9f484803d 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/rm/service/ResourceLockService.scala
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/rm/service/ResourceLockService.scala
@@ -49,7 +49,7 @@ class ResourceLockService extends Logging {
lockManagerPersistence.lock(persistenceLock, Long.MaxValue)
}
if (isLocked) {
- logger.info("successfully locked label" + persistenceLock.getLockObject)
+ logger.info("successfully locked label " + persistenceLock.getLockObject)
}
isLocked
} { case t: Throwable =>
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/rm/service/impl/DefaultResourceManager.scala b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/rm/service/impl/DefaultResourceManager.scala
index d0a32e4900e..b6a01e34700 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/rm/service/impl/DefaultResourceManager.scala
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/main/scala/org/apache/linkis/manager/rm/service/impl/DefaultResourceManager.scala
@@ -36,6 +36,7 @@ import org.apache.linkis.manager.common.errorcode.ManagerCommonErrorCodeSummary
import org.apache.linkis.manager.common.exception.{RMErrorException, RMWarnException}
import org.apache.linkis.manager.common.protocol.engine.{EngineAskRequest, EngineCreateRequest}
import org.apache.linkis.manager.common.utils.{ManagerUtils, ResourceUtils}
+import org.apache.linkis.manager.label.LabelManagerUtils
import org.apache.linkis.manager.label.builder.factory.LabelBuilderFactoryContext
import org.apache.linkis.manager.label.constant.LabelKeyConstant
import org.apache.linkis.manager.label.entity.Label
@@ -56,7 +57,7 @@ import org.apache.linkis.manager.rm.{
}
import org.apache.linkis.manager.rm.domain.RMLabelContainer
import org.apache.linkis.manager.rm.entity.{LabelResourceMapping, ResourceOperationType}
-import org.apache.linkis.manager.rm.entity.ResourceOperationType.{LOCK, USED}
+import org.apache.linkis.manager.rm.entity.ResourceOperationType.{LOCK, RELEASE, USED}
import org.apache.linkis.manager.rm.exception.{RMErrorCode, RMLockFailedRetryException}
import org.apache.linkis.manager.rm.external.service.ExternalResourceService
import org.apache.linkis.manager.rm.service.{
@@ -75,8 +76,8 @@ import org.springframework.stereotype.Component
import java.text.MessageFormat
import java.util
-import java.util.{Date, UUID}
-import java.util.concurrent.TimeUnit
+import java.util.{Date, List, UUID}
+import java.util.concurrent.{LinkedBlockingDeque, LinkedBlockingQueue, TimeUnit}
import scala.collection.JavaConverters._
import scala.collection.mutable
@@ -121,6 +122,9 @@ class DefaultResourceManager extends ResourceManager with Logging with Initializ
private var requestResourceServices: Array[RequestResourceService] = _
+  // No capacity limit is set, since the number of ECs is bounded
+ private val waitForDealResourceLabels = new LinkedBlockingDeque[LabelResourceMapping]()
+
override def afterPropertiesSet(): Unit = {
requestResourceServices = Array(
new DefaultReqResourceService(labelResourceService),
@@ -141,6 +145,23 @@ class DefaultResourceManager extends ResourceManager with Logging with Initializ
RMConfiguration.LOCK_RELEASE_CHECK_INTERVAL.getValue.toLong,
TimeUnit.MILLISECONDS
)
+    // Periodically retry resource label operations whose lock could not be acquired
+ Utils.defaultScheduler.scheduleAtFixedRate(
+ new Runnable {
+ override def run(): Unit = {
+ logger.debug(
+ s"Start force dealing locked failed resourceLabel resources. waitForDealResourceLabels size ${waitForDealResourceLabels.size()}"
+ )
+ val labelResourceMapping = waitForDealResourceLabels.poll(100, TimeUnit.MILLISECONDS)
+ if (labelResourceMapping != null) {
+ resourceDeal(labelResourceMapping)
+ }
+ }
+ },
+ RMConfiguration.LOCK_FAILED_LABEL_RESOURCE_DEAL_INTERVAL.getValue.toLong,
+ RMConfiguration.LOCK_FAILED_LABEL_RESOURCE_DEAL_INTERVAL.getValue.toLong,
+ TimeUnit.MILLISECONDS
+ )
}
/**
@@ -478,9 +499,22 @@ class DefaultResourceManager extends ResourceManager with Logging with Initializ
labelContainer.getResourceLabels.asScala
.filter(!_.isInstanceOf[EngineInstanceLabel])
.foreach { label =>
- val persistenceLock =
+ val persistenceLock = Utils.tryCatch {
tryLockOneLabel(label, -1, labelContainer.getUserCreatorLabel.getUser)
- Utils.tryFinally {
+ } { case t: Exception =>
+ logger.warn(
+ s"${engineInstanceLabel.getStringValue} used resource for resourceLabel $label failed, wait for dealing! Reason: ${t.getMessage}."
+ )
+ val labelResourceMapping = new LabelResourceMapping(
+ label,
+ addedResource,
+ ResourceOperationType.USED,
+ labelContainer.getUserCreatorLabel.getUser
+ )
+ waitForDealResourceLabels.offer(labelResourceMapping)
+ null
+ }
+ if (persistenceLock != null) Utils.tryFinally {
labelContainer.setCurrentLabel(label)
val labelResource = labelResourceService.getLabelResource(label)
if (labelResource != null) {
@@ -494,9 +528,15 @@ class DefaultResourceManager extends ResourceManager with Logging with Initializ
labelResource,
labelContainer.getCombinedResourceLabel.getStringValue
)
- labelResourceSet.add(
- new LabelResourceMapping(label, addedResource, ResourceOperationType.USED)
- )
+ labelResourceSet
+ .add(
+ new LabelResourceMapping(
+ label,
+ addedResource,
+ ResourceOperationType.USED,
+ labelContainer.getUserCreatorLabel.getUser
+ )
+ )
resourceCheck(label, labelResource)
}
} {
@@ -513,7 +553,6 @@ class DefaultResourceManager extends ResourceManager with Logging with Initializ
}
}
} { case exception: Exception =>
- resourceRollback(labelResourceSet, labelContainer.getUserCreatorLabel.getUser)
logger.error(
s"${labelContainer.getEngineInstanceLabel.getStringValue} used resource failed!, resource: ${lockedResource}",
exception
@@ -581,41 +620,43 @@ class DefaultResourceManager extends ResourceManager with Logging with Initializ
}
}
- private def resourceRollback(
- labelResourceSet: mutable.Set[LabelResourceMapping],
- user: String
- ): Unit = {
- labelResourceSet.foreach { labelResourceMapping =>
- val persistenceLock = tryLockOneLabel(labelResourceMapping.getLabel(), -1, user)
- Utils.tryCatch {
- val resource = labelResourceService.getLabelResource(labelResourceMapping.getLabel())
- labelResourceMapping.getResourceOperationType match {
- case LOCK =>
- resource.setLeftResource(
- resource.getLeftResource.add(labelResourceMapping.getResource())
- )
- resource.setLockedResource(
- resource.getLockedResource.minus(labelResourceMapping.getResource())
- )
- case USED =>
- resource.setLockedResource(
- resource.getLeftResource.add(labelResourceMapping.getResource())
- )
- resource.setUsedResource(
- resource.getLockedResource.minus(labelResourceMapping.getResource())
- )
- case _ =>
- }
- labelResourceService.setLabelResource(
- labelResourceMapping.getLabel(),
- resource,
- labelResourceMapping.getResourceOperationType.toString
- )
- } { case e: Exception =>
- logger.error(s"Failed to roll back resource " + labelResourceSet.mkString("\n"), e)
+ private def resourceDeal(labelResourceMapping: LabelResourceMapping): Unit = {
+ logger.info(s"try to deal labelResource $labelResourceMapping.")
+ val persistenceLock = Utils.tryCatch {
+ tryLockOneLabel(labelResourceMapping.getLabel(), -1, labelResourceMapping.getUser)
+ } { case t: Exception =>
+ logger.error(s"Failed to deal labelResource $labelResourceMapping, wait for retry.", t)
+ waitForDealResourceLabels.addFirst(labelResourceMapping)
+ return
+ }
+ Utils.tryCatch {
+ val resource = labelResourceService.getLabelResource(labelResourceMapping.getLabel())
+ labelResourceMapping.getResourceOperationType match {
+ case RELEASE =>
+ resource.setLeftResource(resource.getLeftResource.add(labelResourceMapping.getResource()))
+ resource.setLockedResource(
+ resource.getLockedResource.minus(labelResourceMapping.getResource())
+ )
+ case USED =>
+ resource.setLockedResource(
+ resource.getLeftResource.minus(labelResourceMapping.getResource())
+ )
+ resource.setUsedResource(
+ resource.getLockedResource.add(labelResourceMapping.getResource())
+ )
+ case _ =>
}
- resourceLockService.unLock(persistenceLock)
+ labelResourceService.setLabelResource(
+ labelResourceMapping.getLabel(),
+ resource,
+ labelResourceMapping.getResourceOperationType.toString
+ )
+ logger.info(s"succeed to deal labelResource $labelResourceMapping.")
+ } { case e: Exception =>
+ logger.error(s"Failed to deal labelResource $labelResourceMapping, wait for retry.", e)
+ waitForDealResourceLabels.addFirst(labelResourceMapping)
}
+ resourceLockService.unLock(persistenceLock)
}
override def tryLockOneLabel(
@@ -685,12 +726,48 @@ class DefaultResourceManager extends ResourceManager with Logging with Initializ
.filter(!_.isInstanceOf[EngineInstanceLabel])
.foreach { label =>
Utils.tryCatch {
- val persistenceLock = tryLockOneLabel(
- label,
- RMUtils.RM_RESOURCE_LOCK_WAIT_TIME.getValue,
- labelContainer.getUserCreatorLabel.getUser
- )
- Utils.tryFinally {
+ val persistenceLock = Utils.tryCatch {
+ tryLockOneLabel(
+ label,
+ RMUtils.RM_RESOURCE_LOCK_WAIT_TIME.getValue,
+ labelContainer.getUserCreatorLabel.getUser
+ )
+ } { case t: Exception =>
+ logger.warn(
+ s"${labelContainer.getEngineInstanceLabel.getServiceInstance} release resource for resourceLabel $label failed, wait for dealing! Reason: ${t.getMessage}."
+ )
+ if (
+ null != usedResource.getUsedResource && usedResource.getUsedResource.moreThan(
+ Resource
+ .initResource(usedResource.getResourceType)
+ )
+ ) {
+ val labelResourceMapping = new LabelResourceMapping(
+ label,
+ usedResource.getUsedResource,
+ ResourceOperationType.RELEASE,
+ labelContainer.getUserCreatorLabel.getUser
+ )
+ waitForDealResourceLabels.offer(labelResourceMapping)
+ }
+ if (
+ null != usedResource.getLockedResource && usedResource.getLockedResource
+ .moreThan(
+ Resource
+ .initResource(usedResource.getResourceType)
+ )
+ ) {
+ val labelResourceMapping = new LabelResourceMapping(
+ label,
+ usedResource.getLockedResource,
+ ResourceOperationType.RELEASE,
+ labelContainer.getUserCreatorLabel.getUser
+ )
+ waitForDealResourceLabels.offer(labelResourceMapping)
+ }
+ null
+ }
+ if (persistenceLock != null) Utils.tryFinally {
val labelResource = labelResourceService.getLabelResource(label)
if (labelResource != null) {
if (label.isInstanceOf[EMInstanceLabel]) timeCheck(labelResource, usedResource)
@@ -829,13 +906,69 @@ class DefaultResourceManager extends ResourceManager with Logging with Initializ
*/
override def getResourceInfo(serviceInstances: Array[ServiceInstance]): ResourceInfo = {
+ var resourceInfo: ResourceInfo = null
+ val getBatchEnable = RMConfiguration.GET_RESOURCE_BY_LABEL_VALUE_ENABLED.getValue
+ val startTime = System.currentTimeMillis
+ if (getBatchEnable) {
+ resourceInfo = getInstancesResourceBatch(serviceInstances)
+ } else {
+ // old
+ resourceInfo = getInstancesResource(serviceInstances)
+ }
+ logger.info(
+ s"getResourceInfo with serviceInstances size: ${serviceInstances.length} resource size: ${resourceInfo.resourceInfo
+ .size()}, cost: ${(System.currentTimeMillis - startTime) / 1000.0} s"
+ )
+ resourceInfo
+ }
+
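+  // Batch path: resolves resources for all instances with a single query on the engine instance label values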
+ private def getInstancesResourceBatch(serviceInstances: Array[ServiceInstance]): ResourceInfo = {
+ val resourceInfo = ResourceInfo(Lists.newArrayList())
+ val labelValues: java.util.ArrayList[String] = new java.util.ArrayList[String]()
+ for (serviceInstance <- serviceInstances) {
+ val engineInstanceLabel: EngineInstanceLabel =
+ LabelBuilderFactoryContext.getLabelBuilderFactory.createLabel(classOf[EngineInstanceLabel])
+ engineInstanceLabel.setServiceName(serviceInstance.getApplicationName)
+ engineInstanceLabel.setInstance(serviceInstance.getInstance)
+ val label = LabelManagerUtils.convertPersistenceLabel(engineInstanceLabel)
+ val labelValue = label.getStringValue()
+ labelValues.add(labelValue)
+ }
+ val persistenceResources: util.List[PersistenceResource] =
+ resourceManagerPersistence.getResourceByLabelValues(labelValues)
+ if (persistenceResources != null && persistenceResources.size() > 0) {
+ if (persistenceResources.size() != serviceInstances.length) {
+ logger.error(
+ s"resource query failed. serviceInstances size: ${serviceInstances.length}, resource size: ${persistenceResources.size()}"
+ )
+ return resourceInfo
+ }
+ persistenceResources.asScala.foreach({ resource =>
+ val rmNode = new InfoRMNode
+ val persistenceResource = resource.asInstanceOf[PersistenceResource]
+ val serviceInstanceArray: Array[ServiceInstance] =
+ serviceInstances.filter { serviceInstance =>
+ persistenceResource.getLabelValue.contains(serviceInstance.getInstance)
+ }
+ if (serviceInstanceArray == null || serviceInstanceArray.length != 1) {
+ logger.error("logic error. please optimization.")
+ }
+ val serviceInstance = serviceInstanceArray(0)
+ val aggregatedResource = ResourceUtils.fromPersistenceResource(persistenceResource)
+ rmNode.setServiceInstance(serviceInstance)
+ rmNode.setNodeResource(aggregatedResource)
+ resourceInfo.resourceInfo.add(rmNode)
+ })
+
+ }
+ resourceInfo
+ }
+
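+  // Original path: queries the resource of each instance one by one via its instance label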
+ private def getInstancesResource(serviceInstances: Array[ServiceInstance]): ResourceInfo = {
val resourceInfo = ResourceInfo(Lists.newArrayList())
serviceInstances.foreach({ serviceInstance =>
val rmNode = new InfoRMNode
var aggregatedResource: NodeResource = null
- val engineConnSpringName = GovernanceCommonConf.ENGINE_CONN_SPRING_NAME.getValue
- val engineConnManagerSpringName =
- GovernanceCommonConf.ENGINE_CONN_MANAGER_SPRING_NAME.getValue
serviceInstance.getApplicationName match {
case GovernanceCommonConf.ENGINE_CONN_SPRING_NAME.getValue =>
val engineInstanceLabel = LabelBuilderFactoryContext.getLabelBuilderFactory.createLabel(
@@ -844,12 +977,14 @@ class DefaultResourceManager extends ResourceManager with Logging with Initializ
engineInstanceLabel.setServiceName(serviceInstance.getApplicationName)
engineInstanceLabel.setInstance(serviceInstance.getInstance)
aggregatedResource = labelResourceService.getLabelResource(engineInstanceLabel)
+ logger.info("getLabelResource engineconn, engineInstanceLabel {}", engineInstanceLabel)
case GovernanceCommonConf.ENGINE_CONN_MANAGER_SPRING_NAME.getValue =>
val emInstanceLabel =
LabelBuilderFactoryContext.getLabelBuilderFactory.createLabel(classOf[EMInstanceLabel])
emInstanceLabel.setServiceName(serviceInstance.getApplicationName)
emInstanceLabel.setInstance(serviceInstance.getInstance)
aggregatedResource = labelResourceService.getLabelResource(emInstanceLabel)
+ logger.info("getLabelResource ecm, emInstanceLabel {}", emInstanceLabel)
}
rmNode.setServiceInstance(serviceInstance)
rmNode.setNodeResource(aggregatedResource)
diff --git a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/test/scala/org/apache/linkis/manager/am/selector/rule/HotspotExclusionRuleTest.scala b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/test/scala/org/apache/linkis/manager/am/selector/rule/HotspotExclusionRuleTest.scala
index 9916daea8e0..45a8216ca12 100644
--- a/linkis-computation-governance/linkis-manager/linkis-application-manager/src/test/scala/org/apache/linkis/manager/am/selector/rule/HotspotExclusionRuleTest.scala
+++ b/linkis-computation-governance/linkis-manager/linkis-application-manager/src/test/scala/org/apache/linkis/manager/am/selector/rule/HotspotExclusionRuleTest.scala
@@ -29,13 +29,13 @@ class HotspotExclusionRuleTest {
@Test def testRuleFiltering(): Unit = {
val hotspotExclusionRule = new HotspotExclusionRule()
val bufferNodes = new ArrayBuffer[AMEMNode]()
- for (i <- 0 until 3) {
+ for (i <- 0 until 1) {
val amEmNode = new AMEMNode();
amEmNode.setServiceInstance(ServiceInstance("ecm", s"ecm:$i"))
bufferNodes.append(amEmNode)
}
val res = hotspotExclusionRule.ruleFiltering(bufferNodes.toArray)
- for (i <- 0 until 3) {
+ for (i <- 0 until 1) {
assert(res(i).getServiceInstance.equals(bufferNodes(i).getServiceInstance))
}
}
diff --git a/linkis-computation-governance/linkis-manager/linkis-label-common/src/main/java/org/apache/linkis/manager/label/builder/CombinedLabelBuilder.java b/linkis-computation-governance/linkis-manager/linkis-label-common/src/main/java/org/apache/linkis/manager/label/builder/CombinedLabelBuilder.java
index 5ba5950f719..4c204cc5a45 100644
--- a/linkis-computation-governance/linkis-manager/linkis-label-common/src/main/java/org/apache/linkis/manager/label/builder/CombinedLabelBuilder.java
+++ b/linkis-computation-governance/linkis-manager/linkis-label-common/src/main/java/org/apache/linkis/manager/label/builder/CombinedLabelBuilder.java
@@ -46,7 +46,7 @@ public boolean canBuild(String labelKey) {
@Override
  public Label<?> build(String labelKey, @Nullable Object valueObj) throws LabelErrorException {
- if (null != valueObj || valueObj instanceof List) {
+ if (null != valueObj && valueObj instanceof List) {
try {
        List<Label<?>> labels = (List<Label<?>>) valueObj;
return new CombinedLabelImpl(labels);
diff --git a/linkis-computation-governance/linkis-manager/linkis-label-common/src/main/java/org/apache/linkis/manager/label/conf/LabelCommonConfig.java b/linkis-computation-governance/linkis-manager/linkis-label-common/src/main/java/org/apache/linkis/manager/label/conf/LabelCommonConfig.java
index 2d180c496e8..d8ef089eb7f 100644
--- a/linkis-computation-governance/linkis-manager/linkis-label-common/src/main/java/org/apache/linkis/manager/label/conf/LabelCommonConfig.java
+++ b/linkis-computation-governance/linkis-manager/linkis-label-common/src/main/java/org/apache/linkis/manager/label/conf/LabelCommonConfig.java
@@ -35,7 +35,34 @@ public class LabelCommonConfig {
   public static final CommonVars<String> SPARK_ENGINE_VERSION =
CommonVars.apply("wds.linkis.spark.engine.version", "3.2.1");
-
+ public static final String SPARK3_ENGINE_VERSION_CONF = "sparkVersion";
+ public static final String SPARK_ENGINE_HOME_CONF = "SPARK_HOME";
+ public static final String SPARK_ENGINE_CMD_CONF = "SPARK_CMD";
+ public static final String SPARK_ENGINE_PATH_CONF = "PATH";
+ public static final String SPARK_ENGINE_CONF_DIR = "SPARK_CONF_DIR";
+ public static final CommonVars<String> SPARK3_ENGINE_VERSION =
+ CommonVars.apply("linkis.spark3.engine.version", "3.4.4");
+
+ public static final CommonVars<String> SPARK_ENGINE_HOME =
+ CommonVars.apply("linkis.spark.engine.home", "/appcom/Install/spark");
+ public static final CommonVars<String> SPARK3_ENGINE_HOME =
+ CommonVars.apply("linkis.spark3.engine.home", "/appcom/Install/spark3");
+ public static final CommonVars<Boolean> USER_DEFAULT_SPAKR_SWITCH =
+ CommonVars.apply("linkis.user.default.spark3.switch", false);
+ public static final CommonVars<String> SPARK3_ENGINE_CMD =
+ CommonVars.apply("linkis.spark3.engine.cmd", "/appcom/Install/spark3-cmd");
+ public static final CommonVars<String> SPARK_ENGINE_CMD =
+ CommonVars.apply("linkis.spark.engine.cmd", "/appcom/Install/spark-cmd");
+ public static final CommonVars<String> SPARK3_ENGINE_PATH =
+ CommonVars.apply("linkis.spark3.engine.path", "$SPARK_CMD/bin:$PATH");
+ public static final CommonVars<String> SPARK_ENGINE_PATH =
+ CommonVars.apply("linkis.spark.engine.path", "$SPARK_CMD/bin:$PATH");
+
+ public static final CommonVars<String> SPARK3_ENGINE_CONFIG =
+ CommonVars.apply("linkis.spark3.engine.config", "/appcom/config/spark3-config/spark-submit");
+
+ public static final CommonVars<String> SPARK_ENGINE_CONFIG =
+ CommonVars.apply("linkis.spark.engine.config", "/appcom/config/spark-config/spark-submit");
   public static final CommonVars<String> HIVE_ENGINE_VERSION =
CommonVars.apply("wds.linkis.hive.engine.version", "3.1.3");
diff --git a/linkis-computation-governance/linkis-manager/linkis-label-common/src/main/java/org/apache/linkis/manager/label/entity/cluster/ClusterLabel.java b/linkis-computation-governance/linkis-manager/linkis-label-common/src/main/java/org/apache/linkis/manager/label/entity/cluster/ClusterLabel.java
index b07c77f4d84..06fbfd6293c 100644
--- a/linkis-computation-governance/linkis-manager/linkis-label-common/src/main/java/org/apache/linkis/manager/label/entity/cluster/ClusterLabel.java
+++ b/linkis-computation-governance/linkis-manager/linkis-label-common/src/main/java/org/apache/linkis/manager/label/entity/cluster/ClusterLabel.java
@@ -84,6 +84,11 @@ public boolean equals(Object other) {
}
}
+ @Override
+ public int hashCode() {
+ return getClusterName().hashCode() + getClusterType().hashCode();
+ }
+
@Override
public void valueCheck(String stringValue) throws LabelErrorException {
if (!StringUtils.isEmpty(stringValue)) {
diff --git a/linkis-computation-governance/linkis-manager/linkis-label-common/src/main/java/org/apache/linkis/manager/label/utils/EngineTypeLabelCreator.java b/linkis-computation-governance/linkis-manager/linkis-label-common/src/main/java/org/apache/linkis/manager/label/utils/EngineTypeLabelCreator.java
index 88cc9139ec3..16a79cb257a 100644
--- a/linkis-computation-governance/linkis-manager/linkis-label-common/src/main/java/org/apache/linkis/manager/label/utils/EngineTypeLabelCreator.java
+++ b/linkis-computation-governance/linkis-manager/linkis-label-common/src/main/java/org/apache/linkis/manager/label/utils/EngineTypeLabelCreator.java
@@ -40,12 +40,17 @@ public class EngineTypeLabelCreator {
}
private static void init() {
- if (null == defaultVersion) {
+ if (null == defaultVersion) { // NOSONAR
synchronized (EngineTypeLabelCreator.class) {
if (null == defaultVersion) {
defaultVersion = new HashMap<>(16);
- defaultVersion.put(
- EngineType.SPARK().toString(), LabelCommonConfig.SPARK_ENGINE_VERSION.getValue());
+ if (LabelCommonConfig.USER_DEFAULT_SPAKR_SWITCH.getValue()) {
+ defaultVersion.put(
+ EngineType.SPARK().toString(), LabelCommonConfig.SPARK3_ENGINE_VERSION.getValue());
+ } else {
+ defaultVersion.put(
+ EngineType.SPARK().toString(), LabelCommonConfig.SPARK_ENGINE_VERSION.getValue());
+ }
defaultVersion.put(
EngineType.HIVE().toString(), LabelCommonConfig.HIVE_ENGINE_VERSION.getValue());
defaultVersion.put(
@@ -123,4 +128,18 @@ public static void registerVersion(String type, String version) {
}
defaultVersion.put(type, version);
}
+
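+  // Creates an EngineTypeLabel for the given engine type, falling back to the configured default version when none is supplied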
+ public static EngineTypeLabel createEngineTypeLabel(String type, String version) {
+ if (null == defaultVersion) {
+ init();
+ }
+ EngineTypeLabel label = labelBuilderFactory.createLabel(EngineTypeLabel.class);
+ label.setEngineType(type);
+ if (StringUtils.isNotBlank(version)) {
+ label.setVersion(version);
+ } else {
+      label.setVersion(defaultVersion.get(type));
+ }
+ return label;
+ }
}
diff --git a/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/conf/RMConfiguration.java b/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/conf/RMConfiguration.java
index a28736d630a..c33956146d2 100644
--- a/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/conf/RMConfiguration.java
+++ b/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/conf/RMConfiguration.java
@@ -27,11 +27,14 @@ public class RMConfiguration {
CommonVars.apply("wds.linkis.rm.wait.event.time.out", 1000 * 60 * 12L);
   public static final CommonVars<TimeType> LOCK_RELEASE_TIMEOUT =
- CommonVars.apply("wds.linkis.manager.rm.lock.release.timeout", new TimeType("5m"));
+ CommonVars.apply("wds.linkis.manager.rm.lock.release.timeout", new TimeType("10m"));
   public static final CommonVars<TimeType> LOCK_RELEASE_CHECK_INTERVAL =
CommonVars.apply("wds.linkis.manager.rm.lock.release.check.interval", new TimeType("5m"));
+ public static final CommonVars<TimeType> LOCK_FAILED_LABEL_RESOURCE_DEAL_INTERVAL =
+ CommonVars.apply("wds.linkis.manager.rm.lock.failed.deal.interval", new TimeType("10s"));
+
// Resource parameter(资源参数)
   public static final CommonVars<Integer> USER_AVAILABLE_CPU =
CommonVars.apply("wds.linkis.rm.client.core.max", 10);
@@ -73,4 +76,16 @@ public class RMConfiguration {
CommonVars.apply("wds.linkis.rm.default.kubernetes.cluster.type", "K8S");
   public static final CommonVars<Integer> EXTERNAL_RETRY_NUM =
CommonVars.apply("wds.linkis.rm.external.retry.num", 3);
+
+ public static final CommonVars<Integer> LABEL_SERVICE_PARTITION_NUM =
+ CommonVars.apply("wds.linkis.label.service.partition.num", 1000);
+
+ public static final CommonVars<Integer> LABEL_SERVICE_INSTANCE_SHUFF_NUM =
+ CommonVars.apply("wds.linkis.label.service.instance.shuff.num", 100);
+
+ public static final CommonVars<Boolean> LABEL_SERVICE_INSTANCE_SHUFF_SWITCH =
+ CommonVars.apply("wds.linkis.label.service.instance.shuff.switch", false);
+
+ public static final CommonVars<Boolean> GET_RESOURCE_BY_LABEL_VALUE_ENABLED =
+ CommonVars.apply("wds.linkis.get.resource.by.label.value.enable", false);
}
diff --git a/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/node/AMEngineNode.java b/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/node/AMEngineNode.java
index 6214045bb6a..3c68502d292 100644
--- a/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/node/AMEngineNode.java
+++ b/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/node/AMEngineNode.java
@@ -66,6 +66,8 @@ public class AMEngineNode implements EngineNode, ScoreServiceInstance {
private String nodeDescription;
+ private String params;
+
public AMEngineNode() {}
public AMEngineNode(double score, ServiceInstance serviceInstance) {
@@ -224,6 +226,16 @@ public void setEcMetrics(String metrics) {
this.ecMetrics = metrics;
}
+ @Override
+ public String getParams() {
+ return params;
+ }
+
+ @Override
+ public void setParams(String params) {
+ this.params = params;
+ }
+
@Override
public Date getUpdateTime() {
return updateTime;
diff --git a/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/node/EngineNode.java b/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/node/EngineNode.java
index 627b41bc556..e337d517ab1 100644
--- a/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/node/EngineNode.java
+++ b/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/node/EngineNode.java
@@ -34,4 +34,8 @@ public interface EngineNode extends AMNode, RMNode, LabelNode {
String getEcMetrics();
void setEcMetrics(String metrics);
+
+ String getParams();
+
+ void setParams(String params);
}
diff --git a/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/persistence/PersistenceNode.java b/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/persistence/PersistenceNode.java
index 026f5c64534..eea1bb5073e 100644
--- a/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/persistence/PersistenceNode.java
+++ b/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/persistence/PersistenceNode.java
@@ -35,6 +35,7 @@ public class PersistenceNode {
private Date createTime;
private String updator;
private String creator;
+ private String params;
private String mappingPorts;
private String mappingHost;
@@ -142,4 +143,16 @@ public String getMappingHost() {
public void setMappingHost(String mappingHost) {
this.mappingHost = mappingHost;
}
+
+ public void setId(int id) {
+ this.id = id;
+ }
+
+ public String getParams() {
+ return params;
+ }
+
+ public void setParams(String params) {
+ this.params = params;
+ }
}
diff --git a/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/persistence/PersistenceResource.java b/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/persistence/PersistenceResource.java
index e89b6b6bd71..a126ba8f52b 100644
--- a/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/persistence/PersistenceResource.java
+++ b/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/persistence/PersistenceResource.java
@@ -30,6 +30,10 @@ public class PersistenceResource {
private String resourceType;
+ private String labelKey;
+
+ private String labelValue;
+
public String getTicketId() {
return ticketId;
}
@@ -140,4 +144,20 @@ public String getCreator() {
public void setCreator(String creator) {
this.creator = creator;
}
+
+ public String getLabelKey() {
+ return labelKey;
+ }
+
+ public void setLabelKey(String labelKey) {
+ this.labelKey = labelKey;
+ }
+
+ public String getLabelValue() {
+ return labelValue;
+ }
+
+ public void setLabelValue(String labelValue) {
+ this.labelValue = labelValue;
+ }
}
diff --git a/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/resource/LoadInstanceResource.java b/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/resource/LoadInstanceResource.java
index accb25c3501..ffde021c297 100644
--- a/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/resource/LoadInstanceResource.java
+++ b/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/resource/LoadInstanceResource.java
@@ -26,6 +26,8 @@ public class LoadInstanceResource extends Resource {
private final long memory;
private final int cores;
private final int instances;
+  // Kept for compatibility
+ private final int instance;
private LoadInstanceResource(Resource r) {
if (r instanceof LoadInstanceResource) {
@@ -33,35 +35,42 @@ private LoadInstanceResource(Resource r) {
this.memory = t.memory;
this.cores = t.cores;
this.instances = t.instances;
+ this.instance = t.instances;
} else if (r instanceof LoadResource) {
LoadResource l = (LoadResource) r;
this.memory = l.getMemory();
this.cores = l.getCores();
this.instances = 0;
+ this.instance = 0;
} else if (r instanceof MemoryResource) {
MemoryResource m = (MemoryResource) r;
this.memory = m.getMemory();
this.cores = 0;
this.instances = 0;
+ this.instance = 0;
} else if (r instanceof CPUResource) {
CPUResource c = (CPUResource) r;
this.memory = 0;
this.cores = c.getCores();
this.instances = 0;
+ this.instance = 0;
} else if (r instanceof DriverAndYarnResource) {
DriverAndYarnResource d = (DriverAndYarnResource) r;
this.memory = d.getLoadInstanceResource().getMemory();
this.cores = d.getLoadInstanceResource().getCores();
this.instances = d.getLoadInstanceResource().getInstances();
+ this.instance = d.getLoadInstanceResource().getInstances();
} else if (r instanceof DriverAndKubernetesResource) {
DriverAndKubernetesResource d = (DriverAndKubernetesResource) r;
this.memory = d.getLoadInstanceResource().getMemory();
this.cores = d.getLoadInstanceResource().getCores();
this.instances = d.getLoadInstanceResource().getInstances();
+ this.instance = d.getLoadInstanceResource().getInstances();
} else {
this.memory = Long.MAX_VALUE;
this.cores = Integer.MAX_VALUE;
this.instances = Integer.MAX_VALUE;
+ this.instance = Integer.MAX_VALUE;
}
}
@@ -73,6 +82,7 @@ public LoadInstanceResource(long memory, int cores, int instances) {
this.memory = memory;
this.cores = cores;
this.instances = instances;
+ this.instance = instances;
}
public LoadInstanceResource add(Resource r) {
@@ -182,6 +192,10 @@ public int getCores() {
}
public int getInstances() {
- return instances;
+ return this.instances;
+ }
+
+ public int getInstance() { // NOSONAR
+ return this.instances; // NOSONAR
}
}
diff --git a/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/resource/LoadResource.java b/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/resource/LoadResource.java
index 313ce7b92bd..5a4a586b903 100644
--- a/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/resource/LoadResource.java
+++ b/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/resource/LoadResource.java
@@ -125,7 +125,7 @@ public int compare(Resource r) {
@Override
public String toJson() {
return String.format(
- "{\"memory\":%s,\"cpu\":%d}", ByteTimeUtils.bytesToString(this.memory), this.cores);
+ "{\"memory\":\"%s\",\"cpu\":%d}", ByteTimeUtils.bytesToString(this.memory), this.cores);
}
@Override
diff --git a/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/resource/MemoryResource.java b/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/resource/MemoryResource.java
index b7e10d829d4..52b3f9271e8 100644
--- a/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/resource/MemoryResource.java
+++ b/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/resource/MemoryResource.java
@@ -112,7 +112,7 @@ public boolean equalsTo(Resource resource) {
@Override
public String toJson() {
- return " {\"memory\":\"" + ByteTimeUtils.bytesToString(memory) + " }";
+ return " {\"memory\":\"" + ByteTimeUtils.bytesToString(memory) + "\" }";
}
@Override
diff --git a/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/resource/SpecialResource.java b/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/resource/SpecialResource.java
index 80b3bc57291..f7934b0bc40 100644
--- a/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/resource/SpecialResource.java
+++ b/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/entity/resource/SpecialResource.java
@@ -19,6 +19,7 @@
import org.apache.linkis.manager.common.exception.ResourceWarnException;
+import java.math.BigDecimal;
import java.text.MessageFormat;
import java.util.AbstractMap;
import java.util.HashMap;
@@ -292,11 +293,11 @@ public boolean equalsTo(Resource r) {
if (v instanceof Integer && rsV instanceof Integer) {
return (int) v != (int) rsV;
} else if (v instanceof Double && rsV instanceof Double) {
- return (double) v != (double) rsV;
+ return !BigDecimal.valueOf((double) v).equals(BigDecimal.valueOf((double) rsV));
} else if (v instanceof Long && rsV instanceof Long) {
return (long) v != (long) rsV;
} else if (v instanceof Float && rsV instanceof Float) {
- return (float) v != (float) rsV;
+ return !BigDecimal.valueOf((float) v).equals(BigDecimal.valueOf((float) rsV));
} else if (v instanceof Short && rsV instanceof Short) {
return (short) v != (short) rsV;
} else {
diff --git a/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/utils/ResourceUtils.java b/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/utils/ResourceUtils.java
index 1451a5b563b..823df089965 100644
--- a/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/utils/ResourceUtils.java
+++ b/linkis-computation-governance/linkis-manager/linkis-manager-common/src/main/java/org/apache/linkis/manager/common/utils/ResourceUtils.java
@@ -131,6 +131,9 @@ public static PersistenceResource toPersistenceResource(NodeResource nodeResourc
if (nodeResource.getUsedResource() != null) {
persistenceResource.setUsedResource(serializeResource(nodeResource.getUsedResource()));
}
+ if (nodeResource.getId() != null) {
+ persistenceResource.setId(nodeResource.getId());
+ }
persistenceResource.setResourceType(nodeResource.getResourceType().toString());
return persistenceResource;
}
diff --git a/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/dao/LabelManagerMapper.java b/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/dao/LabelManagerMapper.java
index fed4d96cd67..aebac1d5bdd 100644
--- a/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/dao/LabelManagerMapper.java
+++ b/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/dao/LabelManagerMapper.java
@@ -152,6 +152,16 @@ List getNodeByLabelKeyValue(
*/
   List<PersistenceResource> listResourceByLaBelId(Integer labelId);
+ /**
+   * Gets instance resources by labelValues; each labelValue is actually {"instance":"ip:port","serviceName":"linkis-cg-engineconn"}
+ * EC/ECM
+ *
+ * @param labelValues
+ * @return
+ */
+ List<PersistenceResource> getInstanceResourceByLabelValue(
+ @Param("labelValues") List<String> labelValues);
+
/**
    * Deletes resources by labelId together with the resource-label associations; the label table records themselves are not deleted
*
diff --git a/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/persistence/ResourceManagerPersistence.java b/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/persistence/ResourceManagerPersistence.java
index 35195b4d008..63f09d59633 100644
--- a/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/persistence/ResourceManagerPersistence.java
+++ b/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/persistence/ResourceManagerPersistence.java
@@ -54,6 +54,12 @@ void registerResource(ServiceInstance serviceInstance, PersistenceResource persi
   List<PersistenceResource> getResourceByLabels(List<? extends Label<?>> labels)
throws PersistenceErrorException;
+ /**
+ * @param labelValues
+ * @return
+ */
+ List<PersistenceResource> getResourceByLabelValues(List<String> labelValues);
+
/**
    * Gets resources by user
*
diff --git a/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/persistence/impl/DefaultLabelManagerPersistence.java b/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/persistence/impl/DefaultLabelManagerPersistence.java
index 5e515f589bc..bd5a4a1ff45 100644
--- a/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/persistence/impl/DefaultLabelManagerPersistence.java
+++ b/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/persistence/impl/DefaultLabelManagerPersistence.java
@@ -18,6 +18,7 @@
package org.apache.linkis.manager.persistence.impl;
import org.apache.linkis.common.ServiceInstance;
+import org.apache.linkis.manager.common.conf.RMConfiguration;
import org.apache.linkis.manager.common.entity.persistence.PersistenceLabel;
import org.apache.linkis.manager.common.entity.persistence.PersistenceLabelRel;
import org.apache.linkis.manager.common.entity.persistence.PersistenceNode;
@@ -42,6 +43,7 @@
import java.util.*;
import java.util.stream.Collectors;
+import com.google.common.collect.Lists;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -329,38 +331,40 @@ public Map> getNodeRelationsByLabels(
   public Map<ServiceInstance, List<PersistenceLabel>> getLabelRelationsByServiceInstance(
       List<ServiceInstance> serviceInstances) {
if (CollectionUtils.isEmpty(serviceInstances)) return Collections.emptyMap();
- try {
- List> nodeRelationsByLabels =
- labelManagerMapper.listLabelRelationByServiceInstance(serviceInstances);
- List> arrays =
- new ArrayList>();
- for (Map nodeRelationsByLabel : nodeRelationsByLabels) {
- Optional instanceOption =
- serviceInstances.stream()
- .filter(
- serviceInstance1 ->
- serviceInstance1
- .getInstance()
- .equalsIgnoreCase(String.valueOf(nodeRelationsByLabel.get("instance"))))
- .findFirst();
- PersistenceLabel persistenceLabel = new PersistenceLabel();
- BeanUtils.populate(persistenceLabel, nodeRelationsByLabel);
- PersistenceUtils.setValue(persistenceLabel);
- instanceOption.ifPresent(
- serviceInstance -> arrays.add(new Tunple(serviceInstance, persistenceLabel)));
- }
- return arrays.stream()
- .collect(Collectors.groupingBy(Tunple::getKey))
- .entrySet()
- .stream()
- .collect(
- Collectors.toMap(
- Map.Entry::getKey,
- f -> f.getValue().stream().map(Tunple::getValue).collect(Collectors.toList())));
- } catch (InvocationTargetException | IllegalAccessException e) {
- throw new PersistenceWarnException(
- BEANUTILS_POPULATE_FAILED.getErrorCode(), BEANUTILS_POPULATE_FAILED.getErrorDesc(), e);
- }
+    Map<ServiceInstance, List<PersistenceLabel>> resultMap = new HashMap<>();
+    List<Map<String, Object>> nodeRelationsByLabels =
+ listLabelRelationByServiceInstance(
+ serviceInstances, RMConfiguration.LABEL_SERVICE_PARTITION_NUM.getValue());
+ logger.info("list label relation end, with size: {}", nodeRelationsByLabels.size());
+    Map<String, List<Map<String, Object>>> groupByInstanceMap =
+ nodeRelationsByLabels.stream()
+ .collect(
+ Collectors.groupingBy(
+ nodeRelationsByLabel -> nodeRelationsByLabel.get("instance").toString()));
+ serviceInstances.stream()
+ .filter(serviceInstance -> groupByInstanceMap.containsKey(serviceInstance.getInstance()))
+ .forEach(
+ serviceInstance -> {
+              List<PersistenceLabel> persistenceLabelList = new ArrayList<>();
+ groupByInstanceMap
+ .get(serviceInstance.getInstance())
+ .forEach(
+ map -> {
+ try {
+ PersistenceLabel persistenceLabel = new PersistenceLabel();
+ BeanUtils.populate(persistenceLabel, map);
+ PersistenceUtils.setValue(persistenceLabel);
+ persistenceLabelList.add(persistenceLabel);
+ } catch (IllegalAccessException | InvocationTargetException e) {
+ throw new PersistenceWarnException(
+ BEANUTILS_POPULATE_FAILED.getErrorCode(),
+ BEANUTILS_POPULATE_FAILED.getErrorDesc(),
+ e);
+ }
+ });
+ resultMap.put(serviceInstance, persistenceLabelList);
+ });
+ return resultMap;
}
@Override
@@ -372,4 +376,13 @@ public PersistenceLabel getLabelByKeyValue(String labelKey, String stringValue)
public List getNodeByLabelKeyValue(String labelKey, String stringValue) {
return labelManagerMapper.getNodeByLabelKeyValue(labelKey, stringValue);
}
+
+  public List<Map<String, Object>> listLabelRelationByServiceInstance(
+      List<ServiceInstance> nodes, int batchSize) {
+
+ return Lists.partition(nodes, batchSize).stream()
+ .map(batch -> labelManagerMapper.listLabelRelationByServiceInstance(batch))
+ .flatMap(List::stream)
+ .collect(Collectors.toList());
+ }
}
diff --git a/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/persistence/impl/DefaultNodeManagerPersistence.java b/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/persistence/impl/DefaultNodeManagerPersistence.java
index 40f479f4968..49fe33e9d47 100644
--- a/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/persistence/impl/DefaultNodeManagerPersistence.java
+++ b/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/persistence/impl/DefaultNodeManagerPersistence.java
@@ -105,6 +105,10 @@ public void updateEngineNode(ServiceInstance serviceInstance, Node node)
persistenceNode.setOwner(node.getOwner());
persistenceNode.setMark(node.getMark());
persistenceNode.setUpdateTime(new Date());
+ if (node instanceof EngineNode) {
+ EngineNode engineNode = (EngineNode) node;
+ persistenceNode.setParams(engineNode.getParams());
+ }
persistenceNode.setCreator(
node.getOwner()); // The creator is not given when inserting records in rm, so you need to
// set this value(rm中插入记录的时候并未给出creator,所以需要set这个值)
diff --git a/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/persistence/impl/DefaultNodeMetricManagerPersistence.java b/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/persistence/impl/DefaultNodeMetricManagerPersistence.java
index 30e18467fcd..eb8f1d4ccff 100644
--- a/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/persistence/impl/DefaultNodeMetricManagerPersistence.java
+++ b/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/persistence/impl/DefaultNodeMetricManagerPersistence.java
@@ -148,6 +148,7 @@ public List getNodeMetrics(List extends Node> nodes)
     List<PersistenceNodeMetrics> persistenceNodeMetricsList =
nodeMetricManagerMapper.getNodeMetricsByInstances(instances);
+ logger.info("persistenceNodeMetricsList size: {}", persistenceNodeMetricsList.size());
for (PersistenceNodeMetrics persistenceNodeMetric : persistenceNodeMetricsList) {
for (Node node : nodes) {
diff --git a/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/persistence/impl/DefaultResourceManagerPersistence.java b/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/persistence/impl/DefaultResourceManagerPersistence.java
index a517ef0ccfa..81183e15659 100644
--- a/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/persistence/impl/DefaultResourceManagerPersistence.java
+++ b/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/java/org/apache/linkis/manager/persistence/impl/DefaultResourceManagerPersistence.java
@@ -112,6 +112,11 @@ public List getResourceByLabels(List extends Label> label
}
}
+ @Override
+  public List<PersistenceResource> getResourceByLabelValues(List<String> labelValues) {
+ return labelManagerMapper.getInstanceResourceByLabelValue(labelValues);
+ }
+
@Override
public List getResourceByUser(String user) throws PersistenceErrorException {
     List<PersistenceResource> persistenceResourceList =
diff --git a/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/resources/mapper/common/LabelManagerMapper.xml b/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/resources/mapper/common/LabelManagerMapper.xml
index f81c679bdbe..2b99ebe1514 100644
--- a/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/resources/mapper/common/LabelManagerMapper.xml
+++ b/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/resources/mapper/common/LabelManagerMapper.xml
@@ -322,38 +322,21 @@
-
- SELECT
- t.id AS 'id',
- t.label_value_size AS 'labelValueSize',
- t.label_key AS 'labelKey',
- t.label_value AS 'stringValue',
- si.instance,
- si.name AS 'applicationName'
- FROM
- (
- SELECT
- l.id,
- l.label_value_size ,
- l.label_key,
- l.label_value,
- lsi.service_instance
- FROM
- (
- SELECT
- id,
- label_value_size,
- label_key,
- label_value
- FROM
- linkis_cg_manager_label
- WHERE
- label_key = #{label.labelKey}
- and label_value = #{label.stringValue}
- ) l
- INNER JOIN linkis_cg_manager_label_service_instance lsi ON l.id = lsi.label_id
- ) t
- INNER JOIN linkis_cg_manager_service_instance si ON si.instance = t.service_instance
+ SELECT
+ l.id,
+ l.label_value_size AS 'labelValueSize',
+ l.label_key AS 'labelKey',
+ l.label_value AS 'stringValue',
+ si.instance,
+ si.name AS 'applicationName'
+ FROM
+ linkis_cg_manager_label l
+ INNER JOIN linkis_cg_manager_label_service_instance lsi ON l.id = lsi.label_id
+ INNER JOIN linkis_cg_manager_service_instance si ON si.instance = lsi.service_instance
+ WHERE
+ (l.label_key, l.label_value) IN
+
+ (#{label.labelKey}, #{label.stringValue})
@@ -366,7 +349,18 @@
lr.resource_id = r.id
-
+
+ SELECT
+ r.*, l.label_key, l.label_value
+ FROM linkis_cg_manager_label l
+ JOIN linkis_cg_manager_label_resource lr ON l.id = lr.label_id
+ JOIN linkis_cg_manager_linkis_resources r ON lr.resource_id = r.id
+ WHERE l.label_value IN
+
+ #{value}
+
+
delete r from linkis_cg_manager_linkis_resources r
diff --git a/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/resources/mapper/common/NodeManagerMapper.xml b/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/resources/mapper/common/NodeManagerMapper.xml
index 3d7782c21f1..bfdeb545854 100644
--- a/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/resources/mapper/common/NodeManagerMapper.xml
+++ b/linkis-computation-governance/linkis-manager/linkis-manager-persistence/src/main/resources/mapper/common/NodeManagerMapper.xml
@@ -72,6 +72,9 @@
mapping_host = #{persistenceNode.mappingHost},
+
+ params = #{persistenceNode.params},
+
WHERE instance = #{instance}
diff --git a/linkis-dist/bin/install-linkis-to-kubernetes.sh b/linkis-dist/bin/install-linkis-to-kubernetes.sh
index 00681b27b94..a10519342b1 100644
--- a/linkis-dist/bin/install-linkis-to-kubernetes.sh
+++ b/linkis-dist/bin/install-linkis-to-kubernetes.sh
@@ -86,6 +86,8 @@ tag(){
make_linkis_image_with_mysql_jdbc(){
${ROOT_DIR}/docker/scripts/make-linkis-image-with-mysql-jdbc.sh
docker tag linkis:with-jdbc linkis:dev
+ ${ROOT_DIR}/docker/scripts/make-ldh-image-with-mysql-jdbc.sh
+ docker tag linkis-ldh:with-jdbc linkis-ldh:dev
}
#creating a kind cluster
create_kind_cluster(){
diff --git a/linkis-dist/docker/ldh-with-mysql-jdbc.Dockerfile b/linkis-dist/docker/ldh-with-mysql-jdbc.Dockerfile
new file mode 100644
index 00000000000..515ce3e32cf
--- /dev/null
+++ b/linkis-dist/docker/ldh-with-mysql-jdbc.Dockerfile
@@ -0,0 +1,29 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+ARG LINKIS_IMAGE=linkis-ldh:dev
+
+######################################################################
+# linkis-ldh image with mysql jdbc
+######################################################################
+FROM ${LINKIS_IMAGE}
+
+ARG LDH_HOME=/opt/ldh/current
+ARG MYSQL_JDBC_VERSION=8.0.28
+
+COPY mysql-connector-java-${MYSQL_JDBC_VERSION}.jar ${LDH_HOME}/hive/lib/
+COPY mysql-connector-java-${MYSQL_JDBC_VERSION}.jar ${LDH_HOME}/spark/lib/
diff --git a/linkis-dist/docker/ldh.Dockerfile b/linkis-dist/docker/ldh.Dockerfile
index 8a1d64abce2..5c4683ddb6f 100644
--- a/linkis-dist/docker/ldh.Dockerfile
+++ b/linkis-dist/docker/ldh.Dockerfile
@@ -75,6 +75,10 @@ ADD ldh-tars/spark-${SPARK_VERSION}-bin-hadoop${SPARK_HADOOP_VERSION}.tgz /opt/l
ADD ldh-tars/flink-${FLINK_VERSION}-bin-scala_2.11.tgz /opt/ldh/${LINKIS_VERSION}/
ADD ldh-tars/apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz /opt/ldh/${LINKIS_VERSION}/
+RUN ln -s /opt/ldh/${LINKIS_VERSION}/spark-${SPARK_VERSION}-bin-hadoop${SPARK_HADOOP_VERSION} /opt/ldh/current/spark \
+ && ln -s /opt/ldh/${LINKIS_VERSION}/hadoop-${HADOOP_VERSION} /opt/ldh/current/hadoop \
+ && ln -s /opt/ldh/${LINKIS_VERSION}/apache-hive-${HIVE_VERSION}-bin /opt/ldh/current/hive
+
RUN mkdir -p /etc/ldh \
&& mkdir -p /var/log/hadoop && chmod 777 -R /var/log/hadoop \
&& mkdir -p /var/log/hive && chmod 777 -R /var/log/hive \
@@ -91,9 +95,10 @@ RUN mkdir -p /etc/ldh \
#ADD ldh-tars/mysql-connector-java-${MYSQL_JDBC_VERSION}.jar /opt/ldh/current/hive/lib/
#ADD ldh-tars/mysql-connector-java-${MYSQL_JDBC_VERSION}.jar /opt/ldh/current/spark/jars/
-ENV JAVA_HOME /etc/alternatives/jre
-ENV PATH /opt/ldh/current/hadoop/bin:/opt/ldh/current/hive/bin:/opt/ldh/current/spark/bin:/opt/ldh/current/flink/bin:/opt/ldh/current/zookeeper/bin:$PATH
+ENV JAVA_HOME=/etc/alternatives/jre
+ENV PATH=/opt/ldh/current/hadoop/bin:/opt/ldh/current/hive/bin:/opt/ldh/current/spark/bin:/opt/ldh/current/flink/bin:/opt/ldh/current/zookeeper/bin:$PATH
ENV HADOOP_CONF_DIR=/etc/ldh/hadoop
+ENV YARN_CONF_DIR=/etc/ldh/hadoop
ENV HIVE_CONF_DIR=/etc/ldh/hive
ENV SPARK_CONF_DIR=/etc/ldh/spark
ENV FLINK_CONF_DIR=/etc/ldh/flink
diff --git a/linkis-dist/docker/scripts/make-ldh-image-with-mysql-jdbc.sh b/linkis-dist/docker/scripts/make-ldh-image-with-mysql-jdbc.sh
new file mode 100755
index 00000000000..6b91cddf201
--- /dev/null
+++ b/linkis-dist/docker/scripts/make-ldh-image-with-mysql-jdbc.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+
+WORK_DIR=`cd $(dirname $0); pwd -P`
+
+. ${WORK_DIR}/utils.sh
+
+IMAGE_NAME=${IMAGE_NAME:-linkis-ldh:with-jdbc}
+LINKIS_IMAGE=${LINKIS_IMAGE:-linkis-ldh:dev}
+LINKIS_HOME=${LINKIS_HOME:-/opt/ldh/current}
+MYSQL_JDBC_VERSION=${MYSQL_JDBC_VERSION:-8.0.28}
+MYSQL_JDBC_FILENAME=mysql-connector-java-${MYSQL_JDBC_VERSION}.jar
+MYSQL_JDBC_URL="https://repo1.maven.org/maven2/mysql/mysql-connector-java/${MYSQL_JDBC_VERSION}/${MYSQL_JDBC_FILENAME}"
+
+BUILD_DIR=`mktemp -d -t linkis-build-XXXXX`
+
+echo "# build dir: ${BUILD_DIR}"
+echo "# base image: ${LINKIS_IMAGE}"
+echo "# mysql jdbc version: ${MYSQL_JDBC_VERSION}"
+
+download ${MYSQL_JDBC_URL} ${MYSQL_JDBC_FILENAME} ${BUILD_DIR}
+
+echo "try to exec: docker build -f ${WORK_DIR}/../ldh-with-mysql-jdbc.Dockerfile \
+ -t ${IMAGE_NAME} \
+ --build-arg LINKIS_IMAGE=${LINKIS_IMAGE} \
+ --build-arg LINKIS_HOME=${LINKIS_HOME} \
+ --build-arg MYSQL_JDBC_VERSION=${MYSQL_JDBC_VERSION} \
+ ${BUILD_DIR}"
+
+docker build -f ${WORK_DIR}/../ldh-with-mysql-jdbc.Dockerfile \
+ -t ${IMAGE_NAME} \
+ --build-arg LINKIS_IMAGE=${LINKIS_IMAGE} \
+ --build-arg LINKIS_HOME=${LINKIS_HOME} \
+ --build-arg MYSQL_JDBC_VERSION=${MYSQL_JDBC_VERSION} \
+ ${BUILD_DIR}
+
+echo "# done, image: ${IMAGE_NAME}"
diff --git a/linkis-dist/helm/charts/linkis/templates/configmap-linkis-config.yaml b/linkis-dist/helm/charts/linkis/templates/configmap-linkis-config.yaml
index e7042d0089a..e802735b601 100644
--- a/linkis-dist/helm/charts/linkis/templates/configmap-linkis-config.yaml
+++ b/linkis-dist/helm/charts/linkis/templates/configmap-linkis-config.yaml
@@ -126,7 +126,9 @@ data:
spring:
application:
name: linkis-mg-eureka
- profiles: eureka
+ config:
+ activate:
+ on-profile: eureka
cloud:
loadbalancer:
cache:
@@ -212,7 +214,7 @@ data:
wds.linkis.client.common.gatewayUrl={{- include "linkis.gateway.url" . }}
wds.linkis.client.common.authStrategy=token
wds.linkis.client.common.tokenKey=Validation-Code
- wds.linkis.client.common.tokenValue=LINKIS-UNAVAILABLE-TOKE
+ wds.linkis.client.common.tokenValue=LINKIS-UNAVAILABLE-TOKEN
spring.spring.mvc.pathmatch.matching-strategy=ant_path_matcher
spring.spring.cloud.loadbalancer.cache.enabled=false
springfox.documentation.enabled=false
diff --git a/linkis-dist/helm/charts/linkis/templates/jobs.yaml b/linkis-dist/helm/charts/linkis/templates/jobs.yaml
index 38d97ee2e29..5daebb04add 100644
--- a/linkis-dist/helm/charts/linkis/templates/jobs.yaml
+++ b/linkis-dist/helm/charts/linkis/templates/jobs.yaml
@@ -28,7 +28,16 @@ spec:
command:
- /bin/bash
- -ecx
- - >-
+ - |-
+ sed -i 's#@YARN_RESTFUL_URL#{{ .Values.linkis.deps.yarn.restfulUrl }}#g' {{ .Values.linkis.locations.homeDir }}/db/linkis_dml.sql
+ sed -i 's#@HADOOP_VERSION#{{ .Values.linkis.deps.hadoop.version }}#g' {{ .Values.linkis.locations.homeDir }}/db/linkis_dml.sql
+ sed -i 's#@YARN_AUTH_ENABLE#{{ .Values.linkis.deps.yarn.authEnable }}#g' {{ .Values.linkis.locations.homeDir }}/db/linkis_dml.sql
+ sed -i 's#@YARN_AUTH_USER#{{ .Values.linkis.deps.yarn.authUser }}#g' {{ .Values.linkis.locations.homeDir }}/db/linkis_dml.sql
+ sed -i 's#@YARN_AUTH_PWD#{{ .Values.linkis.deps.yarn.authPassword }}#g' {{ .Values.linkis.locations.homeDir }}/db/linkis_dml.sql
+ sed -i 's#@YARN_KERBEROS_ENABLE#{{ .Values.linkis.deps.yarn.kerberosEnable }}#g' {{ .Values.linkis.locations.homeDir }}/db/linkis_dml.sql
+ sed -i 's#@YARN_PRINCIPAL_NAME#{{ .Values.linkis.deps.yarn.principal }}#g' {{ .Values.linkis.locations.homeDir }}/db/linkis_dml.sql
+ sed -i 's#@YARN_KEYTAB_PATH#{{ .Values.linkis.deps.yarn.keytab }}#g' {{ .Values.linkis.locations.homeDir }}/db/linkis_dml.sql
+ sed -i 's#@YARN_KRB5_PATH#{{ .Values.linkis.deps.yarn.krb5 }}#g' {{ .Values.linkis.locations.homeDir }}/db/linkis_dml.sql
mysql -h{{ .Values.linkis.datasource.host }} -P{{ .Values.linkis.datasource.port }} -u{{ .Values.linkis.datasource.username }} -p{{ .Values.linkis.datasource.password }} --default-character-set=utf8 -e "CREATE DATABASE IF NOT EXISTS {{ .Values.linkis.datasource.database }} DEFAULT CHARSET utf8 COLLATE utf8_general_ci";
mysql -h{{ .Values.linkis.datasource.host }} -P{{ .Values.linkis.datasource.port }} -u{{ .Values.linkis.datasource.username }} -p{{ .Values.linkis.datasource.password }} -D{{ .Values.linkis.datasource.database }} --default-character-set=utf8 -e "source {{ .Values.linkis.locations.homeDir }}/db//linkis_ddl.sql";
mysql -h{{ .Values.linkis.datasource.host }} -P{{ .Values.linkis.datasource.port }} -u{{ .Values.linkis.datasource.username }} -p{{ .Values.linkis.datasource.password }} -D{{ .Values.linkis.datasource.database }} --default-character-set=utf8 -e "source {{ .Values.linkis.locations.homeDir }}/db//linkis_dml.sql"
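A hedged sketch of what the init job now does, run by hand: the placeholder names come from the template above, while the concrete YARN URL, Hadoop version, install path and MySQL connection details below are illustrative assumptions, not values taken from the chart:

    # Substitute chart values into the DML seed file, then source it (illustrative values)
    sed -i 's#@YARN_RESTFUL_URL#http://ldh.ldh.svc.cluster.local:8088#g' /opt/linkis/db/linkis_dml.sql
    sed -i 's#@HADOOP_VERSION#3.3.4#g' /opt/linkis/db/linkis_dml.sql
    mysql -hmysql -P3306 -uroot -ppassword -Dlinkis --default-character-set=utf8 \
      -e "source /opt/linkis/db/linkis_dml.sql"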
diff --git a/linkis-dist/helm/scripts/install-ldh.sh b/linkis-dist/helm/scripts/install-ldh.sh
index 3ada87befdb..74d1960f5f8 100755
--- a/linkis-dist/helm/scripts/install-ldh.sh
+++ b/linkis-dist/helm/scripts/install-ldh.sh
@@ -16,7 +16,7 @@
#
WORK_DIR=`cd $(dirname $0); pwd -P`
-
+ROOT_DIR=${WORK_DIR}/../..
. ${WORK_DIR}/common.sh
set -e
@@ -27,6 +27,9 @@ echo "# LDH version: ${LINKIS_IMAGE_TAG}"
# load image
if [[ "X${USING_KIND}" == "Xtrue" ]]; then
+ echo "# Preparing LDH image ..."
+ ${ROOT_DIR}/docker/scripts/make-ldh-image-with-mysql-jdbc.sh
+ docker tag linkis-ldh:with-jdbc linkis-ldh:dev
echo "# Loading LDH image ..."
kind load docker-image linkis-ldh:${LINKIS_IMAGE_TAG} --name ${KIND_CLUSTER_NAME}
fi
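A hedged reminder of how this branch is reached; whether USING_KIND is read from the environment or set by common.sh is an assumption here:

    # Rebuild the LDH image with the JDBC driver, retag it, and load it into the kind cluster
    USING_KIND=true bash ./linkis-dist/helm/scripts/install-ldh.sh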
diff --git a/linkis-dist/helm/scripts/prepare-for-spark.sh b/linkis-dist/helm/scripts/prepare-for-spark.sh
index 5b2b35a8245..8519e0bdee7 100644
--- a/linkis-dist/helm/scripts/prepare-for-spark.sh
+++ b/linkis-dist/helm/scripts/prepare-for-spark.sh
@@ -21,22 +21,16 @@ WORK_DIR=`cd $(dirname $0); pwd -P`
## copy spark resource from ldh to linkis-cg-engineconnmanager
-LDH_POD_NAME=`kubectl get pods -n ldh -l app=ldh -o jsonpath='{.items[0].metadata.name}'`
-kubectl cp -n ldh ${LDH_POD_NAME}:/opt/ldh/ ./ldh
-
+LDH_POD_NAME=`kubectl get pods -n ldh -l app=ldh -o jsonpath='{.items[0].metadata.name}'`
ECM_POD_NAME=`kubectl get pods -n linkis -l app.kubernetes.io/instance=linkis-demo-cg-engineconnmanager -o jsonpath='{.items[0].metadata.name}'`
-kubectl cp ./ldh -n linkis ${ECM_POD_NAME}:/opt/ ;
-
-
-kubectl exec -it -n linkis ${ECM_POD_NAME} -- bash -c "chmod +x /opt/ldh/1.3.0/spark-3.2.1-bin-hadoop3.2/bin/*"
-kubectl exec -it -n linkis ${ECM_POD_NAME} -- bash -c "ln -s /opt/ldh/1.3.0/spark-3.2.1-bin-hadoop3.2 /opt/ldh/current/spark"
-kubectl exec -it -n linkis ${ECM_POD_NAME} -- bash -c "ln -s /opt/ldh/1.3.0/hadoop-3.3.4 /opt/ldh/current/hadoop"
-kubectl exec -it -n linkis ${ECM_POD_NAME} -- bash -c "ln -s /opt/ldh/1.3.0/apache-hive-3.1.3-bin /opt/ldh/current/hive"
+kubectl exec -n ldh ${LDH_POD_NAME} -- tar -C /opt -cf - ldh | \
+kubectl exec -i -n linkis ${ECM_POD_NAME} -- tar -C /opt -xf - --no-same-owner
-kubectl exec -it -n linkis ${ECM_POD_NAME} -- bash -c "echo 'export SPARK_HOME=/opt/ldh/current/spark' |sudo tee --append /etc/profile"
-kubectl exec -it -n linkis ${ECM_POD_NAME} -- bash -c "echo 'export PATH=\$SPARK_HOME/bin:\$PATH' |sudo tee --append /etc/profile"
-kubectl exec -it -n linkis ${ECM_POD_NAME} -- bash -c "source /etc/profile"
+kubectl exec -n linkis ${ECM_POD_NAME} -- bash -c "sudo mkdir -p /appcom/Install && sudo chmod 0777 /appcom/Install && ln -s /opt/ldh/current/spark /appcom/Install/spark"
+kubectl exec -n linkis ${ECM_POD_NAME} -- bash -c "echo 'export SPARK_HOME=/opt/ldh/current/spark' |sudo tee --append /etc/profile"
+kubectl exec -n linkis ${ECM_POD_NAME} -- bash -c "echo 'export PATH=\$SPARK_HOME/bin:\$PATH' |sudo tee --append /etc/profile"
+kubectl exec -n linkis ${ECM_POD_NAME} -- bash -c "source /etc/profile"
# add ecm dns for ldh pod
ECM_POD_IP=`kubectl get pods -n linkis -l app.kubernetes.io/instance=linkis-demo-cg-engineconnmanager -o jsonpath='{.items[0].status.podIP}'`
@@ -45,7 +39,4 @@ ECM_POD_SUBDOMAIN=`kubectl get pods -n linkis -l app.kubernetes.io/instance=link
ECM_DNS="${ECM_POD_IP} ${ECM_POD_NAME}.${ECM_POD_SUBDOMAIN}.linkis.svc.cluster.local"
-kubectl exec -it -n ldh ${LDH_POD_NAME} -- bash -c "echo ${ECM_DNS} |sudo tee --append /etc/hosts"
-
-
-rm -rf ldh;
\ No newline at end of file
+kubectl exec -n ldh ${LDH_POD_NAME} -- bash -c "echo ${ECM_DNS} |sudo tee --append /etc/hosts"
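The tar pipe above replaces the old kubectl cp staging step; a standalone sketch of the same pattern, using the pod selectors from the script:

    SRC_POD=$(kubectl get pods -n ldh -l app=ldh -o jsonpath='{.items[0].metadata.name}')
    DST_POD=$(kubectl get pods -n linkis -l app.kubernetes.io/instance=linkis-demo-cg-engineconnmanager -o jsonpath='{.items[0].metadata.name}')
    # tar on the source pod streams to stdout; tar on the destination pod unpacks from stdin
    kubectl exec -n ldh ${SRC_POD} -- tar -C /opt -cf - ldh | \
      kubectl exec -i -n linkis ${DST_POD} -- tar -C /opt -xf - --no-same-owner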
diff --git a/linkis-dist/helm/scripts/resources/ldh/configmaps/configmap-hadoop.yaml b/linkis-dist/helm/scripts/resources/ldh/configmaps/configmap-hadoop.yaml
index fa74a304c9d..904e88bc3ff 100644
--- a/linkis-dist/helm/scripts/resources/ldh/configmaps/configmap-hadoop.yaml
+++ b/linkis-dist/helm/scripts/resources/ldh/configmaps/configmap-hadoop.yaml
@@ -591,4 +591,127 @@ data:
*
-
\ No newline at end of file
+
+ capacity-scheduler.xml: |
+   <?xml version="1.0" encoding="UTF-8"?>
+   <configuration>
+
+     <property>
+       <name>yarn.scheduler.capacity.maximum-applications</name>
+       <value>4</value>
+       <description>
+         Maximum number of applications that can be pending and running.
+       </description>
+     </property>
+
+     <property>
+       <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
+       <value>0.5</value>
+       <description>
+         Maximum percent of resources in the cluster which can be used to run
+         application masters i.e. controls number of concurrent running
+         applications.
+       </description>
+     </property>
+
+     <property>
+       <name>yarn.scheduler.capacity.resource-calculator</name>
+       <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+       <description>
+         The ResourceCalculator implementation to be used to compare
+         Resources in the scheduler.
+         The default i.e. DefaultResourceCalculator only uses Memory while
+         DominantResourceCalculator uses dominant-resource to compare
+         multi-dimensional resources such as Memory, CPU etc.
+       </description>
+     </property>
+
+     <property>
+       <name>yarn.scheduler.capacity.root.queues</name>
+       <value>default</value>
+       <description>
+         The queues at the this level (root is the root queue).
+       </description>
+     </property>
+
+     <property>
+       <name>yarn.scheduler.capacity.root.default.capacity</name>
+       <value>100</value>
+       <description>Default queue target capacity.</description>
+     </property>
+
+     <property>
+       <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
+       <value>1</value>
+       <description>
+         Default queue user limit a percentage from 0.0 to 1.0.
+       </description>
+     </property>
+
+     <property>
+       <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
+       <value>100</value>
+       <description>
+         The maximum capacity of the default queue.
+       </description>
+     </property>
+
+     <property>
+       <name>yarn.scheduler.capacity.root.default.state</name>
+       <value>RUNNING</value>
+       <description>
+         The state of the default queue. State can be one of RUNNING or STOPPED.
+       </description>
+     </property>
+
+     <property>
+       <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
+       <value>*</value>
+       <description>
+         The ACL of who can submit jobs to the default queue.
+       </description>
+     </property>
+
+     <property>
+       <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
+       <value>*</value>
+       <description>
+         The ACL of who can administer jobs on the default queue.
+       </description>
+     </property>
+
+     <property>
+       <name>yarn.scheduler.capacity.node-locality-delay</name>
+       <value>40</value>
+       <description>
+         Number of missed scheduling opportunities after which the CapacityScheduler
+         attempts to schedule rack-local containers.
+         Typically this should be set to number of nodes in the cluster, By default is setting
+         approximately number of nodes in one rack which is 40.
+       </description>
+     </property>
+
+     <property>
+       <name>yarn.scheduler.capacity.queue-mappings-override.enable</name>
+       <value>false</value>
+       <description>
+         If a queue mapping is present, will it override the value specified
+         by the user? This can be used by administrators to place jobs in queues
+         that are different than the one specified by the user.
+         The default is false.
+       </description>
+     </property>
+
+   </configuration>
diff --git a/linkis-dist/package/admin/clear_bml_resources_task.sh b/linkis-dist/package/admin/clear_bml_resources_task.sh
new file mode 100644
index 00000000000..9a9814821dd
--- /dev/null
+++ b/linkis-dist/package/admin/clear_bml_resources_task.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+# description: clean up linkis_ps_bml_resources_task, keeping at most 10 versions per resource_id
+#
+if [ -f ${LINKIS_CONF_DIR}/db.sh ]
+then
+ export LINKIS_DB_CONFIG_PATH=${LINKIS_CONF_DIR}/db.sh
+else
+ if [ -f ${LINKIS_HOME}/conf/db.sh ]
+ then
+ export LINKIS_DB_CONFIG_PATH=${LINKIS_HOME}/conf/db.sh
+ else
+ echo "can not find db.sh"
+ exit
+ fi
+fi
+source ${LINKIS_DB_CONFIG_PATH}
+MYSQL_EXEC_CMD="mysqlsec --dpmc $LINKIS_DB_CONFIG_PATH -h$MYSQL_HOST -P$MYSQL_PORT $MYSQL_DB -ss -e "
+if [ "$is_mysqlsec" == "false" ]; then
+ echo "使用 mysql 进行数据处理"
+ MYSQL_EXEC_CMD="mysql -h$MYSQL_HOST -P$MYSQL_PORT -u$MYSQL_USER -p$MYSQL_PASSWORD $MYSQL_DB -ss -e "
+fi
+echo "start to delete linkis_ps_bml_resources_task version data"
+
+# Query resource_ids with more than 10 versions and iterate over them
+$MYSQL_EXEC_CMD"SELECT resource_id FROM (SELECT resource_id, COUNT(version) AS version_num FROM linkis_ps_bml_resources_task GROUP BY resource_id) a WHERE a.version_num > 10" | while IFS=$'\t' read -r resource_id; do
+ # Count the number of versions for this resource_id
+ total_count=$($MYSQL_EXEC_CMD"SELECT COUNT(*) FROM linkis_ps_bml_resources_task WHERE resource_id = \"$resource_id\"")
+ while [ $total_count -gt 10 ];do
+ echo "Resource_id: $resource_id, Total count: $total_count"
+ # Build the list of ids to delete
+ ids_to_delete=$($MYSQL_EXEC_CMD"SELECT GROUP_CONCAT(id) FROM (SELECT id FROM linkis_ps_bml_resources_task WHERE resource_id = \"$resource_id\" ORDER BY version DESC LIMIT 5000 OFFSET 10) t")
+ echo "$resource_id,will to delete: $ids_to_delete"
+ if [ -n "$ids_to_delete" ]; then
+ # Execute the delete
+ $MYSQL_EXEC_CMD"DELETE FROM linkis_ps_bml_resources_task WHERE resource_id = \"$resource_id\" AND id IN ($ids_to_delete)"
+ echo "($ids_to_delete) delete over"
+ # Refresh the version count for this resource_id
+ total_count=$($MYSQL_EXEC_CMD"SELECT COUNT(*) FROM linkis_ps_bml_resources_task WHERE resource_id = \"$resource_id\"")
+ echo "Resource_id: $resource_id, left count: $total_count"
+ fi
+ done
+done
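The cleanup loop above is driven by a single grouping query; shown in isolation below with the script's own 10-version threshold (the connection variables come from db.sh):

    # List resource_ids that have accumulated more than 10 versions
    mysql -h$MYSQL_HOST -P$MYSQL_PORT -u$MYSQL_USER -p$MYSQL_PASSWORD $MYSQL_DB -ss -e \
      "SELECT resource_id FROM (SELECT resource_id, COUNT(version) AS version_num
          FROM linkis_ps_bml_resources_task GROUP BY resource_id) a
       WHERE a.version_num > 10"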
diff --git a/linkis-dist/package/admin/clear_history_task.sh b/linkis-dist/package/admin/clear_history_task.sh
index 75c49cb7151..c864b3255e4 100644
--- a/linkis-dist/package/admin/clear_history_task.sh
+++ b/linkis-dist/package/admin/clear_history_task.sh
@@ -13,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
#
# description: clear linkis_ps_job_history_group_history 3 month record
#
@@ -30,20 +29,44 @@ else
fi
fi
source ${LINKIS_DB_CONFIG_PATH}
+delete_day_num=90
+if [[ $1 =~ ^[0-9]+$ ]]; then
+ if [ $1 -gt 0 ]; then
+ delete_day_num=$1
+ fi
+fi
-delete_day=`date -d "-90 days" "+%Y-%m-%d"`
+MYSQL_EXEC_CMD="mysqlsec --dpmc $LINKIS_DB_CONFIG_PATH -h$MYSQL_HOST -P$MYSQL_PORT $MYSQL_DB -ss -e "
+if [ "$is_mysqlsec" == "false" ]; then
+ echo "使用 mysql 进行数据处理"
+ MYSQL_EXEC_CMD="mysql -h$MYSQL_HOST -P$MYSQL_PORT -u$MYSQL_USER -p$MYSQL_PASSWORD $MYSQL_DB -ss -e "
+fi
+delete_day=`date -d "-$delete_day_num days" "+%Y-%m-%d"`
delte_time="$delete_day 00:00:00"
echo "start to delete linkis_ps_job_history_group_history before $delte_time"
parm="created_time <=\"$delte_time\" "
-count=`mysql -h$MYSQL_HOST -P$MYSQL_PORT -u$MYSQL_USER -p$MYSQL_PASSWORD $MYSQL_DB -ss -e "SELECT count(1) FROM linkis_ps_job_history_group_history where $parm limit 1 "`
-maxid=`mysql -h$MYSQL_HOST -P$MYSQL_PORT -u$MYSQL_USER -p$MYSQL_PASSWORD $MYSQL_DB -ss -e "SELECT MAX(id) FROM linkis_ps_job_history_group_history where $parm limit 1 "`
-echo "will delete count:$count"
-echo "maxid:$maxid"
+count=$($MYSQL_EXEC_CMD "SELECT count(1) FROM linkis_ps_job_history_group_history where $parm limit 1" 2>&1)
+if [ $? -ne 0 ]; then
+ echo "执行 count 查询出错,错误信息为: $count"
+else
+ echo "count 查询执行成功,结果为: $count"
+fi
+
+maxid=$($MYSQL_EXEC_CMD "SELECT MAX(id) FROM linkis_ps_job_history_group_history where $parm limit 1" 2>&1)
+if [ $? -ne 0 ]; then
+ echo "执行 maxid 查询出错,错误信息为: $maxid"
+else
+ echo "maxid 查询执行成功,结果为: $maxid"
+fi
-while [ $count -gt 1 ];do
- mysql -h$MYSQL_HOST -P$MYSQL_PORT -u$MYSQL_USER -p$MYSQL_PASSWORD $MYSQL_DB -ss -e "DELETE FROM linkis_ps_job_history_group_history where id <= $maxid limit 5000;"
- count=`mysql -h$MYSQL_HOST -P$MYSQL_PORT -u$MYSQL_USER -p$MYSQL_PASSWORD $MYSQL_DB -ss -e "SELECT count(1) FROM linkis_ps_job_history_group_history where $parm limit 1 "`
- echo "count change : $count"
- sleep 1s
+while [ $count -gt 0 ];do
+ $MYSQL_EXEC_CMD "DELETE FROM linkis_ps_job_history_group_history where id <= $maxid limit 5000;"
+ count=$($MYSQL_EXEC_CMD "SELECT count(1) FROM linkis_ps_job_history_group_history where $parm limit 1" 2>&1)
+ if [ $? -ne 0 ]; then
+ echo "执行 count 查询出错,错误信息为: $count"
+ else
+ echo "删除成功,剩余数量: $count"
+ fi
+ sleep 1s
done
\ No newline at end of file
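The script now takes an optional retention argument; a hedged usage sketch (the 30-day value is only an example):

    # Keep 90 days of job history (the default) ...
    sh ./linkis-dist/package/admin/clear_history_task.sh
    # ... or keep only the last 30 days
    sh ./linkis-dist/package/admin/clear_history_task.sh 30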
diff --git a/linkis-dist/package/admin/linkis_task_archive.sh b/linkis-dist/package/admin/linkis_task_archive.sh
new file mode 100644
index 00000000000..978931b7150
--- /dev/null
+++ b/linkis-dist/package/admin/linkis_task_archive.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Script usage notes:
+# Walks the HDFS LOG/RESULT directories.
+# Rules:
+# 1. Directories:
+#    /appcom/logs/linkis/log/2023-11-27 -> /appcom/logs/linkis/archive/log/2023-11-27//LINKISCLI.har
+#    /appcom/logs/linkis/result/2023-11-27 -> /appcom/logs/linkis/archive/result/2023-11-27//LINKISCLI.har
+# 2. DOPS data is processed first (everything older than one week)
+# 3. Other application data follows (everything older than two months)
+# 4. 2025-03: simplified to delete directly instead of archiving; only the last three months of data are kept
+
+HDFS_BASE_DIR="/appcom/logs/linkis"
+#HDFS_CHECK_DIR="${HDFS_BASE_DIR}/archive"
+## Check whether the directory exists
+#hadoop fs -test -d "${HDFS_CHECK_DIR}"
+## Get the return code of the check
+#CHECK_RESULT=$?
+#if [ ${CHECK_RESULT} -eq 0 ]; then
+# echo "Directory already exists: ${HDFS_CHECK_DIR}"
+#else
+# echo "Directory does not exist, creating: ${HDFS_CHECK_DIR}"
+# hadoop fs -mkdir -p "${HDFS_CHECK_DIR}"
+#fi
+
+#LINKIS
+# Clean up logs
+echo "Scanning the Linkis Log directory (directories older than three months will be removed automatically)"
+LOG_DATE_LIST=$(hadoop fs -ls $HDFS_BASE_DIR/log/ | awk '{print $8}'| awk -F'/' '{print $NF}'| awk -v cutoff_date=$(date -d "3 months ago" "+%Y-%m-%d") '$1 < cutoff_date')
+for LOG_DATE in $LOG_DATE_LIST
+do
+ echo "目录超三个月,删除log目录:$LOG_DATE"
+ hadoop fs -rm -r $HDFS_BASE_DIR/log/$LOG_DATE
+done
+
+# Clean up results
+echo "Scanning the Linkis Result directory (directories older than three months will be removed automatically)"
+RESULT_DATE_LIST=$(hadoop fs -ls $HDFS_BASE_DIR/result/ | awk '{print $8}'| awk -F'/' '{print $NF}'| awk -v cutoff_date=$(date -d "3 months ago" "+%Y-%m-%d") '$1 < cutoff_date')
+for RESULT_DATE in $RESULT_DATE_LIST
+do
+ echo "目录超三个月,删除result目录:$RESULT_DATE"
+ hadoop fs -rm -r $HDFS_BASE_DIR/result/$RESULT_DATE
+done
+
+# Clean up DOPS logs
+echo "Scanning the Dops Log directory (directories older than seven days will be removed automatically)"
+DOPS_LOG_DATE_LIST=$(hadoop fs -ls $HDFS_BASE_DIR/log/ | awk '{print $8}'| awk -F'/' '{print $NF}'| awk -v cutoff_date=$(date -d "7 days ago" "+%Y-%m-%d") '$1 < cutoff_date')
+for LOG_DATE in $DOPS_LOG_DATE_LIST
+do
+ DOPS_LOG_DIR=$(hdfs dfs -ls $HDFS_BASE_DIR/log/$LOG_DATE | grep DOPS | awk '{print $8}')
+ for LOG_DIR in $DOPS_LOG_DIR
+ do
+ echo "DOPS目录超7天,删除log目录:$LOG_DIR"
+ hadoop fs -rm -r $LOG_DIR
+ done
+done
+
+# Clean up DOPS results
+echo "Scanning the Dops Result directory (directories older than seven days will be removed automatically)"
+DOPS_RESULT_DATE_LIST=$(hadoop fs -ls $HDFS_BASE_DIR/result/ | awk '{print $8}'| awk -F'/' '{print $NF}'| awk -v cutoff_date=$(date -d "7 days ago" "+%Y-%m-%d") '$1 < cutoff_date')
+for RESULT_DATE in $DOPS_RESULT_DATE_LIST
+do
+ # result directory
+ DOPS_RESULT_DIR=$(hdfs dfs -ls $HDFS_BASE_DIR/result/$RESULT_DATE | grep DOPS | awk '{print $8}')
+ for RESULT_DIR in $DOPS_RESULT_DIR
+ do
+ echo "DOPS目录超7天,删除result目录:$RESULT_DIR"
+ hadoop fs -rm -r $RESULT_DIR
+ done
+done
\ No newline at end of file
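The age filter that drives the cleanup above, shown on its own; the directory names are expected to be dates of the form YYYY-MM-DD:

    # Directories under /appcom/logs/linkis/log older than three months
    hadoop fs -ls /appcom/logs/linkis/log/ | awk '{print $8}' | awk -F'/' '{print $NF}' \
      | awk -v cutoff_date=$(date -d "3 months ago" "+%Y-%m-%d") '$1 < cutoff_date'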
diff --git a/linkis-dist/package/admin/linkis_udf_get_python_methods.py b/linkis-dist/package/admin/linkis_udf_get_python_methods.py
new file mode 100644
index 00000000000..b1561a29374
--- /dev/null
+++ b/linkis-dist/package/admin/linkis_udf_get_python_methods.py
@@ -0,0 +1,32 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import sys
+import ast
+import json
+
+def extract_method_names(file_path):
+ with open(file_path, 'r') as file:
+ code = file.read()
+ tree = ast.parse(code)
+ method_names = set()
+
+ for node in ast.walk(tree):
+ if isinstance(node, ast.FunctionDef):
+ method_names.add(node.name)
+
+ return json.dumps(list(method_names), indent=4)
+
+file_path = sys.argv[1]
+print(extract_method_names(file_path))
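A quick usage sketch for the helper above; the UDF file path is hypothetical:

    # Prints the function names defined in the file as a JSON array, e.g. ["my_udf", "helper"]
    python linkis_udf_get_python_methods.py /tmp/my_udf.py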
diff --git a/linkis-dist/package/admin/tools/linkis-analyze.sh b/linkis-dist/package/admin/tools/linkis-analyze.sh
new file mode 100644
index 00000000000..2b91f699f3d
--- /dev/null
+++ b/linkis-dist/package/admin/tools/linkis-analyze.sh
@@ -0,0 +1,601 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source /appcom/Install/linkis/conf/linkis-env.sh
+
+if [ "$LINKIS_LOG_DIR" = "" ]; then
+ export LINKIS_LOG_DIR="/data/logs/bdpe-ujes"
+fi
+# Set LINKIS_HOME
+if [ -z "$LINKIS_HOME" ]; then
+ export LINKIS_HOME="$INSTALL_HOME"
+fi
+
+# Set LINKIS_CONF_DIR
+if [ -z "$LINKIS_CONF_DIR" ]; then
+ export LINKIS_CONF_DIR="$LINKIS_HOME/conf"
+fi
+
+# Read configuration
+linkisMainConf="$LINKIS_CONF_DIR/linkis.properties"
+
+linkisLogToolPath="$LINKIS_HOME/admin/tools/linkis-log-tool.sh"
+# Job id
+job_id=$1
+task_path="$LINKIS_LOG_DIR/task"
+json_path="$LINKIS_LOG_DIR/task/json"
+gateway_url=$(grep wds.linkis.gateway.url $linkisMainConf | cut -d"=" -f2)
+doctor_token=$(grep linkis.doctor.signature.token $linkisMainConf | cut -d"=" -f2)
+doctor_url=$(grep linkis.doctor.url $linkisMainConf | cut -d"=" -f2)
+system_name=$(grep linkis.system.name $linkisMainConf | cut -d"=" -f2)
+# URL of the global job history list API
+export task_list_result_url="$gateway_url/api/rest_j/v1/jobhistory/list?taskID=$job_id&pageNow=1&pageSize=50&isAdminView=true"
+
+# Engine marker string (used to determine whether the task has been submitted to an engine)
+submit_engine_constants="Task submit to ec"
+# Log path marker (used to locate EC-side logs)
+engine_local_log_path="EngineConn local log path"
+# Link (service) log path
+link_log_path="$LINKIS_LOG_DIR"
+# Link log prefix
+linkis_cg="linkis-cg-"
+hive_contact="冯朝阁"
+spark_contact="冯朝阁"
+jdbc_contact="邵宇"
+engine_type=""
+# linkis token
+token=$(grep wds.linkis.token $linkisMainConf | cut -d"=" -f2)
+
+if [ ! -d $task_path ]; then
+ mkdir $task_path
+fi
+
+if [ ! -d $json_path ]; then
+ mkdir $json_path
+fi
+
+# Get a value by key
+function getValueForTasks() {
+ value=$(echo "$1" | jq -r ".data.tasks[0].$2")
+ echo "$value"
+}
+
+function check_tasks_status() {
+ code=$(echo "$task_list_result" | jq -r ".status")
+ if [ "$code" -ne 0 ]; then # check whether the status code is 0
+ echo "Failed to fetch the task info, diagnosis cannot be performed"
+ exit 0
+ fi
+}
+
+function getValueForDetail() {
+ echo $(echo $1 | python -c "import sys, json; print (json.load(sys.stdin)['data'])")
+}
+
+function getValueForDoctor() {
+ echo $(echo $1 | python -c "import sys, json; print (json.load(sys.stdin)['$2'])")
+}
+
+function getDoctorReport() {
+ code=$(echo $1 | jq -r ".code" 2>/dev/null || echo "0")
+ if [ "$code" -eq 200 ]; then # 判断code是否等于200
+ echo $(echo $1| jq -r ".data.$2[] | select(.conclusion != null and .conclusion.conclusion != \"未检测到异常\" and .conclusion.conclusion != \"运行过程发生错误异常,请根据关键日志和相应的诊断建议进行问题修改\" and .conclusion.conclusion != \"No exception detected\")")
+ else
+ echo "调用Doctoris失败,无法执行Doctoris诊断,异常日志信息:$1"
+ exit 0
+ fi
+}
+
+function print_color_green(){
+ echo -e "\e[32m $1 \e[0m"$2
+}
+
+function print_color_red(){
+ echo -e "\e[31m $1 \e[0m"$2
+}
+
+function print_color_black(){
+ echo -e "\e[30m $1 \e[0m"$2
+}
+
+function getApplicationId() {
+ echo $(cat "$task_path/json/$job_id"_detail.json | grep -oP 'application_\d+_\d+'| tail -n 1)
+}
+
+
+# Global job history list API
+function get_job_list_by_id() {
+ # Save the result queried by job id
+ result=$(curl -s -X GET --header "Token-Code: $token" --header "Token-User: hadoop" --header 'Accept: application/json' $task_list_result_url)
+ echo "$result"
+}
+
+# Query the log details
+function get_job_detail_log() {
+ log_path=$(getValueForTasks "$1" logPath)
+ # openLog API url
+ open_log_url="$gateway_url/api/rest_j/v1/filesystem/openLog?path=$log_path"
+ # Call the openLog API
+ echo $(curl -s -X GET --header "Token-Code: $token" --header "Token-User: hadoop" --header 'Accept: application/json' $open_log_url)
+}
+
+function get_log_info(){
+ echo $(cat $task_path"/json/$job_id"_detail.json | grep -a "$1"| uniq | tail -n 1)
+}
+
+function get_time_info(){
+ echo $(echo "$1"| grep -oP '^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}'| uniq | tail -n 1)
+}
+
+function get_instance_info(){
+ echo $(echo "$1" | grep -oP 'ServiceInstance\((.*?)(?=\))\)'| uniq | tail -n 1)
+}
+
+function get_doctoris_signature(){
+ doctor_signature_map=$(java -cp $LINKIS_HOME/lib/linkis-commons/public-module/linkis-common-1.10.0-wds.jar:$LINKIS_HOME/lib/linkis-commons/public-module/* org.apache.linkis.common.utils.SHAUtils $1 $system_name $doctor_token)
+ echo $(echo "$doctor_signature_map")
+}
+
+# Doctor diagnosis API
+function get_doctor_diagnose() {
+ # Save the signature generated for the application id
+ task_list_result=$(get_doctoris_signature "$1")
+ # Extract the signature fields
+ nonce=$(echo "$task_list_result" | grep -oP 'nonce=(.*?),' | sed 's/nonce=//'| sed 's/,/ /g'| xargs)
+ appId=$(echo "$task_list_result" | grep -oP 'app_id=(.*?),' | sed 's/app_id=//'| sed 's/,/ /g'| xargs)
+ timestamp=$(echo "$task_list_result" | grep -oP 'timestamp=(.*$)' | sed 's/timestamp=//'| sed 's/}/ /g'| xargs)
+ signature=$(echo "$task_list_result" | grep -oP 'signature=(.*?),' | sed 's/signature=//'| sed 's/,/ /g'| xargs)
+ doctor_request="$doctor_url/api/v1/external/diagnose/offline/batchApp?applicationId=$1&app_id=$appId×tamp=$timestamp&nonce=$nonce&signature=$signature"
+ doctor_result=$(curl -s -X GET --header 'Accept: application/json' "$doctor_request")
+ doctor_report_url="$doctor_url/api/v1/external/diagnose/offline/report?applicationId=$1&app_id=$appId×tamp=$timestamp&nonce=$nonce&signature=$signature"
+ doctor_report_result=$(curl -s -X GET --header 'Accept: application/json' "$doctor_report_url")
+ resourcesAnalyze=$(getDoctorReport "$doctor_report_result" "resourcesAnalyze")
+ runErrorAnalyze=$(getDoctorReport "$doctor_report_result" "runErrorAnalyze")
+ runTimeAnalyze=$(getDoctorReport "$doctor_report_result" "runTimeAnalyze")
+ print_color_black "ResourcesAnalyze诊断结果:$(if [ -z "$resourcesAnalyze" ]; then echo "无异常"; else echo "$resourcesAnalyze"; fi)"
+ print_color_black "RunErrorAnalyze诊断结果:$(if [ -z "$runErrorAnalyze" ]; then echo "无异常"; else echo "$runErrorAnalyze"; fi)"
+ print_color_black "RunTimeAnalyze诊断结果:$(if [ -z "$runTimeAnalyze" ]; then echo "无异常"; else echo "$runTimeAnalyze"; fi)"
+}
+
+# ------------------------------------------------------------- Helper commands -------------------------------------------------------
+help() {
+ echo "<-----------------------下面是一些简单命令------------------------------------>"
+ echo "NAME"
+ echo " linkis log query tool"
+ echo ""
+ echo " linkis-analyze -job jobid info, 查看job 的基础信息 (请求参数 日志目录 等)"
+ echo ""
+ echo " linkis-analyze -job jobid info, 查看job相关的日志"
+ echo ""
+ echo " linkis-analyze -job jobid ecinfo, 查看job ec日志"
+ exit 1
+}
+
+# View basic job info (request params, log directories, etc.)
+info() {
+ #
+ echo "[Log directory overview]"
+ echo "[Error log] $task_path/error/JobId-"$1"_error.log"
+ echo "[Detail API response] $task_path/json/JobId-"$1"_detail.log"
+ echo "[Link log] $task_path/link/JobId-"$1"_entrance.log"
+ echo "[Link log] $task_path/link/JobId-"$1"_linkismanager.log"
+ echo "[EC log] $task_path/ec/JobId-"$1"_engine.log"
+}
+
+# View job-related logs
+log() {
+ echo "[Job-related logs]"
+}
+
+# View the job EC logs
+eclog() {
+ echo "[EC logs]"
+ job_id=$1
+ task_list_result_url="$gateway_url/api/rest_j/v1/jobhistory/list?taskID=$1&pageNow=1&pageSize=50&isAdminView=false"
+ task_list_result_ec=$(echo $(curl -X GET --header "Token-Code: $token" --header "Token-User: hadoop" --header 'Accept: application/json' $task_list_result_url))
+ instance_ec=$(getValueForTasks "$task_list_result_ec" instance)
+ instance_arr_ec=(${instance//:/ })
+ open_log_result_ec=$(get_job_detail_log "$task_list_result_ec")
+ log_detail_ec=$(getValueForDetail "$open_log_result_ec" log)
+ echo -e "$log_detail_ec" >$task_path"/json/JobId-"$job_id"_detail.log"
+
+ local_log_path=$(cat $task_path"/json/JobId-$job_id"_detail.log | grep "$engine_local_log_path")
+ local_log_path_arr=(${local_log_path//:/ })
+ thirdToLastIndex=$((${#local_log_path_arr[@]} - 3))
+ server_name=${local_log_path_arr[thirdToLastIndex]}
+ lastIndex=$((${#local_log_path_arr[@]} - 1))
+ log_path=${local_log_path_arr[lastIndex]}
+
+ echo "【ec服务地址】"$server_name
+ echo "【ec日志路径】"$log_path
+}
+
+option() {
+ while [ -n "$1" ]; do
+ case $1 in
+ -h)
+ help
+ break
+ ;;
+ -help)
+ help
+ break
+ ;;
+ -job)
+ if [ $3 == "info" ]; then
+ info $2
+ elif [ $3 == "log" ]; then
+ log $2
+ elif [ $3 == "eclog" ]; then
+ eclog $2
+ elif [ $3 == "h" ]; then
+ help
+ elif [ $3 == "help" ]; then
+ help
+ else
+ echo $3": unknow command"
+ fi
+ break
+ ;;
+ *)
+ echo $1": unknow option"
+ break
+ ;;
+ esac
+ done
+}
+
+if [ $# -eq 1 ] && [ $1 == "-help" -o $1 == "-h" ]; then
+ option $*
+ exit 1
+fi
+
+if [ $# -eq 3 ]; then
+ option $*
+ exit 1
+fi
+
+# -------------------------------------------------------------------- Pull the logs to local by job id -------------------------------------------------------
+# Argument check
+function check() {
+ if [ $# -ne 1 ]; then
+ echo "Please provide a job id"
+ exit 1
+ fi
+ # Validate the first argument; only digits are allowed
+ expr $1 + 0 &>/dev/null
+ if [ $? -ne 0 ]; then
+ echo "Please provide a valid job id; the job id must be numeric!"
+ exit 1
+ fi
+}
+
+# Check the task status returned by the API
+function check_status() {
+ code=$(getValueForTasks "$task_list_result" status)
+ if [[ -z $code ]]; then
+ echo "Task does not exist, please check the job id"
+ exit 1
+ # Only failed tasks need log analysis
+ elif [[ $code != 'Failed' ]]; then
+ echo "The task completed successfully, no log analysis is needed"
+ exit 1
+ fi
+}
+
+function remote_module_log() {
+ module_ip_map=$1
+ for module_name in ${!module_ip_map[@]}; do
+ hostname=${module_ip_map[$module_name]}
+ if [[ $hostname ]]; then
+ # Invoke the helper script to fetch the remote link logs
+ source $linkisLogToolPath $hostname $job_id $link_log_path 1 $module_name
+
+ if [ $? == 1 ]; then
+ exit 0
+ fi
+
+ print_color_green "Service log path:" "$hostname:$port_id($link_log_path)"
+ print_color_green "Full log saved to:" "$task_path/$job_id/${module_name}_$server_name".log
+ error_log=$(cat $task_path/$job_id/$module_name"_$server_name".log | grep "ERROR")
+ if [[ $error_log ]]; then
+ print_color_green "$module_name error log entries:"
+ fi
+ print_color_red "$error_log"
+ fi
+ done
+}
+
+# Fetch the remote EC logs
+function remote_access_to_ec_logs() {
+ # Get local_log_path
+ local_log_path=$(cat $task_path"/json/$job_id"_detail.json | grep -a "$engine_local_log_path")
+ # e.g. 2023-08-12 12:02:32.002 INFO EngineConn local log path: ServiceInstance(linkis-cg-engineconn, gz.xg.bdpdws110001.webank:21715) /data/bdp/linkis/hadoop/20230812/shell/663e2ca0-f5df-42e1-b0b1-34728373eabc/logs
+ local_log_path_arr=(${local_log_path//:/ })
+ # For the data format, see the log tool script usage doc: http://docs.weoa.com/docs/Jbve3vgjEN8zCpT2
+ # Index of the third-to-last element
+ thirdToLastIndex=$((${#local_log_path_arr[@]} - 3))
+ # Server name
+ server_name=${local_log_path_arr[thirdToLastIndex]}
+ port_id_s=${local_log_path_arr[$((${#local_log_path_arr[@]} - 2))]}
+ port_id=${port_id_s%)*}
+ # Index of the last element
+ lastIndex=$((${#local_log_path_arr[@]} - 1))
+ # Log path
+ log_path=${local_log_path_arr[lastIndex]}
+ title="Step 3: engine request succeeded, the task was submitted to the underlying engine"
+ # Invoke the helper script to fetch the remote EC logs
+ source $linkisLogToolPath $server_name $job_id $log_path 0
+ error_log=$(cat $task_path/$job_id/engineconn_"$server_name".log | grep "ERROR")
+ if [[ $error_log ]]; then
+ print_color_red "$title"
+ print_color_red "对应的引擎日志所在路径为:"$server_name:$port_id"($log_path)"
+ print_color_red "异常日志信息如下:"
+ print_color_red "$(cat $task_path/$job_id/engineconn_"$server_name".log | grep "ERROR")"
+ engine_type_prefix=$(echo "$engine_type" | cut -d '-' -f 1)
+ case $engine_type_prefix in
+ hive)
+ contact_user="$hive_contact"
+ ;;
+ spark)
+ contact_user="$spark_contact"
+ ;;
+ jdbc)
+ contact_user="$jdbc_contact"
+ ;;
+ esac
+ print_color_black "可能是您的代码语法错误或者底层执行异常,可以联系$engine_type_prefix运维人员($contact_user)进行处理"
+ else
+ print_color_green "$title"
+ print_color_green "对应的引擎日志所在路径为:" "$server_name:$port_id($log_path)"
+ fi
+ application_id=$(cat "$task_path/json/$job_id"_detail.json | grep -a -oP 'application_\d+_\d+'| tail -n 1)
+ print_color_green "Yarn appid:" "$application_id"
+ if [ -n "$application_id" ]; then
+ # Check whether doctor_token is empty
+ if [ -z "$doctor_token" ]; then
+ echo "doctor_token is empty, doctor diagnosis cannot be performed"
+ exit 0
+ fi
+ # Check whether doctor_url is empty
+ if [ -z "$doctor_url" ]; then
+ echo "doctor_url is empty, doctor diagnosis cannot be performed"
+ exit 0
+ fi
+ print_color_green "Doctoris diagnosis report:"
+ get_doctor_diagnose $application_id
+ else
+ print_color_black "Yarn appid is empty, skipping Doctoris diagnosis"
+ fi
+}
+
+# Fetch the remote link logs
+function remote_access_to_link_logs() {
+ # Format: "instance": "bdpujes110003:9205",
+ instance=$(getValueForTasks "$task_list_result" instance)
+ instance_arr=(${instance//:/ })
+ servername=${instance_arr[0]}
+ port_id=${instance_arr[1]}
+
+ # ip: bdpdws110002 ,port: 9101 ,serviceKind: linkis-cg-linkismanager ,ip: bdpujes110003 ,port: 9205 ,serviceKind: linkis-cg-entrance
+ error_log_str=$(cat $task_path/json/$job_id"_detail.json" | grep "ERROR")
+ error_log=${error_log_str/\"/}
+ if [[ $error_log ]]; then
+ ip_array=(${error_log//ip: /})
+ ip_contain=",bdp"
+ ip_arr=()
+ # Build the ip array: bdpdws110002 bdpujes110003 bdpdws110002 bdpujes110003
+ for i in "${!ip_array[@]}"; do
+ if [[ ${ip_array[i]} =~ $ip_contain ]]; then
+ ip_s1=${ip_array[i]}
+ ip_s2=${ip_s1/\'/}
+ ip_s3=${ip_s2/,/}
+ ip=${ip_s3/\}/}
+ ip_arr[${#ip_arr[*]} + 1]=$ip
+ fi
+ done
+
+ linkis_cg_contain=",linkis-cg-"
+ module_array=(${error_log//serviceKind: /})
+ # Build the module array: linkis-cg-linkismanager linkis-cg-entrance linkis-cg-linkismanager linkis-cg-entrance
+ module_name_arr=()
+ for i in "${!module_array[@]}"; do
+ if [[ ${module_array[i]} =~ $linkis_cg_contain ]]; then
+ module_name_s1=${module_array[i]}
+ # ,linkis-cg-linkismanager, strip the characters before linkis-cg-
+ module_name_s2=${module_name_s1#*$linkis_cg}
+ module_name_s3=${module_name_s2/\'/}
+ module_name_s4=${module_name_s3/,/}
+ module_name=${module_name_s4/\}/}
+ module_name_arr[${#module_name_arr[*]} + 1]=$linkis_cg$module_name
+ fi
+ done
+ # Build a map: key=module, value=ip
+ declare -A module_ip_map
+ for ((i = 1; i <= ${#module_name_arr[@]}; i++)); do
+ module_name=${module_name_arr[i]}
+ ip=${ip_arr[i]}
+ module_ip_map[$module_name]=$ip
+ done
+ remote_module_log $module_ip_map
+ else
+ declare -A module_ip_map
+ module_ip_map["linkis-cg-entrance"]=$servername
+ module_ip_map["linkis-cg-linkismanager"]=$servername
+ module_ip_map["linkis-ps-publicservice"]=$servername
+ remote_module_log $module_ip_map
+ fi
+}
+
+# Task summary info
+function print_task_info() {
+ # Query statement
+ #execution_code=$(getValueForTasks "$task_list_result" executionCode)
+ # Labels
+ labels=$(getValueForTasks "$task_list_result" labels)
+ engine_type=$(echo "$labels" | jq -r '.[] | select(startswith("engineType:")) | split(":")[1]')
+ user_creator=$(echo "$labels" | jq -r '.[] | select(startswith("userCreator:")) | split(":")[1]')
+ # Status
+ status=$(getValueForTasks "$task_list_result" status)
+ # Elapsed time
+ cost_time=$(getValueForTasks "$task_list_result" costTime)
+ # Creation time
+ yyyy_mm_dd_hh_mm_ss=$(date -d @$created_time_date "+%Y-%m-%d %H:%M:%S")
+ # Entrance instance
+ instance=$(getValueForTasks "$task_list_result" instance)
+ # EC engine instance
+ engine_instance=$(getValueForTasks "$task_list_result" engineInstance)
+ # Request-related config parameters
+ print_color_green "Job id:" "$job_id"
+ # Show only the first 500 characters
+# if [ ${#execution_code} -gt 500 ]; then
+# # Truncate to the first 500 characters and append an ellipsis
+# execution_code="${execution_code:0:500}..."
+# fi
+# print_color_green "Query statement: $execution_code"
+ print_color_green "Labels:" "$user_creator,$engine_type"
+ print_color_green "Status:" "$status"
+ print_color_green "Elapsed (s):" "$(($cost_time / 1000))"
+ print_color_green "Created at:" "$yyyy_mm_dd_hh_mm_ss"
+ print_color_green "Entrance instance:" "$instance"
+ print_color_green "EC engine instance:" "$engine_instance"
+}
+
+# Step 1 prompt info
+function print_step1_echo() {
+ title="Step 1: the task is queued in Linkis"
+ log_info=$(get_log_info "INFO Your job is accepted")
+ check_error_log "$log_info" "$title"
+ print_color_green "$title:" "If your task is stuck here, adjust the concurrency via Console -> Parameter Settings -> max concurrency for the corresponding engine (wds.linkis.rm.instance)"
+ print_color_black "Scheduled at: $(get_time_info "$log_info")"
+ print_color_black "Scheduling Entrance service: $(get_instance_info "$log_info")"
+}
+
+function print_step2_echo() {
+ title="步骤2:任务进入Linkis运行,请求引擎中"
+ log_info=$(get_log_info "INFO Request LinkisManager:EngineAskAsyncResponse")
+ if [ -z "$log_info" ]; then
+ print_color_red "$title"
+ print_color_red "异常日志信息:"
+ print_color_red "$(cat $task_path"/json/$job_id"_detail.json | grep 'ERROR'| uniq)"
+ echo -e "请联系linkis运维人员,进行排查处理"
+ exit 0
+ fi
+ request_time=$(get_time_info "$log_info")
+ linkis_manager_instance=$(get_instance_info "$log_info")
+ log_info=$(get_log_info "INFO Succeed to create new ec")
+ if [ -z "$log_info" ]; then
+ log_info=$(get_log_info "INFO Succeed to reuse ec")
+ if [ ! -z "$log_info" ]; then
+ print_color_green "$title"
+ print_color_black "请求引擎时间:$request_time"
+ print_color_black "请求LinkisManager服务:$linkis_manager_instance"
+ print_color_black "请求引擎为复用引擎:$(get_instance_info "$log_info")"
+ else
+ print_color_red "$title"
+ print_color_red "请求引擎时间:$request_time"
+ print_color_red "请求LinkisManager服务:$linkis_manager_instance"
+ print_color_red "异常日志信息:"
+ print_color_red "$(cat $task_path"/json/$job_id"_detail.json | grep 'ERROR'| uniq)"
+ echo -e "请联系linkis运维人员,进行排查处理"
+ exit 0
+ fi
+ else
+ print_color_green "$title"
+ print_color_black "请求引擎时间:$request_time"
+ print_color_black "请求LinkisManager服务:$linkis_manager_instance"
+ print_color_black "请求引擎为新建引擎:$(get_instance_info "$log_info")"
+ fi
+ log_info=$(get_log_info "$submit_engine_constants")
+ ec_start_time=$(get_time_info "$log_info")
+ duration=$(($(date -d "$ec_start_time" +%s) - $(date -d "$request_time" +%s)))
+ print_color_black "请求引擎耗时: $duration 秒"
+}
+
+function print_step3_echo() {
+ if [[ $open_log_result =~ $submit_engine_constants ]]; then
+ # If the log path exists, pull the EC-side stdout logs by job id
+ if [[ $open_log_result =~ $engine_local_log_path ]]; then
+
+ # Fetch the remote EC logs
+ remote_access_to_ec_logs
+ else
+ # Get task_submit_to_ec_desc
+ task_submit_to_ec_desc=$(cat $task_path"/json/"$job_id"_detail.json" | grep "$submit_engine_constants")
+ print_color_green "The task was submitted to the underlying engine, but the EC log path could not be parsed; please check the EC log manually" "$task_submit_to_ec_desc"
+ fi
+ else
+ print_color_green "Step 3: engine request failed, analyzing service logs"
+ # Fetch the link logs
+ remote_access_to_link_logs
+ fi
+
+}
+
+function check_error_log() {
+ log_info=$1
+ title=$2
+ if [ -z "$log_info" ]; then
+ if [ -n "$title" ]; then
+ print_color_red "$title"
+ fi
+ print_color_red "异常日志信息:"
+ print_color_red "$(cat $task_path"/json/$job_id"_detail.json | grep 'ERROR'| uniq)"
+ echo -e "请联系linkis运维人员,进行排查处理"
+ exit 0
+ fi
+}
+
+function check_task_date() {
+ # Get the task creation time
+ created_time=$(getValueForTasks "$task_list_result" createdTime)
+
+ # Extract the timestamp
+ export created_time_date=$(echo $created_time | cut -b 1-10)
+ export yyyy_mm_dd=$(date -d @$created_time_date "+%Y-%m-%d")
+ export yyy_mm=$(date -d @$created_time_date "+%Y-%m")
+
+ # The gap between now and the task creation time must not exceed 90 days
+ days_between=$((($(date +%s) - $created_time_date) / (24 * 60 * 60)))
+ if [ $days_between -gt 90 ]; then
+ echo "Only tasks from the last 90 days can be queried"
+ exit 1
+ fi
+}
+
+# Argument validation
+check $*
+print_color_green "*** [Task basic info] ***"
+# Task query
+task_list_result=$(get_job_list_by_id)
+# Determine the status from the job list API response
+check_tasks_status
+# Date validation
+check_task_date
+# Display task info
+print_task_info
+# Write the log to a file
+open_log_result=$(get_job_detail_log "$task_list_result")
+log_detail=$(getValueForDetail "$open_log_result" log)
+echo -e $log_detail >$task_path"/json/"$job_id"_detail.json"
+# Display diagnosis info
+print_color_green "*** [Task diagnosis info] ***"
+# Queuing
+print_step1_echo
+# Running
+print_step2_echo
+# Underlying engine
+print_step3_echo
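Hedged usage sketches for the diagnosis tool above; the job id 12345 is only an example:

    # Full diagnosis of a failed job
    sh ./linkis-dist/package/admin/tools/linkis-analyze.sh 12345
    # Show where the per-job log files are written
    sh ./linkis-dist/package/admin/tools/linkis-analyze.sh -job 12345 info
    # Show the EC server address and log path for the job
    sh ./linkis-dist/package/admin/tools/linkis-analyze.sh -job 12345 eclog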
diff --git a/linkis-dist/package/admin/tools/linkis-log-tool.sh b/linkis-dist/package/admin/tools/linkis-log-tool.sh
new file mode 100644
index 00000000000..976d1c7acfb
--- /dev/null
+++ b/linkis-dist/package/admin/tools/linkis-log-tool.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+source /appcom/Install/linkis/conf/linkis-env.sh
+
+if [ "$LINKIS_LOG_DIR" = "" ]; then
+ export LINKIS_LOG_DIR="/data/logs/bdpe-ujes"
+fi
+# Set LINKIS_HOME
+if [ -z "$LINKIS_HOME" ]; then
+ export LINKIS_HOME="$INSTALL_HOME"
+fi
+linkisRemoteEnginePath="$LINKIS_HOME/admin/tools/remote-engine.sh"
+
+server_name=$1
+job_id=$2
+log_path=$3
+option_flag=$4
+module_name=$5
+task_path="$LINKIS_LOG_DIR/task"
+job_path=$task_path/$job_id
+
+if [ ! -d $job_path ]; then
+ mkdir $job_path
+fi
+
+if [ "$option_flag" == "0" ]; then
+ # Fetch the remote EC logs
+ ssh -q hadoop@$server_name 'bash -s' $job_id $log_path $yyy_mm $yyyy_mm_dd "$created_time_date" <$linkisRemoteEnginePath
+
+ # Copy the matched remote EC log to local
+ scp -q -r hadoop@$server_name:$log_path/"$job_id"_engineconn.log $job_path/engineconn_"$server_name".log
+
+ # Remove the remote temporary log
+ ssh -q hadoop@$server_name "rm -f $log_path/$2_engineconn.log"
+elif [ "$option_flag" == "1" ]; then
+ if [ $module_name == "linkis-cg-engineconn" ]; then
+ echo "此版本暂分析不出来,请走人工排查!"
+ exit 1
+ else
+ ssh -q hadoop@$server_name 'bash -s' $job_id $log_path $yyy_mm $yyyy_mm_dd $created_time_date 1 $module_name <$linkisRemoteEnginePath
+
+ scp -q -r hadoop@$server_name:$log_path/"$job_id"_$module_name.log $job_path/$module_name"_"$server_name.log
+
+ ssh -q hadoop@$server_name "rm -f $log_path/$2_$module_name.log"
+ fi
+else
+ echo "暂不支持的日志分析!"
+fi
diff --git a/linkis-dist/package/admin/tools/remote-engine.sh b/linkis-dist/package/admin/tools/remote-engine.sh
new file mode 100644
index 00000000000..4b5baaf5979
--- /dev/null
+++ b/linkis-dist/package/admin/tools/remote-engine.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+job_id=$1
+log_path=$2
+yyy_mm=$3
+yyyy_mm_dd=$4
+created_time_date=$5
+module_name=$7
+
+# Group and write the remote engine logs
+function remote_engine_logs() {
+ cd $log_path
+ echo -e "$(cat ./stdout | grep "JobId-$job_id")" >"./"$job_id"_engineconn.log"
+}
+
+function remote_link_logs() {
+ cd $log_path
+ if [ ! -f "./$module_name.log" ]; then
+ exit 1
+ fi
+ # Get the first log line
+ first_row=$(head -n 1 ./$module_name.log | cut -b 1-23)
+ first_row_timestamp=$(date -d "$first_row" +%s)
+ # If the first line's date is later than the task creation time, the log has already rolled over
+ if [ "$first_row_timestamp" -gt "$created_time_date" ]; then
+ # The link log has rolled over (grep "JobId-$1" must use double quotes here; single quotes work on the command line)
+ # echo -e > writes to a local file; the quotes are required, otherwise the newlines are lost
+ echo -e "$(cat ./$module_name.log ./$yyy_mm/$module_name/* | grep "JobId-$job_id")" >"./"$job_id"_$module_name.log"
+ else
+ # The link log has not rolled over
+ echo -e "$(cat ./$module_name.log | grep "JobId-$job_id")" >"./"$job_id"_$module_name.log"
+ fi
+}
+
+if [ "$6" == "1" ]; then
+ remote_link_logs $*
+else
+ remote_engine_logs $*
+fi
diff --git a/linkis-dist/package/conf/application-eureka.yml b/linkis-dist/package/conf/application-eureka.yml
index 90220a75820..1b75515fb3b 100644
--- a/linkis-dist/package/conf/application-eureka.yml
+++ b/linkis-dist/package/conf/application-eureka.yml
@@ -16,7 +16,9 @@
spring:
application:
name: linkis-mg-eureka
- profiles: eureka
+ config:
+ activate:
+ on-profile: eureka
mvc:
pathmatch:
matching-strategy: ant_path_matcher
@@ -49,4 +51,4 @@ management:
endpoints:
web:
exposure:
- include: refresh,info,health,metrics
\ No newline at end of file
+ include: refresh,info,health,metrics
diff --git a/linkis-dist/package/db/linkis_ddl.sql b/linkis-dist/package/db/linkis_ddl.sql
index 9f9d800ceb6..8356b41dd8f 100644
--- a/linkis-dist/package/db/linkis_ddl.sql
+++ b/linkis-dist/package/db/linkis_ddl.sql
@@ -26,36 +26,29 @@
-- 组合索引建议包含所有字段名,过长的字段名可以采用缩写形式。例如idx_age_name_add
-- 索引名尽量不超过50个字符,命名应该使用小写
-
--- 注意事项
--- 1. TDSQL层面做了硬性规定,对于varchar索引,字段总长度不能超过768个字节,建议组合索引的列的长度根据实际列数值的长度定义,比如身份证号定义长度为varchar(20),不要定位为varchar(100),
--- 同时,由于TDSQL默认采用UTF8字符集,一个字符3个字节,因此,实际索引所包含的列的长度要小于768/3=256字符长度。
--- 2. AOMP 执行sql 语句 create table 可以带反撇号,alter 语句不能带反撇号
--- 3. 使用 alter 添加、修改字段时请带要字符集和排序规则 CHARSET utf8mb4 COLLATE utf8mb4_bin
-
SET FOREIGN_KEY_CHECKS=0;
DROP TABLE IF EXISTS `linkis_ps_configuration_config_key`;
CREATE TABLE `linkis_ps_configuration_config_key`(
- `id` bigint(20) NOT NULL AUTO_INCREMENT,
- `key` varchar(50) DEFAULT NULL COMMENT 'Set key, e.g. spark.executor.instances',
- `description` varchar(200) DEFAULT NULL,
- `name` varchar(50) DEFAULT NULL,
- `default_value` varchar(200) DEFAULT NULL COMMENT 'Adopted when user does not set key',
- `validate_type` varchar(50) DEFAULT NULL COMMENT 'Validate type, one of the following: None, NumInterval, FloatInterval, Include, Regex, OPF, Custom Rules',
- `validate_range` varchar(150) DEFAULT NULL COMMENT 'Validate range',
- `engine_conn_type` varchar(50) DEFAULT '' COMMENT 'engine type,such as spark,hive etc',
- `is_hidden` tinyint(1) DEFAULT NULL COMMENT 'Whether it is hidden from user. If set to 1(true), then user cannot modify, however, it could still be used in back-end',
- `is_advanced` tinyint(1) DEFAULT NULL COMMENT 'Whether it is an advanced parameter. If set to 1(true), parameters would be displayed only when user choose to do so',
- `level` tinyint(1) DEFAULT NULL COMMENT 'Basis for displaying sorting in the front-end. Higher the level is, higher the rank the parameter gets',
- `treeName` varchar(20) DEFAULT NULL COMMENT 'Reserved field, representing the subdirectory of engineType',
- `boundary_type` tinyint(2) NOT NULL DEFAULT '0' COMMENT '0 none/ 1 with mix /2 with max / 3 min and max both',
- `en_description` varchar(200) DEFAULT NULL COMMENT 'english description',
- `en_name` varchar(100) DEFAULT NULL COMMENT 'english name',
- `en_treeName` varchar(100) DEFAULT NULL COMMENT 'english treeName',
- `template_required` tinyint(1) DEFAULT 0 COMMENT 'template required 0 none / 1 must',
- UNIQUE INDEX `uniq_key_ectype` (`key`,`engine_conn_type`),
- PRIMARY KEY (`id`)
+ `id` bigint(20) NOT NULL AUTO_INCREMENT,
+ `key` varchar(50) DEFAULT NULL COMMENT 'Set key, e.g. spark.executor.instances',
+ `description` varchar(200) DEFAULT NULL,
+ `name` varchar(50) DEFAULT NULL,
+ `default_value` varchar(200) DEFAULT NULL COMMENT 'Adopted when user does not set key',
+ `validate_type` varchar(50) DEFAULT NULL COMMENT 'Validate type, one of the following: None, NumInterval, FloatInterval, Include, Regex, OPF, Custom Rules',
+ `validate_range` varchar(150) DEFAULT NULL COMMENT 'Validate range',
+ `engine_conn_type` varchar(50) DEFAULT '' COMMENT 'engine type,such as spark,hive etc',
+ `is_hidden` tinyint(1) DEFAULT NULL COMMENT 'Whether it is hidden from user. If set to 1(true), then user cannot modify, however, it could still be used in back-end',
+ `is_advanced` tinyint(1) DEFAULT NULL COMMENT 'Whether it is an advanced parameter. If set to 1(true), parameters would be displayed only when user choose to do so',
+ `level` tinyint(1) DEFAULT NULL COMMENT 'Basis for displaying sorting in the front-end. Higher the level is, higher the rank the parameter gets',
+ `treeName` varchar(20) DEFAULT NULL COMMENT 'Reserved field, representing the subdirectory of engineType',
+ `boundary_type` TINYINT(2) NULL DEFAULT '0' COMMENT '0 none/ 1 with mix /2 with max / 3 min and max both',
+ `en_description` varchar(200) DEFAULT NULL COMMENT 'english description',
+ `en_name` varchar(100) DEFAULT NULL COMMENT 'english name',
+ `en_treeName` varchar(100) DEFAULT NULL COMMENT 'english treeName',
+ `template_required` tinyint(1) DEFAULT 0 COMMENT 'template required 0 none / 1 must',
+ UNIQUE INDEX `uniq_key_ectype` (`key`,`engine_conn_type`),
+ PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
@@ -92,62 +85,7 @@ CREATE TABLE `linkis_ps_configuration_category` (
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
UNIQUE INDEX `uniq_label_id` (`label_id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
-
-DROP TABLE IF EXISTS `linkis_ps_configuration_template_config_key`;
-CREATE TABLE IF NOT EXISTS `linkis_ps_configuration_template_config_key` (
- `id` BIGINT(20) NOT NULL AUTO_INCREMENT,
- `template_name` VARCHAR(200) NOT NULL COMMENT 'Configuration template name redundant storage',
- `template_uuid` VARCHAR(36) NOT NULL COMMENT 'uuid template id recorded by the third party',
- `key_id` BIGINT(20) NOT NULL COMMENT 'id of linkis_ps_configuration_config_key',
- `config_value` VARCHAR(200) NULL DEFAULT NULL COMMENT 'configuration value',
- `max_value` VARCHAR(50) NULL DEFAULT NULL COMMENT 'upper limit value',
- `min_value` VARCHAR(50) NULL DEFAULT NULL COMMENT 'Lower limit value (reserved)',
- `validate_range` VARCHAR(50) NULL DEFAULT NULL COMMENT 'Verification regularity (reserved)',
- `is_valid` VARCHAR(2) DEFAULT 'Y' COMMENT 'Is it valid? Reserved Y/N',
- `create_by` VARCHAR(50) NOT NULL COMMENT 'Creator',
- `create_time` DATETIME DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
- `update_by` VARCHAR(50) NULL DEFAULT NULL COMMENT 'Update by',
- `update_time` DATETIME DEFAULT CURRENT_TIMESTAMP COMMENT 'update time',
- PRIMARY KEY (`id`),
- UNIQUE INDEX `uniq_tid_kid` (`template_uuid`, `key_id`),
- UNIQUE INDEX `uniq_tname_kid` (`template_uuid`, `key_id`)
- )ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
-
-DROP TABLE IF EXISTS `linkis_ps_configuration_key_limit_for_user`;
-CREATE TABLE IF NOT EXISTS `linkis_ps_configuration_key_limit_for_user` (
- `id` BIGINT(20) NOT NULL AUTO_INCREMENT,
- `user_name` VARCHAR(50) NOT NULL COMMENT 'username',
- `combined_label_value` VARCHAR(128) NOT NULL COMMENT 'Combined label combined_userCreator_engineType such as hadoop-IDE,spark-2.4.3',
- `key_id` BIGINT(20) NOT NULL COMMENT 'id of linkis_ps_configuration_config_key',
- `config_value` VARCHAR(200) NULL DEFAULT NULL COMMENT 'configuration value',
- `max_value` VARCHAR(50) NULL DEFAULT NULL COMMENT 'upper limit value',
- `min_value` VARCHAR(50) NULL DEFAULT NULL COMMENT 'Lower limit value (reserved)',
- `latest_update_template_uuid` VARCHAR(36) NOT NULL COMMENT 'uuid template id recorded by the third party',
- `is_valid` VARCHAR(2) DEFAULT 'Y' COMMENT 'Is it valid? Reserved Y/N',
- `create_by` VARCHAR(50) NOT NULL COMMENT 'Creator',
- `create_time` DATETIME DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
- `update_by` VARCHAR(50) NULL DEFAULT NULL COMMENT 'Update by',
- `update_time` DATETIME DEFAULT CURRENT_TIMESTAMP COMMENT 'update time',
- PRIMARY KEY (`id`),
- UNIQUE INDEX `uniq_com_label_kid` (`combined_label_value`, `key_id`)
-)ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
-
-DROP TABLE IF EXISTS `linkis_ps_configutation_lm_across_cluster_rule`;
-CREATE TABLE IF NOT EXISTS linkis_ps_configutation_lm_across_cluster_rule (
- id INT AUTO_INCREMENT COMMENT 'Rule ID, auto-increment primary key',
- cluster_name char(32) NOT NULL COMMENT 'Cluster name, cannot be empty',
- creator char(32) NOT NULL COMMENT 'Creator, cannot be empty',
- username char(32) NOT NULL COMMENT 'User, cannot be empty',
- create_time datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Creation time, cannot be empty',
- create_by char(32) NOT NULL COMMENT 'Creator, cannot be empty',
- update_time datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Modification time, cannot be empty',
- update_by char(32) NOT NULL COMMENT 'Updater, cannot be empty',
- rules varchar(256) NOT NULL COMMENT 'Rule content, cannot be empty',
- is_valid VARCHAR(2) DEFAULT 'N' COMMENT 'Is it valid Y/N',
- PRIMARY KEY (id),
- UNIQUE KEY idx_creator_username (creator, username)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;
--
-- New linkis job
@@ -256,7 +194,7 @@ DROP TABLE IF EXISTS `linkis_ps_udf_tree`;
CREATE TABLE `linkis_ps_udf_tree` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`parent` bigint(20) NOT NULL,
- `name` varchar(50) DEFAULT NULL COMMENT 'Category name of the function. It would be displayed in the front-end',
+ `name` varchar(100) DEFAULT NULL COMMENT 'Category name of the function. It would be displayed in the front-end',
`user_name` varchar(50) NOT NULL,
`description` varchar(255) DEFAULT NULL,
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
@@ -520,15 +458,15 @@ CREATE TABLE `linkis_ps_cs_context_id` (
`source` varchar(255) DEFAULT NULL,
`expire_type` varchar(32) DEFAULT NULL,
`expire_time` datetime DEFAULT NULL,
- `instance` varchar(64) DEFAULT NULL,
- `backup_instance` varchar(64) DEFAULT NULL,
+ `instance` varchar(128) DEFAULT NULL,
+ `backup_instance` varchar(255) DEFAULT NULL,
`update_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'update unix timestamp',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
`access_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'last access time',
PRIMARY KEY (`id`),
- KEY `idx_instance` (`instance`),
- KEY `idx_backup_instance` (`backup_instance`),
- KEY `idx_instance_bin` (`instance`,`backup_instance`)
+ KEY `idx_instance` (`instance`(128)),
+ KEY `idx_backup_instance` (`backup_instance`(191)),
+ KEY `idx_instance_bin` (`instance`(128),`backup_instance`(128))
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
-- ----------------------------
@@ -814,7 +752,7 @@ DROP TABLE IF EXISTS `linkis_cg_manager_label`;
CREATE TABLE `linkis_cg_manager_label` (
`id` int(20) NOT NULL AUTO_INCREMENT,
`label_key` varchar(32) COLLATE utf8_bin NOT NULL,
- `label_value` varchar(128) COLLATE utf8_bin NOT NULL,
+ `label_value` varchar(255) COLLATE utf8_bin NOT NULL,
`label_feature` varchar(16) COLLATE utf8_bin NOT NULL,
`label_value_size` int(20) NOT NULL,
`update_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
@@ -826,7 +764,7 @@ CREATE TABLE `linkis_cg_manager_label` (
DROP TABLE IF EXISTS `linkis_cg_manager_label_value_relation`;
CREATE TABLE `linkis_cg_manager_label_value_relation` (
`id` int(20) NOT NULL AUTO_INCREMENT,
- `label_value_key` varchar(128) COLLATE utf8_bin NOT NULL,
+ `label_value_key` varchar(255) COLLATE utf8_bin NOT NULL,
`label_value_content` varchar(255) COLLATE utf8_bin DEFAULT NULL,
`label_id` int(20) DEFAULT NULL,
`update_time` datetime DEFAULT CURRENT_TIMESTAMP,
@@ -850,11 +788,11 @@ CREATE TABLE `linkis_cg_manager_label_resource` (
DROP TABLE IF EXISTS `linkis_cg_ec_resource_info_record`;
CREATE TABLE `linkis_cg_ec_resource_info_record` (
`id` INT(20) NOT NULL AUTO_INCREMENT,
- `label_value` VARCHAR(128) NOT NULL COMMENT 'ec labels stringValue',
+ `label_value` VARCHAR(255) NOT NULL COMMENT 'ec labels stringValue',
`create_user` VARCHAR(128) NOT NULL COMMENT 'ec create user',
`service_instance` varchar(128) COLLATE utf8_bin DEFAULT NULL COMMENT 'ec instance info',
`ecm_instance` varchar(128) COLLATE utf8_bin DEFAULT NULL COMMENT 'ecm instance info ',
- `ticket_id` VARCHAR(36) NOT NULL COMMENT 'ec ticket id',
+ `ticket_id` VARCHAR(100) NOT NULL COMMENT 'ec ticket id',
`status` varchar(50) DEFAULT NULL COMMENT 'EC status: Starting,Unlock,Locked,Idle,Busy,Running,ShuttingDown,Failed,Success',
`log_dir_suffix` varchar(128) COLLATE utf8_bin DEFAULT NULL COMMENT 'log path',
`request_times` INT(8) COMMENT 'resource request times',
@@ -1335,4 +1273,23 @@ CREATE TABLE `linkis_ps_python_module_info` (
 `create_time` datetime NOT NULL COMMENT 'create time',
 `update_time` datetime NOT NULL COMMENT 'update time',
 PRIMARY KEY (`id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT='Python module package info table';
\ No newline at end of file
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT='Python module package info table';
+
+-- ----------------------------
+-- Table structure for linkis_org_user_sync
+-- ----------------------------
+DROP TABLE IF EXISTS `linkis_org_user_sync`;
+CREATE TABLE `linkis_org_user_sync` (
+ `cluster_code` varchar(16) COMMENT 'cluster',
+ `user_type` varchar(64) COMMENT 'user type',
+ `user_name` varchar(128) COMMENT 'authorized user',
+ `org_id` varchar(16) COMMENT 'department ID',
+ `org_name` varchar(64) COMMENT 'department name',
+ `queue_name` varchar(64) COMMENT 'default resource queue',
+ `db_name` varchar(64) COMMENT 'default database',
+ `interface_user` varchar(64) COMMENT 'contact person',
+ `is_union_analyse` varchar(64) COMMENT 'whether the user is a joint-analysis user',
+ `create_time` varchar(64) COMMENT 'user creation time',
+ `user_itsm_no` varchar(64) COMMENT 'user creation ticket number',
+ PRIMARY KEY (`user_name`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT='user department statistics INC table';
\ No newline at end of file
diff --git a/linkis-dist/package/db/upgrade/1.4.1_schema/mysql/linkis_ddl.sql b/linkis-dist/package/db/upgrade/1.4.1_schema/mysql/linkis_ddl.sql
new file mode 100644
index 00000000000..33f663e5983
--- /dev/null
+++ b/linkis-dist/package/db/upgrade/1.4.1_schema/mysql/linkis_ddl.sql
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+
+ALTER TABLE linkis_ps_udf_user_load ADD CONSTRAINT uniq_uid_uname UNIQUE (`udf_id`, `user_name`);
+ALTER TABLE linkis_ps_bml_resources ADD CONSTRAINT uniq_rid_eflag UNIQUE (`resource_id`, `enable_flag`);
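+-- Assumption: ADD CONSTRAINT ... UNIQUE fails if the target table already holds duplicate rows
+-- for these columns, so existing data may need de-duplication before this upgrade script is run.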
+
+
+ALTER TABLE linkis_ps_configuration_config_key ADD UNIQUE uniq_key_ectype (`key`,`engine_conn_type`);
+
+ALTER TABLE linkis_ps_configuration_config_key modify column engine_conn_type varchar(50) DEFAULT '' COMMENT 'engine type,such as spark,hive etc';
+
+ALTER TABLE linkis_ps_common_lock ADD COLUMN locker VARCHAR(255) NOT NULL COMMENT 'locker';
+
+ALTER TABLE linkis_ps_configuration_config_key ADD COLUMN template_required tinyint(1) DEFAULT 0 COMMENT 'template required 0 none / 1 must';
+
+ALTER TABLE linkis_ps_configuration_config_value modify COLUMN config_value varchar(500);
+
+
+-- ----------------------------
+-- Table structure for linkis_org_user
+-- ----------------------------
+DROP TABLE IF EXISTS `linkis_org_user`;
+CREATE TABLE `linkis_org_user` (
+ `cluster_code` varchar(16) COMMENT 'cluster',
+ `user_type` varchar(64) COMMENT 'user type',
+ `user_name` varchar(128) COMMENT 'authorized user',
+ `org_id` varchar(16) COMMENT 'department ID',
+ `org_name` varchar(64) COMMENT 'department name',
+ `queue_name` varchar(64) COMMENT 'default resource queue',
+ `db_name` varchar(64) COMMENT 'default database',
+ `interface_user` varchar(64) COMMENT 'contact person',
+ `is_union_analyse` varchar(64) COMMENT 'whether the user is a joint-analysis user',
+ `create_time` varchar(64) COMMENT 'user creation time',
+ `user_itsm_no` varchar(64) COMMENT 'user creation ticket number',
+ PRIMARY KEY (`user_name`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT='user department statistics INC table';
+-- ----------------------------
+-- Table structure for linkis_cg_tenant_department_config
+-- ----------------------------
+DROP TABLE IF EXISTS `linkis_cg_tenant_department_config`;
+CREATE TABLE `linkis_cg_tenant_department_config` (
+ `id` int(20) NOT NULL AUTO_INCREMENT COMMENT 'ID',
+ `creator` varchar(50) COLLATE utf8_bin NOT NULL COMMENT 'application',
+ `department` varchar(64) COLLATE utf8_bin NOT NULL COMMENT 'department name',
+ `department_id` varchar(16) COLLATE utf8_bin NOT NULL COMMENT 'department ID',
+ `tenant_value` varchar(128) COLLATE utf8_bin NOT NULL COMMENT 'department tenant label',
+ `create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
+ `update_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'update time',
+ `create_by` varchar(50) COLLATE utf8_bin NOT NULL COMMENT 'create user',
+ `is_valid` varchar(1) COLLATE utf8_bin NOT NULL DEFAULT 'Y' COMMENT 'is valid',
+ PRIMARY KEY (`id`),
+ UNIQUE KEY `uniq_creator_department` (`creator`,`department`)
+) ENGINE=InnoDB AUTO_INCREMENT=0 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
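+-- uniq_creator_department above allows at most one tenant mapping per (creator, department) pair.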
+
+ALTER TABLE linkis_cg_tenant_label_config ADD COLUMN is_valid varchar(1) CHARSET utf8mb4 COLLATE utf8mb4_bin DEFAULT 'Y' COMMENT 'is valid';
+
+ALTER TABLE linkis_cg_manager_service_instance_metrics ADD COLUMN description varchar(256) CHARSET utf8mb4 COLLATE utf8mb4_bin DEFAULT '';
+ALTER TABLE linkis_ps_bml_resources_task ADD CONSTRAINT uniq_rid_version UNIQUE (`resource_id`, `version`);
+ALTER TABLE linkis_cg_ec_resource_info_record ADD UNIQUE INDEX uniq_sinstance_status_cuser_ctime (`service_instance`, `status`, `create_user`, `create_time`);
+
+ALTER TABLE linkis_cg_manager_service_instance ADD COLUMN params text COLLATE utf8_bin DEFAULT NULL;
+
+-- ----------------------------
+-- Table structure for linkis_org_user_sync
+-- ----------------------------
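+-- Assumption: this table mirrors linkis_org_user and acts as an incremental sync/staging copy,
+-- inferred from the table name and the 'INC' wording in the table comment.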
+DROP TABLE IF EXISTS `linkis_org_user_sync`;
+CREATE TABLE `linkis_org_user_sync` (
+ `cluster_code` varchar(16) COMMENT 'cluster',
+ `user_type` varchar(64) COMMENT 'user type',
+ `user_name` varchar(128) COMMENT 'authorized user',
+ `org_id` varchar(16) COMMENT 'department ID',
+ `org_name` varchar(64) COMMENT 'department name',
+ `queue_name` varchar(64) COMMENT 'default resource queue',
+ `db_name` varchar(64) COMMENT 'default database',
+ `interface_user` varchar(64) COMMENT 'contact person',
+ `is_union_analyse` varchar(64) COMMENT 'whether the user is a joint-analysis user',
+ `create_time` varchar(64) COMMENT 'user creation time',
+ `user_itsm_no` varchar(64) COMMENT 'user creation ticket number',
+ PRIMARY KEY (`user_name`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT='user department statistics INC table';
\ No newline at end of file
diff --git a/linkis-dist/release-docs/NOTICE b/linkis-dist/release-docs/NOTICE
index bd6807f7122..4a68b8fd1a9 100644
--- a/linkis-dist/release-docs/NOTICE
+++ b/linkis-dist/release-docs/NOTICE
@@ -150,7 +150,7 @@ This product includes software developed by
The Apache Software Foundation (http://www.apache.org/).
--------------------------------------------------------------------------------
-src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java contains
+src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java contains
test data from http://aspell.sourceforge.net/test/batch0.tab.
Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org). Verbatim copying
@@ -409,14 +409,14 @@ and are placed here unaltered.
(C) Copyright 1997,2004 International Business Machines Corporation. All rights reserved.
-(C) Copyright IBM Corp. 2003.
+(C) Copyright IBM Corp. 2003.
=========================================================================
-The portion of the functionTests under 'nist' was originally
-developed by the National Institute of Standards and Technology (NIST),
+The portion of the functionTests under 'nist' was originally
+developed by the National Institute of Standards and Technology (NIST),
an agency of the United States Department of Commerce, and adapted by
International Business Machines Corporation in accordance with the NIST
Software Acknowledgment and Redistribution document at
@@ -502,13 +502,13 @@ Some data files (under analysis/icu/src/data) are derived from Unicode data such
as the Unicode Character Database. See http://unicode.org/copyright.html for more
details.
-Brics Automaton (under core/src/java/org/apache/lucene/util/automaton) is
+Brics Automaton (under core/src/java/org/apache/lucene/util/automaton) is
BSD-licensed, created by Anders Møller. See http://www.brics.dk/automaton/
The levenshtein automata tables (under core/src/java/org/apache/lucene/util/automaton) were
automatically generated with the moman/finenight FSA library, created by
Jean-Philippe Barrette-LaPierre. This library is available under an MIT license,
-see http://sites.google.com/site/rrettesite/moman and
+see http://sites.google.com/site/rrettesite/moman and
http://bitbucket.org/jpbarrette/moman/overview/
The class org.apache.lucene.util.WeakIdentityMap was derived from
@@ -562,7 +562,7 @@ analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseLightStemmer.ja
analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLightStemmer.java
analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishLightStemmer.java
-The Stempel analyzer (stempel) includes BSD-licensed software developed
+The Stempel analyzer (stempel) includes BSD-licensed software developed
by the Egothor project http://egothor.sf.net/, created by Leo Galambos, Martin Kvapil,
and Edmond Nolan.
@@ -574,8 +574,8 @@ See http://project.carrot2.org/license.html.
The SmartChineseAnalyzer source code (smartcn) was
provided by Xiaoping Gao and copyright 2009 by www.imdict.net.
-WordBreakTestUnicode_*.java (under modules/analysis/common/src/test/)
-is derived from Unicode data such as the Unicode Character Database.
+WordBreakTestUnicode_*.java (under modules/analysis/common/src/test/)
+is derived from Unicode data such as the Unicode Character Database.
See http://unicode.org/copyright.html for more details.
The Morfologik analyzer (morfologik) includes BSD-licensed software
@@ -1444,9 +1444,9 @@ Jayasoft SARL (http://www.jayasoft.fr/)
and are licensed to the Apache Software Foundation under the
"Software Grant License Agreement"
-SSH and SFTP support is provided by the JCraft JSch package,
+SSH and SFTP support is provided by the JCraft JSch package,
which is open source software, available under
-the terms of a BSD style license.
+the terms of a BSD style license.
The original software and related information is available
at http://www.jcraft.com/jsch/.
@@ -1933,13 +1933,13 @@ https://glassfish.dev.java.net/nonav/public/CDDL+GPL.html
Oracle OpenJDK
If ALPN is used to negotiate HTTP/2 connections, then the following
-artifacts may be included in the distribution or downloaded when ALPN
-module is selected.
+artifacts may be included in the distribution or downloaded when ALPN
+module is selected.
* java.sun.security.ssl
These artifacts replace/modify OpenJDK classes. The modififications
-are hosted at github and both modified and original are under GPL v2 with
+are hosted at github and both modified and original are under GPL v2 with
classpath exceptions.
http://openjdk.java.net/legal/gplv2+ce.html
@@ -1966,7 +1966,7 @@ org.apache.taglibs:taglibs-standard-impl
------
MortBay
-The following artifacts are ASL2 licensed. Based on selected classes from
+The following artifacts are ASL2 licensed. Based on selected classes from
following Apache Tomcat jars, all ASL2 licensed.
org.mortbay.jasper:apache-jsp
@@ -2370,7 +2370,7 @@ This product contains the Piccolo XML Parser for Java
This product contains the chunks_parse_cmds.tbl file from the vsdump program.
Copyright (C) 2006-2007 Valek Filippov (frob@df.ru)
-This product contains parts of the eID Applet project
+This product contains parts of the eID Applet project
and .
Copyright (c) 2009-2014
FedICT (federal ICT department of Belgium), e-Contract.be BVBA (https://www.e-contract.be),
@@ -3497,3 +3497,642 @@ Copyright 2021-2022 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
+
+-------------------------------------- Azure SDK for Java NOTICE --------------------------------------
+NOTICES AND INFORMATION
+Do Not Translate or Localize
+
+This software incorporates material from third parties. Microsoft makes certain
+open source code available at https://3rdpartysource.microsoft.com, or you may
+send a check or money order for US $5.00, including the product name, the open
+source component name, and version number, to:
+
+Source Code Compliance Team
+Microsoft Corporation
+One Microsoft Way
+Redmond, WA 98052
+USA
+
+Notwithstanding any other terms, you may reverse engineer this software to the
+extent required to debug changes to any libraries licensed under the GNU Lesser
+General Public License.
+
+------------------------------------------------------------------------------
+
+Azure SDK for Java uses third-party libraries or other resources that may be
+distributed under licenses different than the Azure SDK for Java software.
+
+In the event that we accidentally failed to list a required notice, please
+bring it to our attention. Post an issue or email us:
+
+ azjavasdkhelp@microsoft.com
+
+The attached notices are provided for information only.
+
+License notice for Hamcrest
+------------------------------------------------------------------------------
+
+The 3-Clause BSD License
+
+Copyright (c) 2000-2015 www.hamcrest.org
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this list of
+conditions and the following disclaimer. Redistributions in binary form must reproduce
+the above copyright notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the distribution.
+
+Neither the name of Hamcrest nor the names of its contributors may be used to endorse
+or promote products derived from this software without specific prior written
+permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
+WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
+
+License notice for Slf4j API
+------------------------------------------------------------------------------
+
+ Copyright (c) 2004-2017 QOS.ch
+ All rights reserved.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+License notice for Slf4j Simple
+------------------------------------------------------------------------------
+
+ Copyright (c) 2004-2017 QOS.ch
+ All rights reserved.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+License notice for Guava (https://github.com/google/guava)
+------------------------------------------------------------------------------
+
+Copyright (C) 2010 The Guava Authors
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+in compliance with the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the License
+is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+or implied. See the License for the specific language governing permissions and limitations under
+the License.
+
+License notice for Netty
+------------------------------------------------------------------------------
+
+Copyright 2014 The Netty Project
+
+The Netty Project licenses this file to you under the Apache License,
+version 2.0 (the "License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at:
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+License for the specific language governing permissions and limitations
+under the License.
+
+License notice for JUG Java Uuid Generator
+------------------------------------------------------------------------------
+
+JUG Java Uuid Generator
+
+Copyright (c) 2002- Tatu Saloranta, tatu.saloranta@iki.fi
+
+Licensed under the License specified in the file LICENSE which is
+included with the source code.
+You may not use this file except in compliance with the License.
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+
+License notice for Jackson
+------------------------------------------------------------------------------
+
+Copyright (c) 2007 Jackson Project
+
+Jackson-annotations (http://github.com/FasterXML/jackson
+Jackson-core (https://github.com/FasterXML/jackson-core
+jackson-databind (http://github.com/FasterXML/jackson
+Jackson-dataformat-XML (https://github.com/FasterXML/jackson-dataformat-xml)
+Jackson datatype: JSR310 (https://github.com/FasterXML/jackson-modules-java8/jackson-datatype-jsr310)
+Jackson module: Afterburner (https://github.com/FasterXML/jackson-modules-base)
+Jackson module: JAXB Annotations (https://github.com/FasterXML/jackson-modules-base)
+Woodstox (https://github.com/FasterXML/woodstox)
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+in compliance with the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the License
+is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+or implied. See the License for the specific language governing permissions and limitations under
+the License.
+
+
+License notice for Metrics Core
+------------------------------------------------------------------------------
+
+Copyright (c) 2010-2013 Coda Hale, Yammer.com, 2014-2020 Dropwizard Team
+
+Metrics Core (https://github.com/dropwizard/metrics)
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+in compliance with the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the License
+is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+or implied. See the License for the specific language governing permissions and limitations under
+the License.
+
+License notice for micrometer-core
+------------------------------------------------------------------------------
+
+Copyright (c) 2017-Present VMware, Inc. All Rights Reserved.
+
+micrometer-core (https://github.com/micrometer-metrics/micrometer)
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+in compliance with the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the License
+is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+or implied. See the License for the specific language governing permissions and limitations under
+the License.
+
+
+License notice for project Reactor
+------------------------------------------------------------------------------
+
+Copyright (c) 2011-2017 Pivotal Software Inc, All Rights Reserved.
+
+Non-Blocking Reactive Foundation for the JVM (https://github.com/reactor/reactor-core)
+reactor-scala-extensions (https://github.com/reactor/reactor-scala-extensions)
+Reactive Streams Netty driver (https://github.com/reactor/reactor-netty)
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+in compliance with the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the License
+is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+or implied. See the License for the specific language governing permissions and limitations under
+the License.
+
+
+License notice for JavaBeans Activation Framework API
+------------------------------------------------------------------------------
+
+JavaBeans Activation Framework API jar (https://github.com/eclipse-ee4j/jaf/jakarta.activation-api)
+
+Eclipse Distribution License - v 1.0
+Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors.
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation and/or other materials
+provided with the distribution.
+Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to
+endorse or promote products derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+License notice for jakarta.xml.bind-api
+------------------------------------------------------------------------------
+
+jakarta.xml.bind-api (https://github.com/eclipse-ee4j/jaxb-api/jakarta.xml.bind-api)
+
+Eclipse Distribution License - v 1.0
+Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors.
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation and/or other materials
+provided with the distribution.
+Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to
+endorse or promote products derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+License notice for jakarta.xml.bind-api
+------------------------------------------------------------------------------
+
+Stax2 API (http://github.com/FasterXML/stax2-api)
+Copyright (c) 2008 FasterXML LLC info@fasterxml.com
+
+This source code is licensed under standard BSD license, which is compatible with all Free and Open Software (OSS) licenses.
+
+
+License notice for HdrHistogram
+------------------------------------------------------------------------------
+HdrHistogram (http://hdrhistogram.github.io/HdrHistogram/)
+
+The code in this repository code was Written by Gil Tene, Michael Barker,
+and Matt Warren, and released to the public domain, as explained at
+http://creativecommons.org/publicdomain/zero/1.0/
+
+For users of this code who wish to consume it under the "BSD" license
+rather than under the public domain or CC0 contribution text mentioned
+above, the code found under this directory is *also* provided under the
+following license (commonly referred to as the BSD 2-Clause License). This
+license does not detract from the above stated release of the code into
+the public domain, and simply represents an additional license granted by
+the Author.
+
+-----------------------------------------------------------------------------
+** Beginning of "BSD 2-Clause License" text. **
+
+ Copyright (c) 2012, 2013, 2014, 2015, 2016 Gil Tene
+ Copyright (c) 2014 Michael Barker
+ Copyright (c) 2014 Matt Warren
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ THE POSSIBILITY OF SUCH DAMAGE.
+
+
+License notice for LatencyUtils
+------------------------------------------------------------------------------
+
+LatencyUtils (http://latencyutils.github.io/LatencyUtils/)
+
+ * This code was Written by Gil Tene of Azul Systems, and released to the
+ * public domain, as explained at http://creativecommons.org/publicdomain/zero/1.0/
+
+ For users of this code who wish to consume it under the "BSD" license
+ rather than under the public domain or CC0 contribution text mentioned
+ above, the code found under this directory is *also* provided under the
+ following license (commonly referred to as the BSD 2-Clause License). This
+ license does not detract from the above stated release of the code into
+ the public domain, and simply represents an additional license granted by
+ the Author.
+
+ -----------------------------------------------------------------------------
+ ** Beginning of "BSD 2-Clause License" text. **
+
+ Copyright (c) 2012, 2013, 2014 Gil Tene
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ THE POSSIBILITY OF SUCH DAMAGE.
+
+
+License notice for reactive-streams
+------------------------------------------------------------------------------
+
+reactive-streams (http://www.reactive-streams.org/)
+
+Copyright Statement for Contributions to the Reactive Streams Project
+=====================================================================
+
+I hereby represent that all present, past and future contributions I make to
+the Reactive Streams project (which includes all repositories owned by the
+“reactive-streams” github organization) are governed by the Creative Commons
+Zero 1.0 Universal copyright statement, placing my contributions in the public
+domain. This entails that to the extent possible under law I waive all
+copyright and related or neighboring rights to the code or documents I
+contribute. I also represent that I have the authority to perform the above
+waiver with respect to the entirety of my contributions.
+
+The text of the copyright statement is included in the COPYING file at the root
+of the reactive-streams repository at
+https://github.com/reactive-streams/reactive-streams-jvm/blob/master/COPYING.
+
+Underwriting parties:
+
+github name | Real Name, Email Address used for git commits, Company
+---------------+----------------------------------------------------------------------------
+rkuhn | Roland Kuhn, rk@rkuhn.info, Typesafe Inc.
+benjchristensen| Ben Christensen, benjchristensen@gmail.com, Netflix Inc.
+viktorklang | Viktor Klang, viktor.klang@gmail.com, Typesafe Inc.
+smaldini | Stephane Maldini, stephane.maldini@gmail.com, Pivotal Software Inc.
+savulchik | Stanislav Savulchik, s.savulchik@gmail.com
+ktoso | Konrad Malawski, konrad.malawski@project13.pl, Typesafe Inc.
+ouertani | Slim Ouertani, ouertani@gmail.com
+2m | Martynas Mickevičius, mmartynas@gmail.com, Typesafe Inc.
+ldaley | Luke Daley, luke.daley@gradleware.com, Gradleware Inc.
+colinrgodsey | Colin Godsey, crgodsey@gmail.com, MediaMath Inc.
+davidmoten | Dave Moten, davidmoten@gmail.com
+briantopping | Brian Topping, brian.topping@gmail.com, Mauswerks LLC
+rstoyanchev | Rossen Stoyanchev, rstoyanchev@pivotal.io, Pivotal
+BjornHamels | Björn Hamels, bjorn@hamels.nl
+JakeWharton | Jake Wharton, jakewharton@gmail.com
+anthonyvdotbe | Anthony Vanelverdinghe, anthonyv.be@outlook.com
+seratch | Kazuhiro Sera, seratch@gmail.com, SmartNews, Inc.
+akarnokd | David Karnok, akarnokd@gmail.com
+egetman | Evgeniy Getman, getman.eugene@gmail.com
+patriknw | Patrik Nordwall, patrik.nordwall@gmail.com, Lightbend Inc
+angelsanz | Ángel Sanz, angelsanz@users.noreply.github.com
+shenghaiyang | 盛海洋, shenghaiyang@aliyun.com
+kiiadi | Kyle Thomson, kylthoms@amazon.com, Amazon.com
+jroper | James Roper, james@jazzy.id.au, Lightbend Inc.
+olegdokuka | Oleh Dokuka, shadowgun@.i.ua, Netifi Inc.
+Scottmitch | Scott Mitchell, scott_mitchell@apple.com, Apple Inc.
+retronym | Jason Zaugg, jzaugg@gmail.com, Lightbend Inc.
+
+Licensed under Public Domain (CC0)
+
+To the extent possible under law, the person who associated CC0 with
+this code has waived all copyright and related or neighboring
+rights to this code.
+
+You should have received a copy of the CC0 legalcode along with this
+work. If not, see .
+
+License notice for Bouncy Castle
+------------------------------------------------------------------------------
+
+Copyright (c) 2000 - 2024 The Legion of the Bouncy Castle Inc. (https://www.bouncycastle.org)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
+to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
+OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+License notice for Apache Commons
+------------------------------------------------------------------------------
+
+Copyright 2002-2019 The Apache Software Foundation
+
+commons-codec (https://github.com/apache/commons-codec)
+commons-text (https://github.com/apache/commons-text)
+commons-lang (https://github.com/apache/commons-lang)
+commons-collections (https://github.com/apache/commons-collections)
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+in compliance with the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the License
+is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and limitations under the License.
+
+-------------------------------------------------------------------------------------------------
+
+License notice for GraalVM
+------------------------------------------------------------------------------
+org.graalvm.sdk:graal-sdk - https://github.com/graalvm/native-build-tools/blob/master/common/junit-platform-native/LICENSE
+The Universal Permissive License (UPL), Version 1.0
+
+Subject to the condition set forth below, permission is hereby granted to any person obtaining a copy of this software,
+associated documentation and/or data (collectively the "Software"), free of charge and under any and all copyright rights
+in the Software, and any and all patent rights owned or freely licensable by each licensor hereunder covering either
+(i) the unmodified Software as contributed to or provided by such licensor, or (ii) the Larger Works (as defined below),
+to deal in both
+
+(a) the Software, and
+
+(b) any piece of software and/or hardware listed in the lrgrwrks.txt file if one is included with the Software each a
+"Larger Work" to which the Software is contributed by such licensors), without restriction, including without limitation
+the rights to copy, create derivative works of, display, perform, and distribute the Software and make, use, sell, offer
+for sale, import, export, have made, and have sold the Software and the Larger Work(s), and to sublicense the foregoing
+rights on either these or other terms.
+
+This license is subject to the following condition:
+
+The above copyright notice and either this complete permission notice or at a minimum a reference to the UPL must be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+-------------------------------------------------------------------------------------------------
+
+License notice for JUnit platform native plugin
+------------------------------------------------------------------------------
+org.graalvm.buildtools:junit-platform-native - https://github.com/graalvm/native-build-tools/blob/master/common/junit-platform-native/LICENSE
+
+Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
+DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+
+The Universal Permissive License (UPL), Version 1.0
+
+Subject to the condition set forth below, permission is hereby granted to any
+person obtaining a copy of this software, associated documentation and/or
+data (collectively the "Software"), free of charge and under any and all
+copyright rights in the Software, and any and all patent rights owned or
+freely licensable by each licensor hereunder covering either (i) the
+unmodified Software as contributed to or provided by such licensor, or (ii)
+the Larger Works (as defined below), to deal in both
+
+(a) the Software, and
+
+(b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
+one is included with the Software each a "Larger Work" to which the Software
+is contributed by such licensors),
+
+without restriction, including without limitation the rights to copy, create
+derivative works of, display, perform, and distribute the Software and make,
+use, sell, offer for sale, import, export, have made, and have sold the
+Software and the Larger Work(s), and to sublicense the foregoing rights on
+either these or other terms.
+
+This license is subject to the following condition:
+
+The above copyright notice and either this complete permission notice or at a
+minimum a reference to the UPL must be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+-------------------------------------------------------------------------------------------------
+
+License notice for OpenTelemetry Java instrumentation
+https://github.com/open-telemetry/opentelemetry-java-instrumentation/
+------------------------------------------------------------------------------
+
+Copyright [2023] [OpenTelemetry]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+-------------------------------------------------------------------------------
+
+License notice for Public Suffix List
+-------------------------------------------------------------------------------
+
+This project includes the file `effective_tld_names.dat`, which is part of the Public Suffix List maintained by Mozilla
+and used under the Mozilla Public License 2.0. See https://publicsuffix.org/ for more information.
+
+This file is licensed under the Mozilla Public License, Version 2.0 (the "License"); you may not use this file except in
+compliance with the License. You may obtain a copy of the License at:
+
+ https://www.mozilla.org/MPL/2.0/
+
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific
+language governing permissions and limitations under the License.
+
+-------------------------------------------------------------------------------
+
+License notice for Brotli
+------------------------------------------------------------------------------
+
+Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/linkis-dist/release-docs/licenses/LICENSE-azure.txt b/linkis-dist/release-docs/licenses/LICENSE-azure.txt
new file mode 100644
index 00000000000..49d21669aee
--- /dev/null
+++ b/linkis-dist/release-docs/licenses/LICENSE-azure.txt
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/linkis-dist/release-docs/licenses/LICENSE-content-type.txt b/linkis-dist/release-docs/licenses/LICENSE-content-type.txt
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/linkis-dist/release-docs/licenses/LICENSE-content-type.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/linkis-dist/release-docs/licenses/LICENSE-lang-tag.txt b/linkis-dist/release-docs/licenses/LICENSE-lang-tag.txt
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/linkis-dist/release-docs/licenses/LICENSE-lang-tag.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/linkis-dist/release-docs/licenses/LICENSE-msal4j.txt b/linkis-dist/release-docs/licenses/LICENSE-msal4j.txt
new file mode 100644
index 00000000000..49d21669aee
--- /dev/null
+++ b/linkis-dist/release-docs/licenses/LICENSE-msal4j.txt
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/linkis-dist/release-docs/licenses/LICENSE-oauth2-oidc-sdk.txt b/linkis-dist/release-docs/licenses/LICENSE-oauth2-oidc-sdk.txt
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/linkis-dist/release-docs/licenses/LICENSE-oauth2-oidc-sdk.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/linkis-engineconn-plugins/hive/src/main/assembly/distribution.xml b/linkis-engineconn-plugins/hive/src/main/assembly/distribution.xml
index d7ed2759054..aee427c61b5 100644
--- a/linkis-engineconn-plugins/hive/src/main/assembly/distribution.xml
+++ b/linkis-engineconn-plugins/hive/src/main/assembly/distribution.xml
@@ -66,6 +66,9 @@
 <exclude>com.google.guava:guava:jar</exclude>
 <exclude>com.google.protobuf:protobuf-java:jar</exclude>
 <exclude>com.ning:async-http-client:jar</exclude>
+ <exclude>com.sun.jersey:jersey-json:jar</exclude>
+ <exclude>com.sun.jersey:jersey-server:jar</exclude>
+ <exclude>com.sun.jersey:jersey-servlet:jar</exclude>
 <exclude>com.sun.xml.bind:jaxb-impl:jar</exclude>
 <exclude>commons-beanutils:commons-beanutils:jar</exclude>
 <exclude>commons-beanutils:commons-beanutils-core:jar</exclude>
@@ -104,6 +107,7 @@
 <exclude>javax.xml.bind:jaxb-api:jar</exclude>
 <exclude>javax.xml.stream:stax-api:jar</exclude>
 <exclude>mysql:mysql-connector-java:jar</exclude>
+ <exclude>org.antlr:antlr-runtime:jar</exclude>
 <exclude>org.antlr:stringtemplate:jar</exclude>
 <exclude>org.apache.commons:commons-compress:jar</exclude>
 <exclude>org.apache.commons:commons-math:jar</exclude>
diff --git a/linkis-engineconn-plugins/hive/src/main/resources/log4j2.xml b/linkis-engineconn-plugins/hive/src/main/resources/log4j2.xml
index de932ee75f0..52ba651cf69 100644
--- a/linkis-engineconn-plugins/hive/src/main/resources/log4j2.xml
+++ b/linkis-engineconn-plugins/hive/src/main/resources/log4j2.xml
@@ -31,7 +31,10 @@
-
+
+
+
+
@@ -107,5 +110,11 @@
+
+
+
+
+
+
diff --git a/linkis-engineconn-plugins/hive/src/main/scala/org/apache/linkis/engineplugin/hive/conf/HiveEngineConfiguration.scala b/linkis-engineconn-plugins/hive/src/main/scala/org/apache/linkis/engineplugin/hive/conf/HiveEngineConfiguration.scala
index 4de8f02f52b..c4a8204e846 100644
--- a/linkis-engineconn-plugins/hive/src/main/scala/org/apache/linkis/engineplugin/hive/conf/HiveEngineConfiguration.scala
+++ b/linkis-engineconn-plugins/hive/src/main/scala/org/apache/linkis/engineplugin/hive/conf/HiveEngineConfiguration.scala
@@ -46,4 +46,16 @@ object HiveEngineConfiguration {
val HIVE_RANGER_ENABLE = CommonVars[Boolean]("linkis.hive.ranger.enabled", false).getValue
+ val HIVE_ENGINE_CONN_JAVA_EXTRA_OPTS = CommonVars(
+ "wds.linkis.hive.engineConn.java.extraOpts",
+ "-Djava.library.path=/appcom/Install/hadoop/lib/native",
+ "Specify the option parameter of the java process (please modify it carefully!!!)"
+ )
+
+ val HIVE_QUEUE_NAME: String = "mapreduce.job.queuename"
+
+ val BDP_QUEUE_NAME: String = "wds.linkis.rm.yarnqueue"
+
+ val HIVE_TEZ_QUEUE_NAME: String = "tez.queue.name"
+
}
diff --git a/linkis-engineconn-plugins/hive/src/main/scala/org/apache/linkis/engineplugin/hive/executor/HiveEngineConnExecutor.scala b/linkis-engineconn-plugins/hive/src/main/scala/org/apache/linkis/engineplugin/hive/executor/HiveEngineConnExecutor.scala
index 03036d30b7b..793a65b1eb8 100644
--- a/linkis-engineconn-plugins/hive/src/main/scala/org/apache/linkis/engineplugin/hive/executor/HiveEngineConnExecutor.scala
+++ b/linkis-engineconn-plugins/hive/src/main/scala/org/apache/linkis/engineplugin/hive/executor/HiveEngineConnExecutor.scala
@@ -19,6 +19,8 @@ package org.apache.linkis.engineplugin.hive.executor
import org.apache.linkis.common.exception.ErrorException
import org.apache.linkis.common.utils.{ByteTimeUtils, Logging, Utils}
+import org.apache.linkis.engineconn.common.conf.EngineConnConf
+import org.apache.linkis.engineconn.computation.executor.entity.EngineConnTask
import org.apache.linkis.engineconn.computation.executor.execute.{
ComputationExecutor,
EngineExecutionContext
@@ -127,6 +129,8 @@ class HiveEngineConnExecutor(
private var readResByObject = false
+ private var hiveTmpConf = Map[String, String]()
+
override def init(): Unit = {
LOG.info(s"Ready to change engine state!")
if (HadoopConf.KEYTAB_PROXYUSER_ENABLED.getValue) {
@@ -140,7 +144,7 @@ class HiveEngineConnExecutor(
engineExecutorContext: EngineExecutionContext,
code: String
): ExecuteResponse = {
- readResByObject = MapUtils.getBooleanValue(
+ readResByObject = MapUtils.getBoolean(
engineExecutorContext.getProperties,
JobRequestConstants.LINKIS_HIVE_EC_READ_RESULT_BY_OBJECT,
false
@@ -664,6 +668,49 @@ class HiveEngineConnExecutor(
override def getId(): String = namePrefix + id
+ override protected def beforeExecute(engineConnTask: EngineConnTask): Unit = {
+ super.beforeExecute(engineConnTask)
+ if (EngineConnConf.ENGINE_CONF_REVENT_SWITCH.getValue && hiveTmpConf.isEmpty) {
+ hiveTmpConf = sessionState.getConf.getAllProperties.asScala.toMap
+ }
+ }
+
+ override protected def afterExecute(
+ engineConnTask: EngineConnTask,
+ executeResponse: ExecuteResponse
+ ): Unit = {
+ try {
+ if (EngineConnConf.ENGINE_CONF_REVENT_SWITCH.getValue && hiveTmpConf.nonEmpty) {
+ val sessionConf = sessionState.getConf
+ if (sessionConf != null) {
+ val currentProps = Option(sessionConf.getAllProperties)
+ .map(_.asScala.toMap)
+ .getOrElse(Map.empty)
+ hiveTmpConf.foreach { case (key, value) =>
+ currentProps.get(key).filter(_ != value).foreach { userValue =>
+ logger.info(s"Resetting configuration key: $key,value: $value cover $userValue")
+ sessionConf.set(key, value)
+ }
+ }
+ // Clean up configuration keys that were added during task execution
+ currentProps.keys.foreach { key =>
+ if (!hiveTmpConf.contains(key)) {
+ logger.info(s"Clearing extra configuration key: $key")
+ sessionConf.set(key, "")
+ }
+ }
+ } else {
+ logger.warn("Session configuration is null, cannot reset hive configurations")
+ }
+ }
+ } catch {
+ case e: Exception =>
+ logger.error("Error occurred while resetting hive configurations", e)
+ } finally {
+ super.afterExecute(engineConnTask, executeResponse)
+ }
+ }
+
}
class HiveDriverProxy(driver: Any) extends Logging {
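Note on the new beforeExecute/afterExecute hooks above: they snapshot the Hive session configuration before each task and restore it afterwards, so per-task `set k=v` statements cannot leak into later tasks that reuse the same EngineConn. A minimal, self-contained sketch of that snapshot-and-restore pattern (FakeSessionConf and all values below are illustrative stand-ins, not the real SessionState/HiveConf API):

import scala.collection.mutable

// Illustrative stand-in for the Hive session configuration.
class FakeSessionConf {
  private val props = mutable.Map[String, String]("hive.exec.parallel" -> "false")
  def set(k: String, v: String): Unit = props(k) = v
  def getAllProperties: Map[String, String] = props.toMap
}

object ConfSnapshotDemo {
  def main(args: Array[String]): Unit = {
    val conf = new FakeSessionConf
    // beforeExecute: take the snapshot once
    val snapshot: Map[String, String] = conf.getAllProperties

    // the task mutates the shared session configuration
    conf.set("hive.exec.parallel", "true")
    conf.set("mapreduce.job.queuename", "ad_hoc")

    // afterExecute: restore overridden keys and blank out keys added by the task
    val current = conf.getAllProperties
    snapshot.foreach { case (k, v) =>
      current.get(k).filter(_ != v).foreach(_ => conf.set(k, v))
    }
    current.keys.filterNot(snapshot.contains).foreach(k => conf.set(k, ""))

    println(conf.getAllProperties) // hive.exec.parallel is back to "false"
  }
}

As in the hook above, keys introduced during the task are blanked rather than removed, which is enough to neutralize them for the next task.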
diff --git a/linkis-engineconn-plugins/hive/src/main/scala/org/apache/linkis/engineplugin/hive/launch/HiveProcessEngineConnLaunchBuilder.scala b/linkis-engineconn-plugins/hive/src/main/scala/org/apache/linkis/engineplugin/hive/launch/HiveProcessEngineConnLaunchBuilder.scala
index 3aa760f8242..b2dfb5d9a8a 100644
--- a/linkis-engineconn-plugins/hive/src/main/scala/org/apache/linkis/engineplugin/hive/launch/HiveProcessEngineConnLaunchBuilder.scala
+++ b/linkis-engineconn-plugins/hive/src/main/scala/org/apache/linkis/engineplugin/hive/launch/HiveProcessEngineConnLaunchBuilder.scala
@@ -17,9 +17,12 @@
package org.apache.linkis.engineplugin.hive.launch
+import org.apache.linkis.engineplugin.hive.conf.HiveEngineConfiguration.HIVE_ENGINE_CONN_JAVA_EXTRA_OPTS
import org.apache.linkis.manager.engineplugin.common.launch.entity.EngineConnBuildRequest
import org.apache.linkis.manager.engineplugin.common.launch.process.JavaProcessEngineConnLaunchBuilder
+import org.apache.commons.lang3.StringUtils
+
import java.util
import com.google.common.collect.Lists
@@ -34,4 +37,13 @@ class HiveProcessEngineConnLaunchBuilder extends JavaProcessEngineConnLaunchBuil
Lists.newArrayList("JarUDFLoadECMHook")
}
+ override protected def getExtractJavaOpts: String = {
+ val hiveExtraOpts: String = HIVE_ENGINE_CONN_JAVA_EXTRA_OPTS.getValue
+ if (StringUtils.isNotBlank(hiveExtraOpts)) {
+ super.getExtractJavaOpts + " " + hiveExtraOpts
+ } else {
+ super.getExtractJavaOpts
+ }
+ }
+
}
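The getExtractJavaOpts override above appends the Hive-specific extra JVM options (by default a java.library.path pointing at the Hadoop native libraries) to whatever the base builder produces. A trivial sketch of that append-if-non-blank behaviour (names and the base opts are illustrative):

import org.apache.commons.lang3.StringUtils

object ExtraOptsDemo {
  // Mirrors the override: only append when the configured extra opts are non-blank.
  def mergeOpts(baseOpts: String, extraOpts: String): String =
    if (StringUtils.isNotBlank(extraOpts)) baseOpts + " " + extraOpts else baseOpts

  def main(args: Array[String]): Unit = {
    val merged = mergeOpts(
      "-Xmx2g -XX:+UseG1GC",
      "-Djava.library.path=/appcom/Install/hadoop/lib/native" // default of the new config key
    )
    println(merged)
  }
}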
diff --git a/linkis-engineconn-plugins/io_file/src/main/scala/org/apache/linkis/manager/engineplugin/io/executor/IoEngineConnExecutor.scala b/linkis-engineconn-plugins/io_file/src/main/scala/org/apache/linkis/manager/engineplugin/io/executor/IoEngineConnExecutor.scala
index e8feb0d3546..a9ec71ba004 100644
--- a/linkis-engineconn-plugins/io_file/src/main/scala/org/apache/linkis/manager/engineplugin/io/executor/IoEngineConnExecutor.scala
+++ b/linkis-engineconn-plugins/io_file/src/main/scala/org/apache/linkis/manager/engineplugin/io/executor/IoEngineConnExecutor.scala
@@ -60,6 +60,8 @@ import java.util.concurrent.atomic.AtomicLong
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
+import com.google.gson.internal.LinkedTreeMap
+
class IoEngineConnExecutor(val id: Int, val outputLimit: Int = 10)
extends ConcurrentComputationExecutor(outputLimit)
with Logging {
@@ -322,7 +324,8 @@ class IoEngineConnExecutor(val id: Int, val outputLimit: Int = 10)
s"Creator ${methodEntity.creatorUser} for user ${methodEntity.proxyUser} init fs $methodEntity"
)
var fsId = methodEntity.id
- val properties = methodEntity.params(0).asInstanceOf[Map[String, String]]
+ val properties =
+ methodEntity.params(0).asInstanceOf[LinkedTreeMap[String, String]].asScala.toMap
val proxyUser = methodEntity.proxyUser
if (!fsProxyService.canProxyUser(methodEntity.creatorUser, proxyUser, methodEntity.fsType)) {
throw new StorageErrorException(
diff --git a/linkis-engineconn-plugins/io_file/src/main/scala/org/apache/linkis/manager/engineplugin/io/utils/IOHelp.scala b/linkis-engineconn-plugins/io_file/src/main/scala/org/apache/linkis/manager/engineplugin/io/utils/IOHelp.scala
index b6f35a2e8e2..8734dd7191f 100644
--- a/linkis-engineconn-plugins/io_file/src/main/scala/org/apache/linkis/manager/engineplugin/io/utils/IOHelp.scala
+++ b/linkis-engineconn-plugins/io_file/src/main/scala/org/apache/linkis/manager/engineplugin/io/utils/IOHelp.scala
@@ -62,16 +62,16 @@ object IOHelp {
writer.toString()
} else if (method.params.length == 3) {
val position =
- if (method.params(1).toString.toInt < 0) {
+ if (method.params(1).toString.toDouble.toInt < 0) {
0
} else {
- method.params(1).toString.toInt
+ method.params(1).toString.toDouble.toInt
}
val fetchSize =
- if (method.params(2).toString.toInt > maxPageSize) {
+ if (method.params(2).toString.toDouble.toInt > maxPageSize) {
maxPageSize.toInt
} else {
- method.params(2).toString.toInt
+ method.params(2).toString.toDouble.toInt
}
if (position > 0) {
inputStream.skip(position)
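Background for the toDouble.toInt change above: the method parameters arrive through JSON deserialization, so integral values are frequently materialized as doubles such as 10.0, and calling .toInt directly on their string form throws NumberFormatException. A small illustration (the value is made up):

object NumericParamDemo {
  def main(args: Array[String]): Unit = {
    val param: Any = 10.0 // what a JSON deserializer typically yields for a number

    // Fails: "10.0" is not a valid Int literal.
    val direct = scala.util.Try(param.toString.toInt)
    println(direct) // Failure(java.lang.NumberFormatException: ...)

    // Parsing as Double first, then truncating, tolerates both "10" and "10.0".
    val viaDouble = param.toString.toDouble.toInt
    println(viaDouble) // 10
  }
}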
diff --git a/linkis-engineconn-plugins/jdbc/pom.xml b/linkis-engineconn-plugins/jdbc/pom.xml
index ab06661b9db..8136e290802 100644
--- a/linkis-engineconn-plugins/jdbc/pom.xml
+++ b/linkis-engineconn-plugins/jdbc/pom.xml
@@ -70,12 +70,6 @@
linkis-common
${project.version}
provided
-
-
- commons-dbcp
- commons-dbcp
-
-
org.apache.hive
diff --git a/linkis-engineconn-plugins/jdbc/src/main/java/org/apache/linkis/manager/engineplugin/jdbc/ConnectionManager.java b/linkis-engineconn-plugins/jdbc/src/main/java/org/apache/linkis/manager/engineplugin/jdbc/ConnectionManager.java
index a5679f1cf59..b3a9867d387 100644
--- a/linkis-engineconn-plugins/jdbc/src/main/java/org/apache/linkis/manager/engineplugin/jdbc/ConnectionManager.java
+++ b/linkis-engineconn-plugins/jdbc/src/main/java/org/apache/linkis/manager/engineplugin/jdbc/ConnectionManager.java
@@ -17,6 +17,7 @@
package org.apache.linkis.manager.engineplugin.jdbc;
+import org.apache.linkis.common.utils.AESUtils;
import org.apache.linkis.common.utils.SecurityUtils;
import org.apache.linkis.hadoop.common.utils.KerberosUtils;
import org.apache.linkis.manager.engineplugin.jdbc.conf.JDBCConfiguration$;
@@ -200,6 +201,10 @@ protected DataSource buildDataSource(String dbUrl, Map<String, String> properties
LOG.info("Database connection address information(数据库连接地址信息)=" + dbUrl);
datasource.setUrl(dbUrl);
datasource.setUsername(username);
+ if (AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue()) {
+ // decrypt
+ password = AESUtils.decrypt(password, AESUtils.LINKIS_DATASOURCE_AES_KEY.getValue());
+ }
datasource.setPassword(password);
datasource.setConnectProperties(SecurityUtils.getMysqlSecurityParams());
datasource.setDriverClassName(driverClassName);
diff --git a/linkis-engineconn-plugins/jdbc/src/main/java/org/apache/linkis/manager/engineplugin/jdbc/constant/JDBCEngineConnConstant.java b/linkis-engineconn-plugins/jdbc/src/main/java/org/apache/linkis/manager/engineplugin/jdbc/constant/JDBCEngineConnConstant.java
index 16f6a7e324c..50e1c0e6530 100644
--- a/linkis-engineconn-plugins/jdbc/src/main/java/org/apache/linkis/manager/engineplugin/jdbc/constant/JDBCEngineConnConstant.java
+++ b/linkis-engineconn-plugins/jdbc/src/main/java/org/apache/linkis/manager/engineplugin/jdbc/constant/JDBCEngineConnConstant.java
@@ -46,6 +46,12 @@ private JDBCEngineConnConstant() {}
public static final String JDBC_ENGINE_RUN_TIME_DS_SYSTEM_QUERY_PARAM =
"wds.linkis.engine.runtime.datasource.systemQueryParam";
+ public static final String JDBC_ENGINE_RUN_TIME_DS_TYPE = "linkis.datasource.type";
+ public static final String JDBC_ENGINE_RUN_TIME_DS_PARAM_HOST = "linkis.datasource.params.host";
+ public static final String JDBC_ENGINE_RUN_TIME_DS_PARAM_PORT = "linkis.datasource.params.port";
+ public static final String JDBC_ENGINE_RUN_TIME_DS_PARAM_USERNAME =
+ "linkis.datasource.params.username";
+
public static final String JDBC_POOL_TEST_ON_BORROW = "wds.linkis.jdbc.pool.testOnBorrow";
public static final String JDBC_POOL_TEST_ON_RETURN = "wds.linkis.jdbc.pool.testOnReturn";
public static final String JDBC_POOL_TEST_WHILE_IDLE = "wds.linkis.jdbc.pool.testWhileIdle";
@@ -63,8 +69,9 @@ private JDBCEngineConnConstant() {}
"wds.linkis.jdbc.pool.poolPreparedStatements";
public static final String JDBC_POOL_REMOVE_ABANDONED_ENABLED =
"wds.linkis.jdbc.pool.remove.abandoned.enabled";
- public static final String JDBC_POOL_REMOVE_ABANDONED_TIMEOUT =
- "wds.linkis.jdbc.pool.remove.abandoned.timeout";
+ public static final String JDBC_POOL_REMOVE_ABANDONED_LOG_ENABLED =
+ "wds.linkis.jdbc.pool.remove.abandoned.log.enabled";
+ public static final String JDBC_POOL_REMOVE_ABANDONED_TIMEOUT = "linkisJDBCPoolAbandonedTimeout";
public static final String DS_JDBC_HOST = "host";
public static final String DS_JDBC_PORT = "port";
diff --git a/linkis-engineconn-plugins/jdbc/src/main/scala/org/apache/linkis/manager/engineplugin/jdbc/conf/JDBCConfiguration.scala b/linkis-engineconn-plugins/jdbc/src/main/scala/org/apache/linkis/manager/engineplugin/jdbc/conf/JDBCConfiguration.scala
index 819110e229c..2b70663ea82 100644
--- a/linkis-engineconn-plugins/jdbc/src/main/scala/org/apache/linkis/manager/engineplugin/jdbc/conf/JDBCConfiguration.scala
+++ b/linkis-engineconn-plugins/jdbc/src/main/scala/org/apache/linkis/manager/engineplugin/jdbc/conf/JDBCConfiguration.scala
@@ -32,9 +32,15 @@ object JDBCConfiguration {
val JDBC_KERBEROS_ENABLE = CommonVars[Boolean]("wds.linkis.keytab.enable", false)
- val CHANGE_DS_TYPE_TO_MYSQL = CommonVars[Boolean]("linkis.change.ds.type.to.mysql", true).getValue
+ val CHANGE_DS_TYPE_TO_MYSQL =
+ CommonVars[String]("linkis.change.ds.type.to.mysql", "starrocks").getValue
+
+ val NOT_SUPPORT_LIMIT_DBS = CommonVars[String]("linkis.not.support.limit.dbs", "oracle").getValue
val DS_TYPES_TO_EXECUTE_TASK_BY_JDBC =
CommonVars[String]("linkis.can.execute.task.ds.types.by.jdbc", "starrocks").getValue
+ val SUPPORT_CONN_PARAM_EXECUTE_ENABLE: Boolean =
+ CommonVars[Boolean]("linkis.support.conn.param.execute.enable", true).getValue
+
}
diff --git a/linkis-engineconn-plugins/jdbc/src/main/scala/org/apache/linkis/manager/engineplugin/jdbc/executor/JDBCEngineConnExecutor.scala b/linkis-engineconn-plugins/jdbc/src/main/scala/org/apache/linkis/manager/engineplugin/jdbc/executor/JDBCEngineConnExecutor.scala
index 2a6e8dd4b2a..fba3a02d276 100644
--- a/linkis-engineconn-plugins/jdbc/src/main/scala/org/apache/linkis/manager/engineplugin/jdbc/executor/JDBCEngineConnExecutor.scala
+++ b/linkis-engineconn-plugins/jdbc/src/main/scala/org/apache/linkis/manager/engineplugin/jdbc/executor/JDBCEngineConnExecutor.scala
@@ -35,10 +35,13 @@ import org.apache.linkis.manager.common.entity.resource.{
LoadResource,
NodeResource
}
-import org.apache.linkis.manager.engineplugin.common.conf.EngineConnPluginConf
import org.apache.linkis.manager.engineplugin.common.util.NodeResourceUtils
import org.apache.linkis.manager.engineplugin.jdbc.ConnectionManager
import org.apache.linkis.manager.engineplugin.jdbc.conf.JDBCConfiguration
+import org.apache.linkis.manager.engineplugin.jdbc.conf.JDBCConfiguration.{
+ NOT_SUPPORT_LIMIT_DBS,
+ SUPPORT_CONN_PARAM_EXECUTE_ENABLE
+}
import org.apache.linkis.manager.engineplugin.jdbc.constant.JDBCEngineConnConstant
import org.apache.linkis.manager.engineplugin.jdbc.errorcode.JDBCErrorCodeSummary.JDBC_GET_DATASOURCEINFO_ERROR
import org.apache.linkis.manager.engineplugin.jdbc.exception.{
@@ -49,6 +52,7 @@ import org.apache.linkis.manager.engineplugin.jdbc.monitor.ProgressMonitor
import org.apache.linkis.manager.label.entity.Label
import org.apache.linkis.manager.label.entity.engine.{EngineTypeLabel, UserCreatorLabel}
import org.apache.linkis.protocol.CacheableProtocol
+import org.apache.linkis.protocol.constants.TaskConstant
import org.apache.linkis.protocol.engine.JobProgressInfo
import org.apache.linkis.rpc.{RPCMapCache, Sender}
import org.apache.linkis.scheduler.executer.{
@@ -61,14 +65,14 @@ import org.apache.linkis.storage.domain.{Column, DataType}
import org.apache.linkis.storage.resultset.ResultSetFactory
import org.apache.linkis.storage.resultset.table.{TableMetaData, TableRecord}
+import org.apache.commons.collections.MapUtils
import org.apache.commons.io.IOUtils
import org.apache.commons.lang3.StringUtils
import org.springframework.util.CollectionUtils
-import java.sql.{Connection, ResultSet, SQLException, Statement}
+import java.sql.{Connection, ResultSet, Statement}
import java.util
-import java.util.Collections
import java.util.concurrent.ConcurrentHashMap
import scala.collection.mutable.ArrayBuffer
@@ -130,6 +134,8 @@ class JDBCEngineConnExecutor(override val outputPrintLimit: Int, val id: Int)
if (StringUtils.isNotBlank(dataSourceName)) {
dataSourceIdentifier = s"$dataSourceName-$dataSourceMaxVersionId"
}
+ // merge URL query parameters into the datasource connection properties
+ parseJdbcUrl(jdbcUrl, properties)
val connection = connectionManager.getConnection(dataSourceIdentifier, properties)
if (StringUtils.isNotBlank(taskId)) {
connectionCache.put(taskId, connection)
@@ -137,6 +143,31 @@ class JDBCEngineConnExecutor(override val outputPrintLimit: Int, val id: Int)
connection
}
+ def parseJdbcUrl(jdbcUrl: String, parameters: util.Map[String, String]): Unit = {
+ if (StringUtils.isEmpty(jdbcUrl)) {
+ return
+ }
+ val queryIndex = jdbcUrl.indexOf('?')
+ if (queryIndex != -1) {
+ val query = jdbcUrl.substring(queryIndex + 1)
+ val pairs = query.split("&")
+
+ pairs.foreach { pair =>
+ try {
+ val keyValue = pair.split("=", 2)
+ if (keyValue.length == 2) {
+ val key = keyValue(0)
+ val value = keyValue(1)
+ parameters.put(key, value)
+ }
+ } catch {
+ case e: Exception =>
+ logger.info(s"wrong link parameters: ${pair}")
+ }
+ }
+ }
+ }
+
override def executeLine(
engineExecutorContext: EngineExecutionContext,
code: String
@@ -153,7 +184,6 @@ class JDBCEngineConnExecutor(override val outputPrintLimit: Int, val id: Int)
statement.setQueryTimeout(JDBCConfiguration.JDBC_QUERY_TIMEOUT.getValue)
}
statement.setFetchSize(outputPrintLimit)
- statement.setMaxRows(outputPrintLimit)
val monitor = ProgressMonitor.attachMonitor(statement)
if (monitor != null) {
@@ -166,7 +196,18 @@ class JDBCEngineConnExecutor(override val outputPrintLimit: Int, val id: Int)
}
logger.info(s"create statement is: $statement")
connectionManager.saveStatement(taskId, statement)
- val isResultSetAvailable = statement.execute(code)
+ val properties: util.Map[String, String] = getJDBCRuntimeParams(engineExecutorContext)
+ val jdbcUrl: String = properties.get(JDBCEngineConnConstant.JDBC_URL)
+ var newCode = code
+ val dbs: Array[String] = NOT_SUPPORT_LIMIT_DBS.split(",")
+ if (StringUtils.isNotBlank(jdbcUrl) && dbs.length > 0) {
+ dbs.foreach(dbName => {
+ if (jdbcUrl.toLowerCase().contains(dbName.toLowerCase())) {
+ newCode = code.replaceAll("(?i)limit[^;]*;?$", "").trim
+ }
+ })
+ }
+ val isResultSetAvailable = statement.execute(newCode)
logger.info(s"Is ResultSet available ? : $isResultSetAvailable")
if (monitor != null) {
/* refresh progress */
@@ -251,6 +292,43 @@ class JDBCEngineConnExecutor(override val outputPrintLimit: Int, val id: Int)
if (StringUtils.isBlank(dataSourceName)) {
dataSourceName = JDBCEngineConnConstant.JDBC_DEFAULT_DATASOURCE_TAG
}
+ if (MapUtils.isEmpty(dataSourceInfo) && SUPPORT_CONN_PARAM_EXECUTE_ENABLE) {
+ val connHost: String =
+ executorProperties.getOrDefault(
+ JDBCEngineConnConstant.JDBC_ENGINE_RUN_TIME_DS_PARAM_HOST,
+ ""
+ )
+ val connPort: String =
+ executorProperties.getOrDefault(
+ JDBCEngineConnConstant.JDBC_ENGINE_RUN_TIME_DS_PARAM_PORT,
+ ""
+ )
+ val connDsType: String =
+ executorProperties.getOrDefault(JDBCEngineConnConstant.JDBC_ENGINE_RUN_TIME_DS_TYPE, "")
+ val submitUser: String = executorProperties.getOrDefault(TaskConstant.SUBMIT_USER, "")
+ val executeUser: String =
+ executorProperties.getOrDefault(TaskConstant.EXECUTE_USER, submitUser)
+
+ if (
+ StringUtils.isNotBlank(connHost) && StringUtils
+ .isNotBlank(connPort) && StringUtils
+ .isNotBlank(connDsType) && StringUtils.isNotBlank(executeUser)
+ ) {
+ logger.info(
+ s"use conn ip and port get dataSourceInfo: executeUser:${execSqlUser} ip:${connHost}, " +
+ s"port:${connPort}, dsType:${connDsType}, " +
+ s"createUser:${submitUser} connUser: ${executeUser}"
+ )
+ dataSourceInfo = JDBCMultiDatasourceParser.queryDatasourceInfoByConnParams(
+ executeUser,
+ executeUser,
+ connHost,
+ connPort,
+ connDsType
+ )
+ }
+ }
+
// runtime jdbc params > jdbc datasource info > jdbc engine global config
if (dataSourceInfo != null && !dataSourceInfo.isEmpty) {
globalConfig.putAll(dataSourceInfo)
@@ -290,9 +368,8 @@ class JDBCEngineConnExecutor(override val outputPrintLimit: Int, val id: Int)
val resultSetWriter =
engineExecutorContext.createResultSetWriter(ResultSetFactory.TABLE_TYPE)
resultSetWriter.addMetaData(metaData)
- var count = 0
Utils.tryCatch({
- while (count < outputPrintLimit && resultSet.next()) {
+ while (resultSet.next()) {
val r: Array[Any] = columns.indices.map { i =>
val data = resultSet.getObject(i + 1) match {
case value: Array[Byte] =>
@@ -303,7 +380,6 @@ class JDBCEngineConnExecutor(override val outputPrintLimit: Int, val id: Int)
data
}.toArray
resultSetWriter.addRecord(new TableRecord(r.asInstanceOf[Array[Any]]))
- count += 1
}
}) { case e: Exception =>
return ErrorExecuteResponse("query jdbc failed", e)
@@ -351,10 +427,12 @@ class JDBCEngineConnExecutor(override val outputPrintLimit: Int, val id: Int)
override def close(): Unit = {
logger.info("Start closing the jdbc engine.")
+ Utils.tryQuietly(killAll())
connectionManager.close()
if (JDBCConfiguration.JDBC_KERBEROS_ENABLE.getValue) {
connectionManager.shutdownRefreshKerberosLoginService()
}
+
logger.info("The jdbc engine has closed successfully.")
}
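The new parseJdbcUrl helper above copies any ?key=value&... pairs from the datasource URL into the connection properties before a connection is borrowed, and the related NOT_SUPPORT_LIMIT_DBS change strips a trailing LIMIT clause for databases (Oracle by default) that reject it. A standalone sketch of the URL-parameter splitting, with a made-up MySQL URL:

import java.util

object JdbcUrlParamDemo {
  // Same splitting rule as the new helper: everything after '?' is treated as &-separated key=value pairs.
  def parseJdbcUrl(jdbcUrl: String, parameters: util.Map[String, String]): Unit = {
    if (jdbcUrl == null || jdbcUrl.isEmpty) return
    val queryIndex = jdbcUrl.indexOf('?')
    if (queryIndex != -1) {
      jdbcUrl.substring(queryIndex + 1).split("&").foreach { pair =>
        val kv = pair.split("=", 2)
        if (kv.length == 2) parameters.put(kv(0), kv(1))
      }
    }
  }

  def main(args: Array[String]): Unit = {
    val props = new util.HashMap[String, String]()
    parseJdbcUrl("jdbc:mysql://db-host:3306/demo?useSSL=false&characterEncoding=UTF-8", props)
    println(props) // {useSSL=false, characterEncoding=UTF-8}
  }
}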
diff --git a/linkis-engineconn-plugins/jdbc/src/main/scala/org/apache/linkis/manager/engineplugin/jdbc/executor/JDBCMultiDatasourceParser.scala b/linkis-engineconn-plugins/jdbc/src/main/scala/org/apache/linkis/manager/engineplugin/jdbc/executor/JDBCMultiDatasourceParser.scala
index 8cb52640bea..8a12c5788e6 100644
--- a/linkis-engineconn-plugins/jdbc/src/main/scala/org/apache/linkis/manager/engineplugin/jdbc/executor/JDBCMultiDatasourceParser.scala
+++ b/linkis-engineconn-plugins/jdbc/src/main/scala/org/apache/linkis/manager/engineplugin/jdbc/executor/JDBCMultiDatasourceParser.scala
@@ -19,7 +19,10 @@ package org.apache.linkis.manager.engineplugin.jdbc.executor
import org.apache.linkis.common.utils.{JsonUtils, Logging, Utils}
import org.apache.linkis.datasource.client.impl.LinkisDataSourceRemoteClient
-import org.apache.linkis.datasource.client.request.GetInfoPublishedByDataSourceNameAction
+import org.apache.linkis.datasource.client.request.{
+ GetInfoPublishedByDataSourceNameAction,
+ GetInfoPublishedByUserIpPortAction
+}
import org.apache.linkis.datasourcemanager.common.domain.DataSource
import org.apache.linkis.manager.engineplugin.jdbc.JdbcAuthType
import org.apache.linkis.manager.engineplugin.jdbc.conf.JDBCConfiguration.{
@@ -42,6 +45,10 @@ import scala.collection.JavaConverters._
object JDBCMultiDatasourceParser extends Logging {
+ private val MYSQL_SQL_CONNECT_URL = "jdbc:mysql://%s:%s/%s"
+ private val ORACLE_SQL_CONNECT_URL = "jdbc:oracle:thin:@%s:%s:%s"
+ private val POSTGRESQL_SQL_CONNECT_URL = "jdbc:postgresql://%s:%s/%s"
+
def queryDatasourceInfoByName(
datasourceName: String,
username: String,
@@ -126,7 +133,14 @@ object JDBCMultiDatasourceParser extends Logging {
)
}
- if (CHANGE_DS_TYPE_TO_MYSQL) {
+ // check dbType
+ if (!DS_TYPES_TO_EXECUTE_TASK_BY_JDBC.contains(dbType)) {
+ throw new JDBCGetDatasourceInfoException(
+ UNSUPPORTED_DS_TYPE.getErrorCode,
+ MessageFormat.format(UNSUPPORTED_DS_TYPE.getErrorDesc, dbType)
+ )
+ }
+ if (CHANGE_DS_TYPE_TO_MYSQL.contains(dbType)) {
dbType = "mysql"
}
@@ -154,9 +168,23 @@ object JDBCMultiDatasourceParser extends Logging {
}
var jdbcUrl = s"jdbc:$dbType://$host:$port"
val dbName = dbConnParams.get(JDBCEngineConnConstant.DS_JDBC_DB_NAME)
- if (strObjIsNotBlank(dbName)) {
- jdbcUrl = s"$jdbcUrl/$dbName"
+ dbType match {
+ case "oracle" =>
+ val instance: Object = dbConnParams.get("instance")
+ jdbcUrl = String.format(ORACLE_SQL_CONNECT_URL, host, port, instance)
+ case "postgresql" =>
+ var instance: Object = dbConnParams.get("instance")
+ if (strObjIsBlank(instance) && strObjIsNotBlank(dbName)) {
+ instance = dbName
+ }
+ jdbcUrl = String.format(POSTGRESQL_SQL_CONNECT_URL, host, port, instance)
+ case _ =>
+ jdbcUrl = s"jdbc:$dbType://$host:$port"
+ if (strObjIsNotBlank(dbName)) {
+ jdbcUrl = s"$jdbcUrl/$dbName"
+ }
}
+ logger.info(s"jdbc ${dbType} connection_url: $jdbcUrl")
val params = dbConnParams.get(JDBCEngineConnConstant.DS_JDBC_PARAMS)
val paramsMap =
@@ -264,4 +292,29 @@ object JDBCMultiDatasourceParser extends Logging {
!strObjIsNotBlank(str)
}
+ def queryDatasourceInfoByConnParams(
+ createUser: String,
+ proxyUser: String,
+ ip: String,
+ port: String,
+ datasourceTypeName: String
+ ): util.Map[String, String] = {
+ val dataSourceClient = new LinkisDataSourceRemoteClient()
+ val action: GetInfoPublishedByUserIpPortAction = GetInfoPublishedByUserIpPortAction.builder
+ .setDatasourceTypeName(datasourceTypeName)
+ .setUser(createUser)
+ .setDatasourceUser(proxyUser)
+ .setIp(ip)
+ .setPort(port)
+ .build // ignore parameter 'system'
+
+ val dataSource: DataSource = dataSourceClient.getInfoPublishedByIpPort(action).getDataSource
+ if (dataSource != null) {
+ queryDatasourceInfo(dataSource.getDataSourceName, dataSource)
+ } else {
+ null
+ }
+
+ }
+
}
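The dbType match above now emits vendor-specific connection URLs: Oracle thin URLs use host:port:instance, PostgreSQL uses host:port/ with the instance falling back to the database name, and every other type keeps the generic jdbc:<type>://host:port[/db] form. A condensed sketch of that branching with sample values (not taken from a real datasource):

object JdbcUrlBuildDemo {
  // Mirrors the vendor-specific templates introduced above.
  def buildUrl(dbType: String, host: String, port: String, dbName: String, instance: String): String =
    dbType match {
      case "oracle" => s"jdbc:oracle:thin:@$host:$port:$instance"
      case "postgresql" =>
        val db = if (instance != null && instance.nonEmpty) instance else dbName
        s"jdbc:postgresql://$host:$port/$db"
      case _ =>
        val base = s"jdbc:$dbType://$host:$port"
        if (dbName != null && dbName.nonEmpty) s"$base/$dbName" else base
    }

  def main(args: Array[String]): Unit = {
    println(buildUrl("oracle", "10.0.0.1", "1521", "", "ORCL"))
    println(buildUrl("postgresql", "10.0.0.2", "5432", "demo_db", ""))
    println(buildUrl("mysql", "10.0.0.3", "3306", "demo_db", ""))
  }
}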
diff --git a/linkis-engineconn-plugins/nebula/src/main/java/org/apache/linkis/engineplugin/nebula/executor/NebulaEngineConnExecutor.java b/linkis-engineconn-plugins/nebula/src/main/java/org/apache/linkis/engineplugin/nebula/executor/NebulaEngineConnExecutor.java
index a853313ae09..3b1a931b469 100644
--- a/linkis-engineconn-plugins/nebula/src/main/java/org/apache/linkis/engineplugin/nebula/executor/NebulaEngineConnExecutor.java
+++ b/linkis-engineconn-plugins/nebula/src/main/java/org/apache/linkis/engineplugin/nebula/executor/NebulaEngineConnExecutor.java
@@ -20,6 +20,7 @@
import org.apache.linkis.common.exception.ErrorException;
import org.apache.linkis.common.io.resultset.ResultSetWriter;
import org.apache.linkis.common.log.LogUtils;
+import org.apache.linkis.common.utils.AESUtils;
import org.apache.linkis.common.utils.OverloadUtils;
import org.apache.linkis.engineconn.common.conf.EngineConnConf;
import org.apache.linkis.engineconn.common.conf.EngineConnConstant;
@@ -280,7 +281,8 @@ private Session getSession(String taskId, NebulaPool nebulaPool) {
} else {
Session session = null;
String username = NebulaConfiguration.NEBULA_USER_NAME.getValue(configMap);
- String password = NebulaConfiguration.NEBULA_PASSWORD.getValue(configMap);
+ String password =
+ AESUtils.isDecryptByConf(NebulaConfiguration.NEBULA_PASSWORD.getValue(configMap));
Boolean reconnect = NebulaConfiguration.NEBULA_RECONNECT_ENABLED.getValue(configMap);
String space = NebulaConfiguration.NEBULA_SPACE.getValue(configMap);
try {
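The password handling above routes the configured value through AESUtils when the datasource AES switch is enabled; AESUtils is a Linkis-internal helper whose implementation is not part of this diff. Purely to illustrate the general decrypt-with-a-shared-key idea, here is a generic JDK-crypto sketch (demo key, ECB mode, and Base64 encoding are assumptions for the example, not the actual AESUtils behaviour):

import java.util.Base64
import javax.crypto.Cipher
import javax.crypto.spec.SecretKeySpec

object AesDemo {
  // AES/ECB/PKCS5Padding round trip for demonstration only; key must be 16/24/32 bytes.
  private def cipher(mode: Int, key: String): Cipher = {
    val c = Cipher.getInstance("AES/ECB/PKCS5Padding")
    c.init(mode, new SecretKeySpec(key.getBytes("UTF-8"), "AES"))
    c
  }

  def encrypt(plain: String, key: String): String =
    Base64.getEncoder.encodeToString(cipher(Cipher.ENCRYPT_MODE, key).doFinal(plain.getBytes("UTF-8")))

  def decrypt(encoded: String, key: String): String =
    new String(cipher(Cipher.DECRYPT_MODE, key).doFinal(Base64.getDecoder.decode(encoded)), "UTF-8")

  def main(args: Array[String]): Unit = {
    val key = "0123456789abcdef" // 16-byte demo key, not a real Linkis key
    val stored = encrypt("my-db-password", key)
    println(decrypt(stored, key)) // my-db-password
  }
}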
diff --git a/linkis-engineconn-plugins/shell/src/main/scala/org/apache/linkis/manager/engineplugin/shell/configuration/AccessibleExecutorConfiguration.scala b/linkis-engineconn-plugins/shell/src/main/scala/org/apache/linkis/manager/engineplugin/shell/configuration/AccessibleExecutorConfiguration.scala
new file mode 100644
index 00000000000..dd861f2c8d5
--- /dev/null
+++ b/linkis-engineconn-plugins/shell/src/main/scala/org/apache/linkis/manager/engineplugin/shell/configuration/AccessibleExecutorConfiguration.scala
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.linkis.manager.engineplugin.shell.configuration
+
+import org.apache.linkis.common.utils.Logging
+import org.apache.linkis.engineconn.acessible.executor.info.{
+ DefaultNodeOverLoadInfoManager,
+ NodeOverLoadInfoManager
+}
+import org.apache.linkis.engineconn.acessible.executor.service.{
+ EngineConnConcurrentLockService,
+ EngineConnTimedLockService,
+ LockService
+}
+import org.apache.linkis.engineconn.executor.listener.ExecutorListenerBusContext
+
+import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean
+import org.springframework.context.annotation.{Bean, Configuration}
+
+@Configuration
+class AccessibleExecutorConfiguration extends Logging {
+
+ @Bean(Array("lockService"))
+ @ConditionalOnMissingBean
+ def createLockManager(): LockService = {
+ val lockService = new EngineConnConcurrentLockService
+ logger.info("use shell engine conn configuration to create concurrent lockService")
+ lockService
+ }
+
+}
diff --git a/linkis-engineconn-plugins/spark/pom.xml b/linkis-engineconn-plugins/spark/pom.xml
index 49a73d911ef..ff5b2c93303 100644
--- a/linkis-engineconn-plugins/spark/pom.xml
+++ b/linkis-engineconn-plugins/spark/pom.xml
@@ -121,6 +121,7 @@
com.google.guava
guava
+ ${guava.version}
provided
@@ -146,6 +147,11 @@
provided
+
+ com.fasterxml.jackson.core
+ jackson-databind
+
+
org.apache.linkis
linkis-common
@@ -342,6 +348,10 @@
org.codehaus.janino
janino
+
+ org.codehaus.jackson
+ jackson-mapper-asl
+
@@ -421,8 +431,26 @@
io.fabric8
kubernetes-model-core
+
+ org.yaml
+ snakeyaml
+
+
+ com.squareup.okio
+ okio
+
+
+ org.yaml
+ snakeyaml
+ ${snakeyaml.version}
+
+
+ com.squareup.okio
+ okio
+ ${okio.version}
+
io.fabric8
kubernetes-model-common
@@ -513,4 +541,109 @@
+
+
+ spark-3
+
+ 1.11.0-wds-spark3
+ 3.7.0-M11
+ 3.4.4
+ 2.12.17
+ 2.12
+ 2.14.2
+
+
+
+
+ spark-2.4-hadoop-3.3
+
+ ${hadoop-hdfs-client-shade.version}
+
+
+
+ org.apache.linkis
+ linkis-hadoop-hdfs-client-shade
+ ${project.version}
+
+
+ commons-logging
+ commons-logging
+
+
+ log4j
+ log4j
+
+
+ org.mortbay.jetty
+ jetty
+
+
+ org.mortbay.jetty
+ jetty-util
+
+
+ com.sun.jersey
+ jersey-core
+
+
+ com.sun.jersey
+ jersey-server
+
+
+ com.sun.jersey
+ jersey-json
+
+
+ javax.ws.rs
+ jsr311-api
+
+
+ net.java.dev.jets3t
+ jets3t
+
+
+ com.jcraft
+ jsch
+
+
+ com.google.code.findbugs
+ jsr305
+
+
+ xmlenc
+ xmlenc
+
+
+ net.java.dev.jets3t
+ jets3t
+
+
+ org.apache.avro
+ avro
+
+
+ com.jcraft
+ jsch
+
+
+ com.google.code.findbugs
+ jsr305
+
+
+ javax.servlet
+ servlet-api
+
+
+ org.slf4j
+ slf4j-log4j12
+
+
+ org.eclipse.jetty
+ *
+
+
+
+
+
+
diff --git a/linkis-engineconn-plugins/spark/src/main/java/org/apache/linkis/engineplugin/spark/client/deployment/util/FileUtils.java b/linkis-engineconn-plugins/spark/src/main/java/org/apache/linkis/engineplugin/spark/client/deployment/util/FileUtils.java
index 2b311b91bda..57c375c804c 100644
--- a/linkis-engineconn-plugins/spark/src/main/java/org/apache/linkis/engineplugin/spark/client/deployment/util/FileUtils.java
+++ b/linkis-engineconn-plugins/spark/src/main/java/org/apache/linkis/engineplugin/spark/client/deployment/util/FileUtils.java
@@ -63,6 +63,8 @@ public final class FileUtils {
public static final String PACKAGE_SEPARATOR = ".";
+ public static final Random rnd = new Random();
+
// ------------------------------------------------------------------------
public static void writeCompletely(WritableByteChannel channel, ByteBuffer src)
@@ -91,7 +93,6 @@ public static Path[] listDirectory(Path directory) throws IOException {
* @return the generated random filename with the given prefix
*/
public static String getRandomFilename(final String prefix) {
- final Random rnd = new Random();
final StringBuilder stringBuilder = new StringBuilder(prefix);
for (int i = 0; i < RANDOM_FILE_NAME_LENGTH; i++) {
diff --git a/linkis-engineconn-plugins/spark/src/main/java/org/apache/linkis/engineplugin/spark/client/deployment/util/KubernetesHelper.java b/linkis-engineconn-plugins/spark/src/main/java/org/apache/linkis/engineplugin/spark/client/deployment/util/KubernetesHelper.java
index 709308d084a..fe58e7bfd24 100644
--- a/linkis-engineconn-plugins/spark/src/main/java/org/apache/linkis/engineplugin/spark/client/deployment/util/KubernetesHelper.java
+++ b/linkis-engineconn-plugins/spark/src/main/java/org/apache/linkis/engineplugin/spark/client/deployment/util/KubernetesHelper.java
@@ -60,7 +60,7 @@ public static KubernetesClient getKubernetesClient(
kubeConfigFile,
k8sMasterUrl);
- if (StringUtils.isBlank(kubeConfigFile) && StringUtils.isBlank(kubeConfigFile)) {
+ if (StringUtils.isBlank(kubeConfigFile) && StringUtils.isBlank(k8sMasterUrl)) {
throw new KubernetesClientException(
"Both kubeConfigFile and k8sMasterUrl are empty. Initializing KubernetesClient failed.");
}
diff --git a/linkis-engineconn-plugins/spark/src/main/java/org/apache/linkis/engineplugin/spark/common/MultiTreeNode.java b/linkis-engineconn-plugins/spark/src/main/java/org/apache/linkis/engineplugin/spark/common/MultiTreeNode.java
new file mode 100644
index 00000000000..00f7511b8b2
--- /dev/null
+++ b/linkis-engineconn-plugins/spark/src/main/java/org/apache/linkis/engineplugin/spark/common/MultiTreeNode.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.linkis.engineplugin.spark.common;
+
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class MultiTreeNode {
+ int level;
+ LogicalPlan logicalPlan;
+ MultiTreeNode parent;
+ List<MultiTreeNode> children = new ArrayList<>();
+
+ public MultiTreeNode(LogicalPlan logicalPlan) {
+ this.logicalPlan = logicalPlan;
+ }
+
+ public int getLevel() {
+ return level;
+ }
+
+ public void setLevel(int level) {
+ this.level = level;
+ }
+
+ public LogicalPlan getLogicalPlan() {
+ return logicalPlan;
+ }
+
+ public void setLogicalPlan(LogicalPlan logicalPlan) {
+ this.logicalPlan = logicalPlan;
+ }
+
+ public MultiTreeNode getParent() {
+ return parent;
+ }
+
+ public void setParent(MultiTreeNode parent) {
+ this.parent = parent;
+ }
+
+ public List<MultiTreeNode> getChildren() {
+ return children;
+ }
+
+ public void setChildren(List<MultiTreeNode> children) {
+ this.children = children;
+ }
+}
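MultiTreeNode is a plain container that pairs a LogicalPlan with its depth, parent, and children. A minimal Scala sketch of assembling such a tree by hand (OneRowRelation is used only as a stand-in plan; PlanParseUtil.convert below builds these trees from real plans):

import org.apache.linkis.engineplugin.spark.common.MultiTreeNode
import org.apache.spark.sql.catalyst.plans.logical.OneRowRelation

// Build a two-level tree by hand; levels mirror the nesting depth of the plan.
val root = new MultiTreeNode(OneRowRelation())
val child = new MultiTreeNode(OneRowRelation())
child.setLevel(1)
child.setParent(root)
root.getChildren.add(child)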
diff --git a/linkis-engineconn-plugins/spark/src/main/java/org/apache/linkis/engineplugin/spark/errorcode/SparkErrorCodeSummary.java b/linkis-engineconn-plugins/spark/src/main/java/org/apache/linkis/engineplugin/spark/errorcode/SparkErrorCodeSummary.java
index 42f0b66e4d1..ca61d8776fe 100644
--- a/linkis-engineconn-plugins/spark/src/main/java/org/apache/linkis/engineplugin/spark/errorcode/SparkErrorCodeSummary.java
+++ b/linkis-engineconn-plugins/spark/src/main/java/org/apache/linkis/engineplugin/spark/errorcode/SparkErrorCodeSummary.java
@@ -66,6 +66,7 @@ public enum SparkErrorCodeSummary implements LinkisErrorCode {
43032, "The application start failed, since yarn applicationId is null."),
NOT_SUPPORT_METHOD(43040, "Not support method for requestExpectedResource."),
+ NOT_SUPPORT_FUNCTION(43050, "Not support special udf in non-SQL script.(特殊UDF不支持在非sql脚本中使用)"),
LINKIS_SPARK_YARN_CLUSTER_JARS_ERROR(
43042,
diff --git a/linkis-engineconn-plugins/spark/src/main/java/org/apache/linkis/engineplugin/spark/utils/PlanParseUtil.java b/linkis-engineconn-plugins/spark/src/main/java/org/apache/linkis/engineplugin/spark/utils/PlanParseUtil.java
new file mode 100644
index 00000000000..f5224057969
--- /dev/null
+++ b/linkis-engineconn-plugins/spark/src/main/java/org/apache/linkis/engineplugin/spark/utils/PlanParseUtil.java
@@ -0,0 +1,262 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.linkis.engineplugin.spark.utils;
+
+import org.apache.linkis.engineplugin.spark.common.MultiTreeNode;
+
+import org.apache.spark.sql.catalyst.analysis.UnresolvedAlias;
+import org.apache.spark.sql.catalyst.expressions.*;
+import org.apache.spark.sql.catalyst.plans.QueryPlan;
+import org.apache.spark.sql.catalyst.plans.logical.*;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import scala.collection.Seq;
+
+/** Utility class for parsing Spark SQL logical plans. */
+public class PlanParseUtil {
+
+ /**
+ * Convert a logical plan into a multi-way tree.
+ *
+ * @param logicalPlan the logical plan
+ * @param level the level of this node in the multi-way tree
+ * @return the converted multi-way tree
+ */
+ private static MultiTreeNode convert(LogicalPlan logicalPlan, int level) {
+ if (logicalPlan == null) {
+ return null;
+ }
+ MultiTreeNode multiRoot = new MultiTreeNode(logicalPlan);
+ multiRoot.setLevel(level);
+ LogicalPlan sub = null;
+
+ if (logicalPlan instanceof Project) {
+ sub = ((Project) logicalPlan).child();
+ }
+ if (logicalPlan instanceof SubqueryAlias) {
+ sub = ((SubqueryAlias) logicalPlan).child();
+ }
+
+ if (logicalPlan instanceof Filter) {
+ sub = ((Filter) logicalPlan).child();
+ }
+
+ if (logicalPlan instanceof Aggregate) {
+ sub = ((Aggregate) logicalPlan).child();
+ }
+
+ if (sub == null) {
+ return multiRoot;
+ }
+ List<LogicalPlan> children = new ArrayList<>();
+ children.add(sub);
+
+ Seq<QueryPlan<?>> seq = logicalPlan.innerChildren();
+ if (seq != null && seq.size() > 0) {
+ scala.collection.Iterator<QueryPlan<?>> it = seq.iterator();
+ while (it.hasNext()) {
+ children.add((LogicalPlan) it.next());
+ }
+ }
+
+ for (LogicalPlan childItem : children) {
+ sub = childItem;
+ if (sub instanceof Join) {
+ Join join = (Join) sub;
+ LogicalPlan right = join.right();
+ if (right != null) {
+ MultiTreeNode rightTree = convert(right, level + 1);
+ rightTree.setParent(multiRoot);
+ multiRoot.getChildren().add(rightTree);
+ }
+ LogicalPlan left = join.left();
+ while (left != null) {
+ // Handle the case where the right side of left is a Project or SubqueryAlias
+ MultiTreeNode childNode = new MultiTreeNode(left);
+ childNode.setParent(multiRoot);
+ childNode.setLevel(level + 1);
+ multiRoot.getChildren().add(childNode);
+ if (left instanceof Join) {
+ Join leftJoin = (Join) left;
+ left = leftJoin.left();
+ LogicalPlan subRight = leftJoin.right();
+ if (subRight != null
+ && (subRight instanceof Project || subRight instanceof SubqueryAlias)) {
+ MultiTreeNode subRightNode = convert(subRight, level + 2);
+ subRightNode.setParent(childNode);
+ childNode.getChildren().add(subRightNode);
+ }
+ } else if (left instanceof SubqueryAlias) {
+ MultiTreeNode subNode = convert(((SubqueryAlias) left).child(), level + 2);
+ subNode.setParent(childNode);
+ childNode.getChildren().add(subNode);
+ left = null;
+ } else {
+ left = null;
+ }
+ }
+ }
+
+ // Handle subqueries that contain a limit
+ if (sub instanceof GlobalLimit) {
+ GlobalLimit gl = (GlobalLimit) sub;
+ sub = gl.child();
+ if (sub instanceof LocalLimit) {
+ LocalLimit ll = (LocalLimit) sub;
+ sub = ll.child();
+ }
+ }
+
+ if (sub instanceof Project
+ || sub instanceof SubqueryAlias
+ || sub instanceof Aggregate
+ || sub instanceof Filter) {
+ MultiTreeNode childNode = convert(sub, level + 1);
+ childNode.setParent(multiRoot);
+ multiRoot.getChildren().add(childNode);
+ }
+ }
+ return multiRoot;
+ }
+
+ /**
+ * Check whether any of the given UDFs is referenced by the query fields of the logical plan.
+ *
+ * @param logicalPlan the logical plan
+ * @param udfNames the UDF names to check
+ * @return true if at least one of the UDFs is used
+ */
+ public static boolean checkUdf(LogicalPlan logicalPlan, String[] udfNames) {
+ if (udfNames == null || udfNames.length == 0) {
+ return false;
+ }
+
+ // Handle limit
+ if (logicalPlan instanceof GlobalLimit) {
+ GlobalLimit gl = (GlobalLimit) logicalPlan;
+ logicalPlan = gl.child();
+ if (logicalPlan instanceof LocalLimit) {
+ LocalLimit ll = (LocalLimit) logicalPlan;
+ logicalPlan = ll.child();
+ }
+ }
+ // Handle order by
+ if (logicalPlan instanceof Sort) {
+ Sort sort = (Sort) logicalPlan;
+ logicalPlan = sort.child();
+ }
+
+ MultiTreeNode root = convert(logicalPlan, 0);
+ for (String udfName : udfNames) {
+ if (containsUdf(root, udfName)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Check whether the given UDF is used in the logical plan.
+ *
+ * @param multiTreeNode the multi-way tree converted from the logical plan
+ * @param udfName the UDF name to check
+ * @return the check result
+ */
+ public static boolean containsUdf(MultiTreeNode multiTreeNode, String udfName) {
+ if (multiTreeNode == null) {
+ return false;
+ }
+ LogicalPlan logicalPlan = multiTreeNode.getLogicalPlan();
+ if (logicalPlan == null) {
+ return false;
+ }
+
+ if (logicalPlan instanceof Filter) {
+ Filter filter = (Filter) logicalPlan;
+ logicalPlan = filter.child();
+ }
+
+ // SubqueryAlias Filter Aggregate
+ Seq<NamedExpression> seq = null;
+ if (logicalPlan instanceof Aggregate) {
+ seq = ((Aggregate) logicalPlan).aggregateExpressions();
+ }
+ if (logicalPlan instanceof Project) {
+ seq = ((Project) logicalPlan).projectList();
+ }
+
+ if (seq != null && !seq.isEmpty()) {
+ scala.collection.Iterator<NamedExpression> it = seq.iterator();
+ while (it.hasNext()) {
+ NamedExpression next = it.next();
+ if (next instanceof Alias) {
+ Alias alias = (Alias) next;
+ if (alias.name().contains(udfName)) {
+ return true;
+ }
+ Expression child = alias.child();
+ if (child instanceof ScalaUDF) {
+ ScalaUDF su = (ScalaUDF) child;
+ String useUdfName = su.udfName().get();
+ if (udfName.equals(useUdfName)) {
+ return true;
+ }
+ }
+ if (child instanceof PythonUDF) {
+ PythonUDF pu = (PythonUDF) child;
+ String useUdfName = pu.name();
+ if (udfName.equals(useUdfName)) {
+ return true;
+ }
+ }
+ }
+ if (next instanceof UnresolvedAlias) {
+ UnresolvedAlias alias = (UnresolvedAlias) next;
+ Expression child = alias.child();
+ if (child instanceof ScalaUDF) {
+ ScalaUDF su = (ScalaUDF) child;
+ String useUdfName = su.udfName().get();
+ if (udfName.equals(useUdfName)) {
+ return true;
+ }
+ }
+ if (child instanceof PythonUDF) {
+ PythonUDF pu = (PythonUDF) child;
+ String useUdfName = pu.name();
+ if (udfName.equals(useUdfName)) {
+ return true;
+ }
+ }
+ }
+ }
+ }
+
+ if (multiTreeNode.getChildren() == null) {
+ return false;
+ }
+ for (MultiTreeNode node : multiTreeNode.getChildren()) {
+ boolean subRes = containsUdf(node, udfName);
+ if (subRes) {
+ return true;
+ }
+ }
+ return false;
+ }
+}
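A minimal sketch of driving the new utility from a local Spark session; the UDF name, the temp view, and the use of queryExecution.analyzed are illustrative assumptions rather than how Linkis itself invokes the check (that wiring appears in SparkUDFCheckRule and SparkEngineConnExecutor further below):

import org.apache.linkis.engineplugin.spark.utils.PlanParseUtil
import org.apache.spark.sql.SparkSession

object PlanParseUtilExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[1]").appName("udf-check").getOrCreate()
    // Hypothetical restricted UDF and table, registered only for this sketch.
    spark.udf.register("my_special_udf", (s: String) => s.toUpperCase)
    spark.range(10).toDF("id").createOrReplaceTempView("demo_table")

    // Use the analyzed plan: ScalaUDF/PythonUDF nodes only exist after analysis.
    val plan = spark
      .sql("SELECT my_special_udf(CAST(id AS STRING)) AS c FROM demo_table")
      .queryExecution
      .analyzed
    val hit = PlanParseUtil.checkUdf(plan, Array("my_special_udf"))
    println(s"plan references restricted UDF: $hit")
    spark.stop()
  }
}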
diff --git a/linkis-engineconn-plugins/spark/src/main/resources/log4j2.xml b/linkis-engineconn-plugins/spark/src/main/resources/log4j2.xml
index 298cf8cf703..547883bf548 100644
--- a/linkis-engineconn-plugins/spark/src/main/resources/log4j2.xml
+++ b/linkis-engineconn-plugins/spark/src/main/resources/log4j2.xml
@@ -31,7 +31,10 @@
-
+
+
+
+
@@ -84,5 +87,8 @@
+
+
+
diff --git a/linkis-engineconn-plugins/spark/src/main/scala/org/apache/linkis/engineplugin/spark/config/SparkConfiguration.scala b/linkis-engineconn-plugins/spark/src/main/scala/org/apache/linkis/engineplugin/spark/config/SparkConfiguration.scala
index 9b0e184b73e..ad3c5ef232b 100644
--- a/linkis-engineconn-plugins/spark/src/main/scala/org/apache/linkis/engineplugin/spark/config/SparkConfiguration.scala
+++ b/linkis-engineconn-plugins/spark/src/main/scala/org/apache/linkis/engineplugin/spark/config/SparkConfiguration.scala
@@ -188,6 +188,10 @@ object SparkConfiguration extends Logging {
val SCALA_PARSE_APPEND_CODE =
CommonVars("linkis.scala.parse.append.code", "val linkisVar=1").getValue
+ // Multiple configuration entries are separated by commas
+ val SPARK_ENGINE_EXTENSION_CONF =
+ CommonVars("linkis.spark.engine.extension.conf", "spark.sql.shuffle.partitions=200").getValue
+
val SPARKMEASURE_AGGREGATE_TYPE = "linkis.sparkmeasure.aggregate.type"
val SPARKMEASURE_FLIGHT_RECORDER_TYPE =
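SPARK_ENGINE_EXTENSION_CONF carries comma-separated key=value pairs. A small sketch of parsing such a value into a map, in the same spirit as the split handling added to SparkEngineConnExecutor.beforeExecute below (the second pair is only an example):

// Parse "k1=v1,k2=v2" into a Map, skipping malformed entries.
def parseExtensionConf(conf: String): Map[String, String] =
  conf
    .split(',')
    .map(_.trim)
    .filter(_.contains("="))
    .map { kv =>
      val Array(k, v) = kv.split("=", 2)
      k.trim -> v.trim
    }
    .toMap

// parseExtensionConf("spark.sql.shuffle.partitions=200,spark.sql.adaptive.enabled=true")
//   == Map("spark.sql.shuffle.partitions" -> "200", "spark.sql.adaptive.enabled" -> "true")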
diff --git a/linkis-engineconn-plugins/spark/src/main/scala/org/apache/linkis/engineplugin/spark/exception/RuleCheckFailedException.scala b/linkis-engineconn-plugins/spark/src/main/scala/org/apache/linkis/engineplugin/spark/exception/RuleCheckFailedException.scala
new file mode 100644
index 00000000000..8c7adb7a3f5
--- /dev/null
+++ b/linkis-engineconn-plugins/spark/src/main/scala/org/apache/linkis/engineplugin/spark/exception/RuleCheckFailedException.scala
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.linkis.engineplugin.spark.exception
+
+import org.apache.linkis.common.exception.ErrorException
+
+/**
+ */
+class RuleCheckFailedException(errCode: Int, desc: String) extends ErrorException(errCode, desc) {}
diff --git a/linkis-engineconn-plugins/spark/src/main/scala/org/apache/linkis/engineplugin/spark/executor/SQLSession.scala b/linkis-engineconn-plugins/spark/src/main/scala/org/apache/linkis/engineplugin/spark/executor/SQLSession.scala
index de63fa0ef1b..0b58e115e03 100644
--- a/linkis-engineconn-plugins/spark/src/main/scala/org/apache/linkis/engineplugin/spark/executor/SQLSession.scala
+++ b/linkis-engineconn-plugins/spark/src/main/scala/org/apache/linkis/engineplugin/spark/executor/SQLSession.scala
@@ -123,6 +123,20 @@ object SQLSession extends Logging {
columns.foreach(c => logger.info(s"c is ${c.columnName}, comment is ${c.comment}"))
if (columns == null || columns.isEmpty) return
val metaData = new TableMetaData(columns)
+
+ // Handle the result set index when a failed task is retried
+ val errorIndex: Integer = Integer.valueOf(
+ engineExecutionContext.getProperties.getOrDefault("execute.error.code.index", "-1").toString
+ )
+ val hasSetResultSetNum: Boolean = engineExecutionContext.getProperties
+ .getOrDefault("hasSetResultSetNum", "true")
+ .toString
+ .toBoolean
+ if (hasSetResultSetNum && errorIndex > 0) {
+ engineExecutionContext.setResultSetNum(errorIndex)
+ engineExecutionContext.getProperties.put("hasSetResultSetNum", "false")
+ }
+
val writer =
if (StringUtils.isNotBlank(alias)) {
engineExecutionContext.createResultSetWriter(ResultSetFactory.TABLE_TYPE, alias)
diff --git a/linkis-engineconn-plugins/spark/src/main/scala/org/apache/linkis/engineplugin/spark/executor/SparkEngineConnExecutor.scala b/linkis-engineconn-plugins/spark/src/main/scala/org/apache/linkis/engineplugin/spark/executor/SparkEngineConnExecutor.scala
index 388cc4f27ea..58ef97fcfb8 100644
--- a/linkis-engineconn-plugins/spark/src/main/scala/org/apache/linkis/engineplugin/spark/executor/SparkEngineConnExecutor.scala
+++ b/linkis-engineconn-plugins/spark/src/main/scala/org/apache/linkis/engineplugin/spark/executor/SparkEngineConnExecutor.scala
@@ -18,8 +18,10 @@
package org.apache.linkis.engineplugin.spark.executor
import org.apache.linkis.common.log.LogUtils
-import org.apache.linkis.common.utils.{ByteTimeUtils, Logging, Utils}
+import org.apache.linkis.common.utils.{ByteTimeUtils, CodeAndRunTypeUtils, Logging, Utils}
import org.apache.linkis.engineconn.common.conf.{EngineConnConf, EngineConnConstant}
+import org.apache.linkis.engineconn.computation.executor.conf.ComputationExecutorConf
+import org.apache.linkis.engineconn.computation.executor.entity.EngineConnTask
import org.apache.linkis.engineconn.computation.executor.execute.{
ComputationExecutor,
EngineExecutionContext
@@ -34,6 +36,8 @@ import org.apache.linkis.engineconn.executor.entity.{ResourceFetchExecutor, Yarn
import org.apache.linkis.engineplugin.spark.common.{Kind, SparkDataCalc}
import org.apache.linkis.engineplugin.spark.config.SparkConfiguration
import org.apache.linkis.engineplugin.spark.cs.CSSparkHelper
+import org.apache.linkis.engineplugin.spark.errorcode.SparkErrorCodeSummary
+import org.apache.linkis.engineplugin.spark.exception.RuleCheckFailedException
import org.apache.linkis.engineplugin.spark.extension.{
SparkPostExecutionHook,
SparkPreExecutionHook
@@ -41,6 +45,10 @@ import org.apache.linkis.engineplugin.spark.extension.{
import org.apache.linkis.engineplugin.spark.utils.JobProgressUtil
import org.apache.linkis.governance.common.conf.GovernanceCommonConf
import org.apache.linkis.governance.common.exception.LinkisJobRetryException
+import org.apache.linkis.governance.common.exception.engineconn.{
+ EngineConnExecutorErrorCode,
+ EngineConnExecutorErrorException
+}
import org.apache.linkis.governance.common.utils.JobUtils
import org.apache.linkis.manager.common.entity.enumeration.NodeStatus
import org.apache.linkis.manager.common.entity.resource._
@@ -48,8 +56,10 @@ import org.apache.linkis.manager.common.protocol.resource.ResourceWithStatus
import org.apache.linkis.manager.label.constant.LabelKeyConstant
import org.apache.linkis.manager.label.entity.Label
import org.apache.linkis.manager.label.entity.engine.CodeLanguageLabel
+import org.apache.linkis.manager.label.utils.{LabelUtil, LabelUtils}
import org.apache.linkis.protocol.engine.JobProgressInfo
import org.apache.linkis.scheduler.executer.ExecuteResponse
+import org.apache.linkis.server.toJavaMap
import org.apache.commons.lang3.StringUtils
import org.apache.spark.SparkContext
@@ -83,6 +93,7 @@ abstract class SparkEngineConnExecutor(val sc: SparkContext, id: Long)
private var applicationId: String = sc.applicationId
+ private var sparkTmpConf = Map[String, String]()
override def getApplicationId: String = applicationId
override def getApplicationURL: String = ""
@@ -131,6 +142,31 @@ abstract class SparkEngineConnExecutor(val sc: SparkContext, id: Long)
)
}
+ // Validate the code by matching restricted UDF names
+ val ready = EngineConnObject.isReady
+ val jobId: String = JobUtils.getJobIdFromMap(engineExecutorContext.getProperties)
+ val udfNames: String = System.getProperty(ComputationExecutorConf.ONLY_SQL_USE_UDF_KEY, "")
+ if (ready && StringUtils.isNotBlank(udfNames) && StringUtils.isNotBlank(jobId)) {
+ val codeType: String = LabelUtil.getCodeType(engineExecutorContext.getLabels.toList.asJava)
+ val languageType: String = CodeAndRunTypeUtils.getLanguageTypeByCodeType(codeType)
+ // sql or python
+ if (!ComputationExecutorConf.SUPPORT_SPECIAL_UDF_LANGUAGES.getValue.contains(languageType)) {
+ val udfNames: String = ComputationExecutorConf.SPECIAL_UDF_NAMES.getValue
+ if (StringUtils.isNotBlank(udfNames)) {
+ val funcNames: Array[String] = udfNames.split(",")
+ funcNames.foreach(funcName => {
+ if (code.contains(funcName)) {
+ logger.info("contains specific functionName: {}", udfNames)
+ throw new RuleCheckFailedException(
+ SparkErrorCodeSummary.NOT_SUPPORT_FUNCTION.getErrorCode,
+ SparkErrorCodeSummary.NOT_SUPPORT_FUNCTION.getErrorDesc
+ )
+ }
+ })
+ }
+ }
+ }
+
// Pre-execution hook
var executionHook: SparkPreExecutionHook = null
Utils.tryCatch {
@@ -155,7 +191,6 @@ abstract class SparkEngineConnExecutor(val sc: SparkContext, id: Long)
case _ => Kind.getRealCode(preCode)
}
logger.info(s"Ready to run code with kind $kind.")
- val jobId = JobUtils.getJobIdFromMap(engineExecutorContext.getProperties)
val jobGroupId = if (StringUtils.isNotBlank(jobId)) {
jobId
} else {
@@ -166,8 +201,11 @@ abstract class SparkEngineConnExecutor(val sc: SparkContext, id: Long)
logger.info("Set jobGroup to " + jobGroup)
sc.setJobGroup(jobGroup, _code, true)
- // print job configuration, only the first paragraph
- if (isFirstParagraph == true) {
+ // print job configuration, only the first paragraph or retry
+ val errorIndex: Integer = Integer.valueOf(
+ engineExecutionContext.getProperties.getOrDefault("execute.error.code.index", "-1").toString
+ )
+ if (isFirstParagraph || (errorIndex + 1 == engineExecutorContext.getCurrentParagraph)) {
Utils.tryCatch({
val executorNum: Int = sc.getConf.get("spark.executor.instances").toInt
val executorMem: Long =
@@ -182,6 +220,11 @@ abstract class SparkEngineConnExecutor(val sc: SparkContext, id: Long)
val pythonVersion = SparkConfiguration.SPARK_PYTHON_VERSION.getValue(
EngineConnObject.getEngineCreationContext.getOptions
)
+ var engineType = ""
+ val labels = engineExecutorContext.getLabels
+ if (labels.length > 0) {
+ engineType = LabelUtil.getEngineTypeLabel(labels.toList.asJava).getStringValue
+ }
val sb = new StringBuilder
sb.append(s"spark.executor.instances=$executorNum\n")
sb.append(s"spark.executor.memory=${executorMem}G\n")
@@ -191,6 +234,18 @@ abstract class SparkEngineConnExecutor(val sc: SparkContext, id: Long)
sb.append(s"spark.yarn.queue=$queue\n")
sb.append(s"spark.executor.memoryOverhead=${memoryOverhead}\n")
sb.append(s"spark.python.version=$pythonVersion\n")
+ sb.append(s"spark.engineType=$engineType\n")
+ val dynamicAllocation: String = sc.getConf.get("spark.dynamicAllocation.enabled", "false")
+ if ("true".equals(dynamicAllocation)) {
+ val shuffleEnabled: String = sc.getConf.get("spark.shuffle.service.enabled", "false")
+ val minExecutors: Int = sc.getConf.get("spark.dynamicAllocation.minExecutors", "1").toInt
+ val maxExecutors: Int =
+ sc.getConf.get("spark.dynamicAllocation.maxExecutors", "50").toInt
+ sb.append("spark.dynamicAllocation.enabled=true\n")
+ sb.append(s"spark.shuffle.service.enabled=$shuffleEnabled\n")
+ sb.append(s"spark.dynamicAllocation.minExecutors=$minExecutors\n")
+ sb.append(s"spark.dynamicAllocation.maxExecutors=$maxExecutors\n")
+ }
sb.append("\n")
engineExecutionContext.appendStdout(
LogUtils.generateInfo(s" Your spark job exec with configs:\n${sb.toString()}")
@@ -373,4 +428,62 @@ abstract class SparkEngineConnExecutor(val sc: SparkContext, id: Long)
super.close()
}
+ override protected def beforeExecute(engineConnTask: EngineConnTask): Unit = {
+ super.beforeExecute(engineConnTask)
+ if (
+ EngineConnConf.ENGINE_CONF_REVENT_SWITCH.getValue && sparkTmpConf.isEmpty && this
+ .isInstanceOf[SparkSqlExecutor]
+ ) {
+ val sqlContext = this.asInstanceOf[SparkSqlExecutor].getSparkEngineSession.sqlContext
+ sparkTmpConf = sqlContext.getAllConfs
+ // Maintain the Spark extension configurations so that mismatches between the default configurations of different Spark versions and the user configurations do not prevent the configuration from being rolled back
+ SparkConfiguration.SPARK_ENGINE_EXTENSION_CONF
+ .split(',')
+ .foreach(keyValue => {
+ val key = keyValue.split("=")(0).trim
+ val value = keyValue.split("=")(1).trim
+ if (!sparkTmpConf.containsKey(key)) {
+ sparkTmpConf += key -> value
+ }
+ })
+ }
+ }
+
+ override protected def afterExecute(
+ engineConnTask: EngineConnTask,
+ executeResponse: ExecuteResponse
+ ): Unit = {
+ try {
+ if (
+ EngineConnConf.ENGINE_CONF_REVENT_SWITCH.getValue
+ && sparkTmpConf.nonEmpty
+ && this.isInstanceOf[SparkSqlExecutor]
+ ) {
+
+ val sqlExecutor = this.asInstanceOf[SparkSqlExecutor]
+ Option(sqlExecutor.getSparkEngineSession)
+ .flatMap(session => Option(session.sqlContext))
+ .foreach { sqlContext =>
+ sparkTmpConf.foreach { case (key, value) =>
+ if (value != null && !value.equals(sqlContext.getConf(key))) {
+ sqlContext.setConf(key, value)
+ }
+ }
+ // Clear configurations that were added during execution
+ sqlContext.getAllConfs.keys.foreach { key =>
+ if (!sparkTmpConf.contains(key)) {
+ logger.info(s"Clearing extra configuration key: $key")
+ sqlContext.setConf(key, "")
+ }
+ }
+ }
+ }
+ } catch {
+ case e: Exception =>
+ logger.error(s"Error in afterExecute for task ${engineConnTask.getTaskId}", e)
+ } finally {
+ super.afterExecute(engineConnTask, executeResponse)
+ }
+ }
+
}
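The beforeExecute/afterExecute pair above snapshots the SQLContext configuration before a SQL task runs and rolls back whatever the task changed or added. A condensed sketch of the same pattern, detached from the executor classes (none of the names below come from the patch):

import org.apache.spark.sql.SQLContext

// Snapshot the confs, run the task body, then restore the snapshot.
def withConfRollback[T](sqlContext: SQLContext)(body: => T): T = {
  val snapshot = sqlContext.getAllConfs
  try body
  finally {
    // Put back values the task overwrote.
    snapshot.foreach { case (k, v) =>
      if (sqlContext.getConf(k, "") != v) sqlContext.setConf(k, v)
    }
    // Blank out keys the task added that were not in the snapshot.
    sqlContext.getAllConfs.keys.filterNot(snapshot.contains).foreach(sqlContext.setConf(_, ""))
  }
}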
diff --git a/linkis-engineconn-plugins/spark/src/main/scala/org/apache/linkis/engineplugin/spark/executor/SparkSqlExecutor.scala b/linkis-engineconn-plugins/spark/src/main/scala/org/apache/linkis/engineplugin/spark/executor/SparkSqlExecutor.scala
index 17f13c78038..b32a0744058 100644
--- a/linkis-engineconn-plugins/spark/src/main/scala/org/apache/linkis/engineplugin/spark/executor/SparkSqlExecutor.scala
+++ b/linkis-engineconn-plugins/spark/src/main/scala/org/apache/linkis/engineplugin/spark/executor/SparkSqlExecutor.scala
@@ -182,4 +182,5 @@ class SparkSqlExecutor(
}
override protected def getExecutorIdPreFix: String = "SparkSqlExecutor_"
+ def getSparkEngineSession: SparkEngineSession = sparkEngineSession
}
diff --git a/linkis-engineconn-plugins/spark/src/main/scala/org/apache/linkis/engineplugin/spark/extension/SparkUDFCheckRule.scala b/linkis-engineconn-plugins/spark/src/main/scala/org/apache/linkis/engineplugin/spark/extension/SparkUDFCheckRule.scala
new file mode 100644
index 00000000000..87dbc23db71
--- /dev/null
+++ b/linkis-engineconn-plugins/spark/src/main/scala/org/apache/linkis/engineplugin/spark/extension/SparkUDFCheckRule.scala
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.linkis.engineplugin.spark.extension
+
+import org.apache.linkis.engineconn.computation.executor.conf.ComputationExecutorConf
+import org.apache.linkis.engineplugin.spark.errorcode.SparkErrorCodeSummary
+import org.apache.linkis.engineplugin.spark.exception.RuleCheckFailedException
+import org.apache.linkis.engineplugin.spark.utils.PlanParseUtil
+
+import org.apache.commons.lang3.StringUtils
+import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.apache.spark.sql.catalyst.rules.Rule
+
+import org.slf4j.{Logger, LoggerFactory}
+
+case class SparkUDFCheckRule(sparkSession: SparkSession) extends Rule[LogicalPlan] {
+ val logger: Logger = LoggerFactory.getLogger(getClass)
+
+ override def apply(plan: LogicalPlan): LogicalPlan = {
+ // Get the code type from the system properties
+ val codeType: String = System.getProperty(ComputationExecutorConf.CODE_TYPE, "sql")
+ // Get the UDF names from the system properties
+ val udfNames: String = System.getProperty(ComputationExecutorConf.ONLY_SQL_USE_UDF_KEY, "")
+
+ if (
+ ComputationExecutorConf.SUPPORT_SPECIAL_UDF_LANGUAGES.getValue.contains(
+ codeType
+ ) || StringUtils.isBlank(udfNames)
+ ) {
+ // If the code type is SQL, or no special UDF is registered, return the original plan directly
+ plan
+ } else {
+ // If the code type is not SQL, check the logical plan
+ try {
+ val udfName: Array[String] = udfNames.split(",")
+ if (PlanParseUtil.checkUdf(plan, udfName)) {
+ logger.info("contains specific functionName: {}", udfNames)
+ throw new RuleCheckFailedException(
+ SparkErrorCodeSummary.NOT_SUPPORT_FUNCTION.getErrorCode,
+ SparkErrorCodeSummary.NOT_SUPPORT_FUNCTION.getErrorDesc
+ )
+ }
+ } catch {
+ case e: RuleCheckFailedException =>
+ throw e
+ case e: Exception =>
+ logger.info("check udf function error: {}", e.getMessage)
+ }
+ plan
+ }
+ }
+
+}
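SparkUDFCheckRule is an ordinary Rule[LogicalPlan], so it can be attached to a session through SparkSessionExtensions. The patch does not show where Linkis registers it; the snippet below is only one possible way to inject such a rule into a standalone session:

import org.apache.linkis.engineplugin.spark.extension.SparkUDFCheckRule
import org.apache.spark.sql.SparkSession

val spark = SparkSession
  .builder()
  .master("local[1]")
  // injectResolutionRule is one possible extension point for a Rule[LogicalPlan].
  .withExtensions(ext => ext.injectResolutionRule(session => SparkUDFCheckRule(session)))
  .getOrCreate()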
diff --git a/linkis-extensions/linkis-et-monitor/src/main/java/org/apache/linkis/monitor/config/ApplicationConfiguration.java b/linkis-extensions/linkis-et-monitor/src/main/java/org/apache/linkis/monitor/config/ApplicationConfiguration.java
index 95fa5b959f0..7318accdcd7 100644
--- a/linkis-extensions/linkis-et-monitor/src/main/java/org/apache/linkis/monitor/config/ApplicationConfiguration.java
+++ b/linkis-extensions/linkis-et-monitor/src/main/java/org/apache/linkis/monitor/config/ApplicationConfiguration.java
@@ -28,7 +28,6 @@
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
-import de.codecentric.boot.admin.server.utils.jackson.AdminServerModule;
@Configuration
class ApplicationConfiguration implements WebMvcConfigurer {
@@ -36,7 +35,6 @@ class ApplicationConfiguration implements WebMvcConfigurer {
@Primary
public ObjectMapper jsonMapper() {
ObjectMapper mapper = new ObjectMapper();
- mapper.registerModule(new AdminServerModule(new String[] {".*password$"}));
mapper.registerModule(new JavaTimeModule());
return mapper;
}
diff --git a/linkis-extensions/linkis-et-monitor/src/main/java/org/apache/linkis/monitor/scheduled/JobHistoryMonitor.java b/linkis-extensions/linkis-et-monitor/src/main/java/org/apache/linkis/monitor/scheduled/JobHistoryMonitor.java
index c1a7a16d065..1d96819e09b 100644
--- a/linkis-extensions/linkis-et-monitor/src/main/java/org/apache/linkis/monitor/scheduled/JobHistoryMonitor.java
+++ b/linkis-extensions/linkis-et-monitor/src/main/java/org/apache/linkis/monitor/scheduled/JobHistoryMonitor.java
@@ -30,6 +30,10 @@
import org.apache.linkis.monitor.jobhistory.index.JobIndexRule;
import org.apache.linkis.monitor.jobhistory.index.JobIndexSender;
import org.apache.linkis.monitor.jobhistory.jobtime.*;
+import org.apache.linkis.monitor.jobhistory.jobtime.JobTimeExceedAlertSender;
+import org.apache.linkis.monitor.jobhistory.jobtime.JobTimeExceedRule;
+import org.apache.linkis.monitor.jobhistory.jobtime.StarrocksTimeExceedAlterSender;
+import org.apache.linkis.monitor.jobhistory.jobtime.StarrocksTimeExceedRule;
import org.apache.linkis.monitor.jobhistory.labels.JobHistoryLabelsAlertSender;
import org.apache.linkis.monitor.jobhistory.labels.JobHistoryLabelsRule;
import org.apache.linkis.monitor.jobhistory.runtime.CommonJobRunTimeRule;
@@ -222,6 +226,9 @@ public void jobHistoryUnfinishedScan() {
jobTimeAlerts.keySet(), new JobTimeExceedAlertSender(jobTimeAlerts));
scanner.addScanRule(jobTimeExceedRule);
}
+ StarrocksTimeExceedRule starrocksTimeExceedRule =
+ new StarrocksTimeExceedRule(new StarrocksTimeExceedAlterSender());
+ scanner.addScanRule(starrocksTimeExceedRule);
JobMonitorUtils.run(scanner, fetchers, shouldStart);
}
diff --git a/linkis-extensions/linkis-et-monitor/src/main/java/org/apache/linkis/monitor/scheduled/TaskArchiveClear.java b/linkis-extensions/linkis-et-monitor/src/main/java/org/apache/linkis/monitor/scheduled/TaskArchiveClear.java
index 913b3a41deb..96518e153b1 100644
--- a/linkis-extensions/linkis-et-monitor/src/main/java/org/apache/linkis/monitor/scheduled/TaskArchiveClear.java
+++ b/linkis-extensions/linkis-et-monitor/src/main/java/org/apache/linkis/monitor/scheduled/TaskArchiveClear.java
@@ -18,6 +18,7 @@
package org.apache.linkis.monitor.scheduled;
import org.apache.linkis.monitor.config.MonitorConfig;
+import org.apache.linkis.monitor.constants.Constants;
import org.apache.linkis.monitor.until.ThreadUtils;
import org.apache.linkis.monitor.utils.log.LogUtils;
@@ -44,9 +45,9 @@ public void taskLogClear() {
logger.info("Start to linkis_task_archive shell");
List cmdlist = new ArrayList<>();
cmdlist.add("sh");
- cmdlist.add(MonitorConfig.shellPath + "linkis_task_archive.sh");
- logger.info("linkis_task_archive.sh shell command {}", cmdlist);
- String exec = ThreadUtils.run(cmdlist, "linkis_task_archive.sh");
+ cmdlist.add(MonitorConfig.shellPath + Constants.TASK_ARCHIVE_SH());
+ logger.info(Constants.TASK_ARCHIVE_SH() + " shell command {}", cmdlist);
+ String exec = ThreadUtils.run(cmdlist, Constants.TASK_ARCHIVE_SH());
logger.info("shell log {}", exec);
logger.info("End to linkis_task_archive shell ");
}
diff --git a/linkis-extensions/linkis-et-monitor/src/main/java/org/apache/linkis/monitor/scheduled/UserDepartmentInfoSync.java b/linkis-extensions/linkis-et-monitor/src/main/java/org/apache/linkis/monitor/scheduled/UserDepartmentInfoSync.java
index 338d220489b..bdc4720b72e 100644
--- a/linkis-extensions/linkis-et-monitor/src/main/java/org/apache/linkis/monitor/scheduled/UserDepartmentInfoSync.java
+++ b/linkis-extensions/linkis-et-monitor/src/main/java/org/apache/linkis/monitor/scheduled/UserDepartmentInfoSync.java
@@ -53,7 +53,7 @@ public class UserDepartmentInfoSync {
private static final UserDepartmentInfoMapper userDepartmentInfoMapper =
MapperFactory.getUserDepartmentInfoMapper();
- @Scheduled(cron = "${linkis.monitor.org.user.sync.cron:0 0 11 1/7 * ?}")
+ @Scheduled(cron = "${linkis.monitor.org.user.sync.cron:0 0 0 1/7 * ?}")
public static void DepartmentInfoSync() {
// Get linkis_org_user_sync information
// Collect abnormal users
diff --git a/linkis-extensions/linkis-et-monitor/src/main/java/org/apache/linkis/monitor/until/ThreadUtils.java b/linkis-extensions/linkis-et-monitor/src/main/java/org/apache/linkis/monitor/until/ThreadUtils.java
index 5e4133aa906..12302104563 100644
--- a/linkis-extensions/linkis-et-monitor/src/main/java/org/apache/linkis/monitor/until/ThreadUtils.java
+++ b/linkis-extensions/linkis-et-monitor/src/main/java/org/apache/linkis/monitor/until/ThreadUtils.java
@@ -46,13 +46,20 @@ public class ThreadUtils extends ApplicationContextEvent {
public static ExecutionContextExecutorService executors_analyze =
Utils.newCachedExecutionContext(50, "analyze-pool-thread-", false);
+ public static ExecutionContextExecutorService executors_archive =
+ Utils.newCachedExecutionContext(10, "archive-pool-thread-", false);
+
public ThreadUtils(ApplicationContext source) {
super(source);
}
public static String run(List cmdList, String shellName) {
FutureTask future = new FutureTask(() -> Utils.exec(cmdList.toArray(new String[2]), -1));
- executors.submit(future);
+ if (shellName.equals(Constants.TASK_ARCHIVE_SH())) {
+ executors_archive.submit(future);
+ } else {
+ executors.submit(future);
+ }
String msg = "";
try {
msg = future.get(MonitorConfig.SHELL_TIMEOUT.getValue(), TimeUnit.MINUTES).toString();
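Routing the archive shell to a dedicated pool keeps long-running archive jobs from starving the shared shell pool. A rough Scala sketch of the same routing idea with a bounded wait; the pool sizes and fixed-thread pools here are illustrative, not the cached pools used by the monitor module:

import java.util.concurrent.{Callable, Executors, FutureTask, TimeUnit}

val sharedPool = Executors.newFixedThreadPool(50)
val archivePool = Executors.newFixedThreadPool(10)

def runShell(shellName: String, task: Callable[String], timeoutMinutes: Long): String = {
  val future = new FutureTask[String](task)
  // Dedicated pool for the archive script, shared pool for everything else.
  if (shellName == "linkis_task_archive.sh") archivePool.submit(future)
  else sharedPool.submit(future)
  future.get(timeoutMinutes, TimeUnit.MINUTES)
}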
diff --git a/linkis-extensions/linkis-et-monitor/src/main/resources/mapper/common/UserDepartmentInfoMapper.xml b/linkis-extensions/linkis-et-monitor/src/main/resources/mapper/common/UserDepartmentInfoMapper.xml
index 295cee560f2..176e3b67030 100644
--- a/linkis-extensions/linkis-et-monitor/src/main/resources/mapper/common/UserDepartmentInfoMapper.xml
+++ b/linkis-extensions/linkis-et-monitor/src/main/resources/mapper/common/UserDepartmentInfoMapper.xml
@@ -1,4 +1,21 @@
+
+
diff --git a/linkis-extensions/linkis-et-monitor/src/main/scala/org/apache/linkis/monitor/constants/Constants.scala b/linkis-extensions/linkis-et-monitor/src/main/scala/org/apache/linkis/monitor/constants/Constants.scala
index affa0ccb836..995cb9bb0a3 100644
--- a/linkis-extensions/linkis-et-monitor/src/main/scala/org/apache/linkis/monitor/constants/Constants.scala
+++ b/linkis-extensions/linkis-et-monitor/src/main/scala/org/apache/linkis/monitor/constants/Constants.scala
@@ -113,4 +113,5 @@ object Constants {
val JDBC_ENGINE = "jdbc"
+ val TASK_ARCHIVE_SH = "linkis_task_archive.sh"
}
diff --git a/linkis-extensions/linkis-et-monitor/src/main/scala/org/apache/linkis/monitor/jobhistory/jobtime/JobTimeExceedRule.scala b/linkis-extensions/linkis-et-monitor/src/main/scala/org/apache/linkis/monitor/jobhistory/jobtime/JobTimeExceedRule.scala
index 0367bfc05e1..9e633496a7b 100644
--- a/linkis-extensions/linkis-et-monitor/src/main/scala/org/apache/linkis/monitor/jobhistory/jobtime/JobTimeExceedRule.scala
+++ b/linkis-extensions/linkis-et-monitor/src/main/scala/org/apache/linkis/monitor/jobhistory/jobtime/JobTimeExceedRule.scala
@@ -73,6 +73,7 @@ class JobTimeExceedRule(thresholds: util.Set[String], hitObserver: Observer)
val alertData: util.List[JobHistory] = new util.ArrayList[JobHistory]()
for (sd <- data.asScala) {
if (sd != null && sd.getData() != null) {
+ var idLong = 0L
for (d <- sd.getData().asScala) {
if (d.isInstanceOf[JobHistory]) {
val jobHistory = d.asInstanceOf[JobHistory]
@@ -83,11 +84,24 @@ class JobTimeExceedRule(thresholds: util.Set[String], hitObserver: Observer)
alertData.add(d.asInstanceOf[JobHistory])
}
}
- scanRuleList.put("jobhistoryScan", jobHistory.getId)
+ if (idLong == 0L || jobHistory.getId < idLong) {
+ idLong = jobHistory.getId
+ }
} else {
logger.warn("Ignored wrong input data Type : " + d + ", " + d.getClass.getCanonicalName)
}
}
+ if (idLong > 0L) {
+ val id = Optional
+ .ofNullable(CacheUtils.cacheBuilder.getIfPresent("jobhistoryScan"))
+ .orElse(MonitorConfig.JOB_HISTORY_TIME_EXCEED.getValue)
+ if (id == 0) {
+ scanRuleList.put("jobhistoryScan", idLong)
+ }
+ if (id > idLong) {
+ scanRuleList.put("jobhistoryScan", idLong)
+ }
+ }
} else {
logger.warn("Ignored null scanned data")
}
diff --git a/linkis-extensions/linkis-et-monitor/src/main/scala/org/apache/linkis/monitor/jobhistory/jobtime/StarrocksTimeExceedAlterSender.scala b/linkis-extensions/linkis-et-monitor/src/main/scala/org/apache/linkis/monitor/jobhistory/jobtime/StarrocksTimeExceedAlterSender.scala
new file mode 100644
index 00000000000..492d1f0a233
--- /dev/null
+++ b/linkis-extensions/linkis-et-monitor/src/main/scala/org/apache/linkis/monitor/jobhistory/jobtime/StarrocksTimeExceedAlterSender.scala
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.linkis.monitor.jobhistory.jobtime
+
+import org.apache.linkis.common.utils.Logging
+import org.apache.linkis.monitor.constants.Constants
+import org.apache.linkis.monitor.core.ob.{Event, Observer}
+import org.apache.linkis.monitor.jobhistory.entity.JobHistory
+import org.apache.linkis.monitor.jobhistory.exception.AnomalyScannerException
+import org.apache.linkis.monitor.until.HttpsUntils
+import org.apache.linkis.monitor.utils.alert.ims.{MonitorAlertUtils, PooledImsAlertUtils}
+
+import org.apache.commons.collections.MapUtils
+import org.apache.commons.lang3.StringUtils
+
+import java.util
+
+import scala.collection.JavaConverters.asScalaBufferConverter
+
+class StarrocksTimeExceedAlterSender extends Observer with Logging {
+
+ /**
+ * Observer Pattern
+ */
+ override def update(e: Event, jobHistoryList: scala.Any): Unit = {
+ if (!e.isInstanceOf[StarrocksTimeExceedHitEvent]) {
+ throw new AnomalyScannerException(
+ 21304,
+ "Wrong event that triggers StarrocksTimeExceedAlterSender. Input DataType: " + e.getClass.getCanonicalName
+ )
+ }
+ if (null == jobHistoryList || !jobHistoryList.isInstanceOf[util.List[_]]) {
+ throw new AnomalyScannerException(
+ 21304,
+ "Wrong input for StarrocksTimeExceedAlterSender. Input DataType: " + jobHistoryList.getClass.getCanonicalName
+ )
+ }
+ for (a <- jobHistoryList.asInstanceOf[util.List[_]].asScala) {
+ if (a == null) {
+ logger.warn("Ignore null input data")
+ } else if (!a.isInstanceOf[JobHistory]) {
+ logger.warn("Ignore wrong input data Type : " + a.getClass.getCanonicalName)
+ } else {
+ val jobHistory = a.asInstanceOf[JobHistory]
+ val timeValue =
+ HttpsUntils.getJDBCConf(jobHistory.getSubmitUser, Constants.JDBC_ALERT_TIME)
+ val userValue =
+ HttpsUntils.getJDBCConf(jobHistory.getSubmitUser, Constants.JDBC_ALERT_USER)
+ var levelValue =
+ HttpsUntils.getJDBCConf(jobHistory.getSubmitUser, Constants.JDBC_ALERT_LEVEL)
+ if (StringUtils.isNotBlank(timeValue) && StringUtils.isNotBlank(userValue)) {
+ val replaceParm: util.HashMap[String, String] = new util.HashMap[String, String]
+ replaceParm.put("$id", String.valueOf(jobHistory.getId))
+ replaceParm.put("$timeoutTime", timeValue)
+ replaceParm.put("$alteruser", userValue)
+ replaceParm.put("$eccAlertUser", userValue)
+ replaceParm.put("$submitUser", jobHistory.getSubmitUser)
+ if (StringUtils.isBlank(levelValue)) {
+ levelValue = "3"
+ }
+ replaceParm.put("$alterLevel", levelValue)
+ val alters = MonitorAlertUtils.getAlerts(Constants.USER_LABEL_MONITOR, replaceParm)
+ PooledImsAlertUtils.addAlert(alters.get("12020"))
+ }
+ }
+ }
+ }
+
+}
diff --git a/linkis-extensions/linkis-et-monitor/src/main/scala/org/apache/linkis/monitor/request/UserAction.scala b/linkis-extensions/linkis-et-monitor/src/main/scala/org/apache/linkis/monitor/jobhistory/jobtime/StarrocksTimeKillHitEvent.scala
similarity index 100%
rename from linkis-extensions/linkis-et-monitor/src/main/scala/org/apache/linkis/monitor/request/UserAction.scala
rename to linkis-extensions/linkis-et-monitor/src/main/scala/org/apache/linkis/monitor/jobhistory/jobtime/StarrocksTimeKillHitEvent.scala
diff --git a/linkis-extensions/linkis-et-monitor/src/main/scala/org/apache/linkis/monitor/request/MonitorResourceAction.scala b/linkis-extensions/linkis-et-monitor/src/main/scala/org/apache/linkis/monitor/request/MonitorAction.scala
similarity index 100%
rename from linkis-extensions/linkis-et-monitor/src/main/scala/org/apache/linkis/monitor/request/MonitorResourceAction.scala
rename to linkis-extensions/linkis-et-monitor/src/main/scala/org/apache/linkis/monitor/request/MonitorAction.scala
diff --git a/linkis-extensions/linkis-io-file-client/src/main/scala/org/apache/linkis/storage/io/iteraceptor/IOMethodInterceptor.scala b/linkis-extensions/linkis-io-file-client/src/main/scala/org/apache/linkis/storage/io/iteraceptor/IOMethodInterceptor.scala
index 92feb8a5616..4671413c793 100644
--- a/linkis-extensions/linkis-io-file-client/src/main/scala/org/apache/linkis/storage/io/iteraceptor/IOMethodInterceptor.scala
+++ b/linkis-extensions/linkis-io-file-client/src/main/scala/org/apache/linkis/storage/io/iteraceptor/IOMethodInterceptor.scala
@@ -40,7 +40,6 @@ import java.net.InetAddress
import scala.beans.BeanProperty
import scala.collection.JavaConverters._
-import scala.collection.mutable
import com.google.gson.reflect.TypeToken
@@ -48,7 +47,7 @@ class IOMethodInterceptor(fsType: String) extends MethodInterceptor with Logging
@BeanProperty var ioClient: IOClient = _
- private val properties: mutable.HashMap[String, String] = mutable.HashMap[String, String]()
+ private var properties: java.util.Map[String, String] = new java.util.HashMap[String, String]()
private var inited = false
@@ -69,7 +68,7 @@ class IOMethodInterceptor(fsType: String) extends MethodInterceptor with Logging
label.setJobGroupId(IOClientUtils.generateJobGrupID())
}
- def getProxyUser: String = StorageConfiguration.PROXY_USER.getValue(properties.asJava)
+ def getProxyUser: String = StorageConfiguration.PROXY_USER.getValue(properties)
def getCreatorUser: String = StorageUtils.getJvmUser
@@ -103,7 +102,7 @@ class IOMethodInterceptor(fsType: String) extends MethodInterceptor with Logging
}
def initFS(methodName: String = "init"): Unit = {
- if (!properties.contains(StorageConfiguration.PROXY_USER.key)) {
+ if (!properties.containsKey(StorageConfiguration.PROXY_USER.key)) {
throw new StorageErrorException(NO_PROXY_USER.getErrorCode, NO_PROXY_USER.getErrorDesc)
}
bindEngineLabel.setIsJobGroupHead("true")
@@ -117,7 +116,7 @@ class IOMethodInterceptor(fsType: String) extends MethodInterceptor with Logging
getProxyUser,
getLocalIP,
methodName,
- Array(properties.toMap)
+ Array(properties)
),
bindEngineLabel
)
@@ -172,7 +171,7 @@ class IOMethodInterceptor(fsType: String) extends MethodInterceptor with Logging
case "init" =>
case "storageName" => return fsType
case "setUser" =>
- properties += StorageConfiguration.PROXY_USER.key -> args(0).asInstanceOf[String];
+ properties.put(StorageConfiguration.PROXY_USER.key, args(0).asInstanceOf[String]);
return Unit
case _ =>
if (inited) {
@@ -185,23 +184,23 @@ class IOMethodInterceptor(fsType: String) extends MethodInterceptor with Logging
method.getName match {
case "init" =>
val user =
- if (properties.contains(StorageConfiguration.PROXY_USER.key)) {
- StorageConfiguration.PROXY_USER.getValue(properties.toMap)
+ if (properties.containsKey(StorageConfiguration.PROXY_USER.key)) {
+ StorageConfiguration.PROXY_USER.getValue(properties)
} else {
null
}
if (args.length > 0 && args(0).isInstanceOf[java.util.Map[String, String]]) {
- properties ++= args(0).asInstanceOf[java.util.Map[String, String]].asScala
+ properties.putAll(args(0).asInstanceOf[java.util.Map[String, String]])
}
if (StringUtils.isNoneBlank(user)) {
- properties += StorageConfiguration.PROXY_USER.key -> user
+ properties.put(StorageConfiguration.PROXY_USER.key, user)
}
initFS()
logger.warn(s"For user($user)inited a $fsType storage($id) .")
Unit
case "fsName" => fsType
case "setUser" =>
- properties += StorageConfiguration.PROXY_USER.key -> args(0).asInstanceOf[String]; Unit
+ properties.put(StorageConfiguration.PROXY_USER.key, args(0).asInstanceOf[String]); Unit
case "read" =>
if (!inited) throw new IllegalAccessException("storage has not been inited.")
new IOInputStream(args)
diff --git a/linkis-orchestrator/linkis-computation-orchestrator/src/main/scala/org/apache/linkis/orchestrator/computation/monitor/EngineConnMonitor.scala b/linkis-orchestrator/linkis-computation-orchestrator/src/main/scala/org/apache/linkis/orchestrator/computation/monitor/EngineConnMonitor.scala
index 4ae71461330..e89193418ad 100644
--- a/linkis-orchestrator/linkis-computation-orchestrator/src/main/scala/org/apache/linkis/orchestrator/computation/monitor/EngineConnMonitor.scala
+++ b/linkis-orchestrator/linkis-computation-orchestrator/src/main/scala/org/apache/linkis/orchestrator/computation/monitor/EngineConnMonitor.scala
@@ -29,10 +29,11 @@ import org.apache.linkis.governance.common.protocol.engineconn.{
import org.apache.linkis.governance.common.utils.GovernanceConstant
import org.apache.linkis.manager.common.entity.enumeration.NodeStatus
import org.apache.linkis.manager.common.protocol.node.{RequestNodeStatus, ResponseNodeStatus}
+import org.apache.linkis.manager.label.entity.Label
+import org.apache.linkis.manager.label.utils.LabelUtil
import org.apache.linkis.orchestrator.computation.conf.ComputationOrchestratorConf
import org.apache.linkis.orchestrator.computation.execute.{CodeExecTaskExecutor, EngineConnTaskInfo}
import org.apache.linkis.orchestrator.listener.task.{
- EngineQuitedUnexpectedlyEvent,
TaskErrorResponseEvent,
TaskLogEvent,
TaskStatusEvent
@@ -201,14 +202,25 @@ object EngineConnMonitor extends Logging {
executors.foreach { executor =>
val execTask = executor.getExecTask
Utils.tryAndError {
+ val labels: Array[Label[_]] = executor.getEngineConnExecutor.getLabels()
+ val engineType: String = LabelUtil.getEngineTypeLabel(labels.toList.asJava).getEngineType
logger.warn(
s"Will kill task ${execTask.getIDInfo()} because the engine ${executor.getEngineConnExecutor.getServiceInstance.toString} quited unexpectedly."
)
- val event = EngineQuitedUnexpectedlyEvent(
+ val errLog = LogUtils.generateERROR(
+ s"Your job : ${execTask.getIDInfo()} failed because the ${engineType} engine quit unexpectedly(任务${execTask
+ .getIDInfo()}失败," +
+ s"原因是引擎意外退出,可能是复杂任务导致引擎退出,如OOM)."
+ )
+ val logEvent = TaskLogEvent(execTask, errLog)
+ execTask.getPhysicalContext.pushLog(logEvent)
+ val errorResponseEvent = TaskErrorResponseEvent(
execTask,
- executor.getEngineConnExecutor.getServiceInstance.toString
+ "Task failed, engine quit unexpectedly(任务运行失败原因是引擎意外退出,可能是复杂任务导致引擎退出,如OOM)."
)
- execTask.getPhysicalContext.broadcastSyncEvent(event)
+ execTask.getPhysicalContext.broadcastSyncEvent(errorResponseEvent)
+ val statusEvent = TaskStatusEvent(execTask, ExecutionNodeStatus.Failed)
+ execTask.getPhysicalContext.broadcastSyncEvent(statusEvent)
}
}
}
diff --git a/linkis-orchestrator/linkis-computation-orchestrator/src/main/scala/org/apache/linkis/orchestrator/computation/physical/CodeLogicalUnitExecTask.scala b/linkis-orchestrator/linkis-computation-orchestrator/src/main/scala/org/apache/linkis/orchestrator/computation/physical/CodeLogicalUnitExecTask.scala
index 5b61159ab36..79484a6efb2 100644
--- a/linkis-orchestrator/linkis-computation-orchestrator/src/main/scala/org/apache/linkis/orchestrator/computation/physical/CodeLogicalUnitExecTask.scala
+++ b/linkis-orchestrator/linkis-computation-orchestrator/src/main/scala/org/apache/linkis/orchestrator/computation/physical/CodeLogicalUnitExecTask.scala
@@ -29,6 +29,7 @@ import org.apache.linkis.orchestrator.computation.execute.{
CodeExecTaskExecutorManager
}
import org.apache.linkis.orchestrator.ecm.conf.ECMPluginConf
+import org.apache.linkis.orchestrator.ecm.service.impl.ComputationEngineConnExecutor
import org.apache.linkis.orchestrator.exception.{
OrchestratorErrorCodeSummary,
OrchestratorErrorException,
@@ -46,6 +47,7 @@ import org.apache.linkis.orchestrator.strategy.async.AsyncExecTask
import org.apache.linkis.orchestrator.utils.OrchestratorIDCreator
import org.apache.linkis.protocol.constants.TaskConstant
import org.apache.linkis.scheduler.executer.{ErrorExecuteResponse, SubmitResponse}
+import org.apache.linkis.server.BDPJettyServerHelper
import org.apache.commons.lang3.StringUtils
diff --git a/linkis-orchestrator/linkis-computation-orchestrator/src/main/scala/org/apache/linkis/orchestrator/computation/service/ComputationTaskExecutionReceiver.scala b/linkis-orchestrator/linkis-computation-orchestrator/src/main/scala/org/apache/linkis/orchestrator/computation/service/ComputationTaskExecutionReceiver.scala
index 101914724cb..21451dbde88 100644
--- a/linkis-orchestrator/linkis-computation-orchestrator/src/main/scala/org/apache/linkis/orchestrator/computation/service/ComputationTaskExecutionReceiver.scala
+++ b/linkis-orchestrator/linkis-computation-orchestrator/src/main/scala/org/apache/linkis/orchestrator/computation/service/ComputationTaskExecutionReceiver.scala
@@ -27,6 +27,7 @@ import org.apache.linkis.orchestrator.computation.monitor.EngineConnMonitor
import org.apache.linkis.orchestrator.core.ResultSet
import org.apache.linkis.orchestrator.ecm.service.TaskExecutionReceiver
import org.apache.linkis.orchestrator.listener.task._
+import org.apache.linkis.orchestrator.plans.physical.ExecTask
import org.apache.linkis.orchestrator.utils.OrchestratorLoggerUtils
import org.apache.linkis.rpc.Sender
import org.apache.linkis.rpc.message.annotation.Receiver
@@ -92,6 +93,15 @@ class ComputationTaskExecutionReceiver extends TaskExecutionReceiver with Loggin
.getByEngineConnAndTaskId(serviceInstance, taskStatus.execId)
.foreach { codeExecutor =>
OrchestratorLoggerUtils.setJobIdMDC(codeExecutor.getExecTask)
+ val task: ExecTask = codeExecutor.getExecTask.getPhysicalContext.getRootTask
+ taskStatus match {
+ case rte: ResponseTaskStatusWithExecuteCodeIndex =>
+ logger.info(s"execute error with index: ${rte.errorIndex}")
+ task.updateParams("execute.error.code.index", rte.errorIndex.toString)
+ case _ =>
+ }
+ // Mark that this method has run; it is asynchronous, and failed-task handling must wait for it to finish
+ task.updateParams("task.error.receiver.flag", "true")
val event = TaskStatusEvent(codeExecutor.getExecTask, taskStatus.status)
logger.info(
s"From engineConn receive status info:$taskStatus, now post to listenerBus event: $event"
diff --git a/linkis-orchestrator/linkis-orchestrator-core/src/main/scala/org/apache/linkis/orchestrator/conf/OrchestratorConfiguration.scala b/linkis-orchestrator/linkis-orchestrator-core/src/main/scala/org/apache/linkis/orchestrator/conf/OrchestratorConfiguration.scala
index 10f3a64d139..b66c2819f3e 100644
--- a/linkis-orchestrator/linkis-orchestrator-core/src/main/scala/org/apache/linkis/orchestrator/conf/OrchestratorConfiguration.scala
+++ b/linkis-orchestrator/linkis-orchestrator-core/src/main/scala/org/apache/linkis/orchestrator/conf/OrchestratorConfiguration.scala
@@ -48,7 +48,7 @@ object OrchestratorConfiguration {
CommonVars("wds.linkis.orchestrator.execution.task.max.parallelism", 5)
val TASK_RUNNER_MAX_SIZE =
- CommonVars("wds.linkis.orchestrator.execution.task.runner.max.size", 1000)
+ CommonVars("wds.linkis.orchestrator.execution.task.runner.max.size", 200)
val EXEC_RUNNER_FACTORY_CLASS =
CommonVars("wds.linkis.orchestrator.exec.task.runner.factory.class", "")
@@ -56,6 +56,9 @@ object OrchestratorConfiguration {
val TASK_MAX_PERSIST_WAIT_TIME =
CommonVars("wds.linkis.orchestrator.task.persist.wait.max", new TimeType("5m"))
+ val ERROR_TASK_RECEIVER_WAIT_TIME =
+ CommonVars("linkis.error.task.receiver.wait.time", new TimeType("30s"))
+
val RETRY_TASK_WAIT_TIME = CommonVars("wds.linkis.orchestrator.task.retry.wait.time", 30000)
val RETRYTASK_MAXIMUM_AGE = CommonVars("wds.linkis.computation.orchestrator.retry.max.age", 10)
diff --git a/linkis-orchestrator/linkis-orchestrator-core/src/main/scala/org/apache/linkis/orchestrator/execution/impl/DefaultFailedTaskResponse.scala b/linkis-orchestrator/linkis-orchestrator-core/src/main/scala/org/apache/linkis/orchestrator/execution/impl/DefaultFailedTaskResponse.scala
index 01e5aa8b893..2ded3662978 100644
--- a/linkis-orchestrator/linkis-orchestrator-core/src/main/scala/org/apache/linkis/orchestrator/execution/impl/DefaultFailedTaskResponse.scala
+++ b/linkis-orchestrator/linkis-orchestrator-core/src/main/scala/org/apache/linkis/orchestrator/execution/impl/DefaultFailedTaskResponse.scala
@@ -24,6 +24,12 @@ import org.apache.linkis.orchestrator.execution.FailedTaskResponse
class DefaultFailedTaskResponse(errorMsg: String, errorCode: Int, throwable: Throwable)
extends FailedTaskResponse {
+ private var _errorIndex: Int = -1
+
+ def errorIndex: Int = _errorIndex
+
+ def errorIndex_=(value: Int): Unit = _errorIndex = value
+
override def getCause: Throwable = throwable
override def getErrorMsg: String = errorMsg
diff --git a/linkis-orchestrator/linkis-orchestrator-core/src/main/scala/org/apache/linkis/orchestrator/plans/physical/ExecTask.scala b/linkis-orchestrator/linkis-orchestrator-core/src/main/scala/org/apache/linkis/orchestrator/plans/physical/ExecTask.scala
index 6d1950789a8..b2b085668b3 100644
--- a/linkis-orchestrator/linkis-orchestrator-core/src/main/scala/org/apache/linkis/orchestrator/plans/physical/ExecTask.scala
+++ b/linkis-orchestrator/linkis-orchestrator-core/src/main/scala/org/apache/linkis/orchestrator/plans/physical/ExecTask.scala
@@ -33,6 +33,8 @@ trait ExecTask extends PhysicalOrchestration[ExecTask] {
def isLocalMode: Boolean
+ private var _params: Map[String, String] = Map.empty
+
override def hashCode(): Int = getId.hashCode
override def equals(obj: Any): Boolean = obj match {
@@ -40,6 +42,14 @@ trait ExecTask extends PhysicalOrchestration[ExecTask] {
case _ => false
}
+ def params: Map[String, String] = _params
+
+ def updateParams(key: String, value: String): Unit = {
+ _params += (key -> value)
+ }
+
+ def getIndexValue(key: String): Option[String] = _params.get(key)
+
def getIDInfo(): String = {
val desc = getTaskDesc
val jobID = desc.getOrigin.getASTOrchestration match {
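The new params map on ExecTask is a small per-task key/value store used in this patch to hand the failed-code index and the receiver flag between orchestrator components. A usage sketch, assuming task is any ExecTask instance:

// Record where execution failed, then read it back with a default.
task.updateParams("execute.error.code.index", "3")
val errorIndex: Int = task.getIndexValue("execute.error.code.index").getOrElse("-1").toInt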
diff --git a/linkis-orchestrator/linkis-orchestrator-core/src/main/scala/org/apache/linkis/orchestrator/plans/physical/PhysicalContextImpl.scala b/linkis-orchestrator/linkis-orchestrator-core/src/main/scala/org/apache/linkis/orchestrator/plans/physical/PhysicalContextImpl.scala
index 0dcfba0d8b0..d7909259af5 100644
--- a/linkis-orchestrator/linkis-orchestrator-core/src/main/scala/org/apache/linkis/orchestrator/plans/physical/PhysicalContextImpl.scala
+++ b/linkis-orchestrator/linkis-orchestrator-core/src/main/scala/org/apache/linkis/orchestrator/plans/physical/PhysicalContextImpl.scala
@@ -18,8 +18,11 @@
package org.apache.linkis.orchestrator.plans.physical
import org.apache.linkis.common.listener.Event
-import org.apache.linkis.common.log.LogUtils
+import org.apache.linkis.common.utils.Logging
import org.apache.linkis.governance.common.entity.ExecutionNodeStatus
+import org.apache.linkis.manager.label.entity.Label
+import org.apache.linkis.manager.label.utils.LabelUtil
+import org.apache.linkis.orchestrator.conf.OrchestratorConfiguration
import org.apache.linkis.orchestrator.exception.OrchestratorErrorCodeSummary
import org.apache.linkis.orchestrator.execution.{
CompletedTaskResponse,
@@ -31,9 +34,10 @@ import org.apache.linkis.orchestrator.listener._
import org.apache.linkis.orchestrator.listener.task.{
RootTaskResponseEvent,
TaskLogEvent,
- TaskRunningInfoEvent,
- TaskYarnResourceEvent
+ TaskRunningInfoEvent
}
+import org.apache.linkis.orchestrator.plans.ast.AbstractJob
+import org.apache.linkis.orchestrator.plans.logical.EndJobTaskDesc
import java.util
@@ -41,7 +45,8 @@ import scala.collection.mutable
import scala.collection.mutable.ListBuffer
class PhysicalContextImpl(private var rootTask: ExecTask, private var leafTasks: Array[ExecTask])
- extends PhysicalContext {
+ extends PhysicalContext
+ with Logging {
private var syncListenerBus: OrchestratorSyncListenerBus = _
@@ -70,6 +75,33 @@ class PhysicalContextImpl(private var rootTask: ExecTask, private var leafTasks:
OrchestratorErrorCodeSummary.EXECUTION_ERROR_CODE,
cause
)
+ // Record the index of the failed code so that only the unexecuted code runs on retry
+ this.rootTask.getTaskDesc match {
+ case taskDesc: EndJobTaskDesc =>
+ taskDesc.job match {
+ case job: AbstractJob =>
+ val labels: util.List[Label[_]] = job.getLabels
+ val codeType: String = LabelUtil.getCodeType(labels)
+ if ("aisql".equals(codeType)) {
+ val params: Map[String, String] = this.rootTask.params
+ var flag: Boolean = params.getOrElse("task.error.receiver.flag", "false").toBoolean
+ val startTime: Long = System.currentTimeMillis()
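+ // Poll once per second until the error receiver sets task.error.receiver.flag,
+ // or until ERROR_TASK_RECEIVER_WAIT_TIME elapses.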
+ while (
+ System
+ .currentTimeMillis() - startTime < OrchestratorConfiguration.ERROR_TASK_RECEIVER_WAIT_TIME.getValue.toLong && !flag
+ ) {
+ logger.info("task error receiver not end.")
+ Thread.sleep(1000)
+ flag = params.getOrElse("task.error.receiver.flag", "false").toBoolean
+ }
+ logger.info("task error receiver end.")
+ failedResponse.errorIndex = params.getOrElse("execute.error.code.index", "-1").toInt
+ }
+ case _ =>
+ }
+ case _ =>
+ }
+
this.response = failedResponse
syncListenerBus.postToAll(RootTaskResponseEvent(getRootTask, failedResponse))
}
diff --git a/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/dao/DepartmentTenantMapper.java b/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/dao/DepartmentTenantMapper.java
index 336ad698bb6..adc47b9486a 100644
--- a/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/dao/DepartmentTenantMapper.java
+++ b/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/dao/DepartmentTenantMapper.java
@@ -32,10 +32,13 @@ public interface DepartmentTenantMapper {
List<DepartmentTenantVo> queryTenantList(
@Param("creator") String creator,
@Param("departmentId") String departmentId,
+ @Param("department") String department,
@Param("tenantValue") String tenantValue);
void deleteTenant(@Param("id") Integer id);
DepartmentTenantVo queryTenant(
- @Param("creator") String creator, @Param("departmentId") String departmentId);
+ @Param("creator") String creator,
+ @Param("departmentId") String departmentI,
+ @Param("department") String department);
}
diff --git a/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/dao/TemplateConfigKeyMapper.java b/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/dao/TemplateConfigKeyMapper.java
index 6862650f27e..e77d3faffd6 100644
--- a/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/dao/TemplateConfigKeyMapper.java
+++ b/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/dao/TemplateConfigKeyMapper.java
@@ -46,4 +46,8 @@ List selectInfoListByTemplateName(
@Param("templateName") String templateName);
List selectEngineTypeByTemplateUuid(@Param("templateUuid") String templateUuid);
+
+ List<TemplateConfigKey> selectListByKeyId(@Param("keyId") Long keyId);
+
+ void updateConfigValue(TemplateConfigKey confKey);
}
diff --git a/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/restful/api/ConfigurationRestfulApi.java b/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/restful/api/ConfigurationRestfulApi.java
index cad72337dc9..50119f7ee24 100644
--- a/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/restful/api/ConfigurationRestfulApi.java
+++ b/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/restful/api/ConfigurationRestfulApi.java
@@ -17,6 +17,7 @@
package org.apache.linkis.configuration.restful.api;
+import org.apache.linkis.common.utils.AESUtils;
import org.apache.linkis.configuration.conf.Configuration;
import org.apache.linkis.configuration.entity.*;
import org.apache.linkis.configuration.exception.ConfigurationException;
@@ -294,6 +295,16 @@ public Message saveFullTree(HttpServletRequest req, @RequestBody JsonNode json)
creator = "*";
}
String username = ModuleUserUtils.getOperationUser(req, "saveFullTree");
+ String engine = null;
+ String version = null;
+ if (engineType != null) {
+ String[] tmpString = engineType.split("-");
+ if (tmpString.length != 2) {
+ throw new ConfigurationException(INCORRECT_FIXED_SUCH.getErrorDesc());
+ }
+ engine = tmpString[0];
+ version = tmpString[1];
+ }
ArrayList createList = new ArrayList<>();
ArrayList updateList = new ArrayList<>();
ArrayList<List<ConfigKeyValue>> chekList = new ArrayList<>();
@@ -309,6 +320,37 @@ public Message saveFullTree(HttpServletRequest req, @RequestBody JsonNode json)
sparkConf = configKeyValue.getConfigValue().trim();
configKeyValue.setConfigValue(sparkConf);
}
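+ // With the AES switch on, encrypt sensitive values (see CONFIGURATION_AES_CONF) before saving,
+ // but only when the submitted value differs from the stored one, so an already-encrypted
+ // value is not encrypted a second time.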
+ if (AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue()
+ && Configuration.CONFIGURATION_AES_CONF().contains(configKeyValue.getKey())
+ && StringUtils.isNotBlank(configKeyValue.getConfigValue())) {
+ List<ConfigUserValue> userConfigValue =
+ configKeyService.getUserConfigValue(
+ engine, configKeyValue.getKey(), creator, username);
+ for (ConfigUserValue configUserValue : userConfigValue) {
+ if (Configuration.CONFIGURATION_AES_CONF().contains(configKeyValue.getKey())
+ && !configUserValue.getConfigValue().equals(configKeyValue.getConfigValue())) {
+ configKeyValue.setConfigValue(
+ AESUtils.encrypt(
+ configKeyValue.getConfigValue(),
+ AESUtils.LINKIS_DATASOURCE_AES_KEY.getValue()));
+ }
+ }
+ }
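+ // Same handling for linkis.nebula.password, compared against the values stored under the same label.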
+ if (AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue()
+ && configKeyValue.getKey().equals("linkis.nebula.password")
+ && StringUtils.isNotBlank(configKeyValue.getConfigValue())) {
+ List<ConfigKeyValue> configByLabelIds =
+ configurationService.getConfigByLabelId(configKeyValue.getConfigLabelId(), null);
+ for (ConfigKeyValue configByLabelId : configByLabelIds) {
+ if (configByLabelId.getKey().equals("linkis.nebula.password")
+ && !configByLabelId.getConfigValue().equals(configKeyValue.getConfigValue())) {
+ configKeyValue.setConfigValue(
+ AESUtils.encrypt(
+ configKeyValue.getConfigValue(),
+ AESUtils.LINKIS_DATASOURCE_AES_KEY.getValue()));
+ }
+ }
+ }
}
}
for (List settings : chekList) {
@@ -319,16 +361,6 @@ public Message saveFullTree(HttpServletRequest req, @RequestBody JsonNode json)
configurationService.updateUserValue(setting, userLabelId, createList, updateList);
}
}
- String engine = null;
- String version = null;
- if (engineType != null) {
- String[] tmpString = engineType.split("-");
- if (tmpString.length != 2) {
- throw new ConfigurationException(INCORRECT_FIXED_SUCH.getErrorDesc());
- }
- engine = tmpString[0];
- version = tmpString[1];
- }
configurationService.updateUserValue(createList, updateList);
// TODO: Add a refresh cache interface later
if (StringUtils.isNotBlank(creator) && creator.equals("*")) {
@@ -552,6 +584,22 @@ public Message saveKeyValue(HttpServletRequest req, @RequestBody Map<String, Object> json)
+ List<ConfigUserValue> userConfigValue =
+ configKeyService.getUserConfigValue(engineType, configKeyValue.getKey(), creator, user);
+ if (userConfigValue.stream()
+ .anyMatch(
+ configValue ->
+ configValue.getConfigValue().equals(configKeyValue.getConfigValue()))) {
+ passwd = configKeyValue.getConfigValue();
+ }
+ configKeyValue.setConfigValue(passwd);
+ }
ConfigValue configValue = configKeyService.saveConfigValue(configKeyValue, labelList);
configurationService.clearAMCacheConf(username, creator, engineType, version);
return message.data("configValue", configValue);
diff --git a/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/restful/api/TemplateRestfulApi.java b/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/restful/api/TemplateRestfulApi.java
index f6963811162..ff8127fc7ae 100644
--- a/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/restful/api/TemplateRestfulApi.java
+++ b/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/restful/api/TemplateRestfulApi.java
@@ -19,19 +19,19 @@
import org.apache.linkis.common.conf.Configuration;
import org.apache.linkis.common.utils.JsonUtils;
+import org.apache.linkis.configuration.entity.ConfigKey;
import org.apache.linkis.configuration.entity.ConfigKeyLimitVo;
import org.apache.linkis.configuration.exception.ConfigurationException;
+import org.apache.linkis.configuration.service.ConfigKeyService;
import org.apache.linkis.configuration.service.TemplateConfigKeyService;
import org.apache.linkis.server.Message;
import org.apache.linkis.server.utils.ModuleUserUtils;
+import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.web.bind.annotation.RequestBody;
-import org.springframework.web.bind.annotation.RequestMapping;
-import org.springframework.web.bind.annotation.RequestMethod;
-import org.springframework.web.bind.annotation.RestController;
+import org.springframework.web.bind.annotation.*;
import javax.servlet.http.HttpServletRequest;
@@ -57,6 +57,7 @@ public class TemplateRestfulApi {
private static final Logger logger = LoggerFactory.getLogger(TemplateRestfulApi.class);
@Autowired private TemplateConfigKeyService templateConfigKeyService;
+ @Autowired private ConfigKeyService configKeyService;
@ApiOperation(
value = "updateKeyMapping",
@@ -164,7 +165,7 @@ public Message queryKeyInfoList(HttpServletRequest req, @RequestBody JsonNode js
if (StringUtils.isNotBlank(token)) {
if (!Configuration.isAdminToken(token)) {
logger.warn("Token:{} has no permission to queryKeyInfoList.", token);
- return Message.error("Token:" + token + " has no permission to queryKeyInfoList.");
+ return Message.error("Token has no permission to queryKeyInfoList.");
}
} else if (!Configuration.isAdmin(username)) {
logger.warn("User:{} has no permission to queryKeyInfoList.", username);
@@ -277,4 +278,29 @@ public Message apply(HttpServletRequest req, @RequestBody JsonNode jsonNode)
message.getData().putAll(result);
return message;
}
+
+ @ApiOperation(
+ value = "encryptDatasourcePassword",
+ notes = "encrypt datasource password",
+ response = Message.class)
+ @RequestMapping(value = "/encrypt", method = RequestMethod.GET)
+ public Message encryptDatasourcePassword(
+ @RequestParam(value = "isEncrypt", required = false) String isEncrypt,
+ HttpServletRequest request) {
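+ // Maintenance endpoint: look up the JDBC and Nebula password config keys and encrypt or
+ // decrypt their stored template values according to isEncrypt.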
+ List<ConfigKey> jdbc =
+ configKeyService.getConfigBykey(
+ "jdbc", org.apache.linkis.configuration.conf.Configuration.JDBC_PASSWORD_CONF(), null);
+ List<ConfigKey> nebula =
+ configKeyService.getConfigBykey(
+ "nebula",
+ org.apache.linkis.configuration.conf.Configuration.NEBULA_PASSWORD_CONF(),
+ null);
+ if (CollectionUtils.isNotEmpty(jdbc)) {
+ templateConfigKeyService.dealDatasourcePwdByKeyId(jdbc.get(0).getId(), isEncrypt);
+ }
+ if (CollectionUtils.isNotEmpty(nebula)) {
+ templateConfigKeyService.dealDatasourcePwdByKeyId(nebula.get(0).getId(), isEncrypt);
+ }
+ return Message.ok();
+ }
}
diff --git a/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/restful/api/TenantConfigrationRestfulApi.java b/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/restful/api/TenantConfigrationRestfulApi.java
index 69e00193497..f1f8a184e8f 100644
--- a/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/restful/api/TenantConfigrationRestfulApi.java
+++ b/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/restful/api/TenantConfigrationRestfulApi.java
@@ -298,7 +298,9 @@ public Message saveDepartmentTenant(
if (StringUtils.isBlank(departmentTenantVo.getId())) {
DepartmentTenantVo departTenant =
tenantConfigService.queryDepartTenant(
- departmentTenantVo.getCreator(), departmentTenantVo.getDepartmentId());
+ departmentTenantVo.getCreator(),
+ departmentTenantVo.getDepartmentId(),
+ departmentTenantVo.getDepartment());
if (null != departTenant) {
return Message.error("department creator is exist");
}
@@ -335,6 +337,7 @@ public Message saveDepartmentTenant(
public Message queryDepartmentTenant(
HttpServletRequest req,
@RequestParam(value = "departmentId", required = false) String departmentId,
+ @RequestParam(value = "department", required = false) String department,
@RequestParam(value = "creator", required = false) String creator,
@RequestParam(value = "tenantValue", required = false) String tenantValue,
@RequestParam(value = "pageNow", required = false, defaultValue = "1") Integer pageNow,
@@ -344,11 +347,12 @@ public Message queryDepartmentTenant(
return Message.error("Failed to query-tenant-list,msg: only administrator users to use");
}
if (StringUtils.isBlank(departmentId)) departmentId = null;
+ if (StringUtils.isBlank(department)) department = null;
if (StringUtils.isBlank(creator)) creator = null;
if (StringUtils.isBlank(tenantValue)) tenantValue = null;
Map resultMap =
tenantConfigService.queryDepartmentTenant(
- departmentId, creator, tenantValue, pageNow, pageSize);
+ departmentId, department, creator, tenantValue, pageNow, pageSize);
return Message.ok()
.data("tenantList", resultMap.get("tenantList"))
.data(JobRequestConstants.TOTAL_PAGE(), resultMap.get(JobRequestConstants.TOTAL_PAGE()));
diff --git a/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/service/TemplateConfigKeyService.java b/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/service/TemplateConfigKeyService.java
index bde686c6d05..fdde83d5129 100644
--- a/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/service/TemplateConfigKeyService.java
+++ b/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/service/TemplateConfigKeyService.java
@@ -48,4 +48,6 @@ Map apply(
throws ConfigurationException;
TemplateConfResponse queryKeyInfoList(TemplateConfRequest templateConfRequest);
+
+ void dealDatasourcePwdByKeyId(Long keyId, String isEncrypt) throws ConfigurationException;
}
diff --git a/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/service/TenantConfigService.java b/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/service/TenantConfigService.java
index 64350cfe4c6..33544bddfc3 100644
--- a/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/service/TenantConfigService.java
+++ b/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/service/TenantConfigService.java
@@ -43,11 +43,16 @@ Map queryTenantList(
void saveDepartmentTenant(DepartmentTenantVo departmentTenantVo) throws ConfigurationException;
Map queryDepartmentTenant(
- String departmentId, String creator, String tenantValue, Integer pageNow, Integer pageSize);
+ String departmentId,
+ String department,
+ String creator,
+ String tenantValue,
+ Integer pageNow,
+ Integer pageSize);
void deleteDepartmentTenant(Integer id);
- DepartmentTenantVo queryDepartTenant(String creator, String departmentId);
+ DepartmentTenantVo queryDepartTenant(String creator, String departmentId, String department);
List queryDepartmentList();
diff --git a/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/service/impl/TemplateConfigKeyServiceImpl.java b/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/service/impl/TemplateConfigKeyServiceImpl.java
index 63623bbf140..5d8ee1b5d3f 100644
--- a/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/service/impl/TemplateConfigKeyServiceImpl.java
+++ b/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/service/impl/TemplateConfigKeyServiceImpl.java
@@ -17,6 +17,9 @@
package org.apache.linkis.configuration.service.impl;
+import org.apache.linkis.common.exception.ErrorException;
+import org.apache.linkis.common.utils.AESUtils;
+import org.apache.linkis.configuration.conf.Configuration;
import org.apache.linkis.configuration.dao.ConfigKeyLimitForUserMapper;
import org.apache.linkis.configuration.dao.ConfigMapper;
import org.apache.linkis.configuration.dao.LabelMapper;
@@ -35,6 +38,7 @@
import org.apache.linkis.manager.label.entity.CombinedLabel;
import org.apache.linkis.rpc.message.annotation.Receiver;
+import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
@@ -174,6 +178,26 @@ public Boolean updateKeyMapping(
templateConfigKey.setTemplateName(templateName);
templateConfigKey.setTemplateUuid(templateUid);
templateConfigKey.setKeyId(keyId);
+ if (AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue()
+ && Configuration.CONFIGURATION_AES_CONF().contains(key)
+ && StringUtils.isNotBlank(configValue)) {
+ List<TemplateConfigKey> oldList =
+ templateConfigKeyMapper.selectListByTemplateUuid(templateUid);
+ if (CollectionUtils.isEmpty(oldList)) {
+ // A newly added record, so encrypt the value
+ configValue =
+ AESUtils.encrypt(configValue, AESUtils.LINKIS_DATASOURCE_AES_KEY.getValue());
+ } else {
+ // An update to existing records; encrypt only when the value actually changed
+ for (TemplateConfigKey configKey : oldList) {
+ if (configKey.getKeyId().equals(keyId)
+ && !configKey.getConfigValue().equals(configValue)) {
+ configValue =
+ AESUtils.encrypt(configValue, AESUtils.LINKIS_DATASOURCE_AES_KEY.getValue());
+ }
+ }
+ }
+ }
templateConfigKey.setConfigValue(configValue);
templateConfigKey.setMaxValue(maxValue);
templateConfigKey.setCreateBy(operator);
@@ -498,4 +522,39 @@ public TemplateConfResponse queryKeyInfoList(TemplateConfRequest templateConfReq
result.setList(data);
return result;
}
+
+ @Override
+ public void dealDatasourcePwdByKeyId(Long keyId, String isEncrypt) {
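+ // isEncrypt = "1" with the AES switch on: encrypt plaintext values; isEncrypt = "0" with the
+ // switch off: decrypt them back. A value that fails to decrypt is treated as not yet encrypted.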
+ Boolean aeswitch = AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue();
+ List<TemplateConfigKey> templateConfigKeyList =
+ templateConfigKeyMapper.selectListByKeyId(keyId);
+ templateConfigKeyList.forEach(
+ confKey -> {
+ String configValue = confKey.getConfigValue();
+ if (aeswitch && isEncrypt.equals("1")) {
+ // Try to decrypt before encrypting; only encrypt when decryption fails, to avoid double encryption
+ try {
+ AESUtils.decrypt(configValue, AESUtils.LINKIS_DATASOURCE_AES_KEY.getValue());
+ } catch (ErrorException e) {
+ if (e.getErrCode() == 21304) {
+ configValue =
+ AESUtils.encrypt(configValue, AESUtils.LINKIS_DATASOURCE_AES_KEY.getValue());
+ confKey.setConfigValue(configValue);
+ templateConfigKeyMapper.updateConfigValue(confKey);
+ }
+ }
+ }
+ if (!aeswitch && isEncrypt.equals("0")) {
+ // If decryption fails, keep the original password
+ try {
+ configValue =
+ AESUtils.decrypt(configValue, AESUtils.LINKIS_DATASOURCE_AES_KEY.getValue());
+ confKey.setConfigValue(configValue);
+ templateConfigKeyMapper.updateConfigValue(confKey);
+ } catch (Exception e) {
+ logger.warn("此密码无需解密,维持原密码:" + confKey.getKeyId());
+ }
+ }
+ });
+ }
}
diff --git a/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/service/impl/TenantConfigServiceImpl.java b/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/service/impl/TenantConfigServiceImpl.java
index dee1467d0d1..099ba5d7e16 100644
--- a/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/service/impl/TenantConfigServiceImpl.java
+++ b/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/service/impl/TenantConfigServiceImpl.java
@@ -38,7 +38,6 @@
import java.io.IOException;
import java.util.*;
import java.util.concurrent.atomic.AtomicReference;
-import java.util.stream.Collectors;
import com.github.pagehelper.PageHelper;
import com.github.pagehelper.PageInfo;
@@ -218,12 +217,18 @@ public void saveDepartmentTenant(DepartmentTenantVo departmentTenantVo)
*/
@Override
public Map queryDepartmentTenant(
- String departmentId, String creator, String tenantValue, Integer pageNow, Integer pageSize) {
+ String departmentId,
+ String department,
+ String creator,
+ String tenantValue,
+ Integer pageNow,
+ Integer pageSize) {
Map result = new HashMap<>(2);
List tenantVos = null;
PageHelper.startPage(pageNow, pageSize);
try {
- tenantVos = departmentTenantMapper.queryTenantList(creator, departmentId, tenantValue);
+ tenantVos =
+ departmentTenantMapper.queryTenantList(creator, departmentId, department, tenantValue);
} finally {
PageHelper.clearPage();
}
@@ -238,20 +243,14 @@ public void deleteDepartmentTenant(Integer id) {
}
@Override
- public DepartmentTenantVo queryDepartTenant(String creator, String departmentId) {
- return departmentTenantMapper.queryTenant(creator, departmentId);
+ public DepartmentTenantVo queryDepartTenant(
+ String creator, String departmentId, String department) {
+ return departmentTenantMapper.queryTenant(creator, departmentId, department);
}
@Override
public List queryDepartmentList() {
- return new ArrayList<>(
- departmentMapper.queryDepartmentList().stream()
- .collect(
- Collectors.toMap(
- DepartmentVo::getOrgId,
- department -> department,
- (existing, replacement) -> existing))
- .values());
+ return departmentMapper.queryDepartmentList();
}
@Override
diff --git a/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/service/impl/TenantServiceImpl.java b/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/service/impl/TenantServiceImpl.java
index 91523cc4a6e..aab3d5c8f58 100644
--- a/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/service/impl/TenantServiceImpl.java
+++ b/linkis-public-enhancements/linkis-configuration/src/main/java/org/apache/linkis/configuration/service/impl/TenantServiceImpl.java
@@ -64,18 +64,25 @@ public DepartTenantResponse getDepartTenantData(
DepartTenantRequest departTenantRequest, Sender sender) {
DepartmentTenantVo departmentTenantVo =
tenantConfigService.queryDepartTenant(
- departTenantRequest.creator(), departTenantRequest.departmentId());
+ departTenantRequest.creator(),
+ departTenantRequest.departmentId(),
+ departTenantRequest.departmentName());
if (null == departmentTenantVo) {
logger.warn(
"DepartTenant data loading failed creator {} department {},departTenant cache will set '' ",
departTenantRequest.creator(),
departTenantRequest.departmentId());
return new DepartTenantResponse(
- departTenantRequest.creator(), departTenantRequest.departmentId(), "Y", "");
+ departTenantRequest.creator(),
+ departTenantRequest.departmentId(),
+ departTenantRequest.departmentName(),
+ "Y",
+ "");
} else {
return new DepartTenantResponse(
departmentTenantVo.getCreator(),
departmentTenantVo.getDepartmentId(),
+ departmentTenantVo.getDepartment(),
departmentTenantVo.getIsValid(),
departmentTenantVo.getTenantValue());
}
diff --git a/linkis-public-enhancements/linkis-configuration/src/main/resources/mapper/common/DepartmentMapper.xml b/linkis-public-enhancements/linkis-configuration/src/main/resources/mapper/common/DepartmentMapper.xml
index 9b7db1e8308..342667e06bd 100644
--- a/linkis-public-enhancements/linkis-configuration/src/main/resources/mapper/common/DepartmentMapper.xml
+++ b/linkis-public-enhancements/linkis-configuration/src/main/resources/mapper/common/DepartmentMapper.xml
@@ -33,6 +33,6 @@
- select from linkis_org_user where org_id is NOT NULL
+ select DISTINCT org_id,org_name from linkis_org_user where org_id is NOT NULL and org_name is NOT NULL
\ No newline at end of file
diff --git a/linkis-public-enhancements/linkis-configuration/src/main/resources/mapper/common/DepartmentTenantMapper.xml b/linkis-public-enhancements/linkis-configuration/src/main/resources/mapper/common/DepartmentTenantMapper.xml
index c10bd85b80e..6a9fa74dae0 100644
--- a/linkis-public-enhancements/linkis-configuration/src/main/resources/mapper/common/DepartmentTenantMapper.xml
+++ b/linkis-public-enhancements/linkis-configuration/src/main/resources/mapper/common/DepartmentTenantMapper.xml
@@ -58,6 +58,7 @@
from linkis_cg_tenant_department_config
creator = #{creator}
+ and department = #{department}
and department_id = #{departmentId}
and tenant_value = #{tenantValue}
@@ -91,6 +92,7 @@
creator = #{creator}
and department_id = #{departmentId}
+ and department = #{department}
\ No newline at end of file
diff --git a/linkis-public-enhancements/linkis-configuration/src/main/resources/mapper/common/TemplateConfigKeyMapper.xml b/linkis-public-enhancements/linkis-configuration/src/main/resources/mapper/common/TemplateConfigKeyMapper.xml
index 50a11607c61..3136b80bff3 100644
--- a/linkis-public-enhancements/linkis-configuration/src/main/resources/mapper/common/TemplateConfigKeyMapper.xml
+++ b/linkis-public-enhancements/linkis-configuration/src/main/resources/mapper/common/TemplateConfigKeyMapper.xml
@@ -61,6 +61,15 @@
)
+ <update id="updateConfigValue">
+ UPDATE linkis_ps_configuration_template_config_key
+ <set>
+ config_value = #{configValue,jdbcType=VARCHAR},
+ </set>
+ WHERE id = #{id}
+ </update>
select
@@ -157,5 +166,11 @@
where t.template_uuid = #{templateUuid,jdbcType=VARCHAR}
)
+
+ select
+
+ from linkis_ps_configuration_template_config_key
+ where key_id = #{keyId}
+
diff --git a/linkis-public-enhancements/linkis-configuration/src/main/scala/org/apache/linkis/configuration/conf/Configuration.scala b/linkis-public-enhancements/linkis-configuration/src/main/scala/org/apache/linkis/configuration/conf/Configuration.scala
index ac2968d4986..eb40514dcc9 100644
--- a/linkis-public-enhancements/linkis-configuration/src/main/scala/org/apache/linkis/configuration/conf/Configuration.scala
+++ b/linkis-public-enhancements/linkis-configuration/src/main/scala/org/apache/linkis/configuration/conf/Configuration.scala
@@ -41,4 +41,18 @@ object Configuration {
val USE_USER_DEFAULE_VALUE =
CommonVars.apply("wds.linkis.configuration.use.user.default.value", true).getValue
+ val JDBC_PASSWORD_CONF = "wds.linkis.jdbc.password"
+
+ val NEBULA_PASSWORD_CONF = "linkis.nebula.password"
+
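+ // Config keys whose values are AES-encrypted; configurable, defaults to the JDBC and Nebula password keys.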
+ val CONFIGURATION_AES_CONF: List[String] =
+ CommonVars
+ .apply(
+ "linkis.configuration.password.aes.conf",
+ JDBC_PASSWORD_CONF + "," + NEBULA_PASSWORD_CONF
+ )
+ .getValue
+ .split(",")
+ .toList
+
}
diff --git a/linkis-public-enhancements/linkis-configuration/src/main/scala/org/apache/linkis/configuration/service/ConfigurationService.scala b/linkis-public-enhancements/linkis-configuration/src/main/scala/org/apache/linkis/configuration/service/ConfigurationService.scala
index e94c65b715e..5a8cd9ded0d 100644
--- a/linkis-public-enhancements/linkis-configuration/src/main/scala/org/apache/linkis/configuration/service/ConfigurationService.scala
+++ b/linkis-public-enhancements/linkis-configuration/src/main/scala/org/apache/linkis/configuration/service/ConfigurationService.scala
@@ -239,9 +239,11 @@ class ConfigurationService extends Logging {
"config key is null, please check again!(配置信息为空,请重新检查key值)"
)
}
- logger.info(
- s"parameter ${key.getKey} value ${setting.getConfigValue} is not empty, enter checksum...(参数${key.getKey} 值${setting.getConfigValue}不为空,进入校验...)"
- )
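+ // Avoid logging the values of AES-protected keys such as passwords.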
+ if (!Configuration.CONFIGURATION_AES_CONF.contains(key.getKey)) {
+ logger.info(
+ s"parameter ${key.getKey} value ${setting.getConfigValue} is not empty, enter checksum...(参数${key.getKey} 值${setting.getConfigValue}不为空,进入校验...)"
+ )
+ }
if (
!validatorManager
.getOrCreateValidator(key.getValidateType)
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/conf/DatasourceConf.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/conf/DatasourceConf.java
new file mode 100644
index 00000000000..96b899459ab
--- /dev/null
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/conf/DatasourceConf.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.linkis.datasourcemanager.core.conf;
+
+import org.apache.linkis.common.conf.CommonVars;
+
+public class DatasourceConf {
+
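+ // Data source type names that the /info/json/create endpoint is allowed to create
+ // (checked with contains; defaults to "starrocks").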
+ public static CommonVars<String> INSERT_DATASOURCE_LIMIT =
+ CommonVars.apply("linkis.create.datasource.allow", "starrocks");
+}
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/dao/DataSourceDao.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/dao/DataSourceDao.java
index 190dfa83a56..2a8c7f04065 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/dao/DataSourceDao.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/dao/DataSourceDao.java
@@ -120,4 +120,8 @@ int setPublishedVersionId(
*/
void updateVersionId(
@Param("dataSourceId") Long datasourceId, @Param("versionId") long versionId);
+
+ List<DataSource> selectDatasourcesByType(
+ @Param("datasourceTypeName") String datasourceTypeName,
+ @Param("datasourceUser") String datasourceUser);
}
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/dao/DataSourceVersionDao.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/dao/DataSourceVersionDao.java
index f58bd9456bf..4f4880026d0 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/dao/DataSourceVersionDao.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/dao/DataSourceVersionDao.java
@@ -64,4 +64,6 @@ public interface DataSourceVersionDao {
* @return
*/
int removeFromDataSourceId(Long dataSourceId);
+
+ void updateByDatasourceVersion(DatasourceVersion datasourceVersion);
}
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/receivers/DsmReceiver.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/receivers/DsmReceiver.java
index 4702ff82d48..20ab3d28734 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/receivers/DsmReceiver.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/receivers/DsmReceiver.java
@@ -17,6 +17,7 @@
package org.apache.linkis.datasourcemanager.core.receivers;
+import org.apache.linkis.common.utils.AESUtils;
import org.apache.linkis.datasourcemanager.common.domain.DataSource;
import org.apache.linkis.datasourcemanager.common.protocol.DsInfoQueryRequest;
import org.apache.linkis.datasourcemanager.common.protocol.DsInfoResponse;
@@ -75,10 +76,12 @@ public DsInfoResponse dealDsInfoQueryRequest(DsInfoQueryRequest dsInfoQueryReque
MessageFormat.format(
"Datasource name:{0} is not published.", dataSource.getDataSourceName()));
}
-
- RestfulApiHelper.decryptPasswordKey(
- dataSourceRelateService.getKeyDefinitionsByType(dataSource.getDataSourceTypeId()),
- dataSource.getConnectParams());
+ // Decrypt with the legacy scheme only when AES encryption of data source passwords is disabled
+ if (!AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue()) {
+ RestfulApiHelper.decryptPasswordKey(
+ dataSourceRelateService.getKeyDefinitionsByType(dataSource.getDataSourceTypeId()),
+ dataSource.getConnectParams());
+ }
return new DsInfoResponse(
true,
dataSource.getDataSourceType().getName(),
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/restful/DataSourceCoreRestfulApi.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/restful/DataSourceCoreRestfulApi.java
index c6c6a58b799..cf49d316871 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/restful/DataSourceCoreRestfulApi.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/restful/DataSourceCoreRestfulApi.java
@@ -18,12 +18,17 @@
package org.apache.linkis.datasourcemanager.core.restful;
import org.apache.linkis.common.exception.ErrorException;
+import org.apache.linkis.common.utils.AESUtils;
+import org.apache.linkis.common.variable.DateTypeUtils;
import org.apache.linkis.datasourcemanager.common.auth.AuthContext;
import org.apache.linkis.datasourcemanager.common.domain.DataSource;
import org.apache.linkis.datasourcemanager.common.domain.DataSourceParamKeyDefinition;
import org.apache.linkis.datasourcemanager.common.domain.DataSourceType;
import org.apache.linkis.datasourcemanager.common.domain.DatasourceVersion;
+import org.apache.linkis.datasourcemanager.common.util.CryptoUtils;
import org.apache.linkis.datasourcemanager.common.util.json.Json;
+import org.apache.linkis.datasourcemanager.core.conf.DatasourceConf;
+import org.apache.linkis.datasourcemanager.core.dao.DataSourceVersionDao;
import org.apache.linkis.datasourcemanager.core.formdata.FormDataTransformerFactory;
import org.apache.linkis.datasourcemanager.core.formdata.MultiPartFormDataTransformer;
import org.apache.linkis.datasourcemanager.core.service.DataSourceInfoService;
@@ -34,9 +39,11 @@
import org.apache.linkis.datasourcemanager.core.validate.ParameterValidator;
import org.apache.linkis.datasourcemanager.core.vo.DataSourceVo;
import org.apache.linkis.metadata.query.common.MdmConfiguration;
+import org.apache.linkis.server.BDPJettyServerHelper;
import org.apache.linkis.server.Message;
import org.apache.linkis.server.utils.ModuleUserUtils;
+import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
@@ -55,14 +62,8 @@
import javax.validation.groups.Default;
import java.io.UnsupportedEncodingException;
-import java.util.ArrayList;
-import java.util.Calendar;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Set;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
+import java.util.*;
+
import com.github.pagehelper.PageInfo;
import com.github.xiaoymin.knife4j.annotations.ApiOperationSupport;
import io.swagger.annotations.Api;
@@ -81,7 +82,7 @@
produces = {"application/json"})
public class DataSourceCoreRestfulApi {
- private static final Logger LOG = LoggerFactory.getLogger(DataSourceCoreRestfulApi.class);
+ private static final Logger logger = LoggerFactory.getLogger(DataSourceCoreRestfulApi.class);
@Autowired private DataSourceInfoService dataSourceInfoService;
@@ -92,7 +93,7 @@ public class DataSourceCoreRestfulApi {
@Autowired private Validator beanValidator;
@Autowired private MetadataOperateService metadataOperateService;
-
+ @Autowired private DataSourceVersionDao dataSourceVersionDao;
private MultiPartFormDataTransformer formDataTransformer;
@Autowired private List dataSourceParamsHooks = new ArrayList<>();
@@ -137,6 +138,31 @@ public Message getKeyDefinitionsByType(
"Fail to get key definitions of data source type[查询数据源参数键值对失败]");
}
+ @ApiOperation(
+ value = "getKeyDefinitionsByTypeName",
+ notes = "get key definitions by typeName",
+ response = Message.class)
+ @ApiImplicitParams({@ApiImplicitParam(name = "typeName", required = true, dataType = "String")})
+ @RequestMapping(value = "/key-define/{typeName}", method = RequestMethod.GET)
+ public Message getKeyDefinitionsByTypeName(
+ @PathVariable("typeName") String typeName, HttpServletRequest request) {
+ return RestfulApiHelper.doAndResponse(
+ () -> {
+ String userName = ModuleUserUtils.getOperationUser(request, "getKeyDefinitionsByType");
+ DataSourceType targetDataSourceType = getDatasourceTypeID(typeName, request);
+ if (targetDataSourceType != null) {
+ List<DataSourceParamKeyDefinition> keyDefinitions =
+ dataSourceRelateService.getKeyDefinitionsByType(
+ Long.valueOf(targetDataSourceType.getId()),
+ request.getHeader("Content-Language"));
+ return Message.ok().data("keyDefine", keyDefinitions);
+ } else {
+ return Message.error("No data source type found with name: " + typeName);
+ }
+ },
+ "Fail to get key definitions of data source type[查询数据源参数键值对失败]");
+ }
+
@ApiOperation(value = "insertJsonInfo", notes = "insert json info", response = Message.class)
@ApiOperationSupport(ignoreParameters = {"dataSource"})
@ApiImplicitParams({
@@ -161,15 +187,6 @@ public Message insertJsonInfo(@RequestBody DataSource dataSource, HttpServletReq
return RestfulApiHelper.doAndResponse(
() -> {
String userName = ModuleUserUtils.getOperationUser(request, "insertJsonInfo");
-
- // Bean validation
- Set<ConstraintViolation<DataSource>> result =
- beanValidator.validate(dataSource, Default.class);
- if (result.size() > 0) {
- throw new ConstraintViolationException(result);
- }
- // Escape the data source name
- dataSource.setCreateUser(userName);
if (dataSourceInfoService.existDataSource(dataSource.getDataSourceName())) {
return Message.error(
"The data source named: "
@@ -178,12 +195,98 @@ public Message insertJsonInfo(@RequestBody DataSource dataSource, HttpServletReq
+ dataSource.getDataSourceName()
+ " 已经存在]");
}
- insertDataSource(dataSource);
+ insertDatasource(dataSource, userName);
return Message.ok().data("insertId", dataSource.getId());
},
"Fail to insert data source[新增数据源失败]");
}
+ @ApiOperation(value = "insertJsonInfo", notes = "insert json info", response = Message.class)
+ @ApiOperationSupport(ignoreParameters = {"dataSource"})
+ @ApiImplicitParams({
+ @ApiImplicitParam(
+ name = "createSystem",
+ required = true,
+ dataType = "String",
+ example = "linkis"),
+ @ApiImplicitParam(name = "dataSourceDesc", required = true, dataType = "String"),
+ @ApiImplicitParam(name = "dataSourceName", required = true, dataType = "String"),
+ @ApiImplicitParam(name = "dataSourceTypeName", required = true, dataType = "String"),
+ @ApiImplicitParam(name = "labels", required = true, dataType = "String"),
+ @ApiImplicitParam(name = "connectParams", required = true, dataType = "List"),
+ @ApiImplicitParam(name = "host", dataType = "String"),
+ @ApiImplicitParam(name = "password", dataType = "String"),
+ @ApiImplicitParam(name = "port", dataType = "String"),
+ @ApiImplicitParam(name = "subSystem", dataType = "String"),
+ @ApiImplicitParam(name = "username", dataType = "String")
+ })
+ @RequestMapping(value = "/info/json/create", method = RequestMethod.POST)
+ public Message insertJson(@RequestBody DataSource dataSource, HttpServletRequest request) {
+ ModuleUserUtils.getOperationUser(request, "insertJsonCreate");
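+ // Flow: validate the required connect params, restrict to the allowed types, auto-generate a
+ // data source name, then create the data source, create its first version and publish it.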
+ String datasourceUser = dataSource.getCreateUser();
+ String dataSourceTypeName = dataSource.getDataSourceTypeName();
+ // Parameter validation
+ if (StringUtils.isBlank(datasourceUser)) {
+ return Message.error("Parameter createUser cannot be empty (参数 createUser 不能为空)");
+ }
+ if (StringUtils.isBlank(dataSourceTypeName)) {
+ return Message.error(
+ "Parameter dataSourceTypeName cannot be empty (参数 dataSourceTypeName 不能为空)");
+ }
+ Map connectParams = dataSource.getConnectParams();
+ if (MapUtils.isEmpty(connectParams)) {
+ return Message.error("Parameter connectParams cannot be empty (参数 connectParams 不能为空)");
+ }
+ // Connection parameters that must be present
+ String[] requiredParams = {"host", "port", "driverClassName", "username", "password"};
+ for (String param : requiredParams) {
+ Object value = connectParams.get(param);
+ if (value == null || StringUtils.isEmpty(value.toString())) {
+ return Message.error("Parameter " + param + " cannot be empty (参数 " + param + " 不能为空)");
+ }
+ }
+ // Restrict creation to the configured allow-list (starrocks by default)
+ if (!DatasourceConf.INSERT_DATASOURCE_LIMIT.getValue().contains(dataSourceTypeName)) {
+ return Message.error("DataSource Create Only Support starrocks");
+ }
+ // Adjust parameters: derive the data source name from type, user and timestamp
+ dataSource.setDataSourceName(
+ String.join(
+ "_",
+ dataSourceTypeName,
+ datasourceUser,
+ DateTypeUtils.dateFormatSecondLocal().get().format(new Date())));
+ if (dataSourceInfoService.existDataSource(dataSource.getDataSourceName())) {
+ return Message.error(
+ "The data source named: "
+ + dataSource.getDataSourceName()
+ + " has been existed [数据源: "
+ + dataSource.getDataSourceName()
+ + " 已经存在]");
+ }
+ DataSourceType dataSourceType = getDatasourceTypeID(dataSourceTypeName, request);
+ if (dataSourceType != null)
+ dataSource.setDataSourceTypeId(Long.valueOf(dataSourceType.getId()));
+ // Create the data source
+ insertDatasource(dataSource, datasourceUser);
+ Map stringHashMap = new HashMap<>();
+ stringHashMap.put("connectParams", dataSource.getConnectParams());
+ stringHashMap.put("comment", "初始化版本");
+ // Create the data source version
+ Message message = insertJsonParameter(dataSource.getId(), stringHashMap, request);
+ if (message.getStatus() == 1) {
+ return message;
+ }
+ long publishedVersionId = Long.parseLong(message.getData().get("version").toString());
+ dataSource.setPublishedVersionId(publishedVersionId);
+ // Publish the data source version
+ message = publishByDataSourceId(dataSource.getId(), publishedVersionId, request);
+ if (message.getStatus() == 1) {
+ return message;
+ }
+ return Message.ok().data("datasource", dataSource);
+ }
+
@ApiOperation(
value = "updateDataSourceInJson",
notes = "update data source in json",
@@ -200,22 +303,22 @@ public Message insertJsonInfo(@RequestBody DataSource dataSource, HttpServletReq
required = true,
dataType = "String",
example = "1650426189000"),
- @ApiImplicitParam(name = "createUser", required = true, dataType = "String", example = "hive"),
+ @ApiImplicitParam(name = "createUser", required = true, dataType = "String"),
@ApiImplicitParam(name = "dataSourceDesc", required = true, dataType = "String"),
@ApiImplicitParam(name = "dataSourceName", required = true, dataType = "String"),
@ApiImplicitParam(name = "dataSourceTypeId", required = true, dataType = "String"),
@ApiImplicitParam(name = "labels", required = true, dataType = "String"),
@ApiImplicitParam(name = "connectParams", required = true, dataType = "List"),
- @ApiImplicitParam(name = "host", dataType = "String", example = "127.0.0.1"),
+ @ApiImplicitParam(name = "host", dataType = "String"),
@ApiImplicitParam(name = "password", dataType = "String"),
- @ApiImplicitParam(name = "port", dataType = "String", example = "9523"),
+ @ApiImplicitParam(name = "port", dataType = "String"),
@ApiImplicitParam(name = "subSystem", dataType = "String"),
@ApiImplicitParam(name = "username", dataType = "String"),
- @ApiImplicitParam(name = "expire", dataType = "boolean", example = "false"),
- @ApiImplicitParam(name = "file", dataType = "String", example = "adn"),
- @ApiImplicitParam(name = "modifyTime", dataType = "String", example = "1657611440000"),
- @ApiImplicitParam(name = "modifyUser", dataType = "String", example = "hadoop"),
- @ApiImplicitParam(name = "versionId", dataType = "String", example = "18")
+ @ApiImplicitParam(name = "expire", dataType = "boolean"),
+ @ApiImplicitParam(name = "file", dataType = "String"),
+ @ApiImplicitParam(name = "modifyTime", dataType = "String"),
+ @ApiImplicitParam(name = "modifyUser", dataType = "String"),
+ @ApiImplicitParam(name = "versionId", dataType = "String")
})
@ApiOperationSupport(ignoreParameters = {"dataSource"})
@RequestMapping(value = "/info/{dataSourceId}/json", method = RequestMethod.PUT)
@@ -257,6 +360,20 @@ public Message updateDataSourceInJson(
dataSource.setKeyDefinitions(keyDefinitionList);
Map connectParams = dataSource.getConnectParams();
+
+ if (AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue()
+ && connectParams.containsKey(AESUtils.PASSWORD)) {
+ dataSource
+ .getConnectParams()
+ .replace(
+ AESUtils.PASSWORD,
+ AESUtils.encrypt(
+ connectParams.get(AESUtils.PASSWORD).toString(),
+ AESUtils.LINKIS_DATASOURCE_AES_KEY.getValue()));
+ // Mark that the password has been encrypted
+ dataSource.getConnectParams().put(AESUtils.IS_ENCRYPT, AESUtils.ENCRYPT);
+ }
+
// add default value filed
keyDefinitionList.forEach(
keyDefinition -> {
@@ -361,7 +478,9 @@ public Message getInfoByDataSourceId(
List keyDefinitionList =
dataSourceRelateService.getKeyDefinitionsByType(dataSource.getDataSourceTypeId());
// Decrypt
- RestfulApiHelper.decryptPasswordKey(keyDefinitionList, dataSource.getConnectParams());
+ if (!AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue()) {
+ RestfulApiHelper.decryptPasswordKey(keyDefinitionList, dataSource.getConnectParams());
+ }
return Message.ok().data("info", dataSource);
},
"Fail to access data source[获取数据源信息失败]");
@@ -397,8 +516,9 @@ public Message getInfoByDataSourceName(
List keyDefinitionList =
dataSourceRelateService.getKeyDefinitionsByType(dataSource.getDataSourceTypeId());
// Decrypt
- RestfulApiHelper.decryptPasswordKey(keyDefinitionList, dataSource.getConnectParams());
-
+ if (!AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue()) {
+ RestfulApiHelper.decryptPasswordKey(keyDefinitionList, dataSource.getConnectParams());
+ }
return Message.ok().data("info", dataSource);
},
"Fail to access data source[获取数据源信息失败]");
@@ -433,8 +553,61 @@ public Message getPublishedInfoByDataSourceName(
List keyDefinitionList =
dataSourceRelateService.getKeyDefinitionsByType(dataSource.getDataSourceTypeId());
// Decrypt
- RestfulApiHelper.decryptPasswordKey(keyDefinitionList, dataSource.getConnectParams());
+ if (!AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue()) {
+ RestfulApiHelper.decryptPasswordKey(keyDefinitionList, dataSource.getConnectParams());
+ }
+ return Message.ok().data("info", dataSource);
+ },
+ "Fail to access data source[获取数据源信息失败]");
+ }
+ @ApiOperation(
+ value = "Get published info by data source name, IP and port",
+ notes = "Retrieve published information of a data source by its type name, IP and port",
+ response = Message.class)
+ @ApiImplicitParams({
+ @ApiImplicitParam(name = "datasourceTypeName", required = true, dataType = "String"),
+ @ApiImplicitParam(name = "datasourceUser", required = true, dataType = "String"),
+ @ApiImplicitParam(name = "ip", required = true, dataType = "String"),
+ @ApiImplicitParam(name = "port", required = true, dataType = "String")
+ })
+ @RequestMapping(
+ value = "/publishedInfo/{datasourceTypeName}/{datasourceUser}/{ip}/{port}",
+ method = RequestMethod.GET)
+ public Message getPublishedInfoByIpPort(
+ @PathVariable("datasourceTypeName") String datasourceTypeName,
+ @PathVariable("datasourceUser") String datasourceUser,
+ @PathVariable("ip") String ip,
+ @PathVariable("port") String port,
+ HttpServletRequest request) {
+ return RestfulApiHelper.doAndResponse(
+ () -> {
+ String username =
+ ModuleUserUtils.getOperationUser(
+ request, "getPublishedInfoByIpPort ip:" + ip + ",port:" + port);
+ if (StringUtils.isBlank(datasourceUser)) {
+ return Message.error(
+ "Parameter datasourceUser cannot be empty (参数 datasourceUser 不能为空)");
+ }
+
+ DataSource dataSource =
+ dataSourceInfoService.getDataSourcePublishInfo(
+ datasourceTypeName, ip, port, datasourceUser);
+ if (dataSource == null) {
+ return Message.error("No Exists The DataSource [不存在该数据源]");
+ }
+ if (!AuthContext.hasPermission(dataSource, username)) {
+ return Message.error("Don't have query permission for data source [没有数据源的查询权限]");
+ }
+ List<DataSourceParamKeyDefinition> keyDefinitionList =
+ dataSourceRelateService.getKeyDefinitionsByType(dataSource.getDataSourceTypeId());
+ // Decrypt
+ if (!AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue()) {
+ RestfulApiHelper.decryptPasswordKey(keyDefinitionList, dataSource.getConnectParams());
+ }
+ DataSourceType dataSourceType = new DataSourceType();
+ dataSourceType.setName(datasourceTypeName);
+ dataSource.setDataSourceType(dataSourceType);
return Message.ok().data("info", dataSource);
},
"Fail to access data source[获取数据源信息失败]");
@@ -478,7 +651,9 @@ public Message getInfoByDataSourceIdAndVersion(
List keyDefinitionList =
dataSourceRelateService.getKeyDefinitionsByType(dataSource.getDataSourceTypeId());
// Decrypt
- RestfulApiHelper.decryptPasswordKey(keyDefinitionList, dataSource.getConnectParams());
+ if (!AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue()) {
+ RestfulApiHelper.decryptPasswordKey(keyDefinitionList, dataSource.getConnectParams());
+ }
return Message.ok().data("info", dataSource);
},
"Fail to access data source[获取数据源信息失败]");
@@ -516,7 +691,7 @@ public Message getVersionList(
List keyDefinitionList =
dataSourceRelateService.getKeyDefinitionsByType(dataSource.getDataSourceTypeId());
// Decrypt
- if (null != versions) {
+ if (!AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue() && null != versions) {
versions.forEach(
version -> {
RestfulApiHelper.decryptPasswordKey(
@@ -666,7 +841,9 @@ public Message getConnectParams(
Map connectParams = dataSource.getConnectParams();
List keyDefinitionList =
dataSourceRelateService.getKeyDefinitionsByType(dataSource.getDataSourceTypeId());
- RestfulApiHelper.decryptPasswordKey(keyDefinitionList, connectParams);
+ if (!AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue()) {
+ RestfulApiHelper.decryptPasswordKey(keyDefinitionList, connectParams);
+ }
return Message.ok().data("connectParams", connectParams);
},
"Fail to connect data source[连接数据源失败]");
@@ -702,7 +879,9 @@ public Message getConnectParams(
List keyDefinitionList =
dataSourceRelateService.getKeyDefinitionsByType(dataSource.getDataSourceTypeId());
- RestfulApiHelper.decryptPasswordKey(keyDefinitionList, connectParams);
+ if (!AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue()) {
+ RestfulApiHelper.decryptPasswordKey(keyDefinitionList, connectParams);
+ }
return Message.ok().data("connectParams", connectParams);
},
"Fail to connect data source[连接数据源失败]");
@@ -740,8 +919,9 @@ public Message connectDataSource(
// Get definitions
List keyDefinitionList =
dataSourceRelateService.getKeyDefinitionsByType(dataSource.getDataSourceTypeId());
- RestfulApiHelper.decryptPasswordKey(keyDefinitionList, connectParams);
-
+ if (!AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue()) {
+ RestfulApiHelper.decryptPasswordKey(keyDefinitionList, connectParams);
+ }
// For connecting, also need to handle the parameters
for (DataSourceParamsHook hook : dataSourceParamsHooks) {
hook.beforePersist(connectParams, keyDefinitionList);
@@ -771,7 +951,7 @@ public Message queryDataSource(
String userName =
ModuleUserUtils.getOperationUser(request, "queryDataSourceByIds ids:" + idsJson);
- List ids = new ObjectMapper().readValue(idsJson, List.class);
+ List ids = BDPJettyServerHelper.jacksonJson().readValue(idsJson, List.class);
List dataSourceList = dataSourceInfoService.queryDataSourceInfo(ids);
return Message.ok()
.data("queryList", dataSourceList)
@@ -819,6 +999,128 @@ public Message queryDataSource(
"Fail to query page of data source[查询数据源失败]");
}
+ @ApiOperation(
+ value = "queryDataSourceWithConnectParms",
+ notes = "query datasource",
+ response = Message.class)
+ @ApiImplicitParams({
+ @ApiImplicitParam(name = "system", dataType = "String"),
+ @ApiImplicitParam(name = "name", dataType = "Long"),
+ @ApiImplicitParam(name = "typeId", dataType = "Long"),
+ @ApiImplicitParam(name = "identifies", dataType = "String"),
+ @ApiImplicitParam(name = "currentPage", dataType = "Integer"),
+ @ApiImplicitParam(name = "pageSize", dataType = "Integer")
+ })
+ @RequestMapping(value = "/info/connect-params", method = RequestMethod.GET)
+ public Message queryDataSourceWithConnectParms(
+ @RequestParam(value = "system", required = false) String createSystem,
+ @RequestParam(value = "name", required = false) String dataSourceName,
+ @RequestParam(value = "typeId", required = false) Long dataSourceTypeId,
+ @RequestParam(value = "identifies", required = false) String identifies,
+ @RequestParam(value = "currentPage", required = false) Integer currentPage,
+ @RequestParam(value = "pageSize", required = false) Integer pageSize,
+ HttpServletRequest request) {
+ return RestfulApiHelper.doAndResponse(
+ () -> {
+ String permissionUser = ModuleUserUtils.getOperationUser(request, "queryDataSource");
+
+ DataSourceVo dataSourceVo =
+ new DataSourceVo(dataSourceName, dataSourceTypeId, identifies, createSystem);
+ dataSourceVo.setCurrentPage(null != currentPage ? currentPage : 1);
+ dataSourceVo.setPageSize(null != pageSize ? pageSize : 10);
+
+ if (AuthContext.isAdministrator(permissionUser)) {
+ permissionUser = null;
+ }
+ dataSourceVo.setPermissionUser(permissionUser);
+ PageInfo<DataSource> pageInfo =
+ dataSourceInfoService.queryDataSourceInfoPage(dataSourceVo);
+ List<DataSource> queryList = pageInfo.getList();
+ for (DataSource dataSource : queryList) {
+ DataSource dataSourceConnect =
+ dataSourceInfoService.getDataSourceInfoForConnect(dataSource.getDataSourceName());
+ if (dataSourceConnect == null) {
+ return Message.error("No Exists The DataSource [不存在该数据源]");
+ }
+ Map connectParams = dataSourceConnect.getConnectParams();
+ List<DataSourceParamKeyDefinition> keyDefinitionList =
+ dataSourceRelateService.getKeyDefinitionsByType(
+ dataSourceConnect.getDataSourceTypeId());
+ if (!AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue()) {
+ RestfulApiHelper.decryptPasswordKey(keyDefinitionList, connectParams);
+ }
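+ // Never return the password itself to the caller.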
+ connectParams.remove(AESUtils.PASSWORD);
+ dataSource.setConnectParams(connectParams);
+ }
+ return Message.ok().data("queryList", queryList).data("totalPage", pageInfo.getTotal());
+ },
+ "Fail to query page of data source[查询数据源失败]");
+ }
+
+ @ApiOperation(
+ value = "encryptDatasourcePassword",
+ notes = "encrypt datasource password",
+ response = Message.class)
+ @RequestMapping(value = "/encrypt", method = RequestMethod.GET)
+ public Message encryptDatasourcePassword(
+ @RequestParam(value = "isEncrypt", required = false) String isEncrypt,
+ HttpServletRequest request) {
+ return RestfulApiHelper.doAndResponse(
+ () -> {
+ if (AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue()
+ && StringUtils.isNotBlank(isEncrypt)) {
+ // Encrypt the password field of linkis_ps_dm_datasource and linkis_ps_dm_datasource_version
+ String permissionUser = ModuleUserUtils.getOperationUser(request, "encrypt");
+ DataSourceVo dataSourceVo = new DataSourceVo();
+ dataSourceVo.setCurrentPage(1);
+ dataSourceVo.setPageSize(10000);
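+ // A single page of up to 10000 records is assumed to cover all existing data sources here.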
+ if (AuthContext.isAdministrator(permissionUser)) {
+ permissionUser = null;
+ }
+ dataSourceVo.setPermissionUser(permissionUser);
+ dataSourceInfoService
+ .queryDataSourceInfoPage(dataSourceVo)
+ .getList()
+ .forEach(s -> dealDatasourceData(s, isEncrypt));
+ }
+ return Message.ok();
+ },
+ "Fail to aes of data source[加密数据源密码失败]");
+ }
+
+ @ApiOperation(
+ value = "getDataSourceByTypeName",
+ notes = "get data source by datasource type name",
+ response = Message.class)
+ @RequestMapping(value = "/info-by-type", method = RequestMethod.GET)
+ public Message getDataSourceListByTypes(
+ HttpServletRequest request,
+ @RequestParam String typeName,
+ @RequestParam(required = false, defaultValue = "1") Integer currentPage,
+ @RequestParam(required = false, defaultValue = "10") Integer pageSize) {
+ return RestfulApiHelper.doAndResponse(
+ () -> {
+ String userName = ModuleUserUtils.getOperationUser(request, "getDataSourceByTypeName");
+ if (AuthContext.isAdministrator(userName)) {
+ userName = null;
+ }
+ DataSourceType targetDataSourceType = getDatasourceTypeID(typeName, request);
+ if (targetDataSourceType != null) {
+ DataSourceVo dataSourceVo = new DataSourceVo();
+ dataSourceVo.setDataSourceTypeId(Long.valueOf(targetDataSourceType.getId()));
+ dataSourceVo.setPermissionUser(userName);
+ dataSourceVo.setCurrentPage(currentPage);
+ dataSourceVo.setPageSize(pageSize);
+ PageInfo<DataSource> pageInfo =
+ dataSourceInfoService.queryDataSourceInfoPage(dataSourceVo);
+ List<DataSource> queryList = pageInfo.getList();
+ return Message.ok().data("queryList", queryList).data("totalPage", pageInfo.getTotal());
+ } else {
+ return Message.error("No data source type found with name: " + typeName);
+ }
+ },
+ "Fail to get all types of data source[获取数据源列表失败]");
+ }
/**
* Inner method to insert data source
*
@@ -850,6 +1152,10 @@ private void formatConnectParams(
if (StringUtils.isNotBlank(keyDefinition.getDefaultValue())
&& !connectParams.containsKey(key)) {
connectParams.put(key, keyDefinition.getDefaultValue());
+ logger.info(
+ "connectParams put key:{} with default value:{}",
+ key,
+ keyDefinition.getDefaultValue());
}
});
@@ -857,6 +1163,145 @@ private void formatConnectParams(
(k, v) -> {
if (v instanceof String) {
connectParams.put(k, v.toString().trim());
+ if (!k.equals(AESUtils.PASSWORD)) {
+ logger.info("connectParams put key:{} with value:{}", k, v.toString().trim());
+ }
+ }
+ });
+ }
+
+ private DataSourceType getDatasoutceTypeID(
+ String dataSourceTypeName, HttpServletRequest request) {
+ List dataSourceTypes =
+ dataSourceRelateService.getAllDataSourceTypes(request.getHeader("Content-Language"));
+ return dataSourceTypes.stream()
+ .filter(type -> type.getName().equals(dataSourceTypeName))
+ .findFirst()
+ .orElse(null);
+ }
+
+ private DataSource insertDatasource(DataSource dataSource, String userName) {
+ // Bean validation
+ Set> result = beanValidator.validate(dataSource, Default.class);
+ if (result.size() > 0) {
+ throw new ConstraintViolationException(result);
+ }
+ // Escape the data source name
+ dataSource.setCreateUser(userName);
+
+ Map connectParams = dataSource.getConnectParams();
+ if (AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue()
+ && connectParams.containsKey(AESUtils.PASSWORD)) {
+ dataSource
+ .getConnectParams()
+ .replace(
+ AESUtils.PASSWORD,
+ AESUtils.encrypt(
+ connectParams.get(AESUtils.PASSWORD).toString(),
+ AESUtils.LINKIS_DATASOURCE_AES_KEY.getValue()));
+ // Mark the password as already encrypted
+ dataSource.getConnectParams().put(AESUtils.IS_ENCRYPT, AESUtils.ENCRYPT);
+ }
+ insertDataSource(dataSource);
+ return dataSource;
+ }
+
+ private void dealDatasoueceData(DataSource dataSourceInfo, String isEncrypt) {
+ DataSource dataSourceInfoBrief =
+ dataSourceInfoService.getDataSourceInfoBrief(dataSourceInfo.getId());
+ if (StringUtils.isNotBlank(dataSourceInfoBrief.getParameter())
+ && dataSourceInfoBrief.getParameter().contains(AESUtils.PASSWORD)) {
+ Map datasourceParmMap =
+ BDPJettyServerHelper.gson()
+ .fromJson(dataSourceInfoBrief.getParameter().toString(), Map.class);
+ if (!datasourceParmMap
+ .getOrDefault(AESUtils.IS_ENCRYPT, AESUtils.DECRYPT)
+ .equals(AESUtils.ENCRYPT)
+ && isEncrypt.equals(AESUtils.ENCRYPT)) {
+ datasourceParmMap.put(
+ AESUtils.PASSWORD,
+ AESUtils.encrypt(
+ datasourceParmMap.get(AESUtils.PASSWORD).toString(),
+ AESUtils.LINKIS_DATASOURCE_AES_KEY.getValue()));
+ datasourceParmMap.put(AESUtils.IS_ENCRYPT, AESUtils.ENCRYPT);
+ dataSourceInfoBrief.setParameter(BDPJettyServerHelper.gson().toJson(datasourceParmMap));
+ dataSourceInfoService.updateDataSourceInfo(dataSourceInfoBrief);
+ }
+ if (datasourceParmMap
+ .getOrDefault(AESUtils.IS_ENCRYPT, AESUtils.DECRYPT)
+ .equals(AESUtils.ENCRYPT)
+ && isEncrypt.equals(AESUtils.DECRYPT)) {
+ datasourceParmMap.put(
+ AESUtils.PASSWORD,
+ AESUtils.decrypt(
+ datasourceParmMap.get(AESUtils.PASSWORD).toString(),
+ AESUtils.LINKIS_DATASOURCE_AES_KEY.getValue()));
+ datasourceParmMap.remove(AESUtils.IS_ENCRYPT);
+ dataSourceInfoBrief.setParameter(BDPJettyServerHelper.gson().toJson(datasourceParmMap));
+ dataSourceInfoService.updateDataSourceInfo(dataSourceInfoBrief);
+ }
+ dealDatasoueceVersionData(dataSourceInfoBrief, isEncrypt);
+ }
+ }
+
+ private void dealDatasoueceVersionData(DataSource dataSourceInfo, String isEncrypt) {
+ // Handle the password in linkis_ps_dm_datasource_version: decode the base64 value, then encrypt it with AES
+ List datasourceVersionList =
+ dataSourceVersionDao.getVersionsFromDatasourceId(dataSourceInfo.getId());
+ datasourceVersionList.forEach(
+ datasourceVersion -> {
+ // Encrypt
+ if (StringUtils.isNotBlank(datasourceVersion.getParameter())
+ && datasourceVersion.getParameter().contains(AESUtils.PASSWORD)) {
+ Map datasourceVersionMap =
+ BDPJettyServerHelper.gson().fromJson(datasourceVersion.getParameter(), Map.class);
+ if (!datasourceVersionMap
+ .getOrDefault(AESUtils.IS_ENCRYPT, AESUtils.DECRYPT)
+ .equals(AESUtils.ENCRYPT)
+ && isEncrypt.equals(AESUtils.ENCRYPT)) {
+ try {
+ Object password =
+ CryptoUtils.string2Object(
+ datasourceVersionMap.get(AESUtils.PASSWORD).toString());
+ datasourceVersionMap.put(
+ AESUtils.PASSWORD,
+ AESUtils.encrypt(
+ password.toString(), AESUtils.LINKIS_DATASOURCE_AES_KEY.getValue()));
+ datasourceVersionMap.put(AESUtils.IS_ENCRYPT, AESUtils.ENCRYPT);
+ datasourceVersion.setParameter(
+ BDPJettyServerHelper.gson().toJson(datasourceVersionMap));
+ dataSourceVersionDao.updateByDatasourceVersion(datasourceVersion);
+ } catch (Exception e) {
+ logger.warn(
+ "error encrypt linkis_ps_dm_datasource_version id :"
+ + datasourceVersion.getDatasourceId()
+ + " version:"
+ + datasourceVersion.getVersionId());
+ }
+ }
+ // Decrypt
+ if (datasourceVersionMap
+ .getOrDefault(AESUtils.IS_ENCRYPT, AESUtils.DECRYPT)
+ .equals(AESUtils.ENCRYPT)
+ && isEncrypt.equals(AESUtils.DECRYPT)) {
+ try {
+ String password = datasourceVersionMap.get(AESUtils.PASSWORD).toString();
+ String decryptPassword =
+ AESUtils.decrypt(password, AESUtils.LINKIS_DATASOURCE_AES_KEY.getValue());
+ datasourceVersionMap.put(
+ AESUtils.PASSWORD, CryptoUtils.object2String(decryptPassword));
+ datasourceVersionMap.remove(AESUtils.IS_ENCRYPT);
+ datasourceVersion.setParameter(
+ BDPJettyServerHelper.gson().toJson(datasourceVersionMap));
+ dataSourceVersionDao.updateByDatasourceVersion(datasourceVersion);
+ } catch (Exception e) {
+ logger.warn(
+ "error encrypt linkis_ps_dm_datasource_version id :"
+ + datasourceVersion.getDatasourceId()
+ + " version:"
+ + datasourceVersion.getVersionId());
+ }
+ }
}
});
}
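
A minimal sketch of the migration rule the new /encrypt endpoint applies to each stored parameter map (illustration only; the helper name and signature are hypothetical, and it relies solely on the AESUtils members already referenced in this patch):

    private static void migratePassword(Map<String, Object> params, String mode) {
      if (!params.containsKey(AESUtils.PASSWORD)) {
        return; // nothing to migrate without a password field
      }
      boolean marked =
          params.getOrDefault(AESUtils.IS_ENCRYPT, AESUtils.DECRYPT).equals(AESUtils.ENCRYPT);
      if (!marked && mode.equals(AESUtils.ENCRYPT)) {
        // plain text -> AES, then mark the map so it is never encrypted twice
        params.put(
            AESUtils.PASSWORD,
            AESUtils.encrypt(
                String.valueOf(params.get(AESUtils.PASSWORD)),
                AESUtils.LINKIS_DATASOURCE_AES_KEY.getValue()));
        params.put(AESUtils.IS_ENCRYPT, AESUtils.ENCRYPT);
      } else if (marked && mode.equals(AESUtils.DECRYPT)) {
        // AES -> plain text, then drop the marker
        params.put(
            AESUtils.PASSWORD,
            AESUtils.decrypt(
                String.valueOf(params.get(AESUtils.PASSWORD)),
                AESUtils.LINKIS_DATASOURCE_AES_KEY.getValue()));
        params.remove(AESUtils.IS_ENCRYPT);
      }
    }
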
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/restful/DataSourceOperateRestfulApi.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/restful/DataSourceOperateRestfulApi.java
index bb5ce4109de..4937f56b3d5 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/restful/DataSourceOperateRestfulApi.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/restful/DataSourceOperateRestfulApi.java
@@ -18,6 +18,7 @@
package org.apache.linkis.datasourcemanager.core.restful;
import org.apache.linkis.common.exception.ErrorException;
+import org.apache.linkis.common.utils.AESUtils;
import org.apache.linkis.datasourcemanager.common.domain.DataSource;
import org.apache.linkis.datasourcemanager.common.domain.DataSourceParamKeyDefinition;
import org.apache.linkis.datasourcemanager.common.domain.DataSourceType;
@@ -116,6 +117,23 @@ protected void doConnect(String operator, DataSource dataSource) throws ErrorExc
throw new ParameterValidateException(ENVID_ATYPICAL.getErrorDesc() + e);
}
}
+ if (AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue()
+ && !dataSource.getConnectParams().containsKey(AESUtils.IS_ENCRYPT)
+ && dataSource.getConnectParams().containsKey("password")) {
+ // Encrypt the plain-text password once before the connection test; parameters
+ // that already carry the isEncrypt marker are left untouched, and maps without
+ // a password are skipped.
+ String password = dataSource.getConnectParams().get("password").toString();
+ String encrypt = AESUtils.encrypt(password, AESUtils.LINKIS_DATASOURCE_AES_KEY.getValue());
+ Map connectParams = dataSource.getConnectParams();
+ connectParams.replace("password", encrypt);
+ dataSource.setConnectParams(connectParams);
+ }
List keyDefinitionList =
dataSourceRelateService.getKeyDefinitionsByType(dataSource.getDataSourceTypeId());
dataSource.setKeyDefinitions(keyDefinitionList);
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/restful/RestfulApiHelper.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/restful/RestfulApiHelper.java
index 72f5c9f899f..b42e2abe3b9 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/restful/RestfulApiHelper.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/restful/RestfulApiHelper.java
@@ -18,22 +18,27 @@
package org.apache.linkis.datasourcemanager.core.restful;
import org.apache.linkis.common.exception.WarnException;
+import org.apache.linkis.common.utils.AESUtils;
import org.apache.linkis.datasourcemanager.common.auth.AuthContext;
import org.apache.linkis.datasourcemanager.common.domain.DataSourceParamKeyDefinition;
+import org.apache.linkis.datasourcemanager.common.util.CryptoUtils;
import org.apache.linkis.datasourcemanager.core.restful.exception.BeanValidationExceptionMapper;
import org.apache.linkis.datasourcemanager.core.validate.ParameterValidateException;
import org.apache.linkis.server.Message;
-import org.apache.commons.codec.binary.Base64;
-
import javax.validation.ConstraintViolationException;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
/** Helper of restful api entrance */
public class RestfulApiHelper {
+
+ private static final Logger logger = LoggerFactory.getLogger(RestfulApiHelper.class);
/**
* If is administrator
*
@@ -68,16 +73,24 @@ public static void encryptPasswordKey(
if (keyDefinition.getValueType() == DataSourceParamKeyDefinition.ValueType.PASSWORD) {
Object password = connectParams.get(keyDefinition.getKey());
if (null != password) {
- connectParams.put(
- keyDefinition.getKey(),
- new String(new Base64().encode(String.valueOf(password).getBytes())));
+ String passwordStr = String.valueOf(password);
+ if (AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue()) {
+ if (!connectParams.containsKey(AESUtils.IS_ENCRYPT)) {
+ passwordStr =
+ AESUtils.encrypt(passwordStr, AESUtils.LINKIS_DATASOURCE_AES_KEY.getValue());
+ connectParams.put(AESUtils.IS_ENCRYPT, AESUtils.ENCRYPT);
+ }
+ } else {
+ passwordStr = CryptoUtils.object2String(passwordStr);
+ }
+ connectParams.put(keyDefinition.getKey(), passwordStr);
}
}
});
}
/**
- * Encrypt key of password type
+ * Decrypt key of password type
*
* @param keyDefinitionList definition list
* @param connectParams connection parameters
@@ -89,9 +102,14 @@ public static void decryptPasswordKey(
if (keyDefinition.getValueType() == DataSourceParamKeyDefinition.ValueType.PASSWORD) {
Object password = connectParams.get(keyDefinition.getKey());
if (null != password) {
- connectParams.put(
- keyDefinition.getKey(),
- new String(new Base64().decode(String.valueOf(password).getBytes())));
+ String passwordStr = String.valueOf(password);
+ if (AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue()) {
+ passwordStr =
+ AESUtils.decrypt(passwordStr, AESUtils.LINKIS_DATASOURCE_AES_KEY.getValue());
+ } else {
+ passwordStr = String.valueOf(CryptoUtils.string2Object(passwordStr));
+ }
+ connectParams.put(keyDefinition.getKey(), passwordStr);
}
}
});
@@ -116,26 +134,6 @@ public static Message doAndResponse(TryOperation tryOperation, String failMessag
}
}
- // /**
- // * @param tryOperation operate function
- // * @param failMessage message
- // */
- // public static Message doAndResponse(
- // TryOperation tryOperation, String method, String failMessage) {
- // try {
- // Message message = tryOperation.operateAndGetMessage();
- // return setMethod(message, method);
- // } catch (ParameterValidateException e) {
- // return setMethod(Message.error(e.getMessage()), method);
- // } catch (ConstraintViolationException e) {
- // return new BeanValidationExceptionMapper().toResponse(e);
- // } catch (WarnException e) {
- // return setMethod(Message.warn(e.getMessage()), method);
- // } catch (Exception e) {
- // return setMethod(Message.error(failMessage, e), method);
- // }
- // }
-
private static Message setMethod(Message message, String method) {
message.setMethod(method);
return message;
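
A short usage sketch of the reworked password helpers, assuming the AES switch is enabled and a keyDefinitionList whose only PASSWORD-typed entry is the "password" key (illustration only, not part of the patch):

    Map<String, Object> connectParams = new HashMap<>();
    connectParams.put("password", "myPlainPwd");
    // first call: AES-encrypts the value and adds the isEncrypt marker
    RestfulApiHelper.encryptPasswordKey(keyDefinitionList, connectParams);
    // restores the plain-text value for use by a connector
    RestfulApiHelper.decryptPasswordKey(keyDefinitionList, connectParams);
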
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/service/DataSourceInfoService.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/service/DataSourceInfoService.java
index 5dd24497a73..e037d45fe9a 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/service/DataSourceInfoService.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/service/DataSourceInfoService.java
@@ -72,6 +72,18 @@ public interface DataSourceInfoService {
*/
DataSource getDataSourcePublishInfo(String dataSourceName);
+ /**
+ * Get the data source of the latest published version by type name, ip, port and user
+ *
+ * @param datasourceTypeName
+ * @param ip
+ * @param port
+ * @param datasourceUser
+ * @return
+ */
+ DataSource getDataSourcePublishInfo(
+ String datasourceTypeName, String ip, String port, String datasourceUser);
+
/**
* Get data source
*
@@ -183,7 +195,7 @@ public interface DataSourceInfoService {
List queryDataSourceEnvPage(DataSourceEnvVo dataSourceEnvVo);
/**
- * expire data source
+ * Expire data source
*
* @param dataSourceId
* @return
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/service/impl/DataSourceInfoServiceImpl.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/service/impl/DataSourceInfoServiceImpl.java
index 7fa4880140c..1e5aa95e723 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/service/impl/DataSourceInfoServiceImpl.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/datasourcemanager/core/service/impl/DataSourceInfoServiceImpl.java
@@ -124,6 +124,58 @@ public DataSource getDataSourcePublishInfo(String dataSourceName) {
return dataSource;
}
+ @Override
+ public DataSource getDataSourcePublishInfo(
+ String datasourceTypeName, String ip, String port, String datasourceUser) {
+ try {
+ // 1. Query the data source list for the given type and owner
+ List dataSourceList =
+ dataSourceDao.selectDatasourcesByType(datasourceTypeName, datasourceUser);
+ if (CollectionUtils.isEmpty(dataSourceList)) {
+ LOG.debug(
+ "No datasource found for type:{} and owner:{}", datasourceTypeName, datasourceUser);
+ return null;
+ }
+ // 2. Filter the published data sources that match the ip, port and user
+ return dataSourceList.stream()
+ .filter(
+ dataSource ->
+ (dataSource.getPublishedVersionId() != null) && (!dataSource.isExpire()))
+ .map(
+ dataSource -> {
+ String parameter =
+ dataSourceVersionDao.selectOneVersion(
+ dataSource.getId(), dataSource.getPublishedVersionId());
+ return new AbstractMap.SimpleEntry<>(dataSource, parameter);
+ })
+ .filter(
+ entry ->
+ StringUtils.isNotBlank(entry.getValue())
+ && entry.getValue().contains(ip)
+ && entry.getValue().contains(port)
+ && entry.getValue().contains(datasourceUser))
+ .sorted(
+ Comparator.comparing(
+ entry -> entry.getKey().getCreateTime(), Comparator.reverseOrder()))
+ .findFirst()
+ .map(
+ entry -> {
+ DataSource result = entry.getKey();
+ result.setParameter(entry.getValue());
+ LOG.info("Found matched datasource:{}", result.getId());
+ return result;
+ })
+ .orElse(null);
+ } catch (Exception e) {
+ LOG.error(
+ "Get published datasource failed, type:{}, datasourceUser:{}",
+ datasourceTypeName,
+ datasourceUser,
+ e);
+ return null;
+ }
+ }
+
@Override
public DataSource getDataSourceInfo(Long dataSourceId, Long version) {
DataSource dataSource = dataSourceDao.selectOneDetail(dataSourceId);
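
A hypothetical caller-side sketch of the new overload (illustration only; the type name, host, port and user values are made up):

    // Resolve the latest published, non-expired datasource of type "mysql" owned by
    // "hadoop" whose published parameters contain the given ip and port.
    DataSource ds =
        dataSourceInfoService.getDataSourcePublishInfo("mysql", "192.168.1.10", "3306", "hadoop");
    if (ds != null) {
      // the matched version's connect parameters are returned as JSON in 'parameter'
      LOG.info("matched datasource id:{} parameter:{}", ds.getId(), ds.getParameter());
    }
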
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/metadata/query/server/restful/MetadataQueryRestful.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/metadata/query/server/restful/MetadataQueryRestful.java
index ddb7214b333..eb2d6195639 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/metadata/query/server/restful/MetadataQueryRestful.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/java/org/apache/linkis/metadata/query/server/restful/MetadataQueryRestful.java
@@ -467,62 +467,6 @@ public Message getSparkSql(
}
}
- @ApiOperation(value = "getJdbcSql", notes = "get jdbc sql", response = Message.class)
- @ApiImplicitParams({
- @ApiImplicitParam(name = "dataSourceName", required = true, dataType = "String"),
- @ApiImplicitParam(name = "envId", required = false, dataType = "String"),
- @ApiImplicitParam(name = "system", required = true, dataType = "String"),
- @ApiImplicitParam(name = "database", required = true, dataType = "String"),
- @ApiImplicitParam(name = "table", required = true, dataType = "String")
- })
- @RequestMapping(value = "/getJdbcSql", method = RequestMethod.GET)
- public Message getJdbcSql(
- @RequestParam("dataSourceName") String dataSourceName,
- @RequestParam(value = "envId", required = false) String envId,
- @RequestParam("database") String database,
- @RequestParam("table") String table,
- @RequestParam("system") String system,
- HttpServletRequest request) {
- try {
- if (StringUtils.isBlank(system)) {
- return Message.error("'system' is missing[缺少系统名]");
- }
- if (!MetadataUtils.nameRegexPattern.matcher(system).matches()) {
- return Message.error("'system' is invalid[系统名错误]");
- }
- if (!MetadataUtils.nameRegexPattern.matcher(database).matches()) {
- return Message.error("'database' is invalid[数据库名错误]");
- }
- if (!MetadataUtils.nameRegexPattern.matcher(table).matches()) {
- return Message.error("'table' is invalid[表名错误]");
- }
- if (!MetadataUtils.nameRegexPattern.matcher(dataSourceName).matches()) {
- return Message.error("'dataSourceName' is invalid[数据源错误]");
- }
-
- String userName =
- ModuleUserUtils.getOperationUser(request, "getJdbcSql, dataSourceName:" + dataSourceName);
-
- GenerateSqlInfo sparkSql =
- metadataQueryService.getJdbcSqlByDsNameAndEnvId(
- dataSourceName, database, table, system, userName, envId);
- return Message.ok().data("jdbcSql", sparkSql);
- } catch (Exception e) {
- return errorToResponseMessage(
- "Fail to jdbc sql[获取getJdbcSql信息失败], name:["
- + dataSourceName
- + "]"
- + ", system:["
- + system
- + "], database:["
- + database
- + "], table:["
- + table
- + "]",
- e);
- }
- }
-
private Message errorToResponseMessage(String uiMessage, Exception e) {
if (e instanceof MetaMethodInvokeException) {
MetaMethodInvokeException invokeException = (MetaMethodInvokeException) e;
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/resources/mapper/mysql/DataSouceMapper.xml b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/resources/mapper/mysql/DataSouceMapper.xml
index 24425e4d5a1..d9cf5ff06d4 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/resources/mapper/mysql/DataSouceMapper.xml
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/resources/mapper/mysql/DataSouceMapper.xml
@@ -247,6 +247,15 @@
ORDER BY create_time DESC
+
+
+
+
+
+ t.`name` = #{datasourceTypeName} and d.`create_user` = #{datasourceUser}
+
+
UPDATE linkis_ps_dm_datasource
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/resources/mapper/mysql/DataSourceVersionMapper.xml b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/resources/mapper/mysql/DataSourceVersionMapper.xml
index 533e567bc78..1738419edc6 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/resources/mapper/mysql/DataSourceVersionMapper.xml
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/server/src/main/resources/mapper/mysql/DataSourceVersionMapper.xml
@@ -30,6 +30,10 @@
`version_id`, `datasource_id`, `parameter`, `comment`, `create_time`, `create_user`
+ <update id="updateByDatasourceVersion">
+ update linkis_ps_dm_datasource_version set parameter = #{parameter}
+ where datasource_id = #{datasourceId} and version_id = #{versionId}
+ </update>
SELECT ifnull(max(version_id),0 ) FROM `linkis_ps_dm_datasource_version` WHERE datasource_id = #{dataSourceId};
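
The new update statement is invoked through dataSourceVersionDao.updateByDatasourceVersion in the service code above. A hedged sketch of the mapper-interface method it implies (the class and parameter-type names are assumptions for illustration):

    // Assumed to live in the DataSourceVersionDao mapper interface of this module;
    // the parameter type mirrors the datasourceVersion objects handled above.
    void updateByDatasourceVersion(DatasourceVersion datasourceVersion);
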
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/elasticsearch/src/main/java/org/apache/linkis/metadata/query/service/EsMetaService.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/elasticsearch/src/main/java/org/apache/linkis/metadata/query/service/EsMetaService.java
index f862c7a15a6..e462d088ee1 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/elasticsearch/src/main/java/org/apache/linkis/metadata/query/service/EsMetaService.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/elasticsearch/src/main/java/org/apache/linkis/metadata/query/service/EsMetaService.java
@@ -17,6 +17,7 @@
package org.apache.linkis.metadata.query.service;
+import org.apache.linkis.common.utils.AESUtils;
import org.apache.linkis.datasourcemanager.common.util.json.Json;
import org.apache.linkis.metadata.query.common.domain.MetaColumnInfo;
import org.apache.linkis.metadata.query.common.service.AbstractDbMetaService;
@@ -39,13 +40,14 @@ public MetadataConnection getConnection(
} else {
endPoints = ((List) urls).toArray(endPoints);
}
+ String password =
+ String.valueOf(params.getOrDefault(ElasticParamsMapper.PARAM_ES_PASSWORD.getValue(), ""));
ElasticConnection conn =
new ElasticConnection(
endPoints,
String.valueOf(
params.getOrDefault(ElasticParamsMapper.PARAM_ES_USERNAME.getValue(), "")),
- String.valueOf(
- params.getOrDefault(ElasticParamsMapper.PARAM_ES_PASSWORD.getValue(), "")));
+ AESUtils.isDecryptByConf(password));
return new MetadataConnection<>(conn, false);
}
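
This service and the JDBC connections below now run the stored password through AESUtils.isDecryptByConf before connecting. A sketch of the assumed contract, for illustration only (the actual implementation lives in AESUtils and may differ):

    // Assumption: return the AES-decrypted value when the datasource AES switch is
    // enabled, otherwise return the input unchanged.
    static String isDecryptByConfSketch(String value) {
      if (AESUtils.LINKIS_DATASOURCE_AES_SWITCH.getValue()) {
        return AESUtils.decrypt(value, AESUtils.LINKIS_DATASOURCE_AES_KEY.getValue());
      }
      return value;
    }
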
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/AbstractSqlConnection.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/AbstractSqlConnection.java
index 0b7dd26e783..970bee1e189 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/AbstractSqlConnection.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/AbstractSqlConnection.java
@@ -17,12 +17,11 @@
package org.apache.linkis.metadata.query.service;
-import org.apache.linkis.metadata.query.common.domain.GenerateSqlInfo;
+import org.apache.linkis.common.conf.CommonVars;
+import org.apache.linkis.common.utils.AESUtils;
import org.apache.linkis.metadata.query.common.domain.MetaColumnInfo;
-import org.apache.linkis.metadata.query.common.service.GenerateSqlTemplate;
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.lang3.StringUtils;
+import org.apache.logging.log4j.util.Strings;
import java.io.Closeable;
import java.io.IOException;
@@ -35,13 +34,18 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public abstract class AbstractSqlConnection implements Closeable {
-
+public class AbstractSqlConnection implements Closeable {
private static final Logger LOG = LoggerFactory.getLogger(AbstractSqlConnection.class);
- public Connection conn;
+ private static final CommonVars SQL_DRIVER_CLASS =
+ CommonVars.apply("wds.linkis.server.mdm.service.postgre.driver", "org.postgresql.Driver");
+
+ private static final CommonVars SQL_CONNECT_URL =
+ CommonVars.apply("wds.linkis.server.mdm.service.postgre.url", "jdbc:postgresql://%s:%s/%s");
+
+ private Connection conn;
- public ConnectMessage connectMessage;
+ private ConnectMessage connectMessage;
public AbstractSqlConnection(
String host,
@@ -52,14 +56,50 @@ public AbstractSqlConnection(
Map extraParams)
throws ClassNotFoundException, SQLException {
connectMessage = new ConnectMessage(host, port, username, password, extraParams);
+ if (Strings.isBlank(database)) {
+ database = "";
+ }
conn = getDBConnection(connectMessage, database);
// Try to create statement
Statement statement = conn.createStatement();
statement.close();
}
- public abstract Connection getDBConnection(ConnectMessage connectMessage, String database)
- throws ClassNotFoundException, SQLException;
+ public List getAllDatabases() throws SQLException {
+ List dataBaseName = new ArrayList<>();
+ Statement stmt = null;
+ ResultSet rs = null;
+ try {
+ stmt = conn.createStatement();
+ rs = stmt.executeQuery("select datname from pg_database");
+ while (rs.next()) {
+ dataBaseName.add(rs.getString(1));
+ }
+ } finally {
+ closeResource(null, stmt, rs);
+ }
+ return dataBaseName;
+ }
+
+ public List getAllTables(String schemaname) throws SQLException {
+ List tableNames = new ArrayList<>();
+ Statement stmt = null;
+ ResultSet rs = null;
+ try {
+ stmt = conn.createStatement();
+ rs =
+ stmt.executeQuery(
+ "SELECT tablename FROM pg_tables where schemaname = '" + schemaname + "'");
+ // rs = stmt.executeQuery("SELECT table_name FROM
+ // information_schema.tables");
+ while (rs.next()) {
+ tableNames.add(rs.getString(1));
+ }
+ return tableNames;
+ } finally {
+ closeResource(null, stmt, rs);
+ }
+ }
public List getColumns(String schemaname, String table)
throws SQLException, ClassNotFoundException {
@@ -69,7 +109,8 @@ public List getColumns(String schemaname, String table)
ResultSet rs = null;
ResultSetMetaData meta;
try {
- List primaryKeys = getPrimaryKeys(table);
+ List primaryKeys =
+ getPrimaryKeys(/*getDBConnection(connectMessage, schemaname), */ table);
ps = conn.prepareStatement(columnSql);
rs = ps.executeQuery();
meta = rs.getMetaData();
@@ -77,8 +118,6 @@ public List getColumns(String schemaname, String table)
for (int i = 1; i < columnCount + 1; i++) {
MetaColumnInfo info = new MetaColumnInfo();
info.setIndex(i);
- info.setLength(meta.getColumnDisplaySize(i));
- info.setNullable((meta.isNullable(i) == ResultSetMetaData.columnNullable));
info.setName(meta.getColumnName(i));
info.setType(meta.getColumnTypeName(i));
if (primaryKeys.contains(meta.getColumnName(i))) {
@@ -99,79 +138,22 @@ public List getColumns(String schemaname, String table)
* @return
* @throws SQLException
*/
- public List getPrimaryKeys(String table) throws SQLException {
+ private List getPrimaryKeys(
+ /*Connection connection, */ String table) throws SQLException {
ResultSet rs = null;
List primaryKeys = new ArrayList<>();
- try {
- DatabaseMetaData dbMeta = conn.getMetaData();
- rs = dbMeta.getPrimaryKeys(null, null, table);
- while (rs.next()) {
- primaryKeys.add(rs.getString("column_name"));
- }
- return primaryKeys;
- } finally {
- if (null != rs) {
- rs.close();
- }
+ // try {
+ DatabaseMetaData dbMeta = conn.getMetaData();
+ rs = dbMeta.getPrimaryKeys(null, null, table);
+ while (rs.next()) {
+ primaryKeys.add(rs.getString("column_name"));
}
- }
-
- public GenerateSqlInfo queryJdbcSql(String database, String table) {
- GenerateSqlInfo generateSqlInfo = new GenerateSqlInfo();
- String ddl = generateJdbcDdlSql(database, table);
- generateSqlInfo.setDdl(ddl);
-
- generateSqlInfo.setDml(GenerateSqlTemplate.generateDmlSql(table));
-
- String columnStr = "*";
- try {
- List columns = getColumns(database, table);
- if (CollectionUtils.isNotEmpty(columns)) {
- columnStr =
- columns.stream().map(column -> column.getName()).collect(Collectors.joining(","));
- }
- } catch (Exception e) {
- LOG.warn("Fail to get Sql columns(获取字段列表失败)", e);
- }
- generateSqlInfo.setDql(GenerateSqlTemplate.generateDqlSql(columnStr, table));
-
- return generateSqlInfo;
- }
-
- public String generateJdbcDdlSql(String database, String table) {
- StringBuilder ddl = new StringBuilder();
- ddl.append("CREATE TABLE ").append(String.format("%s.%s", database, table)).append(" (");
-
- try {
- List columns = getColumns(database, table);
- if (CollectionUtils.isNotEmpty(columns)) {
- for (MetaColumnInfo column : columns) {
- ddl.append("\n\t").append(column.getName()).append(" ").append(column.getType());
- if (column.getLength() > 0) {
- ddl.append("(").append(column.getLength()).append(")");
- }
- if (!column.isNullable()) {
- ddl.append(" NOT NULL");
- }
- ddl.append(",");
+ return primaryKeys;
+ /*}finally{
+ if(null != rs){
+ closeResource(connection, null, rs);
}
- String primaryKeys =
- columns.stream()
- .filter(MetaColumnInfo::isPrimaryKey)
- .map(MetaColumnInfo::getName)
- .collect(Collectors.joining(", "));
- if (StringUtils.isNotBlank(primaryKeys)) {
- ddl.append(String.format("\n\tPRIMARY KEY (%s),", primaryKeys));
- }
- ddl.deleteCharAt(ddl.length() - 1);
- }
- } catch (Exception e) {
- LOG.warn("Fail to get Sql columns(获取字段列表失败)", e);
- }
-
- ddl.append("\n)");
-
- return ddl.toString();
+ }*/
}
/**
@@ -181,7 +163,7 @@ public String generateJdbcDdlSql(String database, String table) {
* @param statement statement
* @param resultSet result set
*/
- public void closeResource(Connection connection, Statement statement, ResultSet resultSet) {
+ private void closeResource(Connection connection, Statement statement, ResultSet resultSet) {
try {
if (null != resultSet && !resultSet.isClosed()) {
resultSet.close();
@@ -208,18 +190,34 @@ public void close() throws IOException {
* @return
* @throws ClassNotFoundException
*/
+ private Connection getDBConnection(ConnectMessage connectMessage, String database)
+ throws ClassNotFoundException, SQLException {
+ String extraParamString =
+ connectMessage.extraParams.entrySet().stream()
+ .map(e -> String.join("=", e.getKey(), String.valueOf(e.getValue())))
+ .collect(Collectors.joining("&"));
+ Class.forName(SQL_DRIVER_CLASS.getValue());
+ String url =
+ String.format(
+ SQL_CONNECT_URL.getValue(), connectMessage.host, connectMessage.port, database);
+ if (!connectMessage.extraParams.isEmpty()) {
+ url += "?" + extraParamString;
+ }
+ return DriverManager.getConnection(
+ url, connectMessage.username, AESUtils.isDecryptByConf(connectMessage.password));
+ }
/** Connect message */
- public static class ConnectMessage {
- public String host;
+ private static class ConnectMessage {
+ private String host;
- public Integer port;
+ private Integer port;
- public String username;
+ private String username;
- public String password;
+ private String password;
- public Map extraParams;
+ private Map extraParams;
public ConnectMessage(
String host,
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/ClickhouseMetaService.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/ClickhouseMetaService.java
index b1abb06774f..ec099b11dc3 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/ClickhouseMetaService.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/ClickhouseMetaService.java
@@ -18,16 +18,12 @@
package org.apache.linkis.metadata.query.service;
import org.apache.linkis.datasourcemanager.common.util.json.Json;
-import org.apache.linkis.metadata.query.common.domain.GenerateSqlInfo;
import org.apache.linkis.metadata.query.common.domain.MetaColumnInfo;
-import org.apache.linkis.metadata.query.common.exception.MetaRuntimeException;
import org.apache.linkis.metadata.query.common.service.AbstractDbMetaService;
import org.apache.linkis.metadata.query.common.service.MetadataConnection;
import org.apache.linkis.metadata.query.service.clickhouse.SqlConnection;
import org.apache.linkis.metadata.query.service.conf.SqlParamsMapper;
-import org.apache.commons.lang3.StringUtils;
-
import org.springframework.stereotype.Component;
import java.sql.SQLException;
@@ -57,10 +53,8 @@ public MetadataConnection getConnection(
Object sqlParamObj = params.get(SqlParamsMapper.PARAM_SQL_EXTRA_PARAMS.getValue());
if (null != sqlParamObj) {
if (!(sqlParamObj instanceof Map)) {
- String paramStr = String.valueOf(sqlParamObj);
- if (StringUtils.isNotBlank(paramStr)) {
- extraParams = Json.fromJson(paramStr, Map.class, String.class, Object.class);
- }
+ extraParams =
+ Json.fromJson(String.valueOf(sqlParamObj), Map.class, String.class, Object.class);
} else {
extraParams = (Map) sqlParamObj;
}
@@ -97,18 +91,4 @@ public List queryColumns(
throw new RuntimeException("Fail to get Sql columns(获取字段列表失败)", e);
}
}
-
- @Override
- public String querySqlConnectUrl(SqlConnection connection) {
- return connection.getSqlConnectUrl();
- }
-
- @Override
- public GenerateSqlInfo queryJdbcSql(SqlConnection connection, String database, String table) {
- try {
- return connection.queryJdbcSql(database, table);
- } catch (Exception e) {
- throw new MetaRuntimeException("Fail to get jdbc sql (获取jdbcSql失败)", e);
- }
- }
}
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/Db2MetaService.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/Db2MetaService.java
index 309cc9c51db..3a405dcbf9c 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/Db2MetaService.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/Db2MetaService.java
@@ -18,9 +18,7 @@
package org.apache.linkis.metadata.query.service;
import org.apache.linkis.datasourcemanager.common.util.json.Json;
-import org.apache.linkis.metadata.query.common.domain.GenerateSqlInfo;
import org.apache.linkis.metadata.query.common.domain.MetaColumnInfo;
-import org.apache.linkis.metadata.query.common.exception.MetaRuntimeException;
import org.apache.linkis.metadata.query.common.service.AbstractDbMetaService;
import org.apache.linkis.metadata.query.common.service.MetadataConnection;
import org.apache.linkis.metadata.query.service.conf.SqlParamsMapper;
@@ -98,18 +96,4 @@ public List queryColumns(
throw new RuntimeException("Fail to get Sql columns(获取字段列表失败)", e);
}
}
-
- @Override
- public String querySqlConnectUrl(SqlConnection connection) {
- return connection.getSqlConnectUrl();
- }
-
- @Override
- public GenerateSqlInfo queryJdbcSql(SqlConnection connection, String database, String table) {
- try {
- return connection.queryJdbcSql(database, table);
- } catch (Exception e) {
- throw new MetaRuntimeException("Fail to get jdbc sql (获取jdbcSql失败)", e);
- }
- }
}
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/DmMetaService.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/DmMetaService.java
index d4e5a530d9c..7b3164c7b49 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/DmMetaService.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/DmMetaService.java
@@ -18,16 +18,12 @@
package org.apache.linkis.metadata.query.service;
import org.apache.linkis.datasourcemanager.common.util.json.Json;
-import org.apache.linkis.metadata.query.common.domain.GenerateSqlInfo;
import org.apache.linkis.metadata.query.common.domain.MetaColumnInfo;
-import org.apache.linkis.metadata.query.common.exception.MetaRuntimeException;
import org.apache.linkis.metadata.query.common.service.AbstractDbMetaService;
import org.apache.linkis.metadata.query.common.service.MetadataConnection;
import org.apache.linkis.metadata.query.service.conf.SqlParamsMapper;
import org.apache.linkis.metadata.query.service.dm.SqlConnection;
-import org.apache.commons.lang3.StringUtils;
-
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
@@ -55,10 +51,8 @@ public MetadataConnection getConnection(
Object sqlParamObj = params.get(SqlParamsMapper.PARAM_SQL_EXTRA_PARAMS.getValue());
if (null != sqlParamObj) {
if (!(sqlParamObj instanceof Map)) {
- String paramStr = String.valueOf(sqlParamObj);
- if (StringUtils.isNotBlank(paramStr)) {
- extraParams = Json.fromJson(paramStr, Map.class, String.class, Object.class);
- }
+ extraParams =
+ Json.fromJson(String.valueOf(sqlParamObj), Map.class, String.class, Object.class);
} else {
extraParams = (Map) sqlParamObj;
}
@@ -95,18 +89,4 @@ public List queryColumns(
throw new RuntimeException("Fail to get Sql columns(获取字段列表失败)", e);
}
}
-
- @Override
- public String querySqlConnectUrl(SqlConnection connection) {
- return connection.getSqlConnectUrl();
- }
-
- @Override
- public GenerateSqlInfo queryJdbcSql(SqlConnection connection, String database, String table) {
- try {
- return connection.queryJdbcSql(database, table);
- } catch (Exception e) {
- throw new MetaRuntimeException("Fail to get jdbc sql (获取jdbcSql失败)", e);
- }
- }
}
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/GreenplumMetaService.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/GreenplumMetaService.java
index 2eb91bdfd16..c5e4ebd53ae 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/GreenplumMetaService.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/GreenplumMetaService.java
@@ -18,9 +18,7 @@
package org.apache.linkis.metadata.query.service;
import org.apache.linkis.datasourcemanager.common.util.json.Json;
-import org.apache.linkis.metadata.query.common.domain.GenerateSqlInfo;
import org.apache.linkis.metadata.query.common.domain.MetaColumnInfo;
-import org.apache.linkis.metadata.query.common.exception.MetaRuntimeException;
import org.apache.linkis.metadata.query.common.service.AbstractDbMetaService;
import org.apache.linkis.metadata.query.common.service.MetadataConnection;
import org.apache.linkis.metadata.query.service.conf.SqlParamsMapper;
@@ -103,18 +101,4 @@ public List queryColumns(SqlConnection connection, String schema
throw new RuntimeException("Fail to get Sql columns(获取字段列表失败)", e);
}
}
-
- @Override
- public String querySqlConnectUrl(SqlConnection connection) {
- return connection.getSqlConnectUrl();
- }
-
- @Override
- public GenerateSqlInfo queryJdbcSql(SqlConnection connection, String database, String table) {
- try {
- return connection.queryJdbcSql(database, table);
- } catch (Exception e) {
- throw new MetaRuntimeException("Fail to get jdbc sql (获取jdbcSql失败)", e);
- }
- }
}
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/KingbaseMetaService.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/KingbaseMetaService.java
index f5ea7fda5d6..15dde4787d8 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/KingbaseMetaService.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/KingbaseMetaService.java
@@ -18,9 +18,7 @@
package org.apache.linkis.metadata.query.service;
import org.apache.linkis.datasourcemanager.common.util.json.Json;
-import org.apache.linkis.metadata.query.common.domain.GenerateSqlInfo;
import org.apache.linkis.metadata.query.common.domain.MetaColumnInfo;
-import org.apache.linkis.metadata.query.common.exception.MetaRuntimeException;
import org.apache.linkis.metadata.query.common.service.AbstractDbMetaService;
import org.apache.linkis.metadata.query.common.service.MetadataConnection;
import org.apache.linkis.metadata.query.service.conf.SqlParamsMapper;
@@ -96,18 +94,4 @@ public List queryColumns(
throw new RuntimeException("Fail to get Sql columns(获取字段列表失败)", e);
}
}
-
- @Override
- public String querySqlConnectUrl(SqlConnection connection) {
- return connection.getSqlConnectUrl();
- }
-
- @Override
- public GenerateSqlInfo queryJdbcSql(SqlConnection connection, String database, String table) {
- try {
- return connection.queryJdbcSql(database, table);
- } catch (Exception e) {
- throw new MetaRuntimeException("Fail to get jdbc sql (获取jdbcSql失败)", e);
- }
- }
}
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/MysqlMetaService.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/MysqlMetaService.java
index 958245fc88b..1ccbb16688d 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/MysqlMetaService.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/MysqlMetaService.java
@@ -18,9 +18,7 @@
package org.apache.linkis.metadata.query.service;
import org.apache.linkis.datasourcemanager.common.util.json.Json;
-import org.apache.linkis.metadata.query.common.domain.GenerateSqlInfo;
import org.apache.linkis.metadata.query.common.domain.MetaColumnInfo;
-import org.apache.linkis.metadata.query.common.exception.MetaRuntimeException;
import org.apache.linkis.metadata.query.common.service.AbstractDbMetaService;
import org.apache.linkis.metadata.query.common.service.MetadataConnection;
import org.apache.linkis.metadata.query.service.conf.SqlParamsMapper;
@@ -96,18 +94,4 @@ public List queryColumns(
throw new RuntimeException("Fail to get Sql columns(获取字段列表失败)", e);
}
}
-
- @Override
- public String querySqlConnectUrl(SqlConnection connection) {
- return connection.getSqlConnectUrl();
- }
-
- @Override
- public GenerateSqlInfo queryJdbcSql(SqlConnection connection, String database, String table) {
- try {
- return connection.queryJdbcSql(database, table);
- } catch (Exception e) {
- throw new MetaRuntimeException("Fail to get jdbc sql (获取jdbcSql失败)", e);
- }
- }
}
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/OracleMetaService.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/OracleMetaService.java
index e048d71bdac..84bb057e99e 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/OracleMetaService.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/OracleMetaService.java
@@ -18,9 +18,7 @@
package org.apache.linkis.metadata.query.service;
import org.apache.linkis.datasourcemanager.common.util.json.Json;
-import org.apache.linkis.metadata.query.common.domain.GenerateSqlInfo;
import org.apache.linkis.metadata.query.common.domain.MetaColumnInfo;
-import org.apache.linkis.metadata.query.common.exception.MetaRuntimeException;
import org.apache.linkis.metadata.query.common.service.AbstractDbMetaService;
import org.apache.linkis.metadata.query.common.service.MetadataConnection;
import org.apache.linkis.metadata.query.service.conf.SqlParamsMapper;
@@ -110,18 +108,4 @@ public List queryColumns(
throw new RuntimeException("Fail to get Sql columns(获取字段列表失败)", e);
}
}
-
- @Override
- public String querySqlConnectUrl(SqlConnection connection) {
- return connection.getSqlConnectUrl();
- }
-
- @Override
- public GenerateSqlInfo queryJdbcSql(SqlConnection connection, String database, String table) {
- try {
- return connection.queryJdbcSql(database, table);
- } catch (Exception e) {
- throw new MetaRuntimeException("Fail to get jdbc sql (获取jdbcSql失败)", e);
- }
- }
}
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/PostgresqlMetaService.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/PostgresqlMetaService.java
index e9f292e136d..13145eeef1e 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/PostgresqlMetaService.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/PostgresqlMetaService.java
@@ -18,9 +18,7 @@
package org.apache.linkis.metadata.query.service;
import org.apache.linkis.datasourcemanager.common.util.json.Json;
-import org.apache.linkis.metadata.query.common.domain.GenerateSqlInfo;
import org.apache.linkis.metadata.query.common.domain.MetaColumnInfo;
-import org.apache.linkis.metadata.query.common.exception.MetaRuntimeException;
import org.apache.linkis.metadata.query.common.service.AbstractDbMetaService;
import org.apache.linkis.metadata.query.common.service.MetadataConnection;
import org.apache.linkis.metadata.query.service.conf.SqlParamsMapper;
@@ -105,18 +103,4 @@ public List queryColumns(
throw new RuntimeException("Fail to get Sql columns(获取字段列表失败)", e);
}
}
-
- @Override
- public String querySqlConnectUrl(SqlConnection connection) {
- return connection.getSqlConnectUrl();
- }
-
- @Override
- public GenerateSqlInfo queryJdbcSql(SqlConnection connection, String database, String table) {
- try {
- return connection.queryJdbcSql(database, table);
- } catch (Exception e) {
- throw new MetaRuntimeException("Fail to get jdbc sql (获取jdbcSql失败)", e);
- }
- }
}
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/SqlserverMetaService.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/SqlserverMetaService.java
index f9ddd99e84c..9a2001cdadc 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/SqlserverMetaService.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/SqlserverMetaService.java
@@ -18,9 +18,7 @@
package org.apache.linkis.metadata.query.service;
import org.apache.linkis.datasourcemanager.common.util.json.Json;
-import org.apache.linkis.metadata.query.common.domain.GenerateSqlInfo;
import org.apache.linkis.metadata.query.common.domain.MetaColumnInfo;
-import org.apache.linkis.metadata.query.common.exception.MetaRuntimeException;
import org.apache.linkis.metadata.query.common.service.AbstractDbMetaService;
import org.apache.linkis.metadata.query.common.service.MetadataConnection;
import org.apache.linkis.metadata.query.service.conf.SqlParamsMapper;
@@ -91,18 +89,4 @@ public List queryColumns(
throw new RuntimeException("Fail to get Sql columns(获取字段列表失败)", e);
}
}
-
- @Override
- public String querySqlConnectUrl(SqlConnection connection) {
- return connection.getSqlConnectUrl();
- }
-
- @Override
- public GenerateSqlInfo queryJdbcSql(SqlConnection connection, String database, String table) {
- try {
- return connection.queryJdbcSql(database, table);
- } catch (Exception e) {
- throw new MetaRuntimeException("Fail to get jdbc sql (获取jdbcSql失败)", e);
- }
- }
}
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/clickhouse/SqlConnection.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/clickhouse/SqlConnection.java
index fb65235e722..50e6a0f3fcc 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/clickhouse/SqlConnection.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/clickhouse/SqlConnection.java
@@ -18,11 +18,13 @@
package org.apache.linkis.metadata.query.service.clickhouse;
import org.apache.linkis.common.conf.CommonVars;
+import org.apache.linkis.common.utils.AESUtils;
import org.apache.linkis.metadata.query.common.domain.MetaColumnInfo;
-import org.apache.linkis.metadata.query.service.AbstractSqlConnection;
import org.apache.commons.collections.MapUtils;
+import java.io.Closeable;
+import java.io.IOException;
import java.sql.*;
import java.util.ArrayList;
import java.util.List;
@@ -32,7 +34,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class SqlConnection extends AbstractSqlConnection {
+public class SqlConnection implements Closeable {
private static final Logger LOG = LoggerFactory.getLogger(SqlConnection.class);
@@ -49,6 +51,10 @@ public class SqlConnection extends AbstractSqlConnection {
private static final CommonVars SQL_SOCKET_TIMEOUT =
CommonVars.apply("wds.linkis.server.mdm.service.sql.socket.timeout", 6000);
+ private Connection conn;
+
+ private ConnectMessage connectMessage;
+
public SqlConnection(
String host,
Integer port,
@@ -57,9 +63,11 @@ public SqlConnection(
String database,
Map extraParams)
throws ClassNotFoundException, SQLException {
- super(host, port, username, password, database, extraParams);
+ connectMessage = new ConnectMessage(host, port, username, password, extraParams);
connectMessage.extraParams.put("connectTimeout", SQL_CONNECT_TIMEOUT.getValue());
connectMessage.extraParams.put("socketTimeout", SQL_SOCKET_TIMEOUT.getValue());
+ conn = getDBConnection(connectMessage, database);
+ // Try to create statement
+ Statement statement = conn.createStatement();
+ statement.close();
}
public List getAllDatabases() throws SQLException {
@@ -102,7 +110,7 @@ public List getColumns(String database, String table)
ResultSet rs = null;
ResultSetMetaData meta = null;
try {
- List primaryKeys = getPrimaryKeys(table);
+ List primaryKeys = getPrimaryKeys(getDBConnection(connectMessage, database), table);
ps = conn.prepareStatement(columnSql);
rs = ps.executeQuery();
meta = rs.getMetaData();
@@ -110,8 +118,6 @@ public List getColumns(String database, String table)
for (int i = 1; i < columnCount + 1; i++) {
MetaColumnInfo info = new MetaColumnInfo();
info.setIndex(i);
- info.setLength(meta.getColumnDisplaySize(i));
- info.setNullable((meta.isNullable(i) == ResultSetMetaData.columnNullable));
info.setName(meta.getColumnName(i));
info.setType(meta.getColumnTypeName(i));
if (primaryKeys.contains(meta.getColumnName(i))) {
@@ -125,6 +131,31 @@ public List getColumns(String database, String table)
return columns;
}
+ /**
+ * Get primary keys
+ *
+ * @param connection connection
+ * @param table table name
+ * @return
+ * @throws SQLException
+ */
+ private List getPrimaryKeys(Connection connection, String table) throws SQLException {
+ ResultSet rs = null;
+ List primaryKeys = new ArrayList<>();
+ try {
+ DatabaseMetaData dbMeta = connection.getMetaData();
+ rs = dbMeta.getPrimaryKeys(null, null, table);
+ while (rs.next()) {
+ primaryKeys.add(rs.getString("column_name"));
+ }
+ return primaryKeys;
+ } finally {
+ if (null != rs) {
+ closeResource(connection, null, rs);
+ }
+ }
+ }
+
/**
* @param connectMessage
* @param database
@@ -144,10 +175,67 @@ public Connection getDBConnection(ConnectMessage connectMessage, String database
.collect(Collectors.joining("&"));
url += "?" + extraParamString;
}
- return DriverManager.getConnection(url, connectMessage.username, connectMessage.password);
+ return DriverManager.getConnection(
+ url, connectMessage.username, AESUtils.isDecryptByConf(connectMessage.password));
}
public String getSqlConnectUrl() {
return SQL_CONNECT_URL.getValue();
}
+
+ /**
+ * close database resource
+ *
+ * @param connection connection
+ * @param statement statement
+ * @param resultSet result set
+ */
+ private void closeResource(Connection connection, Statement statement, ResultSet resultSet) {
+ try {
+ if (null != resultSet && !resultSet.isClosed()) {
+ resultSet.close();
+ }
+ if (null != statement && !statement.isClosed()) {
+ statement.close();
+ }
+ if (null != connection && !connection.isClosed()) {
+ connection.close();
+ }
+ } catch (SQLException e) {
+ LOG.warn("Fail to release resource [" + e.getMessage() + "]", e);
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ closeResource(conn, null, null);
+ }
+
+ /** Connect message */
+ private static class ConnectMessage {
+ private String host;
+
+ private Integer port;
+
+ private String username;
+
+ private String password;
+
+ private Map extraParams;
+
+ public ConnectMessage(
+ String host,
+ Integer port,
+ String username,
+ String password,
+ Map extraParams) {
+ this.host = host;
+ this.port = port;
+ this.username = username;
+ this.password = password;
+ this.extraParams = extraParams;
+ this.extraParams.put("connectTimeout", SQL_CONNECT_TIMEOUT.getValue());
+ this.extraParams.put("socketTimeout", SQL_SOCKET_TIMEOUT.getValue());
+ }
+ }
}
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/db2/SqlConnection.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/db2/SqlConnection.java
index 09201d58d8a..e1b4afd0112 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/db2/SqlConnection.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/db2/SqlConnection.java
@@ -18,12 +18,13 @@
package org.apache.linkis.metadata.query.service.db2;
import org.apache.linkis.common.conf.CommonVars;
-import org.apache.linkis.common.exception.LinkisSecurityException;
-import org.apache.linkis.metadata.query.service.AbstractSqlConnection;
+import org.apache.linkis.common.utils.AESUtils;
+import org.apache.linkis.metadata.query.common.domain.MetaColumnInfo;
-import org.apache.commons.collections.MapUtils;
import org.apache.logging.log4j.util.Strings;
+import java.io.Closeable;
+import java.io.IOException;
import java.sql.*;
import java.util.ArrayList;
import java.util.List;
@@ -33,7 +34,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class SqlConnection extends AbstractSqlConnection {
+public class SqlConnection implements Closeable {
private static final Logger LOG = LoggerFactory.getLogger(SqlConnection.class);
@@ -43,9 +44,9 @@ public class SqlConnection extends AbstractSqlConnection {
private static final CommonVars SQL_CONNECT_URL =
CommonVars.apply("wds.linkis.server.mdm.service.db2.url", "jdbc:db2://%s:%s/%s");
- /** clientRerouteServerListJNDIName */
- private static final CommonVars DB2_SENSITIVE_PARAMS =
- CommonVars.apply("linkis.db2.sensitive.params", "clientRerouteServerListJNDIName");
+ private Connection conn;
+
+ private ConnectMessage connectMessage;
public SqlConnection(
String host,
@@ -55,13 +56,14 @@ public SqlConnection(
String database,
Map extraParams)
throws ClassNotFoundException, SQLException {
- super(
- host,
- port,
- username,
- password,
- Strings.isBlank(database) ? "SAMPLE" : database,
- extraParams);
+ if (Strings.isBlank(database)) {
+ database = "SAMPLE";
+ }
+ connectMessage = new ConnectMessage(host, port, username, password, extraParams);
+ conn = getDBConnection(connectMessage, database);
+ // Try to create statement
+ Statement statement = conn.createStatement();
+ statement.close();
}
public List<String> getAllDatabases() throws SQLException {
@@ -72,6 +74,8 @@ public List<String> getAllDatabases() throws SQLException {
try {
stmt = conn.createStatement();
rs = stmt.executeQuery("list database directory");
+ // rs = stmt.executeQuery("SELECT * FROM SYSIBMADM.APPLICATIONS WITH UR");
+ // rs = stmt.executeQuery("select * from syscat.tables");
while (rs.next()) {
dataBaseName.add(rs.getString(1));
}
@@ -101,32 +105,141 @@ public List<String> getAllTables(String tabschema) throws SQLException {
}
}
+ public List<MetaColumnInfo> getColumns(String schemaname, String table)
+ throws SQLException, ClassNotFoundException {
+ List<MetaColumnInfo> columns = new ArrayList<>();
+ // String columnSql = "SELECT * FROM syscat.columns WHERE TABSCHEMA = '" + schemaname
+ // + "' AND TABNAME = '" + table + "'";
+ String columnSql = "SELECT * FROM " + schemaname + "." + table + " WHERE 1 = 2";
+ PreparedStatement ps = null;
+ ResultSet rs = null;
+ ResultSetMetaData meta = null;
+ try {
+ // List<String> primaryKeys = getPrimaryKeys(getDBConnection(connectMessage,
+ // schemaname), table);
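+ // Fetch the primary-key column names first so matching columns can be flagged below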
+ List<String> primaryKeys = getPrimaryKeys(conn, table);
+ ps = conn.prepareStatement(columnSql);
+ rs = ps.executeQuery();
+ meta = rs.getMetaData();
+ int columnCount = meta.getColumnCount();
+ for (int i = 1; i < columnCount + 1; i++) {
+ MetaColumnInfo info = new MetaColumnInfo();
+ info.setIndex(i);
+ info.setName(meta.getColumnName(i));
+ info.setType(meta.getColumnTypeName(i));
+ if (primaryKeys.contains(meta.getColumnName(i))) {
+ info.setPrimaryKey(true);
+ }
+ columns.add(info);
+ }
+ } finally {
+ closeResource(null, ps, rs);
+ }
+ return columns;
+ }
+
+ /**
+ * Get primary keys
+ *
+ * @param connection connection
+ * @param table table name
+ * @return
+ * @throws SQLException
+ */
+ private List<String> getPrimaryKeys(Connection connection, String table) throws SQLException {
+ ResultSet rs = null;
+ List<String> primaryKeys = new ArrayList<>();
+ try {
+ DatabaseMetaData dbMeta = connection.getMetaData();
+ rs = dbMeta.getPrimaryKeys(null, null, table);
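+ // One result row per primary-key column; collect the reported column names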
+ while (rs.next()) {
+ primaryKeys.add(rs.getString("column_name"));
+ }
+ return primaryKeys;
+ } finally {
+ if (null != rs) {
+ rs.close();
+ }
+ // if(null != rs){
+ // closeResource(connection, null, rs);
+ // }
+ }
+ }
+
+ /**
+ * close database resource
+ *
+ * @param connection connection
+ * @param statement statement
+ * @param resultSet result set
+ */
+ private void closeResource(Connection connection, Statement statement, ResultSet resultSet) {
+ try {
+ if (null != resultSet && !resultSet.isClosed()) {
+ resultSet.close();
+ }
+ if (null != statement && !statement.isClosed()) {
+ statement.close();
+ }
+ if (null != connection && !connection.isClosed()) {
+ connection.close();
+ }
+ } catch (SQLException e) {
+ LOG.warn("Fail to release resource [" + e.getMessage() + "]", e);
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ closeResource(conn, null, null);
+ }
+
/**
* @param connectMessage
* @param database
* @return
* @throws ClassNotFoundException
*/
- public Connection getDBConnection(ConnectMessage connectMessage, String database)
+ private Connection getDBConnection(ConnectMessage connectMessage, String database)
throws ClassNotFoundException, SQLException {
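+ // Join extraParams into a key=value&key=value query string; it is appended to the URL below when non-empty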
+ String extraParamString =
+ connectMessage.extraParams.entrySet().stream()
+ .map(e -> String.join("=", e.getKey(), String.valueOf(e.getValue())))
+ .collect(Collectors.joining("&"));
Class.forName(SQL_DRIVER_CLASS.getValue());
String url =
String.format(
SQL_CONNECT_URL.getValue(), connectMessage.host, connectMessage.port, database);
- if (MapUtils.isNotEmpty(connectMessage.extraParams)) {
- String extraParamString =
- connectMessage.extraParams.entrySet().stream()
- .map(e -> String.join("=", e.getKey(), String.valueOf(e.getValue())))
- .collect(Collectors.joining("&"));
+ if (!connectMessage.extraParams.isEmpty()) {
url += "?" + extraParamString;
}
- if (url.toLowerCase().contains(DB2_SENSITIVE_PARAMS.getValue().toLowerCase())) {
- throw new LinkisSecurityException(35000, "Invalid db2 connection params.");
- }
- return DriverManager.getConnection(url, connectMessage.username, connectMessage.password);
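+ // isDecryptByConf is expected to decrypt an AES-encrypted password only when decryption is enabled by configuration, otherwise returning it unchanged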
+ return DriverManager.getConnection(
+ url, connectMessage.username, AESUtils.isDecryptByConf(connectMessage.password));
}
- public String getSqlConnectUrl() {
- return SQL_CONNECT_URL.getValue();
+ /** Connect message */
+ private static class ConnectMessage {
+ private String host;
+
+ private Integer port;
+
+ private String username;
+
+ private String password;
+
+ private Map<String, Object> extraParams;
+
+ public ConnectMessage(
+ String host,
+ Integer port,
+ String username,
+ String password,
+ Map<String, Object> extraParams) {
+ this.host = host;
+ this.port = port;
+ this.username = username;
+ this.password = password;
+ this.extraParams = extraParams;
+ }
}
}
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/dm/SqlConnection.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/dm/SqlConnection.java
index cb4e7dd560a..eacdfafe38f 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/dm/SqlConnection.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/dm/SqlConnection.java
@@ -18,12 +18,13 @@
package org.apache.linkis.metadata.query.service.dm;
import org.apache.linkis.common.conf.CommonVars;
+import org.apache.linkis.common.utils.AESUtils;
import org.apache.linkis.metadata.query.common.domain.MetaColumnInfo;
-import org.apache.linkis.metadata.query.service.AbstractSqlConnection;
-import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang3.StringUtils;
+import java.io.Closeable;
+import java.io.IOException;
import java.sql.*;
import java.util.*;
import java.util.stream.Collectors;
@@ -31,7 +32,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class SqlConnection extends AbstractSqlConnection {
+public class SqlConnection implements Closeable {
private static final Logger LOG = LoggerFactory.getLogger(SqlConnection.class);
@@ -41,6 +42,10 @@ public class SqlConnection extends AbstractSqlConnection {
private static final CommonVars<String> SQL_CONNECT_URL =
CommonVars.apply("wds.linkis.server.mdm.service.dameng.url", "jdbc:dm://%s:%s/%s");
+ private Connection conn;
+
+ private ConnectMessage connectMessage;
+
public SqlConnection(
String host,
Integer port,
@@ -49,7 +54,11 @@ public SqlConnection(
String database,
Map<String, Object> extraParams)
throws ClassNotFoundException, SQLException {
- super(host, port, username, password, database, extraParams);
+ connectMessage = new ConnectMessage(host, port, username, password, extraParams);
+ conn = getDBConnection(connectMessage, database);
+ // Try to create statement
+ Statement statement = conn.createStatement();
+ statement.close();
}
public List<String> getAllDatabases() throws SQLException {
@@ -104,7 +113,6 @@ public List<MetaColumnInfo> getColumns(String database, String table)
MetaColumnInfo info = new MetaColumnInfo();
info.setIndex(i);
info.setLength(meta.getColumnDisplaySize(i));
- info.setNullable((meta.isNullable(i) == ResultSetMetaData.columnNullable));
info.setName(meta.getColumnName(i));
info.setType(meta.getColumnTypeName(i));
if (primaryKeys.contains(meta.getColumnName(i))) {
@@ -124,15 +132,22 @@ public List<MetaColumnInfo> getColumns(String database, String table)
return columns;
}
- private List<String> getPrimaryKeys(String schema, String table) throws SQLException {
+ private List<String> getPrimaryKeys(
+ /*Connection connection, */ String schema, String table) throws SQLException {
ResultSet rs = null;
+ List<String> primaryKeys = new ArrayList<>();
+ // try {
DatabaseMetaData dbMeta = conn.getMetaData();
rs = dbMeta.getPrimaryKeys(null, schema, table);
while (rs.next()) {
primaryKeys.add(rs.getString("COLUMN_NAME"));
}
return primaryKeys;
+ /*}finally{
+ if(null != rs){
+ closeResource(connection, null, rs);
+ }
+ }*/
}
/**
* Get Column Comment
@@ -153,29 +168,59 @@ private Map<String, String> getColumnComment(String schema, String table) throws
return columnComment;
}
+ /**
+ * close database resource
+ *
+ * @param connection connection
+ * @param statement statement
+ * @param resultSet result set
+ */
+ private void closeResource(Connection connection, Statement statement, ResultSet resultSet) {
+ try {
+ if (null != resultSet && !resultSet.isClosed()) {
+ resultSet.close();
+ }
+ if (null != statement && !statement.isClosed()) {
+ statement.close();
+ }
+ if (null != connection && !connection.isClosed()) {
+ connection.close();
+ }
+ } catch (SQLException e) {
+ LOG.warn("Fail to release resource [" + e.getMessage() + "]", e);
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ closeResource(conn, null, null);
+ }
+
/**
* @param connectMessage
* @param database
* @return
* @throws ClassNotFoundException
*/
- public Connection getDBConnection(ConnectMessage connectMessage, String database)
+ private Connection getDBConnection(ConnectMessage connectMessage, String database)
throws ClassNotFoundException, SQLException {
+ String extraParamString =
+ connectMessage.extraParams.entrySet().stream()
+ .map(e -> String.join("=", e.getKey(), String.valueOf(e.getValue())))
+ .collect(Collectors.joining("&"));
Class.forName(SQL_DRIVER_CLASS.getValue());
String url =
String.format(
SQL_CONNECT_URL.getValue(), connectMessage.host, connectMessage.port, database);
- if (MapUtils.isNotEmpty(connectMessage.extraParams)) {
- String extraParamString =
- connectMessage.extraParams.entrySet().stream()
- .map(e -> String.join("=", e.getKey(), String.valueOf(e.getValue())))
- .collect(Collectors.joining("&"));
+ if (!connectMessage.extraParams.isEmpty()) {
url += "?" + extraParamString;
}
try {
+ // return DriverManager.getConnection(url, connectMessage.username,
+ // connectMessage.password);
Properties prop = new Properties();
prop.put("user", connectMessage.username);
- prop.put("password", connectMessage.password);
+ prop.put("password", AESUtils.isDecryptByConf(connectMessage.password));
prop.put("remarksReporting", "true");
return DriverManager.getConnection(url, prop);
} catch (Exception e) {
@@ -184,30 +229,29 @@ public Connection getDBConnection(ConnectMessage connectMessage, String database
}
}
- public String getSqlConnectUrl() {
- return SQL_CONNECT_URL.getValue();
- }
+ /** Connect message */
+ private static class ConnectMessage {
+ private String host;
- @Override
- public String generateJdbcDdlSql(String database, String table) {
- String columnSql =
- String.format(
- "SELECT DBMS_METADATA.GET_DDL('TABLE', '%s', '%s') AS DDL FROM DUAL ",
- table, database);
- PreparedStatement ps = null;
- ResultSet rs = null;
- String ddl = "";
- try {
- ps = conn.prepareStatement(columnSql);
- rs = ps.executeQuery();
- if (rs.next()) {
- ddl = rs.getString("DDL");
- }
- } catch (SQLException e) {
- throw new RuntimeException(e);
- } finally {
- closeResource(null, ps, rs);
+ private Integer port;
+
+ private String username;
+
+ private String password;
+
+ private Map<String, Object> extraParams;
+
+ public ConnectMessage(
+ String host,
+ Integer port,
+ String username,
+ String password,
+ Map<String, Object> extraParams) {
+ this.host = host;
+ this.port = port;
+ this.username = username;
+ this.password = password;
+ this.extraParams = extraParams;
}
- return ddl;
}
}
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/greenplum/SqlConnection.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/greenplum/SqlConnection.java
index 494511a4a44..938c343d5bf 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/greenplum/SqlConnection.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/greenplum/SqlConnection.java
@@ -18,11 +18,13 @@
package org.apache.linkis.metadata.query.service.greenplum;
import org.apache.linkis.common.conf.CommonVars;
-import org.apache.linkis.metadata.query.service.AbstractSqlConnection;
+import org.apache.linkis.common.utils.AESUtils;
+import org.apache.linkis.metadata.query.common.domain.MetaColumnInfo;
-import org.apache.commons.collections.MapUtils;
import org.apache.logging.log4j.util.Strings;
+import java.io.Closeable;
+import java.io.IOException;
import java.sql.*;
import java.util.ArrayList;
import java.util.List;
@@ -32,7 +34,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class SqlConnection extends AbstractSqlConnection {
+public class SqlConnection implements Closeable {
private static final Logger LOG = LoggerFactory.getLogger(SqlConnection.class);
private static final CommonVars<String> SQL_DRIVER_CLASS =
@@ -44,6 +46,10 @@ public class SqlConnection extends AbstractSqlConnection {
"wds.linkis.server.mdm.service.greenplum.url",
"jdbc:pivotal:greenplum://%s:%s;DatabaseName=%s");
+ private Connection conn;
+
+ private ConnectMessage connectMessage;
+
public SqlConnection(
String host,
Integer port,
@@ -52,7 +58,14 @@ public SqlConnection(
String database,
Map<String, Object> extraParams)
throws ClassNotFoundException, SQLException {
- super(host, port, username, password, Strings.isBlank(database) ? "" : database, extraParams);
+ connectMessage = new ConnectMessage(host, port, username, password, extraParams);
+ if (Strings.isBlank(database)) {
+ database = "";
+ }
+ conn = getDBConnection(connectMessage, database);
+ // Try to create statement
+ Statement statement = conn.createStatement();
+ statement.close();
}
public List<String> getAllDatabases() throws SQLException {
@@ -80,6 +93,8 @@ public List<String> getAllTables(String schemaname) throws SQLException {
rs =
stmt.executeQuery(
"SELECT tablename FROM pg_tables where schemaname = '" + schemaname + "'");
+ // rs = stmt.executeQuery("SELECT table_name FROM
+ // information_schema.tables");
while (rs.next()) {
tableNames.add(rs.getString(1));
}
@@ -89,29 +104,136 @@ public List<String> getAllTables(String schemaname) throws SQLException {
}
}
+ public List<MetaColumnInfo> getColumns(String schemaname, String table)
+ throws SQLException, ClassNotFoundException {
+ List<MetaColumnInfo> columns = new ArrayList<>();
+ String columnSql = "SELECT * FROM " + schemaname + "." + table + " WHERE 1 = 2";
+ PreparedStatement ps = null;
+ ResultSet rs = null;
+ ResultSetMetaData meta;
+ try {
+ List<String> primaryKeys =
+ getPrimaryKeys(/*getDBConnection(connectMessage, schemaname), */ table);
+ ps = conn.prepareStatement(columnSql);
+ rs = ps.executeQuery();
+ meta = rs.getMetaData();
+ int columnCount = meta.getColumnCount();
+ for (int i = 1; i < columnCount + 1; i++) {
+ MetaColumnInfo info = new MetaColumnInfo();
+ info.setIndex(i);
+ info.setName(meta.getColumnName(i));
+ info.setType(meta.getColumnTypeName(i));
+ if (primaryKeys.contains(meta.getColumnName(i))) {
+ info.setPrimaryKey(true);
+ }
+ columns.add(info);
+ }
+ } finally {
+ closeResource(null, ps, rs);
+ }
+ return columns;
+ }
+
+ /**
+ * Get primary keys
+ *
+ * @param connection connection
+ * @param table table name
+ * @return
+ * @throws SQLException
+ */
+ private List<String> getPrimaryKeys(
+ /*Connection connection, */ String table) throws SQLException {
+ ResultSet rs = null;
+ List<String> primaryKeys = new ArrayList<>();
+ // try {
+ DatabaseMetaData dbMeta = conn.getMetaData();
+ rs = dbMeta.getPrimaryKeys(null, null, table);
+ while (rs.next()) {
+ primaryKeys.add(rs.getString("column_name"));
+ }
+ return primaryKeys;
+ /*}finally{
+ if(null != rs){
+ closeResource(connection, null, rs);
+ }
+ }*/
+ }
+
+ /**
+ * close database resource
+ *
+ * @param connection connection
+ * @param statement statement
+ * @param resultSet result set
+ */
+ private void closeResource(Connection connection, Statement statement, ResultSet resultSet) {
+ try {
+ if (null != resultSet && !resultSet.isClosed()) {
+ resultSet.close();
+ }
+ if (null != statement && !statement.isClosed()) {
+ statement.close();
+ }
+ if (null != connection && !connection.isClosed()) {
+ connection.close();
+ }
+ } catch (SQLException e) {
+ LOG.warn("Fail to release resource [" + e.getMessage() + "]", e);
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ closeResource(conn, null, null);
+ }
+
/**
* @param connectMessage
* @param database
* @return
* @throws ClassNotFoundException
*/
- public Connection getDBConnection(ConnectMessage connectMessage, String database)
+ private Connection getDBConnection(ConnectMessage connectMessage, String database)
throws ClassNotFoundException, SQLException {
+ String extraParamString =
+ connectMessage.extraParams.entrySet().stream()
+ .map(e -> String.join("=", e.getKey(), String.valueOf(e.getValue())))
+ .collect(Collectors.joining("&"));
Class.forName(SQL_DRIVER_CLASS.getValue());
String url =
String.format(
SQL_CONNECT_URL.getValue(), connectMessage.host, connectMessage.port, database);
- if (MapUtils.isNotEmpty(connectMessage.extraParams)) {
- String extraParamString =
- connectMessage.extraParams.entrySet().stream()
- .map(e -> String.join("=", e.getKey(), String.valueOf(e.getValue())))
- .collect(Collectors.joining("&"));
+ if (!connectMessage.extraParams.isEmpty()) {
url += "?" + extraParamString;
}
- return DriverManager.getConnection(url, connectMessage.username, connectMessage.password);
+ return DriverManager.getConnection(
+ url, connectMessage.username, AESUtils.isDecryptByConf(connectMessage.password));
}
- public String getSqlConnectUrl() {
- return SQL_CONNECT_URL.getValue();
+ /** Connect message */
+ private static class ConnectMessage {
+ private String host;
+
+ private Integer port;
+
+ private String username;
+
+ private String password;
+
+ private Map<String, Object> extraParams;
+
+ public ConnectMessage(
+ String host,
+ Integer port,
+ String username,
+ String password,
+ Map<String, Object> extraParams) {
+ this.host = host;
+ this.port = port;
+ this.username = username;
+ this.password = password;
+ this.extraParams = extraParams;
+ }
}
}
diff --git a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/kingbase/SqlConnection.java b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/kingbase/SqlConnection.java
index f49c43a8ecb..a753f41796a 100644
--- a/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/kingbase/SqlConnection.java
+++ b/linkis-public-enhancements/linkis-datasource/linkis-datasource-manager/service/jdbc/src/main/java/org/apache/linkis/metadata/query/service/kingbase/SqlConnection.java
@@ -18,11 +18,11 @@
package org.apache.linkis.metadata.query.service.kingbase;
import org.apache.linkis.common.conf.CommonVars;
+import org.apache.linkis.common.utils.AESUtils;
import org.apache.linkis.metadata.query.common.domain.MetaColumnInfo;
-import org.apache.linkis.metadata.query.service.AbstractSqlConnection;
-
-import org.apache.commons.collections.MapUtils;
+import java.io.Closeable;
+import java.io.IOException;
import java.sql.*;
import java.util.ArrayList;
import java.util.List;
@@ -32,7 +32,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class SqlConnection extends AbstractSqlConnection {
+public class SqlConnection implements Closeable {
private static final Logger LOG = LoggerFactory.getLogger(SqlConnection.class);
@@ -44,6 +44,10 @@ public class SqlConnection extends AbstractSqlConnection {
"wds.linkis.server.mdm.service.kingbase.url",
"jdbc:kingbase8://%s:%s/%s?zeroDateTimeBehavior=convertToNull&useUnicode=true&characterEncoding=utf-8");
+ private Connection conn;
+
+ private ConnectMessage connectMessage;
+
public SqlConnection(
String host,
Integer port,
@@ -52,7 +56,11 @@ public SqlConnection(
String database,
Map<String, Object> extraParams)
throws ClassNotFoundException, SQLException {
- super(host, port, username, password, database, extraParams);
+ connectMessage = new ConnectMessage(host, port, username, password, extraParams);
+ conn = getDBConnection(connectMessage, database);
+ // Try to create statement
+ Statement statement = conn.createStatement();
+ statement.close();
}
public List<String> getAllDatabases() throws SQLException {
@@ -69,6 +77,8 @@ public List<String> getAllDatabases() throws SQLException {
closeResource(null, stmt, rs);
}
return dataBaseName;
+ // throw new UnsupportedOperationException("The Kingbase database list cannot be
+ // fetched like MySQL's 'show databases'; it has to be retrieved from somewhere else");
}
public List