Flink Development Guide


Flink 1.13.5 tips

scala_flink135
D:\ws\GitHub\p1\scala_flink135
<flink.version>1.13.5</flink.version>

<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements.  See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.  The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License.  You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied.  See the License for the
specific language governing permissions and limitations
under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
	<modelVersion>4.0.0</modelVersion>

	<groupId>com.cw</groupId>
	<artifactId>scala_flink135</artifactId>
	<version>1.0-SNAPSHOT</version>
	<packaging>jar</packaging>

	<name>Flink Quickstart Job</name>

	<repositories>
		<repository>
			<id>apache.snapshots</id>
			<name>Apache Development Snapshot Repository</name>
			<url>https://repository.apache.org/content/repositories/snapshots/</url>
			<releases>
				<enabled>false</enabled>
			</releases>
			<snapshots>
				<enabled>true</enabled>
			</snapshots>
		</repository>
	</repositories>

	<properties>
		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
		<flink.version>1.13.5</flink.version>
		<target.java.version>1.8</target.java.version>
		<scala.binary.version>2.11</scala.binary.version>
		<scala.version>2.11.12</scala.version>
		<log4j.version>2.16.0</log4j.version>
	</properties>

	<dependencies>
		<!-- Apache Flink dependencies -->
		<!-- Scope note: these core Flink dependencies are kept at "compile" during development
		so the job can run directly inside the IDE; switch them to "provided" for production
		packaging so they are not bundled into the JAR file.
		dev:	compile
		pro:	provided
		-->
		<dependency>
			<groupId>org.apache.flink</groupId>
			<artifactId>flink-scala_${scala.binary.version}</artifactId>
			<version>${flink.version}</version>
			<scope>compile</scope>
		</dependency>
		<dependency>
			<groupId>org.apache.flink</groupId>
			<artifactId>flink-streaming-scala_${scala.binary.version}</artifactId>
			<version>${flink.version}</version>
			<scope>compile</scope>
		</dependency>
		<dependency>
			<groupId>org.apache.flink</groupId>
			<artifactId>flink-clients_${scala.binary.version}</artifactId>
			<version>${flink.version}</version>
			<scope>compile</scope>
		</dependency>

		<!-- Scala Library, compile by Flink as well. -->
		<dependency>
			<groupId>org.scala-lang</groupId>
			<artifactId>scala-library</artifactId>
			<version>${scala.version}</version>
			<scope>compile</scope>
		</dependency>

		<!-- Add connector dependencies here. They must be in the default scope (compile). -->

		<!-- Example:

		<dependency>
			<groupId>org.apache.flink</groupId>
			<artifactId>flink-connector-kafka_${scala.binary.version}</artifactId>
			<version>${flink.version}</version>
		</dependency>
		-->

		<!-- Add logging framework, to produce console output when running in the IDE. -->
		<!-- These dependencies are excluded from the application JAR by default. -->
		<dependency>
			<groupId>org.apache.logging.log4j</groupId>
			<artifactId>log4j-slf4j-impl</artifactId>
			<version>${log4j.version}</version>
			<scope>runtime</scope>
		</dependency>
		<dependency>
			<groupId>org.apache.logging.log4j</groupId>
			<artifactId>log4j-api</artifactId>
			<version>${log4j.version}</version>
			<scope>runtime</scope>
		</dependency>
		<dependency>
			<groupId>org.apache.logging.log4j</groupId>
			<artifactId>log4j-core</artifactId>
			<version>${log4j.version}</version>
			<scope>runtime</scope>
		</dependency>
	</dependencies>

	<build>
		<plugins>
			<!-- We use the maven-shade plugin to create a fat jar that contains all necessary dependencies. -->
			<!-- Change the value of <mainClass>...</mainClass> if your program entry point changes. -->
			<plugin>
				<groupId>org.apache.maven.plugins</groupId>
				<artifactId>maven-shade-plugin</artifactId>
				<version>3.1.1</version>
				<executions>
					<!-- Run shade goal on package phase -->
					<execution>
						<phase>package</phase>
						<goals>
							<goal>shade</goal>
						</goals>
						<configuration>
							<artifactSet>
								<excludes>
									<exclude>org.apache.flink:force-shading</exclude>
									<exclude>com.google.code.findbugs:jsr305</exclude>
									<exclude>org.slf4j:*</exclude>
									<exclude>org.apache.logging.log4j:*</exclude>
								</excludes>
							</artifactSet>
							<filters>
								<filter>
									<!-- Do not copy the signatures in the META-INF folder.
									Otherwise, this might cause SecurityExceptions when using the JAR. -->
									<artifact>*:*</artifact>
									<excludes>
										<exclude>META-INF/*.SF</exclude>
										<exclude>META-INF/*.DSA</exclude>
										<exclude>META-INF/*.RSA</exclude>
									</excludes>
								</filter>
							</filters>
							<transformers>
								<transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
									<mainClass>com.cw.StreamingJob</mainClass>
								</transformer>
							</transformers>
						</configuration>
					</execution>
				</executions>
			</plugin>

			<!-- Java Compiler -->
			<plugin>
				<groupId>org.apache.maven.plugins</groupId>
				<artifactId>maven-compiler-plugin</artifactId>
				<version>3.1</version>
				<configuration>
					<source>${target.java.version}</source>
					<target>${target.java.version}</target>
				</configuration>
			</plugin>

			<!-- Scala Compiler -->
			<plugin>
				<groupId>net.alchim31.maven</groupId>
				<artifactId>scala-maven-plugin</artifactId>
				<version>3.2.2</version>
				<executions>
					<execution>
						<goals>
							<goal>compile</goal>
							<goal>testCompile</goal>
						</goals>
					</execution>
				</executions>
				<configuration>
					<args>
						<arg>-nobootcp</arg>
						<arg>-target:jvm-${target.java.version}</arg>
					</args>
				</configuration>
			</plugin>

			<!-- Eclipse Scala Integration -->
			<plugin>
				<groupId>org.apache.maven.plugins</groupId>
				<artifactId>maven-eclipse-plugin</artifactId>
				<version>2.8</version>
				<configuration>
					<downloadSources>true</downloadSources>
					<projectnatures>
						<projectnature>org.scala-ide.sdt.core.scalanature</projectnature>
						<projectnature>org.eclipse.jdt.core.javanature</projectnature>
					</projectnatures>
					<buildcommands>
						<buildcommand>org.scala-ide.sdt.core.scalabuilder</buildcommand>
					</buildcommands>
					<classpathContainers>
						<classpathContainer>org.scala-ide.sdt.launching.SCALA_CONTAINER</classpathContainer>
						<classpathContainer>org.eclipse.jdt.launching.JRE_CONTAINER</classpathContainer>
					</classpathContainers>
					<excludes>
						<exclude>org.scala-lang:scala-library</exclude>
						<exclude>org.scala-lang:scala-compiler</exclude>
					</excludes>
					<sourceIncludes>
						<sourceInclude>**/*.scala</sourceInclude>
						<sourceInclude>**/*.java</sourceInclude>
					</sourceIncludes>
				</configuration>
			</plugin>
			<plugin>
				<groupId>org.codehaus.mojo</groupId>
				<artifactId>build-helper-maven-plugin</artifactId>
				<version>1.7</version>
				<executions>
					<!-- Add src/main/scala to eclipse build path -->
					<execution>
						<id>add-source</id>
						<phase>generate-sources</phase>
						<goals>
							<goal>add-source</goal>
						</goals>
						<configuration>
							<sources>
								<source>src/main/scala</source>
							</sources>
						</configuration>
					</execution>
					<!-- Add src/test/scala to eclipse build path -->
					<execution>
						<id>add-test-source</id>
						<phase>generate-test-sources</phase>
						<goals>
							<goal>add-test-source</goal>
						</goals>
						<configuration>
							<sources>
								<source>src/test/scala</source>
							</sources>
						</configuration>
					</execution>
				</executions>
			</plugin>
		</plugins>
	</build>
</project>



Common bugs

No new data sinks have been defined

Exception in thread "main" java.lang.RuntimeException: No new data sinks have been defined since the last execution. The last execution refers to the latest call to 'execute()', 'count()', 'collect()', or 'print()'.
at org.apache.flink.api.java.ExecutionEnvironment.createProgramPlan(ExecutionEnvironment.java:1165)
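
In the DataSet API, print(), count(), and collect() already trigger a job execution, so a later env.execute() finds no new sinks and throws this exception. A minimal sketch of the situation and the fix (the class name and data are illustrative):

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;

public class NoNewSinksDemo {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<String> words = env.fromElements("a", "b", "c");

        // print() executes the plan immediately and consumes it
        words.print();

        // Calling env.execute() here again would throw
        // "No new data sinks have been defined since the last execution."
        // Fix: either drop the extra execute(), or define a real sink first, e.g.:
        // words.writeAsText("file:///tmp/words");
        // env.execute("batch-job");
    }
}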


Make sure the <mainClass> configured in the maven-shade-plugin matches the entry point you actually intend to run, for example:

<transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
   <mainClass>com.cw.StreamingJob</mainClass>
</transformer>


<transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
   <mainClass>com.cw.BatchJob</mainClass>
</transformer>


<!-- Apache Flink dependencies -->
		<!-- Scope note: use "compile" while developing (so the job runs in the IDE) and
		"provided" for production packaging, so these dependencies are not bundled into the JAR file.
		dev:	compile
		pro:	provided
		-->


Flink local debugging: start the WebUI and specify the port

	<dependency>
		<groupId>org.apache.flink</groupId>
		<artifactId>flink-runtime-web_${scala.binary.version}</artifactId>
		<version>${flink.version}</version>
	</dependency>
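
With flink-runtime-web on the classpath, a local environment with the WebUI can be started from code; a minimal sketch (the port 8082 is just an example, the default is 8081):

import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class LocalWebUiDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setInteger(RestOptions.PORT, 8082); // "rest.port" used by the local WebUI

        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(conf);

        env.fromElements(1, 2, 3).print();
        env.execute("local-webui-demo");
    }
}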


Advanced Scala programming

https://www.zhihu.com/question/57208620

https://blog.csdn.net/LJFPHP/article/details/103037961

10.4.6-MariaDB-log

InnoDB tables can have at most 1017 columns.

ENGINE=InnoDB DEFAULT CHARSET=utf8;

The older 1000-column limit only applies to MySQL versions below 5.6.9; from versions above 5.6.9 onward, the InnoDB column limit is 1017.

Manual: https://dev.mysql.com/doc/refman/5.7/en/innodb-limits.html

Using the Drools rules engine with Spring Boot

https://github.com/MyHerux/drools-springboot

https://github.com/kylinsoong/drools-examples

http://ksoong.org/drools-examples/content/

06 Flink Basics (1) / Building a big data platform

Drools getting-started demo

Integrating Spark SQL with Drools, with dynamic rules stored in a MySQL database

https://blog.csdn.net/yxf19034516/article/details/108406509

Dynamic rule updates via Apollo, based on the Drools rules engine

https://blog.csdn.net/xp_zyl/article/details/80648260

Drools Workbench

1. Deploy Workbench with Docker
workbench
docker run -p 8080:8080 -p 8001:8001 -d --name drools-wb jboss/drools-workbench-showcase:7.5.0.Final
kie-server
docker run -p 8180:8180 -d --name kie-server --link drools-wb:kie_wb jboss/kie-server-showcase:7.5.0.Final

Visit http://<host-ip>:8080/drools-wb
Default username/password: admin/admin

https://blog.csdn.net/weixin_30043179/article/details/113067674

https://blog.csdn.net/javaxuexilu/article/details/100738452

http://t.zoukankan.com/wanly3643-p-7919090.html

http://shiyujun.cn/posts/rools规则引擎入门指南-三-——使用Docker部署Workbench.html

https://lsqingfeng.blog.csdn.net/article/details/123553601

https://blog.csdn.net/dajiangtai007/article/details/126106575

https://nightlies.apache.org/flink/flink-docs-release-1.16/zh/docs/dev/configuration/maven/

https://nightlies.apache.org/flink/flink-docs-release-1.15/zh/docs/dev/configuration/maven/

https://nightlies.apache.org/flink/flink-docs-release-1.14/zh/docs/try-flink/table_api/

https://blog.csdn.net/weixin_45366499/article/details/115519363

https://blog.csdn.net/dajiangtai007/article/details/126106575

https://mvnrepository.com/artifact/org.apache.flink/flink-sql-connector-kafka/1.15.2

https://mvnrepository.com/artifact/org.apache.flink

Could not find any factory for identifier 'kafka' that implements 'org.apache.flink.table.factories.DynamicTableFactory' in the classpath. This usually means the Kafka SQL connector jar (e.g. flink-sql-connector-kafka, or flink-connector-kafka plus the format jar) is missing from the job classpath or the cluster's lib directory.

http://www.manongjc.com/detail/27-byorelonnmqhvpe.html

https://ci.apache.org/projects/flink/flink-docs-release-1.10/dev/table/hive/#connecting-to-hive

https://nightlies.apache.org/flink/flink-docs-release-1.10/dev/table/hive/#connecting-to-hive

https://www.cnblogs.com/yb38156/p/15545379.html

FLINK Basics (142): DataStream/Table conversion (8) Handling of Changelog Streams (3) toChangelogStream (a short sketch follows after this list of links)

https://www.cnblogs.com/qiu-hua/p/15204234.html

https://blog.csdn.net/qq_17310871/article/details/126561782

https://blog.csdn.net/qq_44665283/article/details/125908709

https://www.cnblogs.com/duzhuo/p/6819484.html

https://blog.csdn.net/jsjsjs1789/article/details/109121186

https://www.cnblogs.com/ywjfx/p/14244015.html

https://blog.csdn.net/qq_17310871/article/details/126560562

https://www.bookstack.cn/read/apache-inlong-1.2-zh/e63698e991722d14.md

https://nightlies.apache.org/flink/flink-docs-release-1.15/docs/connectors/table/filesystem/

https://blog.csdn.net/u010772882/article/details/124845476

https://www.malaoshi.top/show_1IX23NIaLLkI.html

https://blog.csdn.net/HD0do/article/details/122846476

https://nightlies.apache.org/flink/flink-docs-release-1.15/docs/connectors/table/datagen/


https://nightlies.apache.org/flink/flink-docs-release-1.15/docs/dev/configuration/overview/

https://blog.csdn.net/Samooyou/article/details/125070536

https://www.jianshu.com/p/247e709edcfb

https://blog.csdn.net/wudonglianga/article/details/121943096

https://blog.csdn.net/yanqi_vip/article/details/124162481
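
For the toChangelogStream article referenced above, a minimal sketch of converting an updating query result into a changelog DataStream (assumes Flink 1.13+; the datagen table and column names are made up for illustration):

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;

public class ToChangelogStreamDemo {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        tableEnv.executeSql(
                "CREATE TABLE orders (name STRING, amount INT) " +
                "WITH ('connector' = 'datagen', 'rows-per-second' = '1')");

        // The aggregation produces an updating result, so toAppendStream would fail here;
        // toChangelogStream emits Rows tagged with their RowKind (insert/update/delete).
        Table agg = tableEnv.sqlQuery(
                "SELECT name, SUM(amount) AS total FROM orders GROUP BY name");
        DataStream<Row> changelog = tableEnv.toChangelogStream(agg);
        changelog.print();

        env.execute("to-changelog-stream-demo");
    }
}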

write-hbase

package com.wudl.flink.source;

import com.wudl.flink.bean.HbaseUser;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;
import org.apache.flink.util.CloseableIterator;

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

/**
 * @author :wudl
 * @date :Created in 2021-12-14 23:28
 * @description:
 * @modified By:
 * @version: 1.0
 */

public class HbaseSouce {

    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        TableResult tableResult = tableEnv.executeSql(
                "CREATE TABLE testWudlHbase (" +
                        " rowkey STRING," +
                        " cf ROW<name STRING,address STRING,age STRING>," +
                        " PRIMARY KEY (rowkey) NOT ENFORCED" +
                        " ) WITH (" +
                        " 'connector' = 'hbase-2.2' ," +
                        " 'table-name' = 'wudlHbase' ," +
                        " 'zookeeper.quorum' = '192.168.1.161:2181'" +
                        " )");

        TableResult outPutTable = tableEnv.executeSql("CREATE TABLE  MySqlwudlHbase (" +
                "id INT ," +
                "name STRING ," +
                "address STRING , " +
                "age INT " +
                ") " +
                "WITH (" +
                "'connector' = 'jdbc'," +
                "'url' = 'jdbc:mysql://192.168.1.180:3306/test?useUnicode=true&characterEncoding=UTF-8'," +
                "'table-name' = 'wudlHbase'," +
                " 'username' = 'root'," +
                " 'password' = '123456'" +
                " )");

//        tableEnv.executeSql(" SELECT  cast(rowkey as INT) id, cf.name name ,cf.address address , cast(cf.age as  BIGINT) age FROM testWudlHbase");
        tableEnv.executeSql(" insert into  MySqlwudlHbase SELECT  cast(rowkey as INT) id, cf.name name ,cf.address address , cast(cf.age as  INT) age FROM testWudlHbase");

read-hbase

package com.wudl.flink.source;

import com.wudl.flink.bean.HbaseUser;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;
import org.apache.flink.util.CloseableIterator;

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

/**
 * @author :wudl
 * @date :Created in 2021-12-14 23:28
 * @description:
 * @modified By:
 * @version: 1.0
 */

public class HbaseSouce {

    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        TableResult tableResult = tableEnv.executeSql(
                "CREATE TABLE testWudlHbase (" +
                        " rowkey STRING," +
                        " cf ROW<name STRING,address STRING,age STRING>," +
                        " PRIMARY KEY (rowkey) NOT ENFORCED" +
                        " ) WITH (" +
                        " 'connector' = 'hbase-2.2' ," +
                        " 'table-name' = 'wudlHbase' ," +
                        " 'zookeeper.quorum' = '192.168.1.161:2181'" +
                        " )");

        TableResult outPutTable = tableEnv.executeSql("CREATE TABLE  MySqlwudlHbase (" +
                "id INT ," +
                "name STRING ," +
                "address STRING , " +
                "age INT " +
                ") " +
                "WITH (" +
                "'connector' = 'jdbc'," +
                "'url' = 'jdbc:mysql://192.168.1.180:3306/test?useUnicode=true&characterEncoding=UTF-8'," +
                "'table-name' = 'wudlHbase'," +
                " 'username' = 'root'," +
                " 'password' = '123456'" +
                " )");

//        tableEnv.executeSql(" SELECT  cast(rowkey as INT) id, cf.name name ,cf.address address , cast(cf.age as  BIGINT) age FROM testWudlHbase");
//        tableEnv.executeSql(" insert into  MySqlwudlHbase SELECT  cast(rowkey as INT) id, cf.name name ,cf.address address , cast(cf.age as  INT) age FROM testWudlHbase");
        // equivalent to an HBase scan
        Table table = tableEnv.sqlQuery("SELECT * FROM testWudlHbase");
        // the query result
        TableResult executeResult = table.execute();
        // obtain the query results
        CloseableIterator<Row> collect = executeResult.collect();
        // output: after calling print() or the Consumer below, the data has been consumed; keep only one of the two
//        executeResult.print();
        List<HbaseUser> hbaseUsers = new ArrayList<>();
        collect.forEachRemaining(new Consumer<Row>() {
            @Override
            public void accept(Row row) {
                String field0 = String.valueOf(row.getField(0));
                String cl = String.valueOf(row.getField(1));
                String[] hUser = cl.split(",");
                hbaseUsers.add(new HbaseUser(hUser[0], hUser[1]));
            }
        });
        System.out.println("................");

        for(HbaseUser um : hbaseUsers){
            System.out.println(um);
        }
    }
}
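
The com.wudl.flink.bean.HbaseUser class imported in both snippets is not shown in the original post; a minimal sketch matching the two-argument constructor used above (the field names are assumptions):

package com.wudl.flink.bean;

public class HbaseUser {
    private String name;
    private String address;

    public HbaseUser(String name, String address) {
        this.name = name;
        this.address = address;
    }

    @Override
    public String toString() {
        return "HbaseUser{name='" + name + "', address='" + address + "'}";
    }
}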


Official documentation references
https://nightlies.apache.org/flink/flink-docs-release-1.12/dev/table/connectors/jdbc.html
https://nightlies.apache.org/flink/flink-docs-release-1.12/dev/table/connectors/hbase.html

Table creation statements (MySQL)


CREATE TABLE `wudlHbase` (
  `id` int(11) DEFAULT NULL,
  `name` varchar(64) CHARACTER SET latin1 DEFAULT NULL,
  `address` varchar(64) CHARACTER SET latin1 DEFAULT NULL,
  `age` int(11) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
Then set the character set for the columns:
ALTER TABLE `wudlHbase` MODIFY NAME VARCHAR(64) CHARACTER SET utf8;
ALTER TABLE `wudlHbase` MODIFY id VARCHAR(64) CHARACTER SET utf8;
ALTER TABLE `wudlHbase` MODIFY address VARCHAR(64) CHARACTER SET utf8;
ALTER TABLE `wudlHbase` MODIFY age VARCHAR(64) CHARACTER SET utf8;
ALTER TABLE `wudlHbase` MODIFY NAME VARCHAR(20) CHARACTER SET utf8;


hbase(main):003:0> desc 'wudlHbase'
Table wudlHbase is ENABLED                                                                                                                                                 
wudlHbase                                                                                                                                                                  
COLUMN FAMILIES DESCRIPTION                                                                                                                                                
{NAME => 'cf', VERSIONS => '1', EVICT_BLOCKS_ON_CLOSE => 'false', NEW_VERSION_BEHAVIOR => 'false', KEEP_DELETED_CELLS => 'FALSE', CACHE_DATA_ON_WRITE => 'false', DATA_BLOC
K_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', CACHE_INDEX_ON_WRITE => 'false', IN_MEMORY => 'false', CACHE_B
LOOMS_ON_WRITE => 'false', PREFETCH_BLOCKS_ON_OPEN => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536'}                                          
1 row(s)
Took 0.0686 seconds                                                                                                                                                        
hbase(main):004:0> 

scan 'wudlHbase'

pom file

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>org.wudlflink12</groupId>
    <artifactId>wudl-flink-12</artifactId>
    <version>1.0-SNAPSHOT</version>

    <!-- Repository locations: the aliyun, apache, cloudera and spring-plugin repositories -->
    <repositories>
        <repository>
            <id>aliyun</id>
            <url>http://maven.aliyun.com/nexus/content/groups/public/</url>
        </repository>
        <repository>
            <id>apache</id>
            <url>https://repository.apache.org/content/repositories/snapshots/</url>
        </repository>
        <repository>
            <id>cloudera</id>
            <url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
        </repository>

        <repository>
            <id>spring-plugin</id>
            <url>https://repo.spring.io/plugins-release/</url>
        </repository>

    </repositories>

    <properties>
        <encoding>UTF-8</encoding>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <maven.compiler.source>1.8</maven.compiler.source>
        <maven.compiler.target>1.8</maven.compiler.target>
        <java.version>1.8</java.version>
        <scala.version>2.11</scala.version>
        <flink.version>1.12.0</flink.version>
    </properties>
    <dependencies>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-elasticsearch7_2.11</artifactId>
            <version>1.12.3</version>
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-hbase-2.2_2.11</artifactId>
            <version>${flink.version}</version>
        </dependency>


        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-jdbc_2.11</artifactId>
            <version>1.10.3</version>
        </dependency>

        <!-- Scala language dependency -->
        <dependency>
            <groupId>org.scala-lang</groupId>
            <artifactId>scala-library</artifactId>
            <version>2.12.11</version>
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-clients_2.12</artifactId>
            <version>${flink.version}</version>
            <exclusions>
                <exclusion>
                    <artifactId>slf4j-api</artifactId>
                    <groupId>org.slf4j</groupId>
                </exclusion>
            </exclusions>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-scala_2.12</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-java</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-scala_2.12</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-java_2.12</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-api-scala-bridge_2.12</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-api-java-bridge_2.12</artifactId>
            <version>${flink.version}</version>
        </dependency>

        <!-- Blink planner (the default since Flink 1.11) -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-planner-blink_2.12</artifactId>
            <version>${flink.version}</version>
            <exclusions>
                <exclusion>
                    <artifactId>slf4j-api</artifactId>
                    <groupId>org.slf4j</groupId>
                </exclusion>
            </exclusions>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-common</artifactId>
            <version>${flink.version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-cep_2.12</artifactId>
            <version>${flink.version}</version>
        </dependency>

        <!-- Flink connectors -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-kafka_2.12</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-sql-connector-kafka_2.12</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-jdbc_2.12</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-csv</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-json</artifactId>
            <version>${flink.version}</version>
        </dependency>

        <!-- <dependency>
           <groupId>org.apache.flink</groupId>
           <artifactId>flink-connector-filesystem_2.12</artifactId>
           <version>${flink.version}</version>
       </dependency>-->
        <!--<dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-jdbc_2.12</artifactId>
            <version>${flink.version}</version>
        </dependency>-->
        <!--<dependency>
              <groupId>org.apache.flink</groupId>
              <artifactId>flink-parquet_2.12</artifactId>
              <version>${flink.version}</version>
         </dependency>-->
        <!--<dependency>
            <groupId>org.apache.avro</groupId>
            <artifactId>avro</artifactId>
            <version>1.9.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.parquet</groupId>
            <artifactId>parquet-avro</artifactId>
            <version>1.10.0</version>
        </dependency>-->


        <dependency>
            <groupId>org.apache.bahir</groupId>
            <artifactId>flink-connector-redis_2.11</artifactId>
            <version>1.0</version>
            <exclusions>
                <exclusion>
                    <artifactId>flink-streaming-java_2.11</artifactId>
                    <groupId>org.apache.flink</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>flink-runtime_2.11</artifactId>
                    <groupId>org.apache.flink</groupId>
                </exclusion>
<!--                <exclusion>-->
<!--                    <artifactId>flink-core</artifactId>-->
<!--                    <groupId>org.apache.flink</groupId>-->
<!--                </exclusion>-->
                <exclusion>
                    <artifactId>flink-java</artifactId>
                    <groupId>org.apache.flink</groupId>
                </exclusion>
            </exclusions>
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-hive_2.12</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hive</groupId>
            <artifactId>hive-metastore</artifactId>
            <version>2.1.0</version>
            <exclusions>
                <exclusion>
                    <artifactId>hadoop-hdfs</artifactId>
                    <groupId>org.apache.hadoop</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>slf4j-api</artifactId>
                    <groupId>org.slf4j</groupId>
                </exclusion>
            </exclusions>
        </dependency>
        <dependency>
            <groupId>org.apache.hive</groupId>
            <artifactId>hive-exec</artifactId>
            <version>2.1.0</version>
            <exclusions>
                <exclusion>
                    <artifactId>slf4j-api</artifactId>
                    <groupId>org.slf4j</groupId>
                </exclusion>
            </exclusions>
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-shaded-hadoop-2-uber</artifactId>
            <version>2.7.5-10.0</version>
            <exclusions>
                <exclusion>
                    <artifactId>slf4j-log4j12</artifactId>
                    <groupId>org.slf4j</groupId>
                </exclusion>
            </exclusions>
        </dependency>

        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-client</artifactId>
            <version>2.1.0</version>
            <exclusions>
                <exclusion>
                    <artifactId>slf4j-api</artifactId>
                    <groupId>org.slf4j</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>slf4j-log4j12</artifactId>
                    <groupId>org.slf4j</groupId>
                </exclusion>
            </exclusions>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.38</version>
            <!--<version>8.0.20</version>-->
        </dependency>

        <!-- High-performance async component: Vert.x -->
        <dependency>
            <groupId>io.vertx</groupId>
            <artifactId>vertx-core</artifactId>
            <version>3.9.0</version>
        </dependency>
        <dependency>
            <groupId>io.vertx</groupId>
            <artifactId>vertx-jdbc-client</artifactId>
            <version>3.9.0</version>
        </dependency>
        <dependency>
            <groupId>io.vertx</groupId>
            <artifactId>vertx-redis-client</artifactId>
            <version>3.9.0</version>
        </dependency>

        <!-- Logging -->
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
            <version>1.7.7</version>
            <scope>runtime</scope>
        </dependency>
        <dependency>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
            <version>1.2.17</version>
            <scope>runtime</scope>
        </dependency>

        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>1.2.44</version>
        </dependency>

        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <version>1.18.2</version>
            <scope>provided</scope>
        </dependency>

        <!-- Reference: https://blog.csdn.net/f641385712/article/details/84109098 -->
        <!--<dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-collections4</artifactId>
            <version>4.4</version>
        </dependency>-->
        <!--<dependency>
            <groupId>org.apache.thrift</groupId>
            <artifactId>libfb303</artifactId>
            <version>0.9.3</version>
            <type>pom</type>
            <scope>provided</scope>
         </dependency>-->
        <!--<dependency>
           <groupId>com.google.guava</groupId>
           <artifactId>guava</artifactId>
           <version>28.2-jre</version>
       </dependency>-->


        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>3.1.3</version>
        </dependency>

        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>3.1.3</version>
            <exclusions>
                <exclusion>
                    <artifactId>slf4j-api</artifactId>
                    <groupId>org.slf4j</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>slf4j-log4j12</artifactId>
                    <groupId>org.slf4j</groupId>
                </exclusion>
            </exclusions>
        </dependency>

    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-assembly-plugin</artifactId>
                <version>3.0.0</version>
                <configuration>
                    <descriptorRefs>
                        <descriptorRef>jar-with-dependencies</descriptorRef>
                    </descriptorRefs>
                </configuration>
                <executions>
                    <execution>
                        <id>make-assembly</id>
                        <phase>package</phase>
                        <goals>
                            <goal>single</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>

</project>




sinkA

https://blog.csdn.net/qq_37334150/article/details/115070400

https://www.cnblogs.com/21airuirui1123/p/14644933.html

https://blog.csdn.net/qq_42009405/article/details/127262065

https://blog.csdn.net/jmx_bigdata/article/details/112572194

FlinkSQL reading and writing Redis
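
The bahir flink-connector-redis dependency declared in the pom above exposes a DataStream sink rather than a SQL connector; a minimal sketch of writing key/value pairs to Redis with it (host, port and the key/value extraction are illustrative):

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.redis.RedisSink;
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisPoolConfig;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommand;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommandDescription;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisMapper;

public class RedisSinkDemo {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // connection to the Redis server (host/port are placeholders)
        FlinkJedisPoolConfig conf = new FlinkJedisPoolConfig.Builder()
                .setHost("127.0.0.1")
                .setPort(6379)
                .build();

        env.fromElements("a:1", "b:2", "c:3")
                .addSink(new RedisSink<>(conf, new RedisMapper<String>() {
                    @Override
                    public RedisCommandDescription getCommandDescription() {
                        // write each element as SET key value
                        return new RedisCommandDescription(RedisCommand.SET);
                    }

                    @Override
                    public String getKeyFromData(String data) {
                        return data.split(":")[0];
                    }

                    @Override
                    public String getValueFromData(String data) {
                        return data.split(":")[1];
                    }
                }));

        env.execute("redis-sink-demo");
    }
}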

code2

import java.util.concurrent.TimeUnit;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

public class CreateDDLPorduce {
    public static void main(String[] args) {
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        EnvironmentSettings settings = EnvironmentSettings.newInstance()
                .inStreamingMode()
                //.useOldPlanner() // legacy Flink planner
                .useBlinkPlanner() // Blink planner
                .build();
        StreamTableEnvironment ste = StreamTableEnvironment.create(env, settings);

        // Kafka sink table using the legacy 'connector.type' properties;
        // replace the bootstrap.servers placeholder with a real broker address
        String ddl = "CREATE TABLE CbryProduce(\n" +
                "customerId int,\n" +
                "oldStatus int,\n" +
                "newStatus int,\n" +
                "eventTime bigint\n" +
                ") WITH(\n" +
                "'connector.type'='kafka',\n" +
                "'connector.version'='universal',\n" +
                "'connector.properties.bootstrap.servers'='<Kafka broker address>',\n" +
                "'connector.topic'='event_topic_1',\n" +
                "'format.type'='json'\n" +
                ")\n";
        ste.executeSql(ddl);

        // emit one record every 3 seconds with a rotating status value
        while (true) {
            try {
                TimeUnit.SECONDS.sleep(3);
                int status = (int) (System.currentTimeMillis() % 3);
                String insert = "insert into CbryProduce(customerId,oldStatus,newStatus,eventTime)" +
                        "values(1024,1," + status + "," + System.currentTimeMillis() + ")";
                ste.executeSql(insert);
            } catch (Exception ex) {
                ex.printStackTrace();
            }
        }
    }
}

code3

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;

public class CreateDDLConsumer {
	public static void main(String[] args) {
		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		EnvironmentSettings settings = EnvironmentSettings.newInstance().inStreamingMode()
				// .useOldPlanner() // legacy Flink planner
				.useBlinkPlanner() // Blink planner
				.build();
		StreamTableEnvironment ste = StreamTableEnvironment.create(env, settings);

		// Kafka source table using the legacy 'connector.type' properties;
		// replace the bootstrap.servers placeholder with a real broker address
		String ddl = "CREATE TABLE CbryConsumer(\n" + "customerId int,\n" + "oldStatus int,\n"
				+ "newStatus int,\n" + "eventTime bigint\n" + ") WITH(\n" + "'connector.type'='kafka',\n"
				+ "'connector.version'='universal',\n" + "'connector.properties.group.id'='event1_group',\n"
				+ "'connector.properties.bootstrap.servers'='<Kafka broker address>',\n"
				+ "'connector.topic'='event_topic_1',\n" + "'connector.startup-mode' = 'latest-offset',\n"
				+ "'format.type'='json'\n" + ")\n";
		ste.executeSql(ddl);

		Table queryTable = ste.sqlQuery("select customerId,newStatus as status "
				+ " from CbryConsumer" + " where newStatus in(1,2)");

		/*
		 * DataStream<Tuple2<Boolean, Tuple2<Integer, Integer>>> result =
		 *         ste.toRetractStream(queryTable, Types.TUPLE(Types.INT, Types.INT));
		 */
		DataStream<Row> result = ste.toAppendStream(queryTable, Row.class);
		result.printToErr();

		try {
			env.execute();
		} catch (Exception e) {
			e.printStackTrace();
		}
	}
}


### --- Output to Kafka
 
~~~     # Write a table out to Kafka
DataStreamSource<String> data = env.addSource(new SourceFunction<String>() {
    @Override
    public void run(SourceContext<String> ctx) throws Exception {
        int num = 0;
        while (true) {
            num++;
            ctx.collect("name" + num);
            Thread.sleep(1000);
        }
    }

    @Override
    public void cancel() {
    }
});
Table name = tEnv.fromDataStream(data, $("name"));
ConnectTableDescriptor descriptor = tEnv.connect(
        // declare the external system to connect to
        new Kafka()
                .version("universal")
                .topic("animal")
                .startFromEarliest()
                .property("bootstrap.servers", "hdp-2:9092")
)
        // declare a format for this system
        .withFormat(
                // new Json()
                new Csv()
        )
        // declare the schema of the table
        .withSchema(
                new Schema()
                        // .field("rowtime", DataTypes.TIMESTAMP(3))
                        // .rowtime(new Rowtime()
                        //         .timestampsFromField("timestamp")
                        //         .watermarksPeriodicBounded(60000)
                        // )
                        // .field("user", DataTypes.BIGINT())
                        .field("message", DataTypes.STRING())
        );
// create a table with the given name
descriptor.createTemporaryTable("MyUserTable");
name.executeInsert("MyUserTable");
        
### --- Output to MySQL (for reference)
 
CREATE TABLE MyUserTable (
    ...
) WITH (
    -- required: specify this table type is jdbc
    'connector.type' = 'jdbc',
    -- required: JDBC DB url
    'connector.url' = 'jdbc:mysql://localhost:3306/flink-test',
    -- required: jdbc table name
    'connector.table' = 'jdbc_table_name',
    -- optional: the class name of the JDBC driver to use to connect to this URL.
    -- If not set, it will automatically be derived from the URL.
    'connector.driver' = 'com.mysql.jdbc.Driver',
    -- optional: jdbc user name and password
    'connector.username' = 'name',
    'connector.password' = 'password',

    -- **followings are scan options, optional, used when reading from a table**
    -- optional: SQL query / prepared statement.
    -- If set, this will take precedence over the 'connector.table' setting
    'connector.read.query' = 'SELECT * FROM sometable',
    -- These options must all be specified if any of them is specified. In addition,
    -- partition.num must be specified. They describe how to partition the table when
    -- reading in parallel from multiple tasks. partition.column must be a numeric,
    -- date, or timestamp column from the table in question. Notice that lowerBound and
    -- upperBound are just used to decide the partition stride, not for filtering the
    -- rows in table. So all rows in the table will be partitioned and returned.
    -- optional: the column name used for partitioning the input.
    'connector.read.partition.column' = 'column_name',
    -- optional: the number of partitions.
    'connector.read.partition.num' = '50',
    -- optional: the smallest value of the first partition.
    'connector.read.partition.lower-bound' = '500',
    -- optional: the largest value of the last partition.
    'connector.read.partition.upper-bound' = '1000',
    -- optional, Gives the reader a hint as to the number of rows that should be fetched
    -- from the database when reading per round trip. If the value specified is zero, then
    -- the hint is ignored. The default value is zero.
    'connector.read.fetch-size' = '100',

    -- **followings are lookup options, optional, used in temporary join**
    -- optional, max number of rows of lookup cache, over this value, the oldest rows will
    -- be eliminated. "cache.max-rows" and "cache.ttl" options must all be specified if any
    -- of them is specified. Cache is not enabled as default.
    'connector.lookup.cache.max-rows' = '5000',
    -- optional, the max time to live for each rows in lookup cache, over this time, the oldest rows
    -- will be expired. "cache.max-rows" and "cache.ttl" options must all be specified if any of
    -- them is specified. Cache is not enabled as default.
    'connector.lookup.cache.ttl' = '10s',
    -- optional, max retry times if lookup database failed
    'connector.lookup.max-retries' = '3',

    -- **followings are sink options, optional, used when writing into table**
    -- optional, flush max size (includes all append, upsert and delete records),
    -- over this number of records, will flush data. The default value is "5000".
    'connector.write.flush.max-rows' = '5000',
    -- optional, flush interval mills, over this time, asynchronous threads will flush data.
    -- The default value is "0s", which means no asynchronous flush thread will be scheduled.
    'connector.write.flush.interval' = '2s',
    -- optional, max retry times if writing records to database failed
    'connector.write.max-retries' = '3'
)
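
A minimal sketch of registering a JDBC sink table like the one above from code and writing into it (assumes Flink 1.11 with the Blink planner; the URL, table name, credentials and the datagen source are placeholders):

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

public class ToMysqlDemo {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(
                env,
                EnvironmentSettings.newInstance().useBlinkPlanner().inStreamingMode().build());

        // sink table backed by MySQL, using the legacy 'connector.type' properties shown above
        tEnv.executeSql(
                "CREATE TABLE MyUserTable (name STRING) WITH (" +
                " 'connector.type' = 'jdbc'," +
                " 'connector.url' = 'jdbc:mysql://localhost:3306/flink-test'," +
                " 'connector.table' = 'jdbc_table_name'," +
                " 'connector.username' = 'name'," +
                " 'connector.password' = 'password')");

        // illustrative source table so the insert below has something to read from
        tEnv.executeSql(
                "CREATE TABLE sourceTable (name STRING) WITH (" +
                " 'connector' = 'datagen', 'rows-per-second' = '1')");

        // submitting the INSERT writes the query result into the MySQL table
        tEnv.executeSql("INSERT INTO MyUserTable SELECT name FROM sourceTable");
    }
}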

code1

### --- Code implementation: writing a table out to Kafka
 
package com.yanqi.tableql;
 
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.descriptors.Csv;
import org.apache.flink.table.descriptors.Kafka;
import org.apache.flink.table.descriptors.Schema;
 
import static org.apache.flink.table.api.Expressions.$;
 
public class ToKafka {
    public static void main(String[] args) throws Exception {
        //env
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        //tEnv
        EnvironmentSettings settings = EnvironmentSettings.newInstance()
                .useBlinkPlanner()
                .inStreamingMode()
//                .inBatchMode()
                .withBuiltInCatalogName("default_catalog")
                .withBuiltInDatabaseName("default_database")
                .build();
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings);
 
        // read the source data
        DataStreamSource<String> data = env.addSource(new SourceFunction<String>() {
            @Override
            public void run(SourceContext<String> ctx) throws Exception {
                int num = 0;
                while (true) {
                    num++;
                    ctx.collect("name" + num);
                    Thread.sleep(1000);
                }
            }
 
            @Override
            public void cancel() {
 
            }
        });
 
        Table nameTable = tEnv.fromDataStream(data, $("name"));
 
        tEnv.connect(
                new Kafka()
                        .version("universal")
                        .topic("lucasone")
                        .startFromEarliest()
                        .property("bootstrap.servers","hadoop01:9092")
        )
                .withFormat(new Csv())
                .withSchema(
                        new Schema().field("name", DataTypes.STRING())
                )
                .createTemporaryTable("animalTable");
 
        nameTable.executeInsert("animalTable");
 
        env.execute();
    }
}

show1

### --- Build and run output
 
~~~     # Start a console consumer on the Kafka topic
[root@hadoop01 ~]# kafka-console-consumer.sh --bootstrap-server hadoop01:9092 --topic lucasone --from-beginning
name1
name2
name3
name4
name5
~~~ remaining output omitted

~~~     # Run command and output from the IDE
 
D:\JAVA\jdk1.8.0_231\bin\java.exe "-javaagent:D:\IntelliJIDEA\IntelliJ IDEA 2019.3.3\lib\idea_rt.jar=55127:D:\IntelliJIDEA\IntelliJ IDEA 2019.3.3\bin" -Dfile.encoding=UTF-8 -classpath D:\JAVA\jdk1.8.0_231\jre\lib\charsets.jar;D:\JAVA\jdk1.8.0_231\jre\lib\deploy.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\access-bridge-64.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\cldrdata.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\dnsns.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\jaccess.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\jfxrt.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\localedata.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\nashorn.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\sunec.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\sunjce_provider.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\sunmscapi.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\sunpkcs11.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\zipfs.jar;D:\JAVA\jdk1.8.0_231\jre\lib\javaws.jar;D:\JAVA\jdk1.8.0_231\jre\lib\jce.jar;D:\JAVA\jdk1.8.0_231\jre\lib\jfr.jar;D:\JAVA\jdk1.8.0_231\jre\lib\jfxswt.jar;D:\JAVA\jdk1.8.0_231\jre\lib\jsse.jar;D:\JAVA\jdk1.8.0_231\jre\lib\management-agent.jar;D:\JAVA\jdk1.8.0_231\jre\lib\plugin.jar;D:\JAVA\jdk1.8.0_231\jre\lib\resources.jar;D:\JAVA\jdk1.8.0_231\jre\lib\rt.jar;E:\NO.Z.80000.Hadoop.spark\FirstFlink\target\classes;D:\JAVA\scala-2.12.2\lib\scala-library.jar;D:\JAVA\scala-2.12.2\lib\scala-reflect.jar;C:\Users\Administrator\.m2\repository\org\apache\flink\flink-java\1.11.1\flink-java-1.11.1.jar;C:\Users\Administrator\.m2\repository\org\apache\flink\flink-core\1.11.1\flink-core-1.11.1.jar;C:\Users\Administrator\.m2\repository\org\apache\flink\flink-annotations\1.11.1\flink-annotations-1.11.1.jar;C:\Users\Administrator\.m2\repository\org\apache\flink\flink-metrics-core\1.11.1\flink-metrics-core-1.11.1.jar;C:\Users\Administrator\.m2\repository\com\esotericsoftware\kryo\kryo\2.24.0\kryo-2.24.0.jar;C:\Users\Administrator\.m2\repository\com\esotericsoftware\minlog\minlog\1.2\minlog-1.2.jar;C:\Users\Administrator\.m2\repository\org\objenesis\objenesis\2.1\objenesis-2.1.jar;C:\Users\Administrator\.m2\repository\org\apache\commons\commons-lang3\3.3.2\commons-lang3-3.3.2.jar;C:\Users\Administrator\.m2\repository\org\apache\commons\commons-math3\3.5\commons-math3-3.5.jar;C:\Users\Administrator\.m2\repository\org\slf4j\slf4j-api\1.7.15\slf4j-api-1.7.15.jar;C:\Users\Administrator\.m2\repository\com\google\code\findbugs\jsr305\1.3.9\jsr305-1.3.9.jar;C:\Users\Administrator\.m2\repository\org\apache\flink\force-shading\1.11.1\force-shading-1.11.1.jar;C:\Users\Administrator\.m2\repository\org\apache\flink\flink-streaming-java_2.12\1.11.1\flink-streaming-java_2.12-1.11.1.jar;C:\Users\Administrator\.m2\repository\org\apache\flink\flink-runtime_2.12\1.11.1\flink-runtime_2.12-1.11.1.jar;C:\Users\Administrator\.m2\repository\org\apache\flink\flink-queryable-state-client-java\1.11.1\flink-queryable-state-client-java-1.11.1.jar;C:\Users\Administrator\.m2\repository\org\apache\flink\flink-hadoop-fs\1.11.1\flink-hadoop-fs-1.11.1.jar;C:\Users\Administrator\.m2\repository\org\apache\flink\flink-shaded-netty\4.1.39.Final-11.0\flink-shaded-netty-4.1.39.Final-11.0.jar;C:\Users\Administrator\.m2\repository\org\apache\flink\flink-shaded-jackson\2.10.1-11.0\flink-shaded-jackson-2.10.1-11.0.jar;C:\Users\Administrator\.m2\repository\org\apache\flink\flink-shaded-zookeeper-3\3.4.14-11.0\flink-shaded-zookeeper-3-3.4.14-11.0.jar;C:\Users\Administrator\.m2\repository\org\javassist\javassist\3.24.0-GA\javassist-3.24.0-GA.jar;C:\Users\Administrator\.m2\repository\com\typesafe\akka\akka-actor_2.12\2.5.21\akka-actor_2.12-2.5.21.jar;C
:\Users\Administrator\.m2\repository\com\typesafe\config\1.3.3\config-1.3.3.jar;C:\Users\Administrator\.m2\repository\org\scala-lang\modules\scala-java8-compat_2.12\0.8.0\scala-java8-compat_2.12-0.8.0.jar;C:\Users\Administrator\.m2\repository\com\typesafe\akka\akka-stream_2.12\2.5.21\akka-stream_2.12-2.5.21.jar;C:\Users\Administrator\.m2\repository\org\reactivestreams\reactive-streams\1.0.2\reactive-streams-1.0.2.jar;C:\Users\Administrator\.m2\repository\com\typesafe\ssl-config-core_2.12\0.3.7\ssl-config-core_2.12-0.3.7.jar;C:\Users\Administrator\.m2\repository\org\scala-lang\modules\scala-parser-combinators_2.12\1.1.1\scala-parser-combinators_2.12-1.1.1.jar;C:\Users\Administrator\.m2\repository\com\typesafe\akka\akka-protobuf_2.12\2.5.21\akka-protobuf_2.12-2.5.21.jar;C:\Users\Administrator\.m2\repository\com\typesafe\akka\akka-slf4j_2.12\2.5.21\akka-slf4j_2.12-2.5.21.jar;C:\Users\Administrator\.m2\repository\org\clapper\grizzled-slf4j_2.12\1.3.2\grizzled-slf4j_2.12-1.3.2.jar;C:\Users\Administrator\.m2\repository\com\github\scopt\scopt_2.12\3.5.0\scopt_2.12-3.5.0.jar;C:\Users\Administrator\.m2\repository\org\xerial\snappy\snappy-java\1.1.4\snappy-java-1.1.4.jar;C:\Users\Administrator\.m2\repository\com\twitter\chill_2.12\0.7.6\chill_2.12-0.7.6.jar;C:\Users\Administrator\.m2\repository\com\twitter\chill-java\0.7.6\chill-java-0.7.6.jar;C:\Users\Administrator\.m2\repository\org\lz4\lz4-java\1.6.0\lz4-java-1.6.0.jar;C:\Users\Administrator\.m2\repository\org\apache\flink\flink-shaded-guava\18.0-11.0\flink-shaded-guava-18.0-11.0.jar;C:\Users\Administrator\.m2\repository\org\apache\flink\flink-clients_2.12\1.11.1\flink-clients_2.12-1.11.1.jar;C:\Users\Administrator\.m2\repository\org\apache\flink\flink-optimizer_2.12\1.11.1\flink-optimizer_2.12-1.11.1.jar;C:\Users\Administrator\.m2\repository\commons-cli\commons-cli\1.3.1\commons-cli-1.3.1.jar;C:\Users\Administrator\.m2\repository\org\apache\flink\flink-scala_2.12\1.11.1\flink-scala_2.12-1.11.1.jar;C:\Users\Administrator\.m2\repository\org\apache\flink\flink-shaded-asm-7\7.1-11.0\flink-shaded-asm-7-7.1-11.0.jar;C:\Users\Administrator\.m2\repository\org\scala-lang\scala-reflect\2.12.7\scala-reflect-2.12.7.jar;C:\Users\Administrator\.m2\repository\org\scala-lang\scala-library\2.12.7\scala-library-2.12.7.jar;C:\Users\Administrator\.m2\repository\org\scala-lang\scala-compiler\2.12.7\scala-compiler-2.12.7.jar;C:\Users\Administrator\.m2\repository\org\scala-lang\modules\scala-xml_2.12\1.0.6\scala-xml_2.12-1.0.6.jar;C:\Users\Administrator\.m2\repository\org\apache\flink\flink-streaming-scala_2.12\1.11.1\flink-streaming-scala_2.12-1.11.1.jar;C:\Users\Administrator\.m2\repository\org\apache\flink\flink-hadoop-compatibility_2.11\1.11.1\flink-hadoop-compatibility_2.11-1.11.1.jar;C:\Users\Administrator\.m2\repository\org\apache\hadoop\hadoop-common\2.8.5\hadoop-common-2.8.5.jar;C:\Users\Administrator\.m2\repository\org\apache\hadoop\hadoop-annotations\2.8.5\hadoop-annotations-2.8.5.jar;C:\Users\Administrator\.m2\repository\com\google\guava\guava\11.0.2\guava-11.0.2.jar;C:\Users\Administrator\.m2\repository\xmlenc\xmlenc\0.52\xmlenc-0.52.jar;C:\Users\Administrator\.m2\repository\org\apache\httpcomponents\httpclient\4.5.2\httpclient-4.5.2.jar;C:\Users\Administrator\.m2\repository\org\apache\httpcomponents\httpcore\4.4.4\httpcore-4.4.4.jar;C:\Users\Administrator\.m2\repository\commons-codec\commons-codec\1.4\commons-codec-1.4.jar;C:\Users\Administrator\.m2\repository\commons-io\commons-io\2.4\commons-io-2.4.jar;C:\Users\Administrator\.m2\repository\commons-net\c
ommons-net\3.1\commons-net-3.1.jar;C:\Users\Administrator\.m2\repository\commons-collections\commons-collections\3.2.2\commons-collections-3.2.2.jar;C:\Users\Administrator\.m2\repository\javax\servlet\servlet-api\2.5\servlet-api-2.5.jar;C:\Users\Administrator\.m2\repository\org\mortbay\jetty\jetty\6.1.26\jetty-6.1.26.jar;C:\Users\Administrator\.m2\repository\org\mortbay\jetty\jetty-util\6.1.26\jetty-util-6.1.26.jar;C:\Users\Administrator\.m2\repository\org\mortbay\jetty\jetty-sslengine\6.1.26\jetty-sslengine-6.1.26.jar;C:\Users\Administrator\.m2\repository\javax\servlet\jsp\jsp-api\2.1\jsp-api-2.1.jar;C:\Users\Administrator\.m2\repository\com\sun\jersey\jersey-core\1.9\jersey-core-1.9.jar;C:\Users\Administrator\.m2\repository\com\sun\jersey\jersey-json\1.9\jersey-json-1.9.jar;C:\Users\Administrator\.m2\repository\org\codehaus\jettison\jettison\1.1\jettison-1.1.jar;C:\Users\Administrator\.m2\repository\com\sun\xml\bind\jaxb-impl\2.2.3-1\jaxb-impl-2.2.3-1.jar;C:\Users\Administrator\.m2\repository\javax\xml\bind\jaxb-api\2.2.2\jaxb-api-2.2.2.jar;C:\Users\Administrator\.m2\repository\javax\xml\stream\stax-api\1.0-2\stax-api-1.0-2.jar;C:\Users\Administrator\.m2\repository\javax\activation\activation\1.1\activation-1.1.jar;C:\Users\Administrator\.m2\repository\org\codehaus\jackson\jackson-jaxrs\1.8.3\jackson-jaxrs-1.8.3.jar;C:\Users\Administrator\.m2\repository\org\codehaus\jackson\jackson-xc\1.8.3\jackson-xc-1.8.3.jar;C:\Users\Administrator\.m2\repository\com\sun\jersey\jersey-server\1.9\jersey-server-1.9.jar;C:\Users\Administrator\.m2\repository\asm\asm\3.1\asm-3.1.jar;C:\Users\Administrator\.m2\repository\commons-logging\commons-logging\1.1.3\commons-logging-1.1.3.jar;C:\Users\Administrator\.m2\repository\log4j\log4j\1.2.17\log4j-1.2.17.jar;C:\Users\Administrator\.m2\repository\net\java\dev\jets3t\jets3t\0.9.0\jets3t-0.9.0.jar;C:\Users\Administrator\.m2\repository\com\jamesmurty\utils\java-xmlbuilder\0.4\java-xmlbuilder-0.4.jar;C:\Users\Administrator\.m2\repository\commons-lang\commons-lang\2.6\commons-lang-2.6.jar;C:\Users\Administrator\.m2\repository\commons-configuration\commons-configuration\1.6\commons-configuration-1.6.jar;C:\Users\Administrator\.m2\repository\commons-digester\commons-digester\1.8\commons-digester-1.8.jar;C:\Users\Administrator\.m2\repository\commons-beanutils\commons-beanutils\1.7.0\commons-beanutils-1.7.0.jar;C:\Users\Administrator\.m2\repository\commons-beanutils\commons-beanutils-core\1.8.0\commons-beanutils-core-1.8.0.jar;C:\Users\Administrator\.m2\repository\org\slf4j\slf4j-log4j12\1.7.10\slf4j-log4j12-1.7.10.jar;C:\Users\Administrator\.m2\repository\org\codehaus\jackson\jackson-core-asl\1.9.13\jackson-core-asl-1.9.13.jar;C:\Users\Administrator\.m2\repository\org\codehaus\jackson\jackson-mapper-asl\1.9.13\jackson-mapper-asl-1.9.13.jar;C:\Users\Administrator\.m2\repository\org\apache\avro\avro\1.7.4\avro-1.7.4.jar;C:\Users\Administrator\.m2\repository\com\thoughtworks\paranamer\paranamer\2.3\paranamer-2.3.jar;C:\Users\Administrator\.m2\repository\com\google\protobuf\protobuf-java\2.5.0\protobuf-java-2.5.0.jar;C:\Users\Administrator\.m2\repository\com\google\code\gson\gson\2.2.4\gson-2.2.4.jar;C:\Users\Administrator\.m2\repository\org\apache\hadoop\hadoop-auth\2.8.5\hadoop-auth-2.8.5.jar;C:\Users\Administrator\.m2\repository\com\nimbusds\nimbus-jose-jwt\4.41.1\nimbus-jose-jwt-4.41.1.jar;C:\Users\Administrator\.m2\repository\com\github\stephenc\jcip\jcip-annotations\1.0-1\jcip-annotations-1.0-1.jar;C:\Users\Administrator\.m2\repository\net\minidev\json-smart\2.3\json-
smart-2.3.jar; …(remaining classpath entries omitted: all jars are resolved from the local Maven repository C:\Users\Administrator\.m2\repository, including Hadoop 2.8.5, the Flink 1.11.1 table / kafka / orc modules, the redis (1.1.5) and hbase (1.10.2) connectors, the Kudu client, and the MySQL / PostgreSQL / ClickHouse JDBC drivers)… com.yanqi.tableql.ToKafka
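
The run above ends with the main class com.yanqi.tableql.ToKafka, whose source is not shown in the post. For orientation only, here is a minimal, hypothetical Scala sketch of a Table API / SQL job that writes JSON records to Kafka using the flink-connector-kafka dependency visible on the classpath; the table names, topic and bootstrap.servers are placeholders, not the author's actual values.

import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment

object ToKafka {
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tEnv = StreamTableEnvironment.create(env)

    // Demo source: the built-in datagen connector, so the sketch runs without any external system.
    tEnv.executeSql(
      """CREATE TABLE src (
        |  id     STRING,
        |  amount DOUBLE
        |) WITH (
        |  'connector' = 'datagen',
        |  'rows-per-second' = '1'
        |)""".stripMargin)

    // Hypothetical Kafka sink; topic and bootstrap.servers are placeholder values.
    tEnv.executeSql(
      """CREATE TABLE kafka_sink (
        |  id     STRING,
        |  amount DOUBLE
        |) WITH (
        |  'connector' = 'kafka',
        |  'topic' = 'demo_topic',
        |  'properties.bootstrap.servers' = 'localhost:9092',
        |  'format' = 'json'
        |)""".stripMargin)

    // executeSql on an INSERT submits the streaming job; no explicit env.execute() is needed here.
    tEnv.executeSql("INSERT INTO kafka_sink SELECT id, amount FROM src")
  }
}

Running such a job requires flink-connector-kafka and flink-json on the classpath; both appear in the dependency listing above.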



