
Hive on Spark configuration


 

hive-site.xml 

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <!-- JDBC URL of the MySQL database that stores the Hive metastore -->
    <property>
        <name>javax.jdo.option.ConnectionURL</name>
        <value>jdbc:mysql://192.168.12.65:3306/metastore?useSSL=false&amp;useUnicode=true&amp;characterEncoding=UTF-8&amp;allowPublicKeyRetrieval=true</value>
    </property>

    <!-- Fully qualified class name of the MySQL JDBC driver -->
    <property>
        <name>javax.jdo.option.ConnectionDriverName</name>
        <value>com.mysql.cj.jdbc.Driver</value>
    </property>

    <!-- MySQL user name for the metastore connection -->
    <property>
        <name>javax.jdo.option.ConnectionUserName</name>
        <value>root</value>
    </property>

    <!-- MySQL password for the metastore connection -->
    <property>
        <name>javax.jdo.option.ConnectionPassword</name>
        <value>mysql</value>
    </property>
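
    <!-- With the four connection settings above in place, the metastore schema
         can be initialized once from the shell (a sketch, assuming Hive's
         schematool is on the PATH):
             schematool -dbType mysql -initSchema -->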

 

 

    <!-- Default HDFS location of the Hive warehouse -->
    <property>
        <name>hive.metastore.warehouse.dir</name>
        <value>/user/hive/warehouse</value>
    </property>

    <!-- Skip the metastore schema version check on startup -->
    <property>
        <name>hive.metastore.schema.verification</name>
        <value>false</value>
    </property>

    <!-- Run queries as the HiveServer2 process user rather than the submitting user -->
    <property>
        <name>hive.server2.enable.doAs</name>
        <value>false</value>
    </property>

    <!-- HiveServer2 Thrift port and bind address -->
    <property>
        <name>hive.server2.thrift.port</name>
        <value>10000</value>
    </property>

    <property>
        <name>hive.server2.thrift.bind.host</name>
        <value>192.168.12.65</value>
    </property>
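
    <!-- Once HiveServer2 is running, connectivity can be checked from another
         shell with Beeline (a sketch; the user name is only an assumption):
             beeline -u jdbc:hive2://192.168.12.65:10000 -n root -->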

 

    <property>
        <name>hive.metastore.event.db.notification.api.auth</name>
        <value>false</value>
    </property>

    <!-- Show column headers and the current database in the CLI -->
    <property>
        <name>hive.cli.print.header</name>
        <value>true</value>
    </property>

    <property>
        <name>hive.cli.print.current.db</name>
        <value>true</value>
    </property>

<!-- ZooKeeper settings, currently disabled
    <property>
        <name>hive.zookeeper.quorum</name>
        <value>datanebula7:2181,datanebula8:2181,datanebula9:2181</value>
    </property>

    <property>
        <name>hive.zookeeper.client.port</name>
        <value>2181</value>
    </property>
-->

 

<!-- Location of the Spark dependency jars on HDFS -->
<property>
    <name>spark.yarn.jars</name>
    <value>hdfs://zszccluster/spark/spark-jars/*</value>
</property>
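
<!-- The path above must already hold the Spark jars. A minimal upload sketch,
     assuming a Spark distribution unpacked at /opt/spark:
         hadoop fs -mkdir -p /spark/spark-jars
         hadoop fs -put /opt/spark/jars/* /spark/spark-jars -->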

  

<!-- Hive execution engine; left commented out here, so Hive keeps its default engine -->
<!--
<property>
    <name>hive.execution.engine</name>
    <value>spark</value>
</property>
-->
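
<!-- Even with the property commented out, Spark can be enabled per session
     from the Hive CLI or Beeline:
         set hive.execution.engine=spark; -->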

<!-- Timeout for connecting to the remote Spark driver when submitting a job, in ms -->
<property>
    <name>hive.spark.client.connect.timeout</name>
    <value>50000</value>
</property>

<!-- Hive heap size -->
<property>
    <name>hive.heapsize</name>
    <value>4096</value>
</property>

<!-- Default parallelism for Hive-on-Spark jobs -->
<property>
    <name>hive.exec.spark.default.parallelism</name>
    <value>30</value>
</property>

<!-- Executor memory for Hive's Spark session -->
<property>
    <name>spark.executor.memory</name>
    <value>5g</value>
</property>

</configuration>
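
With hive-site.xml in place (no hive.metastore.uris is configured, so Hive reaches MySQL directly), HiveServer2 can be brought up before testing. A minimal sketch, assuming $HIVE_HOME/bin is on the PATH:

# Start HiveServer2 in the background and keep its output for troubleshooting
nohup hiveserver2 > hiveserver2.log 2>&1 &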

 

spark-defaults.conf

# Submit to YARN
spark.master                             yarn
# Enable event logging and store the logs on HDFS
spark.eventLog.enabled                   true
spark.eventLog.dir                       hdfs://zszccluster/spark/logs
# Memory per executor
spark.executor.memory                    4g
# Memory for the driver
spark.driver.memory                      2g
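
The event log directory is not created automatically, so it should exist on HDFS before the first job writes to it. A minimal sketch using the cluster name from the configuration above:

hadoop fs -mkdir -p hdfs://zszccluster/spark/logs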

 

log4j.properties

log4j.rootLogger=WARN, CA
log4j.appender.CA=org.apache.log4j.ConsoleAppender
log4j.appender.CA.layout=org.apache.log4j.PatternLayout
log4j.appender.CA.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n

 

hive-log4j2.properties

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

status = WARN
name = HiveLog4j2
packages = org.apache.hadoop.hive.ql.log

# list of properties
property.hive.log.level = WARN
property.hive.root.logger = DRFA
property.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}
property.hive.log.file = hive.log
property.hive.perflogger.log.level = INFO

# list of all appenders
appenders = console, DRFA

# console appender
appender.console.type = Console
appender.console.name = console
appender.console.target = SYSTEM_ERR
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n

# daily rolling file appender
appender.DRFA.type = RollingRandomAccessFile
appender.DRFA.name = DRFA
appender.DRFA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}
# Use %pid in the filePattern to append <process-id>@<host-name> to the filename if you want separate log files for different CLI sessions
appender.DRFA.filePattern = ${sys:hive.log.dir}/${sys:hive.log.file}.%d{yyyy-MM-dd}
appender.DRFA.layout.type = PatternLayout
appender.DRFA.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n
appender.DRFA.policies.type = Policies
appender.DRFA.policies.time.type = TimeBasedTriggeringPolicy
appender.DRFA.policies.time.interval = 1
appender.DRFA.policies.time.modulate = true
appender.DRFA.strategy.type = DefaultRolloverStrategy
appender.DRFA.strategy.max = 30

# list of all loggers
loggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX, PerfLogger, AmazonAws, ApacheHttp

logger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn
logger.NIOServerCnxn.level = WARN

logger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO
logger.ClientCnxnSocketNIO.level = WARN

logger.DataNucleus.name = DataNucleus
logger.DataNucleus.level = ERROR

logger.Datastore.name = Datastore
logger.Datastore.level = ERROR

logger.JPOX.name = JPOX
logger.JPOX.level = ERROR

logger.AmazonAws.name = com.amazonaws
logger.AmazonAws.level = INFO

logger.ApacheHttp.name = org.apache.http
logger.ApacheHttp.level = INFO

logger.PerfLogger.name = org.apache.hadoop.hive.ql.log.PerfLogger
logger.PerfLogger.level = ${sys:hive.perflogger.log.level}

# root logger
rootLogger.level = ${sys:hive.log.level}
rootLogger.appenderRefs = root
rootLogger.appenderRef.root.ref = ${sys:hive.root.logger}
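
The property.hive.* values above are only defaults resolved through the ${sys:...} lookups, so they can be overridden per invocation instead of editing the file. A sketch using --hiveconf (exact handling varies by Hive version; /var/log/hive is only an example path):

hive --hiveconf hive.log.dir=/var/log/hive --hiveconf hive.log.level=INFO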

From: https://www.cnblogs.com/Mr--Wang/p/18184702

    配置mysql多实例需要专用的、支持多实例的mysql软件。这里用到的是mysql-5.7.24-linux-glibc2.12-x86_64.tar.gz解压mysql软件包tar-xfmysql-5.7.34-linux-glibc2.12-x86_64.tar.gz-C/usr/local/mysql配置多实例vim/etc/my.cnf[mysqld_multi]            ......