1、配置hive-site.xml
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://master:3306/hive?createDatabaseIfNotExist=true&amp;characterEncoding=UTF-8</value>
<description>JDBC connection string for a JDBC metastore</description>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
<description>Driver class name for a JDBC metastore</description>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
<description>username for metastore database</description>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>root</value>
<description></description>
</property>
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/app/data/hive/warehouse</value>
<description></description>
</property>
<!-- 如果不配置下面的部分会产生错误1. -->
<property>
<name>hive.exec.local.scratchdir</name>
<value>/app/data/hive/iotmp</value>
<description>Local scratch space for Hive jobs</description>
</property>
<property>
<name>hive.downloaded.resources.dir</name>
<value>/app/data/hive/iotmp</value>
<description>Temporary local directory for added resources in the remote file system.</description>
</property>
<property>
<name>hive.querylog.location</name>
<value>/app/data/hive/iotmp/log</value>
<description>Location of Hive run time structured log file</description>
</property>
<property>
<name>hive.server2.logging.operation.log.location</name>
<value>/app/data/hive/iotmp/operation_logs</value>
<description>Top level directory where operation logs are stored if logging functionality is enabled</description>
</property>
2、配置hive-env.sh
export HIVE_HOME=/app/bigdata/hive/apache-hive-1.2.1-bin
export HIVE_CONF_DIR=/app/bigdata/hive/apache-hive-1.2.1-bin/conf
3、配置 hive-config.sh
export JAVA_HOME=/app/bigdata/java/jdk1.7.0_79
export HADOOP_HOME=/app/bigdata/hadoop/hadoop-2.6.4
export SPARK_HOME=/app/bigdata/spark/spark-1.6.2-bin-hadoop2.6
4、配置log
vim hive-log4j.properties
hive.log.dir=/app/bigdata/hive/hive/log/
5、mysql给hive表授权
grant select,insert,update,delete,create,drop on vtdc.employee to joe@10.163.225.87 identified by '123';
给来自10.163.225.87的用户joe分配可对数据库vtdc的employee表进行select,insert,update,delete,create,drop等操作的权限,并设定口令为123。
grant all on hive.* to root@'master' identified by 'root';
flush privileges;
6、启动hadoop服务:http://192.168.1.10:50070/
sh sbin/start-dfs.sh
sbin/start-yarn.sh
7、启动Hive
8、Hive数据库CRUD操作集合
Create DataBase:create database testdb;
Show DataBase: show databases;
Use DataBase: use testdb;
Show Tables: show tables;
Create Table: create table student(id int);
9、Hive数据导入导出
1、第一种加载数据到student中
注意:使用load加载数据到数据库中是不使用mapreduce的,而桶类型的表用insert要用到mapreduce
Import Data: load data local inpath '/app/bigdata/hive/apache-hive-1.2.1-bin/student' into table student;
使用select * 不加条件时,不执行MapReduce,执行比较快;最后一行显示的是null,原因是文件中有一行空格;
2、第二种加载数据到student中的方法
在/usr/local/hive/目录下创建student_1文件,并写入一列数字;
执行命令hadoop fs -put student /app/data/hive/warehouse/testdb.db/student
或者 hdfs dfs -put student /app/data/hive/warehouse/testdb.db/student
10、批量kill linux hadoop 进程
ps aux|grep hadoop|grep -v grep|awk '{print $2}'|xargs kill -9
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://master:3306/hive?createDatabaseIfNotExist=true&amp;characterEncoding=UTF-8</value>
<description>JDBC connection string for a JDBC metastore</description>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
<description>Driver class name for a JDBC metastore</description>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
<description>username for metastore database</description>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>root</value>
<description></description>
</property>
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/app/data/hive/warehouse</value>
<description></description>
</property>
<!-- 如果不配置下面的部分会产生错误1. -->
<property>
<name>hive.exec.local.scratchdir</name>
<value>/app/data/hive/iotmp</value>
<description>Local scratch space for Hive jobs</description>
</property>
<property>
<name>hive.downloaded.resources.dir</name>
<value>/app/data/hive/iotmp</value>
<description>Temporary local directory for added resources in the remote file system.</description>
</property>
<property>
<name>hive.querylog.location</name>
<value>/app/data/hive/iotmp/log</value>
<description>Location of Hive run time structured log file</description>
</property>
<property>
<name>hive.server2.logging.operation.log.location</name>
<value>/app/data/hive/iotmp/operation_logs</value>
<description>Top level directory where operation logs are stored if logging functionality is enabled</description>
</property>
2、配置hive-env.sh
export HIVE_HOME=/app/bigdata/hive/apache-hive-1.2.1-bin
export HIVE_CONF_DIR=/app/bigdata/hive/apache-hive-1.2.1-bin/conf
3、配置 hive-config.sh
export JAVA_HOME=/app/bigdata/java/jdk1.7.0_79
export HADOOP_HOME=/app/bigdata/hadoop/hadoop-2.6.4
export SPARK_HOME=/app/bigdata/spark/spark-1.6.2-bin-hadoop2.6
4、配置log
vim hive-log4j.properties
hive.log.dir=/app/bigdata/hive/hive/log/
5、mysql给hive表授权
grant select,insert,update,delete,create,drop on vtdc.employee to joe@10.163.225.87 identified by '123';
给来自10.163.225.87的用户joe分配可对数据库vtdc的employee表进行select,insert,update,delete,create,drop等操作的权限,并设定口令为123。
grant all on hive.* to root@'master' identified by 'root';
flush privileges;
6、启动hadoop服务:http://192.168.1.10:50070/
sh sbin/start-dfs.sh
sbin/start-yarn.sh
7、启动Hive
8、Hive数据库CRUD操作集合
Create DataBase:create database testdb;
Show DataBase: show databases;
Use DataBase: use testdb;
Show Tables: show tables;
Create Table: create table student(id int);
9、Hive数据导入导出
1、第一种加载数据到student中
注意:使用load加载数据到数据库中是不使用mapreduce的,而桶类型的表用insert要用到mapreduce
Import Data: load data local inpath '/app/bigdata/hive/apache-hive-1.2.1-bin/student' into table student;
使用select * 不加条件时,不执行MapReduce,执行比较快;最后一行显示的是null,原因是文件中有一行空格;
2、第二种加载数据到student中的方法
在/usr/local/hive/目录下创建student_1文件,并写入一列数字;
执行命令hadoop fs -put student /app/data/hive/warehouse/testdb.db/student
或者 hdfs dfs -put student /app/data/hive/warehouse/testdb.db/student
10、批量kill linux qemu 进程
ps aux|grep hadoop|grep -v grep|awk '{print $2}'|xargs kill -9