
[cloudera@quickstart ~]$ sudo hive

Logging initialized using configuration in file:/etc/hive/conf.dist/hive-log4j.properties
WARNING: Hive CLI is deprecated and migration to Beeline is recommended.
hive> create database emp_built;
OK
Time taken: 0.623 seconds
hive> use emp_built;
OK
Time taken: 0.075 seconds
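The database can also be created idempotently and the catalog inspected; a small sketch of equivalent statements that were not run in this capture:

-- re-runnable form of the statement above
create database if not exists emp_built;
-- list the available databases
show databases;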
hive> create table employee_data (Id int, Name string, Salary float)
> row format delimited
> fields terminated by ',';
OK
Time taken: 0.497 seconds
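The resulting schema can be verified before loading any data; for example (not part of this capture), DESCRIBE prints the columns and DESCRIBE FORMATTED additionally shows the warehouse location and row format:

describe employee_data;
describe formatted employee_data;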
hive> LOAD DATA LOCAL INPATH '/home/cloudera/employee_data.csv' INTO TABLE
employee_data;
Loading data to table emp_built.employee_data
Table emp_built.employee_data stats: [numFiles=1, totalSize=113]
OK
Time taken: 0.862 seconds
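LOAD DATA LOCAL INPATH copies the file from the local filesystem into the table's warehouse directory and appends to any existing data; without LOCAL the path is resolved on HDFS, and OVERWRITE replaces the current contents. A hedged sketch using the same file:

-- replace the table contents instead of appending (not executed here)
LOAD DATA LOCAL INPATH '/home/cloudera/employee_data.csv' OVERWRITE INTO TABLE employee_data;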
hive> select * from employee_data;
OK
1 Gaurav 30000.0
2 Aryan 20000.0
3 Vishal 40000.0
4 John 10000.0
5 Henry 25000.0
6 William 9000.0
7 Lisa 25000.0
8 Ronit 20000.0
Time taken: 0.997 seconds, Fetched: 8 row(s)
hive> select Id, Name, sqrt(Salary) from employee_data ;
OK
1 Gaurav 173.20508075688772
2 Aryan 141.4213562373095
3 Vishal 200.0
4 John 100.0
5 Henry 158.11388300841898
6 William 94.86832980505137
7 Lisa 158.11388300841898
8 Ronit 141.4213562373095
Time taken: 0.192 seconds, Fetched: 8 row(s)
hive> select Id, Name, ceil(Salary) from employee_data ;
OK
1 Gaurav 30000
2 Aryan 20000
3 Vishal 40000
4 John 10000
5 Henry 25000
6 William 9000
7 Lisa 25000
8 Ronit 20000
Time taken: 0.188 seconds, Fetched: 8 row(s)
hive> select Id, Name, exp(Salary) from employee_data ;
OK
1 Gaurav Infinity
2 Aryan Infinity
3 Vishal Infinity
4 John Infinity
5 Henry Infinity
6 William Infinity
7 Lisa Infinity
8 Ronit Infinity
Time taken: 0.128 seconds, Fetched: 8 row(s)
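sqrt, ceil and exp above are Hive built-in mathematical functions (exp overflows to Infinity for salaries this large); floor, round, abs and pow are used the same way. An illustrative query, not run in this session:

select Id, Name, round(Salary / 12, 2) as monthly_salary, floor(Salary) as floored
from employee_data;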
hive> select count(*) from employee_data;
Query ID = root_20211025102121_3c262e77-ddaf-439a-ac85-b3ba071e6f31
Total jobs = 1
Launching Job 1 out of 1
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapreduce.job.reduces=<number>
Starting Job = job_1634536607590_0007, Tracking URL =
http://quickstart.cloudera:8088/proxy/application_1634536607590_0007/
Kill Command = /usr/lib/hadoop/bin/hadoop job -kill job_1634536607590_0007
Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 1
2021-10-25 10:21:47,660 Stage-1 map = 0%, reduce = 0%
2021-10-25 10:21:58,150 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 1.69 sec
2021-10-25 10:22:12,178 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 4.1 sec
MapReduce Total cumulative CPU time: 4 seconds 100 msec
Ended Job = job_1634536607590_0007
MapReduce Jobs Launched:
Stage-Stage-1: Map: 1 Reduce: 1 Cumulative CPU: 4.1 sec HDFS Read: 6959 HDFS
Write: 2 SUCCESS
Total MapReduce CPU Time Spent: 4 seconds 100 msec
OK
8
Time taken: 43.369 seconds, Fetched: 1 row(s)
hive> select max(Salary) from employee_data;
Query ID = root_20211025102323_7478c372-c6ab-46fc-8766-85ca8c70f293
Total jobs = 1
Launching Job 1 out of 1
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapreduce.job.reduces=<number>
Starting Job = job_1634536607590_0008, Tracking URL =
http://quickstart.cloudera:8088/proxy/application_1634536607590_0008/
Kill Command = /usr/lib/hadoop/bin/hadoop job -kill job_1634536607590_0008
Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 1
2021-10-25 10:23:51,109 Stage-1 map = 0%, reduce = 0%
2021-10-25 10:24:01,335 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 1.65 sec
2021-10-25 10:24:14,709 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 3.79 sec
MapReduce Total cumulative CPU time: 3 seconds 790 msec
Ended Job = job_1634536607590_0008
MapReduce Jobs Launched:
Stage-Stage-1: Map: 1 Reduce: 1 Cumulative CPU: 3.79 sec HDFS Read: 7088 HDFS
Write: 8 SUCCESS
Total MapReduce CPU Time Spent: 3 seconds 790 msec
OK
40000.0
Time taken: 37.154 seconds, Fetched: 1 row(s)
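count(*) and max(Salary) launch MapReduce jobs because they aggregate across all rows; min, avg and sum behave the same way and can be combined with GROUP BY. An illustrative aggregate query (not part of the capture):

select min(Salary), avg(Salary), sum(Salary) from employee_data;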
hive> select Id, upper(Name) from employee_data;
OK
1 GAURAV
2 ARYAN
3 VISHAL
4 JOHN
5 HENRY
6 WILLIAM
7 LISA
8 RONIT
Time taken: 0.133 seconds, Fetched: 8 row(s)
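upper is one of Hive's string built-ins; lower, length and concat work the same way. For example (not executed here):

select Id, lower(Name), length(Name), concat(Name, '_', cast(Id as string))
from employee_data;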
hive> use Org;
OK
Time taken: 0.024 seconds
hive> create table student (id int, name string, age int, institute string)
> partitioned by (course string)
> row format delimited
> fields terminated by ',';
OK
Time taken: 0.118 seconds
hive> describe student;
OK
id int
name string
age int
institute string
course string

# Partition Information
# col_name data_type comment

course string
Time taken: 0.253 seconds, Fetched: 10 row(s)
hive> LOAD DATA LOCAL INPATH '/home/cloudera/student_details1.csv' INTO TABLE
student PARTITION(course="JAVA");
Loading data to table org.student partition (course=JAVA)
Partition org.student{course=JAVA} stats: [numFiles=1, numRows=0, totalSize=63,
rawDataSize=0]
OK
Time taken: 0.89 seconds
hive> LOAD DATA LOCAL INPATH '/home/cloudera/student_details1.csv' INTO TABLE
student PARTITION(course="Hadoop");
Loading data to table org.student partition (course=Hadoop)
Partition org.student{course=Hadoop} stats: [numFiles=1, numRows=0, totalSize=63,
rawDataSize=0]
OK
Time taken: 0.522 seconds
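Each static-partition load creates a course=<value> subdirectory under the student table's warehouse directory; the partitions created so far can be listed with a statement like the following (not run in this capture):

show partitions student;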
hive> select * from student where course="JAVA";
OK
1 Abdul 21 viva JAVA
2 Rohit 23 viva JAVA
3 Rikita 22 Mit JAVA
4 Uday 23 Viva JAVA
Time taken: 1.159 seconds, Fetched: 4 row(s)
hive> select * from student;
OK
1 Abdul 21 viva Hadoop
2 Rohit 23 viva Hadoop
3 Rikita 22 Mit Hadoop
4 Uday 23 Viva Hadoop
1 Abdul 21 viva JAVA
2 Rohit 23 viva JAVA
3 Rikita 22 Mit JAVA
4 Uday 23 Viva JAVA
Time taken: 0.145 seconds, Fetched: 8 row(s)
hive> LOAD DATA LOCAL INPATH '/home/cloudera/student_details2.csv' INTO TABLE
student PARTITION(course="Hadoop");
Loading data to table org.student partition (course=Hadoop)
Partition org.student{course=Hadoop} stats: [numFiles=2, numRows=0, totalSize=131,
rawDataSize=0]
OK
Time taken: 0.748 seconds
hive> select * from student where course="Hadoop";
OK
1 Abdul 21 viva Hadoop
2 Rohit 23 viva Hadoop
3 Rikita 22 Mit Hadoop
4 Uday 23 Viva Hadoop
5 Priyanka 21 viva Hadoop
6 Hadi 23 viva Hadoop
7 Ronaldo 22 Mit Hadoop
8 Mbappe 23 Viva Hadoop
Time taken: 0.16 seconds, Fetched: 8 row(s)
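A second load into the same partition appends to it, which is why numFiles increased from 1 to 2 in the partition stats above. If a partition ever needs to be rebuilt, it can be dropped (together with its data, since student is a managed table); an illustrative statement only:

alter table student drop partition (course='Hadoop');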
hive> set hive.exec.dynamic.partition=true;
hive> set hive.exec.dynamic.partition.mode=nonstrict;
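These two properties enable dynamic partitioning: the first switches the feature on, and nonstrict mode lets every partition column be derived from the query instead of requiring at least one static partition key. The current values can be echoed back with set (not shown in this capture):

set hive.exec.dynamic.partition;
set hive.exec.dynamic.partition.mode;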
hive> create table stud_demo(id int, name string, age int, institute string, course string)
> row format delimited
> fields terminated by ',';
OK
Time taken: 0.166 seconds
hive> create table student_part (id int, name string, age int, institute string)
> partitioned by (course string)
> row format delimited
> fields terminated by ',';
OK
Time taken: 0.094 seconds

Note: with hive.exec.dynamic.partition.mode set to nonstrict (as above), student_part
can be populated dynamically from stud_demo, one partition per distinct course value:

insert into student_part
partition(course)
select id, name, age, institute, course
from stud_demo;

hive> select * from stud_demo;
OK
Time taken: 0.096 seconds
hive> LOAD DATA LOCAL INPATH '/home/cloudera/stud_demo.csv' INTO TABLE stud_demo;
FAILED: SemanticException Line 1:23 Invalid path ''/home/cloudera/stud_demo.csv'':
No files matching path file:/home/cloudera/stud_demo.csv
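The SemanticException here only means that no file exists at that local path; the statement itself is valid. The directory can be checked from inside the CLI with a shell escape, for example:

!ls /home/cloudera/;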
hive> show tables;
OK
employee
employee1
new_emp
stud_demo
student
student_part
Time taken: 0.08 seconds, Fetched: 6 row(s)
hive> LOAD DATA LOCAL INPATH '/home/cloudera/student_demo.csv' INTO TABLE
stud_demo;
FAILED: SemanticException Line 1:23 Invalid path
''/home/cloudera/student_demo.csv'': No files matching path
file:/home/cloudera/student_demo.csv
hive> describe stud_demo;
OK
id int
name string
age int
institute string
course string
Time taken: 0.222 seconds, Fetched: 5 row(s)
hive> LOAD DATA LOCAL INPATH '/home/cloudera/student_demo.csv' INTO TABLE
stud_demo;
FAILED: SemanticException Line 1:23 Invalid path
''/home/cloudera/student_demo.csv'': No files matching path
file:/home/cloudera/student_demo.csv
hive> LOAD DATA LOCAL INPATH '/home/cloudera/stud_demo.csv' INTO TABLE stud_demo;
Loading data to table org.stud_demo
Table org.stud_demo stats: [numFiles=1, totalSize=148]
OK
Time taken: 0.279 seconds
hive> select * from stud_demo;
OK
1 Uday 23 Viva Java
2 Guru 22 ICS Java
3 Umesh 23 ICS Java
4 Omi 23 Finolex Java
5 Anvi 20 NIS Hadoop
6 Shraddha 21 SBC Hadoop
7 Ritu 21 GIT Hadoop
Time taken: 0.089 seconds, Fetched: 7 row(s)
hive>
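With stud_demo now populated, running the dynamic-partition insert noted earlier would create one partition per distinct course value (Java and Hadoop here). A sketch of how the result could be checked afterwards, not part of this capture:

show partitions student_part;
select * from student_part where course='Hadoop';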
