DataPrep lets you prepare your data using a single library with a few lines of code.
https://docs.dataprep.ai/index.html
Installing DataPrep

# install dataprep via PyPI
pip install dataprep

# install dataprep via conda or miniconda
conda install -c conda-forge dataprep
Datasets

from dataprep.datasets import *

# available datasets
get_dataset_names()
['covid19', 'wine-quality-red', 'iris', 'waste_hauler', 'countries',
 'patient_info', 'house_prices_train', 'adult', 'house_prices_test', 'titanic']

# load dataset
from dataprep.datasets import load_dataset
df = load_dataset('titanic')
EDA

import dataprep.eda as eda

# analyze distributions with plot()
# plot dataset
eda.plot(df)
# plot distribution and stats for one column
eda.plot(df, 'column_name')
# plots depicting relationship between 2 columns
eda.plot(df, 'column_nameA', 'column_nameB')
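For example, on the Titanic dataset loaded above ('Age', 'Sex' and 'Survived' are assumed column names from the standard Titanic schema):

eda.plot(df, 'Age')              # distribution, stats and insights for one column
eda.plot(df, 'Sex', 'Survived')  # one column's distribution grouped by the other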
# analyze correlations with plot_correlation()
# plot correlation
eda.plot_correlation(df)
# plot most correlated columns to column x
eda.plot_correlation(df, 'x')
# plot joint distribution of column x and y
eda.plot_correlation(df, 'x', 'y')

# analyze missing values with plot_missing()
# plot missing values
eda.plot_missing(df)
# plot impact of missing values in column 'x'
eda.plot_missing(df, 'x')
# impact of missing values in 'x' and 'y'
eda.plot_missing(df, 'x', 'y')

# analyze difference between dataframes with plot_diff()
# plot difference between dataframes
df1 = df.sample(frac=0.75)
df2 = df.drop(df1.index)
eda.plot_diff([df1, df2])
# create a profile report with create_report()
eda.create_report(df)

The report contains:
- Overview: data types
- Variables: type, unique and distinct counts, missing values
- Quantile stats
- Descriptive stats
- Text analysis for length, sample and letter
- Correlations
- Missing values: bar chart, heatmap and spectrum
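create_report() returns a Report object that can be saved or opened directly; a minimal sketch based on the Report methods in the dataprep.eda docs:

report = eda.create_report(df, title='My Report')  # title= is optional
report.save('my_report.html')  # write the report to an HTML file
report.show_browser()          # open the report in a browser tab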
Customizing the output

# parameters to customize output
display  # control tabs, sections and sessions
config   # dictionary of customizable parameters

Customizing is available for plot(), plot_missing(), plot_correlation() and create_report().

from dataprep.eda import plot, create_report

# choosing tabs
plot(df, 'Pclass',
     display=['Stats', 'Bar Chart', 'Pie Chart'])
# choosing sections
create_report(df, display=["Overview", "Interactions"])
# choosing sessions
plot(df, display=["Stats", "Insights"])
# customize plot
plot(df, "Pclass", config={'bar.bars': 10, 'bar.sort_descending': True,
     'bar.yscale': 'linear', 'height': 400, 'width': 450})
# customize insights
plot(df, config={'insight.missing.threshold': 20,
     'insight.duplicates.threshold': 20})
CLEAN

import dataprep.clean as clean

# clean column headers
clean.clean_headers(df)
parameters
case=
replace=
remove_accents=
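As an illustration, a short sketch of the case= parameter ('snake' is one of the documented header case styles):

# convert all column headers to snake_case
df_clean = clean.clean_headers(df, case='snake')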
# country names
# validate True/False if country name
clean.validate_country('country_name')
# clean country names
clean.clean_country(df, 'column_name')
parameters
input_format=
output_format=
strict=
fuzzy_dist=
errors=
inplace=
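A hedged sketch combining two of the parameters above (ISO 'alpha-2' codes are among the output formats in the clean_country docs; fuzzy_dist=1 tolerates one-character misspellings):

# standardize misspelled country names to ISO alpha-2 codes
clean.clean_country(df, 'column_name', fuzzy_dist=1, output_format='alpha-2')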
# IP addresses
# validate IP address
clean.validate_ip(df['column'])
# clean IP addresses
clean.clean_ip(df, 'column')
parameters
input_format=
output_format=
errors=
report=
# phone numbers
# validate phone number
clean.validate_phone(df['column'])
# clean phone numbers
clean.clean_phone(df, 'column')
parameters
output_format=
split=
fix_missing=
inplace=
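A sketch of the output_format= and split= parameters, assuming the formats listed in the clean_phone docs:

# normalize numbers to E.164 format
clean.clean_phone(df, 'column', output_format='e164')
# split numbers into separate country code / area code / number columns
clean.clean_phone(df, 'column', split=True)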
# dates and times
# validate date and time
clean.validate_date(df['column'])
# clean date
clean.clean_date(df, 'column')
parameters
output_format=
target_format=
origin_timezone=
target_timezone=
input_timezone=
output_timezone=
fix_empty=
fix_missing=
infer_day_first=
show_report=
report=
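A sketch of output_format=, assuming clean_date accepts a date template string as in its docs:

# parse mixed date strings into a uniform YYYY-MM-DD format
clean.clean_date(df, 'column', output_format='YYYY-MM-DD')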
# clean text
clean.clean_text(df, 'column')
parameters
fillna=
lowercase=
remove_digits=
remove_html=
remove_urls=
remove_punctuations=
remove_accents=
remove_stopwords=
remove_whitespace=
stopwords=
pipeline=

# duplicate values
clean.clean_duplication(df, 'column')
parameters
df_var_name=
page_size=
# email addresses
# validate email address
clean.validate_email(df['column'])
# clean email addresses
clean.clean_email(df, 'column')
parameters
remove_whitespace=
fix_domain=
split=
errors=

# URLs
# validate URL
clean.validate_url(df['column'])
# clean URLs
clean.clean_url(df, 'column')
parameters
inplace=
split=
remove_auth=
report=
errors=
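For instance, a sketch of split= (splitting into username and domain columns follows the clean_email docs):

# split addresses into separate username and domain columns
clean.clean_email(df, 'column', split=True)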
# geographic coordinates
# validate coordinates
clean.validate_lat_long(df['column'])
# clean geographic coordinates
clean.clean_lat_long(df, 'column')
parameters
output_format=
split=
errors=
inplace=
# clean entire dataframe
inferred_dtypes, cleaned_df = clean.clean_df(df)
parameters
clean_headers=
data_type_detection=
standardize_missing_values=
remove_duplicate_entries=
downcast_memory=
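A sketch toggling two of the parameters above (keyword names mirror the parameter list; default values assumed):

# keep original headers and memory layout, clean everything else
inferred_dtypes, cleaned_df = clean.clean_df(df, clean_headers=False, downcast_memory=False)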
# clean dataset for downstream ML task
clean.clean_ml(df)

components and operators
cat_encoding
cat_imputation
cat_null_value
fill_val
num_imputation
num_null_value
num_scaling
variance_threshold
variance
include_operators
exclude_operators
customized_cat_pipeline
customized_num_pipeline

import pandas as pd
df = pd.read_csv('adult.csv')
# split dataframe
rate = 0.7
num_of_rows = len(df.index)
train = df.iloc[:int(rate*num_of_rows), :]
test = df.iloc[int(rate*num_of_rows):, :]
clean_train, clean_test = clean.clean_ml(train, test, target='class')
Connect

# connect to multiple APIs
import dataprep.connector as conn
connection = conn.connect('dblp')
connection.info()
await connection.query('publication', q='CVPR', _count=2000)

# connect to DBs (a partitioned-read sketch follows at the end of this sheet)
pip install connectorx
import connectorx as cx
cx.read_sql('postgresql://username:password@server:port/database',
            'SELECT * FROM lineitem')

supported databases
Postgres
MySQL
SQLite
SQL Server
Oracle
Redshift (through postgres protocol)
ClickHouse (through mysql protocol)
BigQuery

# Configuration UI of API
from dataprep.connector import config_generator_ui
config_generator_ui()
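The partitioned-read sketch referenced in the Connect section above: connectorx can split one query across parallel partitions (partition_on / partition_num are connectorx options; the connection string, table and column are placeholders):

import connectorx as cx
# split the query on a numeric column and read the partitions in parallel
df = cx.read_sql('postgresql://username:password@server:port/database',
                 'SELECT * FROM lineitem',
                 partition_on='l_orderkey', partition_num=10)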
