# Scrape artifact (document-hosting-site navigation text), commented out so the file parses:
# Professional Documents
# Culture Documents
# One-time setup: install any missing dependencies, then attach them.
# Skips packages that are already installed so the script does not
# re-download everything on every run.
# NOTE(review): the scraped first line was truncated to `packages("xts")`;
# restored to install.packages().
pkgs <- c("xts", "Hmisc", "readxl", "corpcor", "tseries", "matlib",
          "Matrix", "xlsx", "stockPortfolio", "quadprog")
missing_pkgs <- pkgs[!pkgs %in% rownames(installed.packages())]
if (length(missing_pkgs) > 0) {
  install.packages(missing_pkgs)
}

# Attach the libraries used below.
library(xts)
library(readxl)
library(Hmisc)
library(corpcor)
library(tseries)
library(matlib)
library(Matrix)
library(xlsx)
library(quadprog)  # was installed but never attached; needed for solve.QP()
# Open Data
# NOTE(review): the data-loading step (e.g. a read_excel() call creating
# FondoLatam and FondoLatamData) was lost in this scrape -- restore it
# before running anything below.
# Data Summary
# Print the data object and a per-column summary as a quick sanity check.
list(FondoLatam)
summary(FondoLatam)
# Convert to Time Series
# Analytic (unconstrained) efficient-frontier setup.
# C = covariance matrix of returns, r = vector of mean returns, and
# X, Y, Z, D are the standard scalars from which the frontier generators
# g and h are built: w(mu) = g + h * mu is the minimum-variance portfolio
# with target return mu.
# NOTE(review): `one` (a column vector of 1s) was never defined anywhere in
# the scraped original -- defined here so the Y/Z/g/h expressions can run.
# cov() already returns a square matrix, so the original
# matrix(c(...), nrow = 48, ncol = 48) wrapper (hard-coded to 48 assets)
# was redundant and has been removed.
C <- cov(FondoLatamData, use = "na.or.complete")
r <- matrix(colMeans(FondoLatamData, na.rm = TRUE))
one <- matrix(1, nrow = nrow(C), ncol = 1)
Cinv <- solve(C)  # invert once instead of re-solving C in every expression
X <- t(r) %*% Cinv %*% r
Y <- t(r) %*% Cinv %*% one
Z <- t(one) %*% Cinv %*% one
D <- X %*% Z - Y %*% Y
g <- (1 / as.vector(D)) * (as.vector(X) * (Cinv %*% one) - as.vector(Y) * (Cinv %*% r))
h <- (1 / as.vector(D)) * (as.vector(Z) * (Cinv %*% r) - as.vector(Y) * (Cinv %*% one))
# Trace the frontier for target returns from r_min to r_max in steps of incr.
# NOTE(review): the for-loop header and the W_p/R_P/S_p preallocations were
# lost in the scrape (only the loop body and the closing brace survived);
# reconstructed here.
r_max <- 0.25
r_min <- 0.00
incr <- 0.0005
targets <- seq(r_min, r_max, by = incr)
W_p <- matrix(0, nrow = length(targets), ncol = length(g))  # frontier weights
R_P <- matrix(0, nrow = length(targets), ncol = 1)          # expected returns
S_p <- matrix(0, nrow = length(targets), ncol = 1)          # std deviations
zz <- 1
for (j in targets) {
  w_j <- g + h * j  # frontier portfolio with target return j
  W_p[zz, ] <- t(w_j)
  R_P[zz, 1] <- t(w_j) %*% r
  S_p[zz, 1] <- sqrt(t(w_j) %*% C %*% w_j)
  zz <- zz + 1
}
# Sharpe ratio of each frontier portfolio (implicitly assumes a zero
# risk-free rate -- TODO confirm against the original workbook).
SharpeRatio <- R_P / S_p
Effdata <- data.frame(W_p, R_P, S_p, SharpeRatio)

# Global minimum-variance portfolio: w = C^-1 * 1 / (1' C^-1 1).
Cinv_mvp <- solve(C)  # invert once instead of three times
w_mvp <- (Cinv_mvp %*% one) / as.vector(t(one) %*% Cinv_mvp %*% one)
sig_mvp <- sqrt(t(w_mvp) %*% C %*% w_mvp)  # MVP standard deviation
r_mvp <- t(w_mvp) %*% r                    # MVP expected return
Opt <- cbind(sig_mvp, r_mvp)
# Efficient frontier via quadratic programming (quadprog::solve.QP).
# NOTE(review): the scrape preserved only fragments of this function
# (comments, bvec/meq, the loop counter, dvec, return). Reconstructed from
# the standard quadprog efficient-frontier routine those fragments match;
# verify against the original workbook before relying on it.
#
# returns          - matrix/data.frame of asset returns, one column per asset
# short            - "no" forbids short selling, "yes" allows it
# max.allocation is the maximum % allowed for any one security (reduces concentration)
# risk.premium.up is the upper limit of the risk premium modeled (see for loop below)
# risk.increment   - step size for the risk-premium sweep
# Returns a data.frame: one row per risk premium with the asset weights,
# Std.Dev, Exp.Return and sharpe columns.
eff.frontier <- function(returns, short = "no", max.allocation = NULL,
                         risk.premium.up = 0.5, risk.increment = 0.005) {
  covariance <- cov(returns)
  print(covariance)
  n <- ncol(covariance)

  # Create initial Amat and bvec assuming only equality constraint
  # (weights sum to 1): short-selling is allowed, no allocation constraints.
  Amat <- matrix(1, nrow = n)
  bvec <- 1
  meq <- 1

  # No short selling: add the inequality constraints w_i >= 0.
  if (short == "no") {
    Amat <- cbind(1, diag(n))
    bvec <- c(bvec, rep(0, n))
  }

  # Cap each weight: add -w_i >= -max.allocation.
  if (!is.null(max.allocation)) {
    if (max.allocation > 1 || max.allocation < 0) {
      stop("max.allocation must be between 0 and 1")
    }
    if (max.allocation * n < 1) {
      stop("max.allocation too low: weights cannot sum to 1")
    }
    Amat <- cbind(Amat, -diag(n))
    bvec <- c(bvec, rep(-max.allocation, n))
  }

  # Calculate the number of loops based on how high to vary the risk premium
  # and by what increment.
  loops <- risk.premium.up / risk.increment + 1
  loop <- 1

  # Preallocating the result matrix is not necessary, but speeds up
  # processing and uses less memory.
  eff <- matrix(nrow = loops, ncol = n + 3)
  colnames(eff) <- c(colnames(returns), "Std.Dev", "Exp.Return", "sharpe")

  for (i in seq(from = 0, to = risk.premium.up, by = risk.increment)) {
    dvec <- colMeans(returns) * i # This moves the solution up along the efficient frontier
    sol <- solve.QP(covariance, dvec = dvec, Amat = Amat, bvec = bvec, meq = meq)
    eff[loop, "Std.Dev"] <- sqrt(sum(sol$solution * colSums(covariance * sol$solution)))
    eff[loop, "Exp.Return"] <- as.numeric(sol$solution %*% colMeans(returns))
    eff[loop, "sharpe"] <- eff[loop, "Exp.Return"] / eff[loop, "Std.Dev"]
    eff[loop, 1:n] <- sol$solution
    loop <- loop + 1
  }

  return(as.data.frame(eff))
}
#340:448
# NOTE(review): the call that builds effFondoLatam (presumably
# eff.frontier(<returns>, ...)) was lost in the scrape -- restore it before
# running this section.
# Write the full frontier, then keep only the maximum-Sharpe portfolio.
# The file-path strings were split across physical lines in the scrape
# (a syntax error); rejoined here. Consider relative paths instead of
# absolute C:/ paths for portability.
write.csv(effFondoLatam,
          "C:/Users/Usuario/Desktop/UNIVERSIDAD/septimo semestre/finanzas 3/RESUPUESTA3.csv")
eff.optimal.pointFondoLatam <-
  effFondoLatam[effFondoLatam$sharpe == max(effFondoLatam$sharpe), ]
effFondoLatam <- rbind(eff.optimal.pointFondoLatam)
write.csv(effFondoLatam,
          "C:/Users/Usuario/Desktop/UNIVERSIDAD/septimo semestre/finanzas 3/RESPUESTA4.csv")
# Second (duplicate) setup section -- the scrape contains the script twice.
# Install any missing dependencies, then attach them; skips packages that
# are already installed so the script does not re-download on every run.
pkgs2 <- c("xts", "Hmisc", "readxl", "corpcor", "tseries", "matlib",
           "Matrix", "xlsx", "stockPortfolio", "quadprog")
missing_pkgs2 <- pkgs2[!pkgs2 %in% rownames(installed.packages())]
if (length(missing_pkgs2) > 0) {
  install.packages(missing_pkgs2)
}

library(xts)
library(readxl)
library(Hmisc)
library(corpcor)
library(tseries)
library(matlib)
library(Matrix)
library(xlsx)
library(quadprog)  # was installed but never attached; needed for solve.QP()
# Open Data
# NOTE(review): as in the first half of the file, the data-loading step
# creating FondoLatam is missing from this scrape -- restore it before
# running.
# Data Summary
# Print the data object and a per-column summary as a quick sanity check.
list(FondoLatam)
summary(FondoLatam)
# Second analytic frontier setup (wider target-return range below).
# NOTE(review): unlike the first half, this section never recomputes the
# covariance matrix C -- either the line was lost in the scrape or it
# deliberately reuses the earlier C; confirm against the original.
r <- matrix(colMeans(FondoLatamData, na.rm = TRUE))
one <- matrix(1, nrow = nrow(C), ncol = 1)  # column of 1s (undefined in the scrape)
Cinv <- solve(C)  # invert once instead of re-solving C in every expression
X <- t(r) %*% Cinv %*% r
Y <- t(r) %*% Cinv %*% one
Z <- t(one) %*% Cinv %*% one
D <- X %*% Z - Y %*% Y
# w(mu) = g + h * mu is the frontier portfolio with target return mu.
g <- (1 / as.vector(D)) * (as.vector(X) * (Cinv %*% one) - as.vector(Y) * (Cinv %*% r))
h <- (1 / as.vector(D)) * (as.vector(Z) * (Cinv %*% r) - as.vector(Y) * (Cinv %*% one))
# Trace the frontier over a wider target-return range (short selling implied
# by negative targets).
# NOTE(review): as in the first half, the for-loop header and the
# W_p/R_P/S_p preallocations were lost in the scrape; reconstructed here.
r_max <- 1
r_min <- -1
incr <- 0.0005
targets <- seq(r_min, r_max, by = incr)
W_p <- matrix(0, nrow = length(targets), ncol = length(g))  # frontier weights
R_P <- matrix(0, nrow = length(targets), ncol = 1)          # expected returns
S_p <- matrix(0, nrow = length(targets), ncol = 1)          # std deviations
zz <- 1
for (j in targets) {
  w_j <- g + h * j  # frontier portfolio with target return j
  W_p[zz, ] <- t(w_j)
  R_P[zz, 1] <- t(w_j) %*% r
  S_p[zz, 1] <- sqrt(t(w_j) %*% C %*% w_j)
  zz <- zz + 1
}
# Sharpe ratio of each frontier portfolio (zero risk-free rate assumed).
SharpeRatio <- R_P / S_p
Effdata <- data.frame(W_p, R_P, S_p, SharpeRatio)
# NOTE(review): OptimoSinRes is not defined anywhere in this scrape -- the
# line that builds it was lost; restore it before running. The file-path
# string was split across physical lines in the scrape; rejoined here.
write.csv(OptimoSinRes,
          "C:/Users/Usuario/Desktop/UNIVERSIDAD/septimo semestre/finanzas 3/RESPUESTASconrestricciones12.csv")

# Global minimum-variance portfolio: w = C^-1 * 1 / (1' C^-1 1).
Cinv_mvp <- solve(C)  # invert once instead of three times
w_mvp <- (Cinv_mvp %*% one) / as.vector(t(one) %*% Cinv_mvp %*% one)
sig_mvp <- sqrt(t(w_mvp) %*% C %*% w_mvp)  # MVP standard deviation
r_mvp <- t(w_mvp) %*% r                    # MVP expected return
Opt <- cbind(sig_mvp, r_mvp)
# Duplicate definition of the quadprog efficient-frontier routine (the
# scrape contains the script twice; this fragment additionally kept the
# `if(short=="no")` line).
# NOTE(review): reconstructed from the standard quadprog efficient-frontier
# routine these fragments match; verify against the original workbook.
#
# returns          - matrix/data.frame of asset returns, one column per asset
# short            - "no" forbids short selling, "yes" allows it
# max.allocation is the maximum % allowed for any one security (reduces concentration)
# risk.premium.up is the upper limit of the risk premium modeled (see for loop below)
# risk.increment   - step size for the risk-premium sweep
eff.frontier <- function(returns, short = "no", max.allocation = NULL,
                         risk.premium.up = 0.5, risk.increment = 0.005) {
  covariance <- cov(returns)
  print(covariance)
  n <- ncol(covariance)

  # Create initial Amat and bvec assuming only equality constraint
  # (weights sum to 1): short-selling is allowed, no allocation constraints.
  Amat <- matrix(1, nrow = n)
  bvec <- 1
  meq <- 1

  # No short selling: add the inequality constraints w_i >= 0.
  if (short == "no") {
    Amat <- cbind(1, diag(n))
    bvec <- c(bvec, rep(0, n))
  }

  # Cap each weight: add -w_i >= -max.allocation.
  if (!is.null(max.allocation)) {
    if (max.allocation > 1 || max.allocation < 0) {
      stop("max.allocation must be between 0 and 1")
    }
    if (max.allocation * n < 1) {
      stop("max.allocation too low: weights cannot sum to 1")
    }
    Amat <- cbind(Amat, -diag(n))
    bvec <- c(bvec, rep(-max.allocation, n))
  }

  # Calculate the number of loops based on how high to vary the risk premium
  # and by what increment.
  loops <- risk.premium.up / risk.increment + 1
  loop <- 1

  # Preallocating the result matrix is not necessary, but speeds up
  # processing and uses less memory.
  eff <- matrix(nrow = loops, ncol = n + 3)
  colnames(eff) <- c(colnames(returns), "Std.Dev", "Exp.Return", "sharpe")

  for (i in seq(from = 0, to = risk.premium.up, by = risk.increment)) {
    dvec <- colMeans(returns) * i # This moves the solution up along the efficient frontier
    sol <- solve.QP(covariance, dvec = dvec, Amat = Amat, bvec = bvec, meq = meq)
    eff[loop, "Std.Dev"] <- sqrt(sum(sol$solution * colSums(covariance * sol$solution)))
    eff[loop, "Exp.Return"] <- as.numeric(sol$solution %*% colMeans(returns))
    eff[loop, "sharpe"] <- eff[loop, "Exp.Return"] / eff[loop, "Std.Dev"]
    eff[loop, 1:n] <- sol$solution
    loop <- loop + 1
  }

  return(as.data.frame(eff))
}
#340:448
# NOTE(review): the call that builds effFondoLatam (presumably
# eff.frontier(<returns>, ...)) was lost in the scrape -- restore it before
# running this section.
# Write the full frontier, then keep only the maximum-Sharpe portfolio.
# The file-path strings were split across physical lines in the scrape
# (a syntax error); rejoined here.
write.csv(effFondoLatam,
          "C:/Users/Usuario/Desktop/UNIVERSIDAD/septimo semestre/finanzas 3/RESUPUESTA3.csv")
eff.optimal.pointFondoLatam <-
  effFondoLatam[effFondoLatam$sharpe == max(effFondoLatam$sharpe), ]
effFondoLatam <- rbind(eff.optimal.pointFondoLatam)
write.csv(effFondoLatam,
          "C:/Users/Usuario/Desktop/UNIVERSIDAD/septimo semestre/finanzas 3/RESPUESTA4.csv")