You are on page 1of 19

https://www.unixarena.com/2014/02/configure-guest-domains-ldom.

html
------------>create ldom
============================================================================
how to add cdrom.iso to control domain & LDOM

410 ldm ls-bindings LDOM-1


411 ldm ls-services
414 ldm add-vdsdev options=ro /iso/sol-11_3-live-x86.iso newiso@primary-vds0
419 ldm ls-bindings LDOM-1
420 ldm ls-bindings LDOM-2
421 ldm add-vdisk viso1 newiso@primary-vds0 LDOM-1
422 ldm ls
423 ldm bound LDOM-1
424 ldm stop LDOM-1
425 ldm ls
426 ldm add-vdisk viso1 newiso@primary-vds0 LDOM-1
427 ldm list-bindings LDOM-1
428 telnet localhost 5000
429 ldm ls
430 ldm list-bindings services
431 ldm list-bindings
432 ldm list-bindings LDOM1
433 ldm list-bindings LDOM-1

root@sparc-40:~# ldm unbind LDOM-1


root@sparc-40:~# ldm ls
NAME STATE FLAGS CONS VCPU MEMORY UTIL UPTIME
primary active -n-cv- SP 8 8G 2.5% 38m
LDOM-2 active -t---- 5002 16 6G 6.3% 45m
LDOM-3 active -n---- 5003 16 6G 0.2% 2h 11m
LDOM-1 inactive ------ 16 6G

root@sparc-40:~# ldm remove-vdisk viso1 LDOM-1


root@sparc-40:~# ldm remove-vdsdev newiso@primary-vds0

root@sparc-40:~# ldm ls
NAME STATE FLAGS CONS VCPU MEMORY UTIL UPTIME
primary active -n-cv- SP 8 8G 2.9% 59m
LDOM-2 active -t---- 5002 16 6G 6.3% 1h 5m
LDOM-3 active -n---- 5003 16 6G 0.1% 2h 31m
LDOM-1 inactive ------ 16 6G
root@sparc-40:~# ldm bind LDOM-1
root@sparc-40:~# ldm ls
NAME STATE FLAGS CONS VCPU MEMORY UTIL UPTIME
primary active -n-cv- SP 8 8G 2.7% 1h 1m
LDOM-1 bound ------ 5000 16 6G
LDOM-2 active -t---- 5002 16 6G 6.3% 1h 8m
LDOM-3 active -n---- 5003 16 6G 0.2% 2h 34m
root@sparc-40:~# ldm start LDOM-1
LDom LDOM-1 started
root@sparc-40:~# ldm ls
NAME STATE FLAGS CONS VCPU MEMORY UTIL UPTIME
primary active -n-cv- SP 8 8G 2.7% 1h 2m
LDOM-1 active -t---- 5000 16 6G 2.0% 1s
LDOM-2 active -t---- 5002 16 6G 6.3% 1h 8m
LDOM-3 active -n---- 5003 16 6G 0.2% 2h 34m
root@sparc-40:~#
==========================================================================

how to remove LDOM

root@sparc-40:~# ldm ls
NAME STATE FLAGS CONS VCPU MEMORY UTIL UPTIME
primary active -n-cv- SP 8 8G 59% 6m
LDOM-2 active -n---- 5002 16 6G 0.1% 2h 51m
LDOM-3 active -n---- 5003 16 6G 0.1% 1h 55m
LDOM-5 active -n---- 5000 4 2G 0.3% 33m
LDOM-1 inactive ------ 16 10G
root@sparc-40:~#
root@sparc-40:~# ldm stop LDOM-5

LDom LDOM-5 stopped


root@sparc-40:~#
root@sparc-40:~#
root@sparc-40:~# ldm ls
NAME STATE FLAGS CONS VCPU MEMORY UTIL UPTIME
primary active -n-cv- SP 8 8G 5.1% 7m
LDOM-2 active -n---- 5002 16 6G 0.3% 2h 52m
LDOM-3 active -n---- 5003 16 6G 0.5% 1h 57m
LDOM-5 bound ------ 5000 4 2G
LDOM-1 inactive ------ 16 10G
root@sparc-40:~#
root@sparc-40:~#
root@sparc-40:~# ldm unbind LDOM-5
root@sparc-40:~#
root@sparc-40:~#
root@sparc-40:~# ldm ls
NAME STATE FLAGS CONS VCPU MEMORY UTIL UPTIME
primary active -n-cv- SP 8 8G 4.8% 8m
LDOM-2 active -n---- 5002 16 6G 0.2% 2h 52m
LDOM-3 active -n---- 5003 16 6G 0.8% 1h 57m
LDOM-1 inactive ------ 16 10G
LDOM-5 inactive ------ 4 2G
root@sparc-40:~#
root@sparc-40:~# ldm remove LDOM-5
Unknown command remove; use --help option for list of available commands
root@sparc-40:~# ldm ls
NAME STATE FLAGS CONS VCPU MEMORY UTIL UPTIME
primary active -n-cv- SP 8 8G 1.0% 8m
LDOM-2 active -n---- 5002 16 6G 1.7% 2h 53m
LDOM-3 active -n---- 5003 16 6G 0.4% 1h 58m
LDOM-1 inactive ------ 16 10G
LDOM-5 inactive ------ 4 2G
root@sparc-40:~# ldm ls-domain
NAME STATE FLAGS CONS VCPU MEMORY UTIL UPTIME
primary active -n-cv- SP 8 8G 1.6% 8m
LDOM-2 active -n---- 5002 16 6G 1.0% 2h 53m
LDOM-3 active -n---- 5003 16 6G 0.3% 1h 58m
LDOM-1 inactive ------ 16 10G
LDOM-5 inactive ------ 4 2G
root@sparc-40:~# ldm remove-domain LDOM-5
root@sparc-40:~#
root@sparc-40:~# ldm ls
NAME STATE FLAGS CONS VCPU MEMORY UTIL UPTIME
primary active -n-cv- SP 8 8G 1.7% 9m
LDOM-2 active -n---- 5002 16 6G 0.3% 2h 53m
LDOM-3 active -n---- 5003 16 6G 0.3% 1h 58m
LDOM-1 inactive ------ 16 10G
root@sparc-40:~#

============================================================================
how to enable iscsi on solaris

root@sol11up3-170:/# iscsiadm add discovery-address 192.168.11.131 -->ip server


root@sol11up3-170:/# iscsiadm modify discovery -t enable

==========================================================================

http://uadmin.nl/init/solaris-11-cheat-sheet-network-administration/

solaris 11 cheat sheet network administration


Posted on June 8, 2012 by admin

Switch to manual network configuration:


# netadm enable -p ncp defaultfixed

Show physical network interfaces:


# dladm show-phys

Create interface with static IPv4 configuration:


# ipadm create-ip net0
# ipadm create-addr -T static -a local=10.9.8.7/24 net0/addr
# ipadm show-addr

Create interface with DHCP configuration:


# ipadm create-ip net0
# ipadm create-addr -T dhcp net0/addr

Create interface with auto-generated IPv6 configuration:


# ipadm create-ip net0
# ipadm create-addr -T addrconf net0/addr

# ipadm show-if

# ipadm show-addr

# ipadm create-ip net3

# ipadm create-addr -T static -a 192.168.0.203/24 net3/v4

# ipadm down-addr net3/v4

# ipadm up-addr net3/v4

# ipadm delete-addr net3/v4

Configure default route:


# route -p add default 1.1.1.1
Activate DNS configuration:
# svccfg -s dns/client setprop config/nameserver = \
net_address: 192.168.1.1
# svccfg -s dns/client setprop config/domain = astring: "myhost.org"
# svccfg -s name-service/switch setprop config/host = astring: "files dns"
# svcadm refresh name-service/switch
# svcadm refresh dns/client

Activate DNS configuration


(alternate approach by editing /etc/resolv.conf and /etc/nsswitch.conf and then
importing these modifications into SMF)
# nscfg import -f svc:/system/name-service/switch:default
# nscfg import -f svc:/network/dns/client:default
# svcadm refresh dns/client

NWAM!!!
Create a network configuration profile:
# netcfg create ncp datacenter
# netcfg
netcfg> select ncp datacenter
netcfg:ncp:datacenter> create ncu phys net0
Created ncu 'net0'. Walking properties ...
ip-version (ipv4,ipv6) [ipv4|ipv6]> ipv4
ipv4-addsrc (dhcp) [dhcp|static]> static
ipv4-addr> 192.168.1.27
ipv4-default-route> 192.168.1.1
netcfg:ncp:datacenter:ncu:net0> end
Committed changes
netcfg:ncp:datacenter> exit

----

ipadm show-ifprop net0

ipadm set-ifprop -m ipv4 -p forwarding=off net0

=========================================================================
http://unixadminschool.com/blog/2015/08/oracle-solaris-11-administration-command-
cheat-sheet/

Solaris Admin / Solaris Commands / Solaris11 Admin / solaris11-Howto


3

Oracle Solaris 11 Administration - Command Cheat Sheet

by Ramdev - Published August 5, 2015 - Updated August 23, 2015


Solaris Installation

Automated Installer (AI) is the new network based multi-client provisioning system
on Oracle Solaris 11. AI provides hands-free installation of both SPARC and x86
systems by using an installation service that installs systems from software
package repositories on the network.

Create an install service from a downloaded ISO file, specifying x86 based DHCP
client starting at address 192.168.1.210 with a total count of 10 addresses:
# installadm create-service -n s11x86 -i 192.168.1.210 -c 10 -s
/path/to/solaris-11-1111-ai-x86.iso

List all enabled services:

# installadm list

List any installation manifests associated with the install services:

# installadm list -m

Export the default installation manifest associated with the s11x86 service:

# installadm export -n s11x86 -m orig_default > manifest.xml

Import a manifest to be associated with the s11x86 service:

# installadm update-manifest -n s11x86 -m orig_default -f manifest.xml

List any system configuration profiles associated with the install services:

# installadm list -p

Create a system configuration profile interactively, saving the contents to a file:

# sysconfig create-profile -o profile.xml

Validate a system configuration profile against the default x86 install service:

# installadm validate -n default-i386 -P profile.xml

Associate a system configuration profile with the default x86 install service and
give it a name sc-profile:

# installadm create-profile -n default-i386 -f profile.xml -p sc-profile

Apply a criteria that all clients must have 4096MB memory or greater to the
manifest s11manifest of s11x86 service:

# installadm set-criteria -m s11manifest -n s11x86 -a MEM="4096-unbounded"

System Configuration

Common system configuration tasks have changed in Oracle Solaris 11 with the
Service Management Facility (SMF) configuration repository being used to store
configuration data. With the addition of configuration layers, administrators now
have better control and assurance that their configuration changes will be
preserved across system updates.

Configuring nodename:

# svccfg -s svc:/system/identity:node setprop config/nodename = "myhost"


# svcadm refresh svc:/system/identity:node
# svcadm restart svc:/system/identity:node

Configuring console keyboard layout:


# svccfg -s keymap:default setprop keymap/layout = UK-English
# svcadm refresh keymap
# svcadm restart keymap

Configuring system locale:

# svccfg -s timezone:default setprop timezone/localtime = astring: US/Mountain


# svcadm refresh timezone:default

Unconfigure a system and start an interactive configuration tool on reboot:

# sysconfig configure -s

Create a system configuration profile:

# sysconfig create-profile -o sc-profile.xml

Configure a system according to a system configuration profile:

# sysconfig configure -c sc-profile.xml

Users and Groups

The traditional root account has been changed to a "root" role on all Oracle
Solaris 11 installations as part of the Role Based Access Control (RBAC) feature
set. This change gives improved auditability across the operating system, and the
ability for administrators to delegate various system tasks to others in a safe
way.

Revert to root as normal user account:

# rolemod -K type=normal root

Configure root as a role (default):

# usermod -K type=role root

Add a new user and delegate him the System Administrator profile:

# useradd -d /export/home/joerg -P "System Administrator" joerg

Boot Environments

Boot Environments are individual bootable instances of the operating system that
take advantage of the Oracle Solaris ZFS filesystem snapshot and clone capability.
During a system update, new boot environments are created so that system software
updates can be applied in a safe environment. Should anything go awry,
administrators can boot back into an older boot environment. Boot environments have
low overhead and can be quickly created giving administrators an ideal best
practice for any system
maintenance work.

Create a boot environment:

# beadm create solaris-05032012


Activate a boot environment:

# beadm activate solaris-05032012

Delete a boot environment:

# beadm destroy solaris-05032012

Show boot environments from SPARC boot PROM:

ok boot -L

Boot into a boot environment from SPARC boot PROM:

ok boot -Z rpool/ROOT/solaris-05032012

Software installation and Packaging

Oracle Solaris 11 includes IPS, a new network-centric package management framework


with automatic dependency checking. IPS has integrated package and patching, and
can seamlessly manage system updates to Oracle Solaris Zones environments.

Install a package called diagnostic/wireshark:

# pkg install diagnostic/wireshark

Install a group package to provide a desktop environment:

# pkg install solaris-desktop

Update all possible packages to the newest version, including any zones:

# pkg update

Do a dry run of a system update to understand what packages may change:

# pkg update -nv

Uninstall a package called diagnostic/wireshark:

# pkg uninstall wireshark

List all packages installed on a system:

# pkg list

Get more information about an installed package called diagnostic/wireshark:

# pkg info wireshark

List the contents of an installed package called diagnostic/wireshark:

# pkg contents wireshark

Search all packages in the configured repositories for a file called math.h:

# pkg search math.h

Search for all packages installed on a system that have a dependency on


library/libxml2:

# pkg search -l -o pkg.name 'depend::library/libxml2'

List currently associated package publishers:

# pkg publisher

Connect to the Oracle support repository and update the system:

# pkg set-publisher -g https://pkg.oracle.com/solaris/support -G


http://pkg.oracle.com/solaris/release -k /path/to/ssl_key -c /path/to/ssl_cert
solaris
# pkg update

File systems - Basic ZFS Administration

Oracle Solaris ZFS is the default root file system on Oracle Solaris 11. ZFS has
integrated volume management, preserves the highest levels of data integrity and
includes a wide variety of data services such as data deduplication, RAID and data
encryption.

Create a ZFS pool with a single disk:

# zpool create testpool c3t2d0

Create a ZFS pool with 3 disks in RAID0 configuration:

# zpool create testpool c3t2d0 c3t3d0 c3t4d0

Create a ZFS pool with 3 disks in RAID1 configuration:

# zpool create testpool mirror c3t2d0 c3t3d0 c3t4d0

Create a ZFS pool with 3 disks in a RAIDZ configuration (single parity):

# zpool create testpool raidz c2t2d0 c3t3d0 c3t4d0

Create a ZFS pool with 1 disk and 1 disk as separate ZIL (ZFS Intent Log):

# zpool create testpool c3t2d0 log c3t3d0

Create a ZFS pool with 1 disk and 1 disk as L2ARC (Level 2 storage cache):

# zpool create testpool c3t2d0 cache c3t3d0

Share a filesystem via NFS:

# zfs create rpool/fs1


# zfs set share=name=fs1,path=/rpool/fs1,prot=nfs rpool/fs1
# zfs set sharenfs=on rpool/fs1
===================================================================================
=======
root@sol113-12:/opt/SUNWxvmoc/bin# zfs set share=name=NASlib-5G,path=/Naslib-
5G,prot=nfs,root=@192.168.11.0/24 Data/NASlib-5G
name=NASlib-5G,path=/Naslib-5G,prot=nfs,sec=sys,root=@192.168.11.0/24
root@sol113-12:/opt/SUNWxvmoc/bin#zfs set sharenfs=on Data/NASlib-5G
root@sol113-12:/opt/SUNWxvmoc/bin# share
opt_SUNWjet /opt/SUNWjet nfs anon=0,sec=sys,ro JET Framework
var_js /var/js nfs anon=0,sec=sys,ro Allstart Share
var_opt_sun_xvm_osp_share_allstart /var/opt/sun/xvm/osp/share/allstart nfs
sec=sys,ro Allstart Share
Backup /Backup nfs sec=sys,root=@192.168.11.0/24
NASLib-10G /NASLib-10G nfs sec=sys,root=@192.168.11.0/24
NASLib-20G /NASLib-20G nfs sec=sys,root=@192.168.11.0/24
NASLib-80G /NASLib-80G nfs sec=sys,root=@192.168.11.0/24
Repo /Repo nfs sec=sys,root=@192.168.11.0/24
Source /Source nfs sec=sys,root=@192.168.11.0/24
IPC$ smb - Remote IPC
NASlib-5G /Naslib-5G nfs sec=sys,root=@192.168.11.0/24
root@sol113-12:/opt/SUNWxvmoc/bin#
===================================================================================
=======
Share a filesystem via CIFS:

# pkg install service/filesystem/smb


# svcadm enable -r smb/server
# echo "other password required pam_smb_passwd.so.1 nowarn" >> /etc/pam.conf
# smbadm enable-user joerg
# zfs set share=name=sh1,path=/rpool/fs1,prot=smb rpool/fs1
# zfs set sharesmb=on rpool/fs1

Use shadow migration:

# pkg install shadow-migration


# svcadm enable shadowd
# zfs set readonly=on path/to/data
# zfs create -o shadow=file:///path/to/data target/new/path/to/data

Disk Devices

Show all disks on a system:

# cfgadm -s "select=type(disk)"

Configure a disk to be used via iSCSI

# svcadm enable svc:/network/iscsi/initiator


# iscsiadm modify initiator-node -A myclient
# iscsiadm add discovery-address 10.211.55.200
# iscsiadm discovery -t enable
# devfsadm -c iscsi

Replace a faulty disk c1t1d0 from ZFS pool testpool:

# zpool offline testpool c1t1d0


# cfgadm -c unconfigure c1::dsk/c1t1d0
# cfgadm -c configure c1::dsk/c1t1d0
# zpool replace testpool c1t1d0
# zpool online testpool c1t1d0

Mirror existing boot disk c3t0d0s0 with disk c3t2d0s0

# fdisk -B c3t2d0s0
# prtvtoc /dev/rdsk/c3t0d0s0 | fmthard -s - /dev/rdsk/c3t2d0s0
On x86 systems:

# installgrub /boot/grub/stage1 /boot/grub/stage2 /dev/rdsk/c3t2d0s0

On SPARC systems:

# installboot -F zfs /usr/platform/`uname -i`/lib/fs/zfs/bootblk /dev/rdsk/c3t2d0s0

Oracle Solaris Zones

Oracle Solaris Zones provide isolated and secure virtual environments running on a
single operating system instance, ideal for application deployment. When
administrators create a zone, an application execution environment is produced in
which processes are isolated from the rest of the system.

Create a zone with an exclusive IP network stack:

# zonecfg -z testzone
testzone: No such zone configured
Use 'create' to begin configuring a new zone.
zonecfg:testzone> create
zonecfg:testzone> set zonepath=/zones/testzone
zonecfg:testzone> set autoboot=true
zonecfg:testzone> verify
zonecfg:testzone> commit
zonecfg:testzone> exit

List all running zones verbosely:

# zoneadm list -v

List all configured zones:

# zoneadm list -c

List all installed zones:

# zoneadm list -i

Install a zone:

# zoneadm -z testzone install

Boot a zone:

# zoneadm -z testzone boot

List configuration about a zone:

# zoneadm -z testzone list

Login to a zone:

# zlogin -C testzone

Halt a zone

# zoneadm -z testzone halt


Shutdown a zone

# zoneadm -z testzone shutdown

Monitor a zone for CPU, memory and network utilization every 10 seconds:

# zonestat -z testzone 10

Service Management Facility

Service Management Facility (SMF) provides a framework for managing services on


Oracle Solaris including the ability to automatically restart any service after
failure. Each service instance is named with a fault management resource indicator
(FMRI).

Show all services (including disabled services):

# svcs

List detailed information about system/zones:

# svcs -l system/zones

List processes associated with the network/netcfg service:

# svcs -p network/netcfg

Show why services that are enabled but are not running, or preventing other
services from running:

# svcs -xv

Enable a service called network/dns/client:

# svcadm enable network/dns/client

Restart a service called network/nfs/server using an abbreviated FMRI:

# svcadm restart nfs/server

Disable a service called network/ssh:

# svcadm disable network/ssh

Display all properties and values in the SMF configuration repository for the
service network/ssh:

# svcprop network/ssh

Interactively display the general/enabled property within the SMF configuration


repository for the service network/ssh:

# svccfg
svc:> select ssh:default
svc:/network/ssh:default> listprop general/enabled
svc:/network/ssh:default> exit
Set the port number of the application/pkg/server service to 10000:

# svccfg -s application/pkg/server setprop pkg/port=10000


# svcadm refresh application/pkg/server

Configure email notifications for all services that drop from online to maintenance
state:

# svccfg setnotify -g from-online,to-maintenance mailto:admin@myhost.org

List all configuration changes that have been made in the SMF configuration
repository to the name-service/switch service:

# svccfg -s name-service/switch listcust -L

Solaris 11 Networking

Oracle Solaris 11 uses profile based networking configuration, comprised of two


configuration modes - manual and automatic. These modes differ in how
administrators configure the system, either manually using dladm and ipadm, or
through creating and applying network configuration profiles.
Networking � Manual Administration

Switch to manual network configuration:

# netadm enable -p ncp defaultfixed

Show physical network interfaces:

# dladm show-phys

Create interface with static IPv4 configuration:

# ipadm create-ip net0


# ipadm create-addr -T static -a local=10.9.8.7/24 net0/addr
# ipadm show-addr

Create interface with DHCP configuration:

# ipadm create-ip net0


# ipadm create-addr -T dhcp net0/addr

Create interface with auto-generated IPv6 configuration:

# ipadm create-ip net0


# ipadm create-addr -T addrconf net0/addr

Configure default route:

# route -p add default 192.168.1.1

Activate DNS configuration:

# svccfg -s dns/client setprop config/nameserver = net_address: 192.168.1.1


# svccfg -s dns/client setprop config/domain = astring: "myhost.org"
# svccfg -s name-service/switch setprop config/host = astring: "files dns"
# svcadm refresh name-service/switch
# svcadm refresh dns/client
Activate DNS configuration (alternate approach by editing /etc/resolv.conf and
/etc/nsswitch.conf and then importing these modifications into SMF)

# nscfg import -f svc:/system/name-service/switch:default


# nscfg import -f svc:/network/dns/client:default
# svcadm refresh dns/client

Networking � Automatic Administration

Create a network configuration profile:

# netcfg create ncp datacenter


# netcfg
netcfg> select ncp datacenter
netcfg:ncp:datacenter> create ncu phys net0
Created ncu 'net0'. Walking properties ...
ip-version (ipv4,ipv6) [ipv4|ipv6]> ipv4
ipv4-addsrc (dhcp) [dhcp|static]> static
ipv4-addr> 192.168.1.27
ipv4-default-route> 192.168.1.1
netcfg:ncp:datacenter:ncu:net0> end
Committed changes
netcfg:ncp:datacenter> exit

Create a network location profile:

# netcfg
netcfg> create loc datacenter
Created loc 'datacenter'. Walking properties ...
activation-mode (manual) [manual|conditional-any|conditionalall]>
conditional-any
conditions> ip-address is 192.168.1.27
nameservices (dns) [dns|files|nis|ldap]> dns
nameservices-config-file ("/etc/nsswitch.dns")>
dns-nameservice-configsrc (dhcp) [manual|dhcp]> manual
dns-nameservice-domain> datacenter.myhost.org
dns-nameservice-servers> 192.168.1.1
dns-nameservice-search>
dns-nameservice-sortlist>
dns-nameservice-options>
nfsv4-domain>
ipfilter-config-file>
ipfilter-v6-config-file>
ipnat-config-file>
ippool-config-file>
ike-config-file>
ipsecpolicy-config-file>
netcfg:loc:datacenter>
netcfg:loc:datacenter> exit
Committed changes
Activate a network configuration profile:
# netadm enable -p ncp datacenter

Networking � Advanced Administration

Create a virtual network interface over existing physical interface net0 with
address 192.168.0.80:
# dladm create-vnic -l net0 vnic0
# ipadm create-ip vnic0
# ipadm create-addr -T static -a 192.168.0.80 vnic0/v4

Create two virtual network interfaces over a virtual switch (without a physical
network interface):

# dladm create-etherstub stub0


# dladm create-vnic -l stub0 vnic0
# dladm create-vnic -l stub0 vnic1

Reduce the bandwidth of the virtual network interface vnic0 to 100Mbps:

# dladm set-linkprop -p maxbw=100 vnic0

Restrict the bandwidth going to IP address 192.168.0.30 by creating a flow on


virtual network interface vnic0, then restrict its bandwidth to 50Mbps:

# flowadm add-flow -l vnic0 -a remote_ip=192.168.0.30 flow0


# flowadm set-flowprop -p maxbw=50 flow0

Restrict network traffic to TCP for a local port 443 for network interface net0:

# flowadm add-flow -l net0 -a transport=TCP,local_port=443 flow0

Activating Jumbo Frames (ethernet packets greater than 1500 bytes):

# dladm set-linkprop -p mtu=9000 net0

Configure Link Aggregation:

# dladm create-aggr -l net0 -l net1 aggr0


# ipadm create-ip aggr0
# ipadm create-addr -T static -a 10.1.1.2/24 aggr0/v4

Configure VLANS:

# dladm create-vlan -l net0 -v 100 administration1


# dladm create-vlan -l net0 -v 2 production1
# ipadm create-ip administration1
# ipadm create-ip production1
# ipadm create-addr -T static -a 192.168.2.2/24 administration1/v4static
# ipadm create-addr -T static -a 192.168.1.2/24 production1/v4static

Configure an IPMP group:

# ipadm create-ip net0


# ipadm create-ip net1
# ipadm create-ip net2
# ipadm create-ipmp ipmp0
# ipadm add-ipmp -i net0 -i net1 -i net2 ipmp0
# ipadm create-addr -T static -a 192.168.1.27/24 ipmp0/v4
# ipadm create-addr -T static -a 192.168.1.50/24 net0/test
# ipadm create-addr -T static -a 192.168.1.51/24 net1/test
# ipadm create-addr -T static -a 192.168.1.52/24 net2/test

===================================================================================
=======

Configure a Local IPS Package Repository using HTTP

1. Download the Oracle Solaris 11 repository image from the following site:
http://www.oracle.com/technetwork/server-storage/solaris11/downloads/index.html.

# cd /export/download
# unzip sol-11-xxx-xxx-repo-full-iso-a.zip
# unzip sol-11-xxx-xxx-repo-full-iso-b.zip
# cat sol-11-1111-repo-full.iso-a sol-11-1111-repo-full.iso-b > sol-11-1111-repo-
full.iso

2. Create /export/ips file system


# zpool list
# zfs list
# zfs mount
# zfs create rpool/export/ips
# zfs set mountpoint=/export/ips rpool/export/ips
# zfs set compression=on rpool/export/ips

3. Copy the IPS repository from the ISO image to a local ZFS file system
# lofiadm -a /export/download/sol-11-1111-repo-full.iso
# mount -F hsfs /dev/lofi/1 /mnt
# rsync -aP /mnt/repo /export/ips

4. Configure IPS server services (HTTP server)


# svcs application/pkg/server
# svccfg -s application/pkg/server setprop pkg/inst_root=/export/ips/repo
# svccfg -s application/pkg/server setprop pkg/readonly=true
# svcprop -p pkg/inst_root application/pkg/server
# svcadm refresh application/pkg/server
# svcadm enable application/pkg/server
# svcs application/pkg/server
# pkgrepo refresh -s /export/ips/repo

5. Remove the current publisher URI and add a new URI the test the IPS
# pkg publisher
# pkg set-publisher -G '*' -g http://s11-serv1.mydomain.com/ solaris
# pkg publisher
# pkg search entire

== Configure a Network Client to Access the IPS Server ==


# ping s11-serv1.mydomain.com
# pkg publisher
# pkg set-publisher -G '*' -g http://s11-serv1.mydomain.com/ solaris
# pkg publisher

==Setting Local Repository Server From Local Filesystem (Without HTTP) ==


# lofiadm -a /export/download/sol-11-1111-repo-full.iso
# mount -F hsfs /dev/lofi/1 /mnt
# pkg publisher
# pkg set-publisher -G '*' -M '*' -g /media/SOL.../repo solaris
# pkg publisher
# pkg search entire
# pkg search slim_install
# pkg install slim_install

===================================================================================

Reset root password


ok boot cdrom -s
# zpool list
# zpool import
# zpool import rpool
# zpool list
# zfs list
# zfs mount
# zfs set mountpoint=/a rpool/ROOT/solaris
Alternatif:
# mkdir /a
# zfs set mountpoint=legacy rpool/ROOT/solaris
# mount -F zfs rpool/ROOT/solaris /a

(zfs mount -f rpool/ROOT/solaris)

# vi /a/etc/shadow
Remove the password field so the entry looks like this:
root::15356::::::
# cd /
# umount /a
# zfs set mountpoint=/ rpool/ROOT/solaris
# zpool export rpool

Reboot from Disk to Single User mode


ok boot disk -s

Login as Admin user with Admin's password

Assign new password to root


# passwd root

==================================================================================

How to configure password less authentication using ssh

July 30, 2012 By Lingeswaran R 2 Comments


Passwordless authentication may be required in various setups; for example, we need to
configure passwordless authentication for the root user in a cluster setup in order to
install the cluster software on both machines simultaneously. Here we will see how to
configure passwordless authentication using ssh on Solaris nodes.

Login to node1.
Create a new ssh keygen .Here i have used RSA keygen. If you want you can use DSA
instead of RSA.

Arena-Node1#ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (//.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in //.ssh/id_rsa.
Your public key has been saved in //.ssh/id_rsa.pub.
The key fingerprint is:
e4:34:90:01:7e:0a:38:45:fa:bb:4d:ef:0c:57:ce:2a root@node1

Go to the directory where the keys are stored. It will be stored in root's home
directory by default.

Arena-Node1#cd /.ssh
Arena-Node1#ls -lrt
total 5
-rw------- 1 root root 887 Jul 29 23:03 id_rsa
-rw-r--r-- 1 root root 220 Jul 29 23:03 id_rsa.pub

Arena-Node1#cat /etc/hosts
"/etc/hosts" [Read only] 6 lines, 88 characters
#
# Internet host table
#
::1 localhost
127.0.0.1 localhost
192.168.2.5 node1 loghost
192.168.2.6 node2

Login to node2 and perform the same what we have did for node1.

Arena-Node2#ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (//.ssh/id_rsa):
Created directory '//.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in //.ssh/id_rsa.
Your public key has been saved in //.ssh/id_rsa.pub.
The key fingerprint is:
ad:14:b0:83:75:23:fa:c2:96:b6:1c:1d:85:96:b1:77 root@node2
Arena-Node2#cat /etc/hosts
"/etc/hosts" [Read only] 6 lines, 88 characters
#
# Internet host table
#
::1 localhost
127.0.0.1 localhost
192.168.2.6 node2 loghost
192.168.2.5 node1

Now I am copying the RSA public key to node2 as authorized_keys, since node2 is where
we want to log in without a password. By doing this, I can log in from node1 to node2
without a password.

Arena-Node1#scp -r id_rsa.pub node2:/.ssh/authorized_keys


The authenticity of host 'node2 (192.168.2.6)' can't be established.
RSA key fingerprint is 93:cc:1f:07:17:bf:79:34:7e:05:2f:25:28:64:fb:60.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node2,192.168.2.6' (RSA) to the list of known hosts.
Password:
id_rsa.pub 100% |*****************************| 220 00:00
Arena-Node1#

On node2, I am copying the RSA public key to node1 as authorized_keys. By doing this,
I can log in from node2 to node1 without a password.

Arena-Node2#scp -r id_rsa.pub node1:/.ssh/authorized_keys


The authenticity of host 'node1 (192.168.2.5)' can't be established.
RSA key fingerprint is 93:cc:1f:07:17:bf:79:34:7e:05:2f:25:28:64:fb:60.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node1,192.168.2.5' (RSA) to the list of known hosts.
Password:
id_rsa.pub 100% |*****************************| 220 00:00
Arena-Node2#

Testing the access from node1

Arena-Node1#ssh node2
Last login: Mon Jul 30 00:18:46 2012 from node1
Oracle Corporation SunOS 5.10 Generic Patch January 2005
Arena-Node2#

Testing the access from node2

Arena-Node2#ssh node1
Last login: Mon Jul 30 00:05:53 2012 from 192.168.2.2
Oracle Corporation SunOS 5.10 Generic Patch January 2005
Arena-Node1#

Thank you for reading this article. Please leave a comment if you have any doubt, and I
will get back to you as soon as possible.

===================================================================================
============

root@solaris11_3:/# zonecfg -z sizone "set autoboot=false"


root@solaris11_3:/# zonecfg -z sizone info
zonename: sizone
zonepath: /zones/sizone
brand: solaris
autoboot: false
autoshutdown: shutdown
bootargs:
file-mac-profile:
pool:
limitpriv:
scheduling-class:
ip-type: shared
hostid:
tenant:
fs-allowed:
net:
address: 192.168.1.130
allowed-address not specified
configure-allowed-address: true
physical: net0
defrouter: 192.168.1.1
root@solaris11_3:/# zoneadm -z sizone apply
zone 'sizone': Checking: Removing net physical=net0
zone 'sizone': Checking: Adding net physical=net0
zone 'sizone': Applying the changes
zone 'sizone': warning: net0: no matching subnet found in netmasks(4):
192.168.1.130; using default of 255.255.255.0.
zone 'sizone': add net default: gateway 192.168.1.1: entry exists
root@solaris11_3:/#
=========================================================

how to rename zonename

root@solaris11_3:/# zoneadm list -cv


ID NAME STATUS PATH BRAND IP
0 global running / solaris shared
- s10-zone1 installed /data/s10zone1/s10-zone1 solaris10 shared
- zone1 installed /tespool/zone1 solaris shared
- zone2 installed /data/zone2 solaris shared

root@solaris11_3:/# zoneadm -z zone1 rename zone_138


root@solaris11_3:/# zoneadm list -cv
ID NAME STATUS PATH BRAND IP
0 global running / solaris shared
- s10-zone1 installed /data/s10zone1/s10-zone1 solaris10 shared
- zone_138 installed /tespool/zone1 solaris shared
- zone2 installed /data/zone2 solaris shared
root@solaris11_3:/#

You might also like