
Deploy OpenStack and OpenDaylight with juju-deployer

Here are my trial-and-error logs from setting up OpenStack and OpenDaylight with juju-deployer.

MAAS : MAAS Version 1.9.4+bzr4592-0ubuntu1 (Trusty)
Juju : 1.25.6-xenial-amd64 (Xenial)

All nodes managed by MAAS have three NICs (eth[0-2]) and two disks (/dev/vda, /dev/vdb).

eth0 : PXE (192.168.40.0/24)
eth1 : unconfigured (192.168.41.0/24)
eth2 : unconfigured (192.168.42.0/24)
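
As a quick sanity check (my own habit, not part of the JOID steps), the NIC and disk layout can be confirmed on any commissioned node, e.g. over SSH to m-node02:

ubuntu@m-node02:~$ ip -o link show | awk -F': ' '{print $2}'   # expect lo, eth0, eth1, eth2
ubuntu@m-node02:~$ lsblk -d -o NAME,SIZE                       # expect vda and vdb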

[ Success case ]

Deployment nodes are Xenial, not Trusty.

root@juju-xenial:~# git clone -b stable/colorado https://gerrit.opnfv.org/gerrit/p/joid.git

root@juju-xenial:~# cd joid/ci/odl/juju-deployer/

root@juju-xenial:~/joid/ci/odl/juju-deployer# pwd
/root/joid/ci/odl/juju-deployer

root@juju-xenial:~/joid/ci/odl/juju-deployer# cp ../fetch-charms.sh ./

root@juju-xenial:~/joid/ci/odl/juju-deployer# ./fetch-charms.sh xenial

root@juju-xenial:~/joid/ci/odl/juju-deployer# sed s/trusty/xenial/g ovs-odl-nonha.yaml > ovs-odl-nonha-xenial
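
Before deploying, a quick grep confirms the series substitution left no trusty references behind (my own check):

root@juju-xenial:~/joid/ci/odl/juju-deployer# grep -n trusty ovs-odl-nonha-xenial   # should print nothing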

Deploy
root@juju-xenial:~/joid/ci/odl/juju-deployer# juju-deployer -e maas -c ovs-odl-nonha-xenial_03.yaml -v xenial-mitaka
2017-02-28 16:45:04 Waiting for relation convergence 60s
2017-02-28 16:47:44 Deployment complete in 1081.44 seconds
root@juju-xenial:~/joid/ci/odl/juju-deployer#
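
End to end this run took about 18 minutes. While juju-deployer is running, progress can be followed from another terminal with something like:

root@juju-xenial:~# watch -n 30 juju status --format short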

All services are running.
root@juju-xenial:~/joid/ci/odl/juju-deployer# juju status --format short
- ceilometer/0: 192.168.40.55 (started) 8777/tcp
- ceph/0: 192.168.40.62 (started)
- ceph/1: 192.168.40.59 (started)
- ceph-osd/0: m-node03.maas (started)
- ceph-osd/1: m-node02.maas (started)
- ceph-radosgw/0: 192.168.40.64 (started) 80/tcp
- cinder/0: 192.168.40.58 (started) 8776/tcp
 - cinder-ceph/0: 192.168.40.58 (started)
- congress/0: 192.168.40.54 (started) 1789/tcp
- glance/0: 192.168.40.77 (started) 9292/tcp
- heat/0: 192.168.40.68 (started) 8000/tcp, 8004/tcp
- keystone/0: 192.168.40.50 (started) 5000/tcp
- mongodb/0: 192.168.40.74 (started) 27017/tcp, 27019/tcp, 27021/tcp, 28017/tcp
- mysql/0: 192.168.40.57 (started)
- neutron-api/0: 192.168.40.51 (started) 9696/tcp
 - neutron-api-odl/0: 192.168.40.51 (started)
- neutron-gateway/0: m-node03.maas (started)
 - openvswitch-odl/0: m-node03.maas (started)
- nodes/0: m-node03.maas (started)
 - ntp/1: m-node03.maas (started)
- nodes/1: m-node02.maas (started)
 - ntp/0: m-node02.maas (started)
- nova-cloud-controller/0: 192.168.40.78 (started) 8774/tcp
- nova-compute/0: m-node02.maas (started)
 - ceilometer-agent/0: m-node02.maas (started)
 - openvswitch-odl/1: m-node02.maas (started)
- odl-controller/0: 192.168.40.60 (started)
- openstack-dashboard/0: 192.168.40.52 (started) 80/tcp, 443/tcp
- opnfv-promise/0: 192.168.40.56 (started)
- rabbitmq-server/0: 192.168.40.61 (started) 5672/tcp
root@juju-xenial:~/joid/ci/odl/juju-deployer#

root@juju-xenial:~/joid/ci/odl/juju-deployer# grep -v ^# ovs-odl-nonha-xenial_03.yaml  | grep -v ^$
xenial-mitaka-nodes:
 inherits: openstack-phase1
xenial-mitaka:
 inherits: openstack-phase2
 overrides:
   os-data-network: 192.168.40.0/24
   ceph-cluster-network: 192.168.40.0/24
   #prefer-ipv6: true
   #enable-dvr: true
   #l2-population: true
   region: Canonical
   #source: "cloud:xenial-mitaka"
   openstack-origin: "cloud:xenial-mitaka"
   ceph-osd-replication-count: 2
   admin-role: admin
   keystone-admin-role: admin
openstack-phase1:
 services:
   nodes:
     charm: "cs:xenial/ubuntu"
     num_units: 2
   ntp:
     charm: "local:xenial/ntp"
 relations:
   - - "ntp:juju-info"
     - "nodes:juju-info"
openstack-phase2:
 inherits: openstack-phase1
 relations:
   - - "neutron-api:neutron-plugin-api-subordinate"
     - "neutron-api-odl:neutron-plugin-api-subordinate"
   - - "nova-compute:neutron-plugin"
     - "openvswitch-odl:neutron-plugin"
   - - "neutron-gateway"
     - "openvswitch-odl"
   - - "openvswitch-odl:ovsdb-manager"
     - "odl-controller:ovsdb-manager"
   - - "neutron-api-odl:odl-controller"
     - "odl-controller:controller-api"
   - - "keystone:shared-db"
     - "mysql:shared-db"
   - - "nova-cloud-controller:shared-db"
     - "mysql:shared-db"
   - - "nova-cloud-controller:amqp"
     - "rabbitmq-server:amqp"
   - - "nova-cloud-controller:image-service"
     - "glance:image-service"
   - - "nova-cloud-controller:identity-service"
     - "keystone:identity-service"
   - - "nova-cloud-controller:cloud-compute"
     - "nova-compute:cloud-compute"
   - - "nova-compute:amqp"
     - "rabbitmq-server:amqp"
   - - "nova-compute:image-service"
     - "glance:image-service"
   - - "glance:shared-db"
     - "mysql:shared-db"
   - - "glance:identity-service"
     - "keystone:identity-service"
   - - "glance:amqp"
     - "rabbitmq-server:amqp"
   - - "openstack-dashboard:identity-service"
     - "keystone:identity-service"
   - - "neutron-api:shared-db"
     - "mysql:shared-db"
   - - "neutron-api:amqp"
     - "rabbitmq-server:amqp"
   - - "nova-cloud-controller:neutron-api"
     - "neutron-api:neutron-api"
   - - "neutron-api:identity-service"
     - "keystone:identity-service"
   - - "neutron-gateway:amqp"
     - "rabbitmq-server:amqp"
   - - "neutron-gateway:neutron-plugin-api"
     - "neutron-api:neutron-plugin-api"
   - - "nova-cloud-controller:quantum-network-service"
     - "neutron-gateway:quantum-network-service"
   - - "ceilometer:amqp"
     - "rabbitmq-server:amqp"
   - - "ceilometer-agent:ceilometer-service"
     - "ceilometer:ceilometer-service"
   - - "ceilometer:identity-service"
     - "keystone:identity-service"
   - - "ceilometer:identity-notifications"
     - "keystone:identity-notifications"
   - - "ceilometer-agent:nova-ceilometer"
     - "nova-compute:nova-ceilometer"
   - - "ceilometer:shared-db"
     - "mongodb:database"
   - - "heat:shared-db"
     - "mysql:shared-db"
   - - "heat:identity-service"
     - "keystone:identity-service"
   - - "heat:amqp"
     - "rabbitmq-server:amqp"
   - - "cinder:image-service"
     - "glance:image-service"
   - - "cinder:amqp"
     - "rabbitmq-server:amqp"
   - - "cinder:identity-service"
     - "keystone:identity-service"
   - - "cinder:cinder-volume-service"
     - "nova-cloud-controller:cinder-volume-service"
   - - "cinder-ceph:storage-backend"
     - "cinder:storage-backend"
   - - "ceph:client"
     - "nova-compute:ceph"
   - - "cinder:shared-db"
     - "mysql:shared-db"
   - - "ceph:client"
     - "cinder-ceph:ceph"
   - - "ceph:client"
     - "glance:ceph"
   - - "ceph-osd:mon"
     - "ceph:osd"
   - - "ceph-radosgw:mon"
     - "ceph:radosgw"
   - - "ceph-radosgw:identity-service"
     - "keystone:identity-service"
   #- - nova-compute:lxd
     #- lxd:lxd
   - - congress:shared-db
     - mysql:shared-db
   - - congress:identity-service
     - keystone:identity-service
   - - congress:amqp
     - rabbitmq-server:amqp
 services:
   mysql:
     charm: "local:xenial/percona-cluster"
     options:
       dataset-size: 2G
       max-connections: 10000
     to:
       - "lxc:nodes=0"
   ceilometer:
     charm: "local:xenial/ceilometer"
     to:
       - "lxc:nodes=1"
   ceilometer-agent:
     charm: "local:xenial/ceilometer-agent"
   mongodb:
     charm: "local:xenial/mongodb"
     to:
       - "lxc:nodes=0"
   heat:
     charm: "local:xenial/heat"
     to:
       - "lxc:nodes=1"
   ceph:
     charm: "local:xenial/ceph"
     num_units: 2
     options:
       fsid: 5a791d94-980b-11e4-b6f6-3c970e8b1cf7
       monitor-secret: AQAi5a9UeJXUExAA+By9u+GPhl8/XiUQ4nwI3A==
       monitor-count: 2
       ceph-cluster-network: 192.168.40.0/24
       ceph-public-network: 192.168.40.0/24
     to:
       - "lxc:nodes=0"
       - "lxc:nodes=1"
   ceph-osd:
     charm: "local:xenial/ceph-osd"
     num_units: 2
     options:
       osd-devices: /dev/vdb
       osd-reformat: 'yes'
     to:
       - "nodes=0"
       - "nodes=1"
   ceph-radosgw:
     charm: "local:xenial/ceph-radosgw"
     options:
       use-embedded-webserver: true
       operator-roles: "Member,admin"
     to:
       - "lxc:nodes=0"
   cinder:
     charm: "local:xenial/cinder"
     options:
       block-device: None
       glance-api-version: 2
     to:
       - "lxc:nodes=1"
   cinder-ceph:
     charm: "local:xenial/cinder-ceph"
   rabbitmq-server:
     charm: "local:xenial/rabbitmq-server"
     to:
       - "lxc:nodes=0"
   keystone:
     charm: "local:xenial/keystone"
     options:
       admin-password: openstack
       admin-token: admin
     to:
       - "lxc:nodes=1"
   openstack-dashboard:
     charm: "local:xenial/openstack-dashboard"
     options:
       secret: admin
       webroot: /
     to:
       - "lxc:nodes=1"
   nova-compute:
     charm: local:xenial/nova-compute
     options:
       enable-live-migration: true
       enable-resize: true
       manage-neutron-plugin-legacy-mode: false
       migration-auth-type: ssh
       #virt-type: lxd
       #hugepages: "50%"
     to:
       - "nodes=1"
   nova-cloud-controller:
     charm: local:xenial/nova-cloud-controller
     options:
       console-access-protocol: novnc
       network-manager: Neutron
     to:
       - "lxc:nodes=0"
   neutron-api:
     charm: local:xenial/neutron-api
     options:
       neutron-security-groups: True
       manage-neutron-plugin-legacy-mode: False
     to:
       - "lxc:nodes=1"
   neutron-gateway:
     charm: local:xenial/neutron-gateway
     options:
       ext-port: "eth1"
       plugin: ovs-odl
       instance-mtu: 1400
     to:
       - "nodes=0"
   odl-controller:
     charm: local:xenial/odl-controller
     options:
       install-url: "https://nexus.opendaylight.org/content/repositories/opendaylight.release/org/opendaylight/integration/distribution-karaf/0.4.2-Beryllium-SR2/distribution-karaf-0.4.2-Beryllium-SR2.tar.gz"
       profile: "openvswitch-odl-beryllium"
       #http-proxy: "http://squid.internal:3128"
       #https-proxy: "http://squid.internal:3128"
     to:
       - "lxc:nodes=0"
   glance:
     charm: local:xenial/glance
     to:
       - "lxc:nodes=1"
   opnfv-promise:
     charm: local:xenial/promise
     to:
       - "lxc:nodes=0"
   neutron-api-odl:
     charm: local:xenial/neutron-api-odl
     options:
       overlay-network-type: 'vxlan'
       security-groups: True
   openvswitch-odl:
     charm: local:xenial/openvswitch-odl
   congress:
     charm: local:xenial/congress
     options:
       #source-branch: stable/mitaka
     to:
       - "lxc:nodes=1"

I found one error on neutron-api/0.
   units:
     neutron-api/0:
       workload-status:
         current: error
         message: 'hook failed: "config-changed"'
         since: 01 Mar 2017 01:18:14Z
       agent-status:
         current: idle
         since: 01 Mar 2017 01:18:14Z
         version: 1.25.10
       agent-state: error
       agent-state-info: 'hook failed: "config-changed"'
       agent-version: 1.25.10
       machine: 2/lxc/6
       open-ports:

Access neutron-api/0 and reboot it.
root@juju-xenial:~/joid/ci/odl/juju-deployer# juju ssh neutron-api/0

ubuntu@juju-machine-2-lxc-6:~$ sudo systemctl reboot

After rebooting the neutron-api/0 node, I still had the same error.
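
A less drastic recovery step would have been to inspect the unit log and re-run the failed hook (standard Juju 1.x commands; I went straight to redeploying instead):

root@juju-xenial:~/joid/ci/odl/juju-deployer# juju ssh neutron-api/0 sudo tail -n 50 /var/log/juju/unit-neutron-api-0.log
root@juju-xenial:~/joid/ci/odl/juju-deployer# juju resolved --retry neutron-api/0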

Destroy the environment and deploy again.
root@juju-xenial:~/joid/ci/odl/juju-deployer# juju destroy-environment maas
WARNING! this command will destroy the "maas" environment (type: maas)
This includes all machines, services, data and other resources.

Continue [y/N]? y

Bootstrap
root@juju-xenial:~/joid/ci/odl/juju-deployer# juju bootstrap --to m-node01.maas
Bootstrapping environment "maas"

Edit the YAML and deploy again.
root@juju-xenial:~/joid/ci/odl/juju-deployer# juju-deployer -c ovs-odl-nonha-xenial_04.yaml -e maas xenial-mitaka

No errors this time. I am not sure what the root cause of the earlier failure was.
root@juju-xenial:~/joid/ci/odl/juju-deployer# juju status | grep error
root@juju-xenial:~/joid/ci/odl/juju-deployer#
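
For reference, the only changes from ovs-odl-nonha-xenial_03.yaml to the _04.yaml listed below (taken from the two listings in this post) are the following:

   l2-population: true                     # uncommented in the overrides
   neutron-api:
     options:
       flat-network-providers: physnet1    # added
   neutron-gateway:
     options:
       bridge-mappings: physnet1:br-ex     # added
       data-port: br-ex:eth1               # replaces ext-port: "eth1"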

Here is the final configuration.
root@juju-xenial:~/joid/ci/odl/juju-deployer# grep -v ^# ovs-odl-nonha-xenial_04.yaml  | grep -v ^$
xenial-mitaka-nodes:
 inherits: openstack-phase1
xenial-mitaka:
 inherits: openstack-phase2
 overrides:
   os-data-network: 192.168.40.0/24
   ceph-cluster-network: 192.168.40.0/24
   #prefer-ipv6: true
   #enable-dvr: true
   l2-population: true
   region: Canonical
   #source: "cloud:xenial-mitaka"
   openstack-origin: "cloud:xenial-mitaka"
   ceph-osd-replication-count: 2
   admin-role: admin
   keystone-admin-role: admin
openstack-phase1:
 services:
   nodes:
     charm: "cs:xenial/ubuntu"
     num_units: 2
   ntp:
     charm: "local:xenial/ntp"
 relations:
   - - "ntp:juju-info"
     - "nodes:juju-info"
openstack-phase2:
 inherits: openstack-phase1
 relations:
   - - "neutron-api:neutron-plugin-api-subordinate"
     - "neutron-api-odl:neutron-plugin-api-subordinate"
   - - "nova-compute:neutron-plugin"
     - "openvswitch-odl:neutron-plugin"
   - - "neutron-gateway"
     - "openvswitch-odl"
   - - "openvswitch-odl:ovsdb-manager"
     - "odl-controller:ovsdb-manager"
   - - "neutron-api-odl:odl-controller"
     - "odl-controller:controller-api"
   - - "keystone:shared-db"
     - "mysql:shared-db"
   - - "nova-cloud-controller:shared-db"
     - "mysql:shared-db"
   - - "nova-cloud-controller:amqp"
     - "rabbitmq-server:amqp"
   - - "nova-cloud-controller:image-service"
     - "glance:image-service"
   - - "nova-cloud-controller:identity-service"
     - "keystone:identity-service"
   - - "nova-cloud-controller:cloud-compute"
     - "nova-compute:cloud-compute"
   - - "nova-compute:amqp"
     - "rabbitmq-server:amqp"
   - - "nova-compute:image-service"
     - "glance:image-service"
   - - "glance:shared-db"
     - "mysql:shared-db"
   - - "glance:identity-service"
     - "keystone:identity-service"
   - - "glance:amqp"
     - "rabbitmq-server:amqp"
   - - "openstack-dashboard:identity-service"
     - "keystone:identity-service"
   - - "neutron-api:shared-db"
     - "mysql:shared-db"
   - - "neutron-api:amqp"
     - "rabbitmq-server:amqp"
   - - "nova-cloud-controller:neutron-api"
     - "neutron-api:neutron-api"
   - - "neutron-api:identity-service"
     - "keystone:identity-service"
   - - "neutron-gateway:amqp"
     - "rabbitmq-server:amqp"
   - - "neutron-gateway:neutron-plugin-api"
     - "neutron-api:neutron-plugin-api"
   - - "nova-cloud-controller:quantum-network-service"
     - "neutron-gateway:quantum-network-service"
   - - "ceilometer:amqp"
     - "rabbitmq-server:amqp"
   - - "ceilometer-agent:ceilometer-service"
     - "ceilometer:ceilometer-service"
   - - "ceilometer:identity-service"
     - "keystone:identity-service"
   - - "ceilometer:identity-notifications"
     - "keystone:identity-notifications"
   - - "ceilometer-agent:nova-ceilometer"
     - "nova-compute:nova-ceilometer"
   - - "ceilometer:shared-db"
     - "mongodb:database"
   - - "heat:shared-db"
     - "mysql:shared-db"
   - - "heat:identity-service"
     - "keystone:identity-service"
   - - "heat:amqp"
     - "rabbitmq-server:amqp"
   - - "cinder:image-service"
     - "glance:image-service"
   - - "cinder:amqp"
     - "rabbitmq-server:amqp"
   - - "cinder:identity-service"
     - "keystone:identity-service"
   - - "cinder:cinder-volume-service"
     - "nova-cloud-controller:cinder-volume-service"
   - - "cinder-ceph:storage-backend"
     - "cinder:storage-backend"
   - - "ceph:client"
     - "nova-compute:ceph"
   - - "cinder:shared-db"
     - "mysql:shared-db"
   - - "ceph:client"
     - "cinder-ceph:ceph"
   - - "ceph:client"
     - "glance:ceph"
   - - "ceph-osd:mon"
     - "ceph:osd"
   - - "ceph-radosgw:mon"
     - "ceph:radosgw"
   - - "ceph-radosgw:identity-service"
     - "keystone:identity-service"
   #- - nova-compute:lxd
     #- lxd:lxd
   - - congress:shared-db
     - mysql:shared-db
   - - congress:identity-service
     - keystone:identity-service
   - - congress:amqp
     - rabbitmq-server:amqp
 services:
   mysql:
     charm: "local:xenial/percona-cluster"
     options:
       dataset-size: 2G
       max-connections: 10000
     to:
       - "lxc:nodes=0"
   ceilometer:
     charm: "local:xenial/ceilometer"
     to:
       - "lxc:nodes=1"
   ceilometer-agent:
     charm: "local:xenial/ceilometer-agent"
   mongodb:
     charm: "local:xenial/mongodb"
     to:
       - "lxc:nodes=0"
   heat:
     charm: "local:xenial/heat"
     to:
       - "lxc:nodes=1"
   ceph:
     charm: "local:xenial/ceph"
     num_units: 2
     options:
       fsid: 5a791d94-980b-11e4-b6f6-3c970e8b1cf7
       monitor-secret: AQAi5a9UeJXUExAA+By9u+GPhl8/XiUQ4nwI3A==
       monitor-count: 2
       ceph-cluster-network: 192.168.40.0/24
       ceph-public-network: 192.168.40.0/24
     to:
       - "lxc:nodes=0"
       - "lxc:nodes=1"
   ceph-osd:
     charm: "local:xenial/ceph-osd"
     num_units: 2
     options:
       osd-devices: /dev/vdb
       osd-reformat: 'yes'
     to:
       - "nodes=0"
       - "nodes=1"
   ceph-radosgw:
     charm: "local:xenial/ceph-radosgw"
     options:
       use-embedded-webserver: true
       operator-roles: "Member,admin"
     to:
       - "lxc:nodes=0"
   cinder:
     charm: "local:xenial/cinder"
     options:
       block-device: None
       glance-api-version: 2
     to:
       - "lxc:nodes=1"
   cinder-ceph:
     charm: "local:xenial/cinder-ceph"
   rabbitmq-server:
     charm: "local:xenial/rabbitmq-server"
     to:
       - "lxc:nodes=0"
   keystone:
     charm: "local:xenial/keystone"
     options:
       admin-password: openstack
       admin-token: admin
     to:
       - "lxc:nodes=1"
   openstack-dashboard:
     charm: "local:xenial/openstack-dashboard"
     options:
       secret: admin
       webroot: /
     to:
       - "lxc:nodes=1"
   nova-compute:
     charm: local:xenial/nova-compute
     options:
       enable-live-migration: true
       enable-resize: true
       manage-neutron-plugin-legacy-mode: false
       migration-auth-type: ssh
       #virt-type: lxd
       #hugepages: "50%"
     to:
       - "nodes=1"
   nova-cloud-controller:
     charm: local:xenial/nova-cloud-controller
     options:
       console-access-protocol: novnc
       network-manager: Neutron
     to:
       - "lxc:nodes=0"
   neutron-api:
     charm: local:xenial/neutron-api
     options:
       neutron-security-groups: True
       manage-neutron-plugin-legacy-mode: False
       flat-network-providers:  physnet1
     to:
       - "lxc:nodes=1"
   neutron-gateway:
     charm: local:xenial/neutron-gateway
     options:
       bridge-mappings: physnet1:br-ex
       data-port: br-ex:eth1
       plugin: ovs-odl
       instance-mtu: 1400
     to:
       - "nodes=0"
   odl-controller:
     charm: local:xenial/odl-controller
     options:
       install-url: "https://nexus.opendaylight.org/content/repositories/opendaylight.release/org/opendaylight/integration/distribution-karaf/0.4.2-Beryllium-SR2/distribution-karaf-0.4.2-Beryllium-SR2.tar.gz"
       profile: "openvswitch-odl-beryllium"
       #http-proxy: "http://squid.internal:3128"
       #https-proxy: "http://squid.internal:3128"
     to:
       - "lxc:nodes=0"
   glance:
     charm: local:xenial/glance
     to:
       - "lxc:nodes=1"
   opnfv-promise:
     charm: local:xenial/promise
     to:
       - "lxc:nodes=0"
   neutron-api-odl:
     charm: local:xenial/neutron-api-odl
     options:
       overlay-network-type: 'vxlan'
       security-groups: True
   openvswitch-odl:
     charm: local:xenial/openvswitch-odl
   congress:
     charm: local:xenial/congress
     options:
       #source-branch: stable/mitaka
     to:
       - "lxc:nodes=1"

root@juju-xenial:~/joid/ci/odl/juju-deployer# juju status --format short

- ceilometer/0: 192.168.40.73 (started) 8777/tcp
- ceph/0: 192.168.40.80 (started)
- ceph/1: 192.168.40.85 (started)
- ceph-osd/0: m-node02.maas (started)
- ceph-osd/1: m-node03.maas (started)
- ceph-radosgw/0: 192.168.40.82 (started) 80/tcp
- cinder/0: 192.168.40.66 (started) 8776/tcp
 - cinder-ceph/0: 192.168.40.66 (started)
- congress/0: 192.168.40.79 (started) 1789/tcp
- glance/0: 192.168.40.67 (started) 9292/tcp
- heat/0: 192.168.40.89 (started) 8000/tcp, 8004/tcp
- keystone/0: 192.168.40.86 (started) 5000/tcp
- mongodb/0: 192.168.40.83 (started) 27017/tcp, 27019/tcp, 27021/tcp, 28017/tcp
- mysql/0: 192.168.40.84 (started)
- neutron-api/0: 192.168.40.87 (started) 9696/tcp
 - neutron-api-odl/0: 192.168.40.87 (started)
- neutron-gateway/0: m-node02.maas (started)
 - openvswitch-odl/1: m-node02.maas (started)
- nodes/0: m-node02.maas (started)
 - ntp/1: m-node02.maas (started)
- nodes/1: m-node03.maas (started)
 - ntp/0: m-node03.maas (started)
- nova-cloud-controller/0: 192.168.40.90 (started) 8774/tcp
- nova-compute/0: m-node03.maas (started)
 - ceilometer-agent/0: m-node03.maas (started)
 - openvswitch-odl/0: m-node03.maas (started)
- odl-controller/0: 192.168.40.75 (started)
- openstack-dashboard/0: 192.168.40.74 (started) 80/tcp, 443/tcp
- opnfv-promise/0: 192.168.40.65 (started)
- rabbitmq-server/0: 192.168.40.70 (started) 5672/tcp

Access the OpenStack dashboard.
user : admin
password : openstack

Access the OpenDaylight GUI.
https://<IP of OpenDaylight>:8181/index.html
user : admin
pass : admin
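
The OpenStack CLI clients can use the same credentials. A minimal openrc sketch; the admin tenant name and the Keystone v2.0 endpoint path are my assumptions, while the IP, password and region come from the keystone/0 address and the bundle overrides above:

export OS_USERNAME=admin
export OS_PASSWORD=openstack
export OS_TENANT_NAME=admin            # assumption: default admin tenant
export OS_REGION_NAME=Canonical        # "region" override in the bundle
export OS_AUTH_URL=http://192.168.40.86:5000/v2.0   # keystone/0, assuming Keystone v2.0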

[ Failed case ]

Deployment nodes are Trusty.
root@juju-xenial:~# git clone -b stable/colorado https://gerrit.opnfv.org/gerrit/p/joid.git

root@juju-xenial:~# cd joid/ci/odl/juju-deployer/

root@juju-xenial:~/joid/ci/odl/juju-deployer# pwd
/root/joid/ci/odl/juju-deployer

root@juju-xenial:~/joid/ci/odl/juju-deployer# cp ../fetch-charms.sh ./

root@juju-xenial:~/joid/ci/odl/juju-deployer# ./fetch-charms.sh trusty

root@juju-xenial:~/joid/ci/odl/juju-deployer# juju bootstrap --to m-node01.maas

Running juju-deployer against the Trusty bundle then stopped with an error on congress/0:
2017-02-28 02:30:59 The following units had errors:
  unit: congress/0: machine: 2/lxc/2 agent-state: error details: hook failed: "install"
2017-02-28 02:30:59 Deployment stopped. run time: 1013.87

root@juju-xenial:~/joid/ci/odl/juju-deployer# juju status | grep error
     current: error
         current: error
       agent-state: error

Access the congress/0 node.
Something went wrong while installing congress.
root@juju-xenial:~/joid/ci/odl/juju-deployer# juju ssh congress/0
ubuntu@juju-machine-2-lxc-2:~$

ubuntu@juju-machine-2-lxc-2:/var/log$ cd /var/log/juju/
ubuntu@juju-machine-2-lxc-2:/var/log/juju$ pwd
/var/log/juju


ubuntu@juju-machine-2-lxc-2:/var/log/juju$ sudo less unit-congress-0.log
2017-02-28 02:30:28 INFO install E: Unable to locate package congress-common
2017-02-28 02:30:28 INFO juju-log Couldn't acquire DPKG lock. Will retry in 10 seconds.
2017-02-28 02:30:38 INFO install Reading package lists...
2017-02-28 02:30:40 INFO install Building dependency tree...
2017-02-28 02:30:40 INFO install Reading state information...
2017-02-28 02:30:41 INFO install E: Unable to locate package congress-server
2017-02-28 02:30:41 INFO install E: Unable to locate package congress-common
2017-02-28 02:30:41 INFO juju-log Couldn't acquire DPKG lock. Will retry in 10 seconds.
2017-02-28 02:30:52 INFO install Reading package lists...
2017-02-28 02:30:53 INFO install Building dependency tree...
2017-02-28 02:30:53 INFO install Reading state information...
2017-02-28 02:30:54 INFO install E: Unable to locate package congress-server
2017-02-28 02:30:54 INFO install E: Unable to locate package congress-common
2017-02-28 02:30:54 INFO install Traceback (most recent call last):
2017-02-28 02:30:54 INFO install   File "/var/lib/juju/agents/unit-congress-0/charm/hooks/install", line 19, in <module>
2017-02-28 02:30:54 INFO install     main()
2017-02-28 02:30:54 INFO install   File "/usr/local/lib/python3.4/dist-packages/charms/reactive/__init__.py", line 78, in main
2017-02-28 02:30:54 INFO install     bus.dispatch()
2017-02-28 02:30:54 INFO install   File "/usr/local/lib/python3.4/dist-packages/charms/reactive/bus.py", line 434, in dispatch
2017-02-28 02:30:54 INFO install     _invoke(other_handlers)
2017-02-28 02:30:54 INFO install   File "/usr/local/lib/python3.4/dist-packages/charms/reactive/bus.py", line 417, in _invoke
2017-02-28 02:30:54 INFO install     handler.invoke()
2017-02-28 02:30:54 INFO install   File "/usr/local/lib/python3.4/dist-packages/charms/reactive/bus.py", line 291, in invoke
2017-02-28 02:30:54 INFO install     self._action(*args)
2017-02-28 02:30:54 INFO install   File "/var/lib/juju/agents/unit-congress-0/charm/reactive/handlers.py", line 31, in install_packages
2017-02-28 02:30:54 INFO install     congress.install()
2017-02-28 02:30:54 INFO install   File "lib/charm/openstack/congress.py", line 36, in install
2017-02-28 02:30:54 INFO install     CongressCharm.singleton.install()
2017-02-28 02:30:54 INFO install   File "lib/charm/openstack/congress.py", line 142, in install
2017-02-28 02:30:54 INFO install     super(CongressCharm, self).install()
2017-02-28 02:30:54 INFO install   File "/usr/local/lib/python3.4/dist-packages/charms_openstack/charm.py", line 632, in install
2017-02-28 02:30:54 INFO install     fetch.apt_install(packages, fatal=True)
2017-02-28 02:30:54 INFO install   File "/usr/local/lib/python3.4/dist-packages/charmhelpers/fetch/ubuntu.py", line 162, in install
2017-02-28 02:30:54 INFO install     _run_apt_command(cmd, fatal)
2017-02-28 02:30:54 INFO install   File "/usr/local/lib/python3.4/dist-packages/charmhelpers/fetch/ubuntu.py", line 313, in _run_apt_command
2017-02-28 02:30:54 INFO install     result = subprocess.check_call(cmd, env=env)
2017-02-28 02:30:54 INFO install   File "/usr/lib/python3.4/subprocess.py", line 561, in check_call
2017-02-28 02:30:54 INFO install     raise CalledProcessError(retcode, cmd)
2017-02-28 02:30:54 INFO install subprocess.CalledProcessError: Command '['apt-get', '--assume-yes', '--option=Dpkg::Options::=--force-confold', 'install', 'congress-server', 'congress-common', 'python-antlr3', 'python-pymysql']' returned non-zero exit status 100
2017-02-28 02:30:55 ERROR juju.worker.uniter.operation runhook.go:107 hook "install" failed: exit status 1
(END)

The problem was that the Trusty container has no repository providing the congress-server and congress-common packages; apt search on the container finds nothing for congress, only the python-antlr3 and python-pymysql dependencies from trusty-updates. Xenial does carry the congress-* packages in its apt archive, so I used Xenial instead of Trusty.
ubuntu@juju-machine-2-lxc-2:/var/log/juju$ apt search ^congress
Sorting... Done
Full Text Search... Done
ubuntu@juju-machine-2-lxc-2:/var/log/juju$

python-antlr3/trusty-updates 3.5.2-1~cloud0 all
 ANother Tool for Language Recognition - Python 2.7 bindings

ubuntu@juju-machine-2-lxc-2:/var/log/juju$ apt search python-pymysql
Sorting... Done
Full Text Search... Done
python-pymysql/trusty-updates 0.7.2-1~cloud0 all
 Pure-Python MySQL driver - Python 2.x

ubuntu@juju-machine-2-lxc-2:/var/log/juju$ cat /etc/lsb-release
DISTRIB_ID=Ubuntu
DISTRIB_RELEASE=14.04
DISTRIB_CODENAME=trusty
DISTRIB_DESCRIPTION="Ubuntu 14.04.5 LTS"
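
Before redeploying, the availability of the packages on Xenial can be confirmed from any Xenial host (e.g. the juju-xenial machine itself):

root@juju-xenial:~# apt-cache policy congress-server congress-common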

I gave up on deploying OpenStack and OpenDaylight on Trusty.