Update
dviejokfs committed Dec 11, 2023
1 parent 8e67a33 commit c2bfde6
Showing 11 changed files with 232 additions and 101 deletions.
140 changes: 113 additions & 27 deletions README.md
@@ -199,10 +199,10 @@ EOF

```bash
export PEER_IMAGE=hyperledger/fabric-peer
-export PEER_VERSION=3.0.0-preview
+export PEER_VERSION=2.5.5

export ORDERER_IMAGE=hyperledger/fabric-orderer
-export ORDERER_VERSION=3.0.0-preview
+export ORDERER_VERSION=2.5.5

export CA_IMAGE=hyperledger/fabric-ca
export CA_VERSION=1.5.7
@@ -213,10 +213,10 @@ export CA_VERSION=1.5.7

```bash
export PEER_IMAGE=hyperledger/fabric-peer
-export PEER_VERSION=3.0.0-preview
+export PEER_VERSION=2.5.5

export ORDERER_IMAGE=hyperledger/fabric-orderer
-export ORDERER_VERSION=3.0.0-preview
+export ORDERER_VERSION=2.5.5

export CA_IMAGE=hyperledger/fabric-ca
export CA_VERSION=1.5.7
@@ -293,12 +293,12 @@ kubectl hlf ca register --name=org1-ca --user=peer --secret=peerpw --type=peer \
### Deploy a peer

```bash
-kubectl hlf peer create --statedb=couchdb --image=$PEER_IMAGE --version=$PEER_VERSION --storage-class=standard --enroll-id=peer --mspid=Org1MSP \
+kubectl hlf peer create --statedb=leveldb --image=$PEER_IMAGE --version=$PEER_VERSION --storage-class=standard --enroll-id=peer --mspid=Org1MSP \
--enroll-pw=peerpw --capacity=5Gi --name=org1-peer0 --ca-name=org1-ca.default \
--hosts=peer0-org1.localho.st --istio-port=443


-kubectl hlf peer create --statedb=couchdb --image=$PEER_IMAGE --version=$PEER_VERSION --storage-class=standard --enroll-id=peer --mspid=Org1MSP \
+kubectl hlf peer create --statedb=leveldb --image=$PEER_IMAGE --version=$PEER_VERSION --storage-class=standard --enroll-id=peer --mspid=Org1MSP \
--enroll-pw=peerpw --capacity=5Gi --name=org1-peer1 --ca-name=org1-ca.default \
--hosts=peer1-org1.localho.st --istio-port=443
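# Hedged check, not part of the original steps: wait until both peers are Running.
# Assumes a fabricpeers CRD analogous to the fabricorderernodes wait used below.
kubectl wait --timeout=180s --for=condition=Running fabricpeers.hlf.kungfusoftware.es --all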

@@ -365,16 +365,6 @@ kubectl hlf ordnode create --image=$ORDERER_IMAGE --version=$ORDERER_VERSION \
--hosts=orderer2-ord.localho.st --admin-hosts=admin-orderer2-ord.localho.st --istio-port=443


-kubectl hlf ordnode create --image=$ORDERER_IMAGE --version=$ORDERER_VERSION \
-    --storage-class=standard --enroll-id=orderer --mspid=OrdererMSP \
-    --enroll-pw=ordererpw --capacity=2Gi --name=ord-node4 --ca-name=ord-ca.default \
-    --hosts=orderer3-ord.localho.st --admin-hosts=admin-orderer3-ord.localho.st --istio-port=443
-
-kubectl hlf ordnode create --image=$ORDERER_IMAGE --version=$ORDERER_VERSION \
-    --storage-class=standard --enroll-id=orderer --mspid=OrdererMSP \
-    --enroll-pw=ordererpw --capacity=2Gi --name=ord-node5 --ca-name=ord-ca.default \
-    --hosts=orderer4-ord.localho.st --admin-hosts=admin-orderer4-ord.localho.st --istio-port=443


kubectl wait --timeout=180s --for=condition=Running fabricorderernodes.hlf.kungfusoftware.es --all
```
@@ -408,6 +398,20 @@ kubectl hlf ca enroll --name=ord-ca --namespace=default \
--ca-name tlsca --output orderermsp.yaml
```

+### Register and enroll the Org1MSP orderer identity
+
+```bash
+# register
+kubectl hlf ca register --name=org1-ca --user=admin --secret=adminpw \
+  --type=admin --enroll-id enroll --enroll-secret=enrollpw --mspid=Org1MSP
+
+# enroll
+kubectl hlf ca enroll --name=org1-ca --namespace=default \
+  --user=admin --secret=adminpw --mspid Org1MSP \
+  --ca-name tlsca --output org1msp-tlsca.yaml
+```


### Register and enroll the Org1MSP identity

@@ -421,6 +425,12 @@ kubectl hlf ca enroll --name=org1-ca --namespace=default \
--user=admin --secret=adminpw --mspid Org1MSP \
--ca-name ca --output org1msp.yaml

+# create an identity resource for the admin user
+kubectl hlf identity create --name org1-admin --namespace default \
+  --ca-name org1-ca --ca-namespace default \
+  --ca ca --mspid Org1MSP --enroll-id admin --enroll-secret adminpw
```
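The `identity create` command above should result in a `FabricIdentity` resource holding the enrolled credentials. A hedged way to confirm it was created (the CRD plural is assumed):

```bash
kubectl get fabricidentities.hlf.kungfusoftware.es -n default
```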

### Create the secret
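The exact command is collapsed in this hunk. A minimal sketch, assuming the secret must carry the enrolled identities under the key names referenced by the channel resources below (`orderermsp.yaml`, `org1msp.yaml`) in a secret named `wallet`:

```bash
kubectl create secret generic wallet --namespace=default \
  --from-file=orderermsp.yaml=$PWD/orderermsp.yaml \
  --from-file=org1msp.yaml=$PWD/org1msp.yaml
```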
@@ -445,9 +455,9 @@ kubectl apply -f - <<EOF
apiVersion: hlf.kungfusoftware.es/v1alpha1
kind: FabricMainChannel
metadata:
-  name: demo
+  name: demo2
spec:
-  name: demo
+  name: demo2
adminOrdererOrganizations:
- mspID: OrdererMSP
adminPeerOrganizations:
@@ -493,7 +503,41 @@ spec:
secretKey: org1msp.yaml
secretName: wallet
secretNamespace: default
-  externalPeerOrganizations: []
+  externalPeerOrganizations:
+    - mspID: Org2MSP
+      tlsRootCert: |
+        -----BEGIN CERTIFICATE-----
+        MIICRjCCAeugAwIBAgIQHA5nWgCvnS9ECuauCtat6TAKBggqhkjOPQQDAjBtMQsw
+        CQYDVQQGEwJFUzERMA8GA1UEBxMIQWxpY2FudGUxETAPBgNVBAkTCEFsaWNhbnRl
+        MRkwFwYDVQQKExBLdW5nIEZ1IFNvZnR3YXJlMQ0wCwYDVQQLEwRUZWNoMQ4wDAYD
+        VQQDEwV0bHNjYTAeFw0yMzExMTIxNTA3MTlaFw0zMzExMTMxNTA3MTlaMG0xCzAJ
+        BgNVBAYTAkVTMREwDwYDVQQHEwhBbGljYW50ZTERMA8GA1UECRMIQWxpY2FudGUx
+        GTAXBgNVBAoTEEt1bmcgRnUgU29mdHdhcmUxDTALBgNVBAsTBFRlY2gxDjAMBgNV
+        BAMTBXRsc2NhMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEVB/ZqhVePy96JJH/
+        QhYCT6hRlH7xQVSrTwI1QjLG6GgENV2c10m2EkXH/BHpLJCjZ8ZkGiqVVx/XMxZd
+        E6p1B6NtMGswDgYDVR0PAQH/BAQDAgGmMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggr
+        BgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MCkGA1UdDgQiBCBcmb8svFB8Yn96e3d4
+        Ft+8wZTrAYLielLn6Je/zxbjODAKBggqhkjOPQQDAgNJADBGAiEA3ad5GVhA5RSr
+        mjhnKlazFh53einWYfWxcYNy42v+EbcCIQCt+Fc4nzlMg0ebG3HDlpa9wjJpX9MW
+        caEunJxSdY4vWg==
+        -----END CERTIFICATE-----
+      signRootCert: |
+        -----BEGIN CERTIFICATE-----
+        MIICQTCCAeagAwIBAgIRALIMDeWXSDaW2cYssTwLrTAwCgYIKoZIzj0EAwIwajEL
+        MAkGA1UEBhMCRVMxETAPBgNVBAcTCEFsaWNhbnRlMREwDwYDVQQJEwhBbGljYW50
+        ZTEZMBcGA1UEChMQS3VuZyBGdSBTb2Z0d2FyZTENMAsGA1UECxMEVGVjaDELMAkG
+        A1UEAxMCY2EwHhcNMjMxMTEyMTUwNzE5WhcNMzMxMTEzMTUwNzE5WjBqMQswCQYD
+        VQQGEwJFUzERMA8GA1UEBxMIQWxpY2FudGUxETAPBgNVBAkTCEFsaWNhbnRlMRkw
+        FwYDVQQKExBLdW5nIEZ1IFNvZnR3YXJlMQ0wCwYDVQQLEwRUZWNoMQswCQYDVQQD
+        EwJjYTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAomf+HmUFMdRS7D+IZRCEkQ
+        QEDyrakkXYEE/vwSlXhSW5PsVVXQdi/wmXj2YwoPqsDk2IrXc7KT2tni5wOxJ8mj
+        bTBrMA4GA1UdDwEB/wQEAwIBpjAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH
+        AwEwDwYDVR0TAQH/BAUwAwEB/zApBgNVHQ4EIgQgESLOdwga9AciHgTGFTR6Zxzh
+        Z/vpQ2gC3V0MbzFvfOYwCgYIKoZIzj0EAwIDSQAwRgIhAKLN7BlISJJzjjHOqJ25
+        EFfWSywv6MgddOsGgrQxsedCAiEAxhMj/EJcYvVu5rr+xiHo7TarixkdvmA5k/eW
+        kOWiji0=
+        -----END CERTIFICATE-----
ordererOrganizations:
- caName: "ord-ca"
caNamespace: "default"
@@ -502,11 +546,11 @@
port: 7053
mspID: OrdererMSP
ordererEndpoints:
-  - ord-node1:7050
+  - orderer0-ord.localho.st:443
orderersToJoin: []
orderers:
-  - host: ord-node1
-    port: 7050
+  - host: orderer0-ord.localho.st
+    port: 443
tlsCert: |-
${ORDERER0_TLS_CERT}
@@ -524,17 +568,17 @@ kubectl apply -f - <<EOF
apiVersion: hlf.kungfusoftware.es/v1alpha1
kind: FabricFollowerChannel
metadata:
-  name: testbft02-org1msp
+  name: demo2-org1msp
spec:
anchorPeers:
-  - host: org1-peer0.default
-    port: 7051
+  - host: peer0-org1.localho.st
+    port: 443
hlfIdentity:
secretKey: org1msp.yaml
secretName: wallet
secretNamespace: default
mspId: Org1MSP
-  name: testbft02
+  name: demo2
externalPeersToJoin: []
orderers:
- certificate: |
@@ -683,12 +727,54 @@ At this point, you should have:

- Ordering service with 1 node and a CA
- Peer organization with a peer and a CA
-- A channel **demo**
+- A channel **demo2**
- A chaincode installed on peer0
- A chaincode approved and committed

If something went wrong or didn't work, please open an issue.



+### Prepare connection string for a peer
+
+To prepare the connection string, create a resource of type `FabricNetworkConfig` with the following command:
+
+```bash
+kubectl apply -f - <<EOF
+apiVersion: hlf.kungfusoftware.es/v1alpha1
+kind: FabricNetworkConfig
+metadata:
+  name: nc
+  namespace: default
+spec:
+  channels:
+    - demo2
+  identities:
+    - name: org1-admin
+      namespace: default
+  internal: false
+  namespaces: []
+  organization: ''
+  organizations:
+    - Org1MSP
+    - OrdererMSP
+  secretName: nc-networkconfig
+EOF
+```
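Once reconciled, the operator renders the connection profile into the secret named above. A hedged way to extract it for use with an SDK — the key name `config.yaml` is taken from the `HLF_SECRET_KEY` used below:

```bash
kubectl get secret nc-networkconfig -n default \
  -o jsonpath='{.data.config\.yaml}' | base64 -d > networkconfig.yaml
```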
+## Launch the operator API
+
+```bash
+export API_HOST=operator-api.localho.st
+export HLF_SECRET_NAME="nc-networkconfig"
+export HLF_MSPID="Org1MSP"
+export HLF_SECRET_KEY="config.yaml" # e.g. networkConfig.yaml
+export HLF_USER="org1-admin-default"
+kubectl hlf operatorapi create --name=operator-api --namespace=default --version="v0.0.17-beta9" --hosts=$API_HOST --ingress-class-name=istio \
+  --hlf-mspid="${HLF_MSPID}" --hlf-secret="${HLF_SECRET_NAME}" --hlf-secret-key="${HLF_SECRET_KEY}" \
+  --hlf-user="${HLF_USER}"
+```
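A hedged smoke test once the pod is ready — it only checks that the Istio ingress answers on the configured host; the API's routes are not documented in this diff:

```bash
curl -k https://operator-api.localho.st
```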

## Cleanup the environment

```bash
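# Hedged sketch — the full cleanup is collapsed here. This deletes the operator
# resources created in this guide; plural resource names are assumed from the
# hlf.kungfusoftware.es group used throughout.
kubectl delete fabricmainchannels.hlf.kungfusoftware.es --all-namespaces --all
kubectl delete fabricfollowerchannels.hlf.kungfusoftware.es --all-namespaces --all
kubectl delete fabricpeers.hlf.kungfusoftware.es --all-namespaces --all
kubectl delete fabricorderernodes.hlf.kungfusoftware.es --all-namespaces --all
kubectl delete fabriccas.hlf.kungfusoftware.es --all-namespaces --all
```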
2 changes: 1 addition & 1 deletion charts/hlf-ca/values.yaml
@@ -26,4 +26,4 @@ gatewayApi:
gatewayName: ""
gatewayNamespace: ""

-envVars: []
+envVars: []
4 changes: 4 additions & 0 deletions charts/hlf-ordnode/templates/service.yaml
@@ -7,6 +7,10 @@ metadata:
spec:
type: {{ .Values.service.type }}
ports:
+- port: 443
+  targetPort: 7050
+  protocol: TCP
+  name: grpc-443
- port: {{ .Values.service.port }}
targetPort: 7050
protocol: TCP
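With the added entry, the orderer Service now exposes gRPC on 443 (forwarded to the orderer's 7050) alongside the chart's configurable port. A hedged check on a live cluster — the Service name is assumed to match the orderer node name used earlier:

```bash
kubectl get svc ord-node1 -n default -o jsonpath='{.spec.ports}'; echo
```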
8 changes: 1 addition & 7 deletions charts/hlf-ordnode/values.yaml
@@ -8,13 +8,7 @@ genesis: CiIaIDucvmxr45nIpNz5bdNbGR37tbG28dfCtU2Uz8ixeKBAEuhDCuVDCuJDCnwKXggBEAE
bootstrapMethod: "none"
channelParticipationEnabled: false

-resources:
-  limits:
-    cpu: 100m
-    memory: 128Mi
-  requests:
-    cpu: 100m
-    memory: 128Mi
+resources: {}

proxy:
enabled: false
22 changes: 8 additions & 14 deletions charts/hlf-peer/values.yaml
@@ -8,6 +8,12 @@ istio:
hosts: []
ingressGateway: ingressgateway

+traefik:
+  entryPoints: []
+  middlewares: []
+  hosts: []


dockerSocketPath: /var/run/docker.sock

envVars: []
@@ -87,27 +93,15 @@ couchdb:
pullPolicy: IfNotPresent

resources:
-  peer:
-    limits:
-      cpu: 100m
-      memory: 128Mi
-    requests:
-      cpu: 100m
-      memory: 128Mi
+  peer: {}
couchdbExporter:
limits:
cpu: 300m
memory: 128Mi
requests:
cpu: 100m
memory: 64Mi
-  couchdb:
-    limits:
-      cpu: 100m
-      memory: 128Mi
-    requests:
-      cpu: 100m
-      memory: 128Mi
+  couchdb: {}
chaincode:
limits:
cpu: 100m
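With the chart defaults now empty, clusters that relied on the old built-in limits must set them explicitly. A hedged example restoring the previous peer defaults through Helm values (release name and chart path are illustrative):

```bash
helm upgrade org1-peer0 ./charts/hlf-peer \
  --set resources.peer.limits.cpu=100m \
  --set resources.peer.limits.memory=128Mi \
  --set resources.peer.requests.cpu=100m \
  --set resources.peer.requests.memory=128Mi
```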
2 changes: 2 additions & 0 deletions controllers/ca/ca_controller.go
@@ -56,6 +56,8 @@ type FabricCAReconciler struct {
Scheme *runtime.Scheme
Config *rest.Config
ClientSet *kubernetes.Clientset
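+	// Wait and Timeout presumably gate reconciliation on resource readiness
+	// (semantics inferred from the names; not documented in this diff).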
+	Wait      bool
+	Timeout   time.Duration
}

func parseECDSAPrivateKey(contents []byte) (*ecdsa.PrivateKey, error) {
22 changes: 22 additions & 0 deletions controllers/mainchannel/mainchannel_controller.go
@@ -1265,6 +1265,10 @@ func updateApplicationChannelConfigTx(currentConfigTX configtx.ConfigTx, newConf
if err != nil {
return errors.Wrapf(err, "failed to set ACLs")
}
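+	// keep the orderer batch timeout in sync with the desired channel config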
+	err = currentConfigTX.Orderer().SetBatchTimeout(newConfigTx.Orderer.BatchTimeout)
+	if err != nil {
+		return errors.Wrapf(err, "failed to set batch timeout")
+	}
return nil
}
func updateOrdererChannelConfigTx(currentConfigTX configtx.ConfigTx, newConfigTx configtx.Channel) error {
@@ -1387,6 +1391,24 @@ func updateOrdererChannelConfigTx(currentConfigTX configtx.ConfigTx, newConfigTx
}
}

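+	// propagate the desired batch-size settings (max message count, absolute
+	// and preferred byte limits) into the current orderer config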
+	err = currentConfigTX.Orderer().BatchSize().SetMaxMessageCount(
+		newConfigTx.Orderer.BatchSize.MaxMessageCount,
+	)
+	if err != nil {
+		return errors.Wrapf(err, "failed to set max message count")
+	}
+	err = currentConfigTX.Orderer().BatchSize().SetAbsoluteMaxBytes(
+		newConfigTx.Orderer.BatchSize.AbsoluteMaxBytes,
+	)
+	if err != nil {
+		return errors.Wrapf(err, "failed to set absolute max bytes")
+	}
+	err = currentConfigTX.Orderer().BatchSize().SetPreferredMaxBytes(
+		newConfigTx.Orderer.BatchSize.PreferredMaxBytes,
+	)
+	if err != nil {
+		return errors.Wrapf(err, "failed to set preferred max bytes")
+	}
err = currentConfigTX.Orderer().SetPolicies(
newConfigTx.Orderer.Policies,
)
2 changes: 2 additions & 0 deletions controllers/ordnode/ordnode_controller.go
@@ -52,6 +52,8 @@ type FabricOrdererNodeReconciler struct {
Config *rest.Config
AutoRenewCertificates bool
AutoRenewCertificatesDelta time.Duration
+	Wait                       bool
+	Timeout                    time.Duration
}

const ordererNodeFinalizer = "finalizer.orderernode.hlf.kungfusoftware.es"