This is the exception I see:
Exception in thread "main" org.hyperledger.fabric.gateway.GatewayRuntimeException: org.hyperledger.fabric.sdk.exception.ProposalException: org.hyperledger.fabric.sdk.exception.TransactionException: org.hyperledger.fabric.sdk.exception.ProposalException: getConfigBlock for channel isprintchannel failed with peer peer1.org1.isprint.com. Status FAILURE, details: Channel Channel{id: 1, name: isprintchannel} Sending proposal with transaction: 31101a32ee94cdb3ec65abaca86f0cf828d6b48cd4453257cd7270f94d192b93 to Peer{ id: 2, name: peer1.org1.isprint.com, channelName: isprintchannel, url: grpc://127.0.0.1:7051, mspid: Org1MSP} failed because of: gRPC failure=Status{code=UNAVAILABLE, description=Network closed for unknown reason, cause=null}
at org.hyperledger.fabric.gateway.impl.TransactionImpl.submit(TransactionImpl.java:121)
at org.hyperledger.fabric.gateway.impl.ContractImpl.submitTransaction(ContractImpl.java:50)
at com.isprint.axr.ext.hyperledger.isprint_fabric.isprint_chaincode.ChaincodeEventTester.main(ChaincodeEventTester.java:39)
Caused by: org.hyperledger.fabric.sdk.exception.ProposalException: org.hyperledger.fabric.sdk.exception.TransactionException: org.hyperledger.fabric.sdk.exception.ProposalException: getConfigBlock for channel isprintchannel failed with peer peer1.org1.isprint.com. Status FAILURE, details: Channel Channel{id: 1, name: isprintchannel} Sending proposal with transaction: 31101a32ee94cdb3ec65abaca86f0cf828d6b48cd4453257cd7270f94d192b93 to Peer{ id: 2, name: peer1.org1.isprint.com, channelName: isprintchannel, url: grpc://127.0.0.1:7051, mspid: Org1MSP} failed because of: gRPC failure=Status{code=UNAVAILABLE, description=Network closed for unknown reason, cause=null}
at org.hyperledger.fabric.sdk.Channel.sendProposalToPeers(Channel.java:4387)
at org.hyperledger.fabric.sdk.Channel.sendProposal(Channel.java:4358)
at org.hyperledger.fabric.sdk.Channel.sendTransactionProposal(Channel.java:3908)
at org.hyperledger.fabric.gateway.impl.TransactionImpl.sendTransactionProposal(TransactionImpl.java:161)
at org.hyperledger.fabric.gateway.impl.TransactionImpl.submit(TransactionImpl.java:94)
... 2 more
Caused by: org.hyperledger.fabric.sdk.exception.TransactionException: org.hyperledger.fabric.sdk.exception.ProposalException: getConfigBlock for channel isprintchannel failed with peer peer1.org1.isprint.com. Status FAILURE, details: Channel Channel{id: 1, name: isprintchannel} Sending proposal with transaction: 31101a32ee94cdb3ec65abaca86f0cf828d6b48cd4453257cd7270f94d192b93 to Peer{ id: 2, name: peer1.org1.isprint.com, channelName: isprintchannel, url: grpc://127.0.0.1:7051, mspid: Org1MSP} failed because of: gRPC failure=Status{code=UNAVAILABLE, description=Network closed for unknown reason, cause=null}
at org.hyperledger.fabric.sdk.Channel.parseConfigBlock(Channel.java:2023)
at org.hyperledger.fabric.sdk.Channel.loadCACertificates(Channel.java:1843)
at org.hyperledger.fabric.sdk.Channel.sendProposalToPeers(Channel.java:4385)
... 6 more
Caused by: org.hyperledger.fabric.sdk.exception.ProposalException: getConfigBlock for channel isprintchannel failed with peer peer1.org1.isprint.com. Status FAILURE, details: Channel Channel{id: 1, name: isprintchannel} Sending proposal with transaction: 31101a32ee94cdb3ec65abaca86f0cf828d6b48cd4453257cd7270f94d192b93 to Peer{ id: 2, name: peer1.org1.isprint.com, channelName: isprintchannel, url: grpc://127.0.0.1:7051, mspid: Org1MSP} failed because of: gRPC failure=Status{code=UNAVAILABLE, description=Network closed for unknown reason, cause=null}
at org.hyperledger.fabric.sdk.Channel.getConfigBlock(Channel.java:962)
at org.hyperledger.fabric.sdk.Channel.getConfigBlock(Channel.java:917)
at org.hyperledger.fabric.sdk.Channel.parseConfigBlock(Channel.java:2006)
... 8 more
This is my Gateway code (pretty much unchanged from the boilerplate):
/**
 * Minimal Fabric Gateway client: connects with a file-system wallet and a
 * common connection profile, then submits one "createProduct" transaction
 * to the "myteacc" chaincode on channel "isprintchannel".
 *
 * NOTE(review): the channel requested here ("isprintchannel") must be
 * defined in the connection profile; the profile shown elsewhere defines
 * "myteachannel" instead — confirm the names agree.
 */
public class Tester {
    public static void main(String[] args) throws IOException {
        // Load an existing wallet holding identities used to access the network.
        Path wdir = Paths.get("wallet");
        Wallet wallet = Wallet.createFileSystemWallet(wdir);
        // Path to a common connection profile describing the network.
        Path cfg = Paths.get("config", "local", "connection.json");
        // Configure the gateway connection used to access the network.
        Gateway.Builder builder = Gateway.createBuilder().identity(wallet, "myteareserve_app").networkConfig(cfg);
        // Create a gateway connection; try-with-resources closes it for us.
        try (Gateway gateway = builder.connect()) {
            Network network = gateway.getNetwork("isprintchannel");
            Contract contract = network.getContract("myteacc");
            // this next line throws the above exception
            byte[] createProductResult = contract.submitTransaction("createProduct", "tea001", "red");
            System.out.println(new String(createProductResult, StandardCharsets.UTF_8));
        } catch (ContractException | TimeoutException e) {
            e.printStackTrace();
        } catch (InterruptedException e) {
            // Bug fix: restore the thread's interrupt status instead of
            // silently swallowing the interruption.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
    }
}
connection.json is defined as follows:
{
"name": "myteareserve",
"x-type": "hlfv1",
"x-commitTimeout": 1000,
"version": "1.0.0",
"client": {
"organization": "Org1",
"connection": {
"timeout": {
"peer": {
"endorser": "1000",
"eventHub": "1000",
"eventReg": "1000"
},
"orderer": "1000"
}
}
},
"channels": {
"myteachannel": {
"orderers": [
"orderer1.isprint.com",
"orderer2.isprint.com",
"orderer3.isprint.com"
],
"peers": {
"peer1.org1.isprint.com": {
"endorsingPeer": true,
"chaincodeQuery": true,
"ledgerQuery": true,
"eventSource": true
},
"peer2.org1.isprint.com": {
"endorsingPeer": true,
"chaincodeQuery": true,
"ledgerQuery": true,
"eventSource": true
}
}
}
},
"organizations": {
"Org1": {
"mspid": "Org1MSP",
"peers": [
"peer1.org1.isprint.com",
"peer2.org1.isprint.com"
],
"certificateAuthorities": [
"ca.org1.isprint.com"
]
}
},
"orderers": {
"orderer1.isprint.com": {
"url": "grpc://127.0.0.1:7050"
},
"orderer2.isprint.com": {
"url": "grpc://127.0.0.1:8050"
},
"orderer3.isprint.com": {
"url": "grpc://127.0.0.1:9050"
}
},
"peers": {
"peer1.org1.isprint.com": {
"url": "grpc://127.0.0.1:7051"
},
"peer2.org1.isprint.com": {
"url": "grpc://127.0.0.1:8051"
}
},
"certificateAuthorities": {
"ca.org1.isprint.com": {
"url": "http://127.0.0.1:7054",
"caName": "ca.org1.isprint.com"
}
}
}
This is what I see when I follow my peer logs (I had to set debug to INFO, else there's too much gossip DEBUG):
iamuser#isprintdev:~/shared$ docker logs --tail 0 -f eeb970c52a36
2020-05-05 20:38:59.841 UTC [core.comm] ServerHandshake -> ERRO 080 TLS handshake failed with error tls: first record does not look like a TLS handshake server=PeerServer remoteaddress=10.0.2.2:61852
2020-05-05 20:38:59.926 UTC [core.comm] ServerHandshake -> ERRO 081 TLS handshake failed with error tls: first record does not look like a TLS handshake server=PeerServer remoteaddress=10.0.2.2:61853
For completeness here is my Docker compose yaml for peers:
version: '3.4'
volumes:
peer1.org1.isprint.com:
peer2.org1.isprint.com:
couchdb1.org1.isprint.com:
couchdb2.org1.isprint.com:
networks:
isprint:
external:
name: fabric
services:
org1couchdb1:
image: hyperledger/fabric-couchdb
environment:
- COUCHDB_USER= couchdb
- COUCHDB_PASSWORD=couchdb123
volumes:
- couchdb1.org1.isprint.com:/opt/couchdb/data
deploy:
mode: replicated
replicas: 1
restart_policy:
condition: on-failure
placement:
constraints:
- node.hostname == isprintdev
ports:
- published: 5984
target: 5984
mode: host
networks:
isprint:
aliases:
- couchdb1.org1.isprint.com
org1couchdb2:
image: hyperledger/fabric-couchdb
environment:
- COUCHDB_USER= couchdb
- COUCHDB_PASSWORD=couchdb123
volumes:
- couchdb2.org1.isprint.com:/opt/couchdb/data
deploy:
mode: replicated
replicas: 1
restart_policy:
condition: on-failure
placement:
constraints:
- node.hostname == isprintdev
ports:
- published: 6984
target: 5984
mode: host
networks:
isprint:
aliases:
- couchdb2.org1.isprint.com
org1peer1:
image: hyperledger/fabric-peer:latest
environment:
# couchdb params
- CORE_LEDGER_STATE_STATEDATABASE=CouchDB
- CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS=couchdb1.org1.isprint.com:5984
- CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME=couchdb
- CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD=couchdb123
- CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock
# the following setting starts chaincode containers on the same
# bridge network as the peers
# https://docs.docker.com/compose/networking/
- CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=fabric
- CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:7052
#- CORE_LOGGING_LEVEL=INFO
- FABRIC_LOGGING_SPEC=INFO
- CORE_PEER_TLS_ENABLED=true
- CORE_PEER_GOSSIP_USELEADERELECTION=true
- CORE_PEER_GOSSIP_ORGLEADER=false
- CORE_PEER_PROFILE_ENABLED=true
- CORE_PEER_TLS_CERT_FILE=/etc/hyperledger/fabric/tls/server.crt
- CORE_PEER_TLS_KEY_FILE=/etc/hyperledger/fabric/tls/server.key
- CORE_PEER_TLS_ROOTCERT_FILE=/etc/hyperledger/fabric/tls/ca.crt
- CORE_PEER_ID=peer1.org1.isprint.com
- CORE_PEER_ADDRESS=peer1.org1.isprint.com:7051
- CORE_PEER_GOSSIP_BOOTSTRAP=peer2.org1.isprint.com:7051
- CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer1.org1.isprint.com:7051
- CORE_PEER_LOCALMSPID=Org1MSP
- CORE_VM_DOCKER_ATTACHSTDOUT=true
- CORE_CHAINCODE_STARTUPTIMEOUT=1200s
- CORE_CHAINCODE_EXECUTETIMEOUT=800s
working_dir: /opt/gopath/src/github.com/hyperledger/fabric/peer
command: peer node start
volumes:
- /var/run/:/host/var/run/
- ./crypto-config/peerOrganizations/org1.isprint.com/peers/peer1.org1.isprint.com/msp:/etc/hyperledger/fabric/msp
- ./crypto-config/peerOrganizations/org1.isprint.com/peers/peer1.org1.isprint.com/tls:/etc/hyperledger/fabric/tls
- peer1.org1.isprint.com:/var/hyperledger/production
deploy:
mode: replicated
replicas: 1
restart_policy:
condition: on-failure
placement:
constraints:
- node.hostname == isprintdev
ports:
- published: 7051
target: 7051
mode: host
- published: 7053
target: 7053
mode: host
networks:
isprint:
aliases:
- peer1.org1.isprint.com
org1peer2:
image: hyperledger/fabric-peer:latest
environment:
# couchdb params
- CORE_LEDGER_STATE_STATEDATABASE=CouchDB
- CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS=couchdb2.org1.isprint.com:5984
- CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME=couchdb
- CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD=couchdb123
- CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock
# the following setting starts chaincode containers on the same
# bridge network as the peers
# https://docs.docker.com/compose/networking/
- CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=fabric
- CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:7052
#- CORE_LOGGING_LEVEL=INFO
- FABRIC_LOGGING_SPEC=INFO
- CORE_PEER_TLS_ENABLED=true
- CORE_PEER_GOSSIP_USELEADERELECTION=true
- CORE_PEER_GOSSIP_ORGLEADER=false
- CORE_PEER_PROFILE_ENABLED=true
- CORE_PEER_TLS_CERT_FILE=/etc/hyperledger/fabric/tls/server.crt
- CORE_PEER_TLS_KEY_FILE=/etc/hyperledger/fabric/tls/server.key
- CORE_PEER_TLS_ROOTCERT_FILE=/etc/hyperledger/fabric/tls/ca.crt
- CORE_PEER_ID=peer1.org1.isprint.com
- CORE_PEER_ADDRESS=peer2.org1.isprint.com:8051
- CORE_PEER_GOSSIP_BOOTSTRAP=peer1.org1.isprint.com:7051
- CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer2.org1.isprint.com:8051
- CORE_PEER_LOCALMSPID=Org1MSP
- CORE_VM_DOCKER_ATTACHSTDOUT=true
- CORE_CHAINCODE_STARTUPTIMEOUT=1200s
- CORE_CHAINCODE_EXECUTETIMEOUT=800s
working_dir: /opt/gopath/src/github.com/hyperledger/fabric/peer
command: peer node start
volumes:
- /var/run/:/host/var/run/
- ./crypto-config/peerOrganizations/org1.isprint.com/peers/peer2.org1.isprint.com/msp:/etc/hyperledger/fabric/msp
- ./crypto-config/peerOrganizations/org1.isprint.com/peers/peer2.org1.isprint.com/tls:/etc/hyperledger/fabric/tls
- peer2.org1.isprint.com:/var/hyperledger/production
deploy:
mode: replicated
replicas: 1
restart_policy:
condition: on-failure
placement:
constraints:
- node.hostname == isprintdev
ports:
- published: 8051
target: 7051
mode: host
- published: 8053
target: 7053
mode: host
networks:
isprint:
aliases:
- peer2.org1.isprint.com
Do let me know if I should provide more info.
Connection.json seems to be missing Certificates. Please refer to "first-network/connection-org1.json" under "fabric-samples" on how the certificates are used while connecting to the gateway.
One problem I can see is you're using grpc:// instead of grpcs://. If you're using TLS you're going to need to access the orderer/peers using grpcs:// ... much like when you use TLS over HTTP you need https:// (rather than http://). Once you update you should get a more meaningful error message.
Related
Filebeat input is not transferring to Logstash. I have provided the Filebeat and Logstash configuration files below.
Input Test.csv file
Date,Open,High,Low,Close,Volume,Adj Close
2015-04-02,125.03,125.56,124.19,125.32,32120700,125.32
2015-04-01,124.82,125.12,123.10,124.25,40359200,124.25
filebeat.yml
filebeat.inputs:
- type: log
enabled: true
paths:
-C:/ELK Stack/filebeat-8.2.0-windows-x86_64/filebeat-8.2.0-windows-x86_64/Test.csv
output.logstash:
hosts: ["localhost:5044"]
logstash.conf
input {
beats {
port => 5044
}
}
filter {
csv {
separator => ","
columns => ["Date","Open","High","Low","Close","Volume","Adj Close"]
}
mutate {convert => ["High", "float"]}
mutate {convert => ["Open", "float"]}
mutate {convert => ["Low", "float"]}
mutate {convert => ["Close", "float"]}
mutate {convert => ["Volume", "float"]}
}
output {
stdout {}
}
Kindly check the filebeat.yml file, as there is an issue with indentation.
filebeat documentation
filebeat.inputs:
- type: log
paths:
- /var/log/messages
- /var/log/*.log
your filebeat
filebeat.inputs:
- type: log
enabled: true
paths:
-C:/ELK Stack/filebeat-8.2.0-windows-x86_64/filebeat-8.2.0-windows-x86_64/Test.csv
output.logstash:
hosts: ["localhost:5044"]
Also, for information: the log input is deprecated; use the filestream input instead.
I have two Spring Boot services and a Postgres DB. Service 1 calls (HTTP) service 2, service 2 integrates with Postgres, and each service runs in a container. The problem is that when service 1 calls service 2, it receives a connection refused, and I have no clue why, since all services seem to be on the same network (which is not the default bridge network). Pinging container 2 from container 1 works, but when I curl the service 2 endpoint I get the error 'can't resolve the hostname'. Any help would be very welcome because I'm tired of Docker!
Docker-compose
version: '3.7'
networks:
valkyre-network:
driver: bridge
services:
db:
image: 'postgres:latest'
container_name: db
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
volumes:
- ./postgres-data:/var/lib/postgresql/data
ports:
- "5432:5432"
expose:
- 5432
networks:
- valkyre-network
valreim-player-register:
image: 'reiiissamuel/valreim-player-register:latest'
build:
context: .
container_name: valreim-player-register
depends_on:
- db
environment:
- DATABASE_URL=jdbc:postgresql://db:5432/postgres
- DATABASE_USERNAME=postgres
- DATABASE_PASSWORD=postgres
- FTP_REMOTE_WL_PATH=/files/valheim/valheim/server/BepInEx/config/aac_whitelistedDLLs.txt
- FTP_REMOTE_HOST=http://104.248.238.7:49153
- FTP_REMOTE_HOST_PORT:2022
- FTP_REMOTE_HOST_USER:reiiissamuel
- FTP_REMOTE_HOST_PASSWORD:Sbmvflrfjs23
ports:
- "8087:8087"
expose:
- 8087
networks:
- valkyre-network
valkyre-bot:
container_name: valkyre-bot
build:
context: .
image: 'reiiissamuel/valkyre:latest'
depends_on:
- valreim-player-register
environment:
- REGISTER_API_URL=http://valreim-player-register:8087/register/players
networks:
- valkyre-network
Docker network inspect
$ docker network inspect valkyre_valkyre-network
[
{
"Name": "valkyre_valkyre-network",
"Id": "862793d31abb81b777a9dd363fdf40cf1f867779a81857ea8b814d308f7b8903",
"Created": "2022-02-15T05:51:30.9692795Z",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": [
{
"Subnet": "172.30.0.0/16",
"Gateway": "172.30.0.1"
}
]
},
"Internal": false,
"Attachable": true,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"452659d8b9c4523fe55dcce2caeef374cc2838f88d2fb4502e90e7adfa71d2bd": {
"Name": "db",
"EndpointID": "5c60f27249b478a3e3b388f97f729d8e6363139a179cdbfa179341adfa5420fd",
"MacAddress": "02:42:ac:1e:00:02",
"IPv4Address": "172.30.0.2/16",
"IPv6Address": ""
},
"9bd52710f8239182fe083397a6b022bd774f8982efa5a0495849853a67eaa63d": {
"Name": "valreim-player-register",
"EndpointID": "e205076c01be599aab1c6537e0d2f80d2704d44882bbe190b7266f1c546d7fab",
"MacAddress": "02:42:ac:1e:00:03",
"IPv4Address": "172.30.0.3/16",
"IPv6Address": ""
},
"ac326acd43477f72c1e288ff7fd91d8cb0f2c26472071a3daa81dd82b8ccf886": {
"Name": "valkyre-bot",
"EndpointID": "bd33dded4fb9d2bb05a89073c8acc3981588c9362799f5f3568f3d4831333274",
"MacAddress": "02:42:ac:1e:00:04",
"IPv4Address": "172.30.0.4/16",
"IPv6Address": ""
}
},
"Options": {},
"Labels": {
"com.docker.compose.network": "valkyre-network",
"com.docker.compose.project": "valkyre",
"com.docker.compose.version": "1.29.2"
}
}
]
Successful ping from service 1 to service 2
D:\java-projects\valkyre>docker container exec -it valkyre-bot ping valreim-player-register -p 8087
PATTERN: 0x8087
PING valreim-player-register (172.29.0.3) 56(84) bytes of data.
64 bytes from valreim-player-register.valkyre_valkyre-network (172.29.0.3): icmp_seq=1 ttl=64 time=0.067 ms
...
--- valreim-player-register ping statistics ---
10 packets transmitted, 10 received, 0% packet loss, time 9226ms
Unsuccessful curl to the service 2 endpoint from service 1
# curl -v -L http://valreim-player-register:8087/register/players/steamid/111
* Trying 172.30.0.3:8087...
* connect to 172.30.0.3 port 8087 failed: Connection refused
* Failed to connect to valreim-player-register port 8087: Connection refused
* Closing connection 0
curl: (7) Failed to connect to valreim-player-register port 8087: Connection refused
Java service's 1 method where service 2 is call
(I guess it won't be useful, but extra info never hurts.)
/**
 * Registers a player by POSTing its JSON representation to the register
 * endpoint of service 2 (PLAYER_REGISTER_API_URI).
 *
 * Assumes player.toString() produces a JSON document — TODO confirm
 * against PlayerReg.
 *
 * NOTE(review): the response entity is never consumed/closed, which can
 * exhaust the HttpClient connection pool under load — verify.
 *
 * @throws IOException if the server does not answer with HTTP 201; the
 *         exception message is the status code as a string
 */
public void sendRegisterUser(PlayerReg player) throws IOException, InterruptedException {
logger.info("Cadastrando player na base...");
String payload = player.toString();
StringEntity entity = new StringEntity(payload,
ContentType.APPLICATION_JSON);
HttpPost request = new HttpPost(PLAYER_REGISTER_API_URI);
request.setEntity(entity);
HttpResponse response = httpClient.execute(request);
// Anything other than 201 Created is treated as a failure.
if(response.getStatusLine().getStatusCode() != 201)
throw new IOException(String.valueOf(response.getStatusLine().getStatusCode()));
}
I customized the k8s CoreDNS config file to resolve a custom name, which works fine in pods (checked by `ping xx`).
But it is not resolved in the Java application (JDK 14).
The nameserver is OK.
/ # cat /etc/resolv.conf
nameserver 10.96.0.10
search xxxx-5-production.svc.cluster.local svc.cluster.local cluster.local
/ # ping xx
PING xx (192.168.65.2): 56 data bytes
64 bytes from 192.168.65.2: seq=0 ttl=37 time=0.787 ms
Edit: I use a CoreDNS rewrite to map the host name xx to host.docker.internal; this is the change to the CoreDNS config:
rewrite name regex (^|(?:\S*\.)*)xx\.?$ {1}host.docker.internal
I add some debug code to the entry:
/**
 * Runs an external command, drains its stdout and stderr, waits for it to
 * finish, and logs both streams. Errors are logged, never thrown.
 */
static void runCommand(String... commands) {
    try {
        ProcessBuilder cat = new ProcessBuilder(commands);
        Process start = cat.start();
        // Bug fix: drain stdout/stderr BEFORE waitFor(). Calling waitFor()
        // first can deadlock when the child produces enough output to fill
        // the OS pipe buffer, because nothing is reading it yet.
        String output = new BufferedReader(new InputStreamReader(start.getInputStream())).lines().collect(Collectors.joining());
        String err = new BufferedReader(new InputStreamReader(start.getErrorStream())).lines().collect(Collectors.joining());
        start.waitFor();
        log.info("\n{}: stout {}", Arrays.toString(commands), output);
        log.info("\n{}: sterr{}", Arrays.toString(commands), err);
    } catch (IOException | InterruptedException e) {
        log.error(e.getClass().getCanonicalName(), e);
    }
}
/**
 * Debug entry point: resolves the custom name "xx" via the JDK resolver,
 * then shells out to cat/ping/nslookup/ifconfig to compare the container's
 * view of DNS with what the JVM sees, before starting the Spring app.
 */
public static void main(String[] args) {
try {
// JDK-level lookup of the CoreDNS-rewritten name.
InetAddress xx = Inet4Address.getByName("xx");
log.info("{}: {}", "InetAddress xx", xx.getHostAddress());
} catch (IOException e) {
log.error(e.getClass().getCanonicalName(), e);
}
// OS-level diagnostics for comparison with the JDK lookup above.
runCommand("cat", "/etc/resolv.conf");
runCommand("ping", "xx","-c","1");
runCommand("ping", "host.docker.internal","-c","1");
runCommand("nslookup", "xx");
runCommand("ifconfig");
SpringApplication.run(FileServerApp.class, args);
}
Here is output:
01:01:39.950 [main] ERROR com.j.file_server_app.FileServerApp - java.net.UnknownHostException
java.net.UnknownHostException: xx: Name or service not known
at java.base/java.net.Inet4AddressImpl.lookupAllHostAddr(Native Method)
at java.base/java.net.InetAddress$PlatformNameService.lookupAllHostAddr(InetAddress.java:932)
at java.base/java.net.InetAddress.getAddressesFromNameService(InetAddress.java:1505)
at java.base/java.net.InetAddress$NameServiceAddresses.get(InetAddress.java:851)
at java.base/java.net.InetAddress.getAllByName0(InetAddress.java:1495)
at java.base/java.net.InetAddress.getAllByName(InetAddress.java:1354)
at java.base/java.net.InetAddress.getAllByName(InetAddress.java:1288)
at java.base/java.net.InetAddress.getByName(InetAddress.java:1238)
at com.j.file_server_app.FileServerApp.main(FileServerApp.java:43)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:564)
at org.springframework.boot.loader.MainMethodRunner.run(MainMethodRunner.java:48)
at org.springframework.boot.loader.Launcher.launch(Launcher.java:87)
at org.springframework.boot.loader.Launcher.launch(Launcher.java:51)
at org.springframework.boot.loader.JarLauncher.main(JarLauncher.java:52)
01:01:39.983 [main] INFO com.j.file_server_app.FileServerApp -
[cat, /etc/resolv.conf]: stout nameserver 10.96.0.10search default.svc.cluster.local svc.cluster.local cluster.localoptions ndots:5
01:01:39.985 [main] INFO com.j.file_server_app.FileServerApp -
[cat, /etc/resolv.conf]: sterr
01:01:39.991 [main] INFO com.j.file_server_app.FileServerApp -
[ping, xx, -c, 1]: stout
01:01:39.991 [main] INFO com.j.file_server_app.FileServerApp -
[ping, xx, -c, 1]: sterrping: unknown host
01:01:39.998 [main] INFO com.j.file_server_app.FileServerApp -
[ping, host.docker.internal, -c, 1]: stout PING host.docker.internal (192.168.65.2): 56 data bytes64 bytes from 192.168.65.2: icmp_seq=0 ttl=37 time=0.757 ms--- host.docker.internal ping statistics ---1 packets transmitted, 1 packets received, 0% packet lossround-trip min/avg/max/stddev = 0.757/0.757/0.757/0.000 ms
01:01:39.998 [main] INFO com.j.file_server_app.FileServerApp -
[ping, host.docker.internal, -c, 1]: sterr
01:01:40.045 [main] INFO com.j.file_server_app.FileServerApp -
[nslookup, xx]: stout Server: 10.96.0.10Address: 10.96.0.10#53Non-authoritative answer:Name: host.docker.internalAddress: 192.168.65.2** server can't find xx: NXDOMAIN
01:01:40.045 [main] INFO com.j.file_server_app.FileServerApp -
[nslookup, xx]: sterr
01:01:40.048 [main] INFO com.j.file_server_app.FileServerApp -
[ifconfig]: stout eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500 inet 10.1.3.14 netmask 255.255.0.0 broadcast 0.0.0.0 ether ce:71:60:9a:75:05 txqueuelen 0 (Ethernet) RX packets 35 bytes 3776 (3.6 KiB) RX errors 0 dropped 0 overruns 0 frame 0 TX packets 22 bytes 1650 (1.6 KiB) TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536 inet 127.0.0.1 netmask 255.0.0.0 loop txqueuelen 1000 (Local Loopback) RX packets 1 bytes 29 (29.0 B) RX errors 0 dropped 0 overruns 0 frame 0 TX packets 1 bytes 29 (29.0 B) TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
01:01:40.048 [main] INFO com.j.file_server_app.FileServerApp -
[ifconfig]: sterr
It looks like CoreDNS is not working — but in the front-end pod, ping is OK. This is the front-end Dockerfile:
FROM library/nginx:stable-alpine
RUN mkdir /app
EXPOSE 80
ADD dist /app
COPY nginx.conf /etc/nginx/nginx.conf
Using docker inspect for fontend and backend container,both network setting are:
"NetworkSettings": {
"Bridge": "",
"SandboxID": "",
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"Ports": {},
"SandboxKey": "",
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {}
}
Both frontend and backend have a service with type: LoadBalancer. Now my question is: why does name resolution behave differently in these two pods?
// Print every graph name currently registered with the JanusGraphManager.
Set<String> graphNames = JanusGraphFactory.getGraphNames();
graphNames.forEach(System.out::println);
The above snippet produces the following exception
java.lang.IllegalStateException: Gremlin Server must be configured to use the JanusGraphManager.
at com.google.common.base.Preconditions.checkState(Preconditions.java:173)
at org.janusgraph.core.JanusGraphFactory.getGraphNames(JanusGraphFactory.java:175)
at com.JanusTest.controllers.JanusController.getPersonDetail(JanusController.java:66)
my.properties
gremlin.graph=org.janusgraph.core.JanusGraphFactory
storage.backend=cql
storage.hostname=127.0.0.1
cache.db-cache = true
cache.db-cache-clean-wait = 20
cache.db-cache-time = 180000
cache.db-cache-size = 0.5
index.search.backend=elasticsearch
index.search.hostname=127.0.0.1
gremlin-server.yaml
host: 0.0.0.0
port: 8182
scriptEvaluationTimeout: 30000
channelizer: org.apache.tinkerpop.gremlin.server.channel.WebSocketChannelizer
graphManager: org.janusgraph.graphdb.management.JanusGraphManager
graphs: {
ConfigurationManagementGraph: conf/my.properties,
}
plugins:
- janusgraph.imports
scriptEngines: {
gremlin-groovy: {
imports: [java.lang.Math],
staticImports: [java.lang.Math.PI],
scripts: [scripts/empty-sample.groovy]}}
serializers:
- { className: org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV1d0, config: { ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry] }}
- { className: org.apache.tinkerpop.gremlin.driver.ser.GryoLiteMessageSerializerV1d0, config: {ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry] }}
- { className: org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV1d0, config: { serializeResultToString: true }}
- { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerGremlinV1d0, config: { ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistryV1d0] }}
- { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerGremlinV2d0, config: { ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry] }}
- { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV1d0, config: { ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistryV1d0] }}
processors:
- { className: org.apache.tinkerpop.gremlin.server.op.session.SessionOpProcessor, config: { sessionTimeout: 28800000 }}
- { className: org.apache.tinkerpop.gremlin.server.op.traversal.TraversalOpProcessor, config: { cacheExpirationTime: 600000, cacheMaxSize: 1000 }}
metrics: {
consoleReporter: {enabled: true, interval: 180000},
csvReporter: {enabled: true, interval: 180000, fileName: /tmp/gremlin-server-metrics.csv},
jmxReporter: {enabled: true},
slf4jReporter: {enabled: true, interval: 180000},
gangliaReporter: {enabled: false, interval: 180000, addressingMode: MULTICAST},
graphiteReporter: {enabled: false, interval: 180000}}
maxInitialLineLength: 4096
maxHeaderSize: 8192
maxChunkSize: 8192
maxContentLength: 65536
maxAccumulationBufferComponents: 1024
resultIterationBatchSize: 64
writeBufferLowWaterMark: 32768
writeBufferHighWaterMark: 65536
The answer to this is similar to this other question.
The call to JanusGraphFactory.getGraphNames() needs to be sent to the remote server. If you're working in the Gremlin Console, first establish a remote sessioned connection then set remote console mode.
gremlin> :remote connect tinkerpop.server conf/remote.yaml session
==>Configured localhost/127.0.0.1:8182
gremlin> :remote console
==>All scripts will now be sent to Gremlin Server - [localhost:8182]-[5206cdde-b231-41fa-9e6c-69feac0fe2b2] - type ':remote console' to return to local mode
Then as described in the JanusGraph docs for "Listing the Graphs":
ConfiguredGraphFactory.getGraphNames() will return a set of graph names for which you have created configurations using the ConfigurationManagementGraph APIs.
JanusGraphFactory.getGraphNames() on the other hand returns a set of graph names for which you have instantiated and the references are stored inside the JanusGraphManager.
If you are not using the Gremlin Console, then you should be using a remote client, such as the TinkerPop gremlin-driver (Java), to send your requests to the Gremlin Server.
I have a system with HTTP POST requests and it runs with Spring 5 (standalone tomcat). In short it looks like this:
client (Apache AB) ----> micro service (java or golang) --> RabbitMQ --> Core(spring + tomcat).
The thing is, when I use my Java (Spring) service, it is ok. AB shows this output:
ab -n 1000 -k -s 2 -c 10 -s 60 -p test2.sh -A 113:113 -T 'application/json' https://127.0.0.1:8449/SecureChat/chat/v1/rest-message/send
This is ApacheBench, Version 2.3 <$Revision: 1807734 $>
Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
Licensed to The Apache Software Foundation, http://www.apache.org/
Benchmarking 127.0.0.1 (be patient)
Completed 100 requests
...
Completed 1000 requests
Finished 1000 requests
Server Software:
Server Hostname: 127.0.0.1
Server Port: 8449
SSL/TLS Protocol: TLSv1.2,ECDHE-RSA-AES256-GCM-SHA384,2048,256
Document Path: /rest-message/send
Document Length: 39 bytes
Concurrency Level: 10
Time taken for tests: 434.853 seconds
Complete requests: 1000
Failed requests: 0
Keep-Alive requests: 0
Total transferred: 498000 bytes
Total body sent: 393000
HTML transferred: 39000 bytes
Requests per second: 2.30 [#/sec] (mean)
Time per request: 4348.528 [ms] (mean)
Time per request: 434.853 [ms] (mean, across all concurrent
requests)
Transfer rate: 1.12 [Kbytes/sec] received
0.88 kb/s sent
2.00 kb/s total
Connection Times (ms)
min mean[+/-sd] median max
Connect: 4 14 7.6 17 53
Processing: 1110 4317 437.2 4285 8383
Waiting: 1107 4314 437.2 4282 8377
Total: 1126 4332 436.8 4300 8403
That is through TLS.
But when I try to use my Golang service I get timeout:
Benchmarking 127.0.0.1 (be patient)...apr_pollset_poll: The timeout specified has expired (70007)
Total of 92 requests completed
And this output:
ab -n 100 -k -s 2 -c 10 -s 60 -p test2.sh -T 'application/json' http://127.0.0.1:8089/
This is ApacheBench, Version 2.3 <$Revision: 1807734 $>
Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
Licensed to The Apache Software Foundation, http://www.apache.org/
Benchmarking 127.0.0.1 (be patient)...^C
Server Software:
Server Hostname: 127.0.0.1
Server Port: 8089
Document Path: /
Document Length: 39 bytes
Concurrency Level: 10
Time taken for tests: 145.734 seconds
Complete requests: 92
Failed requests: 1
(Connect: 0, Receive: 0, Length: 1, Exceptions: 0)
Keep-Alive requests: 91
Total transferred: 16380 bytes
Total body sent: 32200
HTML transferred: 3549 bytes
Requests per second: 0.63 [#/sec] (mean)
Time per request: 15840.663 [ms] (mean)
Time per request: 1584.066 [ms] (mean, across all concurrent requests)
Transfer rate: 0.11 [Kbytes/sec] received
0.22 kb/s sent
0.33 kb/s total
Connection Times (ms)
min mean[+/-sd] median max
Connect: 0 0 0.0 0 0
Processing: 1229 1494 1955.9 1262 20000
Waiting: 1229 1291 143.8 1262 2212
Total: 1229 1494 1955.9 1262 20000
That is over plain TCP.
I guess I have some mistakes in my code. I made it in one file
// initAmqp dials RabbitMQ at rabbitUrl and stores the connection in the
// package-level conn. On failure the process exits via failOnError.
func initAmqp(rabbitUrl string) {
// err is declared with var (not :=) so the assignment below targets the
// package-level conn instead of shadowing it with a local.
var err error
conn, err = amqp.Dial(rabbitUrl)
failOnError(err, "Failed to connect to RabbitMQ")
}
// main loads the gcfg configuration, optionally connects to RabbitMQ, and
// serves HTTP with a bounded-concurrency handler until the server stops.
func main() {
	if err := gcfg.ReadFileInto(&cfg, "config.gcfg"); err != nil {
		log.Fatal(err)
	}
	PrintConfig(cfg)

	if cfg.Section_rabbit.RabbitUrl != "" {
		initAmqp(cfg.Section_rabbit.RabbitUrl)
		// Bug fix: only defer Close when a connection was actually
		// opened. The original deferred unconditionally, which would
		// dereference a nil *amqp.Connection at shutdown when no
		// RabbitUrl is configured.
		defer conn.Close()
	}

	mux := http.NewServeMux()
	mux.Handle("/", NewLimitHandler(1000, newTestHandler()))

	server := http.Server{
		Addr:         cfg.Section_basic.Port,
		Handler:      mux,
		ReadTimeout:  20 * time.Second,
		WriteTimeout: 20 * time.Second,
	}

	log.Println(server.ListenAndServe())
}
// NewLimitHandler wraps handler so that at most maxConns requests are
// served concurrently. The buffered channel acts as a token bucket:
// ServeHTTP takes a token before dispatching and returns it afterwards.
func NewLimitHandler(maxConns int, handler http.Handler) http.Handler {
	lh := &limitHandler{
		connc:   make(chan struct{}, maxConns),
		handler: handler,
	}
	// Seed the bucket with one token per permitted in-flight request.
	for n := 0; n < maxConns; n++ {
		lh.connc <- struct{}{}
	}
	return lh
}
// newTestHandler exposes the package-level handler function as an
// http.Handler. http.HandlerFunc adapts it directly, so no closure is
// needed.
func newTestHandler() http.Handler {
	return http.HandlerFunc(handler)
}
// handler reads the request body, forwards it to RabbitMQ via
// publishMessages, and writes the broker's reply back to the client.
func handler(w http.ResponseWriter, r *http.Request) {
	b, err := ioutil.ReadAll(r.Body)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte("500 - Something bad happened!"))
		return
	}
	fmt.Println("message is ", string(b))
	res := publishMessages(string(b))
	// Bug fix: WriteHeader must be called BEFORE the first Write. The
	// original called it after Write, which is a no-op (Write already
	// sent an implicit 200) and triggers net/http's "superfluous
	// response.WriteHeader call" warning.
	w.WriteHeader(http.StatusOK)
	w.Write([]byte(res))
	counter++
}
// publishMessages performs an RPC round-trip over RabbitMQ: it publishes
// payload to the configured routing key with a fresh exclusive reply queue
// and a random correlation id, then blocks until the matching reply arrives.
// It returns the reply body, or "" if the consume stream closes first.
//
// NOTE(review): opening a channel, declaring a reply queue, and registering
// a consumer on EVERY request is expensive and is the likely cause of the
// poor throughput seen under load — consider reusing a channel and a single
// reply queue across requests.
func publishMessages(payload string) string {
	ch, err := conn.Channel()
	failOnError(err, "Failed to open a channel")
	// Close the per-request channel when done. (failOnError exits the
	// process on error, so deferring after it does not leak in practice.)
	defer ch.Close()

	// Bug fix: declare into a LOCAL queue variable. The original assigned
	// the package-level q, which is a data race when requests run
	// concurrently.
	q, err := ch.QueueDeclare(
		"",    // name: broker-generated
		false, // durable
		false, // delete when unused
		true,  // exclusive
		false, // noWait
		nil,   // arguments
	)
	failOnError(err, "Failed to declare a queue")

	msgs, err := ch.Consume(
		q.Name, // queue
		"",     // consumer
		true,   // auto-ack
		false,  // exclusive
		false,  // no-local
		false,  // no-wait
		nil,    // args
	)
	failOnError(err, "Failed to register a consumer")

	corrId := randomString(32)
	log.Println("corrId ", corrId)

	err = ch.Publish(
		"",                            // exchange
		cfg.Section_rabbit.RabbitQeue, // routing key
		false,                         // mandatory
		false,                         // immediate
		amqp.Publishing{
			DeliveryMode:  amqp.Transient,
			ContentType:   "application/json",
			CorrelationId: corrId,
			Body:          []byte(payload),
			Timestamp:     time.Now(),
			ReplyTo:       q.Name,
		})
	failOnError(err, "Failed to Publish on RabbitMQ")

	// Wait for the delivery carrying our correlation id; anything else on
	// the exclusive reply queue is ignored. (A stray failOnError on the
	// already-checked publish error was removed here as dead code.)
	for d := range msgs {
		if corrId == d.CorrelationId {
			log.Println("result = ", string(d.Body))
			return string(d.Body)
		}
		log.Println("waiting for result = ")
	}
	return ""
}
Can someone help?
EDIT
here are my variables
// limitHandler caps the number of concurrently served requests: connc holds
// one token per allowed in-flight request (see NewLimitHandler/ServeHTTP).
type limitHandler struct {
connc chan struct{}
handler http.Handler
}
// Package-level AMQP state shared by all requests.
var conn *amqp.Connection
var q amqp.Queue
EDIT 2
// ServeHTTP admits the request only when a concurrency token is available
// on connc; otherwise it fails fast with 503. The token is returned after
// the wrapped handler finishes.
//
// NOTE(review): if the wrapped handler panics, the token is never put back
// and the effective limit shrinks permanently — consider a deferred return.
func (h *limitHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
select {
case <-h.connc:
fmt.Println("ServeHTTP");
h.handler.ServeHTTP(w, req)
// Return the token so another request may proceed.
h.connc <- struct{}{}
default:
// No token available: reject immediately instead of queueing.
http.Error(w, "503 too busy", http.StatusServiceUnavailable)
}
}
EDIT 3
// failOnError terminates the process with msg and err when err is non-nil;
// it is a no-op for a nil error.
func failOnError(err error, msg string) {
	if err != nil {
		// log.Fatalf calls os.Exit(1), so the panic that used to follow
		// it could never execute; that dead code has been removed.
		log.Fatalf("%s: %s", msg, err)
	}
}